986 files changed, 9556 insertions, 5882 deletions
| @@ -21,6 +21,7 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com> | |||
| 21 | Andrew Morton <akpm@linux-foundation.org> | 21 | Andrew Morton <akpm@linux-foundation.org> |
| 22 | Andrew Vasquez <andrew.vasquez@qlogic.com> | 22 | Andrew Vasquez <andrew.vasquez@qlogic.com> |
| 23 | Andy Adamson <andros@citi.umich.edu> | 23 | Andy Adamson <andros@citi.umich.edu> |
| 24 | Antoine Tenart <antoine.tenart@free-electrons.com> | ||
| 24 | Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com> | 25 | Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com> |
| 25 | Archit Taneja <archit@ti.com> | 26 | Archit Taneja <archit@ti.com> |
| 26 | Arnaud Patard <arnaud.patard@rtp-net.org> | 27 | Arnaud Patard <arnaud.patard@rtp-net.org> |
| @@ -30,6 +31,9 @@ Axel Lin <axel.lin@gmail.com> | |||
| 30 | Ben Gardner <bgardner@wabtec.com> | 31 | Ben Gardner <bgardner@wabtec.com> |
| 31 | Ben M Cahill <ben.m.cahill@intel.com> | 32 | Ben M Cahill <ben.m.cahill@intel.com> |
| 32 | Björn Steinbrink <B.Steinbrink@gmx.de> | 33 | Björn Steinbrink <B.Steinbrink@gmx.de> |
| 34 | Boris Brezillon <boris.brezillon@free-electrons.com> | ||
| 35 | Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon.dev@gmail.com> | ||
| 36 | Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon@overkiz.com> | ||
| 33 | Brian Avery <b.avery@hp.com> | 37 | Brian Avery <b.avery@hp.com> |
| 34 | Brian King <brking@us.ibm.com> | 38 | Brian King <brking@us.ibm.com> |
| 35 | Christoph Hellwig <hch@lst.de> | 39 | Christoph Hellwig <hch@lst.de> |
| @@ -89,6 +93,7 @@ Leonid I Ananiev <leonid.i.ananiev@intel.com> | |||
| 89 | Linas Vepstas <linas@austin.ibm.com> | 93 | Linas Vepstas <linas@austin.ibm.com> |
| 90 | Mark Brown <broonie@sirena.org.uk> | 94 | Mark Brown <broonie@sirena.org.uk> |
| 91 | Matthieu CASTET <castet.matthieu@free.fr> | 95 | Matthieu CASTET <castet.matthieu@free.fr> |
| 96 | Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> <mchehab@infradead.org> <mchehab@redhat.com> <m.chehab@samsung.com> <mchehab@osg.samsung.com> <mchehab@s-opensource.com> | ||
| 92 | Mayuresh Janorkar <mayur@ti.com> | 97 | Mayuresh Janorkar <mayur@ti.com> |
| 93 | Michael Buesch <m@bues.ch> | 98 | Michael Buesch <m@bues.ch> |
| 94 | Michel Dänzer <michel@tungstengraphics.com> | 99 | Michel Dänzer <michel@tungstengraphics.com> |
| @@ -122,6 +127,7 @@ Santosh Shilimkar <santosh.shilimkar@oracle.org> | |||
| 122 | Sascha Hauer <s.hauer@pengutronix.de> | 127 | Sascha Hauer <s.hauer@pengutronix.de> |
| 123 | S.Çağlar Onur <caglar@pardus.org.tr> | 128 | S.Çağlar Onur <caglar@pardus.org.tr> |
| 124 | Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com> | 129 | Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com> |
| 130 | Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com> <shuah.khan@hp.com> <shuahkh@osg.samsung.com> <shuah.kh@samsung.com> | ||
| 125 | Simon Kelley <simon@thekelleys.org.uk> | 131 | Simon Kelley <simon@thekelleys.org.uk> |
| 126 | Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr> | 132 | Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr> |
| 127 | Stephen Hemminger <shemminger@osdl.org> | 133 | Stephen Hemminger <shemminger@osdl.org> |
| @@ -649,6 +649,7 @@ D: Configure, Menuconfig, xconfig | |||
| 649 | 649 | ||
| 650 | N: Mauro Carvalho Chehab | 650 | N: Mauro Carvalho Chehab |
| 651 | E: m.chehab@samsung.org | 651 | E: m.chehab@samsung.org |
| 652 | E: mchehab@osg.samsung.com | ||
| 652 | E: mchehab@infradead.org | 653 | E: mchehab@infradead.org |
| 653 | D: Media subsystem (V4L/DVB) drivers and core | 654 | D: Media subsystem (V4L/DVB) drivers and core |
| 654 | D: EDAC drivers and EDAC 3.0 core rework | 655 | D: EDAC drivers and EDAC 3.0 core rework |
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uvc b/Documentation/ABI/testing/configfs-usb-gadget-uvc index 2f4a0051b32d..1ba0d0fda9c0 100644 --- a/Documentation/ABI/testing/configfs-usb-gadget-uvc +++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | What: /config/usb-gadget/gadget/functions/uvc.name | 1 | What: /config/usb-gadget/gadget/functions/uvc.name |
| 2 | Date: Dec 2014 | 2 | Date: Dec 2014 |
| 3 | KernelVersion: 3.20 | 3 | KernelVersion: 4.0 |
| 4 | Description: UVC function directory | 4 | Description: UVC function directory |
| 5 | 5 | ||
| 6 | streaming_maxburst - 0..15 (ss only) | 6 | streaming_maxburst - 0..15 (ss only) |
| @@ -9,37 +9,37 @@ Description: UVC function directory | |||
| 9 | 9 | ||
| 10 | What: /config/usb-gadget/gadget/functions/uvc.name/control | 10 | What: /config/usb-gadget/gadget/functions/uvc.name/control |
| 11 | Date: Dec 2014 | 11 | Date: Dec 2014 |
| 12 | KernelVersion: 3.20 | 12 | KernelVersion: 4.0 |
| 13 | Description: Control descriptors | 13 | Description: Control descriptors |
| 14 | 14 | ||
| 15 | What: /config/usb-gadget/gadget/functions/uvc.name/control/class | 15 | What: /config/usb-gadget/gadget/functions/uvc.name/control/class |
| 16 | Date: Dec 2014 | 16 | Date: Dec 2014 |
| 17 | KernelVersion: 3.20 | 17 | KernelVersion: 4.0 |
| 18 | Description: Class descriptors | 18 | Description: Class descriptors |
| 19 | 19 | ||
| 20 | What: /config/usb-gadget/gadget/functions/uvc.name/control/class/ss | 20 | What: /config/usb-gadget/gadget/functions/uvc.name/control/class/ss |
| 21 | Date: Dec 2014 | 21 | Date: Dec 2014 |
| 22 | KernelVersion: 3.20 | 22 | KernelVersion: 4.0 |
| 23 | Description: Super speed control class descriptors | 23 | Description: Super speed control class descriptors |
| 24 | 24 | ||
| 25 | What: /config/usb-gadget/gadget/functions/uvc.name/control/class/fs | 25 | What: /config/usb-gadget/gadget/functions/uvc.name/control/class/fs |
| 26 | Date: Dec 2014 | 26 | Date: Dec 2014 |
| 27 | KernelVersion: 3.20 | 27 | KernelVersion: 4.0 |
| 28 | Description: Full speed control class descriptors | 28 | Description: Full speed control class descriptors |
| 29 | 29 | ||
| 30 | What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal | 30 | What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal |
| 31 | Date: Dec 2014 | 31 | Date: Dec 2014 |
| 32 | KernelVersion: 3.20 | 32 | KernelVersion: 4.0 |
| 33 | Description: Terminal descriptors | 33 | Description: Terminal descriptors |
| 34 | 34 | ||
| 35 | What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output | 35 | What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output |
| 36 | Date: Dec 2014 | 36 | Date: Dec 2014 |
| 37 | KernelVersion: 3.20 | 37 | KernelVersion: 4.0 |
| 38 | Description: Output terminal descriptors | 38 | Description: Output terminal descriptors |
| 39 | 39 | ||
| 40 | What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output/default | 40 | What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output/default |
| 41 | Date: Dec 2014 | 41 | Date: Dec 2014 |
| 42 | KernelVersion: 3.20 | 42 | KernelVersion: 4.0 |
| 43 | Description: Default output terminal descriptors | 43 | Description: Default output terminal descriptors |
| 44 | 44 | ||
| 45 | All attributes read only: | 45 | All attributes read only: |
| @@ -53,12 +53,12 @@ Description: Default output terminal descriptors | |||
| 53 | 53 | ||
| 54 | What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera | 54 | What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera |
| 55 | Date: Dec 2014 | 55 | Date: Dec 2014 |
| 56 | KernelVersion: 3.20 | 56 | KernelVersion: 4.0 |
| 57 | Description: Camera terminal descriptors | 57 | Description: Camera terminal descriptors |
| 58 | 58 | ||
| 59 | What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera/default | 59 | What: /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera/default |
| 60 | Date: Dec 2014 | 60 | Date: Dec 2014 |
| 61 | KernelVersion: 3.20 | 61 | KernelVersion: 4.0 |
| 62 | Description: Default camera terminal descriptors | 62 | Description: Default camera terminal descriptors |
| 63 | 63 | ||
| 64 | All attributes read only: | 64 | All attributes read only: |
| @@ -75,12 +75,12 @@ Description: Default camera terminal descriptors | |||
| 75 | 75 | ||
| 76 | What: /config/usb-gadget/gadget/functions/uvc.name/control/processing | 76 | What: /config/usb-gadget/gadget/functions/uvc.name/control/processing |
| 77 | Date: Dec 2014 | 77 | Date: Dec 2014 |
| 78 | KernelVersion: 3.20 | 78 | KernelVersion: 4.0 |
| 79 | Description: Processing unit descriptors | 79 | Description: Processing unit descriptors |
| 80 | 80 | ||
| 81 | What: /config/usb-gadget/gadget/functions/uvc.name/control/processing/default | 81 | What: /config/usb-gadget/gadget/functions/uvc.name/control/processing/default |
| 82 | Date: Dec 2014 | 82 | Date: Dec 2014 |
| 83 | KernelVersion: 3.20 | 83 | KernelVersion: 4.0 |
| 84 | Description: Default processing unit descriptors | 84 | Description: Default processing unit descriptors |
| 85 | 85 | ||
| 86 | All attributes read only: | 86 | All attributes read only: |
| @@ -94,49 +94,49 @@ Description: Default processing unit descriptors | |||
| 94 | 94 | ||
| 95 | What: /config/usb-gadget/gadget/functions/uvc.name/control/header | 95 | What: /config/usb-gadget/gadget/functions/uvc.name/control/header |
| 96 | Date: Dec 2014 | 96 | Date: Dec 2014 |
| 97 | KernelVersion: 3.20 | 97 | KernelVersion: 4.0 |
| 98 | Description: Control header descriptors | 98 | Description: Control header descriptors |
| 99 | 99 | ||
| 100 | What: /config/usb-gadget/gadget/functions/uvc.name/control/header/name | 100 | What: /config/usb-gadget/gadget/functions/uvc.name/control/header/name |
| 101 | Date: Dec 2014 | 101 | Date: Dec 2014 |
| 102 | KernelVersion: 3.20 | 102 | KernelVersion: 4.0 |
| 103 | Description: Specific control header descriptors | 103 | Description: Specific control header descriptors |
| 104 | 104 | ||
| 105 | dwClockFrequency | 105 | dwClockFrequency |
| 106 | bcdUVC | 106 | bcdUVC |
| 107 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming | 107 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming |
| 108 | Date: Dec 2014 | 108 | Date: Dec 2014 |
| 109 | KernelVersion: 3.20 | 109 | KernelVersion: 4.0 |
| 110 | Description: Streaming descriptors | 110 | Description: Streaming descriptors |
| 111 | 111 | ||
| 112 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class | 112 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class |
| 113 | Date: Dec 2014 | 113 | Date: Dec 2014 |
| 114 | KernelVersion: 3.20 | 114 | KernelVersion: 4.0 |
| 115 | Description: Streaming class descriptors | 115 | Description: Streaming class descriptors |
| 116 | 116 | ||
| 117 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/ss | 117 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/ss |
| 118 | Date: Dec 2014 | 118 | Date: Dec 2014 |
| 119 | KernelVersion: 3.20 | 119 | KernelVersion: 4.0 |
| 120 | Description: Super speed streaming class descriptors | 120 | Description: Super speed streaming class descriptors |
| 121 | 121 | ||
| 122 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/hs | 122 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/hs |
| 123 | Date: Dec 2014 | 123 | Date: Dec 2014 |
| 124 | KernelVersion: 3.20 | 124 | KernelVersion: 4.0 |
| 125 | Description: High speed streaming class descriptors | 125 | Description: High speed streaming class descriptors |
| 126 | 126 | ||
| 127 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/fs | 127 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class/fs |
| 128 | Date: Dec 2014 | 128 | Date: Dec 2014 |
| 129 | KernelVersion: 3.20 | 129 | KernelVersion: 4.0 |
| 130 | Description: Full speed streaming class descriptors | 130 | Description: Full speed streaming class descriptors |
| 131 | 131 | ||
| 132 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching | 132 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching |
| 133 | Date: Dec 2014 | 133 | Date: Dec 2014 |
| 134 | KernelVersion: 3.20 | 134 | KernelVersion: 4.0 |
| 135 | Description: Color matching descriptors | 135 | Description: Color matching descriptors |
| 136 | 136 | ||
| 137 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching/default | 137 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching/default |
| 138 | Date: Dec 2014 | 138 | Date: Dec 2014 |
| 139 | KernelVersion: 3.20 | 139 | KernelVersion: 4.0 |
| 140 | Description: Default color matching descriptors | 140 | Description: Default color matching descriptors |
| 141 | 141 | ||
| 142 | All attributes read only: | 142 | All attributes read only: |
| @@ -150,12 +150,12 @@ Description: Default color matching descriptors | |||
| 150 | 150 | ||
| 151 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg | 151 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg |
| 152 | Date: Dec 2014 | 152 | Date: Dec 2014 |
| 153 | KernelVersion: 3.20 | 153 | KernelVersion: 4.0 |
| 154 | Description: MJPEG format descriptors | 154 | Description: MJPEG format descriptors |
| 155 | 155 | ||
| 156 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name | 156 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name |
| 157 | Date: Dec 2014 | 157 | Date: Dec 2014 |
| 158 | KernelVersion: 3.20 | 158 | KernelVersion: 4.0 |
| 159 | Description: Specific MJPEG format descriptors | 159 | Description: Specific MJPEG format descriptors |
| 160 | 160 | ||
| 161 | All attributes read only, | 161 | All attributes read only, |
| @@ -174,7 +174,7 @@ Description: Specific MJPEG format descriptors | |||
| 174 | 174 | ||
| 175 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name/name | 175 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name/name |
| 176 | Date: Dec 2014 | 176 | Date: Dec 2014 |
| 177 | KernelVersion: 3.20 | 177 | KernelVersion: 4.0 |
| 178 | Description: Specific MJPEG frame descriptors | 178 | Description: Specific MJPEG frame descriptors |
| 179 | 179 | ||
| 180 | dwFrameInterval - indicates how frame interval can be | 180 | dwFrameInterval - indicates how frame interval can be |
| @@ -196,12 +196,12 @@ Description: Specific MJPEG frame descriptors | |||
| 196 | 196 | ||
| 197 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed | 197 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed |
| 198 | Date: Dec 2014 | 198 | Date: Dec 2014 |
| 199 | KernelVersion: 3.20 | 199 | KernelVersion: 4.0 |
| 200 | Description: Uncompressed format descriptors | 200 | Description: Uncompressed format descriptors |
| 201 | 201 | ||
| 202 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name | 202 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name |
| 203 | Date: Dec 2014 | 203 | Date: Dec 2014 |
| 204 | KernelVersion: 3.20 | 204 | KernelVersion: 4.0 |
| 205 | Description: Specific uncompressed format descriptors | 205 | Description: Specific uncompressed format descriptors |
| 206 | 206 | ||
| 207 | bmaControls - this format's data for bmaControls in | 207 | bmaControls - this format's data for bmaControls in |
| @@ -221,7 +221,7 @@ Description: Specific uncompressed format descriptors | |||
| 221 | 221 | ||
| 222 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name/name | 222 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name/name |
| 223 | Date: Dec 2014 | 223 | Date: Dec 2014 |
| 224 | KernelVersion: 3.20 | 224 | KernelVersion: 4.0 |
| 225 | Description: Specific uncompressed frame descriptors | 225 | Description: Specific uncompressed frame descriptors |
| 226 | 226 | ||
| 227 | dwFrameInterval - indicates how frame interval can be | 227 | dwFrameInterval - indicates how frame interval can be |
| @@ -243,12 +243,12 @@ Description: Specific uncompressed frame descriptors | |||
| 243 | 243 | ||
| 244 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header | 244 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header |
| 245 | Date: Dec 2014 | 245 | Date: Dec 2014 |
| 246 | KernelVersion: 3.20 | 246 | KernelVersion: 4.0 |
| 247 | Description: Streaming header descriptors | 247 | Description: Streaming header descriptors |
| 248 | 248 | ||
| 249 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header/name | 249 | What: /config/usb-gadget/gadget/functions/uvc.name/streaming/header/name |
| 250 | Date: Dec 2014 | 250 | Date: Dec 2014 |
| 251 | KernelVersion: 3.20 | 251 | KernelVersion: 4.0 |
| 252 | Description: Specific streaming header descriptors | 252 | Description: Specific streaming header descriptors |
| 253 | 253 | ||
| 254 | All attributes read only: | 254 | All attributes read only: |
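The attributes above are created by the UVC function driver once a function directory exists. A minimal sketch of getting that far, assuming configfs is mounted at /sys/kernel/config and the libcomposite/usb_f_uvc modules are available (the gadget name "g1" and instance name "uvc.usb0" are arbitrary examples; binding to a configuration and UDC is omitted):

	# create a gadget and a UVC function instance; the control/ and
	# streaming/ hierarchies documented above are created by the driver
	mkdir -p /sys/kernel/config/usb_gadget/g1
	cd /sys/kernel/config/usb_gadget/g1
	mkdir functions/uvc.usb0
	ls functions/uvc.usb0/control functions/uvc.usb0/streaming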
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 index 6708c5e264aa..33e96f740639 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 +++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | What /sys/bus/iio/devices/iio:deviceX/in_proximity_raw | 1 | What /sys/bus/iio/devices/iio:deviceX/in_proximity_input |
| 2 | Date: March 2014 | 2 | Date: March 2014 |
| 3 | KernelVersion: 3.15 | 3 | KernelVersion: 3.15 |
| 4 | Contact: Matt Ranostay <mranostay@gmail.com> | 4 | Contact: Matt Ranostay <mranostay@gmail.com> |
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index de79efdad46c..8c68768ebee5 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl | |||
| @@ -128,16 +128,44 @@ X!Edrivers/base/interface.c | |||
| 128 | !Edrivers/base/platform.c | 128 | !Edrivers/base/platform.c |
| 129 | !Edrivers/base/bus.c | 129 | !Edrivers/base/bus.c |
| 130 | </sect1> | 130 | </sect1> |
| 131 | <sect1><title>Device Drivers DMA Management</title> | 131 | <sect1> |
| 132 | <title>Buffer Sharing and Synchronization</title> | ||
| 133 | <para> | ||
| 134 | The dma-buf subsystem provides the framework for sharing buffers | ||
| 135 | for hardware (DMA) access across multiple device drivers and | ||
| 136 | subsystems, and for synchronizing asynchronous hardware access. | ||
| 137 | </para> | ||
| 138 | <para> | ||
| 139 | This is used, for example, by drm "prime" multi-GPU support, but | ||
| 140 | is of course not limited to GPU use cases. | ||
| 141 | </para> | ||
| 142 | <para> | ||
| 143 | The three main components of this are: (1) dma-buf, representing | ||
| 144 | a sg_table and exposed to userspace as a file descriptor to allow | ||
| 145 | passing between devices, (2) fence, which provides a mechanism | ||
| 146 | to signal when one device has finished access, and (3) reservation, | ||
| 147 | which manages the shared or exclusive fence(s) associated with | ||
| 148 | the buffer. | ||
| 149 | </para> | ||
| 150 | <sect2><title>dma-buf</title> | ||
| 132 | !Edrivers/dma-buf/dma-buf.c | 151 | !Edrivers/dma-buf/dma-buf.c |
| 152 | !Iinclude/linux/dma-buf.h | ||
| 153 | </sect2> | ||
| 154 | <sect2><title>reservation</title> | ||
| 155 | !Pdrivers/dma-buf/reservation.c Reservation Object Overview | ||
| 156 | !Edrivers/dma-buf/reservation.c | ||
| 157 | !Iinclude/linux/reservation.h | ||
| 158 | </sect2> | ||
| 159 | <sect2><title>fence</title> | ||
| 133 | !Edrivers/dma-buf/fence.c | 160 | !Edrivers/dma-buf/fence.c |
| 134 | !Edrivers/dma-buf/seqno-fence.c | ||
| 135 | !Iinclude/linux/fence.h | 161 | !Iinclude/linux/fence.h |
| 162 | !Edrivers/dma-buf/seqno-fence.c | ||
| 136 | !Iinclude/linux/seqno-fence.h | 163 | !Iinclude/linux/seqno-fence.h |
| 137 | !Edrivers/dma-buf/reservation.c | ||
| 138 | !Iinclude/linux/reservation.h | ||
| 139 | !Edrivers/dma-buf/sync_file.c | 164 | !Edrivers/dma-buf/sync_file.c |
| 140 | !Iinclude/linux/sync_file.h | 165 | !Iinclude/linux/sync_file.h |
| 166 | </sect2> | ||
| 167 | </sect1> | ||
| 168 | <sect1><title>Device Drivers DMA Management</title> | ||
| 141 | !Edrivers/base/dma-coherent.c | 169 | !Edrivers/base/dma-coherent.c |
| 142 | !Edrivers/base/dma-mapping.c | 170 | !Edrivers/base/dma-mapping.c |
| 143 | </sect1> | 171 | </sect1> |
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index c6938e50e71f..4da60b463995 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt | |||
| @@ -56,6 +56,7 @@ stable kernels. | |||
| 56 | | ARM | MMU-500 | #841119,#826419 | N/A | | 56 | | ARM | MMU-500 | #841119,#826419 | N/A | |
| 57 | | | | | | | 57 | | | | | | |
| 58 | | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 | | 58 | | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 | |
| 59 | | Cavium | ThunderX ITS | #23144 | CAVIUM_ERRATUM_23144 | | ||
| 59 | | Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 | | 60 | | Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 | |
| 60 | | Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 | | 61 | | Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 | |
| 61 | | Cavium | ThunderX SMMUv2 | #27704 | N/A | | 62 | | Cavium | ThunderX SMMUv2 | #27704 | N/A | |
diff --git a/Documentation/devicetree/bindings/display/imx/ldb.txt b/Documentation/devicetree/bindings/display/imx/ldb.txt index 0a175d991b52..a407462c885e 100644 --- a/Documentation/devicetree/bindings/display/imx/ldb.txt +++ b/Documentation/devicetree/bindings/display/imx/ldb.txt | |||
| @@ -62,6 +62,7 @@ Required properties: | |||
| 62 | display-timings are used instead. | 62 | display-timings are used instead. |
| 63 | 63 | ||
| 64 | Optional properties (required if display-timings are used): | 64 | Optional properties (required if display-timings are used): |
| 65 | - ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing | ||
| 65 | - display-timings : A node that describes the display timings as defined in | 66 | - display-timings : A node that describes the display timings as defined in |
| 66 | Documentation/devicetree/bindings/display/display-timing.txt. | 67 | Documentation/devicetree/bindings/display/display-timing.txt. |
| 67 | - fsl,data-mapping : should be "spwg" or "jeida" | 68 | - fsl,data-mapping : should be "spwg" or "jeida" |
diff --git a/Documentation/devicetree/bindings/hwmon/ina2xx.txt b/Documentation/devicetree/bindings/hwmon/ina2xx.txt index 9bcd5e87830d..02af0d94e921 100644 --- a/Documentation/devicetree/bindings/hwmon/ina2xx.txt +++ b/Documentation/devicetree/bindings/hwmon/ina2xx.txt | |||
| @@ -7,6 +7,7 @@ Required properties: | |||
| 7 | - "ti,ina220" for ina220 | 7 | - "ti,ina220" for ina220 |
| 8 | - "ti,ina226" for ina226 | 8 | - "ti,ina226" for ina226 |
| 9 | - "ti,ina230" for ina230 | 9 | - "ti,ina230" for ina230 |
| 10 | - "ti,ina231" for ina231 | ||
| 10 | - reg: I2C address | 11 | - reg: I2C address |
| 11 | 12 | ||
| 12 | Optional properties: | 13 | Optional properties: |
diff --git a/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt b/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt index bfeabb843941..71191ff0e781 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt | |||
| @@ -44,8 +44,8 @@ Required properties: | |||
| 44 | - our-claim-gpio: The GPIO that we use to claim the bus. | 44 | - our-claim-gpio: The GPIO that we use to claim the bus. |
| 45 | - their-claim-gpios: The GPIOs that the other sides use to claim the bus. | 45 | - their-claim-gpios: The GPIOs that the other sides use to claim the bus. |
| 46 | Note that some implementations may only support a single other master. | 46 | Note that some implementations may only support a single other master. |
| 47 | - Standard I2C mux properties. See mux.txt in this directory. | 47 | - Standard I2C mux properties. See i2c-mux.txt in this directory. |
| 48 | - Single I2C child bus node at reg 0. See mux.txt in this directory. | 48 | - Single I2C child bus node at reg 0. See i2c-mux.txt in this directory. |
| 49 | 49 | ||
| 50 | Optional properties: | 50 | Optional properties: |
| 51 | - slew-delay-us: microseconds to wait for a GPIO to go high. Default is 10 us. | 51 | - slew-delay-us: microseconds to wait for a GPIO to go high. Default is 10 us. |
diff --git a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt index 6078aefe7ed4..7ce23ac61308 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt | |||
| @@ -27,7 +27,8 @@ Required properties: | |||
| 27 | - i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C | 27 | - i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C |
| 28 | parents. | 28 | parents. |
| 29 | 29 | ||
| 30 | Furthermore, I2C mux properties and child nodes. See mux.txt in this directory. | 30 | Furthermore, I2C mux properties and child nodes. See i2c-mux.txt in this |
| 31 | directory. | ||
| 31 | 32 | ||
| 32 | Example: | 33 | Example: |
| 33 | 34 | ||
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt index 66709a825541..21da3ecbb370 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt | |||
| @@ -22,8 +22,8 @@ Required properties: | |||
| 22 | - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side | 22 | - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side |
| 23 | port is connected to. | 23 | port is connected to. |
| 24 | - mux-gpios: list of gpios used to control the muxer | 24 | - mux-gpios: list of gpios used to control the muxer |
| 25 | * Standard I2C mux properties. See mux.txt in this directory. | 25 | * Standard I2C mux properties. See i2c-mux.txt in this directory. |
| 26 | * I2C child bus nodes. See mux.txt in this directory. | 26 | * I2C child bus nodes. See i2c-mux.txt in this directory. |
| 27 | 27 | ||
| 28 | Optional properties: | 28 | Optional properties: |
| 29 | - idle-state: value to set the muxer to when idle. When no value is | 29 | - idle-state: value to set the muxer to when idle. When no value is |
| @@ -33,7 +33,7 @@ For each i2c child node, an I2C child bus will be created. They will | |||
| 33 | be numbered based on their order in the device tree. | 33 | be numbered based on their order in the device tree. |
| 34 | 34 | ||
| 35 | Whenever an access is made to a device on a child bus, the value set | 35 | Whenever an access is made to a device on a child bus, the value set |
| 36 | in the revelant node's reg property will be output using the list of | 36 | in the relevant node's reg property will be output using the list of |
| 37 | GPIOs, the first in the list holding the least-significant value. | 37 | GPIOs, the first in the list holding the least-significant value. |
| 38 | 38 | ||
| 39 | If an idle state is defined, using the idle-state (optional) property, | 39 | If an idle state is defined, using the idle-state (optional) property, |
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt index ae8af1694e95..33119a98e144 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt | |||
| @@ -28,9 +28,9 @@ Also required are: | |||
| 28 | * Standard pinctrl properties that specify the pin mux state for each child | 28 | * Standard pinctrl properties that specify the pin mux state for each child |
| 29 | bus. See ../pinctrl/pinctrl-bindings.txt. | 29 | bus. See ../pinctrl/pinctrl-bindings.txt. |
| 30 | 30 | ||
| 31 | * Standard I2C mux properties. See mux.txt in this directory. | 31 | * Standard I2C mux properties. See i2c-mux.txt in this directory. |
| 32 | 32 | ||
| 33 | * I2C child bus nodes. See mux.txt in this directory. | 33 | * I2C child bus nodes. See i2c-mux.txt in this directory. |
| 34 | 34 | ||
| 35 | For each named state defined in the pinctrl-names property, an I2C child bus | 35 | For each named state defined in the pinctrl-names property, an I2C child bus |
| 36 | will be created. I2C child bus numbers are assigned based on the index into | 36 | will be created. I2C child bus numbers are assigned based on the index into |
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt index 688783fbe696..de00d7fc450b 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt | |||
| @@ -7,8 +7,8 @@ Required properties: | |||
| 7 | - compatible: i2c-mux-reg | 7 | - compatible: i2c-mux-reg |
| 8 | - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side | 8 | - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side |
| 9 | port is connected to. | 9 | port is connected to. |
| 10 | * Standard I2C mux properties. See mux.txt in this directory. | 10 | * Standard I2C mux properties. See i2c-mux.txt in this directory. |
| 11 | * I2C child bus nodes. See mux.txt in this directory. | 11 | * I2C child bus nodes. See i2c-mux.txt in this directory. |
| 12 | 12 | ||
| 13 | Optional properties: | 13 | Optional properties: |
| 14 | - reg: this pair of <offset size> specifies the register to control the mux. | 14 | - reg: this pair of <offset size> specifies the register to control the mux. |
| @@ -24,7 +24,7 @@ Optional properties: | |||
| 24 | given, it defaults to the last value used. | 24 | given, it defaults to the last value used. |
| 25 | 25 | ||
| 26 | Whenever an access is made to a device on a child bus, the value set | 26 | Whenever an access is made to a device on a child bus, the value set |
| 27 | in the revelant node's reg property will be output to the register. | 27 | in the relevant node's reg property will be output to the register. |
| 28 | 28 | ||
| 29 | If an idle state is defined, using the idle-state (optional) property, | 29 | If an idle state is defined, using the idle-state (optional) property, |
| 30 | whenever an access is not being made to a device on a child bus, the | 30 | whenever an access is not being made to a device on a child bus, the |
diff --git a/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt b/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt index 14aa6cf58201..6a9a63cb0543 100644 --- a/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt +++ b/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt | |||
| @@ -13,10 +13,10 @@ Optional properties: | |||
| 13 | initialization. This is an array of 28 values(u8). | 13 | initialization. This is an array of 28 values(u8). |
| 14 | 14 | ||
| 15 | - marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip. | 15 | - marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip. |
| 16 | firmware will use the pin to wakeup host system. | 16 | firmware will use the pin to wake up the host system (u16). |
| 17 | - marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host | 17 | - marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host |
| 18 | platform. The value will be configured to firmware. This | 18 | platform. The value will be configured to firmware. This |
| 19 | is needed to work chip's sleep feature as expected. | 19 | is needed for the chip's sleep feature to work as expected (u16). |
| 20 | - interrupt-parent: phandle of the parent interrupt controller | 20 | - interrupt-parent: phandle of the parent interrupt controller |
| 21 | - interrupts : interrupt pin number to the cpu. Driver will request an irq based | 21 | - interrupts : interrupt pin number to the cpu. Driver will request an irq based |
| 22 | on this interrupt number. During system suspend, the irq will be | 22 | on this interrupt number. During system suspend, the irq will be |
| @@ -50,7 +50,7 @@ calibration data is also available in below example. | |||
| 50 | 0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02 | 50 | 0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02 |
| 51 | 0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00 | 51 | 0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00 |
| 52 | 0x00 0x00 0xf0 0x00>; | 52 | 0x00 0x00 0xf0 0x00>; |
| 53 | marvell,wakeup-pin = <0x0d>; | 53 | marvell,wakeup-pin = /bits/ 16 <0x0d>; |
| 54 | marvell,wakeup-gap-ms = <0x64>; | 54 | marvell,wakeup-gap-ms = /bits/ 16 <0x64>; |
| 55 | }; | 55 | }; |
| 56 | }; | 56 | }; |
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index a7440bcd67ff..2c2500df0dce 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt | |||
| @@ -255,6 +255,7 @@ synology Synology, Inc. | |||
| 255 | SUNW Sun Microsystems, Inc | 255 | SUNW Sun Microsystems, Inc |
| 256 | tbs TBS Technologies | 256 | tbs TBS Technologies |
| 257 | tcl Toby Churchill Ltd. | 257 | tcl Toby Churchill Ltd. |
| 258 | technexion TechNexion | ||
| 258 | technologic Technologic Systems | 259 | technologic Technologic Systems |
| 259 | thine THine Electronics, Inc. | 260 | thine THine Electronics, Inc. |
| 260 | ti Texas Instruments | 261 | ti Texas Instruments |
| @@ -269,6 +270,7 @@ tronsmart Tronsmart | |||
| 269 | truly Truly Semiconductors Limited | 270 | truly Truly Semiconductors Limited |
| 270 | tyan Tyan Computer Corporation | 271 | tyan Tyan Computer Corporation |
| 271 | upisemi uPI Semiconductor Corp. | 272 | upisemi uPI Semiconductor Corp. |
| 273 | uniwest United Western Technologies Corp (UniWest) | ||
| 272 | urt United Radiant Technology Corporation | 274 | urt United Radiant Technology Corporation |
| 273 | usi Universal Scientific Industrial Co., Ltd. | 275 | usi Universal Scientific Industrial Co., Ltd. |
| 274 | v3 V3 Semiconductor | 276 | v3 V3 Semiconductor |
diff --git a/Documentation/filesystems/devpts.txt b/Documentation/filesystems/devpts.txt index 30d2fcb32f72..9f94fe276dea 100644 --- a/Documentation/filesystems/devpts.txt +++ b/Documentation/filesystems/devpts.txt | |||
| @@ -1,141 +1,26 @@ | |||
| 1 | Each mount of the devpts filesystem is now distinct such that ptys | ||
| 2 | and their indices allocated in one mount are independent from ptys | ||
| 3 | and their indices in all other mounts. | ||
| 1 | 4 | ||
| 2 | To support containers, we now allow multiple instances of devpts filesystem, | 5 | All mounts of the devpts filesystem now create a /dev/pts/ptmx node |
| 3 | such that indices of ptys allocated in one instance are independent of indices | 6 | with permissions 0000. |
| 4 | allocated in other instances of devpts. | ||
| 5 | 7 | ||
| 6 | To preserve backward compatibility, this support for multiple instances is | 8 | To retain backwards compatibility, a ptmx device node (aka any node |
| 7 | enabled only if: | 9 | created with "mknod name c 5 2") when opened will look for an instance |
| 10 | of devpts under the name "pts" in the same directory as the ptmx device | ||
| 11 | node. | ||
| 8 | 12 | ||
| 9 | - CONFIG_DEVPTS_MULTIPLE_INSTANCES=y, and | 13 | As an option instead of placing a /dev/ptmx device node at /dev/ptmx |
| 10 | - '-o newinstance' mount option is specified while mounting devpts | 14 | it is possible to place a symlink to /dev/pts/ptmx at /dev/ptmx or |
| 11 | 15 | to bind mount /dev/pts/ptmx to /dev/ptmx. If you opt for using |
| 12 | IOW, devpts now supports both single-instance and multi-instance semantics. | 16 | the devpts filesystem in this manner, devpts should be mounted with |
| 13 | 17 | ptmxmode=0666, or chmod 0666 /dev/pts/ptmx should be called. |
| 14 | If CONFIG_DEVPTS_MULTIPLE_INSTANCES=n, there is no change in behavior and | ||
| 15 | this referred to as the "legacy" mode. In this mode, the new mount options | ||
| 16 | (-o newinstance and -o ptmxmode) will be ignored with a 'bogus option' message | ||
| 17 | on console. | ||
| 18 | |||
| 19 | If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and devpts is mounted without the | ||
| 20 | 'newinstance' option (as in current start-up scripts) the new mount binds | ||
| 21 | to the initial kernel mount of devpts. This mode is referred to as the | ||
| 22 | 'single-instance' mode and the current, single-instance semantics are | ||
| 23 | preserved, i.e PTYs are common across the system. | ||
| 24 | |||
| 25 | The only difference between this single-instance mode and the legacy mode | ||
| 26 | is the presence of new, '/dev/pts/ptmx' node with permissions 0000, which | ||
| 27 | can safely be ignored. | ||
| 28 | |||
| 29 | If CONFIG_DEVPTS_MULTIPLE_INSTANCES=y and 'newinstance' option is specified, | ||
| 30 | the mount is considered to be in the multi-instance mode and a new instance | ||
| 31 | of the devpts fs is created. Any ptys created in this instance are independent | ||
| 32 | of ptys in other instances of devpts. Like in the single-instance mode, the | ||
| 33 | /dev/pts/ptmx node is present. To effectively use the multi-instance mode, | ||
| 34 | open of /dev/ptmx must be a redirected to '/dev/pts/ptmx' using a symlink or | ||
| 35 | bind-mount. | ||
| 36 | |||
| 37 | Eg: A container startup script could do the following: | ||
| 38 | |||
| 39 | $ chmod 0666 /dev/pts/ptmx | ||
| 40 | $ rm /dev/ptmx | ||
| 41 | $ ln -s pts/ptmx /dev/ptmx | ||
| 42 | $ ns_exec -cm /bin/bash | ||
| 43 | |||
| 44 | # We are now in new container | ||
| 45 | |||
| 46 | $ umount /dev/pts | ||
| 47 | $ mount -t devpts -o newinstance lxcpts /dev/pts | ||
| 48 | $ sshd -p 1234 | ||
| 49 | |||
| 50 | where 'ns_exec -cm /bin/bash' calls clone() with CLONE_NEWNS flag and execs | ||
| 51 | /bin/bash in the child process. A pty created by the sshd is not visible in | ||
| 52 | the original mount of /dev/pts. | ||
| 53 | 18 | ||
| 54 | Total count of pty pairs in all instances is limited by sysctls: | 19 | Total count of pty pairs in all instances is limited by sysctls: |
| 55 | kernel.pty.max = 4096 - global limit | 20 | kernel.pty.max = 4096 - global limit |
| 56 | kernel.pty.reserve = 1024 - reserve for initial instance | 21 | kernel.pty.reserve = 1024 - reserved for filesystems mounted from the initial mount namespace |
| 57 | kernel.pty.nr - current count of ptys | 22 | kernel.pty.nr - current count of ptys |
| 58 | 23 | ||
| 59 | Per-instance limit could be set by adding mount option "max=<count>". | 24 | Per-instance limit could be set by adding mount option "max=<count>". |
| 60 | This feature was added in kernel 3.4 together with sysctl kernel.pty.reserve. | 25 | This feature was added in kernel 3.4 together with sysctl kernel.pty.reserve. |
| 61 | In kernels older than 3.4 sysctl kernel.pty.max works as per-instance limit. | 26 | In kernels older than 3.4 sysctl kernel.pty.max works as per-instance limit. |
| 62 | |||
| 63 | User-space changes | ||
| 64 | ------------------ | ||
| 65 | |||
| 66 | In multi-instance mode (i.e '-o newinstance' mount option is specified at least | ||
| 67 | once), following user-space issues should be noted. | ||
| 68 | |||
| 69 | 1. If -o newinstance mount option is never used, /dev/pts/ptmx can be ignored | ||
| 70 | and no change is needed to system-startup scripts. | ||
| 71 | |||
| 72 | 2. To effectively use multi-instance mode (i.e -o newinstance is specified) | ||
| 73 | administrators or startup scripts should "redirect" open of /dev/ptmx to | ||
| 74 | /dev/pts/ptmx using either a bind mount or symlink. | ||
| 75 | |||
| 76 | $ mount -t devpts -o newinstance devpts /dev/pts | ||
| 77 | |||
| 78 | followed by either | ||
| 79 | |||
| 80 | $ rm /dev/ptmx | ||
| 81 | $ ln -s pts/ptmx /dev/ptmx | ||
| 82 | $ chmod 666 /dev/pts/ptmx | ||
| 83 | or | ||
| 84 | $ mount -o bind /dev/pts/ptmx /dev/ptmx | ||
| 85 | |||
| 86 | 3. The '/dev/ptmx -> pts/ptmx' symlink is the preferred method since it | ||
| 87 | enables better error-reporting and treats both single-instance and | ||
| 88 | multi-instance mounts similarly. | ||
| 89 | |||
| 90 | But this method requires that system-startup scripts set the mode of | ||
| 91 | /dev/pts/ptmx correctly (default mode is 0000). The scripts can set the | ||
| 92 | mode by, either | ||
| 93 | |||
| 94 | - adding ptmxmode mount option to devpts entry in /etc/fstab, or | ||
| 95 | - using 'chmod 0666 /dev/pts/ptmx' | ||
| 96 | |||
| 97 | 4. If multi-instance mode mount is needed for containers, but the system | ||
| 98 | startup scripts have not yet been updated, container-startup scripts | ||
| 99 | should bind mount /dev/ptmx to /dev/pts/ptmx to avoid breaking single- | ||
| 100 | instance mounts. | ||
| 101 | |||
| 102 | Or, in general, container-startup scripts should use: | ||
| 103 | |||
| 104 | mount -t devpts -o newinstance -o ptmxmode=0666 devpts /dev/pts | ||
| 105 | if [ ! -L /dev/ptmx ]; then | ||
| 106 | mount -o bind /dev/pts/ptmx /dev/ptmx | ||
| 107 | fi | ||
| 108 | |||
| 109 | When all devpts mounts are multi-instance, /dev/ptmx can permanently be | ||
| 110 | a symlink to pts/ptmx and the bind mount can be ignored. | ||
| 111 | |||
| 112 | 5. A multi-instance mount that is not accompanied by the /dev/ptmx to | ||
| 113 | /dev/pts/ptmx redirection would result in an unusable/unreachable pty. | ||
| 114 | |||
| 115 | mount -t devpts -o newinstance lxcpts /dev/pts | ||
| 116 | |||
| 117 | immediately followed by: | ||
| 118 | |||
| 119 | open("/dev/ptmx") | ||
| 120 | |||
| 121 | would create a pty, say /dev/pts/7, in the initial kernel mount. | ||
| 122 | But /dev/pts/7 would be invisible in the new mount. | ||
| 123 | |||
| 124 | 6. The permissions for /dev/pts/ptmx node should be specified when mounting | ||
| 125 | /dev/pts, using the '-o ptmxmode=%o' mount option (default is 0000). | ||
| 126 | |||
| 127 | mount -t devpts -o newinstance -o ptmxmode=0644 devpts /dev/pts | ||
| 128 | |||
| 129 | The permissions can be later be changed as usual with 'chmod'. | ||
| 130 | |||
| 131 | chmod 666 /dev/pts/ptmx | ||
| 132 | |||
| 133 | 7. A mount of devpts without the 'newinstance' option results in binding to | ||
| 134 | initial kernel mount. This behavior while preserving legacy semantics, | ||
| 135 | does not provide strict isolation in a container environment. i.e by | ||
| 136 | mounting devpts without the 'newinstance' option, a container could | ||
| 137 | get visibility into the 'host' or root container's devpts. | ||
| 138 | |||
| 139 | To workaround this and have strict isolation, all mounts of devpts, | ||
| 140 | including the mount in the root container, should use the newinstance | ||
| 141 | option. | ||
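Under the new semantics described above, a start-up script only has to make /dev/ptmx reach /dev/pts/ptmx. A possible sketch, assuming the usual /dev/pts mount point (essentially the same commands the removed container example used to show):

	mount -t devpts -o ptmxmode=0666 devpts /dev/pts
	ln -sf pts/ptmx /dev/ptmx
	# alternatively, keep the /dev/ptmx device node and bind mount instead:
	#   mount --bind /dev/pts/ptmx /dev/ptmx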
diff --git a/Documentation/kdump/gdbmacros.txt b/Documentation/kdump/gdbmacros.txt index 35f6a982a0d5..220d0a80ca2c 100644 --- a/Documentation/kdump/gdbmacros.txt +++ b/Documentation/kdump/gdbmacros.txt | |||
| @@ -170,21 +170,92 @@ document trapinfo | |||
| 170 | address the kernel panicked. | 170 | address the kernel panicked. |
| 171 | end | 171 | end |
| 172 | 172 | ||
| 173 | define dump_log_idx | ||
| 174 | set $idx = $arg0 | ||
| 175 | if ($argc > 1) | ||
| 176 | set $prev_flags = $arg1 | ||
| 177 | else | ||
| 178 | set $prev_flags = 0 | ||
| 179 | end | ||
| 180 | set $msg = ((struct printk_log *) (log_buf + $idx)) | ||
| 181 | set $prefix = 1 | ||
| 182 | set $newline = 1 | ||
| 183 | set $log = log_buf + $idx + sizeof(*$msg) | ||
| 173 | 184 | ||
| 174 | define dmesg | 185 | # prev & LOG_CONT && !(msg->flags & LOG_PREIX) |
| 175 | set $i = 0 | 186 | if (($prev_flags & 8) && !($msg->flags & 4)) |
| 176 | set $end_idx = (log_end - 1) & (log_buf_len - 1) | 187 | set $prefix = 0 |
| 188 | end | ||
| 189 | |||
| 190 | # msg->flags & LOG_CONT | ||
| 191 | if ($msg->flags & 8) | ||
| 192 | # (prev & LOG_CONT && !(prev & LOG_NEWLINE)) | ||
| 193 | if (($prev_flags & 8) && !($prev_flags & 2)) | ||
| 194 | set $prefix = 0 | ||
| 195 | end | ||
| 196 | # (!(msg->flags & LOG_NEWLINE)) | ||
| 197 | if (!($msg->flags & 2)) | ||
| 198 | set $newline = 0 | ||
| 199 | end | ||
| 200 | end | ||
| 201 | |||
| 202 | if ($prefix) | ||
| 203 | printf "[%5lu.%06lu] ", $msg->ts_nsec / 1000000000, $msg->ts_nsec % 1000000000 | ||
| 204 | end | ||
| 205 | if ($msg->text_len != 0) | ||
| 206 | eval "printf \"%%%d.%ds\", $log", $msg->text_len, $msg->text_len | ||
| 207 | end | ||
| 208 | if ($newline) | ||
| 209 | printf "\n" | ||
| 210 | end | ||
| 211 | if ($msg->dict_len > 0) | ||
| 212 | set $dict = $log + $msg->text_len | ||
| 213 | set $idx = 0 | ||
| 214 | set $line = 1 | ||
| 215 | while ($idx < $msg->dict_len) | ||
| 216 | if ($line) | ||
| 217 | printf " " | ||
| 218 | set $line = 0 | ||
| 219 | end | ||
| 220 | set $c = $dict[$idx] | ||
| 221 | if ($c == '\0') | ||
| 222 | printf "\n" | ||
| 223 | set $line = 1 | ||
| 224 | else | ||
| 225 | if ($c < ' ' || $c >= 127 || $c == '\\') | ||
| 226 | printf "\\x%02x", $c | ||
| 227 | else | ||
| 228 | printf "%c", $c | ||
| 229 | end | ||
| 230 | end | ||
| 231 | set $idx = $idx + 1 | ||
| 232 | end | ||
| 233 | printf "\n" | ||
| 234 | end | ||
| 235 | end | ||
| 236 | document dump_log_idx | ||
| 237 | Dump a single log given its index in the log buffer. The first | ||
| 238 | parameter is the index into log_buf, the second is optional and | ||
| 239 | specified the previous log buffer's flags, used for properly | ||
| 240 | formatting continued lines. | ||
| 241 | end | ||
| 177 | 242 | ||
| 178 | while ($i < logged_chars) | 243 | define dmesg |
| 179 | set $idx = (log_end - 1 - logged_chars + $i) & (log_buf_len - 1) | 244 | set $i = log_first_idx |
| 245 | set $end_idx = log_first_idx | ||
| 246 | set $prev_flags = 0 | ||
| 180 | 247 | ||
| 181 | if ($idx + 100 <= $end_idx) || \ | 248 | while (1) |
| 182 | ($end_idx <= $idx && $idx + 100 < log_buf_len) | 249 | set $msg = ((struct printk_log *) (log_buf + $i)) |
| 183 | printf "%.100s", &log_buf[$idx] | 250 | if ($msg->len == 0) |
| 184 | set $i = $i + 100 | 251 | set $i = 0 |
| 185 | else | 252 | else |
| 186 | printf "%c", log_buf[$idx] | 253 | dump_log_idx $i $prev_flags |
| 187 | set $i = $i + 1 | 254 | set $i = $i + $msg->len |
| 255 | set $prev_flags = $msg->flags | ||
| 256 | end | ||
| 257 | if ($i == $end_idx) | ||
| 258 | loop_break | ||
| 188 | end | 259 | end |
| 189 | end | 260 | end |
| 190 | end | 261 | end |
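A possible way to exercise the reworked dmesg macro against a kdump crash dump, assuming a debug vmlinux and a captured vmcore (the paths are illustrative):

	gdb /usr/lib/debug/vmlinux /var/crash/vmcore
	(gdb) source Documentation/kdump/gdbmacros.txt
	(gdb) dmesg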
diff --git a/Documentation/leds/leds-class.txt b/Documentation/leds/leds-class.txt index d406d98339b2..44f5e6bccd97 100644 --- a/Documentation/leds/leds-class.txt +++ b/Documentation/leds/leds-class.txt | |||
| @@ -74,8 +74,8 @@ blink_set() function (see <linux/leds.h>). To set an LED to blinking, | |||
| 74 | however, it is better to use the API function led_blink_set(), as it | 74 | however, it is better to use the API function led_blink_set(), as it |
| 75 | will check and implement software fallback if necessary. | 75 | will check and implement software fallback if necessary. |
| 76 | 76 | ||
| 77 | To turn off blinking again, use the API function led_brightness_set() | 77 | To turn off blinking, use the API function led_brightness_set() |
| 78 | as that will not just set the LED brightness but also stop any software | 78 | with brightness value LED_OFF, which should stop any software |
| 79 | timers that may have been required for blinking. | 79 | timers that may have been required for blinking. |
| 80 | 80 | ||
| 81 | The blink_set() function should choose a user friendly blinking value | 81 | The blink_set() function should choose a user friendly blinking value |
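The paragraph above describes the in-kernel API; from userspace a comparable effect can be had through sysfs, assuming the standard "timer" trigger is available and using an illustrative LED name:

	echo timer > /sys/class/leds/example:green:status/trigger
	echo 500   > /sys/class/leds/example:green:status/delay_on
	echo 500   > /sys/class/leds/example:green:status/delay_off
	# stop blinking and switch the LED off again
	echo none  > /sys/class/leds/example:green:status/trigger
	echo 0     > /sys/class/leds/example:green:status/brightness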
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index 631b0f7ae16f..9d05ed7f7da5 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt | |||
| @@ -369,8 +369,6 @@ does not allocate any driver private context space. | |||
| 369 | Switch configuration | 369 | Switch configuration |
| 370 | -------------------- | 370 | -------------------- |
| 371 | 371 | ||
| 372 | - priv_size: additional size needed by the switch driver for its private context | ||
| 373 | |||
| 374 | - tag_protocol: this is to indicate what kind of tagging protocol is supported, | 372 | - tag_protocol: this is to indicate what kind of tagging protocol is supported, |
| 375 | should be a valid value from the dsa_tag_protocol enum | 373 | should be a valid value from the dsa_tag_protocol enum |
| 376 | 374 | ||
| @@ -416,11 +414,6 @@ PHY devices and link management | |||
| 416 | to the switch port MDIO registers. If unavailable return a negative error | 414 | to the switch port MDIO registers. If unavailable return a negative error |
| 417 | code. | 415 | code. |
| 418 | 416 | ||
| 419 | - poll_link: Function invoked by DSA to query the link state of the switch | ||
| 420 | builtin Ethernet PHYs, per port. This function is responsible for calling | ||
| 421 | netif_carrier_{on,off} when appropriate, and can be used to poll all ports in a | ||
| 422 | single call. Executes from workqueue context. | ||
| 423 | |||
| 424 | - adjust_link: Function invoked by the PHY library when a slave network device | 417 | - adjust_link: Function invoked by the PHY library when a slave network device |
| 425 | is attached to a PHY device. This function is responsible for appropriately | 418 | is attached to a PHY device. This function is responsible for appropriately |
| 426 | configuring the switch port link parameters: speed, duplex, pause based on | 419 | configuring the switch port link parameters: speed, duplex, pause based on |
| @@ -542,6 +535,16 @@ Bridge layer | |||
| 542 | Bridge VLAN filtering | 535 | Bridge VLAN filtering |
| 543 | --------------------- | 536 | --------------------- |
| 544 | 537 | ||
| 538 | - port_vlan_filtering: bridge layer function invoked when the bridge gets | ||
| 539 | configured for turning on or off VLAN filtering. If nothing specific needs to | ||
| 540 | be done at the hardware level, this callback does not need to be implemented. | ||
| 541 | When VLAN filtering is turned on, the hardware must be programmed to | ||
| 542 | reject 802.1Q frames which have VLAN IDs outside of the programmed allowed | ||
| 543 | VLAN ID map/rules. If there is no PVID programmed into the switch port, | ||
| 544 | untagged frames must be rejected as well. When turned off, the switch must | ||
| 545 | accept any 802.1Q frames irrespective of their VLAN ID, and untagged frames are | ||
| 546 | allowed. | ||
| 547 | |||
| 545 | - port_vlan_prepare: bridge layer function invoked when the bridge prepares the | 548 | - port_vlan_prepare: bridge layer function invoked when the bridge prepares the |
| 546 | configuration of a VLAN on the given port. If the operation is not supported | 549 | configuration of a VLAN on the given port. If the operation is not supported |
| 547 | by the hardware, this function should return -EOPNOTSUPP to inform the bridge | 550 | by the hardware, this function should return -EOPNOTSUPP to inform the bridge |
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 6c7f365b1515..9ae929395b24 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt | |||
| @@ -1036,15 +1036,17 @@ proxy_arp_pvlan - BOOLEAN | |||
| 1036 | 1036 | ||
| 1037 | shared_media - BOOLEAN | 1037 | shared_media - BOOLEAN |
| 1038 | Send(router) or accept(host) RFC1620 shared media redirects. | 1038 | Send(router) or accept(host) RFC1620 shared media redirects. |
| 1039 | Overrides ip_secure_redirects. | 1039 | Overrides secure_redirects. |
| 1040 | shared_media for the interface will be enabled if at least one of | 1040 | shared_media for the interface will be enabled if at least one of |
| 1041 | conf/{all,interface}/shared_media is set to TRUE, | 1041 | conf/{all,interface}/shared_media is set to TRUE, |
| 1042 | it will be disabled otherwise | 1042 | it will be disabled otherwise |
| 1043 | default TRUE | 1043 | default TRUE |
| 1044 | 1044 | ||
| 1045 | secure_redirects - BOOLEAN | 1045 | secure_redirects - BOOLEAN |
| 1046 | Accept ICMP redirect messages only for gateways, | 1046 | Accept ICMP redirect messages only to gateways listed in the |
| 1047 | listed in default gateway list. | 1047 | interface's current gateway list. Even if disabled, RFC1122 redirect |
| 1048 | rules still apply. | ||
| 1049 | Overridden by shared_media. | ||
| 1048 | secure_redirects for the interface will be enabled if at least one of | 1050 | secure_redirects for the interface will be enabled if at least one of |
| 1049 | conf/{all,interface}/secure_redirects is set to TRUE, | 1051 | conf/{all,interface}/secure_redirects is set to TRUE, |
| 1050 | it will be disabled otherwise | 1052 | it will be disabled otherwise |
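A quick sketch of how the two knobs interact in practice; since shared_media defaults to TRUE and overrides secure_redirects, it has to be cleared for secure_redirects to take effect:

	# accept ICMP redirects only from gateways in the interface's gateway list
	sysctl -w net.ipv4.conf.all.shared_media=0
	sysctl -w net.ipv4.conf.all.secure_redirects=1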
diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt index 8638f61c8c9d..37eca00796ee 100644 --- a/Documentation/scsi/scsi_eh.txt +++ b/Documentation/scsi/scsi_eh.txt | |||
| @@ -263,19 +263,23 @@ scmd->allowed. | |||
| 263 | 263 | ||
| 264 | 3. scmd recovered | 264 | 3. scmd recovered |
| 265 | ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd | 265 | ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd |
| 266 | - shost->host_failed-- | ||
| 267 | - clear scmd->eh_eflags | 266 | - clear scmd->eh_eflags |
| 268 | - scsi_setup_cmd_retry() | 267 | - scsi_setup_cmd_retry() |
| 269 | - move from local eh_work_q to local eh_done_q | 268 | - move from local eh_work_q to local eh_done_q |
| 270 | LOCKING: none | 269 | LOCKING: none |
| 270 | CONCURRENCY: at most one thread per separate eh_work_q to | ||
| 271 | keep queue manipulation lockless | ||
| 271 | 272 | ||
| 272 | 4. EH completes | 273 | 4. EH completes |
| 273 | ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper | 274 | ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper |
| 274 | layer of failure. | 275 | layer of failure. May be called concurrently but must have |
| 276 | no more than one thread per separate eh_work_q to | ||
| 277 | manipulate the queue locklessly | ||
| 275 | - scmd is removed from eh_done_q and scmd->eh_entry is cleared | 278 | - scmd is removed from eh_done_q and scmd->eh_entry is cleared |
| 276 | - if retry is necessary, scmd is requeued using | 279 | - if retry is necessary, scmd is requeued using |
| 277 | scsi_queue_insert() | 280 | scsi_queue_insert() |
| 278 | - otherwise, scsi_finish_command() is invoked for scmd | 281 | - otherwise, scsi_finish_command() is invoked for scmd |
| 282 | - zero shost->host_failed | ||
| 279 | LOCKING: queue or finish function performs appropriate locking | 283 | LOCKING: queue or finish function performs appropriate locking |
| 280 | 284 | ||
| 281 | 285 | ||
diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt index 20d05719bceb..3849814bfe6d 100644 --- a/Documentation/security/keys.txt +++ b/Documentation/security/keys.txt | |||
| @@ -826,7 +826,8 @@ The keyctl syscall functions are: | |||
| 826 | (*) Compute a Diffie-Hellman shared secret or public key | 826 | (*) Compute a Diffie-Hellman shared secret or public key |
| 827 | 827 | ||
| 828 | long keyctl(KEYCTL_DH_COMPUTE, struct keyctl_dh_params *params, | 828 | long keyctl(KEYCTL_DH_COMPUTE, struct keyctl_dh_params *params, |
| 829 | char *buffer, size_t buflen); | 829 | char *buffer, size_t buflen, |
| 830 | void *reserved); | ||
| 830 | 831 | ||
| 831 | The params struct contains serial numbers for three keys: | 832 | The params struct contains serial numbers for three keys: |
| 832 | 833 | ||
| @@ -843,6 +844,8 @@ The keyctl syscall functions are: | |||
| 843 | public key. If the base is the remote public key, the result is | 844 | public key. If the base is the remote public key, the result is |
| 844 | the shared secret. | 845 | the shared secret. |
| 845 | 846 | ||
| 847 | The reserved argument must be set to NULL. | ||
| 848 | |||
| 846 | The buffer length must be at least the length of the prime, or zero. | 849 | The buffer length must be at least the length of the prime, or zero. |
| 847 | 850 | ||
| 848 | If the buffer length is nonzero, the length of the result is | 851 | If the buffer length is nonzero, the length of the result is |
diff --git a/MAINTAINERS b/MAINTAINERS index 7304d2e37a98..952fd2aba7b7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -1159,6 +1159,7 @@ F: arch/arm/mach-footbridge/ | |||
| 1159 | ARM/FREESCALE IMX / MXC ARM ARCHITECTURE | 1159 | ARM/FREESCALE IMX / MXC ARM ARCHITECTURE |
| 1160 | M: Shawn Guo <shawnguo@kernel.org> | 1160 | M: Shawn Guo <shawnguo@kernel.org> |
| 1161 | M: Sascha Hauer <kernel@pengutronix.de> | 1161 | M: Sascha Hauer <kernel@pengutronix.de> |
| 1162 | R: Fabio Estevam <fabio.estevam@nxp.com> | ||
| 1162 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1163 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
| 1163 | S: Maintained | 1164 | S: Maintained |
| 1164 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git | 1165 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git |
| @@ -2242,7 +2243,8 @@ F: include/net/ax25.h | |||
| 2242 | F: net/ax25/ | 2243 | F: net/ax25/ |
| 2243 | 2244 | ||
| 2244 | AZ6007 DVB DRIVER | 2245 | AZ6007 DVB DRIVER |
| 2245 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 2246 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 2247 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 2246 | L: linux-media@vger.kernel.org | 2248 | L: linux-media@vger.kernel.org |
| 2247 | W: https://linuxtv.org | 2249 | W: https://linuxtv.org |
| 2248 | T: git git://linuxtv.org/media_tree.git | 2250 | T: git git://linuxtv.org/media_tree.git |
| @@ -2709,7 +2711,8 @@ F: Documentation/filesystems/btrfs.txt | |||
| 2709 | F: fs/btrfs/ | 2711 | F: fs/btrfs/ |
| 2710 | 2712 | ||
| 2711 | BTTV VIDEO4LINUX DRIVER | 2713 | BTTV VIDEO4LINUX DRIVER |
| 2712 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 2714 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 2715 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 2713 | L: linux-media@vger.kernel.org | 2716 | L: linux-media@vger.kernel.org |
| 2714 | W: https://linuxtv.org | 2717 | W: https://linuxtv.org |
| 2715 | T: git git://linuxtv.org/media_tree.git | 2718 | T: git git://linuxtv.org/media_tree.git |
| @@ -2773,9 +2776,9 @@ F: include/net/caif/ | |||
| 2773 | F: net/caif/ | 2776 | F: net/caif/ |
| 2774 | 2777 | ||
| 2775 | CALGARY x86-64 IOMMU | 2778 | CALGARY x86-64 IOMMU |
| 2776 | M: Muli Ben-Yehuda <muli@il.ibm.com> | 2779 | M: Muli Ben-Yehuda <mulix@mulix.org> |
| 2777 | M: "Jon D. Mason" <jdmason@kudzu.us> | 2780 | M: Jon Mason <jdmason@kudzu.us> |
| 2778 | L: discuss@x86-64.org | 2781 | L: iommu@lists.linux-foundation.org |
| 2779 | S: Maintained | 2782 | S: Maintained |
| 2780 | F: arch/x86/kernel/pci-calgary_64.c | 2783 | F: arch/x86/kernel/pci-calgary_64.c |
| 2781 | F: arch/x86/kernel/tce_64.c | 2784 | F: arch/x86/kernel/tce_64.c |
| @@ -3086,6 +3089,7 @@ M: Stephen Boyd <sboyd@codeaurora.org> | |||
| 3086 | L: linux-clk@vger.kernel.org | 3089 | L: linux-clk@vger.kernel.org |
| 3087 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git | 3090 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git |
| 3088 | S: Maintained | 3091 | S: Maintained |
| 3092 | F: Documentation/devicetree/bindings/clock/ | ||
| 3089 | F: drivers/clk/ | 3093 | F: drivers/clk/ |
| 3090 | X: drivers/clk/clkdev.c | 3094 | X: drivers/clk/clkdev.c |
| 3091 | F: include/linux/clk-pr* | 3095 | F: include/linux/clk-pr* |
| @@ -3343,7 +3347,8 @@ S: Maintained | |||
| 3343 | F: drivers/media/dvb-frontends/cx24120* | 3347 | F: drivers/media/dvb-frontends/cx24120* |
| 3344 | 3348 | ||
| 3345 | CX88 VIDEO4LINUX DRIVER | 3349 | CX88 VIDEO4LINUX DRIVER |
| 3346 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 3350 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 3351 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 3347 | L: linux-media@vger.kernel.org | 3352 | L: linux-media@vger.kernel.org |
| 3348 | W: https://linuxtv.org | 3353 | W: https://linuxtv.org |
| 3349 | T: git git://linuxtv.org/media_tree.git | 3354 | T: git git://linuxtv.org/media_tree.git |
| @@ -3773,6 +3778,7 @@ Q: https://patchwork.kernel.org/project/linux-dmaengine/list/ | |||
| 3773 | S: Maintained | 3778 | S: Maintained |
| 3774 | F: drivers/dma/ | 3779 | F: drivers/dma/ |
| 3775 | F: include/linux/dmaengine.h | 3780 | F: include/linux/dmaengine.h |
| 3781 | F: Documentation/devicetree/bindings/dma/ | ||
| 3776 | F: Documentation/dmaengine/ | 3782 | F: Documentation/dmaengine/ |
| 3777 | T: git git://git.infradead.org/users/vkoul/slave-dma.git | 3783 | T: git git://git.infradead.org/users/vkoul/slave-dma.git |
| 3778 | 3784 | ||
| @@ -4290,7 +4296,8 @@ F: fs/ecryptfs/ | |||
| 4290 | EDAC-CORE | 4296 | EDAC-CORE |
| 4291 | M: Doug Thompson <dougthompson@xmission.com> | 4297 | M: Doug Thompson <dougthompson@xmission.com> |
| 4292 | M: Borislav Petkov <bp@alien8.de> | 4298 | M: Borislav Petkov <bp@alien8.de> |
| 4293 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 4299 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 4300 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 4294 | L: linux-edac@vger.kernel.org | 4301 | L: linux-edac@vger.kernel.org |
| 4295 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next | 4302 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next |
| 4296 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next | 4303 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next |
| @@ -4335,7 +4342,8 @@ S: Maintained | |||
| 4335 | F: drivers/edac/e7xxx_edac.c | 4342 | F: drivers/edac/e7xxx_edac.c |
| 4336 | 4343 | ||
| 4337 | EDAC-GHES | 4344 | EDAC-GHES |
| 4338 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 4345 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 4346 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 4339 | L: linux-edac@vger.kernel.org | 4347 | L: linux-edac@vger.kernel.org |
| 4340 | S: Maintained | 4348 | S: Maintained |
| 4341 | F: drivers/edac/ghes_edac.c | 4349 | F: drivers/edac/ghes_edac.c |
| @@ -4359,19 +4367,22 @@ S: Maintained | |||
| 4359 | F: drivers/edac/i5000_edac.c | 4367 | F: drivers/edac/i5000_edac.c |
| 4360 | 4368 | ||
| 4361 | EDAC-I5400 | 4369 | EDAC-I5400 |
| 4362 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 4370 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 4371 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 4363 | L: linux-edac@vger.kernel.org | 4372 | L: linux-edac@vger.kernel.org |
| 4364 | S: Maintained | 4373 | S: Maintained |
| 4365 | F: drivers/edac/i5400_edac.c | 4374 | F: drivers/edac/i5400_edac.c |
| 4366 | 4375 | ||
| 4367 | EDAC-I7300 | 4376 | EDAC-I7300 |
| 4368 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 4377 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 4378 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 4369 | L: linux-edac@vger.kernel.org | 4379 | L: linux-edac@vger.kernel.org |
| 4370 | S: Maintained | 4380 | S: Maintained |
| 4371 | F: drivers/edac/i7300_edac.c | 4381 | F: drivers/edac/i7300_edac.c |
| 4372 | 4382 | ||
| 4373 | EDAC-I7CORE | 4383 | EDAC-I7CORE |
| 4374 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 4384 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 4385 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 4375 | L: linux-edac@vger.kernel.org | 4386 | L: linux-edac@vger.kernel.org |
| 4376 | S: Maintained | 4387 | S: Maintained |
| 4377 | F: drivers/edac/i7core_edac.c | 4388 | F: drivers/edac/i7core_edac.c |
| @@ -4408,7 +4419,8 @@ S: Maintained | |||
| 4408 | F: drivers/edac/r82600_edac.c | 4419 | F: drivers/edac/r82600_edac.c |
| 4409 | 4420 | ||
| 4410 | EDAC-SBRIDGE | 4421 | EDAC-SBRIDGE |
| 4411 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 4422 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 4423 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 4412 | L: linux-edac@vger.kernel.org | 4424 | L: linux-edac@vger.kernel.org |
| 4413 | S: Maintained | 4425 | S: Maintained |
| 4414 | F: drivers/edac/sb_edac.c | 4426 | F: drivers/edac/sb_edac.c |
| @@ -4467,7 +4479,8 @@ S: Maintained | |||
| 4467 | F: drivers/net/ethernet/ibm/ehea/ | 4479 | F: drivers/net/ethernet/ibm/ehea/ |
| 4468 | 4480 | ||
| 4469 | EM28XX VIDEO4LINUX DRIVER | 4481 | EM28XX VIDEO4LINUX DRIVER |
| 4470 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 4482 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 4483 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 4471 | L: linux-media@vger.kernel.org | 4484 | L: linux-media@vger.kernel.org |
| 4472 | W: https://linuxtv.org | 4485 | W: https://linuxtv.org |
| 4473 | T: git git://linuxtv.org/media_tree.git | 4486 | T: git git://linuxtv.org/media_tree.git |
| @@ -6486,6 +6499,7 @@ F: include/uapi/linux/sunrpc/ | |||
| 6486 | 6499 | ||
| 6487 | KERNEL SELFTEST FRAMEWORK | 6500 | KERNEL SELFTEST FRAMEWORK |
| 6488 | M: Shuah Khan <shuahkh@osg.samsung.com> | 6501 | M: Shuah Khan <shuahkh@osg.samsung.com> |
| 6502 | M: Shuah Khan <shuah@kernel.org> | ||
| 6489 | L: linux-kselftest@vger.kernel.org | 6503 | L: linux-kselftest@vger.kernel.org |
| 6490 | T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest | 6504 | T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest |
| 6491 | S: Maintained | 6505 | S: Maintained |
| @@ -7357,7 +7371,8 @@ S: Supported | |||
| 7357 | F: drivers/media/pci/netup_unidvb/* | 7371 | F: drivers/media/pci/netup_unidvb/* |
| 7358 | 7372 | ||
| 7359 | MEDIA INPUT INFRASTRUCTURE (V4L/DVB) | 7373 | MEDIA INPUT INFRASTRUCTURE (V4L/DVB) |
| 7360 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 7374 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 7375 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 7361 | P: LinuxTV.org Project | 7376 | P: LinuxTV.org Project |
| 7362 | L: linux-media@vger.kernel.org | 7377 | L: linux-media@vger.kernel.org |
| 7363 | W: https://linuxtv.org | 7378 | W: https://linuxtv.org |
| @@ -7989,6 +8004,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/ | |||
| 7989 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git | 8004 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git |
| 7990 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git | 8005 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git |
| 7991 | S: Odd Fixes | 8006 | S: Odd Fixes |
| 8007 | F: Documentation/devicetree/bindings/net/ | ||
| 7992 | F: drivers/net/ | 8008 | F: drivers/net/ |
| 7993 | F: include/linux/if_* | 8009 | F: include/linux/if_* |
| 7994 | F: include/linux/netdevice.h | 8010 | F: include/linux/netdevice.h |
| @@ -8007,6 +8023,7 @@ Q: http://patchwork.kernel.org/project/linux-wireless/list/ | |||
| 8007 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git | 8023 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git |
| 8008 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git | 8024 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git |
| 8009 | S: Maintained | 8025 | S: Maintained |
| 8026 | F: Documentation/devicetree/bindings/net/wireless/ | ||
| 8010 | F: drivers/net/wireless/ | 8027 | F: drivers/net/wireless/ |
| 8011 | 8028 | ||
| 8012 | NETXEN (1/10) GbE SUPPORT | 8029 | NETXEN (1/10) GbE SUPPORT |
| @@ -8404,10 +8421,9 @@ F: drivers/i2c/busses/i2c-ocores.c | |||
| 8404 | OPEN FIRMWARE AND FLATTENED DEVICE TREE | 8421 | OPEN FIRMWARE AND FLATTENED DEVICE TREE |
| 8405 | M: Rob Herring <robh+dt@kernel.org> | 8422 | M: Rob Herring <robh+dt@kernel.org> |
| 8406 | M: Frank Rowand <frowand.list@gmail.com> | 8423 | M: Frank Rowand <frowand.list@gmail.com> |
| 8407 | M: Grant Likely <grant.likely@linaro.org> | ||
| 8408 | L: devicetree@vger.kernel.org | 8424 | L: devicetree@vger.kernel.org |
| 8409 | W: http://www.devicetree.org/ | 8425 | W: http://www.devicetree.org/ |
| 8410 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git | 8426 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git |
| 8411 | S: Maintained | 8427 | S: Maintained |
| 8412 | F: drivers/of/ | 8428 | F: drivers/of/ |
| 8413 | F: include/linux/of*.h | 8429 | F: include/linux/of*.h |
| @@ -8415,12 +8431,10 @@ F: scripts/dtc/ | |||
| 8415 | 8431 | ||
| 8416 | OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS | 8432 | OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS |
| 8417 | M: Rob Herring <robh+dt@kernel.org> | 8433 | M: Rob Herring <robh+dt@kernel.org> |
| 8418 | M: Pawel Moll <pawel.moll@arm.com> | ||
| 8419 | M: Mark Rutland <mark.rutland@arm.com> | 8434 | M: Mark Rutland <mark.rutland@arm.com> |
| 8420 | M: Ian Campbell <ijc+devicetree@hellion.org.uk> | ||
| 8421 | M: Kumar Gala <galak@codeaurora.org> | ||
| 8422 | L: devicetree@vger.kernel.org | 8435 | L: devicetree@vger.kernel.org |
| 8423 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git | 8436 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git |
| 8437 | Q: http://patchwork.ozlabs.org/project/devicetree-bindings/list/ | ||
| 8424 | S: Maintained | 8438 | S: Maintained |
| 8425 | F: Documentation/devicetree/ | 8439 | F: Documentation/devicetree/ |
| 8426 | F: arch/*/boot/dts/ | 8440 | F: arch/*/boot/dts/ |
| @@ -8944,6 +8958,7 @@ M: Linus Walleij <linus.walleij@linaro.org> | |||
| 8944 | L: linux-gpio@vger.kernel.org | 8958 | L: linux-gpio@vger.kernel.org |
| 8945 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git | 8959 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git |
| 8946 | S: Maintained | 8960 | S: Maintained |
| 8961 | F: Documentation/devicetree/bindings/pinctrl/ | ||
| 8947 | F: drivers/pinctrl/ | 8962 | F: drivers/pinctrl/ |
| 8948 | F: include/linux/pinctrl/ | 8963 | F: include/linux/pinctrl/ |
| 8949 | 8964 | ||
| @@ -9851,7 +9866,8 @@ S: Odd Fixes | |||
| 9851 | F: drivers/media/i2c/saa6588* | 9866 | F: drivers/media/i2c/saa6588* |
| 9852 | 9867 | ||
| 9853 | SAA7134 VIDEO4LINUX DRIVER | 9868 | SAA7134 VIDEO4LINUX DRIVER |
| 9854 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 9869 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 9870 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 9855 | L: linux-media@vger.kernel.org | 9871 | L: linux-media@vger.kernel.org |
| 9856 | W: https://linuxtv.org | 9872 | W: https://linuxtv.org |
| 9857 | T: git git://linuxtv.org/media_tree.git | 9873 | T: git git://linuxtv.org/media_tree.git |
| @@ -10370,7 +10386,8 @@ S: Maintained | |||
| 10370 | F: drivers/media/radio/si4713/radio-usb-si4713.c | 10386 | F: drivers/media/radio/si4713/radio-usb-si4713.c |
| 10371 | 10387 | ||
| 10372 | SIANO DVB DRIVER | 10388 | SIANO DVB DRIVER |
| 10373 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 10389 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 10390 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 10374 | L: linux-media@vger.kernel.org | 10391 | L: linux-media@vger.kernel.org |
| 10375 | W: https://linuxtv.org | 10392 | W: https://linuxtv.org |
| 10376 | T: git git://linuxtv.org/media_tree.git | 10393 | T: git git://linuxtv.org/media_tree.git |
| @@ -11136,7 +11153,8 @@ S: Maintained | |||
| 11136 | F: drivers/media/i2c/tda9840* | 11153 | F: drivers/media/i2c/tda9840* |
| 11137 | 11154 | ||
| 11138 | TEA5761 TUNER DRIVER | 11155 | TEA5761 TUNER DRIVER |
| 11139 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 11156 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 11157 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 11140 | L: linux-media@vger.kernel.org | 11158 | L: linux-media@vger.kernel.org |
| 11141 | W: https://linuxtv.org | 11159 | W: https://linuxtv.org |
| 11142 | T: git git://linuxtv.org/media_tree.git | 11160 | T: git git://linuxtv.org/media_tree.git |
| @@ -11144,7 +11162,8 @@ S: Odd fixes | |||
| 11144 | F: drivers/media/tuners/tea5761.* | 11162 | F: drivers/media/tuners/tea5761.* |
| 11145 | 11163 | ||
| 11146 | TEA5767 TUNER DRIVER | 11164 | TEA5767 TUNER DRIVER |
| 11147 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 11165 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 11166 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 11148 | L: linux-media@vger.kernel.org | 11167 | L: linux-media@vger.kernel.org |
| 11149 | W: https://linuxtv.org | 11168 | W: https://linuxtv.org |
| 11150 | T: git git://linuxtv.org/media_tree.git | 11169 | T: git git://linuxtv.org/media_tree.git |
| @@ -11531,7 +11550,8 @@ F: include/linux/shmem_fs.h | |||
| 11531 | F: mm/shmem.c | 11550 | F: mm/shmem.c |
| 11532 | 11551 | ||
| 11533 | TM6000 VIDEO4LINUX DRIVER | 11552 | TM6000 VIDEO4LINUX DRIVER |
| 11534 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 11553 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 11554 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 11535 | L: linux-media@vger.kernel.org | 11555 | L: linux-media@vger.kernel.org |
| 11536 | W: https://linuxtv.org | 11556 | W: https://linuxtv.org |
| 11537 | T: git git://linuxtv.org/media_tree.git | 11557 | T: git git://linuxtv.org/media_tree.git |
| @@ -11885,7 +11905,8 @@ F: drivers/usb/common/usb-otg-fsm.c | |||
| 11885 | 11905 | ||
| 11886 | USB OVER IP DRIVER | 11906 | USB OVER IP DRIVER |
| 11887 | M: Valentina Manea <valentina.manea.m@gmail.com> | 11907 | M: Valentina Manea <valentina.manea.m@gmail.com> |
| 11888 | M: Shuah Khan <shuah.kh@samsung.com> | 11908 | M: Shuah Khan <shuahkh@osg.samsung.com> |
| 11909 | M: Shuah Khan <shuah@kernel.org> | ||
| 11889 | L: linux-usb@vger.kernel.org | 11910 | L: linux-usb@vger.kernel.org |
| 11890 | S: Maintained | 11911 | S: Maintained |
| 11891 | F: Documentation/usb/usbip_protocol.txt | 11912 | F: Documentation/usb/usbip_protocol.txt |
| @@ -11956,6 +11977,7 @@ L: linux-usb@vger.kernel.org | |||
| 11956 | W: http://www.linux-usb.org | 11977 | W: http://www.linux-usb.org |
| 11957 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git | 11978 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git |
| 11958 | S: Supported | 11979 | S: Supported |
| 11980 | F: Documentation/devicetree/bindings/usb/ | ||
| 11959 | F: Documentation/usb/ | 11981 | F: Documentation/usb/ |
| 11960 | F: drivers/usb/ | 11982 | F: drivers/usb/ |
| 11961 | F: include/linux/usb.h | 11983 | F: include/linux/usb.h |
| @@ -12129,6 +12151,7 @@ VIRTIO CORE, NET AND BLOCK DRIVERS | |||
| 12129 | M: "Michael S. Tsirkin" <mst@redhat.com> | 12151 | M: "Michael S. Tsirkin" <mst@redhat.com> |
| 12130 | L: virtualization@lists.linux-foundation.org | 12152 | L: virtualization@lists.linux-foundation.org |
| 12131 | S: Maintained | 12153 | S: Maintained |
| 12154 | F: Documentation/devicetree/bindings/virtio/ | ||
| 12132 | F: drivers/virtio/ | 12155 | F: drivers/virtio/ |
| 12133 | F: tools/virtio/ | 12156 | F: tools/virtio/ |
| 12134 | F: drivers/net/virtio_net.c | 12157 | F: drivers/net/virtio_net.c |
| @@ -12517,7 +12540,8 @@ S: Maintained | |||
| 12517 | F: arch/x86/entry/vdso/ | 12540 | F: arch/x86/entry/vdso/ |
| 12518 | 12541 | ||
| 12519 | XC2028/3028 TUNER DRIVER | 12542 | XC2028/3028 TUNER DRIVER |
| 12520 | M: Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 12543 | M: Mauro Carvalho Chehab <mchehab@s-opensource.com> |
| 12544 | M: Mauro Carvalho Chehab <mchehab@kernel.org> | ||
| 12521 | L: linux-media@vger.kernel.org | 12545 | L: linux-media@vger.kernel.org |
| 12522 | W: https://linuxtv.org | 12546 | W: https://linuxtv.org |
| 12523 | T: git git://linuxtv.org/media_tree.git | 12547 | T: git git://linuxtv.org/media_tree.git |
| @@ -1,7 +1,7 @@ | |||
| 1 | VERSION = 4 | 1 | VERSION = 4 |
| 2 | PATCHLEVEL = 7 | 2 | PATCHLEVEL = 7 |
| 3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
| 4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = -rc5 |
| 5 | NAME = Psychotic Stoned Sheep | 5 | NAME = Psychotic Stoned Sheep |
| 6 | 6 | ||
| 7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/Kconfig b/arch/Kconfig index d794384a0404..15996290fed4 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
| @@ -226,8 +226,8 @@ config ARCH_INIT_TASK | |||
| 226 | config ARCH_TASK_STRUCT_ALLOCATOR | 226 | config ARCH_TASK_STRUCT_ALLOCATOR |
| 227 | bool | 227 | bool |
| 228 | 228 | ||
| 229 | # Select if arch has its private alloc_thread_info() function | 229 | # Select if arch has its private alloc_thread_stack() function |
| 230 | config ARCH_THREAD_INFO_ALLOCATOR | 230 | config ARCH_THREAD_STACK_ALLOCATOR |
| 231 | bool | 231 | bool |
| 232 | 232 | ||
| 233 | # Select if arch wants to size task_struct dynamically via arch_task_struct_size: | 233 | # Select if arch wants to size task_struct dynamically via arch_task_struct_size: |
| @@ -606,6 +606,9 @@ config HAVE_ARCH_HASH | |||
| 606 | file which provides platform-specific implementations of some | 606 | file which provides platform-specific implementations of some |
| 607 | functions in <linux/hash.h> or fs/namei.c. | 607 | functions in <linux/hash.h> or fs/namei.c. |
| 608 | 608 | ||
| 609 | config ISA_BUS_API | ||
| 610 | def_bool ISA | ||
| 611 | |||
| 609 | # | 612 | # |
| 610 | # ABI hall of shame | 613 | # ABI hall of shame |
| 611 | # | 614 | # |
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h index aab14a019c20..c2ebb6f36c9d 100644 --- a/arch/alpha/include/asm/pgalloc.h +++ b/arch/alpha/include/asm/pgalloc.h | |||
| @@ -40,7 +40,7 @@ pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
| 40 | static inline pmd_t * | 40 | static inline pmd_t * |
| 41 | pmd_alloc_one(struct mm_struct *mm, unsigned long address) | 41 | pmd_alloc_one(struct mm_struct *mm, unsigned long address) |
| 42 | { | 42 | { |
| 43 | pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); | 43 | pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); |
| 44 | return ret; | 44 | return ret; |
| 45 | } | 45 | } |
| 46 | 46 | ||
| @@ -53,7 +53,7 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd) | |||
| 53 | static inline pte_t * | 53 | static inline pte_t * |
| 54 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | 54 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) |
| 55 | { | 55 | { |
| 56 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); | 56 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); |
| 57 | return pte; | 57 | return pte; |
| 58 | } | 58 | } |
| 59 | 59 | ||
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 0dcbacfdea4b..0d3e59f56974 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig | |||
| @@ -61,7 +61,7 @@ config RWSEM_GENERIC_SPINLOCK | |||
| 61 | def_bool y | 61 | def_bool y |
| 62 | 62 | ||
| 63 | config ARCH_DISCONTIGMEM_ENABLE | 63 | config ARCH_DISCONTIGMEM_ENABLE |
| 64 | def_bool y | 64 | def_bool n |
| 65 | 65 | ||
| 66 | config ARCH_FLATMEM_ENABLE | 66 | config ARCH_FLATMEM_ENABLE |
| 67 | def_bool y | 67 | def_bool y |
| @@ -186,9 +186,6 @@ if SMP | |||
| 186 | config ARC_HAS_COH_CACHES | 186 | config ARC_HAS_COH_CACHES |
| 187 | def_bool n | 187 | def_bool n |
| 188 | 188 | ||
| 189 | config ARC_HAS_REENTRANT_IRQ_LV2 | ||
| 190 | def_bool n | ||
| 191 | |||
| 192 | config ARC_MCIP | 189 | config ARC_MCIP |
| 193 | bool "ARConnect Multicore IP (MCIP) Support " | 190 | bool "ARConnect Multicore IP (MCIP) Support " |
| 194 | depends on ISA_ARCV2 | 191 | depends on ISA_ARCV2 |
| @@ -366,25 +363,10 @@ config NODES_SHIFT | |||
| 366 | if ISA_ARCOMPACT | 363 | if ISA_ARCOMPACT |
| 367 | 364 | ||
| 368 | config ARC_COMPACT_IRQ_LEVELS | 365 | config ARC_COMPACT_IRQ_LEVELS |
| 369 | bool "ARCompact IRQ Priorities: High(2)/Low(1)" | 366 | bool "Setup Timer IRQ as high Priority" |
| 370 | default n | 367 | default n |
| 371 | # Timer HAS to be high priority, for any other high priority config | ||
| 372 | select ARC_IRQ3_LV2 | ||
| 373 | # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy | 368 | # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy |
| 374 | depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2 | 369 | depends on !SMP |
| 375 | |||
| 376 | if ARC_COMPACT_IRQ_LEVELS | ||
| 377 | |||
| 378 | config ARC_IRQ3_LV2 | ||
| 379 | bool | ||
| 380 | |||
| 381 | config ARC_IRQ5_LV2 | ||
| 382 | bool | ||
| 383 | |||
| 384 | config ARC_IRQ6_LV2 | ||
| 385 | bool | ||
| 386 | |||
| 387 | endif #ARC_COMPACT_IRQ_LEVELS | ||
| 388 | 370 | ||
| 389 | config ARC_FPU_SAVE_RESTORE | 371 | config ARC_FPU_SAVE_RESTORE |
| 390 | bool "Enable FPU state persistence across context switch" | 372 | bool "Enable FPU state persistence across context switch" |
| @@ -407,11 +389,6 @@ config ARC_HAS_LLSC | |||
| 407 | default y | 389 | default y |
| 408 | depends on !ARC_CANT_LLSC | 390 | depends on !ARC_CANT_LLSC |
| 409 | 391 | ||
| 410 | config ARC_STAR_9000923308 | ||
| 411 | bool "Workaround for llock/scond livelock" | ||
| 412 | default n | ||
| 413 | depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC | ||
| 414 | |||
| 415 | config ARC_HAS_SWAPE | 392 | config ARC_HAS_SWAPE |
| 416 | bool "Insn: SWAPE (endian-swap)" | 393 | bool "Insn: SWAPE (endian-swap)" |
| 417 | default y | 394 | default y |
| @@ -471,7 +448,7 @@ config LINUX_LINK_BASE | |||
| 471 | 448 | ||
| 472 | config HIGHMEM | 449 | config HIGHMEM |
| 473 | bool "High Memory Support" | 450 | bool "High Memory Support" |
| 474 | select DISCONTIGMEM | 451 | select ARCH_DISCONTIGMEM_ENABLE |
| 475 | help | 452 | help |
| 476 | With ARC 2G:2G address split, only upper 2G is directly addressable by | 453 | With ARC 2G:2G address split, only upper 2G is directly addressable by |
| 477 | kernel. Enable this to potentially allow access to rest of 2G and PAE | 454 | kernel. Enable this to potentially allow access to rest of 2G and PAE |
diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 02fabef2891c..d4df6be66d58 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile | |||
| @@ -127,7 +127,7 @@ libs-y += arch/arc/lib/ $(LIBGCC) | |||
| 127 | 127 | ||
| 128 | boot := arch/arc/boot | 128 | boot := arch/arc/boot |
| 129 | 129 | ||
| 130 | #default target for make without any arguements. | 130 | #default target for make without any arguments. |
| 131 | KBUILD_IMAGE := bootpImage | 131 | KBUILD_IMAGE := bootpImage |
| 132 | 132 | ||
| 133 | all: $(KBUILD_IMAGE) | 133 | all: $(KBUILD_IMAGE) |
diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi index 3942634f805a..02410b211433 100644 --- a/arch/arc/boot/dts/abilis_tb100.dtsi +++ b/arch/arc/boot/dts/abilis_tb100.dtsi | |||
| @@ -23,8 +23,6 @@ | |||
| 23 | 23 | ||
| 24 | 24 | ||
| 25 | / { | 25 | / { |
| 26 | clock-frequency = <500000000>; /* 500 MHZ */ | ||
| 27 | |||
| 28 | soc100 { | 26 | soc100 { |
| 29 | bus-frequency = <166666666>; | 27 | bus-frequency = <166666666>; |
| 30 | 28 | ||
diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi index b0467229a5c4..f9e7686044eb 100644 --- a/arch/arc/boot/dts/abilis_tb101.dtsi +++ b/arch/arc/boot/dts/abilis_tb101.dtsi | |||
| @@ -23,8 +23,6 @@ | |||
| 23 | 23 | ||
| 24 | 24 | ||
| 25 | / { | 25 | / { |
| 26 | clock-frequency = <500000000>; /* 500 MHZ */ | ||
| 27 | |||
| 28 | soc100 { | 26 | soc100 { |
| 29 | bus-frequency = <166666666>; | 27 | bus-frequency = <166666666>; |
| 30 | 28 | ||
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi index 3e02f152edcb..6ae2c476ad82 100644 --- a/arch/arc/boot/dts/axc001.dtsi +++ b/arch/arc/boot/dts/axc001.dtsi | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | 15 | ||
| 16 | / { | 16 | / { |
| 17 | compatible = "snps,arc"; | 17 | compatible = "snps,arc"; |
| 18 | clock-frequency = <750000000>; /* 750 MHZ */ | ||
| 19 | #address-cells = <1>; | 18 | #address-cells = <1>; |
| 20 | #size-cells = <1>; | 19 | #size-cells = <1>; |
| 21 | 20 | ||
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi index 378e455a94c4..14df46f141bf 100644 --- a/arch/arc/boot/dts/axc003.dtsi +++ b/arch/arc/boot/dts/axc003.dtsi | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | 14 | ||
| 15 | / { | 15 | / { |
| 16 | compatible = "snps,arc"; | 16 | compatible = "snps,arc"; |
| 17 | clock-frequency = <90000000>; | ||
| 18 | #address-cells = <1>; | 17 | #address-cells = <1>; |
| 19 | #size-cells = <1>; | 18 | #size-cells = <1>; |
| 20 | 19 | ||
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi index 64c94b2860ab..3d6cfa32bf51 100644 --- a/arch/arc/boot/dts/axc003_idu.dtsi +++ b/arch/arc/boot/dts/axc003_idu.dtsi | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | 14 | ||
| 15 | / { | 15 | / { |
| 16 | compatible = "snps,arc"; | 16 | compatible = "snps,arc"; |
| 17 | clock-frequency = <90000000>; | ||
| 18 | #address-cells = <1>; | 17 | #address-cells = <1>; |
| 19 | #size-cells = <1>; | 18 | #size-cells = <1>; |
| 20 | 19 | ||
diff --git a/arch/arc/boot/dts/eznps.dts b/arch/arc/boot/dts/eznps.dts index b89f6c3eb352..1e0d225791c1 100644 --- a/arch/arc/boot/dts/eznps.dts +++ b/arch/arc/boot/dts/eznps.dts | |||
| @@ -18,7 +18,6 @@ | |||
| 18 | 18 | ||
| 19 | / { | 19 | / { |
| 20 | compatible = "ezchip,arc-nps"; | 20 | compatible = "ezchip,arc-nps"; |
| 21 | clock-frequency = <83333333>; /* 83.333333 MHZ */ | ||
| 22 | #address-cells = <1>; | 21 | #address-cells = <1>; |
| 23 | #size-cells = <1>; | 22 | #size-cells = <1>; |
| 24 | interrupt-parent = <&intc>; | 23 | interrupt-parent = <&intc>; |
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts index 5d5e373e0ebc..63970513e4ae 100644 --- a/arch/arc/boot/dts/nsim_700.dts +++ b/arch/arc/boot/dts/nsim_700.dts | |||
| @@ -11,7 +11,6 @@ | |||
| 11 | 11 | ||
| 12 | / { | 12 | / { |
| 13 | compatible = "snps,nsim"; | 13 | compatible = "snps,nsim"; |
| 14 | clock-frequency = <80000000>; /* 80 MHZ */ | ||
| 15 | #address-cells = <1>; | 14 | #address-cells = <1>; |
| 16 | #size-cells = <1>; | 15 | #size-cells = <1>; |
| 17 | interrupt-parent = <&core_intc>; | 16 | interrupt-parent = <&core_intc>; |
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts index b5b060adce8a..763d66c883da 100644 --- a/arch/arc/boot/dts/nsimosci.dts +++ b/arch/arc/boot/dts/nsimosci.dts | |||
| @@ -11,7 +11,6 @@ | |||
| 11 | 11 | ||
| 12 | / { | 12 | / { |
| 13 | compatible = "snps,nsimosci"; | 13 | compatible = "snps,nsimosci"; |
| 14 | clock-frequency = <20000000>; /* 20 MHZ */ | ||
| 15 | #address-cells = <1>; | 14 | #address-cells = <1>; |
| 16 | #size-cells = <1>; | 15 | #size-cells = <1>; |
| 17 | interrupt-parent = <&core_intc>; | 16 | interrupt-parent = <&core_intc>; |
diff --git a/arch/arc/boot/dts/nsimosci_hs.dts b/arch/arc/boot/dts/nsimosci_hs.dts index 325e73090a18..4eb97c584b18 100644 --- a/arch/arc/boot/dts/nsimosci_hs.dts +++ b/arch/arc/boot/dts/nsimosci_hs.dts | |||
| @@ -11,7 +11,6 @@ | |||
| 11 | 11 | ||
| 12 | / { | 12 | / { |
| 13 | compatible = "snps,nsimosci_hs"; | 13 | compatible = "snps,nsimosci_hs"; |
| 14 | clock-frequency = <20000000>; /* 20 MHZ */ | ||
| 15 | #address-cells = <1>; | 14 | #address-cells = <1>; |
| 16 | #size-cells = <1>; | 15 | #size-cells = <1>; |
| 17 | interrupt-parent = <&core_intc>; | 16 | interrupt-parent = <&core_intc>; |
diff --git a/arch/arc/boot/dts/nsimosci_hs_idu.dts b/arch/arc/boot/dts/nsimosci_hs_idu.dts index ee03d7126581..853f897eb2a3 100644 --- a/arch/arc/boot/dts/nsimosci_hs_idu.dts +++ b/arch/arc/boot/dts/nsimosci_hs_idu.dts | |||
| @@ -11,7 +11,6 @@ | |||
| 11 | 11 | ||
| 12 | / { | 12 | / { |
| 13 | compatible = "snps,nsimosci_hs"; | 13 | compatible = "snps,nsimosci_hs"; |
| 14 | clock-frequency = <5000000>; /* 5 MHZ */ | ||
| 15 | #address-cells = <1>; | 14 | #address-cells = <1>; |
| 16 | #size-cells = <1>; | 15 | #size-cells = <1>; |
| 17 | interrupt-parent = <&core_intc>; | 16 | interrupt-parent = <&core_intc>; |
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi index 3a10cc633e2b..65808fe0a290 100644 --- a/arch/arc/boot/dts/skeleton.dtsi +++ b/arch/arc/boot/dts/skeleton.dtsi | |||
| @@ -13,7 +13,6 @@ | |||
| 13 | 13 | ||
| 14 | / { | 14 | / { |
| 15 | compatible = "snps,arc"; | 15 | compatible = "snps,arc"; |
| 16 | clock-frequency = <80000000>; /* 80 MHZ */ | ||
| 17 | #address-cells = <1>; | 16 | #address-cells = <1>; |
| 18 | #size-cells = <1>; | 17 | #size-cells = <1>; |
| 19 | chosen { }; | 18 | chosen { }; |
diff --git a/arch/arc/boot/dts/skeleton_hs.dtsi b/arch/arc/boot/dts/skeleton_hs.dtsi index 71fd308a9298..2dfe8037dfbb 100644 --- a/arch/arc/boot/dts/skeleton_hs.dtsi +++ b/arch/arc/boot/dts/skeleton_hs.dtsi | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | 8 | ||
| 9 | / { | 9 | / { |
| 10 | compatible = "snps,arc"; | 10 | compatible = "snps,arc"; |
| 11 | clock-frequency = <80000000>; /* 80 MHZ */ | ||
| 12 | #address-cells = <1>; | 11 | #address-cells = <1>; |
| 13 | #size-cells = <1>; | 12 | #size-cells = <1>; |
| 14 | chosen { }; | 13 | chosen { }; |
diff --git a/arch/arc/boot/dts/skeleton_hs_idu.dtsi b/arch/arc/boot/dts/skeleton_hs_idu.dtsi index d1cb25a66989..4c11079f3565 100644 --- a/arch/arc/boot/dts/skeleton_hs_idu.dtsi +++ b/arch/arc/boot/dts/skeleton_hs_idu.dtsi | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | 8 | ||
| 9 | / { | 9 | / { |
| 10 | compatible = "snps,arc"; | 10 | compatible = "snps,arc"; |
| 11 | clock-frequency = <80000000>; /* 80 MHZ */ | ||
| 12 | #address-cells = <1>; | 11 | #address-cells = <1>; |
| 13 | #size-cells = <1>; | 12 | #size-cells = <1>; |
| 14 | chosen { }; | 13 | chosen { }; |
diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi index ad4ee43bd2ac..0fd6ba985b16 100644 --- a/arch/arc/boot/dts/vdk_axc003.dtsi +++ b/arch/arc/boot/dts/vdk_axc003.dtsi | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | 14 | ||
| 15 | / { | 15 | / { |
| 16 | compatible = "snps,arc"; | 16 | compatible = "snps,arc"; |
| 17 | clock-frequency = <50000000>; | ||
| 18 | #address-cells = <1>; | 17 | #address-cells = <1>; |
| 19 | #size-cells = <1>; | 18 | #size-cells = <1>; |
| 20 | 19 | ||
diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi index a3cb6263c581..82214cd7ba0c 100644 --- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi +++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | 15 | ||
| 16 | / { | 16 | / { |
| 17 | compatible = "snps,arc"; | 17 | compatible = "snps,arc"; |
| 18 | clock-frequency = <50000000>; | ||
| 19 | #address-cells = <1>; | 18 | #address-cells = <1>; |
| 20 | #size-cells = <1>; | 19 | #size-cells = <1>; |
| 21 | 20 | ||
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 5f3dcbbc0cc9..dd683995bc9d 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h | |||
| @@ -25,50 +25,17 @@ | |||
| 25 | 25 | ||
| 26 | #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) | 26 | #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) |
| 27 | 27 | ||
| 28 | #ifdef CONFIG_ARC_STAR_9000923308 | ||
| 29 | |||
| 30 | #define SCOND_FAIL_RETRY_VAR_DEF \ | ||
| 31 | unsigned int delay = 1, tmp; \ | ||
| 32 | |||
| 33 | #define SCOND_FAIL_RETRY_ASM \ | ||
| 34 | " bz 4f \n" \ | ||
| 35 | " ; --- scond fail delay --- \n" \ | ||
| 36 | " mov %[tmp], %[delay] \n" /* tmp = delay */ \ | ||
| 37 | "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \ | ||
| 38 | " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \ | ||
| 39 | " rol %[delay], %[delay] \n" /* delay *= 2 */ \ | ||
| 40 | " b 1b \n" /* start over */ \ | ||
| 41 | "4: ; --- success --- \n" \ | ||
| 42 | |||
| 43 | #define SCOND_FAIL_RETRY_VARS \ | ||
| 44 | ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \ | ||
| 45 | |||
| 46 | #else /* !CONFIG_ARC_STAR_9000923308 */ | ||
| 47 | |||
| 48 | #define SCOND_FAIL_RETRY_VAR_DEF | ||
| 49 | |||
| 50 | #define SCOND_FAIL_RETRY_ASM \ | ||
| 51 | " bnz 1b \n" \ | ||
| 52 | |||
| 53 | #define SCOND_FAIL_RETRY_VARS | ||
| 54 | |||
| 55 | #endif | ||
| 56 | |||
| 57 | #define ATOMIC_OP(op, c_op, asm_op) \ | 28 | #define ATOMIC_OP(op, c_op, asm_op) \ |
| 58 | static inline void atomic_##op(int i, atomic_t *v) \ | 29 | static inline void atomic_##op(int i, atomic_t *v) \ |
| 59 | { \ | 30 | { \ |
| 60 | unsigned int val; \ | 31 | unsigned int val; \ |
| 61 | SCOND_FAIL_RETRY_VAR_DEF \ | ||
| 62 | \ | 32 | \ |
| 63 | __asm__ __volatile__( \ | 33 | __asm__ __volatile__( \ |
| 64 | "1: llock %[val], [%[ctr]] \n" \ | 34 | "1: llock %[val], [%[ctr]] \n" \ |
| 65 | " " #asm_op " %[val], %[val], %[i] \n" \ | 35 | " " #asm_op " %[val], %[val], %[i] \n" \ |
| 66 | " scond %[val], [%[ctr]] \n" \ | 36 | " scond %[val], [%[ctr]] \n" \ |
| 67 | " \n" \ | 37 | " bnz 1b \n" \ |
| 68 | SCOND_FAIL_RETRY_ASM \ | ||
| 69 | \ | ||
| 70 | : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \ | 38 | : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \ |
| 71 | SCOND_FAIL_RETRY_VARS \ | ||
| 72 | : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \ | 39 | : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \ |
| 73 | [i] "ir" (i) \ | 40 | [i] "ir" (i) \ |
| 74 | : "cc"); \ | 41 | : "cc"); \ |
| @@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v) \ | |||
| 77 | #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ | 44 | #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ |
| 78 | static inline int atomic_##op##_return(int i, atomic_t *v) \ | 45 | static inline int atomic_##op##_return(int i, atomic_t *v) \ |
| 79 | { \ | 46 | { \ |
| 80 | unsigned int val; \ | 47 | unsigned int val; \ |
| 81 | SCOND_FAIL_RETRY_VAR_DEF \ | ||
| 82 | \ | 48 | \ |
| 83 | /* \ | 49 | /* \ |
| 84 | * Explicit full memory barrier needed before/after as \ | 50 | * Explicit full memory barrier needed before/after as \ |
| @@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |||
| 90 | "1: llock %[val], [%[ctr]] \n" \ | 56 | "1: llock %[val], [%[ctr]] \n" \ |
| 91 | " " #asm_op " %[val], %[val], %[i] \n" \ | 57 | " " #asm_op " %[val], %[val], %[i] \n" \ |
| 92 | " scond %[val], [%[ctr]] \n" \ | 58 | " scond %[val], [%[ctr]] \n" \ |
| 93 | " \n" \ | 59 | " bnz 1b \n" \ |
| 94 | SCOND_FAIL_RETRY_ASM \ | ||
| 95 | \ | ||
| 96 | : [val] "=&r" (val) \ | 60 | : [val] "=&r" (val) \ |
| 97 | SCOND_FAIL_RETRY_VARS \ | ||
| 98 | : [ctr] "r" (&v->counter), \ | 61 | : [ctr] "r" (&v->counter), \ |
| 99 | [i] "ir" (i) \ | 62 | [i] "ir" (i) \ |
| 100 | : "cc"); \ | 63 | : "cc"); \ |
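With the SCOND_FAIL_RETRY_* macros gone, each ATOMIC_OP reduces to a plain load-locked/store-conditional retry loop. A behavioural model of that loop in standard C11 atomics is sketched below; sketch_atomic_t and atomic_add_sketch are illustrative names only, and the real implementation remains the inline asm shown above.

        #include <stdatomic.h>

        /* Stand-in type for this sketch only; the kernel's atomic_t is not C11. */
        typedef struct { _Atomic int counter; } sketch_atomic_t;

        static inline void atomic_add_sketch(int i, sketch_atomic_t *v)
        {
                int old = atomic_load_explicit(&v->counter, memory_order_relaxed);

                /* Retry until the conditional store succeeds -- the "bnz 1b" above.
                 * On failure, compare_exchange reloads "old" with the current value. */
                while (!atomic_compare_exchange_weak_explicit(&v->counter, &old,
                                                              old + i,
                                                              memory_order_relaxed,
                                                              memory_order_relaxed))
                        ;
        }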
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h index e0e1faf03c50..14c310f2e0b1 100644 --- a/arch/arc/include/asm/entry-compact.h +++ b/arch/arc/include/asm/entry-compact.h | |||
| @@ -76,8 +76,8 @@ | |||
| 76 | * We need to be a bit more cautious here. What if a kernel bug in | 76 | * We need to be a bit more cautious here. What if a kernel bug in |
| 77 | * L1 ISR, caused SP to go whaco (some small value which looks like | 77 | * L1 ISR, caused SP to go whaco (some small value which looks like |
| 78 | * USER stk) and then we take L2 ISR. | 78 | * USER stk) and then we take L2 ISR. |
| 79 | * Above brlo alone would treat it as a valid L1-L2 sceanrio | 79 | * Above brlo alone would treat it as a valid L1-L2 scenario |
| 80 | * instead of shouting alound | 80 | * instead of shouting around |
| 81 | * The only feasible way is to make sure this L2 happened in | 81 | * The only feasible way is to make sure this L2 happened in |
| 82 | * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in | 82 | * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in |
| 83 | * L1 ISR before it switches stack | 83 | * L1 ISR before it switches stack |
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h index 1fd467ef658f..b0b87f2447f5 100644 --- a/arch/arc/include/asm/mmu_context.h +++ b/arch/arc/include/asm/mmu_context.h | |||
| @@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm) | |||
| 83 | local_flush_tlb_all(); | 83 | local_flush_tlb_all(); |
| 84 | 84 | ||
| 85 | /* | 85 | /* |
| 86 | * Above checke for rollover of 8 bit ASID in 32 bit container. | 86 | * Above check for rollover of 8 bit ASID in 32 bit container. |
| 87 | * If the container itself wrapped around, set it to a non zero | 87 | * If the container itself wrapped around, set it to a non zero |
| 88 | * "generation" to distinguish from no context | 88 | * "generation" to distinguish from no context |
| 89 | */ | 89 | */ |
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h index 86ed671286df..3749234b7419 100644 --- a/arch/arc/include/asm/pgalloc.h +++ b/arch/arc/include/asm/pgalloc.h | |||
| @@ -95,7 +95,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | |||
| 95 | { | 95 | { |
| 96 | pte_t *pte; | 96 | pte_t *pte; |
| 97 | 97 | ||
| 98 | pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, | 98 | pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, |
| 99 | __get_order_pte()); | 99 | __get_order_pte()); |
| 100 | 100 | ||
| 101 | return pte; | 101 | return pte; |
| @@ -107,7 +107,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address) | |||
| 107 | pgtable_t pte_pg; | 107 | pgtable_t pte_pg; |
| 108 | struct page *page; | 108 | struct page *page; |
| 109 | 109 | ||
| 110 | pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte()); | 110 | pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte()); |
| 111 | if (!pte_pg) | 111 | if (!pte_pg) |
| 112 | return 0; | 112 | return 0; |
| 113 | memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t)); | 113 | memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t)); |
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index 034bbdc0ff61..858f98ef7f1b 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h | |||
| @@ -47,7 +47,7 @@ | |||
| 47 | * Page Tables are purely for Linux VM's consumption and the bits below are | 47 | * Page Tables are purely for Linux VM's consumption and the bits below are |
| 48 | * suited to that (uniqueness). Hence some are not implemented in the TLB and | 48 | * suited to that (uniqueness). Hence some are not implemented in the TLB and |
| 49 | * some have different value in TLB. | 49 | * some have different value in TLB. |
| 50 | * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in | 50 | * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in |
| 51 | * seperate PD0 and PD1, which combined forms a translation entry) | 51 | * seperate PD0 and PD1, which combined forms a translation entry) |
| 52 | * while for PTE perspective, they are 8 and 9 respectively | 52 | * while for PTE perspective, they are 8 and 9 respectively |
| 53 | * with MMU v3: Most bits (except SHARED) represent the exact hardware pos | 53 | * with MMU v3: Most bits (except SHARED) represent the exact hardware pos |
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index f9048994b22f..16b630fbeb6a 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h | |||
| @@ -78,7 +78,7 @@ struct task_struct; | |||
| 78 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) | 78 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) |
| 79 | 79 | ||
| 80 | /* | 80 | /* |
| 81 | * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode. | 81 | * Where about of Task's sp, fp, blink when it was last seen in kernel mode. |
| 82 | * Look in process.c for details of kernel stack layout | 82 | * Look in process.c for details of kernel stack layout |
| 83 | */ | 83 | */ |
| 84 | #define TSK_K_ESP(tsk) (tsk->thread.ksp) | 84 | #define TSK_K_ESP(tsk) (tsk->thread.ksp) |
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h index 991380438d6b..89fdd1b0a76e 100644 --- a/arch/arc/include/asm/smp.h +++ b/arch/arc/include/asm/smp.h | |||
| @@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void) | |||
| 86 | * (1) These insn were introduced only in 4.10 release. So for older released | 86 | * (1) These insn were introduced only in 4.10 release. So for older released |
| 87 | * support needed. | 87 | * support needed. |
| 88 | * | 88 | * |
| 89 | * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be | 89 | * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be |
| 90 | * gaurantted by the platform (not something which core handles). | 90 | * gaurantted by the platform (not something which core handles). |
| 91 | * Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ | 91 | * Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ |
| 92 | * disabling for atomicity. | 92 | * disabling for atomicity. |
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h index 800e7c430ca5..cded4a9b5438 100644 --- a/arch/arc/include/asm/spinlock.h +++ b/arch/arc/include/asm/spinlock.h | |||
| @@ -20,11 +20,6 @@ | |||
| 20 | 20 | ||
| 21 | #ifdef CONFIG_ARC_HAS_LLSC | 21 | #ifdef CONFIG_ARC_HAS_LLSC |
| 22 | 22 | ||
| 23 | /* | ||
| 24 | * A normal LLOCK/SCOND based system, w/o need for livelock workaround | ||
| 25 | */ | ||
| 26 | #ifndef CONFIG_ARC_STAR_9000923308 | ||
| 27 | |||
| 28 | static inline void arch_spin_lock(arch_spinlock_t *lock) | 23 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
| 29 | { | 24 | { |
| 30 | unsigned int val; | 25 | unsigned int val; |
| @@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) | |||
| 238 | smp_mb(); | 233 | smp_mb(); |
| 239 | } | 234 | } |
| 240 | 235 | ||
| 241 | #else /* CONFIG_ARC_STAR_9000923308 */ | ||
| 242 | |||
| 243 | /* | ||
| 244 | * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping | ||
| 245 | * coherency transactions in the SCU. The exclusive line state keeps rotating | ||
| 246 | * among contenting cores leading to a never ending cycle. So break the cycle | ||
| 247 | * by deferring the retry of failed exclusive access (SCOND). The actual delay | ||
| 248 | * needed is function of number of contending cores as well as the unrelated | ||
| 249 | * coherency traffic from other cores. To keep the code simple, start off with | ||
| 250 | * small delay of 1 which would suffice most cases and in case of contention | ||
| 251 | * double the delay. Eventually the delay is sufficient such that the coherency | ||
| 252 | * pipeline is drained, thus a subsequent exclusive access would succeed. | ||
| 253 | */ | ||
| 254 | |||
| 255 | #define SCOND_FAIL_RETRY_VAR_DEF \ | ||
| 256 | unsigned int delay, tmp; \ | ||
| 257 | |||
| 258 | #define SCOND_FAIL_RETRY_ASM \ | ||
| 259 | " ; --- scond fail delay --- \n" \ | ||
| 260 | " mov %[tmp], %[delay] \n" /* tmp = delay */ \ | ||
| 261 | "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \ | ||
| 262 | " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \ | ||
| 263 | " rol %[delay], %[delay] \n" /* delay *= 2 */ \ | ||
| 264 | " b 1b \n" /* start over */ \ | ||
| 265 | " \n" \ | ||
| 266 | "4: ; --- done --- \n" \ | ||
| 267 | |||
| 268 | #define SCOND_FAIL_RETRY_VARS \ | ||
| 269 | ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \ | ||
| 270 | |||
| 271 | static inline void arch_spin_lock(arch_spinlock_t *lock) | ||
| 272 | { | ||
| 273 | unsigned int val; | ||
| 274 | SCOND_FAIL_RETRY_VAR_DEF; | ||
| 275 | |||
| 276 | smp_mb(); | ||
| 277 | |||
| 278 | __asm__ __volatile__( | ||
| 279 | "0: mov %[delay], 1 \n" | ||
| 280 | "1: llock %[val], [%[slock]] \n" | ||
| 281 | " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */ | ||
| 282 | " scond %[LOCKED], [%[slock]] \n" /* acquire */ | ||
| 283 | " bz 4f \n" /* done */ | ||
| 284 | " \n" | ||
| 285 | SCOND_FAIL_RETRY_ASM | ||
| 286 | |||
| 287 | : [val] "=&r" (val) | ||
| 288 | SCOND_FAIL_RETRY_VARS | ||
| 289 | : [slock] "r" (&(lock->slock)), | ||
| 290 | [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) | ||
| 291 | : "memory", "cc"); | ||
| 292 | |||
| 293 | smp_mb(); | ||
| 294 | } | ||
| 295 | |||
| 296 | /* 1 - lock taken successfully */ | ||
| 297 | static inline int arch_spin_trylock(arch_spinlock_t *lock) | ||
| 298 | { | ||
| 299 | unsigned int val, got_it = 0; | ||
| 300 | SCOND_FAIL_RETRY_VAR_DEF; | ||
| 301 | |||
| 302 | smp_mb(); | ||
| 303 | |||
| 304 | __asm__ __volatile__( | ||
| 305 | "0: mov %[delay], 1 \n" | ||
| 306 | "1: llock %[val], [%[slock]] \n" | ||
| 307 | " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */ | ||
| 308 | " scond %[LOCKED], [%[slock]] \n" /* acquire */ | ||
| 309 | " bz.d 4f \n" | ||
| 310 | " mov.z %[got_it], 1 \n" /* got it */ | ||
| 311 | " \n" | ||
| 312 | SCOND_FAIL_RETRY_ASM | ||
| 313 | |||
| 314 | : [val] "=&r" (val), | ||
| 315 | [got_it] "+&r" (got_it) | ||
| 316 | SCOND_FAIL_RETRY_VARS | ||
| 317 | : [slock] "r" (&(lock->slock)), | ||
| 318 | [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) | ||
| 319 | : "memory", "cc"); | ||
| 320 | |||
| 321 | smp_mb(); | ||
| 322 | |||
| 323 | return got_it; | ||
| 324 | } | ||
| 325 | |||
| 326 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | ||
| 327 | { | ||
| 328 | smp_mb(); | ||
| 329 | |||
| 330 | lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__; | ||
| 331 | |||
| 332 | smp_mb(); | ||
| 333 | } | ||
| 334 | |||
| 335 | /* | ||
| 336 | * Read-write spinlocks, allowing multiple readers but only one writer. | ||
| 337 | * Unfair locking as Writers could be starved indefinitely by Reader(s) | ||
| 338 | */ | ||
| 339 | |||
| 340 | static inline void arch_read_lock(arch_rwlock_t *rw) | ||
| 341 | { | ||
| 342 | unsigned int val; | ||
| 343 | SCOND_FAIL_RETRY_VAR_DEF; | ||
| 344 | |||
| 345 | smp_mb(); | ||
| 346 | |||
| 347 | /* | ||
| 348 | * zero means writer holds the lock exclusively, deny Reader. | ||
| 349 | * Otherwise grant lock to first/subseq reader | ||
| 350 | * | ||
| 351 | * if (rw->counter > 0) { | ||
| 352 | * rw->counter--; | ||
| 353 | * ret = 1; | ||
| 354 | * } | ||
| 355 | */ | ||
| 356 | |||
| 357 | __asm__ __volatile__( | ||
| 358 | "0: mov %[delay], 1 \n" | ||
| 359 | "1: llock %[val], [%[rwlock]] \n" | ||
| 360 | " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */ | ||
| 361 | " sub %[val], %[val], 1 \n" /* reader lock */ | ||
| 362 | " scond %[val], [%[rwlock]] \n" | ||
| 363 | " bz 4f \n" /* done */ | ||
| 364 | " \n" | ||
| 365 | SCOND_FAIL_RETRY_ASM | ||
| 366 | |||
| 367 | : [val] "=&r" (val) | ||
| 368 | SCOND_FAIL_RETRY_VARS | ||
| 369 | : [rwlock] "r" (&(rw->counter)), | ||
| 370 | [WR_LOCKED] "ir" (0) | ||
| 371 | : "memory", "cc"); | ||
| 372 | |||
| 373 | smp_mb(); | ||
| 374 | } | ||
| 375 | |||
| 376 | /* 1 - lock taken successfully */ | ||
| 377 | static inline int arch_read_trylock(arch_rwlock_t *rw) | ||
| 378 | { | ||
| 379 | unsigned int val, got_it = 0; | ||
| 380 | SCOND_FAIL_RETRY_VAR_DEF; | ||
| 381 | |||
| 382 | smp_mb(); | ||
| 383 | |||
| 384 | __asm__ __volatile__( | ||
| 385 | "0: mov %[delay], 1 \n" | ||
| 386 | "1: llock %[val], [%[rwlock]] \n" | ||
| 387 | " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */ | ||
| 388 | " sub %[val], %[val], 1 \n" /* counter-- */ | ||
| 389 | " scond %[val], [%[rwlock]] \n" | ||
| 390 | " bz.d 4f \n" | ||
| 391 | " mov.z %[got_it], 1 \n" /* got it */ | ||
| 392 | " \n" | ||
| 393 | SCOND_FAIL_RETRY_ASM | ||
| 394 | |||
| 395 | : [val] "=&r" (val), | ||
| 396 | [got_it] "+&r" (got_it) | ||
| 397 | SCOND_FAIL_RETRY_VARS | ||
| 398 | : [rwlock] "r" (&(rw->counter)), | ||
| 399 | [WR_LOCKED] "ir" (0) | ||
| 400 | : "memory", "cc"); | ||
| 401 | |||
| 402 | smp_mb(); | ||
| 403 | |||
| 404 | return got_it; | ||
| 405 | } | ||
| 406 | |||
| 407 | static inline void arch_write_lock(arch_rwlock_t *rw) | ||
| 408 | { | ||
| 409 | unsigned int val; | ||
| 410 | SCOND_FAIL_RETRY_VAR_DEF; | ||
| 411 | |||
| 412 | smp_mb(); | ||
| 413 | |||
| 414 | /* | ||
| 415 | * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), | ||
| 416 | * deny writer. Otherwise if unlocked grant to writer | ||
| 417 | * Hence the claim that Linux rwlocks are unfair to writers. | ||
| 418 | * (can be starved for an indefinite time by readers). | ||
| 419 | * | ||
| 420 | * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { | ||
| 421 | * rw->counter = 0; | ||
| 422 | * ret = 1; | ||
| 423 | * } | ||
| 424 | */ | ||
| 425 | |||
| 426 | __asm__ __volatile__( | ||
| 427 | "0: mov %[delay], 1 \n" | ||
| 428 | "1: llock %[val], [%[rwlock]] \n" | ||
| 429 | " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */ | ||
| 430 | " mov %[val], %[WR_LOCKED] \n" | ||
| 431 | " scond %[val], [%[rwlock]] \n" | ||
| 432 | " bz 4f \n" | ||
| 433 | " \n" | ||
| 434 | SCOND_FAIL_RETRY_ASM | ||
| 435 | |||
| 436 | : [val] "=&r" (val) | ||
| 437 | SCOND_FAIL_RETRY_VARS | ||
| 438 | : [rwlock] "r" (&(rw->counter)), | ||
| 439 | [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), | ||
| 440 | [WR_LOCKED] "ir" (0) | ||
| 441 | : "memory", "cc"); | ||
| 442 | |||
| 443 | smp_mb(); | ||
| 444 | } | ||
| 445 | |||
| 446 | /* 1 - lock taken successfully */ | ||
| 447 | static inline int arch_write_trylock(arch_rwlock_t *rw) | ||
| 448 | { | ||
| 449 | unsigned int val, got_it = 0; | ||
| 450 | SCOND_FAIL_RETRY_VAR_DEF; | ||
| 451 | |||
| 452 | smp_mb(); | ||
| 453 | |||
| 454 | __asm__ __volatile__( | ||
| 455 | "0: mov %[delay], 1 \n" | ||
| 456 | "1: llock %[val], [%[rwlock]] \n" | ||
| 457 | " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */ | ||
| 458 | " mov %[val], %[WR_LOCKED] \n" | ||
| 459 | " scond %[val], [%[rwlock]] \n" | ||
| 460 | " bz.d 4f \n" | ||
| 461 | " mov.z %[got_it], 1 \n" /* got it */ | ||
| 462 | " \n" | ||
| 463 | SCOND_FAIL_RETRY_ASM | ||
| 464 | |||
| 465 | : [val] "=&r" (val), | ||
| 466 | [got_it] "+&r" (got_it) | ||
| 467 | SCOND_FAIL_RETRY_VARS | ||
| 468 | : [rwlock] "r" (&(rw->counter)), | ||
| 469 | [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), | ||
| 470 | [WR_LOCKED] "ir" (0) | ||
| 471 | : "memory", "cc"); | ||
| 472 | |||
| 473 | smp_mb(); | ||
| 474 | |||
| 475 | return got_it; | ||
| 476 | } | ||
| 477 | |||
| 478 | static inline void arch_read_unlock(arch_rwlock_t *rw) | ||
| 479 | { | ||
| 480 | unsigned int val; | ||
| 481 | |||
| 482 | smp_mb(); | ||
| 483 | |||
| 484 | /* | ||
| 485 | * rw->counter++; | ||
| 486 | */ | ||
| 487 | __asm__ __volatile__( | ||
| 488 | "1: llock %[val], [%[rwlock]] \n" | ||
| 489 | " add %[val], %[val], 1 \n" | ||
| 490 | " scond %[val], [%[rwlock]] \n" | ||
| 491 | " bnz 1b \n" | ||
| 492 | " \n" | ||
| 493 | : [val] "=&r" (val) | ||
| 494 | : [rwlock] "r" (&(rw->counter)) | ||
| 495 | : "memory", "cc"); | ||
| 496 | |||
| 497 | smp_mb(); | ||
| 498 | } | ||
| 499 | |||
| 500 | static inline void arch_write_unlock(arch_rwlock_t *rw) | ||
| 501 | { | ||
| 502 | unsigned int val; | ||
| 503 | |||
| 504 | smp_mb(); | ||
| 505 | |||
| 506 | /* | ||
| 507 | * rw->counter = __ARCH_RW_LOCK_UNLOCKED__; | ||
| 508 | */ | ||
| 509 | __asm__ __volatile__( | ||
| 510 | "1: llock %[val], [%[rwlock]] \n" | ||
| 511 | " scond %[UNLOCKED], [%[rwlock]]\n" | ||
| 512 | " bnz 1b \n" | ||
| 513 | " \n" | ||
| 514 | : [val] "=&r" (val) | ||
| 515 | : [rwlock] "r" (&(rw->counter)), | ||
| 516 | [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__) | ||
| 517 | : "memory", "cc"); | ||
| 518 | |||
| 519 | smp_mb(); | ||
| 520 | } | ||
| 521 | |||
| 522 | #undef SCOND_FAIL_RETRY_VAR_DEF | ||
| 523 | #undef SCOND_FAIL_RETRY_ASM | ||
| 524 | #undef SCOND_FAIL_RETRY_VARS | ||
| 525 | |||
| 526 | #endif /* CONFIG_ARC_STAR_9000923308 */ | ||
| 527 | |||
| 528 | #else /* !CONFIG_ARC_HAS_LLSC */ | 236 | #else /* !CONFIG_ARC_HAS_LLSC */ |
| 529 | 237 | ||
| 530 | static inline void arch_spin_lock(arch_spinlock_t *lock) | 238 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h index 3af67455659a..2d79e527fa50 100644 --- a/arch/arc/include/asm/thread_info.h +++ b/arch/arc/include/asm/thread_info.h | |||
| @@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void) | |||
| 103 | 103 | ||
| 104 | /* | 104 | /* |
| 105 | * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it. | 105 | * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it. |
| 106 | * SYSCALL_TRACE is anways seperately/unconditionally tested right after a | 106 | * SYSCALL_TRACE is anyway seperately/unconditionally tested right after a |
| 107 | * syscall, so all that reamins to be tested is _TIF_WORK_MASK | 107 | * syscall, so all that reamins to be tested is _TIF_WORK_MASK |
| 108 | */ | 108 | */ |
| 109 | 109 | ||
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index d1da6032b715..a78d5670884f 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h | |||
| @@ -32,7 +32,7 @@ | |||
| 32 | #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) | 32 | #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) |
| 33 | 33 | ||
| 34 | /* | 34 | /* |
| 35 | * Algorthmically, for __user_ok() we want do: | 35 | * Algorithmically, for __user_ok() we want do: |
| 36 | * (start < TASK_SIZE) && (start+len < TASK_SIZE) | 36 | * (start < TASK_SIZE) && (start+len < TASK_SIZE) |
| 37 | * where TASK_SIZE could either be retrieved from thread_info->addr_limit or | 37 | * where TASK_SIZE could either be retrieved from thread_info->addr_limit or |
| 38 | * emitted directly in code. | 38 | * emitted directly in code. |
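The __user_ok() comment above describes a simple range check: both the start of the user buffer and its end must lie below TASK_SIZE. A hedged userspace sketch of that check, written with an overflow-safe form of the end comparison (DEMO_TASK_SIZE and demo_user_ok() are made-up names, and the split value is illustrative):

/* Sketch only: the range check from the comment, guarded against wrap-around. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_TASK_SIZE 0x60000000UL   /* assumed user/kernel split for the demo */

static bool demo_user_ok(uintptr_t start, size_t len)
{
    /* start + len could wrap; checking len <= TASK_SIZE - start avoids that. */
    return start < DEMO_TASK_SIZE && len <= DEMO_TASK_SIZE - start;
}

int main(void)
{
    printf("%d\n", demo_user_ok(0x1000, 256));           /* 1: well inside */
    printf("%d\n", demo_user_ok(0x5fffff00UL, 0x1000));  /* 0: crosses the limit */
    return 0;
}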
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h index 095599a73195..71f3918b0fc3 100644 --- a/arch/arc/include/uapi/asm/swab.h +++ b/arch/arc/include/uapi/asm/swab.h | |||
| @@ -74,7 +74,7 @@ | |||
| 74 | __tmp ^ __in; \ | 74 | __tmp ^ __in; \ |
| 75 | }) | 75 | }) |
| 76 | 76 | ||
| 77 | #elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bwap instruction */ | 77 | #elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */ |
| 78 | 78 | ||
| 79 | #define __arch_swab32(x) \ | 79 | #define __arch_swab32(x) \ |
| 80 | ({ \ | 80 | ({ \ |
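The __arch_swab32() macro selected here wraps a single-cycle bswap instruction; functionally it is the usual 32-bit byte reversal. A portable C sketch of the same operation (demo_swab32() is a stand-in, not the kernel macro):

/* Sketch only: byte-reverses a 32-bit value, as the bswap instruction does. */
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_swab32(uint32_t x)
{
    return ((x & 0x000000ffu) << 24) |
           ((x & 0x0000ff00u) <<  8) |
           ((x & 0x00ff0000u) >>  8) |
           ((x & 0xff000000u) >> 24);
}

int main(void)
{
    printf("0x%08x\n", demo_swab32(0x12345678u)); /* prints 0x78563412 */
    return 0;
}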
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S index 0cb0abaa0479..98812c1248df 100644 --- a/arch/arc/kernel/entry-compact.S +++ b/arch/arc/kernel/entry-compact.S | |||
| @@ -91,27 +91,13 @@ VECTOR mem_service ; 0x8, Mem exception (0x1) | |||
| 91 | VECTOR instr_service ; 0x10, Instrn Error (0x2) | 91 | VECTOR instr_service ; 0x10, Instrn Error (0x2) |
| 92 | 92 | ||
| 93 | ; ******************** Device ISRs ********************** | 93 | ; ******************** Device ISRs ********************** |
| 94 | #ifdef CONFIG_ARC_IRQ3_LV2 | 94 | #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS |
| 95 | VECTOR handle_interrupt_level2 | ||
| 96 | #else | ||
| 97 | VECTOR handle_interrupt_level1 | ||
| 98 | #endif | ||
| 99 | |||
| 100 | VECTOR handle_interrupt_level1 | ||
| 101 | |||
| 102 | #ifdef CONFIG_ARC_IRQ5_LV2 | ||
| 103 | VECTOR handle_interrupt_level2 | ||
| 104 | #else | ||
| 105 | VECTOR handle_interrupt_level1 | ||
| 106 | #endif | ||
| 107 | |||
| 108 | #ifdef CONFIG_ARC_IRQ6_LV2 | ||
| 109 | VECTOR handle_interrupt_level2 | 95 | VECTOR handle_interrupt_level2 |
| 110 | #else | 96 | #else |
| 111 | VECTOR handle_interrupt_level1 | 97 | VECTOR handle_interrupt_level1 |
| 112 | #endif | 98 | #endif |
| 113 | 99 | ||
| 114 | .rept 25 | 100 | .rept 28 |
| 115 | VECTOR handle_interrupt_level1 ; Other devices | 101 | VECTOR handle_interrupt_level1 ; Other devices |
| 116 | .endr | 102 | .endr |
| 117 | 103 | ||
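The vector-table hunk above collapses the per-IRQ #ifdef blocks into one: the old layout listed four device vectors explicitly and then repeated 25 more, while the new one lists a single conditional vector and repeats 28, so the total number of device slots is unchanged. A trivial sanity check of that arithmetic (plain demo code):

/* Sketch only: checks the vector counts implied by the hunk above. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
    int old_vectors = 4 + 25;   /* IRQ3..IRQ6 entries + .rept 25 */
    int new_vectors = 1 + 28;   /* one conditional entry + .rept 28 */

    assert(old_vectors == new_vectors);
    printf("device vectors: %d\n", new_vectors); /* 29 */
    return 0;
}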
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c index c5cceca36118..ce9deb953ca9 100644 --- a/arch/arc/kernel/intc-compact.c +++ b/arch/arc/kernel/intc-compact.c | |||
| @@ -28,10 +28,8 @@ void arc_init_IRQ(void) | |||
| 28 | { | 28 | { |
| 29 | int level_mask = 0; | 29 | int level_mask = 0; |
| 30 | 30 | ||
| 31 | /* setup any high priority Interrupts (Level2 in ARCompact jargon) */ | 31 | /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */ |
| 32 | level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3; | 32 | level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ; |
| 33 | level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5; | ||
| 34 | level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6; | ||
| 35 | 33 | ||
| 36 | /* | 34 | /* |
| 37 | * Write to register, even if no LV2 IRQs configured to reset it | 35 | * Write to register, even if no LV2 IRQs configured to reset it |
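The intc change relies on IS_ENABLED() evaluating to 1 when the option is set and 0 otherwise, so shifting it by the IRQ number sets exactly that bit in level_mask when the Kconfig option is enabled. A small sketch of the idiom with stand-in constants (the DEMO_* names are illustrative and the timer IRQ number is assumed):

/* Sketch only: the config-flag-to-bitmask idiom used in arc_init_IRQ(). */
#include <stdio.h>

#define DEMO_CONFIG_IRQ_LEVELS 1   /* pretend the Kconfig option is enabled */
#define DEMO_TIMER0_IRQ        3   /* assumed timer IRQ line for the demo */

int main(void)
{
    unsigned int level_mask = 0;

    level_mask |= DEMO_CONFIG_IRQ_LEVELS << DEMO_TIMER0_IRQ;
    printf("level_mask = 0x%x\n", level_mask); /* 0x8: only bit 3 set */
    return 0;
}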
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index 6fd48021324b..08f03d9b5b3e 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c | |||
| @@ -108,7 +108,7 @@ static void arc_perf_event_update(struct perf_event *event, | |||
| 108 | int64_t delta = new_raw_count - prev_raw_count; | 108 | int64_t delta = new_raw_count - prev_raw_count; |
| 109 | 109 | ||
| 110 | /* | 110 | /* |
| 111 | * We don't afaraid of hwc->prev_count changing beneath our feet | 111 | * We aren't afraid of hwc->prev_count changing beneath our feet |
| 112 | * because there's no way for us to re-enter this function anytime. | 112 | * because there's no way for us to re-enter this function anytime. |
| 113 | */ | 113 | */ |
| 114 | local64_set(&hwc->prev_count, new_raw_count); | 114 | local64_set(&hwc->prev_count, new_raw_count); |
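arc_perf_event_update() above follows the common perf pattern: read the new raw counter, take the signed delta against the remembered previous value, then store the new value as the next baseline so later deltas do not double-count. A minimal userspace model of that bookkeeping (demo_hw_counter and demo_event_update() are hypothetical names):

/* Sketch only: the delta-and-rebaseline pattern from the hunk above. */
#include <stdint.h>
#include <stdio.h>

struct demo_hw_counter {
    int64_t prev_count;   /* last raw value we accounted for */
    int64_t total;        /* accumulated event count */
};

static void demo_event_update(struct demo_hw_counter *hwc, int64_t new_raw)
{
    int64_t delta = new_raw - hwc->prev_count;

    hwc->prev_count = new_raw;  /* becomes the baseline for the next update */
    hwc->total += delta;
}

int main(void)
{
    struct demo_hw_counter hwc = { 0, 0 };

    demo_event_update(&hwc, 150);
    demo_event_update(&hwc, 400);
    printf("total = %lld\n", (long long)hwc.total); /* 400 */
    return 0;
}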
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index f63b8bfefb0c..2ee7a4d758a8 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c | |||
| @@ -392,7 +392,7 @@ void __init setup_arch(char **cmdline_p) | |||
| 392 | /* | 392 | /* |
| 393 | * If we are here, it is established that @uboot_arg didn't | 393 | * If we are here, it is established that @uboot_arg didn't |
| 394 | * point to DT blob. Instead if u-boot says it is cmdline, | 394 | * point to DT blob. Instead if u-boot says it is cmdline, |
| 395 | * Appent to embedded DT cmdline. | 395 | * append to embedded DT cmdline. |
| 396 | * setup_machine_fdt() would have populated @boot_command_line | 396 | * setup_machine_fdt() would have populated @boot_command_line |
| 397 | */ | 397 | */ |
| 398 | if (uboot_tag == 1) { | 398 | if (uboot_tag == 1) { |
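The setup_arch() comment describes the u-boot handoff: when @uboot_arg is a command line rather than a DT blob, it is appended to the command line already populated from the embedded device tree. A rough userspace sketch of that append step, assuming a fixed-size buffer (the strings and DEMO_COMMAND_LINE_SIZE are illustrative, and the kernel uses its own string helpers rather than strncat):

/* Sketch only: append a bootloader cmdline to the DT-provided one. */
#include <stdio.h>
#include <string.h>

#define DEMO_COMMAND_LINE_SIZE 256

int main(void)
{
    char boot_command_line[DEMO_COMMAND_LINE_SIZE] = "console=ttyS0,115200"; /* from DT */
    const char *uboot_arg = "root=/dev/mmcblk0p2 rw";                        /* from u-boot */

    strncat(boot_command_line, " ",
            sizeof(boot_command_line) - strlen(boot_command_line) - 1);
    strncat(boot_command_line, uboot_arg,
            sizeof(boot_command_line) - strlen(boot_command_line) - 1);

    printf("%s\n", boot_command_line);
    return 0;
}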
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 004b7f0bc76c..6cb3736b6b83 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c | |||
| @@ -34,7 +34,7 @@ | |||
| 34 | * -ViXS were still seeing crashes when using insmod to load drivers. | 34 | * -ViXS were still seeing crashes when using insmod to load drivers. |
| 35 | * It turned out that the code to change Execute permssions for TLB entries | 35 | * It turned out that the code to change Execute permssions for TLB entries |
| 36 | * of user was not guarded for interrupts (mod_tlb_permission) | 36 | * of user was not guarded for interrupts (mod_tlb_permission) |
| 37 | * This was cauing TLB entries to be overwritten on unrelated indexes | 37 | * This was causing TLB entries to be overwritten on unrelated indexes |
| 38 | * | 38 | * |
| 39 | * Vineetg: July 15th 2008: Bug #94183 | 39 | * Vineetg: July 15th 2008: Bug #94183 |
| 40 | * -Exception happens in Delay slot of a JMP, and before user space resumes, | 40 | * -Exception happens in Delay slot of a JMP, and before user space resumes, |
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index a6f91e88ce36..934150e7ac48 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c | |||
| @@ -276,7 +276,7 @@ static int tlb_stats_open(struct inode *inode, struct file *file) | |||
| 276 | return 0; | 276 | return 0; |
| 277 | } | 277 | } |
| 278 | 278 | ||
| 279 | /* called on user read(): display the couters */ | 279 | /* called on user read(): display the counters */ |
| 280 | static ssize_t tlb_stats_output(struct file *file, /* file descriptor */ | 280 | static ssize_t tlb_stats_output(struct file *file, /* file descriptor */ |
| 281 | char __user *user_buf, /* user buffer */ | 281 | char __user *user_buf, /* user buffer */ |
| 282 | size_t len, /* length of buffer */ | 282 | size_t len, /* length of buffer */ |
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index 9e5eddbb856f..5a294b2c3cb3 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c | |||
| @@ -215,7 +215,7 @@ slc_chk: | |||
| 215 | * ------------------ | 215 | * ------------------ |
| 216 | * This ver of MMU supports variable page sizes (1k-16k): although Linux will | 216 | * This ver of MMU supports variable page sizes (1k-16k): although Linux will |
| 217 | * only support 8k (default), 16k and 4k. | 217 | * only support 8k (default), 16k and 4k. |
| 218 | * However from hardware perspective, smaller page sizes aggrevate aliasing | 218 | * However from hardware perspective, smaller page sizes aggravate aliasing |
| 219 | * meaning more vaddr bits needed to disambiguate the cache-line-op ; | 219 | * meaning more vaddr bits needed to disambiguate the cache-line-op ; |
| 220 | * the existing scheme of piggybacking won't work for certain configurations. | 220 | * the existing scheme of piggybacking won't work for certain configurations. |
| 221 | * Two new registers IC_PTAG and DC_PTAG inttoduced. | 221 | * Two new registers IC_PTAG and DC_PTAG inttoduced. |
| @@ -302,7 +302,7 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr, | |||
| 302 | 302 | ||
| 303 | /* | 303 | /* |
| 304 | * This is technically for MMU v4, using the MMU v3 programming model | 304 | * This is technically for MMU v4, using the MMU v3 programming model |
| 305 | * Special work for HS38 aliasing I-cache configuratino with PAE40 | 305 | * Special work for HS38 aliasing I-cache configuration with PAE40 |
| 306 | * - upper 8 bits of paddr need to be written into PTAG_HI | 306 | * - upper 8 bits of paddr need to be written into PTAG_HI |
| 307 | * - (and needs to be written before the lower 32 bits) | 307 | * - (and needs to be written before the lower 32 bits) |
| 308 | * Note that PTAG_HI is hoisted outside the line loop | 308 | * Note that PTAG_HI is hoisted outside the line loop |
| @@ -936,7 +936,7 @@ void arc_cache_init(void) | |||
| 936 | ic->ver, CONFIG_ARC_MMU_VER); | 936 | ic->ver, CONFIG_ARC_MMU_VER); |
| 937 | 937 | ||
| 938 | /* | 938 | /* |
| 939 | * In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG | 939 | * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG |
| 940 | * pair to provide vaddr/paddr respectively, just as in MMU v3 | 940 | * pair to provide vaddr/paddr respectively, just as in MMU v3 |
| 941 | */ | 941 | */ |
| 942 | if (is_isa_arcv2() && ic->alias) | 942 | if (is_isa_arcv2() && ic->alias) |
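The PAE40 note above says the upper 8 bits of a 40-bit physical address go into PTAG_HI and must be programmed before the lower 32 bits. A small sketch of that address split (the example address is arbitrary, and the required write ordering is only indicated by comments):

/* Sketch only: split a 40-bit physical address into PTAG_HI + lower 32 bits. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t paddr   = 0x1234567890ULL & ((1ULL << 40) - 1); /* 40-bit phys addr */
    uint32_t ptag_hi = (uint32_t)(paddr >> 32);   /* upper 8 bits, written first */
    uint32_t ptag_lo = (uint32_t)paddr;           /* lower 32 bits, written second */

    printf("PTAG_HI=0x%02x PTAG=0x%08x\n", (unsigned)ptag_hi, (unsigned)ptag_lo);
    return 0;
}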
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 8c8e36fa5659..73d7e4c75b7d 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | * DMA Coherent API Notes | 10 | * DMA Coherent API Notes |
| 11 | * | 11 | * |
| 12 | * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is | 12 | * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is |
| 13 | * implemented by accessintg it using a kernel virtual address, with | 13 | * implemented by accessing it using a kernel virtual address, with |
| 14 | * Cache bit off in the TLB entry. | 14 | * Cache bit off in the TLB entry. |
| 15 | * | 15 | * |
| 16 | * The default DMA address == Phy address which is 0x8000_0000 based. | 16 | * The default DMA address == Phy address which is 0x8000_0000 based. |
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 06b6c2d695bf..414b42710a36 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile | |||
| @@ -741,6 +741,7 @@ dtb-$(CONFIG_MACH_SUN7I) += \ | |||
| 741 | sun7i-a20-olimex-som-evb.dtb \ | 741 | sun7i-a20-olimex-som-evb.dtb \ |
| 742 | sun7i-a20-olinuxino-lime.dtb \ | 742 | sun7i-a20-olinuxino-lime.dtb \ |
| 743 | sun7i-a20-olinuxino-lime2.dtb \ | 743 | sun7i-a20-olinuxino-lime2.dtb \ |
| 744 | sun7i-a20-olinuxino-lime2-emmc.dtb \ | ||
| 744 | sun7i-a20-olinuxino-micro.dtb \ | 745 | sun7i-a20-olinuxino-micro.dtb \ |
| 745 | sun7i-a20-orangepi.dtb \ | 746 | sun7i-a20-orangepi.dtb \ |
| 746 | sun7i-a20-orangepi-mini.dtb \ | 747 | sun7i-a20-orangepi-mini.dtb \ |
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts index d82dd6e3f9b1..5687d6b4da60 100644 --- a/arch/arm/boot/dts/am437x-sk-evm.dts +++ b/arch/arm/boot/dts/am437x-sk-evm.dts | |||
| @@ -418,7 +418,7 @@ | |||
| 418 | status = "okay"; | 418 | status = "okay"; |
| 419 | pinctrl-names = "default"; | 419 | pinctrl-names = "default"; |
| 420 | pinctrl-0 = <&i2c0_pins>; | 420 | pinctrl-0 = <&i2c0_pins>; |
| 421 | clock-frequency = <400000>; | 421 | clock-frequency = <100000>; |
| 422 | 422 | ||
| 423 | tps@24 { | 423 | tps@24 { |
| 424 | compatible = "ti,tps65218"; | 424 | compatible = "ti,tps65218"; |
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi index b01a5948cdd0..0e63b9dff6e7 100644 --- a/arch/arm/boot/dts/am57xx-idk-common.dtsi +++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi | |||
| @@ -60,10 +60,26 @@ | |||
| 60 | 60 | ||
| 61 | tps659038_pmic { | 61 | tps659038_pmic { |
| 62 | compatible = "ti,tps659038-pmic"; | 62 | compatible = "ti,tps659038-pmic"; |
| 63 | |||
| 64 | smps12-in-supply = <&vmain>; | ||
| 65 | smps3-in-supply = <&vmain>; | ||
| 66 | smps45-in-supply = <&vmain>; | ||
| 67 | smps6-in-supply = <&vmain>; | ||
| 68 | smps7-in-supply = <&vmain>; | ||
| 69 | smps8-in-supply = <&vmain>; | ||
| 70 | smps9-in-supply = <&vmain>; | ||
| 71 | ldo1-in-supply = <&vmain>; | ||
| 72 | ldo2-in-supply = <&vmain>; | ||
| 73 | ldo3-in-supply = <&vmain>; | ||
| 74 | ldo4-in-supply = <&vmain>; | ||
| 75 | ldo9-in-supply = <&vmain>; | ||
| 76 | ldoln-in-supply = <&vmain>; | ||
| 77 | ldousb-in-supply = <&vmain>; | ||
| 78 | ldortc-in-supply = <&vmain>; | ||
| 79 | |||
| 63 | regulators { | 80 | regulators { |
| 64 | smps12_reg: smps12 { | 81 | smps12_reg: smps12 { |
| 65 | /* VDD_MPU */ | 82 | /* VDD_MPU */ |
| 66 | vin-supply = <&vmain>; | ||
| 67 | regulator-name = "smps12"; | 83 | regulator-name = "smps12"; |
| 68 | regulator-min-microvolt = <850000>; | 84 | regulator-min-microvolt = <850000>; |
| 69 | regulator-max-microvolt = <1250000>; | 85 | regulator-max-microvolt = <1250000>; |
| @@ -73,7 +89,6 @@ | |||
| 73 | 89 | ||
| 74 | smps3_reg: smps3 { | 90 | smps3_reg: smps3 { |
| 75 | /* VDD_DDR EMIF1 EMIF2 */ | 91 | /* VDD_DDR EMIF1 EMIF2 */ |
| 76 | vin-supply = <&vmain>; | ||
| 77 | regulator-name = "smps3"; | 92 | regulator-name = "smps3"; |
| 78 | regulator-min-microvolt = <1350000>; | 93 | regulator-min-microvolt = <1350000>; |
| 79 | regulator-max-microvolt = <1350000>; | 94 | regulator-max-microvolt = <1350000>; |
| @@ -84,7 +99,6 @@ | |||
| 84 | smps45_reg: smps45 { | 99 | smps45_reg: smps45 { |
| 85 | /* VDD_DSPEVE on AM572 */ | 100 | /* VDD_DSPEVE on AM572 */ |
| 86 | /* VDD_IVA + VDD_DSP on AM571 */ | 101 | /* VDD_IVA + VDD_DSP on AM571 */ |
| 87 | vin-supply = <&vmain>; | ||
| 88 | regulator-name = "smps45"; | 102 | regulator-name = "smps45"; |
| 89 | regulator-min-microvolt = <850000>; | 103 | regulator-min-microvolt = <850000>; |
| 90 | regulator-max-microvolt = <1250000>; | 104 | regulator-max-microvolt = <1250000>; |
| @@ -94,7 +108,6 @@ | |||
| 94 | 108 | ||
| 95 | smps6_reg: smps6 { | 109 | smps6_reg: smps6 { |
| 96 | /* VDD_GPU */ | 110 | /* VDD_GPU */ |
| 97 | vin-supply = <&vmain>; | ||
| 98 | regulator-name = "smps6"; | 111 | regulator-name = "smps6"; |
| 99 | regulator-min-microvolt = <850000>; | 112 | regulator-min-microvolt = <850000>; |
| 100 | regulator-max-microvolt = <1250000>; | 113 | regulator-max-microvolt = <1250000>; |
| @@ -104,7 +117,6 @@ | |||
| 104 | 117 | ||
| 105 | smps7_reg: smps7 { | 118 | smps7_reg: smps7 { |
| 106 | /* VDD_CORE */ | 119 | /* VDD_CORE */ |
| 107 | vin-supply = <&vmain>; | ||
| 108 | regulator-name = "smps7"; | 120 | regulator-name = "smps7"; |
| 109 | regulator-min-microvolt = <850000>; | 121 | regulator-min-microvolt = <850000>; |
| 110 | regulator-max-microvolt = <1150000>; | 122 | regulator-max-microvolt = <1150000>; |
| @@ -115,13 +127,11 @@ | |||
| 115 | smps8_reg: smps8 { | 127 | smps8_reg: smps8 { |
| 116 | /* 5728 - VDD_IVAHD */ | 128 | /* 5728 - VDD_IVAHD */ |
| 117 | /* 5718 - N.C. test point */ | 129 | /* 5718 - N.C. test point */ |
| 118 | vin-supply = <&vmain>; | ||
| 119 | regulator-name = "smps8"; | 130 | regulator-name = "smps8"; |
| 120 | }; | 131 | }; |
| 121 | 132 | ||
| 122 | smps9_reg: smps9 { | 133 | smps9_reg: smps9 { |
| 123 | /* VDD_3_3D */ | 134 | /* VDD_3_3D */ |
| 124 | vin-supply = <&vmain>; | ||
| 125 | regulator-name = "smps9"; | 135 | regulator-name = "smps9"; |
| 126 | regulator-min-microvolt = <3300000>; | 136 | regulator-min-microvolt = <3300000>; |
| 127 | regulator-max-microvolt = <3300000>; | 137 | regulator-max-microvolt = <3300000>; |
| @@ -132,7 +142,6 @@ | |||
| 132 | ldo1_reg: ldo1 { | 142 | ldo1_reg: ldo1 { |
| 133 | /* VDDSHV8 - VSDMMC */ | 143 | /* VDDSHV8 - VSDMMC */ |
| 134 | /* NOTE: on rev 1.3a, data supply */ | 144 | /* NOTE: on rev 1.3a, data supply */ |
| 135 | vin-supply = <&vmain>; | ||
| 136 | regulator-name = "ldo1"; | 145 | regulator-name = "ldo1"; |
| 137 | regulator-min-microvolt = <1800000>; | 146 | regulator-min-microvolt = <1800000>; |
| 138 | regulator-max-microvolt = <3300000>; | 147 | regulator-max-microvolt = <3300000>; |
| @@ -142,7 +151,6 @@ | |||
| 142 | 151 | ||
| 143 | ldo2_reg: ldo2 { | 152 | ldo2_reg: ldo2 { |
| 144 | /* VDDSH18V */ | 153 | /* VDDSH18V */ |
| 145 | vin-supply = <&vmain>; | ||
| 146 | regulator-name = "ldo2"; | 154 | regulator-name = "ldo2"; |
| 147 | regulator-min-microvolt = <1800000>; | 155 | regulator-min-microvolt = <1800000>; |
| 148 | regulator-max-microvolt = <1800000>; | 156 | regulator-max-microvolt = <1800000>; |
| @@ -152,7 +160,6 @@ | |||
| 152 | 160 | ||
| 153 | ldo3_reg: ldo3 { | 161 | ldo3_reg: ldo3 { |
| 154 | /* R1.3a 572x V1_8PHY_LDO3: USB, SATA */ | 162 | /* R1.3a 572x V1_8PHY_LDO3: USB, SATA */ |
| 155 | vin-supply = <&vmain>; | ||
| 156 | regulator-name = "ldo3"; | 163 | regulator-name = "ldo3"; |
| 157 | regulator-min-microvolt = <1800000>; | 164 | regulator-min-microvolt = <1800000>; |
| 158 | regulator-max-microvolt = <1800000>; | 165 | regulator-max-microvolt = <1800000>; |
| @@ -162,7 +169,6 @@ | |||
| 162 | 169 | ||
| 163 | ldo4_reg: ldo4 { | 170 | ldo4_reg: ldo4 { |
| 164 | /* R1.3a 572x V1_8PHY_LDO4: PCIE, HDMI*/ | 171 | /* R1.3a 572x V1_8PHY_LDO4: PCIE, HDMI*/ |
| 165 | vin-supply = <&vmain>; | ||
| 166 | regulator-name = "ldo4"; | 172 | regulator-name = "ldo4"; |
| 167 | regulator-min-microvolt = <1800000>; | 173 | regulator-min-microvolt = <1800000>; |
| 168 | regulator-max-microvolt = <1800000>; | 174 | regulator-max-microvolt = <1800000>; |
| @@ -174,7 +180,6 @@ | |||
| 174 | 180 | ||
| 175 | ldo9_reg: ldo9 { | 181 | ldo9_reg: ldo9 { |
| 176 | /* VDD_RTC */ | 182 | /* VDD_RTC */ |
| 177 | vin-supply = <&vmain>; | ||
| 178 | regulator-name = "ldo9"; | 183 | regulator-name = "ldo9"; |
| 179 | regulator-min-microvolt = <840000>; | 184 | regulator-min-microvolt = <840000>; |
| 180 | regulator-max-microvolt = <1160000>; | 185 | regulator-max-microvolt = <1160000>; |
| @@ -184,7 +189,6 @@ | |||
| 184 | 189 | ||
| 185 | ldoln_reg: ldoln { | 190 | ldoln_reg: ldoln { |
| 186 | /* VDDA_1V8_PLL */ | 191 | /* VDDA_1V8_PLL */ |
| 187 | vin-supply = <&vmain>; | ||
| 188 | regulator-name = "ldoln"; | 192 | regulator-name = "ldoln"; |
| 189 | regulator-min-microvolt = <1800000>; | 193 | regulator-min-microvolt = <1800000>; |
| 190 | regulator-max-microvolt = <1800000>; | 194 | regulator-max-microvolt = <1800000>; |
| @@ -194,7 +198,6 @@ | |||
| 194 | 198 | ||
| 195 | ldousb_reg: ldousb { | 199 | ldousb_reg: ldousb { |
| 196 | /* VDDA_3V_USB: VDDA_USBHS33 */ | 200 | /* VDDA_3V_USB: VDDA_USBHS33 */ |
| 197 | vin-supply = <&vmain>; | ||
| 198 | regulator-name = "ldousb"; | 201 | regulator-name = "ldousb"; |
| 199 | regulator-min-microvolt = <3300000>; | 202 | regulator-min-microvolt = <3300000>; |
| 200 | regulator-max-microvolt = <3300000>; | 203 | regulator-max-microvolt = <3300000>; |
| @@ -204,7 +207,6 @@ | |||
| 204 | 207 | ||
| 205 | ldortc_reg: ldortc { | 208 | ldortc_reg: ldortc { |
| 206 | /* VDDA_RTC */ | 209 | /* VDDA_RTC */ |
| 207 | vin-supply = <&vmain>; | ||
| 208 | regulator-name = "ldortc"; | 210 | regulator-name = "ldortc"; |
| 209 | regulator-min-microvolt = <1800000>; | 211 | regulator-min-microvolt = <1800000>; |
| 210 | regulator-max-microvolt = <1800000>; | 212 | regulator-max-microvolt = <1800000>; |
diff --git a/arch/arm/boot/dts/dm8148-evm.dts b/arch/arm/boot/dts/dm8148-evm.dts index cbc17b0794b1..4128fa91823c 100644 --- a/arch/arm/boot/dts/dm8148-evm.dts +++ b/arch/arm/boot/dts/dm8148-evm.dts | |||
| @@ -93,6 +93,10 @@ | |||
| 93 | }; | 93 | }; |
| 94 | }; | 94 | }; |
| 95 | 95 | ||
| 96 | &mmc1 { | ||
| 97 | status = "disabled"; | ||
| 98 | }; | ||
| 99 | |||
| 96 | &mmc2 { | 100 | &mmc2 { |
| 97 | pinctrl-names = "default"; | 101 | pinctrl-names = "default"; |
| 98 | pinctrl-0 = <&sd1_pins>; | 102 | pinctrl-0 = <&sd1_pins>; |
| @@ -101,6 +105,10 @@ | |||
| 101 | cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>; | 105 | cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>; |
| 102 | }; | 106 | }; |
| 103 | 107 | ||
| 108 | &mmc3 { | ||
| 109 | status = "disabled"; | ||
| 110 | }; | ||
| 111 | |||
| 104 | &pincntl { | 112 | &pincntl { |
| 105 | sd1_pins: pinmux_sd1_pins { | 113 | sd1_pins: pinmux_sd1_pins { |
| 106 | pinctrl-single,pins = < | 114 | pinctrl-single,pins = < |
diff --git a/arch/arm/boot/dts/dm8148-t410.dts b/arch/arm/boot/dts/dm8148-t410.dts index 5d4313fd5a46..3f184863e0c5 100644 --- a/arch/arm/boot/dts/dm8148-t410.dts +++ b/arch/arm/boot/dts/dm8148-t410.dts | |||
| @@ -45,6 +45,14 @@ | |||
| 45 | phy-mode = "rgmii"; | 45 | phy-mode = "rgmii"; |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | &mmc1 { | ||
| 49 | status = "disabled"; | ||
| 50 | }; | ||
| 51 | |||
| 52 | &mmc2 { | ||
| 53 | status = "disabled"; | ||
| 54 | }; | ||
| 55 | |||
| 48 | &mmc3 { | 56 | &mmc3 { |
| 49 | pinctrl-names = "default"; | 57 | pinctrl-names = "default"; |
| 50 | pinctrl-0 = <&sd2_pins>; | 58 | pinctrl-0 = <&sd2_pins>; |
| @@ -53,6 +61,7 @@ | |||
| 53 | dmas = <&edma_xbar 8 0 1 /* use SDTXEVT1 instead of MCASP0TX */ | 61 | dmas = <&edma_xbar 8 0 1 /* use SDTXEVT1 instead of MCASP0TX */ |
| 54 | &edma_xbar 9 0 2>; /* use SDRXEVT1 instead of MCASP0RX */ | 62 | &edma_xbar 9 0 2>; /* use SDRXEVT1 instead of MCASP0RX */ |
| 55 | dma-names = "tx", "rx"; | 63 | dma-names = "tx", "rx"; |
| 64 | non-removable; | ||
| 56 | }; | 65 | }; |
| 57 | 66 | ||
| 58 | &pincntl { | 67 | &pincntl { |
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index e0074014385a..3a8f3976f6f9 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi | |||
| @@ -1451,6 +1451,8 @@ | |||
| 1451 | ti,hwmods = "gpmc"; | 1451 | ti,hwmods = "gpmc"; |
| 1452 | reg = <0x50000000 0x37c>; /* device IO registers */ | 1452 | reg = <0x50000000 0x37c>; /* device IO registers */ |
| 1453 | interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; | 1453 | interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>; |
| 1454 | dmas = <&edma_xbar 4 0>; | ||
| 1455 | dma-names = "rxtx"; | ||
| 1454 | gpmc,num-cs = <8>; | 1456 | gpmc,num-cs = <8>; |
| 1455 | gpmc,num-waitpins = <2>; | 1457 | gpmc,num-waitpins = <2>; |
| 1456 | #address-cells = <2>; | 1458 | #address-cells = <2>; |
diff --git a/arch/arm/boot/dts/dra74x.dtsi b/arch/arm/boot/dts/dra74x.dtsi index 4220eeffc65a..5e06020f450b 100644 --- a/arch/arm/boot/dts/dra74x.dtsi +++ b/arch/arm/boot/dts/dra74x.dtsi | |||
| @@ -107,8 +107,8 @@ | |||
| 107 | reg = <0x58000000 0x80>, | 107 | reg = <0x58000000 0x80>, |
| 108 | <0x58004054 0x4>, | 108 | <0x58004054 0x4>, |
| 109 | <0x58004300 0x20>, | 109 | <0x58004300 0x20>, |
| 110 | <0x58005054 0x4>, | 110 | <0x58009054 0x4>, |
| 111 | <0x58005300 0x20>; | 111 | <0x58009300 0x20>; |
| 112 | reg-names = "dss", "pll1_clkctrl", "pll1", | 112 | reg-names = "dss", "pll1_clkctrl", "pll1", |
| 113 | "pll2_clkctrl", "pll2"; | 113 | "pll2_clkctrl", "pll2"; |
| 114 | 114 | ||
diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi index ddfe1f558c10..fa14f77df563 100644 --- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi +++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi | |||
| @@ -242,7 +242,7 @@ | |||
| 242 | hpd-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>; | 242 | hpd-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>; |
| 243 | 243 | ||
| 244 | ports { | 244 | ports { |
| 245 | port0 { | 245 | port { |
| 246 | dp_out: endpoint { | 246 | dp_out: endpoint { |
| 247 | remote-endpoint = <&bridge_in>; | 247 | remote-endpoint = <&bridge_in>; |
| 248 | }; | 248 | }; |
| @@ -485,13 +485,20 @@ | |||
| 485 | edid-emulation = <5>; | 485 | edid-emulation = <5>; |
| 486 | 486 | ||
| 487 | ports { | 487 | ports { |
| 488 | port0 { | 488 | #address-cells = <1>; |
| 489 | #size-cells = <0>; | ||
| 490 | |||
| 491 | port@0 { | ||
| 492 | reg = <0>; | ||
| 493 | |||
| 489 | bridge_out: endpoint { | 494 | bridge_out: endpoint { |
| 490 | remote-endpoint = <&panel_in>; | 495 | remote-endpoint = <&panel_in>; |
| 491 | }; | 496 | }; |
| 492 | }; | 497 | }; |
| 493 | 498 | ||
| 494 | port1 { | 499 | port@1 { |
| 500 | reg = <1>; | ||
| 501 | |||
| 495 | bridge_in: endpoint { | 502 | bridge_in: endpoint { |
| 496 | remote-endpoint = <&dp_out>; | 503 | remote-endpoint = <&dp_out>; |
| 497 | }; | 504 | }; |
diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts index f9d2e4f1a0e0..1de972d46a87 100644 --- a/arch/arm/boot/dts/exynos5420-peach-pit.dts +++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts | |||
| @@ -163,7 +163,7 @@ | |||
| 163 | hpd-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>; | 163 | hpd-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>; |
| 164 | 164 | ||
| 165 | ports { | 165 | ports { |
| 166 | port0 { | 166 | port { |
| 167 | dp_out: endpoint { | 167 | dp_out: endpoint { |
| 168 | remote-endpoint = <&bridge_in>; | 168 | remote-endpoint = <&bridge_in>; |
| 169 | }; | 169 | }; |
| @@ -631,13 +631,20 @@ | |||
| 631 | use-external-pwm; | 631 | use-external-pwm; |
| 632 | 632 | ||
| 633 | ports { | 633 | ports { |
| 634 | port0 { | 634 | #address-cells = <1>; |
| 635 | #size-cells = <0>; | ||
| 636 | |||
| 637 | port@0 { | ||
| 638 | reg = <0>; | ||
| 639 | |||
| 635 | bridge_out: endpoint { | 640 | bridge_out: endpoint { |
| 636 | remote-endpoint = <&panel_in>; | 641 | remote-endpoint = <&panel_in>; |
| 637 | }; | 642 | }; |
| 638 | }; | 643 | }; |
| 639 | 644 | ||
| 640 | port1 { | 645 | port@1 { |
| 646 | reg = <1>; | ||
| 647 | |||
| 641 | bridge_in: endpoint { | 648 | bridge_in: endpoint { |
| 642 | remote-endpoint = <&dp_out>; | 649 | remote-endpoint = <&dp_out>; |
| 643 | }; | 650 | }; |
diff --git a/arch/arm/boot/dts/omap3-evm-37xx.dts b/arch/arm/boot/dts/omap3-evm-37xx.dts index 76056ba92ced..ed449827c3d3 100644 --- a/arch/arm/boot/dts/omap3-evm-37xx.dts +++ b/arch/arm/boot/dts/omap3-evm-37xx.dts | |||
| @@ -85,7 +85,7 @@ | |||
| 85 | OMAP3_CORE1_IOPAD(0x2158, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */ | 85 | OMAP3_CORE1_IOPAD(0x2158, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */ |
| 86 | OMAP3_CORE1_IOPAD(0x215a, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */ | 86 | OMAP3_CORE1_IOPAD(0x215a, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */ |
| 87 | OMAP3_CORE1_IOPAD(0x215c, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */ | 87 | OMAP3_CORE1_IOPAD(0x215c, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */ |
| 88 | OMAP3_CORE1_IOPAD(0x215e, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */ | 88 | OMAP3_CORE1_IOPAD(0x215e, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */ |
| 89 | OMAP3_CORE1_IOPAD(0x2160, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */ | 89 | OMAP3_CORE1_IOPAD(0x2160, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */ |
| 90 | OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */ | 90 | OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */ |
| 91 | >; | 91 | >; |
diff --git a/arch/arm/boot/dts/omap3-igep.dtsi b/arch/arm/boot/dts/omap3-igep.dtsi index 41f5d386f21f..f4f2ce46d681 100644 --- a/arch/arm/boot/dts/omap3-igep.dtsi +++ b/arch/arm/boot/dts/omap3-igep.dtsi | |||
| @@ -188,6 +188,7 @@ | |||
| 188 | vmmc-supply = <&vmmc1>; | 188 | vmmc-supply = <&vmmc1>; |
| 189 | vmmc_aux-supply = <&vsim>; | 189 | vmmc_aux-supply = <&vsim>; |
| 190 | bus-width = <4>; | 190 | bus-width = <4>; |
| 191 | cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_LOW>; | ||
| 191 | }; | 192 | }; |
| 192 | 193 | ||
| 193 | &mmc3 { | 194 | &mmc3 { |
diff --git a/arch/arm/boot/dts/omap3-igep0020-common.dtsi b/arch/arm/boot/dts/omap3-igep0020-common.dtsi index d6f839cab649..b6971060648a 100644 --- a/arch/arm/boot/dts/omap3-igep0020-common.dtsi +++ b/arch/arm/boot/dts/omap3-igep0020-common.dtsi | |||
| @@ -194,6 +194,12 @@ | |||
| 194 | OMAP3630_CORE2_IOPAD(0x25f8, PIN_OUTPUT | MUX_MODE4) /* etk_d14.gpio_28 */ | 194 | OMAP3630_CORE2_IOPAD(0x25f8, PIN_OUTPUT | MUX_MODE4) /* etk_d14.gpio_28 */ |
| 195 | >; | 195 | >; |
| 196 | }; | 196 | }; |
| 197 | |||
| 198 | mmc1_wp_pins: pinmux_mmc1_cd_pins { | ||
| 199 | pinctrl-single,pins = < | ||
| 200 | OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT | MUX_MODE4) /* etk_d15.gpio_29 */ | ||
| 201 | >; | ||
| 202 | }; | ||
| 197 | }; | 203 | }; |
| 198 | 204 | ||
| 199 | &i2c3 { | 205 | &i2c3 { |
| @@ -250,3 +256,8 @@ | |||
| 250 | }; | 256 | }; |
| 251 | }; | 257 | }; |
| 252 | }; | 258 | }; |
| 259 | |||
| 260 | &mmc1 { | ||
| 261 | pinctrl-0 = <&mmc1_pins &mmc1_wp_pins>; | ||
| 262 | wp-gpios = <&gpio1 29 GPIO_ACTIVE_LOW>; /* gpio_29 */ | ||
| 263 | }; | ||
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index d9e2d9c6e999..2b74a81d1de2 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts | |||
| @@ -288,7 +288,7 @@ | |||
| 288 | pinctrl-single,pins = < | 288 | pinctrl-single,pins = < |
| 289 | OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */ | 289 | OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */ |
| 290 | OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */ | 290 | OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */ |
| 291 | OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */ | 291 | OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */ |
| 292 | OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */ | 292 | OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */ |
| 293 | OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */ | 293 | OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */ |
| 294 | OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */ | 294 | OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */ |
| @@ -300,7 +300,7 @@ | |||
| 300 | modem_pins: pinmux_modem { | 300 | modem_pins: pinmux_modem { |
| 301 | pinctrl-single,pins = < | 301 | pinctrl-single,pins = < |
| 302 | OMAP3_CORE1_IOPAD(0x20dc, PIN_OUTPUT | MUX_MODE4) /* gpio 70 => cmt_apeslpx */ | 302 | OMAP3_CORE1_IOPAD(0x20dc, PIN_OUTPUT | MUX_MODE4) /* gpio 70 => cmt_apeslpx */ |
| 303 | OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio 72 => ape_rst_rq */ | 303 | OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | MUX_MODE4) /* gpio 72 => ape_rst_rq */ |
| 304 | OMAP3_CORE1_IOPAD(0x20e2, PIN_OUTPUT | MUX_MODE4) /* gpio 73 => cmt_rst_rq */ | 304 | OMAP3_CORE1_IOPAD(0x20e2, PIN_OUTPUT | MUX_MODE4) /* gpio 73 => cmt_rst_rq */ |
| 305 | OMAP3_CORE1_IOPAD(0x20e4, PIN_OUTPUT | MUX_MODE4) /* gpio 74 => cmt_en */ | 305 | OMAP3_CORE1_IOPAD(0x20e4, PIN_OUTPUT | MUX_MODE4) /* gpio 74 => cmt_en */ |
| 306 | OMAP3_CORE1_IOPAD(0x20e6, PIN_OUTPUT | MUX_MODE4) /* gpio 75 => cmt_rst */ | 306 | OMAP3_CORE1_IOPAD(0x20e6, PIN_OUTPUT | MUX_MODE4) /* gpio 75 => cmt_rst */ |
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi index a00ca761675d..927b17fc4ed8 100644 --- a/arch/arm/boot/dts/omap3-n950-n9.dtsi +++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi | |||
| @@ -97,7 +97,7 @@ | |||
| 97 | OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */ | 97 | OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1) /* ssi1_dat_tx */ |
| 98 | OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */ | 98 | OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1) /* ssi1_flag_tx */ |
| 99 | OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */ | 99 | OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */ |
| 100 | OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */ | 100 | OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */ |
| 101 | OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */ | 101 | OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1) /* ssi1_dat_rx */ |
| 102 | OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */ | 102 | OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1) /* ssi1_flag_rx */ |
| 103 | OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE1) /* ssi1_rdy_rx */ | 103 | OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE1) /* ssi1_rdy_rx */ |
| @@ -110,7 +110,7 @@ | |||
| 110 | OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE7) /* ssi1_dat_tx */ | 110 | OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE7) /* ssi1_dat_tx */ |
| 111 | OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE7) /* ssi1_flag_tx */ | 111 | OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE7) /* ssi1_flag_tx */ |
| 112 | OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLDOWN | MUX_MODE7) /* ssi1_rdy_tx */ | 112 | OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLDOWN | MUX_MODE7) /* ssi1_rdy_tx */ |
| 113 | OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */ | 113 | OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4) /* ssi1_wake_tx (cawake) */ |
| 114 | OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE7) /* ssi1_dat_rx */ | 114 | OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE7) /* ssi1_dat_rx */ |
| 115 | OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE7) /* ssi1_flag_rx */ | 115 | OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE7) /* ssi1_flag_rx */ |
| 116 | OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE4) /* ssi1_rdy_rx */ | 116 | OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE4) /* ssi1_rdy_rx */ |
| @@ -120,7 +120,7 @@ | |||
| 120 | 120 | ||
| 121 | modem_pins1: pinmux_modem_core1_pins { | 121 | modem_pins1: pinmux_modem_core1_pins { |
| 122 | pinctrl-single,pins = < | 122 | pinctrl-single,pins = < |
| 123 | OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio_34 (ape_rst_rq) */ | 123 | OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | MUX_MODE4) /* gpio_34 (ape_rst_rq) */ |
| 124 | OMAP3_CORE1_IOPAD(0x2100, PIN_OUTPUT | MUX_MODE4) /* gpio_88 (cmt_rst_rq) */ | 124 | OMAP3_CORE1_IOPAD(0x2100, PIN_OUTPUT | MUX_MODE4) /* gpio_88 (cmt_rst_rq) */ |
| 125 | OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE4) /* gpio_93 (cmt_apeslpx) */ | 125 | OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE4) /* gpio_93 (cmt_apeslpx) */ |
| 126 | >; | 126 | >; |
diff --git a/arch/arm/boot/dts/omap3-zoom3.dts b/arch/arm/boot/dts/omap3-zoom3.dts index f19170bdcc1f..c29b41dc7b95 100644 --- a/arch/arm/boot/dts/omap3-zoom3.dts +++ b/arch/arm/boot/dts/omap3-zoom3.dts | |||
| @@ -98,7 +98,7 @@ | |||
| 98 | pinctrl-single,pins = < | 98 | pinctrl-single,pins = < |
| 99 | OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts.uart2_cts */ | 99 | OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0) /* uart2_cts.uart2_cts */ |
| 100 | OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0) /* uart2_rts.uart2_rts */ | 100 | OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0) /* uart2_rts.uart2_rts */ |
| 101 | OMAP3_CORE1_IOPAD(0x217a, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */ | 101 | OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */ |
| 102 | OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */ | 102 | OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0) /* uart2_tx.uart2_tx */ |
| 103 | >; | 103 | >; |
| 104 | }; | 104 | }; |
| @@ -107,7 +107,7 @@ | |||
| 107 | pinctrl-single,pins = < | 107 | pinctrl-single,pins = < |
| 108 | OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart3_cts_rctx.uart3_cts_rctx */ | 108 | OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0) /* uart3_cts_rctx.uart3_cts_rctx */ |
| 109 | OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0) /* uart3_rts_sd.uart3_rts_sd */ | 109 | OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0) /* uart3_rts_sd.uart3_rts_sd */ |
| 110 | OMAP3_CORE1_IOPAD(0x219e, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */ | 110 | OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */ |
| 111 | OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx */ | 111 | OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0) /* uart3_tx_irtx.uart3_tx_irtx */ |
| 112 | >; | 112 | >; |
| 113 | }; | 113 | }; |
| @@ -125,7 +125,7 @@ | |||
| 125 | pinctrl-single,pins = < | 125 | pinctrl-single,pins = < |
| 126 | OMAP3630_CORE2_IOPAD(0x25d8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_clk.sdmmc3_clk */ | 126 | OMAP3630_CORE2_IOPAD(0x25d8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_clk.sdmmc3_clk */ |
| 127 | OMAP3630_CORE2_IOPAD(0x25e4, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d4.sdmmc3_dat0 */ | 127 | OMAP3630_CORE2_IOPAD(0x25e4, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d4.sdmmc3_dat0 */ |
| 128 | OMAP3630_CORE2_IOPAD(0x25e6, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */ | 128 | OMAP3630_CORE2_IOPAD(0x25e6, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */ |
| 129 | OMAP3630_CORE2_IOPAD(0x25e8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d6.sdmmc3_dat2 */ | 129 | OMAP3630_CORE2_IOPAD(0x25e8, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d6.sdmmc3_dat2 */ |
| 130 | OMAP3630_CORE2_IOPAD(0x25e2, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d3.sdmmc3_dat3 */ | 130 | OMAP3630_CORE2_IOPAD(0x25e2, PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d3.sdmmc3_dat3 */ |
| 131 | >; | 131 | >; |
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index dc759a3028b7..5d5b620b7d9b 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi | |||
| @@ -14,6 +14,29 @@ | |||
| 14 | display0 = &hdmi0; | 14 | display0 = &hdmi0; |
| 15 | }; | 15 | }; |
| 16 | 16 | ||
| 17 | vmain: fixedregulator-vmain { | ||
| 18 | compatible = "regulator-fixed"; | ||
| 19 | regulator-name = "vmain"; | ||
| 20 | regulator-min-microvolt = <5000000>; | ||
| 21 | regulator-max-microvolt = <5000000>; | ||
| 22 | }; | ||
| 23 | |||
| 24 | vsys_cobra: fixedregulator-vsys_cobra { | ||
| 25 | compatible = "regulator-fixed"; | ||
| 26 | regulator-name = "vsys_cobra"; | ||
| 27 | vin-supply = <&vmain>; | ||
| 28 | regulator-min-microvolt = <5000000>; | ||
| 29 | regulator-max-microvolt = <5000000>; | ||
| 30 | }; | ||
| 31 | |||
| 32 | vdds_1v8_main: fixedregulator-vdds_1v8_main { | ||
| 33 | compatible = "regulator-fixed"; | ||
| 34 | regulator-name = "vdds_1v8_main"; | ||
| 35 | vin-supply = <&smps7_reg>; | ||
| 36 | regulator-min-microvolt = <1800000>; | ||
| 37 | regulator-max-microvolt = <1800000>; | ||
| 38 | }; | ||
| 39 | |||
| 17 | vmmcsd_fixed: fixedregulator-mmcsd { | 40 | vmmcsd_fixed: fixedregulator-mmcsd { |
| 18 | compatible = "regulator-fixed"; | 41 | compatible = "regulator-fixed"; |
| 19 | regulator-name = "vmmcsd_fixed"; | 42 | regulator-name = "vmmcsd_fixed"; |
| @@ -309,7 +332,7 @@ | |||
| 309 | 332 | ||
| 310 | wlcore_irq_pin: pinmux_wlcore_irq_pin { | 333 | wlcore_irq_pin: pinmux_wlcore_irq_pin { |
| 311 | pinctrl-single,pins = < | 334 | pinctrl-single,pins = < |
| 312 | OMAP5_IOPAD(0x40, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */ | 335 | OMAP5_IOPAD(0x40, PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */ |
| 313 | >; | 336 | >; |
| 314 | }; | 337 | }; |
| 315 | }; | 338 | }; |
| @@ -409,6 +432,26 @@ | |||
| 409 | 432 | ||
| 410 | ti,ldo6-vibrator; | 433 | ti,ldo6-vibrator; |
| 411 | 434 | ||
| 435 | smps123-in-supply = <&vsys_cobra>; | ||
| 436 | smps45-in-supply = <&vsys_cobra>; | ||
| 437 | smps6-in-supply = <&vsys_cobra>; | ||
| 438 | smps7-in-supply = <&vsys_cobra>; | ||
| 439 | smps8-in-supply = <&vsys_cobra>; | ||
| 440 | smps9-in-supply = <&vsys_cobra>; | ||
| 441 | smps10_out2-in-supply = <&vsys_cobra>; | ||
| 442 | smps10_out1-in-supply = <&vsys_cobra>; | ||
| 443 | ldo1-in-supply = <&vsys_cobra>; | ||
| 444 | ldo2-in-supply = <&vsys_cobra>; | ||
| 445 | ldo3-in-supply = <&vdds_1v8_main>; | ||
| 446 | ldo4-in-supply = <&vdds_1v8_main>; | ||
| 447 | ldo5-in-supply = <&vsys_cobra>; | ||
| 448 | ldo6-in-supply = <&vdds_1v8_main>; | ||
| 449 | ldo7-in-supply = <&vsys_cobra>; | ||
| 450 | ldo8-in-supply = <&vsys_cobra>; | ||
| 451 | ldo9-in-supply = <&vmmcsd_fixed>; | ||
| 452 | ldoln-in-supply = <&vsys_cobra>; | ||
| 453 | ldousb-in-supply = <&vsys_cobra>; | ||
| 454 | |||
| 412 | regulators { | 455 | regulators { |
| 413 | smps123_reg: smps123 { | 456 | smps123_reg: smps123 { |
| 414 | /* VDD_OPP_MPU */ | 457 | /* VDD_OPP_MPU */ |
| @@ -600,7 +643,8 @@ | |||
| 600 | pinctrl-0 = <&twl6040_pins>; | 643 | pinctrl-0 = <&twl6040_pins>; |
| 601 | 644 | ||
| 602 | interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */ | 645 | interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */ |
| 603 | ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>; /* gpio line 141 */ | 646 | |
| 647 | /* audpwron gpio defined in the board specific dts */ | ||
| 604 | 648 | ||
| 605 | vio-supply = <&smps7_reg>; | 649 | vio-supply = <&smps7_reg>; |
| 606 | v2v1-supply = <&smps9_reg>; | 650 | v2v1-supply = <&smps9_reg>; |
diff --git a/arch/arm/boot/dts/omap5-igep0050.dts b/arch/arm/boot/dts/omap5-igep0050.dts index 46ecb1dd3b5c..f75ce02fb398 100644 --- a/arch/arm/boot/dts/omap5-igep0050.dts +++ b/arch/arm/boot/dts/omap5-igep0050.dts | |||
| @@ -35,6 +35,22 @@ | |||
| 35 | }; | 35 | }; |
| 36 | }; | 36 | }; |
| 37 | 37 | ||
| 38 | /* LDO4 is VPP1 - ball AD9 */ | ||
| 39 | &ldo4_reg { | ||
| 40 | regulator-min-microvolt = <2000000>; | ||
| 41 | regulator-max-microvolt = <2000000>; | ||
| 42 | }; | ||
| 43 | |||
| 44 | /* | ||
| 45 | * LDO7 is used for HDMI: VDDA_DSIPORTA - ball AA33, VDDA_DSIPORTC - ball AE33, | ||
| 46 | * VDDA_HDMI - ball AN25 | ||
| 47 | */ | ||
| 48 | &ldo7_reg { | ||
| 49 | status = "okay"; | ||
| 50 | regulator-min-microvolt = <1800000>; | ||
| 51 | regulator-max-microvolt = <1800000>; | ||
| 52 | }; | ||
| 53 | |||
| 38 | &omap5_pmx_core { | 54 | &omap5_pmx_core { |
| 39 | i2c4_pins: pinmux_i2c4_pins { | 55 | i2c4_pins: pinmux_i2c4_pins { |
| 40 | pinctrl-single,pins = < | 56 | pinctrl-single,pins = < |
| @@ -52,3 +68,13 @@ | |||
| 52 | <&gpio7 3 0>; /* 195, SDA */ | 68 | <&gpio7 3 0>; /* 195, SDA */ |
| 53 | }; | 69 | }; |
| 54 | 70 | ||
| 71 | &twl6040 { | ||
| 72 | ti,audpwron-gpio = <&gpio5 16 GPIO_ACTIVE_HIGH>; /* gpio line 144 */ | ||
| 73 | }; | ||
| 74 | |||
| 75 | &twl6040_pins { | ||
| 76 | pinctrl-single,pins = < | ||
| 77 | OMAP5_IOPAD(0x1c4, PIN_OUTPUT | MUX_MODE6) /* mcspi1_somi.gpio5_144 */ | ||
| 78 | OMAP5_IOPAD(0x1ca, PIN_OUTPUT | MUX_MODE6) /* perslimbus2_clock.gpio5_145 */ | ||
| 79 | >; | ||
| 80 | }; | ||
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts index 60b3fbb3bf07..a51e60518eb6 100644 --- a/arch/arm/boot/dts/omap5-uevm.dts +++ b/arch/arm/boot/dts/omap5-uevm.dts | |||
| @@ -51,3 +51,13 @@ | |||
| 51 | <&gpio9 1 GPIO_ACTIVE_HIGH>, /* TCA6424A P00, LS OE */ | 51 | <&gpio9 1 GPIO_ACTIVE_HIGH>, /* TCA6424A P00, LS OE */ |
| 52 | <&gpio7 1 GPIO_ACTIVE_HIGH>; /* GPIO 193, HPD */ | 52 | <&gpio7 1 GPIO_ACTIVE_HIGH>; /* GPIO 193, HPD */ |
| 53 | }; | 53 | }; |
| 54 | |||
| 55 | &twl6040 { | ||
| 56 | ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>; /* gpio line 141 */ | ||
| 57 | }; | ||
| 58 | |||
| 59 | &twl6040_pins { | ||
| 60 | pinctrl-single,pins = < | ||
| 61 | OMAP5_IOPAD(0x1be, PIN_OUTPUT | MUX_MODE6) /* mcspi1_somi.gpio5_141 */ | ||
| 62 | >; | ||
| 63 | }; | ||
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts index a3601e4c0a2e..b844473601d2 100644 --- a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts +++ b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts | |||
| @@ -136,6 +136,7 @@ | |||
| 136 | &gmac1 { | 136 | &gmac1 { |
| 137 | status = "okay"; | 137 | status = "okay"; |
| 138 | phy-mode = "rgmii"; | 138 | phy-mode = "rgmii"; |
| 139 | phy-handle = <&phy1>; | ||
| 139 | 140 | ||
| 140 | snps,reset-gpio = <&porta 0 GPIO_ACTIVE_LOW>; | 141 | snps,reset-gpio = <&porta 0 GPIO_ACTIVE_LOW>; |
| 141 | snps,reset-active-low; | 142 | snps,reset-active-low; |
diff --git a/arch/arm/boot/dts/stih407-family.dtsi b/arch/arm/boot/dts/stih407-family.dtsi index ad8ba10764a3..d294e82447a2 100644 --- a/arch/arm/boot/dts/stih407-family.dtsi +++ b/arch/arm/boot/dts/stih407-family.dtsi | |||
| @@ -24,18 +24,21 @@ | |||
| 24 | compatible = "shared-dma-pool"; | 24 | compatible = "shared-dma-pool"; |
| 25 | reg = <0x40000000 0x01000000>; | 25 | reg = <0x40000000 0x01000000>; |
| 26 | no-map; | 26 | no-map; |
| 27 | status = "disabled"; | ||
| 27 | }; | 28 | }; |
| 28 | 29 | ||
| 29 | gp1_reserved: rproc@41000000 { | 30 | gp1_reserved: rproc@41000000 { |
| 30 | compatible = "shared-dma-pool"; | 31 | compatible = "shared-dma-pool"; |
| 31 | reg = <0x41000000 0x01000000>; | 32 | reg = <0x41000000 0x01000000>; |
| 32 | no-map; | 33 | no-map; |
| 34 | status = "disabled"; | ||
| 33 | }; | 35 | }; |
| 34 | 36 | ||
| 35 | audio_reserved: rproc@42000000 { | 37 | audio_reserved: rproc@42000000 { |
| 36 | compatible = "shared-dma-pool"; | 38 | compatible = "shared-dma-pool"; |
| 37 | reg = <0x42000000 0x01000000>; | 39 | reg = <0x42000000 0x01000000>; |
| 38 | no-map; | 40 | no-map; |
| 41 | status = "disabled"; | ||
| 39 | }; | 42 | }; |
| 40 | 43 | ||
| 41 | dmu_reserved: rproc@43000000 { | 44 | dmu_reserved: rproc@43000000 { |
diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts index 68b479b8772c..73c133f5e79c 100644 --- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts +++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts | |||
| @@ -176,8 +176,6 @@ | |||
| 176 | }; | 176 | }; |
| 177 | 177 | ||
| 178 | &reg_dc1sw { | 178 | &reg_dc1sw {
| 179 | regulator-min-microvolt = <3000000>; | ||
| 180 | regulator-max-microvolt = <3000000>; | ||
| 181 | regulator-name = "vcc-lcd"; | 179 | regulator-name = "vcc-lcd"; |
| 182 | }; | 180 | }; |
| 183 | 181 | ||
diff --git a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts index 360adfb1e9ca..d6ad6196a768 100644 --- a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts +++ b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts | |||
| @@ -135,8 +135,6 @@ | |||
| 135 | 135 | ||
| 136 | &reg_dc1sw { | 136 | &reg_dc1sw {
| 137 | regulator-name = "vcc-lcd-usb2"; | 137 | regulator-name = "vcc-lcd-usb2"; |
| 138 | regulator-min-microvolt = <3000000>; | ||
| 139 | regulator-max-microvolt = <3000000>; | ||
| 140 | }; | 138 | }; |
| 141 | 139 | ||
| 142 | &reg_dc5ldo { | 140 | &reg_dc5ldo {
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig index 10f49ab5328e..47195e8690b4 100644 --- a/arch/arm/configs/exynos_defconfig +++ b/arch/arm/configs/exynos_defconfig | |||
| @@ -82,6 +82,7 @@ CONFIG_TOUCHSCREEN_MMS114=y | |||
| 82 | CONFIG_INPUT_MISC=y | 82 | CONFIG_INPUT_MISC=y |
| 83 | CONFIG_INPUT_MAX77693_HAPTIC=y | 83 | CONFIG_INPUT_MAX77693_HAPTIC=y |
| 84 | CONFIG_INPUT_MAX8997_HAPTIC=y | 84 | CONFIG_INPUT_MAX8997_HAPTIC=y |
| 85 | CONFIG_KEYBOARD_SAMSUNG=y | ||
| 85 | CONFIG_SERIAL_8250=y | 86 | CONFIG_SERIAL_8250=y |
| 86 | CONFIG_SERIAL_SAMSUNG=y | 87 | CONFIG_SERIAL_SAMSUNG=y |
| 87 | CONFIG_SERIAL_SAMSUNG_CONSOLE=y | 88 | CONFIG_SERIAL_SAMSUNG_CONSOLE=y |
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 8f857564657f..8a5fff1b7f6f 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig | |||
| @@ -264,6 +264,7 @@ CONFIG_KEYBOARD_TEGRA=y | |||
| 264 | CONFIG_KEYBOARD_SPEAR=y | 264 | CONFIG_KEYBOARD_SPEAR=y |
| 265 | CONFIG_KEYBOARD_ST_KEYSCAN=y | 265 | CONFIG_KEYBOARD_ST_KEYSCAN=y |
| 266 | CONFIG_KEYBOARD_CROS_EC=m | 266 | CONFIG_KEYBOARD_CROS_EC=m |
| 267 | CONFIG_KEYBOARD_SAMSUNG=m | ||
| 267 | CONFIG_MOUSE_PS2_ELANTECH=y | 268 | CONFIG_MOUSE_PS2_ELANTECH=y |
| 268 | CONFIG_MOUSE_CYAPA=m | 269 | CONFIG_MOUSE_CYAPA=m |
| 269 | CONFIG_MOUSE_ELAN_I2C=y | 270 | CONFIG_MOUSE_ELAN_I2C=y |
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 19cfab526d13..20febb368844 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h | |||
| @@ -29,7 +29,7 @@ | |||
| 29 | 29 | ||
| 30 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) | 30 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) |
| 31 | { | 31 | { |
| 32 | return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); | 32 | return (pmd_t *)get_zeroed_page(GFP_KERNEL); |
| 33 | } | 33 | } |
| 34 | 34 | ||
| 35 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | 35 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) |
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index aeddd28b3595..92fd2c8a9af0 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h | |||
| @@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) | |||
| 193 | 193 | ||
| 194 | #define pmd_large(pmd) (pmd_val(pmd) & 2) | 194 | #define pmd_large(pmd) (pmd_val(pmd) & 2) |
| 195 | #define pmd_bad(pmd) (pmd_val(pmd) & 2) | 195 | #define pmd_bad(pmd) (pmd_val(pmd) & 2) |
| 196 | #define pmd_present(pmd) (pmd_val(pmd)) | ||
| 196 | 197 | ||
| 197 | #define copy_pmd(pmdpd,pmdps) \ | 198 | #define copy_pmd(pmdpd,pmdps) \ |
| 198 | do { \ | 199 | do { \ |
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index fa70db7c714b..2a029bceaf2f 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h | |||
| @@ -211,6 +211,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) | |||
| 211 | : !!(pmd_val(pmd) & (val))) | 211 | : !!(pmd_val(pmd) & (val))) |
| 212 | #define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val))) | 212 | #define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val))) |
| 213 | 213 | ||
| 214 | #define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID)) | ||
| 214 | #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF)) | 215 | #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF)) |
| 215 | #define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL)) | 216 | #define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL)) |
| 216 | static inline pte_t pte_mkspecial(pte_t pte) | 217 | static inline pte_t pte_mkspecial(pte_t pte) |
| @@ -249,10 +250,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); | |||
| 249 | #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) | 250 | #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) |
| 250 | #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) | 251 | #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) |
| 251 | 252 | ||
| 252 | /* represent a notpresent pmd by zero, this is used by pmdp_invalidate */ | 253 | /* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */ |
| 253 | static inline pmd_t pmd_mknotpresent(pmd_t pmd) | 254 | static inline pmd_t pmd_mknotpresent(pmd_t pmd) |
| 254 | { | 255 | { |
| 255 | return __pmd(0); | 256 | return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID); |
| 256 | } | 257 | } |
| 257 | 258 | ||
| 258 | static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) | 259 | static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) |
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 348caabb7625..d62204060cbe 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h | |||
| @@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | |||
| 182 | #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) | 182 | #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) |
| 183 | 183 | ||
| 184 | #define pmd_none(pmd) (!pmd_val(pmd)) | 184 | #define pmd_none(pmd) (!pmd_val(pmd)) |
| 185 | #define pmd_present(pmd) (pmd_val(pmd)) | ||
| 186 | 185 | ||
| 187 | static inline pte_t *pmd_page_vaddr(pmd_t pmd) | 186 | static inline pte_t *pmd_page_vaddr(pmd_t pmd) |
| 188 | { | 187 | { |
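The pgtable hunks above change pmd_mknotpresent() from zeroing the entry to clearing only the valid bit, and move pmd_present() to test that bit, so the remaining attribute bits survive a pmdp_invalidate() round trip. A hedged sketch of that bit manipulation with made-up bit layouts (DEMO_PMD_VALID and DEMO_PMD_ATTRS do not match the real L_PMD_SECT_VALID encoding):

/* Sketch only: clearing the valid bit keeps the other attribute bits intact. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PMD_VALID   (1u << 0)     /* stand-in for L_PMD_SECT_VALID */
#define DEMO_PMD_ATTRS   (0xffu << 2)  /* stand-in for the remaining attributes */

static int demo_pmd_present(uint64_t pmd)
{
    return !!(pmd & DEMO_PMD_VALID);
}

static uint64_t demo_pmd_mknotpresent(uint64_t pmd)
{
    return pmd & ~(uint64_t)DEMO_PMD_VALID;
}

int main(void)
{
    uint64_t pmd = DEMO_PMD_VALID | DEMO_PMD_ATTRS;

    pmd = demo_pmd_mknotpresent(pmd);
    printf("present=%d attrs_kept=%d\n",
           demo_pmd_present(pmd),
           (pmd & DEMO_PMD_ATTRS) == DEMO_PMD_ATTRS);
    /* prints: present=0 attrs_kept=1 */
    return 0;
}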
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index ef9119f7462e..4d9375814b53 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c | |||
| @@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target, | |||
| 733 | if (ret) | 733 | if (ret) |
| 734 | return ret; | 734 | return ret; |
| 735 | 735 | ||
| 736 | vfp_flush_hwstate(thread); | ||
| 737 | thread->vfpstate.hard = new_vfp; | 736 | thread->vfpstate.hard = new_vfp; |
| 737 | vfp_flush_hwstate(thread); | ||
| 738 | 738 | ||
| 739 | return 0; | 739 | return 0; |
| 740 | } | 740 | } |
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index df90bc59bfce..861521606c6d 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
| @@ -486,7 +486,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = { | |||
| 486 | 486 | ||
| 487 | static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) | 487 | static void smp_cross_call(const struct cpumask *target, unsigned int ipinr) |
| 488 | { | 488 | { |
| 489 | trace_ipi_raise(target, ipi_types[ipinr]); | 489 | trace_ipi_raise_rcuidle(target, ipi_types[ipinr]); |
| 490 | __smp_cross_call(target, ipinr); | 490 | __smp_cross_call(target, ipinr); |
| 491 | } | 491 | } |
| 492 | 492 | ||
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index e65aa7d11b20..20dcf6e904b2 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig | |||
| @@ -61,7 +61,6 @@ config ARCH_EXYNOS4 | |||
| 61 | select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210 | 61 | select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210 |
| 62 | select CPU_EXYNOS4210 | 62 | select CPU_EXYNOS4210 |
| 63 | select GIC_NON_BANKED | 63 | select GIC_NON_BANKED |
| 64 | select KEYBOARD_SAMSUNG if INPUT_KEYBOARD | ||
| 65 | select MIGHT_HAVE_CACHE_L2X0 | 64 | select MIGHT_HAVE_CACHE_L2X0 |
| 66 | help | 65 | help |
| 67 | Samsung EXYNOS4 (Cortex-A9) SoC based systems | 66 | Samsung EXYNOS4 (Cortex-A9) SoC based systems |
diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c index a38b16b69923..b56de4b8cdf2 100644 --- a/arch/arm/mach-imx/mach-imx6ul.c +++ b/arch/arm/mach-imx/mach-imx6ul.c | |||
| @@ -46,7 +46,7 @@ static int ksz8081_phy_fixup(struct phy_device *dev) | |||
| 46 | static void __init imx6ul_enet_phy_init(void) | 46 | static void __init imx6ul_enet_phy_init(void) |
| 47 | { | 47 | { |
| 48 | if (IS_BUILTIN(CONFIG_PHYLIB)) | 48 | if (IS_BUILTIN(CONFIG_PHYLIB)) |
| 49 | phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff, | 49 | phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK, |
| 50 | ksz8081_phy_fixup); | 50 | ksz8081_phy_fixup); |
| 51 | } | 51 | } |
| 52 | 52 | ||
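The imx6ul fix widens the PHY fixup match from an exact ID (mask 0xffffffff) to the Micrel family mask, so the fixup also applies to other revisions of the KSZ8081. A sketch of the masked-ID comparison the PHY layer performs (the ID and mask values below are illustrative, not taken from the Micrel datasheet):

/* Sketch only: a narrower mask matches every revision of the same part. */
#include <stdint.h>
#include <stdio.h>

static int demo_phy_id_matches(uint32_t id, uint32_t uid, uint32_t mask)
{
    return (id & mask) == (uid & mask);
}

int main(void)
{
    uint32_t ksz8081_uid = 0x00221560;   /* assumed base ID */
    uint32_t rev_b_id    = 0x00221561;   /* same PHY, later revision */

    printf("exact mask:  %d\n",
           demo_phy_id_matches(rev_b_id, ksz8081_uid, 0xffffffff)); /* 0 */
    printf("family mask: %d\n",
           demo_phy_id_matches(rev_b_id, ksz8081_uid, 0xfffffff0)); /* 1 */
    return 0;
}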
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S index 5d7fb596bf4a..bf608441b357 100644 --- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S +++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S | |||
| @@ -43,8 +43,8 @@ | |||
| 43 | #define OTHERS_MASK (MODEM_IRQ_MASK | HOOK_SWITCH_MASK) | 43 | #define OTHERS_MASK (MODEM_IRQ_MASK | HOOK_SWITCH_MASK) |
| 44 | 44 | ||
| 45 | /* IRQ handler register bitmasks */ | 45 | /* IRQ handler register bitmasks */ |
| 46 | #define DEFERRED_FIQ_MASK (0x1 << (INT_DEFERRED_FIQ % IH2_BASE)) | 46 | #define DEFERRED_FIQ_MASK OMAP_IRQ_BIT(INT_DEFERRED_FIQ) |
| 47 | #define GPIO_BANK1_MASK (0x1 << INT_GPIO_BANK1) | 47 | #define GPIO_BANK1_MASK OMAP_IRQ_BIT(INT_GPIO_BANK1) |
| 48 | 48 | ||
| 49 | /* Driver buffer byte offsets */ | 49 | /* Driver buffer byte offsets */ |
| 50 | #define BUF_MASK (FIQ_MASK * 4) | 50 | #define BUF_MASK (FIQ_MASK * 4) |
| @@ -110,7 +110,7 @@ ENTRY(qwerty_fiqin_start) | |||
| 110 | mov r8, #2 @ reset FIQ agreement | 110 | mov r8, #2 @ reset FIQ agreement |
| 111 | str r8, [r12, #IRQ_CONTROL_REG_OFFSET] | 111 | str r8, [r12, #IRQ_CONTROL_REG_OFFSET] |
| 112 | 112 | ||
| 113 | cmp r10, #INT_GPIO_BANK1 @ is it GPIO bank interrupt? | 113 | cmp r10, #(INT_GPIO_BANK1 - NR_IRQS_LEGACY) @ is it GPIO interrupt? |
| 114 | beq gpio @ yes - process it | 114 | beq gpio @ yes - process it |
| 115 | 115 | ||
| 116 | mov r8, #1 | 116 | mov r8, #1 |
diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c index d1f12095f315..ec760ae2f917 100644 --- a/arch/arm/mach-omap1/ams-delta-fiq.c +++ b/arch/arm/mach-omap1/ams-delta-fiq.c | |||
| @@ -109,7 +109,8 @@ void __init ams_delta_init_fiq(void) | |||
| 109 | * Since no set_type() method is provided by OMAP irq chip, | 109 | * Since no set_type() method is provided by OMAP irq chip, |
| 110 | * switch to edge triggered interrupt type manually. | 110 | * switch to edge triggered interrupt type manually. |
| 111 | */ | 111 | */ |
| 112 | offset = IRQ_ILR0_REG_OFFSET + INT_DEFERRED_FIQ * 0x4; | 112 | offset = IRQ_ILR0_REG_OFFSET + |
| 113 | ((INT_DEFERRED_FIQ - NR_IRQS_LEGACY) & 0x1f) * 0x4; | ||
| 113 | val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1); | 114 | val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1); |
| 114 | omap_writel(val, DEFERRED_FIQ_IH_BASE + offset); | 115 | omap_writel(val, DEFERRED_FIQ_IH_BASE + offset); |
| 115 | 116 | ||
| @@ -149,7 +150,7 @@ void __init ams_delta_init_fiq(void) | |||
| 149 | /* | 150 | /* |
| 150 | * Redirect GPIO interrupts to FIQ | 151 | * Redirect GPIO interrupts to FIQ |
| 151 | */ | 152 | */ |
| 152 | offset = IRQ_ILR0_REG_OFFSET + INT_GPIO_BANK1 * 0x4; | 153 | offset = IRQ_ILR0_REG_OFFSET + (INT_GPIO_BANK1 - NR_IRQS_LEGACY) * 0x4; |
| 153 | val = omap_readl(OMAP_IH1_BASE + offset) | 1; | 154 | val = omap_readl(OMAP_IH1_BASE + offset) | 1; |
| 154 | omap_writel(val, OMAP_IH1_BASE + offset); | 155 | omap_writel(val, OMAP_IH1_BASE + offset); |
| 155 | } | 156 | } |
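
Both ams-delta hunks above subtract NR_IRQS_LEGACY before indexing the per-interrupt level registers: once the platform sits behind a linear IRQ domain, Linux IRQ numbers no longer equal the hardware line numbers, so the ILR offset must be computed from the hardware index. A small sketch of that offset calculation, with NR_IRQS_LEGACY, IRQ_ILR0_REG_OFFSET and the GPIO line number chosen as assumed example values:

/* Sketch only -- register layout values are assumptions, not OMAP1's. */
#include <stdio.h>

#define NR_IRQS_LEGACY      16     /* assumed Linux IRQ base offset */
#define IRQ_ILR0_REG_OFFSET 0x1c   /* assumed offset of ILR0 */

static unsigned int ilr_offset(unsigned int linux_irq)
{
	unsigned int hw_irq = linux_irq - NR_IRQS_LEGACY; /* hardware line number */

	return IRQ_ILR0_REG_OFFSET + hw_irq * 0x4;        /* one 32-bit ILR per line */
}

int main(void)
{
	unsigned int int_gpio_bank1 = 14 + NR_IRQS_LEGACY; /* hypothetical line 14 */

	printf("ILR offset for GPIO bank 1: 0x%x\n", ilr_offset(int_gpio_bank1));
	return 0;
}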
diff --git a/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h b/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h index adb5e7649659..6dfc3e1210a3 100644 --- a/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h +++ b/arch/arm/mach-omap1/include/mach/ams-delta-fiq.h | |||
| @@ -14,6 +14,8 @@ | |||
| 14 | #ifndef __AMS_DELTA_FIQ_H | 14 | #ifndef __AMS_DELTA_FIQ_H |
| 15 | #define __AMS_DELTA_FIQ_H | 15 | #define __AMS_DELTA_FIQ_H |
| 16 | 16 | ||
| 17 | #include <mach/irqs.h> | ||
| 18 | |||
| 17 | /* | 19 | /* |
| 18 | * Interrupt number used for passing control from FIQ to IRQ. | 20 | * Interrupt number used for passing control from FIQ to IRQ. |
| 19 | * IRQ12, described as reserved, has been selected. | 21 | * IRQ12, described as reserved, has been selected. |
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index 0517f0c1581a..1a648e9dfaa0 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig | |||
| @@ -17,6 +17,7 @@ config ARCH_OMAP3 | |||
| 17 | select PM_OPP if PM | 17 | select PM_OPP if PM |
| 18 | select PM if CPU_IDLE | 18 | select PM if CPU_IDLE |
| 19 | select SOC_HAS_OMAP2_SDRC | 19 | select SOC_HAS_OMAP2_SDRC |
| 20 | select ARM_ERRATA_430973 | ||
| 20 | 21 | ||
| 21 | config ARCH_OMAP4 | 22 | config ARCH_OMAP4 |
| 22 | bool "TI OMAP4" | 23 | bool "TI OMAP4" |
| @@ -36,6 +37,7 @@ config ARCH_OMAP4 | |||
| 36 | select PM if CPU_IDLE | 37 | select PM if CPU_IDLE |
| 37 | select ARM_ERRATA_754322 | 38 | select ARM_ERRATA_754322 |
| 38 | select ARM_ERRATA_775420 | 39 | select ARM_ERRATA_775420 |
| 40 | select OMAP_INTERCONNECT | ||
| 39 | 41 | ||
| 40 | config SOC_OMAP5 | 42 | config SOC_OMAP5 |
| 41 | bool "TI OMAP5" | 43 | bool "TI OMAP5" |
| @@ -67,6 +69,8 @@ config SOC_AM43XX | |||
| 67 | select HAVE_ARM_SCU | 69 | select HAVE_ARM_SCU |
| 68 | select GENERIC_CLOCKEVENTS_BROADCAST | 70 | select GENERIC_CLOCKEVENTS_BROADCAST |
| 69 | select HAVE_ARM_TWD | 71 | select HAVE_ARM_TWD |
| 72 | select ARM_ERRATA_754322 | ||
| 73 | select ARM_ERRATA_775420 | ||
| 70 | 74 | ||
| 71 | config SOC_DRA7XX | 75 | config SOC_DRA7XX |
| 72 | bool "TI DRA7XX" | 76 | bool "TI DRA7XX" |
| @@ -240,4 +244,12 @@ endmenu | |||
| 240 | 244 | ||
| 241 | endif | 245 | endif |
| 242 | 246 | ||
| 247 | config OMAP5_ERRATA_801819 | ||
| 248 | bool "Errata 801819: An eviction from L1 data cache might stall indefinitely" | ||
| 249 | depends on SOC_OMAP5 || SOC_DRA7XX | ||
| 250 | help | ||
| 251 | A livelock can occur in the L2 cache arbitration that might prevent | ||
| 252 | a snoop from completing. Under certain conditions this can cause the | ||
| 253 | system to deadlock. | ||
| 254 | |||
| 243 | endmenu | 255 | endmenu |
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h index af2851fbcdf0..bae263fba640 100644 --- a/arch/arm/mach-omap2/omap-secure.h +++ b/arch/arm/mach-omap2/omap-secure.h | |||
| @@ -46,6 +46,7 @@ | |||
| 46 | 46 | ||
| 47 | #define OMAP5_DRA7_MON_SET_CNTFRQ_INDEX 0x109 | 47 | #define OMAP5_DRA7_MON_SET_CNTFRQ_INDEX 0x109 |
| 48 | #define OMAP5_MON_AMBA_IF_INDEX 0x108 | 48 | #define OMAP5_MON_AMBA_IF_INDEX 0x108 |
| 49 | #define OMAP5_DRA7_MON_SET_ACR_INDEX 0x107 | ||
| 49 | 50 | ||
| 50 | /* Secure PPA(Primary Protected Application) APIs */ | 51 | /* Secure PPA(Primary Protected Application) APIs */ |
| 51 | #define OMAP4_PPA_L2_POR_INDEX 0x23 | 52 | #define OMAP4_PPA_L2_POR_INDEX 0x23 |
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index c625cc10d9f9..8cd1de914ee4 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c | |||
| @@ -50,6 +50,39 @@ void __iomem *omap4_get_scu_base(void) | |||
| 50 | return scu_base; | 50 | return scu_base; |
| 51 | } | 51 | } |
| 52 | 52 | ||
| 53 | #ifdef CONFIG_OMAP5_ERRATA_801819 | ||
| 54 | void omap5_erratum_workaround_801819(void) | ||
| 55 | { | ||
| 56 | u32 acr, revidr; | ||
| 57 | u32 acr_mask; | ||
| 58 | |||
| 59 | /* REVIDR[3] indicates erratum fix available on silicon */ | ||
| 60 | asm volatile ("mrc p15, 0, %0, c0, c0, 6" : "=r" (revidr)); | ||
| 61 | if (revidr & (0x1 << 3)) | ||
| 62 | return; | ||
| 63 | |||
| 64 | asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr)); | ||
| 65 | /* | ||
| 66 | * BIT(27) - Disables streaming. All write-allocate lines allocate in | ||
| 67 | * the L1 or L2 cache. | ||
| 68 | * BIT(25) - Disables streaming. All write-allocate lines allocate in | ||
| 69 | * the L1 cache. | ||
| 70 | */ | ||
| 71 | acr_mask = (0x3 << 25) | (0x3 << 27); | ||
| 72 | /* do we already have it done.. if yes, skip expensive smc */ | ||
| 73 | if ((acr & acr_mask) == acr_mask) | ||
| 74 | return; | ||
| 75 | |||
| 76 | acr |= acr_mask; | ||
| 77 | omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr); | ||
| 78 | |||
| 79 | pr_debug("%s: ARM erratum workaround 801819 applied on CPU%d\n", | ||
| 80 | __func__, smp_processor_id()); | ||
| 81 | } | ||
| 82 | #else | ||
| 83 | static inline void omap5_erratum_workaround_801819(void) { } | ||
| 84 | #endif | ||
| 85 | |||
| 53 | static void omap4_secondary_init(unsigned int cpu) | 86 | static void omap4_secondary_init(unsigned int cpu) |
| 54 | { | 87 | { |
| 55 | /* | 88 | /* |
| @@ -64,12 +97,15 @@ static void omap4_secondary_init(unsigned int cpu) | |||
| 64 | omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX, | 97 | omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX, |
| 65 | 4, 0, 0, 0, 0, 0); | 98 | 4, 0, 0, 0, 0, 0); |
| 66 | 99 | ||
| 67 | /* | 100 | if (soc_is_omap54xx() || soc_is_dra7xx()) { |
| 68 | * Configure the CNTFRQ register for the secondary cpu's which | 101 | /* |
| 69 | * indicates the frequency of the cpu local timers. | 102 | * Configure the CNTFRQ register for the secondary cpu's which |
| 70 | */ | 103 | * indicates the frequency of the cpu local timers. |
| 71 | if (soc_is_omap54xx() || soc_is_dra7xx()) | 104 | */ |
| 72 | set_cntfreq(); | 105 | set_cntfreq(); |
| 106 | /* Configure ACR to disable streaming WA for 801819 */ | ||
| 107 | omap5_erratum_workaround_801819(); | ||
| 108 | } | ||
| 73 | 109 | ||
| 74 | /* | 110 | /* |
| 75 | * Synchronise with the boot thread. | 111 | * Synchronise with the boot thread. |
| @@ -218,6 +254,8 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus) | |||
| 218 | 254 | ||
| 219 | if (cpu_is_omap446x()) | 255 | if (cpu_is_omap446x()) |
| 220 | startup_addr = omap4460_secondary_startup; | 256 | startup_addr = omap4460_secondary_startup; |
| 257 | if (soc_is_dra74x() || soc_is_omap54xx()) | ||
| 258 | omap5_erratum_workaround_801819(); | ||
| 221 | 259 | ||
| 222 | /* | 260 | /* |
| 223 | * Write the address of secondary startup routine into the | 261 | * Write the address of secondary startup routine into the |
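
The omap-smp.c hunk adds the 801819 workaround shown above: skip it when REVIDR[3] reports the fix in silicon, otherwise set the streaming-disable bits in the auxiliary control register via the secure monitor, short-circuiting when they are already set. A userspace sketch of just that mask handling follows; the MRC reads and the omap_smc1() call are privileged and are replaced here by plain variables.

/* Sketch of the gate/short-circuit logic only; no real register access. */
#include <stdio.h>
#include <stdint.h>

#define REVIDR_FIX_PRESENT    (1u << 3)                   /* erratum fixed in silicon */
#define ACR_DISABLE_STREAMING ((0x3u << 25) | (0x3u << 27)) /* same mask as the hunk */

static int apply_workaround_801819(uint32_t revidr, uint32_t *acr)
{
	if (revidr & REVIDR_FIX_PRESENT)
		return 0;  /* silicon already fixed, nothing to do */

	if ((*acr & ACR_DISABLE_STREAMING) == ACR_DISABLE_STREAMING)
		return 0;  /* already programmed, skip the expensive SMC */

	*acr |= ACR_DISABLE_STREAMING;  /* in the kernel this goes through omap_smc1() */
	return 1;
}

int main(void)
{
	uint32_t acr = 0;

	printf("applied: %d, acr=0x%08x\n", apply_workaround_801819(0, &acr), acr);
	printf("applied again: %d\n", apply_workaround_801819(0, &acr));
	return 0;
}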
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c index 78af6d8cf2e2..daf2753de7aa 100644 --- a/arch/arm/mach-omap2/powerdomain.c +++ b/arch/arm/mach-omap2/powerdomain.c | |||
| @@ -186,8 +186,9 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag) | |||
| 186 | trace_state = (PWRDM_TRACE_STATES_FLAG | | 186 | trace_state = (PWRDM_TRACE_STATES_FLAG | |
| 187 | ((next & OMAP_POWERSTATE_MASK) << 8) | | 187 | ((next & OMAP_POWERSTATE_MASK) << 8) | |
| 188 | ((prev & OMAP_POWERSTATE_MASK) << 0)); | 188 | ((prev & OMAP_POWERSTATE_MASK) << 0)); |
| 189 | trace_power_domain_target(pwrdm->name, trace_state, | 189 | trace_power_domain_target_rcuidle(pwrdm->name, |
| 190 | smp_processor_id()); | 190 | trace_state, |
| 191 | smp_processor_id()); | ||
| 191 | } | 192 | } |
| 192 | break; | 193 | break; |
| 193 | default: | 194 | default: |
| @@ -523,8 +524,8 @@ int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst) | |||
| 523 | 524 | ||
| 524 | if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) { | 525 | if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) { |
| 525 | /* Trace the pwrdm desired target state */ | 526 | /* Trace the pwrdm desired target state */ |
| 526 | trace_power_domain_target(pwrdm->name, pwrst, | 527 | trace_power_domain_target_rcuidle(pwrdm->name, pwrst, |
| 527 | smp_processor_id()); | 528 | smp_processor_id()); |
| 528 | /* Program the pwrdm desired target state */ | 529 | /* Program the pwrdm desired target state */ |
| 529 | ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst); | 530 | ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst); |
| 530 | } | 531 | } |
diff --git a/arch/arm/mach-omap2/powerdomains7xx_data.c b/arch/arm/mach-omap2/powerdomains7xx_data.c index 0ec2d00f4237..eb350a673133 100644 --- a/arch/arm/mach-omap2/powerdomains7xx_data.c +++ b/arch/arm/mach-omap2/powerdomains7xx_data.c | |||
| @@ -36,14 +36,7 @@ static struct powerdomain iva_7xx_pwrdm = { | |||
| 36 | .prcm_offs = DRA7XX_PRM_IVA_INST, | 36 | .prcm_offs = DRA7XX_PRM_IVA_INST, |
| 37 | .prcm_partition = DRA7XX_PRM_PARTITION, | 37 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 38 | .pwrsts = PWRSTS_OFF_ON, | 38 | .pwrsts = PWRSTS_OFF_ON, |
| 39 | .pwrsts_logic_ret = PWRSTS_OFF, | ||
| 40 | .banks = 4, | 39 | .banks = 4, |
| 41 | .pwrsts_mem_ret = { | ||
| 42 | [0] = PWRSTS_OFF_RET, /* hwa_mem */ | ||
| 43 | [1] = PWRSTS_OFF_RET, /* sl2_mem */ | ||
| 44 | [2] = PWRSTS_OFF_RET, /* tcm1_mem */ | ||
| 45 | [3] = PWRSTS_OFF_RET, /* tcm2_mem */ | ||
| 46 | }, | ||
| 47 | .pwrsts_mem_on = { | 40 | .pwrsts_mem_on = { |
| 48 | [0] = PWRSTS_ON, /* hwa_mem */ | 41 | [0] = PWRSTS_ON, /* hwa_mem */ |
| 49 | [1] = PWRSTS_ON, /* sl2_mem */ | 42 | [1] = PWRSTS_ON, /* sl2_mem */ |
| @@ -76,12 +69,7 @@ static struct powerdomain ipu_7xx_pwrdm = { | |||
| 76 | .prcm_offs = DRA7XX_PRM_IPU_INST, | 69 | .prcm_offs = DRA7XX_PRM_IPU_INST, |
| 77 | .prcm_partition = DRA7XX_PRM_PARTITION, | 70 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 78 | .pwrsts = PWRSTS_OFF_ON, | 71 | .pwrsts = PWRSTS_OFF_ON, |
| 79 | .pwrsts_logic_ret = PWRSTS_OFF, | ||
| 80 | .banks = 2, | 72 | .banks = 2, |
| 81 | .pwrsts_mem_ret = { | ||
| 82 | [0] = PWRSTS_OFF_RET, /* aessmem */ | ||
| 83 | [1] = PWRSTS_OFF_RET, /* periphmem */ | ||
| 84 | }, | ||
| 85 | .pwrsts_mem_on = { | 73 | .pwrsts_mem_on = { |
| 86 | [0] = PWRSTS_ON, /* aessmem */ | 74 | [0] = PWRSTS_ON, /* aessmem */ |
| 87 | [1] = PWRSTS_ON, /* periphmem */ | 75 | [1] = PWRSTS_ON, /* periphmem */ |
| @@ -95,11 +83,7 @@ static struct powerdomain dss_7xx_pwrdm = { | |||
| 95 | .prcm_offs = DRA7XX_PRM_DSS_INST, | 83 | .prcm_offs = DRA7XX_PRM_DSS_INST, |
| 96 | .prcm_partition = DRA7XX_PRM_PARTITION, | 84 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 97 | .pwrsts = PWRSTS_OFF_ON, | 85 | .pwrsts = PWRSTS_OFF_ON, |
| 98 | .pwrsts_logic_ret = PWRSTS_OFF, | ||
| 99 | .banks = 1, | 86 | .banks = 1, |
| 100 | .pwrsts_mem_ret = { | ||
| 101 | [0] = PWRSTS_OFF_RET, /* dss_mem */ | ||
| 102 | }, | ||
| 103 | .pwrsts_mem_on = { | 87 | .pwrsts_mem_on = { |
| 104 | [0] = PWRSTS_ON, /* dss_mem */ | 88 | [0] = PWRSTS_ON, /* dss_mem */ |
| 105 | }, | 89 | }, |
| @@ -111,13 +95,8 @@ static struct powerdomain l4per_7xx_pwrdm = { | |||
| 111 | .name = "l4per_pwrdm", | 95 | .name = "l4per_pwrdm", |
| 112 | .prcm_offs = DRA7XX_PRM_L4PER_INST, | 96 | .prcm_offs = DRA7XX_PRM_L4PER_INST, |
| 113 | .prcm_partition = DRA7XX_PRM_PARTITION, | 97 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 114 | .pwrsts = PWRSTS_RET_ON, | 98 | .pwrsts = PWRSTS_ON, |
| 115 | .pwrsts_logic_ret = PWRSTS_RET, | ||
| 116 | .banks = 2, | 99 | .banks = 2, |
| 117 | .pwrsts_mem_ret = { | ||
| 118 | [0] = PWRSTS_OFF_RET, /* nonretained_bank */ | ||
| 119 | [1] = PWRSTS_OFF_RET, /* retained_bank */ | ||
| 120 | }, | ||
| 121 | .pwrsts_mem_on = { | 100 | .pwrsts_mem_on = { |
| 122 | [0] = PWRSTS_ON, /* nonretained_bank */ | 101 | [0] = PWRSTS_ON, /* nonretained_bank */ |
| 123 | [1] = PWRSTS_ON, /* retained_bank */ | 102 | [1] = PWRSTS_ON, /* retained_bank */ |
| @@ -132,9 +111,6 @@ static struct powerdomain gpu_7xx_pwrdm = { | |||
| 132 | .prcm_partition = DRA7XX_PRM_PARTITION, | 111 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 133 | .pwrsts = PWRSTS_OFF_ON, | 112 | .pwrsts = PWRSTS_OFF_ON, |
| 134 | .banks = 1, | 113 | .banks = 1, |
| 135 | .pwrsts_mem_ret = { | ||
| 136 | [0] = PWRSTS_OFF_RET, /* gpu_mem */ | ||
| 137 | }, | ||
| 138 | .pwrsts_mem_on = { | 114 | .pwrsts_mem_on = { |
| 139 | [0] = PWRSTS_ON, /* gpu_mem */ | 115 | [0] = PWRSTS_ON, /* gpu_mem */ |
| 140 | }, | 116 | }, |
| @@ -148,8 +124,6 @@ static struct powerdomain wkupaon_7xx_pwrdm = { | |||
| 148 | .prcm_partition = DRA7XX_PRM_PARTITION, | 124 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 149 | .pwrsts = PWRSTS_ON, | 125 | .pwrsts = PWRSTS_ON, |
| 150 | .banks = 1, | 126 | .banks = 1, |
| 151 | .pwrsts_mem_ret = { | ||
| 152 | }, | ||
| 153 | .pwrsts_mem_on = { | 127 | .pwrsts_mem_on = { |
| 154 | [0] = PWRSTS_ON, /* wkup_bank */ | 128 | [0] = PWRSTS_ON, /* wkup_bank */ |
| 155 | }, | 129 | }, |
| @@ -161,15 +135,7 @@ static struct powerdomain core_7xx_pwrdm = { | |||
| 161 | .prcm_offs = DRA7XX_PRM_CORE_INST, | 135 | .prcm_offs = DRA7XX_PRM_CORE_INST, |
| 162 | .prcm_partition = DRA7XX_PRM_PARTITION, | 136 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 163 | .pwrsts = PWRSTS_ON, | 137 | .pwrsts = PWRSTS_ON, |
| 164 | .pwrsts_logic_ret = PWRSTS_RET, | ||
| 165 | .banks = 5, | 138 | .banks = 5, |
| 166 | .pwrsts_mem_ret = { | ||
| 167 | [0] = PWRSTS_OFF_RET, /* core_nret_bank */ | ||
| 168 | [1] = PWRSTS_OFF_RET, /* core_ocmram */ | ||
| 169 | [2] = PWRSTS_OFF_RET, /* core_other_bank */ | ||
| 170 | [3] = PWRSTS_OFF_RET, /* ipu_l2ram */ | ||
| 171 | [4] = PWRSTS_OFF_RET, /* ipu_unicache */ | ||
| 172 | }, | ||
| 173 | .pwrsts_mem_on = { | 139 | .pwrsts_mem_on = { |
| 174 | [0] = PWRSTS_ON, /* core_nret_bank */ | 140 | [0] = PWRSTS_ON, /* core_nret_bank */ |
| 175 | [1] = PWRSTS_ON, /* core_ocmram */ | 141 | [1] = PWRSTS_ON, /* core_ocmram */ |
| @@ -226,11 +192,7 @@ static struct powerdomain vpe_7xx_pwrdm = { | |||
| 226 | .prcm_offs = DRA7XX_PRM_VPE_INST, | 192 | .prcm_offs = DRA7XX_PRM_VPE_INST, |
| 227 | .prcm_partition = DRA7XX_PRM_PARTITION, | 193 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 228 | .pwrsts = PWRSTS_OFF_ON, | 194 | .pwrsts = PWRSTS_OFF_ON, |
| 229 | .pwrsts_logic_ret = PWRSTS_OFF, | ||
| 230 | .banks = 1, | 195 | .banks = 1, |
| 231 | .pwrsts_mem_ret = { | ||
| 232 | [0] = PWRSTS_OFF_RET, /* vpe_bank */ | ||
| 233 | }, | ||
| 234 | .pwrsts_mem_on = { | 196 | .pwrsts_mem_on = { |
| 235 | [0] = PWRSTS_ON, /* vpe_bank */ | 197 | [0] = PWRSTS_ON, /* vpe_bank */ |
| 236 | }, | 198 | }, |
| @@ -260,14 +222,8 @@ static struct powerdomain l3init_7xx_pwrdm = { | |||
| 260 | .name = "l3init_pwrdm", | 222 | .name = "l3init_pwrdm", |
| 261 | .prcm_offs = DRA7XX_PRM_L3INIT_INST, | 223 | .prcm_offs = DRA7XX_PRM_L3INIT_INST, |
| 262 | .prcm_partition = DRA7XX_PRM_PARTITION, | 224 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 263 | .pwrsts = PWRSTS_RET_ON, | 225 | .pwrsts = PWRSTS_ON, |
| 264 | .pwrsts_logic_ret = PWRSTS_RET, | ||
| 265 | .banks = 3, | 226 | .banks = 3, |
| 266 | .pwrsts_mem_ret = { | ||
| 267 | [0] = PWRSTS_OFF_RET, /* gmac_bank */ | ||
| 268 | [1] = PWRSTS_OFF_RET, /* l3init_bank1 */ | ||
| 269 | [2] = PWRSTS_OFF_RET, /* l3init_bank2 */ | ||
| 270 | }, | ||
| 271 | .pwrsts_mem_on = { | 227 | .pwrsts_mem_on = { |
| 272 | [0] = PWRSTS_ON, /* gmac_bank */ | 228 | [0] = PWRSTS_ON, /* gmac_bank */ |
| 273 | [1] = PWRSTS_ON, /* l3init_bank1 */ | 229 | [1] = PWRSTS_ON, /* l3init_bank1 */ |
| @@ -283,9 +239,6 @@ static struct powerdomain eve3_7xx_pwrdm = { | |||
| 283 | .prcm_partition = DRA7XX_PRM_PARTITION, | 239 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 284 | .pwrsts = PWRSTS_OFF_ON, | 240 | .pwrsts = PWRSTS_OFF_ON, |
| 285 | .banks = 1, | 241 | .banks = 1, |
| 286 | .pwrsts_mem_ret = { | ||
| 287 | [0] = PWRSTS_OFF_RET, /* eve3_bank */ | ||
| 288 | }, | ||
| 289 | .pwrsts_mem_on = { | 242 | .pwrsts_mem_on = { |
| 290 | [0] = PWRSTS_ON, /* eve3_bank */ | 243 | [0] = PWRSTS_ON, /* eve3_bank */ |
| 291 | }, | 244 | }, |
| @@ -299,9 +252,6 @@ static struct powerdomain emu_7xx_pwrdm = { | |||
| 299 | .prcm_partition = DRA7XX_PRM_PARTITION, | 252 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 300 | .pwrsts = PWRSTS_OFF_ON, | 253 | .pwrsts = PWRSTS_OFF_ON, |
| 301 | .banks = 1, | 254 | .banks = 1, |
| 302 | .pwrsts_mem_ret = { | ||
| 303 | [0] = PWRSTS_OFF_RET, /* emu_bank */ | ||
| 304 | }, | ||
| 305 | .pwrsts_mem_on = { | 255 | .pwrsts_mem_on = { |
| 306 | [0] = PWRSTS_ON, /* emu_bank */ | 256 | [0] = PWRSTS_ON, /* emu_bank */ |
| 307 | }, | 257 | }, |
| @@ -314,11 +264,6 @@ static struct powerdomain dsp2_7xx_pwrdm = { | |||
| 314 | .prcm_partition = DRA7XX_PRM_PARTITION, | 264 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 315 | .pwrsts = PWRSTS_OFF_ON, | 265 | .pwrsts = PWRSTS_OFF_ON, |
| 316 | .banks = 3, | 266 | .banks = 3, |
| 317 | .pwrsts_mem_ret = { | ||
| 318 | [0] = PWRSTS_OFF_RET, /* dsp2_edma */ | ||
| 319 | [1] = PWRSTS_OFF_RET, /* dsp2_l1 */ | ||
| 320 | [2] = PWRSTS_OFF_RET, /* dsp2_l2 */ | ||
| 321 | }, | ||
| 322 | .pwrsts_mem_on = { | 267 | .pwrsts_mem_on = { |
| 323 | [0] = PWRSTS_ON, /* dsp2_edma */ | 268 | [0] = PWRSTS_ON, /* dsp2_edma */ |
| 324 | [1] = PWRSTS_ON, /* dsp2_l1 */ | 269 | [1] = PWRSTS_ON, /* dsp2_l1 */ |
| @@ -334,11 +279,6 @@ static struct powerdomain dsp1_7xx_pwrdm = { | |||
| 334 | .prcm_partition = DRA7XX_PRM_PARTITION, | 279 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 335 | .pwrsts = PWRSTS_OFF_ON, | 280 | .pwrsts = PWRSTS_OFF_ON, |
| 336 | .banks = 3, | 281 | .banks = 3, |
| 337 | .pwrsts_mem_ret = { | ||
| 338 | [0] = PWRSTS_OFF_RET, /* dsp1_edma */ | ||
| 339 | [1] = PWRSTS_OFF_RET, /* dsp1_l1 */ | ||
| 340 | [2] = PWRSTS_OFF_RET, /* dsp1_l2 */ | ||
| 341 | }, | ||
| 342 | .pwrsts_mem_on = { | 282 | .pwrsts_mem_on = { |
| 343 | [0] = PWRSTS_ON, /* dsp1_edma */ | 283 | [0] = PWRSTS_ON, /* dsp1_edma */ |
| 344 | [1] = PWRSTS_ON, /* dsp1_l1 */ | 284 | [1] = PWRSTS_ON, /* dsp1_l1 */ |
| @@ -354,9 +294,6 @@ static struct powerdomain cam_7xx_pwrdm = { | |||
| 354 | .prcm_partition = DRA7XX_PRM_PARTITION, | 294 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 355 | .pwrsts = PWRSTS_OFF_ON, | 295 | .pwrsts = PWRSTS_OFF_ON, |
| 356 | .banks = 1, | 296 | .banks = 1, |
| 357 | .pwrsts_mem_ret = { | ||
| 358 | [0] = PWRSTS_OFF_RET, /* vip_bank */ | ||
| 359 | }, | ||
| 360 | .pwrsts_mem_on = { | 297 | .pwrsts_mem_on = { |
| 361 | [0] = PWRSTS_ON, /* vip_bank */ | 298 | [0] = PWRSTS_ON, /* vip_bank */ |
| 362 | }, | 299 | }, |
| @@ -370,9 +307,6 @@ static struct powerdomain eve4_7xx_pwrdm = { | |||
| 370 | .prcm_partition = DRA7XX_PRM_PARTITION, | 307 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 371 | .pwrsts = PWRSTS_OFF_ON, | 308 | .pwrsts = PWRSTS_OFF_ON, |
| 372 | .banks = 1, | 309 | .banks = 1, |
| 373 | .pwrsts_mem_ret = { | ||
| 374 | [0] = PWRSTS_OFF_RET, /* eve4_bank */ | ||
| 375 | }, | ||
| 376 | .pwrsts_mem_on = { | 310 | .pwrsts_mem_on = { |
| 377 | [0] = PWRSTS_ON, /* eve4_bank */ | 311 | [0] = PWRSTS_ON, /* eve4_bank */ |
| 378 | }, | 312 | }, |
| @@ -386,9 +320,6 @@ static struct powerdomain eve2_7xx_pwrdm = { | |||
| 386 | .prcm_partition = DRA7XX_PRM_PARTITION, | 320 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 387 | .pwrsts = PWRSTS_OFF_ON, | 321 | .pwrsts = PWRSTS_OFF_ON, |
| 388 | .banks = 1, | 322 | .banks = 1, |
| 389 | .pwrsts_mem_ret = { | ||
| 390 | [0] = PWRSTS_OFF_RET, /* eve2_bank */ | ||
| 391 | }, | ||
| 392 | .pwrsts_mem_on = { | 323 | .pwrsts_mem_on = { |
| 393 | [0] = PWRSTS_ON, /* eve2_bank */ | 324 | [0] = PWRSTS_ON, /* eve2_bank */ |
| 394 | }, | 325 | }, |
| @@ -402,9 +333,6 @@ static struct powerdomain eve1_7xx_pwrdm = { | |||
| 402 | .prcm_partition = DRA7XX_PRM_PARTITION, | 333 | .prcm_partition = DRA7XX_PRM_PARTITION, |
| 403 | .pwrsts = PWRSTS_OFF_ON, | 334 | .pwrsts = PWRSTS_OFF_ON, |
| 404 | .banks = 1, | 335 | .banks = 1, |
| 405 | .pwrsts_mem_ret = { | ||
| 406 | [0] = PWRSTS_OFF_RET, /* eve1_bank */ | ||
| 407 | }, | ||
| 408 | .pwrsts_mem_on = { | 336 | .pwrsts_mem_on = { |
| 409 | [0] = PWRSTS_ON, /* eve1_bank */ | 337 | [0] = PWRSTS_ON, /* eve1_bank */ |
| 410 | }, | 338 | }, |
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 5b385bb8aff9..cb9497a20fb3 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c | |||
| @@ -496,8 +496,7 @@ void __init omap_init_time(void) | |||
| 496 | __omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon", | 496 | __omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon", |
| 497 | 2, "timer_sys_ck", NULL, false); | 497 | 2, "timer_sys_ck", NULL, false); |
| 498 | 498 | ||
| 499 | if (of_have_populated_dt()) | 499 | clocksource_probe(); |
| 500 | clocksource_probe(); | ||
| 501 | } | 500 | } |
| 502 | 501 | ||
| 503 | #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX) | 502 | #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX) |
| @@ -505,6 +504,8 @@ void __init omap3_secure_sync32k_timer_init(void) | |||
| 505 | { | 504 | { |
| 506 | __omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure", | 505 | __omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure", |
| 507 | 2, "timer_sys_ck", NULL, false); | 506 | 2, "timer_sys_ck", NULL, false); |
| 507 | |||
| 508 | clocksource_probe(); | ||
| 508 | } | 509 | } |
| 509 | #endif /* CONFIG_ARCH_OMAP3 */ | 510 | #endif /* CONFIG_ARCH_OMAP3 */ |
| 510 | 511 | ||
| @@ -513,6 +514,8 @@ void __init omap3_gptimer_timer_init(void) | |||
| 513 | { | 514 | { |
| 514 | __omap_sync32k_timer_init(2, "timer_sys_ck", NULL, | 515 | __omap_sync32k_timer_init(2, "timer_sys_ck", NULL, |
| 515 | 1, "timer_sys_ck", "ti,timer-alwon", true); | 516 | 1, "timer_sys_ck", "ti,timer-alwon", true); |
| 517 | |||
| 518 | clocksource_probe(); | ||
| 516 | } | 519 | } |
| 517 | #endif | 520 | #endif |
| 518 | 521 | ||
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c index 5766ce2be32b..8409cab3f760 100644 --- a/arch/arm/mach-vexpress/spc.c +++ b/arch/arm/mach-vexpress/spc.c | |||
| @@ -547,7 +547,7 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev) | |||
| 547 | 547 | ||
| 548 | init.name = dev_name(cpu_dev); | 548 | init.name = dev_name(cpu_dev); |
| 549 | init.ops = &clk_spc_ops; | 549 | init.ops = &clk_spc_ops; |
| 550 | init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE; | 550 | init.flags = CLK_GET_RATE_NOCACHE; |
| 551 | init.num_parents = 0; | 551 | init.num_parents = 0; |
| 552 | 552 | ||
| 553 | return devm_clk_register(cpu_dev, &spc->hw); | 553 | return devm_clk_register(cpu_dev, &spc->hw); |
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c index 84baa16f4c0b..e93aa6734147 100644 --- a/arch/arm/plat-samsung/devs.c +++ b/arch/arm/plat-samsung/devs.c | |||
| @@ -68,7 +68,7 @@ | |||
| 68 | #include <linux/platform_data/asoc-s3c.h> | 68 | #include <linux/platform_data/asoc-s3c.h> |
| 69 | #include <linux/platform_data/spi-s3c64xx.h> | 69 | #include <linux/platform_data/spi-s3c64xx.h> |
| 70 | 70 | ||
| 71 | static u64 samsung_device_dma_mask = DMA_BIT_MASK(32); | 71 | #define samsung_device_dma_mask (*((u64[]) { DMA_BIT_MASK(32) })) |
| 72 | 72 | ||
| 73 | /* AC97 */ | 73 | /* AC97 */ |
| 74 | #ifdef CONFIG_CPU_S3C2440 | 74 | #ifdef CONFIG_CPU_S3C2440 |
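
The one-line devs.c change swaps a shared static u64 for a compound-literal macro. A self-contained sketch of what that construct does (the macro is copied from the hunk; the surrounding types are simplified stand-ins): at file scope the compound literal has static storage duration, so every expansion produces its own addressable, writable u64 initialised to DMA_BIT_MASK(32), which can be pointed at from designated initialisers without all devices sharing one named variable.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* same construct as the hunk above */
#define samsung_device_dma_mask (*((u64[]) { DMA_BIT_MASK(32) }))

struct fake_device {          /* stand-in for struct platform_device's dev.dma_mask */
	u64 *dma_mask;
};

static struct fake_device dev_a = { .dma_mask = &samsung_device_dma_mask };
static struct fake_device dev_b = { .dma_mask = &samsung_device_dma_mask };

int main(void)
{
	/* Two distinct objects, both initialised to a 32-bit mask. */
	printf("a=%p b=%p same=%d\n", (void *)dev_a.dma_mask,
	       (void *)dev_b.dma_mask, dev_a.dma_mask == dev_b.dma_mask);
	printf("mask=0x%llx\n", (unsigned long long)*dev_a.dma_mask);
	return 0;
}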
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 76747d92bc72..5a0a691d4220 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
| @@ -113,6 +113,18 @@ config ARCH_PHYS_ADDR_T_64BIT | |||
| 113 | config MMU | 113 | config MMU |
| 114 | def_bool y | 114 | def_bool y |
| 115 | 115 | ||
| 116 | config ARM64_PAGE_SHIFT | ||
| 117 | int | ||
| 118 | default 16 if ARM64_64K_PAGES | ||
| 119 | default 14 if ARM64_16K_PAGES | ||
| 120 | default 12 | ||
| 121 | |||
| 122 | config ARM64_CONT_SHIFT | ||
| 123 | int | ||
| 124 | default 5 if ARM64_64K_PAGES | ||
| 125 | default 7 if ARM64_16K_PAGES | ||
| 126 | default 4 | ||
| 127 | |||
| 116 | config ARCH_MMAP_RND_BITS_MIN | 128 | config ARCH_MMAP_RND_BITS_MIN |
| 117 | default 14 if ARM64_64K_PAGES | 129 | default 14 if ARM64_64K_PAGES |
| 118 | default 16 if ARM64_16K_PAGES | 130 | default 16 if ARM64_16K_PAGES |
| @@ -426,6 +438,15 @@ config CAVIUM_ERRATUM_22375 | |||
| 426 | 438 | ||
| 427 | If unsure, say Y. | 439 | If unsure, say Y. |
| 428 | 440 | ||
| 441 | config CAVIUM_ERRATUM_23144 | ||
| 442 | bool "Cavium erratum 23144: ITS SYNC hang on dual socket system" | ||
| 443 | depends on NUMA | ||
| 444 | default y | ||
| 445 | help | ||
| 446 | ITS SYNC command hang for cross node io and collections/cpu mapping. | ||
| 447 | |||
| 448 | If unsure, say Y. | ||
| 449 | |||
| 429 | config CAVIUM_ERRATUM_23154 | 450 | config CAVIUM_ERRATUM_23154 |
| 430 | bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed" | 451 | bool "Cavium erratum 23154: Access to ICC_IAR1_EL1 is not sync'ed" |
| 431 | default y | 452 | default y |
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index 710fde4ad0f0..0cc758cdd0dc 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug | |||
| @@ -12,7 +12,8 @@ config ARM64_PTDUMP | |||
| 12 | who are working in architecture specific areas of the kernel. | 12 | who are working in architecture specific areas of the kernel. |
| 13 | It is probably not a good idea to enable this feature in a production | 13 | It is probably not a good idea to enable this feature in a production |
| 14 | kernel. | 14 | kernel. |
| 15 | If in doubt, say "N" | 15 | |
| 16 | If in doubt, say N. | ||
| 16 | 17 | ||
| 17 | config PID_IN_CONTEXTIDR | 18 | config PID_IN_CONTEXTIDR |
| 18 | bool "Write the current PID to the CONTEXTIDR register" | 19 | bool "Write the current PID to the CONTEXTIDR register" |
| @@ -38,15 +39,15 @@ config ARM64_RANDOMIZE_TEXT_OFFSET | |||
| 38 | value. | 39 | value. |
| 39 | 40 | ||
| 40 | config DEBUG_SET_MODULE_RONX | 41 | config DEBUG_SET_MODULE_RONX |
| 41 | bool "Set loadable kernel module data as NX and text as RO" | 42 | bool "Set loadable kernel module data as NX and text as RO" |
| 42 | depends on MODULES | 43 | depends on MODULES |
| 43 | help | 44 | default y |
| 44 | This option helps catch unintended modifications to loadable | 45 | help |
| 45 | kernel module's text and read-only data. It also prevents execution | 46 | If this is set, kernel module text and rodata will be made read-only. |

| 46 | of module data. Such protection may interfere with run-time code | 47 | This is to help catch accidental or malicious attempts to change the |
| 47 | patching and dynamic kernel tracing - and they might also protect | 48 | kernel's executable code. |
| 48 | against certain classes of kernel exploits. | 49 | |
| 49 | If in doubt, say "N". | 50 | If in doubt, say Y. |
| 50 | 51 | ||
| 51 | config DEBUG_RODATA | 52 | config DEBUG_RODATA |
| 52 | bool "Make kernel text and rodata read-only" | 53 | bool "Make kernel text and rodata read-only" |
| @@ -56,7 +57,7 @@ config DEBUG_RODATA | |||
| 56 | is to help catch accidental or malicious attempts to change the | 57 | is to help catch accidental or malicious attempts to change the |
| 57 | kernel's executable code. | 58 | kernel's executable code. |
| 58 | 59 | ||
| 59 | If in doubt, say Y | 60 | If in doubt, say Y. |
| 60 | 61 | ||
| 61 | config DEBUG_ALIGN_RODATA | 62 | config DEBUG_ALIGN_RODATA |
| 62 | depends on DEBUG_RODATA | 63 | depends on DEBUG_RODATA |
| @@ -69,7 +70,7 @@ config DEBUG_ALIGN_RODATA | |||
| 69 | alignment and potentially wasted space. Turn on this option if | 70 | alignment and potentially wasted space. Turn on this option if |
| 70 | performance is more important than memory pressure. | 71 | performance is more important than memory pressure. |
| 71 | 72 | ||
| 72 | If in doubt, say N | 73 | If in doubt, say N. |
| 73 | 74 | ||
| 74 | source "drivers/hwtracing/coresight/Kconfig" | 75 | source "drivers/hwtracing/coresight/Kconfig" |
| 75 | 76 | ||
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 354d75402ace..648a32c89541 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile | |||
| @@ -60,7 +60,9 @@ head-y := arch/arm64/kernel/head.o | |||
| 60 | 60 | ||
| 61 | # The byte offset of the kernel image in RAM from the start of RAM. | 61 | # The byte offset of the kernel image in RAM from the start of RAM. |
| 62 | ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y) | 62 | ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y) |
| 63 | TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}') | 63 | TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \ |
| 64 | int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \ | ||
| 65 | rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}") | ||
| 64 | else | 66 | else |
| 65 | TEXT_OFFSET := 0x00080000 | 67 | TEXT_OFFSET := 0x00080000 |
| 66 | endif | 68 | endif |
| @@ -93,7 +95,7 @@ boot := arch/arm64/boot | |||
| 93 | Image: vmlinux | 95 | Image: vmlinux |
| 94 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | 96 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ |
| 95 | 97 | ||
| 96 | Image.%: vmlinux | 98 | Image.%: Image |
| 97 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | 99 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ |
| 98 | 100 | ||
| 99 | zinstall install: | 101 | zinstall install: |
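
The Makefile hunk replaces the hard-coded 4K granularity of the randomised TEXT_OFFSET with one derived from CONFIG_ARM64_PAGE_SHIFT: pick a random multiple of the configured page size that stays below 2MiB. A small C sketch of the same arithmetic (the awk in the Makefile is the authoritative version; this only reproduces the rounding):

#include <stdio.h>
#include <stdlib.h>

static unsigned long random_text_offset(unsigned int page_shift)
{
	unsigned long page  = 1UL << page_shift;
	unsigned long slots = (2UL * 1024 * 1024) / page; /* candidate offsets below 2MiB */

	return ((unsigned long)rand() % slots) * page;    /* always page-aligned */
}

int main(void)
{
	unsigned int shifts[] = { 12, 14, 16 };           /* 4K, 16K, 64K pages */

	srand(0);
	for (unsigned int i = 0; i < 3; i++)
		printf("PAGE_SHIFT=%u -> TEXT_OFFSET=0x%06lx\n",
		       shifts[i], random_text_offset(shifts[i]));
	return 0;
}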
diff --git a/arch/arm64/boot/dts/lg/lg1312.dtsi b/arch/arm64/boot/dts/lg/lg1312.dtsi index 3a4e9a2ab313..fbafa24cd533 100644 --- a/arch/arm64/boot/dts/lg/lg1312.dtsi +++ b/arch/arm64/boot/dts/lg/lg1312.dtsi | |||
| @@ -125,7 +125,7 @@ | |||
| 125 | #size-cells = <1>; | 125 | #size-cells = <1>; |
| 126 | #interrupts-cells = <3>; | 126 | #interrupts-cells = <3>; |
| 127 | 127 | ||
| 128 | compatible = "arm,amba-bus"; | 128 | compatible = "simple-bus"; |
| 129 | interrupt-parent = <&gic>; | 129 | interrupt-parent = <&gic>; |
| 130 | ranges; | 130 | ranges; |
| 131 | 131 | ||
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index 46f325a143b0..d7f8e06910bc 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi | |||
| @@ -163,7 +163,7 @@ | |||
| 163 | }; | 163 | }; |
| 164 | 164 | ||
| 165 | amba { | 165 | amba { |
| 166 | compatible = "arm,amba-bus"; | 166 | compatible = "simple-bus"; |
| 167 | #address-cells = <2>; | 167 | #address-cells = <2>; |
| 168 | #size-cells = <2>; | 168 | #size-cells = <2>; |
| 169 | ranges; | 169 | ranges; |
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index 7a09c48c0475..579b6e654f2d 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h | |||
| @@ -160,14 +160,14 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, | |||
| 160 | #define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12)) | 160 | #define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12)) |
| 161 | #endif | 161 | #endif |
| 162 | 162 | ||
| 163 | #ifdef CONFIG_COMPAT | ||
| 164 | |||
| 165 | #ifdef __AARCH64EB__ | 163 | #ifdef __AARCH64EB__ |
| 166 | #define COMPAT_ELF_PLATFORM ("v8b") | 164 | #define COMPAT_ELF_PLATFORM ("v8b") |
| 167 | #else | 165 | #else |
| 168 | #define COMPAT_ELF_PLATFORM ("v8l") | 166 | #define COMPAT_ELF_PLATFORM ("v8l") |
| 169 | #endif | 167 | #endif |
| 170 | 168 | ||
| 169 | #ifdef CONFIG_COMPAT | ||
| 170 | |||
| 171 | #define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) | 171 | #define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) |
| 172 | 172 | ||
| 173 | /* AArch32 registers. */ | 173 | /* AArch32 registers. */ |
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h index f69f69c8120c..da84645525b9 100644 --- a/arch/arm64/include/asm/kgdb.h +++ b/arch/arm64/include/asm/kgdb.h | |||
| @@ -38,25 +38,54 @@ extern int kgdb_fault_expected; | |||
| 38 | #endif /* !__ASSEMBLY__ */ | 38 | #endif /* !__ASSEMBLY__ */ |
| 39 | 39 | ||
| 40 | /* | 40 | /* |
| 41 | * gdb is expecting the following registers layout. | 41 | * gdb remote protocol (well most versions of it) expects the following |
| 42 | * register layout. | ||
| 42 | * | 43 | * |
| 43 | * General purpose regs: | 44 | * General purpose regs: |
| 44 | * r0-r30: 64 bit | 45 | * r0-r30: 64 bit |
| 45 | * sp,pc : 64 bit | 46 | * sp,pc : 64 bit |
| 46 | * pstate : 64 bit | 47 | * pstate : 32 bit |
| 47 | * Total: 34 | 48 | * Total: 33 + 1 |
| 48 | * FPU regs: | 49 | * FPU regs: |
| 49 | * f0-f31: 128 bit | 50 | * f0-f31: 128 bit |
| 50 | * Total: 32 | ||
| 51 | * Extra regs | ||
| 52 | * fpsr & fpcr: 32 bit | 51 | * fpsr & fpcr: 32 bit |
| 53 | * Total: 2 | 52 | * Total: 32 + 2 |
| 54 | * | 53 | * |
| 54 | * To expand a little on the "most versions of it"... when the gdb remote | ||
| 55 | * protocol for AArch64 was developed it depended on a statement in the | ||
| 56 | * Architecture Reference Manual that claimed "SPSR_ELx is a 32-bit register". | ||
| 57 | * and, as a result, allocated only 32-bits for the PSTATE in the remote | ||
| 58 | * protocol. In fact this statement is still present in ARM DDI 0487A.i. | ||
| 59 | * | ||
| 60 | * Unfortunately "is a 32-bit register" has a very special meaning for | ||
| 61 | * system registers. It means that "the upper bits, bits[63:32], are | ||
| 62 | * RES0.". RES0 is heavily used in the ARM architecture documents as a | ||
| 63 | * way to leave space for future architecture changes. So to translate a | ||
| 64 | * little for people who don't spend their spare time reading ARM architecture | ||
| 65 | * manuals, what "is a 32-bit register" actually means in this context is | ||
| 66 | * "is a 64-bit register but one with no meaning allocated to any of the | ||
| 67 | * upper 32-bits... *yet*". | ||
| 68 | * | ||
| 69 | * Perhaps then we should not be surprised that this has led to some | ||
| 70 | * confusion. Specifically a patch, influenced by the above translation, | ||
| 71 | * that extended PSTATE to 64-bit was accepted into gdb-7.7 but the patch | ||
| 72 | * was reverted in gdb-7.8.1 and all later releases, when this was | ||
| 73 | * discovered to be an undocumented protocol change. | ||
| 74 | * | ||
| 75 | * So... it is *not* wrong for us to only allocate 32-bits to PSTATE | ||
| 76 | * here even though the kernel itself allocates 64-bits for the same | ||
| 77 | * state. That is because this bit of code tells the kernel how the gdb | ||
| 78 | * remote protocol (well most versions of it) describes the register state. | ||
| 79 | * | ||
| 80 | * Note that if you are using one of the versions of gdb that supports | ||
| 81 | * the gdb-7.7 version of the protocol you cannot use kgdb directly | ||
| 82 | * without providing a custom register description (gdb can load new | ||
| 83 | * protocol descriptions at runtime). | ||
| 55 | */ | 84 | */ |
| 56 | 85 | ||
| 57 | #define _GP_REGS 34 | 86 | #define _GP_REGS 33 |
| 58 | #define _FP_REGS 32 | 87 | #define _FP_REGS 32 |
| 59 | #define _EXTRA_REGS 2 | 88 | #define _EXTRA_REGS 3 |
| 60 | /* | 89 | /* |
| 61 | * general purpose registers size in bytes. | 90 | * general purpose registers size in bytes. |
| 62 | * pstate is only 4 bytes. subtract 4 bytes | 91 | * pstate is only 4 bytes. subtract 4 bytes |
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 72a3025bb583..31b73227b41f 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
| @@ -55,8 +55,9 @@ | |||
| 55 | #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)) | 55 | #define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)) |
| 56 | 56 | ||
| 57 | /* | 57 | /* |
| 58 | * PAGE_OFFSET - the virtual address of the start of the kernel image (top | 58 | * PAGE_OFFSET - the virtual address of the start of the linear map (top |
| 59 | * (VA_BITS - 1)) | 59 | * (VA_BITS - 1)) |
| 60 | * KIMAGE_VADDR - the virtual address of the start of the kernel image | ||
| 60 | * VA_BITS - the maximum number of bits for virtual addresses. | 61 | * VA_BITS - the maximum number of bits for virtual addresses. |
| 61 | * VA_START - the first kernel virtual address. | 62 | * VA_START - the first kernel virtual address. |
| 62 | * TASK_SIZE - the maximum size of a user space task. | 63 | * TASK_SIZE - the maximum size of a user space task. |
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 17b45f7d96d3..8472c6def5ef 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h | |||
| @@ -23,16 +23,8 @@ | |||
| 23 | 23 | ||
| 24 | /* PAGE_SHIFT determines the page size */ | 24 | /* PAGE_SHIFT determines the page size */ |
| 25 | /* CONT_SHIFT determines the number of pages which can be tracked together */ | 25 | /* CONT_SHIFT determines the number of pages which can be tracked together */ |
| 26 | #ifdef CONFIG_ARM64_64K_PAGES | 26 | #define PAGE_SHIFT CONFIG_ARM64_PAGE_SHIFT |
| 27 | #define PAGE_SHIFT 16 | 27 | #define CONT_SHIFT CONFIG_ARM64_CONT_SHIFT |
| 28 | #define CONT_SHIFT 5 | ||
| 29 | #elif defined(CONFIG_ARM64_16K_PAGES) | ||
| 30 | #define PAGE_SHIFT 14 | ||
| 31 | #define CONT_SHIFT 7 | ||
| 32 | #else | ||
| 33 | #define PAGE_SHIFT 12 | ||
| 34 | #define CONT_SHIFT 4 | ||
| 35 | #endif | ||
| 36 | #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) | 28 | #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) |
| 37 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 29 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
| 38 | 30 | ||
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index ff98585d085a..d25f4f137c2a 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h | |||
| @@ -26,7 +26,7 @@ | |||
| 26 | 26 | ||
| 27 | #define check_pgt_cache() do { } while (0) | 27 | #define check_pgt_cache() do { } while (0) |
| 28 | 28 | ||
| 29 | #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) | 29 | #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) |
| 30 | #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) | 30 | #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) |
| 31 | 31 | ||
| 32 | #if CONFIG_PGTABLE_LEVELS > 2 | 32 | #if CONFIG_PGTABLE_LEVELS > 2 |
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h index 433e50405274..022644704a93 100644 --- a/arch/arm64/include/asm/smp.h +++ b/arch/arm64/include/asm/smp.h | |||
| @@ -124,6 +124,18 @@ static inline void cpu_panic_kernel(void) | |||
| 124 | cpu_park_loop(); | 124 | cpu_park_loop(); |
| 125 | } | 125 | } |
| 126 | 126 | ||
| 127 | /* | ||
| 128 | * If a secondary CPU enters the kernel but fails to come online, | ||
| 129 | * (e.g. due to mismatched features), and cannot exit the kernel, | ||
| 130 | * we increment cpus_stuck_in_kernel and leave the CPU in a | ||
| 131 | * quiescent loop within the kernel text. The memory containing | ||
| 132 | * this loop must not be re-used for anything else as the 'stuck' | ||
| 133 | * core is executing it. | ||
| 134 | * | ||
| 135 | * This function is used to inhibit features like kexec and hibernate. | ||
| 136 | */ | ||
| 137 | bool cpus_are_stuck_in_kernel(void); | ||
| 138 | |||
| 127 | #endif /* ifndef __ASSEMBLY__ */ | 139 | #endif /* ifndef __ASSEMBLY__ */ |
| 128 | 140 | ||
| 129 | #endif /* ifndef __ASM_SMP_H */ | 141 | #endif /* ifndef __ASM_SMP_H */ |
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h index fc9682bfe002..e875a5a551d7 100644 --- a/arch/arm64/include/asm/spinlock.h +++ b/arch/arm64/include/asm/spinlock.h | |||
| @@ -30,22 +30,53 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) | |||
| 30 | { | 30 | { |
| 31 | unsigned int tmp; | 31 | unsigned int tmp; |
| 32 | arch_spinlock_t lockval; | 32 | arch_spinlock_t lockval; |
| 33 | u32 owner; | ||
| 34 | |||
| 35 | /* | ||
| 36 | * Ensure prior spin_lock operations to other locks have completed | ||
| 37 | * on this CPU before we test whether "lock" is locked. | ||
| 38 | */ | ||
| 39 | smp_mb(); | ||
| 40 | owner = READ_ONCE(lock->owner) << 16; | ||
| 33 | 41 | ||
| 34 | asm volatile( | 42 | asm volatile( |
| 35 | " sevl\n" | 43 | " sevl\n" |
| 36 | "1: wfe\n" | 44 | "1: wfe\n" |
| 37 | "2: ldaxr %w0, %2\n" | 45 | "2: ldaxr %w0, %2\n" |
| 46 | /* Is the lock free? */ | ||
| 38 | " eor %w1, %w0, %w0, ror #16\n" | 47 | " eor %w1, %w0, %w0, ror #16\n" |
| 39 | " cbnz %w1, 1b\n" | 48 | " cbz %w1, 3f\n" |
| 49 | /* Lock taken -- has there been a subsequent unlock->lock transition? */ | ||
| 50 | " eor %w1, %w3, %w0, lsl #16\n" | ||
| 51 | " cbz %w1, 1b\n" | ||
| 52 | /* | ||
| 53 | * The owner has been updated, so there was an unlock->lock | ||
| 54 | * transition that we missed. That means we can rely on the | ||
| 55 | * store-release of the unlock operation paired with the | ||
| 56 | * load-acquire of the lock operation to publish any of our | ||
| 57 | * previous stores to the new lock owner and therefore don't | ||
| 58 | * need to bother with the writeback below. | ||
| 59 | */ | ||
| 60 | " b 4f\n" | ||
| 61 | "3:\n" | ||
| 62 | /* | ||
| 63 | * Serialise against any concurrent lockers by writing back the | ||
| 64 | * unlocked lock value | ||
| 65 | */ | ||
| 40 | ARM64_LSE_ATOMIC_INSN( | 66 | ARM64_LSE_ATOMIC_INSN( |
| 41 | /* LL/SC */ | 67 | /* LL/SC */ |
| 42 | " stxr %w1, %w0, %2\n" | 68 | " stxr %w1, %w0, %2\n" |
| 43 | " cbnz %w1, 2b\n", /* Serialise against any concurrent lockers */ | ||
| 44 | /* LSE atomics */ | ||
| 45 | " nop\n" | 69 | " nop\n" |
| 46 | " nop\n") | 70 | " nop\n", |
| 71 | /* LSE atomics */ | ||
| 72 | " mov %w1, %w0\n" | ||
| 73 | " cas %w0, %w0, %2\n" | ||
| 74 | " eor %w1, %w1, %w0\n") | ||
| 75 | /* Somebody else wrote to the lock, GOTO 10 and reload the value */ | ||
| 76 | " cbnz %w1, 2b\n" | ||
| 77 | "4:" | ||
| 47 | : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock) | 78 | : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock) |
| 48 | : | 79 | : "r" (owner) |
| 49 | : "memory"); | 80 | : "memory"); |
| 50 | } | 81 | } |
| 51 | 82 | ||
| @@ -148,6 +179,7 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock) | |||
| 148 | 179 | ||
| 149 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) | 180 | static inline int arch_spin_is_locked(arch_spinlock_t *lock) |
| 150 | { | 181 | { |
| 182 | smp_mb(); /* See arch_spin_unlock_wait */ | ||
| 151 | return !arch_spin_value_unlocked(READ_ONCE(*lock)); | 183 | return !arch_spin_value_unlocked(READ_ONCE(*lock)); |
| 152 | } | 184 | } |
| 153 | 185 | ||
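
The spinlock.h rewrite above makes arch_spin_unlock_wait() snapshot the owner field up front and stop either when the lock is observed free or when the owner has moved on, i.e. an unlock->lock handover has happened; in the free case it writes the unlocked value back to serialise with concurrent lockers. A deliberately simplified, single-threaded model of the ticket encoding those checks operate on (no atomics or barriers, and it assumes the owner half sits in the low 16 bits, as on little-endian):

#include <stdio.h>
#include <stdint.h>

static int lock_is_free(uint32_t lockval)
{
	/* ticket lock is free when owner == next */
	return (uint16_t)lockval == (uint16_t)(lockval >> 16);
}

static int owner_has_changed(uint32_t lockval, uint16_t owner_snapshot)
{
	/* owner advanced => at least one unlock->lock transition happened */
	return (uint16_t)lockval != owner_snapshot;
}

int main(void)
{
	uint32_t lock = (2u << 16) | 1u;   /* next=2, owner=1: currently held */
	uint16_t snap = (uint16_t)lock;    /* owner snapshot taken before spinning */

	printf("held:     free=%d owner_changed=%d\n",
	       lock_is_free(lock), owner_has_changed(lock, snap));

	lock = (3u << 16) | 2u;            /* old owner unlocked, a new locker got in */
	printf("handover: free=%d owner_changed=%d\n",
	       lock_is_free(lock), owner_has_changed(lock, snap));
	return 0;
}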
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 0685d74572af..9e397a542756 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h | |||
| @@ -81,19 +81,6 @@ static inline void set_fs(mm_segment_t fs) | |||
| 81 | #define segment_eq(a, b) ((a) == (b)) | 81 | #define segment_eq(a, b) ((a) == (b)) |
| 82 | 82 | ||
| 83 | /* | 83 | /* |
| 84 | * Return 1 if addr < current->addr_limit, 0 otherwise. | ||
| 85 | */ | ||
| 86 | #define __addr_ok(addr) \ | ||
| 87 | ({ \ | ||
| 88 | unsigned long flag; \ | ||
| 89 | asm("cmp %1, %0; cset %0, lo" \ | ||
| 90 | : "=&r" (flag) \ | ||
| 91 | : "r" (addr), "0" (current_thread_info()->addr_limit) \ | ||
| 92 | : "cc"); \ | ||
| 93 | flag; \ | ||
| 94 | }) | ||
| 95 | |||
| 96 | /* | ||
| 97 | * Test whether a block of memory is a valid user space address. | 84 | * Test whether a block of memory is a valid user space address. |
| 98 | * Returns 1 if the range is valid, 0 otherwise. | 85 | * Returns 1 if the range is valid, 0 otherwise. |
| 99 | * | 86 | * |
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index 41e58fe3c041..e78ac26324bd 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h | |||
| @@ -44,7 +44,7 @@ | |||
| 44 | #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) | 44 | #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) |
| 45 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) | 45 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) |
| 46 | 46 | ||
| 47 | #define __NR_compat_syscalls 390 | 47 | #define __NR_compat_syscalls 394 |
| 48 | #endif | 48 | #endif |
| 49 | 49 | ||
| 50 | #define __ARCH_WANT_SYS_CLONE | 50 | #define __ARCH_WANT_SYS_CLONE |
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index 5b925b761a2a..b7e8ef16ff0d 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h | |||
| @@ -801,6 +801,14 @@ __SYSCALL(__NR_execveat, compat_sys_execveat) | |||
| 801 | __SYSCALL(__NR_userfaultfd, sys_userfaultfd) | 801 | __SYSCALL(__NR_userfaultfd, sys_userfaultfd) |
| 802 | #define __NR_membarrier 389 | 802 | #define __NR_membarrier 389 |
| 803 | __SYSCALL(__NR_membarrier, sys_membarrier) | 803 | __SYSCALL(__NR_membarrier, sys_membarrier) |
| 804 | #define __NR_mlock2 390 | ||
| 805 | __SYSCALL(__NR_mlock2, sys_mlock2) | ||
| 806 | #define __NR_copy_file_range 391 | ||
| 807 | __SYSCALL(__NR_copy_file_range, sys_copy_file_range) | ||
| 808 | #define __NR_preadv2 392 | ||
| 809 | __SYSCALL(__NR_preadv2, compat_sys_preadv2) | ||
| 810 | #define __NR_pwritev2 393 | ||
| 811 | __SYSCALL(__NR_pwritev2, compat_sys_pwritev2) | ||
| 804 | 812 | ||
| 805 | /* | 813 | /* |
| 806 | * Please add new compat syscalls above this comment and update | 814 | * Please add new compat syscalls above this comment and update |
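
The unistd32.h hunk wires up four new compat syscalls (390..393) and the unistd.h hunk bumps __NR_compat_syscalls to match. A trivial check of the bookkeeping, assuming the count is defined as highest syscall number plus one:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int highest_before = 389;                 /* membarrier */
	int added[] = { 390, 391, 392, 393 };     /* mlock2 .. pwritev2 */
	int highest_after = added[sizeof(added) / sizeof(added[0]) - 1];

	assert(highest_before + 1 == 390);        /* old __NR_compat_syscalls */
	assert(highest_after + 1 == 394);         /* new __NR_compat_syscalls */
	printf("__NR_compat_syscalls: %d -> %d\n",
	       highest_before + 1, highest_after + 1);
	return 0;
}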
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 3808470486f3..c173d329397f 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c | |||
| @@ -22,6 +22,8 @@ | |||
| 22 | 22 | ||
| 23 | #include <linux/bitops.h> | 23 | #include <linux/bitops.h> |
| 24 | #include <linux/bug.h> | 24 | #include <linux/bug.h> |
| 25 | #include <linux/compat.h> | ||
| 26 | #include <linux/elf.h> | ||
| 25 | #include <linux/init.h> | 27 | #include <linux/init.h> |
| 26 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
| 27 | #include <linux/personality.h> | 29 | #include <linux/personality.h> |
| @@ -104,6 +106,7 @@ static const char *const compat_hwcap2_str[] = { | |||
| 104 | static int c_show(struct seq_file *m, void *v) | 106 | static int c_show(struct seq_file *m, void *v) |
| 105 | { | 107 | { |
| 106 | int i, j; | 108 | int i, j; |
| 109 | bool compat = personality(current->personality) == PER_LINUX32; | ||
| 107 | 110 | ||
| 108 | for_each_online_cpu(i) { | 111 | for_each_online_cpu(i) { |
| 109 | struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); | 112 | struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); |
| @@ -115,6 +118,9 @@ static int c_show(struct seq_file *m, void *v) | |||
| 115 | * "processor". Give glibc what it expects. | 118 | * "processor". Give glibc what it expects. |
| 116 | */ | 119 | */ |
| 117 | seq_printf(m, "processor\t: %d\n", i); | 120 | seq_printf(m, "processor\t: %d\n", i); |
| 121 | if (compat) | ||
| 122 | seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n", | ||
| 123 | MIDR_REVISION(midr), COMPAT_ELF_PLATFORM); | ||
| 118 | 124 | ||
| 119 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", | 125 | seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", |
| 120 | loops_per_jiffy / (500000UL/HZ), | 126 | loops_per_jiffy / (500000UL/HZ), |
| @@ -127,7 +133,7 @@ static int c_show(struct seq_file *m, void *v) | |||
| 127 | * software which does already (at least for 32-bit). | 133 | * software which does already (at least for 32-bit). |
| 128 | */ | 134 | */ |
| 129 | seq_puts(m, "Features\t:"); | 135 | seq_puts(m, "Features\t:"); |
| 130 | if (personality(current->personality) == PER_LINUX32) { | 136 | if (compat) { |
| 131 | #ifdef CONFIG_COMPAT | 137 | #ifdef CONFIG_COMPAT |
| 132 | for (j = 0; compat_hwcap_str[j]; j++) | 138 | for (j = 0; compat_hwcap_str[j]; j++) |
| 133 | if (compat_elf_hwcap & (1 << j)) | 139 | if (compat_elf_hwcap & (1 << j)) |
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index f8df75d740f4..21ab5df9fa76 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <asm/pgtable.h> | 33 | #include <asm/pgtable.h> |
| 34 | #include <asm/pgtable-hwdef.h> | 34 | #include <asm/pgtable-hwdef.h> |
| 35 | #include <asm/sections.h> | 35 | #include <asm/sections.h> |
| 36 | #include <asm/smp.h> | ||
| 36 | #include <asm/suspend.h> | 37 | #include <asm/suspend.h> |
| 37 | #include <asm/virt.h> | 38 | #include <asm/virt.h> |
| 38 | 39 | ||
| @@ -236,6 +237,11 @@ int swsusp_arch_suspend(void) | |||
| 236 | unsigned long flags; | 237 | unsigned long flags; |
| 237 | struct sleep_stack_data state; | 238 | struct sleep_stack_data state; |
| 238 | 239 | ||
| 240 | if (cpus_are_stuck_in_kernel()) { | ||
| 241 | pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n"); | ||
| 242 | return -EBUSY; | ||
| 243 | } | ||
| 244 | |||
| 239 | local_dbg_save(flags); | 245 | local_dbg_save(flags); |
| 240 | 246 | ||
| 241 | if (__cpu_suspend_enter(&state)) { | 247 | if (__cpu_suspend_enter(&state)) { |
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c index b67531a13136..b5f063e5eff7 100644 --- a/arch/arm64/kernel/kgdb.c +++ b/arch/arm64/kernel/kgdb.c | |||
| @@ -58,7 +58,17 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { | |||
| 58 | { "x30", 8, offsetof(struct pt_regs, regs[30])}, | 58 | { "x30", 8, offsetof(struct pt_regs, regs[30])}, |
| 59 | { "sp", 8, offsetof(struct pt_regs, sp)}, | 59 | { "sp", 8, offsetof(struct pt_regs, sp)}, |
| 60 | { "pc", 8, offsetof(struct pt_regs, pc)}, | 60 | { "pc", 8, offsetof(struct pt_regs, pc)}, |
| 61 | { "pstate", 8, offsetof(struct pt_regs, pstate)}, | 61 | /* |
| 62 | * struct pt_regs thinks PSTATE is 64-bits wide but gdb remote | ||
| 63 | * protocol disagrees. Therefore we must extract only the lower | ||
| 64 | * 32-bits. Look for the big comment in asm/kgdb.h for more | ||
| 65 | * detail. | ||
| 66 | */ | ||
| 67 | { "pstate", 4, offsetof(struct pt_regs, pstate) | ||
| 68 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
| 69 | + 4 | ||
| 70 | #endif | ||
| 71 | }, | ||
| 62 | { "v0", 16, -1 }, | 72 | { "v0", 16, -1 }, |
| 63 | { "v1", 16, -1 }, | 73 | { "v1", 16, -1 }, |
| 64 | { "v2", 16, -1 }, | 74 | { "v2", 16, -1 }, |
| @@ -128,6 +138,8 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) | |||
| 128 | memset((char *)gdb_regs, 0, NUMREGBYTES); | 138 | memset((char *)gdb_regs, 0, NUMREGBYTES); |
| 129 | thread_regs = task_pt_regs(task); | 139 | thread_regs = task_pt_regs(task); |
| 130 | memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES); | 140 | memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES); |
| 141 | /* Special case for PSTATE (check comments in asm/kgdb.h for details) */ | ||
| 142 | dbg_get_reg(33, gdb_regs + GP_REG_BYTES, thread_regs); | ||
| 131 | } | 143 | } |
| 132 | 144 | ||
| 133 | void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) | 145 | void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) |
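
The kgdb.c table above now describes pstate as a 4-byte register and, on big-endian kernels, adds 4 to the offset so the low-order 32 bits of the kernel's 64-bit pstate are the ones handed to gdb. A sketch of why that extra offset depends on endianness (the PSTATE value is an arbitrary example):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t low32_at_offset(const uint64_t *pstate, size_t extra_offset)
{
	uint32_t out;

	/* read 4 bytes starting at the adjusted offset inside the u64 */
	memcpy(&out, (const char *)pstate + extra_offset, sizeof(out));
	return out;
}

int main(void)
{
	uint64_t pstate = 0x00000000600003c5ULL;          /* example value */
	union { uint16_t u; uint8_t b[2]; } probe = { .u = 1 };
	size_t extra = probe.b[0] ? 0 : 4;                /* 0 on LE, 4 on BE */

	printf("low 32 bits of pstate: 0x%08x\n", low32_at_offset(&pstate, extra));
	return 0;
}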
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 678e0842cb3b..62ff3c0622e2 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c | |||
| @@ -909,3 +909,21 @@ int setup_profiling_timer(unsigned int multiplier) | |||
| 909 | { | 909 | { |
| 910 | return -EINVAL; | 910 | return -EINVAL; |
| 911 | } | 911 | } |
| 912 | |||
| 913 | static bool have_cpu_die(void) | ||
| 914 | { | ||
| 915 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 916 | int any_cpu = raw_smp_processor_id(); | ||
| 917 | |||
| 918 | if (cpu_ops[any_cpu]->cpu_die) | ||
| 919 | return true; | ||
| 920 | #endif | ||
| 921 | return false; | ||
| 922 | } | ||
| 923 | |||
| 924 | bool cpus_are_stuck_in_kernel(void) | ||
| 925 | { | ||
| 926 | bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die()); | ||
| 927 | |||
| 928 | return !!cpus_stuck_in_kernel || smp_spin_tables; | ||
| 929 | } | ||
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index c5392081b49b..2a43012616b7 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c | |||
| @@ -64,8 +64,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom, | |||
| 64 | 64 | ||
| 65 | /* | 65 | /* |
| 66 | * We need to switch to kernel mode so that we can use __get_user | 66 | * We need to switch to kernel mode so that we can use __get_user |
| 67 | * to safely read from kernel space. Note that we now dump the | 67 | * to safely read from kernel space. |
| 68 | * code first, just in case the backtrace kills us. | ||
| 69 | */ | 68 | */ |
| 70 | fs = get_fs(); | 69 | fs = get_fs(); |
| 71 | set_fs(KERNEL_DS); | 70 | set_fs(KERNEL_DS); |
| @@ -111,21 +110,12 @@ static void dump_backtrace_entry(unsigned long where) | |||
| 111 | print_ip_sym(where); | 110 | print_ip_sym(where); |
| 112 | } | 111 | } |
| 113 | 112 | ||
| 114 | static void dump_instr(const char *lvl, struct pt_regs *regs) | 113 | static void __dump_instr(const char *lvl, struct pt_regs *regs) |
| 115 | { | 114 | { |
| 116 | unsigned long addr = instruction_pointer(regs); | 115 | unsigned long addr = instruction_pointer(regs); |
| 117 | mm_segment_t fs; | ||
| 118 | char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; | 116 | char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str; |
| 119 | int i; | 117 | int i; |
| 120 | 118 | ||
| 121 | /* | ||
| 122 | * We need to switch to kernel mode so that we can use __get_user | ||
| 123 | * to safely read from kernel space. Note that we now dump the | ||
| 124 | * code first, just in case the backtrace kills us. | ||
| 125 | */ | ||
| 126 | fs = get_fs(); | ||
| 127 | set_fs(KERNEL_DS); | ||
| 128 | |||
| 129 | for (i = -4; i < 1; i++) { | 119 | for (i = -4; i < 1; i++) { |
| 130 | unsigned int val, bad; | 120 | unsigned int val, bad; |
| 131 | 121 | ||
| @@ -139,8 +129,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) | |||
| 139 | } | 129 | } |
| 140 | } | 130 | } |
| 141 | printk("%sCode: %s\n", lvl, str); | 131 | printk("%sCode: %s\n", lvl, str); |
| 132 | } | ||
| 142 | 133 | ||
| 143 | set_fs(fs); | 134 | static void dump_instr(const char *lvl, struct pt_regs *regs) |
| 135 | { | ||
| 136 | if (!user_mode(regs)) { | ||
| 137 | mm_segment_t fs = get_fs(); | ||
| 138 | set_fs(KERNEL_DS); | ||
| 139 | __dump_instr(lvl, regs); | ||
| 140 | set_fs(fs); | ||
| 141 | } else { | ||
| 142 | __dump_instr(lvl, regs); | ||
| 143 | } | ||
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) | 146 | static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) |
| @@ -477,8 +477,9 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) | |||
| 477 | void __user *pc = (void __user *)instruction_pointer(regs); | 477 | void __user *pc = (void __user *)instruction_pointer(regs); |
| 478 | console_verbose(); | 478 | console_verbose(); |
| 479 | 479 | ||
| 480 | pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n", | 480 | pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n", |
| 481 | handler[reason], esr, esr_get_class_string(esr)); | 481 | handler[reason], smp_processor_id(), esr, |
| 482 | esr_get_class_string(esr)); | ||
| 482 | __show_regs(regs); | 483 | __show_regs(regs); |
| 483 | 484 | ||
| 484 | info.si_signo = SIGILL; | 485 | info.si_signo = SIGILL; |
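
The traps.c refactor splits dump_instr() so the get_fs()/set_fs() switch to KERNEL_DS happens only when the trapping context was kernel mode, leaving user-mode dumps under the user address limit. A sketch of that save/override/restore wrapper pattern, with a plain variable standing in for the thread's address limit:

#include <stdio.h>

enum seg { USER_DS, KERNEL_DS };

static enum seg current_fs = USER_DS;   /* stand-in for the thread's addr limit */

static void __dump_instr(const char *lvl)
{
	printf("%sreading code bytes with the %s address limit\n", lvl,
	       current_fs == KERNEL_DS ? "kernel" : "user");
}

static void dump_instr(const char *lvl, int user_mode)
{
	if (!user_mode) {
		enum seg fs = current_fs;  /* save */

		current_fs = KERNEL_DS;    /* widen only for kernel-mode dumps */
		__dump_instr(lvl);
		current_fs = fs;           /* restore */
	} else {
		__dump_instr(lvl);         /* user dumps keep the user limit */
	}
}

int main(void)
{
	dump_instr("KERN: ", 0);
	dump_instr("USER: ", 1);
	return 0;
}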
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c index fff7cd42b3a3..5f8f80b4a224 100644 --- a/arch/arm64/kvm/hyp/vgic-v3-sr.c +++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c | |||
| @@ -169,7 +169,8 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) | |||
| 169 | * Make sure stores to the GIC via the memory mapped interface | 169 | * Make sure stores to the GIC via the memory mapped interface |
| 170 | * are now visible to the system register interface. | 170 | * are now visible to the system register interface. |
| 171 | */ | 171 | */ |
| 172 | dsb(st); | 172 | if (!cpu_if->vgic_sre) |
| 173 | dsb(st); | ||
| 173 | 174 | ||
| 174 | cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); | 175 | cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2); |
| 175 | 176 | ||
| @@ -190,12 +191,11 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) | |||
| 190 | if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) | 191 | if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) |
| 191 | continue; | 192 | continue; |
| 192 | 193 | ||
| 193 | if (cpu_if->vgic_elrsr & (1 << i)) { | 194 | if (cpu_if->vgic_elrsr & (1 << i)) |
| 194 | cpu_if->vgic_lr[i] &= ~ICH_LR_STATE; | 195 | cpu_if->vgic_lr[i] &= ~ICH_LR_STATE; |
| 195 | continue; | 196 | else |
| 196 | } | 197 | cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); |
| 197 | 198 | ||
| 198 | cpu_if->vgic_lr[i] = __gic_v3_get_lr(i); | ||
| 199 | __gic_v3_set_lr(0, i); | 199 | __gic_v3_set_lr(0, i); |
| 200 | } | 200 | } |
| 201 | 201 | ||
| @@ -236,8 +236,12 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu) | |||
| 236 | 236 | ||
| 237 | val = read_gicreg(ICC_SRE_EL2); | 237 | val = read_gicreg(ICC_SRE_EL2); |
| 238 | write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2); | 238 | write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2); |
| 239 | isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */ | 239 | |
| 240 | write_gicreg(1, ICC_SRE_EL1); | 240 | if (!cpu_if->vgic_sre) { |
| 241 | /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */ | ||
| 242 | isb(); | ||
| 243 | write_gicreg(1, ICC_SRE_EL1); | ||
| 244 | } | ||
| 241 | } | 245 | } |
| 242 | 246 | ||
| 243 | void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) | 247 | void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) |
| @@ -256,8 +260,10 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) | |||
| 256 | * been actually programmed with the value we want before | 260 | * been actually programmed with the value we want before |
| 257 | * starting to mess with the rest of the GIC. | 261 | * starting to mess with the rest of the GIC. |
| 258 | */ | 262 | */ |
| 259 | write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1); | 263 | if (!cpu_if->vgic_sre) { |
| 260 | isb(); | 264 | write_gicreg(0, ICC_SRE_EL1); |
| 265 | isb(); | ||
| 266 | } | ||
| 261 | 267 | ||
| 262 | val = read_gicreg(ICH_VTR_EL2); | 268 | val = read_gicreg(ICH_VTR_EL2); |
| 263 | max_lr_idx = vtr_to_max_lr_idx(val); | 269 | max_lr_idx = vtr_to_max_lr_idx(val); |
| @@ -306,18 +312,18 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu) | |||
| 306 | * (re)distributors. This ensures the guest will read the | 312 | * (re)distributors. This ensures the guest will read the |
| 307 | * correct values from the memory-mapped interface. | 313 | * correct values from the memory-mapped interface. |
| 308 | */ | 314 | */ |
| 309 | isb(); | 315 | if (!cpu_if->vgic_sre) { |
| 310 | dsb(sy); | 316 | isb(); |
| 317 | dsb(sy); | ||
| 318 | } | ||
| 311 | vcpu->arch.vgic_cpu.live_lrs = live_lrs; | 319 | vcpu->arch.vgic_cpu.live_lrs = live_lrs; |
| 312 | 320 | ||
| 313 | /* | 321 | /* |
| 314 | * Prevent the guest from touching the GIC system registers if | 322 | * Prevent the guest from touching the GIC system registers if |
| 315 | * SRE isn't enabled for GICv3 emulation. | 323 | * SRE isn't enabled for GICv3 emulation. |
| 316 | */ | 324 | */ |
| 317 | if (!cpu_if->vgic_sre) { | 325 | write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE, |
| 318 | write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE, | 326 | ICC_SRE_EL2); |
| 319 | ICC_SRE_EL2); | ||
| 320 | } | ||
| 321 | } | 327 | } |
| 322 | 328 | ||
| 323 | void __hyp_text __vgic_v3_init_lrs(void) | 329 | void __hyp_text __vgic_v3_init_lrs(void) |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 7bbe3ff02602..a57d650f552c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
| @@ -134,6 +134,17 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu, | |||
| 134 | return true; | 134 | return true; |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | static bool access_gic_sre(struct kvm_vcpu *vcpu, | ||
| 138 | struct sys_reg_params *p, | ||
| 139 | const struct sys_reg_desc *r) | ||
| 140 | { | ||
| 141 | if (p->is_write) | ||
| 142 | return ignore_write(vcpu, p); | ||
| 143 | |||
| 144 | p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre; | ||
| 145 | return true; | ||
| 146 | } | ||
| 147 | |||
| 137 | static bool trap_raz_wi(struct kvm_vcpu *vcpu, | 148 | static bool trap_raz_wi(struct kvm_vcpu *vcpu, |
| 138 | struct sys_reg_params *p, | 149 | struct sys_reg_params *p, |
| 139 | const struct sys_reg_desc *r) | 150 | const struct sys_reg_desc *r) |
| @@ -958,7 +969,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { | |||
| 958 | access_gic_sgi }, | 969 | access_gic_sgi }, |
| 959 | /* ICC_SRE_EL1 */ | 970 | /* ICC_SRE_EL1 */ |
| 960 | { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101), | 971 | { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101), |
| 961 | trap_raz_wi }, | 972 | access_gic_sre }, |
| 962 | 973 | ||
| 963 | /* CONTEXTIDR_EL1 */ | 974 | /* CONTEXTIDR_EL1 */ |
| 964 | { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), | 975 | { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), |
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index b7b397802088..efcf1f7ef1e4 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c | |||
| @@ -179,7 +179,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |||
| 179 | &asid_generation); | 179 | &asid_generation); |
| 180 | flush_context(cpu); | 180 | flush_context(cpu); |
| 181 | 181 | ||
| 182 | /* We have at least 1 ASID per CPU, so this will always succeed */ | 182 | /* We have more ASIDs than CPUs, so this will always succeed */ |
| 183 | asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); | 183 | asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); |
| 184 | 184 | ||
| 185 | set_asid: | 185 | set_asid: |
| @@ -227,8 +227,11 @@ switch_mm_fastpath: | |||
| 227 | static int asids_init(void) | 227 | static int asids_init(void) |
| 228 | { | 228 | { |
| 229 | asid_bits = get_cpu_asid_bits(); | 229 | asid_bits = get_cpu_asid_bits(); |
| 230 | /* If we end up with more CPUs than ASIDs, expect things to crash */ | 230 | /* |
| 231 | WARN_ON(NUM_USER_ASIDS < num_possible_cpus()); | 231 | * Expect allocation after rollover to fail if we don't have at least |
| 232 | * one more ASID than CPUs. ASID #0 is reserved for init_mm. | ||
| 233 | */ | ||
| 234 | WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus()); | ||
| 232 | atomic64_set(&asid_generation, ASID_FIRST_VERSION); | 235 | atomic64_set(&asid_generation, ASID_FIRST_VERSION); |
| 233 | asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map), | 236 | asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map), |
| 234 | GFP_KERNEL); | 237 | GFP_KERNEL); |
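
The context.c hunk above tightens the boot-time sanity check: with ASID #0 reserved for init_mm and, after a rollover, up to one ASID pinned per running CPU, a fresh allocation is only guaranteed when the usable ASIDs strictly outnumber the CPUs. A rough arithmetic sketch, with made-up values standing in for the real NUM_USER_ASIDS and num_possible_cpus():

#include <stdio.h>

int main(void)
{
	unsigned long num_user_asids = 1UL << 8;	/* assumed 8-bit ASID space */
	unsigned long num_possible_cpus = 255;		/* assumed CPU count */

	/* old check: warned only when CPUs outnumbered the whole ASID space */
	int old_warn = num_user_asids < num_possible_cpus;

	/* new check: also accounts for the reserved ASID #0 and one spare */
	int new_warn = num_user_asids - 1 <= num_possible_cpus;

	printf("old WARN_ON fires: %d\n", old_warn);	/* 0: looks fine */
	printf("new WARN_ON fires: %d\n", new_warn);	/* 1: rollover can fail */
	return 0;
}
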
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index 8404190fe2bd..ccfde237d6e6 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c | |||
| @@ -150,6 +150,7 @@ static const struct prot_bits pte_bits[] = { | |||
| 150 | 150 | ||
| 151 | struct pg_level { | 151 | struct pg_level { |
| 152 | const struct prot_bits *bits; | 152 | const struct prot_bits *bits; |
| 153 | const char *name; | ||
| 153 | size_t num; | 154 | size_t num; |
| 154 | u64 mask; | 155 | u64 mask; |
| 155 | }; | 156 | }; |
| @@ -157,15 +158,19 @@ struct pg_level { | |||
| 157 | static struct pg_level pg_level[] = { | 158 | static struct pg_level pg_level[] = { |
| 158 | { | 159 | { |
| 159 | }, { /* pgd */ | 160 | }, { /* pgd */ |
| 161 | .name = "PGD", | ||
| 160 | .bits = pte_bits, | 162 | .bits = pte_bits, |
| 161 | .num = ARRAY_SIZE(pte_bits), | 163 | .num = ARRAY_SIZE(pte_bits), |
| 162 | }, { /* pud */ | 164 | }, { /* pud */ |
| 165 | .name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD", | ||
| 163 | .bits = pte_bits, | 166 | .bits = pte_bits, |
| 164 | .num = ARRAY_SIZE(pte_bits), | 167 | .num = ARRAY_SIZE(pte_bits), |
| 165 | }, { /* pmd */ | 168 | }, { /* pmd */ |
| 169 | .name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD", | ||
| 166 | .bits = pte_bits, | 170 | .bits = pte_bits, |
| 167 | .num = ARRAY_SIZE(pte_bits), | 171 | .num = ARRAY_SIZE(pte_bits), |
| 168 | }, { /* pte */ | 172 | }, { /* pte */ |
| 173 | .name = "PTE", | ||
| 169 | .bits = pte_bits, | 174 | .bits = pte_bits, |
| 170 | .num = ARRAY_SIZE(pte_bits), | 175 | .num = ARRAY_SIZE(pte_bits), |
| 171 | }, | 176 | }, |
| @@ -214,7 +219,8 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, | |||
| 214 | delta >>= 10; | 219 | delta >>= 10; |
| 215 | unit++; | 220 | unit++; |
| 216 | } | 221 | } |
| 217 | seq_printf(st->seq, "%9lu%c", delta, *unit); | 222 | seq_printf(st->seq, "%9lu%c %s", delta, *unit, |
| 223 | pg_level[st->level].name); | ||
| 218 | if (pg_level[st->level].bits) | 224 | if (pg_level[st->level].bits) |
| 219 | dump_prot(st, pg_level[st->level].bits, | 225 | dump_prot(st, pg_level[st->level].bits, |
| 220 | pg_level[st->level].num); | 226 | pg_level[st->level].num); |
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 5954881a35ac..013e2cbe7924 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
| @@ -109,7 +109,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, | |||
| 109 | * PTE_RDONLY is cleared by default in the asm below, so set it | 109 | * PTE_RDONLY is cleared by default in the asm below, so set it |
| 110 | * back if necessary (read-only or clean PTE). | 110 | * back if necessary (read-only or clean PTE). |
| 111 | */ | 111 | */ |
| 112 | if (!pte_write(entry) || !dirty) | 112 | if (!pte_write(entry) || !pte_sw_dirty(entry)) |
| 113 | pte_val(entry) |= PTE_RDONLY; | 113 | pte_val(entry) |= PTE_RDONLY; |
| 114 | 114 | ||
| 115 | /* | 115 | /* |
| @@ -441,7 +441,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) | |||
| 441 | return 1; | 441 | return 1; |
| 442 | } | 442 | } |
| 443 | 443 | ||
| 444 | static struct fault_info { | 444 | static const struct fault_info { |
| 445 | int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs); | 445 | int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs); |
| 446 | int sig; | 446 | int sig; |
| 447 | int code; | 447 | int code; |
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index dbd12ea8ce68..43a76b07eb32 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c | |||
| @@ -71,10 +71,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr) | |||
| 71 | { | 71 | { |
| 72 | struct page *page = pte_page(pte); | 72 | struct page *page = pte_page(pte); |
| 73 | 73 | ||
| 74 | /* no flushing needed for anonymous pages */ | ||
| 75 | if (!page_mapping(page)) | ||
| 76 | return; | ||
| 77 | |||
| 78 | if (!test_and_set_bit(PG_dcache_clean, &page->flags)) | 74 | if (!test_and_set_bit(PG_dcache_clean, &page->flags)) |
| 79 | sync_icache_aliases(page_address(page), | 75 | sync_icache_aliases(page_address(page), |
| 80 | PAGE_SIZE << compound_order(page)); | 76 | PAGE_SIZE << compound_order(page)); |
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index aa8aee7d6929..2e49bd252fe7 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c | |||
| @@ -306,6 +306,10 @@ static __init int setup_hugepagesz(char *opt) | |||
| 306 | hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); | 306 | hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); |
| 307 | } else if (ps == PUD_SIZE) { | 307 | } else if (ps == PUD_SIZE) { |
| 308 | hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); | 308 | hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); |
| 309 | } else if (ps == (PAGE_SIZE * CONT_PTES)) { | ||
| 310 | hugetlb_add_hstate(CONT_PTE_SHIFT); | ||
| 311 | } else if (ps == (PMD_SIZE * CONT_PMDS)) { | ||
| 312 | hugetlb_add_hstate((PMD_SHIFT + CONT_PMD_SHIFT) - PAGE_SHIFT); | ||
| 309 | } else { | 313 | } else { |
| 310 | hugetlb_bad_size(); | 314 | hugetlb_bad_size(); |
| 311 | pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10); | 315 | pr_err("hugepagesz: Unsupported page size %lu K\n", ps >> 10); |
| @@ -314,3 +318,13 @@ static __init int setup_hugepagesz(char *opt) | |||
| 314 | return 1; | 318 | return 1; |
| 315 | } | 319 | } |
| 316 | __setup("hugepagesz=", setup_hugepagesz); | 320 | __setup("hugepagesz=", setup_hugepagesz); |
| 321 | |||
| 322 | #ifdef CONFIG_ARM64_64K_PAGES | ||
| 323 | static __init int add_default_hugepagesz(void) | ||
| 324 | { | ||
| 325 | if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL) | ||
| 326 | hugetlb_add_hstate(CONT_PMD_SHIFT); | ||
| 327 | return 0; | ||
| 328 | } | ||
| 329 | arch_initcall(add_default_hugepagesz); | ||
| 330 | #endif | ||
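
The hugetlbpage.c hunk above teaches hugepagesz= about the two contiguous-hint sizes and registers the CONT_PTE size as a default for 64K-page kernels. Back-of-the-envelope arithmetic for those sizes, using the usual arm64 64K-page constants as assumptions (the authoritative values live in the kernel headers, not here):

#include <stdio.h>

int main(void)
{
	unsigned long long page_size = 64ULL << 10;	/* 64K base pages (assumed) */
	unsigned long long cont_ptes = 32;		/* CONT_PTES (assumed) */
	unsigned long long pmd_size  = 512ULL << 20;	/* PMD span with 64K pages (assumed) */
	unsigned long long cont_pmds = 32;		/* CONT_PMDS (assumed) */

	/* ps == PAGE_SIZE * CONT_PTES */
	printf("contiguous PTE hugepage: %llu MB\n", (page_size * cont_ptes) >> 20);
	/* ps == PMD_SIZE * CONT_PMDS */
	printf("contiguous PMD hugepage: %llu GB\n", (pmd_size * cont_pmds) >> 30);
	return 0;
}
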
diff --git a/arch/avr32/include/asm/pgalloc.h b/arch/avr32/include/asm/pgalloc.h index 1aba19d68c5e..db039cb368be 100644 --- a/arch/avr32/include/asm/pgalloc.h +++ b/arch/avr32/include/asm/pgalloc.h | |||
| @@ -43,7 +43,7 @@ static inline void pgd_ctor(void *x) | |||
| 43 | */ | 43 | */ |
| 44 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) | 44 | static inline pgd_t *pgd_alloc(struct mm_struct *mm) |
| 45 | { | 45 | { |
| 46 | return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor); | 46 | return quicklist_alloc(QUICK_PGD, GFP_KERNEL, pgd_ctor); |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | 49 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) |
| @@ -54,7 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
| 54 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 54 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
| 55 | unsigned long address) | 55 | unsigned long address) |
| 56 | { | 56 | { |
| 57 | return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); | 57 | return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL); |
| 58 | } | 58 | } |
| 59 | 59 | ||
| 60 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, | 60 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, |
| @@ -63,7 +63,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, | |||
| 63 | struct page *page; | 63 | struct page *page; |
| 64 | void *pg; | 64 | void *pg; |
| 65 | 65 | ||
| 66 | pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); | 66 | pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL); |
| 67 | if (!pg) | 67 | if (!pg) |
| 68 | return NULL; | 68 | return NULL; |
| 69 | 69 | ||
diff --git a/arch/cris/include/asm/pgalloc.h b/arch/cris/include/asm/pgalloc.h index 235ece437ddd..42f1affb9c2d 100644 --- a/arch/cris/include/asm/pgalloc.h +++ b/arch/cris/include/asm/pgalloc.h | |||
| @@ -24,14 +24,14 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
| 24 | 24 | ||
| 25 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | 25 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) |
| 26 | { | 26 | { |
| 27 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); | 27 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); |
| 28 | return pte; | 28 | return pte; |
| 29 | } | 29 | } |
| 30 | 30 | ||
| 31 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) | 31 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) |
| 32 | { | 32 | { |
| 33 | struct page *pte; | 33 | struct page *pte; |
| 34 | pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); | 34 | pte = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0); |
| 35 | if (!pte) | 35 | if (!pte) |
| 36 | return NULL; | 36 | return NULL; |
| 37 | if (!pgtable_page_ctor(pte)) { | 37 | if (!pgtable_page_ctor(pte)) { |
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c index 41907d25ed38..c9ed14f6c67d 100644 --- a/arch/frv/mm/pgalloc.c +++ b/arch/frv/mm/pgalloc.c | |||
| @@ -22,7 +22,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE))); | |||
| 22 | 22 | ||
| 23 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | 23 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) |
| 24 | { | 24 | { |
| 25 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); | 25 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL); |
| 26 | if (pte) | 26 | if (pte) |
| 27 | clear_page(pte); | 27 | clear_page(pte); |
| 28 | return pte; | 28 | return pte; |
| @@ -33,9 +33,9 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) | |||
| 33 | struct page *page; | 33 | struct page *page; |
| 34 | 34 | ||
| 35 | #ifdef CONFIG_HIGHPTE | 35 | #ifdef CONFIG_HIGHPTE |
| 36 | page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0); | 36 | page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0); |
| 37 | #else | 37 | #else |
| 38 | page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); | 38 | page = alloc_pages(GFP_KERNEL, 0); |
| 39 | #endif | 39 | #endif |
| 40 | if (!page) | 40 | if (!page) |
| 41 | return NULL; | 41 | return NULL; |
diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h index 77da3b0ae3c2..eeebf862c46c 100644 --- a/arch/hexagon/include/asm/pgalloc.h +++ b/arch/hexagon/include/asm/pgalloc.h | |||
| @@ -64,7 +64,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, | |||
| 64 | { | 64 | { |
| 65 | struct page *pte; | 65 | struct page *pte; |
| 66 | 66 | ||
| 67 | pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); | 67 | pte = alloc_page(GFP_KERNEL | __GFP_ZERO); |
| 68 | if (!pte) | 68 | if (!pte) |
| 69 | return NULL; | 69 | return NULL; |
| 70 | if (!pgtable_page_ctor(pte)) { | 70 | if (!pgtable_page_ctor(pte)) { |
| @@ -78,7 +78,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, | |||
| 78 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 78 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
| 79 | unsigned long address) | 79 | unsigned long address) |
| 80 | { | 80 | { |
| 81 | gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO; | 81 | gfp_t flags = GFP_KERNEL | __GFP_ZERO; |
| 82 | return (pte_t *) __get_free_page(flags); | 82 | return (pte_t *) __get_free_page(flags); |
| 83 | } | 83 | } |
| 84 | 84 | ||
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index f80758cb7157..e109ee95e919 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
| @@ -45,7 +45,7 @@ config IA64 | |||
| 45 | select GENERIC_SMP_IDLE_THREAD | 45 | select GENERIC_SMP_IDLE_THREAD |
| 46 | select ARCH_INIT_TASK | 46 | select ARCH_INIT_TASK |
| 47 | select ARCH_TASK_STRUCT_ALLOCATOR | 47 | select ARCH_TASK_STRUCT_ALLOCATOR |
| 48 | select ARCH_THREAD_INFO_ALLOCATOR | 48 | select ARCH_THREAD_STACK_ALLOCATOR |
| 49 | select ARCH_CLOCKSOURCE_DATA | 49 | select ARCH_CLOCKSOURCE_DATA |
| 50 | select GENERIC_TIME_VSYSCALL_OLD | 50 | select GENERIC_TIME_VSYSCALL_OLD |
| 51 | select SYSCTL_ARCH_UNALIGN_NO_WARN | 51 | select SYSCTL_ARCH_UNALIGN_NO_WARN |
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h index aa995b67c3f5..d1212b84fb83 100644 --- a/arch/ia64/include/asm/thread_info.h +++ b/arch/ia64/include/asm/thread_info.h | |||
| @@ -48,15 +48,15 @@ struct thread_info { | |||
| 48 | #ifndef ASM_OFFSETS_C | 48 | #ifndef ASM_OFFSETS_C |
| 49 | /* how to get the thread information struct from C */ | 49 | /* how to get the thread information struct from C */ |
| 50 | #define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE)) | 50 | #define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE)) |
| 51 | #define alloc_thread_info_node(tsk, node) \ | 51 | #define alloc_thread_stack_node(tsk, node) \ |
| 52 | ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) | 52 | ((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE)) |
| 53 | #define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) | 53 | #define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE)) |
| 54 | #else | 54 | #else |
| 55 | #define current_thread_info() ((struct thread_info *) 0) | 55 | #define current_thread_info() ((struct thread_info *) 0) |
| 56 | #define alloc_thread_info_node(tsk, node) ((struct thread_info *) 0) | 56 | #define alloc_thread_stack_node(tsk, node) ((unsigned long *) 0) |
| 57 | #define task_thread_info(tsk) ((struct thread_info *) 0) | 57 | #define task_thread_info(tsk) ((struct thread_info *) 0) |
| 58 | #endif | 58 | #endif |
| 59 | #define free_thread_info(ti) /* nothing */ | 59 | #define free_thread_stack(ti) /* nothing */ |
| 60 | #define task_stack_page(tsk) ((void *)(tsk)) | 60 | #define task_stack_page(tsk) ((void *)(tsk)) |
| 61 | 61 | ||
| 62 | #define __HAVE_THREAD_FUNCTIONS | 62 | #define __HAVE_THREAD_FUNCTIONS |
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c index f9efe9739d3f..0eaa89f3defd 100644 --- a/arch/ia64/kernel/init_task.c +++ b/arch/ia64/kernel/init_task.c | |||
| @@ -26,6 +26,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); | |||
| 26 | * handled. This is done by having a special ".data..init_task" section... | 26 | * handled. This is done by having a special ".data..init_task" section... |
| 27 | */ | 27 | */ |
| 28 | #define init_thread_info init_task_mem.s.thread_info | 28 | #define init_thread_info init_task_mem.s.thread_info |
| 29 | #define init_stack init_task_mem.stack | ||
| 29 | 30 | ||
| 30 | union { | 31 | union { |
| 31 | struct { | 32 | struct { |
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h index f9924fbcfe42..fb95aed5f428 100644 --- a/arch/m68k/include/asm/mcf_pgalloc.h +++ b/arch/m68k/include/asm/mcf_pgalloc.h | |||
| @@ -14,7 +14,7 @@ extern const char bad_pmd_string[]; | |||
| 14 | extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 14 | extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
| 15 | unsigned long address) | 15 | unsigned long address) |
| 16 | { | 16 | { |
| 17 | unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT); | 17 | unsigned long page = __get_free_page(GFP_DMA); |
| 18 | 18 | ||
| 19 | if (!page) | 19 | if (!page) |
| 20 | return NULL; | 20 | return NULL; |
| @@ -51,7 +51,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page, | |||
| 51 | static inline struct page *pte_alloc_one(struct mm_struct *mm, | 51 | static inline struct page *pte_alloc_one(struct mm_struct *mm, |
| 52 | unsigned long address) | 52 | unsigned long address) |
| 53 | { | 53 | { |
| 54 | struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0); | 54 | struct page *page = alloc_pages(GFP_DMA, 0); |
| 55 | pte_t *pte; | 55 | pte_t *pte; |
| 56 | 56 | ||
| 57 | if (!page) | 57 | if (!page) |
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h index 24bcba496c75..c895b987202c 100644 --- a/arch/m68k/include/asm/motorola_pgalloc.h +++ b/arch/m68k/include/asm/motorola_pgalloc.h | |||
| @@ -11,7 +11,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad | |||
| 11 | { | 11 | { |
| 12 | pte_t *pte; | 12 | pte_t *pte; |
| 13 | 13 | ||
| 14 | pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); | 14 | pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); |
| 15 | if (pte) { | 15 | if (pte) { |
| 16 | __flush_page_to_ram(pte); | 16 | __flush_page_to_ram(pte); |
| 17 | flush_tlb_kernel_page(pte); | 17 | flush_tlb_kernel_page(pte); |
| @@ -32,7 +32,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addres | |||
| 32 | struct page *page; | 32 | struct page *page; |
| 33 | pte_t *pte; | 33 | pte_t *pte; |
| 34 | 34 | ||
| 35 | page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0); | 35 | page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0); |
| 36 | if(!page) | 36 | if(!page) |
| 37 | return NULL; | 37 | return NULL; |
| 38 | if (!pgtable_page_ctor(page)) { | 38 | if (!pgtable_page_ctor(page)) { |
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h index 0931388de47f..1901f61f926f 100644 --- a/arch/m68k/include/asm/sun3_pgalloc.h +++ b/arch/m68k/include/asm/sun3_pgalloc.h | |||
| @@ -37,7 +37,7 @@ do { \ | |||
| 37 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 37 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
| 38 | unsigned long address) | 38 | unsigned long address) |
| 39 | { | 39 | { |
| 40 | unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT); | 40 | unsigned long page = __get_free_page(GFP_KERNEL); |
| 41 | 41 | ||
| 42 | if (!page) | 42 | if (!page) |
| 43 | return NULL; | 43 | return NULL; |
| @@ -49,7 +49,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | |||
| 49 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, | 49 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, |
| 50 | unsigned long address) | 50 | unsigned long address) |
| 51 | { | 51 | { |
| 52 | struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); | 52 | struct page *page = alloc_pages(GFP_KERNEL, 0); |
| 53 | 53 | ||
| 54 | if (page == NULL) | 54 | if (page == NULL) |
| 55 | return NULL; | 55 | return NULL; |
diff --git a/arch/metag/include/asm/pgalloc.h b/arch/metag/include/asm/pgalloc.h index 3104df0a4822..c2caa1ee4360 100644 --- a/arch/metag/include/asm/pgalloc.h +++ b/arch/metag/include/asm/pgalloc.h | |||
| @@ -42,8 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
| 42 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 42 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
| 43 | unsigned long address) | 43 | unsigned long address) |
| 44 | { | 44 | { |
| 45 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | | 45 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); |
| 46 | __GFP_ZERO); | ||
| 47 | return pte; | 46 | return pte; |
| 48 | } | 47 | } |
| 49 | 48 | ||
| @@ -51,7 +50,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, | |||
| 51 | unsigned long address) | 50 | unsigned long address) |
| 52 | { | 51 | { |
| 53 | struct page *pte; | 52 | struct page *pte; |
| 54 | pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0); | 53 | pte = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); |
| 55 | if (!pte) | 54 | if (!pte) |
| 56 | return NULL; | 55 | return NULL; |
| 57 | if (!pgtable_page_ctor(pte)) { | 56 | if (!pgtable_page_ctor(pte)) { |
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h index 61436d69775c..7c89390c0c13 100644 --- a/arch/microblaze/include/asm/pgalloc.h +++ b/arch/microblaze/include/asm/pgalloc.h | |||
| @@ -116,9 +116,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, | |||
| 116 | struct page *ptepage; | 116 | struct page *ptepage; |
| 117 | 117 | ||
| 118 | #ifdef CONFIG_HIGHPTE | 118 | #ifdef CONFIG_HIGHPTE |
| 119 | int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT; | 119 | int flags = GFP_KERNEL | __GFP_HIGHMEM; |
| 120 | #else | 120 | #else |
| 121 | int flags = GFP_KERNEL | __GFP_REPEAT; | 121 | int flags = GFP_KERNEL; |
| 122 | #endif | 122 | #endif |
| 123 | 123 | ||
| 124 | ptepage = alloc_pages(flags, 0); | 124 | ptepage = alloc_pages(flags, 0); |
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c index 4f4520e779a5..eb99fcc76088 100644 --- a/arch/microblaze/mm/pgtable.c +++ b/arch/microblaze/mm/pgtable.c | |||
| @@ -239,8 +239,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | |||
| 239 | { | 239 | { |
| 240 | pte_t *pte; | 240 | pte_t *pte; |
| 241 | if (mem_init_done) { | 241 | if (mem_init_done) { |
| 242 | pte = (pte_t *)__get_free_page(GFP_KERNEL | | 242 | pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); |
| 243 | __GFP_REPEAT | __GFP_ZERO); | ||
| 244 | } else { | 243 | } else { |
| 245 | pte = (pte_t *)early_get_page(); | 244 | pte = (pte_t *)early_get_page(); |
| 246 | if (pte) | 245 | if (pte) |
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 6733ac575da4..36a391d289aa 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h | |||
| @@ -74,7 +74,7 @@ | |||
| 74 | #define KVM_GUEST_KUSEG 0x00000000UL | 74 | #define KVM_GUEST_KUSEG 0x00000000UL |
| 75 | #define KVM_GUEST_KSEG0 0x40000000UL | 75 | #define KVM_GUEST_KSEG0 0x40000000UL |
| 76 | #define KVM_GUEST_KSEG23 0x60000000UL | 76 | #define KVM_GUEST_KSEG23 0x60000000UL |
| 77 | #define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000) | 77 | #define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000) |
| 78 | #define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) | 78 | #define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) |
| 79 | 79 | ||
| 80 | #define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) | 80 | #define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) |
| @@ -338,6 +338,7 @@ struct kvm_mips_tlb { | |||
| 338 | #define KVM_MIPS_GUEST_TLB_SIZE 64 | 338 | #define KVM_MIPS_GUEST_TLB_SIZE 64 |
| 339 | struct kvm_vcpu_arch { | 339 | struct kvm_vcpu_arch { |
| 340 | void *host_ebase, *guest_ebase; | 340 | void *host_ebase, *guest_ebase; |
| 341 | int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu); | ||
| 341 | unsigned long host_stack; | 342 | unsigned long host_stack; |
| 342 | unsigned long host_gp; | 343 | unsigned long host_gp; |
| 343 | 344 | ||
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index b336037e8768..93c079a1cfc8 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h | |||
| @@ -69,7 +69,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | |||
| 69 | { | 69 | { |
| 70 | pte_t *pte; | 70 | pte_t *pte; |
| 71 | 71 | ||
| 72 | pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER); | 72 | pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER); |
| 73 | 73 | ||
| 74 | return pte; | 74 | return pte; |
| 75 | } | 75 | } |
| @@ -79,7 +79,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, | |||
| 79 | { | 79 | { |
| 80 | struct page *pte; | 80 | struct page *pte; |
| 81 | 81 | ||
| 82 | pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); | 82 | pte = alloc_pages(GFP_KERNEL, PTE_ORDER); |
| 83 | if (!pte) | 83 | if (!pte) |
| 84 | return NULL; | 84 | return NULL; |
| 85 | clear_highpage(pte); | 85 | clear_highpage(pte); |
| @@ -113,7 +113,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) | |||
| 113 | { | 113 | { |
| 114 | pmd_t *pmd; | 114 | pmd_t *pmd; |
| 115 | 115 | ||
| 116 | pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER); | 116 | pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER); |
| 117 | if (pmd) | 117 | if (pmd) |
| 118 | pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table); | 118 | pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table); |
| 119 | return pmd; | 119 | return pmd; |
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index 396df6eb0a12..645c8a1982a7 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c | |||
| @@ -1636,6 +1636,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, | |||
| 1636 | if (index < 0) { | 1636 | if (index < 0) { |
| 1637 | vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); | 1637 | vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); |
| 1638 | vcpu->arch.host_cp0_badvaddr = va; | 1638 | vcpu->arch.host_cp0_badvaddr = va; |
| 1639 | vcpu->arch.pc = curr_pc; | ||
| 1639 | er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, | 1640 | er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, |
| 1640 | vcpu); | 1641 | vcpu); |
| 1641 | preempt_enable(); | 1642 | preempt_enable(); |
| @@ -1647,6 +1648,8 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, | |||
| 1647 | * invalid exception to the guest | 1648 | * invalid exception to the guest |
| 1648 | */ | 1649 | */ |
| 1649 | if (!TLB_IS_VALID(*tlb, va)) { | 1650 | if (!TLB_IS_VALID(*tlb, va)) { |
| 1651 | vcpu->arch.host_cp0_badvaddr = va; | ||
| 1652 | vcpu->arch.pc = curr_pc; | ||
| 1650 | er = kvm_mips_emulate_tlbinv_ld(cause, NULL, | 1653 | er = kvm_mips_emulate_tlbinv_ld(cause, NULL, |
| 1651 | run, vcpu); | 1654 | run, vcpu); |
| 1652 | preempt_enable(); | 1655 | preempt_enable(); |
| @@ -1666,7 +1669,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, | |||
| 1666 | cache, op, base, arch->gprs[base], offset); | 1669 | cache, op, base, arch->gprs[base], offset); |
| 1667 | er = EMULATE_FAIL; | 1670 | er = EMULATE_FAIL; |
| 1668 | preempt_enable(); | 1671 | preempt_enable(); |
| 1669 | goto dont_update_pc; | 1672 | goto done; |
| 1670 | 1673 | ||
| 1671 | } | 1674 | } |
| 1672 | 1675 | ||
| @@ -1694,16 +1697,20 @@ skip_fault: | |||
| 1694 | kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | 1697 | kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", |
| 1695 | cache, op, base, arch->gprs[base], offset); | 1698 | cache, op, base, arch->gprs[base], offset); |
| 1696 | er = EMULATE_FAIL; | 1699 | er = EMULATE_FAIL; |
| 1697 | preempt_enable(); | ||
| 1698 | goto dont_update_pc; | ||
| 1699 | } | 1700 | } |
| 1700 | 1701 | ||
| 1701 | preempt_enable(); | 1702 | preempt_enable(); |
| 1703 | done: | ||
| 1704 | /* Rollback PC only if emulation was unsuccessful */ | ||
| 1705 | if (er == EMULATE_FAIL) | ||
| 1706 | vcpu->arch.pc = curr_pc; | ||
| 1702 | 1707 | ||
| 1703 | dont_update_pc: | 1708 | dont_update_pc: |
| 1704 | /* Rollback PC */ | 1709 | /* |
| 1705 | vcpu->arch.pc = curr_pc; | 1710 | * This is for exceptions whose emulation updates the PC, so do not |
| 1706 | done: | 1711 | * overwrite the PC under any circumstances |
| 1712 | */ | ||
| 1713 | |||
| 1707 | return er; | 1714 | return er; |
| 1708 | } | 1715 | } |
| 1709 | 1716 | ||
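
The emulate.c hunk above untangles the error exits so the guest PC is snapshotted once, rolled back only when emulation actually failed, and left alone on paths where exception delivery has already redirected it. A small control-flow sketch under those assumptions (the enum values and the +4 advance are illustrative, not the MIPS KVM definitions):

#include <stdio.h>

enum emulation_result { EMULATE_DONE, EMULATE_FAIL, EMULATE_EXCEPT };

static enum emulation_result emulate_op(unsigned long *pc, int outcome)
{
	unsigned long curr_pc = *pc;	/* snapshot before advancing */
	enum emulation_result er = EMULATE_DONE;

	*pc += 4;			/* optimistic PC update */

	if (outcome == 1) {
		er = EMULATE_FAIL;	/* unhandled op */
		goto done;
	}
	if (outcome == 2) {
		er = EMULATE_EXCEPT;	/* exception delivery updated the PC itself */
		goto dont_update_pc;
	}

done:
	/* roll back the PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		*pc = curr_pc;
dont_update_pc:
	return er;
}

int main(void)
{
	unsigned long pc = 0x1000;

	emulate_op(&pc, 0); printf("success: pc=%#lx\n", pc);	/* advanced */
	emulate_op(&pc, 1); printf("failure: pc=%#lx\n", pc);	/* restored */
	return 0;
}
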
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h index 4ab4bdfad703..2143884709e4 100644 --- a/arch/mips/kvm/interrupt.h +++ b/arch/mips/kvm/interrupt.h | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #define MIPS_EXC_MAX 12 | 28 | #define MIPS_EXC_MAX 12 |
| 29 | /* XXXSL More to follow */ | 29 | /* XXXSL More to follow */ |
| 30 | 30 | ||
| 31 | extern char __kvm_mips_vcpu_run_end[]; | ||
| 31 | extern char mips32_exception[], mips32_exceptionEnd[]; | 32 | extern char mips32_exception[], mips32_exceptionEnd[]; |
| 32 | extern char mips32_GuestException[], mips32_GuestExceptionEnd[]; | 33 | extern char mips32_GuestException[], mips32_GuestExceptionEnd[]; |
| 33 | 34 | ||
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S index 3ef03009de5f..828fcfc1cd7f 100644 --- a/arch/mips/kvm/locore.S +++ b/arch/mips/kvm/locore.S | |||
| @@ -202,6 +202,7 @@ FEXPORT(__kvm_mips_load_k0k1) | |||
| 202 | 202 | ||
| 203 | /* Jump to guest */ | 203 | /* Jump to guest */ |
| 204 | eret | 204 | eret |
| 205 | EXPORT(__kvm_mips_vcpu_run_end) | ||
| 205 | 206 | ||
| 206 | VECTOR(MIPSX(exception), unknown) | 207 | VECTOR(MIPSX(exception), unknown) |
| 207 | /* Find out what mode we came from and jump to the proper handler. */ | 208 | /* Find out what mode we came from and jump to the proper handler. */ |
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index dc052fb5c7a2..44da5259f390 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c | |||
| @@ -315,6 +315,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | |||
| 315 | memcpy(gebase + offset, mips32_GuestException, | 315 | memcpy(gebase + offset, mips32_GuestException, |
| 316 | mips32_GuestExceptionEnd - mips32_GuestException); | 316 | mips32_GuestExceptionEnd - mips32_GuestException); |
| 317 | 317 | ||
| 318 | #ifdef MODULE | ||
| 319 | offset += mips32_GuestExceptionEnd - mips32_GuestException; | ||
| 320 | memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run, | ||
| 321 | __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run); | ||
| 322 | vcpu->arch.vcpu_run = gebase + offset; | ||
| 323 | #else | ||
| 324 | vcpu->arch.vcpu_run = __kvm_mips_vcpu_run; | ||
| 325 | #endif | ||
| 326 | |||
| 318 | /* Invalidate the icache for these ranges */ | 327 | /* Invalidate the icache for these ranges */ |
| 319 | local_flush_icache_range((unsigned long)gebase, | 328 | local_flush_icache_range((unsigned long)gebase, |
| 320 | (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); | 329 | (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); |
| @@ -404,7 +413,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
| 404 | /* Disable hardware page table walking while in guest */ | 413 | /* Disable hardware page table walking while in guest */ |
| 405 | htw_stop(); | 414 | htw_stop(); |
| 406 | 415 | ||
| 407 | r = __kvm_mips_vcpu_run(run, vcpu); | 416 | r = vcpu->arch.vcpu_run(run, vcpu); |
| 408 | 417 | ||
| 409 | /* Re-enable HTW before enabling interrupts */ | 418 | /* Re-enable HTW before enabling interrupts */ |
| 410 | htw_start(); | 419 | htw_start(); |
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h index 4861a78c7160..f5f90bbf019d 100644 --- a/arch/mn10300/include/asm/thread_info.h +++ b/arch/mn10300/include/asm/thread_info.h | |||
| @@ -115,7 +115,7 @@ static inline unsigned long current_stack_pointer(void) | |||
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | #ifndef CONFIG_KGDB | 117 | #ifndef CONFIG_KGDB |
| 118 | void arch_release_thread_info(struct thread_info *ti); | 118 | void arch_release_thread_stack(unsigned long *stack); |
| 119 | #endif | 119 | #endif |
| 120 | #define get_thread_info(ti) get_task_struct((ti)->task) | 120 | #define get_thread_info(ti) get_task_struct((ti)->task) |
| 121 | #define put_thread_info(ti) put_task_struct((ti)->task) | 121 | #define put_thread_info(ti) put_task_struct((ti)->task) |
diff --git a/arch/mn10300/kernel/kgdb.c b/arch/mn10300/kernel/kgdb.c index 99770823451a..2d7986c386fe 100644 --- a/arch/mn10300/kernel/kgdb.c +++ b/arch/mn10300/kernel/kgdb.c | |||
| @@ -397,8 +397,9 @@ static bool kgdb_arch_undo_singlestep(struct pt_regs *regs) | |||
| 397 | * single-step state is cleared. At this point the breakpoints should have | 397 | * single-step state is cleared. At this point the breakpoints should have |
| 398 | * been removed by __switch_to(). | 398 | * been removed by __switch_to(). |
| 399 | */ | 399 | */ |
| 400 | void arch_release_thread_info(struct thread_info *ti) | 400 | void arch_release_thread_stack(unsigned long *stack) |
| 401 | { | 401 | { |
| 402 | struct thread_info *ti = (void *)stack; | ||
| 402 | if (kgdb_sstep_thread == ti) { | 403 | if (kgdb_sstep_thread == ti) { |
| 403 | kgdb_sstep_thread = NULL; | 404 | kgdb_sstep_thread = NULL; |
| 404 | 405 | ||
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c index e77a7c728081..9577cf768875 100644 --- a/arch/mn10300/mm/pgtable.c +++ b/arch/mn10300/mm/pgtable.c | |||
| @@ -63,7 +63,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags) | |||
| 63 | 63 | ||
| 64 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | 64 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) |
| 65 | { | 65 | { |
| 66 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); | 66 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL); |
| 67 | if (pte) | 67 | if (pte) |
| 68 | clear_page(pte); | 68 | clear_page(pte); |
| 69 | return pte; | 69 | return pte; |
| @@ -74,9 +74,9 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) | |||
| 74 | struct page *pte; | 74 | struct page *pte; |
| 75 | 75 | ||
| 76 | #ifdef CONFIG_HIGHPTE | 76 | #ifdef CONFIG_HIGHPTE |
| 77 | pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0); | 77 | pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0); |
| 78 | #else | 78 | #else |
| 79 | pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); | 79 | pte = alloc_pages(GFP_KERNEL, 0); |
| 80 | #endif | 80 | #endif |
| 81 | if (!pte) | 81 | if (!pte) |
| 82 | return NULL; | 82 | return NULL; |
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h index 6e2985e0a7b9..bb47d08c8ef7 100644 --- a/arch/nios2/include/asm/pgalloc.h +++ b/arch/nios2/include/asm/pgalloc.h | |||
| @@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | |||
| 42 | { | 42 | { |
| 43 | pte_t *pte; | 43 | pte_t *pte; |
| 44 | 44 | ||
| 45 | pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, | 45 | pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER); |
| 46 | PTE_ORDER); | ||
| 47 | 46 | ||
| 48 | return pte; | 47 | return pte; |
| 49 | } | 48 | } |
| @@ -53,7 +52,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, | |||
| 53 | { | 52 | { |
| 54 | struct page *pte; | 53 | struct page *pte; |
| 55 | 54 | ||
| 56 | pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); | 55 | pte = alloc_pages(GFP_KERNEL, PTE_ORDER); |
| 57 | if (pte) { | 56 | if (pte) { |
| 58 | if (!pgtable_page_ctor(pte)) { | 57 | if (!pgtable_page_ctor(pte)) { |
| 59 | __free_page(pte); | 58 | __free_page(pte); |
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h index 21484e5b9e9a..87eebd185089 100644 --- a/arch/openrisc/include/asm/pgalloc.h +++ b/arch/openrisc/include/asm/pgalloc.h | |||
| @@ -77,7 +77,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, | |||
| 77 | unsigned long address) | 77 | unsigned long address) |
| 78 | { | 78 | { |
| 79 | struct page *pte; | 79 | struct page *pte; |
| 80 | pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); | 80 | pte = alloc_pages(GFP_KERNEL, 0); |
| 81 | if (!pte) | 81 | if (!pte) |
| 82 | return NULL; | 82 | return NULL; |
| 83 | clear_page(page_address(pte)); | 83 | clear_page(page_address(pte)); |
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c index 62b08ef392be..5b2a95116e8f 100644 --- a/arch/openrisc/mm/ioremap.c +++ b/arch/openrisc/mm/ioremap.c | |||
| @@ -122,7 +122,7 @@ pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm, | |||
| 122 | pte_t *pte; | 122 | pte_t *pte; |
| 123 | 123 | ||
| 124 | if (likely(mem_init_done)) { | 124 | if (likely(mem_init_done)) { |
| 125 | pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT); | 125 | pte = (pte_t *) __get_free_page(GFP_KERNEL); |
| 126 | } else { | 126 | } else { |
| 127 | pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); | 127 | pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE); |
| 128 | #if 0 | 128 | #if 0 |
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index f2fd327dce2e..f08dda3f0995 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h | |||
| @@ -63,8 +63,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) | |||
| 63 | 63 | ||
| 64 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) | 64 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) |
| 65 | { | 65 | { |
| 66 | pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, | 66 | pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER); |
| 67 | PMD_ORDER); | ||
| 68 | if (pmd) | 67 | if (pmd) |
| 69 | memset(pmd, 0, PAGE_SIZE<<PMD_ORDER); | 68 | memset(pmd, 0, PAGE_SIZE<<PMD_ORDER); |
| 70 | return pmd; | 69 | return pmd; |
| @@ -124,7 +123,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) | |||
| 124 | static inline pgtable_t | 123 | static inline pgtable_t |
| 125 | pte_alloc_one(struct mm_struct *mm, unsigned long address) | 124 | pte_alloc_one(struct mm_struct *mm, unsigned long address) |
| 126 | { | 125 | { |
| 127 | struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); | 126 | struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO); |
| 128 | if (!page) | 127 | if (!page) |
| 129 | return NULL; | 128 | return NULL; |
| 130 | if (!pgtable_page_ctor(page)) { | 129 | if (!pgtable_page_ctor(page)) { |
| @@ -137,7 +136,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address) | |||
| 137 | static inline pte_t * | 136 | static inline pte_t * |
| 138 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) | 137 | pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr) |
| 139 | { | 138 | { |
| 140 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); | 139 | pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); |
| 141 | return pte; | 140 | return pte; |
| 142 | } | 141 | } |
| 143 | 142 | ||
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h index 4736020ba5ea..5e953ab4530d 100644 --- a/arch/parisc/include/asm/traps.h +++ b/arch/parisc/include/asm/traps.h | |||
| @@ -8,6 +8,8 @@ struct pt_regs; | |||
| 8 | void parisc_terminate(char *msg, struct pt_regs *regs, | 8 | void parisc_terminate(char *msg, struct pt_regs *regs, |
| 9 | int code, unsigned long offset) __noreturn __cold; | 9 | int code, unsigned long offset) __noreturn __cold; |
| 10 | 10 | ||
| 11 | void die_if_kernel(char *str, struct pt_regs *regs, long err); | ||
| 12 | |||
| 11 | /* mm/fault.c */ | 13 | /* mm/fault.c */ |
| 12 | void do_page_fault(struct pt_regs *regs, unsigned long code, | 14 | void do_page_fault(struct pt_regs *regs, unsigned long code, |
| 13 | unsigned long address); | 15 | unsigned long address); |
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index e81ccf1716e9..5adc339eb7c8 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c | |||
| @@ -324,8 +324,9 @@ int init_per_cpu(int cpunum) | |||
| 324 | per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; | 324 | per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; |
| 325 | per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; | 325 | per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; |
| 326 | 326 | ||
| 327 | printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n", | 327 | if (cpunum == 0) |
| 328 | cpunum, coproc_cfg.revision, coproc_cfg.model); | 328 | printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n", |
| 329 | cpunum, coproc_cfg.revision, coproc_cfg.model); | ||
| 329 | 330 | ||
| 330 | /* | 331 | /* |
| 331 | ** store status register to stack (hopefully aligned) | 332 | ** store status register to stack (hopefully aligned) |
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 58dd6801f5be..31ec99a5f119 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c | |||
| @@ -309,11 +309,6 @@ void __init time_init(void) | |||
| 309 | clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz, | 309 | clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz, |
| 310 | NSEC_PER_MSEC, 0); | 310 | NSEC_PER_MSEC, 0); |
| 311 | 311 | ||
| 312 | #if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT) | ||
| 313 | /* At bootup only one 64bit CPU is online and cr16 is "stable" */ | ||
| 314 | set_sched_clock_stable(); | ||
| 315 | #endif | ||
| 316 | |||
| 317 | start_cpu_itimer(); /* get CPU 0 started */ | 312 | start_cpu_itimer(); /* get CPU 0 started */ |
| 318 | 313 | ||
| 319 | /* register at clocksource framework */ | 314 | /* register at clocksource framework */ |
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c index d7c0acb35ec2..2b65c0177778 100644 --- a/arch/parisc/kernel/unaligned.c +++ b/arch/parisc/kernel/unaligned.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/ratelimit.h> | 28 | #include <linux/ratelimit.h> |
| 29 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
| 30 | #include <asm/hardirq.h> | 30 | #include <asm/hardirq.h> |
| 31 | #include <asm/traps.h> | ||
| 31 | 32 | ||
| 32 | /* #define DEBUG_UNALIGNED 1 */ | 33 | /* #define DEBUG_UNALIGNED 1 */ |
| 33 | 34 | ||
| @@ -130,8 +131,6 @@ | |||
| 130 | 131 | ||
| 131 | int unaligned_enabled __read_mostly = 1; | 132 | int unaligned_enabled __read_mostly = 1; |
| 132 | 133 | ||
| 133 | void die_if_kernel (char *str, struct pt_regs *regs, long err); | ||
| 134 | |||
| 135 | static int emulate_ldh(struct pt_regs *regs, int toreg) | 134 | static int emulate_ldh(struct pt_regs *regs, int toreg) |
| 136 | { | 135 | { |
| 137 | unsigned long saddr = regs->ior; | 136 | unsigned long saddr = regs->ior; |
| @@ -666,7 +665,7 @@ void handle_unaligned(struct pt_regs *regs) | |||
| 666 | break; | 665 | break; |
| 667 | } | 666 | } |
| 668 | 667 | ||
| 669 | if (modify && R1(regs->iir)) | 668 | if (ret == 0 && modify && R1(regs->iir)) |
| 670 | regs->gr[R1(regs->iir)] = newbase; | 669 | regs->gr[R1(regs->iir)] = newbase; |
| 671 | 670 | ||
| 672 | 671 | ||
| @@ -677,6 +676,14 @@ void handle_unaligned(struct pt_regs *regs) | |||
| 677 | 676 | ||
| 678 | if (ret) | 677 | if (ret) |
| 679 | { | 678 | { |
| 679 | /* | ||
| 680 | * The unaligned handler failed. | ||
| 681 | * If we were called by __get_user() or __put_user() jump | ||
| 682 | * to it's exception fixup handler instead of crashing. | ||
| 683 | */ | ||
| 684 | if (!user_mode(regs) && fixup_exception(regs)) | ||
| 685 | return; | ||
| 686 | |||
| 680 | printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret); | 687 | printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret); |
| 681 | die_if_kernel("Unaligned data reference", regs, 28); | 688 | die_if_kernel("Unaligned data reference", regs, 28); |
| 682 | 689 | ||
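
The unaligned.c hunk above stops a failed kernel-mode fixup from going straight to die_if_kernel(): if an exception-table fixup exists (as __get_user()/__put_user() install), control is handed to it instead. A minimal sketch of that fallback order; user_mode(), fixup_exception() and the regs layout below are stand-ins, not the parisc API.

#include <stdbool.h>
#include <stdio.h>

struct pt_regs { bool from_user; bool has_fixup; };

static bool user_mode(const struct pt_regs *regs) { return regs->from_user; }

/* Pretend exception-table lookup: redirect to the fixup if one exists. */
static bool fixup_exception(struct pt_regs *regs) { return regs->has_fixup; }

static void die_if_kernel(const char *msg, const struct pt_regs *regs)
{
	if (!user_mode(regs))
		printf("panic: %s\n", msg);
}

static void handle_unaligned_failure(struct pt_regs *regs, int ret)
{
	if (!ret)
		return;

	/* New: give a kernel-mode fault a chance to use its fixup handler. */
	if (!user_mode(regs) && fixup_exception(regs)) {
		printf("handed to exception fixup, ret = %d\n", ret);
		return;
	}

	printf("Unaligned handler failed, ret = %d\n", ret);
	die_if_kernel("Unaligned data reference", regs);
}

int main(void)
{
	struct pt_regs with_fixup = { false, true };
	struct pt_regs without    = { false, false };

	handle_unaligned_failure(&with_fixup, -14);	/* survives via fixup */
	handle_unaligned_failure(&without, -14);	/* still dies */
	return 0;
}
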
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c index ddd988b267a9..e278a87f43cc 100644 --- a/arch/parisc/kernel/unwind.c +++ b/arch/parisc/kernel/unwind.c | |||
| @@ -75,7 +75,10 @@ find_unwind_entry(unsigned long addr) | |||
| 75 | if (addr >= kernel_unwind_table.start && | 75 | if (addr >= kernel_unwind_table.start && |
| 76 | addr <= kernel_unwind_table.end) | 76 | addr <= kernel_unwind_table.end) |
| 77 | e = find_unwind_entry_in_table(&kernel_unwind_table, addr); | 77 | e = find_unwind_entry_in_table(&kernel_unwind_table, addr); |
| 78 | else | 78 | else { |
| 79 | unsigned long flags; | ||
| 80 | |||
| 81 | spin_lock_irqsave(&unwind_lock, flags); | ||
| 79 | list_for_each_entry(table, &unwind_tables, list) { | 82 | list_for_each_entry(table, &unwind_tables, list) { |
| 80 | if (addr >= table->start && | 83 | if (addr >= table->start && |
| 81 | addr <= table->end) | 84 | addr <= table->end) |
| @@ -86,6 +89,8 @@ find_unwind_entry(unsigned long addr) | |||
| 86 | break; | 89 | break; |
| 87 | } | 90 | } |
| 88 | } | 91 | } |
| 92 | spin_unlock_irqrestore(&unwind_lock, flags); | ||
| 93 | } | ||
| 89 | 94 | ||
| 90 | return e; | 95 | return e; |
| 91 | } | 96 | } |
| @@ -303,18 +308,16 @@ static void unwind_frame_regs(struct unwind_frame_info *info) | |||
| 303 | 308 | ||
| 304 | insn = *(unsigned int *)npc; | 309 | insn = *(unsigned int *)npc; |
| 305 | 310 | ||
| 306 | if ((insn & 0xffffc000) == 0x37de0000 || | 311 | if ((insn & 0xffffc001) == 0x37de0000 || |
| 307 | (insn & 0xffe00000) == 0x6fc00000) { | 312 | (insn & 0xffe00001) == 0x6fc00000) { |
| 308 | /* ldo X(sp), sp, or stwm X,D(sp) */ | 313 | /* ldo X(sp), sp, or stwm X,D(sp) */ |
| 309 | frame_size += (insn & 0x1 ? -1 << 13 : 0) | | 314 | frame_size += (insn & 0x3fff) >> 1; |
| 310 | ((insn & 0x3fff) >> 1); | ||
| 311 | dbg("analyzing func @ %lx, insn=%08x @ " | 315 | dbg("analyzing func @ %lx, insn=%08x @ " |
| 312 | "%lx, frame_size = %ld\n", info->ip, | 316 | "%lx, frame_size = %ld\n", info->ip, |
| 313 | insn, npc, frame_size); | 317 | insn, npc, frame_size); |
| 314 | } else if ((insn & 0xffe00008) == 0x73c00008) { | 318 | } else if ((insn & 0xffe00009) == 0x73c00008) { |
| 315 | /* std,ma X,D(sp) */ | 319 | /* std,ma X,D(sp) */ |
| 316 | frame_size += (insn & 0x1 ? -1 << 13 : 0) | | 320 | frame_size += ((insn >> 4) & 0x3ff) << 3; |
| 317 | (((insn >> 4) & 0x3ff) << 3); | ||
| 318 | dbg("analyzing func @ %lx, insn=%08x @ " | 321 | dbg("analyzing func @ %lx, insn=%08x @ " |
| 319 | "%lx, frame_size = %ld\n", info->ip, | 322 | "%lx, frame_size = %ld\n", info->ip, |
| 320 | insn, npc, frame_size); | 323 | insn, npc, frame_size); |
| @@ -333,6 +336,9 @@ static void unwind_frame_regs(struct unwind_frame_info *info) | |||
| 333 | } | 336 | } |
| 334 | } | 337 | } |
| 335 | 338 | ||
| 339 | if (frame_size > e->Total_frame_size << 3) | ||
| 340 | frame_size = e->Total_frame_size << 3; | ||
| 341 | |||
| 336 | if (!unwind_special(info, e->region_start, frame_size)) { | 342 | if (!unwind_special(info, e->region_start, frame_size)) { |
| 337 | info->prev_sp = info->sp - frame_size; | 343 | info->prev_sp = info->sp - frame_size; |
| 338 | if (e->Millicode) | 344 | if (e->Millicode) |
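
The unwind.c hunk above requires bit 0 of the matched instructions to be clear, which lets the unwinder drop the sign-extension term from the frame-size decode (these PA-RISC immediates are low-sign-extended, with the sign carried in bit 0, as the removed expression shows) and additionally clamps the result to the entry's Total_frame_size. A small decode sketch; the sample encodings are illustrative, not taken from a real binary.

#include <stdio.h>

/* old decode: low-sign-extended 14-bit displacement, halved */
static long decode_old(unsigned int insn)
{
	return (insn & 0x1 ? -(1L << 13) : 0) | ((insn & 0x3fff) >> 1);
}

/* new decode: the match mask guarantees bit 0 == 0, so the value is positive */
static long decode_new(unsigned int insn)
{
	return (insn & 0x3fff) >> 1;
}

int main(void)
{
	unsigned int pos = 0x37de0080;	/* would-be ldo 64(sp),sp: bit 0 clear */
	unsigned int neg = 0x37de0081;	/* same displacement field with sign bit set */

	printf("positive: old=%ld new=%ld\n", decode_old(pos), decode_new(pos));
	printf("negative: old=%ld (the 0xffffc001 mask now rejects this)\n",
	       decode_old(neg));
	return 0;
}
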
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 01f7464d9fea..0a9d439bcda6 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
| @@ -128,7 +128,7 @@ config PPC | |||
| 128 | select IRQ_FORCED_THREADING | 128 | select IRQ_FORCED_THREADING |
| 129 | select HAVE_RCU_TABLE_FREE if SMP | 129 | select HAVE_RCU_TABLE_FREE if SMP |
| 130 | select HAVE_SYSCALL_TRACEPOINTS | 130 | select HAVE_SYSCALL_TRACEPOINTS |
| 131 | select HAVE_CBPF_JIT | 131 | select HAVE_CBPF_JIT if CPU_BIG_ENDIAN |
| 132 | select HAVE_ARCH_JUMP_LABEL | 132 | select HAVE_ARCH_JUMP_LABEL |
| 133 | select ARCH_HAVE_NMI_SAFE_CMPXCHG | 133 | select ARCH_HAVE_NMI_SAFE_CMPXCHG |
| 134 | select ARCH_HAS_GCOV_PROFILE_ALL | 134 | select ARCH_HAS_GCOV_PROFILE_ALL |
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h index a2350194fc76..8e21bb492dca 100644 --- a/arch/powerpc/include/asm/book3s/32/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h | |||
| @@ -102,7 +102,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb, | |||
| 102 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, | 102 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, |
| 103 | unsigned long address) | 103 | unsigned long address) |
| 104 | { | 104 | { |
| 105 | tlb_flush_pgtable(tlb, address); | ||
| 106 | pgtable_page_dtor(table); | 105 | pgtable_page_dtor(table); |
| 107 | pgtable_free_tlb(tlb, page_address(table), 0); | 106 | pgtable_free_tlb(tlb, page_address(table), 0); |
| 108 | } | 107 | } |
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h index 290157e8d5b2..74839f24f412 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h +++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h | |||
| @@ -88,6 +88,7 @@ | |||
| 88 | #define HPTE_R_RPN_SHIFT 12 | 88 | #define HPTE_R_RPN_SHIFT 12 |
| 89 | #define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000) | 89 | #define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000) |
| 90 | #define HPTE_R_PP ASM_CONST(0x0000000000000003) | 90 | #define HPTE_R_PP ASM_CONST(0x0000000000000003) |
| 91 | #define HPTE_R_PPP ASM_CONST(0x8000000000000003) | ||
| 91 | #define HPTE_R_N ASM_CONST(0x0000000000000004) | 92 | #define HPTE_R_N ASM_CONST(0x0000000000000004) |
| 92 | #define HPTE_R_G ASM_CONST(0x0000000000000008) | 93 | #define HPTE_R_G ASM_CONST(0x0000000000000008) |
| 93 | #define HPTE_R_M ASM_CONST(0x0000000000000010) | 94 | #define HPTE_R_M ASM_CONST(0x0000000000000010) |
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index 488279edb1f0..cd5e7aa8cc34 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h | |||
| @@ -41,7 +41,7 @@ extern struct kmem_cache *pgtable_cache[]; | |||
| 41 | pgtable_cache[(shift) - 1]; \ | 41 | pgtable_cache[(shift) - 1]; \ |
| 42 | }) | 42 | }) |
| 43 | 43 | ||
| 44 | #define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO | 44 | #define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO |
| 45 | 45 | ||
| 46 | extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int); | 46 | extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int); |
| 47 | extern void pte_fragment_free(unsigned long *, int); | 47 | extern void pte_fragment_free(unsigned long *, int); |
| @@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm) | |||
| 56 | return (pgd_t *)__get_free_page(PGALLOC_GFP); | 56 | return (pgd_t *)__get_free_page(PGALLOC_GFP); |
| 57 | #else | 57 | #else |
| 58 | struct page *page; | 58 | struct page *page; |
| 59 | page = alloc_pages(PGALLOC_GFP, 4); | 59 | page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4); |
| 60 | if (!page) | 60 | if (!page) |
| 61 | return NULL; | 61 | return NULL; |
| 62 | return (pgd_t *) page_address(page); | 62 | return (pgd_t *) page_address(page); |
| @@ -93,8 +93,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) | |||
| 93 | 93 | ||
| 94 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) | 94 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) |
| 95 | { | 95 | { |
| 96 | return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), | 96 | return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL); |
| 97 | GFP_KERNEL|__GFP_REPEAT); | ||
| 98 | } | 97 | } |
| 99 | 98 | ||
| 100 | static inline void pud_free(struct mm_struct *mm, pud_t *pud) | 99 | static inline void pud_free(struct mm_struct *mm, pud_t *pud) |
| @@ -110,13 +109,17 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |||
| 110 | static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, | 109 | static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, |
| 111 | unsigned long address) | 110 | unsigned long address) |
| 112 | { | 111 | { |
| 112 | /* | ||
| 113 | * By now all the pud entries should be none entries. So go | ||
| 114 | * ahead and flush the page walk cache | ||
| 115 | */ | ||
| 116 | flush_tlb_pgtable(tlb, address); | ||
| 113 | pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE); | 117 | pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE); |
| 114 | } | 118 | } |
| 115 | 119 | ||
| 116 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) | 120 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) |
| 117 | { | 121 | { |
| 118 | return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), | 122 | return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL); |
| 119 | GFP_KERNEL|__GFP_REPEAT); | ||
| 120 | } | 123 | } |
| 121 | 124 | ||
| 122 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | 125 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) |
| @@ -127,6 +130,11 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | |||
| 127 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, | 130 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, |
| 128 | unsigned long address) | 131 | unsigned long address) |
| 129 | { | 132 | { |
| 133 | /* | ||
| 134 | * By now all the pud entries should be none entries. So go | ||
| 135 | * ahead and flush the page walk cache | ||
| 136 | */ | ||
| 137 | flush_tlb_pgtable(tlb, address); | ||
| 130 | return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX); | 138 | return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX); |
| 131 | } | 139 | } |
| 132 | 140 | ||
| @@ -151,7 +159,7 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd) | |||
| 151 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 159 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
| 152 | unsigned long address) | 160 | unsigned long address) |
| 153 | { | 161 | { |
| 154 | return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); | 162 | return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); |
| 155 | } | 163 | } |
| 156 | 164 | ||
| 157 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, | 165 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, |
| @@ -198,7 +206,11 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) | |||
| 198 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, | 206 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, |
| 199 | unsigned long address) | 207 | unsigned long address) |
| 200 | { | 208 | { |
| 201 | tlb_flush_pgtable(tlb, address); | 209 | /* |
| 210 | * By now all the pud entries should be none entries. So go | ||
| 211 | * ahead and flush the page walk cache | ||
| 212 | */ | ||
| 213 | flush_tlb_pgtable(tlb, address); | ||
| 202 | pgtable_free_tlb(tlb, table, 0); | 214 | pgtable_free_tlb(tlb, table, 0); |
| 203 | } | 215 | } |
| 204 | 216 | ||
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h index 937d4e247ac3..df294224e280 100644 --- a/arch/powerpc/include/asm/book3s/64/radix.h +++ b/arch/powerpc/include/asm/book3s/64/radix.h | |||
| @@ -228,5 +228,20 @@ extern void radix__vmemmap_remove_mapping(unsigned long start, | |||
| 228 | 228 | ||
| 229 | extern int radix__map_kernel_page(unsigned long ea, unsigned long pa, | 229 | extern int radix__map_kernel_page(unsigned long ea, unsigned long pa, |
| 230 | pgprot_t flags, unsigned int psz); | 230 | pgprot_t flags, unsigned int psz); |
| 231 | |||
| 232 | static inline unsigned long radix__get_tree_size(void) | ||
| 233 | { | ||
| 234 | unsigned long rts_field; | ||
| 235 | /* | ||
| 236 | * we support 52 bits, hence 52-31 = 21, 0b10101 | ||
| 237 | * RTS encoding details | ||
| 238 | * bits 0 - 3 of rts -> bits 6 - 8 unsigned long | ||
| 239 | * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long | ||
| 240 | */ | ||
| 241 | rts_field = (0x5UL << 5); /* 6 - 8 bits */ | ||
| 242 | rts_field |= (0x2UL << 61); | ||
| 243 | |||
| 244 | return rts_field; | ||
| 245 | } | ||
| 231 | #endif /* __ASSEMBLY__ */ | 246 | #endif /* __ASSEMBLY__ */ |
| 232 | #endif | 247 | #endif |
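A note on the radix.h hunk above: radix__get_tree_size() encodes the radix tree size (RTS) for a 52-bit address space, RTS = 52 - 31 = 21 = 0b10101, split across the two fields the hardware defines (the low three bits shifted by 5, the high two bits shifted by 61). A minimal user-space sketch that only reproduces the shifts shown in the new helper:

    /* sketch: reproduce the rts_field computation outside the kernel */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long rts_field;

            rts_field  = 0x5ULL << 5;     /* low three RTS bits, 0b101 */
            rts_field |= 0x2ULL << 61;    /* high two RTS bits,  0b10  */
            printf("%#llx\n", rts_field); /* prints 0x40000000000000a0 */
            return 0;
    }

Later hunks in this series (mmu_context_book3s64.c and pgtable-radix.c) switch their callers from the old hard-coded 3ull << PPC_BITLSHIFT(2) to this helper.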
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h index 13ef38828dfe..3fa94fcac628 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h | |||
| @@ -18,16 +18,19 @@ extern void radix__local_flush_tlb_mm(struct mm_struct *mm); | |||
| 18 | extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); | 18 | extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); |
| 19 | extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, | 19 | extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, |
| 20 | unsigned long ap, int nid); | 20 | unsigned long ap, int nid); |
| 21 | extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr); | ||
| 21 | extern void radix__tlb_flush(struct mmu_gather *tlb); | 22 | extern void radix__tlb_flush(struct mmu_gather *tlb); |
| 22 | #ifdef CONFIG_SMP | 23 | #ifdef CONFIG_SMP |
| 23 | extern void radix__flush_tlb_mm(struct mm_struct *mm); | 24 | extern void radix__flush_tlb_mm(struct mm_struct *mm); |
| 24 | extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); | 25 | extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); |
| 25 | extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, | 26 | extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, |
| 26 | unsigned long ap, int nid); | 27 | unsigned long ap, int nid); |
| 28 | extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr); | ||
| 27 | #else | 29 | #else |
| 28 | #define radix__flush_tlb_mm(mm) radix__local_flush_tlb_mm(mm) | 30 | #define radix__flush_tlb_mm(mm) radix__local_flush_tlb_mm(mm) |
| 29 | #define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr) | 31 | #define radix__flush_tlb_page(vma,addr) radix__local_flush_tlb_page(vma,addr) |
| 30 | #define radix___flush_tlb_page(mm,addr,p,i) radix___local_flush_tlb_page(mm,addr,p,i) | 32 | #define radix___flush_tlb_page(mm,addr,p,i) radix___local_flush_tlb_page(mm,addr,p,i) |
| 33 | #define radix__flush_tlb_pwc(tlb, addr) radix__local_flush_tlb_pwc(tlb, addr) | ||
| 31 | #endif | 34 | #endif |
| 32 | 35 | ||
| 33 | #endif | 36 | #endif |
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h index d98424ae356c..96e5769b18b0 100644 --- a/arch/powerpc/include/asm/book3s/64/tlbflush.h +++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h | |||
| @@ -72,5 +72,19 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, | |||
| 72 | #define flush_tlb_mm(mm) local_flush_tlb_mm(mm) | 72 | #define flush_tlb_mm(mm) local_flush_tlb_mm(mm) |
| 73 | #define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr) | 73 | #define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr) |
| 74 | #endif /* CONFIG_SMP */ | 74 | #endif /* CONFIG_SMP */ |
| 75 | /* | ||
| 76 | * flush the page walk cache for the address | ||
| 77 | */ | ||
| 78 | static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address) | ||
| 79 | { | ||
| 80 | /* | ||
| 81 | * Flush the page table walk cache on freeing a page table. We already | ||
| 82 | * have marked the upper/higher level page table entry none by now. | ||
| 83 | * So it is safe to flush PWC here. | ||
| 84 | */ | ||
| 85 | if (!radix_enabled()) | ||
| 86 | return; | ||
| 75 | 87 | ||
| 88 | radix__flush_tlb_pwc(tlb, address); | ||
| 89 | } | ||
| 76 | #endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */ | 90 | #endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */ |
diff --git a/arch/powerpc/include/asm/book3s/pgalloc.h b/arch/powerpc/include/asm/book3s/pgalloc.h index 54f591e9572e..c0a69ae92256 100644 --- a/arch/powerpc/include/asm/book3s/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/pgalloc.h | |||
| @@ -4,11 +4,6 @@ | |||
| 4 | #include <linux/mm.h> | 4 | #include <linux/mm.h> |
| 5 | 5 | ||
| 6 | extern void tlb_remove_table(struct mmu_gather *tlb, void *table); | 6 | extern void tlb_remove_table(struct mmu_gather *tlb, void *table); |
| 7 | static inline void tlb_flush_pgtable(struct mmu_gather *tlb, | ||
| 8 | unsigned long address) | ||
| 9 | { | ||
| 10 | |||
| 11 | } | ||
| 12 | 7 | ||
| 13 | #ifdef CONFIG_PPC64 | 8 | #ifdef CONFIG_PPC64 |
| 14 | #include <asm/book3s/64/pgalloc.h> | 9 | #include <asm/book3s/64/pgalloc.h> |
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h index 0c12a3bfe2ab..897d2e1c8a9b 100644 --- a/arch/powerpc/include/asm/nohash/64/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h | |||
| @@ -57,8 +57,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
| 57 | 57 | ||
| 58 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) | 58 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) |
| 59 | { | 59 | { |
| 60 | return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), | 60 | return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL); |
| 61 | GFP_KERNEL|__GFP_REPEAT); | ||
| 62 | } | 61 | } |
| 63 | 62 | ||
| 64 | static inline void pud_free(struct mm_struct *mm, pud_t *pud) | 63 | static inline void pud_free(struct mm_struct *mm, pud_t *pud) |
| @@ -88,7 +87,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, | |||
| 88 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 87 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
| 89 | unsigned long address) | 88 | unsigned long address) |
| 90 | { | 89 | { |
| 91 | return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO); | 90 | return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); |
| 92 | } | 91 | } |
| 93 | 92 | ||
| 94 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, | 93 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, |
| @@ -172,7 +171,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, | |||
| 172 | 171 | ||
| 173 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | 172 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) |
| 174 | { | 173 | { |
| 175 | pte_fragment_fre((unsigned long *)pte, 1); | 174 | pte_fragment_free((unsigned long *)pte, 1); |
| 176 | } | 175 | } |
| 177 | 176 | ||
| 178 | static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) | 177 | static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) |
| @@ -190,8 +189,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, | |||
| 190 | 189 | ||
| 191 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) | 190 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) |
| 192 | { | 191 | { |
| 193 | return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), | 192 | return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL); |
| 194 | GFP_KERNEL|__GFP_REPEAT); | ||
| 195 | } | 193 | } |
| 196 | 194 | ||
| 197 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | 195 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index c1e82e968506..a0948f40bc7b 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
| @@ -717,7 +717,7 @@ | |||
| 717 | #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */ | 717 | #define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */ |
| 718 | #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ | 718 | #define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */ |
| 719 | #define SPRN_MMCR1 798 | 719 | #define SPRN_MMCR1 798 |
| 720 | #define SPRN_MMCR2 769 | 720 | #define SPRN_MMCR2 785 |
| 721 | #define SPRN_MMCRA 0x312 | 721 | #define SPRN_MMCRA 0x312 |
| 722 | #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ | 722 | #define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */ |
| 723 | #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL | 723 | #define MMCRA_SDAR_DCACHE_MISS 0x40000000UL |
| @@ -754,13 +754,13 @@ | |||
| 754 | #define SPRN_PMC6 792 | 754 | #define SPRN_PMC6 792 |
| 755 | #define SPRN_PMC7 793 | 755 | #define SPRN_PMC7 793 |
| 756 | #define SPRN_PMC8 794 | 756 | #define SPRN_PMC8 794 |
| 757 | #define SPRN_SIAR 780 | ||
| 758 | #define SPRN_SDAR 781 | ||
| 759 | #define SPRN_SIER 784 | 757 | #define SPRN_SIER 784 |
| 760 | #define SIER_SIPR 0x2000000 /* Sampled MSR_PR */ | 758 | #define SIER_SIPR 0x2000000 /* Sampled MSR_PR */ |
| 761 | #define SIER_SIHV 0x1000000 /* Sampled MSR_HV */ | 759 | #define SIER_SIHV 0x1000000 /* Sampled MSR_HV */ |
| 762 | #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */ | 760 | #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */ |
| 763 | #define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */ | 761 | #define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */ |
| 762 | #define SPRN_SIAR 796 | ||
| 763 | #define SPRN_SDAR 797 | ||
| 764 | #define SPRN_TACR 888 | 764 | #define SPRN_TACR 888 |
| 765 | #define SPRN_TCSCR 889 | 765 | #define SPRN_TCSCR 889 |
| 766 | #define SPRN_CSIGR 890 | 766 | #define SPRN_CSIGR 890 |
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 2714a3b81d24..b5f73cb5eeb6 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c | |||
| @@ -642,7 +642,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, | |||
| 642 | if (pe->type & EEH_PE_VF) { | 642 | if (pe->type & EEH_PE_VF) { |
| 643 | eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL); | 643 | eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL); |
| 644 | } else { | 644 | } else { |
| 645 | eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); | ||
| 646 | pci_lock_rescan_remove(); | 645 | pci_lock_rescan_remove(); |
| 647 | pci_hp_remove_devices(bus); | 646 | pci_hp_remove_devices(bus); |
| 648 | pci_unlock_rescan_remove(); | 647 | pci_unlock_rescan_remove(); |
| @@ -692,10 +691,12 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, | |||
| 692 | */ | 691 | */ |
| 693 | edev = list_first_entry(&pe->edevs, struct eeh_dev, list); | 692 | edev = list_first_entry(&pe->edevs, struct eeh_dev, list); |
| 694 | eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL); | 693 | eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL); |
| 695 | if (pe->type & EEH_PE_VF) | 694 | if (pe->type & EEH_PE_VF) { |
| 696 | eeh_add_virt_device(edev, NULL); | 695 | eeh_add_virt_device(edev, NULL); |
| 697 | else | 696 | } else { |
| 697 | eeh_pe_state_clear(pe, EEH_PE_PRI_BUS); | ||
| 698 | pci_hp_add_devices(bus); | 698 | pci_hp_add_devices(bus); |
| 699 | } | ||
| 699 | } else if (frozen_bus && rmv_data->removed) { | 700 | } else if (frozen_bus && rmv_data->removed) { |
| 700 | pr_info("EEH: Sleep 5s ahead of partial hotplug\n"); | 701 | pr_info("EEH: Sleep 5s ahead of partial hotplug\n"); |
| 701 | ssleep(5); | 702 | ssleep(5); |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 4c9440629128..8bcc1b457115 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
| @@ -1399,11 +1399,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX) | |||
| 1399 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | 1399 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ |
| 1400 | 1400 | ||
| 1401 | mtlr r10 | 1401 | mtlr r10 |
| 1402 | BEGIN_MMU_FTR_SECTION | ||
| 1403 | b 2f | ||
| 1404 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX) | ||
| 1405 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | 1402 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ |
| 1403 | BEGIN_MMU_FTR_SECTION | ||
| 1406 | beq- 2f | 1404 | beq- 2f |
| 1405 | FTR_SECTION_ELSE | ||
| 1406 | b 2f | ||
| 1407 | ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX) | ||
| 1407 | 1408 | ||
| 1408 | .machine push | 1409 | .machine push |
| 1409 | .machine "power4" | 1410 | .machine "power4" |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index da5192590c44..6ee4b72cda42 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
| @@ -656,6 +656,7 @@ unsigned char ibm_architecture_vec[] = { | |||
| 656 | W(0xffff0000), W(0x003e0000), /* POWER6 */ | 656 | W(0xffff0000), W(0x003e0000), /* POWER6 */ |
| 657 | W(0xffff0000), W(0x003f0000), /* POWER7 */ | 657 | W(0xffff0000), W(0x003f0000), /* POWER7 */ |
| 658 | W(0xffff0000), W(0x004b0000), /* POWER8E */ | 658 | W(0xffff0000), W(0x004b0000), /* POWER8E */ |
| 659 | W(0xffff0000), W(0x004c0000), /* POWER8NVL */ | ||
| 659 | W(0xffff0000), W(0x004d0000), /* POWER8 */ | 660 | W(0xffff0000), W(0x004d0000), /* POWER8 */ |
| 660 | W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */ | 661 | W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */ |
| 661 | W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ | 662 | W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ |
| @@ -718,7 +719,7 @@ unsigned char ibm_architecture_vec[] = { | |||
| 718 | * must match by the macro below. Update the definition if | 719 | * must match by the macro below. Update the definition if |
| 719 | * the structure layout changes. | 720 | * the structure layout changes. |
| 720 | */ | 721 | */ |
| 721 | #define IBM_ARCH_VEC_NRCORES_OFFSET 125 | 722 | #define IBM_ARCH_VEC_NRCORES_OFFSET 133 |
| 722 | W(NR_CPUS), /* number of cores supported */ | 723 | W(NR_CPUS), /* number of cores supported */ |
| 723 | 0, | 724 | 0, |
| 724 | 0, | 725 | 0, |
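A short note on the offset bump above: each W() entry in ibm_architecture_vec emits a 4-byte word, and the new POWER8NVL line adds a PVR mask/value pair (two words) ahead of the NR_CPUS field, so IBM_ARCH_VEC_NRCORES_OFFSET moves by 8 bytes. A trivial compile-time restatement of that arithmetic, outside the kernel:

    /* sanity check of the offset arithmetic only; not kernel code */
    _Static_assert(125 + 2 * 4 == 133,
                   "POWER8NVL mask/value pair shifts NR_CPUS by 8 bytes");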
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 30a03c03fe73..060b140f03c6 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
| @@ -377,7 +377,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset, | |||
| 377 | 377 | ||
| 378 | #else | 378 | #else |
| 379 | BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != | 379 | BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != |
| 380 | offsetof(struct thread_fp_state, fpr[32][0])); | 380 | offsetof(struct thread_fp_state, fpr[32])); |
| 381 | 381 | ||
| 382 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 382 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
| 383 | &target->thread.fp_state, 0, -1); | 383 | &target->thread.fp_state, 0, -1); |
| @@ -405,7 +405,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, | |||
| 405 | return 0; | 405 | return 0; |
| 406 | #else | 406 | #else |
| 407 | BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != | 407 | BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != |
| 408 | offsetof(struct thread_fp_state, fpr[32][0])); | 408 | offsetof(struct thread_fp_state, fpr[32])); |
| 409 | 409 | ||
| 410 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 410 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
| 411 | &target->thread.fp_state, 0, -1); | 411 | &target->thread.fp_state, 0, -1); |
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index d873f6507f72..f8a871a72985 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c | |||
| @@ -316,8 +316,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, | |||
| 316 | DBG_LOW(" -> hit\n"); | 316 | DBG_LOW(" -> hit\n"); |
| 317 | /* Update the HPTE */ | 317 | /* Update the HPTE */ |
| 318 | hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & | 318 | hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & |
| 319 | ~(HPTE_R_PP | HPTE_R_N)) | | 319 | ~(HPTE_R_PPP | HPTE_R_N)) | |
| 320 | (newpp & (HPTE_R_PP | HPTE_R_N | | 320 | (newpp & (HPTE_R_PPP | HPTE_R_N | |
| 321 | HPTE_R_C))); | 321 | HPTE_R_C))); |
| 322 | } | 322 | } |
| 323 | native_unlock_hpte(hptep); | 323 | native_unlock_hpte(hptep); |
| @@ -385,8 +385,8 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, | |||
| 385 | 385 | ||
| 386 | /* Update the HPTE */ | 386 | /* Update the HPTE */ |
| 387 | hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & | 387 | hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & |
| 388 | ~(HPTE_R_PP | HPTE_R_N)) | | 388 | ~(HPTE_R_PPP | HPTE_R_N)) | |
| 389 | (newpp & (HPTE_R_PP | HPTE_R_N))); | 389 | (newpp & (HPTE_R_PPP | HPTE_R_N))); |
| 390 | /* | 390 | /* |
| 391 | * Ensure it is out of the tlb too. Bolted entries base and | 391 | * Ensure it is out of the tlb too. Bolted entries base and |
| 392 | * actual page size will be same. | 392 | * actual page size will be same. |
| @@ -550,7 +550,11 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot, | |||
| 550 | } | 550 | } |
| 551 | } | 551 | } |
| 552 | /* This works for all page sizes, and for 256M and 1T segments */ | 552 | /* This works for all page sizes, and for 256M and 1T segments */ |
| 553 | *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT; | 553 | if (cpu_has_feature(CPU_FTR_ARCH_300)) |
| 554 | *ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT; | ||
| 555 | else | ||
| 556 | *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT; | ||
| 557 | |||
| 554 | shift = mmu_psize_defs[size].shift; | 558 | shift = mmu_psize_defs[size].shift; |
| 555 | 559 | ||
| 556 | avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm); | 560 | avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm); |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 59268969a0bc..5b22ba0b58bc 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
| @@ -159,6 +159,19 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = { | |||
| 159 | }, | 159 | }, |
| 160 | }; | 160 | }; |
| 161 | 161 | ||
| 162 | /* | ||
| 163 | * 'R' and 'C' update notes: | ||
| 164 | * - Under pHyp or KVM, the updatepp path will not set C, thus it *will* | ||
| 165 | * create writeable HPTEs without C set, because the hcall H_PROTECT | ||
| 166 | * that we use in that case will not update C | ||
| 167 | * - The above is however not a problem, because we also don't do that | ||
| 168 | * fancy "no flush" variant of eviction and we use H_REMOVE which will | ||
| 169 | * do the right thing and thus we don't have the race I described earlier | ||
| 170 | * | ||
| 171 | * - Under bare metal, we do have the race, so we need R and C set | ||
| 172 | * - We make sure R is always set and never lost | ||
| 173 | * - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping | ||
| 174 | */ | ||
| 162 | unsigned long htab_convert_pte_flags(unsigned long pteflags) | 175 | unsigned long htab_convert_pte_flags(unsigned long pteflags) |
| 163 | { | 176 | { |
| 164 | unsigned long rflags = 0; | 177 | unsigned long rflags = 0; |
| @@ -186,19 +199,28 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags) | |||
| 186 | rflags |= 0x1; | 199 | rflags |= 0x1; |
| 187 | } | 200 | } |
| 188 | /* | 201 | /* |
| 189 | * Always add "C" bit for perf. Memory coherence is always enabled | 202 | * We can't allow hardware to update hpte bits. Hence always |
| 203 | * set 'R' bit and set 'C' if it is a write fault | ||
| 190 | */ | 204 | */ |
| 191 | rflags |= HPTE_R_C | HPTE_R_M; | 205 | rflags |= HPTE_R_R; |
| 206 | |||
| 207 | if (pteflags & _PAGE_DIRTY) | ||
| 208 | rflags |= HPTE_R_C; | ||
| 192 | /* | 209 | /* |
| 193 | * Add in WIG bits | 210 | * Add in WIG bits |
| 194 | */ | 211 | */ |
| 195 | 212 | ||
| 196 | if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) | 213 | if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) |
| 197 | rflags |= HPTE_R_I; | 214 | rflags |= HPTE_R_I; |
| 198 | if ((pteflags & _PAGE_CACHE_CTL ) == _PAGE_NON_IDEMPOTENT) | 215 | else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT) |
| 199 | rflags |= (HPTE_R_I | HPTE_R_G); | 216 | rflags |= (HPTE_R_I | HPTE_R_G); |
| 200 | if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO) | 217 | else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO) |
| 201 | rflags |= (HPTE_R_I | HPTE_R_W); | 218 | rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M); |
| 219 | else | ||
| 220 | /* | ||
| 221 | * Add memory coherence if cache inhibited is not set | ||
| 222 | */ | ||
| 223 | rflags |= HPTE_R_M; | ||
| 202 | 224 | ||
| 203 | return rflags; | 225 | return rflags; |
| 204 | } | 226 | } |
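To make the R/C change in htab_convert_pte_flags() concrete, here is a stand-alone sketch of what the new logic produces for an ordinary cacheable mapping. The HPTE_R_R and HPTE_R_C values below are illustrative placeholders; only HPTE_R_M (0x10) is taken from the mmu-hash.h hunk earlier in this diff.

    #include <stdio.h>

    #define HPTE_R_R 0x100UL    /* illustrative value */
    #define HPTE_R_C 0x080UL    /* illustrative value */
    #define HPTE_R_M 0x010UL    /* value shown in the mmu-hash.h hunk */

    /* new behaviour: R always set, C only when the Linux PTE is dirty */
    static unsigned long rc_bits(int pte_dirty)
    {
            unsigned long rflags = HPTE_R_R;

            if (pte_dirty)
                    rflags |= HPTE_R_C;
            return rflags | HPTE_R_M;   /* normal memory: add coherence */
    }

    int main(void)
    {
            printf("clean %#lx, dirty %#lx\n", rc_bits(0), rc_bits(1));
            return 0;
    }

For comparison, the old code returned HPTE_R_C | HPTE_R_M in both cases, i.e. it set the changed bit even for clean pages and never set R on this path.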
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 5aac1a3f86cd..119d18611500 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
| @@ -73,7 +73,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, | |||
| 73 | cachep = PGT_CACHE(pdshift - pshift); | 73 | cachep = PGT_CACHE(pdshift - pshift); |
| 74 | #endif | 74 | #endif |
| 75 | 75 | ||
| 76 | new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT); | 76 | new = kmem_cache_zalloc(cachep, GFP_KERNEL); |
| 77 | 77 | ||
| 78 | BUG_ON(pshift > HUGEPD_SHIFT_MASK); | 78 | BUG_ON(pshift > HUGEPD_SHIFT_MASK); |
| 79 | BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK); | 79 | BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK); |
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c index 227b2a6c4544..196222227e82 100644 --- a/arch/powerpc/mm/mmu_context_book3s64.c +++ b/arch/powerpc/mm/mmu_context_book3s64.c | |||
| @@ -65,7 +65,7 @@ static int radix__init_new_context(struct mm_struct *mm, int index) | |||
| 65 | /* | 65 | /* |
| 66 | * set the process table entry, | 66 | * set the process table entry, |
| 67 | */ | 67 | */ |
| 68 | rts_field = 3ull << PPC_BITLSHIFT(2); | 68 | rts_field = radix__get_tree_size(); |
| 69 | process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE); | 69 | process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE); |
| 70 | return 0; | 70 | return 0; |
| 71 | } | 71 | } |
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index eb4451144746..670318766545 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c | |||
| @@ -33,10 +33,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, | |||
| 33 | changed = !pmd_same(*(pmdp), entry); | 33 | changed = !pmd_same(*(pmdp), entry); |
| 34 | if (changed) { | 34 | if (changed) { |
| 35 | __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry)); | 35 | __ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry)); |
| 36 | /* | 36 | flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); |
| 37 | * Since we are not supporting SW TLB systems, we don't | ||
| 38 | * have any thing similar to flush_tlb_page_nohash() | ||
| 39 | */ | ||
| 40 | } | 37 | } |
| 41 | return changed; | 38 | return changed; |
| 42 | } | 39 | } |
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index 18b2c11604fa..e58707deef5c 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c | |||
| @@ -160,9 +160,8 @@ redo: | |||
| 160 | process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT); | 160 | process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT); |
| 161 | /* | 161 | /* |
| 162 | * Fill in the process table. | 162 | * Fill in the process table. |
| 163 | * we support 52 bits, hence 52-28 = 24, 11000 | ||
| 164 | */ | 163 | */ |
| 165 | rts_field = 3ull << PPC_BITLSHIFT(2); | 164 | rts_field = radix__get_tree_size(); |
| 166 | process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE); | 165 | process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE); |
| 167 | /* | 166 | /* |
| 168 | * Fill in the partition table. We are supposed to use effective address | 167 | * Fill in the partition table. We are supposed to use effective address |
| @@ -176,10 +175,8 @@ redo: | |||
| 176 | static void __init radix_init_partition_table(void) | 175 | static void __init radix_init_partition_table(void) |
| 177 | { | 176 | { |
| 178 | unsigned long rts_field; | 177 | unsigned long rts_field; |
| 179 | /* | 178 | |
| 180 | * we support 52 bits, hence 52-28 = 24, 11000 | 179 | rts_field = radix__get_tree_size(); |
| 181 | */ | ||
| 182 | rts_field = 3ull << PPC_BITLSHIFT(2); | ||
| 183 | 180 | ||
| 184 | BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large."); | 181 | BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large."); |
| 185 | partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT); | 182 | partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT); |
| @@ -296,11 +293,6 @@ found: | |||
| 296 | void __init radix__early_init_mmu(void) | 293 | void __init radix__early_init_mmu(void) |
| 297 | { | 294 | { |
| 298 | unsigned long lpcr; | 295 | unsigned long lpcr; |
| 299 | /* | ||
| 300 | * setup LPCR UPRT based on mmu_features | ||
| 301 | */ | ||
| 302 | lpcr = mfspr(SPRN_LPCR); | ||
| 303 | mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); | ||
| 304 | 296 | ||
| 305 | #ifdef CONFIG_PPC_64K_PAGES | 297 | #ifdef CONFIG_PPC_64K_PAGES |
| 306 | /* PAGE_SIZE mappings */ | 298 | /* PAGE_SIZE mappings */ |
| @@ -343,8 +335,11 @@ void __init radix__early_init_mmu(void) | |||
| 343 | __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT; | 335 | __pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT; |
| 344 | 336 | ||
| 345 | radix_init_page_sizes(); | 337 | radix_init_page_sizes(); |
| 346 | if (!firmware_has_feature(FW_FEATURE_LPAR)) | 338 | if (!firmware_has_feature(FW_FEATURE_LPAR)) { |
| 339 | lpcr = mfspr(SPRN_LPCR); | ||
| 340 | mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); | ||
| 347 | radix_init_partition_table(); | 341 | radix_init_partition_table(); |
| 342 | } | ||
| 348 | 343 | ||
| 349 | radix_init_pgtable(); | 344 | radix_init_pgtable(); |
| 350 | } | 345 | } |
| @@ -353,16 +348,15 @@ void radix__early_init_mmu_secondary(void) | |||
| 353 | { | 348 | { |
| 354 | unsigned long lpcr; | 349 | unsigned long lpcr; |
| 355 | /* | 350 | /* |
| 356 | * setup LPCR UPRT based on mmu_features | 351 | * update partition table control register and UPRT |
| 357 | */ | ||
| 358 | lpcr = mfspr(SPRN_LPCR); | ||
| 359 | mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); | ||
| 360 | /* | ||
| 361 | * update partition table control register, 64 K size. | ||
| 362 | */ | 352 | */ |
| 363 | if (!firmware_has_feature(FW_FEATURE_LPAR)) | 353 | if (!firmware_has_feature(FW_FEATURE_LPAR)) { |
| 354 | lpcr = mfspr(SPRN_LPCR); | ||
| 355 | mtspr(SPRN_LPCR, lpcr | LPCR_UPRT); | ||
| 356 | |||
| 364 | mtspr(SPRN_PTCR, | 357 | mtspr(SPRN_PTCR, |
| 365 | __pa(partition_tb) | (PATB_SIZE_SHIFT - 12)); | 358 | __pa(partition_tb) | (PATB_SIZE_SHIFT - 12)); |
| 359 | } | ||
| 366 | } | 360 | } |
| 367 | 361 | ||
| 368 | void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, | 362 | void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, |
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index bf7bf32b54f8..7f922f557936 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c | |||
| @@ -84,7 +84,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long add | |||
| 84 | pte_t *pte; | 84 | pte_t *pte; |
| 85 | 85 | ||
| 86 | if (slab_is_available()) { | 86 | if (slab_is_available()) { |
| 87 | pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); | 87 | pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); |
| 88 | } else { | 88 | } else { |
| 89 | pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); | 89 | pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); |
| 90 | if (pte) | 90 | if (pte) |
| @@ -97,7 +97,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) | |||
| 97 | { | 97 | { |
| 98 | struct page *ptepage; | 98 | struct page *ptepage; |
| 99 | 99 | ||
| 100 | gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO; | 100 | gfp_t flags = GFP_KERNEL | __GFP_ZERO; |
| 101 | 101 | ||
| 102 | ptepage = alloc_pages(flags, 0); | 102 | ptepage = alloc_pages(flags, 0); |
| 103 | if (!ptepage) | 103 | if (!ptepage) |
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index e009e0604a8a..f5e8d4edb808 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c | |||
| @@ -350,8 +350,7 @@ static pte_t *get_from_cache(struct mm_struct *mm) | |||
| 350 | static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel) | 350 | static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel) |
| 351 | { | 351 | { |
| 352 | void *ret = NULL; | 352 | void *ret = NULL; |
| 353 | struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | | 353 | struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); |
| 354 | __GFP_REPEAT | __GFP_ZERO); | ||
| 355 | if (!page) | 354 | if (!page) |
| 356 | return NULL; | 355 | return NULL; |
| 357 | if (!kernel && !pgtable_page_ctor(page)) { | 356 | if (!kernel && !pgtable_page_ctor(page)) { |
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index 0fdaf93a3e09..ab2f60e812e2 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c | |||
| @@ -18,16 +18,20 @@ | |||
| 18 | 18 | ||
| 19 | static DEFINE_RAW_SPINLOCK(native_tlbie_lock); | 19 | static DEFINE_RAW_SPINLOCK(native_tlbie_lock); |
| 20 | 20 | ||
| 21 | static inline void __tlbiel_pid(unsigned long pid, int set) | 21 | #define RIC_FLUSH_TLB 0 |
| 22 | #define RIC_FLUSH_PWC 1 | ||
| 23 | #define RIC_FLUSH_ALL 2 | ||
| 24 | |||
| 25 | static inline void __tlbiel_pid(unsigned long pid, int set, | ||
| 26 | unsigned long ric) | ||
| 22 | { | 27 | { |
| 23 | unsigned long rb,rs,ric,prs,r; | 28 | unsigned long rb,rs,prs,r; |
| 24 | 29 | ||
| 25 | rb = PPC_BIT(53); /* IS = 1 */ | 30 | rb = PPC_BIT(53); /* IS = 1 */ |
| 26 | rb |= set << PPC_BITLSHIFT(51); | 31 | rb |= set << PPC_BITLSHIFT(51); |
| 27 | rs = ((unsigned long)pid) << PPC_BITLSHIFT(31); | 32 | rs = ((unsigned long)pid) << PPC_BITLSHIFT(31); |
| 28 | prs = 1; /* process scoped */ | 33 | prs = 1; /* process scoped */ |
| 29 | r = 1; /* radix format */ | 34 | r = 1; /* radix format */ |
| 30 | ric = 2; /* invalidate all the caches */ | ||
| 31 | 35 | ||
| 32 | asm volatile("ptesync": : :"memory"); | 36 | asm volatile("ptesync": : :"memory"); |
| 33 | asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |" | 37 | asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |" |
| @@ -39,25 +43,24 @@ static inline void __tlbiel_pid(unsigned long pid, int set) | |||
| 39 | /* | 43 | /* |
| 40 | * We use 128 set in radix mode and 256 set in hpt mode. | 44 | * We use 128 set in radix mode and 256 set in hpt mode. |
| 41 | */ | 45 | */ |
| 42 | static inline void _tlbiel_pid(unsigned long pid) | 46 | static inline void _tlbiel_pid(unsigned long pid, unsigned long ric) |
| 43 | { | 47 | { |
| 44 | int set; | 48 | int set; |
| 45 | 49 | ||
| 46 | for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { | 50 | for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { |
| 47 | __tlbiel_pid(pid, set); | 51 | __tlbiel_pid(pid, set, ric); |
| 48 | } | 52 | } |
| 49 | return; | 53 | return; |
| 50 | } | 54 | } |
| 51 | 55 | ||
| 52 | static inline void _tlbie_pid(unsigned long pid) | 56 | static inline void _tlbie_pid(unsigned long pid, unsigned long ric) |
| 53 | { | 57 | { |
| 54 | unsigned long rb,rs,ric,prs,r; | 58 | unsigned long rb,rs,prs,r; |
| 55 | 59 | ||
| 56 | rb = PPC_BIT(53); /* IS = 1 */ | 60 | rb = PPC_BIT(53); /* IS = 1 */ |
| 57 | rs = pid << PPC_BITLSHIFT(31); | 61 | rs = pid << PPC_BITLSHIFT(31); |
| 58 | prs = 1; /* process scoped */ | 62 | prs = 1; /* process scoped */ |
| 59 | r = 1; /* radix format */ | 63 | r = 1; /* radix format */ |
| 60 | ric = 2; /* invalidate all the caches */ | ||
| 61 | 64 | ||
| 62 | asm volatile("ptesync": : :"memory"); | 65 | asm volatile("ptesync": : :"memory"); |
| 63 | asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |" | 66 | asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |" |
| @@ -67,16 +70,15 @@ static inline void _tlbie_pid(unsigned long pid) | |||
| 67 | } | 70 | } |
| 68 | 71 | ||
| 69 | static inline void _tlbiel_va(unsigned long va, unsigned long pid, | 72 | static inline void _tlbiel_va(unsigned long va, unsigned long pid, |
| 70 | unsigned long ap) | 73 | unsigned long ap, unsigned long ric) |
| 71 | { | 74 | { |
| 72 | unsigned long rb,rs,ric,prs,r; | 75 | unsigned long rb,rs,prs,r; |
| 73 | 76 | ||
| 74 | rb = va & ~(PPC_BITMASK(52, 63)); | 77 | rb = va & ~(PPC_BITMASK(52, 63)); |
| 75 | rb |= ap << PPC_BITLSHIFT(58); | 78 | rb |= ap << PPC_BITLSHIFT(58); |
| 76 | rs = pid << PPC_BITLSHIFT(31); | 79 | rs = pid << PPC_BITLSHIFT(31); |
| 77 | prs = 1; /* process scoped */ | 80 | prs = 1; /* process scoped */ |
| 78 | r = 1; /* radix format */ | 81 | r = 1; /* radix format */ |
| 79 | ric = 0; /* no cluster flush yet */ | ||
| 80 | 82 | ||
| 81 | asm volatile("ptesync": : :"memory"); | 83 | asm volatile("ptesync": : :"memory"); |
| 82 | asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |" | 84 | asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |" |
| @@ -86,16 +88,15 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid, | |||
| 86 | } | 88 | } |
| 87 | 89 | ||
| 88 | static inline void _tlbie_va(unsigned long va, unsigned long pid, | 90 | static inline void _tlbie_va(unsigned long va, unsigned long pid, |
| 89 | unsigned long ap) | 91 | unsigned long ap, unsigned long ric) |
| 90 | { | 92 | { |
| 91 | unsigned long rb,rs,ric,prs,r; | 93 | unsigned long rb,rs,prs,r; |
| 92 | 94 | ||
| 93 | rb = va & ~(PPC_BITMASK(52, 63)); | 95 | rb = va & ~(PPC_BITMASK(52, 63)); |
| 94 | rb |= ap << PPC_BITLSHIFT(58); | 96 | rb |= ap << PPC_BITLSHIFT(58); |
| 95 | rs = pid << PPC_BITLSHIFT(31); | 97 | rs = pid << PPC_BITLSHIFT(31); |
| 96 | prs = 1; /* process scoped */ | 98 | prs = 1; /* process scoped */ |
| 97 | r = 1; /* radix format */ | 99 | r = 1; /* radix format */ |
| 98 | ric = 0; /* no cluster flush yet */ | ||
| 99 | 100 | ||
| 100 | asm volatile("ptesync": : :"memory"); | 101 | asm volatile("ptesync": : :"memory"); |
| 101 | asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |" | 102 | asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |" |
| @@ -117,25 +118,40 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid, | |||
| 117 | */ | 118 | */ |
| 118 | void radix__local_flush_tlb_mm(struct mm_struct *mm) | 119 | void radix__local_flush_tlb_mm(struct mm_struct *mm) |
| 119 | { | 120 | { |
| 120 | unsigned int pid; | 121 | unsigned long pid; |
| 121 | 122 | ||
| 122 | preempt_disable(); | 123 | preempt_disable(); |
| 123 | pid = mm->context.id; | 124 | pid = mm->context.id; |
| 124 | if (pid != MMU_NO_CONTEXT) | 125 | if (pid != MMU_NO_CONTEXT) |
| 125 | _tlbiel_pid(pid); | 126 | _tlbiel_pid(pid, RIC_FLUSH_ALL); |
| 126 | preempt_enable(); | 127 | preempt_enable(); |
| 127 | } | 128 | } |
| 128 | EXPORT_SYMBOL(radix__local_flush_tlb_mm); | 129 | EXPORT_SYMBOL(radix__local_flush_tlb_mm); |
| 129 | 130 | ||
| 131 | void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr) | ||
| 132 | { | ||
| 133 | unsigned long pid; | ||
| 134 | struct mm_struct *mm = tlb->mm; | ||
| 135 | |||
| 136 | preempt_disable(); | ||
| 137 | |||
| 138 | pid = mm->context.id; | ||
| 139 | if (pid != MMU_NO_CONTEXT) | ||
| 140 | _tlbiel_pid(pid, RIC_FLUSH_PWC); | ||
| 141 | |||
| 142 | preempt_enable(); | ||
| 143 | } | ||
| 144 | EXPORT_SYMBOL(radix__local_flush_tlb_pwc); | ||
| 145 | |||
| 130 | void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, | 146 | void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, |
| 131 | unsigned long ap, int nid) | 147 | unsigned long ap, int nid) |
| 132 | { | 148 | { |
| 133 | unsigned int pid; | 149 | unsigned long pid; |
| 134 | 150 | ||
| 135 | preempt_disable(); | 151 | preempt_disable(); |
| 136 | pid = mm ? mm->context.id : 0; | 152 | pid = mm ? mm->context.id : 0; |
| 137 | if (pid != MMU_NO_CONTEXT) | 153 | if (pid != MMU_NO_CONTEXT) |
| 138 | _tlbiel_va(vmaddr, pid, ap); | 154 | _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB); |
| 139 | preempt_enable(); | 155 | preempt_enable(); |
| 140 | } | 156 | } |
| 141 | 157 | ||
| @@ -160,7 +176,7 @@ static int mm_is_core_local(struct mm_struct *mm) | |||
| 160 | 176 | ||
| 161 | void radix__flush_tlb_mm(struct mm_struct *mm) | 177 | void radix__flush_tlb_mm(struct mm_struct *mm) |
| 162 | { | 178 | { |
| 163 | unsigned int pid; | 179 | unsigned long pid; |
| 164 | 180 | ||
| 165 | preempt_disable(); | 181 | preempt_disable(); |
| 166 | pid = mm->context.id; | 182 | pid = mm->context.id; |
| @@ -172,20 +188,46 @@ void radix__flush_tlb_mm(struct mm_struct *mm) | |||
| 172 | 188 | ||
| 173 | if (lock_tlbie) | 189 | if (lock_tlbie) |
| 174 | raw_spin_lock(&native_tlbie_lock); | 190 | raw_spin_lock(&native_tlbie_lock); |
| 175 | _tlbie_pid(pid); | 191 | _tlbie_pid(pid, RIC_FLUSH_ALL); |
| 176 | if (lock_tlbie) | 192 | if (lock_tlbie) |
| 177 | raw_spin_unlock(&native_tlbie_lock); | 193 | raw_spin_unlock(&native_tlbie_lock); |
| 178 | } else | 194 | } else |
| 179 | _tlbiel_pid(pid); | 195 | _tlbiel_pid(pid, RIC_FLUSH_ALL); |
| 180 | no_context: | 196 | no_context: |
| 181 | preempt_enable(); | 197 | preempt_enable(); |
| 182 | } | 198 | } |
| 183 | EXPORT_SYMBOL(radix__flush_tlb_mm); | 199 | EXPORT_SYMBOL(radix__flush_tlb_mm); |
| 184 | 200 | ||
| 201 | void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr) | ||
| 202 | { | ||
| 203 | unsigned long pid; | ||
| 204 | struct mm_struct *mm = tlb->mm; | ||
| 205 | |||
| 206 | preempt_disable(); | ||
| 207 | |||
| 208 | pid = mm->context.id; | ||
| 209 | if (unlikely(pid == MMU_NO_CONTEXT)) | ||
| 210 | goto no_context; | ||
| 211 | |||
| 212 | if (!mm_is_core_local(mm)) { | ||
| 213 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); | ||
| 214 | |||
| 215 | if (lock_tlbie) | ||
| 216 | raw_spin_lock(&native_tlbie_lock); | ||
| 217 | _tlbie_pid(pid, RIC_FLUSH_PWC); | ||
| 218 | if (lock_tlbie) | ||
| 219 | raw_spin_unlock(&native_tlbie_lock); | ||
| 220 | } else | ||
| 221 | _tlbiel_pid(pid, RIC_FLUSH_PWC); | ||
| 222 | no_context: | ||
| 223 | preempt_enable(); | ||
| 224 | } | ||
| 225 | EXPORT_SYMBOL(radix__flush_tlb_pwc); | ||
| 226 | |||
| 185 | void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, | 227 | void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, |
| 186 | unsigned long ap, int nid) | 228 | unsigned long ap, int nid) |
| 187 | { | 229 | { |
| 188 | unsigned int pid; | 230 | unsigned long pid; |
| 189 | 231 | ||
| 190 | preempt_disable(); | 232 | preempt_disable(); |
| 191 | pid = mm ? mm->context.id : 0; | 233 | pid = mm ? mm->context.id : 0; |
| @@ -196,11 +238,11 @@ void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, | |||
| 196 | 238 | ||
| 197 | if (lock_tlbie) | 239 | if (lock_tlbie) |
| 198 | raw_spin_lock(&native_tlbie_lock); | 240 | raw_spin_lock(&native_tlbie_lock); |
| 199 | _tlbie_va(vmaddr, pid, ap); | 241 | _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB); |
| 200 | if (lock_tlbie) | 242 | if (lock_tlbie) |
| 201 | raw_spin_unlock(&native_tlbie_lock); | 243 | raw_spin_unlock(&native_tlbie_lock); |
| 202 | } else | 244 | } else |
| 203 | _tlbiel_va(vmaddr, pid, ap); | 245 | _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB); |
| 204 | bail: | 246 | bail: |
| 205 | preempt_enable(); | 247 | preempt_enable(); |
| 206 | } | 248 | } |
| @@ -224,7 +266,7 @@ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
| 224 | 266 | ||
| 225 | if (lock_tlbie) | 267 | if (lock_tlbie) |
| 226 | raw_spin_lock(&native_tlbie_lock); | 268 | raw_spin_lock(&native_tlbie_lock); |
| 227 | _tlbie_pid(0); | 269 | _tlbie_pid(0, RIC_FLUSH_ALL); |
| 228 | if (lock_tlbie) | 270 | if (lock_tlbie) |
| 229 | raw_spin_unlock(&native_tlbie_lock); | 271 | raw_spin_unlock(&native_tlbie_lock); |
| 230 | } | 272 | } |
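For reference, the new RIC argument threads through the flush helpers as follows; this is a reading aid summarised from the callers in the hunk above, not new kernel code, and the one-line glosses of each RIC value are inferred from the names and the old comments:

    /*
     * RIC_FLUSH_TLB (0): TLB entries only
     *     radix___local_flush_tlb_page() / radix___flush_tlb_page()
     * RIC_FLUSH_PWC (1): page-walk cache only
     *     radix__local_flush_tlb_pwc() / radix__flush_tlb_pwc(),
     *     reached from flush_tlb_pgtable() when a page-table page is freed
     * RIC_FLUSH_ALL (2): TLB and page-walk cache
     *     radix__local_flush_tlb_mm() / radix__flush_tlb_mm() and
     *     radix__flush_tlb_kernel_range()
     */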
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c index c50ea76ba66c..6081fbd75330 100644 --- a/arch/powerpc/platforms/512x/clock-commonclk.c +++ b/arch/powerpc/platforms/512x/clock-commonclk.c | |||
| @@ -221,7 +221,7 @@ static bool soc_has_mclk_mux0_canin(void) | |||
| 221 | /* convenience wrappers around the common clk API */ | 221 | /* convenience wrappers around the common clk API */ |
| 222 | static inline struct clk *mpc512x_clk_fixed(const char *name, int rate) | 222 | static inline struct clk *mpc512x_clk_fixed(const char *name, int rate) |
| 223 | { | 223 | { |
| 224 | return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate); | 224 | return clk_register_fixed_rate(NULL, name, NULL, 0, rate); |
| 225 | } | 225 | } |
| 226 | 226 | ||
| 227 | static inline struct clk *mpc512x_clk_factor( | 227 | static inline struct clk *mpc512x_clk_factor( |
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c index 84fb984f29c1..85c85eb3e245 100644 --- a/arch/powerpc/platforms/cell/spufs/coredump.c +++ b/arch/powerpc/platforms/cell/spufs/coredump.c | |||
| @@ -172,7 +172,7 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i, | |||
| 172 | if (rc < 0) | 172 | if (rc < 0) |
| 173 | goto out; | 173 | goto out; |
| 174 | 174 | ||
| 175 | skip = roundup(cprm->file->f_pos - total + sz, 4) - cprm->file->f_pos; | 175 | skip = roundup(cprm->pos - total + sz, 4) - cprm->pos; |
| 176 | if (!dump_skip(cprm, skip)) | 176 | if (!dump_skip(cprm, skip)) |
| 177 | goto Eio; | 177 | goto Eio; |
| 178 | out: | 178 | out: |
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index ac3ffd97e059..3998e0f9a03b 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c | |||
| @@ -53,7 +53,6 @@ static int ibm_read_slot_reset_state2; | |||
| 53 | static int ibm_slot_error_detail; | 53 | static int ibm_slot_error_detail; |
| 54 | static int ibm_get_config_addr_info; | 54 | static int ibm_get_config_addr_info; |
| 55 | static int ibm_get_config_addr_info2; | 55 | static int ibm_get_config_addr_info2; |
| 56 | static int ibm_configure_bridge; | ||
| 57 | static int ibm_configure_pe; | 56 | static int ibm_configure_pe; |
| 58 | 57 | ||
| 59 | /* | 58 | /* |
| @@ -81,7 +80,14 @@ static int pseries_eeh_init(void) | |||
| 81 | ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); | 80 | ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); |
| 82 | ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); | 81 | ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); |
| 83 | ibm_configure_pe = rtas_token("ibm,configure-pe"); | 82 | ibm_configure_pe = rtas_token("ibm,configure-pe"); |
| 84 | ibm_configure_bridge = rtas_token("ibm,configure-bridge"); | 83 | |
| 84 | /* | ||
| 85 | * ibm,configure-pe and ibm,configure-bridge have the same semantics, | ||
| 86 | * however ibm,configure-pe can be faster. If we can't find | ||
| 87 | * ibm,configure-pe then fall back to using ibm,configure-bridge. | ||
| 88 | */ | ||
| 89 | if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE) | ||
| 90 | ibm_configure_pe = rtas_token("ibm,configure-bridge"); | ||
| 85 | 91 | ||
| 86 | /* | 92 | /* |
| 87 | * Necessary sanity check. We needn't check "get-config-addr-info" | 93 | * Necessary sanity check. We needn't check "get-config-addr-info" |
| @@ -93,8 +99,7 @@ static int pseries_eeh_init(void) | |||
| 93 | (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE && | 99 | (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE && |
| 94 | ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) || | 100 | ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) || |
| 95 | ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE || | 101 | ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE || |
| 96 | (ibm_configure_pe == RTAS_UNKNOWN_SERVICE && | 102 | ibm_configure_pe == RTAS_UNKNOWN_SERVICE) { |
| 97 | ibm_configure_bridge == RTAS_UNKNOWN_SERVICE)) { | ||
| 98 | pr_info("EEH functionality not supported\n"); | 103 | pr_info("EEH functionality not supported\n"); |
| 99 | return -EINVAL; | 104 | return -EINVAL; |
| 100 | } | 105 | } |
| @@ -615,29 +620,41 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe) | |||
| 615 | { | 620 | { |
| 616 | int config_addr; | 621 | int config_addr; |
| 617 | int ret; | 622 | int ret; |
| 623 | /* Waiting 0.2s maximum before skipping configuration */ | ||
| 624 | int max_wait = 200; | ||
| 618 | 625 | ||
| 619 | /* Figure out the PE address */ | 626 | /* Figure out the PE address */ |
| 620 | config_addr = pe->config_addr; | 627 | config_addr = pe->config_addr; |
| 621 | if (pe->addr) | 628 | if (pe->addr) |
| 622 | config_addr = pe->addr; | 629 | config_addr = pe->addr; |
| 623 | 630 | ||
| 624 | /* Use new configure-pe function, if supported */ | 631 | while (max_wait > 0) { |
| 625 | if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) { | ||
| 626 | ret = rtas_call(ibm_configure_pe, 3, 1, NULL, | 632 | ret = rtas_call(ibm_configure_pe, 3, 1, NULL, |
| 627 | config_addr, BUID_HI(pe->phb->buid), | 633 | config_addr, BUID_HI(pe->phb->buid), |
| 628 | BUID_LO(pe->phb->buid)); | 634 | BUID_LO(pe->phb->buid)); |
| 629 | } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) { | ||
| 630 | ret = rtas_call(ibm_configure_bridge, 3, 1, NULL, | ||
| 631 | config_addr, BUID_HI(pe->phb->buid), | ||
| 632 | BUID_LO(pe->phb->buid)); | ||
| 633 | } else { | ||
| 634 | return -EFAULT; | ||
| 635 | } | ||
| 636 | 635 | ||
| 637 | if (ret) | 636 | if (!ret) |
| 638 | pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n", | 637 | return ret; |
| 639 | __func__, pe->phb->global_number, pe->addr, ret); | 638 | |
| 639 | /* | ||
| 640 | * If RTAS returns a delay value that's above 100ms, cut it | ||
| 641 | * down to 100ms in case firmware made a mistake. For more | ||
| 642 | * on how these delay values work see rtas_busy_delay_time | ||
| 643 | */ | ||
| 644 | if (ret > RTAS_EXTENDED_DELAY_MIN+2 && | ||
| 645 | ret <= RTAS_EXTENDED_DELAY_MAX) | ||
| 646 | ret = RTAS_EXTENDED_DELAY_MIN+2; | ||
| 647 | |||
| 648 | max_wait -= rtas_busy_delay_time(ret); | ||
| 649 | |||
| 650 | if (max_wait < 0) | ||
| 651 | break; | ||
| 652 | |||
| 653 | rtas_busy_delay(ret); | ||
| 654 | } | ||
| 640 | 655 | ||
| 656 | pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n", | ||
| 657 | __func__, pe->phb->global_number, pe->addr, ret); | ||
| 641 | return ret; | 658 | return ret; |
| 642 | } | 659 | } |
| 643 | 660 | ||
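The retry loop above leans on the usual RTAS busy/extended-delay convention. Assuming RTAS_EXTENDED_DELAY_MIN is 9900 and rtas_busy_delay_time() translates status 9900+n into a wait of roughly 10^n ms (as in arch/powerpc/kernel/rtas.c), clamping the status to MIN+2 caps each sleep at 100 ms, which is why the 200 ms max_wait budget allows only a couple of retries. A small user-space sketch of that translation, under those assumptions:

    #include <stdio.h>

    /* mirrors the assumed extended-delay arithmetic: 9900+n -> 10^n ms */
    static unsigned int extended_delay_ms(int status)
    {
            unsigned int ms = 1;
            int order = status - 9900;      /* RTAS_EXTENDED_DELAY_MIN */

            while (order-- > 0)
                    ms *= 10;
            return ms;
    }

    int main(void)
    {
            printf("%u\n", extended_delay_ms(9902));    /* prints 100 */
            return 0;
    }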
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index b7dfc1359d01..3e8865b187de 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c | |||
| @@ -927,7 +927,7 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail, | |||
| 927 | dn = pci_device_to_OF_node(dev); | 927 | dn = pci_device_to_OF_node(dev); |
| 928 | pdn = PCI_DN(dn); | 928 | pdn = PCI_DN(dn); |
| 929 | buid = pdn->phb->buid; | 929 | buid = pdn->phb->buid; |
| 930 | cfg_addr = (pdn->busno << 8) | pdn->devfn; | 930 | cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8)); |
| 931 | 931 | ||
| 932 | ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query, | 932 | ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query, |
| 933 | cfg_addr, BUID_HI(buid), BUID_LO(buid)); | 933 | cfg_addr, BUID_HI(buid), BUID_LO(buid)); |
| @@ -956,7 +956,7 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail, | |||
| 956 | dn = pci_device_to_OF_node(dev); | 956 | dn = pci_device_to_OF_node(dev); |
| 957 | pdn = PCI_DN(dn); | 957 | pdn = PCI_DN(dn); |
| 958 | buid = pdn->phb->buid; | 958 | buid = pdn->phb->buid; |
| 959 | cfg_addr = (pdn->busno << 8) | pdn->devfn; | 959 | cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8)); |
| 960 | 960 | ||
| 961 | do { | 961 | do { |
| 962 | /* extra outputs are LIOBN and dma-addr (hi, lo) */ | 962 | /* extra outputs are LIOBN and dma-addr (hi, lo) */ |
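The query_ddw()/create_ddw() fix above moves the bus number and devfn into the byte lanes of the config address that the DDW RTAS calls expect: bus in bits 23:16, device/function in bits 15:8, register in bits 7:0. That layout description is background knowledge about the RTAS PCI config-address encoding, not something spelled out in this hunk. A worked example with illustrative values, bus 0x21 and devfn 0x48:

    /* illustrative values only */
    unsigned int old_addr = (0x21 << 8)  | 0x48;          /* 0x2148   */
    unsigned int new_addr = (0x21 << 16) | (0x48 << 8);   /* 0x214800 */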
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 0ac42cc4f880..d5ec71b2ed02 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig | |||
| @@ -1,8 +1,7 @@ | |||
| 1 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
| 2 | CONFIG_POSIX_MQUEUE=y | 2 | CONFIG_POSIX_MQUEUE=y |
| 3 | CONFIG_FHANDLE=y | ||
| 4 | CONFIG_AUDIT=y | 3 | CONFIG_AUDIT=y |
| 5 | CONFIG_NO_HZ=y | 4 | CONFIG_NO_HZ_IDLE=y |
| 6 | CONFIG_HIGH_RES_TIMERS=y | 5 | CONFIG_HIGH_RES_TIMERS=y |
| 7 | CONFIG_BSD_PROCESS_ACCT=y | 6 | CONFIG_BSD_PROCESS_ACCT=y |
| 8 | CONFIG_BSD_PROCESS_ACCT_V3=y | 7 | CONFIG_BSD_PROCESS_ACCT_V3=y |
| @@ -13,19 +12,19 @@ CONFIG_TASK_IO_ACCOUNTING=y | |||
| 13 | CONFIG_IKCONFIG=y | 12 | CONFIG_IKCONFIG=y |
| 14 | CONFIG_IKCONFIG_PROC=y | 13 | CONFIG_IKCONFIG_PROC=y |
| 15 | CONFIG_NUMA_BALANCING=y | 14 | CONFIG_NUMA_BALANCING=y |
| 16 | CONFIG_CGROUP_FREEZER=y | ||
| 17 | CONFIG_CGROUP_PIDS=y | ||
| 18 | CONFIG_CGROUP_DEVICE=y | ||
| 19 | CONFIG_CPUSETS=y | ||
| 20 | CONFIG_CGROUP_CPUACCT=y | ||
| 21 | CONFIG_MEMCG=y | 15 | CONFIG_MEMCG=y |
| 22 | CONFIG_MEMCG_SWAP=y | 16 | CONFIG_MEMCG_SWAP=y |
| 23 | CONFIG_MEMCG_KMEM=y | 17 | CONFIG_BLK_CGROUP=y |
| 24 | CONFIG_CGROUP_HUGETLB=y | ||
| 25 | CONFIG_CGROUP_PERF=y | ||
| 26 | CONFIG_CFS_BANDWIDTH=y | 18 | CONFIG_CFS_BANDWIDTH=y |
| 27 | CONFIG_RT_GROUP_SCHED=y | 19 | CONFIG_RT_GROUP_SCHED=y |
| 28 | CONFIG_BLK_CGROUP=y | 20 | CONFIG_CGROUP_PIDS=y |
| 21 | CONFIG_CGROUP_FREEZER=y | ||
| 22 | CONFIG_CGROUP_HUGETLB=y | ||
| 23 | CONFIG_CPUSETS=y | ||
| 24 | CONFIG_CGROUP_DEVICE=y | ||
| 25 | CONFIG_CGROUP_CPUACCT=y | ||
| 26 | CONFIG_CGROUP_PERF=y | ||
| 27 | CONFIG_CHECKPOINT_RESTORE=y | ||
| 29 | CONFIG_NAMESPACES=y | 28 | CONFIG_NAMESPACES=y |
| 30 | CONFIG_USER_NS=y | 29 | CONFIG_USER_NS=y |
| 31 | CONFIG_SCHED_AUTOGROUP=y | 30 | CONFIG_SCHED_AUTOGROUP=y |
| @@ -55,7 +54,6 @@ CONFIG_UNIXWARE_DISKLABEL=y | |||
| 55 | CONFIG_CFQ_GROUP_IOSCHED=y | 54 | CONFIG_CFQ_GROUP_IOSCHED=y |
| 56 | CONFIG_DEFAULT_DEADLINE=y | 55 | CONFIG_DEFAULT_DEADLINE=y |
| 57 | CONFIG_LIVEPATCH=y | 56 | CONFIG_LIVEPATCH=y |
| 58 | CONFIG_MARCH_Z196=y | ||
| 59 | CONFIG_TUNE_ZEC12=y | 57 | CONFIG_TUNE_ZEC12=y |
| 60 | CONFIG_NR_CPUS=256 | 58 | CONFIG_NR_CPUS=256 |
| 61 | CONFIG_NUMA=y | 59 | CONFIG_NUMA=y |
| @@ -65,6 +63,15 @@ CONFIG_MEMORY_HOTPLUG=y | |||
| 65 | CONFIG_MEMORY_HOTREMOVE=y | 63 | CONFIG_MEMORY_HOTREMOVE=y |
| 66 | CONFIG_KSM=y | 64 | CONFIG_KSM=y |
| 67 | CONFIG_TRANSPARENT_HUGEPAGE=y | 65 | CONFIG_TRANSPARENT_HUGEPAGE=y |
| 66 | CONFIG_CLEANCACHE=y | ||
| 67 | CONFIG_FRONTSWAP=y | ||
| 68 | CONFIG_CMA=y | ||
| 69 | CONFIG_MEM_SOFT_DIRTY=y | ||
| 70 | CONFIG_ZPOOL=m | ||
| 71 | CONFIG_ZBUD=m | ||
| 72 | CONFIG_ZSMALLOC=m | ||
| 73 | CONFIG_ZSMALLOC_STAT=y | ||
| 74 | CONFIG_IDLE_PAGE_TRACKING=y | ||
| 68 | CONFIG_PCI=y | 75 | CONFIG_PCI=y |
| 69 | CONFIG_PCI_DEBUG=y | 76 | CONFIG_PCI_DEBUG=y |
| 70 | CONFIG_HOTPLUG_PCI=y | 77 | CONFIG_HOTPLUG_PCI=y |
| @@ -452,6 +459,7 @@ CONFIG_HW_RANDOM_VIRTIO=m | |||
| 452 | CONFIG_RAW_DRIVER=m | 459 | CONFIG_RAW_DRIVER=m |
| 453 | CONFIG_HANGCHECK_TIMER=m | 460 | CONFIG_HANGCHECK_TIMER=m |
| 454 | CONFIG_TN3270_FS=y | 461 | CONFIG_TN3270_FS=y |
| 462 | # CONFIG_HWMON is not set | ||
| 455 | CONFIG_WATCHDOG=y | 463 | CONFIG_WATCHDOG=y |
| 456 | CONFIG_WATCHDOG_NOWAYOUT=y | 464 | CONFIG_WATCHDOG_NOWAYOUT=y |
| 457 | CONFIG_SOFT_WATCHDOG=m | 465 | CONFIG_SOFT_WATCHDOG=m |
| @@ -537,6 +545,8 @@ CONFIG_DLM=m | |||
| 537 | CONFIG_PRINTK_TIME=y | 545 | CONFIG_PRINTK_TIME=y |
| 538 | CONFIG_DYNAMIC_DEBUG=y | 546 | CONFIG_DYNAMIC_DEBUG=y |
| 539 | CONFIG_DEBUG_INFO=y | 547 | CONFIG_DEBUG_INFO=y |
| 548 | CONFIG_DEBUG_INFO_DWARF4=y | ||
| 549 | CONFIG_GDB_SCRIPTS=y | ||
| 540 | CONFIG_FRAME_WARN=1024 | 550 | CONFIG_FRAME_WARN=1024 |
| 541 | CONFIG_READABLE_ASM=y | 551 | CONFIG_READABLE_ASM=y |
| 542 | CONFIG_UNUSED_SYMBOLS=y | 552 | CONFIG_UNUSED_SYMBOLS=y |
| @@ -555,13 +565,17 @@ CONFIG_SLUB_DEBUG_ON=y | |||
| 555 | CONFIG_SLUB_STATS=y | 565 | CONFIG_SLUB_STATS=y |
| 556 | CONFIG_DEBUG_STACK_USAGE=y | 566 | CONFIG_DEBUG_STACK_USAGE=y |
| 557 | CONFIG_DEBUG_VM=y | 567 | CONFIG_DEBUG_VM=y |
| 568 | CONFIG_DEBUG_VM_VMACACHE=y | ||
| 558 | CONFIG_DEBUG_VM_RB=y | 569 | CONFIG_DEBUG_VM_RB=y |
| 570 | CONFIG_DEBUG_VM_PGFLAGS=y | ||
| 559 | CONFIG_DEBUG_MEMORY_INIT=y | 571 | CONFIG_DEBUG_MEMORY_INIT=y |
| 560 | CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m | 572 | CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m |
| 561 | CONFIG_DEBUG_PER_CPU_MAPS=y | 573 | CONFIG_DEBUG_PER_CPU_MAPS=y |
| 562 | CONFIG_DEBUG_SHIRQ=y | 574 | CONFIG_DEBUG_SHIRQ=y |
| 563 | CONFIG_DETECT_HUNG_TASK=y | 575 | CONFIG_DETECT_HUNG_TASK=y |
| 576 | CONFIG_WQ_WATCHDOG=y | ||
| 564 | CONFIG_PANIC_ON_OOPS=y | 577 | CONFIG_PANIC_ON_OOPS=y |
| 578 | CONFIG_DEBUG_TIMEKEEPING=y | ||
| 565 | CONFIG_TIMER_STATS=y | 579 | CONFIG_TIMER_STATS=y |
| 566 | CONFIG_DEBUG_RT_MUTEXES=y | 580 | CONFIG_DEBUG_RT_MUTEXES=y |
| 567 | CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y | 581 | CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y |
| @@ -596,6 +610,8 @@ CONFIG_FTRACE_SYSCALLS=y | |||
| 596 | CONFIG_STACK_TRACER=y | 610 | CONFIG_STACK_TRACER=y |
| 597 | CONFIG_BLK_DEV_IO_TRACE=y | 611 | CONFIG_BLK_DEV_IO_TRACE=y |
| 598 | CONFIG_UPROBE_EVENT=y | 612 | CONFIG_UPROBE_EVENT=y |
| 613 | CONFIG_FUNCTION_PROFILER=y | ||
| 614 | CONFIG_TRACE_ENUM_MAP_FILE=y | ||
| 599 | CONFIG_LKDTM=m | 615 | CONFIG_LKDTM=m |
| 600 | CONFIG_TEST_LIST_SORT=y | 616 | CONFIG_TEST_LIST_SORT=y |
| 601 | CONFIG_KPROBES_SANITY_TEST=y | 617 | CONFIG_KPROBES_SANITY_TEST=y |
| @@ -607,7 +623,6 @@ CONFIG_TEST_STRING_HELPERS=y | |||
| 607 | CONFIG_TEST_KSTRTOX=y | 623 | CONFIG_TEST_KSTRTOX=y |
| 608 | CONFIG_DMA_API_DEBUG=y | 624 | CONFIG_DMA_API_DEBUG=y |
| 609 | CONFIG_TEST_BPF=m | 625 | CONFIG_TEST_BPF=m |
| 610 | # CONFIG_STRICT_DEVMEM is not set | ||
| 611 | CONFIG_S390_PTDUMP=y | 626 | CONFIG_S390_PTDUMP=y |
| 612 | CONFIG_ENCRYPTED_KEYS=m | 627 | CONFIG_ENCRYPTED_KEYS=m |
| 613 | CONFIG_SECURITY=y | 628 | CONFIG_SECURITY=y |
| @@ -651,7 +666,6 @@ CONFIG_CRYPTO_SEED=m | |||
| 651 | CONFIG_CRYPTO_SERPENT=m | 666 | CONFIG_CRYPTO_SERPENT=m |
| 652 | CONFIG_CRYPTO_TEA=m | 667 | CONFIG_CRYPTO_TEA=m |
| 653 | CONFIG_CRYPTO_TWOFISH=m | 668 | CONFIG_CRYPTO_TWOFISH=m |
| 654 | CONFIG_CRYPTO_ZLIB=y | ||
| 655 | CONFIG_CRYPTO_LZO=m | 669 | CONFIG_CRYPTO_LZO=m |
| 656 | CONFIG_CRYPTO_LZ4=m | 670 | CONFIG_CRYPTO_LZ4=m |
| 657 | CONFIG_CRYPTO_LZ4HC=m | 671 | CONFIG_CRYPTO_LZ4HC=m |
| @@ -664,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m | |||
| 664 | CONFIG_CRYPTO_DES_S390=m | 678 | CONFIG_CRYPTO_DES_S390=m |
| 665 | CONFIG_CRYPTO_AES_S390=m | 679 | CONFIG_CRYPTO_AES_S390=m |
| 666 | CONFIG_CRYPTO_GHASH_S390=m | 680 | CONFIG_CRYPTO_GHASH_S390=m |
| 667 | CONFIG_ASYMMETRIC_KEY_TYPE=m | 681 | CONFIG_ASYMMETRIC_KEY_TYPE=y |
| 668 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m | 682 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m |
| 669 | CONFIG_X509_CERTIFICATE_PARSER=m | 683 | CONFIG_X509_CERTIFICATE_PARSER=m |
| 670 | CONFIG_CRC7=m | 684 | CONFIG_CRC7=m |
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index a31dcd56f7c0..f46a35115d2d 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig | |||
| @@ -1,8 +1,7 @@ | |||
| 1 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
| 2 | CONFIG_POSIX_MQUEUE=y | 2 | CONFIG_POSIX_MQUEUE=y |
| 3 | CONFIG_FHANDLE=y | ||
| 4 | CONFIG_AUDIT=y | 3 | CONFIG_AUDIT=y |
| 5 | CONFIG_NO_HZ=y | 4 | CONFIG_NO_HZ_IDLE=y |
| 6 | CONFIG_HIGH_RES_TIMERS=y | 5 | CONFIG_HIGH_RES_TIMERS=y |
| 7 | CONFIG_BSD_PROCESS_ACCT=y | 6 | CONFIG_BSD_PROCESS_ACCT=y |
| 8 | CONFIG_BSD_PROCESS_ACCT_V3=y | 7 | CONFIG_BSD_PROCESS_ACCT_V3=y |
| @@ -13,17 +12,17 @@ CONFIG_TASK_IO_ACCOUNTING=y | |||
| 13 | CONFIG_IKCONFIG=y | 12 | CONFIG_IKCONFIG=y |
| 14 | CONFIG_IKCONFIG_PROC=y | 13 | CONFIG_IKCONFIG_PROC=y |
| 15 | CONFIG_NUMA_BALANCING=y | 14 | CONFIG_NUMA_BALANCING=y |
| 16 | CONFIG_CGROUP_FREEZER=y | ||
| 17 | CONFIG_CGROUP_PIDS=y | ||
| 18 | CONFIG_CGROUP_DEVICE=y | ||
| 19 | CONFIG_CPUSETS=y | ||
| 20 | CONFIG_CGROUP_CPUACCT=y | ||
| 21 | CONFIG_MEMCG=y | 15 | CONFIG_MEMCG=y |
| 22 | CONFIG_MEMCG_SWAP=y | 16 | CONFIG_MEMCG_SWAP=y |
| 23 | CONFIG_MEMCG_KMEM=y | 17 | CONFIG_BLK_CGROUP=y |
| 18 | CONFIG_CGROUP_PIDS=y | ||
| 19 | CONFIG_CGROUP_FREEZER=y | ||
| 24 | CONFIG_CGROUP_HUGETLB=y | 20 | CONFIG_CGROUP_HUGETLB=y |
| 21 | CONFIG_CPUSETS=y | ||
| 22 | CONFIG_CGROUP_DEVICE=y | ||
| 23 | CONFIG_CGROUP_CPUACCT=y | ||
| 25 | CONFIG_CGROUP_PERF=y | 24 | CONFIG_CGROUP_PERF=y |
| 26 | CONFIG_BLK_CGROUP=y | 25 | CONFIG_CHECKPOINT_RESTORE=y |
| 27 | CONFIG_NAMESPACES=y | 26 | CONFIG_NAMESPACES=y |
| 28 | CONFIG_USER_NS=y | 27 | CONFIG_USER_NS=y |
| 29 | CONFIG_SCHED_AUTOGROUP=y | 28 | CONFIG_SCHED_AUTOGROUP=y |
| @@ -53,7 +52,6 @@ CONFIG_SOLARIS_X86_PARTITION=y | |||
| 53 | CONFIG_UNIXWARE_DISKLABEL=y | 52 | CONFIG_UNIXWARE_DISKLABEL=y |
| 54 | CONFIG_CFQ_GROUP_IOSCHED=y | 53 | CONFIG_CFQ_GROUP_IOSCHED=y |
| 55 | CONFIG_DEFAULT_DEADLINE=y | 54 | CONFIG_DEFAULT_DEADLINE=y |
| 56 | CONFIG_MARCH_Z196=y | ||
| 57 | CONFIG_TUNE_ZEC12=y | 55 | CONFIG_TUNE_ZEC12=y |
| 58 | CONFIG_NR_CPUS=256 | 56 | CONFIG_NR_CPUS=256 |
| 59 | CONFIG_NUMA=y | 57 | CONFIG_NUMA=y |
| @@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y | |||
| 62 | CONFIG_MEMORY_HOTREMOVE=y | 60 | CONFIG_MEMORY_HOTREMOVE=y |
| 63 | CONFIG_KSM=y | 61 | CONFIG_KSM=y |
| 64 | CONFIG_TRANSPARENT_HUGEPAGE=y | 62 | CONFIG_TRANSPARENT_HUGEPAGE=y |
| 63 | CONFIG_CLEANCACHE=y | ||
| 64 | CONFIG_FRONTSWAP=y | ||
| 65 | CONFIG_CMA=y | ||
| 66 | CONFIG_ZSWAP=y | ||
| 67 | CONFIG_ZBUD=m | ||
| 68 | CONFIG_ZSMALLOC=m | ||
| 69 | CONFIG_ZSMALLOC_STAT=y | ||
| 70 | CONFIG_IDLE_PAGE_TRACKING=y | ||
| 65 | CONFIG_PCI=y | 71 | CONFIG_PCI=y |
| 66 | CONFIG_HOTPLUG_PCI=y | 72 | CONFIG_HOTPLUG_PCI=y |
| 67 | CONFIG_HOTPLUG_PCI_S390=y | 73 | CONFIG_HOTPLUG_PCI_S390=y |
| @@ -530,6 +536,8 @@ CONFIG_NLS_UTF8=m | |||
| 530 | CONFIG_DLM=m | 536 | CONFIG_DLM=m |
| 531 | CONFIG_PRINTK_TIME=y | 537 | CONFIG_PRINTK_TIME=y |
| 532 | CONFIG_DEBUG_INFO=y | 538 | CONFIG_DEBUG_INFO=y |
| 539 | CONFIG_DEBUG_INFO_DWARF4=y | ||
| 540 | CONFIG_GDB_SCRIPTS=y | ||
| 533 | # CONFIG_ENABLE_MUST_CHECK is not set | 541 | # CONFIG_ENABLE_MUST_CHECK is not set |
| 534 | CONFIG_FRAME_WARN=1024 | 542 | CONFIG_FRAME_WARN=1024 |
| 535 | CONFIG_UNUSED_SYMBOLS=y | 543 | CONFIG_UNUSED_SYMBOLS=y |
| @@ -547,13 +555,13 @@ CONFIG_LATENCYTOP=y | |||
| 547 | CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y | 555 | CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y |
| 548 | CONFIG_BLK_DEV_IO_TRACE=y | 556 | CONFIG_BLK_DEV_IO_TRACE=y |
| 549 | # CONFIG_KPROBE_EVENT is not set | 557 | # CONFIG_KPROBE_EVENT is not set |
| 558 | CONFIG_TRACE_ENUM_MAP_FILE=y | ||
| 550 | CONFIG_LKDTM=m | 559 | CONFIG_LKDTM=m |
| 551 | CONFIG_RBTREE_TEST=m | 560 | CONFIG_RBTREE_TEST=m |
| 552 | CONFIG_INTERVAL_TREE_TEST=m | 561 | CONFIG_INTERVAL_TREE_TEST=m |
| 553 | CONFIG_PERCPU_TEST=m | 562 | CONFIG_PERCPU_TEST=m |
| 554 | CONFIG_ATOMIC64_SELFTEST=y | 563 | CONFIG_ATOMIC64_SELFTEST=y |
| 555 | CONFIG_TEST_BPF=m | 564 | CONFIG_TEST_BPF=m |
| 556 | # CONFIG_STRICT_DEVMEM is not set | ||
| 557 | CONFIG_S390_PTDUMP=y | 565 | CONFIG_S390_PTDUMP=y |
| 558 | CONFIG_ENCRYPTED_KEYS=m | 566 | CONFIG_ENCRYPTED_KEYS=m |
| 559 | CONFIG_SECURITY=y | 567 | CONFIG_SECURITY=y |
| @@ -597,8 +605,6 @@ CONFIG_CRYPTO_SEED=m | |||
| 597 | CONFIG_CRYPTO_SERPENT=m | 605 | CONFIG_CRYPTO_SERPENT=m |
| 598 | CONFIG_CRYPTO_TEA=m | 606 | CONFIG_CRYPTO_TEA=m |
| 599 | CONFIG_CRYPTO_TWOFISH=m | 607 | CONFIG_CRYPTO_TWOFISH=m |
| 600 | CONFIG_CRYPTO_ZLIB=y | ||
| 601 | CONFIG_CRYPTO_LZO=m | ||
| 602 | CONFIG_CRYPTO_LZ4=m | 608 | CONFIG_CRYPTO_LZ4=m |
| 603 | CONFIG_CRYPTO_LZ4HC=m | 609 | CONFIG_CRYPTO_LZ4HC=m |
| 604 | CONFIG_CRYPTO_USER_API_HASH=m | 610 | CONFIG_CRYPTO_USER_API_HASH=m |
| @@ -610,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m | |||
| 610 | CONFIG_CRYPTO_DES_S390=m | 616 | CONFIG_CRYPTO_DES_S390=m |
| 611 | CONFIG_CRYPTO_AES_S390=m | 617 | CONFIG_CRYPTO_AES_S390=m |
| 612 | CONFIG_CRYPTO_GHASH_S390=m | 618 | CONFIG_CRYPTO_GHASH_S390=m |
| 613 | CONFIG_ASYMMETRIC_KEY_TYPE=m | 619 | CONFIG_ASYMMETRIC_KEY_TYPE=y |
| 614 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m | 620 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m |
| 615 | CONFIG_X509_CERTIFICATE_PARSER=m | 621 | CONFIG_X509_CERTIFICATE_PARSER=m |
| 616 | CONFIG_CRC7=m | 622 | CONFIG_CRC7=m |
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 7b73bf353345..ba0f2a58b8cd 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig | |||
| @@ -1,8 +1,7 @@ | |||
| 1 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
| 2 | CONFIG_POSIX_MQUEUE=y | 2 | CONFIG_POSIX_MQUEUE=y |
| 3 | CONFIG_FHANDLE=y | ||
| 4 | CONFIG_AUDIT=y | 3 | CONFIG_AUDIT=y |
| 5 | CONFIG_NO_HZ=y | 4 | CONFIG_NO_HZ_IDLE=y |
| 6 | CONFIG_HIGH_RES_TIMERS=y | 5 | CONFIG_HIGH_RES_TIMERS=y |
| 7 | CONFIG_BSD_PROCESS_ACCT=y | 6 | CONFIG_BSD_PROCESS_ACCT=y |
| 8 | CONFIG_BSD_PROCESS_ACCT_V3=y | 7 | CONFIG_BSD_PROCESS_ACCT_V3=y |
| @@ -14,17 +13,17 @@ CONFIG_IKCONFIG=y | |||
| 14 | CONFIG_IKCONFIG_PROC=y | 13 | CONFIG_IKCONFIG_PROC=y |
| 15 | CONFIG_NUMA_BALANCING=y | 14 | CONFIG_NUMA_BALANCING=y |
| 16 | # CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set | 15 | # CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set |
| 17 | CONFIG_CGROUP_FREEZER=y | ||
| 18 | CONFIG_CGROUP_PIDS=y | ||
| 19 | CONFIG_CGROUP_DEVICE=y | ||
| 20 | CONFIG_CPUSETS=y | ||
| 21 | CONFIG_CGROUP_CPUACCT=y | ||
| 22 | CONFIG_MEMCG=y | 16 | CONFIG_MEMCG=y |
| 23 | CONFIG_MEMCG_SWAP=y | 17 | CONFIG_MEMCG_SWAP=y |
| 24 | CONFIG_MEMCG_KMEM=y | 18 | CONFIG_BLK_CGROUP=y |
| 19 | CONFIG_CGROUP_PIDS=y | ||
| 20 | CONFIG_CGROUP_FREEZER=y | ||
| 25 | CONFIG_CGROUP_HUGETLB=y | 21 | CONFIG_CGROUP_HUGETLB=y |
| 22 | CONFIG_CPUSETS=y | ||
| 23 | CONFIG_CGROUP_DEVICE=y | ||
| 24 | CONFIG_CGROUP_CPUACCT=y | ||
| 26 | CONFIG_CGROUP_PERF=y | 25 | CONFIG_CGROUP_PERF=y |
| 27 | CONFIG_BLK_CGROUP=y | 26 | CONFIG_CHECKPOINT_RESTORE=y |
| 28 | CONFIG_NAMESPACES=y | 27 | CONFIG_NAMESPACES=y |
| 29 | CONFIG_USER_NS=y | 28 | CONFIG_USER_NS=y |
| 30 | CONFIG_SCHED_AUTOGROUP=y | 29 | CONFIG_SCHED_AUTOGROUP=y |
| @@ -53,7 +52,6 @@ CONFIG_UNIXWARE_DISKLABEL=y | |||
| 53 | CONFIG_CFQ_GROUP_IOSCHED=y | 52 | CONFIG_CFQ_GROUP_IOSCHED=y |
| 54 | CONFIG_DEFAULT_DEADLINE=y | 53 | CONFIG_DEFAULT_DEADLINE=y |
| 55 | CONFIG_LIVEPATCH=y | 54 | CONFIG_LIVEPATCH=y |
| 56 | CONFIG_MARCH_Z196=y | ||
| 57 | CONFIG_TUNE_ZEC12=y | 55 | CONFIG_TUNE_ZEC12=y |
| 58 | CONFIG_NR_CPUS=512 | 56 | CONFIG_NR_CPUS=512 |
| 59 | CONFIG_NUMA=y | 57 | CONFIG_NUMA=y |
| @@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y | |||
| 62 | CONFIG_MEMORY_HOTREMOVE=y | 60 | CONFIG_MEMORY_HOTREMOVE=y |
| 63 | CONFIG_KSM=y | 61 | CONFIG_KSM=y |
| 64 | CONFIG_TRANSPARENT_HUGEPAGE=y | 62 | CONFIG_TRANSPARENT_HUGEPAGE=y |
| 63 | CONFIG_CLEANCACHE=y | ||
| 64 | CONFIG_FRONTSWAP=y | ||
| 65 | CONFIG_CMA=y | ||
| 66 | CONFIG_ZSWAP=y | ||
| 67 | CONFIG_ZBUD=m | ||
| 68 | CONFIG_ZSMALLOC=m | ||
| 69 | CONFIG_ZSMALLOC_STAT=y | ||
| 70 | CONFIG_IDLE_PAGE_TRACKING=y | ||
| 65 | CONFIG_PCI=y | 71 | CONFIG_PCI=y |
| 66 | CONFIG_HOTPLUG_PCI=y | 72 | CONFIG_HOTPLUG_PCI=y |
| 67 | CONFIG_HOTPLUG_PCI_S390=y | 73 | CONFIG_HOTPLUG_PCI_S390=y |
| @@ -447,6 +453,7 @@ CONFIG_HW_RANDOM_VIRTIO=m | |||
| 447 | CONFIG_RAW_DRIVER=m | 453 | CONFIG_RAW_DRIVER=m |
| 448 | CONFIG_HANGCHECK_TIMER=m | 454 | CONFIG_HANGCHECK_TIMER=m |
| 449 | CONFIG_TN3270_FS=y | 455 | CONFIG_TN3270_FS=y |
| 456 | # CONFIG_HWMON is not set | ||
| 450 | CONFIG_WATCHDOG=y | 457 | CONFIG_WATCHDOG=y |
| 451 | CONFIG_WATCHDOG_NOWAYOUT=y | 458 | CONFIG_WATCHDOG_NOWAYOUT=y |
| 452 | CONFIG_SOFT_WATCHDOG=m | 459 | CONFIG_SOFT_WATCHDOG=m |
| @@ -530,6 +537,8 @@ CONFIG_NLS_UTF8=m | |||
| 530 | CONFIG_DLM=m | 537 | CONFIG_DLM=m |
| 531 | CONFIG_PRINTK_TIME=y | 538 | CONFIG_PRINTK_TIME=y |
| 532 | CONFIG_DEBUG_INFO=y | 539 | CONFIG_DEBUG_INFO=y |
| 540 | CONFIG_DEBUG_INFO_DWARF4=y | ||
| 541 | CONFIG_GDB_SCRIPTS=y | ||
| 533 | # CONFIG_ENABLE_MUST_CHECK is not set | 542 | # CONFIG_ENABLE_MUST_CHECK is not set |
| 534 | CONFIG_FRAME_WARN=1024 | 543 | CONFIG_FRAME_WARN=1024 |
| 535 | CONFIG_UNUSED_SYMBOLS=y | 544 | CONFIG_UNUSED_SYMBOLS=y |
| @@ -546,11 +555,12 @@ CONFIG_FTRACE_SYSCALLS=y | |||
| 546 | CONFIG_STACK_TRACER=y | 555 | CONFIG_STACK_TRACER=y |
| 547 | CONFIG_BLK_DEV_IO_TRACE=y | 556 | CONFIG_BLK_DEV_IO_TRACE=y |
| 548 | CONFIG_UPROBE_EVENT=y | 557 | CONFIG_UPROBE_EVENT=y |
| 558 | CONFIG_FUNCTION_PROFILER=y | ||
| 559 | CONFIG_TRACE_ENUM_MAP_FILE=y | ||
| 549 | CONFIG_LKDTM=m | 560 | CONFIG_LKDTM=m |
| 550 | CONFIG_PERCPU_TEST=m | 561 | CONFIG_PERCPU_TEST=m |
| 551 | CONFIG_ATOMIC64_SELFTEST=y | 562 | CONFIG_ATOMIC64_SELFTEST=y |
| 552 | CONFIG_TEST_BPF=m | 563 | CONFIG_TEST_BPF=m |
| 553 | # CONFIG_STRICT_DEVMEM is not set | ||
| 554 | CONFIG_S390_PTDUMP=y | 564 | CONFIG_S390_PTDUMP=y |
| 555 | CONFIG_ENCRYPTED_KEYS=m | 565 | CONFIG_ENCRYPTED_KEYS=m |
| 556 | CONFIG_SECURITY=y | 566 | CONFIG_SECURITY=y |
| @@ -594,8 +604,6 @@ CONFIG_CRYPTO_SEED=m | |||
| 594 | CONFIG_CRYPTO_SERPENT=m | 604 | CONFIG_CRYPTO_SERPENT=m |
| 595 | CONFIG_CRYPTO_TEA=m | 605 | CONFIG_CRYPTO_TEA=m |
| 596 | CONFIG_CRYPTO_TWOFISH=m | 606 | CONFIG_CRYPTO_TWOFISH=m |
| 597 | CONFIG_CRYPTO_ZLIB=y | ||
| 598 | CONFIG_CRYPTO_LZO=m | ||
| 599 | CONFIG_CRYPTO_LZ4=m | 607 | CONFIG_CRYPTO_LZ4=m |
| 600 | CONFIG_CRYPTO_LZ4HC=m | 608 | CONFIG_CRYPTO_LZ4HC=m |
| 601 | CONFIG_CRYPTO_USER_API_HASH=m | 609 | CONFIG_CRYPTO_USER_API_HASH=m |
| @@ -607,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m | |||
| 607 | CONFIG_CRYPTO_DES_S390=m | 615 | CONFIG_CRYPTO_DES_S390=m |
| 608 | CONFIG_CRYPTO_AES_S390=m | 616 | CONFIG_CRYPTO_AES_S390=m |
| 609 | CONFIG_CRYPTO_GHASH_S390=m | 617 | CONFIG_CRYPTO_GHASH_S390=m |
| 610 | CONFIG_ASYMMETRIC_KEY_TYPE=m | 618 | CONFIG_ASYMMETRIC_KEY_TYPE=y |
| 611 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m | 619 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m |
| 612 | CONFIG_X509_CERTIFICATE_PARSER=m | 620 | CONFIG_X509_CERTIFICATE_PARSER=m |
| 613 | CONFIG_CRC7=m | 621 | CONFIG_CRC7=m |
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 1719843a55a2..4366a3e3e754 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | # CONFIG_SWAP is not set | 1 | # CONFIG_SWAP is not set |
| 2 | CONFIG_NO_HZ=y | 2 | CONFIG_NO_HZ_IDLE=y |
| 3 | CONFIG_HIGH_RES_TIMERS=y | 3 | CONFIG_HIGH_RES_TIMERS=y |
| 4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
| 5 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y | 5 | CONFIG_CC_OPTIMIZE_FOR_SIZE=y |
| @@ -7,7 +7,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y | |||
| 7 | CONFIG_PARTITION_ADVANCED=y | 7 | CONFIG_PARTITION_ADVANCED=y |
| 8 | CONFIG_IBM_PARTITION=y | 8 | CONFIG_IBM_PARTITION=y |
| 9 | CONFIG_DEFAULT_DEADLINE=y | 9 | CONFIG_DEFAULT_DEADLINE=y |
| 10 | CONFIG_MARCH_Z196=y | ||
| 11 | CONFIG_TUNE_ZEC12=y | 10 | CONFIG_TUNE_ZEC12=y |
| 12 | # CONFIG_COMPAT is not set | 11 | # CONFIG_COMPAT is not set |
| 13 | CONFIG_NR_CPUS=2 | 12 | CONFIG_NR_CPUS=2 |
| @@ -64,7 +63,6 @@ CONFIG_PANIC_ON_OOPS=y | |||
| 64 | # CONFIG_SCHED_DEBUG is not set | 63 | # CONFIG_SCHED_DEBUG is not set |
| 65 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 | 64 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 |
| 66 | # CONFIG_FTRACE is not set | 65 | # CONFIG_FTRACE is not set |
| 67 | # CONFIG_STRICT_DEVMEM is not set | ||
| 68 | # CONFIG_PFAULT is not set | 66 | # CONFIG_PFAULT is not set |
| 69 | # CONFIG_S390_HYPFS_FS is not set | 67 | # CONFIG_S390_HYPFS_FS is not set |
| 70 | # CONFIG_VIRTUALIZATION is not set | 68 | # CONFIG_VIRTUALIZATION is not set |
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index e24f2af4c73b..3f571ea89509 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
| @@ -1,8 +1,8 @@ | |||
| 1 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
| 2 | CONFIG_POSIX_MQUEUE=y | 2 | CONFIG_POSIX_MQUEUE=y |
| 3 | CONFIG_FHANDLE=y | 3 | CONFIG_USELIB=y |
| 4 | CONFIG_AUDIT=y | 4 | CONFIG_AUDIT=y |
| 5 | CONFIG_NO_HZ=y | 5 | CONFIG_NO_HZ_IDLE=y |
| 6 | CONFIG_HIGH_RES_TIMERS=y | 6 | CONFIG_HIGH_RES_TIMERS=y |
| 7 | CONFIG_TASKSTATS=y | 7 | CONFIG_TASKSTATS=y |
| 8 | CONFIG_TASK_DELAY_ACCT=y | 8 | CONFIG_TASK_DELAY_ACCT=y |
| @@ -11,19 +11,19 @@ CONFIG_TASK_IO_ACCOUNTING=y | |||
| 11 | CONFIG_IKCONFIG=y | 11 | CONFIG_IKCONFIG=y |
| 12 | CONFIG_IKCONFIG_PROC=y | 12 | CONFIG_IKCONFIG_PROC=y |
| 13 | CONFIG_CGROUPS=y | 13 | CONFIG_CGROUPS=y |
| 14 | CONFIG_CGROUP_FREEZER=y | ||
| 15 | CONFIG_CGROUP_PIDS=y | ||
| 16 | CONFIG_CGROUP_DEVICE=y | ||
| 17 | CONFIG_CPUSETS=y | ||
| 18 | CONFIG_CGROUP_CPUACCT=y | ||
| 19 | CONFIG_MEMCG=y | 14 | CONFIG_MEMCG=y |
| 20 | CONFIG_MEMCG_SWAP=y | 15 | CONFIG_MEMCG_SWAP=y |
| 21 | CONFIG_MEMCG_KMEM=y | 16 | CONFIG_BLK_CGROUP=y |
| 22 | CONFIG_CGROUP_HUGETLB=y | ||
| 23 | CONFIG_CGROUP_PERF=y | ||
| 24 | CONFIG_CGROUP_SCHED=y | 17 | CONFIG_CGROUP_SCHED=y |
| 25 | CONFIG_RT_GROUP_SCHED=y | 18 | CONFIG_RT_GROUP_SCHED=y |
| 26 | CONFIG_BLK_CGROUP=y | 19 | CONFIG_CGROUP_PIDS=y |
| 20 | CONFIG_CGROUP_FREEZER=y | ||
| 21 | CONFIG_CGROUP_HUGETLB=y | ||
| 22 | CONFIG_CPUSETS=y | ||
| 23 | CONFIG_CGROUP_DEVICE=y | ||
| 24 | CONFIG_CGROUP_CPUACCT=y | ||
| 25 | CONFIG_CGROUP_PERF=y | ||
| 26 | CONFIG_CHECKPOINT_RESTORE=y | ||
| 27 | CONFIG_NAMESPACES=y | 27 | CONFIG_NAMESPACES=y |
| 28 | CONFIG_USER_NS=y | 28 | CONFIG_USER_NS=y |
| 29 | CONFIG_BLK_DEV_INITRD=y | 29 | CONFIG_BLK_DEV_INITRD=y |
| @@ -44,7 +44,6 @@ CONFIG_PARTITION_ADVANCED=y | |||
| 44 | CONFIG_IBM_PARTITION=y | 44 | CONFIG_IBM_PARTITION=y |
| 45 | CONFIG_DEFAULT_DEADLINE=y | 45 | CONFIG_DEFAULT_DEADLINE=y |
| 46 | CONFIG_LIVEPATCH=y | 46 | CONFIG_LIVEPATCH=y |
| 47 | CONFIG_MARCH_Z196=y | ||
| 48 | CONFIG_NR_CPUS=256 | 47 | CONFIG_NR_CPUS=256 |
| 49 | CONFIG_NUMA=y | 48 | CONFIG_NUMA=y |
| 50 | CONFIG_HZ_100=y | 49 | CONFIG_HZ_100=y |
| @@ -52,6 +51,14 @@ CONFIG_MEMORY_HOTPLUG=y | |||
| 52 | CONFIG_MEMORY_HOTREMOVE=y | 51 | CONFIG_MEMORY_HOTREMOVE=y |
| 53 | CONFIG_KSM=y | 52 | CONFIG_KSM=y |
| 54 | CONFIG_TRANSPARENT_HUGEPAGE=y | 53 | CONFIG_TRANSPARENT_HUGEPAGE=y |
| 54 | CONFIG_CLEANCACHE=y | ||
| 55 | CONFIG_FRONTSWAP=y | ||
| 56 | CONFIG_CMA=y | ||
| 57 | CONFIG_ZSWAP=y | ||
| 58 | CONFIG_ZBUD=m | ||
| 59 | CONFIG_ZSMALLOC=m | ||
| 60 | CONFIG_ZSMALLOC_STAT=y | ||
| 61 | CONFIG_IDLE_PAGE_TRACKING=y | ||
| 55 | CONFIG_CRASH_DUMP=y | 62 | CONFIG_CRASH_DUMP=y |
| 56 | CONFIG_BINFMT_MISC=m | 63 | CONFIG_BINFMT_MISC=m |
| 57 | CONFIG_HIBERNATION=y | 64 | CONFIG_HIBERNATION=y |
| @@ -61,7 +68,6 @@ CONFIG_UNIX=y | |||
| 61 | CONFIG_NET_KEY=y | 68 | CONFIG_NET_KEY=y |
| 62 | CONFIG_INET=y | 69 | CONFIG_INET=y |
| 63 | CONFIG_IP_MULTICAST=y | 70 | CONFIG_IP_MULTICAST=y |
| 64 | # CONFIG_INET_LRO is not set | ||
| 65 | CONFIG_L2TP=m | 71 | CONFIG_L2TP=m |
| 66 | CONFIG_L2TP_DEBUGFS=m | 72 | CONFIG_L2TP_DEBUGFS=m |
| 67 | CONFIG_VLAN_8021Q=y | 73 | CONFIG_VLAN_8021Q=y |
| @@ -144,6 +150,9 @@ CONFIG_TMPFS=y | |||
| 144 | CONFIG_TMPFS_POSIX_ACL=y | 150 | CONFIG_TMPFS_POSIX_ACL=y |
| 145 | CONFIG_HUGETLBFS=y | 151 | CONFIG_HUGETLBFS=y |
| 146 | # CONFIG_NETWORK_FILESYSTEMS is not set | 152 | # CONFIG_NETWORK_FILESYSTEMS is not set |
| 153 | CONFIG_DEBUG_INFO=y | ||
| 154 | CONFIG_DEBUG_INFO_DWARF4=y | ||
| 155 | CONFIG_GDB_SCRIPTS=y | ||
| 147 | CONFIG_UNUSED_SYMBOLS=y | 156 | CONFIG_UNUSED_SYMBOLS=y |
| 148 | CONFIG_DEBUG_SECTION_MISMATCH=y | 157 | CONFIG_DEBUG_SECTION_MISMATCH=y |
| 149 | CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y | 158 | CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y |
| @@ -158,20 +167,21 @@ CONFIG_LOCK_STAT=y | |||
| 158 | CONFIG_DEBUG_LOCKDEP=y | 167 | CONFIG_DEBUG_LOCKDEP=y |
| 159 | CONFIG_DEBUG_ATOMIC_SLEEP=y | 168 | CONFIG_DEBUG_ATOMIC_SLEEP=y |
| 160 | CONFIG_DEBUG_LIST=y | 169 | CONFIG_DEBUG_LIST=y |
| 161 | CONFIG_DEBUG_PI_LIST=y | ||
| 162 | CONFIG_DEBUG_SG=y | 170 | CONFIG_DEBUG_SG=y |
| 163 | CONFIG_DEBUG_NOTIFIERS=y | 171 | CONFIG_DEBUG_NOTIFIERS=y |
| 164 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 | 172 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 |
| 165 | CONFIG_RCU_TRACE=y | 173 | CONFIG_RCU_TRACE=y |
| 166 | CONFIG_LATENCYTOP=y | 174 | CONFIG_LATENCYTOP=y |
| 167 | CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y | 175 | CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y |
| 168 | CONFIG_TRACER_SNAPSHOT=y | 176 | CONFIG_SCHED_TRACER=y |
| 177 | CONFIG_FTRACE_SYSCALLS=y | ||
| 169 | CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y | 178 | CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y |
| 170 | CONFIG_STACK_TRACER=y | 179 | CONFIG_STACK_TRACER=y |
| 171 | CONFIG_BLK_DEV_IO_TRACE=y | 180 | CONFIG_BLK_DEV_IO_TRACE=y |
| 172 | CONFIG_UPROBE_EVENT=y | 181 | CONFIG_UPROBE_EVENT=y |
| 182 | CONFIG_FUNCTION_PROFILER=y | ||
| 183 | CONFIG_TRACE_ENUM_MAP_FILE=y | ||
| 173 | CONFIG_KPROBES_SANITY_TEST=y | 184 | CONFIG_KPROBES_SANITY_TEST=y |
| 174 | # CONFIG_STRICT_DEVMEM is not set | ||
| 175 | CONFIG_S390_PTDUMP=y | 185 | CONFIG_S390_PTDUMP=y |
| 176 | CONFIG_CRYPTO_CRYPTD=m | 186 | CONFIG_CRYPTO_CRYPTD=m |
| 177 | CONFIG_CRYPTO_AUTHENC=m | 187 | CONFIG_CRYPTO_AUTHENC=m |
| @@ -212,8 +222,6 @@ CONFIG_CRYPTO_SERPENT=m | |||
| 212 | CONFIG_CRYPTO_TEA=m | 222 | CONFIG_CRYPTO_TEA=m |
| 213 | CONFIG_CRYPTO_TWOFISH=m | 223 | CONFIG_CRYPTO_TWOFISH=m |
| 214 | CONFIG_CRYPTO_DEFLATE=m | 224 | CONFIG_CRYPTO_DEFLATE=m |
| 215 | CONFIG_CRYPTO_ZLIB=m | ||
| 216 | CONFIG_CRYPTO_LZO=m | ||
| 217 | CONFIG_CRYPTO_LZ4=m | 225 | CONFIG_CRYPTO_LZ4=m |
| 218 | CONFIG_CRYPTO_LZ4HC=m | 226 | CONFIG_CRYPTO_LZ4HC=m |
| 219 | CONFIG_CRYPTO_ANSI_CPRNG=m | 227 | CONFIG_CRYPTO_ANSI_CPRNG=m |
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 37b9017c6a96..ac82e8eb936d 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
| @@ -245,6 +245,7 @@ struct kvm_vcpu_stat { | |||
| 245 | u32 exit_stop_request; | 245 | u32 exit_stop_request; |
| 246 | u32 exit_validity; | 246 | u32 exit_validity; |
| 247 | u32 exit_instruction; | 247 | u32 exit_instruction; |
| 248 | u32 exit_pei; | ||
| 248 | u32 halt_successful_poll; | 249 | u32 halt_successful_poll; |
| 249 | u32 halt_attempted_poll; | 250 | u32 halt_attempted_poll; |
| 250 | u32 halt_poll_invalid; | 251 | u32 halt_poll_invalid; |
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 59215c518f37..7ec63b1d920d 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c | |||
| @@ -649,6 +649,8 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu) | |||
| 649 | 649 | ||
| 650 | /* Performance monitoring unit for s390x */ | 650 | /* Performance monitoring unit for s390x */ |
| 651 | static struct pmu cpumf_pmu = { | 651 | static struct pmu cpumf_pmu = { |
| 652 | .task_ctx_nr = perf_sw_context, | ||
| 653 | .capabilities = PERF_PMU_CAP_NO_INTERRUPT, | ||
| 652 | .pmu_enable = cpumf_pmu_enable, | 654 | .pmu_enable = cpumf_pmu_enable, |
| 653 | .pmu_disable = cpumf_pmu_disable, | 655 | .pmu_disable = cpumf_pmu_disable, |
| 654 | .event_init = cpumf_pmu_event_init, | 656 | .event_init = cpumf_pmu_event_init, |
| @@ -708,12 +710,6 @@ static int __init cpumf_pmu_init(void) | |||
| 708 | goto out; | 710 | goto out; |
| 709 | } | 711 | } |
| 710 | 712 | ||
| 711 | /* The CPU measurement counter facility does not have overflow | ||
| 712 | * interrupts to do sampling. Sampling must be provided by | ||
| 713 | * external means, for example, by timers. | ||
| 714 | */ | ||
| 715 | cpumf_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; | ||
| 716 | |||
| 717 | cpumf_pmu.attr_groups = cpumf_cf_event_group(); | 713 | cpumf_pmu.attr_groups = cpumf_cf_event_group(); |
| 718 | rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); | 714 | rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); |
| 719 | if (rc) { | 715 | if (rc) { |
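The perf_cpum_cf.c hunk above moves two fixed properties of the s390 CPU-measurement PMU — the software task context and the PERF_PMU_CAP_NO_INTERRUPT capability (the facility has no overflow interrupt, so it cannot drive sampling) — out of cpumf_pmu_init() and into the static struct pmu initializer. A minimal standalone sketch of that designated-initializer pattern follows; the struct and macros here are simplified stand-ins, not the kernel's struct pmu or <linux/perf_event.h>:

    #include <stdio.h>

    #define CAP_NO_INTERRUPT 0x01   /* stand-in for PERF_PMU_CAP_NO_INTERRUPT */
    #define SW_CONTEXT       1      /* stand-in for perf_sw_context           */

    struct fake_pmu {
            int task_ctx_nr;
            unsigned int capabilities;
            int (*event_init)(void);
    };

    static int dummy_event_init(void) { return 0; }

    /* Properties known at build time go straight into the static
     * initializer instead of being OR'd in by the init function. */
    static struct fake_pmu cpumf_like_pmu = {
            .task_ctx_nr  = SW_CONTEXT,
            .capabilities = CAP_NO_INTERRUPT,
            .event_init   = dummy_event_init,
    };

    int main(void)
    {
            printf("caps=%#x ctx=%d\n",
                   cpumf_like_pmu.capabilities, cpumf_like_pmu.task_ctx_nr);
            return 0;
    }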
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 2e6b54e4d3f9..252157181302 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c | |||
| @@ -341,6 +341,8 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu) | |||
| 341 | 341 | ||
| 342 | static int handle_partial_execution(struct kvm_vcpu *vcpu) | 342 | static int handle_partial_execution(struct kvm_vcpu *vcpu) |
| 343 | { | 343 | { |
| 344 | vcpu->stat.exit_pei++; | ||
| 345 | |||
| 344 | if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */ | 346 | if (vcpu->arch.sie_block->ipa == 0xb254) /* MVPG */ |
| 345 | return handle_mvpg_pei(vcpu); | 347 | return handle_mvpg_pei(vcpu); |
| 346 | if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */ | 348 | if (vcpu->arch.sie_block->ipa >> 8 == 0xae) /* SIGP */ |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 6d8ec3ac9dd8..43f2a2b80490 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
| @@ -61,6 +61,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
| 61 | { "exit_external_request", VCPU_STAT(exit_external_request) }, | 61 | { "exit_external_request", VCPU_STAT(exit_external_request) }, |
| 62 | { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) }, | 62 | { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) }, |
| 63 | { "exit_instruction", VCPU_STAT(exit_instruction) }, | 63 | { "exit_instruction", VCPU_STAT(exit_instruction) }, |
| 64 | { "exit_pei", VCPU_STAT(exit_pei) }, | ||
| 64 | { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, | 65 | { "exit_program_interruption", VCPU_STAT(exit_program_interruption) }, |
| 65 | { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, | 66 | { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) }, |
| 66 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, | 67 | { "halt_successful_poll", VCPU_STAT(halt_successful_poll) }, |
| @@ -657,7 +658,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr) | |||
| 657 | kvm->arch.model.cpuid = proc->cpuid; | 658 | kvm->arch.model.cpuid = proc->cpuid; |
| 658 | lowest_ibc = sclp.ibc >> 16 & 0xfff; | 659 | lowest_ibc = sclp.ibc >> 16 & 0xfff; |
| 659 | unblocked_ibc = sclp.ibc & 0xfff; | 660 | unblocked_ibc = sclp.ibc & 0xfff; |
| 660 | if (lowest_ibc) { | 661 | if (lowest_ibc && proc->ibc) { |
| 661 | if (proc->ibc > unblocked_ibc) | 662 | if (proc->ibc > unblocked_ibc) |
| 662 | kvm->arch.model.ibc = unblocked_ibc; | 663 | kvm->arch.model.ibc = unblocked_ibc; |
| 663 | else if (proc->ibc < lowest_ibc) | 664 | else if (proc->ibc < lowest_ibc) |
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 7a3144017301..19288c1b36d3 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
| @@ -250,6 +250,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code) | |||
| 250 | 250 | ||
| 251 | report_user_fault(regs, SIGSEGV, 1); | 251 | report_user_fault(regs, SIGSEGV, 1); |
| 252 | si.si_signo = SIGSEGV; | 252 | si.si_signo = SIGSEGV; |
| 253 | si.si_errno = 0; | ||
| 253 | si.si_code = si_code; | 254 | si.si_code = si_code; |
| 254 | si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); | 255 | si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); |
| 255 | force_sig_info(SIGSEGV, &si, current); | 256 | force_sig_info(SIGSEGV, &si, current); |
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index e8b5962ac12a..e2565d2d0c32 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c | |||
| @@ -169,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
| 169 | return table; | 169 | return table; |
| 170 | } | 170 | } |
| 171 | /* Allocate a fresh page */ | 171 | /* Allocate a fresh page */ |
| 172 | page = alloc_page(GFP_KERNEL|__GFP_REPEAT); | 172 | page = alloc_page(GFP_KERNEL); |
| 173 | if (!page) | 173 | if (!page) |
| 174 | return NULL; | 174 | return NULL; |
| 175 | if (!pgtable_page_ctor(page)) { | 175 | if (!pgtable_page_ctor(page)) { |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 4324b87f9398..9f0ce0e6eeb4 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
| @@ -437,7 +437,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr, | |||
| 437 | pgste = pgste_get_lock(ptep); | 437 | pgste = pgste_get_lock(ptep); |
| 438 | pgstev = pgste_val(pgste); | 438 | pgstev = pgste_val(pgste); |
| 439 | pte = *ptep; | 439 | pte = *ptep; |
| 440 | if (pte_swap(pte) && | 440 | if (!reset && pte_swap(pte) && |
| 441 | ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED || | 441 | ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED || |
| 442 | (pgstev & _PGSTE_GPS_ZERO))) { | 442 | (pgstev & _PGSTE_GPS_ZERO))) { |
| 443 | ptep_zap_swap_entry(mm, pte_to_swp_entry(pte)); | 443 | ptep_zap_swap_entry(mm, pte_to_swp_entry(pte)); |
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h index f010c93a88b1..fda605dbc1b4 100644 --- a/arch/s390/net/bpf_jit.h +++ b/arch/s390/net/bpf_jit.h | |||
| @@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; | |||
| 37 | * | | | | 37 | * | | | |
| 38 | * +---------------+ | | 38 | * +---------------+ | |
| 39 | * | 8 byte skbp | | | 39 | * | 8 byte skbp | | |
| 40 | * R15+170 -> +---------------+ | | 40 | * R15+176 -> +---------------+ | |
| 41 | * | 8 byte hlen | | | 41 | * | 8 byte hlen | | |
| 42 | * R15+168 -> +---------------+ | | 42 | * R15+168 -> +---------------+ | |
| 43 | * | 4 byte align | | | 43 | * | 4 byte align | | |
| @@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; | |||
| 58 | #define STK_OFF (STK_SPACE - STK_160_UNUSED) | 58 | #define STK_OFF (STK_SPACE - STK_160_UNUSED) |
| 59 | #define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */ | 59 | #define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */ |
| 60 | #define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */ | 60 | #define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */ |
| 61 | #define STK_OFF_SKBP 170 /* Offset of SKB pointer on stack */ | 61 | #define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */ |
| 62 | 62 | ||
| 63 | #define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */ | 63 | #define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */ |
| 64 | #define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */ | 64 | #define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */ |
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 9133b0ec000b..bee281f3163d 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c | |||
| @@ -45,7 +45,7 @@ struct bpf_jit { | |||
| 45 | int labels[1]; /* Labels for local jumps */ | 45 | int labels[1]; /* Labels for local jumps */ |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | #define BPF_SIZE_MAX 0x7ffff /* Max size for program (20 bit signed displ) */ | 48 | #define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */ |
| 49 | 49 | ||
| 50 | #define SEEN_SKB 1 /* skb access */ | 50 | #define SEEN_SKB 1 /* skb access */ |
| 51 | #define SEEN_MEM 2 /* use mem[] for temporary storage */ | 51 | #define SEEN_MEM 2 /* use mem[] for temporary storage */ |
| @@ -450,7 +450,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit) | |||
| 450 | emit_load_skb_data_hlen(jit); | 450 | emit_load_skb_data_hlen(jit); |
| 451 | if (jit->seen & SEEN_SKB_CHANGE) | 451 | if (jit->seen & SEEN_SKB_CHANGE) |
| 452 | /* stg %b1,ST_OFF_SKBP(%r0,%r15) */ | 452 | /* stg %b1,ST_OFF_SKBP(%r0,%r15) */ |
| 453 | EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, | 453 | EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15, |
| 454 | STK_OFF_SKBP); | 454 | STK_OFF_SKBP); |
| 455 | } | 455 | } |
| 456 | 456 | ||
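The bpf_jit.h and bpf_jit_comp.c hunks above move the saved skb pointer from stack offset 170 to 176, so the 8-byte slot is naturally aligned and sits above the 8-byte header-length slot at 168 instead of overlapping it, and they make the prologue store BPF register 1 (the skb pointer) rather than the work register. A small self-contained sketch of the slot invariants, with the two macro values copied from the hunk and everything else a stand-in:

    /* Compile-time check of the layout invariants behind the 170 -> 176
     * change; with the old value 170 both assertions fail (170 & 7 == 2,
     * and 170 < 168 + 8 means it overlaps the hlen slot). C11. */
    #include <assert.h>

    #define STK_OFF_HLEN 168   /* 8-byte skb header-length slot (unchanged) */
    #define STK_OFF_SKBP 176   /* 8-byte skb pointer slot (was 170)         */

    static_assert((STK_OFF_SKBP & 7) == 0,
                  "skb pointer slot must be 8-byte aligned");
    static_assert(STK_OFF_SKBP >= STK_OFF_HLEN + 8,
                  "skb pointer slot must not overlap the hlen slot");

    int main(void) { return 0; }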
diff --git a/arch/score/include/asm/pgalloc.h b/arch/score/include/asm/pgalloc.h index 2e067657db98..49b012d78c1a 100644 --- a/arch/score/include/asm/pgalloc.h +++ b/arch/score/include/asm/pgalloc.h | |||
| @@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | |||
| 42 | { | 42 | { |
| 43 | pte_t *pte; | 43 | pte_t *pte; |
| 44 | 44 | ||
| 45 | pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, | 45 | pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER); |
| 46 | PTE_ORDER); | ||
| 47 | 46 | ||
| 48 | return pte; | 47 | return pte; |
| 49 | } | 48 | } |
| @@ -53,7 +52,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, | |||
| 53 | { | 52 | { |
| 54 | struct page *pte; | 53 | struct page *pte; |
| 55 | 54 | ||
| 56 | pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER); | 55 | pte = alloc_pages(GFP_KERNEL, PTE_ORDER); |
| 57 | if (!pte) | 56 | if (!pte) |
| 58 | return NULL; | 57 | return NULL; |
| 59 | clear_highpage(pte); | 58 | clear_highpage(pte); |
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index a33673b3687d..f3f42c84c40f 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h | |||
| @@ -34,7 +34,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, | |||
| 34 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 34 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
| 35 | unsigned long address) | 35 | unsigned long address) |
| 36 | { | 36 | { |
| 37 | return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); | 37 | return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL); |
| 38 | } | 38 | } |
| 39 | 39 | ||
| 40 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, | 40 | static inline pgtable_t pte_alloc_one(struct mm_struct *mm, |
| @@ -43,7 +43,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, | |||
| 43 | struct page *page; | 43 | struct page *page; |
| 44 | void *pg; | 44 | void *pg; |
| 45 | 45 | ||
| 46 | pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL); | 46 | pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL); |
| 47 | if (!pg) | 47 | if (!pg) |
| 48 | return NULL; | 48 | return NULL; |
| 49 | page = virt_to_page(pg); | 49 | page = virt_to_page(pg); |
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c index 26e03a1f7ca4..a62bd8696779 100644 --- a/arch/sh/mm/pgtable.c +++ b/arch/sh/mm/pgtable.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | #include <linux/mm.h> | 1 | #include <linux/mm.h> |
| 2 | #include <linux/slab.h> | 2 | #include <linux/slab.h> |
| 3 | 3 | ||
| 4 | #define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO | 4 | #define PGALLOC_GFP GFP_KERNEL | __GFP_ZERO |
| 5 | 5 | ||
| 6 | static struct kmem_cache *pgd_cachep; | 6 | static struct kmem_cache *pgd_cachep; |
| 7 | #if PAGETABLE_LEVELS > 2 | 7 | #if PAGETABLE_LEVELS > 2 |
diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h index 10e9dabc4c41..f0700cfeedd7 100644 --- a/arch/sparc/include/asm/head_64.h +++ b/arch/sparc/include/asm/head_64.h | |||
| @@ -15,6 +15,10 @@ | |||
| 15 | 15 | ||
| 16 | #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) | 16 | #define PTREGS_OFF (STACK_BIAS + STACKFRAME_SZ) |
| 17 | 17 | ||
| 18 | #define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE) | ||
| 19 | #define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV) | ||
| 20 | #define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG) | ||
| 21 | |||
| 18 | #define __CHEETAH_ID 0x003e0014 | 22 | #define __CHEETAH_ID 0x003e0014 |
| 19 | #define __JALAPENO_ID 0x003e0016 | 23 | #define __JALAPENO_ID 0x003e0016 |
| 20 | #define __SERRANO_ID 0x003e0022 | 24 | #define __SERRANO_ID 0x003e0022 |
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h index 5e3187185b4a..3529f1378cd8 100644 --- a/arch/sparc/include/asm/pgalloc_64.h +++ b/arch/sparc/include/asm/pgalloc_64.h | |||
| @@ -41,8 +41,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd) | |||
| 41 | 41 | ||
| 42 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) | 42 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) |
| 43 | { | 43 | { |
| 44 | return kmem_cache_alloc(pgtable_cache, | 44 | return kmem_cache_alloc(pgtable_cache, GFP_KERNEL); |
| 45 | GFP_KERNEL|__GFP_REPEAT); | ||
| 46 | } | 45 | } |
| 47 | 46 | ||
| 48 | static inline void pud_free(struct mm_struct *mm, pud_t *pud) | 47 | static inline void pud_free(struct mm_struct *mm, pud_t *pud) |
| @@ -52,8 +51,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud) | |||
| 52 | 51 | ||
| 53 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) | 52 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) |
| 54 | { | 53 | { |
| 55 | return kmem_cache_alloc(pgtable_cache, | 54 | return kmem_cache_alloc(pgtable_cache, GFP_KERNEL); |
| 56 | GFP_KERNEL|__GFP_REPEAT); | ||
| 57 | } | 55 | } |
| 58 | 56 | ||
| 59 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | 57 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) |
diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h index 71b5a67522ab..781b9f1dbdc2 100644 --- a/arch/sparc/include/asm/ttable.h +++ b/arch/sparc/include/asm/ttable.h | |||
| @@ -589,8 +589,8 @@ user_rtt_fill_64bit: \ | |||
| 589 | restored; \ | 589 | restored; \ |
| 590 | nop; nop; nop; nop; nop; nop; \ | 590 | nop; nop; nop; nop; nop; nop; \ |
| 591 | nop; nop; nop; nop; nop; \ | 591 | nop; nop; nop; nop; nop; \ |
| 592 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | 592 | ba,a,pt %xcc, user_rtt_fill_fixup_dax; \ |
| 593 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | 593 | ba,a,pt %xcc, user_rtt_fill_fixup_mna; \ |
| 594 | ba,a,pt %xcc, user_rtt_fill_fixup; | 594 | ba,a,pt %xcc, user_rtt_fill_fixup; |
| 595 | 595 | ||
| 596 | 596 | ||
| @@ -652,8 +652,8 @@ user_rtt_fill_32bit: \ | |||
| 652 | restored; \ | 652 | restored; \ |
| 653 | nop; nop; nop; nop; nop; \ | 653 | nop; nop; nop; nop; nop; \ |
| 654 | nop; nop; nop; \ | 654 | nop; nop; nop; \ |
| 655 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | 655 | ba,a,pt %xcc, user_rtt_fill_fixup_dax; \ |
| 656 | ba,a,pt %xcc, user_rtt_fill_fixup; \ | 656 | ba,a,pt %xcc, user_rtt_fill_fixup_mna; \ |
| 657 | ba,a,pt %xcc, user_rtt_fill_fixup; | 657 | ba,a,pt %xcc, user_rtt_fill_fixup; |
| 658 | 658 | ||
| 659 | 659 | ||
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 7cf9c6ea3f1f..fdb13327fded 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile | |||
| @@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg | |||
| 21 | CFLAGS_REMOVE_pcr.o := -pg | 21 | CFLAGS_REMOVE_pcr.o := -pg |
| 22 | endif | 22 | endif |
| 23 | 23 | ||
| 24 | obj-$(CONFIG_SPARC64) += urtt_fill.o | ||
| 24 | obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o | 25 | obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o |
| 25 | obj-$(CONFIG_SPARC32) += etrap_32.o | 26 | obj-$(CONFIG_SPARC32) += etrap_32.o |
| 26 | obj-$(CONFIG_SPARC32) += rtrap_32.o | 27 | obj-$(CONFIG_SPARC32) += rtrap_32.o |
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index d08bdaffdbfc..216948ca4382 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S | |||
| @@ -14,10 +14,6 @@ | |||
| 14 | #include <asm/visasm.h> | 14 | #include <asm/visasm.h> |
| 15 | #include <asm/processor.h> | 15 | #include <asm/processor.h> |
| 16 | 16 | ||
| 17 | #define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE) | ||
| 18 | #define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV) | ||
| 19 | #define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG) | ||
| 20 | |||
| 21 | #ifdef CONFIG_CONTEXT_TRACKING | 17 | #ifdef CONFIG_CONTEXT_TRACKING |
| 22 | # define SCHEDULE_USER schedule_user | 18 | # define SCHEDULE_USER schedule_user |
| 23 | #else | 19 | #else |
| @@ -242,52 +238,17 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1 | |||
| 242 | wrpr %g1, %cwp | 238 | wrpr %g1, %cwp |
| 243 | ba,a,pt %xcc, user_rtt_fill_64bit | 239 | ba,a,pt %xcc, user_rtt_fill_64bit |
| 244 | 240 | ||
| 245 | user_rtt_fill_fixup: | 241 | user_rtt_fill_fixup_dax: |
| 246 | rdpr %cwp, %g1 | 242 | ba,pt %xcc, user_rtt_fill_fixup_common |
| 247 | add %g1, 1, %g1 | 243 | mov 1, %g3 |
| 248 | wrpr %g1, 0x0, %cwp | ||
| 249 | |||
| 250 | rdpr %wstate, %g2 | ||
| 251 | sll %g2, 3, %g2 | ||
| 252 | wrpr %g2, 0x0, %wstate | ||
| 253 | |||
| 254 | /* We know %canrestore and %otherwin are both zero. */ | ||
| 255 | |||
| 256 | sethi %hi(sparc64_kern_pri_context), %g2 | ||
| 257 | ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2 | ||
| 258 | mov PRIMARY_CONTEXT, %g1 | ||
| 259 | |||
| 260 | 661: stxa %g2, [%g1] ASI_DMMU | ||
| 261 | .section .sun4v_1insn_patch, "ax" | ||
| 262 | .word 661b | ||
| 263 | stxa %g2, [%g1] ASI_MMU | ||
| 264 | .previous | ||
| 265 | |||
| 266 | sethi %hi(KERNBASE), %g1 | ||
| 267 | flush %g1 | ||
| 268 | 244 | ||
| 269 | or %g4, FAULT_CODE_WINFIXUP, %g4 | 245 | user_rtt_fill_fixup_mna: |
| 270 | stb %g4, [%g6 + TI_FAULT_CODE] | 246 | ba,pt %xcc, user_rtt_fill_fixup_common |
| 271 | stx %g5, [%g6 + TI_FAULT_ADDR] | 247 | mov 2, %g3 |
| 272 | 248 | ||
| 273 | mov %g6, %l1 | 249 | user_rtt_fill_fixup: |
| 274 | wrpr %g0, 0x0, %tl | 250 | ba,pt %xcc, user_rtt_fill_fixup_common |
| 275 | 251 | clr %g3 | |
| 276 | 661: nop | ||
| 277 | .section .sun4v_1insn_patch, "ax" | ||
| 278 | .word 661b | ||
| 279 | SET_GL(0) | ||
| 280 | .previous | ||
| 281 | |||
| 282 | wrpr %g0, RTRAP_PSTATE, %pstate | ||
| 283 | |||
| 284 | mov %l1, %g6 | ||
| 285 | ldx [%g6 + TI_TASK], %g4 | ||
| 286 | LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) | ||
| 287 | call do_sparc64_fault | ||
| 288 | add %sp, PTREGS_OFF, %o0 | ||
| 289 | ba,pt %xcc, rtrap | ||
| 290 | nop | ||
| 291 | 252 | ||
| 292 | user_rtt_pre_restore: | 253 | user_rtt_pre_restore: |
| 293 | add %g1, 1, %g1 | 254 | add %g1, 1, %g1 |
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 3c25241fa5cb..91cc2f4ae4d9 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c | |||
| @@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) | |||
| 138 | return 0; | 138 | return 0; |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | /* Checks if the fp is valid. We always build signal frames which are | ||
| 142 | * 16-byte aligned, therefore we can always enforce that the restore | ||
| 143 | * frame has that property as well. | ||
| 144 | */ | ||
| 145 | static bool invalid_frame_pointer(void __user *fp, int fplen) | ||
| 146 | { | ||
| 147 | if ((((unsigned long) fp) & 15) || | ||
| 148 | ((unsigned long)fp) > 0x100000000ULL - fplen) | ||
| 149 | return true; | ||
| 150 | return false; | ||
| 151 | } | ||
| 152 | |||
| 141 | void do_sigreturn32(struct pt_regs *regs) | 153 | void do_sigreturn32(struct pt_regs *regs) |
| 142 | { | 154 | { |
| 143 | struct signal_frame32 __user *sf; | 155 | struct signal_frame32 __user *sf; |
| 144 | compat_uptr_t fpu_save; | 156 | compat_uptr_t fpu_save; |
| 145 | compat_uptr_t rwin_save; | 157 | compat_uptr_t rwin_save; |
| 146 | unsigned int psr; | 158 | unsigned int psr, ufp; |
| 147 | unsigned int pc, npc; | 159 | unsigned int pc, npc; |
| 148 | sigset_t set; | 160 | sigset_t set; |
| 149 | compat_sigset_t seta; | 161 | compat_sigset_t seta; |
| @@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs) | |||
| 158 | sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP]; | 170 | sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP]; |
| 159 | 171 | ||
| 160 | /* 1. Make sure we are not getting garbage from the user */ | 172 | /* 1. Make sure we are not getting garbage from the user */ |
| 161 | if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || | 173 | if (invalid_frame_pointer(sf, sizeof(*sf))) |
| 162 | (((unsigned long) sf) & 3)) | 174 | goto segv; |
| 175 | |||
| 176 | if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) | ||
| 177 | goto segv; | ||
| 178 | |||
| 179 | if (ufp & 0x7) | ||
| 163 | goto segv; | 180 | goto segv; |
| 164 | 181 | ||
| 165 | if (get_user(pc, &sf->info.si_regs.pc) || | 182 | if (__get_user(pc, &sf->info.si_regs.pc) || |
| 166 | __get_user(npc, &sf->info.si_regs.npc)) | 183 | __get_user(npc, &sf->info.si_regs.npc)) |
| 167 | goto segv; | 184 | goto segv; |
| 168 | 185 | ||
| @@ -227,7 +244,7 @@ segv: | |||
| 227 | asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) | 244 | asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) |
| 228 | { | 245 | { |
| 229 | struct rt_signal_frame32 __user *sf; | 246 | struct rt_signal_frame32 __user *sf; |
| 230 | unsigned int psr, pc, npc; | 247 | unsigned int psr, pc, npc, ufp; |
| 231 | compat_uptr_t fpu_save; | 248 | compat_uptr_t fpu_save; |
| 232 | compat_uptr_t rwin_save; | 249 | compat_uptr_t rwin_save; |
| 233 | sigset_t set; | 250 | sigset_t set; |
| @@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) | |||
| 242 | sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP]; | 259 | sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP]; |
| 243 | 260 | ||
| 244 | /* 1. Make sure we are not getting garbage from the user */ | 261 | /* 1. Make sure we are not getting garbage from the user */ |
| 245 | if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || | 262 | if (invalid_frame_pointer(sf, sizeof(*sf))) |
| 246 | (((unsigned long) sf) & 3)) | ||
| 247 | goto segv; | 263 | goto segv; |
| 248 | 264 | ||
| 249 | if (get_user(pc, &sf->regs.pc) || | 265 | if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) |
| 266 | goto segv; | ||
| 267 | |||
| 268 | if (ufp & 0x7) | ||
| 269 | goto segv; | ||
| 270 | |||
| 271 | if (__get_user(pc, &sf->regs.pc) || | ||
| 250 | __get_user(npc, &sf->regs.npc)) | 272 | __get_user(npc, &sf->regs.npc)) |
| 251 | goto segv; | 273 | goto segv; |
| 252 | 274 | ||
| @@ -307,14 +329,6 @@ segv: | |||
| 307 | force_sig(SIGSEGV, current); | 329 | force_sig(SIGSEGV, current); |
| 308 | } | 330 | } |
| 309 | 331 | ||
| 310 | /* Checks if the fp is valid */ | ||
| 311 | static int invalid_frame_pointer(void __user *fp, int fplen) | ||
| 312 | { | ||
| 313 | if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen) | ||
| 314 | return 1; | ||
| 315 | return 0; | ||
| 316 | } | ||
| 317 | |||
| 318 | static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) | 332 | static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) |
| 319 | { | 333 | { |
| 320 | unsigned long sp; | 334 | unsigned long sp; |
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 52aa5e4ce5e7..c3c12efe0bc0 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c | |||
| @@ -60,10 +60,22 @@ struct rt_signal_frame { | |||
| 60 | #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7))) | 60 | #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7))) |
| 61 | #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7))) | 61 | #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7))) |
| 62 | 62 | ||
| 63 | /* Checks if the fp is valid. We always build signal frames which are | ||
| 64 | * 16-byte aligned, therefore we can always enforce that the restore | ||
| 65 | * frame has that property as well. | ||
| 66 | */ | ||
| 67 | static inline bool invalid_frame_pointer(void __user *fp, int fplen) | ||
| 68 | { | ||
| 69 | if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen)) | ||
| 70 | return true; | ||
| 71 | |||
| 72 | return false; | ||
| 73 | } | ||
| 74 | |||
| 63 | asmlinkage void do_sigreturn(struct pt_regs *regs) | 75 | asmlinkage void do_sigreturn(struct pt_regs *regs) |
| 64 | { | 76 | { |
| 77 | unsigned long up_psr, pc, npc, ufp; | ||
| 65 | struct signal_frame __user *sf; | 78 | struct signal_frame __user *sf; |
| 66 | unsigned long up_psr, pc, npc; | ||
| 67 | sigset_t set; | 79 | sigset_t set; |
| 68 | __siginfo_fpu_t __user *fpu_save; | 80 | __siginfo_fpu_t __user *fpu_save; |
| 69 | __siginfo_rwin_t __user *rwin_save; | 81 | __siginfo_rwin_t __user *rwin_save; |
| @@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) | |||
| 77 | sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; | 89 | sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; |
| 78 | 90 | ||
| 79 | /* 1. Make sure we are not getting garbage from the user */ | 91 | /* 1. Make sure we are not getting garbage from the user */ |
| 80 | if (!access_ok(VERIFY_READ, sf, sizeof(*sf))) | 92 | if (!invalid_frame_pointer(sf, sizeof(*sf))) |
| 93 | goto segv_and_exit; | ||
| 94 | |||
| 95 | if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) | ||
| 81 | goto segv_and_exit; | 96 | goto segv_and_exit; |
| 82 | 97 | ||
| 83 | if (((unsigned long) sf) & 3) | 98 | if (ufp & 0x7) |
| 84 | goto segv_and_exit; | 99 | goto segv_and_exit; |
| 85 | 100 | ||
| 86 | err = __get_user(pc, &sf->info.si_regs.pc); | 101 | err = __get_user(pc, &sf->info.si_regs.pc); |
| @@ -127,7 +142,7 @@ segv_and_exit: | |||
| 127 | asmlinkage void do_rt_sigreturn(struct pt_regs *regs) | 142 | asmlinkage void do_rt_sigreturn(struct pt_regs *regs) |
| 128 | { | 143 | { |
| 129 | struct rt_signal_frame __user *sf; | 144 | struct rt_signal_frame __user *sf; |
| 130 | unsigned int psr, pc, npc; | 145 | unsigned int psr, pc, npc, ufp; |
| 131 | __siginfo_fpu_t __user *fpu_save; | 146 | __siginfo_fpu_t __user *fpu_save; |
| 132 | __siginfo_rwin_t __user *rwin_save; | 147 | __siginfo_rwin_t __user *rwin_save; |
| 133 | sigset_t set; | 148 | sigset_t set; |
| @@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) | |||
| 135 | 150 | ||
| 136 | synchronize_user_stack(); | 151 | synchronize_user_stack(); |
| 137 | sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; | 152 | sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; |
| 138 | if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || | 153 | if (!invalid_frame_pointer(sf, sizeof(*sf))) |
| 139 | (((unsigned long) sf) & 0x03)) | 154 | goto segv; |
| 155 | |||
| 156 | if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) | ||
| 157 | goto segv; | ||
| 158 | |||
| 159 | if (ufp & 0x7) | ||
| 140 | goto segv; | 160 | goto segv; |
| 141 | 161 | ||
| 142 | err = __get_user(pc, &sf->regs.pc); | 162 | err = __get_user(pc, &sf->regs.pc); |
| @@ -178,15 +198,6 @@ segv: | |||
| 178 | force_sig(SIGSEGV, current); | 198 | force_sig(SIGSEGV, current); |
| 179 | } | 199 | } |
| 180 | 200 | ||
| 181 | /* Checks if the fp is valid */ | ||
| 182 | static inline int invalid_frame_pointer(void __user *fp, int fplen) | ||
| 183 | { | ||
| 184 | if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen)) | ||
| 185 | return 1; | ||
| 186 | |||
| 187 | return 0; | ||
| 188 | } | ||
| 189 | |||
| 190 | static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) | 201 | static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) |
| 191 | { | 202 | { |
| 192 | unsigned long sp = regs->u_regs[UREG_FP]; | 203 | unsigned long sp = regs->u_regs[UREG_FP]; |
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 39aaec173f66..5ee930c48f4c 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c | |||
| @@ -234,6 +234,17 @@ do_sigsegv: | |||
| 234 | goto out; | 234 | goto out; |
| 235 | } | 235 | } |
| 236 | 236 | ||
| 237 | /* Checks if the fp is valid. We always build rt signal frames which | ||
| 238 | * are 16-byte aligned, therefore we can always enforce that the | ||
| 239 | * restore frame has that property as well. | ||
| 240 | */ | ||
| 241 | static bool invalid_frame_pointer(void __user *fp) | ||
| 242 | { | ||
| 243 | if (((unsigned long) fp) & 15) | ||
| 244 | return true; | ||
| 245 | return false; | ||
| 246 | } | ||
| 247 | |||
| 237 | struct rt_signal_frame { | 248 | struct rt_signal_frame { |
| 238 | struct sparc_stackf ss; | 249 | struct sparc_stackf ss; |
| 239 | siginfo_t info; | 250 | siginfo_t info; |
| @@ -246,8 +257,8 @@ struct rt_signal_frame { | |||
| 246 | 257 | ||
| 247 | void do_rt_sigreturn(struct pt_regs *regs) | 258 | void do_rt_sigreturn(struct pt_regs *regs) |
| 248 | { | 259 | { |
| 260 | unsigned long tpc, tnpc, tstate, ufp; | ||
| 249 | struct rt_signal_frame __user *sf; | 261 | struct rt_signal_frame __user *sf; |
| 250 | unsigned long tpc, tnpc, tstate; | ||
| 251 | __siginfo_fpu_t __user *fpu_save; | 262 | __siginfo_fpu_t __user *fpu_save; |
| 252 | __siginfo_rwin_t __user *rwin_save; | 263 | __siginfo_rwin_t __user *rwin_save; |
| 253 | sigset_t set; | 264 | sigset_t set; |
| @@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs) | |||
| 261 | (regs->u_regs [UREG_FP] + STACK_BIAS); | 272 | (regs->u_regs [UREG_FP] + STACK_BIAS); |
| 262 | 273 | ||
| 263 | /* 1. Make sure we are not getting garbage from the user */ | 274 | /* 1. Make sure we are not getting garbage from the user */ |
| 264 | if (((unsigned long) sf) & 3) | 275 | if (invalid_frame_pointer(sf)) |
| 276 | goto segv; | ||
| 277 | |||
| 278 | if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) | ||
| 265 | goto segv; | 279 | goto segv; |
| 266 | 280 | ||
| 267 | err = get_user(tpc, &sf->regs.tpc); | 281 | if ((ufp + STACK_BIAS) & 0x7) |
| 282 | goto segv; | ||
| 283 | |||
| 284 | err = __get_user(tpc, &sf->regs.tpc); | ||
| 268 | err |= __get_user(tnpc, &sf->regs.tnpc); | 285 | err |= __get_user(tnpc, &sf->regs.tnpc); |
| 269 | if (test_thread_flag(TIF_32BIT)) { | 286 | if (test_thread_flag(TIF_32BIT)) { |
| 270 | tpc &= 0xffffffff; | 287 | tpc &= 0xffffffff; |
| @@ -308,14 +325,6 @@ segv: | |||
| 308 | force_sig(SIGSEGV, current); | 325 | force_sig(SIGSEGV, current); |
| 309 | } | 326 | } |
| 310 | 327 | ||
| 311 | /* Checks if the fp is valid */ | ||
| 312 | static int invalid_frame_pointer(void __user *fp) | ||
| 313 | { | ||
| 314 | if (((unsigned long) fp) & 15) | ||
| 315 | return 1; | ||
| 316 | return 0; | ||
| 317 | } | ||
| 318 | |||
| 319 | static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) | 328 | static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) |
| 320 | { | 329 | { |
| 321 | unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; | 330 | unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; |
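The signal32.c, signal_32.c and signal_64.c hunks above all follow the same hardening pattern on sigreturn: reject the user frame unless its pointer is 16-byte aligned (frames are always built that way), then fetch the saved frame pointer out of it with get_user() and require that to be 8-byte aligned (on sparc64 after adding STACK_BIAS) before restoring anything else. A minimal user-space sketch of that check pattern, with hypothetical helper names standing in for the kernel code:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Signal frames are built 16-byte aligned, so any other alignment
     * handed back at sigreturn time is rejected outright. */
    static bool invalid_frame_pointer(const void *fp)
    {
            return ((uintptr_t)fp & 15) != 0;
    }

    /* The saved frame pointer stored inside the frame must itself be
     * 8-byte aligned before the register window is trusted. */
    static bool frame_looks_sane(const void *sf, uint64_t saved_fp)
    {
            if (invalid_frame_pointer(sf))
                    return false;            /* kernel: goto segv */
            return (saved_fp & 0x7) == 0;
    }

    int main(void)
    {
            printf("%d\n", frame_looks_sane((void *)0x10010UL, 0x20008)); /* 1 */
            printf("%d\n", frame_looks_sane((void *)0x10008UL, 0x20008)); /* 0 */
            return 0;
    }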
diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c index 0f6eebe71e6c..e5fe8cef9a69 100644 --- a/arch/sparc/kernel/sigutil_32.c +++ b/arch/sparc/kernel/sigutil_32.c | |||
| @@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | |||
| 48 | int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | 48 | int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) |
| 49 | { | 49 | { |
| 50 | int err; | 50 | int err; |
| 51 | |||
| 52 | if (((unsigned long) fpu) & 3) | ||
| 53 | return -EFAULT; | ||
| 54 | |||
| 51 | #ifdef CONFIG_SMP | 55 | #ifdef CONFIG_SMP |
| 52 | if (test_tsk_thread_flag(current, TIF_USEDFPU)) | 56 | if (test_tsk_thread_flag(current, TIF_USEDFPU)) |
| 53 | regs->psr &= ~PSR_EF; | 57 | regs->psr &= ~PSR_EF; |
| @@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp) | |||
| 97 | struct thread_info *t = current_thread_info(); | 101 | struct thread_info *t = current_thread_info(); |
| 98 | int i, wsaved, err; | 102 | int i, wsaved, err; |
| 99 | 103 | ||
| 100 | __get_user(wsaved, &rp->wsaved); | 104 | if (((unsigned long) rp) & 3) |
| 105 | return -EFAULT; | ||
| 106 | |||
| 107 | get_user(wsaved, &rp->wsaved); | ||
| 101 | if (wsaved > NSWINS) | 108 | if (wsaved > NSWINS) |
| 102 | return -EFAULT; | 109 | return -EFAULT; |
| 103 | 110 | ||
diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c index 387834a9c56a..36aadcbeac69 100644 --- a/arch/sparc/kernel/sigutil_64.c +++ b/arch/sparc/kernel/sigutil_64.c | |||
| @@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) | |||
| 37 | unsigned long fprs; | 37 | unsigned long fprs; |
| 38 | int err; | 38 | int err; |
| 39 | 39 | ||
| 40 | err = __get_user(fprs, &fpu->si_fprs); | 40 | if (((unsigned long) fpu) & 7) |
| 41 | return -EFAULT; | ||
| 42 | |||
| 43 | err = get_user(fprs, &fpu->si_fprs); | ||
| 41 | fprs_write(0); | 44 | fprs_write(0); |
| 42 | regs->tstate &= ~TSTATE_PEF; | 45 | regs->tstate &= ~TSTATE_PEF; |
| 43 | if (fprs & FPRS_DL) | 46 | if (fprs & FPRS_DL) |
| @@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp) | |||
| 72 | struct thread_info *t = current_thread_info(); | 75 | struct thread_info *t = current_thread_info(); |
| 73 | int i, wsaved, err; | 76 | int i, wsaved, err; |
| 74 | 77 | ||
| 75 | __get_user(wsaved, &rp->wsaved); | 78 | if (((unsigned long) rp) & 7) |
| 79 | return -EFAULT; | ||
| 80 | |||
| 81 | get_user(wsaved, &rp->wsaved); | ||
| 76 | if (wsaved > NSWINS) | 82 | if (wsaved > NSWINS) |
| 77 | return -EFAULT; | 83 | return -EFAULT; |
| 78 | 84 | ||
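Both sigutil hunks above follow the same pattern: check the user pointer's alignment up front and make the first dereference a full get_user(), which (unlike __get_user()) also validates that the address lies in the user range. A stand-alone analog of that "validate range and alignment before the raw read" idea, with an invented buffer standing in for user memory:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint32_t user_words[16];              /* stands in for user memory */

static int raw_read(uint32_t *dst, const void *src)
{
        memcpy(dst, src, sizeof(*dst));      /* no checks, like __get_user() */
        return 0;
}

static int check_read(uint32_t *dst, const void *src)
{
        uintptr_t p  = (uintptr_t)src;
        uintptr_t lo = (uintptr_t)user_words;
        uintptr_t hi = lo + sizeof(user_words) - sizeof(*dst);

        if (p < lo || p > hi)                /* outside the "user" range */
                return -1;
        if (p & 3)                           /* misaligned, like the new & 3 test */
                return -1;
        return raw_read(dst, src);           /* only now do the raw access */
}

int main(void)
{
        uint32_t v;
        unsigned char *base = (unsigned char *)user_words;

        printf("%d\n", check_read(&v, base + 4));   /* 0: in range and aligned */
        printf("%d\n", check_read(&v, base + 5));   /* -1: misaligned */
        return 0;
}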
diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S new file mode 100644 index 000000000000..5604a2b051d4 --- /dev/null +++ b/arch/sparc/kernel/urtt_fill.S | |||
| @@ -0,0 +1,98 @@ | |||
| 1 | #include <asm/thread_info.h> | ||
| 2 | #include <asm/trap_block.h> | ||
| 3 | #include <asm/spitfire.h> | ||
| 4 | #include <asm/ptrace.h> | ||
| 5 | #include <asm/head.h> | ||
| 6 | |||
| 7 | .text | ||
| 8 | .align 8 | ||
| 9 | .globl user_rtt_fill_fixup_common | ||
| 10 | user_rtt_fill_fixup_common: | ||
| 11 | rdpr %cwp, %g1 | ||
| 12 | add %g1, 1, %g1 | ||
| 13 | wrpr %g1, 0x0, %cwp | ||
| 14 | |||
| 15 | rdpr %wstate, %g2 | ||
| 16 | sll %g2, 3, %g2 | ||
| 17 | wrpr %g2, 0x0, %wstate | ||
| 18 | |||
| 19 | /* We know %canrestore and %otherwin are both zero. */ | ||
| 20 | |||
| 21 | sethi %hi(sparc64_kern_pri_context), %g2 | ||
| 22 | ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2 | ||
| 23 | mov PRIMARY_CONTEXT, %g1 | ||
| 24 | |||
| 25 | 661: stxa %g2, [%g1] ASI_DMMU | ||
| 26 | .section .sun4v_1insn_patch, "ax" | ||
| 27 | .word 661b | ||
| 28 | stxa %g2, [%g1] ASI_MMU | ||
| 29 | .previous | ||
| 30 | |||
| 31 | sethi %hi(KERNBASE), %g1 | ||
| 32 | flush %g1 | ||
| 33 | |||
| 34 | mov %g4, %l4 | ||
| 35 | mov %g5, %l5 | ||
| 36 | brnz,pn %g3, 1f | ||
| 37 | mov %g3, %l3 | ||
| 38 | |||
| 39 | or %g4, FAULT_CODE_WINFIXUP, %g4 | ||
| 40 | stb %g4, [%g6 + TI_FAULT_CODE] | ||
| 41 | stx %g5, [%g6 + TI_FAULT_ADDR] | ||
| 42 | 1: | ||
| 43 | mov %g6, %l1 | ||
| 44 | wrpr %g0, 0x0, %tl | ||
| 45 | |||
| 46 | 661: nop | ||
| 47 | .section .sun4v_1insn_patch, "ax" | ||
| 48 | .word 661b | ||
| 49 | SET_GL(0) | ||
| 50 | .previous | ||
| 51 | |||
| 52 | wrpr %g0, RTRAP_PSTATE, %pstate | ||
| 53 | |||
| 54 | mov %l1, %g6 | ||
| 55 | ldx [%g6 + TI_TASK], %g4 | ||
| 56 | LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3) | ||
| 57 | |||
| 58 | brnz,pn %l3, 1f | ||
| 59 | nop | ||
| 60 | |||
| 61 | call do_sparc64_fault | ||
| 62 | add %sp, PTREGS_OFF, %o0 | ||
| 63 | ba,pt %xcc, rtrap | ||
| 64 | nop | ||
| 65 | |||
| 66 | 1: cmp %g3, 2 | ||
| 67 | bne,pn %xcc, 2f | ||
| 68 | nop | ||
| 69 | |||
| 70 | sethi %hi(tlb_type), %g1 | ||
| 71 | lduw [%g1 + %lo(tlb_type)], %g1 | ||
| 72 | cmp %g1, 3 | ||
| 73 | bne,pt %icc, 1f | ||
| 74 | add %sp, PTREGS_OFF, %o0 | ||
| 75 | mov %l4, %o2 | ||
| 76 | call sun4v_do_mna | ||
| 77 | mov %l5, %o1 | ||
| 78 | ba,a,pt %xcc, rtrap | ||
| 79 | 1: mov %l4, %o1 | ||
| 80 | mov %l5, %o2 | ||
| 81 | call mem_address_unaligned | ||
| 82 | nop | ||
| 83 | ba,a,pt %xcc, rtrap | ||
| 84 | |||
| 85 | 2: sethi %hi(tlb_type), %g1 | ||
| 86 | mov %l4, %o1 | ||
| 87 | lduw [%g1 + %lo(tlb_type)], %g1 | ||
| 88 | mov %l5, %o2 | ||
| 89 | cmp %g1, 3 | ||
| 90 | bne,pt %icc, 1f | ||
| 91 | add %sp, PTREGS_OFF, %o0 | ||
| 92 | call sun4v_data_access_exception | ||
| 93 | nop | ||
| 94 | ba,a,pt %xcc, rtrap | ||
| 95 | |||
| 96 | 1: call spitfire_data_access_exception | ||
| 97 | nop | ||
| 98 | ba,a,pt %xcc, rtrap | ||
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 652683cb4b4b..aec508e37490 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
| @@ -2704,8 +2704,7 @@ void __flush_tlb_all(void) | |||
| 2704 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 2704 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
| 2705 | unsigned long address) | 2705 | unsigned long address) |
| 2706 | { | 2706 | { |
| 2707 | struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | | 2707 | struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); |
| 2708 | __GFP_REPEAT | __GFP_ZERO); | ||
| 2709 | pte_t *pte = NULL; | 2708 | pte_t *pte = NULL; |
| 2710 | 2709 | ||
| 2711 | if (page) | 2710 | if (page) |
| @@ -2717,8 +2716,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | |||
| 2717 | pgtable_t pte_alloc_one(struct mm_struct *mm, | 2716 | pgtable_t pte_alloc_one(struct mm_struct *mm, |
| 2718 | unsigned long address) | 2717 | unsigned long address) |
| 2719 | { | 2718 | { |
| 2720 | struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | | 2719 | struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO); |
| 2721 | __GFP_REPEAT | __GFP_ZERO); | ||
| 2722 | if (!page) | 2720 | if (!page) |
| 2723 | return NULL; | 2721 | return NULL; |
| 2724 | if (!pgtable_page_ctor(page)) { | 2722 | if (!pgtable_page_ctor(page)) { |
| @@ -2824,9 +2822,10 @@ void hugetlb_setup(struct pt_regs *regs) | |||
| 2824 | * the Data-TLB for huge pages. | 2822 | * the Data-TLB for huge pages. |
| 2825 | */ | 2823 | */ |
| 2826 | if (tlb_type == cheetah_plus) { | 2824 | if (tlb_type == cheetah_plus) { |
| 2825 | bool need_context_reload = false; | ||
| 2827 | unsigned long ctx; | 2826 | unsigned long ctx; |
| 2828 | 2827 | ||
| 2829 | spin_lock(&ctx_alloc_lock); | 2828 | spin_lock_irq(&ctx_alloc_lock); |
| 2830 | ctx = mm->context.sparc64_ctx_val; | 2829 | ctx = mm->context.sparc64_ctx_val; |
| 2831 | ctx &= ~CTX_PGSZ_MASK; | 2830 | ctx &= ~CTX_PGSZ_MASK; |
| 2832 | ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; | 2831 | ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT; |
| @@ -2845,9 +2844,12 @@ void hugetlb_setup(struct pt_regs *regs) | |||
| 2845 | * also executing in this address space. | 2844 | * also executing in this address space. |
| 2846 | */ | 2845 | */ |
| 2847 | mm->context.sparc64_ctx_val = ctx; | 2846 | mm->context.sparc64_ctx_val = ctx; |
| 2848 | on_each_cpu(context_reload, mm, 0); | 2847 | need_context_reload = true; |
| 2849 | } | 2848 | } |
| 2850 | spin_unlock(&ctx_alloc_lock); | 2849 | spin_unlock_irq(&ctx_alloc_lock); |
| 2850 | |||
| 2851 | if (need_context_reload) | ||
| 2852 | on_each_cpu(context_reload, mm, 0); | ||
| 2851 | } | 2853 | } |
| 2852 | } | 2854 | } |
| 2853 | #endif | 2855 | #endif |
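The hugetlb_setup() change above takes ctx_alloc_lock with interrupts disabled and only records that a context reload is needed; the on_each_cpu() call is issued after the lock is dropped, since broadcasting cross-CPU work while holding a spinlock with IRQs off risks deadlock. A user-space sketch of the same "decide under the lock, act after unlocking" shape, using pthreads and invented names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long ctx_val;

static void notify_all_cpus(void)
{
        /* stands in for on_each_cpu(); must not run under ctx_lock */
        printf("reloading context %#lx everywhere\n", ctx_val);
}

static void update_context(unsigned long new_bits)
{
        bool need_reload = false;

        pthread_mutex_lock(&ctx_lock);
        if ((ctx_val & 0xff) != new_bits) {
                ctx_val = (ctx_val & ~0xffUL) | new_bits;
                need_reload = true;          /* remember the decision ... */
        }
        pthread_mutex_unlock(&ctx_lock);

        if (need_reload)                     /* ... and act on it lock-free */
                notify_all_cpus();
}

int main(void)
{
        update_context(0x2a);
        return 0;
}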
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index 4b7cef9e94e0..c1467ac59ce6 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h | |||
| @@ -78,7 +78,7 @@ struct thread_info { | |||
| 78 | 78 | ||
| 79 | #ifndef __ASSEMBLY__ | 79 | #ifndef __ASSEMBLY__ |
| 80 | 80 | ||
| 81 | void arch_release_thread_info(struct thread_info *info); | 81 | void arch_release_thread_stack(unsigned long *stack); |
| 82 | 82 | ||
| 83 | /* How to get the thread information struct from C. */ | 83 | /* How to get the thread information struct from C. */ |
| 84 | register unsigned long stack_pointer __asm__("sp"); | 84 | register unsigned long stack_pointer __asm__("sp"); |
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 6b705ccc9cc1..a465d8372edd 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c | |||
| @@ -73,8 +73,9 @@ void arch_cpu_idle(void) | |||
| 73 | /* | 73 | /* |
| 74 | * Release a thread_info structure | 74 | * Release a thread_info structure |
| 75 | */ | 75 | */ |
| 76 | void arch_release_thread_info(struct thread_info *info) | 76 | void arch_release_thread_stack(unsigned long *stack) |
| 77 | { | 77 | { |
| 78 | struct thread_info *info = (void *)stack; | ||
| 78 | struct single_step_state *step_state = info->step_state; | 79 | struct single_step_state *step_state = info->step_state; |
| 79 | 80 | ||
| 80 | if (step_state) { | 81 | if (step_state) { |
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c index 7bf2491a9c1f..c4d5bf841a7f 100644 --- a/arch/tile/mm/pgtable.c +++ b/arch/tile/mm/pgtable.c | |||
| @@ -231,7 +231,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
| 231 | struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address, | 231 | struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address, |
| 232 | int order) | 232 | int order) |
| 233 | { | 233 | { |
| 234 | gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO; | 234 | gfp_t flags = GFP_KERNEL|__GFP_ZERO; |
| 235 | struct page *p; | 235 | struct page *p; |
| 236 | int i; | 236 | int i; |
| 237 | 237 | ||
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index b2a2dff50b4e..e7437ec62710 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c | |||
| @@ -204,7 +204,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) | |||
| 204 | { | 204 | { |
| 205 | pte_t *pte; | 205 | pte_t *pte; |
| 206 | 206 | ||
| 207 | pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); | 207 | pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO); |
| 208 | return pte; | 208 | return pte; |
| 209 | } | 209 | } |
| 210 | 210 | ||
| @@ -212,7 +212,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) | |||
| 212 | { | 212 | { |
| 213 | struct page *pte; | 213 | struct page *pte; |
| 214 | 214 | ||
| 215 | pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); | 215 | pte = alloc_page(GFP_KERNEL|__GFP_ZERO); |
| 216 | if (!pte) | 216 | if (!pte) |
| 217 | return NULL; | 217 | return NULL; |
| 218 | if (!pgtable_page_ctor(pte)) { | 218 | if (!pgtable_page_ctor(pte)) { |
diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h index 2e02d1356fdf..26775793c204 100644 --- a/arch/unicore32/include/asm/pgalloc.h +++ b/arch/unicore32/include/asm/pgalloc.h | |||
| @@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd); | |||
| 28 | #define pgd_alloc(mm) get_pgd_slow(mm) | 28 | #define pgd_alloc(mm) get_pgd_slow(mm) |
| 29 | #define pgd_free(mm, pgd) free_pgd_slow(mm, pgd) | 29 | #define pgd_free(mm, pgd) free_pgd_slow(mm, pgd) |
| 30 | 30 | ||
| 31 | #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) | 31 | #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) |
| 32 | 32 | ||
| 33 | /* | 33 | /* |
| 34 | * Allocate one PTE table. | 34 | * Allocate one PTE table. |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0a7b885964ba..d9a94da0c29f 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -2439,6 +2439,15 @@ config PCI_CNB20LE_QUIRK | |||
| 2439 | 2439 | ||
| 2440 | source "drivers/pci/Kconfig" | 2440 | source "drivers/pci/Kconfig" |
| 2441 | 2441 | ||
| 2442 | config ISA_BUS | ||
| 2443 | bool "ISA-style bus support on modern systems" if EXPERT | ||
| 2444 | select ISA_BUS_API | ||
| 2445 | help | ||
| 2446 | Enables ISA-style drivers on modern systems. This is necessary to | ||
| 2447 | support PC/104 devices on X86_64 platforms. | ||
| 2448 | |||
| 2449 | If unsure, say N. | ||
| 2450 | |||
| 2442 | # x86_64 have no ISA slots, but can have ISA-style DMA. | 2451 | # x86_64 have no ISA slots, but can have ISA-style DMA. |
| 2443 | config ISA_DMA_API | 2452 | config ISA_DMA_API |
| 2444 | bool "ISA-style DMA support" if (X86_64 && EXPERT) | 2453 | bool "ISA-style DMA support" if (X86_64 && EXPERT) |
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 700a9c6e6159..be8e688fa0d4 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile | |||
| @@ -162,6 +162,9 @@ isoimage: $(obj)/bzImage | |||
| 162 | for i in lib lib64 share end ; do \ | 162 | for i in lib lib64 share end ; do \ |
| 163 | if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \ | 163 | if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \ |
| 164 | cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \ | 164 | cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \ |
| 165 | if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \ | ||
| 166 | cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \ | ||
| 167 | fi ; \ | ||
| 165 | break ; \ | 168 | break ; \ |
| 166 | fi ; \ | 169 | fi ; \ |
| 167 | if [ $$i = end ] ; then exit 1 ; fi ; \ | 170 | if [ $$i = end ] ; then exit 1 ; fi ; \ |
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 99c4bab123cd..e30eef4f29a6 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c | |||
| @@ -714,7 +714,7 @@ static void cleanup_rapl_pmus(void) | |||
| 714 | int i; | 714 | int i; |
| 715 | 715 | ||
| 716 | for (i = 0; i < rapl_pmus->maxpkg; i++) | 716 | for (i = 0; i < rapl_pmus->maxpkg; i++) |
| 717 | kfree(rapl_pmus->pmus + i); | 717 | kfree(rapl_pmus->pmus[i]); |
| 718 | kfree(rapl_pmus); | 718 | kfree(rapl_pmus); |
| 719 | } | 719 | } |
| 720 | 720 | ||
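The RAPL cleanup fix suggests rapl_pmus->pmus is an array of pointers to separately allocated per-package pmu objects, so each element has to be freed through the stored pointer (pmus[i]) rather than by passing the interior address pmus + i to kfree(). A small sketch of that ownership model, with invented structure names (this is an inference from the one-line fix, not a quote of the driver):

#include <stdlib.h>

struct item { int id; };

struct container {
        int n;
        struct item *items[];   /* array of pointers, like rapl_pmus->pmus */
};

static void destroy(struct container *c)
{
        for (int i = 0; i < c->n; i++)
                free(c->items[i]);           /* correct: free the element itself */
        free(c);                             /* then the containing allocation */
}

int main(void)
{
        struct container *c = malloc(sizeof(*c) + 3 * sizeof(struct item *));

        if (!c)
                return 1;
        c->n = 3;
        for (int i = 0; i < c->n; i++)
                c->items[i] = calloc(1, sizeof(struct item));
        destroy(c);
        return 0;
}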
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index b2625867ebd1..874e8bd64d1d 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c | |||
| @@ -2868,27 +2868,10 @@ static struct intel_uncore_type bdx_uncore_cbox = { | |||
| 2868 | .format_group = &hswep_uncore_cbox_format_group, | 2868 | .format_group = &hswep_uncore_cbox_format_group, |
| 2869 | }; | 2869 | }; |
| 2870 | 2870 | ||
| 2871 | static struct intel_uncore_type bdx_uncore_sbox = { | ||
| 2872 | .name = "sbox", | ||
| 2873 | .num_counters = 4, | ||
| 2874 | .num_boxes = 4, | ||
| 2875 | .perf_ctr_bits = 48, | ||
| 2876 | .event_ctl = HSWEP_S0_MSR_PMON_CTL0, | ||
| 2877 | .perf_ctr = HSWEP_S0_MSR_PMON_CTR0, | ||
| 2878 | .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, | ||
| 2879 | .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, | ||
| 2880 | .msr_offset = HSWEP_SBOX_MSR_OFFSET, | ||
| 2881 | .ops = &hswep_uncore_sbox_msr_ops, | ||
| 2882 | .format_group = &hswep_uncore_sbox_format_group, | ||
| 2883 | }; | ||
| 2884 | |||
| 2885 | #define BDX_MSR_UNCORE_SBOX 3 | ||
| 2886 | |||
| 2887 | static struct intel_uncore_type *bdx_msr_uncores[] = { | 2871 | static struct intel_uncore_type *bdx_msr_uncores[] = { |
| 2888 | &bdx_uncore_ubox, | 2872 | &bdx_uncore_ubox, |
| 2889 | &bdx_uncore_cbox, | 2873 | &bdx_uncore_cbox, |
| 2890 | &hswep_uncore_pcu, | 2874 | &hswep_uncore_pcu, |
| 2891 | &bdx_uncore_sbox, | ||
| 2892 | NULL, | 2875 | NULL, |
| 2893 | }; | 2876 | }; |
| 2894 | 2877 | ||
| @@ -2897,10 +2880,6 @@ void bdx_uncore_cpu_init(void) | |||
| 2897 | if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) | 2880 | if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) |
| 2898 | bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; | 2881 | bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; |
| 2899 | uncore_msr_uncores = bdx_msr_uncores; | 2882 | uncore_msr_uncores = bdx_msr_uncores; |
| 2900 | |||
| 2901 | /* BDX-DE doesn't have SBOX */ | ||
| 2902 | if (boot_cpu_data.x86_model == 86) | ||
| 2903 | uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; | ||
| 2904 | } | 2883 | } |
| 2905 | 2884 | ||
| 2906 | static struct intel_uncore_type bdx_uncore_ha = { | 2885 | static struct intel_uncore_type bdx_uncore_ha = { |
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h new file mode 100644 index 000000000000..6999f7d01a0d --- /dev/null +++ b/arch/x86/include/asm/intel-family.h | |||
| @@ -0,0 +1,68 @@ | |||
| 1 | #ifndef _ASM_X86_INTEL_FAMILY_H | ||
| 2 | #define _ASM_X86_INTEL_FAMILY_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * "Big Core" Processors (Branded as Core, Xeon, etc...) | ||
| 6 | * | ||
| 7 | * The "_X" parts are generally the EP and EX Xeons, or the | ||
| 8 | * "Extreme" ones, like Broadwell-E. | ||
| 9 | * | ||
| 10 | * Things ending in "2" are usually because we have no better | ||
| 11 | * name for them. There's no processor called "WESTMERE2". | ||
| 12 | */ | ||
| 13 | |||
| 14 | #define INTEL_FAM6_CORE_YONAH 0x0E | ||
| 15 | #define INTEL_FAM6_CORE2_MEROM 0x0F | ||
| 16 | #define INTEL_FAM6_CORE2_MEROM_L 0x16 | ||
| 17 | #define INTEL_FAM6_CORE2_PENRYN 0x17 | ||
| 18 | #define INTEL_FAM6_CORE2_DUNNINGTON 0x1D | ||
| 19 | |||
| 20 | #define INTEL_FAM6_NEHALEM 0x1E | ||
| 21 | #define INTEL_FAM6_NEHALEM_EP 0x1A | ||
| 22 | #define INTEL_FAM6_NEHALEM_EX 0x2E | ||
| 23 | #define INTEL_FAM6_WESTMERE 0x25 | ||
| 24 | #define INTEL_FAM6_WESTMERE2 0x1F | ||
| 25 | #define INTEL_FAM6_WESTMERE_EP 0x2C | ||
| 26 | #define INTEL_FAM6_WESTMERE_EX 0x2F | ||
| 27 | |||
| 28 | #define INTEL_FAM6_SANDYBRIDGE 0x2A | ||
| 29 | #define INTEL_FAM6_SANDYBRIDGE_X 0x2D | ||
| 30 | #define INTEL_FAM6_IVYBRIDGE 0x3A | ||
| 31 | #define INTEL_FAM6_IVYBRIDGE_X 0x3E | ||
| 32 | |||
| 33 | #define INTEL_FAM6_HASWELL_CORE 0x3C | ||
| 34 | #define INTEL_FAM6_HASWELL_X 0x3F | ||
| 35 | #define INTEL_FAM6_HASWELL_ULT 0x45 | ||
| 36 | #define INTEL_FAM6_HASWELL_GT3E 0x46 | ||
| 37 | |||
| 38 | #define INTEL_FAM6_BROADWELL_CORE 0x3D | ||
| 39 | #define INTEL_FAM6_BROADWELL_XEON_D 0x56 | ||
| 40 | #define INTEL_FAM6_BROADWELL_GT3E 0x47 | ||
| 41 | #define INTEL_FAM6_BROADWELL_X 0x4F | ||
| 42 | |||
| 43 | #define INTEL_FAM6_SKYLAKE_MOBILE 0x4E | ||
| 44 | #define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E | ||
| 45 | #define INTEL_FAM6_SKYLAKE_X 0x55 | ||
| 46 | #define INTEL_FAM6_KABYLAKE_MOBILE 0x8E | ||
| 47 | #define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E | ||
| 48 | |||
| 49 | /* "Small Core" Processors (Atom) */ | ||
| 50 | |||
| 51 | #define INTEL_FAM6_ATOM_PINEVIEW 0x1C | ||
| 52 | #define INTEL_FAM6_ATOM_LINCROFT 0x26 | ||
| 53 | #define INTEL_FAM6_ATOM_PENWELL 0x27 | ||
| 54 | #define INTEL_FAM6_ATOM_CLOVERVIEW 0x35 | ||
| 55 | #define INTEL_FAM6_ATOM_CEDARVIEW 0x36 | ||
| 56 | #define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */ | ||
| 57 | #define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */ | ||
| 58 | #define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */ | ||
| 59 | #define INTEL_FAM6_ATOM_MERRIFIELD1 0x4A /* Tangier */ | ||
| 60 | #define INTEL_FAM6_ATOM_MERRIFIELD2 0x5A /* Annidale */ | ||
| 61 | #define INTEL_FAM6_ATOM_GOLDMONT 0x5C | ||
| 62 | #define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */ | ||
| 63 | |||
| 64 | /* Xeon Phi */ | ||
| 65 | |||
| 66 | #define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */ | ||
| 67 | |||
| 68 | #endif /* _ASM_X86_INTEL_FAMILY_H */ | ||
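The new intel-family.h exists so code can match on symbolic family-6 model names instead of bare hex model numbers. A stand-alone sketch of the intended usage; the two defines are copied from the header above and the lookup helper is invented:

#include <stdio.h>

#define INTEL_FAM6_BROADWELL_XEON_D  0x56
#define INTEL_FAM6_SKYLAKE_DESKTOP   0x5E

static const char *model_name(unsigned int model)
{
        switch (model) {
        case INTEL_FAM6_BROADWELL_XEON_D: return "Broadwell-DE";
        case INTEL_FAM6_SKYLAKE_DESKTOP:  return "Skylake (desktop)";
        default:                          return "unknown";
        }
}

int main(void)
{
        printf("0x56 -> %s\n", model_name(0x56));
        printf("0x5e -> %s\n", model_name(0x5E));
        return 0;
}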
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index 4421b5da409d..d1d1e5094c28 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h | |||
| @@ -38,12 +38,11 @@ typedef u8 kprobe_opcode_t; | |||
| 38 | #define RELATIVECALL_OPCODE 0xe8 | 38 | #define RELATIVECALL_OPCODE 0xe8 |
| 39 | #define RELATIVE_ADDR_SIZE 4 | 39 | #define RELATIVE_ADDR_SIZE 4 |
| 40 | #define MAX_STACK_SIZE 64 | 40 | #define MAX_STACK_SIZE 64 |
| 41 | #define MIN_STACK_SIZE(ADDR) \ | 41 | #define CUR_STACK_SIZE(ADDR) \ |
| 42 | (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \ | 42 | (current_top_of_stack() - (unsigned long)(ADDR)) |
| 43 | THREAD_SIZE - (unsigned long)(ADDR))) \ | 43 | #define MIN_STACK_SIZE(ADDR) \ |
| 44 | ? (MAX_STACK_SIZE) \ | 44 | (MAX_STACK_SIZE < CUR_STACK_SIZE(ADDR) ? \ |
| 45 | : (((unsigned long)current_thread_info()) + \ | 45 | MAX_STACK_SIZE : CUR_STACK_SIZE(ADDR)) |
| 46 | THREAD_SIZE - (unsigned long)(ADDR))) | ||
| 47 | 46 | ||
| 48 | #define flush_insn_slot(p) do { } while (0) | 47 | #define flush_insn_slot(p) do { } while (0) |
| 49 | 48 | ||
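The kprobes.h rewrite expresses MIN_STACK_SIZE as the smaller of MAX_STACK_SIZE and the distance from the given address to the top of the current stack, now taken from current_top_of_stack(). A sketch of the computation as a plain function (top_of_stack is a stand-in argument, not a real kernel call):

#include <stdint.h>
#include <stdio.h>

#define MAX_STACK_SIZE 64

static size_t min_stack_size(uintptr_t addr, uintptr_t top_of_stack)
{
        size_t cur = top_of_stack - addr;    /* CUR_STACK_SIZE(ADDR) */

        return cur < MAX_STACK_SIZE ? cur : MAX_STACK_SIZE;
}

int main(void)
{
        printf("%zu\n", min_stack_size(0x1000, 0x1020));  /* 32: limited by stack top */
        printf("%zu\n", min_stack_size(0x1000, 0x2000));  /* 64: limited by MAX_STACK_SIZE */
        return 0;
}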
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index e0fbe7e70dc1..69e62862b622 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/irqbypass.h> | 27 | #include <linux/irqbypass.h> |
| 28 | #include <linux/hyperv.h> | 28 | #include <linux/hyperv.h> |
| 29 | 29 | ||
| 30 | #include <asm/apic.h> | ||
| 30 | #include <asm/pvclock-abi.h> | 31 | #include <asm/pvclock-abi.h> |
| 31 | #include <asm/desc.h> | 32 | #include <asm/desc.h> |
| 32 | #include <asm/mtrr.h> | 33 | #include <asm/mtrr.h> |
| @@ -1368,4 +1369,14 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) | |||
| 1368 | 1369 | ||
| 1369 | static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} | 1370 | static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} |
| 1370 | 1371 | ||
| 1372 | static inline int kvm_cpu_get_apicid(int mps_cpu) | ||
| 1373 | { | ||
| 1374 | #ifdef CONFIG_X86_LOCAL_APIC | ||
| 1375 | return __default_cpu_present_to_apicid(mps_cpu); | ||
| 1376 | #else | ||
| 1377 | WARN_ON_ONCE(1); | ||
| 1378 | return BAD_APICID; | ||
| 1379 | #endif | ||
| 1380 | } | ||
| 1381 | |||
| 1371 | #endif /* _ASM_X86_KVM_HOST_H */ | 1382 | #endif /* _ASM_X86_KVM_HOST_H */ |
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 7dc1d8fef7fd..b5fee97813cd 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h | |||
| @@ -122,7 +122,7 @@ notrace static inline void native_write_msr(unsigned int msr, | |||
| 122 | "2:\n" | 122 | "2:\n" |
| 123 | _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe) | 123 | _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_wrmsr_unsafe) |
| 124 | : : "c" (msr), "a"(low), "d" (high) : "memory"); | 124 | : : "c" (msr), "a"(low), "d" (high) : "memory"); |
| 125 | if (msr_tracepoint_active(__tracepoint_read_msr)) | 125 | if (msr_tracepoint_active(__tracepoint_write_msr)) |
| 126 | do_trace_write_msr(msr, ((u64)high << 32 | low), 0); | 126 | do_trace_write_msr(msr, ((u64)high << 32 | low), 0); |
| 127 | } | 127 | } |
| 128 | 128 | ||
| @@ -141,7 +141,7 @@ notrace static inline int native_write_msr_safe(unsigned int msr, | |||
| 141 | : "c" (msr), "0" (low), "d" (high), | 141 | : "c" (msr), "0" (low), "d" (high), |
| 142 | [fault] "i" (-EIO) | 142 | [fault] "i" (-EIO) |
| 143 | : "memory"); | 143 | : "memory"); |
| 144 | if (msr_tracepoint_active(__tracepoint_read_msr)) | 144 | if (msr_tracepoint_active(__tracepoint_write_msr)) |
| 145 | do_trace_write_msr(msr, ((u64)high << 32 | low), err); | 145 | do_trace_write_msr(msr, ((u64)high << 32 | low), err); |
| 146 | return err; | 146 | return err; |
| 147 | } | 147 | } |
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index bf7f8b55b0f9..574c23cf761a 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h | |||
| @@ -81,7 +81,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, | |||
| 81 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) | 81 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) |
| 82 | { | 82 | { |
| 83 | struct page *page; | 83 | struct page *page; |
| 84 | page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0); | 84 | page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); |
| 85 | if (!page) | 85 | if (!page) |
| 86 | return NULL; | 86 | return NULL; |
| 87 | if (!pgtable_pmd_page_ctor(page)) { | 87 | if (!pgtable_pmd_page_ctor(page)) { |
| @@ -125,7 +125,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) | |||
| 125 | 125 | ||
| 126 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) | 126 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) |
| 127 | { | 127 | { |
| 128 | return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT); | 128 | return (pud_t *)get_zeroed_page(GFP_KERNEL); |
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | static inline void pud_free(struct mm_struct *mm, pud_t *pud) | 131 | static inline void pud_free(struct mm_struct *mm, pud_t *pud) |
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index 7c247e7404be..0944218af9e2 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h | |||
| @@ -14,7 +14,7 @@ extern int kstack_depth_to_print; | |||
| 14 | struct thread_info; | 14 | struct thread_info; |
| 15 | struct stacktrace_ops; | 15 | struct stacktrace_ops; |
| 16 | 16 | ||
| 17 | typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo, | 17 | typedef unsigned long (*walk_stack_t)(struct task_struct *task, |
| 18 | unsigned long *stack, | 18 | unsigned long *stack, |
| 19 | unsigned long bp, | 19 | unsigned long bp, |
| 20 | const struct stacktrace_ops *ops, | 20 | const struct stacktrace_ops *ops, |
| @@ -23,13 +23,13 @@ typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo, | |||
| 23 | int *graph); | 23 | int *graph); |
| 24 | 24 | ||
| 25 | extern unsigned long | 25 | extern unsigned long |
| 26 | print_context_stack(struct thread_info *tinfo, | 26 | print_context_stack(struct task_struct *task, |
| 27 | unsigned long *stack, unsigned long bp, | 27 | unsigned long *stack, unsigned long bp, |
| 28 | const struct stacktrace_ops *ops, void *data, | 28 | const struct stacktrace_ops *ops, void *data, |
| 29 | unsigned long *end, int *graph); | 29 | unsigned long *end, int *graph); |
| 30 | 30 | ||
| 31 | extern unsigned long | 31 | extern unsigned long |
| 32 | print_context_stack_bp(struct thread_info *tinfo, | 32 | print_context_stack_bp(struct task_struct *task, |
| 33 | unsigned long *stack, unsigned long bp, | 33 | unsigned long *stack, unsigned long bp, |
| 34 | const struct stacktrace_ops *ops, void *data, | 34 | const struct stacktrace_ops *ops, void *data, |
| 35 | unsigned long *end, int *graph); | 35 | unsigned long *end, int *graph); |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 84e33ff5a6d5..446702ed99dc 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
| @@ -2588,8 +2588,8 @@ static struct resource * __init ioapic_setup_resources(void) | |||
| 2588 | res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 2588 | res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
| 2589 | snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); | 2589 | snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); |
| 2590 | mem += IOAPIC_RESOURCE_NAME_SIZE; | 2590 | mem += IOAPIC_RESOURCE_NAME_SIZE; |
| 2591 | ioapics[i].iomem_res = &res[num]; | ||
| 2591 | num++; | 2592 | num++; |
| 2592 | ioapics[i].iomem_res = res; | ||
| 2593 | } | 2593 | } |
| 2594 | 2594 | ||
| 2595 | ioapic_resources = res; | 2595 | ioapic_resources = res; |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index c343a54bed39..f5c69d8974e1 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
| @@ -674,14 +674,14 @@ static void init_amd_bd(struct cpuinfo_x86 *c) | |||
| 674 | u64 value; | 674 | u64 value; |
| 675 | 675 | ||
| 676 | /* re-enable TopologyExtensions if switched off by BIOS */ | 676 | /* re-enable TopologyExtensions if switched off by BIOS */ |
| 677 | if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && | 677 | if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) && |
| 678 | !cpu_has(c, X86_FEATURE_TOPOEXT)) { | 678 | !cpu_has(c, X86_FEATURE_TOPOEXT)) { |
| 679 | 679 | ||
| 680 | if (msr_set_bit(0xc0011005, 54) > 0) { | 680 | if (msr_set_bit(0xc0011005, 54) > 0) { |
| 681 | rdmsrl(0xc0011005, value); | 681 | rdmsrl(0xc0011005, value); |
| 682 | if (value & BIT_64(54)) { | 682 | if (value & BIT_64(54)) { |
| 683 | set_cpu_cap(c, X86_FEATURE_TOPOEXT); | 683 | set_cpu_cap(c, X86_FEATURE_TOPOEXT); |
| 684 | pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n"); | 684 | pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n"); |
| 685 | } | 685 | } |
| 686 | } | 686 | } |
| 687 | } | 687 | } |
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 2bb25c3fe2e8..ef8017ca5ba9 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
| @@ -42,16 +42,14 @@ void printk_address(unsigned long address) | |||
| 42 | static void | 42 | static void |
| 43 | print_ftrace_graph_addr(unsigned long addr, void *data, | 43 | print_ftrace_graph_addr(unsigned long addr, void *data, |
| 44 | const struct stacktrace_ops *ops, | 44 | const struct stacktrace_ops *ops, |
| 45 | struct thread_info *tinfo, int *graph) | 45 | struct task_struct *task, int *graph) |
| 46 | { | 46 | { |
| 47 | struct task_struct *task; | ||
| 48 | unsigned long ret_addr; | 47 | unsigned long ret_addr; |
| 49 | int index; | 48 | int index; |
| 50 | 49 | ||
| 51 | if (addr != (unsigned long)return_to_handler) | 50 | if (addr != (unsigned long)return_to_handler) |
| 52 | return; | 51 | return; |
| 53 | 52 | ||
| 54 | task = tinfo->task; | ||
| 55 | index = task->curr_ret_stack; | 53 | index = task->curr_ret_stack; |
| 56 | 54 | ||
| 57 | if (!task->ret_stack || index < *graph) | 55 | if (!task->ret_stack || index < *graph) |
| @@ -68,7 +66,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data, | |||
| 68 | static inline void | 66 | static inline void |
| 69 | print_ftrace_graph_addr(unsigned long addr, void *data, | 67 | print_ftrace_graph_addr(unsigned long addr, void *data, |
| 70 | const struct stacktrace_ops *ops, | 68 | const struct stacktrace_ops *ops, |
| 71 | struct thread_info *tinfo, int *graph) | 69 | struct task_struct *task, int *graph) |
| 72 | { } | 70 | { } |
| 73 | #endif | 71 | #endif |
| 74 | 72 | ||
| @@ -79,10 +77,10 @@ print_ftrace_graph_addr(unsigned long addr, void *data, | |||
| 79 | * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack | 77 | * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack |
| 80 | */ | 78 | */ |
| 81 | 79 | ||
| 82 | static inline int valid_stack_ptr(struct thread_info *tinfo, | 80 | static inline int valid_stack_ptr(struct task_struct *task, |
| 83 | void *p, unsigned int size, void *end) | 81 | void *p, unsigned int size, void *end) |
| 84 | { | 82 | { |
| 85 | void *t = tinfo; | 83 | void *t = task_stack_page(task); |
| 86 | if (end) { | 84 | if (end) { |
| 87 | if (p < end && p >= (end-THREAD_SIZE)) | 85 | if (p < end && p >= (end-THREAD_SIZE)) |
| 88 | return 1; | 86 | return 1; |
| @@ -93,14 +91,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo, | |||
| 93 | } | 91 | } |
| 94 | 92 | ||
| 95 | unsigned long | 93 | unsigned long |
| 96 | print_context_stack(struct thread_info *tinfo, | 94 | print_context_stack(struct task_struct *task, |
| 97 | unsigned long *stack, unsigned long bp, | 95 | unsigned long *stack, unsigned long bp, |
| 98 | const struct stacktrace_ops *ops, void *data, | 96 | const struct stacktrace_ops *ops, void *data, |
| 99 | unsigned long *end, int *graph) | 97 | unsigned long *end, int *graph) |
| 100 | { | 98 | { |
| 101 | struct stack_frame *frame = (struct stack_frame *)bp; | 99 | struct stack_frame *frame = (struct stack_frame *)bp; |
| 102 | 100 | ||
| 103 | while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) { | 101 | while (valid_stack_ptr(task, stack, sizeof(*stack), end)) { |
| 104 | unsigned long addr; | 102 | unsigned long addr; |
| 105 | 103 | ||
| 106 | addr = *stack; | 104 | addr = *stack; |
| @@ -112,7 +110,7 @@ print_context_stack(struct thread_info *tinfo, | |||
| 112 | } else { | 110 | } else { |
| 113 | ops->address(data, addr, 0); | 111 | ops->address(data, addr, 0); |
| 114 | } | 112 | } |
| 115 | print_ftrace_graph_addr(addr, data, ops, tinfo, graph); | 113 | print_ftrace_graph_addr(addr, data, ops, task, graph); |
| 116 | } | 114 | } |
| 117 | stack++; | 115 | stack++; |
| 118 | } | 116 | } |
| @@ -121,7 +119,7 @@ print_context_stack(struct thread_info *tinfo, | |||
| 121 | EXPORT_SYMBOL_GPL(print_context_stack); | 119 | EXPORT_SYMBOL_GPL(print_context_stack); |
| 122 | 120 | ||
| 123 | unsigned long | 121 | unsigned long |
| 124 | print_context_stack_bp(struct thread_info *tinfo, | 122 | print_context_stack_bp(struct task_struct *task, |
| 125 | unsigned long *stack, unsigned long bp, | 123 | unsigned long *stack, unsigned long bp, |
| 126 | const struct stacktrace_ops *ops, void *data, | 124 | const struct stacktrace_ops *ops, void *data, |
| 127 | unsigned long *end, int *graph) | 125 | unsigned long *end, int *graph) |
| @@ -129,7 +127,7 @@ print_context_stack_bp(struct thread_info *tinfo, | |||
| 129 | struct stack_frame *frame = (struct stack_frame *)bp; | 127 | struct stack_frame *frame = (struct stack_frame *)bp; |
| 130 | unsigned long *ret_addr = &frame->return_address; | 128 | unsigned long *ret_addr = &frame->return_address; |
| 131 | 129 | ||
| 132 | while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) { | 130 | while (valid_stack_ptr(task, ret_addr, sizeof(*ret_addr), end)) { |
| 133 | unsigned long addr = *ret_addr; | 131 | unsigned long addr = *ret_addr; |
| 134 | 132 | ||
| 135 | if (!__kernel_text_address(addr)) | 133 | if (!__kernel_text_address(addr)) |
| @@ -139,7 +137,7 @@ print_context_stack_bp(struct thread_info *tinfo, | |||
| 139 | break; | 137 | break; |
| 140 | frame = frame->next_frame; | 138 | frame = frame->next_frame; |
| 141 | ret_addr = &frame->return_address; | 139 | ret_addr = &frame->return_address; |
| 142 | print_ftrace_graph_addr(addr, data, ops, tinfo, graph); | 140 | print_ftrace_graph_addr(addr, data, ops, task, graph); |
| 143 | } | 141 | } |
| 144 | 142 | ||
| 145 | return (unsigned long)frame; | 143 | return (unsigned long)frame; |
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index 464ffd69b92e..fef917e79b9d 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c | |||
| @@ -61,15 +61,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
| 61 | bp = stack_frame(task, regs); | 61 | bp = stack_frame(task, regs); |
| 62 | 62 | ||
| 63 | for (;;) { | 63 | for (;;) { |
| 64 | struct thread_info *context; | ||
| 65 | void *end_stack; | 64 | void *end_stack; |
| 66 | 65 | ||
| 67 | end_stack = is_hardirq_stack(stack, cpu); | 66 | end_stack = is_hardirq_stack(stack, cpu); |
| 68 | if (!end_stack) | 67 | if (!end_stack) |
| 69 | end_stack = is_softirq_stack(stack, cpu); | 68 | end_stack = is_softirq_stack(stack, cpu); |
| 70 | 69 | ||
| 71 | context = task_thread_info(task); | 70 | bp = ops->walk_stack(task, stack, bp, ops, data, |
| 72 | bp = ops->walk_stack(context, stack, bp, ops, data, | ||
| 73 | end_stack, &graph); | 71 | end_stack, &graph); |
| 74 | 72 | ||
| 75 | /* Stop if not on irq stack */ | 73 | /* Stop if not on irq stack */ |
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 5f1c6266eb30..d558a8a49016 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c | |||
| @@ -153,7 +153,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
| 153 | const struct stacktrace_ops *ops, void *data) | 153 | const struct stacktrace_ops *ops, void *data) |
| 154 | { | 154 | { |
| 155 | const unsigned cpu = get_cpu(); | 155 | const unsigned cpu = get_cpu(); |
| 156 | struct thread_info *tinfo; | ||
| 157 | unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu); | 156 | unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu); |
| 158 | unsigned long dummy; | 157 | unsigned long dummy; |
| 159 | unsigned used = 0; | 158 | unsigned used = 0; |
| @@ -179,7 +178,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
| 179 | * current stack address. If the stacks consist of nested | 178 | * current stack address. If the stacks consist of nested |
| 180 | * exceptions | 179 | * exceptions |
| 181 | */ | 180 | */ |
| 182 | tinfo = task_thread_info(task); | ||
| 183 | while (!done) { | 181 | while (!done) { |
| 184 | unsigned long *stack_end; | 182 | unsigned long *stack_end; |
| 185 | enum stack_type stype; | 183 | enum stack_type stype; |
| @@ -202,7 +200,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
| 202 | if (ops->stack(data, id) < 0) | 200 | if (ops->stack(data, id) < 0) |
| 203 | break; | 201 | break; |
| 204 | 202 | ||
| 205 | bp = ops->walk_stack(tinfo, stack, bp, ops, | 203 | bp = ops->walk_stack(task, stack, bp, ops, |
| 206 | data, stack_end, &graph); | 204 | data, stack_end, &graph); |
| 207 | ops->stack(data, "<EOE>"); | 205 | ops->stack(data, "<EOE>"); |
| 208 | /* | 206 | /* |
| @@ -218,7 +216,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
| 218 | 216 | ||
| 219 | if (ops->stack(data, "IRQ") < 0) | 217 | if (ops->stack(data, "IRQ") < 0) |
| 220 | break; | 218 | break; |
| 221 | bp = ops->walk_stack(tinfo, stack, bp, | 219 | bp = ops->walk_stack(task, stack, bp, |
| 222 | ops, data, stack_end, &graph); | 220 | ops, data, stack_end, &graph); |
| 223 | /* | 221 | /* |
| 224 | * We link to the next stack (which would be | 222 | * We link to the next stack (which would be |
| @@ -240,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs, | |||
| 240 | /* | 238 | /* |
| 241 | * This handles the process stack: | 239 | * This handles the process stack: |
| 242 | */ | 240 | */ |
| 243 | bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph); | 241 | bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph); |
| 244 | put_cpu(); | 242 | put_cpu(); |
| 245 | } | 243 | } |
| 246 | EXPORT_SYMBOL(dump_trace); | 244 | EXPORT_SYMBOL(dump_trace); |
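The three dumpstack hunks above switch the stack walkers from struct thread_info to struct task_struct and bound candidate pointers against task_stack_page(task) plus THREAD_SIZE. A user-space sketch of that bounds test; the base address and THREAD_SIZE value are invented for the example:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE 16384UL

static int valid_stack_ptr(uintptr_t stack_base, uintptr_t p, size_t size)
{
        /* only dereference if the whole object lies inside the task's stack */
        return p >= stack_base && p + size <= stack_base + THREAD_SIZE;
}

int main(void)
{
        uintptr_t base = 0x10000;

        printf("%d\n", valid_stack_ptr(base, base + 0x100, 8));        /* 1: inside */
        printf("%d\n", valid_stack_ptr(base, base + THREAD_SIZE, 8));  /* 0: past the end */
        return 0;
}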
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c index 4d38416e2a7f..04f89caef9c4 100644 --- a/arch/x86/kernel/espfix_64.c +++ b/arch/x86/kernel/espfix_64.c | |||
| @@ -57,7 +57,7 @@ | |||
| 57 | # error "Need more than one PGD for the ESPFIX hack" | 57 | # error "Need more than one PGD for the ESPFIX hack" |
| 58 | #endif | 58 | #endif |
| 59 | 59 | ||
| 60 | #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO) | 60 | #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO) |
| 61 | 61 | ||
| 62 | /* This contains the *bottom* address of the espfix stack */ | 62 | /* This contains the *bottom* address of the espfix stack */ |
| 63 | DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack); | 63 | DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack); |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 38da8f29a9c8..c627bf8d98ad 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
| @@ -130,11 +130,9 @@ void irq_ctx_init(int cpu) | |||
| 130 | 130 | ||
| 131 | void do_softirq_own_stack(void) | 131 | void do_softirq_own_stack(void) |
| 132 | { | 132 | { |
| 133 | struct thread_info *curstk; | ||
| 134 | struct irq_stack *irqstk; | 133 | struct irq_stack *irqstk; |
| 135 | u32 *isp, *prev_esp; | 134 | u32 *isp, *prev_esp; |
| 136 | 135 | ||
| 137 | curstk = current_stack(); | ||
| 138 | irqstk = __this_cpu_read(softirq_stack); | 136 | irqstk = __this_cpu_read(softirq_stack); |
| 139 | 137 | ||
| 140 | /* build the stack frame on the softirq stack */ | 138 | /* build the stack frame on the softirq stack */ |
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 38cf7a741250..7847e5c0e0b5 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
| @@ -961,7 +961,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr) | |||
| 961 | * normal page fault. | 961 | * normal page fault. |
| 962 | */ | 962 | */ |
| 963 | regs->ip = (unsigned long)cur->addr; | 963 | regs->ip = (unsigned long)cur->addr; |
| 964 | /* | ||
| 965 | * Trap flag (TF) has been set here because this fault | ||
| 966 | * happened where the single stepping will be done. | ||
| 967 | * So clear it by resetting the current kprobe: | ||
| 968 | */ | ||
| 969 | regs->flags &= ~X86_EFLAGS_TF; | ||
| 970 | |||
| 971 | /* | ||
| 972 | * If the TF flag was set before the kprobe hit, | ||
| 973 | * don't touch it: | ||
| 974 | */ | ||
| 964 | regs->flags |= kcb->kprobe_old_flags; | 975 | regs->flags |= kcb->kprobe_old_flags; |
| 976 | |||
| 965 | if (kcb->kprobe_status == KPROBE_REENTER) | 977 | if (kcb->kprobe_status == KPROBE_REENTER) |
| 966 | restore_previous_kprobe(kcb); | 978 | restore_previous_kprobe(kcb); |
| 967 | else | 979 | else |
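The kprobes fault-handler hunk clears the trap flag that the probe set up for single-stepping and then restores whatever flags the task had before the probe hit. A bit-level sketch of that fixup (the flag value is the architectural TF bit, the helper name is invented):

#include <stdint.h>
#include <stdio.h>

#define X86_EFLAGS_TF 0x00000100UL

static uint64_t fixup_flags(uint64_t cur_flags, uint64_t old_flags)
{
        cur_flags &= ~X86_EFLAGS_TF;   /* undo the probe's single-step setup */
        cur_flags |= old_flags;        /* keep the task's pre-probe flags */
        return cur_flags;
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)fixup_flags(0x346, 0x246)); /* 0x246: TF dropped */
        printf("%#llx\n", (unsigned long long)fixup_flags(0x346, 0x146)); /* 0x346: TF was the task's own */
        return 0;
}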
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index d1590486204a..00f03d82e69a 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
| @@ -96,6 +96,12 @@ static inline void cond_local_irq_disable(struct pt_regs *regs) | |||
| 96 | local_irq_disable(); | 96 | local_irq_disable(); |
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | /* | ||
| 100 | * In IST context, we explicitly disable preemption. This serves two | ||
| 101 | * purposes: it makes it much less likely that we would accidentally | ||
| 102 | * schedule in IST context and it will force a warning if we somehow | ||
| 103 | * manage to schedule by accident. | ||
| 104 | */ | ||
| 99 | void ist_enter(struct pt_regs *regs) | 105 | void ist_enter(struct pt_regs *regs) |
| 100 | { | 106 | { |
| 101 | if (user_mode(regs)) { | 107 | if (user_mode(regs)) { |
| @@ -110,13 +116,7 @@ void ist_enter(struct pt_regs *regs) | |||
| 110 | rcu_nmi_enter(); | 116 | rcu_nmi_enter(); |
| 111 | } | 117 | } |
| 112 | 118 | ||
| 113 | /* | 119 | preempt_disable(); |
| 114 | * We are atomic because we're on the IST stack; or we're on | ||
| 115 | * x86_32, in which case we still shouldn't schedule; or we're | ||
| 116 | * on x86_64 and entered from user mode, in which case we're | ||
| 117 | * still atomic unless ist_begin_non_atomic is called. | ||
| 118 | */ | ||
| 119 | preempt_count_add(HARDIRQ_OFFSET); | ||
| 120 | 120 | ||
| 121 | /* This code is a bit fragile. Test it. */ | 121 | /* This code is a bit fragile. Test it. */ |
| 122 | RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work"); | 122 | RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work"); |
| @@ -124,7 +124,7 @@ void ist_enter(struct pt_regs *regs) | |||
| 124 | 124 | ||
| 125 | void ist_exit(struct pt_regs *regs) | 125 | void ist_exit(struct pt_regs *regs) |
| 126 | { | 126 | { |
| 127 | preempt_count_sub(HARDIRQ_OFFSET); | 127 | preempt_enable_no_resched(); |
| 128 | 128 | ||
| 129 | if (!user_mode(regs)) | 129 | if (!user_mode(regs)) |
| 130 | rcu_nmi_exit(); | 130 | rcu_nmi_exit(); |
| @@ -155,7 +155,7 @@ void ist_begin_non_atomic(struct pt_regs *regs) | |||
| 155 | BUG_ON((unsigned long)(current_top_of_stack() - | 155 | BUG_ON((unsigned long)(current_top_of_stack() - |
| 156 | current_stack_pointer()) >= THREAD_SIZE); | 156 | current_stack_pointer()) >= THREAD_SIZE); |
| 157 | 157 | ||
| 158 | preempt_count_sub(HARDIRQ_OFFSET); | 158 | preempt_enable_no_resched(); |
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | /** | 161 | /** |
| @@ -165,7 +165,7 @@ void ist_begin_non_atomic(struct pt_regs *regs) | |||
| 165 | */ | 165 | */ |
| 166 | void ist_end_non_atomic(void) | 166 | void ist_end_non_atomic(void) |
| 167 | { | 167 | { |
| 168 | preempt_count_add(HARDIRQ_OFFSET); | 168 | preempt_disable(); |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | static nokprobe_inline int | 171 | static nokprobe_inline int |
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 769af907f824..7597b42a8a88 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c | |||
| @@ -181,19 +181,22 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, | |||
| 181 | struct kvm_cpuid_entry __user *entries) | 181 | struct kvm_cpuid_entry __user *entries) |
| 182 | { | 182 | { |
| 183 | int r, i; | 183 | int r, i; |
| 184 | struct kvm_cpuid_entry *cpuid_entries; | 184 | struct kvm_cpuid_entry *cpuid_entries = NULL; |
| 185 | 185 | ||
| 186 | r = -E2BIG; | 186 | r = -E2BIG; |
| 187 | if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) | 187 | if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) |
| 188 | goto out; | 188 | goto out; |
| 189 | r = -ENOMEM; | 189 | r = -ENOMEM; |
| 190 | cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent); | 190 | if (cpuid->nent) { |
| 191 | if (!cpuid_entries) | 191 | cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * |
| 192 | goto out; | 192 | cpuid->nent); |
| 193 | r = -EFAULT; | 193 | if (!cpuid_entries) |
| 194 | if (copy_from_user(cpuid_entries, entries, | 194 | goto out; |
| 195 | cpuid->nent * sizeof(struct kvm_cpuid_entry))) | 195 | r = -EFAULT; |
| 196 | goto out_free; | 196 | if (copy_from_user(cpuid_entries, entries, |
| 197 | cpuid->nent * sizeof(struct kvm_cpuid_entry))) | ||
| 198 | goto out; | ||
| 199 | } | ||
| 197 | for (i = 0; i < cpuid->nent; i++) { | 200 | for (i = 0; i < cpuid->nent; i++) { |
| 198 | vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; | 201 | vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function; |
| 199 | vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; | 202 | vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax; |
| @@ -212,9 +215,8 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, | |||
| 212 | kvm_x86_ops->cpuid_update(vcpu); | 215 | kvm_x86_ops->cpuid_update(vcpu); |
| 213 | r = kvm_update_cpuid(vcpu); | 216 | r = kvm_update_cpuid(vcpu); |
| 214 | 217 | ||
| 215 | out_free: | ||
| 216 | vfree(cpuid_entries); | ||
| 217 | out: | 218 | out: |
| 219 | vfree(cpuid_entries); | ||
| 218 | return r; | 220 | return r; |
| 219 | } | 221 | } |
| 220 | 222 | ||
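The KVM_SET_CPUID fix above allocates the temporary entry buffer only when cpuid->nent is non-zero and funnels every exit through one label, relying on vfree(NULL) being a no-op. The same shape in stand-alone C, with free() in place of vfree() and an invented copy_in() standing in for copy_from_user():

#include <stdlib.h>
#include <string.h>

struct entry { int function, eax; };

static int copy_in(struct entry *dst, const struct entry *src, size_t n)
{
        memcpy(dst, src, n * sizeof(*dst));    /* stands in for copy_from_user() */
        return 0;
}

static int set_entries(const struct entry *src, size_t nent, size_t max)
{
        struct entry *entries = NULL;
        int r = -7;                            /* -E2BIG */

        if (nent > max)
                goto out;
        r = -12;                               /* -ENOMEM */
        if (nent) {
                entries = malloc(nent * sizeof(*entries));
                if (!entries)
                        goto out;
                r = -14;                       /* -EFAULT */
                if (copy_in(entries, src, nent))
                        goto out;
        }
        r = 0;                                 /* ... use entries ... */
out:
        free(entries);                         /* safe even when still NULL */
        return r;
}

int main(void)
{
        struct entry e = { 1, 2 };

        return set_entries(&e, 1, 4) || set_entries(NULL, 0, 4);
}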
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 24e800116ab4..def97b3a392b 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
| @@ -336,12 +336,12 @@ static gfn_t pse36_gfn_delta(u32 gpte) | |||
| 336 | #ifdef CONFIG_X86_64 | 336 | #ifdef CONFIG_X86_64 |
| 337 | static void __set_spte(u64 *sptep, u64 spte) | 337 | static void __set_spte(u64 *sptep, u64 spte) |
| 338 | { | 338 | { |
| 339 | *sptep = spte; | 339 | WRITE_ONCE(*sptep, spte); |
| 340 | } | 340 | } |
| 341 | 341 | ||
| 342 | static void __update_clear_spte_fast(u64 *sptep, u64 spte) | 342 | static void __update_clear_spte_fast(u64 *sptep, u64 spte) |
| 343 | { | 343 | { |
| 344 | *sptep = spte; | 344 | WRITE_ONCE(*sptep, spte); |
| 345 | } | 345 | } |
| 346 | 346 | ||
| 347 | static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) | 347 | static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) |
| @@ -390,7 +390,7 @@ static void __set_spte(u64 *sptep, u64 spte) | |||
| 390 | */ | 390 | */ |
| 391 | smp_wmb(); | 391 | smp_wmb(); |
| 392 | 392 | ||
| 393 | ssptep->spte_low = sspte.spte_low; | 393 | WRITE_ONCE(ssptep->spte_low, sspte.spte_low); |
| 394 | } | 394 | } |
| 395 | 395 | ||
| 396 | static void __update_clear_spte_fast(u64 *sptep, u64 spte) | 396 | static void __update_clear_spte_fast(u64 *sptep, u64 spte) |
| @@ -400,7 +400,7 @@ static void __update_clear_spte_fast(u64 *sptep, u64 spte) | |||
| 400 | ssptep = (union split_spte *)sptep; | 400 | ssptep = (union split_spte *)sptep; |
| 401 | sspte = (union split_spte)spte; | 401 | sspte = (union split_spte)spte; |
| 402 | 402 | ||
| 403 | ssptep->spte_low = sspte.spte_low; | 403 | WRITE_ONCE(ssptep->spte_low, sspte.spte_low); |
| 404 | 404 | ||
| 405 | /* | 405 | /* |
| 406 | * If we map the spte from present to nonpresent, we should clear | 406 | * If we map the spte from present to nonpresent, we should clear |
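The MMU hunk converts plain SPTE stores to WRITE_ONCE() so the compiler cannot tear, duplicate or elide a store that other CPUs may read locklessly. A minimal stand-alone version of the idiom; it mirrors the kernel macro in spirit only and relies on the GCC typeof extension:

#include <stdint.h>
#include <stdio.h>

/* volatile-qualified store: one plain write, never split or re-issued */
#define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))

static uint64_t spte;

static void set_spte(uint64_t new)
{
        WRITE_ONCE(spte, new);   /* safe to race with lockless readers of the word */
}

int main(void)
{
        set_spte(0x8000000000000123ULL);
        printf("%#llx\n", (unsigned long long)spte);
        return 0;
}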
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 1163e8173e5a..16ef31b87452 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -238,7 +238,9 @@ module_param(nested, int, S_IRUGO); | |||
| 238 | 238 | ||
| 239 | /* enable / disable AVIC */ | 239 | /* enable / disable AVIC */ |
| 240 | static int avic; | 240 | static int avic; |
| 241 | #ifdef CONFIG_X86_LOCAL_APIC | ||
| 241 | module_param(avic, int, S_IRUGO); | 242 | module_param(avic, int, S_IRUGO); |
| 243 | #endif | ||
| 242 | 244 | ||
| 243 | static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); | 245 | static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); |
| 244 | static void svm_flush_tlb(struct kvm_vcpu *vcpu); | 246 | static void svm_flush_tlb(struct kvm_vcpu *vcpu); |
| @@ -981,11 +983,14 @@ static __init int svm_hardware_setup(void) | |||
| 981 | } else | 983 | } else |
| 982 | kvm_disable_tdp(); | 984 | kvm_disable_tdp(); |
| 983 | 985 | ||
| 984 | if (avic && (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC))) | 986 | if (avic) { |
| 985 | avic = false; | 987 | if (!npt_enabled || |
| 986 | 988 | !boot_cpu_has(X86_FEATURE_AVIC) || | |
| 987 | if (avic) | 989 | !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) |
| 988 | pr_info("AVIC enabled\n"); | 990 | avic = false; |
| 991 | else | ||
| 992 | pr_info("AVIC enabled\n"); | ||
| 993 | } | ||
| 989 | 994 | ||
| 990 | return 0; | 995 | return 0; |
| 991 | 996 | ||
| @@ -1324,7 +1329,7 @@ free_avic: | |||
| 1324 | static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run) | 1329 | static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run) |
| 1325 | { | 1330 | { |
| 1326 | u64 entry; | 1331 | u64 entry; |
| 1327 | int h_physical_id = __default_cpu_present_to_apicid(vcpu->cpu); | 1332 | int h_physical_id = kvm_cpu_get_apicid(vcpu->cpu); |
| 1328 | struct vcpu_svm *svm = to_svm(vcpu); | 1333 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1329 | 1334 | ||
| 1330 | if (!kvm_vcpu_apicv_active(vcpu)) | 1335 | if (!kvm_vcpu_apicv_active(vcpu)) |
| @@ -1349,7 +1354,7 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
| 1349 | { | 1354 | { |
| 1350 | u64 entry; | 1355 | u64 entry; |
| 1351 | /* ID = 0xff (broadcast), ID > 0xff (reserved) */ | 1356 | /* ID = 0xff (broadcast), ID > 0xff (reserved) */ |
| 1352 | int h_physical_id = __default_cpu_present_to_apicid(cpu); | 1357 | int h_physical_id = kvm_cpu_get_apicid(cpu); |
| 1353 | struct vcpu_svm *svm = to_svm(vcpu); | 1358 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1354 | 1359 | ||
| 1355 | if (!kvm_vcpu_apicv_active(vcpu)) | 1360 | if (!kvm_vcpu_apicv_active(vcpu)) |
| @@ -4236,7 +4241,7 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec) | |||
| 4236 | 4241 | ||
| 4237 | if (avic_vcpu_is_running(vcpu)) | 4242 | if (avic_vcpu_is_running(vcpu)) |
| 4238 | wrmsrl(SVM_AVIC_DOORBELL, | 4243 | wrmsrl(SVM_AVIC_DOORBELL, |
| 4239 | __default_cpu_present_to_apicid(vcpu->cpu)); | 4244 | kvm_cpu_get_apicid(vcpu->cpu)); |
| 4240 | else | 4245 | else |
| 4241 | kvm_vcpu_wake_up(vcpu); | 4246 | kvm_vcpu_wake_up(vcpu); |
| 4242 | } | 4247 | } |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index fb93010beaa4..003618e324ce 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -2072,7 +2072,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) | |||
| 2072 | unsigned int dest; | 2072 | unsigned int dest; |
| 2073 | 2073 | ||
| 2074 | if (!kvm_arch_has_assigned_device(vcpu->kvm) || | 2074 | if (!kvm_arch_has_assigned_device(vcpu->kvm) || |
| 2075 | !irq_remapping_cap(IRQ_POSTING_CAP)) | 2075 | !irq_remapping_cap(IRQ_POSTING_CAP) || |
| 2076 | !kvm_vcpu_apicv_active(vcpu)) | ||
| 2076 | return; | 2077 | return; |
| 2077 | 2078 | ||
| 2078 | do { | 2079 | do { |
| @@ -2180,7 +2181,8 @@ static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu) | |||
| 2180 | struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); | 2181 | struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); |
| 2181 | 2182 | ||
| 2182 | if (!kvm_arch_has_assigned_device(vcpu->kvm) || | 2183 | if (!kvm_arch_has_assigned_device(vcpu->kvm) || |
| 2183 | !irq_remapping_cap(IRQ_POSTING_CAP)) | 2184 | !irq_remapping_cap(IRQ_POSTING_CAP) || |
| 2185 | !kvm_vcpu_apicv_active(vcpu)) | ||
| 2184 | return; | 2186 | return; |
| 2185 | 2187 | ||
| 2186 | /* Set SN when the vCPU is preempted */ | 2188 | /* Set SN when the vCPU is preempted */ |
| @@ -10714,7 +10716,8 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu) | |||
| 10714 | struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); | 10716 | struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu); |
| 10715 | 10717 | ||
| 10716 | if (!kvm_arch_has_assigned_device(vcpu->kvm) || | 10718 | if (!kvm_arch_has_assigned_device(vcpu->kvm) || |
| 10717 | !irq_remapping_cap(IRQ_POSTING_CAP)) | 10719 | !irq_remapping_cap(IRQ_POSTING_CAP) || |
| 10720 | !kvm_vcpu_apicv_active(vcpu)) | ||
| 10718 | return 0; | 10721 | return 0; |
| 10719 | 10722 | ||
| 10720 | vcpu->pre_pcpu = vcpu->cpu; | 10723 | vcpu->pre_pcpu = vcpu->cpu; |
| @@ -10780,7 +10783,8 @@ static void vmx_post_block(struct kvm_vcpu *vcpu) | |||
| 10780 | unsigned long flags; | 10783 | unsigned long flags; |
| 10781 | 10784 | ||
| 10782 | if (!kvm_arch_has_assigned_device(vcpu->kvm) || | 10785 | if (!kvm_arch_has_assigned_device(vcpu->kvm) || |
| 10783 | !irq_remapping_cap(IRQ_POSTING_CAP)) | 10786 | !irq_remapping_cap(IRQ_POSTING_CAP) || |
| 10787 | !kvm_vcpu_apicv_active(vcpu)) | ||
| 10784 | return; | 10788 | return; |
| 10785 | 10789 | ||
| 10786 | do { | 10790 | do { |
| @@ -10833,7 +10837,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq, | |||
| 10833 | int idx, ret = -EINVAL; | 10837 | int idx, ret = -EINVAL; |
| 10834 | 10838 | ||
| 10835 | if (!kvm_arch_has_assigned_device(kvm) || | 10839 | if (!kvm_arch_has_assigned_device(kvm) || |
| 10836 | !irq_remapping_cap(IRQ_POSTING_CAP)) | 10840 | !irq_remapping_cap(IRQ_POSTING_CAP) || |
| 10841 | !kvm_vcpu_apicv_active(kvm->vcpus[0])) | ||
| 10837 | return 0; | 10842 | return 0; |
| 10838 | 10843 | ||
| 10839 | idx = srcu_read_lock(&kvm->irq_srcu); | 10844 | idx = srcu_read_lock(&kvm->irq_srcu); |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c805cf494154..902d9da12392 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -2314,6 +2314,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
| 2314 | case MSR_AMD64_NB_CFG: | 2314 | case MSR_AMD64_NB_CFG: |
| 2315 | case MSR_FAM10H_MMIO_CONF_BASE: | 2315 | case MSR_FAM10H_MMIO_CONF_BASE: |
| 2316 | case MSR_AMD64_BU_CFG2: | 2316 | case MSR_AMD64_BU_CFG2: |
| 2317 | case MSR_IA32_PERF_CTL: | ||
| 2317 | msr_info->data = 0; | 2318 | msr_info->data = 0; |
| 2318 | break; | 2319 | break; |
| 2319 | case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: | 2320 | case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3: |
| @@ -2972,6 +2973,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, | |||
| 2972 | | KVM_VCPUEVENT_VALID_SMM)) | 2973 | | KVM_VCPUEVENT_VALID_SMM)) |
| 2973 | return -EINVAL; | 2974 | return -EINVAL; |
| 2974 | 2975 | ||
| 2976 | if (events->exception.injected && | ||
| 2977 | (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) | ||
| 2978 | return -EINVAL; | ||
| 2979 | |||
| 2975 | process_nmi(vcpu); | 2980 | process_nmi(vcpu); |
| 2976 | vcpu->arch.exception.pending = events->exception.injected; | 2981 | vcpu->arch.exception.pending = events->exception.injected; |
| 2977 | vcpu->arch.exception.nr = events->exception.nr; | 2982 | vcpu->arch.exception.nr = events->exception.nr; |
| @@ -3036,6 +3041,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, | |||
| 3036 | if (dbgregs->flags) | 3041 | if (dbgregs->flags) |
| 3037 | return -EINVAL; | 3042 | return -EINVAL; |
| 3038 | 3043 | ||
| 3044 | if (dbgregs->dr6 & ~0xffffffffull) | ||
| 3045 | return -EINVAL; | ||
| 3046 | if (dbgregs->dr7 & ~0xffffffffull) | ||
| 3047 | return -EINVAL; | ||
| 3048 | |||
| 3039 | memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); | 3049 | memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); |
| 3040 | kvm_update_dr0123(vcpu); | 3050 | kvm_update_dr0123(vcpu); |
| 3041 | vcpu->arch.dr6 = dbgregs->dr6; | 3051 | vcpu->arch.dr6 = dbgregs->dr6; |
| @@ -7815,7 +7825,7 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) | |||
| 7815 | 7825 | ||
| 7816 | slot = id_to_memslot(slots, id); | 7826 | slot = id_to_memslot(slots, id); |
| 7817 | if (size) { | 7827 | if (size) { |
| 7818 | if (WARN_ON(slot->npages)) | 7828 | if (slot->npages) |
| 7819 | return -EEXIST; | 7829 | return -EEXIST; |
| 7820 | 7830 | ||
| 7821 | /* | 7831 | /* |
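The x86.c hunks add sanity checks on state coming from userspace: an injected exception vector must be a real one (0-31 and not NMI), and DR6/DR7 must not set their architecturally reserved upper 32 bits. A trimmed-down sketch of those checks with invented struct and helper names:

#include <stdint.h>
#include <stdio.h>

#define NMI_VECTOR 2

struct dbgregs { uint64_t dr6, dr7; };

static int validate_dbgregs(const struct dbgregs *d)
{
        if (d->dr6 & ~0xffffffffULL)    /* upper 32 bits must stay clear */
                return -22;             /* -EINVAL */
        if (d->dr7 & ~0xffffffffULL)
                return -22;
        return 0;
}

static int validate_exception(int injected, unsigned int nr)
{
        if (injected && (nr > 31 || nr == NMI_VECTOR))
                return -22;
        return 0;
}

int main(void)
{
        struct dbgregs bad = { .dr6 = 1ULL << 32, .dr7 = 0 };

        printf("%d\n", validate_dbgregs(&bad));        /* -22 */
        printf("%d\n", validate_exception(1, 200));    /* -22 */
        return 0;
}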
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 4eb287e25043..aa0ff4b02a96 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | #include <asm/fixmap.h> | 6 | #include <asm/fixmap.h> |
| 7 | #include <asm/mtrr.h> | 7 | #include <asm/mtrr.h> |
| 8 | 8 | ||
| 9 | #define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO | 9 | #define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO |
| 10 | 10 | ||
| 11 | #ifdef CONFIG_HIGHPTE | 11 | #ifdef CONFIG_HIGHPTE |
| 12 | #define PGALLOC_USER_GFP __GFP_HIGHMEM | 12 | #define PGALLOC_USER_GFP __GFP_HIGHMEM |
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 6e7242be1c87..b226b3f497f1 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c | |||
| @@ -139,7 +139,7 @@ int __init efi_alloc_page_tables(void) | |||
| 139 | if (efi_enabled(EFI_OLD_MEMMAP)) | 139 | if (efi_enabled(EFI_OLD_MEMMAP)) |
| 140 | return 0; | 140 | return 0; |
| 141 | 141 | ||
| 142 | gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO; | 142 | gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO; |
| 143 | efi_pgd = (pgd_t *)__get_free_page(gfp_mask); | 143 | efi_pgd = (pgd_t *)__get_free_page(gfp_mask); |
| 144 | if (!efi_pgd) | 144 | if (!efi_pgd) |
| 145 | return -ENOMEM; | 145 | return -ENOMEM; |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 478a2de543a5..67433714b791 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
| @@ -1113,7 +1113,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr, | |||
| 1113 | 1113 | ||
| 1114 | /* NOTE: The loop is more greedy than the cleanup_highmap variant. | 1114 | /* NOTE: The loop is more greedy than the cleanup_highmap variant. |
| 1115 | * We include the PMD passed in on _both_ boundaries. */ | 1115 | * We include the PMD passed in on _both_ boundaries. */ |
| 1116 | for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE)); | 1116 | for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD)); |
| 1117 | pmd++, vaddr += PMD_SIZE) { | 1117 | pmd++, vaddr += PMD_SIZE) { |
| 1118 | if (pmd_none(*pmd)) | 1118 | if (pmd_none(*pmd)) |
| 1119 | continue; | 1119 | continue; |
| @@ -1551,41 +1551,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) | |||
| 1551 | #endif | 1551 | #endif |
| 1552 | } | 1552 | } |
| 1553 | 1553 | ||
| 1554 | #ifdef CONFIG_X86_32 | ||
| 1555 | static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) | ||
| 1556 | { | ||
| 1557 | /* If there's an existing pte, then don't allow _PAGE_RW to be set */ | ||
| 1558 | if (pte_val_ma(*ptep) & _PAGE_PRESENT) | ||
| 1559 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & | ||
| 1560 | pte_val_ma(pte)); | ||
| 1561 | |||
| 1562 | return pte; | ||
| 1563 | } | ||
| 1564 | #else /* CONFIG_X86_64 */ | ||
| 1565 | static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) | ||
| 1566 | { | ||
| 1567 | unsigned long pfn; | ||
| 1568 | |||
| 1569 | if (xen_feature(XENFEAT_writable_page_tables) || | ||
| 1570 | xen_feature(XENFEAT_auto_translated_physmap) || | ||
| 1571 | xen_start_info->mfn_list >= __START_KERNEL_map) | ||
| 1572 | return pte; | ||
| 1573 | |||
| 1574 | /* | ||
| 1575 | * Pages belonging to the initial p2m list mapped outside the default | ||
| 1576 | * address range must be mapped read-only. This region contains the | ||
| 1577 | * page tables for mapping the p2m list, too, and page tables MUST be | ||
| 1578 | * mapped read-only. | ||
| 1579 | */ | ||
| 1580 | pfn = pte_pfn(pte); | ||
| 1581 | if (pfn >= xen_start_info->first_p2m_pfn && | ||
| 1582 | pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames) | ||
| 1583 | pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW); | ||
| 1584 | |||
| 1585 | return pte; | ||
| 1586 | } | ||
| 1587 | #endif /* CONFIG_X86_64 */ | ||
| 1588 | |||
| 1589 | /* | 1554 | /* |
| 1590 | * Init-time set_pte while constructing initial pagetables, which | 1555 | * Init-time set_pte while constructing initial pagetables, which |
| 1591 | * doesn't allow RO page table pages to be remapped RW. | 1556 | * doesn't allow RO page table pages to be remapped RW. |
| @@ -1600,13 +1565,37 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) | |||
| 1600 | * so always write the PTE directly and rely on Xen trapping and | 1565 | * so always write the PTE directly and rely on Xen trapping and |
| 1601 | * emulating any updates as necessary. | 1566 | * emulating any updates as necessary. |
| 1602 | */ | 1567 | */ |
| 1603 | static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) | 1568 | __visible pte_t xen_make_pte_init(pteval_t pte) |
| 1604 | { | 1569 | { |
| 1605 | if (pte_mfn(pte) != INVALID_P2M_ENTRY) | 1570 | #ifdef CONFIG_X86_64 |
| 1606 | pte = mask_rw_pte(ptep, pte); | 1571 | unsigned long pfn; |
| 1607 | else | 1572 | |
| 1608 | pte = __pte_ma(0); | 1573 | /* |
| 1574 | * Pages belonging to the initial p2m list mapped outside the default | ||
| 1575 | * address range must be mapped read-only. This region contains the | ||
| 1576 | * page tables for mapping the p2m list, too, and page tables MUST be | ||
| 1577 | * mapped read-only. | ||
| 1578 | */ | ||
| 1579 | pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT; | ||
| 1580 | if (xen_start_info->mfn_list < __START_KERNEL_map && | ||
| 1581 | pfn >= xen_start_info->first_p2m_pfn && | ||
| 1582 | pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames) | ||
| 1583 | pte &= ~_PAGE_RW; | ||
| 1584 | #endif | ||
| 1585 | pte = pte_pfn_to_mfn(pte); | ||
| 1586 | return native_make_pte(pte); | ||
| 1587 | } | ||
| 1588 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init); | ||
| 1609 | 1589 | ||
| 1590 | static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) | ||
| 1591 | { | ||
| 1592 | #ifdef CONFIG_X86_32 | ||
| 1593 | /* If there's an existing pte, then don't allow _PAGE_RW to be set */ | ||
| 1594 | if (pte_mfn(pte) != INVALID_P2M_ENTRY | ||
| 1595 | && pte_val_ma(*ptep) & _PAGE_PRESENT) | ||
| 1596 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & | ||
| 1597 | pte_val_ma(pte)); | ||
| 1598 | #endif | ||
| 1610 | native_set_pte(ptep, pte); | 1599 | native_set_pte(ptep, pte); |
| 1611 | } | 1600 | } |
| 1612 | 1601 | ||
| @@ -2407,6 +2396,7 @@ static void __init xen_post_allocator_init(void) | |||
| 2407 | pv_mmu_ops.alloc_pud = xen_alloc_pud; | 2396 | pv_mmu_ops.alloc_pud = xen_alloc_pud; |
| 2408 | pv_mmu_ops.release_pud = xen_release_pud; | 2397 | pv_mmu_ops.release_pud = xen_release_pud; |
| 2409 | #endif | 2398 | #endif |
| 2399 | pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte); | ||
| 2410 | 2400 | ||
| 2411 | #ifdef CONFIG_X86_64 | 2401 | #ifdef CONFIG_X86_64 |
| 2412 | pv_mmu_ops.write_cr3 = &xen_write_cr3; | 2402 | pv_mmu_ops.write_cr3 = &xen_write_cr3; |
| @@ -2455,7 +2445,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { | |||
| 2455 | .pte_val = PV_CALLEE_SAVE(xen_pte_val), | 2445 | .pte_val = PV_CALLEE_SAVE(xen_pte_val), |
| 2456 | .pgd_val = PV_CALLEE_SAVE(xen_pgd_val), | 2446 | .pgd_val = PV_CALLEE_SAVE(xen_pgd_val), |
| 2457 | 2447 | ||
| 2458 | .make_pte = PV_CALLEE_SAVE(xen_make_pte), | 2448 | .make_pte = PV_CALLEE_SAVE(xen_make_pte_init), |
| 2459 | .make_pgd = PV_CALLEE_SAVE(xen_make_pgd), | 2449 | .make_pgd = PV_CALLEE_SAVE(xen_make_pgd), |
| 2460 | 2450 | ||
| 2461 | #ifdef CONFIG_X86_PAE | 2451 | #ifdef CONFIG_X86_PAE |
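The xen_make_pte_init() rework folds the old mask_rw_pte() logic into the pte constructor: on 64-bit, pages backing the initial p2m list must never be mapped writable, so the RW bit is cleared whenever the pfn falls inside that window. A stand-alone sketch of the same range-based flag masking, using made-up constants in place of the Xen start_info fields:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_RW 0x2ULL  /* illustrative flag bit, not the real _PAGE_RW value */

    /* Clear the writable bit for any pfn inside [first, first + nr). */
    static uint64_t mask_rw_in_range(uint64_t pte, uint64_t pfn,
                                     uint64_t first, uint64_t nr)
    {
        if (pfn >= first && pfn < first + nr)
            pte &= ~PAGE_RW;
        return pte;
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)mask_rw_in_range(0x1003, 0x120, 0x100, 0x80)); /* 0x1001 */
        printf("%#llx\n", (unsigned long long)mask_rw_in_range(0x1003, 0x200, 0x100, 0x80)); /* 0x1003 */
        return 0;
    }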
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index cab9f766bb06..dd2a49a8aacc 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
| @@ -182,7 +182,7 @@ static void * __ref alloc_p2m_page(void) | |||
| 182 | if (unlikely(!slab_is_available())) | 182 | if (unlikely(!slab_is_available())) |
| 183 | return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE); | 183 | return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE); |
| 184 | 184 | ||
| 185 | return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); | 185 | return (void *)__get_free_page(GFP_KERNEL); |
| 186 | } | 186 | } |
| 187 | 187 | ||
| 188 | static void __ref free_p2m_page(void *p) | 188 | static void __ref free_p2m_page(void *p) |
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h index d38eb9237e64..1065bc8bcae5 100644 --- a/arch/xtensa/include/asm/pgalloc.h +++ b/arch/xtensa/include/asm/pgalloc.h | |||
| @@ -44,7 +44,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | |||
| 44 | pte_t *ptep; | 44 | pte_t *ptep; |
| 45 | int i; | 45 | int i; |
| 46 | 46 | ||
| 47 | ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT); | 47 | ptep = (pte_t *)__get_free_page(GFP_KERNEL); |
| 48 | if (!ptep) | 48 | if (!ptep) |
| 49 | return NULL; | 49 | return NULL; |
| 50 | for (i = 0; i < 1024; i++) | 50 | for (i = 0; i < 1024; i++) |
diff --git a/block/blk-lib.c b/block/blk-lib.c index 23d7f301a196..9e29dc351695 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c | |||
| @@ -113,6 +113,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | |||
| 113 | ret = submit_bio_wait(type, bio); | 113 | ret = submit_bio_wait(type, bio); |
| 114 | if (ret == -EOPNOTSUPP) | 114 | if (ret == -EOPNOTSUPP) |
| 115 | ret = 0; | 115 | ret = 0; |
| 116 | bio_put(bio); | ||
| 116 | } | 117 | } |
| 117 | blk_finish_plug(&plug); | 118 | blk_finish_plug(&plug); |
| 118 | 119 | ||
| @@ -165,8 +166,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, | |||
| 165 | } | 166 | } |
| 166 | } | 167 | } |
| 167 | 168 | ||
| 168 | if (bio) | 169 | if (bio) { |
| 169 | ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio); | 170 | ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio); |
| 171 | bio_put(bio); | ||
| 172 | } | ||
| 170 | return ret != -EOPNOTSUPP ? ret : 0; | 173 | return ret != -EOPNOTSUPP ? ret : 0; |
| 171 | } | 174 | } |
| 172 | EXPORT_SYMBOL(blkdev_issue_write_same); | 175 | EXPORT_SYMBOL(blkdev_issue_write_same); |
| @@ -206,8 +209,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | |||
| 206 | } | 209 | } |
| 207 | } | 210 | } |
| 208 | 211 | ||
| 209 | if (bio) | 212 | if (bio) { |
| 210 | return submit_bio_wait(WRITE, bio); | 213 | ret = submit_bio_wait(WRITE, bio); |
| 214 | bio_put(bio); | ||
| 215 | return ret; | ||
| 216 | } | ||
| 211 | return 0; | 217 | return 0; |
| 212 | } | 218 | } |
| 213 | 219 | ||
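The three blk-lib.c hunks plug the same leak: submit_bio_wait() does not consume the caller's reference, so the caller must bio_put() the bio after waiting, whatever the completion status. A small userspace sketch of the general take-a-reference / wait / drop-your-own-reference pattern (plain reference counting, not the block layer API):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int refcount;
    };

    static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }

    static void obj_put(struct obj *o)
    {
        if (--o->refcount == 0)
            free(o);
    }

    /* The "submit and wait" helper takes and drops its own reference internally. */
    static int submit_and_wait(struct obj *o)
    {
        obj_get(o);
        /* ... do the work ... */
        obj_put(o);
        return 0;
    }

    int main(void)
    {
        struct obj *o = calloc(1, sizeof(*o));
        o->refcount = 1;          /* creator's reference */

        int ret = submit_and_wait(o);
        obj_put(o);               /* creator must still drop its own reference */
        return ret;
    }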
diff --git a/block/blk-mq.c b/block/blk-mq.c index 29cbc1b5fbdb..f9b9049b1284 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
| 1262 | 1262 | ||
| 1263 | blk_queue_split(q, &bio, q->bio_split); | 1263 | blk_queue_split(q, &bio, q->bio_split); |
| 1264 | 1264 | ||
| 1265 | if (!is_flush_fua && !blk_queue_nomerges(q)) { | 1265 | if (!is_flush_fua && !blk_queue_nomerges(q) && |
| 1266 | if (blk_attempt_plug_merge(q, bio, &request_count, | 1266 | blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq)) |
| 1267 | &same_queue_rq)) | 1267 | return BLK_QC_T_NONE; |
| 1268 | return BLK_QC_T_NONE; | ||
| 1269 | } else | ||
| 1270 | request_count = blk_plug_queued_count(q); | ||
| 1271 | 1268 | ||
| 1272 | rq = blk_mq_map_request(q, bio, &data); | 1269 | rq = blk_mq_map_request(q, bio, &data); |
| 1273 | if (unlikely(!rq)) | 1270 | if (unlikely(!rq)) |
| @@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) | |||
| 1358 | 1355 | ||
| 1359 | blk_queue_split(q, &bio, q->bio_split); | 1356 | blk_queue_split(q, &bio, q->bio_split); |
| 1360 | 1357 | ||
| 1361 | if (!is_flush_fua && !blk_queue_nomerges(q) && | 1358 | if (!is_flush_fua && !blk_queue_nomerges(q)) { |
| 1362 | blk_attempt_plug_merge(q, bio, &request_count, NULL)) | 1359 | if (blk_attempt_plug_merge(q, bio, &request_count, NULL)) |
| 1363 | return BLK_QC_T_NONE; | 1360 | return BLK_QC_T_NONE; |
| 1361 | } else | ||
| 1362 | request_count = blk_plug_queued_count(q); | ||
| 1364 | 1363 | ||
| 1365 | rq = blk_mq_map_request(q, bio, &data); | 1364 | rq = blk_mq_map_request(q, bio, &data); |
| 1366 | if (unlikely(!rq)) | 1365 | if (unlikely(!rq)) |
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig index e28e912000a7..331f6baf2df8 100644 --- a/crypto/asymmetric_keys/Kconfig +++ b/crypto/asymmetric_keys/Kconfig | |||
| @@ -13,6 +13,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE | |||
| 13 | tristate "Asymmetric public-key crypto algorithm subtype" | 13 | tristate "Asymmetric public-key crypto algorithm subtype" |
| 14 | select MPILIB | 14 | select MPILIB |
| 15 | select CRYPTO_HASH_INFO | 15 | select CRYPTO_HASH_INFO |
| 16 | select CRYPTO_AKCIPHER | ||
| 16 | help | 17 | help |
| 17 | This option provides support for asymmetric public key type handling. | 18 | This option provides support for asymmetric public key type handling. |
| 18 | If signature generation and/or verification are to be used, | 19 | If signature generation and/or verification are to be used, |
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 0d92d0f915e9..c7ba948d253c 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c | |||
| @@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
| 331 | pr->throttling.duty_width = acpi_gbl_FADT.duty_width; | 331 | pr->throttling.duty_width = acpi_gbl_FADT.duty_width; |
| 332 | 332 | ||
| 333 | pr->pblk = object.processor.pblk_address; | 333 | pr->pblk = object.processor.pblk_address; |
| 334 | |||
| 335 | /* | ||
| 336 | * We don't care about error returns - we just try to mark | ||
| 337 | * these reserved so that nobody else is confused into thinking | ||
| 338 | * that this region might be unused.. | ||
| 339 | * | ||
| 340 | * (In particular, allocating the IO range for Cardbus) | ||
| 341 | */ | ||
| 342 | request_region(pr->throttling.address, 6, "ACPI CPU throttle"); | ||
| 343 | } | 334 | } |
| 344 | 335 | ||
| 345 | /* | 336 | /* |
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 3d5b8a099351..c1d138e128cb 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c | |||
| @@ -754,7 +754,8 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device, | |||
| 754 | } | 754 | } |
| 755 | 755 | ||
| 756 | int acpi_video_get_levels(struct acpi_device *device, | 756 | int acpi_video_get_levels(struct acpi_device *device, |
| 757 | struct acpi_video_device_brightness **dev_br) | 757 | struct acpi_video_device_brightness **dev_br, |
| 758 | int *pmax_level) | ||
| 758 | { | 759 | { |
| 759 | union acpi_object *obj = NULL; | 760 | union acpi_object *obj = NULL; |
| 760 | int i, max_level = 0, count = 0, level_ac_battery = 0; | 761 | int i, max_level = 0, count = 0, level_ac_battery = 0; |
| @@ -841,6 +842,8 @@ int acpi_video_get_levels(struct acpi_device *device, | |||
| 841 | 842 | ||
| 842 | br->count = count; | 843 | br->count = count; |
| 843 | *dev_br = br; | 844 | *dev_br = br; |
| 845 | if (pmax_level) | ||
| 846 | *pmax_level = max_level; | ||
| 844 | 847 | ||
| 845 | out: | 848 | out: |
| 846 | kfree(obj); | 849 | kfree(obj); |
| @@ -869,7 +872,7 @@ acpi_video_init_brightness(struct acpi_video_device *device) | |||
| 869 | struct acpi_video_device_brightness *br = NULL; | 872 | struct acpi_video_device_brightness *br = NULL; |
| 870 | int result = -EINVAL; | 873 | int result = -EINVAL; |
| 871 | 874 | ||
| 872 | result = acpi_video_get_levels(device->dev, &br); | 875 | result = acpi_video_get_levels(device->dev, &br, &max_level); |
| 873 | if (result) | 876 | if (result) |
| 874 | return result; | 877 | return result; |
| 875 | device->brightness = br; | 878 | device->brightness = br; |
| @@ -1737,7 +1740,7 @@ static void acpi_video_run_bcl_for_osi(struct acpi_video_bus *video) | |||
| 1737 | 1740 | ||
| 1738 | mutex_lock(&video->device_list_lock); | 1741 | mutex_lock(&video->device_list_lock); |
| 1739 | list_for_each_entry(dev, &video->video_device_list, entry) { | 1742 | list_for_each_entry(dev, &video->video_device_list, entry) { |
| 1740 | if (!acpi_video_device_lcd_query_levels(dev, &levels)) | 1743 | if (!acpi_video_device_lcd_query_levels(dev->dev->handle, &levels)) |
| 1741 | kfree(levels); | 1744 | kfree(levels); |
| 1742 | } | 1745 | } |
| 1743 | mutex_unlock(&video->device_list_lock); | 1746 | mutex_unlock(&video->device_list_lock); |
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c index a1d177d58254..21932d640a41 100644 --- a/drivers/acpi/acpica/exconfig.c +++ b/drivers/acpi/acpica/exconfig.c | |||
| @@ -108,7 +108,9 @@ acpi_ex_add_table(u32 table_index, | |||
| 108 | 108 | ||
| 109 | /* Add the table to the namespace */ | 109 | /* Add the table to the namespace */ |
| 110 | 110 | ||
| 111 | acpi_ex_exit_interpreter(); | ||
| 111 | status = acpi_ns_load_table(table_index, parent_node); | 112 | status = acpi_ns_load_table(table_index, parent_node); |
| 113 | acpi_ex_enter_interpreter(); | ||
| 112 | if (ACPI_FAILURE(status)) { | 114 | if (ACPI_FAILURE(status)) { |
| 113 | acpi_ut_remove_reference(obj_desc); | 115 | acpi_ut_remove_reference(obj_desc); |
| 114 | *ddb_handle = NULL; | 116 | *ddb_handle = NULL; |
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index 0f18dbc9a37f..3b7fb99362b6 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c | |||
| @@ -83,27 +83,22 @@ acpi_hw_write_multiple(u32 value, | |||
| 83 | static u8 | 83 | static u8 |
| 84 | acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width) | 84 | acpi_hw_get_access_bit_width(struct acpi_generic_address *reg, u8 max_bit_width) |
| 85 | { | 85 | { |
| 86 | u64 address; | ||
| 87 | |||
| 88 | if (!reg->access_width) { | 86 | if (!reg->access_width) { |
| 87 | if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { | ||
| 88 | max_bit_width = 32; | ||
| 89 | } | ||
| 90 | |||
| 89 | /* | 91 | /* |
| 90 | * Detect old register descriptors where only the bit_width field | 92 | * Detect old register descriptors where only the bit_width field |
| 91 | * makes sense. The target address is copied to handle possible | 93 | * makes sense. |
| 92 | * alignment issues. | ||
| 93 | */ | 94 | */ |
| 94 | ACPI_MOVE_64_TO_64(&address, ®->address); | 95 | if (reg->bit_width < max_bit_width && |
| 95 | if (!reg->bit_offset && reg->bit_width && | 96 | !reg->bit_offset && reg->bit_width && |
| 96 | ACPI_IS_POWER_OF_TWO(reg->bit_width) && | 97 | ACPI_IS_POWER_OF_TWO(reg->bit_width) && |
| 97 | ACPI_IS_ALIGNED(reg->bit_width, 8) && | 98 | ACPI_IS_ALIGNED(reg->bit_width, 8)) { |
| 98 | ACPI_IS_ALIGNED(address, reg->bit_width)) { | ||
| 99 | return (reg->bit_width); | 99 | return (reg->bit_width); |
| 100 | } else { | ||
| 101 | if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { | ||
| 102 | return (32); | ||
| 103 | } else { | ||
| 104 | return (max_bit_width); | ||
| 105 | } | ||
| 106 | } | 100 | } |
| 101 | return (max_bit_width); | ||
| 107 | } else { | 102 | } else { |
| 108 | return (1 << (reg->access_width + 2)); | 103 | return (1 << (reg->access_width + 2)); |
| 109 | } | 104 | } |
| @@ -311,12 +306,6 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg) | |||
| 311 | acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) | 306 | acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) |
| 312 | { | 307 | { |
| 313 | u64 address; | 308 | u64 address; |
| 314 | u8 access_width; | ||
| 315 | u32 bit_width; | ||
| 316 | u8 bit_offset; | ||
| 317 | u64 value64; | ||
| 318 | u32 new_value32, old_value32; | ||
| 319 | u8 index; | ||
| 320 | acpi_status status; | 309 | acpi_status status; |
| 321 | 310 | ||
| 322 | ACPI_FUNCTION_NAME(hw_write); | 311 | ACPI_FUNCTION_NAME(hw_write); |
| @@ -328,145 +317,23 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg) | |||
| 328 | return (status); | 317 | return (status); |
| 329 | } | 318 | } |
| 330 | 319 | ||
| 331 | /* Convert access_width into number of bits based */ | ||
| 332 | |||
| 333 | access_width = acpi_hw_get_access_bit_width(reg, 32); | ||
| 334 | bit_width = reg->bit_offset + reg->bit_width; | ||
| 335 | bit_offset = reg->bit_offset; | ||
| 336 | |||
| 337 | /* | 320 | /* |
| 338 | * Two address spaces supported: Memory or IO. PCI_Config is | 321 | * Two address spaces supported: Memory or IO. PCI_Config is |
| 339 | * not supported here because the GAS structure is insufficient | 322 | * not supported here because the GAS structure is insufficient |
| 340 | */ | 323 | */ |
| 341 | index = 0; | 324 | if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { |
| 342 | while (bit_width) { | 325 | status = acpi_os_write_memory((acpi_physical_address) |
| 343 | /* | 326 | address, (u64)value, |
| 344 | * Use offset style bit reads because "Index * AccessWidth" is | 327 | reg->bit_width); |
| 345 | * ensured to be less than 32-bits by acpi_hw_validate_register(). | 328 | } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ |
| 346 | */ | 329 | |
| 347 | new_value32 = ACPI_GET_BITS(&value, index * access_width, | 330 | status = acpi_hw_write_port((acpi_io_address) |
| 348 | ACPI_MASK_BITS_ABOVE_32 | 331 | address, value, reg->bit_width); |
| 349 | (access_width)); | ||
| 350 | |||
| 351 | if (bit_offset >= access_width) { | ||
| 352 | bit_offset -= access_width; | ||
| 353 | } else { | ||
| 354 | /* | ||
| 355 | * Use offset style bit masks because access_width is ensured | ||
| 356 | * to be less than 32-bits by acpi_hw_validate_register() and | ||
| 357 | * bit_offset/bit_width is less than access_width here. | ||
| 358 | */ | ||
| 359 | if (bit_offset) { | ||
| 360 | new_value32 &= ACPI_MASK_BITS_BELOW(bit_offset); | ||
| 361 | } | ||
| 362 | if (bit_width < access_width) { | ||
| 363 | new_value32 &= ACPI_MASK_BITS_ABOVE(bit_width); | ||
| 364 | } | ||
| 365 | |||
| 366 | if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { | ||
| 367 | if (bit_offset || bit_width < access_width) { | ||
| 368 | /* | ||
| 369 | * Read old values in order not to modify the bits that | ||
| 370 | * are beyond the register bit_width/bit_offset setting. | ||
| 371 | */ | ||
| 372 | status = | ||
| 373 | acpi_os_read_memory((acpi_physical_address) | ||
| 374 | address + | ||
| 375 | index * | ||
| 376 | ACPI_DIV_8 | ||
| 377 | (access_width), | ||
| 378 | &value64, | ||
| 379 | access_width); | ||
| 380 | old_value32 = (u32)value64; | ||
| 381 | |||
| 382 | /* | ||
| 383 | * Use offset style bit masks because access_width is | ||
| 384 | * ensured to be less than 32-bits by | ||
| 385 | * acpi_hw_validate_register() and bit_offset/bit_width is | ||
| 386 | * less than access_width here. | ||
| 387 | */ | ||
| 388 | if (bit_offset) { | ||
| 389 | old_value32 &= | ||
| 390 | ACPI_MASK_BITS_ABOVE | ||
| 391 | (bit_offset); | ||
| 392 | bit_offset = 0; | ||
| 393 | } | ||
| 394 | if (bit_width < access_width) { | ||
| 395 | old_value32 &= | ||
| 396 | ACPI_MASK_BITS_BELOW | ||
| 397 | (bit_width); | ||
| 398 | } | ||
| 399 | |||
| 400 | new_value32 |= old_value32; | ||
| 401 | } | ||
| 402 | |||
| 403 | value64 = (u64)new_value32; | ||
| 404 | status = | ||
| 405 | acpi_os_write_memory((acpi_physical_address) | ||
| 406 | address + | ||
| 407 | index * | ||
| 408 | ACPI_DIV_8 | ||
| 409 | (access_width), | ||
| 410 | value64, access_width); | ||
| 411 | } else { /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */ | ||
| 412 | |||
| 413 | if (bit_offset || bit_width < access_width) { | ||
| 414 | /* | ||
| 415 | * Read old values in order not to modify the bits that | ||
| 416 | * are beyond the register bit_width/bit_offset setting. | ||
| 417 | */ | ||
| 418 | status = | ||
| 419 | acpi_hw_read_port((acpi_io_address) | ||
| 420 | address + | ||
| 421 | index * | ||
| 422 | ACPI_DIV_8 | ||
| 423 | (access_width), | ||
| 424 | &old_value32, | ||
| 425 | access_width); | ||
| 426 | |||
| 427 | /* | ||
| 428 | * Use offset style bit masks because access_width is | ||
| 429 | * ensured to be less than 32-bits by | ||
| 430 | * acpi_hw_validate_register() and bit_offset/bit_width is | ||
| 431 | * less than access_width here. | ||
| 432 | */ | ||
| 433 | if (bit_offset) { | ||
| 434 | old_value32 &= | ||
| 435 | ACPI_MASK_BITS_ABOVE | ||
| 436 | (bit_offset); | ||
| 437 | bit_offset = 0; | ||
| 438 | } | ||
| 439 | if (bit_width < access_width) { | ||
| 440 | old_value32 &= | ||
| 441 | ACPI_MASK_BITS_BELOW | ||
| 442 | (bit_width); | ||
| 443 | } | ||
| 444 | |||
| 445 | new_value32 |= old_value32; | ||
| 446 | } | ||
| 447 | |||
| 448 | status = acpi_hw_write_port((acpi_io_address) | ||
| 449 | address + | ||
| 450 | index * | ||
| 451 | ACPI_DIV_8 | ||
| 452 | (access_width), | ||
| 453 | new_value32, | ||
| 454 | access_width); | ||
| 455 | } | ||
| 456 | } | ||
| 457 | |||
| 458 | /* | ||
| 459 | * Index * access_width is ensured to be less than 32-bits by | ||
| 460 | * acpi_hw_validate_register(). | ||
| 461 | */ | ||
| 462 | bit_width -= | ||
| 463 | bit_width > access_width ? access_width : bit_width; | ||
| 464 | index++; | ||
| 465 | } | 332 | } |
| 466 | 333 | ||
| 467 | ACPI_DEBUG_PRINT((ACPI_DB_IO, | 334 | ACPI_DEBUG_PRINT((ACPI_DB_IO, |
| 468 | "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n", | 335 | "Wrote: %8.8X width %2d to %8.8X%8.8X (%s)\n", |
| 469 | value, access_width, ACPI_FORMAT_UINT64(address), | 336 | value, reg->bit_width, ACPI_FORMAT_UINT64(address), |
| 470 | acpi_ut_get_region_name(reg->space_id))); | 337 | acpi_ut_get_region_name(reg->space_id))); |
| 471 | 338 | ||
| 472 | return (status); | 339 | return (status); |
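The simplified acpi_hw_get_access_bit_width() keeps two cases: an explicit access_width, which is an encoded size (1 = 8-bit, 2 = 16-bit, 3 = 32-bit, 4 = 64-bit) converted with 1 << (access_width + 2), or a legacy descriptor where only bit_width is meaningful. A compact sketch of that conversion, outside the ACPICA code:

    #include <stdio.h>

    /* ACPI GAS access_width is an encoded size: 1=8, 2=16, 3=32, 4=64 bits. */
    static unsigned int access_bit_width(unsigned int access_width)
    {
        return 1u << (access_width + 2);
    }

    int main(void)
    {
        for (unsigned int w = 1; w <= 4; w++)
            printf("access_width %u -> %u bits\n", w, access_bit_width(w));
        return 0;
    }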
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c index f631a47724f0..1783cd7e1446 100644 --- a/drivers/acpi/acpica/nsparse.c +++ b/drivers/acpi/acpica/nsparse.c | |||
| @@ -47,6 +47,7 @@ | |||
| 47 | #include "acparser.h" | 47 | #include "acparser.h" |
| 48 | #include "acdispat.h" | 48 | #include "acdispat.h" |
| 49 | #include "actables.h" | 49 | #include "actables.h" |
| 50 | #include "acinterp.h" | ||
| 50 | 51 | ||
| 51 | #define _COMPONENT ACPI_NAMESPACE | 52 | #define _COMPONENT ACPI_NAMESPACE |
| 52 | ACPI_MODULE_NAME("nsparse") | 53 | ACPI_MODULE_NAME("nsparse") |
| @@ -170,6 +171,8 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node) | |||
| 170 | 171 | ||
| 171 | ACPI_FUNCTION_TRACE(ns_parse_table); | 172 | ACPI_FUNCTION_TRACE(ns_parse_table); |
| 172 | 173 | ||
| 174 | acpi_ex_enter_interpreter(); | ||
| 175 | |||
| 173 | /* | 176 | /* |
| 174 | * AML Parse, pass 1 | 177 | * AML Parse, pass 1 |
| 175 | * | 178 | * |
| @@ -185,7 +188,7 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node) | |||
| 185 | status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, | 188 | status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, |
| 186 | table_index, start_node); | 189 | table_index, start_node); |
| 187 | if (ACPI_FAILURE(status)) { | 190 | if (ACPI_FAILURE(status)) { |
| 188 | return_ACPI_STATUS(status); | 191 | goto error_exit; |
| 189 | } | 192 | } |
| 190 | 193 | ||
| 191 | /* | 194 | /* |
| @@ -201,8 +204,10 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node) | |||
| 201 | status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, | 204 | status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, |
| 202 | table_index, start_node); | 205 | table_index, start_node); |
| 203 | if (ACPI_FAILURE(status)) { | 206 | if (ACPI_FAILURE(status)) { |
| 204 | return_ACPI_STATUS(status); | 207 | goto error_exit; |
| 205 | } | 208 | } |
| 206 | 209 | ||
| 210 | error_exit: | ||
| 211 | acpi_ex_exit_interpreter(); | ||
| 207 | return_ACPI_STATUS(status); | 212 | return_ACPI_STATUS(status); |
| 208 | } | 213 | } |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 31e8da648fff..262ca31b86d9 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
| @@ -1051,7 +1051,7 @@ static int __init acpi_bus_init(void) | |||
| 1051 | * Maybe EC region is required at bus_scan/acpi_get_devices. So it | 1051 | * Maybe EC region is required at bus_scan/acpi_get_devices. So it |
| 1052 | * is necessary to enable it as early as possible. | 1052 | * is necessary to enable it as early as possible. |
| 1053 | */ | 1053 | */ |
| 1054 | acpi_boot_ec_enable(); | 1054 | acpi_ec_dsdt_probe(); |
| 1055 | 1055 | ||
| 1056 | printk(KERN_INFO PREFIX "Interpreter enabled\n"); | 1056 | printk(KERN_INFO PREFIX "Interpreter enabled\n"); |
| 1057 | 1057 | ||
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 0e70181f150c..73c76d646064 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
| @@ -1446,10 +1446,30 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context) | |||
| 1446 | return AE_OK; | 1446 | return AE_OK; |
| 1447 | } | 1447 | } |
| 1448 | 1448 | ||
| 1449 | int __init acpi_boot_ec_enable(void) | 1449 | static const struct acpi_device_id ec_device_ids[] = { |
| 1450 | {"PNP0C09", 0}, | ||
| 1451 | {"", 0}, | ||
| 1452 | }; | ||
| 1453 | |||
| 1454 | int __init acpi_ec_dsdt_probe(void) | ||
| 1450 | { | 1455 | { |
| 1451 | if (!boot_ec) | 1456 | acpi_status status; |
| 1457 | |||
| 1458 | if (boot_ec) | ||
| 1452 | return 0; | 1459 | return 0; |
| 1460 | |||
| 1461 | /* | ||
| 1462 | * Finding EC from DSDT if there is no ECDT EC available. When this | ||
| 1463 | * function is invoked, ACPI tables have been fully loaded, we can | ||
| 1464 | * walk namespace now. | ||
| 1465 | */ | ||
| 1466 | boot_ec = make_acpi_ec(); | ||
| 1467 | if (!boot_ec) | ||
| 1468 | return -ENOMEM; | ||
| 1469 | status = acpi_get_devices(ec_device_ids[0].id, | ||
| 1470 | ec_parse_device, boot_ec, NULL); | ||
| 1471 | if (ACPI_FAILURE(status) || !boot_ec->handle) | ||
| 1472 | return -ENODEV; | ||
| 1453 | if (!ec_install_handlers(boot_ec)) { | 1473 | if (!ec_install_handlers(boot_ec)) { |
| 1454 | first_ec = boot_ec; | 1474 | first_ec = boot_ec; |
| 1455 | return 0; | 1475 | return 0; |
| @@ -1457,11 +1477,6 @@ int __init acpi_boot_ec_enable(void) | |||
| 1457 | return -EFAULT; | 1477 | return -EFAULT; |
| 1458 | } | 1478 | } |
| 1459 | 1479 | ||
| 1460 | static const struct acpi_device_id ec_device_ids[] = { | ||
| 1461 | {"PNP0C09", 0}, | ||
| 1462 | {"", 0}, | ||
| 1463 | }; | ||
| 1464 | |||
| 1465 | #if 0 | 1480 | #if 0 |
| 1466 | /* | 1481 | /* |
| 1467 | * Some EC firmware variations refuse to respond QR_EC when SCI_EVT is not | 1482 |
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 9bb0773d39bf..27cc7feabfe4 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
| @@ -181,7 +181,7 @@ typedef int (*acpi_ec_query_func) (void *data); | |||
| 181 | 181 | ||
| 182 | int acpi_ec_init(void); | 182 | int acpi_ec_init(void); |
| 183 | int acpi_ec_ecdt_probe(void); | 183 | int acpi_ec_ecdt_probe(void); |
| 184 | int acpi_boot_ec_enable(void); | 184 | int acpi_ec_dsdt_probe(void); |
| 185 | void acpi_ec_block_transactions(void); | 185 | void acpi_ec_block_transactions(void); |
| 186 | void acpi_ec_unblock_transactions(void); | 186 | void acpi_ec_unblock_transactions(void); |
| 187 | void acpi_ec_unblock_transactions_early(void); | 187 | void acpi_ec_unblock_transactions_early(void); |
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index f170d746336d..c72e64893d03 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c | |||
| @@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr) | |||
| 676 | if (!pr->flags.throttling) | 676 | if (!pr->flags.throttling) |
| 677 | return -ENODEV; | 677 | return -ENODEV; |
| 678 | 678 | ||
| 679 | /* | ||
| 680 | * We don't care about error returns - we just try to mark | ||
| 681 | * these reserved so that nobody else is confused into thinking | ||
| 682 | * that this region might be unused.. | ||
| 683 | * | ||
| 684 | * (In particular, allocating the IO range for Cardbus) | ||
| 685 | */ | ||
| 686 | request_region(pr->throttling.address, 6, "ACPI CPU throttle"); | ||
| 687 | |||
| 679 | pr->throttling.state = 0; | 688 | pr->throttling.state = 0; |
| 680 | 689 | ||
| 681 | duty_mask = pr->throttling.state_count - 1; | 690 | duty_mask = pr->throttling.state_count - 1; |
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 61dc7a99e89a..c6f017458958 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
| @@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host) | |||
| 606 | ata_scsi_port_error_handler(host, ap); | 606 | ata_scsi_port_error_handler(host, ap); |
| 607 | 607 | ||
| 608 | /* finish or retry handled scmd's and clean up */ | 608 | /* finish or retry handled scmd's and clean up */ |
| 609 | WARN_ON(host->host_failed || !list_empty(&eh_work_q)); | 609 | WARN_ON(!list_empty(&eh_work_q)); |
| 610 | 610 | ||
| 611 | DPRINTK("EXIT\n"); | 611 | DPRINTK("EXIT\n"); |
| 612 | } | 612 | } |
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index a969a7e443be..85aaf2222587 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c | |||
| @@ -181,13 +181,17 @@ static char *res_strings[] = { | |||
| 181 | "reserved 27", | 181 | "reserved 27", |
| 182 | "reserved 28", | 182 | "reserved 28", |
| 183 | "reserved 29", | 183 | "reserved 29", |
| 184 | "reserved 30", | 184 | "reserved 30", /* FIXME: The strings between 30-40 might be wrong. */ |
| 185 | "reassembly abort: no buffers", | 185 | "reassembly abort: no buffers", |
| 186 | "receive buffer overflow", | 186 | "receive buffer overflow", |
| 187 | "change in GFC", | 187 | "change in GFC", |
| 188 | "receive buffer full", | 188 | "receive buffer full", |
| 189 | "low priority discard - no receive descriptor", | 189 | "low priority discard - no receive descriptor", |
| 190 | "low priority discard - missing end of packet", | 190 | "low priority discard - missing end of packet", |
| 191 | "reserved 37", | ||
| 192 | "reserved 38", | ||
| 193 | "reserved 39", | ||
| 194 | "reseverd 40", | ||
| 191 | "reserved 41", | 195 | "reserved 41", |
| 192 | "reserved 42", | 196 | "reserved 42", |
| 193 | "reserved 43", | 197 | "reserved 43", |
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 7d00f2994738..809dd1e02091 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c | |||
| @@ -1128,7 +1128,7 @@ static int rx_pkt(struct atm_dev *dev) | |||
| 1128 | /* make the ptr point to the corresponding buffer desc entry */ | 1128 | /* make the ptr point to the corresponding buffer desc entry */ |
| 1129 | buf_desc_ptr += desc; | 1129 | buf_desc_ptr += desc; |
| 1130 | if (!desc || (desc > iadev->num_rx_desc) || | 1130 | if (!desc || (desc > iadev->num_rx_desc) || |
| 1131 | ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { | 1131 | ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) { |
| 1132 | free_desc(dev, desc); | 1132 | free_desc(dev, desc); |
| 1133 | IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);) | 1133 | IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);) |
| 1134 | return -1; | 1134 | return -1; |
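The iphase.c change tightens a bounds check: a table of num_vc entries has valid indices 0 .. num_vc - 1, so an index equal to num_vc must be rejected, which is why '>' becomes '>='. A tiny sketch of the corrected test:

    #include <stdio.h>

    /* An index into a table of n entries is valid only for 0 <= idx < n. */
    static int index_ok(unsigned int idx, unsigned int n)
    {
        return idx < n;
    }

    int main(void)
    {
        printf("%d %d %d\n", index_ok(0, 4), index_ok(3, 4), index_ok(4, 4)); /* 1 1 0 */
        return 0;
    }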
diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 6b2a84e7f2be..2609ba20b396 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile | |||
| @@ -10,7 +10,7 @@ obj-$(CONFIG_DMA_CMA) += dma-contiguous.o | |||
| 10 | obj-y += power/ | 10 | obj-y += power/ |
| 11 | obj-$(CONFIG_HAS_DMA) += dma-mapping.o | 11 | obj-$(CONFIG_HAS_DMA) += dma-mapping.o |
| 12 | obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o | 12 | obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o |
| 13 | obj-$(CONFIG_ISA) += isa.o | 13 | obj-$(CONFIG_ISA_BUS_API) += isa.o |
| 14 | obj-$(CONFIG_FW_LOADER) += firmware_class.o | 14 | obj-$(CONFIG_FW_LOADER) += firmware_class.o |
| 15 | obj-$(CONFIG_NUMA) += node.o | 15 | obj-$(CONFIG_NUMA) += node.o |
| 16 | obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o | 16 | obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o |
diff --git a/drivers/base/isa.c b/drivers/base/isa.c index 91dba65d7264..cd6ccdcf9df0 100644 --- a/drivers/base/isa.c +++ b/drivers/base/isa.c | |||
| @@ -180,4 +180,4 @@ static int __init isa_bus_init(void) | |||
| 180 | return error; | 180 | return error; |
| 181 | } | 181 | } |
| 182 | 182 | ||
| 183 | device_initcall(isa_bus_init); | 183 | postcore_initcall(isa_bus_init); |
diff --git a/drivers/base/module.c b/drivers/base/module.c index db930d3ee312..2a215780eda2 100644 --- a/drivers/base/module.c +++ b/drivers/base/module.c | |||
| @@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv) | |||
| 24 | 24 | ||
| 25 | static void module_create_drivers_dir(struct module_kobject *mk) | 25 | static void module_create_drivers_dir(struct module_kobject *mk) |
| 26 | { | 26 | { |
| 27 | if (!mk || mk->drivers_dir) | 27 | static DEFINE_MUTEX(drivers_dir_mutex); |
| 28 | return; | ||
| 29 | 28 | ||
| 30 | mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj); | 29 | mutex_lock(&drivers_dir_mutex); |
| 30 | if (mk && !mk->drivers_dir) | ||
| 31 | mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj); | ||
| 32 | mutex_unlock(&drivers_dir_mutex); | ||
| 31 | } | 33 | } |
| 32 | 34 | ||
| 33 | void module_add_driver(struct module *mod, struct device_driver *drv) | 35 | void module_add_driver(struct module *mod, struct device_driver *drv) |
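module_create_drivers_dir() can race with itself when two drivers of the same module register concurrently: both see drivers_dir == NULL and both call kobject_create_and_add(). The fix serializes the check-and-create with a function-local static mutex. A userspace sketch of the same lazily-created-once-under-a-lock idiom, using pthreads and a string in place of the kobject:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *shared_dir;    /* created at most once */

    static void create_dir_once(void)
    {
        static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

        pthread_mutex_lock(&lock);
        if (!shared_dir)
            shared_dir = strdup("drivers");  /* stands in for kobject_create_and_add() */
        pthread_mutex_unlock(&lock);
    }

    static void *worker(void *arg)
    {
        (void)arg;
        create_dir_once();
        return NULL;
    }

    int main(void)   /* build with -pthread */
    {
        pthread_t t[4];
        for (int i = 0; i < 4; i++)
            pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < 4; i++)
            pthread_join(t[i], NULL);
        printf("%s\n", shared_dir);
        free(shared_dir);
        return 0;
    }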
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c index 83d6e7ba1a34..8c3434bdb26d 100644 --- a/drivers/base/power/opp/cpu.c +++ b/drivers/base/power/opp/cpu.c | |||
| @@ -211,7 +211,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, | |||
| 211 | } | 211 | } |
| 212 | 212 | ||
| 213 | /* Mark opp-table as multiple CPUs are sharing it now */ | 213 | /* Mark opp-table as multiple CPUs are sharing it now */ |
| 214 | opp_table->shared_opp = true; | 214 | opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED; |
| 215 | } | 215 | } |
| 216 | unlock: | 216 | unlock: |
| 217 | mutex_unlock(&opp_table_lock); | 217 | mutex_unlock(&opp_table_lock); |
| @@ -227,7 +227,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); | |||
| 227 | * | 227 | * |
| 228 | * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev. | 228 | * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev. |
| 229 | * | 229 | * |
| 230 | * Returns -ENODEV if OPP table isn't already present. | 230 | * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP |
| 231 | * table's status is access-unknown. | ||
| 231 | * | 232 | * |
| 232 | * Locking: The internal opp_table and opp structures are RCU protected. | 233 | * Locking: The internal opp_table and opp structures are RCU protected. |
| 233 | * Hence this function internally uses RCU updater strategy with mutex locks | 234 | * Hence this function internally uses RCU updater strategy with mutex locks |
| @@ -249,9 +250,14 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) | |||
| 249 | goto unlock; | 250 | goto unlock; |
| 250 | } | 251 | } |
| 251 | 252 | ||
| 253 | if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) { | ||
| 254 | ret = -EINVAL; | ||
| 255 | goto unlock; | ||
| 256 | } | ||
| 257 | |||
| 252 | cpumask_clear(cpumask); | 258 | cpumask_clear(cpumask); |
| 253 | 259 | ||
| 254 | if (opp_table->shared_opp) { | 260 | if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) { |
| 255 | list_for_each_entry(opp_dev, &opp_table->dev_list, node) | 261 | list_for_each_entry(opp_dev, &opp_table->dev_list, node) |
| 256 | cpumask_set_cpu(opp_dev->dev->id, cpumask); | 262 | cpumask_set_cpu(opp_dev->dev->id, cpumask); |
| 257 | } else { | 263 | } else { |
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c index 94d2010558e3..1dfd3dd92624 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/base/power/opp/of.c | |||
| @@ -34,7 +34,10 @@ static struct opp_table *_managed_opp(const struct device_node *np) | |||
| 34 | * But the OPPs will be considered as shared only if the | 34 | * But the OPPs will be considered as shared only if the |
| 35 | * OPP table contains a "opp-shared" property. | 35 | * OPP table contains a "opp-shared" property. |
| 36 | */ | 36 | */ |
| 37 | return opp_table->shared_opp ? opp_table : NULL; | 37 | if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) |
| 38 | return opp_table; | ||
| 39 | |||
| 40 | return NULL; | ||
| 38 | } | 41 | } |
| 39 | } | 42 | } |
| 40 | 43 | ||
| @@ -353,7 +356,10 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) | |||
| 353 | } | 356 | } |
| 354 | 357 | ||
| 355 | opp_table->np = opp_np; | 358 | opp_table->np = opp_np; |
| 356 | opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared"); | 359 | if (of_property_read_bool(opp_np, "opp-shared")) |
| 360 | opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED; | ||
| 361 | else | ||
| 362 | opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE; | ||
| 357 | 363 | ||
| 358 | mutex_unlock(&opp_table_lock); | 364 | mutex_unlock(&opp_table_lock); |
| 359 | 365 | ||
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h index 20f3be22e060..fabd5ca1a083 100644 --- a/drivers/base/power/opp/opp.h +++ b/drivers/base/power/opp/opp.h | |||
| @@ -119,6 +119,12 @@ struct opp_device { | |||
| 119 | #endif | 119 | #endif |
| 120 | }; | 120 | }; |
| 121 | 121 | ||
| 122 | enum opp_table_access { | ||
| 123 | OPP_TABLE_ACCESS_UNKNOWN = 0, | ||
| 124 | OPP_TABLE_ACCESS_EXCLUSIVE = 1, | ||
| 125 | OPP_TABLE_ACCESS_SHARED = 2, | ||
| 126 | }; | ||
| 127 | |||
| 122 | /** | 128 | /** |
| 123 | * struct opp_table - Device opp structure | 129 | * struct opp_table - Device opp structure |
| 124 | * @node: table node - contains the devices with OPPs that | 130 | * @node: table node - contains the devices with OPPs that |
| @@ -166,7 +172,7 @@ struct opp_table { | |||
| 166 | /* For backward compatibility with v1 bindings */ | 172 | /* For backward compatibility with v1 bindings */ |
| 167 | unsigned int voltage_tolerance_v1; | 173 | unsigned int voltage_tolerance_v1; |
| 168 | 174 | ||
| 169 | bool shared_opp; | 175 | enum opp_table_access shared_opp; |
| 170 | struct dev_pm_opp *suspend_opp; | 176 | struct dev_pm_opp *suspend_opp; |
| 171 | 177 | ||
| 172 | unsigned int *supported_hw; | 178 | unsigned int *supported_hw; |
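Replacing the bool shared_opp with a three-valued enum lets the OPP code tell "not yet known" apart from "known exclusive", so dev_pm_opp_get_sharing_cpus() can return -EINVAL instead of silently treating an unpopulated table as exclusive. A minimal sketch of the tri-state pattern with illustrative names:

    #include <stdio.h>
    #include <errno.h>

    enum table_access {
        ACCESS_UNKNOWN = 0,     /* zero-initialised tables land here */
        ACCESS_EXCLUSIVE,
        ACCESS_SHARED,
    };

    static int get_sharing(enum table_access access)
    {
        if (access == ACCESS_UNKNOWN)
            return -EINVAL;             /* caller never filled the table in */
        return access == ACCESS_SHARED; /* 1 = shared, 0 = exclusive */
    }

    int main(void)
    {
        printf("%d %d %d\n", get_sharing(ACCESS_UNKNOWN),
               get_sharing(ACCESS_EXCLUSIVE), get_sharing(ACCESS_SHARED));
        return 0;
    }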
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index d597e432e195..ab19adb07a12 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c | |||
| @@ -1750,7 +1750,7 @@ aoecmd_init(void) | |||
| 1750 | int ret; | 1750 | int ret; |
| 1751 | 1751 | ||
| 1752 | /* get_zeroed_page returns page with ref count 1 */ | 1752 | /* get_zeroed_page returns page with ref count 1 */ |
| 1753 | p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); | 1753 | p = (void *) get_zeroed_page(GFP_KERNEL); |
| 1754 | if (!p) | 1754 | if (!p) |
| 1755 | return -ENOMEM; | 1755 | return -ENOMEM; |
| 1756 | empty_page = virt_to_page(p); | 1756 | empty_page = virt_to_page(p); |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 31e73a7a40f2..6a48ed41963f 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
| @@ -941,7 +941,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd) | |||
| 941 | debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize); | 941 | debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize); |
| 942 | debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout); | 942 | debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout); |
| 943 | debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize); | 943 | debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize); |
| 944 | debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops); | 944 | debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops); |
| 945 | 945 | ||
| 946 | return 0; | 946 | return 0; |
| 947 | } | 947 | } |
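The one-character nbd fix is the classic "&ptr vs ptr" slip: debugfs_create_file() should receive the nbd_device pointer as its private data, but the code passed the address of the local pointer variable, which is meaningless once the function returns. A short sketch of the difference, with invented names:

    #include <stdio.h>

    struct dev {
        int id;
    };

    static void save_private(void **slot, void *data)
    {
        *slot = data;   /* stores whatever pointer the caller hands over */
    }

    int main(void)
    {
        struct dev d = { .id = 42 };
        struct dev *nbd = &d;
        void *priv;

        save_private(&priv, nbd);     /* right: priv points at the device */
        printf("%d\n", ((struct dev *)priv)->id);

        save_private(&priv, &nbd);    /* wrong: priv points at the stack slot holding nbd */
        printf("%d\n", (*(struct dev **)priv)->id);  /* only works while 'nbd' is in scope */
        return 0;
    }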
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index ca13df854639..2e6d1e9c3345 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
| @@ -874,8 +874,12 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 874 | const struct blk_mq_queue_data *qd) | 874 | const struct blk_mq_queue_data *qd) |
| 875 | { | 875 | { |
| 876 | unsigned long flags; | 876 | unsigned long flags; |
| 877 | struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data; | 877 | int qid = hctx->queue_num; |
| 878 | struct blkfront_info *info = hctx->queue->queuedata; | ||
| 879 | struct blkfront_ring_info *rinfo = NULL; | ||
| 878 | 880 | ||
| 881 | BUG_ON(info->nr_rings <= qid); | ||
| 882 | rinfo = &info->rinfo[qid]; | ||
| 879 | blk_mq_start_request(qd->rq); | 883 | blk_mq_start_request(qd->rq); |
| 880 | spin_lock_irqsave(&rinfo->ring_lock, flags); | 884 | spin_lock_irqsave(&rinfo->ring_lock, flags); |
| 881 | if (RING_FULL(&rinfo->ring)) | 885 | if (RING_FULL(&rinfo->ring)) |
| @@ -901,20 +905,9 @@ out_busy: | |||
| 901 | return BLK_MQ_RQ_QUEUE_BUSY; | 905 | return BLK_MQ_RQ_QUEUE_BUSY; |
| 902 | } | 906 | } |
| 903 | 907 | ||
| 904 | static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, | ||
| 905 | unsigned int index) | ||
| 906 | { | ||
| 907 | struct blkfront_info *info = (struct blkfront_info *)data; | ||
| 908 | |||
| 909 | BUG_ON(info->nr_rings <= index); | ||
| 910 | hctx->driver_data = &info->rinfo[index]; | ||
| 911 | return 0; | ||
| 912 | } | ||
| 913 | |||
| 914 | static struct blk_mq_ops blkfront_mq_ops = { | 908 | static struct blk_mq_ops blkfront_mq_ops = { |
| 915 | .queue_rq = blkif_queue_rq, | 909 | .queue_rq = blkif_queue_rq, |
| 916 | .map_queue = blk_mq_map_queue, | 910 | .map_queue = blk_mq_map_queue, |
| 917 | .init_hctx = blk_mq_init_hctx, | ||
| 918 | }; | 911 | }; |
| 919 | 912 | ||
| 920 | static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, | 913 | static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, |
| @@ -950,6 +943,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, | |||
| 950 | return PTR_ERR(rq); | 943 | return PTR_ERR(rq); |
| 951 | } | 944 | } |
| 952 | 945 | ||
| 946 | rq->queuedata = info; | ||
| 953 | queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); | 947 | queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); |
| 954 | 948 | ||
| 955 | if (info->feature_discard) { | 949 | if (info->feature_discard) { |
| @@ -2149,6 +2143,8 @@ static int blkfront_resume(struct xenbus_device *dev) | |||
| 2149 | return err; | 2143 | return err; |
| 2150 | 2144 | ||
| 2151 | err = talk_to_blkback(dev, info); | 2145 | err = talk_to_blkback(dev, info); |
| 2146 | if (!err) | ||
| 2147 | blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); | ||
| 2152 | 2148 | ||
| 2153 | /* | 2149 | /* |
| 2154 | * We have to wait for the backend to switch to | 2150 | * We have to wait for the backend to switch to |
| @@ -2485,10 +2481,23 @@ static void blkback_changed(struct xenbus_device *dev, | |||
| 2485 | break; | 2481 | break; |
| 2486 | 2482 | ||
| 2487 | case XenbusStateConnected: | 2483 | case XenbusStateConnected: |
| 2488 | if (dev->state != XenbusStateInitialised) { | 2484 | /* |
| 2485 | * talk_to_blkback sets state to XenbusStateInitialised | ||
| 2486 | * and blkfront_connect sets it to XenbusStateConnected | ||
| 2487 | * (if connection went OK). | ||
| 2488 | * | ||
| 2489 | * If the backend (or toolstack) decides to poke at backend | ||
| 2490 | * state (and re-trigger the watch by setting the state repeatedly | ||
| 2491 | * to XenbusStateConnected (4)) we need to deal with this. | ||
| 2492 | * This is allowed as this is used to communicate to the guest | ||
| 2493 | * that the size of disk has changed! | ||
| 2494 | */ | ||
| 2495 | if ((dev->state != XenbusStateInitialised) && | ||
| 2496 | (dev->state != XenbusStateConnected)) { | ||
| 2489 | if (talk_to_blkback(dev, info)) | 2497 | if (talk_to_blkback(dev, info)) |
| 2490 | break; | 2498 | break; |
| 2491 | } | 2499 | } |
| 2500 | |||
| 2492 | blkfront_connect(info); | 2501 | blkfront_connect(info); |
| 2493 | break; | 2502 | break; |
| 2494 | 2503 | ||
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 94fb407d8561..44b1bd6baa38 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
| @@ -3820,6 +3820,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf) | |||
| 3820 | while (!list_empty(&intf->waiting_rcv_msgs)) { | 3820 | while (!list_empty(&intf->waiting_rcv_msgs)) { |
| 3821 | smi_msg = list_entry(intf->waiting_rcv_msgs.next, | 3821 | smi_msg = list_entry(intf->waiting_rcv_msgs.next, |
| 3822 | struct ipmi_smi_msg, link); | 3822 | struct ipmi_smi_msg, link); |
| 3823 | list_del(&smi_msg->link); | ||
| 3823 | if (!run_to_completion) | 3824 | if (!run_to_completion) |
| 3824 | spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, | 3825 | spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, |
| 3825 | flags); | 3826 | flags); |
| @@ -3829,11 +3830,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf) | |||
| 3829 | if (rv > 0) { | 3830 | if (rv > 0) { |
| 3830 | /* | 3831 | /* |
| 3831 | * To preserve message order, quit if we | 3832 | * To preserve message order, quit if we |
| 3832 | * can't handle a message. | 3833 | * can't handle a message. Add the message |
| 3834 | * back at the head, this is safe because this | ||
| 3835 | * tasklet is the only thing that pulls the | ||
| 3836 | * messages. | ||
| 3833 | */ | 3837 | */ |
| 3838 | list_add(&smi_msg->link, &intf->waiting_rcv_msgs); | ||
| 3834 | break; | 3839 | break; |
| 3835 | } else { | 3840 | } else { |
| 3836 | list_del(&smi_msg->link); | ||
| 3837 | if (rv == 0) | 3841 | if (rv == 0) |
| 3838 | /* Message handled */ | 3842 | /* Message handled */ |
| 3839 | ipmi_free_smi_msg(smi_msg); | 3843 | ipmi_free_smi_msg(smi_msg); |
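The ipmi_msghandler change reworks the receive loop's list discipline: each message is unlinked before the lock is dropped, and only re-added at the head if the handler could not take it, which preserves ordering without leaving the message on the list while it is being processed. A small userspace sketch of the pop / handle / push-back-on-failure pattern, on a plain singly linked list rather than the kernel's list_head:

    #include <stdio.h>
    #include <stdlib.h>

    struct msg {
        int id;
        struct msg *next;
    };

    /* Pop the head, try to handle it, and put it back at the head on failure
     * so ordering is preserved for the next pass. */
    static void process_queue(struct msg **head, int (*handle)(struct msg *))
    {
        while (*head) {
            struct msg *m = *head;

            *head = m->next;            /* unlink first */
            if (handle(m) > 0) {
                m->next = *head;        /* could not handle: re-queue at head */
                *head = m;
                break;
            }
            free(m);                    /* handled: drop it */
        }
    }

    static int handler(struct msg *m)
    {
        return m->id == 3;  /* pretend message 3 cannot be handled yet */
    }

    int main(void)
    {
        struct msg *head = NULL;
        for (int i = 4; i >= 1; i--) {
            struct msg *m = malloc(sizeof(*m));
            m->id = i;
            m->next = head;
            head = m;
        }
        process_queue(&head, handler);
        printf("stopped at %d\n", head ? head->id : -1);   /* stopped at 3 */
        while (head) { struct msg *n = head->next; free(head); head = n; }
        return 0;
    }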
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 53ddba26578c..98efbfcdb503 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig | |||
| @@ -175,6 +175,7 @@ config COMMON_CLK_KEYSTONE | |||
| 175 | config COMMON_CLK_NXP | 175 | config COMMON_CLK_NXP |
| 176 | def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX) | 176 | def_bool COMMON_CLK && (ARCH_LPC18XX || ARCH_LPC32XX) |
| 177 | select REGMAP_MMIO if ARCH_LPC32XX | 177 | select REGMAP_MMIO if ARCH_LPC32XX |
| 178 | select MFD_SYSCON if ARCH_LPC18XX | ||
| 178 | ---help--- | 179 | ---help--- |
| 179 | Support for clock providers on NXP platforms. | 180 | Support for clock providers on NXP platforms. |
| 180 | 181 | ||
diff --git a/drivers/clk/microchip/clk-pic32mzda.c b/drivers/clk/microchip/clk-pic32mzda.c index 020a29acc5b0..51f54380474b 100644 --- a/drivers/clk/microchip/clk-pic32mzda.c +++ b/drivers/clk/microchip/clk-pic32mzda.c | |||
| @@ -180,15 +180,15 @@ static int pic32mzda_clk_probe(struct platform_device *pdev) | |||
| 180 | 180 | ||
| 181 | /* register fixed rate clocks */ | 181 | /* register fixed rate clocks */ |
| 182 | clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL, | 182 | clks[POSCCLK] = clk_register_fixed_rate(&pdev->dev, "posc_clk", NULL, |
| 183 | CLK_IS_ROOT, 24000000); | 183 | 0, 24000000); |
| 184 | clks[FRCCLK] = clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL, | 184 | clks[FRCCLK] = clk_register_fixed_rate(&pdev->dev, "frc_clk", NULL, |
| 185 | CLK_IS_ROOT, 8000000); | 185 | 0, 8000000); |
| 186 | clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL, | 186 | clks[BFRCCLK] = clk_register_fixed_rate(&pdev->dev, "bfrc_clk", NULL, |
| 187 | CLK_IS_ROOT, 8000000); | 187 | 0, 8000000); |
| 188 | clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL, | 188 | clks[LPRCCLK] = clk_register_fixed_rate(&pdev->dev, "lprc_clk", NULL, |
| 189 | CLK_IS_ROOT, 32000); | 189 | 0, 32000); |
| 190 | clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL, | 190 | clks[UPLLCLK] = clk_register_fixed_rate(&pdev->dev, "usbphy_clk", NULL, |
| 191 | CLK_IS_ROOT, 24000000); | 191 | 0, 24000000); |
| 192 | /* fixed rate (optional) clock */ | 192 | /* fixed rate (optional) clock */ |
| 193 | if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) { | 193 | if (of_find_property(np, "microchip,pic32mzda-sosc", NULL)) { |
| 194 | pr_info("pic32-clk: dt requests SOSC.\n"); | 194 | pr_info("pic32-clk: dt requests SOSC.\n"); |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 36bc11a106aa..9009295f5134 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -1832,7 +1832,7 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier); | |||
| 1832 | unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, | 1832 | unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy, |
| 1833 | unsigned int target_freq) | 1833 | unsigned int target_freq) |
| 1834 | { | 1834 | { |
| 1835 | clamp_val(target_freq, policy->min, policy->max); | 1835 | target_freq = clamp_val(target_freq, policy->min, policy->max); |
| 1836 | 1836 | ||
| 1837 | return cpufreq_driver->fast_switch(policy, target_freq); | 1837 | return cpufreq_driver->fast_switch(policy, target_freq); |
| 1838 | } | 1838 | } |
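The cpufreq_driver_fast_switch() fix is a reminder that clamp_val() is a pure expression: it returns the clamped value and leaves its arguments untouched, so discarding the result means no clamping happens at all. A tiny sketch with a local clamp macro standing in for the kernel's clamp_val():

    #include <stdio.h>

    /* A pure clamp: evaluates to the bounded value, modifies nothing. */
    #define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

    int main(void)
    {
        unsigned int target = 5000;

        CLAMP(target, 1000, 3000);            /* result thrown away: target is still 5000 */
        printf("%u\n", target);

        target = CLAMP(target, 1000, 3000);   /* the fix: assign the result */
        printf("%u\n", target);
        return 0;
    }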
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 3a9c4325d6e2..fe9dc17ea873 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -372,26 +372,9 @@ static bool intel_pstate_get_ppc_enable_status(void) | |||
| 372 | return acpi_ppc; | 372 | return acpi_ppc; |
| 373 | } | 373 | } |
| 374 | 374 | ||
| 375 | /* | ||
| 376 | * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and | ||
| 377 | * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and | ||
| 378 | * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state | ||
| 379 | * ratio, out of it only high 8 bits are used. For example 0x1700 is setting | ||
| 380 | * target ratio 0x17. The _PSS control value stores in a format which can be | ||
| 381 | * directly written to PERF_CTL MSR. But in intel_pstate driver this shift | ||
| 382 | * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()). | ||
| 383 | * This function converts the _PSS control value to intel pstate driver format | ||
| 384 | * for comparison and assignment. | ||
| 385 | */ | ||
| 386 | static int convert_to_native_pstate_format(struct cpudata *cpu, int index) | ||
| 387 | { | ||
| 388 | return cpu->acpi_perf_data.states[index].control >> 8; | ||
| 389 | } | ||
| 390 | |||
| 391 | static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) | 375 | static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) |
| 392 | { | 376 | { |
| 393 | struct cpudata *cpu; | 377 | struct cpudata *cpu; |
| 394 | int turbo_pss_ctl; | ||
| 395 | int ret; | 378 | int ret; |
| 396 | int i; | 379 | int i; |
| 397 | 380 | ||
| @@ -441,15 +424,14 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) | |||
| 441 | * max frequency, which will cause a reduced performance as | 424 | * max frequency, which will cause a reduced performance as |
| 442 | * this driver uses real max turbo frequency as the max | 425 | * this driver uses real max turbo frequency as the max |
| 443 | * frequency. So correct this frequency in _PSS table to | 426 | * frequency. So correct this frequency in _PSS table to |
| 444 | * correct max turbo frequency based on the turbo ratio. | 427 | * correct max turbo frequency based on the turbo state. |
| 445 | * Also need to convert to MHz as _PSS freq is in MHz. | 428 | * Also need to convert to MHz as _PSS freq is in MHz. |
| 446 | */ | 429 | */ |
| 447 | turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0); | 430 | if (!limits->turbo_disabled) |
| 448 | if (turbo_pss_ctl > cpu->pstate.max_pstate) | ||
| 449 | cpu->acpi_perf_data.states[0].core_frequency = | 431 | cpu->acpi_perf_data.states[0].core_frequency = |
| 450 | policy->cpuinfo.max_freq / 1000; | 432 | policy->cpuinfo.max_freq / 1000; |
| 451 | cpu->valid_pss_table = true; | 433 | cpu->valid_pss_table = true; |
| 452 | pr_info("_PPC limits will be enforced\n"); | 434 | pr_debug("_PPC limits will be enforced\n"); |
| 453 | 435 | ||
| 454 | return; | 436 | return; |
| 455 | 437 | ||
| @@ -1460,6 +1442,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 1460 | 1442 | ||
| 1461 | intel_pstate_clear_update_util_hook(policy->cpu); | 1443 | intel_pstate_clear_update_util_hook(policy->cpu); |
| 1462 | 1444 | ||
| 1445 | pr_debug("set_policy cpuinfo.max %u policy->max %u\n", | ||
| 1446 | policy->cpuinfo.max_freq, policy->max); | ||
| 1447 | |||
| 1463 | cpu = all_cpu_data[0]; | 1448 | cpu = all_cpu_data[0]; |
| 1464 | if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && | 1449 | if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && |
| 1465 | policy->max < policy->cpuinfo.max_freq && | 1450 | policy->max < policy->cpuinfo.max_freq && |
| @@ -1495,13 +1480,13 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 1495 | limits->max_sysfs_pct); | 1480 | limits->max_sysfs_pct); |
| 1496 | limits->max_perf_pct = max(limits->min_policy_pct, | 1481 | limits->max_perf_pct = max(limits->min_policy_pct, |
| 1497 | limits->max_perf_pct); | 1482 | limits->max_perf_pct); |
| 1498 | limits->max_perf = round_up(limits->max_perf, FRAC_BITS); | ||
| 1499 | 1483 | ||
| 1500 | /* Make sure min_perf_pct <= max_perf_pct */ | 1484 | /* Make sure min_perf_pct <= max_perf_pct */ |
| 1501 | limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); | 1485 | limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); |
| 1502 | 1486 | ||
| 1503 | limits->min_perf = div_fp(limits->min_perf_pct, 100); | 1487 | limits->min_perf = div_fp(limits->min_perf_pct, 100); |
| 1504 | limits->max_perf = div_fp(limits->max_perf_pct, 100); | 1488 | limits->max_perf = div_fp(limits->max_perf_pct, 100); |
| 1489 | limits->max_perf = round_up(limits->max_perf, FRAC_BITS); | ||
| 1505 | 1490 | ||
| 1506 | out: | 1491 | out: |
| 1507 | intel_pstate_set_update_util_hook(policy->cpu); | 1492 | intel_pstate_set_update_util_hook(policy->cpu); |
| @@ -1558,8 +1543,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) | |||
| 1558 | 1543 | ||
| 1559 | /* cpuinfo and default policy values */ | 1544 | /* cpuinfo and default policy values */ |
| 1560 | policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; | 1545 | policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; |
| 1561 | policy->cpuinfo.max_freq = | 1546 | update_turbo_state(); |
| 1562 | cpu->pstate.turbo_pstate * cpu->pstate.scaling; | 1547 | policy->cpuinfo.max_freq = limits->turbo_disabled ? |
| 1548 | cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; | ||
| 1549 | policy->cpuinfo.max_freq *= cpu->pstate.scaling; | ||
| 1550 | |||
| 1563 | intel_pstate_init_acpi_perf_limits(policy); | 1551 | intel_pstate_init_acpi_perf_limits(policy); |
| 1564 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 1552 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
| 1565 | cpumask_set_cpu(policy->cpu, policy->cpus); | 1553 | cpumask_set_cpu(policy->cpu, policy->cpus); |
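The max_perf hunk above only moves round_up() so it runs after the div_fp() conversion, meaning the rounding is applied to the freshly computed fixed-point value rather than a stale one. A minimal user-space sketch of that ordering, assuming FRAC_BITS is 8 as in intel_pstate; div_fp()/round_up_fp() below are stand-ins, not the kernel helpers:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8

/* stand-in for the driver's div_fp(): percent -> 8.8 fixed point */
static int32_t div_fp(int32_t x, int32_t y)
{
	return (int32_t)(((int64_t)x << FRAC_BITS) / y);
}

/* stand-in for round_up(value, FRAC_BITS) */
static int32_t round_up_fp(int32_t x, int32_t step)
{
	return (x + step - 1) / step * step;
}

int main(void)
{
	int32_t max_perf_pct = 97;
	/* convert first, then round: the order the patch enforces */
	int32_t max_perf = round_up_fp(div_fp(max_perf_pct, 100), FRAC_BITS);

	printf("max_perf (8.8 fixed point) = %d\n", max_perf);
	return 0;
}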
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index 808a320e9d5d..a7ecb9a84c15 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c | |||
| @@ -487,7 +487,7 @@ static int __init pcc_cpufreq_probe(void) | |||
| 487 | doorbell.space_id = reg_resource->space_id; | 487 | doorbell.space_id = reg_resource->space_id; |
| 488 | doorbell.bit_width = reg_resource->bit_width; | 488 | doorbell.bit_width = reg_resource->bit_width; |
| 489 | doorbell.bit_offset = reg_resource->bit_offset; | 489 | doorbell.bit_offset = reg_resource->bit_offset; |
| 490 | doorbell.access_width = 64; | 490 | doorbell.access_width = 4; |
| 491 | doorbell.address = reg_resource->address; | 491 | doorbell.address = reg_resource->address; |
| 492 | 492 | ||
| 493 | pr_debug("probe: doorbell: space_id is %d, bit_width is %d, " | 493 | pr_debug("probe: doorbell: space_id is %d, bit_width is %d, " |
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c index 52c7395cb8d8..0d0d4529ee36 100644 --- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c +++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c | |||
| @@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, | |||
| 122 | struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); | 122 | struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); |
| 123 | struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); | 123 | struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); |
| 124 | unsigned int unit; | 124 | unsigned int unit; |
| 125 | u32 unit_size; | ||
| 125 | int ret; | 126 | int ret; |
| 126 | 127 | ||
| 127 | if (!ctx->u.aes.key_len) | 128 | if (!ctx->u.aes.key_len) |
| @@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, | |||
| 133 | if (!req->info) | 134 | if (!req->info) |
| 134 | return -EINVAL; | 135 | return -EINVAL; |
| 135 | 136 | ||
| 136 | for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) | 137 | unit_size = CCP_XTS_AES_UNIT_SIZE__LAST; |
| 137 | if (!(req->nbytes & (unit_size_map[unit].size - 1))) | 138 | if (req->nbytes <= unit_size_map[0].size) { |
| 138 | break; | 139 | for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) { |
| 140 | if (!(req->nbytes & (unit_size_map[unit].size - 1))) { | ||
| 141 | unit_size = unit_size_map[unit].value; | ||
| 142 | break; | ||
| 143 | } | ||
| 144 | } | ||
| 145 | } | ||
| 139 | 146 | ||
| 140 | if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) || | 147 | if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) || |
| 141 | (ctx->u.aes.key_len != AES_KEYSIZE_128)) { | 148 | (ctx->u.aes.key_len != AES_KEYSIZE_128)) { |
| 142 | /* Use the fallback to process the request for any | 149 | /* Use the fallback to process the request for any |
| 143 | * unsupported unit sizes or key sizes | 150 | * unsupported unit sizes or key sizes |
| @@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req, | |||
| 158 | rctx->cmd.engine = CCP_ENGINE_XTS_AES_128; | 165 | rctx->cmd.engine = CCP_ENGINE_XTS_AES_128; |
| 159 | rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT | 166 | rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT |
| 160 | : CCP_AES_ACTION_DECRYPT; | 167 | : CCP_AES_ACTION_DECRYPT; |
| 161 | rctx->cmd.u.xts.unit_size = unit_size_map[unit].value; | 168 | rctx->cmd.u.xts.unit_size = unit_size; |
| 162 | rctx->cmd.u.xts.key = &ctx->u.aes.key_sg; | 169 | rctx->cmd.u.xts.key = &ctx->u.aes.key_sg; |
| 163 | rctx->cmd.u.xts.key_len = ctx->u.aes.key_len; | 170 | rctx->cmd.u.xts.key_len = ctx->u.aes.key_len; |
| 164 | rctx->cmd.u.xts.iv = &rctx->iv_sg; | 171 | rctx->cmd.u.xts.iv = &rctx->iv_sg; |
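The ccp-aes-xts change above keeps the original modulo test but seeds the result with the "unsupported" sentinel, so a request that matches no table entry can no longer index one past the end of unit_size_map. A hedged, self-contained sketch of that lookup pattern; the table contents below are illustrative, and the largest-first ordering is what makes the nbytes <= unit_size_map[0].size guard meaningful:

#include <stddef.h>
#include <stdio.h>

struct unit_entry {
	unsigned int size;
	unsigned int value;
};

#define UNIT_SIZE_UNSUPPORTED 0xffffffffu

/* illustrative table, largest unit first */
static const struct unit_entry unit_map[] = {
	{ 4096, 5 },
	{ 2048, 4 },
	{  512, 1 },
	{   16, 0 },
};

static unsigned int lookup_unit_size(unsigned int nbytes)
{
	unsigned int value = UNIT_SIZE_UNSUPPORTED;	/* default: fall back */
	size_t i;

	if (nbytes <= unit_map[0].size) {
		for (i = 0; i < sizeof(unit_map) / sizeof(unit_map[0]); i++) {
			if (!(nbytes & (unit_map[i].size - 1))) {
				value = unit_map[i].value;
				break;
			}
		}
	}
	return value;	/* caller uses the fallback path when unsupported */
}

int main(void)
{
	printf("512B  -> %u\n", lookup_unit_size(512));
	printf("1000B -> %#x\n", lookup_unit_size(1000));	/* unsupported */
	return 0;
}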
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 6eefaa2fe58f..63464e86f2b1 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c | |||
| @@ -1986,7 +1986,7 @@ err_algs: | |||
| 1986 | &dd->pdata->algs_info[i].algs_list[j]); | 1986 | &dd->pdata->algs_info[i].algs_list[j]); |
| 1987 | err_pm: | 1987 | err_pm: |
| 1988 | pm_runtime_disable(dev); | 1988 | pm_runtime_disable(dev); |
| 1989 | if (dd->polling_mode) | 1989 | if (!dd->polling_mode) |
| 1990 | dma_release_channel(dd->dma_lch); | 1990 | dma_release_channel(dd->dma_lch); |
| 1991 | data_err: | 1991 | data_err: |
| 1992 | dev_err(dev, "initialization failed.\n"); | 1992 | dev_err(dev, "initialization failed.\n"); |
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 1d6c803804d5..e92418facc92 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c | |||
| @@ -268,8 +268,11 @@ int update_devfreq(struct devfreq *devfreq) | |||
| 268 | devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE); | 268 | devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE); |
| 269 | 269 | ||
| 270 | err = devfreq->profile->target(devfreq->dev.parent, &freq, flags); | 270 | err = devfreq->profile->target(devfreq->dev.parent, &freq, flags); |
| 271 | if (err) | 271 | if (err) { |
| 272 | freqs.new = cur_freq; | ||
| 273 | devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE); | ||
| 272 | return err; | 274 | return err; |
| 275 | } | ||
| 273 | 276 | ||
| 274 | freqs.new = freq; | 277 | freqs.new = freq; |
| 275 | devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE); | 278 | devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE); |
| @@ -552,6 +555,7 @@ struct devfreq *devfreq_add_device(struct device *dev, | |||
| 552 | devfreq->profile = profile; | 555 | devfreq->profile = profile; |
| 553 | strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN); | 556 | strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN); |
| 554 | devfreq->previous_freq = profile->initial_freq; | 557 | devfreq->previous_freq = profile->initial_freq; |
| 558 | devfreq->last_status.current_frequency = profile->initial_freq; | ||
| 555 | devfreq->data = data; | 559 | devfreq->data = data; |
| 556 | devfreq->nb.notifier_call = devfreq_notifier_call; | 560 | devfreq->nb.notifier_call = devfreq_notifier_call; |
| 557 | 561 | ||
| @@ -561,23 +565,22 @@ struct devfreq *devfreq_add_device(struct device *dev, | |||
| 561 | mutex_lock(&devfreq->lock); | 565 | mutex_lock(&devfreq->lock); |
| 562 | } | 566 | } |
| 563 | 567 | ||
| 564 | devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) * | ||
| 565 | devfreq->profile->max_state * | ||
| 566 | devfreq->profile->max_state, | ||
| 567 | GFP_KERNEL); | ||
| 568 | devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) * | ||
| 569 | devfreq->profile->max_state, | ||
| 570 | GFP_KERNEL); | ||
| 571 | devfreq->last_stat_updated = jiffies; | ||
| 572 | |||
| 573 | dev_set_name(&devfreq->dev, "%s", dev_name(dev)); | 568 | dev_set_name(&devfreq->dev, "%s", dev_name(dev)); |
| 574 | err = device_register(&devfreq->dev); | 569 | err = device_register(&devfreq->dev); |
| 575 | if (err) { | 570 | if (err) { |
| 576 | put_device(&devfreq->dev); | ||
| 577 | mutex_unlock(&devfreq->lock); | 571 | mutex_unlock(&devfreq->lock); |
| 578 | goto err_out; | 572 | goto err_out; |
| 579 | } | 573 | } |
| 580 | 574 | ||
| 575 | devfreq->trans_table = devm_kzalloc(&devfreq->dev, sizeof(unsigned int) * | ||
| 576 | devfreq->profile->max_state * | ||
| 577 | devfreq->profile->max_state, | ||
| 578 | GFP_KERNEL); | ||
| 579 | devfreq->time_in_state = devm_kzalloc(&devfreq->dev, sizeof(unsigned long) * | ||
| 580 | devfreq->profile->max_state, | ||
| 581 | GFP_KERNEL); | ||
| 582 | devfreq->last_stat_updated = jiffies; | ||
| 583 | |||
| 581 | srcu_init_notifier_head(&devfreq->transition_notifier_list); | 584 | srcu_init_notifier_head(&devfreq->transition_notifier_list); |
| 582 | 585 | ||
| 583 | mutex_unlock(&devfreq->lock); | 586 | mutex_unlock(&devfreq->lock); |
| @@ -603,7 +606,6 @@ struct devfreq *devfreq_add_device(struct device *dev, | |||
| 603 | err_init: | 606 | err_init: |
| 604 | list_del(&devfreq->node); | 607 | list_del(&devfreq->node); |
| 605 | device_unregister(&devfreq->dev); | 608 | device_unregister(&devfreq->dev); |
| 606 | kfree(devfreq); | ||
| 607 | err_out: | 609 | err_out: |
| 608 | return ERR_PTR(err); | 610 | return ERR_PTR(err); |
| 609 | } | 611 | } |
| @@ -621,7 +623,6 @@ int devfreq_remove_device(struct devfreq *devfreq) | |||
| 621 | return -EINVAL; | 623 | return -EINVAL; |
| 622 | 624 | ||
| 623 | device_unregister(&devfreq->dev); | 625 | device_unregister(&devfreq->dev); |
| 624 | put_device(&devfreq->dev); | ||
| 625 | 626 | ||
| 626 | return 0; | 627 | return 0; |
| 627 | } | 628 | } |
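The update_devfreq() hunk above pairs every DEVFREQ_PRECHANGE notification with a DEVFREQ_POSTCHANGE one even when profile->target() fails, reporting the unchanged frequency as the "new" value. A toy model of that balanced-notification rule; the names below are illustrative and not the devfreq API:

#include <stdio.h>

struct freqs {
	unsigned long old;
	unsigned long new;
};

/* pretend hardware hook: rejects a frequency of zero */
static int set_target(unsigned long freq)
{
	return freq ? 0 : -22;	/* -EINVAL */
}

static int update(unsigned long cur_freq, unsigned long want)
{
	struct freqs freqs = { .old = cur_freq, .new = want };
	int err;

	printf("PRECHANGE  %lu -> %lu\n", freqs.old, freqs.new);
	err = set_target(want);
	if (err) {
		/* failed: report "no change" so notifiers stay balanced */
		freqs.new = cur_freq;
		printf("POSTCHANGE %lu -> %lu\n", freqs.old, freqs.new);
		return err;
	}
	printf("POSTCHANGE %lu -> %lu\n", freqs.old, freqs.new);
	return 0;
}

int main(void)
{
	update(100, 200);	/* succeeds */
	update(200, 0);		/* fails, still emits POSTCHANGE */
	return 0;
}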
diff --git a/drivers/devfreq/event/exynos-nocp.c b/drivers/devfreq/event/exynos-nocp.c index 6b6a5f310486..a5841403bde8 100644 --- a/drivers/devfreq/event/exynos-nocp.c +++ b/drivers/devfreq/event/exynos-nocp.c | |||
| @@ -220,9 +220,6 @@ static int exynos_nocp_parse_dt(struct platform_device *pdev, | |||
| 220 | 220 | ||
| 221 | /* Maps the memory mapped IO to control nocp register */ | 221 | /* Maps the memory mapped IO to control nocp register */ |
| 222 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 222 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 223 | if (IS_ERR(res)) | ||
| 224 | return PTR_ERR(res); | ||
| 225 | |||
| 226 | base = devm_ioremap_resource(dev, res); | 223 | base = devm_ioremap_resource(dev, res); |
| 227 | if (IS_ERR(base)) | 224 | if (IS_ERR(base)) |
| 228 | return PTR_ERR(base); | 225 | return PTR_ERR(base); |
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 4a2c07ee6677..6355ab38d630 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/seq_file.h> | 33 | #include <linux/seq_file.h> |
| 34 | #include <linux/poll.h> | 34 | #include <linux/poll.h> |
| 35 | #include <linux/reservation.h> | 35 | #include <linux/reservation.h> |
| 36 | #include <linux/mm.h> | ||
| 36 | 37 | ||
| 37 | #include <uapi/linux/dma-buf.h> | 38 | #include <uapi/linux/dma-buf.h> |
| 38 | 39 | ||
| @@ -90,7 +91,7 @@ static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma) | |||
| 90 | dmabuf = file->private_data; | 91 | dmabuf = file->private_data; |
| 91 | 92 | ||
| 92 | /* check for overflowing the buffer's size */ | 93 | /* check for overflowing the buffer's size */ |
| 93 | if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > | 94 | if (vma->vm_pgoff + vma_pages(vma) > |
| 94 | dmabuf->size >> PAGE_SHIFT) | 95 | dmabuf->size >> PAGE_SHIFT) |
| 95 | return -EINVAL; | 96 | return -EINVAL; |
| 96 | 97 | ||
| @@ -723,11 +724,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma, | |||
| 723 | return -EINVAL; | 724 | return -EINVAL; |
| 724 | 725 | ||
| 725 | /* check for offset overflow */ | 726 | /* check for offset overflow */ |
| 726 | if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff) | 727 | if (pgoff + vma_pages(vma) < pgoff) |
| 727 | return -EOVERFLOW; | 728 | return -EOVERFLOW; |
| 728 | 729 | ||
| 729 | /* check for overflowing the buffer's size */ | 730 | /* check for overflowing the buffer's size */ |
| 730 | if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) > | 731 | if (pgoff + vma_pages(vma) > |
| 731 | dmabuf->size >> PAGE_SHIFT) | 732 | dmabuf->size >> PAGE_SHIFT) |
| 732 | return -EINVAL; | 733 | return -EINVAL; |
| 733 | 734 | ||
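Both dma-buf checks above now use vma_pages(), which per <linux/mm.h> is simply (vma->vm_end - vma->vm_start) >> PAGE_SHIFT. A small stand-alone sketch of the two guards, wrap-around first and buffer-size second, using assumed page-count inputs instead of a real vm_area_struct:

#include <errno.h>
#include <stdio.h>

/* pgoff, npages and buf_pages are all in units of pages */
static int check_mmap_range(unsigned long pgoff, unsigned long npages,
			    unsigned long buf_pages)
{
	if (pgoff + npages < pgoff)		/* offset overflow (wrap) */
		return -EOVERFLOW;
	if (pgoff + npages > buf_pages)		/* past the end of the buffer */
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", check_mmap_range(0, 16, 16));	/* ok     */
	printf("%d\n", check_mmap_range(8, 16, 16));	/* -EINVAL */
	printf("%d\n", check_mmap_range(~0UL, 2, 16));	/* -EOVERFLOW */
	return 0;
}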
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c index c0bd5722c997..9566a62ad8e3 100644 --- a/drivers/dma-buf/reservation.c +++ b/drivers/dma-buf/reservation.c | |||
| @@ -35,6 +35,17 @@ | |||
| 35 | #include <linux/reservation.h> | 35 | #include <linux/reservation.h> |
| 36 | #include <linux/export.h> | 36 | #include <linux/export.h> |
| 37 | 37 | ||
| 38 | /** | ||
| 39 | * DOC: Reservation Object Overview | ||
| 40 | * | ||
| 41 | * The reservation object provides a mechanism to manage shared and | ||
| 42 | * exclusive fences associated with a buffer. A reservation object | ||
| 43 | * can have attached one exclusive fence (normally associated with | ||
| 44 | * write operations) or N shared fences (read operations). The RCU | ||
| 45 | * mechanism is used to protect read access to fences from locked | ||
| 46 | * write-side updates. | ||
| 47 | */ | ||
| 48 | |||
| 38 | DEFINE_WW_CLASS(reservation_ww_class); | 49 | DEFINE_WW_CLASS(reservation_ww_class); |
| 39 | EXPORT_SYMBOL(reservation_ww_class); | 50 | EXPORT_SYMBOL(reservation_ww_class); |
| 40 | 51 | ||
| @@ -43,9 +54,17 @@ EXPORT_SYMBOL(reservation_seqcount_class); | |||
| 43 | 54 | ||
| 44 | const char reservation_seqcount_string[] = "reservation_seqcount"; | 55 | const char reservation_seqcount_string[] = "reservation_seqcount"; |
| 45 | EXPORT_SYMBOL(reservation_seqcount_string); | 56 | EXPORT_SYMBOL(reservation_seqcount_string); |
| 46 | /* | 57 | |
| 47 | * Reserve space to add a shared fence to a reservation_object, | 58 | /** |
| 48 | * must be called with obj->lock held. | 59 | * reservation_object_reserve_shared - Reserve space to add a shared |
| 60 | * fence to a reservation_object. | ||
| 61 | * @obj: reservation object | ||
| 62 | * | ||
| 63 | * Should be called before reservation_object_add_shared_fence(). Must | ||
| 64 | * be called with obj->lock held. | ||
| 65 | * | ||
| 66 | * RETURNS | ||
| 67 | * Zero for success, or -errno | ||
| 49 | */ | 68 | */ |
| 50 | int reservation_object_reserve_shared(struct reservation_object *obj) | 69 | int reservation_object_reserve_shared(struct reservation_object *obj) |
| 51 | { | 70 | { |
| @@ -180,7 +199,11 @@ done: | |||
| 180 | fence_put(old_fence); | 199 | fence_put(old_fence); |
| 181 | } | 200 | } |
| 182 | 201 | ||
| 183 | /* | 202 | /** |
| 203 | * reservation_object_add_shared_fence - Add a fence to a shared slot | ||
| 204 | * @obj: the reservation object | ||
| 205 | * @fence: the shared fence to add | ||
| 206 | * | ||
| 184 | * Add a fence to a shared slot, obj->lock must be held, and | 207 | * Add a fence to a shared slot, obj->lock must be held, and |
| 185 | * reservation_object_reserve_shared_fence has been called. | 208 | * reservation_object_reserve_shared_fence has been called. |
| 186 | */ | 209 | */ |
| @@ -200,6 +223,13 @@ void reservation_object_add_shared_fence(struct reservation_object *obj, | |||
| 200 | } | 223 | } |
| 201 | EXPORT_SYMBOL(reservation_object_add_shared_fence); | 224 | EXPORT_SYMBOL(reservation_object_add_shared_fence); |
| 202 | 225 | ||
| 226 | /** | ||
| 227 | * reservation_object_add_excl_fence - Add an exclusive fence. | ||
| 228 | * @obj: the reservation object | ||
| 229 | * @fence: the exclusive fence to add | ||

| 230 | * | ||
| 231 | * Add a fence to the exclusive slot. The obj->lock must be held. | ||
| 232 | */ | ||
| 203 | void reservation_object_add_excl_fence(struct reservation_object *obj, | 233 | void reservation_object_add_excl_fence(struct reservation_object *obj, |
| 204 | struct fence *fence) | 234 | struct fence *fence) |
| 205 | { | 235 | { |
| @@ -233,6 +263,18 @@ void reservation_object_add_excl_fence(struct reservation_object *obj, | |||
| 233 | } | 263 | } |
| 234 | EXPORT_SYMBOL(reservation_object_add_excl_fence); | 264 | EXPORT_SYMBOL(reservation_object_add_excl_fence); |
| 235 | 265 | ||
| 266 | /** | ||
| 267 | * reservation_object_get_fences_rcu - Get an object's shared and exclusive | ||
| 268 | * fences without update side lock held | ||
| 269 | * @obj: the reservation object | ||
| 270 | * @pfence_excl: the returned exclusive fence (or NULL) | ||
| 271 | * @pshared_count: the number of shared fences returned | ||
| 272 | * @pshared: the array of shared fence ptrs returned (array is krealloc'd to | ||
| 273 | * the required size, and must be freed by caller) | ||
| 274 | * | ||
| 275 | * RETURNS | ||
| 276 | * Zero or -errno | ||
| 277 | */ | ||
| 236 | int reservation_object_get_fences_rcu(struct reservation_object *obj, | 278 | int reservation_object_get_fences_rcu(struct reservation_object *obj, |
| 237 | struct fence **pfence_excl, | 279 | struct fence **pfence_excl, |
| 238 | unsigned *pshared_count, | 280 | unsigned *pshared_count, |
| @@ -319,6 +361,18 @@ unlock: | |||
| 319 | } | 361 | } |
| 320 | EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); | 362 | EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu); |
| 321 | 363 | ||
| 364 | /** | ||
| 365 | * reservation_object_wait_timeout_rcu - Wait on reservation's objects | ||
| 366 | * shared and/or exclusive fences. | ||
| 367 | * @obj: the reservation object | ||
| 368 | * @wait_all: if true, wait on all fences, else wait on just exclusive fence | ||
| 369 | * @intr: if true, do interruptible wait | ||
| 370 | * @timeout: timeout value in jiffies or zero to return immediately | ||
| 371 | * | ||
| 372 | * RETURNS | ||
| 373 | * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or | ||
| 374 | * greater than zero on success. | ||
| 375 | */ | ||
| 322 | long reservation_object_wait_timeout_rcu(struct reservation_object *obj, | 376 | long reservation_object_wait_timeout_rcu(struct reservation_object *obj, |
| 323 | bool wait_all, bool intr, | 377 | bool wait_all, bool intr, |
| 324 | unsigned long timeout) | 378 | unsigned long timeout) |
| @@ -416,6 +470,16 @@ reservation_object_test_signaled_single(struct fence *passed_fence) | |||
| 416 | return ret; | 470 | return ret; |
| 417 | } | 471 | } |
| 418 | 472 | ||
| 473 | /** | ||
| 474 | * reservation_object_test_signaled_rcu - Test if a reservation object's | ||
| 475 | * fences have been signaled. | ||
| 476 | * @obj: the reservation object | ||
| 477 | * @test_all: if true, test all fences, otherwise only test the exclusive | ||
| 478 | * fence | ||
| 479 | * | ||
| 480 | * RETURNS | ||
| 481 | * true if all fences signaled, else false | ||
| 482 | */ | ||
| 419 | bool reservation_object_test_signaled_rcu(struct reservation_object *obj, | 483 | bool reservation_object_test_signaled_rcu(struct reservation_object *obj, |
| 420 | bool test_all) | 484 | bool test_all) |
| 421 | { | 485 | { |
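Since the new kerneldoc above spells out that reservation_object_reserve_shared() must precede reservation_object_add_shared_fence() and that both require obj->lock (a ww_mutex) to be held, a hedged usage outline looks roughly like the following. This is a sketch, not code from the patch; the fence argument is assumed to be already initialised by the caller, and error handling is elided:

#include <linux/reservation.h>
#include <linux/fence.h>
#include <linux/ww_mutex.h>

/* Attach one shared (read) fence to a buffer's reservation object. */
static int attach_shared_fence(struct reservation_object *obj,
			       struct fence *fence)
{
	int ret;

	ww_mutex_lock(&obj->lock, NULL);		/* obj->lock is a ww_mutex */
	ret = reservation_object_reserve_shared(obj);	/* make room first */
	if (!ret)
		reservation_object_add_shared_fence(obj, fence);
	ww_mutex_unlock(&obj->lock);

	return ret;
}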
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 8e304b1befc5..75bd6621dc5d 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c | |||
| @@ -242,7 +242,7 @@ struct at_xdmac_lld { | |||
| 242 | u32 mbr_dus; /* Destination Microblock Stride Register */ | 242 | u32 mbr_dus; /* Destination Microblock Stride Register */ |
| 243 | }; | 243 | }; |
| 244 | 244 | ||
| 245 | 245 | /* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */ | |
| 246 | struct at_xdmac_desc { | 246 | struct at_xdmac_desc { |
| 247 | struct at_xdmac_lld lld; | 247 | struct at_xdmac_lld lld; |
| 248 | enum dma_transfer_direction direction; | 248 | enum dma_transfer_direction direction; |
| @@ -253,7 +253,7 @@ struct at_xdmac_desc { | |||
| 253 | unsigned int xfer_size; | 253 | unsigned int xfer_size; |
| 254 | struct list_head descs_list; | 254 | struct list_head descs_list; |
| 255 | struct list_head xfer_node; | 255 | struct list_head xfer_node; |
| 256 | }; | 256 | } __aligned(sizeof(u64)); |
| 257 | 257 | ||
| 258 | static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb) | 258 | static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb) |
| 259 | { | 259 | { |
| @@ -1400,6 +1400,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
| 1400 | u32 cur_nda, check_nda, cur_ubc, mask, value; | 1400 | u32 cur_nda, check_nda, cur_ubc, mask, value; |
| 1401 | u8 dwidth = 0; | 1401 | u8 dwidth = 0; |
| 1402 | unsigned long flags; | 1402 | unsigned long flags; |
| 1403 | bool initd; | ||
| 1403 | 1404 | ||
| 1404 | ret = dma_cookie_status(chan, cookie, txstate); | 1405 | ret = dma_cookie_status(chan, cookie, txstate); |
| 1405 | if (ret == DMA_COMPLETE) | 1406 | if (ret == DMA_COMPLETE) |
| @@ -1424,7 +1425,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
| 1424 | residue = desc->xfer_size; | 1425 | residue = desc->xfer_size; |
| 1425 | /* | 1426 | /* |
| 1426 | * Flush FIFO: only relevant when the transfer is source peripheral | 1427 | * Flush FIFO: only relevant when the transfer is source peripheral |
| 1427 | * synchronized. | 1428 | * synchronized. Flush is needed before reading CUBC because data in |
| 1429 | * the FIFO are not reported by CUBC. Reporting a residue of the | ||
| 1430 | * transfer length while data remain in the FIFO can cause problems. | ||
| 1431 | * Use case: the Atmel USART has a timeout, meaning characters have | ||
| 1432 | * been received but no new character has arrived for a while. On | ||
| 1433 | * timeout, it requests the residue. If the data are still in the DMA | ||
| 1434 | * FIFO, we would report a residue equal to the transfer length, i.e. | ||
| 1435 | * no data received. An application waiting for these data would then | ||
| 1436 | * hang, since there will be no further USART timeout without new | ||
| 1437 | * incoming data. | ||
| 1428 | */ | 1438 | */ |
| 1429 | mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; | 1439 | mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; |
| 1430 | value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM; | 1440 | value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM; |
| @@ -1435,34 +1445,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
| 1435 | } | 1445 | } |
| 1436 | 1446 | ||
| 1437 | /* | 1447 | /* |
| 1438 | * When processing the residue, we need to read two registers but we | 1448 | * The easiest way to compute the residue would be to pause the DMA |
| 1439 | * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where | 1449 | * but doing this can lead to missing some data, as some devices don't |
| 1440 | * we stand in the descriptor list and AT_XDMAC_CUBC is used | 1450 | * have a FIFO. |
| 1441 | * to know how many data are remaining for the current descriptor. | 1451 | * We need to read several registers because: |
| 1442 | * Since the dma channel is not paused to not loose data, between the | 1452 | * - DMA is running therefore a descriptor change is possible while |
| 1443 | * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of | 1453 | * reading these registers |
| 1444 | * descriptor. | 1454 | * - When the block transfer is done, the value of the CUBC register |
| 1445 | * For that reason, after reading AT_XDMAC_CUBC, we check if we are | 1455 | * is set to its initial value until the fetch of the next descriptor. |
| 1446 | * still using the same descriptor by reading a second time | 1456 | * This value will corrupt the residue calculation so we have to skip |
| 1447 | * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to | 1457 | * it. |
| 1448 | * read again AT_XDMAC_CUBC. | 1458 | * |
| 1459 | * INITD -------- ------------ | ||
| 1460 | * |____________________| | ||
| 1461 | * _______________________ _______________ | ||
| 1462 | * NDA @desc2 \/ @desc3 | ||
| 1463 | * _______________________/\_______________ | ||
| 1464 | * __________ ___________ _______________ | ||
| 1465 | * CUBC 0 \/ MAX desc1 \/ MAX desc2 | ||
| 1466 | * __________/\___________/\_______________ | ||
| 1467 | * | ||
| 1468 | * Since descriptors are aligned on 64 bits, we can assume that | ||
| 1469 | * the update of NDA and CUBC is atomic. | ||
| 1449 | * Memory barriers are used to ensure the read order of the registers. | 1470 | * Memory barriers are used to ensure the read order of the registers. |
| 1450 | * A max number of retries is set because unlikely it can never ends if | 1471 | * A max number of retries is set because, although unlikely, it could otherwise never end. |
| 1451 | * we are transferring a lot of data with small buffers. | ||
| 1452 | */ | 1472 | */ |
| 1453 | cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; | ||
| 1454 | rmb(); | ||
| 1455 | cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); | ||
| 1456 | for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) { | 1473 | for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) { |
| 1457 | rmb(); | ||
| 1458 | check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; | 1474 | check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; |
| 1459 | 1475 | rmb(); | |
| 1460 | if (likely(cur_nda == check_nda)) | 1476 | initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD); |
| 1461 | break; | ||
| 1462 | |||
| 1463 | cur_nda = check_nda; | ||
| 1464 | rmb(); | 1477 | rmb(); |
| 1465 | cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); | 1478 | cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC); |
| 1479 | rmb(); | ||
| 1480 | cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; | ||
| 1481 | rmb(); | ||
| 1482 | |||
| 1483 | if ((check_nda == cur_nda) && initd) | ||
| 1484 | break; | ||
| 1466 | } | 1485 | } |
| 1467 | 1486 | ||
| 1468 | if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) { | 1487 | if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) { |
| @@ -1471,6 +1490,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |||
| 1471 | } | 1490 | } |
| 1472 | 1491 | ||
| 1473 | /* | 1492 | /* |
| 1493 | * Flush FIFO: only relevant when the transfer is source peripheral | ||
| 1494 | * synchronized. Another flush is needed here because CUBC is updated | ||
| 1495 | * when the controller sends the data write command. It can lead to | ||
| 1496 | * report data that are not written in the memory or the device. The | ||
| 1497 | * FIFO flush ensures that data are really written. | ||
| 1498 | */ | ||
| 1499 | if ((desc->lld.mbr_cfg & mask) == value) { | ||
| 1500 | at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); | ||
| 1501 | while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS)) | ||
| 1502 | cpu_relax(); | ||
| 1503 | } | ||
| 1504 | |||
| 1505 | /* | ||
| 1474 | * Remove size of all microblocks already transferred and the current | 1506 | * Remove size of all microblocks already transferred and the current |
| 1475 | * one. Then add the remaining size to transfer of the current | 1507 | * one. Then add the remaining size to transfer of the current |
| 1476 | * microblock. | 1508 | * microblock. |
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 25d1dadcddd1..d0446a75990a 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c | |||
| @@ -703,8 +703,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan) | |||
| 703 | goto free_resources; | 703 | goto free_resources; |
| 704 | } | 704 | } |
| 705 | 705 | ||
| 706 | src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0, | 706 | src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), |
| 707 | PAGE_SIZE, DMA_TO_DEVICE); | 707 | (size_t)src & ~PAGE_MASK, PAGE_SIZE, |
| 708 | DMA_TO_DEVICE); | ||
| 708 | unmap->addr[0] = src_dma; | 709 | unmap->addr[0] = src_dma; |
| 709 | 710 | ||
| 710 | ret = dma_mapping_error(dma_chan->device->dev, src_dma); | 711 | ret = dma_mapping_error(dma_chan->device->dev, src_dma); |
| @@ -714,8 +715,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan) | |||
| 714 | } | 715 | } |
| 715 | unmap->to_cnt = 1; | 716 | unmap->to_cnt = 1; |
| 716 | 717 | ||
| 717 | dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0, | 718 | dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), |
| 718 | PAGE_SIZE, DMA_FROM_DEVICE); | 719 | (size_t)dest & ~PAGE_MASK, PAGE_SIZE, |
| 720 | DMA_FROM_DEVICE); | ||
| 719 | unmap->addr[1] = dest_dma; | 721 | unmap->addr[1] = dest_dma; |
| 720 | 722 | ||
| 721 | ret = dma_mapping_error(dma_chan->device->dev, dest_dma); | 723 | ret = dma_mapping_error(dma_chan->device->dev, dest_dma); |
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 6aa256b0a1ed..c3ee3ad98a63 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
| @@ -565,7 +565,8 @@ void edac_mc_reset_delay_period(unsigned long value) | |||
| 565 | list_for_each(item, &mc_devices) { | 565 | list_for_each(item, &mc_devices) { |
| 566 | mci = list_entry(item, struct mem_ctl_info, link); | 566 | mci = list_entry(item, struct mem_ctl_info, link); |
| 567 | 567 | ||
| 568 | edac_mod_work(&mci->work, value); | 568 | if (mci->op_state == OP_RUNNING_POLL) |
| 569 | edac_mod_work(&mci->work, value); | ||
| 569 | } | 570 | } |
| 570 | mutex_unlock(&mem_ctls_mutex); | 571 | mutex_unlock(&mem_ctls_mutex); |
| 571 | } | 572 | } |
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index b4d0bf6534cf..6744d88bdea8 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c | |||
| @@ -239,8 +239,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = { | |||
| 239 | { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc }, | 239 | { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc }, |
| 240 | }; | 240 | }; |
| 241 | 241 | ||
| 242 | #define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19) | 242 | #define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \ |
| 243 | #define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14) | 243 | GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19)) |
| 244 | |||
| 245 | #define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \ | ||
| 246 | GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14)) | ||
| 244 | 247 | ||
| 245 | /* Device 16, functions 2-7 */ | 248 | /* Device 16, functions 2-7 */ |
| 246 | 249 | ||
| @@ -326,6 +329,7 @@ struct pci_id_descr { | |||
| 326 | struct pci_id_table { | 329 | struct pci_id_table { |
| 327 | const struct pci_id_descr *descr; | 330 | const struct pci_id_descr *descr; |
| 328 | int n_devs; | 331 | int n_devs; |
| 332 | enum type type; | ||
| 329 | }; | 333 | }; |
| 330 | 334 | ||
| 331 | struct sbridge_dev { | 335 | struct sbridge_dev { |
| @@ -394,9 +398,14 @@ static const struct pci_id_descr pci_dev_descr_sbridge[] = { | |||
| 394 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) }, | 398 | { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) }, |
| 395 | }; | 399 | }; |
| 396 | 400 | ||
| 397 | #define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) } | 401 | #define PCI_ID_TABLE_ENTRY(A, T) { \ |
| 402 | .descr = A, \ | ||
| 403 | .n_devs = ARRAY_SIZE(A), \ | ||
| 404 | .type = T \ | ||
| 405 | } | ||
| 406 | |||
| 398 | static const struct pci_id_table pci_dev_descr_sbridge_table[] = { | 407 | static const struct pci_id_table pci_dev_descr_sbridge_table[] = { |
| 399 | PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge), | 408 | PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, SANDY_BRIDGE), |
| 400 | {0,} /* 0 terminated list. */ | 409 | {0,} /* 0 terminated list. */ |
| 401 | }; | 410 | }; |
| 402 | 411 | ||
| @@ -463,7 +472,7 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = { | |||
| 463 | }; | 472 | }; |
| 464 | 473 | ||
| 465 | static const struct pci_id_table pci_dev_descr_ibridge_table[] = { | 474 | static const struct pci_id_table pci_dev_descr_ibridge_table[] = { |
| 466 | PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge), | 475 | PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, IVY_BRIDGE), |
| 467 | {0,} /* 0 terminated list. */ | 476 | {0,} /* 0 terminated list. */ |
| 468 | }; | 477 | }; |
| 469 | 478 | ||
| @@ -536,7 +545,7 @@ static const struct pci_id_descr pci_dev_descr_haswell[] = { | |||
| 536 | }; | 545 | }; |
| 537 | 546 | ||
| 538 | static const struct pci_id_table pci_dev_descr_haswell_table[] = { | 547 | static const struct pci_id_table pci_dev_descr_haswell_table[] = { |
| 539 | PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell), | 548 | PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, HASWELL), |
| 540 | {0,} /* 0 terminated list. */ | 549 | {0,} /* 0 terminated list. */ |
| 541 | }; | 550 | }; |
| 542 | 551 | ||
| @@ -580,7 +589,7 @@ static const struct pci_id_descr pci_dev_descr_knl[] = { | |||
| 580 | }; | 589 | }; |
| 581 | 590 | ||
| 582 | static const struct pci_id_table pci_dev_descr_knl_table[] = { | 591 | static const struct pci_id_table pci_dev_descr_knl_table[] = { |
| 583 | PCI_ID_TABLE_ENTRY(pci_dev_descr_knl), | 592 | PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, KNIGHTS_LANDING), |
| 584 | {0,} | 593 | {0,} |
| 585 | }; | 594 | }; |
| 586 | 595 | ||
| @@ -648,7 +657,7 @@ static const struct pci_id_descr pci_dev_descr_broadwell[] = { | |||
| 648 | }; | 657 | }; |
| 649 | 658 | ||
| 650 | static const struct pci_id_table pci_dev_descr_broadwell_table[] = { | 659 | static const struct pci_id_table pci_dev_descr_broadwell_table[] = { |
| 651 | PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell), | 660 | PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, BROADWELL), |
| 652 | {0,} /* 0 terminated list. */ | 661 | {0,} /* 0 terminated list. */ |
| 653 | }; | 662 | }; |
| 654 | 663 | ||
| @@ -1894,14 +1903,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci) | |||
| 1894 | pci_read_config_dword(pvt->pci_tad[i], | 1903 | pci_read_config_dword(pvt->pci_tad[i], |
| 1895 | rir_offset[j][k], | 1904 | rir_offset[j][k], |
| 1896 | ®); | 1905 | ®); |
| 1897 | tmp_mb = RIR_OFFSET(reg) << 6; | 1906 | tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6; |
| 1898 | 1907 | ||
| 1899 | gb = div_u64_rem(tmp_mb, 1024, &mb); | 1908 | gb = div_u64_rem(tmp_mb, 1024, &mb); |
| 1900 | edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n", | 1909 | edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n", |
| 1901 | i, j, k, | 1910 | i, j, k, |
| 1902 | gb, (mb*1000)/1024, | 1911 | gb, (mb*1000)/1024, |
| 1903 | ((u64)tmp_mb) << 20L, | 1912 | ((u64)tmp_mb) << 20L, |
| 1904 | (u32)RIR_RNK_TGT(reg), | 1913 | (u32)RIR_RNK_TGT(pvt->info.type, reg), |
| 1905 | reg); | 1914 | reg); |
| 1906 | } | 1915 | } |
| 1907 | } | 1916 | } |
| @@ -2234,7 +2243,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci, | |||
| 2234 | pci_read_config_dword(pvt->pci_tad[ch_add + base_ch], | 2243 | pci_read_config_dword(pvt->pci_tad[ch_add + base_ch], |
| 2235 | rir_offset[n_rir][idx], | 2244 | rir_offset[n_rir][idx], |
| 2236 | ®); | 2245 | ®); |
| 2237 | *rank = RIR_RNK_TGT(reg); | 2246 | *rank = RIR_RNK_TGT(pvt->info.type, reg); |
| 2238 | 2247 | ||
| 2239 | edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n", | 2248 | edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n", |
| 2240 | n_rir, | 2249 | n_rir, |
| @@ -3357,12 +3366,12 @@ fail0: | |||
| 3357 | #define ICPU(model, table) \ | 3366 | #define ICPU(model, table) \ |
| 3358 | { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table } | 3367 | { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table } |
| 3359 | 3368 | ||
| 3360 | /* Order here must match "enum type" */ | ||
| 3361 | static const struct x86_cpu_id sbridge_cpuids[] = { | 3369 | static const struct x86_cpu_id sbridge_cpuids[] = { |
| 3362 | ICPU(0x2d, pci_dev_descr_sbridge_table), /* SANDY_BRIDGE */ | 3370 | ICPU(0x2d, pci_dev_descr_sbridge_table), /* SANDY_BRIDGE */ |
| 3363 | ICPU(0x3e, pci_dev_descr_ibridge_table), /* IVY_BRIDGE */ | 3371 | ICPU(0x3e, pci_dev_descr_ibridge_table), /* IVY_BRIDGE */ |
| 3364 | ICPU(0x3f, pci_dev_descr_haswell_table), /* HASWELL */ | 3372 | ICPU(0x3f, pci_dev_descr_haswell_table), /* HASWELL */ |
| 3365 | ICPU(0x4f, pci_dev_descr_broadwell_table), /* BROADWELL */ | 3373 | ICPU(0x4f, pci_dev_descr_broadwell_table), /* BROADWELL */ |
| 3374 | ICPU(0x56, pci_dev_descr_broadwell_table), /* BROADWELL-DE */ | ||
| 3366 | ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */ | 3375 | ICPU(0x57, pci_dev_descr_knl_table), /* KNIGHTS_LANDING */ |
| 3367 | { } | 3376 | { } |
| 3368 | }; | 3377 | }; |
| @@ -3398,7 +3407,7 @@ static int sbridge_probe(const struct x86_cpu_id *id) | |||
| 3398 | mc, mc + 1, num_mc); | 3407 | mc, mc + 1, num_mc); |
| 3399 | 3408 | ||
| 3400 | sbridge_dev->mc = mc++; | 3409 | sbridge_dev->mc = mc++; |
| 3401 | rc = sbridge_register_mci(sbridge_dev, id - sbridge_cpuids); | 3410 | rc = sbridge_register_mci(sbridge_dev, ptable->type); |
| 3402 | if (unlikely(rc < 0)) | 3411 | if (unlikely(rc < 0)) |
| 3403 | goto fail1; | 3412 | goto fail1; |
| 3404 | } | 3413 | } |
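The RIR_RNK_TGT()/RIR_OFFSET() rework above simply selects a different bit range per CPU type; GET_BITFIELD() in sb_edac extracts bits lo..hi of a register value. A user-space rendition of the Broadwell versus pre-Broadwell rank-target extraction; the macro below is written out portably but yields the same result as the driver's GENMASK-based form, and the register value is an arbitrary example:

#include <stdint.h>
#include <stdio.h>

/* bits lo..hi of v, inclusive */
#define GET_BITFIELD(v, lo, hi) \
	(((v) >> (lo)) & ((1ull << ((hi) - (lo) + 1)) - 1))

enum type { SANDY_BRIDGE, IVY_BRIDGE, HASWELL, BROADWELL, KNIGHTS_LANDING };

static uint32_t rir_rnk_tgt(enum type t, uint32_t reg)
{
	return (t == BROADWELL) ? GET_BITFIELD(reg, 20, 23)
				: GET_BITFIELD(reg, 16, 19);
}

int main(void)
{
	uint32_t reg = 0x00a50000;	/* example register value */

	printf("broadwell rank tgt: %u\n", rir_rnk_tgt(BROADWELL, reg)); /* 10 */
	printf("haswell   rank tgt: %u\n", rir_rnk_tgt(HASWELL, reg));   /*  5 */
	return 0;
}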
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c index 8b3226dca1d9..caff46c0e214 100644 --- a/drivers/extcon/extcon-palmas.c +++ b/drivers/extcon/extcon-palmas.c | |||
| @@ -360,6 +360,8 @@ static int palmas_usb_probe(struct platform_device *pdev) | |||
| 360 | 360 | ||
| 361 | palmas_enable_irq(palmas_usb); | 361 | palmas_enable_irq(palmas_usb); |
| 362 | /* perform initial detection */ | 362 | /* perform initial detection */ |
| 363 | if (palmas_usb->enable_gpio_vbus_detection) | ||
| 364 | palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb); | ||
| 363 | palmas_gpio_id_detect(&palmas_usb->wq_detectid.work); | 365 | palmas_gpio_id_detect(&palmas_usb->wq_detectid.work); |
| 364 | device_set_wakeup_capable(&pdev->dev, true); | 366 | device_set_wakeup_capable(&pdev->dev, true); |
| 365 | return 0; | 367 | return 0; |
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index a850cbc48d8d..c49d50e68aee 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c | |||
| @@ -174,6 +174,7 @@ static __init void reserve_regions(void) | |||
| 174 | { | 174 | { |
| 175 | efi_memory_desc_t *md; | 175 | efi_memory_desc_t *md; |
| 176 | u64 paddr, npages, size; | 176 | u64 paddr, npages, size; |
| 177 | int resv; | ||
| 177 | 178 | ||
| 178 | if (efi_enabled(EFI_DBG)) | 179 | if (efi_enabled(EFI_DBG)) |
| 179 | pr_info("Processing EFI memory map:\n"); | 180 | pr_info("Processing EFI memory map:\n"); |
| @@ -190,12 +191,14 @@ static __init void reserve_regions(void) | |||
| 190 | paddr = md->phys_addr; | 191 | paddr = md->phys_addr; |
| 191 | npages = md->num_pages; | 192 | npages = md->num_pages; |
| 192 | 193 | ||
| 194 | resv = is_reserve_region(md); | ||
| 193 | if (efi_enabled(EFI_DBG)) { | 195 | if (efi_enabled(EFI_DBG)) { |
| 194 | char buf[64]; | 196 | char buf[64]; |
| 195 | 197 | ||
| 196 | pr_info(" 0x%012llx-0x%012llx %s", | 198 | pr_info(" 0x%012llx-0x%012llx %s%s\n", |
| 197 | paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, | 199 | paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, |
| 198 | efi_md_typeattr_format(buf, sizeof(buf), md)); | 200 | efi_md_typeattr_format(buf, sizeof(buf), md), |
| 201 | resv ? "*" : ""); | ||
| 199 | } | 202 | } |
| 200 | 203 | ||
| 201 | memrange_efi_to_native(&paddr, &npages); | 204 | memrange_efi_to_native(&paddr, &npages); |
| @@ -204,14 +207,9 @@ static __init void reserve_regions(void) | |||
| 204 | if (is_normal_ram(md)) | 207 | if (is_normal_ram(md)) |
| 205 | early_init_dt_add_memory_arch(paddr, size); | 208 | early_init_dt_add_memory_arch(paddr, size); |
| 206 | 209 | ||
| 207 | if (is_reserve_region(md)) { | 210 | if (resv) |
| 208 | memblock_mark_nomap(paddr, size); | 211 | memblock_mark_nomap(paddr, size); |
| 209 | if (efi_enabled(EFI_DBG)) | ||
| 210 | pr_cont("*"); | ||
| 211 | } | ||
| 212 | 212 | ||
| 213 | if (efi_enabled(EFI_DBG)) | ||
| 214 | pr_cont("\n"); | ||
| 215 | } | 213 | } |
| 216 | 214 | ||
| 217 | set_bit(EFI_MEMMAP, &efi.flags); | 215 | set_bit(EFI_MEMMAP, &efi.flags); |
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 48da857f4774..cebcb405812e 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
| @@ -33,6 +33,7 @@ config ARCH_REQUIRE_GPIOLIB | |||
| 33 | 33 | ||
| 34 | menuconfig GPIOLIB | 34 | menuconfig GPIOLIB |
| 35 | bool "GPIO Support" | 35 | bool "GPIO Support" |
| 36 | select ANON_INODES | ||
| 36 | help | 37 | help |
| 37 | This enables GPIO support through the generic GPIO library. | 38 | This enables GPIO support through the generic GPIO library. |
| 38 | You only need to enable this, if you also want to enable | 39 | You only need to enable this, if you also want to enable |
| @@ -530,7 +531,7 @@ menu "Port-mapped I/O GPIO drivers" | |||
| 530 | 531 | ||
| 531 | config GPIO_104_DIO_48E | 532 | config GPIO_104_DIO_48E |
| 532 | tristate "ACCES 104-DIO-48E GPIO support" | 533 | tristate "ACCES 104-DIO-48E GPIO support" |
| 533 | depends on ISA | 534 | depends on ISA_BUS_API |
| 534 | select GPIOLIB_IRQCHIP | 535 | select GPIOLIB_IRQCHIP |
| 535 | help | 536 | help |
| 536 | Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E, | 537 | Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E, |
| @@ -540,7 +541,7 @@ config GPIO_104_DIO_48E | |||
| 540 | 541 | ||
| 541 | config GPIO_104_IDIO_16 | 542 | config GPIO_104_IDIO_16 |
| 542 | tristate "ACCES 104-IDIO-16 GPIO support" | 543 | tristate "ACCES 104-IDIO-16 GPIO support" |
| 543 | depends on ISA | 544 | depends on ISA_BUS_API |
| 544 | select GPIOLIB_IRQCHIP | 545 | select GPIOLIB_IRQCHIP |
| 545 | help | 546 | help |
| 546 | Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16, | 547 | Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16, |
| @@ -551,7 +552,7 @@ config GPIO_104_IDIO_16 | |||
| 551 | 552 | ||
| 552 | config GPIO_104_IDI_48 | 553 | config GPIO_104_IDI_48 |
| 553 | tristate "ACCES 104-IDI-48 GPIO support" | 554 | tristate "ACCES 104-IDI-48 GPIO support" |
| 554 | depends on ISA | 555 | depends on ISA_BUS_API |
| 555 | select GPIOLIB_IRQCHIP | 556 | select GPIOLIB_IRQCHIP |
| 556 | help | 557 | help |
| 557 | Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A, | 558 | Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A, |
| @@ -627,7 +628,7 @@ config GPIO_TS5500 | |||
| 627 | 628 | ||
| 628 | config GPIO_WS16C48 | 629 | config GPIO_WS16C48 |
| 629 | tristate "WinSystems WS16C48 GPIO support" | 630 | tristate "WinSystems WS16C48 GPIO support" |
| 630 | depends on ISA | 631 | depends on ISA_BUS_API |
| 631 | select GPIOLIB_IRQCHIP | 632 | select GPIOLIB_IRQCHIP |
| 632 | help | 633 | help |
| 633 | Enables GPIO support for the WinSystems WS16C48. The base port | 634 | Enables GPIO support for the WinSystems WS16C48. The base port |
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c index 1a647c07be67..fcf776971ca9 100644 --- a/drivers/gpio/gpio-104-dio-48e.c +++ b/drivers/gpio/gpio-104-dio-48e.c | |||
| @@ -75,7 +75,7 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | |||
| 75 | { | 75 | { |
| 76 | struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); | 76 | struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); |
| 77 | const unsigned io_port = offset / 8; | 77 | const unsigned io_port = offset / 8; |
| 78 | const unsigned control_port = io_port / 2; | 78 | const unsigned int control_port = io_port / 3; |
| 79 | const unsigned control_addr = dio48egpio->base + 3 + control_port*4; | 79 | const unsigned control_addr = dio48egpio->base + 3 + control_port*4; |
| 80 | unsigned long flags; | 80 | unsigned long flags; |
| 81 | unsigned control; | 81 | unsigned control; |
| @@ -115,7 +115,7 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset, | |||
| 115 | { | 115 | { |
| 116 | struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); | 116 | struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); |
| 117 | const unsigned io_port = offset / 8; | 117 | const unsigned io_port = offset / 8; |
| 118 | const unsigned control_port = io_port / 2; | 118 | const unsigned int control_port = io_port / 3; |
| 119 | const unsigned mask = BIT(offset % 8); | 119 | const unsigned mask = BIT(offset % 8); |
| 120 | const unsigned control_addr = dio48egpio->base + 3 + control_port*4; | 120 | const unsigned control_addr = dio48egpio->base + 3 + control_port*4; |
| 121 | const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port; | 121 | const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port; |
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c index 6c75c83baf5a..2d2763ea1a68 100644 --- a/drivers/gpio/gpio-104-idi-48.c +++ b/drivers/gpio/gpio-104-idi-48.c | |||
| @@ -247,6 +247,7 @@ static int idi_48_probe(struct device *dev, unsigned int id) | |||
| 247 | idi48gpio->irq = irq[id]; | 247 | idi48gpio->irq = irq[id]; |
| 248 | 248 | ||
| 249 | spin_lock_init(&idi48gpio->lock); | 249 | spin_lock_init(&idi48gpio->lock); |
| 250 | spin_lock_init(&idi48gpio->ack_lock); | ||
| 250 | 251 | ||
| 251 | dev_set_drvdata(dev, idi48gpio); | 252 | dev_set_drvdata(dev, idi48gpio); |
| 252 | 253 | ||
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index 9aabc48ff5de..953e4b829e32 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c | |||
| @@ -547,11 +547,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio) | |||
| 547 | /* disable interrupts and clear status */ | 547 | /* disable interrupts and clear status */ |
| 548 | for (i = 0; i < kona_gpio->num_bank; i++) { | 548 | for (i = 0; i < kona_gpio->num_bank; i++) { |
| 549 | /* Unlock the entire bank first */ | 549 | /* Unlock the entire bank first */ |
| 550 | bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE); | 550 | bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE); |
| 551 | writel(0xffffffff, reg_base + GPIO_INT_MASK(i)); | 551 | writel(0xffffffff, reg_base + GPIO_INT_MASK(i)); |
| 552 | writel(0xffffffff, reg_base + GPIO_INT_STATUS(i)); | 552 | writel(0xffffffff, reg_base + GPIO_INT_STATUS(i)); |
| 553 | /* Now re-lock the bank */ | 553 | /* Now re-lock the bank */ |
| 554 | bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE); | 554 | bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE); |
| 555 | } | 555 | } |
| 556 | } | 556 | } |
| 557 | 557 | ||
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c index d39014daeef9..fc5f197906ac 100644 --- a/drivers/gpio/gpio-lpc32xx.c +++ b/drivers/gpio/gpio-lpc32xx.c | |||
| @@ -29,7 +29,6 @@ | |||
| 29 | 29 | ||
| 30 | #include <mach/hardware.h> | 30 | #include <mach/hardware.h> |
| 31 | #include <mach/platform.h> | 31 | #include <mach/platform.h> |
| 32 | #include <mach/irqs.h> | ||
| 33 | 32 | ||
| 34 | #define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000) | 33 | #define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000) |
| 35 | #define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004) | 34 | #define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004) |
| @@ -371,61 +370,16 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin) | |||
| 371 | 370 | ||
| 372 | static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset) | 371 | static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset) |
| 373 | { | 372 | { |
| 374 | return IRQ_LPC32XX_P0_P1_IRQ; | 373 | return -ENXIO; |
| 375 | } | 374 | } |
| 376 | 375 | ||
| 377 | static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = { | ||
| 378 | IRQ_LPC32XX_GPIO_00, | ||
| 379 | IRQ_LPC32XX_GPIO_01, | ||
| 380 | IRQ_LPC32XX_GPIO_02, | ||
| 381 | IRQ_LPC32XX_GPIO_03, | ||
| 382 | IRQ_LPC32XX_GPIO_04, | ||
| 383 | IRQ_LPC32XX_GPIO_05, | ||
| 384 | }; | ||
| 385 | |||
| 386 | static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset) | 376 | static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset) |
| 387 | { | 377 | { |
| 388 | if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table)) | ||
| 389 | return lpc32xx_gpio_to_irq_gpio_p3_table[offset]; | ||
| 390 | return -ENXIO; | 378 | return -ENXIO; |
| 391 | } | 379 | } |
| 392 | 380 | ||
| 393 | static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = { | ||
| 394 | IRQ_LPC32XX_GPI_00, | ||
| 395 | IRQ_LPC32XX_GPI_01, | ||
| 396 | IRQ_LPC32XX_GPI_02, | ||
| 397 | IRQ_LPC32XX_GPI_03, | ||
| 398 | IRQ_LPC32XX_GPI_04, | ||
| 399 | IRQ_LPC32XX_GPI_05, | ||
| 400 | IRQ_LPC32XX_GPI_06, | ||
| 401 | IRQ_LPC32XX_GPI_07, | ||
| 402 | IRQ_LPC32XX_GPI_08, | ||
| 403 | IRQ_LPC32XX_GPI_09, | ||
| 404 | -ENXIO, /* 10 */ | ||
| 405 | -ENXIO, /* 11 */ | ||
| 406 | -ENXIO, /* 12 */ | ||
| 407 | -ENXIO, /* 13 */ | ||
| 408 | -ENXIO, /* 14 */ | ||
| 409 | -ENXIO, /* 15 */ | ||
| 410 | -ENXIO, /* 16 */ | ||
| 411 | -ENXIO, /* 17 */ | ||
| 412 | -ENXIO, /* 18 */ | ||
| 413 | IRQ_LPC32XX_GPI_19, | ||
| 414 | -ENXIO, /* 20 */ | ||
| 415 | -ENXIO, /* 21 */ | ||
| 416 | -ENXIO, /* 22 */ | ||
| 417 | -ENXIO, /* 23 */ | ||
| 418 | -ENXIO, /* 24 */ | ||
| 419 | -ENXIO, /* 25 */ | ||
| 420 | -ENXIO, /* 26 */ | ||
| 421 | -ENXIO, /* 27 */ | ||
| 422 | IRQ_LPC32XX_GPI_28, | ||
| 423 | }; | ||
| 424 | |||
| 425 | static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset) | 381 | static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset) |
| 426 | { | 382 | { |
| 427 | if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table)) | ||
| 428 | return lpc32xx_gpio_to_irq_gpi_p3_table[offset]; | ||
| 429 | return -ENXIO; | 383 | return -ENXIO; |
| 430 | } | 384 | } |
| 431 | 385 | ||
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 75c6355b018d..e72794e463aa 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c | |||
| @@ -709,7 +709,13 @@ static int zynq_gpio_probe(struct platform_device *pdev) | |||
| 709 | dev_err(&pdev->dev, "input clock not found.\n"); | 709 | dev_err(&pdev->dev, "input clock not found.\n"); |
| 710 | return PTR_ERR(gpio->clk); | 710 | return PTR_ERR(gpio->clk); |
| 711 | } | 711 | } |
| 712 | ret = clk_prepare_enable(gpio->clk); | ||
| 713 | if (ret) { | ||
| 714 | dev_err(&pdev->dev, "Unable to enable clock.\n"); | ||
| 715 | return ret; | ||
| 716 | } | ||
| 712 | 717 | ||
| 718 | pm_runtime_set_active(&pdev->dev); | ||
| 713 | pm_runtime_enable(&pdev->dev); | 719 | pm_runtime_enable(&pdev->dev); |
| 714 | ret = pm_runtime_get_sync(&pdev->dev); | 720 | ret = pm_runtime_get_sync(&pdev->dev); |
| 715 | if (ret < 0) | 721 | if (ret < 0) |
| @@ -747,6 +753,7 @@ err_pm_put: | |||
| 747 | pm_runtime_put(&pdev->dev); | 753 | pm_runtime_put(&pdev->dev); |
| 748 | err_pm_dis: | 754 | err_pm_dis: |
| 749 | pm_runtime_disable(&pdev->dev); | 755 | pm_runtime_disable(&pdev->dev); |
| 756 | clk_disable_unprepare(gpio->clk); | ||
| 750 | 757 | ||
| 751 | return ret; | 758 | return ret; |
| 752 | } | 759 | } |
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index d22dcc38179d..4aabddb38b59 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
| 17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
| 18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
| 19 | #include <linux/io-mapping.h> | ||
| 19 | #include <linux/gpio/consumer.h> | 20 | #include <linux/gpio/consumer.h> |
| 20 | #include <linux/of.h> | 21 | #include <linux/of.h> |
| 21 | #include <linux/of_address.h> | 22 | #include <linux/of_address.h> |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index d407f904a31c..570771ed19e6 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/cdev.h> | 20 | #include <linux/cdev.h> |
| 21 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
| 22 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
| 23 | #include <linux/compat.h> | ||
| 23 | #include <uapi/linux/gpio.h> | 24 | #include <uapi/linux/gpio.h> |
| 24 | 25 | ||
| 25 | #include "gpiolib.h" | 26 | #include "gpiolib.h" |
| @@ -316,7 +317,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
| 316 | { | 317 | { |
| 317 | struct gpio_device *gdev = filp->private_data; | 318 | struct gpio_device *gdev = filp->private_data; |
| 318 | struct gpio_chip *chip = gdev->chip; | 319 | struct gpio_chip *chip = gdev->chip; |
| 319 | int __user *ip = (int __user *)arg; | 320 | void __user *ip = (void __user *)arg; |
| 320 | 321 | ||
| 321 | /* We fail any subsequent ioctl():s when the chip is gone */ | 322 | /* We fail any subsequent ioctl():s when the chip is gone */ |
| 322 | if (!chip) | 323 | if (!chip) |
| @@ -388,6 +389,14 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
| 388 | return -EINVAL; | 389 | return -EINVAL; |
| 389 | } | 390 | } |
| 390 | 391 | ||
| 392 | #ifdef CONFIG_COMPAT | ||
| 393 | static long gpio_ioctl_compat(struct file *filp, unsigned int cmd, | ||
| 394 | unsigned long arg) | ||
| 395 | { | ||
| 396 | return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); | ||
| 397 | } | ||
| 398 | #endif | ||
| 399 | |||
| 391 | /** | 400 | /** |
| 392 | * gpio_chrdev_open() - open the chardev for ioctl operations | 401 | * gpio_chrdev_open() - open the chardev for ioctl operations |
| 393 | * @inode: inode for this chardev | 402 | * @inode: inode for this chardev |
| @@ -431,14 +440,15 @@ static const struct file_operations gpio_fileops = { | |||
| 431 | .owner = THIS_MODULE, | 440 | .owner = THIS_MODULE, |
| 432 | .llseek = noop_llseek, | 441 | .llseek = noop_llseek, |
| 433 | .unlocked_ioctl = gpio_ioctl, | 442 | .unlocked_ioctl = gpio_ioctl, |
| 434 | .compat_ioctl = gpio_ioctl, | 443 | #ifdef CONFIG_COMPAT |
| 444 | .compat_ioctl = gpio_ioctl_compat, | ||
| 445 | #endif | ||
| 435 | }; | 446 | }; |
| 436 | 447 | ||
| 437 | static void gpiodevice_release(struct device *dev) | 448 | static void gpiodevice_release(struct device *dev) |
| 438 | { | 449 | { |
| 439 | struct gpio_device *gdev = dev_get_drvdata(dev); | 450 | struct gpio_device *gdev = dev_get_drvdata(dev); |
| 440 | 451 | ||
| 441 | cdev_del(&gdev->chrdev); | ||
| 442 | list_del(&gdev->list); | 452 | list_del(&gdev->list); |
| 443 | ida_simple_remove(&gpio_ida, gdev->id); | 453 | ida_simple_remove(&gpio_ida, gdev->id); |
| 444 | kfree(gdev->label); | 454 | kfree(gdev->label); |
| @@ -471,7 +481,6 @@ static int gpiochip_setup_dev(struct gpio_device *gdev) | |||
| 471 | 481 | ||
| 472 | /* From this point, the .release() function cleans up gpio_device */ | 482 | /* From this point, the .release() function cleans up gpio_device */ |
| 473 | gdev->dev.release = gpiodevice_release; | 483 | gdev->dev.release = gpiodevice_release; |
| 474 | get_device(&gdev->dev); | ||
| 475 | pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n", | 484 | pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n", |
| 476 | __func__, gdev->base, gdev->base + gdev->ngpio - 1, | 485 | __func__, gdev->base, gdev->base + gdev->ngpio - 1, |
| 477 | dev_name(&gdev->dev), gdev->chip->label ? : "generic"); | 486 | dev_name(&gdev->dev), gdev->chip->label ? : "generic"); |
| @@ -618,6 +627,8 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) | |||
| 618 | goto err_free_label; | 627 | goto err_free_label; |
| 619 | } | 628 | } |
| 620 | 629 | ||
| 630 | spin_unlock_irqrestore(&gpio_lock, flags); | ||
| 631 | |||
| 621 | for (i = 0; i < chip->ngpio; i++) { | 632 | for (i = 0; i < chip->ngpio; i++) { |
| 622 | struct gpio_desc *desc = &gdev->descs[i]; | 633 | struct gpio_desc *desc = &gdev->descs[i]; |
| 623 | 634 | ||
| @@ -649,8 +660,6 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) | |||
| 649 | } | 660 | } |
| 650 | } | 661 | } |
| 651 | 662 | ||
| 652 | spin_unlock_irqrestore(&gpio_lock, flags); | ||
| 653 | |||
| 654 | #ifdef CONFIG_PINCTRL | 663 | #ifdef CONFIG_PINCTRL |
| 655 | INIT_LIST_HEAD(&gdev->pin_ranges); | 664 | INIT_LIST_HEAD(&gdev->pin_ranges); |
| 656 | #endif | 665 | #endif |
| @@ -759,6 +768,8 @@ void gpiochip_remove(struct gpio_chip *chip) | |||
| 759 | * be removed, else it will be dangling until the last user is | 768 | * be removed, else it will be dangling until the last user is |
| 760 | * gone. | 769 | * gone. |
| 761 | */ | 770 | */ |
| 771 | cdev_del(&gdev->chrdev); | ||
| 772 | device_del(&gdev->dev); | ||
| 762 | put_device(&gdev->dev); | 773 | put_device(&gdev->dev); |
| 763 | } | 774 | } |
| 764 | EXPORT_SYMBOL_GPL(gpiochip_remove); | 775 | EXPORT_SYMBOL_GPL(gpiochip_remove); |
| @@ -858,7 +869,7 @@ struct gpio_chip *gpiochip_find(void *data, | |||
| 858 | 869 | ||
| 859 | spin_lock_irqsave(&gpio_lock, flags); | 870 | spin_lock_irqsave(&gpio_lock, flags); |
| 860 | list_for_each_entry(gdev, &gpio_devices, list) | 871 | list_for_each_entry(gdev, &gpio_devices, list) |
| 861 | if (match(gdev->chip, data)) | 872 | if (gdev->chip && match(gdev->chip, data)) |
| 862 | break; | 873 | break; |
| 863 | 874 | ||
| 864 | /* No match? */ | 875 | /* No match? */ |
| @@ -1356,11 +1367,18 @@ done: | |||
| 1356 | /* | 1367 | /* |
| 1357 | * This descriptor validation needs to be inserted verbatim into each | 1368 | * This descriptor validation needs to be inserted verbatim into each |
| 1358 | * function taking a descriptor, so we need to use a preprocessor | 1369 | * function taking a descriptor, so we need to use a preprocessor |
| 1359 | * macro to avoid endless duplication. | 1370 | * macro to avoid endless duplication. If the desc is NULL it is an |
| 1371 | * optional GPIO and calls should just bail out. | ||
| 1360 | */ | 1372 | */ |
| 1361 | #define VALIDATE_DESC(desc) do { \ | 1373 | #define VALIDATE_DESC(desc) do { \ |
| 1362 | if (!desc || !desc->gdev) { \ | 1374 | if (!desc) \ |
| 1363 | pr_warn("%s: invalid GPIO\n", __func__); \ | 1375 | return 0; \ |
| 1376 | if (IS_ERR(desc)) { \ | ||
| 1377 | pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \ | ||
| 1378 | return PTR_ERR(desc); \ | ||
| 1379 | } \ | ||
| 1380 | if (!desc->gdev) { \ | ||
| 1381 | pr_warn("%s: invalid GPIO (no device)\n", __func__); \ | ||
| 1364 | return -EINVAL; \ | 1382 | return -EINVAL; \ |
| 1365 | } \ | 1383 | } \ |
| 1366 | if ( !desc->gdev->chip ) { \ | 1384 | if ( !desc->gdev->chip ) { \ |
| @@ -1370,8 +1388,14 @@ done: | |||
| 1370 | } } while (0) | 1388 | } } while (0) |
| 1371 | 1389 | ||
| 1372 | #define VALIDATE_DESC_VOID(desc) do { \ | 1390 | #define VALIDATE_DESC_VOID(desc) do { \ |
| 1373 | if (!desc || !desc->gdev) { \ | 1391 | if (!desc) \ |
| 1374 | pr_warn("%s: invalid GPIO\n", __func__); \ | 1392 | return; \ |
| 1393 | if (IS_ERR(desc)) { \ | ||
| 1394 | pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \ | ||
| 1395 | return; \ | ||
| 1396 | } \ | ||
| 1397 | if (!desc->gdev) { \ | ||
| 1398 | pr_warn("%s: invalid GPIO (no device)\n", __func__); \ | ||
| 1375 | return; \ | 1399 | return; \ |
| 1376 | } \ | 1400 | } \ |
| 1377 | if (!desc->gdev->chip) { \ | 1401 | if (!desc->gdev->chip) { \ |
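The rewritten VALIDATE_DESC()/VALIDATE_DESC_VOID() give the gpiod_* accessors three distinct behaviours: a NULL descriptor (an absent optional GPIO) silently succeeds, an error pointer is reported and propagated, and a descriptor with no backing device still fails with -EINVAL. What this buys consumers, sketched for a hypothetical probe path — the device, the "reset" con_id and foo_probe() are illustrative only:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int foo_probe(struct device *dev)
    {
            struct gpio_desc *reset;

            /* Absent optional GPIO: returns NULL rather than an error */
            reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
            if (IS_ERR(reset))
                    return PTR_ERR(reset);

            /* With the new VALIDATE_DESC_VOID(), this is a quiet no-op when
             * reset == NULL instead of a spurious "invalid GPIO" warning. */
            gpiod_set_value(reset, 1);
            return 0;
    }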
| @@ -2040,7 +2064,14 @@ int gpiod_to_irq(const struct gpio_desc *desc) | |||
| 2040 | struct gpio_chip *chip; | 2064 | struct gpio_chip *chip; |
| 2041 | int offset; | 2065 | int offset; |
| 2042 | 2066 | ||
| 2043 | VALIDATE_DESC(desc); | 2067 | /* |
| 2068 | * Cannot VALIDATE_DESC() here as gpiod_to_irq() consumer semantics | ||
| 2069 | * requires this function to not return zero on an invalid descriptor | ||
| 2070 | * but rather a negative error number. | ||
| 2071 | */ | ||
| 2072 | if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip) | ||
| 2073 | return -EINVAL; | ||
| 2074 | |||
| 2044 | chip = desc->gdev->chip; | 2075 | chip = desc->gdev->chip; |
| 2045 | offset = gpio_chip_hwgpio(desc); | 2076 | offset = gpio_chip_hwgpio(desc); |
| 2046 | if (chip->to_irq) { | 2077 | if (chip->to_irq) { |
| @@ -2066,17 +2097,30 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq); | |||
| 2066 | */ | 2097 | */ |
| 2067 | int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) | 2098 | int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) |
| 2068 | { | 2099 | { |
| 2069 | if (offset >= chip->ngpio) | 2100 | struct gpio_desc *desc; |
| 2070 | return -EINVAL; | 2101 | |
| 2102 | desc = gpiochip_get_desc(chip, offset); | ||
| 2103 | if (IS_ERR(desc)) | ||
| 2104 | return PTR_ERR(desc); | ||
| 2105 | |||
| 2106 | /* Flush direction if something changed behind our back */ | ||
| 2107 | if (chip->get_direction) { | ||
| 2108 | int dir = chip->get_direction(chip, offset); | ||
| 2109 | |||
| 2110 | if (dir) | ||
| 2111 | clear_bit(FLAG_IS_OUT, &desc->flags); | ||
| 2112 | else | ||
| 2113 | set_bit(FLAG_IS_OUT, &desc->flags); | ||
| 2114 | } | ||
| 2071 | 2115 | ||
| 2072 | if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) { | 2116 | if (test_bit(FLAG_IS_OUT, &desc->flags)) { |
| 2073 | chip_err(chip, | 2117 | chip_err(chip, |
| 2074 | "%s: tried to flag a GPIO set as output for IRQ\n", | 2118 | "%s: tried to flag a GPIO set as output for IRQ\n", |
| 2075 | __func__); | 2119 | __func__); |
| 2076 | return -EIO; | 2120 | return -EIO; |
| 2077 | } | 2121 | } |
| 2078 | 2122 | ||
| 2079 | set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags); | 2123 | set_bit(FLAG_USED_AS_IRQ, &desc->flags); |
| 2080 | return 0; | 2124 | return 0; |
| 2081 | } | 2125 | } |
| 2082 | EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq); | 2126 | EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq); |
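gpiochip_lock_as_irq() now also refreshes FLAG_IS_OUT through the driver's optional ->get_direction() callback, so a line that firmware or out-of-band code flipped to output since gpiolib last looked is correctly refused as an IRQ source. The convention the core relies on here: return nonzero for input, zero for output. A driver-side sketch — struct foo_gpio, FOO_DIR_REG and the "bit set means output" register layout are assumptions, not taken from this patch:

    #include <linux/bitops.h>
    #include <linux/gpio/driver.h>
    #include <linux/io.h>

    struct foo_gpio {
            void __iomem *base;
            struct gpio_chip chip;
    };

    #define FOO_DIR_REG 0x04        /* hypothetical direction register */

    static int foo_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
    {
            struct foo_gpio *priv = gpiochip_get_data(chip);
            u32 dir = readl(priv->base + FOO_DIR_REG);

            /* nonzero = input, zero = output, as gpiochip_lock_as_irq() expects */
            return !(dir & BIT(offset));
    }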
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 992f00b65be4..e055d5be1c3c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
| @@ -1820,6 +1820,8 @@ struct amdgpu_asic_funcs { | |||
| 1820 | /* MM block clocks */ | 1820 | /* MM block clocks */ |
| 1821 | int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); | 1821 | int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); |
| 1822 | int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); | 1822 | int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); |
| 1823 | /* query virtual capabilities */ | ||
| 1824 | u32 (*get_virtual_caps)(struct amdgpu_device *adev); | ||
| 1823 | }; | 1825 | }; |
| 1824 | 1826 | ||
| 1825 | /* | 1827 | /* |
| @@ -1914,8 +1916,12 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); | |||
| 1914 | 1916 | ||
| 1915 | 1917 | ||
| 1916 | /* GPU virtualization */ | 1918 | /* GPU virtualization */ |
| 1919 | #define AMDGPU_VIRT_CAPS_SRIOV_EN (1 << 0) | ||
| 1920 | #define AMDGPU_VIRT_CAPS_IS_VF (1 << 1) | ||
| 1917 | struct amdgpu_virtualization { | 1921 | struct amdgpu_virtualization { |
| 1918 | bool supports_sr_iov; | 1922 | bool supports_sr_iov; |
| 1923 | bool is_virtual; | ||
| 1924 | u32 caps; | ||
| 1919 | }; | 1925 | }; |
| 1920 | 1926 | ||
| 1921 | /* | 1927 | /* |
| @@ -2204,6 +2210,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) | |||
| 2204 | #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) | 2210 | #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) |
| 2205 | #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) | 2211 | #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) |
| 2206 | #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) | 2212 | #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) |
| 2213 | #define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev))) | ||
| 2207 | #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) | 2214 | #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) |
| 2208 | #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) | 2215 | #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) |
| 2209 | #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) | 2216 | #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) |
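The new get_virtual_caps hook separates three facts the old bool could not express: the driver may be running under a hypervisor, SR-IOV may be enabled on the device, and this particular function may be a VF. A caller-side sketch using only the names added above (the dev_info() messages are illustrative):

    u32 caps = amdgpu_asic_get_virtual_caps(adev);

    if (caps & AMDGPU_VIRT_CAPS_IS_VF)
            dev_info(adev->dev, "running as an SR-IOV virtual function\n");
    else if (caps & AMDGPU_VIRT_CAPS_SRIOV_EN)
            dev_info(adev->dev, "SR-IOV enabled, physical function\n");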
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 199f76baf22c..cf6f49fc1c75 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | |||
| @@ -696,6 +696,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type) | |||
| 696 | return result; | 696 | return result; |
| 697 | } | 697 | } |
| 698 | 698 | ||
| 699 | static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type) | ||
| 700 | { | ||
| 701 | CGS_FUNC_ADEV; | ||
| 702 | if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) { | ||
| 703 | release_firmware(adev->pm.fw); | ||
| 704 | return 0; | ||
| 705 | } | ||
| 706 | /* cannot release other firmware because they are not created by cgs */ | ||
| 707 | return -EINVAL; | ||
| 708 | } | ||
| 709 | |||
| 699 | static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | 710 | static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, |
| 700 | enum cgs_ucode_id type, | 711 | enum cgs_ucode_id type, |
| 701 | struct cgs_firmware_info *info) | 712 | struct cgs_firmware_info *info) |
| @@ -898,7 +909,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, | |||
| 898 | struct cgs_acpi_method_argument *argument = NULL; | 909 | struct cgs_acpi_method_argument *argument = NULL; |
| 899 | uint32_t i, count; | 910 | uint32_t i, count; |
| 900 | acpi_status status; | 911 | acpi_status status; |
| 901 | int result; | 912 | int result = 0; |
| 902 | uint32_t func_no = 0xFFFFFFFF; | 913 | uint32_t func_no = 0xFFFFFFFF; |
| 903 | 914 | ||
| 904 | handle = ACPI_HANDLE(&adev->pdev->dev); | 915 | handle = ACPI_HANDLE(&adev->pdev->dev); |
| @@ -1125,6 +1136,7 @@ static const struct cgs_ops amdgpu_cgs_ops = { | |||
| 1125 | amdgpu_cgs_pm_query_clock_limits, | 1136 | amdgpu_cgs_pm_query_clock_limits, |
| 1126 | amdgpu_cgs_set_camera_voltages, | 1137 | amdgpu_cgs_set_camera_voltages, |
| 1127 | amdgpu_cgs_get_firmware_info, | 1138 | amdgpu_cgs_get_firmware_info, |
| 1139 | amdgpu_cgs_rel_firmware, | ||
| 1128 | amdgpu_cgs_set_powergating_state, | 1140 | amdgpu_cgs_set_powergating_state, |
| 1129 | amdgpu_cgs_set_clockgating_state, | 1141 | amdgpu_cgs_set_clockgating_state, |
| 1130 | amdgpu_cgs_get_active_displays_info, | 1142 | amdgpu_cgs_get_active_displays_info, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index bb8b149786d7..6e920086af46 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
| @@ -827,8 +827,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) | |||
| 827 | */ | 827 | */ |
| 828 | static void amdgpu_atombios_fini(struct amdgpu_device *adev) | 828 | static void amdgpu_atombios_fini(struct amdgpu_device *adev) |
| 829 | { | 829 | { |
| 830 | if (adev->mode_info.atom_context) | 830 | if (adev->mode_info.atom_context) { |
| 831 | kfree(adev->mode_info.atom_context->scratch); | 831 | kfree(adev->mode_info.atom_context->scratch); |
| 832 | kfree(adev->mode_info.atom_context->iio); | ||
| 833 | } | ||
| 832 | kfree(adev->mode_info.atom_context); | 834 | kfree(adev->mode_info.atom_context); |
| 833 | adev->mode_info.atom_context = NULL; | 835 | adev->mode_info.atom_context = NULL; |
| 834 | kfree(adev->mode_info.atom_card_info); | 836 | kfree(adev->mode_info.atom_card_info); |
| @@ -1325,6 +1327,11 @@ static int amdgpu_fini(struct amdgpu_device *adev) | |||
| 1325 | adev->ip_block_status[i].valid = false; | 1327 | adev->ip_block_status[i].valid = false; |
| 1326 | } | 1328 | } |
| 1327 | 1329 | ||
| 1330 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | ||
| 1331 | if (adev->ip_blocks[i].funcs->late_fini) | ||
| 1332 | adev->ip_blocks[i].funcs->late_fini((void *)adev); | ||
| 1333 | } | ||
| 1334 | |||
| 1328 | return 0; | 1335 | return 0; |
| 1329 | } | 1336 | } |
| 1330 | 1337 | ||
| @@ -1378,6 +1385,15 @@ static int amdgpu_resume(struct amdgpu_device *adev) | |||
| 1378 | return 0; | 1385 | return 0; |
| 1379 | } | 1386 | } |
| 1380 | 1387 | ||
| 1388 | static bool amdgpu_device_is_virtual(void) | ||
| 1389 | { | ||
| 1390 | #ifdef CONFIG_X86 | ||
| 1391 | return boot_cpu_has(X86_FEATURE_HYPERVISOR); | ||
| 1392 | #else | ||
| 1393 | return false; | ||
| 1394 | #endif | ||
| 1395 | } | ||
| 1396 | |||
| 1381 | /** | 1397 | /** |
| 1382 | * amdgpu_device_init - initialize the driver | 1398 | * amdgpu_device_init - initialize the driver |
| 1383 | * | 1399 | * |
| @@ -1512,9 +1528,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
| 1512 | adev->virtualization.supports_sr_iov = | 1528 | adev->virtualization.supports_sr_iov = |
| 1513 | amdgpu_atombios_has_gpu_virtualization_table(adev); | 1529 | amdgpu_atombios_has_gpu_virtualization_table(adev); |
| 1514 | 1530 | ||
| 1531 | /* Check if we are executing in a virtualized environment */ | ||
| 1532 | adev->virtualization.is_virtual = amdgpu_device_is_virtual(); | ||
| 1533 | adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev); | ||
| 1534 | |||
| 1515 | /* Post card if necessary */ | 1535 | /* Post card if necessary */ |
| 1516 | if (!amdgpu_card_posted(adev) || | 1536 | if (!amdgpu_card_posted(adev) || |
| 1517 | adev->virtualization.supports_sr_iov) { | 1537 | (adev->virtualization.is_virtual && |
| 1538 | !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) { | ||
| 1518 | if (!adev->bios) { | 1539 | if (!adev->bios) { |
| 1519 | dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); | 1540 | dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); |
| 1520 | return -EINVAL; | 1541 | return -EINVAL; |
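Taken together, the amdgpu_device.c changes alter when the vBIOS is (re)posted: bare metal and plain passthrough keep the old "post only if not already posted" rule, while a hypervisor guest without SR-IOV is always posted. Condensed into a single predicate — the helper name is mine, not part of the patch:

    /* Sketch: should amdgpu_device_init() (re)post the card? */
    static bool amdgpu_should_post(struct amdgpu_device *adev)
    {
            bool vm_without_sriov = adev->virtualization.is_virtual &&
                    !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN);

            return !amdgpu_card_posted(adev) || vm_without_sriov;
    }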
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 40a23704a981..d851ea15059f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
| @@ -447,7 +447,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
| 447 | dev_info.max_memory_clock = adev->pm.default_mclk * 10; | 447 | dev_info.max_memory_clock = adev->pm.default_mclk * 10; |
| 448 | } | 448 | } |
| 449 | dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; | 449 | dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask; |
| 450 | dev_info.num_rb_pipes = adev->gfx.config.num_rbs; | 450 | dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se * |
| 451 | adev->gfx.config.max_shader_engines; | ||
| 451 | dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; | 452 | dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts; |
| 452 | dev_info._pad = 0; | 453 | dev_info._pad = 0; |
| 453 | dev_info.ids_flags = 0; | 454 | dev_info.ids_flags = 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 589b36e8c5cf..0e13d80d2a95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
| @@ -270,30 +270,28 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, | |||
| 270 | struct drm_device *ddev = dev_get_drvdata(dev); | 270 | struct drm_device *ddev = dev_get_drvdata(dev); |
| 271 | struct amdgpu_device *adev = ddev->dev_private; | 271 | struct amdgpu_device *adev = ddev->dev_private; |
| 272 | enum amd_pm_state_type state = 0; | 272 | enum amd_pm_state_type state = 0; |
| 273 | long idx; | 273 | unsigned long idx; |
| 274 | int ret; | 274 | int ret; |
| 275 | 275 | ||
| 276 | if (strlen(buf) == 1) | 276 | if (strlen(buf) == 1) |
| 277 | adev->pp_force_state_enabled = false; | 277 | adev->pp_force_state_enabled = false; |
| 278 | else { | 278 | else if (adev->pp_enabled) { |
| 279 | ret = kstrtol(buf, 0, &idx); | 279 | struct pp_states_info data; |
| 280 | 280 | ||
| 281 | if (ret) { | 281 | ret = kstrtoul(buf, 0, &idx); |
| 282 | if (ret || idx >= ARRAY_SIZE(data.states)) { | ||
| 282 | count = -EINVAL; | 283 | count = -EINVAL; |
| 283 | goto fail; | 284 | goto fail; |
| 284 | } | 285 | } |
| 285 | 286 | ||
| 286 | if (adev->pp_enabled) { | 287 | amdgpu_dpm_get_pp_num_states(adev, &data); |
| 287 | struct pp_states_info data; | 288 | state = data.states[idx]; |
| 288 | amdgpu_dpm_get_pp_num_states(adev, &data); | 289 | /* only set user selected power states */ |
| 289 | state = data.states[idx]; | 290 | if (state != POWER_STATE_TYPE_INTERNAL_BOOT && |
| 290 | /* only set user selected power states */ | 291 | state != POWER_STATE_TYPE_DEFAULT) { |
| 291 | if (state != POWER_STATE_TYPE_INTERNAL_BOOT && | 292 | amdgpu_dpm_dispatch_task(adev, |
| 292 | state != POWER_STATE_TYPE_DEFAULT) { | 293 | AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); |
| 293 | amdgpu_dpm_dispatch_task(adev, | 294 | adev->pp_force_state_enabled = true; |
| 294 | AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); | ||
| 295 | adev->pp_force_state_enabled = true; | ||
| 296 | } | ||
| 297 | } | 295 | } |
| 298 | } | 296 | } |
| 299 | fail: | 297 | fail: |
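The pp_force_state rework closes an out-of-bounds index: the old code parsed a signed long with kstrtol() and used it to index data.states[] unchecked, so input such as "-1" or a huge value walked off the array. The new guard, distilled (this restates the patch rather than adding anything new; error handling is simplified to a plain return):

    struct pp_states_info data;
    unsigned long idx;

    if (kstrtoul(buf, 0, &idx) || idx >= ARRAY_SIZE(data.states))
            return -EINVAL;         /* reject non-numeric and out-of-range input */

    amdgpu_dpm_get_pp_num_states(adev, &data);
    state = data.states[idx];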
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 6bd961fb43dc..82256558e0f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | |||
| @@ -183,13 +183,6 @@ static int amdgpu_pp_sw_fini(void *handle) | |||
| 183 | if (ret) | 183 | if (ret) |
| 184 | return ret; | 184 | return ret; |
| 185 | 185 | ||
| 186 | #ifdef CONFIG_DRM_AMD_POWERPLAY | ||
| 187 | if (adev->pp_enabled) { | ||
| 188 | amdgpu_pm_sysfs_fini(adev); | ||
| 189 | amd_powerplay_fini(adev->powerplay.pp_handle); | ||
| 190 | } | ||
| 191 | #endif | ||
| 192 | |||
| 193 | return ret; | 186 | return ret; |
| 194 | } | 187 | } |
| 195 | 188 | ||
| @@ -223,6 +216,22 @@ static int amdgpu_pp_hw_fini(void *handle) | |||
| 223 | return ret; | 216 | return ret; |
| 224 | } | 217 | } |
| 225 | 218 | ||
| 219 | static void amdgpu_pp_late_fini(void *handle) | ||
| 220 | { | ||
| 221 | #ifdef CONFIG_DRM_AMD_POWERPLAY | ||
| 222 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 223 | |||
| 224 | if (adev->pp_enabled) { | ||
| 225 | amdgpu_pm_sysfs_fini(adev); | ||
| 226 | amd_powerplay_fini(adev->powerplay.pp_handle); | ||
| 227 | } | ||
| 228 | |||
| 229 | if (adev->powerplay.ip_funcs->late_fini) | ||
| 230 | adev->powerplay.ip_funcs->late_fini( | ||
| 231 | adev->powerplay.pp_handle); | ||
| 232 | #endif | ||
| 233 | } | ||
| 234 | |||
| 226 | static int amdgpu_pp_suspend(void *handle) | 235 | static int amdgpu_pp_suspend(void *handle) |
| 227 | { | 236 | { |
| 228 | int ret = 0; | 237 | int ret = 0; |
| @@ -311,6 +320,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = { | |||
| 311 | .sw_fini = amdgpu_pp_sw_fini, | 320 | .sw_fini = amdgpu_pp_sw_fini, |
| 312 | .hw_init = amdgpu_pp_hw_init, | 321 | .hw_init = amdgpu_pp_hw_init, |
| 313 | .hw_fini = amdgpu_pp_hw_fini, | 322 | .hw_fini = amdgpu_pp_hw_fini, |
| 323 | .late_fini = amdgpu_pp_late_fini, | ||
| 314 | .suspend = amdgpu_pp_suspend, | 324 | .suspend = amdgpu_pp_suspend, |
| 315 | .resume = amdgpu_pp_resume, | 325 | .resume = amdgpu_pp_resume, |
| 316 | .is_idle = amdgpu_pp_is_idle, | 326 | .is_idle = amdgpu_pp_is_idle, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 3b02272db678..870f9494252c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | |||
| @@ -343,6 +343,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) | |||
| 343 | ring->ring = NULL; | 343 | ring->ring = NULL; |
| 344 | ring->ring_obj = NULL; | 344 | ring->ring_obj = NULL; |
| 345 | 345 | ||
| 346 | amdgpu_wb_free(ring->adev, ring->cond_exe_offs); | ||
| 346 | amdgpu_wb_free(ring->adev, ring->fence_offs); | 347 | amdgpu_wb_free(ring->adev, ring->fence_offs); |
| 347 | amdgpu_wb_free(ring->adev, ring->rptr_offs); | 348 | amdgpu_wb_free(ring->adev, ring->rptr_offs); |
| 348 | amdgpu_wb_free(ring->adev, ring->wptr_offs); | 349 | amdgpu_wb_free(ring->adev, ring->wptr_offs); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 8bf84efafb04..48618ee324eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | |||
| @@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, | |||
| 115 | return r; | 115 | return r; |
| 116 | } | 116 | } |
| 117 | r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); | 117 | r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); |
| 118 | memset(sa_manager->cpu_ptr, 0, sa_manager->size); | ||
| 118 | amdgpu_bo_unreserve(sa_manager->bo); | 119 | amdgpu_bo_unreserve(sa_manager->bo); |
| 119 | return r; | 120 | return r; |
| 120 | } | 121 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 01abfc21b4a2..e19520c4b4b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
| @@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) | |||
| 253 | { | 253 | { |
| 254 | int r; | 254 | int r; |
| 255 | 255 | ||
| 256 | if (adev->uvd.vcpu_bo == NULL) | 256 | kfree(adev->uvd.saved_bo); |
| 257 | return 0; | ||
| 258 | 257 | ||
| 259 | amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); | 258 | amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); |
| 260 | 259 | ||
| 261 | r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); | 260 | if (adev->uvd.vcpu_bo) { |
| 262 | if (!r) { | 261 | r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); |
| 263 | amdgpu_bo_kunmap(adev->uvd.vcpu_bo); | 262 | if (!r) { |
| 264 | amdgpu_bo_unpin(adev->uvd.vcpu_bo); | 263 | amdgpu_bo_kunmap(adev->uvd.vcpu_bo); |
| 265 | amdgpu_bo_unreserve(adev->uvd.vcpu_bo); | 264 | amdgpu_bo_unpin(adev->uvd.vcpu_bo); |
| 266 | } | 265 | amdgpu_bo_unreserve(adev->uvd.vcpu_bo); |
| 266 | } | ||
| 267 | 267 | ||
| 268 | amdgpu_bo_unref(&adev->uvd.vcpu_bo); | 268 | amdgpu_bo_unref(&adev->uvd.vcpu_bo); |
| 269 | } | ||
| 269 | 270 | ||
| 270 | amdgpu_ring_fini(&adev->uvd.ring); | 271 | amdgpu_ring_fini(&adev->uvd.ring); |
| 271 | 272 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index ea407db1fbcf..5ec1f1e9c983 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c | |||
| @@ -6221,6 +6221,9 @@ static int ci_dpm_sw_fini(void *handle) | |||
| 6221 | ci_dpm_fini(adev); | 6221 | ci_dpm_fini(adev); |
| 6222 | mutex_unlock(&adev->pm.mutex); | 6222 | mutex_unlock(&adev->pm.mutex); |
| 6223 | 6223 | ||
| 6224 | release_firmware(adev->pm.fw); | ||
| 6225 | adev->pm.fw = NULL; | ||
| 6226 | |||
| 6224 | return 0; | 6227 | return 0; |
| 6225 | } | 6228 | } |
| 6226 | 6229 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 07bc795a4ca9..910431808542 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c | |||
| @@ -962,6 +962,12 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev, | |||
| 962 | return true; | 962 | return true; |
| 963 | } | 963 | } |
| 964 | 964 | ||
| 965 | static u32 cik_get_virtual_caps(struct amdgpu_device *adev) | ||
| 966 | { | ||
| 967 | /* CIK does not support SR-IOV */ | ||
| 968 | return 0; | ||
| 969 | } | ||
| 970 | |||
| 965 | static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { | 971 | static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { |
| 966 | {mmGRBM_STATUS, false}, | 972 | {mmGRBM_STATUS, false}, |
| 967 | {mmGB_ADDR_CONFIG, false}, | 973 | {mmGB_ADDR_CONFIG, false}, |
| @@ -2007,6 +2013,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = | |||
| 2007 | .get_xclk = &cik_get_xclk, | 2013 | .get_xclk = &cik_get_xclk, |
| 2008 | .set_uvd_clocks = &cik_set_uvd_clocks, | 2014 | .set_uvd_clocks = &cik_set_uvd_clocks, |
| 2009 | .set_vce_clocks = &cik_set_vce_clocks, | 2015 | .set_vce_clocks = &cik_set_vce_clocks, |
| 2016 | .get_virtual_caps = &cik_get_virtual_caps, | ||
| 2010 | /* these should be moved to their own ip modules */ | 2017 | /* these should be moved to their own ip modules */ |
| 2011 | .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, | 2018 | .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, |
| 2012 | .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle, | 2019 | .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle, |
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 518dca43b133..9dc4e24e31e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c | |||
| @@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin"); | |||
| 66 | 66 | ||
| 67 | u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); | 67 | u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); |
| 68 | 68 | ||
| 69 | |||
| 70 | static void cik_sdma_free_microcode(struct amdgpu_device *adev) | ||
| 71 | { | ||
| 72 | int i; | ||
| 73 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 74 | release_firmware(adev->sdma.instance[i].fw); | ||
| 75 | adev->sdma.instance[i].fw = NULL; | ||
| 76 | } | ||
| 77 | } | ||
| 78 | |||
| 69 | /* | 79 | /* |
| 70 | * sDMA - System DMA | 80 | * sDMA - System DMA |
| 71 | * Starting with CIK, the GPU has new asynchronous | 81 | * Starting with CIK, the GPU has new asynchronous |
| @@ -419,6 +429,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) | |||
| 419 | /* Initialize the ring buffer's read and write pointers */ | 429 | /* Initialize the ring buffer's read and write pointers */ |
| 420 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); | 430 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); |
| 421 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); | 431 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); |
| 432 | WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); | ||
| 433 | WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); | ||
| 422 | 434 | ||
| 423 | /* set the wb address whether it's enabled or not */ | 435 | /* set the wb address whether it's enabled or not */ |
| 424 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], | 436 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], |
| @@ -446,7 +458,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) | |||
| 446 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 458 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
| 447 | 459 | ||
| 448 | ring->ready = true; | 460 | ring->ready = true; |
| 461 | } | ||
| 462 | |||
| 463 | cik_sdma_enable(adev, true); | ||
| 449 | 464 | ||
| 465 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 466 | ring = &adev->sdma.instance[i].ring; | ||
| 450 | r = amdgpu_ring_test_ring(ring); | 467 | r = amdgpu_ring_test_ring(ring); |
| 451 | if (r) { | 468 | if (r) { |
| 452 | ring->ready = false; | 469 | ring->ready = false; |
| @@ -529,8 +546,8 @@ static int cik_sdma_start(struct amdgpu_device *adev) | |||
| 529 | if (r) | 546 | if (r) |
| 530 | return r; | 547 | return r; |
| 531 | 548 | ||
| 532 | /* unhalt the MEs */ | 549 | /* halt the engine before programming */ |
| 533 | cik_sdma_enable(adev, true); | 550 | cik_sdma_enable(adev, false); |
| 534 | 551 | ||
| 535 | /* start the gfx rings and rlc compute queues */ | 552 | /* start the gfx rings and rlc compute queues */ |
| 536 | r = cik_sdma_gfx_resume(adev); | 553 | r = cik_sdma_gfx_resume(adev); |
| @@ -998,6 +1015,7 @@ static int cik_sdma_sw_fini(void *handle) | |||
| 998 | for (i = 0; i < adev->sdma.num_instances; i++) | 1015 | for (i = 0; i < adev->sdma.num_instances; i++) |
| 999 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); | 1016 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); |
| 1000 | 1017 | ||
| 1018 | cik_sdma_free_microcode(adev); | ||
| 1001 | return 0; | 1019 | return 0; |
| 1002 | } | 1020 | } |
| 1003 | 1021 | ||
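The functional core of the cik_sdma changes is an ordering fix: the engine is halted before its ring registers (including the newly cleared IB_RPTR/IB_OFFSET) are programmed, unhalted only after every instance is configured, and the ring tests run last. The same flow, with cik_sdma_start() and cik_sdma_gfx_resume() collapsed into one sketch for readability:

    cik_sdma_enable(adev, false);            /* halt before programming */

    for (i = 0; i < adev->sdma.num_instances; i++) {
            /* program RB/IB registers of instance i, then set ring->ready */
    }

    cik_sdma_enable(adev, true);             /* unhalt once all rings are set up */

    for (i = 0; i < adev->sdma.num_instances; i++) {
            r = amdgpu_ring_test_ring(&adev->sdma.instance[i].ring);
            if (r)
                    return r;
    }

sdma_v2_4.c and sdma_v3_0.c below receive the same treatment, with sdma_v3_0 additionally deferring ring-preemption (ctx switch) enablement until after programming.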
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c index 245cabf06575..ed03b75175d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c | |||
| @@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle) | |||
| 72 | 72 | ||
| 73 | static int fiji_dpm_sw_fini(void *handle) | 73 | static int fiji_dpm_sw_fini(void *handle) |
| 74 | { | 74 | { |
| 75 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 76 | |||
| 77 | release_firmware(adev->pm.fw); | ||
| 78 | adev->pm.fw = NULL; | ||
| 79 | |||
| 75 | return 0; | 80 | return 0; |
| 76 | } | 81 | } |
| 77 | 82 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 7f18a53ab53a..fc8ff4d3ccf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
| @@ -991,6 +991,22 @@ out: | |||
| 991 | return err; | 991 | return err; |
| 992 | } | 992 | } |
| 993 | 993 | ||
| 994 | static void gfx_v7_0_free_microcode(struct amdgpu_device *adev) | ||
| 995 | { | ||
| 996 | release_firmware(adev->gfx.pfp_fw); | ||
| 997 | adev->gfx.pfp_fw = NULL; | ||
| 998 | release_firmware(adev->gfx.me_fw); | ||
| 999 | adev->gfx.me_fw = NULL; | ||
| 1000 | release_firmware(adev->gfx.ce_fw); | ||
| 1001 | adev->gfx.ce_fw = NULL; | ||
| 1002 | release_firmware(adev->gfx.mec_fw); | ||
| 1003 | adev->gfx.mec_fw = NULL; | ||
| 1004 | release_firmware(adev->gfx.mec2_fw); | ||
| 1005 | adev->gfx.mec2_fw = NULL; | ||
| 1006 | release_firmware(adev->gfx.rlc_fw); | ||
| 1007 | adev->gfx.rlc_fw = NULL; | ||
| 1008 | } | ||
| 1009 | |||
| 994 | /** | 1010 | /** |
| 995 | * gfx_v7_0_tiling_mode_table_init - init the hw tiling table | 1011 | * gfx_v7_0_tiling_mode_table_init - init the hw tiling table |
| 996 | * | 1012 | * |
| @@ -4489,6 +4505,7 @@ static int gfx_v7_0_sw_fini(void *handle) | |||
| 4489 | gfx_v7_0_cp_compute_fini(adev); | 4505 | gfx_v7_0_cp_compute_fini(adev); |
| 4490 | gfx_v7_0_rlc_fini(adev); | 4506 | gfx_v7_0_rlc_fini(adev); |
| 4491 | gfx_v7_0_mec_fini(adev); | 4507 | gfx_v7_0_mec_fini(adev); |
| 4508 | gfx_v7_0_free_microcode(adev); | ||
| 4492 | 4509 | ||
| 4493 | return 0; | 4510 | return 0; |
| 4494 | } | 4511 | } |
| @@ -4816,7 +4833,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev, | |||
| 4816 | case 2: | 4833 | case 2: |
| 4817 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | 4834 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { |
| 4818 | ring = &adev->gfx.compute_ring[i]; | 4835 | ring = &adev->gfx.compute_ring[i]; |
| 4819 | if ((ring->me == me_id) & (ring->pipe == pipe_id)) | 4836 | if ((ring->me == me_id) && (ring->pipe == pipe_id)) |
| 4820 | amdgpu_fence_process(ring); | 4837 | amdgpu_fence_process(ring); |
| 4821 | } | 4838 | } |
| 4822 | break; | 4839 | break; |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index f19bab68fd83..1a5cbaff1e34 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
| @@ -297,7 +297,8 @@ static const u32 polaris11_golden_common_all[] = | |||
| 297 | static const u32 golden_settings_polaris10_a11[] = | 297 | static const u32 golden_settings_polaris10_a11[] = |
| 298 | { | 298 | { |
| 299 | mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, | 299 | mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, |
| 300 | mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208, | 300 | mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208, |
| 301 | mmCB_HW_CONTROL_2, 0, 0x0f000000, | ||
| 301 | mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, | 302 | mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, |
| 302 | mmDB_DEBUG2, 0xf00fffff, 0x00000400, | 303 | mmDB_DEBUG2, 0xf00fffff, 0x00000400, |
| 303 | mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, | 304 | mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, |
| @@ -836,6 +837,26 @@ err1: | |||
| 836 | return r; | 837 | return r; |
| 837 | } | 838 | } |
| 838 | 839 | ||
| 840 | |||
| 841 | static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) { | ||
| 842 | release_firmware(adev->gfx.pfp_fw); | ||
| 843 | adev->gfx.pfp_fw = NULL; | ||
| 844 | release_firmware(adev->gfx.me_fw); | ||
| 845 | adev->gfx.me_fw = NULL; | ||
| 846 | release_firmware(adev->gfx.ce_fw); | ||
| 847 | adev->gfx.ce_fw = NULL; | ||
| 848 | release_firmware(adev->gfx.rlc_fw); | ||
| 849 | adev->gfx.rlc_fw = NULL; | ||
| 850 | release_firmware(adev->gfx.mec_fw); | ||
| 851 | adev->gfx.mec_fw = NULL; | ||
| 852 | if ((adev->asic_type != CHIP_STONEY) && | ||
| 853 | (adev->asic_type != CHIP_TOPAZ)) | ||
| 854 | release_firmware(adev->gfx.mec2_fw); | ||
| 855 | adev->gfx.mec2_fw = NULL; | ||
| 856 | |||
| 857 | kfree(adev->gfx.rlc.register_list_format); | ||
| 858 | } | ||
| 859 | |||
| 839 | static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) | 860 | static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) |
| 840 | { | 861 | { |
| 841 | const char *chip_name; | 862 | const char *chip_name; |
| @@ -1983,7 +2004,7 @@ static int gfx_v8_0_sw_fini(void *handle) | |||
| 1983 | 2004 | ||
| 1984 | gfx_v8_0_rlc_fini(adev); | 2005 | gfx_v8_0_rlc_fini(adev); |
| 1985 | 2006 | ||
| 1986 | kfree(adev->gfx.rlc.register_list_format); | 2007 | gfx_v8_0_free_microcode(adev); |
| 1987 | 2008 | ||
| 1988 | return 0; | 2009 | return 0; |
| 1989 | } | 2010 | } |
| @@ -3974,11 +3995,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) | |||
| 3974 | amdgpu_ring_write(ring, 0x3a00161a); | 3995 | amdgpu_ring_write(ring, 0x3a00161a); |
| 3975 | amdgpu_ring_write(ring, 0x0000002e); | 3996 | amdgpu_ring_write(ring, 0x0000002e); |
| 3976 | break; | 3997 | break; |
| 3977 | case CHIP_TOPAZ: | ||
| 3978 | case CHIP_CARRIZO: | 3998 | case CHIP_CARRIZO: |
| 3979 | amdgpu_ring_write(ring, 0x00000002); | 3999 | amdgpu_ring_write(ring, 0x00000002); |
| 3980 | amdgpu_ring_write(ring, 0x00000000); | 4000 | amdgpu_ring_write(ring, 0x00000000); |
| 3981 | break; | 4001 | break; |
| 4002 | case CHIP_TOPAZ: | ||
| 4003 | amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ? | ||
| 4004 | 0x00000000 : 0x00000002); | ||
| 4005 | amdgpu_ring_write(ring, 0x00000000); | ||
| 4006 | break; | ||
| 3982 | case CHIP_STONEY: | 4007 | case CHIP_STONEY: |
| 3983 | amdgpu_ring_write(ring, 0x00000000); | 4008 | amdgpu_ring_write(ring, 0x00000000); |
| 3984 | amdgpu_ring_write(ring, 0x00000000); | 4009 | amdgpu_ring_write(ring, 0x00000000); |
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c index 460bc8ad37e6..825ccd63f2dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c | |||
| @@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle) | |||
| 72 | 72 | ||
| 73 | static int iceland_dpm_sw_fini(void *handle) | 73 | static int iceland_dpm_sw_fini(void *handle) |
| 74 | { | 74 | { |
| 75 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 76 | |||
| 77 | release_firmware(adev->pm.fw); | ||
| 78 | adev->pm.fw = NULL; | ||
| 79 | |||
| 75 | return 0; | 80 | return 0; |
| 76 | } | 81 | } |
| 77 | 82 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index f4c3130d3fdb..b556bd0a8797 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | |||
| @@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev) | |||
| 105 | } | 105 | } |
| 106 | } | 106 | } |
| 107 | 107 | ||
| 108 | static void sdma_v2_4_free_microcode(struct amdgpu_device *adev) | ||
| 109 | { | ||
| 110 | int i; | ||
| 111 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 112 | release_firmware(adev->sdma.instance[i].fw); | ||
| 113 | adev->sdma.instance[i].fw = NULL; | ||
| 114 | } | ||
| 115 | } | ||
| 116 | |||
| 108 | /** | 117 | /** |
| 109 | * sdma_v2_4_init_microcode - load ucode images from disk | 118 | * sdma_v2_4_init_microcode - load ucode images from disk |
| 110 | * | 119 | * |
| @@ -461,6 +470,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) | |||
| 461 | /* Initialize the ring buffer's read and write pointers */ | 470 | /* Initialize the ring buffer's read and write pointers */ |
| 462 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); | 471 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); |
| 463 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); | 472 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); |
| 473 | WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); | ||
| 474 | WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); | ||
| 464 | 475 | ||
| 465 | /* set the wb address whether it's enabled or not */ | 476 | /* set the wb address whether it's enabled or not */ |
| 466 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], | 477 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], |
| @@ -489,7 +500,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) | |||
| 489 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 500 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
| 490 | 501 | ||
| 491 | ring->ready = true; | 502 | ring->ready = true; |
| 503 | } | ||
| 492 | 504 | ||
| 505 | sdma_v2_4_enable(adev, true); | ||
| 506 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 507 | ring = &adev->sdma.instance[i].ring; | ||
| 493 | r = amdgpu_ring_test_ring(ring); | 508 | r = amdgpu_ring_test_ring(ring); |
| 494 | if (r) { | 509 | if (r) { |
| 495 | ring->ready = false; | 510 | ring->ready = false; |
| @@ -580,8 +595,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev) | |||
| 580 | return -EINVAL; | 595 | return -EINVAL; |
| 581 | } | 596 | } |
| 582 | 597 | ||
| 583 | /* unhalt the MEs */ | 598 | /* halt the engine before programming */ |
| 584 | sdma_v2_4_enable(adev, true); | 599 | sdma_v2_4_enable(adev, false); |
| 585 | 600 | ||
| 586 | /* start the gfx rings and rlc compute queues */ | 601 | /* start the gfx rings and rlc compute queues */ |
| 587 | r = sdma_v2_4_gfx_resume(adev); | 602 | r = sdma_v2_4_gfx_resume(adev); |
| @@ -1012,6 +1027,7 @@ static int sdma_v2_4_sw_fini(void *handle) | |||
| 1012 | for (i = 0; i < adev->sdma.num_instances; i++) | 1027 | for (i = 0; i < adev->sdma.num_instances; i++) |
| 1013 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); | 1028 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); |
| 1014 | 1029 | ||
| 1030 | sdma_v2_4_free_microcode(adev); | ||
| 1015 | return 0; | 1031 | return 0; |
| 1016 | } | 1032 | } |
| 1017 | 1033 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 31d99b0010f7..532ea88da66a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | |||
| @@ -236,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) | |||
| 236 | } | 236 | } |
| 237 | } | 237 | } |
| 238 | 238 | ||
| 239 | static void sdma_v3_0_free_microcode(struct amdgpu_device *adev) | ||
| 240 | { | ||
| 241 | int i; | ||
| 242 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 243 | release_firmware(adev->sdma.instance[i].fw); | ||
| 244 | adev->sdma.instance[i].fw = NULL; | ||
| 245 | } | ||
| 246 | } | ||
| 247 | |||
| 239 | /** | 248 | /** |
| 240 | * sdma_v3_0_init_microcode - load ucode images from disk | 249 | * sdma_v3_0_init_microcode - load ucode images from disk |
| 241 | * | 250 | * |
| @@ -672,6 +681,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) | |||
| 672 | /* Initialize the ring buffer's read and write pointers */ | 681 | /* Initialize the ring buffer's read and write pointers */ |
| 673 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); | 682 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); |
| 674 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); | 683 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); |
| 684 | WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); | ||
| 685 | WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); | ||
| 675 | 686 | ||
| 676 | /* set the wb address whether it's enabled or not */ | 687 | /* set the wb address whether it's enabled or not */ |
| 677 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], | 688 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], |
| @@ -711,7 +722,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) | |||
| 711 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 722 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
| 712 | 723 | ||
| 713 | ring->ready = true; | 724 | ring->ready = true; |
| 725 | } | ||
| 726 | |||
| 727 | /* unhalt the MEs */ | ||
| 728 | sdma_v3_0_enable(adev, true); | ||
| 729 | /* enable sdma ring preemption */ | ||
| 730 | sdma_v3_0_ctx_switch_enable(adev, true); | ||
| 714 | 731 | ||
| 732 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 733 | ring = &adev->sdma.instance[i].ring; | ||
| 715 | r = amdgpu_ring_test_ring(ring); | 734 | r = amdgpu_ring_test_ring(ring); |
| 716 | if (r) { | 735 | if (r) { |
| 717 | ring->ready = false; | 736 | ring->ready = false; |
| @@ -804,10 +823,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev) | |||
| 804 | } | 823 | } |
| 805 | } | 824 | } |
| 806 | 825 | ||
| 807 | /* unhalt the MEs */ | 826 | /* disable sdma engine before programming it */ |
| 808 | sdma_v3_0_enable(adev, true); | 827 | sdma_v3_0_ctx_switch_enable(adev, false); |
| 809 | /* enable sdma ring preemption */ | 828 | sdma_v3_0_enable(adev, false); |
| 810 | sdma_v3_0_ctx_switch_enable(adev, true); | ||
| 811 | 829 | ||
| 812 | /* start the gfx rings and rlc compute queues */ | 830 | /* start the gfx rings and rlc compute queues */ |
| 813 | r = sdma_v3_0_gfx_resume(adev); | 831 | r = sdma_v3_0_gfx_resume(adev); |
| @@ -1247,6 +1265,7 @@ static int sdma_v3_0_sw_fini(void *handle) | |||
| 1247 | for (i = 0; i < adev->sdma.num_instances; i++) | 1265 | for (i = 0; i < adev->sdma.num_instances; i++) |
| 1248 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); | 1266 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); |
| 1249 | 1267 | ||
| 1268 | sdma_v3_0_free_microcode(adev); | ||
| 1250 | return 0; | 1269 | return 0; |
| 1251 | } | 1270 | } |
| 1252 | 1271 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c index b7615cefcac4..f06f6f4dc3a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | |||
| @@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle) | |||
| 71 | 71 | ||
| 72 | static int tonga_dpm_sw_fini(void *handle) | 72 | static int tonga_dpm_sw_fini(void *handle) |
| 73 | { | 73 | { |
| 74 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 75 | |||
| 76 | release_firmware(adev->pm.fw); | ||
| 77 | adev->pm.fw = NULL; | ||
| 78 | |||
| 74 | return 0; | 79 | return 0; |
| 75 | } | 80 | } |
| 76 | 81 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 2c88d0b66cf3..a65c96029476 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c | |||
| @@ -421,6 +421,20 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev, | |||
| 421 | return true; | 421 | return true; |
| 422 | } | 422 | } |
| 423 | 423 | ||
| 424 | static u32 vi_get_virtual_caps(struct amdgpu_device *adev) | ||
| 425 | { | ||
| 426 | u32 caps = 0; | ||
| 427 | u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); | ||
| 428 | |||
| 429 | if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE)) | ||
| 430 | caps |= AMDGPU_VIRT_CAPS_SRIOV_EN; | ||
| 431 | |||
| 432 | if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER)) | ||
| 433 | caps |= AMDGPU_VIRT_CAPS_IS_VF; | ||
| 434 | |||
| 435 | return caps; | ||
| 436 | } | ||
| 437 | |||
| 424 | static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { | 438 | static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { |
| 425 | {mmGB_MACROTILE_MODE7, true}, | 439 | {mmGB_MACROTILE_MODE7, true}, |
| 426 | }; | 440 | }; |
| @@ -1118,6 +1132,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = | |||
| 1118 | .get_xclk = &vi_get_xclk, | 1132 | .get_xclk = &vi_get_xclk, |
| 1119 | .set_uvd_clocks = &vi_set_uvd_clocks, | 1133 | .set_uvd_clocks = &vi_set_uvd_clocks, |
| 1120 | .set_vce_clocks = &vi_set_vce_clocks, | 1134 | .set_vce_clocks = &vi_set_vce_clocks, |
| 1135 | .get_virtual_caps = &vi_get_virtual_caps, | ||
| 1121 | /* these should be moved to their own ip modules */ | 1136 | /* these should be moved to their own ip modules */ |
| 1122 | .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, | 1137 | .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, |
| 1123 | .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle, | 1138 | .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle, |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index ac005796b71c..7708d90b9da9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c | |||
| @@ -242,13 +242,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, | |||
| 242 | pqm_uninit(&p->pqm); | 242 | pqm_uninit(&p->pqm); |
| 243 | 243 | ||
| 244 | /* Iterate over all process device data structure and check | 244 | /* Iterate over all process device data structure and check |
| 245 | * if we should reset all wavefronts */ | 245 | * if we should delete debug managers and reset all wavefronts |
| 246 | list_for_each_entry(pdd, &p->per_device_data, per_device_list) | 246 | */ |
| 247 | list_for_each_entry(pdd, &p->per_device_data, per_device_list) { | ||
| 248 | if ((pdd->dev->dbgmgr) && | ||
| 249 | (pdd->dev->dbgmgr->pasid == p->pasid)) | ||
| 250 | kfd_dbgmgr_destroy(pdd->dev->dbgmgr); | ||
| 251 | |||
| 247 | if (pdd->reset_wavefronts) { | 252 | if (pdd->reset_wavefronts) { |
| 248 | pr_warn("amdkfd: Resetting all wave fronts\n"); | 253 | pr_warn("amdkfd: Resetting all wave fronts\n"); |
| 249 | dbgdev_wave_reset_wavefronts(pdd->dev, p); | 254 | dbgdev_wave_reset_wavefronts(pdd->dev, p); |
| 250 | pdd->reset_wavefronts = false; | 255 | pdd->reset_wavefronts = false; |
| 251 | } | 256 | } |
| 257 | } | ||
| 252 | 258 | ||
| 253 | mutex_unlock(&p->mutex); | 259 | mutex_unlock(&p->mutex); |
| 254 | 260 | ||
| @@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid) | |||
| 404 | 410 | ||
| 405 | idx = srcu_read_lock(&kfd_processes_srcu); | 411 | idx = srcu_read_lock(&kfd_processes_srcu); |
| 406 | 412 | ||
| 413 | /* | ||
| 414 | * Look for the process that matches the pasid. If there is no such | ||
| 415 | * process, we either released it in amdkfd's own notifier, or there | ||
| 416 | * is a bug. Unfortunately, there is no way to tell... | ||
| 417 | */ | ||
| 407 | hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes) | 418 | hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes) |
| 408 | if (p->pasid == pasid) | 419 | if (p->pasid == pasid) { |
| 409 | break; | ||
| 410 | 420 | ||
| 411 | srcu_read_unlock(&kfd_processes_srcu, idx); | 421 | srcu_read_unlock(&kfd_processes_srcu, idx); |
| 412 | 422 | ||
| 413 | BUG_ON(p->pasid != pasid); | 423 | pr_debug("Unbinding process %d from IOMMU\n", pasid); |
| 414 | 424 | ||
| 415 | mutex_lock(&p->mutex); | 425 | mutex_lock(&p->mutex); |
| 416 | 426 | ||
| 417 | if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) | 427 | if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) |
| 418 | kfd_dbgmgr_destroy(dev->dbgmgr); | 428 | kfd_dbgmgr_destroy(dev->dbgmgr); |
| 419 | 429 | ||
| 420 | pqm_uninit(&p->pqm); | 430 | pqm_uninit(&p->pqm); |
| 421 | 431 | ||
| 422 | pdd = kfd_get_process_device_data(dev, p); | 432 | pdd = kfd_get_process_device_data(dev, p); |
| 423 | 433 | ||
| 424 | if (!pdd) { | 434 | if (!pdd) { |
| 425 | mutex_unlock(&p->mutex); | 435 | mutex_unlock(&p->mutex); |
| 426 | return; | 436 | return; |
| 427 | } | 437 | } |
| 428 | 438 | ||
| 429 | if (pdd->reset_wavefronts) { | 439 | if (pdd->reset_wavefronts) { |
| 430 | dbgdev_wave_reset_wavefronts(pdd->dev, p); | 440 | dbgdev_wave_reset_wavefronts(pdd->dev, p); |
| 431 | pdd->reset_wavefronts = false; | 441 | pdd->reset_wavefronts = false; |
| 432 | } | 442 | } |
| 433 | 443 | ||
| 434 | /* | 444 | /* |
| 435 | * Just mark pdd as unbound, because we still need it to call | 445 | * Just mark pdd as unbound, because we still need it |
| 436 | * amd_iommu_unbind_pasid() in when the process exits. | 446 | * to call amd_iommu_unbind_pasid() in when the |
| 437 | * We don't call amd_iommu_unbind_pasid() here | 447 | * process exits. |
| 438 | * because the IOMMU called us. | 448 | * We don't call amd_iommu_unbind_pasid() here |
| 439 | */ | 449 | * because the IOMMU called us. |
| 440 | pdd->bound = false; | 450 | */ |
| 451 | pdd->bound = false; | ||
| 441 | 452 | ||
| 442 | mutex_unlock(&p->mutex); | 453 | mutex_unlock(&p->mutex); |
| 454 | |||
| 455 | return; | ||
| 456 | } | ||
| 457 | |||
| 458 | srcu_read_unlock(&kfd_processes_srcu, idx); | ||
| 443 | } | 459 | } |
| 444 | 460 | ||
| 445 | struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p) | 461 | struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p) |
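The kfd_unbind_process_from_device() rewrite fixes two problems in the old lookup: srcu_read_unlock() was issued before p was known to be valid, and a pasid with no matching process fell through to a BUG_ON on a bogus pointer. The shape of the corrected pattern, reduced to its essentials (the table and SRCU names are from the file; the body is a sketch):

    int idx = srcu_read_lock(&kfd_processes_srcu);

    hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
            if (p->pasid == pasid) {
                    srcu_read_unlock(&kfd_processes_srcu, idx);
                    /* ...unbind work on the matching process, under p->mutex... */
                    return;
            }

    /* no match: the pasid was already released elsewhere; still drop the lock */
    srcu_read_unlock(&kfd_processes_srcu, idx);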
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 74909e72a009..884c96f50c3d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c | |||
| @@ -666,7 +666,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, | |||
| 666 | dev->node_props.simd_count); | 666 | dev->node_props.simd_count); |
| 667 | 667 | ||
| 668 | if (dev->mem_bank_count < dev->node_props.mem_banks_count) { | 668 | if (dev->mem_bank_count < dev->node_props.mem_banks_count) { |
| 669 | pr_warn("kfd: mem_banks_count truncated from %d to %d\n", | 669 | pr_info_once("kfd: mem_banks_count truncated from %d to %d\n", |
| 670 | dev->node_props.mem_banks_count, | 670 | dev->node_props.mem_banks_count, |
| 671 | dev->mem_bank_count); | 671 | dev->mem_bank_count); |
| 672 | sysfs_show_32bit_prop(buffer, "mem_banks_count", | 672 | sysfs_show_32bit_prop(buffer, "mem_banks_count", |
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 6080951d539d..afce1edbe250 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h | |||
| @@ -157,6 +157,7 @@ struct amd_ip_funcs { | |||
| 157 | int (*hw_init)(void *handle); | 157 | int (*hw_init)(void *handle); |
| 158 | /* tears down the hw state */ | 158 | /* tears down the hw state */ |
| 159 | int (*hw_fini)(void *handle); | 159 | int (*hw_fini)(void *handle); |
| 160 | void (*late_fini)(void *handle); | ||
| 160 | /* handles IP specific hw/sw changes for suspend */ | 161 | /* handles IP specific hw/sw changes for suspend */ |
| 161 | int (*suspend)(void *handle); | 162 | int (*suspend)(void *handle); |
| 162 | /* handles IP specific hw/sw changes for resume */ | 163 | /* handles IP specific hw/sw changes for resume */ |
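late_fini is a new optional stage: amdgpu_fini() (see the amdgpu_device.c hunk above) first runs sw_fini for every IP block and only then walks the blocks again calling late_fini, so teardown that must survive other blocks' sw_fini, such as powerplay's sysfs entries and pp_handle, moves there. A minimal sketch of a block opting in — all foo_* names and the foo_private field are placeholders:

    static void foo_late_fini(void *handle)
    {
            struct amdgpu_device *adev = (struct amdgpu_device *)handle;

            /* free state that had to stay valid until every sw_fini had run */
            kfree(adev->foo_private);          /* hypothetical field */
            adev->foo_private = NULL;
    }

    static const struct amd_ip_funcs foo_ip_funcs = {
            /* ...the existing mandatory hooks (early_init, sw_init, ...)... */
            .late_fini = foo_late_fini,
    };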
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h index 32f3e345de08..3493da5c8f0e 100644 --- a/drivers/gpu/drm/amd/include/atombios.h +++ b/drivers/gpu/drm/amd/include/atombios.h | |||
| @@ -5538,6 +5538,78 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V3_5 | |||
| 5538 | ULONG ulReserved[12]; | 5538 | ULONG ulReserved[12]; |
| 5539 | }ATOM_ASIC_PROFILING_INFO_V3_5; | 5539 | }ATOM_ASIC_PROFILING_INFO_V3_5; |
| 5540 | 5540 | ||
| 5541 | /* for Polaris10/11 AVFS parameters */ ||
| 5542 | typedef struct _ATOM_ASIC_PROFILING_INFO_V3_6 | ||
| 5543 | { | ||
| 5544 | ATOM_COMMON_TABLE_HEADER asHeader; | ||
| 5545 | ULONG ulMaxVddc; | ||
| 5546 | ULONG ulMinVddc; | ||
| 5547 | USHORT usLkgEuseIndex; | ||
| 5548 | UCHAR ucLkgEfuseBitLSB; | ||
| 5549 | UCHAR ucLkgEfuseLength; | ||
| 5550 | ULONG ulLkgEncodeLn_MaxDivMin; | ||
| 5551 | ULONG ulLkgEncodeMax; | ||
| 5552 | ULONG ulLkgEncodeMin; | ||
| 5553 | EFUSE_LINEAR_FUNC_PARAM sRoFuse; | ||
| 5554 | ULONG ulEvvDefaultVddc; | ||
| 5555 | ULONG ulEvvNoCalcVddc; | ||
| 5556 | ULONG ulSpeed_Model; | ||
| 5557 | ULONG ulSM_A0; | ||
| 5558 | ULONG ulSM_A1; | ||
| 5559 | ULONG ulSM_A2; | ||
| 5560 | ULONG ulSM_A3; | ||
| 5561 | ULONG ulSM_A4; | ||
| 5562 | ULONG ulSM_A5; | ||
| 5563 | ULONG ulSM_A6; | ||
| 5564 | ULONG ulSM_A7; | ||
| 5565 | UCHAR ucSM_A0_sign; | ||
| 5566 | UCHAR ucSM_A1_sign; | ||
| 5567 | UCHAR ucSM_A2_sign; | ||
| 5568 | UCHAR ucSM_A3_sign; | ||
| 5569 | UCHAR ucSM_A4_sign; | ||
| 5570 | UCHAR ucSM_A5_sign; | ||
| 5571 | UCHAR ucSM_A6_sign; | ||
| 5572 | UCHAR ucSM_A7_sign; | ||
| 5573 | ULONG ulMargin_RO_a; | ||
| 5574 | ULONG ulMargin_RO_b; | ||
| 5575 | ULONG ulMargin_RO_c; | ||
| 5576 | ULONG ulMargin_fixed; | ||
| 5577 | ULONG ulMargin_Fmax_mean; | ||
| 5578 | ULONG ulMargin_plat_mean; | ||
| 5579 | ULONG ulMargin_Fmax_sigma; | ||
| 5580 | ULONG ulMargin_plat_sigma; | ||
| 5581 | ULONG ulMargin_DC_sigma; | ||
| 5582 | ULONG ulLoadLineSlop; | ||
| 5583 | ULONG ulaTDClimitPerDPM[8]; | ||
| 5584 | ULONG ulaNoCalcVddcPerDPM[8]; | ||
| 5585 | ULONG ulAVFS_meanNsigma_Acontant0; | ||
| 5586 | ULONG ulAVFS_meanNsigma_Acontant1; | ||
| 5587 | ULONG ulAVFS_meanNsigma_Acontant2; | ||
| 5588 | USHORT usAVFS_meanNsigma_DC_tol_sigma; | ||
| 5589 | USHORT usAVFS_meanNsigma_Platform_mean; | ||
| 5590 | USHORT usAVFS_meanNsigma_Platform_sigma; | ||
| 5591 | ULONG ulGB_VDROOP_TABLE_CKSOFF_a0; | ||
| 5592 | ULONG ulGB_VDROOP_TABLE_CKSOFF_a1; | ||
| 5593 | ULONG ulGB_VDROOP_TABLE_CKSOFF_a2; | ||
| 5594 | ULONG ulGB_VDROOP_TABLE_CKSON_a0; | ||
| 5595 | ULONG ulGB_VDROOP_TABLE_CKSON_a1; | ||
| 5596 | ULONG ulGB_VDROOP_TABLE_CKSON_a2; | ||
| 5597 | ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_m1; | ||
| 5598 | USHORT usAVFSGB_FUSE_TABLE_CKSOFF_m2; | ||
| 5599 | ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_b; | ||
| 5600 | ULONG ulAVFSGB_FUSE_TABLE_CKSON_m1; | ||
| 5601 | USHORT usAVFSGB_FUSE_TABLE_CKSON_m2; | ||
| 5602 | ULONG ulAVFSGB_FUSE_TABLE_CKSON_b; | ||
| 5603 | USHORT usMaxVoltage_0_25mv; | ||
| 5604 | UCHAR ucEnableGB_VDROOP_TABLE_CKSOFF; | ||
| 5605 | UCHAR ucEnableGB_VDROOP_TABLE_CKSON; | ||
| 5606 | UCHAR ucEnableGB_FUSE_TABLE_CKSOFF; | ||
| 5607 | UCHAR ucEnableGB_FUSE_TABLE_CKSON; | ||
| 5608 | USHORT usPSM_Age_ComFactor; | ||
| 5609 | UCHAR ucEnableApplyAVFS_CKS_OFF_Voltage; | ||
| 5610 | UCHAR ucReserved; | ||
| 5611 | }ATOM_ASIC_PROFILING_INFO_V3_6; | ||
| 5612 | |||
| 5541 | 5613 | ||
| 5542 | typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{ | 5614 | typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{ |
| 5543 | ULONG ulMaxSclkFreq; | 5615 | ULONG ulMaxSclkFreq; |
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index a461e155a160..7464daf89ca1 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h | |||
| @@ -581,6 +581,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device, | |||
| 581 | enum cgs_ucode_id type, | 581 | enum cgs_ucode_id type, |
| 582 | struct cgs_firmware_info *info); | 582 | struct cgs_firmware_info *info); |
| 583 | 583 | ||
| 584 | typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device, | ||
| 585 | enum cgs_ucode_id type); | ||
| 586 | |||
| 584 | typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device, | 587 | typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device, |
| 585 | enum amd_ip_block_type block_type, | 588 | enum amd_ip_block_type block_type, |
| 586 | enum amd_powergating_state state); | 589 | enum amd_powergating_state state); |
| @@ -645,6 +648,7 @@ struct cgs_ops { | |||
| 645 | cgs_set_camera_voltages_t set_camera_voltages; | 648 | cgs_set_camera_voltages_t set_camera_voltages; |
| 646 | /* Firmware Info */ | 649 | /* Firmware Info */ |
| 647 | cgs_get_firmware_info get_firmware_info; | 650 | cgs_get_firmware_info get_firmware_info; |
| 651 | cgs_rel_firmware rel_firmware; | ||
| 648 | /* cg pg interface*/ | 652 | /* cg pg interface*/ |
| 649 | cgs_set_powergating_state set_powergating_state; | 653 | cgs_set_powergating_state set_powergating_state; |
| 650 | cgs_set_clockgating_state set_clockgating_state; | 654 | cgs_set_clockgating_state set_clockgating_state; |
| @@ -738,6 +742,8 @@ struct cgs_device | |||
| 738 | CGS_CALL(set_camera_voltages,dev,mask,voltages) | 742 | CGS_CALL(set_camera_voltages,dev,mask,voltages) |
| 739 | #define cgs_get_firmware_info(dev, type, info) \ | 743 | #define cgs_get_firmware_info(dev, type, info) \ |
| 740 | CGS_CALL(get_firmware_info, dev, type, info) | 744 | CGS_CALL(get_firmware_info, dev, type, info) |
| 745 | #define cgs_rel_firmware(dev, type) \ | ||
| 746 | CGS_CALL(rel_firmware, dev, type) | ||
| 741 | #define cgs_set_powergating_state(dev, block_type, state) \ | 747 | #define cgs_set_powergating_state(dev, block_type, state) \ |
| 742 | CGS_CALL(set_powergating_state, dev, block_type, state) | 748 | CGS_CALL(set_powergating_state, dev, block_type, state) |
| 743 | #define cgs_set_clockgating_state(dev, block_type, state) \ | 749 | #define cgs_set_clockgating_state(dev, block_type, state) \ |
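The new cgs_rel_firmware hook follows the existing CGS pattern: a typedef'd function pointer added to struct cgs_ops, plus a CGS_CALL-style wrapper macro so callers dispatch through the ops table bound to the device. A minimal standalone sketch of that dispatch idiom follows; the struct and macro names here are illustrative stand-ins, not the real CGS types.

    #include <stdio.h>

    /* Stand-in for the device handle and ops table; the real CGS structs are larger. */
    struct demo_ops {
        int (*rel_firmware)(void *dev, int ucode_type);
    };

    struct demo_device {
        const struct demo_ops *ops;
    };

    /* Mirrors the CGS_CALL(...) idea: dispatch through the ops table bound to the device. */
    #define DEMO_CALL(func, dev, ...) ((dev)->ops->func((dev), __VA_ARGS__))
    #define demo_rel_firmware(dev, type) DEMO_CALL(rel_firmware, dev, type)

    static int demo_rel_firmware_impl(void *dev, int ucode_type)
    {
        (void)dev;
        printf("releasing firmware for ucode type %d\n", ucode_type);
        return 0;
    }

    int main(void)
    {
        static const struct demo_ops ops = { .rel_firmware = demo_rel_firmware_impl };
        struct demo_device dev = { .ops = &ops };

        return demo_rel_firmware(&dev, 3); /* e.g. an SMU ucode id */
    }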
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 8e345bfddb69..e629f8a9fe93 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c | |||
| @@ -73,11 +73,14 @@ static int pp_sw_init(void *handle) | |||
| 73 | 73 | ||
| 74 | ret = hwmgr->hwmgr_func->backend_init(hwmgr); | 74 | ret = hwmgr->hwmgr_func->backend_init(hwmgr); |
| 75 | if (ret) | 75 | if (ret) |
| 76 | goto err; | 76 | goto err1; |
| 77 | 77 | ||
| 78 | pr_info("amdgpu: powerplay initialized\n"); | 78 | pr_info("amdgpu: powerplay initialized\n"); |
| 79 | 79 | ||
| 80 | return 0; | 80 | return 0; |
| 81 | err1: | ||
| 82 | if (hwmgr->pptable_func->pptable_fini) | ||
| 83 | hwmgr->pptable_func->pptable_fini(hwmgr); | ||
| 81 | err: | 84 | err: |
| 82 | pr_err("amdgpu: powerplay initialization failed\n"); | 85 | pr_err("amdgpu: powerplay initialization failed\n"); |
| 83 | return ret; | 86 | return ret; |
| @@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle) | |||
| 100 | if (hwmgr->hwmgr_func->backend_fini != NULL) | 103 | if (hwmgr->hwmgr_func->backend_fini != NULL) |
| 101 | ret = hwmgr->hwmgr_func->backend_fini(hwmgr); | 104 | ret = hwmgr->hwmgr_func->backend_fini(hwmgr); |
| 102 | 105 | ||
| 106 | if (hwmgr->pptable_func->pptable_fini) | ||
| 107 | hwmgr->pptable_func->pptable_fini(hwmgr); | ||
| 108 | |||
| 103 | return ret; | 109 | return ret; |
| 104 | } | 110 | } |
| 105 | 111 | ||
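The pp_sw_init change adds a second error label so a failed backend_init also unwinds the pptable state set up earlier in the function, and pp_sw_fini gains the matching pptable_fini teardown. A standalone sketch of the cascading-goto cleanup idiom this relies on, with made-up step names for illustration:

    #include <stdio.h>

    static int parse_table(void)   { puts("table parsed");  return 0; }
    static void free_table(void)   { puts("table freed");   }
    static int backend_init(void)  { puts("backend init");  return -1; /* simulate failure */ }

    static int sw_init(void)
    {
        int ret;

        ret = parse_table();
        if (ret)
            goto err;           /* nothing allocated yet */

        ret = backend_init();
        if (ret)
            goto err_table;     /* undo only what succeeded so far */

        return 0;

    err_table:
        free_table();
    err:
        fprintf(stderr, "init failed (%d)\n", ret);
        return ret;
    }

    int main(void)
    {
        return sw_init() ? 1 : 0;
    }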
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c index 46410e3c7349..fb88e4e5d625 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c | |||
| @@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr) | |||
| 58 | pem_unregister_interrupts(eventmgr); | 58 | pem_unregister_interrupts(eventmgr); |
| 59 | 59 | ||
| 60 | pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); | 60 | pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); |
| 61 | |||
| 62 | if (eventmgr != NULL) | ||
| 63 | kfree(eventmgr); | ||
| 64 | } | 61 | } |
| 65 | 62 | ||
| 66 | int eventmgr_init(struct pp_instance *handle) | 63 | int eventmgr_init(struct pp_instance *handle) |
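Dropping the kfree() from pem_fini presumably leaves freeing the eventmgr to whoever allocated it, avoiding a potential double free; the removed NULL check was redundant in any case, since kfree(NULL) is a no-op. A small userspace analogue of that ownership rule (free() likewise ignores NULL):

    #include <stdlib.h>
    #include <string.h>

    struct mgr {
        char *scratch;
    };

    /* fini tears down what the object owns internally, but not the object itself. */
    static void mgr_fini(struct mgr *m)
    {
        free(m->scratch);   /* safe even if scratch is NULL */
        m->scratch = NULL;
    }

    int main(void)
    {
        struct mgr *m = calloc(1, sizeof(*m));
        if (!m)
            return 1;

        m->scratch = strdup("event data");

        mgr_fini(m);
        free(m);            /* the allocator of m frees m -- exactly once */
        return 0;
    }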
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index 24a16e49b571..92912ab20944 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | |||
| @@ -633,6 +633,8 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) | |||
| 633 | data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE; | 633 | data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE; |
| 634 | data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE; | 634 | data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE; |
| 635 | 635 | ||
| 636 | data->force_pcie_gen = PP_PCIEGenInvalid; | ||
| 637 | |||
| 636 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, | 638 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, |
| 637 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) | 639 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) |
| 638 | data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2; | 640 | data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2; |
| @@ -1830,7 +1832,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci) | |||
| 1830 | 1832 | ||
| 1831 | PP_ASSERT_WITH_CODE(false, | 1833 | PP_ASSERT_WITH_CODE(false, |
| 1832 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", | 1834 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", |
| 1833 | return vddci_table->entries[i].value); | 1835 | return vddci_table->entries[i-1].value); |
| 1834 | } | 1836 | } |
| 1835 | 1837 | ||
| 1836 | static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, | 1838 | static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, |
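Both the Fiji and generic hwmgr variants of find_closest_vddci now fall back to entries[i-1] when the requested VDDCI exceeds every table entry: after the search loop, i equals the entry count, so entries[i] would read one past the end while entries[i-1] is the largest valid value. A standalone sketch of the clamping search, assuming a non-empty ascending table:

    #include <stdint.h>
    #include <stdio.h>

    /* Return the smallest table value >= target, or clamp to the largest entry. */
    static uint16_t find_closest(const uint16_t *table, unsigned count, uint16_t target)
    {
        unsigned i;

        for (i = 0; i < count; i++)
            if (table[i] >= target)
                return table[i];

        /* i == count here: table[i] would be out of bounds, table[i - 1] is the max. */
        return table[i - 1];
    }

    int main(void)
    {
        const uint16_t vddci[] = { 800, 850, 900, 950 };  /* mV, illustrative values */

        printf("%u\n", find_closest(vddci, 4, 870));   /* -> 900 */
        printf("%u\n", find_closest(vddci, 4, 1000));  /* -> 950 (clamped) */
        return 0;
    }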
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index fa208ada6892..efb77eda7508 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | |||
| @@ -306,10 +306,14 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, | |||
| 306 | { | 306 | { |
| 307 | PHM_FUNC_CHECK(hwmgr); | 307 | PHM_FUNC_CHECK(hwmgr); |
| 308 | 308 | ||
| 309 | if (hwmgr->hwmgr_func->store_cc6_data == NULL) | 309 | if (display_config == NULL) |
| 310 | return -EINVAL; | 310 | return -EINVAL; |
| 311 | 311 | ||
| 312 | hwmgr->display_config = *display_config; | 312 | hwmgr->display_config = *display_config; |
| 313 | |||
| 314 | if (hwmgr->hwmgr_func->store_cc6_data == NULL) | ||
| 315 | return -EINVAL; | ||
| 316 | |||
| 313 | /* TODO: pass other display configuration in future */ | 317 | /* TODO: pass other display configuration in future */ |
| 314 | 318 | ||
| 315 | if (hwmgr->hwmgr_func->store_cc6_data) | 319 | if (hwmgr->hwmgr_func->store_cc6_data) |
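The reordering in phm_store_dal_configuration_data validates the caller's display_config and caches it into hwmgr->display_config before bailing out on a missing store_cc6_data callback, so the configuration is retained even when that optional hook is absent. A minimal sketch of the "validate, cache, then probe the optional callback" ordering, with simplified stand-in types:

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    struct display_config { int num_displays; };

    struct manager {
        struct display_config cached;
        int (*store_cc6_data)(struct manager *m);   /* optional hook */
    };

    static int store_config(struct manager *m, const struct display_config *cfg)
    {
        if (cfg == NULL)
            return -EINVAL;         /* reject bad input first */

        m->cached = *cfg;           /* always cache, even without the hook */

        if (m->store_cc6_data == NULL)
            return -EINVAL;         /* optional feature missing, but state is kept */

        return m->store_cc6_data(m);
    }

    int main(void)
    {
        struct manager m = { .store_cc6_data = NULL };
        struct display_config cfg = { .num_displays = 2 };

        store_config(&m, &cfg);
        printf("cached displays: %d\n", m.cached.num_displays);
        return 0;
    }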
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 1c48917da3cf..20f20e075588 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | |||
| @@ -93,6 +93,13 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr) | |||
| 93 | if (hwmgr == NULL || hwmgr->ps == NULL) | 93 | if (hwmgr == NULL || hwmgr->ps == NULL) |
| 94 | return -EINVAL; | 94 | return -EINVAL; |
| 95 | 95 | ||
| 96 | /* free hwmgr-owned allocations */ | ||
| 97 | kfree(hwmgr->backend); | ||
| 98 | |||
| 99 | kfree(hwmgr->start_thermal_controller.function_list); | ||
| 100 | |||
| 101 | kfree(hwmgr->set_temperature_range.function_list); | ||
| 102 | |||
| 96 | kfree(hwmgr->ps); | 103 | kfree(hwmgr->ps); |
| 97 | kfree(hwmgr); | 104 | kfree(hwmgr); |
| 98 | return 0; | 105 | return 0; |
| @@ -462,7 +469,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u | |||
| 462 | 469 | ||
| 463 | PP_ASSERT_WITH_CODE(false, | 470 | PP_ASSERT_WITH_CODE(false, |
| 464 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", | 471 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", |
| 465 | return vddci_table->entries[i].value); | 472 | return vddci_table->entries[i-1].value); |
| 466 | } | 473 | } |
| 467 | 474 | ||
| 468 | int phm_find_boot_level(void *table, | 475 | int phm_find_boot_level(void *table, |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h index 347fef127ce9..2930a3355948 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h | |||
| @@ -39,6 +39,7 @@ struct phm_ppt_v1_clock_voltage_dependency_record { | |||
| 39 | uint8_t phases; | 39 | uint8_t phases; |
| 40 | uint8_t cks_enable; | 40 | uint8_t cks_enable; |
| 41 | uint8_t cks_voffset; | 41 | uint8_t cks_voffset; |
| 42 | uint32_t sclk_offset; | ||
| 42 | }; | 43 | }; |
| 43 | 44 | ||
| 44 | typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record; | 45 | typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index aa6be033f21b..64ee78f7d41e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | |||
| @@ -999,7 +999,7 @@ static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, | |||
| 999 | vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), | 999 | vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), |
| 1000 | (dep_table->entries[i].vddc - | 1000 | (dep_table->entries[i].vddc - |
| 1001 | (uint16_t)data->vddc_vddci_delta)); | 1001 | (uint16_t)data->vddc_vddci_delta)); |
| 1002 | *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; | 1002 | *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; |
| 1003 | } | 1003 | } |
| 1004 | 1004 | ||
| 1005 | if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) | 1005 | if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) |
| @@ -1296,7 +1296,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, | |||
| 1296 | } | 1296 | } |
| 1297 | 1297 | ||
| 1298 | mem_level->MclkFrequency = clock; | 1298 | mem_level->MclkFrequency = clock; |
| 1299 | mem_level->StutterEnable = 0; | ||
| 1300 | mem_level->EnabledForThrottle = 1; | 1299 | mem_level->EnabledForThrottle = 1; |
| 1301 | mem_level->EnabledForActivity = 0; | 1300 | mem_level->EnabledForActivity = 0; |
| 1302 | mem_level->UpHyst = 0; | 1301 | mem_level->UpHyst = 0; |
| @@ -1304,7 +1303,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, | |||
| 1304 | mem_level->VoltageDownHyst = 0; | 1303 | mem_level->VoltageDownHyst = 0; |
| 1305 | mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; | 1304 | mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; |
| 1306 | mem_level->StutterEnable = false; | 1305 | mem_level->StutterEnable = false; |
| 1307 | |||
| 1308 | mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; | 1306 | mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; |
| 1309 | 1307 | ||
| 1310 | data->display_timing.num_existing_displays = info.display_count; | 1308 | data->display_timing.num_existing_displays = info.display_count; |
| @@ -1363,7 +1361,7 @@ static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) | |||
| 1363 | * a higher state by default such that we are not affected by | 1361 | * a higher state by default such that we are not affected by |
| 1364 | * the up threshold or MCLK DPM latency. | 1362 | * the up threshold or MCLK DPM latency. |
| 1365 | */ | 1363 | */ |
| 1366 | levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target; | 1364 | levels[0].ActivityLevel = 0x1f; |
| 1367 | CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); | 1365 | CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); |
| 1368 | 1366 | ||
| 1369 | data->smc_state_table.MemoryDpmLevelCount = | 1367 | data->smc_state_table.MemoryDpmLevelCount = |
| @@ -1761,12 +1759,9 @@ static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) | |||
| 1761 | 1759 | ||
| 1762 | static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) | 1760 | static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) |
| 1763 | { | 1761 | { |
| 1764 | uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, | 1762 | uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; |
| 1765 | volt_with_cks, value; | ||
| 1766 | uint16_t clock_freq_u16; | ||
| 1767 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); | 1763 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); |
| 1768 | uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, | 1764 | uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; |
| 1769 | volt_offset = 0; | ||
| 1770 | struct phm_ppt_v1_information *table_info = | 1765 | struct phm_ppt_v1_information *table_info = |
| 1771 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 1766 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 1772 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = | 1767 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = |
| @@ -1778,50 +1773,38 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) | |||
| 1778 | * if the part is SS or FF. If RO >= 1660MHz, the part is FF. | 1773 | * if the part is SS or FF. If RO >= 1660MHz, the part is FF. |
| 1779 | */ | 1774 | */ |
| 1780 | efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, | 1775 | efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, |
| 1781 | ixSMU_EFUSE_0 + (146 * 4)); | 1776 | ixSMU_EFUSE_0 + (67 * 4)); |
| 1782 | efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, | ||
| 1783 | ixSMU_EFUSE_0 + (148 * 4)); | ||
| 1784 | efuse &= 0xFF000000; | 1777 | efuse &= 0xFF000000; |
| 1785 | efuse = efuse >> 24; | 1778 | efuse = efuse >> 24; |
| 1786 | efuse2 &= 0xF; | ||
| 1787 | |||
| 1788 | if (efuse2 == 1) | ||
| 1789 | ro = (2300 - 1350) * efuse / 255 + 1350; | ||
| 1790 | else | ||
| 1791 | ro = (2500 - 1000) * efuse / 255 + 1000; | ||
| 1792 | 1779 | ||
| 1793 | if (ro >= 1660) | 1780 | if (hwmgr->chip_id == CHIP_POLARIS10) { |
| 1794 | type = 0; | 1781 | min = 1000; |
| 1795 | else | 1782 | max = 2300; |
| 1796 | type = 1; | 1783 | } else { |
| 1784 | min = 1100; | ||
| 1785 | max = 2100; | ||
| 1786 | } | ||
| 1797 | 1787 | ||
| 1798 | /* Populate Stretch amount */ | 1788 | ro = efuse * (max -min)/255 + min; |
| 1799 | data->smc_state_table.ClockStretcherAmount = stretch_amount; | ||
| 1800 | 1789 | ||
| 1801 | /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ | 1790 | /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ |
| 1802 | for (i = 0; i < sclk_table->count; i++) { | 1791 | for (i = 0; i < sclk_table->count; i++) { |
| 1803 | data->smc_state_table.Sclk_CKS_masterEn0_7 |= | 1792 | data->smc_state_table.Sclk_CKS_masterEn0_7 |= |
| 1804 | sclk_table->entries[i].cks_enable << i; | 1793 | sclk_table->entries[i].cks_enable << i; |
| 1805 | volt_without_cks = (uint32_t)((14041 * | 1794 | |
| 1806 | (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / | 1795 | volt_without_cks = (uint32_t)(((ro - 40) * 1000 - 2753594 - sclk_table->entries[i].clk/100 * 136418 /1000) / \ |
| 1807 | (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); | 1796 | (sclk_table->entries[i].clk/100 * 1132925 /10000 - 242418)/100); |
| 1808 | volt_with_cks = (uint32_t)((13946 * | 1797 | |
| 1809 | (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / | 1798 | volt_with_cks = (uint32_t)((ro * 1000 -2396351 - sclk_table->entries[i].clk/100 * 329021/1000) / \ |
| 1810 | (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); | 1799 | (sclk_table->entries[i].clk/10000 * 649434 /1000 - 18005)/10); |
| 1800 | |||
| 1811 | if (volt_without_cks >= volt_with_cks) | 1801 | if (volt_without_cks >= volt_with_cks) |
| 1812 | volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + | 1802 | volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + |
| 1813 | sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); | 1803 | sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); |
| 1804 | |||
| 1814 | data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; | 1805 | data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; |
| 1815 | } | 1806 | } |
| 1816 | 1807 | ||
| 1817 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, | ||
| 1818 | STRETCH_ENABLE, 0x0); | ||
| 1819 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, | ||
| 1820 | masterReset, 0x1); | ||
| 1821 | /* PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, staticEnable, 0x1); */ | ||
| 1822 | PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, | ||
| 1823 | masterReset, 0x0); | ||
| 1824 | |||
| 1825 | /* Populate CKS Lookup Table */ | 1808 | /* Populate CKS Lookup Table */ |
| 1826 | if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) | 1809 | if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) |
| 1827 | stretch_amount2 = 0; | 1810 | stretch_amount2 = 0; |
| @@ -1835,69 +1818,6 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) | |||
| 1835 | return -EINVAL); | 1818 | return -EINVAL); |
| 1836 | } | 1819 | } |
| 1837 | 1820 | ||
| 1838 | value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, | ||
| 1839 | ixPWR_CKS_CNTL); | ||
| 1840 | value &= 0xFFC2FF87; | ||
| 1841 | data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq = | ||
| 1842 | polaris10_clock_stretcher_lookup_table[stretch_amount2][0]; | ||
| 1843 | data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq = | ||
| 1844 | polaris10_clock_stretcher_lookup_table[stretch_amount2][1]; | ||
| 1845 | clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table. | ||
| 1846 | GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].SclkSetting.SclkFrequency) / 100); | ||
| 1847 | if (polaris10_clock_stretcher_lookup_table[stretch_amount2][0] < clock_freq_u16 | ||
| 1848 | && polaris10_clock_stretcher_lookup_table[stretch_amount2][1] > clock_freq_u16) { | ||
| 1849 | /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */ | ||
| 1850 | value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 16; | ||
| 1851 | /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */ | ||
| 1852 | value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][2]) << 18; | ||
| 1853 | /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */ | ||
| 1854 | value |= (polaris10_clock_stretch_amount_conversion | ||
| 1855 | [polaris10_clock_stretcher_lookup_table[stretch_amount2][3]] | ||
| 1856 | [stretch_amount]) << 3; | ||
| 1857 | } | ||
| 1858 | CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq); | ||
| 1859 | CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq); | ||
| 1860 | data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting = | ||
| 1861 | polaris10_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F; | ||
| 1862 | data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |= | ||
| 1863 | (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 7; | ||
| 1864 | |||
| 1865 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | ||
| 1866 | ixPWR_CKS_CNTL, value); | ||
| 1867 | |||
| 1868 | /* Populate DDT Lookup Table */ | ||
| 1869 | for (i = 0; i < 4; i++) { | ||
| 1870 | /* Assign the minimum and maximum VID stored | ||
| 1871 | * in the last row of Clock Stretcher Voltage Table. | ||
| 1872 | */ | ||
| 1873 | data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].minVID = | ||
| 1874 | (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][2]; | ||
| 1875 | data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].maxVID = | ||
| 1876 | (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][3]; | ||
| 1877 | /* Loop through each SCLK and check the frequency | ||
| 1878 | * to see if it lies within the frequency for clock stretcher. | ||
| 1879 | */ | ||
| 1880 | for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) { | ||
| 1881 | cks_setting = 0; | ||
| 1882 | clock_freq = PP_SMC_TO_HOST_UL( | ||
| 1883 | data->smc_state_table.GraphicsLevel[j].SclkSetting.SclkFrequency); | ||
| 1884 | /* Check the allowed frequency against the sclk level[j]. | ||
| 1885 | * Sclk's endianness has already been converted, | ||
| 1886 | * and it's in 10Khz unit, | ||
| 1887 | * as opposed to Data table, which is in Mhz unit. | ||
| 1888 | */ | ||
| 1889 | if (clock_freq >= (polaris10_clock_stretcher_ddt_table[type][i][0]) * 100) { | ||
| 1890 | cks_setting |= 0x2; | ||
| 1891 | if (clock_freq < (polaris10_clock_stretcher_ddt_table[type][i][1]) * 100) | ||
| 1892 | cks_setting |= 0x1; | ||
| 1893 | } | ||
| 1894 | data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting | ||
| 1895 | |= cks_setting << (j * 2); | ||
| 1896 | } | ||
| 1897 | CONVERT_FROM_HOST_TO_SMC_US( | ||
| 1898 | data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting); | ||
| 1899 | } | ||
| 1900 | |||
| 1901 | value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); | 1821 | value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); |
| 1902 | value &= 0xFFFFFFFE; | 1822 | value &= 0xFFFFFFFE; |
| 1903 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); | 1823 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); |
| @@ -1956,6 +1876,90 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, | |||
| 1956 | return 0; | 1876 | return 0; |
| 1957 | } | 1877 | } |
| 1958 | 1878 | ||
| 1879 | |||
| 1880 | int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) | ||
| 1881 | { | ||
| 1882 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); | ||
| 1883 | SMU74_Discrete_DpmTable *table = &(data->smc_state_table); | ||
| 1884 | int result = 0; | ||
| 1885 | struct pp_atom_ctrl__avfs_parameters avfs_params = {0}; | ||
| 1886 | AVFS_meanNsigma_t AVFS_meanNsigma = { {0} }; | ||
| 1887 | AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} }; | ||
| 1888 | uint32_t tmp, i; | ||
| 1889 | struct pp_smumgr *smumgr = hwmgr->smumgr; | ||
| 1890 | struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); | ||
| 1891 | |||
| 1892 | struct phm_ppt_v1_information *table_info = | ||
| 1893 | (struct phm_ppt_v1_information *)hwmgr->pptable; | ||
| 1894 | struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = | ||
| 1895 | table_info->vdd_dep_on_sclk; | ||
| 1896 | |||
| 1897 | |||
| 1898 | if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) | ||
| 1899 | return result; | ||
| 1900 | |||
| 1901 | result = atomctrl_get_avfs_information(hwmgr, &avfs_params); | ||
| 1902 | |||
| 1903 | if (0 == result) { | ||
| 1904 | table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0); | ||
| 1905 | table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1); | ||
| 1906 | table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2); | ||
| 1907 | table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0); | ||
| 1908 | table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1); | ||
| 1909 | table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2); | ||
| 1910 | table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1); | ||
| 1911 | table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2); | ||
| 1912 | table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b); | ||
| 1913 | table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24; | ||
| 1914 | table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12; | ||
| 1915 | table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1); | ||
| 1916 | table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2); | ||
| 1917 | table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b); | ||
| 1918 | table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24; | ||
| 1919 | table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12; | ||
| 1920 | table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv); | ||
| 1921 | AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0); | ||
| 1922 | AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1); | ||
| 1923 | AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2); | ||
| 1924 | AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma); | ||
| 1925 | AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean); | ||
| 1926 | AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor); | ||
| 1927 | AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma); | ||
| 1928 | |||
| 1929 | for (i = 0; i < NUM_VFT_COLUMNS; i++) { | ||
| 1930 | AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625); | ||
| 1931 | AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100); | ||
| 1932 | } | ||
| 1933 | |||
| 1934 | result = polaris10_read_smc_sram_dword(smumgr, | ||
| 1935 | SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma), | ||
| 1936 | &tmp, data->sram_end); | ||
| 1937 | |||
| 1938 | polaris10_copy_bytes_to_smc(smumgr, | ||
| 1939 | tmp, | ||
| 1940 | (uint8_t *)&AVFS_meanNsigma, | ||
| 1941 | sizeof(AVFS_meanNsigma_t), | ||
| 1942 | data->sram_end); | ||
| 1943 | |||
| 1944 | result = polaris10_read_smc_sram_dword(smumgr, | ||
| 1945 | SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable), | ||
| 1946 | &tmp, data->sram_end); | ||
| 1947 | polaris10_copy_bytes_to_smc(smumgr, | ||
| 1948 | tmp, | ||
| 1949 | (uint8_t *)&AVFS_SclkOffset, | ||
| 1950 | sizeof(AVFS_Sclk_Offset_t), | ||
| 1951 | data->sram_end); | ||
| 1952 | |||
| 1953 | data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) | | ||
| 1954 | (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) | | ||
| 1955 | (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) | | ||
| 1956 | (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT); | ||
| 1957 | data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false; | ||
| 1958 | } | ||
| 1959 | return result; | ||
| 1960 | } | ||
| 1961 | |||
| 1962 | |||
| 1959 | /** | 1963 | /** |
| 1960 | * Initializes the SMC table and uploads it | 1964 | * Initializes the SMC table and uploads it |
| 1961 | * | 1965 | * |
| @@ -2056,6 +2060,10 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) | |||
| 2056 | "Failed to populate Clock Stretcher Data Table!", | 2060 | "Failed to populate Clock Stretcher Data Table!", |
| 2057 | return result); | 2061 | return result); |
| 2058 | } | 2062 | } |
| 2063 | |||
| 2064 | result = polaris10_populate_avfs_parameters(hwmgr); | ||
| 2065 | PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;); | ||
| 2066 | |||
| 2059 | table->CurrSclkPllRange = 0xff; | 2067 | table->CurrSclkPllRange = 0xff; |
| 2060 | table->GraphicsVoltageChangeEnable = 1; | 2068 | table->GraphicsVoltageChangeEnable = 1; |
| 2061 | table->GraphicsThermThrottleEnable = 1; | 2069 | table->GraphicsThermThrottleEnable = 1; |
| @@ -2252,6 +2260,9 @@ static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) | |||
| 2252 | static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | 2260 | static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) |
| 2253 | { | 2261 | { |
| 2254 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); | 2262 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); |
| 2263 | uint32_t soft_register_value = 0; | ||
| 2264 | uint32_t handshake_disables_offset = data->soft_regs_start | ||
| 2265 | + offsetof(SMU74_SoftRegisters, HandshakeDisables); | ||
| 2255 | 2266 | ||
| 2256 | /* enable SCLK dpm */ | 2267 | /* enable SCLK dpm */ |
| 2257 | if (!data->sclk_dpm_key_disabled) | 2268 | if (!data->sclk_dpm_key_disabled) |
| @@ -2262,6 +2273,12 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |||
| 2262 | 2273 | ||
| 2263 | /* enable MCLK dpm */ | 2274 | /* enable MCLK dpm */ |
| 2264 | if (0 == data->mclk_dpm_key_disabled) { | 2275 | if (0 == data->mclk_dpm_key_disabled) { |
| 2276 | /* Disable UVD - SMU handshake for MCLK. */ | ||
| 2277 | soft_register_value = cgs_read_ind_register(hwmgr->device, | ||
| 2278 | CGS_IND_REG__SMC, handshake_disables_offset); | ||
| 2279 | soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE; | ||
| 2280 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, | ||
| 2281 | handshake_disables_offset, soft_register_value); | ||
| 2265 | 2282 | ||
| 2266 | PP_ASSERT_WITH_CODE( | 2283 | PP_ASSERT_WITH_CODE( |
| 2267 | (0 == smum_send_msg_to_smc(hwmgr->smumgr, | 2284 | (0 == smum_send_msg_to_smc(hwmgr->smumgr, |
| @@ -2269,7 +2286,6 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) | |||
| 2269 | "Failed to enable MCLK DPM during DPM Start Function!", | 2286 | "Failed to enable MCLK DPM during DPM Start Function!", |
| 2270 | return -1); | 2287 | return -1); |
| 2271 | 2288 | ||
| 2272 | |||
| 2273 | PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); | 2289 | PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); |
| 2274 | 2290 | ||
| 2275 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); | 2291 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); |
| @@ -2606,6 +2622,7 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr) | |||
| 2606 | 2622 | ||
| 2607 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | 2623 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
| 2608 | PHM_PlatformCaps_FanSpeedInTableIsRPM); | 2624 | PHM_PlatformCaps_FanSpeedInTableIsRPM); |
| 2625 | |||
| 2609 | if (hwmgr->chip_id == CHIP_POLARIS11) | 2626 | if (hwmgr->chip_id == CHIP_POLARIS11) |
| 2610 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | 2627 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, |
| 2611 | PHM_PlatformCaps_SPLLShutdownSupport); | 2628 | PHM_PlatformCaps_SPLLShutdownSupport); |
| @@ -2938,6 +2955,11 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) | |||
| 2938 | data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE; | 2955 | data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE; |
| 2939 | data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE; | 2956 | data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE; |
| 2940 | 2957 | ||
| 2958 | data->enable_tdc_limit_feature = true; | ||
| 2959 | data->enable_pkg_pwr_tracking_feature = true; | ||
| 2960 | data->force_pcie_gen = PP_PCIEGenInvalid; | ||
| 2961 | data->mclk_stutter_mode_threshold = 40000; | ||
| 2962 | |||
| 2941 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, | 2963 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, |
| 2942 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) | 2964 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) |
| 2943 | data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; | 2965 | data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; |
| @@ -2962,6 +2984,10 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) | |||
| 2962 | data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; | 2984 | data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; |
| 2963 | } | 2985 | } |
| 2964 | 2986 | ||
| 2987 | if (table_info->cac_dtp_table->usClockStretchAmount != 0) | ||
| 2988 | phm_cap_set(hwmgr->platform_descriptor.platformCaps, | ||
| 2989 | PHM_PlatformCaps_ClockStretcher); | ||
| 2990 | |||
| 2965 | polaris10_set_features_platform_caps(hwmgr); | 2991 | polaris10_set_features_platform_caps(hwmgr); |
| 2966 | 2992 | ||
| 2967 | polaris10_init_dpm_defaults(hwmgr); | 2993 | polaris10_init_dpm_defaults(hwmgr); |
| @@ -3520,10 +3546,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, | |||
| 3520 | ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; | 3546 | ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; |
| 3521 | ATOM_Tonga_POWERPLAYTABLE *powerplay_table = | 3547 | ATOM_Tonga_POWERPLAYTABLE *powerplay_table = |
| 3522 | (ATOM_Tonga_POWERPLAYTABLE *)pp_table; | 3548 | (ATOM_Tonga_POWERPLAYTABLE *)pp_table; |
| 3523 | ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = | 3549 | PPTable_Generic_SubTable_Header *sclk_dep_table = |
| 3524 | (ATOM_Tonga_SCLK_Dependency_Table *) | 3550 | (PPTable_Generic_SubTable_Header *) |
| 3525 | (((unsigned long)powerplay_table) + | 3551 | (((unsigned long)powerplay_table) + |
| 3526 | le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); | 3552 | le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); |
| 3553 | |||
| 3527 | ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = | 3554 | ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = |
| 3528 | (ATOM_Tonga_MCLK_Dependency_Table *) | 3555 | (ATOM_Tonga_MCLK_Dependency_Table *) |
| 3529 | (((unsigned long)powerplay_table) + | 3556 | (((unsigned long)powerplay_table) + |
| @@ -3575,7 +3602,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, | |||
| 3575 | /* Performance levels are arranged from low to high. */ | 3602 | /* Performance levels are arranged from low to high. */ |
| 3576 | performance_level->memory_clock = mclk_dep_table->entries | 3603 | performance_level->memory_clock = mclk_dep_table->entries |
| 3577 | [state_entry->ucMemoryClockIndexLow].ulMclk; | 3604 | [state_entry->ucMemoryClockIndexLow].ulMclk; |
| 3578 | performance_level->engine_clock = sclk_dep_table->entries | 3605 | if (sclk_dep_table->ucRevId == 0) |
| 3606 | performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries | ||
| 3607 | [state_entry->ucEngineClockIndexLow].ulSclk; | ||
| 3608 | else if (sclk_dep_table->ucRevId == 1) | ||
| 3609 | performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries | ||
| 3579 | [state_entry->ucEngineClockIndexLow].ulSclk; | 3610 | [state_entry->ucEngineClockIndexLow].ulSclk; |
| 3580 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, | 3611 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, |
| 3581 | state_entry->ucPCIEGenLow); | 3612 | state_entry->ucPCIEGenLow); |
| @@ -3586,8 +3617,14 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, | |||
| 3586 | [polaris10_power_state->performance_level_count++]); | 3617 | [polaris10_power_state->performance_level_count++]); |
| 3587 | performance_level->memory_clock = mclk_dep_table->entries | 3618 | performance_level->memory_clock = mclk_dep_table->entries |
| 3588 | [state_entry->ucMemoryClockIndexHigh].ulMclk; | 3619 | [state_entry->ucMemoryClockIndexHigh].ulMclk; |
| 3589 | performance_level->engine_clock = sclk_dep_table->entries | 3620 | |
| 3621 | if (sclk_dep_table->ucRevId == 0) | ||
| 3622 | performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries | ||
| 3590 | [state_entry->ucEngineClockIndexHigh].ulSclk; | 3623 | [state_entry->ucEngineClockIndexHigh].ulSclk; |
| 3624 | else if (sclk_dep_table->ucRevId == 1) | ||
| 3625 | performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries | ||
| 3626 | [state_entry->ucEngineClockIndexHigh].ulSclk; | ||
| 3627 | |||
| 3591 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, | 3628 | performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, |
| 3592 | state_entry->ucPCIEGenHigh); | 3629 | state_entry->ucPCIEGenHigh); |
| 3593 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, | 3630 | performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, |
| @@ -3645,7 +3682,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr, | |||
| 3645 | switch (state->classification.ui_label) { | 3682 | switch (state->classification.ui_label) { |
| 3646 | case PP_StateUILabel_Performance: | 3683 | case PP_StateUILabel_Performance: |
| 3647 | data->use_pcie_performance_levels = true; | 3684 | data->use_pcie_performance_levels = true; |
| 3648 | |||
| 3649 | for (i = 0; i < ps->performance_level_count; i++) { | 3685 | for (i = 0; i < ps->performance_level_count; i++) { |
| 3650 | if (data->pcie_gen_performance.max < | 3686 | if (data->pcie_gen_performance.max < |
| 3651 | ps->performance_levels[i].pcie_gen) | 3687 | ps->performance_levels[i].pcie_gen) |
| @@ -3661,7 +3697,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr, | |||
| 3661 | ps->performance_levels[i].pcie_lane) | 3697 | ps->performance_levels[i].pcie_lane) |
| 3662 | data->pcie_lane_performance.max = | 3698 | data->pcie_lane_performance.max = |
| 3663 | ps->performance_levels[i].pcie_lane; | 3699 | ps->performance_levels[i].pcie_lane; |
| 3664 | |||
| 3665 | if (data->pcie_lane_performance.min > | 3700 | if (data->pcie_lane_performance.min > |
| 3666 | ps->performance_levels[i].pcie_lane) | 3701 | ps->performance_levels[i].pcie_lane) |
| 3667 | data->pcie_lane_performance.min = | 3702 | data->pcie_lane_performance.min = |
| @@ -4187,12 +4222,9 @@ int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate) | |||
| 4187 | { | 4222 | { |
| 4188 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); | 4223 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); |
| 4189 | uint32_t mm_boot_level_offset, mm_boot_level_value; | 4224 | uint32_t mm_boot_level_offset, mm_boot_level_value; |
| 4190 | struct phm_ppt_v1_information *table_info = | ||
| 4191 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | ||
| 4192 | 4225 | ||
| 4193 | if (!bgate) { | 4226 | if (!bgate) { |
| 4194 | data->smc_state_table.SamuBootLevel = | 4227 | data->smc_state_table.SamuBootLevel = 0; |
| 4195 | (uint8_t) (table_info->mm_dep_table->count - 1); | ||
| 4196 | mm_boot_level_offset = data->dpm_table_start + | 4228 | mm_boot_level_offset = data->dpm_table_start + |
| 4197 | offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); | 4229 | offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); |
| 4198 | mm_boot_level_offset /= 4; | 4230 | mm_boot_level_offset /= 4; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h index beedf35cbfa6..d717789441f5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h | |||
| @@ -312,6 +312,9 @@ struct polaris10_hwmgr { | |||
| 312 | 312 | ||
| 313 | /* soft pptable for re-uploading into smu */ | 313 | /* soft pptable for re-uploading into smu */ |
| 314 | void *soft_pp_table; | 314 | void *soft_pp_table; |
| 315 | |||
| 316 | uint32_t avfs_vdroop_override_setting; | ||
| 317 | bool apply_avfs_cks_off_voltage; | ||
| 315 | }; | 318 | }; |
| 316 | 319 | ||
| 317 | /* To convert to Q8.8 format for firmware */ | 320 | /* To convert to Q8.8 format for firmware */ |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c index 0b99ab3ba0c5..ae96f14b827c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c | |||
| @@ -286,7 +286,7 @@ int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) | |||
| 286 | 286 | ||
| 287 | if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, | 287 | if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, |
| 288 | (uint8_t *)&data->power_tune_table, | 288 | (uint8_t *)&data->power_tune_table, |
| 289 | sizeof(struct SMU74_Discrete_PmFuses), data->sram_end)) | 289 | (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end)) |
| 290 | PP_ASSERT_WITH_CODE(false, | 290 | PP_ASSERT_WITH_CODE(false, |
| 291 | "Attempt to download PmFuseTable Failed!", | 291 | "Attempt to download PmFuseTable Failed!", |
| 292 | return -EINVAL); | 292 | return -EINVAL); |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c index aba167f7d167..b206632d4650 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c | |||
| @@ -625,10 +625,14 @@ static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr, | |||
| 625 | int ret; | 625 | int ret; |
| 626 | struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); | 626 | struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); |
| 627 | struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); | 627 | struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); |
| 628 | struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); | ||
| 628 | 629 | ||
| 629 | if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS) | 630 | if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) |
| 630 | return 0; | 631 | return 0; |
| 631 | 632 | ||
| 633 | ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | ||
| 634 | PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting); | ||
| 635 | |||
| 632 | ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ? | 636 | ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ? |
| 633 | 0 : -1; | 637 | 0 : -1; |
| 634 | 638 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c index 58742e0d1492..a3c38bbd1e94 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c | |||
| @@ -44,6 +44,20 @@ bool acpi_atcs_functions_supported(void *device, uint32_t index) | |||
| 44 | return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false; | 44 | return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false; |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | bool acpi_atcs_notify_pcie_device_ready(void *device) | ||
| 48 | { | ||
| 49 | int32_t temp_buffer = 1; | ||
| 50 | |||
| 51 | return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS, | ||
| 52 | ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, | ||
| 53 | &temp_buffer, | ||
| 54 | NULL, | ||
| 55 | 0, | ||
| 56 | sizeof(temp_buffer), | ||
| 57 | 0); | ||
| 58 | } | ||
| 59 | |||
| 60 | |||
| 47 | int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) | 61 | int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) |
| 48 | { | 62 | { |
| 49 | struct atcs_pref_req_input atcs_input; | 63 | struct atcs_pref_req_input atcs_input; |
| @@ -52,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) | |||
| 52 | int result; | 66 | int result; |
| 53 | struct cgs_system_info info = {0}; | 67 | struct cgs_system_info info = {0}; |
| 54 | 68 | ||
| 55 | if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST)) | 69 | if( 0 != acpi_atcs_notify_pcie_device_ready(device)) |
| 56 | return -EINVAL; | 70 | return -EINVAL; |
| 57 | 71 | ||
| 58 | info.size = sizeof(struct cgs_system_info); | 72 | info.size = sizeof(struct cgs_system_info); |
| @@ -77,7 +91,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) | |||
| 77 | ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, | 91 | ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, |
| 78 | &atcs_input, | 92 | &atcs_input, |
| 79 | &atcs_output, | 93 | &atcs_output, |
| 80 | 0, | 94 | 1, |
| 81 | sizeof(atcs_input), | 95 | sizeof(atcs_input), |
| 82 | sizeof(atcs_output)); | 96 | sizeof(atcs_output)); |
| 83 | if (result != 0) | 97 | if (result != 0) |
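The new acpi_atcs_notify_pcie_device_ready wraps the ATCS "PCIe device ready" notification, and acpi_pcie_perf_request now issues that notification (rather than only probing the function mask) before sending the performance request. A minimal sketch of the "notify, then request" ordering with stubbed calls; the stubs are illustrative and not the real CGS ACPI interface:

    #include <stdio.h>

    /* Stubbed ACPI method calls; the real driver goes through cgs_call_acpi_method(). */
    static int atcs_notify_device_ready(void)
    {
        puts("ATCS: PCIe device ready notification");
        return 0;
    }

    static int atcs_pcie_perf_request(int perf_req, int advertise)
    {
        printf("ATCS: performance request %d (advertise=%d)\n", perf_req, advertise);
        return 0;
    }

    static int pcie_perf_request(int perf_req, int advertise)
    {
        /* Tell the platform the device is ready before asking for a link change. */
        if (atcs_notify_device_ready() != 0)
            return -1;

        return atcs_pcie_perf_request(perf_req, advertise);
    }

    int main(void)
    {
        return pcie_perf_request(2 /* e.g. a PCIe gen level */, 1);
    }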
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index da9f5f1b6dc2..bf4e18fd3872 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | |||
| @@ -1302,3 +1302,46 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr | |||
| 1302 | 1302 | ||
| 1303 | return 0; | 1303 | return 0; |
| 1304 | } | 1304 | } |
| 1305 | |||
| 1306 | int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param) | ||
| 1307 | { | ||
| 1308 | ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL; | ||
| 1309 | |||
| 1310 | if (param == NULL) | ||
| 1311 | return -EINVAL; | ||
| 1312 | |||
| 1313 | profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *) | ||
| 1314 | cgs_atom_get_data_table(hwmgr->device, | ||
| 1315 | GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), | ||
| 1316 | NULL, NULL, NULL); | ||
| 1317 | if (!profile) | ||
| 1318 | return -1; | ||
| 1319 | |||
| 1320 | param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0; | ||
| 1321 | param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1; | ||
| 1322 | param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2; | ||
| 1323 | param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma; | ||
| 1324 | param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean; | ||
| 1325 | param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma; | ||
| 1326 | param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0; | ||
| 1327 | param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1; | ||
| 1328 | param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2; | ||
| 1329 | param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0; | ||
| 1330 | param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1; | ||
| 1331 | param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2; | ||
| 1332 | param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1; | ||
| 1333 | param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2; | ||
| 1334 | param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b; | ||
| 1335 | param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1; | ||
| 1336 | param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2; | ||
| 1337 | param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b; | ||
| 1338 | param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv; | ||
| 1339 | param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF; | ||
| 1340 | param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON; | ||
| 1341 | param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF; | ||
| 1342 | param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON; | ||
| 1343 | param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor; | ||
| 1344 | param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage; | ||
| 1345 | |||
| 1346 | return 0; | ||
| 1347 | } | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h index d24ebb566905..248c5db5f380 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h | |||
| @@ -250,6 +250,35 @@ struct pp_atomctrl_gpio_pin_assignment { | |||
| 250 | }; | 250 | }; |
| 251 | typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment; | 251 | typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment; |
| 252 | 252 | ||
| 253 | struct pp_atom_ctrl__avfs_parameters { | ||
| 254 | uint32_t ulAVFS_meanNsigma_Acontant0; | ||
| 255 | uint32_t ulAVFS_meanNsigma_Acontant1; | ||
| 256 | uint32_t ulAVFS_meanNsigma_Acontant2; | ||
| 257 | uint16_t usAVFS_meanNsigma_DC_tol_sigma; | ||
| 258 | uint16_t usAVFS_meanNsigma_Platform_mean; | ||
| 259 | uint16_t usAVFS_meanNsigma_Platform_sigma; | ||
| 260 | uint32_t ulGB_VDROOP_TABLE_CKSOFF_a0; | ||
| 261 | uint32_t ulGB_VDROOP_TABLE_CKSOFF_a1; | ||
| 262 | uint32_t ulGB_VDROOP_TABLE_CKSOFF_a2; | ||
| 263 | uint32_t ulGB_VDROOP_TABLE_CKSON_a0; | ||
| 264 | uint32_t ulGB_VDROOP_TABLE_CKSON_a1; | ||
| 265 | uint32_t ulGB_VDROOP_TABLE_CKSON_a2; | ||
| 266 | uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_m1; | ||
| 267 | uint16_t usAVFSGB_FUSE_TABLE_CKSOFF_m2; | ||
| 268 | uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_b; | ||
| 269 | uint32_t ulAVFSGB_FUSE_TABLE_CKSON_m1; | ||
| 270 | uint16_t usAVFSGB_FUSE_TABLE_CKSON_m2; | ||
| 271 | uint32_t ulAVFSGB_FUSE_TABLE_CKSON_b; | ||
| 272 | uint16_t usMaxVoltage_0_25mv; | ||
| 273 | uint8_t ucEnableGB_VDROOP_TABLE_CKSOFF; | ||
| 274 | uint8_t ucEnableGB_VDROOP_TABLE_CKSON; | ||
| 275 | uint8_t ucEnableGB_FUSE_TABLE_CKSOFF; | ||
| 276 | uint8_t ucEnableGB_FUSE_TABLE_CKSON; | ||
| 277 | uint16_t usPSM_Age_ComFactor; | ||
| 278 | uint8_t ucEnableApplyAVFS_CKS_OFF_Voltage; | ||
| 279 | uint8_t ucReserved; | ||
| 280 | }; | ||
| 281 | |||
| 253 | extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment); | 282 | extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment); |
| 254 | extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); | 283 | extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); |
| 255 | extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr); | 284 | extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr); |
| @@ -278,5 +307,8 @@ extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clo | |||
| 278 | extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type, | 307 | extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type, |
| 279 | uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); | 308 | uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); |
| 280 | extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table); | 309 | extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table); |
| 310 | |||
| 311 | extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param); | ||
| 312 | |||
| 281 | #endif | 313 | #endif |
| 282 | 314 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 16fed487973b..233eb7f36c1d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | |||
| @@ -2847,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) | |||
| 2847 | } | 2847 | } |
| 2848 | } | 2848 | } |
| 2849 | 2849 | ||
| 2850 | /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */ | ||
| 2851 | for (i = 0; i < allowed_vdd_sclk_table->count; i++) { | ||
| 2852 | data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc; | ||
| 2853 | /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */ | ||
| 2854 | /* param1 is for corresponding std voltage */ | ||
| 2855 | data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; | ||
| 2856 | } | ||
| 2857 | data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; | ||
| 2858 | |||
| 2859 | if (NULL != allowed_vdd_mclk_table) { | ||
| 2860 | /* Initialize Vddci DPM table based on allow Mclk values */ | ||
| 2861 | for (i = 0; i < allowed_vdd_mclk_table->count; i++) { | ||
| 2862 | data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci; | ||
| 2863 | data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1; | ||
| 2864 | data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd; | ||
| 2865 | data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; | ||
| 2866 | } | ||
| 2867 | data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count; | ||
| 2868 | data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; | ||
| 2869 | } | ||
| 2870 | |||
| 2871 | /* setup PCIE gen speed levels*/ | 2850 | /* setup PCIE gen speed levels*/ |
| 2872 | tonga_setup_default_pcie_tables(hwmgr); | 2851 | tonga_setup_default_pcie_tables(hwmgr); |
| 2873 | 2852 | ||
| @@ -4510,6 +4489,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) | |||
| 4510 | data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE; | 4489 | data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE; |
| 4511 | data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE; | 4490 | data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE; |
| 4512 | data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE; | 4491 | data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE; |
| 4492 | data->force_pcie_gen = PP_PCIEGenInvalid; | ||
| 4513 | 4493 | ||
| 4514 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, | 4494 | if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, |
| 4515 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) { | 4495 | VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) { |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h index 1b44f4e9b8f5..f127198aafc4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h | |||
| @@ -197,6 +197,22 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Table { | |||
| 197 | ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ | 197 | ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ |
| 198 | } ATOM_Tonga_SCLK_Dependency_Table; | 198 | } ATOM_Tonga_SCLK_Dependency_Table; |
| 199 | 199 | ||
| 200 | typedef struct _ATOM_Polaris_SCLK_Dependency_Record { | ||
| 201 | UCHAR ucVddInd; /* Base voltage */ | ||
| 202 | USHORT usVddcOffset; /* Offset relative to base voltage */ | ||
| 203 | ULONG ulSclk; | ||
| 204 | USHORT usEdcCurrent; | ||
| 205 | UCHAR ucReliabilityTemperature; | ||
| 206 | UCHAR ucCKSVOffsetandDisable; /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */ | ||
| 207 | ULONG ulSclkOffset; | ||
| 208 | } ATOM_Polaris_SCLK_Dependency_Record; | ||
| 209 | |||
| 210 | typedef struct _ATOM_Polaris_SCLK_Dependency_Table { | ||
| 211 | UCHAR ucRevId; | ||
| 212 | UCHAR ucNumEntries; /* Number of entries. */ | ||
| 213 | ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ | ||
| 214 | } ATOM_Polaris_SCLK_Dependency_Table; | ||
| 215 | |||
| 200 | typedef struct _ATOM_Tonga_PCIE_Record { | 216 | typedef struct _ATOM_Tonga_PCIE_Record { |
| 201 | UCHAR ucPCIEGenSpeed; | 217 | UCHAR ucPCIEGenSpeed; |
| 202 | UCHAR usPCIELaneWidth; | 218 | UCHAR usPCIELaneWidth; |
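The new ATOM_Polaris_SCLK_Dependency_Record extends the Tonga record layout with ulSclkOffset, and the parsers above and below now inspect the generic sub-table header's ucRevId to decide which record format the entries use (rev 0 = Tonga layout, rev 1 = Polaris layout). A condensed standalone sketch of that revision-based dispatch; the struct names and layouts are simplified stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the generic header and the two record revisions. */
    struct sub_hdr  { uint8_t rev; uint8_t num; };
    struct rec_v0   { uint32_t sclk; };
    struct rec_v1   { uint32_t sclk; uint32_t sclk_offset; };

    struct table_v0 { struct sub_hdr hdr; struct rec_v0 e[2]; };
    struct table_v1 { struct sub_hdr hdr; struct rec_v1 e[2]; };

    static uint32_t first_sclk(const struct sub_hdr *hdr)
    {
        if (hdr->rev == 0)
            return ((const struct table_v0 *)hdr)->e[0].sclk;
        else    /* rev >= 1: newer layout with the extra offset field */
            return ((const struct table_v1 *)hdr)->e[0].sclk;
    }

    int main(void)
    {
        struct table_v1 t = { .hdr = { .rev = 1, .num = 2 },
                              .e   = { { 30000, 100 }, { 60000, 200 } } };

        printf("sclk[0] = %u\n", first_sclk(&t.hdr));   /* -> 30000 */
        return 0;
    }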
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c index 10e3630ee39d..671fdb4d615a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c | |||
| @@ -408,41 +408,78 @@ static int get_mclk_voltage_dependency_table( | |||
| 408 | static int get_sclk_voltage_dependency_table( | 408 | static int get_sclk_voltage_dependency_table( |
| 409 | struct pp_hwmgr *hwmgr, | 409 | struct pp_hwmgr *hwmgr, |
| 410 | phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table, | 410 | phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table, |
| 411 | const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table | 411 | const PPTable_Generic_SubTable_Header *sclk_dep_table |
| 412 | ) | 412 | ) |
| 413 | { | 413 | { |
| 414 | uint32_t table_size, i; | 414 | uint32_t table_size, i; |
| 415 | phm_ppt_v1_clock_voltage_dependency_table *sclk_table; | 415 | phm_ppt_v1_clock_voltage_dependency_table *sclk_table; |
| 416 | 416 | ||
| 417 | PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries), | 417 | if (sclk_dep_table->ucRevId < 1) { |
| 418 | "Invalid PowerPlay Table!", return -1); | 418 | const ATOM_Tonga_SCLK_Dependency_Table *tonga_table = |
| 419 | (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table; | ||
| 419 | 420 | ||
| 420 | table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) | 421 | PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries), |
| 421 | * sclk_dep_table->ucNumEntries; | 422 | "Invalid PowerPlay Table!", return -1); |
| 422 | 423 | ||
| 423 | sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) | 424 | table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) |
| 424 | kzalloc(table_size, GFP_KERNEL); | 425 | * tonga_table->ucNumEntries; |
| 425 | 426 | ||
| 426 | if (NULL == sclk_table) | 427 | sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) |
| 427 | return -ENOMEM; | 428 | kzalloc(table_size, GFP_KERNEL); |
| 428 | 429 | ||
| 429 | memset(sclk_table, 0x00, table_size); | 430 | if (NULL == sclk_table) |
| 430 | 431 | return -ENOMEM; | |
| 431 | sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries; | 432 | |
| 432 | 433 | memset(sclk_table, 0x00, table_size); | |
| 433 | for (i = 0; i < sclk_dep_table->ucNumEntries; i++) { | 434 | |
| 434 | sclk_table->entries[i].vddInd = | 435 | sclk_table->count = (uint32_t)tonga_table->ucNumEntries; |
| 435 | sclk_dep_table->entries[i].ucVddInd; | 436 | |
| 436 | sclk_table->entries[i].vdd_offset = | 437 | for (i = 0; i < tonga_table->ucNumEntries; i++) { |
| 437 | sclk_dep_table->entries[i].usVddcOffset; | 438 | sclk_table->entries[i].vddInd = |
| 438 | sclk_table->entries[i].clk = | 439 | tonga_table->entries[i].ucVddInd; |
| 439 | sclk_dep_table->entries[i].ulSclk; | 440 | sclk_table->entries[i].vdd_offset = |
| 440 | sclk_table->entries[i].cks_enable = | 441 | tonga_table->entries[i].usVddcOffset; |
| 441 | (((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; | 442 | sclk_table->entries[i].clk = |
| 442 | sclk_table->entries[i].cks_voffset = | 443 | tonga_table->entries[i].ulSclk; |
| 443 | (sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F); | 444 | sclk_table->entries[i].cks_enable = |
| 444 | } | 445 | (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; |
| 446 | sclk_table->entries[i].cks_voffset = | ||
| 447 | (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F); | ||
| 448 | } | ||
| 449 | } else { | ||
| 450 | const ATOM_Polaris_SCLK_Dependency_Table *polaris_table = | ||
| 451 | (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table; | ||
| 452 | |||
| 453 | PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries), | ||
| 454 | "Invalid PowerPlay Table!", return -1); | ||
| 455 | |||
| 456 | table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) | ||
| 457 | * polaris_table->ucNumEntries; | ||
| 458 | |||
| 459 | sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) | ||
| 460 | kzalloc(table_size, GFP_KERNEL); | ||
| 445 | 461 | ||
| 462 | if (NULL == sclk_table) | ||
| 463 | return -ENOMEM; | ||
| 464 | |||
| 465 | memset(sclk_table, 0x00, table_size); | ||
| 466 | |||
| 467 | sclk_table->count = (uint32_t)polaris_table->ucNumEntries; | ||
| 468 | |||
| 469 | for (i = 0; i < polaris_table->ucNumEntries; i++) { | ||
| 470 | sclk_table->entries[i].vddInd = | ||
| 471 | polaris_table->entries[i].ucVddInd; | ||
| 472 | sclk_table->entries[i].vdd_offset = | ||
| 473 | polaris_table->entries[i].usVddcOffset; | ||
| 474 | sclk_table->entries[i].clk = | ||
| 475 | polaris_table->entries[i].ulSclk; | ||
| 476 | sclk_table->entries[i].cks_enable = | ||
| 477 | (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; | ||
| 478 | sclk_table->entries[i].cks_voffset = | ||
| 479 | (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F); | ||
| 480 | sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset; | ||
| 481 | } | ||
| 482 | } | ||
| 446 | *pp_tonga_sclk_dep_table = sclk_table; | 483 | *pp_tonga_sclk_dep_table = sclk_table; |
| 447 | 484 | ||
| 448 | return 0; | 485 | return 0; |
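With this change the parser receives a generic sub-table header and branches on its revision: revision 0 is read as the original Tonga record layout, revision 1 or later as the extended Polaris layout, which additionally fills each entry's sclk_offset from ulSclkOffset. One small observation on both branches: kzalloc() already returns zeroed memory, so the memset() that follows the allocation is redundant. A trimmed sketch of the shared allocation step (error handling and the copy loops stay as in the hunk above; num_entries stands for ucNumEntries of whichever layout was selected):

	table_size = sizeof(uint32_t) +
		     sizeof(phm_ppt_v1_clock_voltage_dependency_record) * num_entries;

	sclk_table = kzalloc(table_size, GFP_KERNEL);	/* already zeroed, no memset needed */
	if (!sclk_table)
		return -ENOMEM;

	sclk_table->count = num_entries;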
| @@ -708,8 +745,8 @@ static int init_clock_voltage_dependency( | |||
| 708 | const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = | 745 | const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = |
| 709 | (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) + | 746 | (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) + |
| 710 | le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); | 747 | le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); |
| 711 | const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = | 748 | const PPTable_Generic_SubTable_Header *sclk_dep_table = |
| 712 | (const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) + | 749 | (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) + |
| 713 | le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); | 750 | le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); |
| 714 | const ATOM_Tonga_Hard_Limit_Table *pHardLimits = | 751 | const ATOM_Tonga_Hard_Limit_Table *pHardLimits = |
| 715 | (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) + | 752 | (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) + |
| @@ -1040,48 +1077,44 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) | |||
| 1040 | struct phm_ppt_v1_information *pp_table_information = | 1077 | struct phm_ppt_v1_information *pp_table_information = |
| 1041 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 1078 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 1042 | 1079 | ||
| 1043 | if (NULL != hwmgr->soft_pp_table) { | 1080 | if (NULL != hwmgr->soft_pp_table) |
| 1044 | kfree(hwmgr->soft_pp_table); | ||
| 1045 | hwmgr->soft_pp_table = NULL; | 1081 | hwmgr->soft_pp_table = NULL; |
| 1046 | } | ||
| 1047 | 1082 | ||
| 1048 | if (NULL != pp_table_information->vdd_dep_on_sclk) | 1083 | kfree(pp_table_information->vdd_dep_on_sclk); |
| 1049 | pp_table_information->vdd_dep_on_sclk = NULL; | 1084 | pp_table_information->vdd_dep_on_sclk = NULL; |
| 1050 | 1085 | ||
| 1051 | if (NULL != pp_table_information->vdd_dep_on_mclk) | 1086 | kfree(pp_table_information->vdd_dep_on_mclk); |
| 1052 | pp_table_information->vdd_dep_on_mclk = NULL; | 1087 | pp_table_information->vdd_dep_on_mclk = NULL; |
| 1053 | 1088 | ||
| 1054 | if (NULL != pp_table_information->valid_mclk_values) | 1089 | kfree(pp_table_information->valid_mclk_values); |
| 1055 | pp_table_information->valid_mclk_values = NULL; | 1090 | pp_table_information->valid_mclk_values = NULL; |
| 1056 | 1091 | ||
| 1057 | if (NULL != pp_table_information->valid_sclk_values) | 1092 | kfree(pp_table_information->valid_sclk_values); |
| 1058 | pp_table_information->valid_sclk_values = NULL; | 1093 | pp_table_information->valid_sclk_values = NULL; |
| 1059 | 1094 | ||
| 1060 | if (NULL != pp_table_information->vddc_lookup_table) | 1095 | kfree(pp_table_information->vddc_lookup_table); |
| 1061 | pp_table_information->vddc_lookup_table = NULL; | 1096 | pp_table_information->vddc_lookup_table = NULL; |
| 1062 | 1097 | ||
| 1063 | if (NULL != pp_table_information->vddgfx_lookup_table) | 1098 | kfree(pp_table_information->vddgfx_lookup_table); |
| 1064 | pp_table_information->vddgfx_lookup_table = NULL; | 1099 | pp_table_information->vddgfx_lookup_table = NULL; |
| 1065 | 1100 | ||
| 1066 | if (NULL != pp_table_information->mm_dep_table) | 1101 | kfree(pp_table_information->mm_dep_table); |
| 1067 | pp_table_information->mm_dep_table = NULL; | 1102 | pp_table_information->mm_dep_table = NULL; |
| 1068 | 1103 | ||
| 1069 | if (NULL != pp_table_information->cac_dtp_table) | 1104 | kfree(pp_table_information->cac_dtp_table); |
| 1070 | pp_table_information->cac_dtp_table = NULL; | 1105 | pp_table_information->cac_dtp_table = NULL; |
| 1071 | 1106 | ||
| 1072 | if (NULL != hwmgr->dyn_state.cac_dtp_table) | 1107 | kfree(hwmgr->dyn_state.cac_dtp_table); |
| 1073 | hwmgr->dyn_state.cac_dtp_table = NULL; | 1108 | hwmgr->dyn_state.cac_dtp_table = NULL; |
| 1074 | 1109 | ||
| 1075 | if (NULL != pp_table_information->ppm_parameter_table) | 1110 | kfree(pp_table_information->ppm_parameter_table); |
| 1076 | pp_table_information->ppm_parameter_table = NULL; | 1111 | pp_table_information->ppm_parameter_table = NULL; |
| 1077 | 1112 | ||
| 1078 | if (NULL != pp_table_information->pcie_table) | 1113 | kfree(pp_table_information->pcie_table); |
| 1079 | pp_table_information->pcie_table = NULL; | 1114 | pp_table_information->pcie_table = NULL; |
| 1080 | 1115 | ||
| 1081 | if (NULL != hwmgr->pptable) { | 1116 | kfree(hwmgr->pptable); |
| 1082 | kfree(hwmgr->pptable); | 1117 | hwmgr->pptable = NULL; |
| 1083 | hwmgr->pptable = NULL; | ||
| 1084 | } | ||
| 1085 | 1118 | ||
| 1086 | return result; | 1119 | return result; |
| 1087 | } | 1120 | } |
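The teardown now leans on the fact that kfree(NULL) is a no-op, so the per-pointer NULL checks are dropped and each sub-table is simply freed and cleared; hwmgr->soft_pp_table is only cleared here and no longer freed, presumably because that buffer is owned elsewhere. The repeated idiom could also be expressed as a tiny helper (illustrative only, the patch open-codes it):

#define free_and_null(p)	do { kfree(p); (p) = NULL; } while (0)

	free_and_null(pp_table_information->vdd_dep_on_sclk);
	free_and_null(pp_table_information->vdd_dep_on_mclk);
	/* ...and likewise for the remaining sub-tables and hwmgr->pptable. */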
diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h index 0c6a413eaa5b..d41d37ab5b7c 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | 27 | ||
| 28 | #pragma pack(push, 1) | 28 | #pragma pack(push, 1) |
| 29 | 29 | ||
| 30 | #define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305) | ||
| 30 | 31 | ||
| 31 | #define PPSMC_SWSTATE_FLAG_DC 0x01 | 32 | #define PPSMC_SWSTATE_FLAG_DC 0x01 |
| 32 | #define PPSMC_SWSTATE_FLAG_UVD 0x02 | 33 | #define PPSMC_SWSTATE_FLAG_UVD 0x02 |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h index 3bd5e69b9045..3df5de2cdab0 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h | |||
| @@ -26,3 +26,4 @@ extern bool acpi_atcs_functions_supported(void *device, | |||
| 26 | extern int acpi_pcie_perf_request(void *device, | 26 | extern int acpi_pcie_perf_request(void *device, |
| 27 | uint8_t perf_req, | 27 | uint8_t perf_req, |
| 28 | bool advertise); | 28 | bool advertise); |
| 29 | extern bool acpi_atcs_notify_pcie_device_ready(void *device); | ||
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74.h b/drivers/gpu/drm/amd/powerplay/inc/smu74.h index 1a12d85b8e97..fd10a9fa843d 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu74.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu74.h | |||
| @@ -34,6 +34,30 @@ | |||
| 34 | #define SMU__NUM_LCLK_DPM_LEVELS 8 | 34 | #define SMU__NUM_LCLK_DPM_LEVELS 8 |
| 35 | #define SMU__NUM_PCIE_DPM_LEVELS 8 | 35 | #define SMU__NUM_PCIE_DPM_LEVELS 8 |
| 36 | 36 | ||
| 37 | #define EXP_M1 35 | ||
| 38 | #define EXP_M2 92821 | ||
| 39 | #define EXP_B 66629747 | ||
| 40 | |||
| 41 | #define EXP_M1_1 365 | ||
| 42 | #define EXP_M2_1 658700 | ||
| 43 | #define EXP_B_1 305506134 | ||
| 44 | |||
| 45 | #define EXP_M1_2 189 | ||
| 46 | #define EXP_M2_2 379692 | ||
| 47 | #define EXP_B_2 194609469 | ||
| 48 | |||
| 49 | #define EXP_M1_3 99 | ||
| 50 | #define EXP_M2_3 217915 | ||
| 51 | #define EXP_B_3 122255994 | ||
| 52 | |||
| 53 | #define EXP_M1_4 51 | ||
| 54 | #define EXP_M2_4 122643 | ||
| 55 | #define EXP_B_4 74893384 | ||
| 56 | |||
| 57 | #define EXP_M1_5 423 | ||
| 58 | #define EXP_M2_5 1103326 | ||
| 59 | #define EXP_B_5 728122621 | ||
| 60 | |||
| 37 | enum SID_OPTION { | 61 | enum SID_OPTION { |
| 38 | SID_OPTION_HI, | 62 | SID_OPTION_HI, |
| 39 | SID_OPTION_LO, | 63 | SID_OPTION_LO, |
| @@ -548,20 +572,20 @@ struct SMU74_Firmware_Header { | |||
| 548 | uint32_t CacConfigTable; | 572 | uint32_t CacConfigTable; |
| 549 | uint32_t CacStatusTable; | 573 | uint32_t CacStatusTable; |
| 550 | 574 | ||
| 551 | |||
| 552 | uint32_t mcRegisterTable; | 575 | uint32_t mcRegisterTable; |
| 553 | 576 | ||
| 554 | |||
| 555 | uint32_t mcArbDramTimingTable; | 577 | uint32_t mcArbDramTimingTable; |
| 556 | 578 | ||
| 557 | |||
| 558 | |||
| 559 | |||
| 560 | uint32_t PmFuseTable; | 579 | uint32_t PmFuseTable; |
| 561 | uint32_t Globals; | 580 | uint32_t Globals; |
| 562 | uint32_t ClockStretcherTable; | 581 | uint32_t ClockStretcherTable; |
| 563 | uint32_t VftTable; | 582 | uint32_t VftTable; |
| 564 | uint32_t Reserved[21]; | 583 | uint32_t Reserved1; |
| 584 | uint32_t AvfsTable; | ||
| 585 | uint32_t AvfsCksOffGbvTable; | ||
| 586 | uint32_t AvfsMeanNSigma; | ||
| 587 | uint32_t AvfsSclkOffsetTable; | ||
| 588 | uint32_t Reserved[16]; | ||
| 565 | uint32_t Signature; | 589 | uint32_t Signature; |
| 566 | }; | 590 | }; |
| 567 | 591 | ||
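The firmware header gains four named AVFS table pointers without changing size: the old Reserved[21] block becomes Reserved1, the four Avfs* pointers and Reserved[16], i.e. 1 + 4 + 16 = 21 dwords, so the trailing Signature field stays at the same offset. A build-time guard along these lines could make that invariant explicit (the expected-offset constant is a placeholder, not something the patch defines):

#include <linux/bug.h>
#include <linux/stddef.h>

static inline void smu74_fw_header_layout_check(void)
{
	/* SMU74_FW_HEADER_SIGNATURE_OFFSET is hypothetical; renaming reserved
	 * dwords must not move the Signature field the SMC firmware expects. */
	BUILD_BUG_ON(offsetof(struct SMU74_Firmware_Header, Signature) !=
		     SMU74_FW_HEADER_SIGNATURE_OFFSET);
}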
| @@ -701,8 +725,6 @@ VR Config info is contained in dpmTable.VRConfig */ | |||
| 701 | struct SMU_ClockStretcherDataTableEntry { | 725 | struct SMU_ClockStretcherDataTableEntry { |
| 702 | uint8_t minVID; | 726 | uint8_t minVID; |
| 703 | uint8_t maxVID; | 727 | uint8_t maxVID; |
| 704 | |||
| 705 | |||
| 706 | uint16_t setting; | 728 | uint16_t setting; |
| 707 | }; | 729 | }; |
| 708 | typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry; | 730 | typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry; |
| @@ -769,6 +791,43 @@ struct VFT_TABLE_t { | |||
| 769 | typedef struct VFT_TABLE_t VFT_TABLE_t; | 791 | typedef struct VFT_TABLE_t VFT_TABLE_t; |
| 770 | 792 | ||
| 771 | 793 | ||
| 794 | /* Total margin, root mean square of Fmax + DC + Platform */ | ||
| 795 | struct AVFS_Margin_t { | ||
| 796 | VFT_CELL_t Cell[NUM_VFT_COLUMNS]; | ||
| 797 | }; | ||
| 798 | typedef struct AVFS_Margin_t AVFS_Margin_t; | ||
| 799 | |||
| 800 | #define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2 | ||
| 801 | #define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2 | ||
| 802 | |||
| 803 | struct GB_VDROOP_TABLE_t { | ||
| 804 | int32_t a0; | ||
| 805 | int32_t a1; | ||
| 806 | int32_t a2; | ||
| 807 | uint32_t spare; | ||
| 808 | }; | ||
| 809 | typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t; | ||
| 810 | |||
| 811 | struct AVFS_CksOff_Gbv_t { | ||
| 812 | VFT_CELL_t Cell[NUM_VFT_COLUMNS]; | ||
| 813 | }; | ||
| 814 | typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t; | ||
| 815 | |||
| 816 | struct AVFS_meanNsigma_t { | ||
| 817 | uint32_t Aconstant[3]; | ||
| 818 | uint16_t DC_tol_sigma; | ||
| 819 | uint16_t Platform_mean; | ||
| 820 | uint16_t Platform_sigma; | ||
| 821 | uint16_t PSM_Age_CompFactor; | ||
| 822 | uint8_t Static_Voltage_Offset[NUM_VFT_COLUMNS]; | ||
| 823 | }; | ||
| 824 | typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t; | ||
| 825 | |||
| 826 | struct AVFS_Sclk_Offset_t { | ||
| 827 | uint16_t Sclk_Offset[8]; | ||
| 828 | }; | ||
| 829 | typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t; | ||
| 830 | |||
| 772 | #endif | 831 | #endif |
| 773 | 832 | ||
| 774 | 833 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h index 0dfe82336dc7..b85ff5400e57 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h | |||
| @@ -223,6 +223,16 @@ struct SMU74_Discrete_StateInfo { | |||
| 223 | 223 | ||
| 224 | typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo; | 224 | typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo; |
| 225 | 225 | ||
| 226 | struct SMU_QuadraticCoeffs { | ||
| 227 | int32_t m1; | ||
| 228 | uint32_t b; | ||
| 229 | |||
| 230 | int16_t m2; | ||
| 231 | uint8_t m1_shift; | ||
| 232 | uint8_t m2_shift; | ||
| 233 | }; | ||
| 234 | typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs; | ||
| 235 | |||
| 226 | struct SMU74_Discrete_DpmTable { | 236 | struct SMU74_Discrete_DpmTable { |
| 227 | 237 | ||
| 228 | SMU74_PIDController GraphicsPIDController; | 238 | SMU74_PIDController GraphicsPIDController; |
| @@ -258,7 +268,14 @@ struct SMU74_Discrete_DpmTable { | |||
| 258 | uint8_t ThermOutPolarity; | 268 | uint8_t ThermOutPolarity; |
| 259 | uint8_t ThermOutMode; | 269 | uint8_t ThermOutMode; |
| 260 | uint8_t BootPhases; | 270 | uint8_t BootPhases; |
| 261 | uint32_t Reserved[4]; | 271 | |
| 272 | uint8_t VRHotLevel; | ||
| 273 | uint8_t Reserved1[3]; | ||
| 274 | uint16_t FanStartTemperature; | ||
| 275 | uint16_t FanStopTemperature; | ||
| 276 | uint16_t MaxVoltage; | ||
| 277 | uint16_t Reserved2; | ||
| 278 | uint32_t Reserved[1]; | ||
| 262 | 279 | ||
| 263 | SMU74_Discrete_GraphicsLevel GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS]; | 280 | SMU74_Discrete_GraphicsLevel GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS]; |
| 264 | SMU74_Discrete_MemoryLevel MemoryACPILevel; | 281 | SMU74_Discrete_MemoryLevel MemoryACPILevel; |
| @@ -347,6 +364,8 @@ struct SMU74_Discrete_DpmTable { | |||
| 347 | 364 | ||
| 348 | uint32_t CurrSclkPllRange; | 365 | uint32_t CurrSclkPllRange; |
| 349 | sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE]; | 366 | sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE]; |
| 367 | GB_VDROOP_TABLE_t BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES]; | ||
| 368 | SMU_QuadraticCoeffs AVFSGB_VDROOP_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES]; | ||
| 350 | }; | 369 | }; |
| 351 | 370 | ||
| 352 | typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable; | 371 | typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable; |
| @@ -550,16 +569,6 @@ struct SMU7_AcpiScoreboard { | |||
| 550 | 569 | ||
| 551 | typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard; | 570 | typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard; |
| 552 | 571 | ||
| 553 | struct SMU_QuadraticCoeffs { | ||
| 554 | int32_t m1; | ||
| 555 | uint32_t b; | ||
| 556 | |||
| 557 | int16_t m2; | ||
| 558 | uint8_t m1_shift; | ||
| 559 | uint8_t m2_shift; | ||
| 560 | }; | ||
| 561 | typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs; | ||
| 562 | |||
| 563 | struct SMU74_Discrete_PmFuses { | 572 | struct SMU74_Discrete_PmFuses { |
| 564 | uint8_t BapmVddCVidHiSidd[8]; | 573 | uint8_t BapmVddCVidHiSidd[8]; |
| 565 | uint8_t BapmVddCVidLoSidd[8]; | 574 | uint8_t BapmVddCVidLoSidd[8]; |
| @@ -821,6 +830,17 @@ typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard; | |||
| 821 | #define DB_PCC_SHIFT 26 | 830 | #define DB_PCC_SHIFT 26 |
| 822 | #define DB_EDC_SHIFT 27 | 831 | #define DB_EDC_SHIFT 27 |
| 823 | 832 | ||
| 833 | #define BTCGB0_Vdroop_Enable_MASK 0x1 | ||
| 834 | #define BTCGB1_Vdroop_Enable_MASK 0x2 | ||
| 835 | #define AVFSGB0_Vdroop_Enable_MASK 0x4 | ||
| 836 | #define AVFSGB1_Vdroop_Enable_MASK 0x8 | ||
| 837 | |||
| 838 | #define BTCGB0_Vdroop_Enable_SHIFT 0 | ||
| 839 | #define BTCGB1_Vdroop_Enable_SHIFT 1 | ||
| 840 | #define AVFSGB0_Vdroop_Enable_SHIFT 2 | ||
| 841 | #define AVFSGB1_Vdroop_Enable_SHIFT 3 | ||
| 842 | |||
| 843 | |||
| 824 | #pragma pack(pop) | 844 | #pragma pack(pop) |
| 825 | 845 | ||
| 826 | 846 | ||
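Three related pieces here: SMU_QuadraticCoeffs moves above SMU74_Discrete_DpmTable so the new AVFSGB_VDROOP_TABLE member can use it; the DpmTable's Reserved[4] is carved into VRHotLevel, Reserved1[3], the fan start/stop temperatures, MaxVoltage, Reserved2 and a single remaining reserved dword, which again totals 1 + 3 + 2 + 2 + 2 + 2 + 4 = 16 bytes so the table size is unchanged; and mask/shift pairs are added for the BTC/AVFS Vdroop enable bits. A sketch of how such a pair is typically consumed (assumed usage, the patch only adds the defines):

static inline bool avfsgb0_vdroop_enabled(uint32_t vdroop_flags)
{
	return (vdroop_flags & AVFSGB0_Vdroop_Enable_MASK) >>
	       AVFSGB0_Vdroop_Enable_SHIFT;
}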
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 673a75c74e18..8e52a2e82db5 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | |||
| @@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) | |||
| 1006 | 1006 | ||
| 1007 | static int fiji_smu_fini(struct pp_smumgr *smumgr) | 1007 | static int fiji_smu_fini(struct pp_smumgr *smumgr) |
| 1008 | { | 1008 | { |
| 1009 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); | ||
| 1010 | |||
| 1011 | smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); | ||
| 1012 | |||
| 1009 | if (smumgr->backend) { | 1013 | if (smumgr->backend) { |
| 1010 | kfree(smumgr->backend); | 1014 | kfree(smumgr->backend); |
| 1011 | smumgr->backend = NULL; | 1015 | smumgr->backend = NULL; |
| 1012 | } | 1016 | } |
| 1017 | |||
| 1018 | cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); | ||
| 1013 | return 0; | 1019 | return 0; |
| 1014 | } | 1020 | } |
| 1015 | 1021 | ||
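fiji_smu_fini() now frees the SMU header buffer and drops the SMU firmware reference on teardown. Note that the added smu_free_memory() call dereferences the backend before the existing NULL check; a slightly more defensive ordering, assuming the backend can in principle still be NULL at this point (an editorial sketch, not what the patch does):

static int fiji_smu_fini(struct pp_smumgr *smumgr)
{
	if (smumgr->backend) {
		struct fiji_smumgr *priv = smumgr->backend;

		smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle);
		kfree(smumgr->backend);
		smumgr->backend = NULL;
	}

	cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
	return 0;
}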
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index de618ead9db8..5dba7c509710 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | |||
| @@ -52,19 +52,18 @@ | |||
| 52 | static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { | 52 | static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { |
| 53 | /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */ | 53 | /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */ |
| 54 | /* Voltage, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */ | 54 | /* Voltage, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */ |
| 55 | { 0x3c0fd047, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x30750000, 0, 0, 0, 0, 0, 0, 0 } }, | 55 | { 0x100ea446, 0x00, 0x03, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x30750000, 0x3000, 0, 0x2600, 0, 0, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } }, |
| 56 | { 0xa00fd047, 0x01, 0x04, 0x1e00, 0x00800510, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x409c0000, 0, 0, 0, 0, 0, 0, 0 } }, | 56 | { 0x400ea446, 0x01, 0x04, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x409c0000, 0x2000, 0, 0x1e00, 1, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } }, |
| 57 | { 0x0410d047, 0x01, 0x00, 0x1e00, 0x00600410, 0x87020000, 0, 0, 0x0e, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x50c30000, 0, 0, 0, 0, 0, 0, 0 } }, | 57 | { 0x740ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x50c30000, 0x2800, 0, 0x2000, 1, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }, |
| 58 | { 0x6810d047, 0x01, 0x00, 0x1e00, 0x00800410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x60ea0000, 0, 0, 0, 0, 0, 0, 0 } }, | 58 | { 0xa40ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x60ea0000, 0x3000, 0, 0x2600, 1, 1, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } }, |
| 59 | { 0xcc10d047, 0x01, 0x00, 0x1e00, 0x00e00410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xe8fd0000, 0, 0, 0, 0, 0, 0, 0 } }, | 59 | { 0xd80ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x70110100, 0x3800, 0, 0x2c00, 1, 1, 0x0004, 0x1203, 0xffff, 0x3600, 0xc9e2, 0x2e00 } }, |
| 60 | { 0x3011d047, 0x01, 0x00, 0x1e00, 0x00400510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x70110100, 0, 0, 0, 0, 0, 0, 0 } }, | 60 | { 0x3c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x80380100, 0x2000, 0, 0x1e00, 2, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } }, |
| 61 | { 0x9411d047, 0x01, 0x00, 0x1e00, 0x00a00510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xf8240100, 0, 0, 0, 0, 0, 0, 0 } }, | 61 | { 0x6c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x905f0100, 0x2400, 0, 0x1e00, 2, 1, 0x0004, 0x8901, 0xffff, 0x2300, 0x314c, 0x1d00 } }, |
| 62 | { 0xf811d047, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x80380100, 0, 0, 0, 0, 0, 0, 0 } } | 62 | { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } } |
| 63 | }; | 63 | }; |
| 64 | 64 | ||
| 65 | static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = | 65 | static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = |
| 66 | {0x50140000, 0x50140000, 0x00320000, 0x00, 0x00, | 66 | {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; |
| 67 | 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0000, 0x00, 0x00}; | ||
| 68 | 67 | ||
| 69 | /** | 68 | /** |
| 70 | * Set the address for reading/writing the SMC SRAM space. | 69 | * Set the address for reading/writing the SMC SRAM space. |
| @@ -219,6 +218,18 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr) | |||
| 219 | && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); | 218 | && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); |
| 220 | } | 219 | } |
| 221 | 220 | ||
| 221 | static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr) | ||
| 222 | { | ||
| 223 | uint32_t efuse; | ||
| 224 | |||
| 225 | efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4)); | ||
| 226 | efuse &= 0x00000001; | ||
| 227 | if (efuse) | ||
| 228 | return true; | ||
| 229 | |||
| 230 | return false; | ||
| 231 | } | ||
| 232 | |||
| 222 | /** | 233 | /** |
| 223 | * Send a message to the SMC, and wait for its response. | 234 | * Send a message to the SMC, and wait for its response. |
| 224 | * | 235 | * |
| @@ -228,21 +239,27 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr) | |||
| 228 | */ | 239 | */ |
| 229 | int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) | 240 | int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) |
| 230 | { | 241 | { |
| 242 | int ret; | ||
| 243 | |||
| 231 | if (!polaris10_is_smc_ram_running(smumgr)) | 244 | if (!polaris10_is_smc_ram_running(smumgr)) |
| 232 | return -1; | 245 | return -1; |
| 233 | 246 | ||
| 247 | |||
| 234 | SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); | 248 | SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); |
| 235 | 249 | ||
| 236 | if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) | 250 | ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); |
| 237 | printk("Failed to send Previous Message.\n"); | ||
| 238 | 251 | ||
| 252 | if (ret != 1) | ||
| 253 | printk("\n failed to send pre message %x ret is %d \n", msg, ret); | ||
| 239 | 254 | ||
| 240 | cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); | 255 | cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); |
| 241 | 256 | ||
| 242 | SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); | 257 | SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); |
| 243 | 258 | ||
| 244 | if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) | 259 | ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); |
| 245 | printk("Failed to send Message.\n"); | 260 | |
| 261 | if (ret != 1) | ||
| 262 | printk("\n failed to send message %x ret is %d \n", msg, ret); | ||
| 246 | 263 | ||
| 247 | return 0; | 264 | return 0; |
| 248 | } | 265 | } |
| @@ -469,6 +486,7 @@ int polaris10_smu_fini(struct pp_smumgr *smumgr) | |||
| 469 | kfree(smumgr->backend); | 486 | kfree(smumgr->backend); |
| 470 | smumgr->backend = NULL; | 487 | smumgr->backend = NULL; |
| 471 | } | 488 | } |
| 489 | cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); | ||
| 472 | return 0; | 490 | return 0; |
| 473 | } | 491 | } |
| 474 | 492 | ||
| @@ -952,6 +970,11 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) | |||
| 952 | (cgs_handle_t)smu_data->smu_buffer.handle); | 970 | (cgs_handle_t)smu_data->smu_buffer.handle); |
| 953 | return -1;); | 971 | return -1;); |
| 954 | 972 | ||
| 973 | if (polaris10_is_hw_avfs_present(smumgr)) | ||
| 974 | smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; | ||
| 975 | else | ||
| 976 | smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; | ||
| 977 | |||
| 955 | return 0; | 978 | return 0; |
| 956 | } | 979 | } |
| 957 | 980 | ||
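polaris10_smu_init() now seeds avfs_btc_status from a hardware probe: polaris10_is_hw_avfs_present() reads dword 49 of the SMU efuse block (ixSMU_EFUSE_0 + 49 * 4) and treats bit 0 as the AVFS-present flag. The helper above could be written more compactly with identical behaviour:

static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
{
	return cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC,
				     ixSMU_EFUSE_0 + (49 * 4)) & 0x1;
}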
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index c483baf6b4fb..0728c1e3d97a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | |||
| @@ -81,6 +81,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) | |||
| 81 | 81 | ||
| 82 | int smum_fini(struct pp_smumgr *smumgr) | 82 | int smum_fini(struct pp_smumgr *smumgr) |
| 83 | { | 83 | { |
| 84 | kfree(smumgr->device); | ||
| 84 | kfree(smumgr); | 85 | kfree(smumgr); |
| 85 | return 0; | 86 | return 0; |
| 86 | } | 87 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index 32820b680d88..b22722eabafc 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | |||
| @@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, | |||
| 328 | 328 | ||
| 329 | static int tonga_smu_fini(struct pp_smumgr *smumgr) | 329 | static int tonga_smu_fini(struct pp_smumgr *smumgr) |
| 330 | { | 330 | { |
| 331 | struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend); | ||
| 332 | |||
| 333 | smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle); | ||
| 334 | smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); | ||
| 335 | |||
| 331 | if (smumgr->backend != NULL) { | 336 | if (smumgr->backend != NULL) { |
| 332 | kfree(smumgr->backend); | 337 | kfree(smumgr->backend); |
| 333 | smumgr->backend = NULL; | 338 | smumgr->backend = NULL; |
| 334 | } | 339 | } |
| 340 | |||
| 341 | cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); | ||
| 335 | return 0; | 342 | return 0; |
| 336 | } | 343 | } |
| 337 | 344 | ||
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index fef1b04c2aab..0813c2f06931 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c | |||
| @@ -33,8 +33,17 @@ | |||
| 33 | * | 33 | * |
| 34 | */ | 34 | */ |
| 35 | 35 | ||
| 36 | static void hdlcd_crtc_cleanup(struct drm_crtc *crtc) | ||
| 37 | { | ||
| 38 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); | ||
| 39 | |||
| 40 | /* stop the controller on cleanup */ | ||
| 41 | hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); | ||
| 42 | drm_crtc_cleanup(crtc); | ||
| 43 | } | ||
| 44 | |||
| 36 | static const struct drm_crtc_funcs hdlcd_crtc_funcs = { | 45 | static const struct drm_crtc_funcs hdlcd_crtc_funcs = { |
| 37 | .destroy = drm_crtc_cleanup, | 46 | .destroy = hdlcd_crtc_cleanup, |
| 38 | .set_config = drm_atomic_helper_set_config, | 47 | .set_config = drm_atomic_helper_set_config, |
| 39 | .page_flip = drm_atomic_helper_page_flip, | 48 | .page_flip = drm_atomic_helper_page_flip, |
| 40 | .reset = drm_atomic_helper_crtc_reset, | 49 | .reset = drm_atomic_helper_crtc_reset, |
| @@ -97,7 +106,7 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
| 97 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); | 106 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); |
| 98 | struct drm_display_mode *m = &crtc->state->adjusted_mode; | 107 | struct drm_display_mode *m = &crtc->state->adjusted_mode; |
| 99 | struct videomode vm; | 108 | struct videomode vm; |
| 100 | unsigned int polarities, line_length, err; | 109 | unsigned int polarities, err; |
| 101 | 110 | ||
| 102 | vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay; | 111 | vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay; |
| 103 | vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end; | 112 | vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end; |
| @@ -113,23 +122,18 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
| 113 | if (m->flags & DRM_MODE_FLAG_PVSYNC) | 122 | if (m->flags & DRM_MODE_FLAG_PVSYNC) |
| 114 | polarities |= HDLCD_POLARITY_VSYNC; | 123 | polarities |= HDLCD_POLARITY_VSYNC; |
| 115 | 124 | ||
| 116 | line_length = crtc->primary->state->fb->pitches[0]; | ||
| 117 | |||
| 118 | /* Allow max number of outstanding requests and largest burst size */ | 125 | /* Allow max number of outstanding requests and largest burst size */ |
| 119 | hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS, | 126 | hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS, |
| 120 | HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16); | 127 | HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16); |
| 121 | 128 | ||
| 122 | hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length); | ||
| 123 | hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length); | ||
| 124 | hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1); | ||
| 125 | hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1); | 129 | hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1); |
| 126 | hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1); | 130 | hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1); |
| 127 | hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1); | 131 | hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1); |
| 128 | hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1); | 132 | hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1); |
| 133 | hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1); | ||
| 129 | hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1); | 134 | hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1); |
| 130 | hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1); | 135 | hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1); |
| 131 | hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1); | 136 | hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1); |
| 132 | hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1); | ||
| 133 | hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities); | 137 | hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities); |
| 134 | 138 | ||
| 135 | err = hdlcd_set_pxl_fmt(crtc); | 139 | err = hdlcd_set_pxl_fmt(crtc); |
| @@ -144,20 +148,19 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc) | |||
| 144 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); | 148 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); |
| 145 | 149 | ||
| 146 | clk_prepare_enable(hdlcd->clk); | 150 | clk_prepare_enable(hdlcd->clk); |
| 151 | hdlcd_crtc_mode_set_nofb(crtc); | ||
| 147 | hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1); | 152 | hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1); |
| 148 | drm_crtc_vblank_on(crtc); | ||
| 149 | } | 153 | } |
| 150 | 154 | ||
| 151 | static void hdlcd_crtc_disable(struct drm_crtc *crtc) | 155 | static void hdlcd_crtc_disable(struct drm_crtc *crtc) |
| 152 | { | 156 | { |
| 153 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); | 157 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); |
| 154 | 158 | ||
| 155 | if (!crtc->primary->fb) | 159 | if (!crtc->state->active) |
| 156 | return; | 160 | return; |
| 157 | 161 | ||
| 158 | clk_disable_unprepare(hdlcd->clk); | ||
| 159 | hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); | 162 | hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); |
| 160 | drm_crtc_vblank_off(crtc); | 163 | clk_disable_unprepare(hdlcd->clk); |
| 161 | } | 164 | } |
| 162 | 165 | ||
| 163 | static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, | 166 | static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, |
| @@ -179,20 +182,17 @@ static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc, | |||
| 179 | static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, | 182 | static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc, |
| 180 | struct drm_crtc_state *state) | 183 | struct drm_crtc_state *state) |
| 181 | { | 184 | { |
| 182 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); | 185 | struct drm_pending_vblank_event *event = crtc->state->event; |
| 183 | unsigned long flags; | ||
| 184 | |||
| 185 | if (crtc->state->event) { | ||
| 186 | struct drm_pending_vblank_event *event = crtc->state->event; | ||
| 187 | 186 | ||
| 187 | if (event) { | ||
| 188 | crtc->state->event = NULL; | 188 | crtc->state->event = NULL; |
| 189 | event->pipe = drm_crtc_index(crtc); | ||
| 190 | |||
| 191 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); | ||
| 192 | 189 | ||
| 193 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | 190 | spin_lock_irq(&crtc->dev->event_lock); |
| 194 | list_add_tail(&event->base.link, &hdlcd->event_list); | 191 | if (drm_crtc_vblank_get(crtc) == 0) |
| 195 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | 192 | drm_crtc_arm_vblank_event(crtc, event); |
| 193 | else | ||
| 194 | drm_crtc_send_vblank_event(crtc, event); | ||
| 195 | spin_unlock_irq(&crtc->dev->event_lock); | ||
| 196 | } | 196 | } |
| 197 | } | 197 | } |
| 198 | 198 | ||
| @@ -225,6 +225,15 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = { | |||
| 225 | static int hdlcd_plane_atomic_check(struct drm_plane *plane, | 225 | static int hdlcd_plane_atomic_check(struct drm_plane *plane, |
| 226 | struct drm_plane_state *state) | 226 | struct drm_plane_state *state) |
| 227 | { | 227 | { |
| 228 | u32 src_w, src_h; | ||
| 229 | |||
| 230 | src_w = state->src_w >> 16; | ||
| 231 | src_h = state->src_h >> 16; | ||
| 232 | |||
| 233 | /* we can't do any scaling of the plane source */ | ||
| 234 | if ((src_w != state->crtc_w) || (src_h != state->crtc_h)) | ||
| 235 | return -EINVAL; | ||
| 236 | |||
| 228 | return 0; | 237 | return 0; |
| 229 | } | 238 | } |
| 230 | 239 | ||
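The new check rejects any plane scaling: src_w and src_h in drm_plane_state are 16.16 fixed-point source dimensions, so shifting them right by 16 yields whole pixels that must match the CRTC-space width and height exactly, since HDLCD has no scaler. A one-line helper capturing the conversion (the name is illustrative, not a DRM API):

static inline u32 fixed16_to_int(u32 v)
{
	return v >> 16;	/* drop the 16 fractional bits of a 16.16 value */
}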
| @@ -233,20 +242,31 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane, | |||
| 233 | { | 242 | { |
| 234 | struct hdlcd_drm_private *hdlcd; | 243 | struct hdlcd_drm_private *hdlcd; |
| 235 | struct drm_gem_cma_object *gem; | 244 | struct drm_gem_cma_object *gem; |
| 245 | unsigned int depth, bpp; | ||
| 246 | u32 src_w, src_h, dest_w, dest_h; | ||
| 236 | dma_addr_t scanout_start; | 247 | dma_addr_t scanout_start; |
| 237 | 248 | ||
| 238 | if (!plane->state->crtc || !plane->state->fb) | 249 | if (!plane->state->fb) |
| 239 | return; | 250 | return; |
| 240 | 251 | ||
| 241 | hdlcd = crtc_to_hdlcd_priv(plane->state->crtc); | 252 | drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp); |
| 253 | src_w = plane->state->src_w >> 16; | ||
| 254 | src_h = plane->state->src_h >> 16; | ||
| 255 | dest_w = plane->state->crtc_w; | ||
| 256 | dest_h = plane->state->crtc_h; | ||
| 242 | gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0); | 257 | gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0); |
| 243 | scanout_start = gem->paddr; | 258 | scanout_start = gem->paddr + plane->state->fb->offsets[0] + |
| 259 | plane->state->crtc_y * plane->state->fb->pitches[0] + | ||
| 260 | plane->state->crtc_x * bpp / 8; | ||
| 261 | |||
| 262 | hdlcd = plane->dev->dev_private; | ||
| 263 | hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]); | ||
| 264 | hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, plane->state->fb->pitches[0]); | ||
| 265 | hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1); | ||
| 244 | hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start); | 266 | hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start); |
| 245 | } | 267 | } |
| 246 | 268 | ||
| 247 | static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = { | 269 | static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = { |
| 248 | .prepare_fb = NULL, | ||
| 249 | .cleanup_fb = NULL, | ||
| 250 | .atomic_check = hdlcd_plane_atomic_check, | 270 | .atomic_check = hdlcd_plane_atomic_check, |
| 251 | .atomic_update = hdlcd_plane_atomic_update, | 271 | .atomic_update = hdlcd_plane_atomic_update, |
| 252 | }; | 272 | }; |
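The plane update hook now programs the line length, pitch and line count itself (they move here from the CRTC mode-set path above) and derives the scanout start from the GEM base address, the framebuffer offset, and the plane position converted to bytes. A restatement of that address arithmetic as a standalone helper (illustrative, not part of the patch):

static dma_addr_t scanout_address(dma_addr_t paddr, u32 fb_offset,
				  u32 x, u32 y, u32 pitch, u32 bpp)
{
	/* base + fb offset + y lines of pitch bytes + x pixels of bpp/8 bytes */
	return paddr + fb_offset + (dma_addr_t)y * pitch + (dma_addr_t)x * bpp / 8;
}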
| @@ -294,16 +314,6 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm) | |||
| 294 | return plane; | 314 | return plane; |
| 295 | } | 315 | } |
| 296 | 316 | ||
| 297 | void hdlcd_crtc_suspend(struct drm_crtc *crtc) | ||
| 298 | { | ||
| 299 | hdlcd_crtc_disable(crtc); | ||
| 300 | } | ||
| 301 | |||
| 302 | void hdlcd_crtc_resume(struct drm_crtc *crtc) | ||
| 303 | { | ||
| 304 | hdlcd_crtc_enable(crtc); | ||
| 305 | } | ||
| 306 | |||
| 307 | int hdlcd_setup_crtc(struct drm_device *drm) | 317 | int hdlcd_setup_crtc(struct drm_device *drm) |
| 308 | { | 318 | { |
| 309 | struct hdlcd_drm_private *hdlcd = drm->dev_private; | 319 | struct hdlcd_drm_private *hdlcd = drm->dev_private; |
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index b987c63ba8d6..a6ca36f0096f 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c | |||
| @@ -49,8 +49,6 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags) | |||
| 49 | atomic_set(&hdlcd->dma_end_count, 0); | 49 | atomic_set(&hdlcd->dma_end_count, 0); |
| 50 | #endif | 50 | #endif |
| 51 | 51 | ||
| 52 | INIT_LIST_HEAD(&hdlcd->event_list); | ||
| 53 | |||
| 54 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 52 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 55 | hdlcd->mmio = devm_ioremap_resource(drm->dev, res); | 53 | hdlcd->mmio = devm_ioremap_resource(drm->dev, res); |
| 56 | if (IS_ERR(hdlcd->mmio)) { | 54 | if (IS_ERR(hdlcd->mmio)) { |
| @@ -84,11 +82,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags) | |||
| 84 | goto setup_fail; | 82 | goto setup_fail; |
| 85 | } | 83 | } |
| 86 | 84 | ||
| 87 | pm_runtime_enable(drm->dev); | ||
| 88 | |||
| 89 | pm_runtime_get_sync(drm->dev); | ||
| 90 | ret = drm_irq_install(drm, platform_get_irq(pdev, 0)); | 85 | ret = drm_irq_install(drm, platform_get_irq(pdev, 0)); |
| 91 | pm_runtime_put_sync(drm->dev); | ||
| 92 | if (ret < 0) { | 86 | if (ret < 0) { |
| 93 | DRM_ERROR("failed to install IRQ handler\n"); | 87 | DRM_ERROR("failed to install IRQ handler\n"); |
| 94 | goto irq_fail; | 88 | goto irq_fail; |
| @@ -164,24 +158,9 @@ static irqreturn_t hdlcd_irq(int irq, void *arg) | |||
| 164 | atomic_inc(&hdlcd->vsync_count); | 158 | atomic_inc(&hdlcd->vsync_count); |
| 165 | 159 | ||
| 166 | #endif | 160 | #endif |
| 167 | if (irq_status & HDLCD_INTERRUPT_VSYNC) { | 161 | if (irq_status & HDLCD_INTERRUPT_VSYNC) |
| 168 | bool events_sent = false; | ||
| 169 | unsigned long flags; | ||
| 170 | struct drm_pending_vblank_event *e, *t; | ||
| 171 | |||
| 172 | drm_crtc_handle_vblank(&hdlcd->crtc); | 162 | drm_crtc_handle_vblank(&hdlcd->crtc); |
| 173 | 163 | ||
| 174 | spin_lock_irqsave(&drm->event_lock, flags); | ||
| 175 | list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) { | ||
| 176 | list_del(&e->base.link); | ||
| 177 | drm_crtc_send_vblank_event(&hdlcd->crtc, e); | ||
| 178 | events_sent = true; | ||
| 179 | } | ||
| 180 | if (events_sent) | ||
| 181 | drm_crtc_vblank_put(&hdlcd->crtc); | ||
| 182 | spin_unlock_irqrestore(&drm->event_lock, flags); | ||
| 183 | } | ||
| 184 | |||
| 185 | /* acknowledge interrupt(s) */ | 164 | /* acknowledge interrupt(s) */ |
| 186 | hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status); | 165 | hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status); |
| 187 | 166 | ||
| @@ -275,6 +254,7 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg) | |||
| 275 | static struct drm_info_list hdlcd_debugfs_list[] = { | 254 | static struct drm_info_list hdlcd_debugfs_list[] = { |
| 276 | { "interrupt_count", hdlcd_show_underrun_count, 0 }, | 255 | { "interrupt_count", hdlcd_show_underrun_count, 0 }, |
| 277 | { "clocks", hdlcd_show_pxlclock, 0 }, | 256 | { "clocks", hdlcd_show_pxlclock, 0 }, |
| 257 | { "fb", drm_fb_cma_debugfs_show, 0 }, | ||
| 278 | }; | 258 | }; |
| 279 | 259 | ||
| 280 | static int hdlcd_debugfs_init(struct drm_minor *minor) | 260 | static int hdlcd_debugfs_init(struct drm_minor *minor) |
| @@ -357,6 +337,8 @@ static int hdlcd_drm_bind(struct device *dev) | |||
| 357 | return -ENOMEM; | 337 | return -ENOMEM; |
| 358 | 338 | ||
| 359 | drm->dev_private = hdlcd; | 339 | drm->dev_private = hdlcd; |
| 340 | dev_set_drvdata(dev, drm); | ||
| 341 | |||
| 360 | hdlcd_setup_mode_config(drm); | 342 | hdlcd_setup_mode_config(drm); |
| 361 | ret = hdlcd_load(drm, 0); | 343 | ret = hdlcd_load(drm, 0); |
| 362 | if (ret) | 344 | if (ret) |
| @@ -366,14 +348,18 @@ static int hdlcd_drm_bind(struct device *dev) | |||
| 366 | if (ret) | 348 | if (ret) |
| 367 | goto err_unload; | 349 | goto err_unload; |
| 368 | 350 | ||
| 369 | dev_set_drvdata(dev, drm); | ||
| 370 | |||
| 371 | ret = component_bind_all(dev, drm); | 351 | ret = component_bind_all(dev, drm); |
| 372 | if (ret) { | 352 | if (ret) { |
| 373 | DRM_ERROR("Failed to bind all components\n"); | 353 | DRM_ERROR("Failed to bind all components\n"); |
| 374 | goto err_unregister; | 354 | goto err_unregister; |
| 375 | } | 355 | } |
| 376 | 356 | ||
| 357 | ret = pm_runtime_set_active(dev); | ||
| 358 | if (ret) | ||
| 359 | goto err_pm_active; | ||
| 360 | |||
| 361 | pm_runtime_enable(dev); | ||
| 362 | |||
| 377 | ret = drm_vblank_init(drm, drm->mode_config.num_crtc); | 363 | ret = drm_vblank_init(drm, drm->mode_config.num_crtc); |
| 378 | if (ret < 0) { | 364 | if (ret < 0) { |
| 379 | DRM_ERROR("failed to initialise vblank\n"); | 365 | DRM_ERROR("failed to initialise vblank\n"); |
| @@ -399,16 +385,16 @@ err_fbdev: | |||
| 399 | drm_mode_config_cleanup(drm); | 385 | drm_mode_config_cleanup(drm); |
| 400 | drm_vblank_cleanup(drm); | 386 | drm_vblank_cleanup(drm); |
| 401 | err_vblank: | 387 | err_vblank: |
| 388 | pm_runtime_disable(drm->dev); | ||
| 389 | err_pm_active: | ||
| 402 | component_unbind_all(dev, drm); | 390 | component_unbind_all(dev, drm); |
| 403 | err_unregister: | 391 | err_unregister: |
| 404 | drm_dev_unregister(drm); | 392 | drm_dev_unregister(drm); |
| 405 | err_unload: | 393 | err_unload: |
| 406 | pm_runtime_get_sync(drm->dev); | ||
| 407 | drm_irq_uninstall(drm); | 394 | drm_irq_uninstall(drm); |
| 408 | pm_runtime_put_sync(drm->dev); | ||
| 409 | pm_runtime_disable(drm->dev); | ||
| 410 | of_reserved_mem_device_release(drm->dev); | 395 | of_reserved_mem_device_release(drm->dev); |
| 411 | err_free: | 396 | err_free: |
| 397 | dev_set_drvdata(dev, NULL); | ||
| 412 | drm_dev_unref(drm); | 398 | drm_dev_unref(drm); |
| 413 | 399 | ||
| 414 | return ret; | 400 | return ret; |
| @@ -495,30 +481,34 @@ MODULE_DEVICE_TABLE(of, hdlcd_of_match); | |||
| 495 | static int __maybe_unused hdlcd_pm_suspend(struct device *dev) | 481 | static int __maybe_unused hdlcd_pm_suspend(struct device *dev) |
| 496 | { | 482 | { |
| 497 | struct drm_device *drm = dev_get_drvdata(dev); | 483 | struct drm_device *drm = dev_get_drvdata(dev); |
| 498 | struct drm_crtc *crtc; | 484 | struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL; |
| 499 | 485 | ||
| 500 | if (pm_runtime_suspended(dev)) | 486 | if (!hdlcd) |
| 501 | return 0; | 487 | return 0; |
| 502 | 488 | ||
| 503 | drm_modeset_lock_all(drm); | 489 | drm_kms_helper_poll_disable(drm); |
| 504 | list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) | 490 | |
| 505 | hdlcd_crtc_suspend(crtc); | 491 | hdlcd->state = drm_atomic_helper_suspend(drm); |
| 506 | drm_modeset_unlock_all(drm); | 492 | if (IS_ERR(hdlcd->state)) { |
| 493 | drm_kms_helper_poll_enable(drm); | ||
| 494 | return PTR_ERR(hdlcd->state); | ||
| 495 | } | ||
| 496 | |||
| 507 | return 0; | 497 | return 0; |
| 508 | } | 498 | } |
| 509 | 499 | ||
| 510 | static int __maybe_unused hdlcd_pm_resume(struct device *dev) | 500 | static int __maybe_unused hdlcd_pm_resume(struct device *dev) |
| 511 | { | 501 | { |
| 512 | struct drm_device *drm = dev_get_drvdata(dev); | 502 | struct drm_device *drm = dev_get_drvdata(dev); |
| 513 | struct drm_crtc *crtc; | 503 | struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL; |
| 514 | 504 | ||
| 515 | if (!pm_runtime_suspended(dev)) | 505 | if (!hdlcd) |
| 516 | return 0; | 506 | return 0; |
| 517 | 507 | ||
| 518 | drm_modeset_lock_all(drm); | 508 | drm_atomic_helper_resume(drm, hdlcd->state); |
| 519 | list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) | 509 | drm_kms_helper_poll_enable(drm); |
| 520 | hdlcd_crtc_resume(crtc); | 510 | pm_runtime_set_active(dev); |
| 521 | drm_modeset_unlock_all(drm); | 511 | |
| 522 | return 0; | 512 | return 0; |
| 523 | } | 513 | } |
| 524 | 514 | ||
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h index aa234784f053..e3950a071152 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.h +++ b/drivers/gpu/drm/arm/hdlcd_drv.h | |||
| @@ -9,10 +9,9 @@ struct hdlcd_drm_private { | |||
| 9 | void __iomem *mmio; | 9 | void __iomem *mmio; |
| 10 | struct clk *clk; | 10 | struct clk *clk; |
| 11 | struct drm_fbdev_cma *fbdev; | 11 | struct drm_fbdev_cma *fbdev; |
| 12 | struct drm_framebuffer *fb; | ||
| 13 | struct list_head event_list; | ||
| 14 | struct drm_crtc crtc; | 12 | struct drm_crtc crtc; |
| 15 | struct drm_plane *plane; | 13 | struct drm_plane *plane; |
| 14 | struct drm_atomic_state *state; | ||
| 16 | #ifdef CONFIG_DEBUG_FS | 15 | #ifdef CONFIG_DEBUG_FS |
| 17 | atomic_t buffer_underrun_count; | 16 | atomic_t buffer_underrun_count; |
| 18 | atomic_t bus_error_count; | 17 | atomic_t bus_error_count; |
| @@ -36,7 +35,5 @@ static inline u32 hdlcd_read(struct hdlcd_drm_private *hdlcd, unsigned int reg) | |||
| 36 | 35 | ||
| 37 | int hdlcd_setup_crtc(struct drm_device *dev); | 36 | int hdlcd_setup_crtc(struct drm_device *dev); |
| 38 | void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd); | 37 | void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd); |
| 39 | void hdlcd_crtc_suspend(struct drm_crtc *crtc); | ||
| 40 | void hdlcd_crtc_resume(struct drm_crtc *crtc); | ||
| 41 | 38 | ||
| 42 | #endif /* __HDLCD_DRV_H__ */ | 39 | #endif /* __HDLCD_DRV_H__ */ |
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index cf23a755f777..bd12231ab0cd 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | |||
| @@ -391,12 +391,11 @@ void atmel_hlcdc_crtc_reset(struct drm_crtc *crtc) | |||
| 391 | { | 391 | { |
| 392 | struct atmel_hlcdc_crtc_state *state; | 392 | struct atmel_hlcdc_crtc_state *state; |
| 393 | 393 | ||
| 394 | if (crtc->state && crtc->state->mode_blob) | ||
| 395 | drm_property_unreference_blob(crtc->state->mode_blob); | ||
| 396 | |||
| 397 | if (crtc->state) { | 394 | if (crtc->state) { |
| 395 | __drm_atomic_helper_crtc_destroy_state(crtc->state); | ||
| 398 | state = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state); | 396 | state = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state); |
| 399 | kfree(state); | 397 | kfree(state); |
| 398 | crtc->state = NULL; | ||
| 400 | } | 399 | } |
| 401 | 400 | ||
| 402 | state = kzalloc(sizeof(*state), GFP_KERNEL); | 401 | state = kzalloc(sizeof(*state), GFP_KERNEL); |
| @@ -415,8 +414,9 @@ atmel_hlcdc_crtc_duplicate_state(struct drm_crtc *crtc) | |||
| 415 | return NULL; | 414 | return NULL; |
| 416 | 415 | ||
| 417 | state = kmalloc(sizeof(*state), GFP_KERNEL); | 416 | state = kmalloc(sizeof(*state), GFP_KERNEL); |
| 418 | if (state) | 417 | if (!state) |
| 419 | __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); | 418 | return NULL; |
| 419 | __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base); | ||
| 420 | 420 | ||
| 421 | cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state); | 421 | cur = drm_crtc_state_to_atmel_hlcdc_crtc_state(crtc->state); |
| 422 | state->output_mode = cur->output_mode; | 422 | state->output_mode = cur->output_mode; |
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 39802c0539b6..3d34fc4ca826 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | |||
| @@ -266,9 +266,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev) | |||
| 266 | if (!ret) | 266 | if (!ret) |
| 267 | ret = atmel_hlcdc_check_endpoint(dev, &ep); | 267 | ret = atmel_hlcdc_check_endpoint(dev, &ep); |
| 268 | 268 | ||
| 269 | of_node_put(ep_np); | 269 | if (ret) { |
| 270 | if (ret) | 270 | of_node_put(ep_np); |
| 271 | return ret; | 271 | return ret; |
| 272 | } | ||
| 272 | } | 273 | } |
| 273 | 274 | ||
| 274 | for_each_endpoint_of_node(dev->dev->of_node, ep_np) { | 275 | for_each_endpoint_of_node(dev->dev->of_node, ep_np) { |
| @@ -276,9 +277,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev) | |||
| 276 | if (!ret) | 277 | if (!ret) |
| 277 | ret = atmel_hlcdc_attach_endpoint(dev, &ep); | 278 | ret = atmel_hlcdc_attach_endpoint(dev, &ep); |
| 278 | 279 | ||
| 279 | of_node_put(ep_np); | 280 | if (ret) { |
| 280 | if (ret) | 281 | of_node_put(ep_np); |
| 281 | return ret; | 282 | return ret; |
| 283 | } | ||
| 282 | } | 284 | } |
| 283 | 285 | ||
| 284 | return 0; | 286 | return 0; |
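Both endpoint loops now call of_node_put() only when bailing out early. That matches the of_graph iterator contract: for_each_endpoint_of_node() drops the reference it holds on the current endpoint when it advances to the next one, so an unconditional put inside the loop body would release the node twice, while an early return does need the explicit put. The general shape (a generic sketch, not tied to this driver):

	for_each_endpoint_of_node(parent, ep) {
		ret = handle_endpoint(ep);	/* hypothetical per-endpoint work */
		if (ret) {
			of_node_put(ep);	/* leaving the loop early */
			return ret;
		}
	}
	return 0;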
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index aef3ca8a81fa..016c191221f3 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | |||
| @@ -339,6 +339,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane, | |||
| 339 | 339 | ||
| 340 | atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, | 340 | atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, |
| 341 | factor_reg); | 341 | factor_reg); |
| 342 | } else { | ||
| 343 | atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0); | ||
| 342 | } | 344 | } |
| 343 | } | 345 | } |
| 344 | 346 | ||
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 3ff1ed7b33db..9bb99e274d23 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c | |||
| @@ -351,6 +351,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, | |||
| 351 | drm_property_unreference_blob(state->mode_blob); | 351 | drm_property_unreference_blob(state->mode_blob); |
| 352 | state->mode_blob = NULL; | 352 | state->mode_blob = NULL; |
| 353 | 353 | ||
| 354 | memset(&state->mode, 0, sizeof(state->mode)); | ||
| 355 | |||
| 354 | if (blob) { | 356 | if (blob) { |
| 355 | if (blob->length != sizeof(struct drm_mode_modeinfo) || | 357 | if (blob->length != sizeof(struct drm_mode_modeinfo) || |
| 356 | drm_mode_convert_umode(&state->mode, | 358 | drm_mode_convert_umode(&state->mode, |
| @@ -363,7 +365,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, | |||
| 363 | DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n", | 365 | DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n", |
| 364 | state->mode.name, state); | 366 | state->mode.name, state); |
| 365 | } else { | 367 | } else { |
| 366 | memset(&state->mode, 0, sizeof(state->mode)); | ||
| 367 | state->enable = false; | 368 | state->enable = false; |
| 368 | DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n", | 369 | DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n", |
| 369 | state); | 370 | state); |
| @@ -1295,14 +1296,39 @@ EXPORT_SYMBOL(drm_atomic_add_affected_planes); | |||
| 1295 | */ | 1296 | */ |
| 1296 | void drm_atomic_legacy_backoff(struct drm_atomic_state *state) | 1297 | void drm_atomic_legacy_backoff(struct drm_atomic_state *state) |
| 1297 | { | 1298 | { |
| 1299 | struct drm_device *dev = state->dev; | ||
| 1300 | unsigned crtc_mask = 0; | ||
| 1301 | struct drm_crtc *crtc; | ||
| 1298 | int ret; | 1302 | int ret; |
| 1303 | bool global = false; | ||
| 1304 | |||
| 1305 | drm_for_each_crtc(crtc, dev) { | ||
| 1306 | if (crtc->acquire_ctx != state->acquire_ctx) | ||
| 1307 | continue; | ||
| 1308 | |||
| 1309 | crtc_mask |= drm_crtc_mask(crtc); | ||
| 1310 | crtc->acquire_ctx = NULL; | ||
| 1311 | } | ||
| 1312 | |||
| 1313 | if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) { | ||
| 1314 | global = true; | ||
| 1315 | |||
| 1316 | dev->mode_config.acquire_ctx = NULL; | ||
| 1317 | } | ||
| 1299 | 1318 | ||
| 1300 | retry: | 1319 | retry: |
| 1301 | drm_modeset_backoff(state->acquire_ctx); | 1320 | drm_modeset_backoff(state->acquire_ctx); |
| 1302 | 1321 | ||
| 1303 | ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx); | 1322 | ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx); |
| 1304 | if (ret) | 1323 | if (ret) |
| 1305 | goto retry; | 1324 | goto retry; |
| 1325 | |||
| 1326 | drm_for_each_crtc(crtc, dev) | ||
| 1327 | if (drm_crtc_mask(crtc) & crtc_mask) | ||
| 1328 | crtc->acquire_ctx = state->acquire_ctx; | ||
| 1329 | |||
| 1330 | if (global) | ||
| 1331 | dev->mode_config.acquire_ctx = state->acquire_ctx; | ||
| 1306 | } | 1332 | } |
| 1307 | EXPORT_SYMBOL(drm_atomic_legacy_backoff); | 1333 | EXPORT_SYMBOL(drm_atomic_legacy_backoff); |
| 1308 | 1334 | ||
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index d2a6d958ca76..0e3cc66aa8b7 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
| @@ -2821,8 +2821,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, | |||
| 2821 | goto out; | 2821 | goto out; |
| 2822 | } | 2822 | } |
| 2823 | 2823 | ||
| 2824 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | ||
| 2825 | |||
| 2826 | /* | 2824 | /* |
| 2827 | * Check whether the primary plane supports the fb pixel format. | 2825 | * Check whether the primary plane supports the fb pixel format. |
| 2828 | * Drivers not implementing the universal planes API use a | 2826 | * Drivers not implementing the universal planes API use a |
| @@ -4841,7 +4839,8 @@ bool drm_property_change_valid_get(struct drm_property *property, | |||
| 4841 | if (value == 0) | 4839 | if (value == 0) |
| 4842 | return true; | 4840 | return true; |
| 4843 | 4841 | ||
| 4844 | return _object_find(property->dev, value, property->values[0]) != NULL; | 4842 | *ref = _object_find(property->dev, value, property->values[0]); |
| 4843 | return *ref != NULL; | ||
| 4845 | } | 4844 | } |
| 4846 | 4845 | ||
| 4847 | for (i = 0; i < property->num_values; i++) | 4846 | for (i = 0; i < property->num_values; i++) |
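The drm_property_change_valid_get() change above hands the object looked up by _object_find() back to the caller through *ref instead of throwing the pointer away, so the reference taken during validation can be dropped once the property update is finished. A hypothetical caller-side sketch; the name of the put counterpart is assumed from the surrounding code base and is not part of this diff:

	/* Sketch only: drm_property_change_valid_put() is assumed here. */
	struct drm_mode_object *ref = NULL;

	if (!drm_property_change_valid_get(property, value, &ref))
		return -EINVAL;

	/* ... apply the property change while the reference is held ... */

	drm_property_change_valid_put(property, ref);
	return 0;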
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index a6e42433ef0e..26feb2f8453f 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
| @@ -528,11 +528,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc) | |||
| 528 | int drm_crtc_helper_set_config(struct drm_mode_set *set) | 528 | int drm_crtc_helper_set_config(struct drm_mode_set *set) |
| 529 | { | 529 | { |
| 530 | struct drm_device *dev; | 530 | struct drm_device *dev; |
| 531 | struct drm_crtc *new_crtc; | 531 | struct drm_crtc **save_encoder_crtcs, *new_crtc; |
| 532 | struct drm_encoder *save_encoders, *new_encoder, *encoder; | 532 | struct drm_encoder **save_connector_encoders, *new_encoder, *encoder; |
| 533 | bool mode_changed = false; /* if true do a full mode set */ | 533 | bool mode_changed = false; /* if true do a full mode set */ |
| 534 | bool fb_changed = false; /* if true and !mode_changed just do a flip */ | 534 | bool fb_changed = false; /* if true and !mode_changed just do a flip */ |
| 535 | struct drm_connector *save_connectors, *connector; | 535 | struct drm_connector *connector; |
| 536 | int count = 0, ro, fail = 0; | 536 | int count = 0, ro, fail = 0; |
| 537 | const struct drm_crtc_helper_funcs *crtc_funcs; | 537 | const struct drm_crtc_helper_funcs *crtc_funcs; |
| 538 | struct drm_mode_set save_set; | 538 | struct drm_mode_set save_set; |
| @@ -574,15 +574,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
| 574 | * Allocate space for the backup of all (non-pointer) encoder and | 574 | * Allocate space for the backup of all (non-pointer) encoder and |
| 575 | * connector data. | 575 | * connector data. |
| 576 | */ | 576 | */ |
| 577 | save_encoders = kzalloc(dev->mode_config.num_encoder * | 577 | save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder * |
| 578 | sizeof(struct drm_encoder), GFP_KERNEL); | 578 | sizeof(struct drm_crtc *), GFP_KERNEL); |
| 579 | if (!save_encoders) | 579 | if (!save_encoder_crtcs) |
| 580 | return -ENOMEM; | 580 | return -ENOMEM; |
| 581 | 581 | ||
| 582 | save_connectors = kzalloc(dev->mode_config.num_connector * | 582 | save_connector_encoders = kzalloc(dev->mode_config.num_connector * |
| 583 | sizeof(struct drm_connector), GFP_KERNEL); | 583 | sizeof(struct drm_encoder *), GFP_KERNEL); |
| 584 | if (!save_connectors) { | 584 | if (!save_connector_encoders) { |
| 585 | kfree(save_encoders); | 585 | kfree(save_encoder_crtcs); |
| 586 | return -ENOMEM; | 586 | return -ENOMEM; |
| 587 | } | 587 | } |
| 588 | 588 | ||
| @@ -593,12 +593,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
| 593 | */ | 593 | */ |
| 594 | count = 0; | 594 | count = 0; |
| 595 | drm_for_each_encoder(encoder, dev) { | 595 | drm_for_each_encoder(encoder, dev) { |
| 596 | save_encoders[count++] = *encoder; | 596 | save_encoder_crtcs[count++] = encoder->crtc; |
| 597 | } | 597 | } |
| 598 | 598 | ||
| 599 | count = 0; | 599 | count = 0; |
| 600 | drm_for_each_connector(connector, dev) { | 600 | drm_for_each_connector(connector, dev) { |
| 601 | save_connectors[count++] = *connector; | 601 | save_connector_encoders[count++] = connector->encoder; |
| 602 | } | 602 | } |
| 603 | 603 | ||
| 604 | save_set.crtc = set->crtc; | 604 | save_set.crtc = set->crtc; |
| @@ -631,8 +631,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
| 631 | mode_changed = true; | 631 | mode_changed = true; |
| 632 | } | 632 | } |
| 633 | 633 | ||
| 634 | /* take a reference on all connectors in set */ | 634 | /* take a reference on all unbound connectors in set, reuse the |
| 635 | * already taken reference for bound connectors | ||
| 636 | */ | ||
| 635 | for (ro = 0; ro < set->num_connectors; ro++) { | 637 | for (ro = 0; ro < set->num_connectors; ro++) { |
| 638 | if (set->connectors[ro]->encoder) | ||
| 639 | continue; | ||
| 636 | drm_connector_reference(set->connectors[ro]); | 640 | drm_connector_reference(set->connectors[ro]); |
| 637 | } | 641 | } |
| 638 | 642 | ||
| @@ -754,30 +758,28 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) | |||
| 754 | } | 758 | } |
| 755 | } | 759 | } |
| 756 | 760 | ||
| 757 | /* after fail drop reference on all connectors in save set */ | 761 | kfree(save_connector_encoders); |
| 758 | count = 0; | 762 | kfree(save_encoder_crtcs); |
| 759 | drm_for_each_connector(connector, dev) { | ||
| 760 | drm_connector_unreference(&save_connectors[count++]); | ||
| 761 | } | ||
| 762 | |||
| 763 | kfree(save_connectors); | ||
| 764 | kfree(save_encoders); | ||
| 765 | return 0; | 763 | return 0; |
| 766 | 764 | ||
| 767 | fail: | 765 | fail: |
| 768 | /* Restore all previous data. */ | 766 | /* Restore all previous data. */ |
| 769 | count = 0; | 767 | count = 0; |
| 770 | drm_for_each_encoder(encoder, dev) { | 768 | drm_for_each_encoder(encoder, dev) { |
| 771 | *encoder = save_encoders[count++]; | 769 | encoder->crtc = save_encoder_crtcs[count++]; |
| 772 | } | 770 | } |
| 773 | 771 | ||
| 774 | count = 0; | 772 | count = 0; |
| 775 | drm_for_each_connector(connector, dev) { | 773 | drm_for_each_connector(connector, dev) { |
| 776 | *connector = save_connectors[count++]; | 774 | connector->encoder = save_connector_encoders[count++]; |
| 777 | } | 775 | } |
| 778 | 776 | ||
| 779 | /* after fail drop reference on all connectors in set */ | 777 | /* after fail drop reference on all unbound connectors in set, let |
| 778 | * bound connectors keep their reference | ||
| 779 | */ | ||
| 780 | for (ro = 0; ro < set->num_connectors; ro++) { | 780 | for (ro = 0; ro < set->num_connectors; ro++) { |
| 781 | if (set->connectors[ro]->encoder) | ||
| 782 | continue; | ||
| 781 | drm_connector_unreference(set->connectors[ro]); | 783 | drm_connector_unreference(set->connectors[ro]); |
| 782 | } | 784 | } |
| 783 | 785 | ||
| @@ -787,8 +789,8 @@ fail: | |||
| 787 | save_set.y, save_set.fb)) | 789 | save_set.y, save_set.fb)) |
| 788 | DRM_ERROR("failed to restore config after modeset failure\n"); | 790 | DRM_ERROR("failed to restore config after modeset failure\n"); |
| 789 | 791 | ||
| 790 | kfree(save_connectors); | 792 | kfree(save_connector_encoders); |
| 791 | kfree(save_encoders); | 793 | kfree(save_encoder_crtcs); |
| 792 | return ret; | 794 | return ret; |
| 793 | } | 795 | } |
| 794 | EXPORT_SYMBOL(drm_crtc_helper_set_config); | 796 | EXPORT_SYMBOL(drm_crtc_helper_set_config); |
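The point of the drm_crtc_helper_set_config() rework above is that the failure path only ever restores the two links the function actually rewrites, encoder->crtc and connector->encoder, so backing up whole drm_encoder and drm_connector structures (including their embedded reference counts and list heads) was both wasteful and unsafe. A condensed sketch of the new save/restore shape, pieced together from the hunks above with allocation error handling and the mode set itself elided:

	struct drm_crtc **save_encoder_crtcs;
	struct drm_encoder **save_connector_encoders;

	/* Back up only the pointers this function modifies. */
	save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder *
				     sizeof(struct drm_crtc *), GFP_KERNEL);
	save_connector_encoders = kzalloc(dev->mode_config.num_connector *
					  sizeof(struct drm_encoder *), GFP_KERNEL);

	count = 0;
	drm_for_each_encoder(encoder, dev)
		save_encoder_crtcs[count++] = encoder->crtc;

	count = 0;
	drm_for_each_connector(connector, dev)
		save_connector_encoders[count++] = connector->encoder;

	/* ... attempt the mode set ... */

fail:
	/* On failure, put the saved links back instead of memcpy'ing whole
	 * objects over live refcounts and list heads. */
	count = 0;
	drm_for_each_encoder(encoder, dev)
		encoder->crtc = save_encoder_crtcs[count++];

	count = 0;
	drm_for_each_connector(connector, dev)
		connector->encoder = save_connector_encoders[count++];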
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index a13edf5de2d6..6537908050d7 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c | |||
| @@ -2927,11 +2927,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) | |||
| 2927 | drm_dp_port_teardown_pdt(port, port->pdt); | 2927 | drm_dp_port_teardown_pdt(port, port->pdt); |
| 2928 | 2928 | ||
| 2929 | if (!port->input && port->vcpi.vcpi > 0) { | 2929 | if (!port->input && port->vcpi.vcpi > 0) { |
| 2930 | if (mgr->mst_state) { | 2930 | drm_dp_mst_reset_vcpi_slots(mgr, port); |
| 2931 | drm_dp_mst_reset_vcpi_slots(mgr, port); | 2931 | drm_dp_update_payload_part1(mgr); |
| 2932 | drm_dp_update_payload_part1(mgr); | 2932 | drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); |
| 2933 | drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); | ||
| 2934 | } | ||
| 2935 | } | 2933 | } |
| 2936 | 2934 | ||
| 2937 | kref_put(&port->kref, drm_dp_free_mst_port); | 2935 | kref_put(&port->kref, drm_dp_free_mst_port); |
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index 172cafe11c71..5075fae3c4e2 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c | |||
| @@ -445,7 +445,7 @@ err_cma_destroy: | |||
| 445 | err_fb_info_destroy: | 445 | err_fb_info_destroy: |
| 446 | drm_fb_helper_release_fbi(helper); | 446 | drm_fb_helper_release_fbi(helper); |
| 447 | err_gem_free_object: | 447 | err_gem_free_object: |
| 448 | dev->driver->gem_free_object(&obj->base); | 448 | drm_gem_object_unreference_unlocked(&obj->base); |
| 449 | return ret; | 449 | return ret; |
| 450 | } | 450 | } |
| 451 | EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs); | 451 | EXPORT_SYMBOL(drm_fbdev_cma_create_with_funcs); |
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index e1ab008b3f08..1d6c335584ec 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c | |||
| @@ -121,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, | |||
| 121 | return cma_obj; | 121 | return cma_obj; |
| 122 | 122 | ||
| 123 | error: | 123 | error: |
| 124 | drm->driver->gem_free_object(&cma_obj->base); | 124 | drm_gem_object_unreference_unlocked(&cma_obj->base); |
| 125 | return ERR_PTR(ret); | 125 | return ERR_PTR(ret); |
| 126 | } | 126 | } |
| 127 | EXPORT_SYMBOL_GPL(drm_gem_cma_create); | 127 | EXPORT_SYMBOL_GPL(drm_gem_cma_create); |
| @@ -162,18 +162,12 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv, | |||
| 162 | * and handle has the id what user can see. | 162 | * and handle has the id what user can see. |
| 163 | */ | 163 | */ |
| 164 | ret = drm_gem_handle_create(file_priv, gem_obj, handle); | 164 | ret = drm_gem_handle_create(file_priv, gem_obj, handle); |
| 165 | if (ret) | ||
| 166 | goto err_handle_create; | ||
| 167 | |||
| 168 | /* drop reference from allocate - handle holds it now. */ | 165 | /* drop reference from allocate - handle holds it now. */ |
| 169 | drm_gem_object_unreference_unlocked(gem_obj); | 166 | drm_gem_object_unreference_unlocked(gem_obj); |
| 167 | if (ret) | ||
| 168 | return ERR_PTR(ret); | ||
| 170 | 169 | ||
| 171 | return cma_obj; | 170 | return cma_obj; |
| 172 | |||
| 173 | err_handle_create: | ||
| 174 | drm->driver->gem_free_object(gem_obj); | ||
| 175 | |||
| 176 | return ERR_PTR(ret); | ||
| 177 | } | 171 | } |
| 178 | 172 | ||
| 179 | /** | 173 | /** |
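Both CMA helper fixes above replace direct calls to drm->driver->gem_free_object() with drm_gem_object_unreference_unlocked(), so the object is always destroyed through its reference count. In drm_gem_cma_create_with_handle() that also lets the allocation reference be dropped unconditionally right after handle creation: on success the handle keeps the object alive, on failure the unreference frees it. A sketch of the resulting tail, reassembled from the hunk:

	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	/* Drop the reference taken at allocation: on success the handle now
	 * owns one, on failure this unreference frees the object cleanly. */
	drm_gem_object_unreference_unlocked(gem_obj);
	if (ret)
		return ERR_PTR(ret);

	return cma_obj;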
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 7def3d58da18..e5e6f504d8cc 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
| @@ -1518,6 +1518,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out, | |||
| 1518 | if (out->status != MODE_OK) | 1518 | if (out->status != MODE_OK) |
| 1519 | goto out; | 1519 | goto out; |
| 1520 | 1520 | ||
| 1521 | drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V); | ||
| 1522 | |||
| 1521 | ret = 0; | 1523 | ret = 0; |
| 1522 | 1524 | ||
| 1523 | out: | 1525 | out: |
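Together with the drm_mode_setcrtc() hunk earlier, this moves the drm_mode_set_crtcinfo(CRTC_INTERLACE_HALVE_V) call into drm_mode_convert_umode(), so every user-supplied mode that passes validation gets its CRTC timing fields derived in one place rather than in each ioctl path separately. A sketch of the new tail of drm_mode_convert_umode(); the lines around the hunk are inferred:

	if (out->status != MODE_OK)
		goto out;

	/* Derive the crtc_* timing fields here, once per converted mode. */
	drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);

	ret = 0;

out:
	return ret;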
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c index 522cfd447892..16353ee81651 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c | |||
| @@ -225,6 +225,7 @@ struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu) | |||
| 225 | 225 | ||
| 226 | etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING; | 226 | etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING; |
| 227 | etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops; | 227 | etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops; |
| 228 | etnaviv_domain->domain.pgsize_bitmap = SZ_4K; | ||
| 228 | etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START; | 229 | etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START; |
| 229 | etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1; | 230 | etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1; |
| 230 | 231 | ||
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index f6223f907c15..7f9901b7777b 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c | |||
| @@ -31,7 +31,6 @@ | |||
| 31 | #include "exynos_drm_plane.h" | 31 | #include "exynos_drm_plane.h" |
| 32 | #include "exynos_drm_drv.h" | 32 | #include "exynos_drm_drv.h" |
| 33 | #include "exynos_drm_fb.h" | 33 | #include "exynos_drm_fb.h" |
| 34 | #include "exynos_drm_fbdev.h" | ||
| 35 | #include "exynos_drm_iommu.h" | 34 | #include "exynos_drm_iommu.h" |
| 36 | 35 | ||
| 37 | /* | 36 | /* |
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index 468498e3fec1..4c1fb3f8b5a6 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c | |||
| @@ -34,7 +34,7 @@ | |||
| 34 | 34 | ||
| 35 | struct exynos_dp_device { | 35 | struct exynos_dp_device { |
| 36 | struct drm_encoder encoder; | 36 | struct drm_encoder encoder; |
| 37 | struct drm_connector connector; | 37 | struct drm_connector *connector; |
| 38 | struct drm_bridge *ptn_bridge; | 38 | struct drm_bridge *ptn_bridge; |
| 39 | struct drm_device *drm_dev; | 39 | struct drm_device *drm_dev; |
| 40 | struct device *dev; | 40 | struct device *dev; |
| @@ -70,7 +70,7 @@ static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data) | |||
| 70 | static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data) | 70 | static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data) |
| 71 | { | 71 | { |
| 72 | struct exynos_dp_device *dp = to_dp(plat_data); | 72 | struct exynos_dp_device *dp = to_dp(plat_data); |
| 73 | struct drm_connector *connector = &dp->connector; | 73 | struct drm_connector *connector = dp->connector; |
| 74 | struct drm_display_mode *mode; | 74 | struct drm_display_mode *mode; |
| 75 | int num_modes = 0; | 75 | int num_modes = 0; |
| 76 | 76 | ||
| @@ -103,6 +103,7 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data, | |||
| 103 | int ret; | 103 | int ret; |
| 104 | 104 | ||
| 105 | drm_connector_register(connector); | 105 | drm_connector_register(connector); |
| 106 | dp->connector = connector; | ||
| 106 | 107 | ||
| 107 | /* Pre-empt DP connector creation if there's a bridge */ | 108 | /* Pre-empt DP connector creation if there's a bridge */ |
| 108 | if (dp->ptn_bridge) { | 109 | if (dp->ptn_bridge) { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c index 011211e4167d..edbd98ff293e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | #include <drm/drmP.h> | 15 | #include <drm/drmP.h> |
| 16 | #include "exynos_drm_drv.h" | 16 | #include "exynos_drm_drv.h" |
| 17 | #include "exynos_drm_crtc.h" | 17 | #include "exynos_drm_crtc.h" |
| 18 | #include "exynos_drm_fbdev.h" | ||
| 19 | 18 | ||
| 20 | static LIST_HEAD(exynos_drm_subdrv_list); | 19 | static LIST_HEAD(exynos_drm_subdrv_list); |
| 21 | 20 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 3efe1aa89416..d47216488985 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
| @@ -30,7 +30,6 @@ | |||
| 30 | 30 | ||
| 31 | #include "exynos_drm_drv.h" | 31 | #include "exynos_drm_drv.h" |
| 32 | #include "exynos_drm_fb.h" | 32 | #include "exynos_drm_fb.h" |
| 33 | #include "exynos_drm_fbdev.h" | ||
| 34 | #include "exynos_drm_crtc.h" | 33 | #include "exynos_drm_crtc.h" |
| 35 | #include "exynos_drm_plane.h" | 34 | #include "exynos_drm_plane.h" |
| 36 | #include "exynos_drm_iommu.h" | 35 | #include "exynos_drm_iommu.h" |
| @@ -120,7 +119,6 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = { | |||
| 120 | .timing_base = 0x0, | 119 | .timing_base = 0x0, |
| 121 | .has_clksel = 1, | 120 | .has_clksel = 1, |
| 122 | .has_limited_fmt = 1, | 121 | .has_limited_fmt = 1, |
| 123 | .has_hw_trigger = 1, | ||
| 124 | }; | 122 | }; |
| 125 | 123 | ||
| 126 | static struct fimd_driver_data exynos3_fimd_driver_data = { | 124 | static struct fimd_driver_data exynos3_fimd_driver_data = { |
| @@ -171,14 +169,11 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = { | |||
| 171 | .lcdblk_vt_shift = 24, | 169 | .lcdblk_vt_shift = 24, |
| 172 | .lcdblk_bypass_shift = 15, | 170 | .lcdblk_bypass_shift = 15, |
| 173 | .lcdblk_mic_bypass_shift = 11, | 171 | .lcdblk_mic_bypass_shift = 11, |
| 174 | .trg_type = I80_HW_TRG, | ||
| 175 | .has_shadowcon = 1, | 172 | .has_shadowcon = 1, |
| 176 | .has_vidoutcon = 1, | 173 | .has_vidoutcon = 1, |
| 177 | .has_vtsel = 1, | 174 | .has_vtsel = 1, |
| 178 | .has_mic_bypass = 1, | 175 | .has_mic_bypass = 1, |
| 179 | .has_dp_clk = 1, | 176 | .has_dp_clk = 1, |
| 180 | .has_hw_trigger = 1, | ||
| 181 | .has_trigger_per_te = 1, | ||
| 182 | }; | 177 | }; |
| 183 | 178 | ||
| 184 | struct fimd_context { | 179 | struct fimd_context { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 493552368295..8564c3da0d22 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c | |||
| @@ -48,13 +48,13 @@ | |||
| 48 | 48 | ||
| 49 | /* registers for base address */ | 49 | /* registers for base address */ |
| 50 | #define G2D_SRC_BASE_ADDR 0x0304 | 50 | #define G2D_SRC_BASE_ADDR 0x0304 |
| 51 | #define G2D_SRC_STRIDE_REG 0x0308 | 51 | #define G2D_SRC_STRIDE 0x0308 |
| 52 | #define G2D_SRC_COLOR_MODE 0x030C | 52 | #define G2D_SRC_COLOR_MODE 0x030C |
| 53 | #define G2D_SRC_LEFT_TOP 0x0310 | 53 | #define G2D_SRC_LEFT_TOP 0x0310 |
| 54 | #define G2D_SRC_RIGHT_BOTTOM 0x0314 | 54 | #define G2D_SRC_RIGHT_BOTTOM 0x0314 |
| 55 | #define G2D_SRC_PLANE2_BASE_ADDR 0x0318 | 55 | #define G2D_SRC_PLANE2_BASE_ADDR 0x0318 |
| 56 | #define G2D_DST_BASE_ADDR 0x0404 | 56 | #define G2D_DST_BASE_ADDR 0x0404 |
| 57 | #define G2D_DST_STRIDE_REG 0x0408 | 57 | #define G2D_DST_STRIDE 0x0408 |
| 58 | #define G2D_DST_COLOR_MODE 0x040C | 58 | #define G2D_DST_COLOR_MODE 0x040C |
| 59 | #define G2D_DST_LEFT_TOP 0x0410 | 59 | #define G2D_DST_LEFT_TOP 0x0410 |
| 60 | #define G2D_DST_RIGHT_BOTTOM 0x0414 | 60 | #define G2D_DST_RIGHT_BOTTOM 0x0414 |
| @@ -563,7 +563,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset) | |||
| 563 | 563 | ||
| 564 | switch (reg_offset) { | 564 | switch (reg_offset) { |
| 565 | case G2D_SRC_BASE_ADDR: | 565 | case G2D_SRC_BASE_ADDR: |
| 566 | case G2D_SRC_STRIDE_REG: | 566 | case G2D_SRC_STRIDE: |
| 567 | case G2D_SRC_COLOR_MODE: | 567 | case G2D_SRC_COLOR_MODE: |
| 568 | case G2D_SRC_LEFT_TOP: | 568 | case G2D_SRC_LEFT_TOP: |
| 569 | case G2D_SRC_RIGHT_BOTTOM: | 569 | case G2D_SRC_RIGHT_BOTTOM: |
| @@ -573,7 +573,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset) | |||
| 573 | reg_type = REG_TYPE_SRC_PLANE2; | 573 | reg_type = REG_TYPE_SRC_PLANE2; |
| 574 | break; | 574 | break; |
| 575 | case G2D_DST_BASE_ADDR: | 575 | case G2D_DST_BASE_ADDR: |
| 576 | case G2D_DST_STRIDE_REG: | 576 | case G2D_DST_STRIDE: |
| 577 | case G2D_DST_COLOR_MODE: | 577 | case G2D_DST_COLOR_MODE: |
| 578 | case G2D_DST_LEFT_TOP: | 578 | case G2D_DST_LEFT_TOP: |
| 579 | case G2D_DST_RIGHT_BOTTOM: | 579 | case G2D_DST_RIGHT_BOTTOM: |
| @@ -968,8 +968,8 @@ static int g2d_check_reg_offset(struct device *dev, | |||
| 968 | } else | 968 | } else |
| 969 | buf_info->types[reg_type] = BUF_TYPE_GEM; | 969 | buf_info->types[reg_type] = BUF_TYPE_GEM; |
| 970 | break; | 970 | break; |
| 971 | case G2D_SRC_STRIDE_REG: | 971 | case G2D_SRC_STRIDE: |
| 972 | case G2D_DST_STRIDE_REG: | 972 | case G2D_DST_STRIDE: |
| 973 | if (for_addr) | 973 | if (for_addr) |
| 974 | goto err; | 974 | goto err; |
| 975 | 975 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index 55f1d37c666a..77f12c00abf9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c | |||
| @@ -242,7 +242,7 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config, | |||
| 242 | state->v_ratio == (1 << 15)) | 242 | state->v_ratio == (1 << 15)) |
| 243 | height_ok = true; | 243 | height_ok = true; |
| 244 | 244 | ||
| 245 | if (width_ok & height_ok) | 245 | if (width_ok && height_ok) |
| 246 | return 0; | 246 | return 0; |
| 247 | 247 | ||
| 248 | DRM_DEBUG_KMS("scaling mode is not supported"); | 248 | DRM_DEBUG_KMS("scaling mode is not supported"); |
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 0ec1ad961e0d..dc723f7ead7d 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | |||
| @@ -42,9 +42,10 @@ static const struct regmap_config fsl_dcu_regmap_config = { | |||
| 42 | .reg_bits = 32, | 42 | .reg_bits = 32, |
| 43 | .reg_stride = 4, | 43 | .reg_stride = 4, |
| 44 | .val_bits = 32, | 44 | .val_bits = 32, |
| 45 | .cache_type = REGCACHE_RBTREE, | 45 | .cache_type = REGCACHE_FLAT, |
| 46 | 46 | ||
| 47 | .volatile_reg = fsl_dcu_drm_is_volatile_reg, | 47 | .volatile_reg = fsl_dcu_drm_is_volatile_reg, |
| 48 | .max_register = 0x11fc, | ||
| 48 | }; | 49 | }; |
| 49 | 50 | ||
| 50 | static int fsl_dcu_drm_irq_init(struct drm_device *dev) | 51 | static int fsl_dcu_drm_irq_init(struct drm_device *dev) |
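Switching the DCU regmap from an rbtree cache to REGCACHE_FLAT trades a little memory for lookup speed: a flat cache is essentially an array indexed by register offset, which is why the change also has to declare max_register so that array can be sized. The resulting configuration, as it stands after the hunk above:

static const struct regmap_config fsl_dcu_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	/* Flat cache: a simple array, sized from max_register below. */
	.cache_type = REGCACHE_FLAT,

	.volatile_reg = fsl_dcu_drm_is_volatile_reg,
	.max_register = 0x11fc,
};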
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5faacc6e548d..7c334e902266 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -3481,6 +3481,7 @@ int intel_bios_init(struct drm_i915_private *dev_priv); | |||
| 3481 | bool intel_bios_is_valid_vbt(const void *buf, size_t size); | 3481 | bool intel_bios_is_valid_vbt(const void *buf, size_t size); |
| 3482 | bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); | 3482 | bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); |
| 3483 | bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); | 3483 | bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); |
| 3484 | bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port); | ||
| 3484 | bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); | 3485 | bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); |
| 3485 | bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); | 3486 | bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); |
| 3486 | bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); | 3487 | bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index b235b6e88ead..b9022fa053d6 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
| @@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode, | |||
| 139 | else | 139 | else |
| 140 | panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; | 140 | panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC; |
| 141 | 141 | ||
| 142 | panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) | | ||
| 143 | dvo_timing->himage_lo; | ||
| 144 | panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) | | ||
| 145 | dvo_timing->vimage_lo; | ||
| 146 | |||
| 142 | /* Some VBTs have bogus h/vtotal values */ | 147 | /* Some VBTs have bogus h/vtotal values */ |
| 143 | if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) | 148 | if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) |
| 144 | panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; | 149 | panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1; |
| @@ -1187,7 +1192,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv, | |||
| 1187 | } | 1192 | } |
| 1188 | if (bdb->version < 106) { | 1193 | if (bdb->version < 106) { |
| 1189 | expected_size = 22; | 1194 | expected_size = 22; |
| 1190 | } else if (bdb->version < 109) { | 1195 | } else if (bdb->version < 111) { |
| 1191 | expected_size = 27; | 1196 | expected_size = 27; |
| 1192 | } else if (bdb->version < 195) { | 1197 | } else if (bdb->version < 195) { |
| 1193 | BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33); | 1198 | BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33); |
| @@ -1546,6 +1551,45 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin) | |||
| 1546 | } | 1551 | } |
| 1547 | 1552 | ||
| 1548 | /** | 1553 | /** |
| 1554 | * intel_bios_is_port_present - is the specified digital port present | ||
| 1555 | * @dev_priv: i915 device instance | ||
| 1556 | * @port: port to check | ||
| 1557 | * | ||
| 1558 | * Return true if the device in %port is present. | ||
| 1559 | */ | ||
| 1560 | bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port) | ||
| 1561 | { | ||
| 1562 | static const struct { | ||
| 1563 | u16 dp, hdmi; | ||
| 1564 | } port_mapping[] = { | ||
| 1565 | [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, | ||
| 1566 | [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, | ||
| 1567 | [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, | ||
| 1568 | [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, | ||
| 1569 | }; | ||
| 1570 | int i; | ||
| 1571 | |||
| 1572 | /* FIXME maybe deal with port A as well? */ | ||
| 1573 | if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping)) | ||
| 1574 | return false; | ||
| 1575 | |||
| 1576 | if (!dev_priv->vbt.child_dev_num) | ||
| 1577 | return false; | ||
| 1578 | |||
| 1579 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { | ||
| 1580 | const union child_device_config *p_child = | ||
| 1581 | &dev_priv->vbt.child_dev[i]; | ||
| 1582 | if ((p_child->common.dvo_port == port_mapping[port].dp || | ||
| 1583 | p_child->common.dvo_port == port_mapping[port].hdmi) && | ||
| 1584 | (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING | | ||
| 1585 | DEVICE_TYPE_DISPLAYPORT_OUTPUT))) | ||
| 1586 | return true; | ||
| 1587 | } | ||
| 1588 | |||
| 1589 | return false; | ||
| 1590 | } | ||
| 1591 | |||
| 1592 | /** | ||
| 1549 | * intel_bios_is_port_edp - is the device in given port eDP | 1593 | * intel_bios_is_port_edp - is the device in given port eDP |
| 1550 | * @dev_priv: i915 device instance | 1594 | * @dev_priv: i915 device instance |
| 1551 | * @port: port to check | 1595 | * @port: port to check |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2113f401f0ba..56a1637c864f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -8275,12 +8275,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) | |||
| 8275 | { | 8275 | { |
| 8276 | struct drm_i915_private *dev_priv = dev->dev_private; | 8276 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 8277 | struct intel_encoder *encoder; | 8277 | struct intel_encoder *encoder; |
| 8278 | int i; | ||
| 8278 | u32 val, final; | 8279 | u32 val, final; |
| 8279 | bool has_lvds = false; | 8280 | bool has_lvds = false; |
| 8280 | bool has_cpu_edp = false; | 8281 | bool has_cpu_edp = false; |
| 8281 | bool has_panel = false; | 8282 | bool has_panel = false; |
| 8282 | bool has_ck505 = false; | 8283 | bool has_ck505 = false; |
| 8283 | bool can_ssc = false; | 8284 | bool can_ssc = false; |
| 8285 | bool using_ssc_source = false; | ||
| 8284 | 8286 | ||
| 8285 | /* We need to take the global config into account */ | 8287 | /* We need to take the global config into account */ |
| 8286 | for_each_intel_encoder(dev, encoder) { | 8288 | for_each_intel_encoder(dev, encoder) { |
| @@ -8307,8 +8309,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) | |||
| 8307 | can_ssc = true; | 8309 | can_ssc = true; |
| 8308 | } | 8310 | } |
| 8309 | 8311 | ||
| 8310 | DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", | 8312 | /* Check if any DPLLs are using the SSC source */ |
| 8311 | has_panel, has_lvds, has_ck505); | 8313 | for (i = 0; i < dev_priv->num_shared_dpll; i++) { |
| 8314 | u32 temp = I915_READ(PCH_DPLL(i)); | ||
| 8315 | |||
| 8316 | if (!(temp & DPLL_VCO_ENABLE)) | ||
| 8317 | continue; | ||
| 8318 | |||
| 8319 | if ((temp & PLL_REF_INPUT_MASK) == | ||
| 8320 | PLLB_REF_INPUT_SPREADSPECTRUMIN) { | ||
| 8321 | using_ssc_source = true; | ||
| 8322 | break; | ||
| 8323 | } | ||
| 8324 | } | ||
| 8325 | |||
| 8326 | DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", | ||
| 8327 | has_panel, has_lvds, has_ck505, using_ssc_source); | ||
| 8312 | 8328 | ||
| 8313 | /* Ironlake: try to setup display ref clock before DPLL | 8329 | /* Ironlake: try to setup display ref clock before DPLL |
| 8314 | * enabling. This is only under driver's control after | 8330 | * enabling. This is only under driver's control after |
| @@ -8345,9 +8361,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) | |||
| 8345 | final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | 8361 | final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; |
| 8346 | } else | 8362 | } else |
| 8347 | final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; | 8363 | final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; |
| 8348 | } else { | 8364 | } else if (using_ssc_source) { |
| 8349 | final |= DREF_SSC_SOURCE_DISABLE; | 8365 | final |= DREF_SSC_SOURCE_ENABLE; |
| 8350 | final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; | 8366 | final |= DREF_SSC1_ENABLE; |
| 8351 | } | 8367 | } |
| 8352 | 8368 | ||
| 8353 | if (final == val) | 8369 | if (final == val) |
| @@ -8393,7 +8409,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) | |||
| 8393 | POSTING_READ(PCH_DREF_CONTROL); | 8409 | POSTING_READ(PCH_DREF_CONTROL); |
| 8394 | udelay(200); | 8410 | udelay(200); |
| 8395 | } else { | 8411 | } else { |
| 8396 | DRM_DEBUG_KMS("Disabling SSC entirely\n"); | 8412 | DRM_DEBUG_KMS("Disabling CPU source output\n"); |
| 8397 | 8413 | ||
| 8398 | val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | 8414 | val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; |
| 8399 | 8415 | ||
| @@ -8404,16 +8420,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) | |||
| 8404 | POSTING_READ(PCH_DREF_CONTROL); | 8420 | POSTING_READ(PCH_DREF_CONTROL); |
| 8405 | udelay(200); | 8421 | udelay(200); |
| 8406 | 8422 | ||
| 8407 | /* Turn off the SSC source */ | 8423 | if (!using_ssc_source) { |
| 8408 | val &= ~DREF_SSC_SOURCE_MASK; | 8424 | DRM_DEBUG_KMS("Disabling SSC source\n"); |
| 8409 | val |= DREF_SSC_SOURCE_DISABLE; | ||
| 8410 | 8425 | ||
| 8411 | /* Turn off SSC1 */ | 8426 | /* Turn off the SSC source */ |
| 8412 | val &= ~DREF_SSC1_ENABLE; | 8427 | val &= ~DREF_SSC_SOURCE_MASK; |
| 8428 | val |= DREF_SSC_SOURCE_DISABLE; | ||
| 8413 | 8429 | ||
| 8414 | I915_WRITE(PCH_DREF_CONTROL, val); | 8430 | /* Turn off SSC1 */ |
| 8415 | POSTING_READ(PCH_DREF_CONTROL); | 8431 | val &= ~DREF_SSC1_ENABLE; |
| 8416 | udelay(200); | 8432 | |
| 8433 | I915_WRITE(PCH_DREF_CONTROL, val); | ||
| 8434 | POSTING_READ(PCH_DREF_CONTROL); | ||
| 8435 | udelay(200); | ||
| 8436 | } | ||
| 8417 | } | 8437 | } |
| 8418 | 8438 | ||
| 8419 | BUG_ON(val != final); | 8439 | BUG_ON(val != final); |
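The ironlake_init_pch_refclk() hunks above boil down to one rule: the PCH SSC source may only be switched off when no shared DPLL still feeds from it, otherwise an already-running PCH port loses its reference clock. The new detection loop, copied out of the hunk for readability:

	bool using_ssc_source = false;

	/* Check if any enabled PCH DPLL is fed from the SSC reference. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	/* Later, the "Disabling SSC source" branch only runs when
	 * !using_ssc_source. */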
| @@ -14554,6 +14574,8 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
| 14554 | if (I915_READ(PCH_DP_D) & DP_DETECTED) | 14574 | if (I915_READ(PCH_DP_D) & DP_DETECTED) |
| 14555 | intel_dp_init(dev, PCH_DP_D, PORT_D); | 14575 | intel_dp_init(dev, PCH_DP_D, PORT_D); |
| 14556 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { | 14576 | } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { |
| 14577 | bool has_edp, has_port; | ||
| 14578 | |||
| 14557 | /* | 14579 | /* |
| 14558 | * The DP_DETECTED bit is the latched state of the DDC | 14580 | * The DP_DETECTED bit is the latched state of the DDC |
| 14559 | * SDA pin at boot. However since eDP doesn't require DDC | 14581 | * SDA pin at boot. However since eDP doesn't require DDC |
| @@ -14562,27 +14584,37 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
| 14562 | * Thus we can't rely on the DP_DETECTED bit alone to detect | 14584 | * Thus we can't rely on the DP_DETECTED bit alone to detect |
| 14563 | * eDP ports. Consult the VBT as well as DP_DETECTED to | 14585 | * eDP ports. Consult the VBT as well as DP_DETECTED to |
| 14564 | * detect eDP ports. | 14586 | * detect eDP ports. |
| 14587 | * | ||
| 14588 | * Sadly the straps seem to be missing sometimes even for HDMI | ||
| 14589 | * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap | ||
| 14590 | * and VBT for the presence of the port. Additionally we can't | ||
| 14591 | * trust the port type the VBT declares as we've seen at least | ||
| 14592 | * HDMI ports that the VBT claim are DP or eDP. | ||
| 14565 | */ | 14593 | */ |
| 14566 | if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && | 14594 | has_edp = intel_dp_is_edp(dev, PORT_B); |
| 14567 | !intel_dp_is_edp(dev, PORT_B)) | 14595 | has_port = intel_bios_is_port_present(dev_priv, PORT_B); |
| 14596 | if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) | ||
| 14597 | has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B); | ||
| 14598 | if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) | ||
| 14568 | intel_hdmi_init(dev, VLV_HDMIB, PORT_B); | 14599 | intel_hdmi_init(dev, VLV_HDMIB, PORT_B); |
| 14569 | if (I915_READ(VLV_DP_B) & DP_DETECTED || | ||
| 14570 | intel_dp_is_edp(dev, PORT_B)) | ||
| 14571 | intel_dp_init(dev, VLV_DP_B, PORT_B); | ||
| 14572 | 14600 | ||
| 14573 | if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && | 14601 | has_edp = intel_dp_is_edp(dev, PORT_C); |
| 14574 | !intel_dp_is_edp(dev, PORT_C)) | 14602 | has_port = intel_bios_is_port_present(dev_priv, PORT_C); |
| 14603 | if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) | ||
| 14604 | has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C); | ||
| 14605 | if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) | ||
| 14575 | intel_hdmi_init(dev, VLV_HDMIC, PORT_C); | 14606 | intel_hdmi_init(dev, VLV_HDMIC, PORT_C); |
| 14576 | if (I915_READ(VLV_DP_C) & DP_DETECTED || | ||
| 14577 | intel_dp_is_edp(dev, PORT_C)) | ||
| 14578 | intel_dp_init(dev, VLV_DP_C, PORT_C); | ||
| 14579 | 14607 | ||
| 14580 | if (IS_CHERRYVIEW(dev)) { | 14608 | if (IS_CHERRYVIEW(dev)) { |
| 14581 | /* eDP not supported on port D, so don't check VBT */ | 14609 | /* |
| 14582 | if (I915_READ(CHV_HDMID) & SDVO_DETECTED) | 14610 | * eDP not supported on port D, |
| 14583 | intel_hdmi_init(dev, CHV_HDMID, PORT_D); | 14611 | * so no need to worry about it |
| 14584 | if (I915_READ(CHV_DP_D) & DP_DETECTED) | 14612 | */ |
| 14613 | has_port = intel_bios_is_port_present(dev_priv, PORT_D); | ||
| 14614 | if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) | ||
| 14585 | intel_dp_init(dev, CHV_DP_D, PORT_D); | 14615 | intel_dp_init(dev, CHV_DP_D, PORT_D); |
| 14616 | if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) | ||
| 14617 | intel_hdmi_init(dev, CHV_HDMID, PORT_D); | ||
| 14586 | } | 14618 | } |
| 14587 | 14619 | ||
| 14588 | intel_dsi_init(dev); | 14620 | intel_dsi_init(dev); |
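The intel_setup_outputs() rework above stops trusting either source alone on VLV/CHV: a port is registered if the hardware strap bit or the VBT (via the new intel_bios_is_port_present()) says it exists, and HDMI is only skipped when DP init actually claimed the port as eDP, which is why intel_dp_init() now returns bool. A condensed sketch for one port, using the register names from the hunk:

	bool has_edp, has_port;

	has_edp  = intel_dp_is_edp(dev, PORT_B);
	has_port = intel_bios_is_port_present(dev_priv, PORT_B);

	/* Register DP if either the strap or the VBT reports the port;
	 * keep has_edp only if DP init really took the port as eDP. */
	if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
		has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);

	/* HDMI on the same port, unless it is confirmed eDP. */
	if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
		intel_hdmi_init(dev, VLV_HDMIB, PORT_B);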
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f192f58708c2..79cf2d5f5a20 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -4977,9 +4977,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) | |||
| 4977 | intel_display_power_get(dev_priv, power_domain); | 4977 | intel_display_power_get(dev_priv, power_domain); |
| 4978 | 4978 | ||
| 4979 | if (long_hpd) { | 4979 | if (long_hpd) { |
| 4980 | /* indicate that we need to restart link training */ | ||
| 4981 | intel_dp->train_set_valid = false; | ||
| 4982 | |||
| 4983 | intel_dp_long_pulse(intel_dp->attached_connector); | 4980 | intel_dp_long_pulse(intel_dp->attached_connector); |
| 4984 | if (intel_dp->is_mst) | 4981 | if (intel_dp->is_mst) |
| 4985 | ret = IRQ_HANDLED; | 4982 | ret = IRQ_HANDLED; |
| @@ -5725,8 +5722,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, | |||
| 5725 | if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { | 5722 | if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) { |
| 5726 | fixed_mode = drm_mode_duplicate(dev, | 5723 | fixed_mode = drm_mode_duplicate(dev, |
| 5727 | dev_priv->vbt.lfp_lvds_vbt_mode); | 5724 | dev_priv->vbt.lfp_lvds_vbt_mode); |
| 5728 | if (fixed_mode) | 5725 | if (fixed_mode) { |
| 5729 | fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; | 5726 | fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; |
| 5727 | connector->display_info.width_mm = fixed_mode->width_mm; | ||
| 5728 | connector->display_info.height_mm = fixed_mode->height_mm; | ||
| 5729 | } | ||
| 5730 | } | 5730 | } |
| 5731 | mutex_unlock(&dev->mode_config.mutex); | 5731 | mutex_unlock(&dev->mode_config.mutex); |
| 5732 | 5732 | ||
| @@ -5923,9 +5923,9 @@ fail: | |||
| 5923 | return false; | 5923 | return false; |
| 5924 | } | 5924 | } |
| 5925 | 5925 | ||
| 5926 | void | 5926 | bool intel_dp_init(struct drm_device *dev, |
| 5927 | intel_dp_init(struct drm_device *dev, | 5927 | i915_reg_t output_reg, |
| 5928 | i915_reg_t output_reg, enum port port) | 5928 | enum port port) |
| 5929 | { | 5929 | { |
| 5930 | struct drm_i915_private *dev_priv = dev->dev_private; | 5930 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 5931 | struct intel_digital_port *intel_dig_port; | 5931 | struct intel_digital_port *intel_dig_port; |
| @@ -5935,7 +5935,7 @@ intel_dp_init(struct drm_device *dev, | |||
| 5935 | 5935 | ||
| 5936 | intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); | 5936 | intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL); |
| 5937 | if (!intel_dig_port) | 5937 | if (!intel_dig_port) |
| 5938 | return; | 5938 | return false; |
| 5939 | 5939 | ||
| 5940 | intel_connector = intel_connector_alloc(); | 5940 | intel_connector = intel_connector_alloc(); |
| 5941 | if (!intel_connector) | 5941 | if (!intel_connector) |
| @@ -5992,7 +5992,7 @@ intel_dp_init(struct drm_device *dev, | |||
| 5992 | if (!intel_dp_init_connector(intel_dig_port, intel_connector)) | 5992 | if (!intel_dp_init_connector(intel_dig_port, intel_connector)) |
| 5993 | goto err_init_connector; | 5993 | goto err_init_connector; |
| 5994 | 5994 | ||
| 5995 | return; | 5995 | return true; |
| 5996 | 5996 | ||
| 5997 | err_init_connector: | 5997 | err_init_connector: |
| 5998 | drm_encoder_cleanup(encoder); | 5998 | drm_encoder_cleanup(encoder); |
| @@ -6000,8 +6000,7 @@ err_encoder_init: | |||
| 6000 | kfree(intel_connector); | 6000 | kfree(intel_connector); |
| 6001 | err_connector_alloc: | 6001 | err_connector_alloc: |
| 6002 | kfree(intel_dig_port); | 6002 | kfree(intel_dig_port); |
| 6003 | 6003 | return false; | |
| 6004 | return; | ||
| 6005 | } | 6004 | } |
| 6006 | 6005 | ||
| 6007 | void intel_dp_mst_suspend(struct drm_device *dev) | 6006 | void intel_dp_mst_suspend(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c index 0b8eefc2acc5..60fb39cd220b 100644 --- a/drivers/gpu/drm/i915/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c | |||
| @@ -85,8 +85,7 @@ static bool | |||
| 85 | intel_dp_reset_link_train(struct intel_dp *intel_dp, | 85 | intel_dp_reset_link_train(struct intel_dp *intel_dp, |
| 86 | uint8_t dp_train_pat) | 86 | uint8_t dp_train_pat) |
| 87 | { | 87 | { |
| 88 | if (!intel_dp->train_set_valid) | 88 | memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); |
| 89 | memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); | ||
| 90 | intel_dp_set_signal_levels(intel_dp); | 89 | intel_dp_set_signal_levels(intel_dp); |
| 91 | return intel_dp_set_link_train(intel_dp, dp_train_pat); | 90 | return intel_dp_set_link_train(intel_dp, dp_train_pat); |
| 92 | } | 91 | } |
| @@ -161,23 +160,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) | |||
| 161 | break; | 160 | break; |
| 162 | } | 161 | } |
| 163 | 162 | ||
| 164 | /* | ||
| 165 | * if we used previously trained voltage and pre-emphasis values | ||
| 166 | * and we don't get clock recovery, reset link training values | ||
| 167 | */ | ||
| 168 | if (intel_dp->train_set_valid) { | ||
| 169 | DRM_DEBUG_KMS("clock recovery not ok, reset"); | ||
| 170 | /* clear the flag as we are not reusing train set */ | ||
| 171 | intel_dp->train_set_valid = false; | ||
| 172 | if (!intel_dp_reset_link_train(intel_dp, | ||
| 173 | DP_TRAINING_PATTERN_1 | | ||
| 174 | DP_LINK_SCRAMBLING_DISABLE)) { | ||
| 175 | DRM_ERROR("failed to enable link training\n"); | ||
| 176 | return; | ||
| 177 | } | ||
| 178 | continue; | ||
| 179 | } | ||
| 180 | |||
| 181 | /* Check to see if we've tried the max voltage */ | 163 | /* Check to see if we've tried the max voltage */ |
| 182 | for (i = 0; i < intel_dp->lane_count; i++) | 164 | for (i = 0; i < intel_dp->lane_count; i++) |
| 183 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | 165 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
| @@ -284,7 +266,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) | |||
| 284 | /* Make sure clock is still ok */ | 266 | /* Make sure clock is still ok */ |
| 285 | if (!drm_dp_clock_recovery_ok(link_status, | 267 | if (!drm_dp_clock_recovery_ok(link_status, |
| 286 | intel_dp->lane_count)) { | 268 | intel_dp->lane_count)) { |
| 287 | intel_dp->train_set_valid = false; | ||
| 288 | intel_dp_link_training_clock_recovery(intel_dp); | 269 | intel_dp_link_training_clock_recovery(intel_dp); |
| 289 | intel_dp_set_link_train(intel_dp, | 270 | intel_dp_set_link_train(intel_dp, |
| 290 | training_pattern | | 271 | training_pattern | |
| @@ -301,7 +282,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) | |||
| 301 | 282 | ||
| 302 | /* Try 5 times, then try clock recovery if that fails */ | 283 | /* Try 5 times, then try clock recovery if that fails */ |
| 303 | if (tries > 5) { | 284 | if (tries > 5) { |
| 304 | intel_dp->train_set_valid = false; | ||
| 305 | intel_dp_link_training_clock_recovery(intel_dp); | 285 | intel_dp_link_training_clock_recovery(intel_dp); |
| 306 | intel_dp_set_link_train(intel_dp, | 286 | intel_dp_set_link_train(intel_dp, |
| 307 | training_pattern | | 287 | training_pattern | |
| @@ -322,10 +302,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) | |||
| 322 | 302 | ||
| 323 | intel_dp_set_idle_link_train(intel_dp); | 303 | intel_dp_set_idle_link_train(intel_dp); |
| 324 | 304 | ||
| 325 | if (channel_eq) { | 305 | if (channel_eq) |
| 326 | intel_dp->train_set_valid = true; | ||
| 327 | DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n"); | 306 | DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n"); |
| 328 | } | ||
| 329 | } | 307 | } |
| 330 | 308 | ||
| 331 | void intel_dp_stop_link_train(struct intel_dp *intel_dp) | 309 | void intel_dp_stop_link_train(struct intel_dp *intel_dp) |
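With train_set_valid gone, intel_dp_reset_link_train() no longer tries to reuse voltage-swing and pre-emphasis values from a previous training session; every link training starts from a zeroed train_set, which is what makes the whole "clock recovery not ok, reset" fallback path removable. The reset helper after the hunk:

static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint8_t dp_train_pat)
{
	/* Always start from scratch: no cached drive settings are reused. */
	memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp);
	return intel_dp_set_link_train(intel_dp, dp_train_pat);
}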
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index 3ac705936b04..baf6f5584cbd 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c | |||
| @@ -366,6 +366,9 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, | |||
| 366 | DPLL_ID_PCH_PLL_B); | 366 | DPLL_ID_PCH_PLL_B); |
| 367 | } | 367 | } |
| 368 | 368 | ||
| 369 | if (!pll) | ||
| 370 | return NULL; | ||
| 371 | |||
| 369 | /* reference the pll */ | 372 | /* reference the pll */ |
| 370 | intel_reference_shared_dpll(pll, crtc_state); | 373 | intel_reference_shared_dpll(pll, crtc_state); |
| 371 | 374 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index a28b4aac1e02..f7f0f01814f6 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -863,8 +863,6 @@ struct intel_dp { | |||
| 863 | /* This is called before a link training is starterd */ | 863 | /* This is called before a link training is starterd */ |
| 864 | void (*prepare_link_retrain)(struct intel_dp *intel_dp); | 864 | void (*prepare_link_retrain)(struct intel_dp *intel_dp); |
| 865 | 865 | ||
| 866 | bool train_set_valid; | ||
| 867 | |||
| 868 | /* Displayport compliance testing */ | 866 | /* Displayport compliance testing */ |
| 869 | unsigned long compliance_test_type; | 867 | unsigned long compliance_test_type; |
| 870 | unsigned long compliance_test_data; | 868 | unsigned long compliance_test_data; |
| @@ -1284,7 +1282,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *); | |||
| 1284 | void intel_csr_ucode_resume(struct drm_i915_private *); | 1282 | void intel_csr_ucode_resume(struct drm_i915_private *); |
| 1285 | 1283 | ||
| 1286 | /* intel_dp.c */ | 1284 | /* intel_dp.c */ |
| 1287 | void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); | 1285 | bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port); |
| 1288 | bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, | 1286 | bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, |
| 1289 | struct intel_connector *intel_connector); | 1287 | struct intel_connector *intel_connector); |
| 1290 | void intel_dp_set_link_params(struct intel_dp *intel_dp, | 1288 | void intel_dp_set_link_params(struct intel_dp *intel_dp, |
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 366ad6c67ce4..4756ef639648 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c | |||
| @@ -1545,6 +1545,9 @@ void intel_dsi_init(struct drm_device *dev) | |||
| 1545 | goto err; | 1545 | goto err; |
| 1546 | } | 1546 | } |
| 1547 | 1547 | ||
| 1548 | connector->display_info.width_mm = fixed_mode->width_mm; | ||
| 1549 | connector->display_info.height_mm = fixed_mode->height_mm; | ||
| 1550 | |||
| 1548 | intel_panel_init(&intel_connector->panel, fixed_mode, NULL); | 1551 | intel_panel_init(&intel_connector->panel, fixed_mode, NULL); |
| 1549 | 1552 | ||
| 1550 | intel_dsi_add_properties(intel_connector); | 1553 | intel_dsi_add_properties(intel_connector); |
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index d5a7cfec589b..647127f3aaff 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c | |||
| @@ -824,8 +824,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc) | |||
| 824 | { | 824 | { |
| 825 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; | 825 | struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; |
| 826 | struct intel_fbc *fbc = &dev_priv->fbc; | 826 | struct intel_fbc *fbc = &dev_priv->fbc; |
| 827 | bool enable_by_default = IS_HASWELL(dev_priv) || | 827 | bool enable_by_default = IS_BROADWELL(dev_priv); |
| 828 | IS_BROADWELL(dev_priv); | ||
| 829 | 828 | ||
| 830 | if (intel_vgpu_active(dev_priv->dev)) { | 829 | if (intel_vgpu_active(dev_priv->dev)) { |
| 831 | fbc->no_fbc_reason = "VGPU is active"; | 830 | fbc->no_fbc_reason = "VGPU is active"; |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 2c3bd9c2573e..a8844702d11b 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -2142,6 +2142,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, | |||
| 2142 | enum port port = intel_dig_port->port; | 2142 | enum port port = intel_dig_port->port; |
| 2143 | uint8_t alternate_ddc_pin; | 2143 | uint8_t alternate_ddc_pin; |
| 2144 | 2144 | ||
| 2145 | DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", | ||
| 2146 | port_name(port)); | ||
| 2147 | |||
| 2145 | if (WARN(intel_dig_port->max_lanes < 4, | 2148 | if (WARN(intel_dig_port->max_lanes < 4, |
| 2146 | "Not enough lanes (%d) for HDMI on port %c\n", | 2149 | "Not enough lanes (%d) for HDMI on port %c\n", |
| 2147 | intel_dig_port->max_lanes, port_name(port))) | 2150 | intel_dig_port->max_lanes, port_name(port))) |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index bc53c0dd34d0..96281e628d2a 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
| @@ -1082,6 +1082,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
| 1082 | fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); | 1082 | fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode); |
| 1083 | if (fixed_mode) { | 1083 | if (fixed_mode) { |
| 1084 | fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; | 1084 | fixed_mode->type |= DRM_MODE_TYPE_PREFERRED; |
| 1085 | connector->display_info.width_mm = fixed_mode->width_mm; | ||
| 1086 | connector->display_info.height_mm = fixed_mode->height_mm; | ||
| 1085 | goto out; | 1087 | goto out; |
| 1086 | } | 1088 | } |
| 1087 | } | 1089 | } |
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index c15051de8023..44fb0b35eed3 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h | |||
| @@ -403,9 +403,10 @@ struct lvds_dvo_timing { | |||
| 403 | u8 vsync_off:4; | 403 | u8 vsync_off:4; |
| 404 | u8 rsvd0:6; | 404 | u8 rsvd0:6; |
| 405 | u8 hsync_off_hi:2; | 405 | u8 hsync_off_hi:2; |
| 406 | u8 h_image; | 406 | u8 himage_lo; |
| 407 | u8 v_image; | 407 | u8 vimage_lo; |
| 408 | u8 max_hv; | 408 | u8 vimage_hi:4; |
| 409 | u8 himage_hi:4; | ||
| 409 | u8 h_border; | 410 | u8 h_border; |
| 410 | u8 v_border; | 411 | u8 v_border; |
| 411 | u8 rsvd1:3; | 412 | u8 rsvd1:3; |
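The lvds_dvo_timing layout change above is what makes the new width_mm/height_mm code in intel_bios.c work: the physical image size in the VBT is a 12-bit value split into an 8-bit low byte and a 4-bit high nibble, so the parser reassembles it as (hi << 8) | lo. A worked example using the field names from the hunk; the numeric values are illustrative only:

	/* e.g. a 344 mm wide panel: himage_hi = 0x1, himage_lo = 0x58 */
	panel_fixed_mode->width_mm  = (dvo_timing->himage_hi << 8) |
				      dvo_timing->himage_lo;   /* 0x158 = 344 */
	panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) |
				      dvo_timing->vimage_lo;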
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 1f14b602882b..82656654fb21 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c | |||
| @@ -97,8 +97,8 @@ static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc) | |||
| 97 | return NULL; | 97 | return NULL; |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format, | 100 | int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format, |
| 101 | int hsync_pin, int vsync_pin) | 101 | int hsync_pin, int vsync_pin, u32 bus_flags) |
| 102 | { | 102 | { |
| 103 | struct imx_drm_crtc_helper_funcs *helper; | 103 | struct imx_drm_crtc_helper_funcs *helper; |
| 104 | struct imx_drm_crtc *imx_crtc; | 104 | struct imx_drm_crtc *imx_crtc; |
| @@ -110,14 +110,17 @@ int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, u32 bus_format, | |||
| 110 | helper = &imx_crtc->imx_drm_helper_funcs; | 110 | helper = &imx_crtc->imx_drm_helper_funcs; |
| 111 | if (helper->set_interface_pix_fmt) | 111 | if (helper->set_interface_pix_fmt) |
| 112 | return helper->set_interface_pix_fmt(encoder->crtc, | 112 | return helper->set_interface_pix_fmt(encoder->crtc, |
| 113 | bus_format, hsync_pin, vsync_pin); | 113 | bus_format, hsync_pin, vsync_pin, |
| 114 | bus_flags); | ||
| 114 | return 0; | 115 | return 0; |
| 115 | } | 116 | } |
| 116 | EXPORT_SYMBOL_GPL(imx_drm_set_bus_format_pins); | 117 | EXPORT_SYMBOL_GPL(imx_drm_set_bus_config); |
| 117 | 118 | ||
| 118 | int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format) | 119 | int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format) |
| 119 | { | 120 | { |
| 120 | return imx_drm_set_bus_format_pins(encoder, bus_format, 2, 3); | 121 | return imx_drm_set_bus_config(encoder, bus_format, 2, 3, |
| 122 | DRM_BUS_FLAG_DE_HIGH | | ||
| 123 | DRM_BUS_FLAG_PIXDATA_NEGEDGE); | ||
| 121 | } | 124 | } |
| 122 | EXPORT_SYMBOL_GPL(imx_drm_set_bus_format); | 125 | EXPORT_SYMBOL_GPL(imx_drm_set_bus_format); |
| 123 | 126 | ||
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h index b0241b9d1334..74320a1723b7 100644 --- a/drivers/gpu/drm/imx/imx-drm.h +++ b/drivers/gpu/drm/imx/imx-drm.h | |||
| @@ -19,7 +19,8 @@ struct imx_drm_crtc_helper_funcs { | |||
| 19 | int (*enable_vblank)(struct drm_crtc *crtc); | 19 | int (*enable_vblank)(struct drm_crtc *crtc); |
| 20 | void (*disable_vblank)(struct drm_crtc *crtc); | 20 | void (*disable_vblank)(struct drm_crtc *crtc); |
| 21 | int (*set_interface_pix_fmt)(struct drm_crtc *crtc, | 21 | int (*set_interface_pix_fmt)(struct drm_crtc *crtc, |
| 22 | u32 bus_format, int hsync_pin, int vsync_pin); | 22 | u32 bus_format, int hsync_pin, int vsync_pin, |
| 23 | u32 bus_flags); | ||
| 23 | const struct drm_crtc_helper_funcs *crtc_helper_funcs; | 24 | const struct drm_crtc_helper_funcs *crtc_helper_funcs; |
| 24 | const struct drm_crtc_funcs *crtc_funcs; | 25 | const struct drm_crtc_funcs *crtc_funcs; |
| 25 | }; | 26 | }; |
| @@ -41,8 +42,8 @@ void imx_drm_mode_config_init(struct drm_device *drm); | |||
| 41 | 42 | ||
| 42 | struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); | 43 | struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); |
| 43 | 44 | ||
| 44 | int imx_drm_set_bus_format_pins(struct drm_encoder *encoder, | 45 | int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format, |
| 45 | u32 bus_format, int hsync_pin, int vsync_pin); | 46 | int hsync_pin, int vsync_pin, u32 bus_flags); |
| 46 | int imx_drm_set_bus_format(struct drm_encoder *encoder, | 47 | int imx_drm_set_bus_format(struct drm_encoder *encoder, |
| 47 | u32 bus_format); | 48 | u32 bus_format); |
| 48 | 49 | ||
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index a58eee59550a..beff793bb717 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> | 25 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> |
| 26 | #include <linux/of_device.h> | 26 | #include <linux/of_device.h> |
| 27 | #include <linux/of_graph.h> | 27 | #include <linux/of_graph.h> |
| 28 | #include <video/of_display_timing.h> | ||
| 28 | #include <video/of_videomode.h> | 29 | #include <video/of_videomode.h> |
| 29 | #include <linux/regmap.h> | 30 | #include <linux/regmap.h> |
| 30 | #include <linux/videodev2.h> | 31 | #include <linux/videodev2.h> |
| @@ -59,6 +60,7 @@ struct imx_ldb_channel { | |||
| 59 | struct drm_encoder encoder; | 60 | struct drm_encoder encoder; |
| 60 | struct drm_panel *panel; | 61 | struct drm_panel *panel; |
| 61 | struct device_node *child; | 62 | struct device_node *child; |
| 63 | struct i2c_adapter *ddc; | ||
| 62 | int chno; | 64 | int chno; |
| 63 | void *edid; | 65 | void *edid; |
| 64 | int edid_len; | 66 | int edid_len; |
| @@ -107,6 +109,9 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector) | |||
| 107 | return num_modes; | 109 | return num_modes; |
| 108 | } | 110 | } |
| 109 | 111 | ||
| 112 | if (!imx_ldb_ch->edid && imx_ldb_ch->ddc) | ||
| 113 | imx_ldb_ch->edid = drm_get_edid(connector, imx_ldb_ch->ddc); | ||
| 114 | |||
| 110 | if (imx_ldb_ch->edid) { | 115 | if (imx_ldb_ch->edid) { |
| 111 | drm_mode_connector_update_edid_property(connector, | 116 | drm_mode_connector_update_edid_property(connector, |
| 112 | imx_ldb_ch->edid); | 117 | imx_ldb_ch->edid); |
| @@ -553,7 +558,8 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
| 553 | 558 | ||
| 554 | for_each_child_of_node(np, child) { | 559 | for_each_child_of_node(np, child) { |
| 555 | struct imx_ldb_channel *channel; | 560 | struct imx_ldb_channel *channel; |
| 556 | struct device_node *port; | 561 | struct device_node *ddc_node; |
| 562 | struct device_node *ep; | ||
| 557 | 563 | ||
| 558 | ret = of_property_read_u32(child, "reg", &i); | 564 | ret = of_property_read_u32(child, "reg", &i); |
| 559 | if (ret || i < 0 || i > 1) | 565 | if (ret || i < 0 || i > 1) |
| @@ -576,33 +582,54 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
| 576 | * The output port is port@4 with an external 4-port mux or | 582 | * The output port is port@4 with an external 4-port mux or |
| 577 | * port@2 with the internal 2-port mux. | 583 | * port@2 with the internal 2-port mux. |
| 578 | */ | 584 | */ |
| 579 | port = of_graph_get_port_by_id(child, imx_ldb->lvds_mux ? 4 : 2); | 585 | ep = of_graph_get_endpoint_by_regs(child, |
| 580 | if (port) { | 586 | imx_ldb->lvds_mux ? 4 : 2, |
| 581 | struct device_node *endpoint, *remote; | 587 | -1); |
| 582 | 588 | if (ep) { | |
| 583 | endpoint = of_get_child_by_name(port, "endpoint"); | 589 | struct device_node *remote; |
| 584 | if (endpoint) { | 590 | |
| 585 | remote = of_graph_get_remote_port_parent(endpoint); | 591 | remote = of_graph_get_remote_port_parent(ep); |
| 586 | if (remote) | 592 | of_node_put(ep); |
| 587 | channel->panel = of_drm_find_panel(remote); | 593 | if (remote) |
| 588 | else | 594 | channel->panel = of_drm_find_panel(remote); |
| 589 | return -EPROBE_DEFER; | 595 | else |
| 590 | if (!channel->panel) { | 596 | return -EPROBE_DEFER; |
| 591 | dev_err(dev, "panel not found: %s\n", | 597 | of_node_put(remote); |
| 592 | remote->full_name); | 598 | if (!channel->panel) { |
| 593 | return -EPROBE_DEFER; | 599 | dev_err(dev, "panel not found: %s\n", |
| 594 | } | 600 | remote->full_name); |
| 601 | return -EPROBE_DEFER; | ||
| 595 | } | 602 | } |
| 596 | } | 603 | } |
| 597 | 604 | ||
| 598 | edidp = of_get_property(child, "edid", &channel->edid_len); | 605 | ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0); |
| 599 | if (edidp) { | 606 | if (ddc_node) { |
| 600 | channel->edid = kmemdup(edidp, channel->edid_len, | 607 | channel->ddc = of_find_i2c_adapter_by_node(ddc_node); |
| 601 | GFP_KERNEL); | 608 | of_node_put(ddc_node); |
| 602 | } else if (!channel->panel) { | 609 | if (!channel->ddc) { |
| 603 | ret = of_get_drm_display_mode(child, &channel->mode, 0); | 610 | dev_warn(dev, "failed to get ddc i2c adapter\n"); |
| 604 | if (!ret) | 611 | return -EPROBE_DEFER; |
| 605 | channel->mode_valid = 1; | 612 | } |
| 613 | } | ||
| 614 | |||
| 615 | if (!channel->ddc) { | ||
| 616 | /* if no DDC available, fallback to hardcoded EDID */ | ||
| 617 | dev_dbg(dev, "no ddc available\n"); | ||
| 618 | |||
| 619 | edidp = of_get_property(child, "edid", | ||
| 620 | &channel->edid_len); | ||
| 621 | if (edidp) { | ||
| 622 | channel->edid = kmemdup(edidp, | ||
| 623 | channel->edid_len, | ||
| 624 | GFP_KERNEL); | ||
| 625 | } else if (!channel->panel) { | ||
| 626 | /* fallback to display-timings node */ | ||
| 627 | ret = of_get_drm_display_mode(child, | ||
| 628 | &channel->mode, | ||
| 629 | OF_USE_NATIVE_MODE); | ||
| 630 | if (!ret) | ||
| 631 | channel->mode_valid = 1; | ||
| 632 | } | ||
| 606 | } | 633 | } |
| 607 | 634 | ||
| 608 | channel->bus_format = of_get_bus_format(dev, child); | 635 | channel->bus_format = of_get_bus_format(dev, child); |
| @@ -647,6 +674,7 @@ static void imx_ldb_unbind(struct device *dev, struct device *master, | |||
| 647 | channel->encoder.funcs->destroy(&channel->encoder); | 674 | channel->encoder.funcs->destroy(&channel->encoder); |
| 648 | 675 | ||
| 649 | kfree(channel->edid); | 676 | kfree(channel->edid); |
| 677 | i2c_put_adapter(channel->ddc); | ||
| 650 | } | 678 | } |
| 651 | } | 679 | } |
| 652 | 680 | ||
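The imx-ldb hunks above give each LVDS channel a three-step mode source: EDID read over the "ddc-i2c-bus" adapter, then the hardcoded "edid" property, then the display-timings node. A minimal sketch of the DDC half of that lookup, using a hypothetical probe_ddc_edid() helper (illustrative only, not the driver's code; the driver keeps the adapter in channel->ddc until unbind, hence the new i2c_put_adapter() there, whereas the sketch releases it immediately):

	#include <linux/err.h>
	#include <linux/i2c.h>
	#include <linux/of.h>
	#include <drm/drm_crtc.h>
	#include <drm/drm_edid.h>

	/* Illustrative sketch: resolve a "ddc-i2c-bus" phandle on @child and,
	 * if the I2C adapter is already bound, read the EDID over it. */
	static struct edid *probe_ddc_edid(struct drm_connector *connector,
					   struct device_node *child)
	{
		struct device_node *ddc_node;
		struct i2c_adapter *ddc;
		struct edid *edid;

		ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0);
		if (!ddc_node)
			return NULL;			/* no DDC: fall back to "edid"/timings */

		ddc = of_find_i2c_adapter_by_node(ddc_node);
		of_node_put(ddc_node);
		if (!ddc)
			return ERR_PTR(-EPROBE_DEFER);	/* adapter not registered yet */

		edid = drm_get_edid(connector, ddc);	/* NULL if the read fails */
		i2c_put_adapter(ddc);
		return edid;
	}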
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index ae7a9fb3b8a2..baf788121287 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c | |||
| @@ -294,8 +294,10 @@ static void imx_tve_encoder_prepare(struct drm_encoder *encoder) | |||
| 294 | 294 | ||
| 295 | switch (tve->mode) { | 295 | switch (tve->mode) { |
| 296 | case TVE_MODE_VGA: | 296 | case TVE_MODE_VGA: |
| 297 | imx_drm_set_bus_format_pins(encoder, MEDIA_BUS_FMT_GBR888_1X24, | 297 | imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24, |
| 298 | tve->hsync_pin, tve->vsync_pin); | 298 | tve->hsync_pin, tve->vsync_pin, |
| 299 | DRM_BUS_FLAG_DE_HIGH | | ||
| 300 | DRM_BUS_FLAG_PIXDATA_NEGEDGE); | ||
| 299 | break; | 301 | break; |
| 300 | case TVE_MODE_TVOUT: | 302 | case TVE_MODE_TVOUT: |
| 301 | imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24); | 303 | imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24); |
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index b2c30b8d9816..fc040417e1e8 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c | |||
| @@ -66,6 +66,7 @@ struct ipu_crtc { | |||
| 66 | struct ipu_flip_work *flip_work; | 66 | struct ipu_flip_work *flip_work; |
| 67 | int irq; | 67 | int irq; |
| 68 | u32 bus_format; | 68 | u32 bus_format; |
| 69 | u32 bus_flags; | ||
| 69 | int di_hsync_pin; | 70 | int di_hsync_pin; |
| 70 | int di_vsync_pin; | 71 | int di_vsync_pin; |
| 71 | }; | 72 | }; |
| @@ -271,8 +272,10 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc, | |||
| 271 | else | 272 | else |
| 272 | sig_cfg.clkflags = 0; | 273 | sig_cfg.clkflags = 0; |
| 273 | 274 | ||
| 274 | sig_cfg.enable_pol = 1; | 275 | sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW); |
| 275 | sig_cfg.clk_pol = 0; | 276 | /* Default to driving pixel data on negative clock edges */ |
| 277 | sig_cfg.clk_pol = !!(ipu_crtc->bus_flags & | ||
| 278 | DRM_BUS_FLAG_PIXDATA_POSEDGE); | ||
| 276 | sig_cfg.bus_format = ipu_crtc->bus_format; | 279 | sig_cfg.bus_format = ipu_crtc->bus_format; |
| 277 | sig_cfg.v_to_h_sync = 0; | 280 | sig_cfg.v_to_h_sync = 0; |
| 278 | sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin; | 281 | sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin; |
| @@ -396,11 +399,12 @@ static void ipu_disable_vblank(struct drm_crtc *crtc) | |||
| 396 | } | 399 | } |
| 397 | 400 | ||
| 398 | static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, | 401 | static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc, |
| 399 | u32 bus_format, int hsync_pin, int vsync_pin) | 402 | u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags) |
| 400 | { | 403 | { |
| 401 | struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); | 404 | struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); |
| 402 | 405 | ||
| 403 | ipu_crtc->bus_format = bus_format; | 406 | ipu_crtc->bus_format = bus_format; |
| 407 | ipu_crtc->bus_flags = bus_flags; | ||
| 404 | ipu_crtc->di_hsync_pin = hsync_pin; | 408 | ipu_crtc->di_hsync_pin = hsync_pin; |
| 405 | ipu_crtc->di_vsync_pin = vsync_pin; | 409 | ipu_crtc->di_vsync_pin = vsync_pin; |
| 406 | 410 | ||
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 681ec6eb77d9..a4bb44118d33 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c | |||
| @@ -38,6 +38,8 @@ static const uint32_t ipu_plane_formats[] = { | |||
| 38 | DRM_FORMAT_RGBX8888, | 38 | DRM_FORMAT_RGBX8888, |
| 39 | DRM_FORMAT_BGRA8888, | 39 | DRM_FORMAT_BGRA8888, |
| 40 | DRM_FORMAT_BGRA8888, | 40 | DRM_FORMAT_BGRA8888, |
| 41 | DRM_FORMAT_UYVY, | ||
| 42 | DRM_FORMAT_VYUY, | ||
| 41 | DRM_FORMAT_YUYV, | 43 | DRM_FORMAT_YUYV, |
| 42 | DRM_FORMAT_YVYU, | 44 | DRM_FORMAT_YVYU, |
| 43 | DRM_FORMAT_YUV420, | 45 | DRM_FORMAT_YUV420, |
| @@ -428,7 +430,6 @@ static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, | |||
| 428 | if (crtc != plane->crtc) | 430 | if (crtc != plane->crtc) |
| 429 | dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n", | 431 | dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n", |
| 430 | plane->crtc, crtc); | 432 | plane->crtc, crtc); |
| 431 | plane->crtc = crtc; | ||
| 432 | 433 | ||
| 433 | if (!ipu_plane->enabled) | 434 | if (!ipu_plane->enabled) |
| 434 | ipu_plane_enable(ipu_plane); | 435 | ipu_plane_enable(ipu_plane); |
| @@ -461,7 +462,7 @@ static void ipu_plane_destroy(struct drm_plane *plane) | |||
| 461 | kfree(ipu_plane); | 462 | kfree(ipu_plane); |
| 462 | } | 463 | } |
| 463 | 464 | ||
| 464 | static struct drm_plane_funcs ipu_plane_funcs = { | 465 | static const struct drm_plane_funcs ipu_plane_funcs = { |
| 465 | .update_plane = ipu_update_plane, | 466 | .update_plane = ipu_update_plane, |
| 466 | .disable_plane = ipu_disable_plane, | 467 | .disable_plane = ipu_disable_plane, |
| 467 | .destroy = ipu_plane_destroy, | 468 | .destroy = ipu_plane_destroy, |
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 363e2c7741e2..2d1fd02cd3d6 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c | |||
| @@ -35,7 +35,6 @@ struct imx_parallel_display { | |||
| 35 | void *edid; | 35 | void *edid; |
| 36 | int edid_len; | 36 | int edid_len; |
| 37 | u32 bus_format; | 37 | u32 bus_format; |
| 38 | int mode_valid; | ||
| 39 | struct drm_display_mode mode; | 38 | struct drm_display_mode mode; |
| 40 | struct drm_panel *panel; | 39 | struct drm_panel *panel; |
| 41 | }; | 40 | }; |
| @@ -68,17 +67,6 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) | |||
| 68 | num_modes = drm_add_edid_modes(connector, imxpd->edid); | 67 | num_modes = drm_add_edid_modes(connector, imxpd->edid); |
| 69 | } | 68 | } |
| 70 | 69 | ||
| 71 | if (imxpd->mode_valid) { | ||
| 72 | struct drm_display_mode *mode = drm_mode_create(connector->dev); | ||
| 73 | |||
| 74 | if (!mode) | ||
| 75 | return -EINVAL; | ||
| 76 | drm_mode_copy(mode, &imxpd->mode); | ||
| 77 | mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, | ||
| 78 | drm_mode_probed_add(connector, mode); | ||
| 79 | num_modes++; | ||
| 80 | } | ||
| 81 | |||
| 82 | if (np) { | 70 | if (np) { |
| 83 | struct drm_display_mode *mode = drm_mode_create(connector->dev); | 71 | struct drm_display_mode *mode = drm_mode_create(connector->dev); |
| 84 | 72 | ||
| @@ -115,8 +103,8 @@ static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
| 115 | static void imx_pd_encoder_prepare(struct drm_encoder *encoder) | 103 | static void imx_pd_encoder_prepare(struct drm_encoder *encoder) |
| 116 | { | 104 | { |
| 117 | struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); | 105 | struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); |
| 118 | 106 | imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3, | |
| 119 | imx_drm_set_bus_format(encoder, imxpd->bus_format); | 107 | imxpd->connector.display_info.bus_flags); |
| 120 | } | 108 | } |
| 121 | 109 | ||
| 122 | static void imx_pd_encoder_commit(struct drm_encoder *encoder) | 110 | static void imx_pd_encoder_commit(struct drm_encoder *encoder) |
| @@ -203,7 +191,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) | |||
| 203 | { | 191 | { |
| 204 | struct drm_device *drm = data; | 192 | struct drm_device *drm = data; |
| 205 | struct device_node *np = dev->of_node; | 193 | struct device_node *np = dev->of_node; |
| 206 | struct device_node *port; | 194 | struct device_node *ep; |
| 207 | const u8 *edidp; | 195 | const u8 *edidp; |
| 208 | struct imx_parallel_display *imxpd; | 196 | struct imx_parallel_display *imxpd; |
| 209 | int ret; | 197 | int ret; |
| @@ -230,18 +218,18 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) | |||
| 230 | } | 218 | } |
| 231 | 219 | ||
| 232 | /* port@1 is the output port */ | 220 | /* port@1 is the output port */ |
| 233 | port = of_graph_get_port_by_id(np, 1); | 221 | ep = of_graph_get_endpoint_by_regs(np, 1, -1); |
| 234 | if (port) { | 222 | if (ep) { |
| 235 | struct device_node *endpoint, *remote; | 223 | struct device_node *remote; |
| 236 | 224 | ||
| 237 | endpoint = of_get_child_by_name(port, "endpoint"); | 225 | remote = of_graph_get_remote_port_parent(ep); |
| 238 | if (endpoint) { | 226 | of_node_put(ep); |
| 239 | remote = of_graph_get_remote_port_parent(endpoint); | 227 | if (remote) { |
| 240 | if (remote) | 228 | imxpd->panel = of_drm_find_panel(remote); |
| 241 | imxpd->panel = of_drm_find_panel(remote); | 229 | of_node_put(remote); |
| 242 | if (!imxpd->panel) | ||
| 243 | return -EPROBE_DEFER; | ||
| 244 | } | 230 | } |
| 231 | if (!imxpd->panel) | ||
| 232 | return -EPROBE_DEFER; | ||
| 245 | } | 233 | } |
| 246 | 234 | ||
| 247 | imxpd->dev = dev; | 235 | imxpd->dev = dev; |
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index d05ca7901315..0186e500d2a5 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c | |||
| @@ -432,11 +432,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi, | |||
| 432 | unsigned long pll_rate; | 432 | unsigned long pll_rate; |
| 433 | unsigned int factor; | 433 | unsigned int factor; |
| 434 | 434 | ||
| 435 | if (!dpi) { | ||
| 436 | dev_err(dpi->dev, "invalid argument\n"); | ||
| 437 | return -EINVAL; | ||
| 438 | } | ||
| 439 | |||
| 440 | pix_rate = 1000UL * mode->clock; | 435 | pix_rate = 1000UL * mode->clock; |
| 441 | if (mode->clock <= 74000) | 436 | if (mode->clock <= 74000) |
| 442 | factor = 8 * 3; | 437 | factor = 8 * 3; |
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 2d808e59fefd..769559124562 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c | |||
| @@ -695,10 +695,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi) | |||
| 695 | { | 695 | { |
| 696 | drm_encoder_cleanup(&dsi->encoder); | 696 | drm_encoder_cleanup(&dsi->encoder); |
| 697 | /* Skip connector cleanup if creation was delegated to the bridge */ | 697 | /* Skip connector cleanup if creation was delegated to the bridge */ |
| 698 | if (dsi->conn.dev) { | 698 | if (dsi->conn.dev) |
| 699 | drm_connector_unregister(&dsi->conn); | ||
| 700 | drm_connector_cleanup(&dsi->conn); | 699 | drm_connector_cleanup(&dsi->conn); |
| 701 | } | ||
| 702 | } | 700 | } |
| 703 | 701 | ||
| 704 | static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp) | 702 | static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp) |
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 14e64e08909e..d347dca17267 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c | |||
| @@ -182,7 +182,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock) | |||
| 182 | } | 182 | } |
| 183 | } | 183 | } |
| 184 | 184 | ||
| 185 | fvv = pllreffreq * testn / testm; | 185 | fvv = pllreffreq * (n + 1) / (m + 1); |
| 186 | fvv = (fvv - 800000) / 50000; | 186 | fvv = (fvv - 800000) / 50000; |
| 187 | 187 | ||
| 188 | if (fvv > 15) | 188 | if (fvv > 15) |
| @@ -202,6 +202,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock) | |||
| 202 | WREG_DAC(MGA1064_PIX_PLLC_M, m); | 202 | WREG_DAC(MGA1064_PIX_PLLC_M, m); |
| 203 | WREG_DAC(MGA1064_PIX_PLLC_N, n); | 203 | WREG_DAC(MGA1064_PIX_PLLC_N, n); |
| 204 | WREG_DAC(MGA1064_PIX_PLLC_P, p); | 204 | WREG_DAC(MGA1064_PIX_PLLC_P, p); |
| 205 | |||
| 206 | if (mdev->unique_rev_id >= 0x04) { | ||
| 207 | WREG_DAC(0x1a, 0x09); | ||
| 208 | msleep(20); | ||
| 209 | WREG_DAC(0x1a, 0x01); | ||
| 210 | |||
| 211 | } | ||
| 212 | |||
| 205 | return 0; | 213 | return 0; |
| 206 | } | 214 | } |
| 207 | 215 | ||
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index fbe304ee6c80..2aec27dbb5bb 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c | |||
| @@ -408,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 408 | } | 408 | } |
| 409 | 409 | ||
| 410 | adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); | 410 | adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); |
| 411 | if (!adreno_gpu->memptrs) { | 411 | if (IS_ERR(adreno_gpu->memptrs)) { |
| 412 | dev_err(drm->dev, "could not vmap memptrs\n"); | 412 | dev_err(drm->dev, "could not vmap memptrs\n"); |
| 413 | return -ENOMEM; | 413 | return -ENOMEM; |
| 414 | } | 414 | } |
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index d9759bf3482e..c6cf837c5193 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c | |||
| @@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, | |||
| 159 | dev->mode_config.fb_base = paddr; | 159 | dev->mode_config.fb_base = paddr; |
| 160 | 160 | ||
| 161 | fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); | 161 | fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); |
| 162 | if (IS_ERR(fbi->screen_base)) { | ||
| 163 | ret = PTR_ERR(fbi->screen_base); | ||
| 164 | goto fail_unlock; | ||
| 165 | } | ||
| 162 | fbi->screen_size = fbdev->bo->size; | 166 | fbi->screen_size = fbdev->bo->size; |
| 163 | fbi->fix.smem_start = paddr; | 167 | fbi->fix.smem_start = paddr; |
| 164 | fbi->fix.smem_len = fbdev->bo->size; | 168 | fbi->fix.smem_len = fbdev->bo->size; |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 7daf4054dd2b..69836f5685b1 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
| @@ -398,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj) | |||
| 398 | return ERR_CAST(pages); | 398 | return ERR_CAST(pages); |
| 399 | msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, | 399 | msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, |
| 400 | VM_MAP, pgprot_writecombine(PAGE_KERNEL)); | 400 | VM_MAP, pgprot_writecombine(PAGE_KERNEL)); |
| 401 | if (msm_obj->vaddr == NULL) | ||
| 402 | return ERR_PTR(-ENOMEM); | ||
| 401 | } | 403 | } |
| 402 | return msm_obj->vaddr; | 404 | return msm_obj->vaddr; |
| 403 | } | 405 | } |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index b89ca5174863..eb4bb8b2f3a5 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
| @@ -40,12 +40,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, | |||
| 40 | 40 | ||
| 41 | submit->dev = dev; | 41 | submit->dev = dev; |
| 42 | submit->gpu = gpu; | 42 | submit->gpu = gpu; |
| 43 | submit->fence = NULL; | ||
| 43 | submit->pid = get_pid(task_pid(current)); | 44 | submit->pid = get_pid(task_pid(current)); |
| 44 | 45 | ||
| 45 | /* initially, until copy_from_user() and bo lookup succeeds: */ | 46 | /* initially, until copy_from_user() and bo lookup succeeds: */ |
| 46 | submit->nr_bos = 0; | 47 | submit->nr_bos = 0; |
| 47 | submit->nr_cmds = 0; | 48 | submit->nr_cmds = 0; |
| 48 | 49 | ||
| 50 | INIT_LIST_HEAD(&submit->node); | ||
| 49 | INIT_LIST_HEAD(&submit->bo_list); | 51 | INIT_LIST_HEAD(&submit->bo_list); |
| 50 | ww_acquire_init(&submit->ticket, &reservation_ww_class); | 52 | ww_acquire_init(&submit->ticket, &reservation_ww_class); |
| 51 | 53 | ||
| @@ -75,6 +77,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, | |||
| 75 | void __user *userptr = | 77 | void __user *userptr = |
| 76 | u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); | 78 | u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); |
| 77 | 79 | ||
| 80 | /* make sure we don't have garbage flags, in case we hit | ||
| 81 | * error path before flags is initialized: | ||
| 82 | */ | ||
| 83 | submit->bos[i].flags = 0; | ||
| 84 | |||
| 78 | ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); | 85 | ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); |
| 79 | if (ret) { | 86 | if (ret) { |
| 80 | ret = -EFAULT; | 87 | ret = -EFAULT; |
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index b48f73ac6389..0857710c2ff2 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c | |||
| @@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit) | |||
| 312 | struct msm_gem_object *obj = submit->bos[idx].obj; | 312 | struct msm_gem_object *obj = submit->bos[idx].obj; |
| 313 | const char *buf = msm_gem_vaddr_locked(&obj->base); | 313 | const char *buf = msm_gem_vaddr_locked(&obj->base); |
| 314 | 314 | ||
| 315 | if (IS_ERR(buf)) | ||
| 316 | continue; | ||
| 317 | |||
| 315 | buf += iova - submit->bos[idx].iova; | 318 | buf += iova - submit->bos[idx].iova; |
| 316 | 319 | ||
| 317 | rd_write_section(rd, RD_GPUADDR, | 320 | rd_write_section(rd, RD_GPUADDR, |
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 1f14b908b221..42f5359cf988 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c | |||
| @@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) | |||
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | ring->start = msm_gem_vaddr_locked(ring->bo); | 42 | ring->start = msm_gem_vaddr_locked(ring->bo); |
| 43 | if (IS_ERR(ring->start)) { | ||
| 44 | ret = PTR_ERR(ring->start); | ||
| 45 | goto fail; | ||
| 46 | } | ||
| 43 | ring->end = ring->start + (size / 4); | 47 | ring->end = ring->start + (size / 4); |
| 44 | ring->cur = ring->start; | 48 | ring->cur = ring->start; |
| 45 | 49 | ||
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index c612dc1f1eb4..126a85cc81bc 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h | |||
| @@ -16,9 +16,9 @@ enum nvkm_devidx { | |||
| 16 | NVKM_SUBDEV_MC, | 16 | NVKM_SUBDEV_MC, |
| 17 | NVKM_SUBDEV_BUS, | 17 | NVKM_SUBDEV_BUS, |
| 18 | NVKM_SUBDEV_TIMER, | 18 | NVKM_SUBDEV_TIMER, |
| 19 | NVKM_SUBDEV_INSTMEM, | ||
| 19 | NVKM_SUBDEV_FB, | 20 | NVKM_SUBDEV_FB, |
| 20 | NVKM_SUBDEV_LTC, | 21 | NVKM_SUBDEV_LTC, |
| 21 | NVKM_SUBDEV_INSTMEM, | ||
| 22 | NVKM_SUBDEV_MMU, | 22 | NVKM_SUBDEV_MMU, |
| 23 | NVKM_SUBDEV_BAR, | 23 | NVKM_SUBDEV_BAR, |
| 24 | NVKM_SUBDEV_PMU, | 24 | NVKM_SUBDEV_PMU, |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h index db10c11f0595..c5a6ebd5a478 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h | |||
| @@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask, | |||
| 25 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *); | 25 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *); |
| 26 | 26 | ||
| 27 | struct nvbios_ocfg { | 27 | struct nvbios_ocfg { |
| 28 | u16 match; | 28 | u8 proto; |
| 29 | u8 flags; | ||
| 29 | u16 clkcmp[2]; | 30 | u16 clkcmp[2]; |
| 30 | }; | 31 | }; |
| 31 | 32 | ||
| @@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx, | |||
| 33 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len); | 34 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len); |
| 34 | u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx, | 35 | u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx, |
| 35 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); | 36 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); |
| 36 | u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type, | 37 | u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags, |
| 37 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); | 38 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); |
| 38 | u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz); | 39 | u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz); |
| 39 | #endif | 40 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 57aaf98a26f9..d1f248fd3506 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -552,6 +552,8 @@ nouveau_fbcon_init(struct drm_device *dev) | |||
| 552 | if (ret) | 552 | if (ret) |
| 553 | goto fini; | 553 | goto fini; |
| 554 | 554 | ||
| 555 | if (fbcon->helper.fbdev) | ||
| 556 | fbcon->helper.fbdev->pixmap.buf_align = 4; | ||
| 555 | return 0; | 557 | return 0; |
| 556 | 558 | ||
| 557 | fini: | 559 | fini: |
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 0f3e4bb411cc..7d9248b8c664 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c | |||
| @@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 82 | uint32_t fg; | 82 | uint32_t fg; |
| 83 | uint32_t bg; | 83 | uint32_t bg; |
| 84 | uint32_t dsize; | 84 | uint32_t dsize; |
| 85 | uint32_t width; | ||
| 86 | uint32_t *data = (uint32_t *)image->data; | 85 | uint32_t *data = (uint32_t *)image->data; |
| 87 | int ret; | 86 | int ret; |
| 88 | 87 | ||
| @@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 93 | if (ret) | 92 | if (ret) |
| 94 | return ret; | 93 | return ret; |
| 95 | 94 | ||
| 96 | width = ALIGN(image->width, 8); | ||
| 97 | dsize = ALIGN(width * image->height, 32) >> 5; | ||
| 98 | |||
| 99 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | 95 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
| 100 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | 96 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { |
| 101 | fg = ((uint32_t *) info->pseudo_palette)[image->fg_color]; | 97 | fg = ((uint32_t *) info->pseudo_palette)[image->fg_color]; |
| @@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 111 | ((image->dx + image->width) & 0xffff)); | 107 | ((image->dx + image->width) & 0xffff)); |
| 112 | OUT_RING(chan, bg); | 108 | OUT_RING(chan, bg); |
| 113 | OUT_RING(chan, fg); | 109 | OUT_RING(chan, fg); |
| 114 | OUT_RING(chan, (image->height << 16) | width); | 110 | OUT_RING(chan, (image->height << 16) | image->width); |
| 115 | OUT_RING(chan, (image->height << 16) | image->width); | 111 | OUT_RING(chan, (image->height << 16) | image->width); |
| 116 | OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); | 112 | OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); |
| 117 | 113 | ||
| 114 | dsize = ALIGN(image->width * image->height, 32) >> 5; | ||
| 118 | while (dsize) { | 115 | while (dsize) { |
| 119 | int iter_len = dsize > 128 ? 128 : dsize; | 116 | int iter_len = dsize > 128 ? 128 : dsize; |
| 120 | 117 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 33d9ee0fac40..1aeb698e9707 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c | |||
| @@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 95 | struct nouveau_fbdev *nfbdev = info->par; | 95 | struct nouveau_fbdev *nfbdev = info->par; |
| 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); | 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); |
| 97 | struct nouveau_channel *chan = drm->channel; | 97 | struct nouveau_channel *chan = drm->channel; |
| 98 | uint32_t width, dwords, *data = (uint32_t *)image->data; | 98 | uint32_t dwords, *data = (uint32_t *)image->data; |
| 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); | 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); |
| 100 | uint32_t *palette = info->pseudo_palette; | 100 | uint32_t *palette = info->pseudo_palette; |
| 101 | int ret; | 101 | int ret; |
| @@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 107 | if (ret) | 107 | if (ret) |
| 108 | return ret; | 108 | return ret; |
| 109 | 109 | ||
| 110 | width = ALIGN(image->width, 32); | ||
| 111 | dwords = (width * image->height) >> 5; | ||
| 112 | |||
| 113 | BEGIN_NV04(chan, NvSub2D, 0x0814, 2); | 110 | BEGIN_NV04(chan, NvSub2D, 0x0814, 2); |
| 114 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | 111 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
| 115 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | 112 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { |
| @@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 128 | OUT_RING(chan, 0); | 125 | OUT_RING(chan, 0); |
| 129 | OUT_RING(chan, image->dy); | 126 | OUT_RING(chan, image->dy); |
| 130 | 127 | ||
| 128 | dwords = ALIGN(image->width * image->height, 32) >> 5; | ||
| 131 | while (dwords) { | 129 | while (dwords) { |
| 132 | int push = dwords > 2047 ? 2047 : dwords; | 130 | int push = dwords > 2047 ? 2047 : dwords; |
| 133 | 131 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index a0913359ac05..839f4c8c1805 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c | |||
| @@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 95 | struct nouveau_fbdev *nfbdev = info->par; | 95 | struct nouveau_fbdev *nfbdev = info->par; |
| 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); | 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); |
| 97 | struct nouveau_channel *chan = drm->channel; | 97 | struct nouveau_channel *chan = drm->channel; |
| 98 | uint32_t width, dwords, *data = (uint32_t *)image->data; | 98 | uint32_t dwords, *data = (uint32_t *)image->data; |
| 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); | 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); |
| 100 | uint32_t *palette = info->pseudo_palette; | 100 | uint32_t *palette = info->pseudo_palette; |
| 101 | int ret; | 101 | int ret; |
| @@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 107 | if (ret) | 107 | if (ret) |
| 108 | return ret; | 108 | return ret; |
| 109 | 109 | ||
| 110 | width = ALIGN(image->width, 32); | ||
| 111 | dwords = (width * image->height) >> 5; | ||
| 112 | |||
| 113 | BEGIN_NVC0(chan, NvSub2D, 0x0814, 2); | 110 | BEGIN_NVC0(chan, NvSub2D, 0x0814, 2); |
| 114 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | 111 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
| 115 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | 112 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { |
| @@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 128 | OUT_RING (chan, 0); | 125 | OUT_RING (chan, 0); |
| 129 | OUT_RING (chan, image->dy); | 126 | OUT_RING (chan, image->dy); |
| 130 | 127 | ||
| 128 | dwords = ALIGN(image->width * image->height, 32) >> 5; | ||
| 131 | while (dwords) { | 129 | while (dwords) { |
| 132 | int push = dwords > 2047 ? 2047 : dwords; | 130 | int push = dwords > 2047 ? 2047 : dwords; |
| 133 | 131 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c index 18fab3973ce5..62ad0300cfa5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c | |||
| @@ -1614,7 +1614,7 @@ nvkm_device_pci_func = { | |||
| 1614 | .fini = nvkm_device_pci_fini, | 1614 | .fini = nvkm_device_pci_fini, |
| 1615 | .resource_addr = nvkm_device_pci_resource_addr, | 1615 | .resource_addr = nvkm_device_pci_resource_addr, |
| 1616 | .resource_size = nvkm_device_pci_resource_size, | 1616 | .resource_size = nvkm_device_pci_resource_size, |
| 1617 | .cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64), | 1617 | .cpu_coherent = !IS_ENABLED(CONFIG_ARM), |
| 1618 | }; | 1618 | }; |
| 1619 | 1619 | ||
| 1620 | int | 1620 | int |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild index a74c5dd27dc0..e2a64ed14b22 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | |||
| @@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o | |||
| 18 | nvkm-y += nvkm/engine/disp/sornv50.o | 18 | nvkm-y += nvkm/engine/disp/sornv50.o |
| 19 | nvkm-y += nvkm/engine/disp/sorg94.o | 19 | nvkm-y += nvkm/engine/disp/sorg94.o |
| 20 | nvkm-y += nvkm/engine/disp/sorgf119.o | 20 | nvkm-y += nvkm/engine/disp/sorgf119.o |
| 21 | nvkm-y += nvkm/engine/disp/sorgm107.o | ||
| 21 | nvkm-y += nvkm/engine/disp/sorgm200.o | 22 | nvkm-y += nvkm/engine/disp/sorgm200.o |
| 22 | nvkm-y += nvkm/engine/disp/dport.o | 23 | nvkm-y += nvkm/engine/disp/dport.o |
| 23 | 24 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c index f0314664349c..5dd34382f55a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c | |||
| @@ -76,6 +76,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl, | |||
| 76 | mask |= 0x0001 << or; | 76 | mask |= 0x0001 << or; |
| 77 | mask |= 0x0100 << head; | 77 | mask |= 0x0100 << head; |
| 78 | 78 | ||
| 79 | |||
| 79 | list_for_each_entry(outp, &disp->base.outp, head) { | 80 | list_for_each_entry(outp, &disp->base.outp, head) { |
| 80 | if ((outp->info.hasht & 0xff) == type && | 81 | if ((outp->info.hasht & 0xff) == type && |
| 81 | (outp->info.hashm & mask) == mask) { | 82 | (outp->info.hashm & mask) == mask) { |
| @@ -155,25 +156,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) | |||
| 155 | if (!outp) | 156 | if (!outp) |
| 156 | return NULL; | 157 | return NULL; |
| 157 | 158 | ||
| 159 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 158 | switch (outp->info.type) { | 160 | switch (outp->info.type) { |
| 159 | case DCB_OUTPUT_TMDS: | 161 | case DCB_OUTPUT_TMDS: |
| 160 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 161 | if (*conf == 5) | 162 | if (*conf == 5) |
| 162 | *conf |= 0x0100; | 163 | *conf |= 0x0100; |
| 163 | break; | 164 | break; |
| 164 | case DCB_OUTPUT_LVDS: | 165 | case DCB_OUTPUT_LVDS: |
| 165 | *conf = disp->sor.lvdsconf; | 166 | *conf |= disp->sor.lvdsconf; |
| 166 | break; | ||
| 167 | case DCB_OUTPUT_DP: | ||
| 168 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 169 | break; | 167 | break; |
| 170 | case DCB_OUTPUT_ANALOG: | ||
| 171 | default: | 168 | default: |
| 172 | *conf = 0x00ff; | ||
| 173 | break; | 169 | break; |
| 174 | } | 170 | } |
| 175 | 171 | ||
| 176 | data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2); | 172 | data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8, |
| 173 | &ver, &hdr, &cnt, &len, &info2); | ||
| 177 | if (data && id < 0xff) { | 174 | if (data && id < 0xff) { |
| 178 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); | 175 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); |
| 179 | if (data) { | 176 | if (data) { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c index b6944142d616..f4b9cf8574be 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c | |||
| @@ -36,7 +36,7 @@ gm107_disp = { | |||
| 36 | .outp.internal.crt = nv50_dac_output_new, | 36 | .outp.internal.crt = nv50_dac_output_new, |
| 37 | .outp.internal.tmds = nv50_sor_output_new, | 37 | .outp.internal.tmds = nv50_sor_output_new, |
| 38 | .outp.internal.lvds = nv50_sor_output_new, | 38 | .outp.internal.lvds = nv50_sor_output_new, |
| 39 | .outp.internal.dp = gf119_sor_dp_new, | 39 | .outp.internal.dp = gm107_sor_dp_new, |
| 40 | .dac.nr = 3, | 40 | .dac.nr = 3, |
| 41 | .dac.power = nv50_dac_power, | 41 | .dac.power = nv50_dac_power, |
| 42 | .dac.sense = nv50_dac_sense, | 42 | .dac.sense = nv50_dac_sense, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c index 4226d2153b9c..fcb1b0c46d64 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | |||
| @@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) | |||
| 387 | if (!outp) | 387 | if (!outp) |
| 388 | return NULL; | 388 | return NULL; |
| 389 | 389 | ||
| 390 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 390 | if (outp->info.location == 0) { | 391 | if (outp->info.location == 0) { |
| 391 | switch (outp->info.type) { | 392 | switch (outp->info.type) { |
| 392 | case DCB_OUTPUT_TMDS: | 393 | case DCB_OUTPUT_TMDS: |
| 393 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 394 | if (*conf == 5) | 394 | if (*conf == 5) |
| 395 | *conf |= 0x0100; | 395 | *conf |= 0x0100; |
| 396 | break; | 396 | break; |
| 397 | case DCB_OUTPUT_LVDS: | 397 | case DCB_OUTPUT_LVDS: |
| 398 | *conf = disp->sor.lvdsconf; | 398 | *conf |= disp->sor.lvdsconf; |
| 399 | break; | 399 | break; |
| 400 | case DCB_OUTPUT_DP: | ||
| 401 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 402 | break; | ||
| 403 | case DCB_OUTPUT_ANALOG: | ||
| 404 | default: | 400 | default: |
| 405 | *conf = 0x00ff; | ||
| 406 | break; | 401 | break; |
| 407 | } | 402 | } |
| 408 | } else { | 403 | } else { |
| @@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) | |||
| 410 | pclk = pclk / 2; | 405 | pclk = pclk / 2; |
| 411 | } | 406 | } |
| 412 | 407 | ||
| 413 | data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2); | 408 | data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8, |
| 409 | &ver, &hdr, &cnt, &len, &info2); | ||
| 414 | if (data && id < 0xff) { | 410 | if (data && id < 0xff) { |
| 415 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); | 411 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); |
| 416 | if (data) { | 412 | if (data) { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h index e9067ba4e179..4e983f6d7032 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h | |||
| @@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int); | |||
| 62 | int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, | 62 | int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, |
| 63 | struct nvkm_output **); | 63 | struct nvkm_output **); |
| 64 | int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool); | 64 | int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool); |
| 65 | int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int); | ||
| 65 | 66 | ||
| 66 | int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, | 67 | int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, |
| 67 | struct nvkm_output **); | 68 | struct nvkm_output **); |
| 69 | int gm107_sor_dp_pattern(struct nvkm_output_dp *, int); | ||
| 70 | |||
| 71 | int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, | ||
| 72 | struct nvkm_output **); | ||
| 68 | #endif | 73 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c index b4b41b135643..22706c0a54b5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c | |||
| @@ -40,8 +40,7 @@ static int | |||
| 40 | gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) | 40 | gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) |
| 41 | { | 41 | { |
| 42 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | 42 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; |
| 43 | const u32 loff = gf119_sor_loff(outp); | 43 | nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern); |
| 44 | nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern); | ||
| 45 | return 0; | 44 | return 0; |
| 46 | } | 45 | } |
| 47 | 46 | ||
| @@ -64,7 +63,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef) | |||
| 64 | return 0; | 63 | return 0; |
| 65 | } | 64 | } |
| 66 | 65 | ||
| 67 | static int | 66 | int |
| 68 | gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp, | 67 | gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp, |
| 69 | int ln, int vs, int pe, int pc) | 68 | int ln, int vs, int pe, int pc) |
| 70 | { | 69 | { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c new file mode 100644 index 000000000000..37790b2617c5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c | |||
| @@ -0,0 +1,53 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2016 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
| 23 | */ | ||
| 24 | #include "nv50.h" | ||
| 25 | #include "outpdp.h" | ||
| 26 | |||
| 27 | int | ||
| 28 | gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) | ||
| 29 | { | ||
| 30 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | ||
| 31 | const u32 soff = outp->base.or * 0x800; | ||
| 32 | const u32 data = 0x01010101 * pattern; | ||
| 33 | if (outp->base.info.sorconf.link & 1) | ||
| 34 | nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data); | ||
| 35 | else | ||
| 36 | nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data); | ||
| 37 | return 0; | ||
| 38 | } | ||
| 39 | |||
| 40 | static const struct nvkm_output_dp_func | ||
| 41 | gm107_sor_dp_func = { | ||
| 42 | .pattern = gm107_sor_dp_pattern, | ||
| 43 | .lnk_pwr = g94_sor_dp_lnk_pwr, | ||
| 44 | .lnk_ctl = gf119_sor_dp_lnk_ctl, | ||
| 45 | .drv_ctl = gf119_sor_dp_drv_ctl, | ||
| 46 | }; | ||
| 47 | |||
| 48 | int | ||
| 49 | gm107_sor_dp_new(struct nvkm_disp *disp, int index, | ||
| 50 | struct dcb_output *dcbE, struct nvkm_output **poutp) | ||
| 51 | { | ||
| 52 | return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp); | ||
| 53 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c index 2cfbef9c344f..c44fa7ea672a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c | |||
| @@ -57,19 +57,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane) | |||
| 57 | } | 57 | } |
| 58 | 58 | ||
| 59 | static int | 59 | static int |
| 60 | gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) | ||
| 61 | { | ||
| 62 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | ||
| 63 | const u32 soff = gm200_sor_soff(outp); | ||
| 64 | const u32 data = 0x01010101 * pattern; | ||
| 65 | if (outp->base.info.sorconf.link & 1) | ||
| 66 | nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data); | ||
| 67 | else | ||
| 68 | nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data); | ||
| 69 | return 0; | ||
| 70 | } | ||
| 71 | |||
| 72 | static int | ||
| 73 | gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr) | 60 | gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr) |
| 74 | { | 61 | { |
| 75 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | 62 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; |
| @@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp, | |||
| 129 | 116 | ||
| 130 | static const struct nvkm_output_dp_func | 117 | static const struct nvkm_output_dp_func |
| 131 | gm200_sor_dp_func = { | 118 | gm200_sor_dp_func = { |
| 132 | .pattern = gm200_sor_dp_pattern, | 119 | .pattern = gm107_sor_dp_pattern, |
| 133 | .lnk_pwr = gm200_sor_dp_lnk_pwr, | 120 | .lnk_pwr = gm200_sor_dp_lnk_pwr, |
| 134 | .lnk_ctl = gf119_sor_dp_lnk_ctl, | 121 | .lnk_ctl = gf119_sor_dp_lnk_ctl, |
| 135 | .drv_ctl = gm200_sor_dp_drv_ctl, | 122 | .drv_ctl = gm200_sor_dp_drv_ctl, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index 9513badb8220..ae9ab5b1ab97 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | |||
| @@ -949,22 +949,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc) | |||
| 949 | } | 949 | } |
| 950 | 950 | ||
| 951 | static const struct nvkm_enum gf100_mp_warp_error[] = { | 951 | static const struct nvkm_enum gf100_mp_warp_error[] = { |
| 952 | { 0x00, "NO_ERROR" }, | 952 | { 0x01, "STACK_ERROR" }, |
| 953 | { 0x01, "STACK_MISMATCH" }, | 953 | { 0x02, "API_STACK_ERROR" }, |
| 954 | { 0x03, "RET_EMPTY_STACK_ERROR" }, | ||
| 955 | { 0x04, "PC_WRAP" }, | ||
| 954 | { 0x05, "MISALIGNED_PC" }, | 956 | { 0x05, "MISALIGNED_PC" }, |
| 955 | { 0x08, "MISALIGNED_GPR" }, | 957 | { 0x06, "PC_OVERFLOW" }, |
| 956 | { 0x09, "INVALID_OPCODE" }, | 958 | { 0x07, "MISALIGNED_IMMC_ADDR" }, |
| 957 | { 0x0d, "GPR_OUT_OF_BOUNDS" }, | 959 | { 0x08, "MISALIGNED_REG" }, |
| 958 | { 0x0e, "MEM_OUT_OF_BOUNDS" }, | 960 | { 0x09, "ILLEGAL_INSTR_ENCODING" }, |
| 959 | { 0x0f, "UNALIGNED_MEM_ACCESS" }, | 961 | { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" }, |
| 962 | { 0x0b, "ILLEGAL_INSTR_PARAM" }, | ||
| 963 | { 0x0c, "INVALID_CONST_ADDR" }, | ||
| 964 | { 0x0d, "OOR_REG" }, | ||
| 965 | { 0x0e, "OOR_ADDR" }, | ||
| 966 | { 0x0f, "MISALIGNED_ADDR" }, | ||
| 960 | { 0x10, "INVALID_ADDR_SPACE" }, | 967 | { 0x10, "INVALID_ADDR_SPACE" }, |
| 961 | { 0x11, "INVALID_PARAM" }, | 968 | { 0x11, "ILLEGAL_INSTR_PARAM2" }, |
| 969 | { 0x12, "INVALID_CONST_ADDR_LDC" }, | ||
| 970 | { 0x13, "GEOMETRY_SM_ERROR" }, | ||
| 971 | { 0x14, "DIVERGENT" }, | ||
| 972 | { 0x15, "WARP_EXIT" }, | ||
| 962 | {} | 973 | {} |
| 963 | }; | 974 | }; |
| 964 | 975 | ||
| 965 | static const struct nvkm_bitfield gf100_mp_global_error[] = { | 976 | static const struct nvkm_bitfield gf100_mp_global_error[] = { |
| 977 | { 0x00000001, "SM_TO_SM_FAULT" }, | ||
| 978 | { 0x00000002, "L1_ERROR" }, | ||
| 966 | { 0x00000004, "MULTIPLE_WARP_ERRORS" }, | 979 | { 0x00000004, "MULTIPLE_WARP_ERRORS" }, |
| 967 | { 0x00000008, "OUT_OF_STACK_SPACE" }, | 980 | { 0x00000008, "PHYSICAL_STACK_OVERFLOW" }, |
| 981 | { 0x00000010, "BPT_INT" }, | ||
| 982 | { 0x00000020, "BPT_PAUSE" }, | ||
| 983 | { 0x00000040, "SINGLE_STEP_COMPLETE" }, | ||
| 984 | { 0x20000000, "ECC_SEC_ERROR" }, | ||
| 985 | { 0x40000000, "ECC_DED_ERROR" }, | ||
| 986 | { 0x80000000, "TIMEOUT" }, | ||
| 968 | {} | 987 | {} |
| 969 | }; | 988 | }; |
| 970 | 989 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c index a5e92135cd77..9efb1b48cd54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c | |||
| @@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx, | |||
| 141 | { | 141 | { |
| 142 | u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len); | 142 | u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len); |
| 143 | if (data) { | 143 | if (data) { |
| 144 | info->match = nvbios_rd16(bios, data + 0x00); | 144 | info->proto = nvbios_rd08(bios, data + 0x00); |
| 145 | info->flags = nvbios_rd16(bios, data + 0x01); | ||
| 145 | info->clkcmp[0] = nvbios_rd16(bios, data + 0x02); | 146 | info->clkcmp[0] = nvbios_rd16(bios, data + 0x02); |
| 146 | info->clkcmp[1] = nvbios_rd16(bios, data + 0x04); | 147 | info->clkcmp[1] = nvbios_rd16(bios, data + 0x04); |
| 147 | } | 148 | } |
| @@ -149,12 +150,13 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx, | |||
| 149 | } | 150 | } |
| 150 | 151 | ||
| 151 | u16 | 152 | u16 |
| 152 | nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type, | 153 | nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags, |
| 153 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info) | 154 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info) |
| 154 | { | 155 | { |
| 155 | u16 data, idx = 0; | 156 | u16 data, idx = 0; |
| 156 | while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) { | 157 | while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) { |
| 157 | if (info->match == type) | 158 | if ((info->proto == proto || info->proto == 0xff) && |
| 159 | (info->flags == flags)) | ||
| 158 | break; | 160 | break; |
| 159 | } | 161 | } |
| 160 | return data; | 162 | return data; |
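After this change nvbios_ocfg_match() takes the protocol and flags bytes separately, and the display code in the gf119.c/nv50.c hunks above packs them into the low and high byte of *conf respectively (a table proto of 0xff matches any requested proto). A tiny sketch of that packing, with a made-up helper name (illustrative only):

	/* Illustrative sketch: split the 16-bit conf value used by exec_clkcmp()
	 * into the proto/flags pair now expected by nvbios_ocfg_match(). */
	static inline void ocfg_split_conf(u32 conf, u8 *proto, u8 *flags)
	{
		*proto = conf & 0xff;	/* low byte: output protocol */
		*flags = conf >> 8;	/* high byte: extra config flags */
	}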
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c index 323c79abe468..41bd5d0f7692 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c | |||
| @@ -276,6 +276,8 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev) | |||
| 276 | struct pwr_rail_t *r = &stbl.rail[i]; | 276 | struct pwr_rail_t *r = &stbl.rail[i]; |
| 277 | struct nvkm_iccsense_rail *rail; | 277 | struct nvkm_iccsense_rail *rail; |
| 278 | struct nvkm_iccsense_sensor *sensor; | 278 | struct nvkm_iccsense_sensor *sensor; |
| 279 | int (*read)(struct nvkm_iccsense *, | ||
| 280 | struct nvkm_iccsense_rail *); | ||
| 279 | 281 | ||
| 280 | if (!r->mode || r->resistor_mohm == 0) | 282 | if (!r->mode || r->resistor_mohm == 0) |
| 281 | continue; | 283 | continue; |
| @@ -284,31 +286,31 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev) | |||
| 284 | if (!sensor) | 286 | if (!sensor) |
| 285 | continue; | 287 | continue; |
| 286 | 288 | ||
| 287 | rail = kmalloc(sizeof(*rail), GFP_KERNEL); | ||
| 288 | if (!rail) | ||
| 289 | return -ENOMEM; | ||
| 290 | |||
| 291 | switch (sensor->type) { | 289 | switch (sensor->type) { |
| 292 | case NVBIOS_EXTDEV_INA209: | 290 | case NVBIOS_EXTDEV_INA209: |
| 293 | if (r->rail != 0) | 291 | if (r->rail != 0) |
| 294 | continue; | 292 | continue; |
| 295 | rail->read = nvkm_iccsense_ina209_read; | 293 | read = nvkm_iccsense_ina209_read; |
| 296 | break; | 294 | break; |
| 297 | case NVBIOS_EXTDEV_INA219: | 295 | case NVBIOS_EXTDEV_INA219: |
| 298 | if (r->rail != 0) | 296 | if (r->rail != 0) |
| 299 | continue; | 297 | continue; |
| 300 | rail->read = nvkm_iccsense_ina219_read; | 298 | read = nvkm_iccsense_ina219_read; |
| 301 | break; | 299 | break; |
| 302 | case NVBIOS_EXTDEV_INA3221: | 300 | case NVBIOS_EXTDEV_INA3221: |
| 303 | if (r->rail >= 3) | 301 | if (r->rail >= 3) |
| 304 | continue; | 302 | continue; |
| 305 | rail->read = nvkm_iccsense_ina3221_read; | 303 | read = nvkm_iccsense_ina3221_read; |
| 306 | break; | 304 | break; |
| 307 | default: | 305 | default: |
| 308 | continue; | 306 | continue; |
| 309 | } | 307 | } |
| 310 | 308 | ||
| 309 | rail = kmalloc(sizeof(*rail), GFP_KERNEL); | ||
| 310 | if (!rail) | ||
| 311 | return -ENOMEM; | ||
| 311 | sensor->rail_mask |= 1 << r->rail; | 312 | sensor->rail_mask |= 1 << r->rail; |
| 313 | rail->read = read; | ||
| 312 | rail->sensor = sensor; | 314 | rail->sensor = sensor; |
| 313 | rail->idx = r->rail; | 315 | rail->idx = r->rail; |
| 314 | rail->mohm = r->resistor_mohm; | 316 | rail->mohm = r->resistor_mohm; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c index e292f5679418..389fb13a1998 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c | |||
| @@ -69,11 +69,11 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth) | |||
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | static void | 71 | static void |
| 72 | gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s) | 72 | gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s) |
| 73 | { | 73 | { |
| 74 | struct nvkm_subdev *subdev = &ltc->subdev; | 74 | struct nvkm_subdev *subdev = &ltc->subdev; |
| 75 | struct nvkm_device *device = subdev->device; | 75 | struct nvkm_device *device = subdev->device; |
| 76 | u32 base = 0x140000 + (c * 0x2000) + (s * 0x200); | 76 | u32 base = 0x140400 + (c * 0x2000) + (s * 0x200); |
| 77 | u32 stat = nvkm_rd32(device, base + 0x00c); | 77 | u32 stat = nvkm_rd32(device, base + 0x00c); |
| 78 | 78 | ||
| 79 | if (stat) { | 79 | if (stat) { |
| @@ -92,7 +92,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc) | |||
| 92 | while (mask) { | 92 | while (mask) { |
| 93 | u32 s, c = __ffs(mask); | 93 | u32 s, c = __ffs(mask); |
| 94 | for (s = 0; s < ltc->lts_nr; s++) | 94 | for (s = 0; s < ltc->lts_nr; s++) |
| 95 | gm107_ltc_lts_isr(ltc, c, s); | 95 | gm107_ltc_intr_lts(ltc, c, s); |
| 96 | mask &= ~(1 << c); | 96 | mask &= ~(1 << c); |
| 97 | } | 97 | } |
| 98 | } | 98 | } |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c index 2a29bfd5125a..e18e0dc19ec8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c | |||
| @@ -46,7 +46,7 @@ static const struct nvkm_ltc_func | |||
| 46 | gm200_ltc = { | 46 | gm200_ltc = { |
| 47 | .oneinit = gm200_ltc_oneinit, | 47 | .oneinit = gm200_ltc_oneinit, |
| 48 | .init = gm200_ltc_init, | 48 | .init = gm200_ltc_init, |
| 49 | .intr = gm107_ltc_intr, /*XXX: not validated */ | 49 | .intr = gm107_ltc_intr, |
| 50 | .cbc_clear = gm107_ltc_cbc_clear, | 50 | .cbc_clear = gm107_ltc_cbc_clear, |
| 51 | .cbc_wait = gm107_ltc_cbc_wait, | 51 | .cbc_wait = gm107_ltc_cbc_wait, |
| 52 | .zbc = 16, | 52 | .zbc = 16, |
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig index 73241c4eb7aa..336ad4de9981 100644 --- a/drivers/gpu/drm/omapdrm/Kconfig +++ b/drivers/gpu/drm/omapdrm/Kconfig | |||
| @@ -2,6 +2,7 @@ config DRM_OMAP | |||
| 2 | tristate "OMAP DRM" | 2 | tristate "OMAP DRM" |
| 3 | depends on DRM | 3 | depends on DRM |
| 4 | depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM | 4 | depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM |
| 5 | select OMAP2_DSS | ||
| 5 | select DRM_KMS_HELPER | 6 | select DRM_KMS_HELPER |
| 6 | select DRM_KMS_FB_HELPER | 7 | select DRM_KMS_FB_HELPER |
| 7 | select FB_SYS_FILLRECT | 8 | select FB_SYS_FILLRECT |
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c index 225fd8d6ab31..667ca4a24ece 100644 --- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c +++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | * the Free Software Foundation. | 9 | * the Free Software Foundation. |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/gpio/consumer.h> | ||
| 12 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
| 13 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 14 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c index 8c246c213e06..9594ff7a2b0c 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | * the Free Software Foundation. | 14 | * the Free Software Foundation. |
| 15 | */ | 15 | */ |
| 16 | 16 | ||
| 17 | #include <linux/gpio.h> | 17 | #include <linux/gpio/consumer.h> |
| 18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
| 19 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
| 20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c index 2fd5602880a7..671806ca7d6a 100644 --- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c +++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | * the Free Software Foundation. | 9 | * the Free Software Foundation. |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/gpio.h> | 12 | #include <linux/gpio/consumer.h> |
| 13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| 14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c index e780fd4f8b46..7c2331be8d15 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | * the Free Software Foundation. | 9 | * the Free Software Foundation. |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/gpio.h> | 12 | #include <linux/gpio/consumer.h> |
| 13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| 14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 36485c2137ce..2b118071b5a1 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | #include <linux/backlight.h> | 14 | #include <linux/backlight.h> |
| 15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
| 16 | #include <linux/fb.h> | 16 | #include <linux/fb.h> |
| 17 | #include <linux/gpio.h> | 17 | #include <linux/gpio/consumer.h> |
| 18 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
| 19 | #include <linux/jiffies.h> | 19 | #include <linux/jiffies.h> |
| 20 | #include <linux/module.h> | 20 | #include <linux/module.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c index 458f77bc473d..ac680e1de603 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/spi/spi.h> | 15 | #include <linux/spi/spi.h> |
| 16 | #include <linux/mutex.h> | 16 | #include <linux/mutex.h> |
| 17 | #include <linux/gpio.h> | 17 | #include <linux/gpio.h> |
| 18 | #include <linux/gpio/consumer.h> | ||
| 18 | 19 | ||
| 19 | #include <video/omapdss.h> | 20 | #include <video/omapdss.h> |
| 20 | #include <video/omap-panel-data.h> | 21 | #include <video/omap-panel-data.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c index 780cb263a318..38d2920a95e6 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | |||
| @@ -15,7 +15,7 @@ | |||
| 15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
| 16 | #include <linux/spi/spi.h> | 16 | #include <linux/spi/spi.h> |
| 17 | #include <linux/fb.h> | 17 | #include <linux/fb.h> |
| 18 | #include <linux/gpio.h> | 18 | #include <linux/gpio/consumer.h> |
| 19 | #include <linux/of_gpio.h> | 19 | #include <linux/of_gpio.h> |
| 20 | 20 | ||
| 21 | #include <video/omapdss.h> | 21 | #include <video/omapdss.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c index 529a017602e4..4363fffc87e3 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
| 13 | #include <linux/gpio.h> | 13 | #include <linux/gpio/consumer.h> |
| 14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 15 | #include <linux/of.h> | 15 | #include <linux/of.h> |
| 16 | #include <linux/of_gpio.h> | 16 | #include <linux/of_gpio.h> |
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c index 31efcca801bd..deb416736aad 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | |||
| @@ -29,7 +29,7 @@ | |||
| 29 | #include <linux/sched.h> | 29 | #include <linux/sched.h> |
| 30 | #include <linux/backlight.h> | 30 | #include <linux/backlight.h> |
| 31 | #include <linux/fb.h> | 31 | #include <linux/fb.h> |
| 32 | #include <linux/gpio.h> | 32 | #include <linux/gpio/consumer.h> |
| 33 | #include <linux/of.h> | 33 | #include <linux/of.h> |
| 34 | #include <linux/of_gpio.h> | 34 | #include <linux/of_gpio.h> |
| 35 | 35 | ||
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c index 03e2beb7b4f0..d93175b03a12 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
| 15 | #include <linux/spi/spi.h> | 15 | #include <linux/spi/spi.h> |
| 16 | #include <linux/regulator/consumer.h> | 16 | #include <linux/regulator/consumer.h> |
| 17 | #include <linux/gpio.h> | 17 | #include <linux/gpio/consumer.h> |
| 18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
| 19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
| 20 | #include <linux/of_gpio.h> | 20 | #include <linux/of_gpio.h> |
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 8730646a0cbb..56c43f355ce3 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c | |||
| @@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev) | |||
| 1167 | { | 1167 | { |
| 1168 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | 1168 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); |
| 1169 | struct regulator *vdds_dsi; | 1169 | struct regulator *vdds_dsi; |
| 1170 | int r; | ||
| 1171 | 1170 | ||
| 1172 | if (dsi->vdds_dsi_reg != NULL) | 1171 | if (dsi->vdds_dsi_reg != NULL) |
| 1173 | return 0; | 1172 | return 0; |
| @@ -1180,15 +1179,6 @@ static int dsi_regulator_init(struct platform_device *dsidev) | |||
| 1180 | return PTR_ERR(vdds_dsi); | 1179 | return PTR_ERR(vdds_dsi); |
| 1181 | } | 1180 | } |
| 1182 | 1181 | ||
| 1183 | if (regulator_can_change_voltage(vdds_dsi)) { | ||
| 1184 | r = regulator_set_voltage(vdds_dsi, 1800000, 1800000); | ||
| 1185 | if (r) { | ||
| 1186 | devm_regulator_put(vdds_dsi); | ||
| 1187 | DSSERR("can't set the DSI regulator voltage\n"); | ||
| 1188 | return r; | ||
| 1189 | } | ||
| 1190 | } | ||
| 1191 | |||
| 1192 | dsi->vdds_dsi_reg = vdds_dsi; | 1182 | dsi->vdds_dsi_reg = vdds_dsi; |
| 1193 | 1183 | ||
| 1194 | return 0; | 1184 | return 0; |
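
The dsi.c hunk above, together with the matching hdmi4.c and hdmi5.c hunks further down, drops the regulator_can_change_voltage()/regulator_set_voltage() dance: the consumer no longer tries to force 1.8 V, presumably because the supply voltage is a board-level constraint rather than something the driver should program. A minimal kernel-style sketch of the resulting pattern, with a hypothetical "vdda" supply name and a caller-provided cache slot (not the exact omapdrm code):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/*
 * Sketch only: grab the supply once, cache it, and leave the voltage to the
 * board/DT constraints. The "vdda" supply name and the caller-provided cache
 * slot are assumptions, not the omapdrm code.
 */
static int example_init_regulator(struct device *dev, struct regulator **cached)
{
	struct regulator *reg;

	if (*cached)
		return 0;

	reg = devm_regulator_get(dev, "vdda");
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	*cached = reg;
	return 0;
}
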
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index f95ff319e68e..3303cfad4838 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <linux/delay.h> | 30 | #include <linux/delay.h> |
| 31 | #include <linux/seq_file.h> | 31 | #include <linux/seq_file.h> |
| 32 | #include <linux/clk.h> | 32 | #include <linux/clk.h> |
| 33 | #include <linux/pinctrl/consumer.h> | ||
| 33 | #include <linux/platform_device.h> | 34 | #include <linux/platform_device.h> |
| 34 | #include <linux/pm_runtime.h> | 35 | #include <linux/pm_runtime.h> |
| 35 | #include <linux/gfp.h> | 36 | #include <linux/gfp.h> |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index f892ae157ff3..4d46cdf7a037 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/gpio.h> | 33 | #include <linux/gpio.h> |
| 34 | #include <linux/regulator/consumer.h> | 34 | #include <linux/regulator/consumer.h> |
| 35 | #include <linux/component.h> | 35 | #include <linux/component.h> |
| 36 | #include <linux/of.h> | ||
| 36 | #include <video/omapdss.h> | 37 | #include <video/omapdss.h> |
| 37 | #include <sound/omap-hdmi-audio.h> | 38 | #include <sound/omap-hdmi-audio.h> |
| 38 | 39 | ||
| @@ -100,7 +101,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) | |||
| 100 | 101 | ||
| 101 | static int hdmi_init_regulator(void) | 102 | static int hdmi_init_regulator(void) |
| 102 | { | 103 | { |
| 103 | int r; | ||
| 104 | struct regulator *reg; | 104 | struct regulator *reg; |
| 105 | 105 | ||
| 106 | if (hdmi.vdda_reg != NULL) | 106 | if (hdmi.vdda_reg != NULL) |
| @@ -114,15 +114,6 @@ static int hdmi_init_regulator(void) | |||
| 114 | return PTR_ERR(reg); | 114 | return PTR_ERR(reg); |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | if (regulator_can_change_voltage(reg)) { | ||
| 118 | r = regulator_set_voltage(reg, 1800000, 1800000); | ||
| 119 | if (r) { | ||
| 120 | devm_regulator_put(reg); | ||
| 121 | DSSWARN("can't set the regulator voltage\n"); | ||
| 122 | return r; | ||
| 123 | } | ||
| 124 | } | ||
| 125 | |||
| 126 | hdmi.vdda_reg = reg; | 117 | hdmi.vdda_reg = reg; |
| 127 | 118 | ||
| 128 | return 0; | 119 | return 0; |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index fa72e735dad2..ef3afe99e487 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c | |||
| @@ -211,7 +211,7 @@ static void hdmi_core_init(struct hdmi_core_video_config *video_cfg) | |||
| 211 | static void hdmi_core_powerdown_disable(struct hdmi_core_data *core) | 211 | static void hdmi_core_powerdown_disable(struct hdmi_core_data *core) |
| 212 | { | 212 | { |
| 213 | DSSDBG("Enter hdmi_core_powerdown_disable\n"); | 213 | DSSDBG("Enter hdmi_core_powerdown_disable\n"); |
| 214 | REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x0, 0, 0); | 214 | REG_FLD_MOD(core->base, HDMI_CORE_SYS_SYS_CTRL1, 0x1, 0, 0); |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | static void hdmi_core_swreset_release(struct hdmi_core_data *core) | 217 | static void hdmi_core_swreset_release(struct hdmi_core_data *core) |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index a43f7b10e113..9255c0e1e4a7 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #include <linux/gpio.h> | 38 | #include <linux/gpio.h> |
| 39 | #include <linux/regulator/consumer.h> | 39 | #include <linux/regulator/consumer.h> |
| 40 | #include <linux/component.h> | 40 | #include <linux/component.h> |
| 41 | #include <linux/of.h> | ||
| 41 | #include <video/omapdss.h> | 42 | #include <video/omapdss.h> |
| 42 | #include <sound/omap-hdmi-audio.h> | 43 | #include <sound/omap-hdmi-audio.h> |
| 43 | 44 | ||
| @@ -119,7 +120,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) | |||
| 119 | 120 | ||
| 120 | static int hdmi_init_regulator(void) | 121 | static int hdmi_init_regulator(void) |
| 121 | { | 122 | { |
| 122 | int r; | ||
| 123 | struct regulator *reg; | 123 | struct regulator *reg; |
| 124 | 124 | ||
| 125 | if (hdmi.vdda_reg != NULL) | 125 | if (hdmi.vdda_reg != NULL) |
| @@ -131,15 +131,6 @@ static int hdmi_init_regulator(void) | |||
| 131 | return PTR_ERR(reg); | 131 | return PTR_ERR(reg); |
| 132 | } | 132 | } |
| 133 | 133 | ||
| 134 | if (regulator_can_change_voltage(reg)) { | ||
| 135 | r = regulator_set_voltage(reg, 1800000, 1800000); | ||
| 136 | if (r) { | ||
| 137 | devm_regulator_put(reg); | ||
| 138 | DSSWARN("can't set the regulator voltage\n"); | ||
| 139 | return r; | ||
| 140 | } | ||
| 141 | } | ||
| 142 | |||
| 143 | hdmi.vdda_reg = reg; | 134 | hdmi.vdda_reg = reg; |
| 144 | 135 | ||
| 145 | return 0; | 136 | return 0; |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c index 6a397520cae5..8ab2093daa12 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c | |||
| @@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core) | |||
| 51 | { | 51 | { |
| 52 | void __iomem *base = core->base; | 52 | void __iomem *base = core->base; |
| 53 | const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ | 53 | const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ |
| 54 | const unsigned ss_scl_high = 4000; /* ns */ | 54 | const unsigned ss_scl_high = 4600; /* ns */ |
| 55 | const unsigned ss_scl_low = 4700; /* ns */ | 55 | const unsigned ss_scl_low = 5400; /* ns */ |
| 56 | const unsigned fs_scl_high = 600; /* ns */ | 56 | const unsigned fs_scl_high = 600; /* ns */ |
| 57 | const unsigned fs_scl_low = 1300; /* ns */ | 57 | const unsigned fs_scl_low = 1300; /* ns */ |
| 58 | const unsigned sda_hold = 1000; /* ns */ | 58 | const unsigned sda_hold = 1000; /* ns */ |
| @@ -458,7 +458,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core, | |||
| 458 | 458 | ||
| 459 | c = (ptr[1] >> 6) & 0x3; | 459 | c = (ptr[1] >> 6) & 0x3; |
| 460 | m = (ptr[1] >> 4) & 0x3; | 460 | m = (ptr[1] >> 4) & 0x3; |
| 461 | r = (ptr[1] >> 0) & 0x3; | 461 | r = (ptr[1] >> 0) & 0xf; |
| 462 | 462 | ||
| 463 | itc = (ptr[2] >> 7) & 0x1; | 463 | itc = (ptr[2] >> 7) & 0x1; |
| 464 | ec = (ptr[2] >> 4) & 0x7; | 464 | ec = (ptr[2] >> 4) & 0x7; |
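
The hdmi5_core.c hunk adjusts the standard-speed DDC SCL high/low times and widens the mask used for the AVI infoframe R field from 0x3 to 0xf, since the active-format aspect-ratio field spans four bits in that byte. A standalone sketch of the field extraction, assuming the usual CEA-861-style layout (C in bits 7:6, M in bits 5:4, R in bits 3:0); the sample byte value is made up:

#include <stdint.h>
#include <stdio.h>

/*
 * Unpack colorimetry (C), picture aspect ratio (M) and active format
 * aspect ratio (R) from the AVI infoframe byte that carries them.
 * R occupies bits 3:0, so masking with 0x3 would drop its top two bits.
 */
static void unpack_avi_byte(uint8_t pb)
{
	unsigned int c = (pb >> 6) & 0x3;
	unsigned int m = (pb >> 4) & 0x3;
	unsigned int r = pb & 0xf;

	printf("C=%u M=%u R=%u\n", c, m, r);
}

int main(void)
{
	unpack_avi_byte(0x28);	/* made-up sample: C=0, M=2, R=8 */
	return 0;
}
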
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c index 1f5d19c119ce..f98b750fc499 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/io.h> | 13 | #include <linux/io.h> |
| 14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 16 | #include <linux/seq_file.h> | ||
| 16 | #include <video/omapdss.h> | 17 | #include <video/omapdss.h> |
| 17 | 18 | ||
| 18 | #include "dss.h" | 19 | #include "dss.h" |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c index 06e23a7c432c..f1015e8b8267 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
| 17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
| 18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
| 19 | #include <linux/seq_file.h> | ||
| 19 | 20 | ||
| 20 | #include <video/omapdss.h> | 21 | #include <video/omapdss.h> |
| 21 | 22 | ||
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c index 13442b9052d1..055f62fca5dc 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
| 15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
| 16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
| 17 | #include <linux/seq_file.h> | ||
| 17 | #include <video/omapdss.h> | 18 | #include <video/omapdss.h> |
| 18 | 19 | ||
| 19 | #include "dss.h" | 20 | #include "dss.h" |
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c index 6f5fc14fc015..479bf24050f8 100644 --- a/drivers/gpu/drm/omapdrm/omap_debugfs.c +++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c | |||
| @@ -17,6 +17,8 @@ | |||
| 17 | * this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * this program. If not, see <http://www.gnu.org/licenses/>. |
| 18 | */ | 18 | */ |
| 19 | 19 | ||
| 20 | #include <linux/seq_file.h> | ||
| 21 | |||
| 20 | #include <drm/drm_crtc.h> | 22 | #include <drm/drm_crtc.h> |
| 21 | #include <drm/drm_fb_helper.h> | 23 | #include <drm/drm_fb_helper.h> |
| 22 | 24 | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index de275a5be1db..4ceed7a9762f 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
| 28 | #include <linux/platform_device.h> /* platform_device() */ | 28 | #include <linux/platform_device.h> /* platform_device() */ |
| 29 | #include <linux/sched.h> | 29 | #include <linux/sched.h> |
| 30 | #include <linux/seq_file.h> | ||
| 30 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
| 31 | #include <linux/time.h> | 32 | #include <linux/time.h> |
| 32 | #include <linux/vmalloc.h> | 33 | #include <linux/vmalloc.h> |
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 94ec06d3d737..f84570d1636c 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c | |||
| @@ -17,6 +17,8 @@ | |||
| 17 | * this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * this program. If not, see <http://www.gnu.org/licenses/>. |
| 18 | */ | 18 | */ |
| 19 | 19 | ||
| 20 | #include <linux/seq_file.h> | ||
| 21 | |||
| 20 | #include <drm/drm_crtc.h> | 22 | #include <drm/drm_crtc.h> |
| 21 | #include <drm/drm_crtc_helper.h> | 23 | #include <drm/drm_crtc_helper.h> |
| 22 | 24 | ||
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index b97afc281778..03698b6c806c 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | * this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * this program. If not, see <http://www.gnu.org/licenses/>. |
| 18 | */ | 18 | */ |
| 19 | 19 | ||
| 20 | #include <linux/seq_file.h> | ||
| 20 | #include <linux/shmem_fs.h> | 21 | #include <linux/shmem_fs.h> |
| 21 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
| 22 | #include <linux/pfn_t.h> | 23 | #include <linux/pfn_t.h> |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 2e216e2ea78c..259cd6e6d71c 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
| @@ -589,7 +589,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 589 | if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev)) | 589 | if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev)) |
| 590 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; | 590 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; |
| 591 | /* use frac fb div on RS780/RS880 */ | 591 | /* use frac fb div on RS780/RS880 */ |
| 592 | if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) | 592 | if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) |
| 593 | && !radeon_crtc->ss_enabled) | ||
| 593 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; | 594 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; |
| 594 | if (ASIC_IS_DCE32(rdev) && mode->clock > 165000) | 595 | if (ASIC_IS_DCE32(rdev) && mode->clock > 165000) |
| 595 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; | 596 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; |
| @@ -626,7 +627,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
| 626 | if (radeon_crtc->ss.refdiv) { | 627 | if (radeon_crtc->ss.refdiv) { |
| 627 | radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; | 628 | radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; |
| 628 | radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; | 629 | radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; |
| 629 | if (ASIC_IS_AVIVO(rdev)) | 630 | if (rdev->family >= CHIP_RV770) |
| 630 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; | 631 | radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; |
| 631 | } | 632 | } |
| 632 | } | 633 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index e721e6b2766e..21c44b2293bc 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) | |||
| 630 | /* | 630 | /* |
| 631 | * GPU helpers function. | 631 | * GPU helpers function. |
| 632 | */ | 632 | */ |
| 633 | |||
| 634 | /** | ||
| 635 | * radeon_device_is_virtual - check if we are running in a virtual environment | ||
| 636 | * | ||
| 637 | * Check if the asic has been passed through to a VM (all asics). | ||
| 638 | * Used at driver startup. | ||
| 639 | * Returns true if virtual or false if not. | ||
| 640 | */ | ||
| 641 | static bool radeon_device_is_virtual(void) | ||
| 642 | { | ||
| 643 | #ifdef CONFIG_X86 | ||
| 644 | return boot_cpu_has(X86_FEATURE_HYPERVISOR); | ||
| 645 | #else | ||
| 646 | return false; | ||
| 647 | #endif | ||
| 648 | } | ||
| 649 | |||
| 633 | /** | 650 | /** |
| 634 | * radeon_card_posted - check if the hw has already been initialized | 651 | * radeon_card_posted - check if the hw has already been initialized |
| 635 | * | 652 | * |
| @@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev) | |||
| 643 | { | 660 | { |
| 644 | uint32_t reg; | 661 | uint32_t reg; |
| 645 | 662 | ||
| 663 | /* for pass through, always force asic_init */ | ||
| 664 | if (radeon_device_is_virtual()) | ||
| 665 | return false; | ||
| 666 | |||
| 646 | /* required for EFI mode on macbook2,1 which uses an r5xx asic */ | 667 | /* required for EFI mode on macbook2,1 which uses an r5xx asic */ |
| 647 | if (efi_enabled(EFI_BOOT) && | 668 | if (efi_enabled(EFI_BOOT) && |
| 648 | (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && | 669 | (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && |
| @@ -1631,7 +1652,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, | |||
| 1631 | radeon_agp_suspend(rdev); | 1652 | radeon_agp_suspend(rdev); |
| 1632 | 1653 | ||
| 1633 | pci_save_state(dev->pdev); | 1654 | pci_save_state(dev->pdev); |
| 1634 | if (freeze && rdev->family >= CHIP_R600) { | 1655 | if (freeze && rdev->family >= CHIP_CEDAR) { |
| 1635 | rdev->asic->asic_reset(rdev, true); | 1656 | rdev->asic->asic_reset(rdev, true); |
| 1636 | pci_restore_state(dev->pdev); | 1657 | pci_restore_state(dev->pdev); |
| 1637 | } else if (suspend) { | 1658 | } else if (suspend) { |
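
radeon_device_is_virtual() added above keys off X86_FEATURE_HYPERVISOR so that radeon_card_posted() always forces asic_init when the GPU has been passed through to a guest. On x86 that feature flag comes from CPUID leaf 1, ECX bit 31, the "hypervisor present" bit a VMM sets. A small user-space equivalent using the GCC/Clang <cpuid.h> helper, shown only to illustrate where the flag comes from:

#include <stdbool.h>
#include <stdio.h>
#include <cpuid.h>

/* CPUID leaf 1: ECX bit 31 is set by a hypervisor/VMM for its guests. */
static bool running_under_hypervisor(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return false;

	return ecx & (1u << 31);
}

int main(void)
{
	printf("hypervisor: %s\n", running_under_hypervisor() ? "yes" : "no");
	return 0;
}
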
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index 505620c7c2c8..e04deedabd4a 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c | |||
| @@ -51,15 +51,6 @@ static void sti_crtc_disabling(struct drm_crtc *crtc) | |||
| 51 | mixer->status = STI_MIXER_DISABLING; | 51 | mixer->status = STI_MIXER_DISABLING; |
| 52 | } | 52 | } |
| 53 | 53 | ||
| 54 | static bool sti_crtc_mode_fixup(struct drm_crtc *crtc, | ||
| 55 | const struct drm_display_mode *mode, | ||
| 56 | struct drm_display_mode *adjusted_mode) | ||
| 57 | { | ||
| 58 | /* accept the provided drm_display_mode, do not fix it up */ | ||
| 59 | drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); | ||
| 60 | return true; | ||
| 61 | } | ||
| 62 | |||
| 63 | static int | 54 | static int |
| 64 | sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) | 55 | sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) |
| 65 | { | 56 | { |
| @@ -230,7 +221,6 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 230 | static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { | 221 | static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { |
| 231 | .enable = sti_crtc_enable, | 222 | .enable = sti_crtc_enable, |
| 232 | .disable = sti_crtc_disabling, | 223 | .disable = sti_crtc_disabling, |
| 233 | .mode_fixup = sti_crtc_mode_fixup, | ||
| 234 | .mode_set = drm_helper_crtc_mode_set, | 224 | .mode_set = drm_helper_crtc_mode_set, |
| 235 | .mode_set_nofb = sti_crtc_mode_set_nofb, | 225 | .mode_set_nofb = sti_crtc_mode_set_nofb, |
| 236 | .mode_set_base = drm_helper_crtc_mode_set_base, | 226 | .mode_set_base = drm_helper_crtc_mode_set_base, |
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig index 99510e64e91a..a4b357db8856 100644 --- a/drivers/gpu/drm/sun4i/Kconfig +++ b/drivers/gpu/drm/sun4i/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config DRM_SUN4I | 1 | config DRM_SUN4I |
| 2 | tristate "DRM Support for Allwinner A10 Display Engine" | 2 | tristate "DRM Support for Allwinner A10 Display Engine" |
| 3 | depends on DRM && ARM | 3 | depends on DRM && ARM && COMMON_CLK |
| 4 | depends on ARCH_SUNXI || COMPILE_TEST | 4 | depends on ARCH_SUNXI || COMPILE_TEST |
| 5 | select DRM_GEM_CMA_HELPER | 5 | select DRM_GEM_CMA_HELPER |
| 6 | select DRM_KMS_HELPER | 6 | select DRM_KMS_HELPER |
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index f7a15c1a93bf..3ab560450a82 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c | |||
| @@ -190,7 +190,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend, | |||
| 190 | /* Get the physical address of the buffer in memory */ | 190 | /* Get the physical address of the buffer in memory */ |
| 191 | gem = drm_fb_cma_get_gem_obj(fb, 0); | 191 | gem = drm_fb_cma_get_gem_obj(fb, 0); |
| 192 | 192 | ||
| 193 | DRM_DEBUG_DRIVER("Using GEM @ 0x%x\n", gem->paddr); | 193 | DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr); |
| 194 | 194 | ||
| 195 | /* Compute the start of the displayed memory */ | 195 | /* Compute the start of the displayed memory */ |
| 196 | bpp = drm_format_plane_cpp(fb->pixel_format, 0); | 196 | bpp = drm_format_plane_cpp(fb->pixel_format, 0); |
| @@ -198,7 +198,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend, | |||
| 198 | paddr += (state->src_x >> 16) * bpp; | 198 | paddr += (state->src_x >> 16) * bpp; |
| 199 | paddr += (state->src_y >> 16) * fb->pitches[0]; | 199 | paddr += (state->src_y >> 16) * fb->pitches[0]; |
| 200 | 200 | ||
| 201 | DRM_DEBUG_DRIVER("Setting buffer address to 0x%x\n", paddr); | 201 | DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr); |
| 202 | 202 | ||
| 203 | /* Write the 32 lower bits of the address (in bits) */ | 203 | /* Write the 32 lower bits of the address (in bits) */ |
| 204 | lo_paddr = paddr << 3; | 204 | lo_paddr = paddr << 3; |
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c index 3ff668cb463c..5b3463197c48 100644 --- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c +++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c | |||
| @@ -72,14 +72,40 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw, | |||
| 72 | static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate, | 72 | static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate, |
| 73 | unsigned long *parent_rate) | 73 | unsigned long *parent_rate) |
| 74 | { | 74 | { |
| 75 | return *parent_rate / DIV_ROUND_CLOSEST(*parent_rate, rate); | 75 | unsigned long best_parent = 0; |
| 76 | u8 best_div = 1; | ||
| 77 | int i; | ||
| 78 | |||
| 79 | for (i = 6; i < 127; i++) { | ||
| 80 | unsigned long ideal = rate * i; | ||
| 81 | unsigned long rounded; | ||
| 82 | |||
| 83 | rounded = clk_hw_round_rate(clk_hw_get_parent(hw), | ||
| 84 | ideal); | ||
| 85 | |||
| 86 | if (rounded == ideal) { | ||
| 87 | best_parent = rounded; | ||
| 88 | best_div = i; | ||
| 89 | goto out; | ||
| 90 | } | ||
| 91 | |||
| 92 | if ((rounded < ideal) && (rounded > best_parent)) { | ||
| 93 | best_parent = rounded; | ||
| 94 | best_div = i; | ||
| 95 | } | ||
| 96 | } | ||
| 97 | |||
| 98 | out: | ||
| 99 | *parent_rate = best_parent; | ||
| 100 | |||
| 101 | return best_parent / best_div; | ||
| 76 | } | 102 | } |
| 77 | 103 | ||
| 78 | static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate, | 104 | static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate, |
| 79 | unsigned long parent_rate) | 105 | unsigned long parent_rate) |
| 80 | { | 106 | { |
| 81 | struct sun4i_dclk *dclk = hw_to_dclk(hw); | 107 | struct sun4i_dclk *dclk = hw_to_dclk(hw); |
| 82 | int div = DIV_ROUND_CLOSEST(parent_rate, rate); | 108 | u8 div = parent_rate / rate; |
| 83 | 109 | ||
| 84 | return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG, | 110 | return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG, |
| 85 | GENMASK(6, 0), div); | 111 | GENMASK(6, 0), div); |
| @@ -127,10 +153,14 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon) | |||
| 127 | const char *clk_name, *parent_name; | 153 | const char *clk_name, *parent_name; |
| 128 | struct clk_init_data init; | 154 | struct clk_init_data init; |
| 129 | struct sun4i_dclk *dclk; | 155 | struct sun4i_dclk *dclk; |
| 156 | int ret; | ||
| 130 | 157 | ||
| 131 | parent_name = __clk_get_name(tcon->sclk0); | 158 | parent_name = __clk_get_name(tcon->sclk0); |
| 132 | of_property_read_string_index(dev->of_node, "clock-output-names", 0, | 159 | ret = of_property_read_string_index(dev->of_node, |
| 133 | &clk_name); | 160 | "clock-output-names", 0, |
| 161 | &clk_name); | ||
| 162 | if (ret) | ||
| 163 | return ret; | ||
| 134 | 164 | ||
| 135 | dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL); | 165 | dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL); |
| 136 | if (!dclk) | 166 | if (!dclk) |
| @@ -140,6 +170,7 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon) | |||
| 140 | init.ops = &sun4i_dclk_ops; | 170 | init.ops = &sun4i_dclk_ops; |
| 141 | init.parent_names = &parent_name; | 171 | init.parent_names = &parent_name; |
| 142 | init.num_parents = 1; | 172 | init.num_parents = 1; |
| 173 | init.flags = CLK_SET_RATE_PARENT; | ||
| 143 | 174 | ||
| 144 | dclk->regmap = tcon->regs; | 175 | dclk->regmap = tcon->regs; |
| 145 | dclk->hw.init = &init; | 176 | dclk->hw.init = &init; |
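
The new sun4i_dclk_round_rate() walks the candidate dividers (6 through 126), asks the parent clock what it can really produce for each rate * div, and keeps the closest achievable combination instead of dividing blindly; set_rate then derives the divider from whatever parent rate was chosen. A standalone model of that search, with a made-up parent_round_rate() standing in for clk_hw_round_rate():

#include <stdio.h>

/*
 * Stand-in for clk_hw_round_rate(): pretend the parent PLL can only hit
 * multiples of 3 MHz. Purely illustrative.
 */
static unsigned long parent_round_rate(unsigned long rate)
{
	return (rate / 3000000UL) * 3000000UL;
}

/*
 * Pick the divider in [6, 126] whose achievable parent rate gets closest
 * to rate * div without exceeding what the parent can generate.
 */
static unsigned long round_dotclock(unsigned long rate, unsigned long *parent)
{
	unsigned long best_parent = 0;
	unsigned int best_div = 1, i;

	for (i = 6; i < 127; i++) {
		unsigned long ideal = rate * i;
		unsigned long rounded = parent_round_rate(ideal);

		if (rounded == ideal) {
			best_parent = rounded;
			best_div = i;
			break;
		}
		if (rounded < ideal && rounded > best_parent) {
			best_parent = rounded;
			best_div = i;
		}
	}

	*parent = best_parent;
	return best_parent / best_div;
}

int main(void)
{
	unsigned long parent, dclk = round_dotclock(33000000UL, &parent);

	printf("parent=%lu dotclock=%lu\n", parent, dclk);
	return 0;
}
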
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 76e922bb60e5..257d2b4f3645 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c | |||
| @@ -24,34 +24,6 @@ | |||
| 24 | #include "sun4i_layer.h" | 24 | #include "sun4i_layer.h" |
| 25 | #include "sun4i_tcon.h" | 25 | #include "sun4i_tcon.h" |
| 26 | 26 | ||
| 27 | static int sun4i_drv_connector_plug_all(struct drm_device *drm) | ||
| 28 | { | ||
| 29 | struct drm_connector *connector, *failed; | ||
| 30 | int ret; | ||
| 31 | |||
| 32 | mutex_lock(&drm->mode_config.mutex); | ||
| 33 | list_for_each_entry(connector, &drm->mode_config.connector_list, head) { | ||
| 34 | ret = drm_connector_register(connector); | ||
| 35 | if (ret) { | ||
| 36 | failed = connector; | ||
| 37 | goto err; | ||
| 38 | } | ||
| 39 | } | ||
| 40 | mutex_unlock(&drm->mode_config.mutex); | ||
| 41 | return 0; | ||
| 42 | |||
| 43 | err: | ||
| 44 | list_for_each_entry(connector, &drm->mode_config.connector_list, head) { | ||
| 45 | if (failed == connector) | ||
| 46 | break; | ||
| 47 | |||
| 48 | drm_connector_unregister(connector); | ||
| 49 | } | ||
| 50 | mutex_unlock(&drm->mode_config.mutex); | ||
| 51 | |||
| 52 | return ret; | ||
| 53 | } | ||
| 54 | |||
| 55 | static int sun4i_drv_enable_vblank(struct drm_device *drm, unsigned int pipe) | 27 | static int sun4i_drv_enable_vblank(struct drm_device *drm, unsigned int pipe) |
| 56 | { | 28 | { |
| 57 | struct sun4i_drv *drv = drm->dev_private; | 29 | struct sun4i_drv *drv = drm->dev_private; |
| @@ -125,6 +97,22 @@ static struct drm_driver sun4i_drv_driver = { | |||
| 125 | .disable_vblank = sun4i_drv_disable_vblank, | 97 | .disable_vblank = sun4i_drv_disable_vblank, |
| 126 | }; | 98 | }; |
| 127 | 99 | ||
| 100 | static void sun4i_remove_framebuffers(void) | ||
| 101 | { | ||
| 102 | struct apertures_struct *ap; | ||
| 103 | |||
| 104 | ap = alloc_apertures(1); | ||
| 105 | if (!ap) | ||
| 106 | return; | ||
| 107 | |||
| 108 | /* The framebuffer can be located anywhere in RAM */ | ||
| 109 | ap->ranges[0].base = 0; | ||
| 110 | ap->ranges[0].size = ~0; | ||
| 111 | |||
| 112 | remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false); | ||
| 113 | kfree(ap); | ||
| 114 | } | ||
| 115 | |||
| 128 | static int sun4i_drv_bind(struct device *dev) | 116 | static int sun4i_drv_bind(struct device *dev) |
| 129 | { | 117 | { |
| 130 | struct drm_device *drm; | 118 | struct drm_device *drm; |
| @@ -172,6 +160,9 @@ static int sun4i_drv_bind(struct device *dev) | |||
| 172 | } | 160 | } |
| 173 | drm->irq_enabled = true; | 161 | drm->irq_enabled = true; |
| 174 | 162 | ||
| 163 | /* Remove early framebuffers (ie. simplefb) */ | ||
| 164 | sun4i_remove_framebuffers(); | ||
| 165 | |||
| 175 | /* Create our framebuffer */ | 166 | /* Create our framebuffer */ |
| 176 | drv->fbdev = sun4i_framebuffer_init(drm); | 167 | drv->fbdev = sun4i_framebuffer_init(drm); |
| 177 | if (IS_ERR(drv->fbdev)) { | 168 | if (IS_ERR(drv->fbdev)) { |
| @@ -187,7 +178,7 @@ static int sun4i_drv_bind(struct device *dev) | |||
| 187 | if (ret) | 178 | if (ret) |
| 188 | goto free_drm; | 179 | goto free_drm; |
| 189 | 180 | ||
| 190 | ret = sun4i_drv_connector_plug_all(drm); | 181 | ret = drm_connector_register_all(drm); |
| 191 | if (ret) | 182 | if (ret) |
| 192 | goto unregister_drm; | 183 | goto unregister_drm; |
| 193 | 184 | ||
| @@ -204,6 +195,7 @@ static void sun4i_drv_unbind(struct device *dev) | |||
| 204 | { | 195 | { |
| 205 | struct drm_device *drm = dev_get_drvdata(dev); | 196 | struct drm_device *drm = dev_get_drvdata(dev); |
| 206 | 197 | ||
| 198 | drm_connector_unregister_all(drm); | ||
| 207 | drm_dev_unregister(drm); | 199 | drm_dev_unregister(drm); |
| 208 | drm_kms_helper_poll_fini(drm); | 200 | drm_kms_helper_poll_fini(drm); |
| 209 | sun4i_framebuffer_free(drm); | 201 | sun4i_framebuffer_free(drm); |
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index ab6494818050..aaffe9e64ffb 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c | |||
| @@ -54,8 +54,13 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector) | |||
| 54 | static int sun4i_rgb_mode_valid(struct drm_connector *connector, | 54 | static int sun4i_rgb_mode_valid(struct drm_connector *connector, |
| 55 | struct drm_display_mode *mode) | 55 | struct drm_display_mode *mode) |
| 56 | { | 56 | { |
| 57 | struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector); | ||
| 58 | struct sun4i_drv *drv = rgb->drv; | ||
| 59 | struct sun4i_tcon *tcon = drv->tcon; | ||
| 57 | u32 hsync = mode->hsync_end - mode->hsync_start; | 60 | u32 hsync = mode->hsync_end - mode->hsync_start; |
| 58 | u32 vsync = mode->vsync_end - mode->vsync_start; | 61 | u32 vsync = mode->vsync_end - mode->vsync_start; |
| 62 | unsigned long rate = mode->clock * 1000; | ||
| 63 | long rounded_rate; | ||
| 59 | 64 | ||
| 60 | DRM_DEBUG_DRIVER("Validating modes...\n"); | 65 | DRM_DEBUG_DRIVER("Validating modes...\n"); |
| 61 | 66 | ||
| @@ -87,6 +92,15 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector, | |||
| 87 | 92 | ||
| 88 | DRM_DEBUG_DRIVER("Vertical parameters OK\n"); | 93 | DRM_DEBUG_DRIVER("Vertical parameters OK\n"); |
| 89 | 94 | ||
| 95 | rounded_rate = clk_round_rate(tcon->dclk, rate); | ||
| 96 | if (rounded_rate < rate) | ||
| 97 | return MODE_CLOCK_LOW; | ||
| 98 | |||
| 99 | if (rounded_rate > rate) | ||
| 100 | return MODE_CLOCK_HIGH; | ||
| 101 | |||
| 102 | DRM_DEBUG_DRIVER("Clock rate OK\n"); | ||
| 103 | |||
| 90 | return MODE_OK; | 104 | return MODE_OK; |
| 91 | } | 105 | } |
| 92 | 106 | ||
| @@ -203,7 +217,7 @@ int sun4i_rgb_init(struct drm_device *drm) | |||
| 203 | int ret; | 217 | int ret; |
| 204 | 218 | ||
| 205 | /* If we don't have a panel, there's no point in going on */ | 219 | /* If we don't have a panel, there's no point in going on */ |
| 206 | if (!tcon->panel) | 220 | if (IS_ERR(tcon->panel)) |
| 207 | return -ENODEV; | 221 | return -ENODEV; |
| 208 | 222 | ||
| 209 | rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL); | 223 | rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL); |
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 9f19b0e08560..652385f09735 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c | |||
| @@ -425,11 +425,11 @@ static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node) | |||
| 425 | 425 | ||
| 426 | remote = of_graph_get_remote_port_parent(end_node); | 426 | remote = of_graph_get_remote_port_parent(end_node); |
| 427 | if (!remote) { | 427 | if (!remote) { |
| 428 | DRM_DEBUG_DRIVER("Enable to parse remote node\n"); | 428 | DRM_DEBUG_DRIVER("Unable to parse remote node\n"); |
| 429 | return ERR_PTR(-EINVAL); | 429 | return ERR_PTR(-EINVAL); |
| 430 | } | 430 | } |
| 431 | 431 | ||
| 432 | return of_drm_find_panel(remote); | 432 | return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER); |
| 433 | } | 433 | } |
| 434 | 434 | ||
| 435 | static int sun4i_tcon_bind(struct device *dev, struct device *master, | 435 | static int sun4i_tcon_bind(struct device *dev, struct device *master, |
| @@ -490,7 +490,11 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master, | |||
| 490 | return 0; | 490 | return 0; |
| 491 | } | 491 | } |
| 492 | 492 | ||
| 493 | return sun4i_rgb_init(drm); | 493 | ret = sun4i_rgb_init(drm); |
| 494 | if (ret < 0) | ||
| 495 | goto err_free_clocks; | ||
| 496 | |||
| 497 | return 0; | ||
| 494 | 498 | ||
| 495 | err_free_clocks: | 499 | err_free_clocks: |
| 496 | sun4i_tcon_free_clocks(tcon); | 500 | sun4i_tcon_free_clocks(tcon); |
| @@ -522,12 +526,13 @@ static int sun4i_tcon_probe(struct platform_device *pdev) | |||
| 522 | * Defer the probe. | 526 | * Defer the probe. |
| 523 | */ | 527 | */ |
| 524 | panel = sun4i_tcon_find_panel(node); | 528 | panel = sun4i_tcon_find_panel(node); |
| 525 | if (IS_ERR(panel)) { | 529 | |
| 526 | /* | 530 | /* |
| 527 | * If we don't have a panel endpoint, just go on | 531 | * If we don't have a panel endpoint, just go on |
| 528 | */ | 532 | */ |
| 529 | if (PTR_ERR(panel) != -ENODEV) | 533 | if (PTR_ERR(panel) == -EPROBE_DEFER) { |
| 530 | return -EPROBE_DEFER; | 534 | DRM_DEBUG_DRIVER("Still waiting for our panel. Deferring...\n"); |
| 535 | return -EPROBE_DEFER; | ||
| 531 | } | 536 | } |
| 532 | 537 | ||
| 533 | return component_add(&pdev->dev, &sun4i_tcon_ops); | 538 | return component_add(&pdev->dev, &sun4i_tcon_ops); |
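
sun4i_tcon_find_panel() now returns ERR_PTR(-EPROBE_DEFER) when the endpoint exists but the panel has not been bound yet, using GNU C's "a ?: b" shorthand, so the probe path can defer instead of giving up. A standalone sketch of the error-in-pointer encoding that the ERR_PTR/PTR_ERR/IS_ERR helpers rely on, reimplemented locally purely for illustration:

#include <stdio.h>

#define MAX_ERRNO	4095
#define EPROBE_DEFER	517	/* the kernel's "try again later" errno */

/*
 * Local, minimal reimplementations of the kernel's ERR_PTR helpers,
 * only for illustration: error codes live in the top page of the
 * address space, so they can never collide with a valid pointer.
 */
static inline void *err_ptr(long error)
{
	return (void *)error;
}

static inline long ptr_err(const void *ptr)
{
	return (long)ptr;
}

static inline int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for of_drm_find_panel(): NULL means "no panel bound yet". */
static void *lookup_panel(int bound)
{
	static int panel = 42;

	return bound ? (void *)&panel : NULL;
}

static void *find_panel(int bound)
{
	/* GNU C's "a ?: b": the panel if we found one, else a deferral. */
	return lookup_panel(bound) ?: err_ptr(-EPROBE_DEFER);
}

int main(void)
{
	void *p = find_panel(0);

	if (is_err(p) && ptr_err(p) == -EPROBE_DEFER)
		printf("panel not ready, deferring probe\n");
	return 0;
}
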
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 904d0754ad78..0f18b76c7906 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c | |||
| @@ -456,14 +456,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 456 | 456 | ||
| 457 | WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); | 457 | WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); |
| 458 | 458 | ||
| 459 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), | ||
| 460 | vc4_state->mm.start); | ||
| 461 | |||
| 462 | if (debug_dump_regs) { | ||
| 463 | DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); | ||
| 464 | vc4_hvs_dump_state(dev); | ||
| 465 | } | ||
| 466 | |||
| 467 | if (crtc->state->event) { | 459 | if (crtc->state->event) { |
| 468 | unsigned long flags; | 460 | unsigned long flags; |
| 469 | 461 | ||
| @@ -473,8 +465,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 473 | 465 | ||
| 474 | spin_lock_irqsave(&dev->event_lock, flags); | 466 | spin_lock_irqsave(&dev->event_lock, flags); |
| 475 | vc4_crtc->event = crtc->state->event; | 467 | vc4_crtc->event = crtc->state->event; |
| 476 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 477 | crtc->state->event = NULL; | 468 | crtc->state->event = NULL; |
| 469 | |||
| 470 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), | ||
| 471 | vc4_state->mm.start); | ||
| 472 | |||
| 473 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 474 | } else { | ||
| 475 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), | ||
| 476 | vc4_state->mm.start); | ||
| 477 | } | ||
| 478 | |||
| 479 | if (debug_dump_regs) { | ||
| 480 | DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); | ||
| 481 | vc4_hvs_dump_state(dev); | ||
| 478 | } | 482 | } |
| 479 | } | 483 | } |
| 480 | 484 | ||
| @@ -500,12 +504,17 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) | |||
| 500 | { | 504 | { |
| 501 | struct drm_crtc *crtc = &vc4_crtc->base; | 505 | struct drm_crtc *crtc = &vc4_crtc->base; |
| 502 | struct drm_device *dev = crtc->dev; | 506 | struct drm_device *dev = crtc->dev; |
| 507 | struct vc4_dev *vc4 = to_vc4_dev(dev); | ||
| 508 | struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); | ||
| 509 | u32 chan = vc4_crtc->channel; | ||
| 503 | unsigned long flags; | 510 | unsigned long flags; |
| 504 | 511 | ||
| 505 | spin_lock_irqsave(&dev->event_lock, flags); | 512 | spin_lock_irqsave(&dev->event_lock, flags); |
| 506 | if (vc4_crtc->event) { | 513 | if (vc4_crtc->event && |
| 514 | (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) { | ||
| 507 | drm_crtc_send_vblank_event(crtc, vc4_crtc->event); | 515 | drm_crtc_send_vblank_event(crtc, vc4_crtc->event); |
| 508 | vc4_crtc->event = NULL; | 516 | vc4_crtc->event = NULL; |
| 517 | drm_crtc_vblank_put(crtc); | ||
| 509 | } | 518 | } |
| 510 | spin_unlock_irqrestore(&dev->event_lock, flags); | 519 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 511 | } | 520 | } |
| @@ -556,6 +565,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) | |||
| 556 | spin_unlock_irqrestore(&dev->event_lock, flags); | 565 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 557 | } | 566 | } |
| 558 | 567 | ||
| 568 | drm_crtc_vblank_put(crtc); | ||
| 559 | drm_framebuffer_unreference(flip_state->fb); | 569 | drm_framebuffer_unreference(flip_state->fb); |
| 560 | kfree(flip_state); | 570 | kfree(flip_state); |
| 561 | 571 | ||
| @@ -598,6 +608,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, | |||
| 598 | return ret; | 608 | return ret; |
| 599 | } | 609 | } |
| 600 | 610 | ||
| 611 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); | ||
| 612 | |||
| 601 | /* Immediately update the plane's legacy fb pointer, so that later | 613 | /* Immediately update the plane's legacy fb pointer, so that later |
| 602 | * modeset prep sees the state that will be present when the semaphore | 614 | * modeset prep sees the state that will be present when the semaphore |
| 603 | * is released. | 615 | * is released. |
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 3446ece21b4a..250ed7e3754c 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c | |||
| @@ -66,12 +66,12 @@ static const struct file_operations vc4_drm_fops = { | |||
| 66 | }; | 66 | }; |
| 67 | 67 | ||
| 68 | static const struct drm_ioctl_desc vc4_drm_ioctls[] = { | 68 | static const struct drm_ioctl_desc vc4_drm_ioctls[] = { |
| 69 | DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0), | 69 | DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW), |
| 70 | DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0), | 70 | DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW), |
| 71 | DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0), | 71 | DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW), |
| 72 | DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), | 72 | DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW), |
| 73 | DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), | 73 | DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW), |
| 74 | DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0), | 74 | DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW), |
| 75 | DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, | 75 | DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, |
| 76 | DRM_ROOT_ONLY), | 76 | DRM_ROOT_ONLY), |
| 77 | }; | 77 | }; |
| @@ -91,7 +91,7 @@ static struct drm_driver vc4_drm_driver = { | |||
| 91 | 91 | ||
| 92 | .enable_vblank = vc4_enable_vblank, | 92 | .enable_vblank = vc4_enable_vblank, |
| 93 | .disable_vblank = vc4_disable_vblank, | 93 | .disable_vblank = vc4_disable_vblank, |
| 94 | .get_vblank_counter = drm_vblank_count, | 94 | .get_vblank_counter = drm_vblank_no_hw_counter, |
| 95 | 95 | ||
| 96 | #if defined(CONFIG_DEBUG_FS) | 96 | #if defined(CONFIG_DEBUG_FS) |
| 97 | .debugfs_init = vc4_debugfs_init, | 97 | .debugfs_init = vc4_debugfs_init, |
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index cb37751bc99f..861a623bc185 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c | |||
| @@ -117,10 +117,18 @@ static int vc4_atomic_commit(struct drm_device *dev, | |||
| 117 | return -ENOMEM; | 117 | return -ENOMEM; |
| 118 | 118 | ||
| 119 | /* Make sure that any outstanding modesets have finished. */ | 119 | /* Make sure that any outstanding modesets have finished. */ |
| 120 | ret = down_interruptible(&vc4->async_modeset); | 120 | if (nonblock) { |
| 121 | if (ret) { | 121 | ret = down_trylock(&vc4->async_modeset); |
| 122 | kfree(c); | 122 | if (ret) { |
| 123 | return ret; | 123 | kfree(c); |
| 124 | return -EBUSY; | ||
| 125 | } | ||
| 126 | } else { | ||
| 127 | ret = down_interruptible(&vc4->async_modeset); | ||
| 128 | if (ret) { | ||
| 129 | kfree(c); | ||
| 130 | return ret; | ||
| 131 | } | ||
| 124 | } | 132 | } |
| 125 | 133 | ||
| 126 | ret = drm_atomic_helper_prepare_planes(dev, state); | 134 | ret = drm_atomic_helper_prepare_planes(dev, state); |
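
The vc4_kms.c hunk makes the atomic commit honour the nonblock flag: a nonblocking commit must not sleep waiting for the outstanding-modeset semaphore, so it try-locks and returns -EBUSY instead. A user-space analogue with POSIX semaphores (sem_trywait versus sem_wait), only to show the shape of the two paths; the -EINTR mapping is a simplification of down_interruptible():

#include <errno.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stdio.h>

static sem_t modeset_sem;

/*
 * Blocking path waits; nonblocking path returns -EBUSY if someone else
 * already holds the "outstanding modeset" semaphore.
 */
static int begin_commit(bool nonblock)
{
	if (nonblock)
		return sem_trywait(&modeset_sem) ? -EBUSY : 0;

	return sem_wait(&modeset_sem) ? -EINTR : 0;
}

int main(void)
{
	sem_init(&modeset_sem, 0, 1);

	printf("first commit:  %d\n", begin_commit(true));	/* 0 */
	printf("second commit: %d\n", begin_commit(true));	/* -EBUSY */

	sem_post(&modeset_sem);
	sem_destroy(&modeset_sem);
	return 0;
}
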
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 6163b95c5411..f99eece4cc97 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h | |||
| @@ -341,6 +341,10 @@ | |||
| 341 | #define SCALER_DISPLACT0 0x00000030 | 341 | #define SCALER_DISPLACT0 0x00000030 |
| 342 | #define SCALER_DISPLACT1 0x00000034 | 342 | #define SCALER_DISPLACT1 0x00000034 |
| 343 | #define SCALER_DISPLACT2 0x00000038 | 343 | #define SCALER_DISPLACT2 0x00000038 |
| 344 | #define SCALER_DISPLACTX(x) (SCALER_DISPLACT0 + \ | ||
| 345 | (x) * (SCALER_DISPLACT1 - \ | ||
| 346 | SCALER_DISPLACT0)) | ||
| 347 | |||
| 344 | #define SCALER_DISPCTRL0 0x00000040 | 348 | #define SCALER_DISPCTRL0 0x00000040 |
| 345 | # define SCALER_DISPCTRLX_ENABLE BIT(31) | 349 | # define SCALER_DISPCTRLX_ENABLE BIT(31) |
| 346 | # define SCALER_DISPCTRLX_RESET BIT(30) | 350 | # define SCALER_DISPCTRLX_RESET BIT(30) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 6de283c8fa3e..f0374f9b56ca 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
| 30 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
| 31 | #include <linux/frame.h> | ||
| 31 | #include <asm/hypervisor.h> | 32 | #include <asm/hypervisor.h> |
| 32 | #include "drmP.h" | 33 | #include "drmP.h" |
| 33 | #include "vmwgfx_msg.h" | 34 | #include "vmwgfx_msg.h" |
| @@ -194,7 +195,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg) | |||
| 194 | 195 | ||
| 195 | return -EINVAL; | 196 | return -EINVAL; |
| 196 | } | 197 | } |
| 197 | 198 | STACK_FRAME_NON_STANDARD(vmw_send_msg); | |
| 198 | 199 | ||
| 199 | 200 | ||
| 200 | /** | 201 | /** |
| @@ -304,6 +305,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, | |||
| 304 | 305 | ||
| 305 | return 0; | 306 | return 0; |
| 306 | } | 307 | } |
| 308 | STACK_FRAME_NON_STANDARD(vmw_recv_msg); | ||
| 307 | 309 | ||
| 308 | 310 | ||
| 309 | /** | 311 | /** |
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index aad8c162a825..0cd4f7216239 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c | |||
| @@ -261,7 +261,7 @@ static void elo_remove(struct hid_device *hdev) | |||
| 261 | struct elo_priv *priv = hid_get_drvdata(hdev); | 261 | struct elo_priv *priv = hid_get_drvdata(hdev); |
| 262 | 262 | ||
| 263 | hid_hw_stop(hdev); | 263 | hid_hw_stop(hdev); |
| 264 | flush_workqueue(wq); | 264 | cancel_delayed_work_sync(&priv->work); |
| 265 | kfree(priv); | 265 | kfree(priv); |
| 266 | } | 266 | } |
| 267 | 267 | ||
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index c741f5e50a66..95b7d61d9910 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c | |||
| @@ -1401,6 +1401,11 @@ static const struct hid_device_id mt_devices[] = { | |||
| 1401 | MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK, | 1401 | MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK, |
| 1402 | USB_DEVICE_ID_NOVATEK_PCT) }, | 1402 | USB_DEVICE_ID_NOVATEK_PCT) }, |
| 1403 | 1403 | ||
| 1404 | /* Ntrig Panel */ | ||
| 1405 | { .driver_data = MT_CLS_NSMU, | ||
| 1406 | HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, | ||
| 1407 | USB_VENDOR_ID_NTRIG, 0x1b05) }, | ||
| 1408 | |||
| 1404 | /* PixArt optical touch screen */ | 1409 | /* PixArt optical touch screen */ |
| 1405 | { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER, | 1410 | { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER, |
| 1406 | MT_USB_DEVICE(USB_VENDOR_ID_PIXART, | 1411 | MT_USB_DEVICE(USB_VENDOR_ID_PIXART, |
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 2f1ddca6f2e0..700145b15088 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c | |||
| @@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, | |||
| 516 | goto inval; | 516 | goto inval; |
| 517 | } else if (uref->usage_index >= field->report_count) | 517 | } else if (uref->usage_index >= field->report_count) |
| 518 | goto inval; | 518 | goto inval; |
| 519 | |||
| 520 | else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) && | ||
| 521 | (uref_multi->num_values > HID_MAX_MULTI_USAGES || | ||
| 522 | uref->usage_index + uref_multi->num_values > field->report_count)) | ||
| 523 | goto inval; | ||
| 524 | } | 519 | } |
| 525 | 520 | ||
| 521 | if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) && | ||
| 522 | (uref_multi->num_values > HID_MAX_MULTI_USAGES || | ||
| 523 | uref->usage_index + uref_multi->num_values > field->report_count)) | ||
| 524 | goto inval; | ||
| 525 | |||
| 526 | switch (cmd) { | 526 | switch (cmd) { |
| 527 | case HIDIOCGUSAGE: | 527 | case HIDIOCGUSAGE: |
| 528 | uref->value = field->value[uref->usage_index]; | 528 | uref->value = field->value[uref->usage_index]; |
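
The hiddev.c hunk lifts the HIDIOCGUSAGES/HIDIOCSUSAGES range check out of the else-if chain so it is applied on every path, not just the last branch, before the usage indices are used to address the field. A standalone sketch of that kind of range check, with a hypothetical limit in place of HID_MAX_MULTI_USAGES and plain integers in place of the real HID structures:

#include <stdbool.h>
#include <stdio.h>

#define MAX_MULTI_USAGES 1024	/* stand-in for HID_MAX_MULTI_USAGES */

/*
 * Reject a multi-usage request unless every referenced slot lies inside
 * the field's report_count.
 */
static bool multi_usage_ok(unsigned int usage_index,
			   unsigned int num_values,
			   unsigned int report_count)
{
	if (num_values > MAX_MULTI_USAGES)
		return false;
	if (usage_index >= report_count)
		return false;
	/* usage_index < report_count and num_values is capped, so the sum
	 * cannot wrap for realistic report counts. */
	if (usage_index + num_values > report_count)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", multi_usage_ok(2, 4, 8));	/* 1: fits in the field */
	printf("%d\n", multi_usage_ok(6, 4, 8));	/* 0: runs past the field */
	return 0;
}
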
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index c43318d3416e..2ac87d553e22 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <linux/uaccess.h> | 35 | #include <linux/uaccess.h> |
| 36 | #include <linux/io.h> | 36 | #include <linux/io.h> |
| 37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
| 38 | #include <linux/ctype.h> | ||
| 38 | 39 | ||
| 39 | #include <linux/i8k.h> | 40 | #include <linux/i8k.h> |
| 40 | 41 | ||
| @@ -66,11 +67,13 @@ | |||
| 66 | 67 | ||
| 67 | static DEFINE_MUTEX(i8k_mutex); | 68 | static DEFINE_MUTEX(i8k_mutex); |
| 68 | static char bios_version[4]; | 69 | static char bios_version[4]; |
| 70 | static char bios_machineid[16]; | ||
| 69 | static struct device *i8k_hwmon_dev; | 71 | static struct device *i8k_hwmon_dev; |
| 70 | static u32 i8k_hwmon_flags; | 72 | static u32 i8k_hwmon_flags; |
| 71 | static uint i8k_fan_mult = I8K_FAN_MULT; | 73 | static uint i8k_fan_mult = I8K_FAN_MULT; |
| 72 | static uint i8k_pwm_mult; | 74 | static uint i8k_pwm_mult; |
| 73 | static uint i8k_fan_max = I8K_FAN_HIGH; | 75 | static uint i8k_fan_max = I8K_FAN_HIGH; |
| 76 | static bool disallow_fan_type_call; | ||
| 74 | 77 | ||
| 75 | #define I8K_HWMON_HAVE_TEMP1 (1 << 0) | 78 | #define I8K_HWMON_HAVE_TEMP1 (1 << 0) |
| 76 | #define I8K_HWMON_HAVE_TEMP2 (1 << 1) | 79 | #define I8K_HWMON_HAVE_TEMP2 (1 << 1) |
| @@ -94,13 +97,13 @@ module_param(ignore_dmi, bool, 0); | |||
| 94 | MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match"); | 97 | MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match"); |
| 95 | 98 | ||
| 96 | #if IS_ENABLED(CONFIG_I8K) | 99 | #if IS_ENABLED(CONFIG_I8K) |
| 97 | static bool restricted; | 100 | static bool restricted = true; |
| 98 | module_param(restricted, bool, 0); | 101 | module_param(restricted, bool, 0); |
| 99 | MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set"); | 102 | MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)"); |
| 100 | 103 | ||
| 101 | static bool power_status; | 104 | static bool power_status; |
| 102 | module_param(power_status, bool, 0600); | 105 | module_param(power_status, bool, 0600); |
| 103 | MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k"); | 106 | MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)"); |
| 104 | #endif | 107 | #endif |
| 105 | 108 | ||
| 106 | static uint fan_mult; | 109 | static uint fan_mult; |
| @@ -235,14 +238,28 @@ static int i8k_get_fan_speed(int fan) | |||
| 235 | /* | 238 | /* |
| 236 | * Read the fan type. | 239 | * Read the fan type. |
| 237 | */ | 240 | */ |
| 238 | static int i8k_get_fan_type(int fan) | 241 | static int _i8k_get_fan_type(int fan) |
| 239 | { | 242 | { |
| 240 | struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, }; | 243 | struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, }; |
| 241 | 244 | ||
| 245 | if (disallow_fan_type_call) | ||
| 246 | return -EINVAL; | ||
| 247 | |||
| 242 | regs.ebx = fan & 0xff; | 248 | regs.ebx = fan & 0xff; |
| 243 | return i8k_smm(®s) ? : regs.eax & 0xff; | 249 | return i8k_smm(®s) ? : regs.eax & 0xff; |
| 244 | } | 250 | } |
| 245 | 251 | ||
| 252 | static int i8k_get_fan_type(int fan) | ||
| 253 | { | ||
| 254 | /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */ | ||
| 255 | static int types[2] = { INT_MIN, INT_MIN }; | ||
| 256 | |||
| 257 | if (types[fan] == INT_MIN) | ||
| 258 | types[fan] = _i8k_get_fan_type(fan); | ||
| 259 | |||
| 260 | return types[fan]; | ||
| 261 | } | ||
| 262 | |||
| 246 | /* | 263 | /* |
| 247 | * Read the fan nominal rpm for specific fan speed. | 264 | * Read the fan nominal rpm for specific fan speed. |
| 248 | */ | 265 | */ |
| @@ -387,14 +404,20 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg) | |||
| 387 | 404 | ||
| 388 | switch (cmd) { | 405 | switch (cmd) { |
| 389 | case I8K_BIOS_VERSION: | 406 | case I8K_BIOS_VERSION: |
| 407 | if (!isdigit(bios_version[0]) || !isdigit(bios_version[1]) || | ||
| 408 | !isdigit(bios_version[2])) | ||
| 409 | return -EINVAL; | ||
| 410 | |||
| 390 | val = (bios_version[0] << 16) | | 411 | val = (bios_version[0] << 16) | |
| 391 | (bios_version[1] << 8) | bios_version[2]; | 412 | (bios_version[1] << 8) | bios_version[2]; |
| 392 | break; | 413 | break; |
| 393 | 414 | ||
| 394 | case I8K_MACHINE_ID: | 415 | case I8K_MACHINE_ID: |
| 395 | memset(buff, 0, 16); | 416 | if (restricted && !capable(CAP_SYS_ADMIN)) |
| 396 | strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), | 417 | return -EPERM; |
| 397 | sizeof(buff)); | 418 | |
| 419 | memset(buff, 0, sizeof(buff)); | ||
| 420 | strlcpy(buff, bios_machineid, sizeof(buff)); | ||
| 398 | break; | 421 | break; |
| 399 | 422 | ||
| 400 | case I8K_FN_STATUS: | 423 | case I8K_FN_STATUS: |
| @@ -511,7 +534,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset) | |||
| 511 | seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n", | 534 | seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n", |
| 512 | I8K_PROC_FMT, | 535 | I8K_PROC_FMT, |
| 513 | bios_version, | 536 | bios_version, |
| 514 | i8k_get_dmi_data(DMI_PRODUCT_SERIAL), | 537 | (restricted && !capable(CAP_SYS_ADMIN)) ? "-1" : bios_machineid, |
| 515 | cpu_temp, | 538 | cpu_temp, |
| 516 | left_fan, right_fan, left_speed, right_speed, | 539 | left_fan, right_fan, left_speed, right_speed, |
| 517 | ac_power, fn_key); | 540 | ac_power, fn_key); |
| @@ -718,6 +741,9 @@ static struct attribute *i8k_attrs[] = { | |||
| 718 | static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr, | 741 | static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr, |
| 719 | int index) | 742 | int index) |
| 720 | { | 743 | { |
| 744 | if (disallow_fan_type_call && | ||
| 745 | (index == 9 || index == 12)) | ||
| 746 | return 0; | ||
| 721 | if (index >= 0 && index <= 1 && | 747 | if (index >= 0 && index <= 1 && |
| 722 | !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1)) | 748 | !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1)) |
| 723 | return 0; | 749 | return 0; |
| @@ -767,13 +793,17 @@ static int __init i8k_init_hwmon(void) | |||
| 767 | if (err >= 0) | 793 | if (err >= 0) |
| 768 | i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4; | 794 | i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4; |
| 769 | 795 | ||
| 770 | /* First fan attributes, if fan type is OK */ | 796 | /* First fan attributes, if fan status or type is OK */ |
| 771 | err = i8k_get_fan_type(0); | 797 | err = i8k_get_fan_status(0); |
| 798 | if (err < 0) | ||
| 799 | err = i8k_get_fan_type(0); | ||
| 772 | if (err >= 0) | 800 | if (err >= 0) |
| 773 | i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1; | 801 | i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1; |
| 774 | 802 | ||
| 775 | /* Second fan attributes, if fan type is OK */ | 803 | /* Second fan attributes, if fan status or type is OK */ |
| 776 | err = i8k_get_fan_type(1); | 804 | err = i8k_get_fan_status(1); |
| 805 | if (err < 0) | ||
| 806 | err = i8k_get_fan_type(1); | ||
| 777 | if (err >= 0) | 807 | if (err >= 0) |
| 778 | i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2; | 808 | i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2; |
| 779 | 809 | ||
| @@ -929,12 +959,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = { | |||
| 929 | 959 | ||
| 930 | MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); | 960 | MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); |
| 931 | 961 | ||
| 932 | static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { | 962 | /* |
| 963 | * On some machines, once I8K_SMM_GET_FAN_TYPE is issued, the CPU fan speed starts | ||
| 964 | * going up and down randomly due to a bug in the Dell SMM or BIOS. This blacklist | ||
| 965 | * holds the affected Dell machines for which we disallow the I8K_SMM_GET_FAN_TYPE call. | ||
| 966 | * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121 | ||
| 967 | */ | ||
| 968 | static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = { | ||
| 933 | { | 969 | { |
| 934 | /* | ||
| 935 | * CPU fan speed going up and down on Dell Studio XPS 8000 | ||
| 936 | * for unknown reasons. | ||
| 937 | */ | ||
| 938 | .ident = "Dell Studio XPS 8000", | 970 | .ident = "Dell Studio XPS 8000", |
| 939 | .matches = { | 971 | .matches = { |
| 940 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | 972 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
| @@ -942,16 +974,19 @@ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = { | |||
| 942 | }, | 974 | }, |
| 943 | }, | 975 | }, |
| 944 | { | 976 | { |
| 945 | /* | ||
| 946 | * CPU fan speed going up and down on Dell Studio XPS 8100 | ||
| 947 | * for unknown reasons. | ||
| 948 | */ | ||
| 949 | .ident = "Dell Studio XPS 8100", | 977 | .ident = "Dell Studio XPS 8100", |
| 950 | .matches = { | 978 | .matches = { |
| 951 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | 979 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
| 952 | DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"), | 980 | DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"), |
| 953 | }, | 981 | }, |
| 954 | }, | 982 | }, |
| 983 | { | ||
| 984 | .ident = "Dell Inspiron 580", | ||
| 985 | .matches = { | ||
| 986 | DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
| 987 | DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "), | ||
| 988 | }, | ||
| 989 | }, | ||
| 955 | { } | 990 | { } |
| 956 | }; | 991 | }; |
| 957 | 992 | ||
| @@ -966,8 +1001,7 @@ static int __init i8k_probe(void) | |||
| 966 | /* | 1001 | /* |
| 967 | * Get DMI information | 1002 | * Get DMI information |
| 968 | */ | 1003 | */ |
| 969 | if (!dmi_check_system(i8k_dmi_table) || | 1004 | if (!dmi_check_system(i8k_dmi_table)) { |
| 970 | dmi_check_system(i8k_blacklist_dmi_table)) { | ||
| 971 | if (!ignore_dmi && !force) | 1005 | if (!ignore_dmi && !force) |
| 972 | return -ENODEV; | 1006 | return -ENODEV; |
| 973 | 1007 | ||
| @@ -978,8 +1012,13 @@ static int __init i8k_probe(void) | |||
| 978 | i8k_get_dmi_data(DMI_BIOS_VERSION)); | 1012 | i8k_get_dmi_data(DMI_BIOS_VERSION)); |
| 979 | } | 1013 | } |
| 980 | 1014 | ||
| 1015 | if (dmi_check_system(i8k_blacklist_fan_type_dmi_table)) | ||
| 1016 | disallow_fan_type_call = true; | ||
| 1017 | |||
| 981 | strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION), | 1018 | strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION), |
| 982 | sizeof(bios_version)); | 1019 | sizeof(bios_version)); |
| 1020 | strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL), | ||
| 1021 | sizeof(bios_machineid)); | ||
| 983 | 1022 | ||
| 984 | /* | 1023 | /* |
| 985 | * Get SMM Dell signature | 1024 | * Get SMM Dell signature |
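Taken together, the i8k changes above avoid I8K_SMM_GET_FAN_TYPE on blacklisted machines: a DMI match sets disallow_fan_type_call, the fan-type hwmon attributes (indexes 9 and 12) are hidden, and fan presence is now probed via the fan-status call first, falling back to the type call only where it is still allowed. A small userspace sketch of that probing order; the helper names are hypothetical and the exact place where the flag is checked is not visible in these hunks:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the SMM calls; a negative value means "failed". */
static int get_fan_status(int fan) { return fan == 0 ? 0 : -1; }
static int get_fan_type(int fan)   { return 1; }

/* Set when the DMI blacklist matches, as in the patch. */
static bool disallow_fan_type_call;

/* Detect a fan without issuing the type call on blacklisted machines. */
static bool fan_present(int fan)
{
	int err = get_fan_status(fan);

	if (err < 0 && !disallow_fan_type_call)
		err = get_fan_type(fan);
	return err >= 0;
}

int main(void)
{
	disallow_fan_type_call = true;	/* pretend the DMI blacklist matched */
	printf("fan0 present: %d, fan1 present: %d\n",
	       fan_present(0), fan_present(1));
	return 0;
}
```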
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index eb97a9241d17..15aa49d082c4 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c | |||
| @@ -172,9 +172,9 @@ static void do_read_registers_on_cu(void *_data) | |||
| 172 | */ | 172 | */ |
| 173 | static int read_registers(struct fam15h_power_data *data) | 173 | static int read_registers(struct fam15h_power_data *data) |
| 174 | { | 174 | { |
| 175 | int this_cpu, ret, cpu; | ||
| 176 | int core, this_core; | 175 | int core, this_core; |
| 177 | cpumask_var_t mask; | 176 | cpumask_var_t mask; |
| 177 | int ret, cpu; | ||
| 178 | 178 | ||
| 179 | ret = zalloc_cpumask_var(&mask, GFP_KERNEL); | 179 | ret = zalloc_cpumask_var(&mask, GFP_KERNEL); |
| 180 | if (!ret) | 180 | if (!ret) |
| @@ -183,7 +183,6 @@ static int read_registers(struct fam15h_power_data *data) | |||
| 183 | memset(data->cu_on, 0, sizeof(int) * MAX_CUS); | 183 | memset(data->cu_on, 0, sizeof(int) * MAX_CUS); |
| 184 | 184 | ||
| 185 | get_online_cpus(); | 185 | get_online_cpus(); |
| 186 | this_cpu = smp_processor_id(); | ||
| 187 | 186 | ||
| 188 | /* | 187 | /* |
| 189 | * Choose the first online core of each compute unit, and then | 188 | * Choose the first online core of each compute unit, and then |
| @@ -205,12 +204,9 @@ static int read_registers(struct fam15h_power_data *data) | |||
| 205 | cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask); | 204 | cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask); |
| 206 | } | 205 | } |
| 207 | 206 | ||
| 208 | if (cpumask_test_cpu(this_cpu, mask)) | 207 | on_each_cpu_mask(mask, do_read_registers_on_cu, data, true); |
| 209 | do_read_registers_on_cu(data); | ||
| 210 | 208 | ||
| 211 | smp_call_function_many(mask, do_read_registers_on_cu, data, true); | ||
| 212 | put_online_cpus(); | 209 | put_online_cpus(); |
| 213 | |||
| 214 | free_cpumask_var(mask); | 210 | free_cpumask_var(mask); |
| 215 | 211 | ||
| 216 | return 0; | 212 | return 0; |
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index c9ff08dbe10c..e30a5939dc0d 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c | |||
| @@ -375,7 +375,7 @@ struct lm90_data { | |||
| 375 | int kind; | 375 | int kind; |
| 376 | u32 flags; | 376 | u32 flags; |
| 377 | 377 | ||
| 378 | int update_interval; /* in milliseconds */ | 378 | unsigned int update_interval; /* in milliseconds */ |
| 379 | 379 | ||
| 380 | u8 config_orig; /* Original configuration register value */ | 380 | u8 config_orig; /* Original configuration register value */ |
| 381 | u8 convrate_orig; /* Original conversion rate register value */ | 381 | u8 convrate_orig; /* Original conversion rate register value */ |
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 847d1b5f2c13..688be9e060fc 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c | |||
| @@ -300,13 +300,10 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) | |||
| 300 | if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { | 300 | if (local_read(&drvdata->mode) == CS_MODE_SYSFS) { |
| 301 | /* | 301 | /* |
| 302 | * The trace run will continue with the same allocated trace | 302 | * The trace run will continue with the same allocated trace |
| 303 | * buffer. As such zero-out the buffer so that we don't end | 303 | * buffer. The trace buffer is cleared in tmc_etr_enable_hw(), |
| 304 | * up with stale data. | 304 | * so we don't have to explicitly clear it. Also, since the |
| 305 | * | 305 | * tracer is still enabled, drvdata::buf can't be NULL. |
| 306 | * Since the tracer is still enabled drvdata::buf | ||
| 307 | * can't be NULL. | ||
| 308 | */ | 306 | */ |
| 309 | memset(drvdata->buf, 0, drvdata->size); | ||
| 310 | tmc_etr_enable_hw(drvdata); | 307 | tmc_etr_enable_hw(drvdata); |
| 311 | } else { | 308 | } else { |
| 312 | /* | 309 | /* |
| @@ -315,7 +312,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) | |||
| 315 | */ | 312 | */ |
| 316 | vaddr = drvdata->vaddr; | 313 | vaddr = drvdata->vaddr; |
| 317 | paddr = drvdata->paddr; | 314 | paddr = drvdata->paddr; |
| 318 | drvdata->buf = NULL; | 315 | drvdata->buf = drvdata->vaddr = NULL; |
| 319 | } | 316 | } |
| 320 | 317 | ||
| 321 | drvdata->reading = false; | 318 | drvdata->reading = false; |
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 5443d03a1eec..d08d1ab9bba5 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c | |||
| @@ -385,7 +385,6 @@ static int _coresight_build_path(struct coresight_device *csdev, | |||
| 385 | int i; | 385 | int i; |
| 386 | bool found = false; | 386 | bool found = false; |
| 387 | struct coresight_node *node; | 387 | struct coresight_node *node; |
| 388 | struct coresight_connection *conn; | ||
| 389 | 388 | ||
| 390 | /* An activated sink has been found. Enqueue the element */ | 389 | /* An activated sink has been found. Enqueue the element */ |
| 391 | if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || | 390 | if ((csdev->type == CORESIGHT_DEV_TYPE_SINK || |
| @@ -394,8 +393,9 @@ static int _coresight_build_path(struct coresight_device *csdev, | |||
| 394 | 393 | ||
| 395 | /* Not a sink - recursively explore each port found on this element */ | 394 | /* Not a sink - recursively explore each port found on this element */ |
| 396 | for (i = 0; i < csdev->nr_outport; i++) { | 395 | for (i = 0; i < csdev->nr_outport; i++) { |
| 397 | conn = &csdev->conns[i]; | 396 | struct coresight_device *child_dev = csdev->conns[i].child_dev; |
| 398 | if (_coresight_build_path(conn->child_dev, path) == 0) { | 397 | |
| 398 | if (child_dev && _coresight_build_path(child_dev, path) == 0) { | ||
| 399 | found = true; | 399 | found = true; |
| 400 | break; | 400 | break; |
| 401 | } | 401 | } |
| @@ -425,6 +425,7 @@ out: | |||
| 425 | struct list_head *coresight_build_path(struct coresight_device *csdev) | 425 | struct list_head *coresight_build_path(struct coresight_device *csdev) |
| 426 | { | 426 | { |
| 427 | struct list_head *path; | 427 | struct list_head *path; |
| 428 | int rc; | ||
| 428 | 429 | ||
| 429 | path = kzalloc(sizeof(struct list_head), GFP_KERNEL); | 430 | path = kzalloc(sizeof(struct list_head), GFP_KERNEL); |
| 430 | if (!path) | 431 | if (!path) |
| @@ -432,9 +433,10 @@ struct list_head *coresight_build_path(struct coresight_device *csdev) | |||
| 432 | 433 | ||
| 433 | INIT_LIST_HEAD(path); | 434 | INIT_LIST_HEAD(path); |
| 434 | 435 | ||
| 435 | if (_coresight_build_path(csdev, path)) { | 436 | rc = _coresight_build_path(csdev, path); |
| 437 | if (rc) { | ||
| 436 | kfree(path); | 438 | kfree(path); |
| 437 | path = NULL; | 439 | return ERR_PTR(rc); |
| 438 | } | 440 | } |
| 439 | 441 | ||
| 440 | return path; | 442 | return path; |
| @@ -507,8 +509,9 @@ int coresight_enable(struct coresight_device *csdev) | |||
| 507 | goto out; | 509 | goto out; |
| 508 | 510 | ||
| 509 | path = coresight_build_path(csdev); | 511 | path = coresight_build_path(csdev); |
| 510 | if (!path) { | 512 | if (IS_ERR(path)) { |
| 511 | pr_err("building path(s) failed\n"); | 513 | pr_err("building path(s) failed\n"); |
| 514 | ret = PTR_ERR(path); | ||
| 512 | goto out; | 515 | goto out; |
| 513 | } | 516 | } |
| 514 | 517 | ||
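The return-value change above moves coresight_build_path() from the NULL-on-error convention to the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() encoding, so callers can propagate the real error code instead of collapsing every failure to "no path". A standalone sketch of that encoding, re-implemented here only for illustration (the kernel's versions live in <linux/err.h>):

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified userspace versions of the kernel helpers from <linux/err.h>. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)(intptr_t)error; }
static long PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static int IS_ERR(const void *ptr)
{
	/* The top MAX_ERRNO addresses are reserved to carry -errno values. */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Toy stand-in for coresight_build_path(): failures ride in the pointer. */
static void *build_path(int want_failure)
{
	if (want_failure)
		return ERR_PTR(-ENOMEM);
	return malloc(16);
}

int main(void)
{
	void *path = build_path(1);

	if (IS_ERR(path))
		printf("build_path failed: %ld\n", PTR_ERR(path));
	else
		free(path);
	return 0;
}
```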
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 64b1208bca5e..4a60ad214747 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
| @@ -245,6 +245,13 @@ struct i801_priv { | |||
| 245 | struct platform_device *mux_pdev; | 245 | struct platform_device *mux_pdev; |
| 246 | #endif | 246 | #endif |
| 247 | struct platform_device *tco_pdev; | 247 | struct platform_device *tco_pdev; |
| 248 | |||
| 249 | /* | ||
| 250 | * If set to true the host controller registers are reserved for | ||
| 251 | * ACPI AML use. Protected by acpi_lock. | ||
| 252 | */ | ||
| 253 | bool acpi_reserved; | ||
| 254 | struct mutex acpi_lock; | ||
| 248 | }; | 255 | }; |
| 249 | 256 | ||
| 250 | #define FEATURE_SMBUS_PEC (1 << 0) | 257 | #define FEATURE_SMBUS_PEC (1 << 0) |
| @@ -718,6 +725,12 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, | |||
| 718 | int ret = 0, xact = 0; | 725 | int ret = 0, xact = 0; |
| 719 | struct i801_priv *priv = i2c_get_adapdata(adap); | 726 | struct i801_priv *priv = i2c_get_adapdata(adap); |
| 720 | 727 | ||
| 728 | mutex_lock(&priv->acpi_lock); | ||
| 729 | if (priv->acpi_reserved) { | ||
| 730 | mutex_unlock(&priv->acpi_lock); | ||
| 731 | return -EBUSY; | ||
| 732 | } | ||
| 733 | |||
| 721 | pm_runtime_get_sync(&priv->pci_dev->dev); | 734 | pm_runtime_get_sync(&priv->pci_dev->dev); |
| 722 | 735 | ||
| 723 | hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) | 736 | hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) |
| @@ -820,6 +833,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, | |||
| 820 | out: | 833 | out: |
| 821 | pm_runtime_mark_last_busy(&priv->pci_dev->dev); | 834 | pm_runtime_mark_last_busy(&priv->pci_dev->dev); |
| 822 | pm_runtime_put_autosuspend(&priv->pci_dev->dev); | 835 | pm_runtime_put_autosuspend(&priv->pci_dev->dev); |
| 836 | mutex_unlock(&priv->acpi_lock); | ||
| 823 | return ret; | 837 | return ret; |
| 824 | } | 838 | } |
| 825 | 839 | ||
| @@ -1257,6 +1271,83 @@ static void i801_add_tco(struct i801_priv *priv) | |||
| 1257 | priv->tco_pdev = pdev; | 1271 | priv->tco_pdev = pdev; |
| 1258 | } | 1272 | } |
| 1259 | 1273 | ||
| 1274 | #ifdef CONFIG_ACPI | ||
| 1275 | static acpi_status | ||
| 1276 | i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, | ||
| 1277 | u64 *value, void *handler_context, void *region_context) | ||
| 1278 | { | ||
| 1279 | struct i801_priv *priv = handler_context; | ||
| 1280 | struct pci_dev *pdev = priv->pci_dev; | ||
| 1281 | acpi_status status; | ||
| 1282 | |||
| 1283 | /* | ||
| 1284 | * Once BIOS AML code touches the OpRegion we warn and inhibit any | ||
| 1285 | * further access from the driver itself. This device is now owned | ||
| 1286 | * by the system firmware. | ||
| 1287 | */ | ||
| 1288 | mutex_lock(&priv->acpi_lock); | ||
| 1289 | |||
| 1290 | if (!priv->acpi_reserved) { | ||
| 1291 | priv->acpi_reserved = true; | ||
| 1292 | |||
| 1293 | dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); | ||
| 1294 | dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n"); | ||
| 1295 | |||
| 1296 | /* | ||
| 1297 | * BIOS is accessing the host controller so prevent it from | ||
| 1298 | * suspending automatically from now on. | ||
| 1299 | */ | ||
| 1300 | pm_runtime_get_sync(&pdev->dev); | ||
| 1301 | } | ||
| 1302 | |||
| 1303 | if ((function & ACPI_IO_MASK) == ACPI_READ) | ||
| 1304 | status = acpi_os_read_port(address, (u32 *)value, bits); | ||
| 1305 | else | ||
| 1306 | status = acpi_os_write_port(address, (u32)*value, bits); | ||
| 1307 | |||
| 1308 | mutex_unlock(&priv->acpi_lock); | ||
| 1309 | |||
| 1310 | return status; | ||
| 1311 | } | ||
| 1312 | |||
| 1313 | static int i801_acpi_probe(struct i801_priv *priv) | ||
| 1314 | { | ||
| 1315 | struct acpi_device *adev; | ||
| 1316 | acpi_status status; | ||
| 1317 | |||
| 1318 | adev = ACPI_COMPANION(&priv->pci_dev->dev); | ||
| 1319 | if (adev) { | ||
| 1320 | status = acpi_install_address_space_handler(adev->handle, | ||
| 1321 | ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler, | ||
| 1322 | NULL, priv); | ||
| 1323 | if (ACPI_SUCCESS(status)) | ||
| 1324 | return 0; | ||
| 1325 | } | ||
| 1326 | |||
| 1327 | return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]); | ||
| 1328 | } | ||
| 1329 | |||
| 1330 | static void i801_acpi_remove(struct i801_priv *priv) | ||
| 1331 | { | ||
| 1332 | struct acpi_device *adev; | ||
| 1333 | |||
| 1334 | adev = ACPI_COMPANION(&priv->pci_dev->dev); | ||
| 1335 | if (!adev) | ||
| 1336 | return; | ||
| 1337 | |||
| 1338 | acpi_remove_address_space_handler(adev->handle, | ||
| 1339 | ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler); | ||
| 1340 | |||
| 1341 | mutex_lock(&priv->acpi_lock); | ||
| 1342 | if (priv->acpi_reserved) | ||
| 1343 | pm_runtime_put(&priv->pci_dev->dev); | ||
| 1344 | mutex_unlock(&priv->acpi_lock); | ||
| 1345 | } | ||
| 1346 | #else | ||
| 1347 | static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; } | ||
| 1348 | static inline void i801_acpi_remove(struct i801_priv *priv) { } | ||
| 1349 | #endif | ||
| 1350 | |||
| 1260 | static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | 1351 | static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) |
| 1261 | { | 1352 | { |
| 1262 | unsigned char temp; | 1353 | unsigned char temp; |
| @@ -1274,6 +1365,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1274 | priv->adapter.dev.parent = &dev->dev; | 1365 | priv->adapter.dev.parent = &dev->dev; |
| 1275 | ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev)); | 1366 | ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev)); |
| 1276 | priv->adapter.retries = 3; | 1367 | priv->adapter.retries = 3; |
| 1368 | mutex_init(&priv->acpi_lock); | ||
| 1277 | 1369 | ||
| 1278 | priv->pci_dev = dev; | 1370 | priv->pci_dev = dev; |
| 1279 | switch (dev->device) { | 1371 | switch (dev->device) { |
| @@ -1336,10 +1428,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1336 | return -ENODEV; | 1428 | return -ENODEV; |
| 1337 | } | 1429 | } |
| 1338 | 1430 | ||
| 1339 | err = acpi_check_resource_conflict(&dev->resource[SMBBAR]); | 1431 | if (i801_acpi_probe(priv)) |
| 1340 | if (err) { | ||
| 1341 | return -ENODEV; | 1432 | return -ENODEV; |
| 1342 | } | ||
| 1343 | 1433 | ||
| 1344 | err = pcim_iomap_regions(dev, 1 << SMBBAR, | 1434 | err = pcim_iomap_regions(dev, 1 << SMBBAR, |
| 1345 | dev_driver_string(&dev->dev)); | 1435 | dev_driver_string(&dev->dev)); |
| @@ -1348,6 +1438,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1348 | "Failed to request SMBus region 0x%lx-0x%Lx\n", | 1438 | "Failed to request SMBus region 0x%lx-0x%Lx\n", |
| 1349 | priv->smba, | 1439 | priv->smba, |
| 1350 | (unsigned long long)pci_resource_end(dev, SMBBAR)); | 1440 | (unsigned long long)pci_resource_end(dev, SMBBAR)); |
| 1441 | i801_acpi_remove(priv); | ||
| 1351 | return err; | 1442 | return err; |
| 1352 | } | 1443 | } |
| 1353 | 1444 | ||
| @@ -1412,6 +1503,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1412 | err = i2c_add_adapter(&priv->adapter); | 1503 | err = i2c_add_adapter(&priv->adapter); |
| 1413 | if (err) { | 1504 | if (err) { |
| 1414 | dev_err(&dev->dev, "Failed to add SMBus adapter\n"); | 1505 | dev_err(&dev->dev, "Failed to add SMBus adapter\n"); |
| 1506 | i801_acpi_remove(priv); | ||
| 1415 | return err; | 1507 | return err; |
| 1416 | } | 1508 | } |
| 1417 | 1509 | ||
| @@ -1438,6 +1530,7 @@ static void i801_remove(struct pci_dev *dev) | |||
| 1438 | 1530 | ||
| 1439 | i801_del_mux(priv); | 1531 | i801_del_mux(priv); |
| 1440 | i2c_del_adapter(&priv->adapter); | 1532 | i2c_del_adapter(&priv->adapter); |
| 1533 | i801_acpi_remove(priv); | ||
| 1441 | pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); | 1534 | pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); |
| 1442 | 1535 | ||
| 1443 | platform_device_unregister(priv->tco_pdev); | 1536 | platform_device_unregister(priv->tco_pdev); |
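The new OpRegion handler and the check at the top of i801_access() implement a one-way handover: the first time BIOS AML touches the SMBus I/O range the driver marks the controller reserved, and every later driver transfer fails with -EBUSY. A threaded userspace sketch of that flag-under-mutex pattern; the names are illustrative, not the driver's API:

```c
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t acpi_lock = PTHREAD_MUTEX_INITIALIZER;
static bool acpi_reserved;

/* Modeled on the OpRegion handler: the first firmware access claims the device. */
static void firmware_access(void)
{
	pthread_mutex_lock(&acpi_lock);
	if (!acpi_reserved) {
		acpi_reserved = true;
		fprintf(stderr, "firmware claimed the controller\n");
	}
	/* ... forward the port read/write here ... */
	pthread_mutex_unlock(&acpi_lock);
}

/* Modeled on i801_access(): refuse to run once firmware owns the device. */
static int driver_transfer(void)
{
	int ret = 0;

	pthread_mutex_lock(&acpi_lock);
	if (acpi_reserved)
		ret = -EBUSY;
	/* ... otherwise perform the SMBus transaction under the lock ... */
	pthread_mutex_unlock(&acpi_lock);
	return ret;
}

int main(void)
{
	printf("before handover: %d\n", driver_transfer());
	firmware_access();
	printf("after handover:  %d\n", driver_transfer());
	return 0;
}
```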
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c index aa5f01efd826..30ae35146723 100644 --- a/drivers/i2c/busses/i2c-octeon.c +++ b/drivers/i2c/busses/i2c-octeon.c | |||
| @@ -934,8 +934,15 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, | |||
| 934 | return result; | 934 | return result; |
| 935 | 935 | ||
| 936 | for (i = 0; i < length; i++) { | 936 | for (i = 0; i < length; i++) { |
| 937 | /* for the last byte TWSI_CTL_AAK must not be set */ | 937 | /* |
| 938 | if (i + 1 == length) | 938 | * For the last byte to receive TWSI_CTL_AAK must not be set. |
| 939 | * | ||
| 940 | * A special case is I2C_M_RECV_LEN where we don't know the | ||
| 941 | * additional length yet. If recv_len is set we assume we're | ||
| 942 | * not reading the final byte and therefore need to set | ||
| 943 | * TWSI_CTL_AAK. | ||
| 944 | */ | ||
| 945 | if ((i + 1 == length) && !(recv_len && i == 0)) | ||
| 939 | final_read = true; | 946 | final_read = true; |
| 940 | 947 | ||
| 941 | /* clear iflg to allow next event */ | 948 | /* clear iflg to allow next event */ |
| @@ -950,12 +957,8 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, | |||
| 950 | 957 | ||
| 951 | data[i] = octeon_i2c_data_read(i2c); | 958 | data[i] = octeon_i2c_data_read(i2c); |
| 952 | if (recv_len && i == 0) { | 959 | if (recv_len && i == 0) { |
| 953 | if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) { | 960 | if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) |
| 954 | dev_err(i2c->dev, | ||
| 955 | "%s: read len > I2C_SMBUS_BLOCK_MAX %d\n", | ||
| 956 | __func__, data[i]); | ||
| 957 | return -EPROTO; | 961 | return -EPROTO; |
| 958 | } | ||
| 959 | length += data[i]; | 962 | length += data[i]; |
| 960 | } | 963 | } |
| 961 | 964 | ||
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c index 6773cadf7c9f..26e7c5187a58 100644 --- a/drivers/i2c/muxes/i2c-mux-reg.c +++ b/drivers/i2c/muxes/i2c-mux-reg.c | |||
| @@ -260,6 +260,7 @@ static struct platform_driver i2c_mux_reg_driver = { | |||
| 260 | .remove = i2c_mux_reg_remove, | 260 | .remove = i2c_mux_reg_remove, |
| 261 | .driver = { | 261 | .driver = { |
| 262 | .name = "i2c-mux-reg", | 262 | .name = "i2c-mux-reg", |
| 263 | .of_match_table = of_match_ptr(i2c_mux_reg_of_match), | ||
| 263 | }, | 264 | }, |
| 264 | }; | 265 | }; |
| 265 | 266 | ||
diff --git a/drivers/iio/accel/st_accel_buffer.c b/drivers/iio/accel/st_accel_buffer.c index a1e642ee13d6..7fddc137e91e 100644 --- a/drivers/iio/accel/st_accel_buffer.c +++ b/drivers/iio/accel/st_accel_buffer.c | |||
| @@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = { | |||
| 91 | 91 | ||
| 92 | int st_accel_allocate_ring(struct iio_dev *indio_dev) | 92 | int st_accel_allocate_ring(struct iio_dev *indio_dev) |
| 93 | { | 93 | { |
| 94 | return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, | 94 | return iio_triggered_buffer_setup(indio_dev, NULL, |
| 95 | &st_sensors_trigger_handler, &st_accel_buffer_setup_ops); | 95 | &st_sensors_trigger_handler, &st_accel_buffer_setup_ops); |
| 96 | } | 96 | } |
| 97 | 97 | ||
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index dc73f2d85e6d..4d95bfc4786c 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c | |||
| @@ -741,6 +741,7 @@ static const struct iio_info accel_info = { | |||
| 741 | static const struct iio_trigger_ops st_accel_trigger_ops = { | 741 | static const struct iio_trigger_ops st_accel_trigger_ops = { |
| 742 | .owner = THIS_MODULE, | 742 | .owner = THIS_MODULE, |
| 743 | .set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE, | 743 | .set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE, |
| 744 | .validate_device = st_sensors_validate_device, | ||
| 744 | }; | 745 | }; |
| 745 | #define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops) | 746 | #define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops) |
| 746 | #else | 747 | #else |
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c index c55898543a47..f1693dbebb8a 100644 --- a/drivers/iio/common/st_sensors/st_sensors_buffer.c +++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c | |||
| @@ -57,31 +57,20 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p) | |||
| 57 | struct iio_poll_func *pf = p; | 57 | struct iio_poll_func *pf = p; |
| 58 | struct iio_dev *indio_dev = pf->indio_dev; | 58 | struct iio_dev *indio_dev = pf->indio_dev; |
| 59 | struct st_sensor_data *sdata = iio_priv(indio_dev); | 59 | struct st_sensor_data *sdata = iio_priv(indio_dev); |
| 60 | s64 timestamp; | ||
| 60 | 61 | ||
| 61 | /* If we have a status register, check if this IRQ came from us */ | 62 | /* If we do timestamping here, do it before reading the values */ |
| 62 | if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) { | 63 | if (sdata->hw_irq_trigger) |
| 63 | u8 status; | 64 | timestamp = sdata->hw_timestamp; |
| 64 | 65 | else | |
| 65 | len = sdata->tf->read_byte(&sdata->tb, sdata->dev, | 66 | timestamp = iio_get_time_ns(); |
| 66 | sdata->sensor_settings->drdy_irq.addr_stat_drdy, | ||
| 67 | &status); | ||
| 68 | if (len < 0) | ||
| 69 | dev_err(sdata->dev, "could not read channel status\n"); | ||
| 70 | |||
| 71 | /* | ||
| 72 | * If this was not caused by any channels on this sensor, | ||
| 73 | * return IRQ_NONE | ||
| 74 | */ | ||
| 75 | if (!(status & (u8)indio_dev->active_scan_mask[0])) | ||
| 76 | return IRQ_NONE; | ||
| 77 | } | ||
| 78 | 67 | ||
| 79 | len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data); | 68 | len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data); |
| 80 | if (len < 0) | 69 | if (len < 0) |
| 81 | goto st_sensors_get_buffer_element_error; | 70 | goto st_sensors_get_buffer_element_error; |
| 82 | 71 | ||
| 83 | iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data, | 72 | iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data, |
| 84 | pf->timestamp); | 73 | timestamp); |
| 85 | 74 | ||
| 86 | st_sensors_get_buffer_element_error: | 75 | st_sensors_get_buffer_element_error: |
| 87 | iio_trigger_notify_done(indio_dev->trig); | 76 | iio_trigger_notify_done(indio_dev->trig); |
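The handler now prefers a timestamp captured in the hard-IRQ top half (sdata->hw_timestamp) over one taken when the threaded handler finally runs, which keeps scheduling latency out of the recorded sample time. A condensed sketch of that split, with iio_get_time_ns() replaced by clock_gettime() purely for illustration:

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_ns(void)	/* stand-in for iio_get_time_ns() */
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

struct sensor_data {
	int hw_irq_trigger;	/* hardware data-ready trigger in use? */
	int64_t hw_timestamp;	/* captured in the top half */
};

/* Top half: grab the timestamp as close to the interrupt as possible. */
static void irq_top_half(struct sensor_data *sdata)
{
	sdata->hw_timestamp = now_ns();
}

/* Buffer handler: reuse the early timestamp instead of timestamping late. */
static int64_t sample_timestamp(struct sensor_data *sdata)
{
	return sdata->hw_irq_trigger ? sdata->hw_timestamp : now_ns();
}

int main(void)
{
	struct sensor_data sdata = { .hw_irq_trigger = 1 };

	irq_top_half(&sdata);
	printf("sample timestamp: %lld ns\n",
	       (long long)sample_timestamp(&sdata));
	return 0;
}
```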
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index dffe00692169..9e59c90f6a8d 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/drivers/iio/common/st_sensors/st_sensors_core.c | |||
| @@ -363,6 +363,11 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev, | |||
| 363 | if (err < 0) | 363 | if (err < 0) |
| 364 | return err; | 364 | return err; |
| 365 | 365 | ||
| 366 | /* Disable DRDY, this might still be enabled after reboot. */ | ||
| 367 | err = st_sensors_set_dataready_irq(indio_dev, false); | ||
| 368 | if (err < 0) | ||
| 369 | return err; | ||
| 370 | |||
| 366 | if (sdata->current_fullscale) { | 371 | if (sdata->current_fullscale) { |
| 367 | err = st_sensors_set_fullscale(indio_dev, | 372 | err = st_sensors_set_fullscale(indio_dev, |
| 368 | sdata->current_fullscale->num); | 373 | sdata->current_fullscale->num); |
| @@ -424,6 +429,9 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable) | |||
| 424 | else | 429 | else |
| 425 | drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2; | 430 | drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2; |
| 426 | 431 | ||
| 432 | /* Flag to the poll function that the hardware trigger is in use */ | ||
| 433 | sdata->hw_irq_trigger = enable; | ||
| 434 | |||
| 427 | /* Enable/Disable the interrupt generator for data ready. */ | 435 | /* Enable/Disable the interrupt generator for data ready. */ |
| 428 | err = st_sensors_write_data_with_mask(indio_dev, | 436 | err = st_sensors_write_data_with_mask(indio_dev, |
| 429 | sdata->sensor_settings->drdy_irq.addr, | 437 | sdata->sensor_settings->drdy_irq.addr, |
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c index da72279fcf99..296e4ff19ae8 100644 --- a/drivers/iio/common/st_sensors/st_sensors_trigger.c +++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c | |||
| @@ -17,6 +17,73 @@ | |||
| 17 | #include <linux/iio/common/st_sensors.h> | 17 | #include <linux/iio/common/st_sensors.h> |
| 18 | #include "st_sensors_core.h" | 18 | #include "st_sensors_core.h" |
| 19 | 19 | ||
| 20 | /** | ||
| 21 | * st_sensors_irq_handler() - top half of the IRQ-based triggers | ||
| 22 | * @irq: irq number | ||
| 23 | * @p: private handler data | ||
| 24 | */ | ||
| 25 | irqreturn_t st_sensors_irq_handler(int irq, void *p) | ||
| 26 | { | ||
| 27 | struct iio_trigger *trig = p; | ||
| 28 | struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig); | ||
| 29 | struct st_sensor_data *sdata = iio_priv(indio_dev); | ||
| 30 | |||
| 31 | /* Get the time stamp as close in time as possible */ | ||
| 32 | sdata->hw_timestamp = iio_get_time_ns(); | ||
| 33 | return IRQ_WAKE_THREAD; | ||
| 34 | } | ||
| 35 | |||
| 36 | /** | ||
| 37 | * st_sensors_irq_thread() - bottom half of the IRQ-based triggers | ||
| 38 | * @irq: irq number | ||
| 39 | * @p: private handler data | ||
| 40 | */ | ||
| 41 | irqreturn_t st_sensors_irq_thread(int irq, void *p) | ||
| 42 | { | ||
| 43 | struct iio_trigger *trig = p; | ||
| 44 | struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig); | ||
| 45 | struct st_sensor_data *sdata = iio_priv(indio_dev); | ||
| 46 | int ret; | ||
| 47 | |||
| 48 | /* | ||
| 49 | * If this trigger is backed by a hardware interrupt and we have a | ||
| 50 | * status register, check if this IRQ came from us | ||
| 51 | */ | ||
| 52 | if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) { | ||
| 53 | u8 status; | ||
| 54 | |||
| 55 | ret = sdata->tf->read_byte(&sdata->tb, sdata->dev, | ||
| 56 | sdata->sensor_settings->drdy_irq.addr_stat_drdy, | ||
| 57 | &status); | ||
| 58 | if (ret < 0) { | ||
| 59 | dev_err(sdata->dev, "could not read channel status\n"); | ||
| 60 | goto out_poll; | ||
| 61 | } | ||
| 62 | /* | ||
| 63 | * the lower bits of .active_scan_mask[0] are directly mapped | ||
| 64 | * to the channels on the sensor: either bit 0 for | ||
| 65 | * one-dimensional sensors, or e.g. x,y,z for accelerometers, | ||
| 66 | * gyroscopes or magnetometers. No sensor uses more than 3 | ||
| 67 | * channels, so cut the other status bits here. | ||
| 68 | */ | ||
| 69 | status &= 0x07; | ||
| 70 | |||
| 71 | /* | ||
| 72 | * If this was not caused by any channels on this sensor, | ||
| 73 | * return IRQ_NONE | ||
| 74 | */ | ||
| 75 | if (!indio_dev->active_scan_mask) | ||
| 76 | return IRQ_NONE; | ||
| 77 | if (!(status & (u8)indio_dev->active_scan_mask[0])) | ||
| 78 | return IRQ_NONE; | ||
| 79 | } | ||
| 80 | |||
| 81 | out_poll: | ||
| 82 | /* It's our IRQ: proceed to handle the register polling */ | ||
| 83 | iio_trigger_poll_chained(p); | ||
| 84 | return IRQ_HANDLED; | ||
| 85 | } | ||
| 86 | |||
| 20 | int st_sensors_allocate_trigger(struct iio_dev *indio_dev, | 87 | int st_sensors_allocate_trigger(struct iio_dev *indio_dev, |
| 21 | const struct iio_trigger_ops *trigger_ops) | 88 | const struct iio_trigger_ops *trigger_ops) |
| 22 | { | 89 | { |
| @@ -30,6 +97,10 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, | |||
| 30 | return -ENOMEM; | 97 | return -ENOMEM; |
| 31 | } | 98 | } |
| 32 | 99 | ||
| 100 | iio_trigger_set_drvdata(sdata->trig, indio_dev); | ||
| 101 | sdata->trig->ops = trigger_ops; | ||
| 102 | sdata->trig->dev.parent = sdata->dev; | ||
| 103 | |||
| 33 | irq = sdata->get_irq_data_ready(indio_dev); | 104 | irq = sdata->get_irq_data_ready(indio_dev); |
| 34 | irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); | 105 | irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq)); |
| 35 | /* | 106 | /* |
| @@ -77,9 +148,12 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, | |||
| 77 | sdata->sensor_settings->drdy_irq.addr_stat_drdy) | 148 | sdata->sensor_settings->drdy_irq.addr_stat_drdy) |
| 78 | irq_trig |= IRQF_SHARED; | 149 | irq_trig |= IRQF_SHARED; |
| 79 | 150 | ||
| 80 | err = request_threaded_irq(irq, | 151 | /* Let's create an interrupt thread masking the hard IRQ here */ |
| 81 | iio_trigger_generic_data_rdy_poll, | 152 | irq_trig |= IRQF_ONESHOT; |
| 82 | NULL, | 153 | |
| 154 | err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev), | ||
| 155 | st_sensors_irq_handler, | ||
| 156 | st_sensors_irq_thread, | ||
| 83 | irq_trig, | 157 | irq_trig, |
| 84 | sdata->trig->name, | 158 | sdata->trig->name, |
| 85 | sdata->trig); | 159 | sdata->trig); |
| @@ -88,10 +162,6 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, | |||
| 88 | goto iio_trigger_free; | 162 | goto iio_trigger_free; |
| 89 | } | 163 | } |
| 90 | 164 | ||
| 91 | iio_trigger_set_drvdata(sdata->trig, indio_dev); | ||
| 92 | sdata->trig->ops = trigger_ops; | ||
| 93 | sdata->trig->dev.parent = sdata->dev; | ||
| 94 | |||
| 95 | err = iio_trigger_register(sdata->trig); | 165 | err = iio_trigger_register(sdata->trig); |
| 96 | if (err < 0) { | 166 | if (err < 0) { |
| 97 | dev_err(&indio_dev->dev, "failed to register iio trigger.\n"); | 167 | dev_err(&indio_dev->dev, "failed to register iio trigger.\n"); |
| @@ -119,6 +189,18 @@ void st_sensors_deallocate_trigger(struct iio_dev *indio_dev) | |||
| 119 | } | 189 | } |
| 120 | EXPORT_SYMBOL(st_sensors_deallocate_trigger); | 190 | EXPORT_SYMBOL(st_sensors_deallocate_trigger); |
| 121 | 191 | ||
| 192 | int st_sensors_validate_device(struct iio_trigger *trig, | ||
| 193 | struct iio_dev *indio_dev) | ||
| 194 | { | ||
| 195 | struct iio_dev *indio = iio_trigger_get_drvdata(trig); | ||
| 196 | |||
| 197 | if (indio != indio_dev) | ||
| 198 | return -EINVAL; | ||
| 199 | |||
| 200 | return 0; | ||
| 201 | } | ||
| 202 | EXPORT_SYMBOL(st_sensors_validate_device); | ||
| 203 | |||
| 122 | MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>"); | 204 | MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>"); |
| 123 | MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger"); | 205 | MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger"); |
| 124 | MODULE_LICENSE("GPL v2"); | 206 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig index e63b957c985f..f7c71da42f15 100644 --- a/drivers/iio/dac/Kconfig +++ b/drivers/iio/dac/Kconfig | |||
| @@ -247,7 +247,7 @@ config MCP4922 | |||
| 247 | 247 | ||
| 248 | config STX104 | 248 | config STX104 |
| 249 | tristate "Apex Embedded Systems STX104 DAC driver" | 249 | tristate "Apex Embedded Systems STX104 DAC driver" |
| 250 | depends on X86 && ISA | 250 | depends on X86 && ISA_BUS_API |
| 251 | help | 251 | help |
| 252 | Say yes here to build support for the 2-channel DAC on the Apex | 252 | Say yes here to build support for the 2-channel DAC on the Apex |
| 253 | Embedded Systems STX104 integrated analog PC/104 card. The base port | 253 | Embedded Systems STX104 integrated analog PC/104 card. The base port |
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c index 948f600e7059..69bde5909854 100644 --- a/drivers/iio/dac/ad5592r-base.c +++ b/drivers/iio/dac/ad5592r-base.c | |||
| @@ -525,7 +525,7 @@ static int ad5592r_alloc_channels(struct ad5592r_state *st) | |||
| 525 | 525 | ||
| 526 | device_for_each_child_node(st->dev, child) { | 526 | device_for_each_child_node(st->dev, child) { |
| 527 | ret = fwnode_property_read_u32(child, "reg", ®); | 527 | ret = fwnode_property_read_u32(child, "reg", ®); |
| 528 | if (ret || reg > ARRAY_SIZE(st->channel_modes)) | 528 | if (ret || reg >= ARRAY_SIZE(st->channel_modes)) |
| 529 | continue; | 529 | continue; |
| 530 | 530 | ||
| 531 | ret = fwnode_property_read_u32(child, "adi,mode", &tmp); | 531 | ret = fwnode_property_read_u32(child, "adi,mode", &tmp); |
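The one-character fix above (`>` to `>=`) closes an off-by-one: a "reg" equal to ARRAY_SIZE(st->channel_modes) would have indexed one element past the end of the array. A minimal reproduction of the bounds check:

```c
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	unsigned int channel_modes[8] = { 0 };

	for (unsigned int reg = 6; reg <= 9; reg++) {
		/* Valid indexes are 0..ARRAY_SIZE-1, so '>=' is the right test. */
		if (reg >= ARRAY_SIZE(channel_modes)) {
			printf("reg %u: skipped (out of range)\n", reg);
			continue;
		}
		printf("reg %u: channel_modes[%u] is safe to use\n", reg, reg);
	}
	return 0;
}
```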
diff --git a/drivers/iio/gyro/st_gyro_buffer.c b/drivers/iio/gyro/st_gyro_buffer.c index d67b17b6a7aa..a5377044e42f 100644 --- a/drivers/iio/gyro/st_gyro_buffer.c +++ b/drivers/iio/gyro/st_gyro_buffer.c | |||
| @@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = { | |||
| 91 | 91 | ||
| 92 | int st_gyro_allocate_ring(struct iio_dev *indio_dev) | 92 | int st_gyro_allocate_ring(struct iio_dev *indio_dev) |
| 93 | { | 93 | { |
| 94 | return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, | 94 | return iio_triggered_buffer_setup(indio_dev, NULL, |
| 95 | &st_sensors_trigger_handler, &st_gyro_buffer_setup_ops); | 95 | &st_sensors_trigger_handler, &st_gyro_buffer_setup_ops); |
| 96 | } | 96 | } |
| 97 | 97 | ||
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c index 52a3c87c375c..a8012955a1f6 100644 --- a/drivers/iio/gyro/st_gyro_core.c +++ b/drivers/iio/gyro/st_gyro_core.c | |||
| @@ -409,6 +409,7 @@ static const struct iio_info gyro_info = { | |||
| 409 | static const struct iio_trigger_ops st_gyro_trigger_ops = { | 409 | static const struct iio_trigger_ops st_gyro_trigger_ops = { |
| 410 | .owner = THIS_MODULE, | 410 | .owner = THIS_MODULE, |
| 411 | .set_trigger_state = ST_GYRO_TRIGGER_SET_STATE, | 411 | .set_trigger_state = ST_GYRO_TRIGGER_SET_STATE, |
| 412 | .validate_device = st_sensors_validate_device, | ||
| 412 | }; | 413 | }; |
| 413 | #define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops) | 414 | #define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops) |
| 414 | #else | 415 | #else |
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c index 3be6d209a159..11535911a5c6 100644 --- a/drivers/iio/humidity/am2315.c +++ b/drivers/iio/humidity/am2315.c | |||
| @@ -165,10 +165,8 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p) | |||
| 165 | struct am2315_sensor_data sensor_data; | 165 | struct am2315_sensor_data sensor_data; |
| 166 | 166 | ||
| 167 | ret = am2315_read_data(data, &sensor_data); | 167 | ret = am2315_read_data(data, &sensor_data); |
| 168 | if (ret < 0) { | 168 | if (ret < 0) |
| 169 | mutex_unlock(&data->lock); | ||
| 170 | goto err; | 169 | goto err; |
| 171 | } | ||
| 172 | 170 | ||
| 173 | mutex_lock(&data->lock); | 171 | mutex_lock(&data->lock); |
| 174 | if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) { | 172 | if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) { |
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c index fa4767613173..a03832a5fc95 100644 --- a/drivers/iio/humidity/hdc100x.c +++ b/drivers/iio/humidity/hdc100x.c | |||
| @@ -55,7 +55,7 @@ static const struct { | |||
| 55 | }, | 55 | }, |
| 56 | { /* IIO_HUMIDITYRELATIVE channel */ | 56 | { /* IIO_HUMIDITYRELATIVE channel */ |
| 57 | .shift = 8, | 57 | .shift = 8, |
| 58 | .mask = 2, | 58 | .mask = 3, |
| 59 | }, | 59 | }, |
| 60 | }; | 60 | }; |
| 61 | 61 | ||
| @@ -164,14 +164,14 @@ static int hdc100x_get_measurement(struct hdc100x_data *data, | |||
| 164 | dev_err(&client->dev, "cannot read high byte measurement"); | 164 | dev_err(&client->dev, "cannot read high byte measurement"); |
| 165 | return ret; | 165 | return ret; |
| 166 | } | 166 | } |
| 167 | val = ret << 6; | 167 | val = ret << 8; |
| 168 | 168 | ||
| 169 | ret = i2c_smbus_read_byte(client); | 169 | ret = i2c_smbus_read_byte(client); |
| 170 | if (ret < 0) { | 170 | if (ret < 0) { |
| 171 | dev_err(&client->dev, "cannot read low byte measurement"); | 171 | dev_err(&client->dev, "cannot read low byte measurement"); |
| 172 | return ret; | 172 | return ret; |
| 173 | } | 173 | } |
| 174 | val |= ret >> 2; | 174 | val |= ret; |
| 175 | 175 | ||
| 176 | return val; | 176 | return val; |
| 177 | } | 177 | } |
| @@ -211,18 +211,18 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev, | |||
| 211 | return IIO_VAL_INT_PLUS_MICRO; | 211 | return IIO_VAL_INT_PLUS_MICRO; |
| 212 | case IIO_CHAN_INFO_SCALE: | 212 | case IIO_CHAN_INFO_SCALE: |
| 213 | if (chan->type == IIO_TEMP) { | 213 | if (chan->type == IIO_TEMP) { |
| 214 | *val = 165; | 214 | *val = 165000; |
| 215 | *val2 = 65536 >> 2; | 215 | *val2 = 65536; |
| 216 | return IIO_VAL_FRACTIONAL; | 216 | return IIO_VAL_FRACTIONAL; |
| 217 | } else { | 217 | } else { |
| 218 | *val = 0; | 218 | *val = 100; |
| 219 | *val2 = 10000; | 219 | *val2 = 65536; |
| 220 | return IIO_VAL_INT_PLUS_MICRO; | 220 | return IIO_VAL_FRACTIONAL; |
| 221 | } | 221 | } |
| 222 | break; | 222 | break; |
| 223 | case IIO_CHAN_INFO_OFFSET: | 223 | case IIO_CHAN_INFO_OFFSET: |
| 224 | *val = -3971; | 224 | *val = -15887; |
| 225 | *val2 = 879096; | 225 | *val2 = 515151; |
| 226 | return IIO_VAL_INT_PLUS_MICRO; | 226 | return IIO_VAL_INT_PLUS_MICRO; |
| 227 | default: | 227 | default: |
| 228 | return -EINVAL; | 228 | return -EINVAL; |
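The new constants follow from the HDC100x transfer functions given in the datasheet, T(°C) = raw/2^16 * 165 - 40 and RH(%) = raw/2^16 * 100, now applied to the full 16-bit register value (val = hi << 8 | lo) instead of a pre-shifted 14-bit one. The temperature scale becomes 165000/65536 m°C per LSB and the offset is -40000 m°C divided by that scale, which is where -15887.515151 comes from. A quick check of the arithmetic (the helper below is only for verification):

```c
#include <stdio.h>

int main(void)
{
	/* Temperature: T(m°C) = raw * 165000 / 65536 - 40000 */
	double temp_scale = 165000.0 / 65536.0;	    /* m°C per LSB */
	double temp_offset = -40000.0 / temp_scale; /* offset in raw LSBs */

	/* Relative humidity: RH(%) = raw * 100 / 65536 */
	double rh_scale = 100.0 / 65536.0;

	printf("temp scale  = %.6f m°C/LSB\n", temp_scale);
	printf("temp offset = %.6f LSB (patch uses -15887.515151)\n",
	       temp_offset);
	printf("rh scale    = %.6f %%/LSB\n", rh_scale);
	return 0;
}
```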
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c index 0bf92b06d7d8..b8a290ec984e 100644 --- a/drivers/iio/imu/bmi160/bmi160_core.c +++ b/drivers/iio/imu/bmi160/bmi160_core.c | |||
| @@ -209,11 +209,11 @@ static const struct bmi160_scale_item bmi160_scale_table[] = { | |||
| 209 | }; | 209 | }; |
| 210 | 210 | ||
| 211 | static const struct bmi160_odr bmi160_accel_odr[] = { | 211 | static const struct bmi160_odr bmi160_accel_odr[] = { |
| 212 | {0x01, 0, 78125}, | 212 | {0x01, 0, 781250}, |
| 213 | {0x02, 1, 5625}, | 213 | {0x02, 1, 562500}, |
| 214 | {0x03, 3, 125}, | 214 | {0x03, 3, 125000}, |
| 215 | {0x04, 6, 25}, | 215 | {0x04, 6, 250000}, |
| 216 | {0x05, 12, 5}, | 216 | {0x05, 12, 500000}, |
| 217 | {0x06, 25, 0}, | 217 | {0x06, 25, 0}, |
| 218 | {0x07, 50, 0}, | 218 | {0x07, 50, 0}, |
| 219 | {0x08, 100, 0}, | 219 | {0x08, 100, 0}, |
| @@ -229,7 +229,7 @@ static const struct bmi160_odr bmi160_gyro_odr[] = { | |||
| 229 | {0x08, 100, 0}, | 229 | {0x08, 100, 0}, |
| 230 | {0x09, 200, 0}, | 230 | {0x09, 200, 0}, |
| 231 | {0x0A, 400, 0}, | 231 | {0x0A, 400, 0}, |
| 232 | {0x0B, 8000, 0}, | 232 | {0x0B, 800, 0}, |
| 233 | {0x0C, 1600, 0}, | 233 | {0x0C, 1600, 0}, |
| 234 | {0x0D, 3200, 0}, | 234 | {0x0D, 3200, 0}, |
| 235 | }; | 235 | }; |
| @@ -364,8 +364,8 @@ int bmi160_set_odr(struct bmi160_data *data, enum bmi160_sensor_type t, | |||
| 364 | 364 | ||
| 365 | return regmap_update_bits(data->regmap, | 365 | return regmap_update_bits(data->regmap, |
| 366 | bmi160_regs[t].config, | 366 | bmi160_regs[t].config, |
| 367 | bmi160_odr_table[t].tbl[i].bits, | 367 | bmi160_regs[t].config_odr_mask, |
| 368 | bmi160_regs[t].config_odr_mask); | 368 | bmi160_odr_table[t].tbl[i].bits); |
| 369 | } | 369 | } |
| 370 | 370 | ||
| 371 | static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t, | 371 | static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t, |
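Two fixes are bundled in this hunk: the accelerometer ODR table stores sub-hertz rates as an integer plus a microhertz remainder, so 0.78125 Hz is {0x01, 0, 781250} rather than 78125, and regmap_update_bits() now gets its arguments in the documented order, regmap_update_bits(map, reg, mask, val). The call performs a masked read-modify-write; a standalone sketch of that semantic and of what the swapped arguments used to do:

```c
#include <stdio.h>

/* The masked read-modify-write that regmap_update_bits() performs. */
static unsigned int update_bits(unsigned int reg_val, unsigned int mask,
				unsigned int val)
{
	return (reg_val & ~mask) | (val & mask);
}

int main(void)
{
	unsigned int config = 0xA5;	/* current register contents */
	unsigned int odr_mask = 0x0F;	/* e.g. config_odr_mask */
	unsigned int odr_bits = 0x08;	/* e.g. the 100 Hz setting */

	/* Correct order: mask first, then the new value -> 0xA8. */
	printf("ok:      0x%02X\n", update_bits(config, odr_mask, odr_bits));
	/* Swapped arguments (the old bug) leave the wrong bits set -> 0xAD. */
	printf("swapped: 0x%02X\n", update_bits(config, odr_bits, odr_mask));
	return 0;
}
```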
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index ae2806aafb72..0c52dfe64977 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c | |||
| @@ -210,22 +210,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig, | |||
| 210 | 210 | ||
| 211 | /* Prevent the module from being removed whilst attached to a trigger */ | 211 | /* Prevent the module from being removed whilst attached to a trigger */ |
| 212 | __module_get(pf->indio_dev->info->driver_module); | 212 | __module_get(pf->indio_dev->info->driver_module); |
| 213 | |||
| 214 | /* Get irq number */ | ||
| 213 | pf->irq = iio_trigger_get_irq(trig); | 215 | pf->irq = iio_trigger_get_irq(trig); |
| 216 | if (pf->irq < 0) | ||
| 217 | goto out_put_module; | ||
| 218 | |||
| 219 | /* Request irq */ | ||
| 214 | ret = request_threaded_irq(pf->irq, pf->h, pf->thread, | 220 | ret = request_threaded_irq(pf->irq, pf->h, pf->thread, |
| 215 | pf->type, pf->name, | 221 | pf->type, pf->name, |
| 216 | pf); | 222 | pf); |
| 217 | if (ret < 0) { | 223 | if (ret < 0) |
| 218 | module_put(pf->indio_dev->info->driver_module); | 224 | goto out_put_irq; |
| 219 | return ret; | ||
| 220 | } | ||
| 221 | 225 | ||
| 226 | /* Enable trigger in driver */ | ||
| 222 | if (trig->ops && trig->ops->set_trigger_state && notinuse) { | 227 | if (trig->ops && trig->ops->set_trigger_state && notinuse) { |
| 223 | ret = trig->ops->set_trigger_state(trig, true); | 228 | ret = trig->ops->set_trigger_state(trig, true); |
| 224 | if (ret < 0) | 229 | if (ret < 0) |
| 225 | module_put(pf->indio_dev->info->driver_module); | 230 | goto out_free_irq; |
| 226 | } | 231 | } |
| 227 | 232 | ||
| 228 | return ret; | 233 | return ret; |
| 234 | |||
| 235 | out_free_irq: | ||
| 236 | free_irq(pf->irq, pf); | ||
| 237 | out_put_irq: | ||
| 238 | iio_trigger_put_irq(trig, pf->irq); | ||
| 239 | out_put_module: | ||
| 240 | module_put(pf->indio_dev->info->driver_module); | ||
| 241 | return ret; | ||
| 229 | } | 242 | } |
| 230 | 243 | ||
| 231 | static int iio_trigger_detach_poll_func(struct iio_trigger *trig, | 244 | static int iio_trigger_detach_poll_func(struct iio_trigger *trig, |
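The rewritten attach path replaces ad-hoc module_put() calls with a single unwind ladder, so each failure point releases exactly what was acquired before it, in reverse order. A compact sketch of that goto-based cleanup idiom; the resource names here are placeholders, not the IIO core's symbols:

```c
#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

/* Each later failure unwinds everything acquired before it, in reverse order. */
static int attach(int fail_at)
{
	int ret;

	ret = acquire("module ref");
	if (ret < 0)
		return ret;

	ret = fail_at == 1 ? -1 : acquire("irq number");
	if (ret < 0)
		goto out_put_module;

	ret = fail_at == 2 ? -1 : acquire("irq handler");
	if (ret < 0)
		goto out_put_irq;

	ret = fail_at == 3 ? -1 : acquire("trigger state");
	if (ret < 0)
		goto out_free_irq;

	return 0;

out_free_irq:
	release("irq handler");
out_put_irq:
	release("irq number");
out_put_module:
	release("module ref");
	return ret;
}

int main(void)
{
	return attach(3) ? 1 : 0;
}
```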
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c index b4dbb3912977..651d57b8abbf 100644 --- a/drivers/iio/light/apds9960.c +++ b/drivers/iio/light/apds9960.c | |||
| @@ -1011,6 +1011,7 @@ static int apds9960_probe(struct i2c_client *client, | |||
| 1011 | 1011 | ||
| 1012 | iio_device_attach_buffer(indio_dev, buffer); | 1012 | iio_device_attach_buffer(indio_dev, buffer); |
| 1013 | 1013 | ||
| 1014 | indio_dev->dev.parent = &client->dev; | ||
| 1014 | indio_dev->info = &apds9960_info; | 1015 | indio_dev->info = &apds9960_info; |
| 1015 | indio_dev->name = APDS9960_DRV_NAME; | 1016 | indio_dev->name = APDS9960_DRV_NAME; |
| 1016 | indio_dev->channels = apds9960_channels; | 1017 | indio_dev->channels = apds9960_channels; |
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c index 72b364e4aa72..b54dcba05a82 100644 --- a/drivers/iio/light/bh1780.c +++ b/drivers/iio/light/bh1780.c | |||
| @@ -84,7 +84,7 @@ static int bh1780_debugfs_reg_access(struct iio_dev *indio_dev, | |||
| 84 | int ret; | 84 | int ret; |
| 85 | 85 | ||
| 86 | if (!readval) | 86 | if (!readval) |
| 87 | bh1780_write(bh1780, (u8)reg, (u8)writeval); | 87 | return bh1780_write(bh1780, (u8)reg, (u8)writeval); |
| 88 | 88 | ||
| 89 | ret = bh1780_read(bh1780, (u8)reg); | 89 | ret = bh1780_read(bh1780, (u8)reg); |
| 90 | if (ret < 0) | 90 | if (ret < 0) |
| @@ -187,7 +187,7 @@ static int bh1780_probe(struct i2c_client *client, | |||
| 187 | 187 | ||
| 188 | indio_dev->dev.parent = &client->dev; | 188 | indio_dev->dev.parent = &client->dev; |
| 189 | indio_dev->info = &bh1780_info; | 189 | indio_dev->info = &bh1780_info; |
| 190 | indio_dev->name = id->name; | 190 | indio_dev->name = "bh1780"; |
| 191 | indio_dev->channels = bh1780_channels; | 191 | indio_dev->channels = bh1780_channels; |
| 192 | indio_dev->num_channels = ARRAY_SIZE(bh1780_channels); | 192 | indio_dev->num_channels = ARRAY_SIZE(bh1780_channels); |
| 193 | indio_dev->modes = INDIO_DIRECT_MODE; | 193 | indio_dev->modes = INDIO_DIRECT_MODE; |
| @@ -226,7 +226,8 @@ static int bh1780_remove(struct i2c_client *client) | |||
| 226 | static int bh1780_runtime_suspend(struct device *dev) | 226 | static int bh1780_runtime_suspend(struct device *dev) |
| 227 | { | 227 | { |
| 228 | struct i2c_client *client = to_i2c_client(dev); | 228 | struct i2c_client *client = to_i2c_client(dev); |
| 229 | struct bh1780_data *bh1780 = i2c_get_clientdata(client); | 229 | struct iio_dev *indio_dev = i2c_get_clientdata(client); |
| 230 | struct bh1780_data *bh1780 = iio_priv(indio_dev); | ||
| 230 | int ret; | 231 | int ret; |
| 231 | 232 | ||
| 232 | ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF); | 233 | ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF); |
| @@ -241,7 +242,8 @@ static int bh1780_runtime_suspend(struct device *dev) | |||
| 241 | static int bh1780_runtime_resume(struct device *dev) | 242 | static int bh1780_runtime_resume(struct device *dev) |
| 242 | { | 243 | { |
| 243 | struct i2c_client *client = to_i2c_client(dev); | 244 | struct i2c_client *client = to_i2c_client(dev); |
| 244 | struct bh1780_data *bh1780 = i2c_get_clientdata(client); | 245 | struct iio_dev *indio_dev = i2c_get_clientdata(client); |
| 246 | struct bh1780_data *bh1780 = iio_priv(indio_dev); | ||
| 245 | int ret; | 247 | int ret; |
| 246 | 248 | ||
| 247 | ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON); | 249 | ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON); |
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c index e01e58a9bd14..f17cb2ea18f5 100644 --- a/drivers/iio/light/max44000.c +++ b/drivers/iio/light/max44000.c | |||
| @@ -147,7 +147,6 @@ static const struct iio_chan_spec max44000_channels[] = { | |||
| 147 | { | 147 | { |
| 148 | .type = IIO_PROXIMITY, | 148 | .type = IIO_PROXIMITY, |
| 149 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), | 149 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), |
| 150 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), | ||
| 151 | .scan_index = MAX44000_SCAN_INDEX_PRX, | 150 | .scan_index = MAX44000_SCAN_INDEX_PRX, |
| 152 | .scan_type = { | 151 | .scan_type = { |
| 153 | .sign = 'u', | 152 | .sign = 'u', |
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c index ecd3bd0a9769..0a9e8fadfa9d 100644 --- a/drivers/iio/magnetometer/st_magn_buffer.c +++ b/drivers/iio/magnetometer/st_magn_buffer.c | |||
| @@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = { | |||
| 82 | 82 | ||
| 83 | int st_magn_allocate_ring(struct iio_dev *indio_dev) | 83 | int st_magn_allocate_ring(struct iio_dev *indio_dev) |
| 84 | { | 84 | { |
| 85 | return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, | 85 | return iio_triggered_buffer_setup(indio_dev, NULL, |
| 86 | &st_sensors_trigger_handler, &st_magn_buffer_setup_ops); | 86 | &st_sensors_trigger_handler, &st_magn_buffer_setup_ops); |
| 87 | } | 87 | } |
| 88 | 88 | ||
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c index 62036d2a9956..8250fc322c56 100644 --- a/drivers/iio/magnetometer/st_magn_core.c +++ b/drivers/iio/magnetometer/st_magn_core.c | |||
| @@ -572,6 +572,7 @@ static const struct iio_info magn_info = { | |||
| 572 | static const struct iio_trigger_ops st_magn_trigger_ops = { | 572 | static const struct iio_trigger_ops st_magn_trigger_ops = { |
| 573 | .owner = THIS_MODULE, | 573 | .owner = THIS_MODULE, |
| 574 | .set_trigger_state = ST_MAGN_TRIGGER_SET_STATE, | 574 | .set_trigger_state = ST_MAGN_TRIGGER_SET_STATE, |
| 575 | .validate_device = st_sensors_validate_device, | ||
| 575 | }; | 576 | }; |
| 576 | #define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops) | 577 | #define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops) |
| 577 | #else | 578 | #else |
diff --git a/drivers/iio/pressure/bmp280.c b/drivers/iio/pressure/bmp280.c index 2f1498e12bb2..724452d61846 100644 --- a/drivers/iio/pressure/bmp280.c +++ b/drivers/iio/pressure/bmp280.c | |||
| @@ -879,8 +879,8 @@ static int bmp280_probe(struct i2c_client *client, | |||
| 879 | if (ret < 0) | 879 | if (ret < 0) |
| 880 | return ret; | 880 | return ret; |
| 881 | if (chip_id != id->driver_data) { | 881 | if (chip_id != id->driver_data) { |
| 882 | dev_err(&client->dev, "bad chip id. expected %x got %x\n", | 882 | dev_err(&client->dev, "bad chip id. expected %lx got %x\n", |
| 883 | BMP280_CHIP_ID, chip_id); | 883 | id->driver_data, chip_id); |
| 884 | return -EINVAL; | 884 | return -EINVAL; |
| 885 | } | 885 | } |
| 886 | 886 | ||
diff --git a/drivers/iio/pressure/st_pressure_buffer.c b/drivers/iio/pressure/st_pressure_buffer.c index 2ff53f222352..99468d0a64e7 100644 --- a/drivers/iio/pressure/st_pressure_buffer.c +++ b/drivers/iio/pressure/st_pressure_buffer.c | |||
| @@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_press_buffer_setup_ops = { | |||
| 82 | 82 | ||
| 83 | int st_press_allocate_ring(struct iio_dev *indio_dev) | 83 | int st_press_allocate_ring(struct iio_dev *indio_dev) |
| 84 | { | 84 | { |
| 85 | return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time, | 85 | return iio_triggered_buffer_setup(indio_dev, NULL, |
| 86 | &st_sensors_trigger_handler, &st_press_buffer_setup_ops); | 86 | &st_sensors_trigger_handler, &st_press_buffer_setup_ops); |
| 87 | } | 87 | } |
| 88 | 88 | ||
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 9e9b72a8f18f..92a118c3c4ac 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c | |||
| @@ -28,15 +28,21 @@ | |||
| 28 | #include <linux/iio/common/st_sensors.h> | 28 | #include <linux/iio/common/st_sensors.h> |
| 29 | #include "st_pressure.h" | 29 | #include "st_pressure.h" |
| 30 | 30 | ||
| 31 | #define MCELSIUS_PER_CELSIUS 1000 | ||
| 32 | |||
| 33 | /* Default pressure sensitivity */ | ||
| 31 | #define ST_PRESS_LSB_PER_MBAR 4096UL | 34 | #define ST_PRESS_LSB_PER_MBAR 4096UL |
| 32 | #define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \ | 35 | #define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \ |
| 33 | ST_PRESS_LSB_PER_MBAR) | 36 | ST_PRESS_LSB_PER_MBAR) |
| 37 | |||
| 38 | /* Default temperature sensitivity */ | ||
| 34 | #define ST_PRESS_LSB_PER_CELSIUS 480UL | 39 | #define ST_PRESS_LSB_PER_CELSIUS 480UL |
| 35 | #define ST_PRESS_CELSIUS_NANO_SCALE (1000000000UL / \ | 40 | #define ST_PRESS_MILLI_CELSIUS_OFFSET 42500UL |
| 36 | ST_PRESS_LSB_PER_CELSIUS) | 41 | |
| 37 | #define ST_PRESS_NUMBER_DATA_CHANNELS 1 | 42 | #define ST_PRESS_NUMBER_DATA_CHANNELS 1 |
| 38 | 43 | ||
| 39 | /* FULLSCALE */ | 44 | /* FULLSCALE */ |
| 45 | #define ST_PRESS_FS_AVL_1100MB 1100 | ||
| 40 | #define ST_PRESS_FS_AVL_1260MB 1260 | 46 | #define ST_PRESS_FS_AVL_1260MB 1260 |
| 41 | 47 | ||
| 42 | #define ST_PRESS_1_OUT_XL_ADDR 0x28 | 48 | #define ST_PRESS_1_OUT_XL_ADDR 0x28 |
| @@ -54,9 +60,6 @@ | |||
| 54 | #define ST_PRESS_LPS331AP_PW_MASK 0x80 | 60 | #define ST_PRESS_LPS331AP_PW_MASK 0x80 |
| 55 | #define ST_PRESS_LPS331AP_FS_ADDR 0x23 | 61 | #define ST_PRESS_LPS331AP_FS_ADDR 0x23 |
| 56 | #define ST_PRESS_LPS331AP_FS_MASK 0x30 | 62 | #define ST_PRESS_LPS331AP_FS_MASK 0x30 |
| 57 | #define ST_PRESS_LPS331AP_FS_AVL_1260_VAL 0x00 | ||
| 58 | #define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE | ||
| 59 | #define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE | ||
| 60 | #define ST_PRESS_LPS331AP_BDU_ADDR 0x20 | 63 | #define ST_PRESS_LPS331AP_BDU_ADDR 0x20 |
| 61 | #define ST_PRESS_LPS331AP_BDU_MASK 0x04 | 64 | #define ST_PRESS_LPS331AP_BDU_MASK 0x04 |
| 62 | #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22 | 65 | #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22 |
| @@ -67,9 +70,14 @@ | |||
| 67 | #define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22 | 70 | #define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22 |
| 68 | #define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40 | 71 | #define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40 |
| 69 | #define ST_PRESS_LPS331AP_MULTIREAD_BIT true | 72 | #define ST_PRESS_LPS331AP_MULTIREAD_BIT true |
| 70 | #define ST_PRESS_LPS331AP_TEMP_OFFSET 42500 | ||
| 71 | 73 | ||
| 72 | /* CUSTOM VALUES FOR LPS001WP SENSOR */ | 74 | /* CUSTOM VALUES FOR LPS001WP SENSOR */ |
| 75 | |||
| 76 | /* LPS001WP pressure resolution */ | ||
| 77 | #define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL | ||
| 78 | /* LPS001WP temperature resolution */ | ||
| 79 | #define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL | ||
| 80 | |||
| 73 | #define ST_PRESS_LPS001WP_WAI_EXP 0xba | 81 | #define ST_PRESS_LPS001WP_WAI_EXP 0xba |
| 74 | #define ST_PRESS_LPS001WP_ODR_ADDR 0x20 | 82 | #define ST_PRESS_LPS001WP_ODR_ADDR 0x20 |
| 75 | #define ST_PRESS_LPS001WP_ODR_MASK 0x30 | 83 | #define ST_PRESS_LPS001WP_ODR_MASK 0x30 |
| @@ -78,6 +86,8 @@ | |||
| 78 | #define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03 | 86 | #define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03 |
| 79 | #define ST_PRESS_LPS001WP_PW_ADDR 0x20 | 87 | #define ST_PRESS_LPS001WP_PW_ADDR 0x20 |
| 80 | #define ST_PRESS_LPS001WP_PW_MASK 0x40 | 88 | #define ST_PRESS_LPS001WP_PW_MASK 0x40 |
| 89 | #define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \ | ||
| 90 | (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR) | ||
| 81 | #define ST_PRESS_LPS001WP_BDU_ADDR 0x20 | 91 | #define ST_PRESS_LPS001WP_BDU_ADDR 0x20 |
| 82 | #define ST_PRESS_LPS001WP_BDU_MASK 0x04 | 92 | #define ST_PRESS_LPS001WP_BDU_MASK 0x04 |
| 83 | #define ST_PRESS_LPS001WP_MULTIREAD_BIT true | 93 | #define ST_PRESS_LPS001WP_MULTIREAD_BIT true |
| @@ -94,11 +104,6 @@ | |||
| 94 | #define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04 | 104 | #define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04 |
| 95 | #define ST_PRESS_LPS25H_PW_ADDR 0x20 | 105 | #define ST_PRESS_LPS25H_PW_ADDR 0x20 |
| 96 | #define ST_PRESS_LPS25H_PW_MASK 0x80 | 106 | #define ST_PRESS_LPS25H_PW_MASK 0x80 |
| 97 | #define ST_PRESS_LPS25H_FS_ADDR 0x00 | ||
| 98 | #define ST_PRESS_LPS25H_FS_MASK 0x00 | ||
| 99 | #define ST_PRESS_LPS25H_FS_AVL_1260_VAL 0x00 | ||
| 100 | #define ST_PRESS_LPS25H_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE | ||
| 101 | #define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE | ||
| 102 | #define ST_PRESS_LPS25H_BDU_ADDR 0x20 | 107 | #define ST_PRESS_LPS25H_BDU_ADDR 0x20 |
| 103 | #define ST_PRESS_LPS25H_BDU_MASK 0x04 | 108 | #define ST_PRESS_LPS25H_BDU_MASK 0x04 |
| 104 | #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23 | 109 | #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23 |
| @@ -109,7 +114,6 @@ | |||
| 109 | #define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22 | 114 | #define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22 |
| 110 | #define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40 | 115 | #define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40 |
| 111 | #define ST_PRESS_LPS25H_MULTIREAD_BIT true | 116 | #define ST_PRESS_LPS25H_MULTIREAD_BIT true |
| 112 | #define ST_PRESS_LPS25H_TEMP_OFFSET 42500 | ||
| 113 | #define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28 | 117 | #define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28 |
| 114 | #define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b | 118 | #define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b |
| 115 | 119 | ||
| @@ -161,7 +165,9 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = { | |||
| 161 | .storagebits = 16, | 165 | .storagebits = 16, |
| 162 | .endianness = IIO_LE, | 166 | .endianness = IIO_LE, |
| 163 | }, | 167 | }, |
| 164 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), | 168 | .info_mask_separate = |
| 169 | BIT(IIO_CHAN_INFO_RAW) | | ||
| 170 | BIT(IIO_CHAN_INFO_SCALE), | ||
| 165 | .modified = 0, | 171 | .modified = 0, |
| 166 | }, | 172 | }, |
| 167 | { | 173 | { |
| @@ -177,7 +183,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = { | |||
| 177 | }, | 183 | }, |
| 178 | .info_mask_separate = | 184 | .info_mask_separate = |
| 179 | BIT(IIO_CHAN_INFO_RAW) | | 185 | BIT(IIO_CHAN_INFO_RAW) | |
| 180 | BIT(IIO_CHAN_INFO_OFFSET), | 186 | BIT(IIO_CHAN_INFO_SCALE), |
| 181 | .modified = 0, | 187 | .modified = 0, |
| 182 | }, | 188 | }, |
| 183 | IIO_CHAN_SOFT_TIMESTAMP(1) | 189 | IIO_CHAN_SOFT_TIMESTAMP(1) |
| @@ -212,11 +218,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { | |||
| 212 | .addr = ST_PRESS_LPS331AP_FS_ADDR, | 218 | .addr = ST_PRESS_LPS331AP_FS_ADDR, |
| 213 | .mask = ST_PRESS_LPS331AP_FS_MASK, | 219 | .mask = ST_PRESS_LPS331AP_FS_MASK, |
| 214 | .fs_avl = { | 220 | .fs_avl = { |
| 221 | /* | ||
| 222 | * Pressure and temperature sensitivity values | ||
| 223 | * as defined in table 3 of LPS331AP datasheet. | ||
| 224 | */ | ||
| 215 | [0] = { | 225 | [0] = { |
| 216 | .num = ST_PRESS_FS_AVL_1260MB, | 226 | .num = ST_PRESS_FS_AVL_1260MB, |
| 217 | .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL, | 227 | .gain = ST_PRESS_KPASCAL_NANO_SCALE, |
| 218 | .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN, | 228 | .gain2 = ST_PRESS_LSB_PER_CELSIUS, |
| 219 | .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN, | ||
| 220 | }, | 229 | }, |
| 221 | }, | 230 | }, |
| 222 | }, | 231 | }, |
| @@ -261,7 +270,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { | |||
| 261 | .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, | 270 | .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, |
| 262 | }, | 271 | }, |
| 263 | .fs = { | 272 | .fs = { |
| 264 | .addr = 0, | 273 | .fs_avl = { |
| 274 | /* | ||
| 275 | * Pressure and temperature resolution values | ||
| 276 | * as defined in table 3 of LPS001WP datasheet. | ||
| 277 | */ | ||
| 278 | [0] = { | ||
| 279 | .num = ST_PRESS_FS_AVL_1100MB, | ||
| 280 | .gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN, | ||
| 281 | .gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS, | ||
| 282 | }, | ||
| 283 | }, | ||
| 265 | }, | 284 | }, |
| 266 | .bdu = { | 285 | .bdu = { |
| 267 | .addr = ST_PRESS_LPS001WP_BDU_ADDR, | 286 | .addr = ST_PRESS_LPS001WP_BDU_ADDR, |
| @@ -298,14 +317,15 @@ static const struct st_sensor_settings st_press_sensors_settings[] = { | |||
| 298 | .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, | 317 | .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE, |
| 299 | }, | 318 | }, |
| 300 | .fs = { | 319 | .fs = { |
| 301 | .addr = ST_PRESS_LPS25H_FS_ADDR, | ||
| 302 | .mask = ST_PRESS_LPS25H_FS_MASK, | ||
| 303 | .fs_avl = { | 320 | .fs_avl = { |
| 321 | /* | ||
| 322 | * Pressure and temperature sensitivity values | ||
| 323 | * as defined in table 3 of LPS25H datasheet. | ||
| 324 | */ | ||
| 304 | [0] = { | 325 | [0] = { |
| 305 | .num = ST_PRESS_FS_AVL_1260MB, | 326 | .num = ST_PRESS_FS_AVL_1260MB, |
| 306 | .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL, | 327 | .gain = ST_PRESS_KPASCAL_NANO_SCALE, |
| 307 | .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN, | 328 | .gain2 = ST_PRESS_LSB_PER_CELSIUS, |
| 308 | .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN, | ||
| 309 | }, | 329 | }, |
| 310 | }, | 330 | }, |
| 311 | }, | 331 | }, |
| @@ -364,26 +384,26 @@ static int st_press_read_raw(struct iio_dev *indio_dev, | |||
| 364 | 384 | ||
| 365 | return IIO_VAL_INT; | 385 | return IIO_VAL_INT; |
| 366 | case IIO_CHAN_INFO_SCALE: | 386 | case IIO_CHAN_INFO_SCALE: |
| 367 | *val = 0; | ||
| 368 | |||
| 369 | switch (ch->type) { | 387 | switch (ch->type) { |
| 370 | case IIO_PRESSURE: | 388 | case IIO_PRESSURE: |
| 389 | *val = 0; | ||
| 371 | *val2 = press_data->current_fullscale->gain; | 390 | *val2 = press_data->current_fullscale->gain; |
| 372 | break; | 391 | return IIO_VAL_INT_PLUS_NANO; |
| 373 | case IIO_TEMP: | 392 | case IIO_TEMP: |
| 393 | *val = MCELSIUS_PER_CELSIUS; | ||
| 374 | *val2 = press_data->current_fullscale->gain2; | 394 | *val2 = press_data->current_fullscale->gain2; |
| 375 | break; | 395 | return IIO_VAL_FRACTIONAL; |
| 376 | default: | 396 | default: |
| 377 | err = -EINVAL; | 397 | err = -EINVAL; |
| 378 | goto read_error; | 398 | goto read_error; |
| 379 | } | 399 | } |
| 380 | 400 | ||
| 381 | return IIO_VAL_INT_PLUS_NANO; | ||
| 382 | case IIO_CHAN_INFO_OFFSET: | 401 | case IIO_CHAN_INFO_OFFSET: |
| 383 | switch (ch->type) { | 402 | switch (ch->type) { |
| 384 | case IIO_TEMP: | 403 | case IIO_TEMP: |
| 385 | *val = 425; | 404 | *val = ST_PRESS_MILLI_CELSIUS_OFFSET * |
| 386 | *val2 = 10; | 405 | press_data->current_fullscale->gain2; |
| 406 | *val2 = MCELSIUS_PER_CELSIUS; | ||
| 387 | break; | 407 | break; |
| 388 | default: | 408 | default: |
| 389 | err = -EINVAL; | 409 | err = -EINVAL; |
| @@ -425,6 +445,7 @@ static const struct iio_info press_info = { | |||
| 425 | static const struct iio_trigger_ops st_press_trigger_ops = { | 445 | static const struct iio_trigger_ops st_press_trigger_ops = { |
| 426 | .owner = THIS_MODULE, | 446 | .owner = THIS_MODULE, |
| 427 | .set_trigger_state = ST_PRESS_TRIGGER_SET_STATE, | 447 | .set_trigger_state = ST_PRESS_TRIGGER_SET_STATE, |
| 448 | .validate_device = st_sensors_validate_device, | ||
| 428 | }; | 449 | }; |
| 429 | #define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops) | 450 | #define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops) |
| 430 | #else | 451 | #else |
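
The st_pressure hunks above stop hard-coding a 42.5 degC temperature offset per chip and instead export a SCALE (reported as MCELSIUS_PER_CELSIUS over the per-device LSB-per-degC gain) together with a matching OFFSET, leaving the final arithmetic to userspace. A minimal userspace sketch of how such attributes are conventionally combined under the IIO sysfs ABI; the iio:device0 path is an assumption, and the attribute names depend on which channels the probed sensor actually exposes:

/*
 * Hypothetical reader, not part of the patch: it applies the IIO ABI rule
 *   processed = (raw + offset) * scale
 * which for temperature channels yields milli-degrees Celsius.
 */
#include <stdio.h>
#include <stdlib.h>

static double read_attr(const char *base, const char *name)
{
	char path[256];
	double v;
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", base, name);
	f = fopen(path, "r");
	if (!f || fscanf(f, "%lf", &v) != 1) {
		fprintf(stderr, "cannot read %s\n", path);
		exit(EXIT_FAILURE);
	}
	fclose(f);
	return v;
}

int main(void)
{
	const char *base = "/sys/bus/iio/devices/iio:device0"; /* assumed */
	double raw = read_attr(base, "in_temp_raw");
	double offset = read_attr(base, "in_temp_offset");
	double scale = read_attr(base, "in_temp_scale");
	double milli_c = (raw + offset) * scale;

	printf("temperature: %.3f degC\n", milli_c / 1000.0);
	return 0;
}

For instance, with the 64 LSB/degC resolution defined above for the LPS001WP, in_temp_scale would read back as 1000/64 = 15.625.
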
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c index f4d29d5dbd5f..e2f926cdcad2 100644 --- a/drivers/iio/proximity/as3935.c +++ b/drivers/iio/proximity/as3935.c | |||
| @@ -64,6 +64,7 @@ struct as3935_state { | |||
| 64 | struct delayed_work work; | 64 | struct delayed_work work; |
| 65 | 65 | ||
| 66 | u32 tune_cap; | 66 | u32 tune_cap; |
| 67 | u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */ | ||
| 67 | u8 buf[2] ____cacheline_aligned; | 68 | u8 buf[2] ____cacheline_aligned; |
| 68 | }; | 69 | }; |
| 69 | 70 | ||
| @@ -72,7 +73,8 @@ static const struct iio_chan_spec as3935_channels[] = { | |||
| 72 | .type = IIO_PROXIMITY, | 73 | .type = IIO_PROXIMITY, |
| 73 | .info_mask_separate = | 74 | .info_mask_separate = |
| 74 | BIT(IIO_CHAN_INFO_RAW) | | 75 | BIT(IIO_CHAN_INFO_RAW) | |
| 75 | BIT(IIO_CHAN_INFO_PROCESSED), | 76 | BIT(IIO_CHAN_INFO_PROCESSED) | |
| 77 | BIT(IIO_CHAN_INFO_SCALE), | ||
| 76 | .scan_index = 0, | 78 | .scan_index = 0, |
| 77 | .scan_type = { | 79 | .scan_type = { |
| 78 | .sign = 'u', | 80 | .sign = 'u', |
| @@ -181,7 +183,12 @@ static int as3935_read_raw(struct iio_dev *indio_dev, | |||
| 181 | /* storm out of range */ | 183 | /* storm out of range */ |
| 182 | if (*val == AS3935_DATA_MASK) | 184 | if (*val == AS3935_DATA_MASK) |
| 183 | return -EINVAL; | 185 | return -EINVAL; |
| 184 | *val *= 1000; | 186 | |
| 187 | if (m == IIO_CHAN_INFO_PROCESSED) | ||
| 188 | *val *= 1000; | ||
| 189 | break; | ||
| 190 | case IIO_CHAN_INFO_SCALE: | ||
| 191 | *val = 1000; | ||
| 185 | break; | 192 | break; |
| 186 | default: | 193 | default: |
| 187 | return -EINVAL; | 194 | return -EINVAL; |
| @@ -206,10 +213,10 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private) | |||
| 206 | ret = as3935_read(st, AS3935_DATA, &val); | 213 | ret = as3935_read(st, AS3935_DATA, &val); |
| 207 | if (ret) | 214 | if (ret) |
| 208 | goto err_read; | 215 | goto err_read; |
| 209 | val &= AS3935_DATA_MASK; | ||
| 210 | val *= 1000; | ||
| 211 | 216 | ||
| 212 | iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp); | 217 | st->buffer[0] = val & AS3935_DATA_MASK; |
| 218 | iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer, | ||
| 219 | pf->timestamp); | ||
| 213 | err_read: | 220 | err_read: |
| 214 | iio_trigger_notify_done(indio_dev->trig); | 221 | iio_trigger_notify_done(indio_dev->trig); |
| 215 | 222 | ||
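
The as3935 hunk replaces the on-stack push with a driver-owned 16-byte buffer because iio_push_to_buffers_with_timestamp() appends a 64-bit timestamp after the scan bytes and expects it to be naturally aligned. A standalone layout sketch (toy struct, not the driver's definition) showing where the 8-bit sample, the padding and the timestamp land, matching the "8-bit data + 56-bit padding + 64-bit timestamp" comment above:

/* Toy illustration only: one byte of scan data plus an 8-byte-aligned
 * 64-bit timestamp needs 16 bytes in total. Requires C11 for _Alignas. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct scan_layout {
	uint8_t data;                  /* storm distance sample          */
	/* 7 bytes of padding inserted by the compiler                   */
	_Alignas(8) int64_t timestamp; /* timestamp filled in by the core */
};

int main(void)
{
	printf("timestamp offset = %zu\n",
	       offsetof(struct scan_layout, timestamp)); /* prints 8  */
	printf("total size       = %zu\n",
	       sizeof(struct scan_layout));              /* prints 16 */
	return 0;
}
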
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index c2e257d97eff..1a2984c28b95 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c | |||
| @@ -178,6 +178,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port, | |||
| 178 | { | 178 | { |
| 179 | int ret = 0; | 179 | int ret = 0; |
| 180 | struct net_device *old_net_dev; | 180 | struct net_device *old_net_dev; |
| 181 | enum ib_gid_type old_gid_type; | ||
| 181 | 182 | ||
| 182 | /* in rdma_cap_roce_gid_table, this funciton should be protected by a | 183 | /* in rdma_cap_roce_gid_table, this funciton should be protected by a |
| 183 | * sleep-able lock. | 184 | * sleep-able lock. |
| @@ -199,6 +200,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port, | |||
| 199 | } | 200 | } |
| 200 | 201 | ||
| 201 | old_net_dev = table->data_vec[ix].attr.ndev; | 202 | old_net_dev = table->data_vec[ix].attr.ndev; |
| 203 | old_gid_type = table->data_vec[ix].attr.gid_type; | ||
| 202 | if (old_net_dev && old_net_dev != attr->ndev) | 204 | if (old_net_dev && old_net_dev != attr->ndev) |
| 203 | dev_put(old_net_dev); | 205 | dev_put(old_net_dev); |
| 204 | /* if modify_gid failed, just delete the old gid */ | 206 | /* if modify_gid failed, just delete the old gid */ |
| @@ -207,10 +209,14 @@ static int write_gid(struct ib_device *ib_dev, u8 port, | |||
| 207 | attr = &zattr; | 209 | attr = &zattr; |
| 208 | table->data_vec[ix].context = NULL; | 210 | table->data_vec[ix].context = NULL; |
| 209 | } | 211 | } |
| 210 | if (default_gid) | 212 | |
| 211 | table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT; | ||
| 212 | memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid)); | 213 | memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid)); |
| 213 | memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr)); | 214 | memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr)); |
| 215 | if (default_gid) { | ||
| 216 | table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT; | ||
| 217 | if (action == GID_TABLE_WRITE_ACTION_DEL) | ||
| 218 | table->data_vec[ix].attr.gid_type = old_gid_type; | ||
| 219 | } | ||
| 214 | if (table->data_vec[ix].attr.ndev && | 220 | if (table->data_vec[ix].attr.ndev && |
| 215 | table->data_vec[ix].attr.ndev != old_net_dev) | 221 | table->data_vec[ix].attr.ndev != old_net_dev) |
| 216 | dev_hold(table->data_vec[ix].attr.ndev); | 222 | dev_hold(table->data_vec[ix].attr.ndev); |
| @@ -405,7 +411,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port, | |||
| 405 | 411 | ||
| 406 | for (ix = 0; ix < table->sz; ix++) | 412 | for (ix = 0; ix < table->sz; ix++) |
| 407 | if (table->data_vec[ix].attr.ndev == ndev) | 413 | if (table->data_vec[ix].attr.ndev == ndev) |
| 408 | if (!del_gid(ib_dev, port, table, ix, false)) | 414 | if (!del_gid(ib_dev, port, table, ix, |
| 415 | !!(table->data_vec[ix].props & | ||
| 416 | GID_TABLE_ENTRY_DEFAULT))) | ||
| 409 | deleted = true; | 417 | deleted = true; |
| 410 | 418 | ||
| 411 | write_unlock_irq(&table->rwlock); | 419 | write_unlock_irq(&table->rwlock); |
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 1d92e091e22e..c99525512b34 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
| @@ -3452,14 +3452,14 @@ static int cm_establish(struct ib_cm_id *cm_id) | |||
| 3452 | work->cm_event.event = IB_CM_USER_ESTABLISHED; | 3452 | work->cm_event.event = IB_CM_USER_ESTABLISHED; |
| 3453 | 3453 | ||
| 3454 | /* Check if the device started its remove_one */ | 3454 | /* Check if the device started its remove_one */ |
| 3455 | spin_lock_irq(&cm.lock); | 3455 | spin_lock_irqsave(&cm.lock, flags); |
| 3456 | if (!cm_dev->going_down) { | 3456 | if (!cm_dev->going_down) { |
| 3457 | queue_delayed_work(cm.wq, &work->work, 0); | 3457 | queue_delayed_work(cm.wq, &work->work, 0); |
| 3458 | } else { | 3458 | } else { |
| 3459 | kfree(work); | 3459 | kfree(work); |
| 3460 | ret = -ENODEV; | 3460 | ret = -ENODEV; |
| 3461 | } | 3461 | } |
| 3462 | spin_unlock_irq(&cm.lock); | 3462 | spin_unlock_irqrestore(&cm.lock, flags); |
| 3463 | 3463 | ||
| 3464 | out: | 3464 | out: |
| 3465 | return ret; | 3465 | return ret; |
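
The cm_establish() hunk swaps spin_lock_irq() for spin_lock_irqsave() because spin_unlock_irq() unconditionally re-enables interrupts, which is only safe when the caller is known to run with interrupts on; the save/restore form preserves whatever state the caller had. A self-contained toy module (invented names, not the cm code) showing that form of the pattern:

/* Minimal sketch of the spin_lock_irqsave()/spin_unlock_irqrestore() pair. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_counter;

static void demo_update(void)
{
	unsigned long flags;

	/* Safe from any context: the IRQ state is saved and restored. */
	spin_lock_irqsave(&demo_lock, flags);
	demo_counter++;
	spin_unlock_irqrestore(&demo_lock, flags);
}

static int __init demo_init(void)
{
	demo_update();
	pr_info("demo_counter=%d\n", demo_counter);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
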
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index f0c91ba3178a..ad1b1adcf6f0 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
| @@ -708,17 +708,6 @@ static void cma_deref_id(struct rdma_id_private *id_priv) | |||
| 708 | complete(&id_priv->comp); | 708 | complete(&id_priv->comp); |
| 709 | } | 709 | } |
| 710 | 710 | ||
| 711 | static int cma_disable_callback(struct rdma_id_private *id_priv, | ||
| 712 | enum rdma_cm_state state) | ||
| 713 | { | ||
| 714 | mutex_lock(&id_priv->handler_mutex); | ||
| 715 | if (id_priv->state != state) { | ||
| 716 | mutex_unlock(&id_priv->handler_mutex); | ||
| 717 | return -EINVAL; | ||
| 718 | } | ||
| 719 | return 0; | ||
| 720 | } | ||
| 721 | |||
| 722 | struct rdma_cm_id *rdma_create_id(struct net *net, | 711 | struct rdma_cm_id *rdma_create_id(struct net *net, |
| 723 | rdma_cm_event_handler event_handler, | 712 | rdma_cm_event_handler event_handler, |
| 724 | void *context, enum rdma_port_space ps, | 713 | void *context, enum rdma_port_space ps, |
| @@ -1671,11 +1660,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
| 1671 | struct rdma_cm_event event; | 1660 | struct rdma_cm_event event; |
| 1672 | int ret = 0; | 1661 | int ret = 0; |
| 1673 | 1662 | ||
| 1663 | mutex_lock(&id_priv->handler_mutex); | ||
| 1674 | if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && | 1664 | if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && |
| 1675 | cma_disable_callback(id_priv, RDMA_CM_CONNECT)) || | 1665 | id_priv->state != RDMA_CM_CONNECT) || |
| 1676 | (ib_event->event == IB_CM_TIMEWAIT_EXIT && | 1666 | (ib_event->event == IB_CM_TIMEWAIT_EXIT && |
| 1677 | cma_disable_callback(id_priv, RDMA_CM_DISCONNECT))) | 1667 | id_priv->state != RDMA_CM_DISCONNECT)) |
| 1678 | return 0; | 1668 | goto out; |
| 1679 | 1669 | ||
| 1680 | memset(&event, 0, sizeof event); | 1670 | memset(&event, 0, sizeof event); |
| 1681 | switch (ib_event->event) { | 1671 | switch (ib_event->event) { |
| @@ -1870,7 +1860,7 @@ static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_e | |||
| 1870 | 1860 | ||
| 1871 | static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | 1861 | static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) |
| 1872 | { | 1862 | { |
| 1873 | struct rdma_id_private *listen_id, *conn_id; | 1863 | struct rdma_id_private *listen_id, *conn_id = NULL; |
| 1874 | struct rdma_cm_event event; | 1864 | struct rdma_cm_event event; |
| 1875 | struct net_device *net_dev; | 1865 | struct net_device *net_dev; |
| 1876 | int offset, ret; | 1866 | int offset, ret; |
| @@ -1884,9 +1874,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
| 1884 | goto net_dev_put; | 1874 | goto net_dev_put; |
| 1885 | } | 1875 | } |
| 1886 | 1876 | ||
| 1887 | if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) { | 1877 | mutex_lock(&listen_id->handler_mutex); |
| 1878 | if (listen_id->state != RDMA_CM_LISTEN) { | ||
| 1888 | ret = -ECONNABORTED; | 1879 | ret = -ECONNABORTED; |
| 1889 | goto net_dev_put; | 1880 | goto err1; |
| 1890 | } | 1881 | } |
| 1891 | 1882 | ||
| 1892 | memset(&event, 0, sizeof event); | 1883 | memset(&event, 0, sizeof event); |
| @@ -1976,8 +1967,9 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) | |||
| 1976 | struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; | 1967 | struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; |
| 1977 | struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; | 1968 | struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; |
| 1978 | 1969 | ||
| 1979 | if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) | 1970 | mutex_lock(&id_priv->handler_mutex); |
| 1980 | return 0; | 1971 | if (id_priv->state != RDMA_CM_CONNECT) |
| 1972 | goto out; | ||
| 1981 | 1973 | ||
| 1982 | memset(&event, 0, sizeof event); | 1974 | memset(&event, 0, sizeof event); |
| 1983 | switch (iw_event->event) { | 1975 | switch (iw_event->event) { |
| @@ -2029,6 +2021,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) | |||
| 2029 | return ret; | 2021 | return ret; |
| 2030 | } | 2022 | } |
| 2031 | 2023 | ||
| 2024 | out: | ||
| 2032 | mutex_unlock(&id_priv->handler_mutex); | 2025 | mutex_unlock(&id_priv->handler_mutex); |
| 2033 | return ret; | 2026 | return ret; |
| 2034 | } | 2027 | } |
| @@ -2039,13 +2032,15 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, | |||
| 2039 | struct rdma_cm_id *new_cm_id; | 2032 | struct rdma_cm_id *new_cm_id; |
| 2040 | struct rdma_id_private *listen_id, *conn_id; | 2033 | struct rdma_id_private *listen_id, *conn_id; |
| 2041 | struct rdma_cm_event event; | 2034 | struct rdma_cm_event event; |
| 2042 | int ret; | 2035 | int ret = -ECONNABORTED; |
| 2043 | struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; | 2036 | struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; |
| 2044 | struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; | 2037 | struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; |
| 2045 | 2038 | ||
| 2046 | listen_id = cm_id->context; | 2039 | listen_id = cm_id->context; |
| 2047 | if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) | 2040 | |
| 2048 | return -ECONNABORTED; | 2041 | mutex_lock(&listen_id->handler_mutex); |
| 2042 | if (listen_id->state != RDMA_CM_LISTEN) | ||
| 2043 | goto out; | ||
| 2049 | 2044 | ||
| 2050 | /* Create a new RDMA id for the new IW CM ID */ | 2045 | /* Create a new RDMA id for the new IW CM ID */ |
| 2051 | new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net, | 2046 | new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net, |
| @@ -3216,8 +3211,9 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, | |||
| 3216 | struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; | 3211 | struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; |
| 3217 | int ret = 0; | 3212 | int ret = 0; |
| 3218 | 3213 | ||
| 3219 | if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) | 3214 | mutex_lock(&id_priv->handler_mutex); |
| 3220 | return 0; | 3215 | if (id_priv->state != RDMA_CM_CONNECT) |
| 3216 | goto out; | ||
| 3221 | 3217 | ||
| 3222 | memset(&event, 0, sizeof event); | 3218 | memset(&event, 0, sizeof event); |
| 3223 | switch (ib_event->event) { | 3219 | switch (ib_event->event) { |
| @@ -3673,12 +3669,13 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) | |||
| 3673 | struct rdma_id_private *id_priv; | 3669 | struct rdma_id_private *id_priv; |
| 3674 | struct cma_multicast *mc = multicast->context; | 3670 | struct cma_multicast *mc = multicast->context; |
| 3675 | struct rdma_cm_event event; | 3671 | struct rdma_cm_event event; |
| 3676 | int ret; | 3672 | int ret = 0; |
| 3677 | 3673 | ||
| 3678 | id_priv = mc->id_priv; | 3674 | id_priv = mc->id_priv; |
| 3679 | if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) && | 3675 | mutex_lock(&id_priv->handler_mutex); |
| 3680 | cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED)) | 3676 | if (id_priv->state != RDMA_CM_ADDR_BOUND && |
| 3681 | return 0; | 3677 | id_priv->state != RDMA_CM_ADDR_RESOLVED) |
| 3678 | goto out; | ||
| 3682 | 3679 | ||
| 3683 | if (!status) | 3680 | if (!status) |
| 3684 | status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); | 3681 | status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); |
| @@ -3720,6 +3717,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) | |||
| 3720 | return 0; | 3717 | return 0; |
| 3721 | } | 3718 | } |
| 3722 | 3719 | ||
| 3720 | out: | ||
| 3723 | mutex_unlock(&id_priv->handler_mutex); | 3721 | mutex_unlock(&id_priv->handler_mutex); |
| 3724 | return 0; | 3722 | return 0; |
| 3725 | } | 3723 | } |
| @@ -3878,12 +3876,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, | |||
| 3878 | gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - | 3876 | gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - |
| 3879 | rdma_start_port(id_priv->cma_dev->device)]; | 3877 | rdma_start_port(id_priv->cma_dev->device)]; |
| 3880 | if (addr->sa_family == AF_INET) { | 3878 | if (addr->sa_family == AF_INET) { |
| 3881 | if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) | 3879 | if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { |
| 3880 | mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; | ||
| 3882 | err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, | 3881 | err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, |
| 3883 | true); | 3882 | true); |
| 3884 | if (!err) { | 3883 | if (!err) |
| 3885 | mc->igmp_joined = true; | 3884 | mc->igmp_joined = true; |
| 3886 | mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT; | ||
| 3887 | } | 3885 | } |
| 3888 | } else { | 3886 | } else { |
| 3889 | if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) | 3887 | if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) |
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 5516fb070344..5c155fa91eec 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c | |||
| @@ -661,6 +661,9 @@ int ib_query_port(struct ib_device *device, | |||
| 661 | if (err || port_attr->subnet_prefix) | 661 | if (err || port_attr->subnet_prefix) |
| 662 | return err; | 662 | return err; |
| 663 | 663 | ||
| 664 | if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND) | ||
| 665 | return 0; | ||
| 666 | |||
| 664 | err = ib_query_gid(device, port_num, 0, &gid, NULL); | 667 | err = ib_query_gid(device, port_num, 0, &gid, NULL); |
| 665 | if (err) | 668 | if (err) |
| 666 | return err; | 669 | return err; |
| @@ -1024,7 +1027,8 @@ static int __init ib_core_init(void) | |||
| 1024 | goto err_mad; | 1027 | goto err_mad; |
| 1025 | } | 1028 | } |
| 1026 | 1029 | ||
| 1027 | if (ib_add_ibnl_clients()) { | 1030 | ret = ib_add_ibnl_clients(); |
| 1031 | if (ret) { | ||
| 1028 | pr_warn("Couldn't register ibnl clients\n"); | 1032 | pr_warn("Couldn't register ibnl clients\n"); |
| 1029 | goto err_sa; | 1033 | goto err_sa; |
| 1030 | } | 1034 | } |
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c index 43e3fa27102b..1c41b95cefec 100644 --- a/drivers/infiniband/core/iwpm_msg.c +++ b/drivers/infiniband/core/iwpm_msg.c | |||
| @@ -506,7 +506,7 @@ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb, | |||
| 506 | if (!nlmsg_request) { | 506 | if (!nlmsg_request) { |
| 507 | pr_info("%s: Could not find a matching request (seq = %u)\n", | 507 | pr_info("%s: Could not find a matching request (seq = %u)\n", |
| 508 | __func__, msg_seq); | 508 | __func__, msg_seq); |
| 509 | return -EINVAL; | 509 | return -EINVAL; |
| 510 | } | 510 | } |
| 511 | pm_msg = nlmsg_request->req_buffer; | 511 | pm_msg = nlmsg_request->req_buffer; |
| 512 | local_sockaddr = (struct sockaddr_storage *) | 512 | local_sockaddr = (struct sockaddr_storage *) |
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 82fb511112da..2d49228f28b2 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
| @@ -1638,9 +1638,9 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) | |||
| 1638 | /* Now, check to see if there are any methods still in use */ | 1638 | /* Now, check to see if there are any methods still in use */ |
| 1639 | if (!check_method_table(method)) { | 1639 | if (!check_method_table(method)) { |
| 1640 | /* If not, release management method table */ | 1640 | /* If not, release management method table */ |
| 1641 | kfree(method); | 1641 | kfree(method); |
| 1642 | class->method_table[mgmt_class] = NULL; | 1642 | class->method_table[mgmt_class] = NULL; |
| 1643 | /* Any management classes left ? */ | 1643 | /* Any management classes left ? */ |
| 1644 | if (!check_class_table(class)) { | 1644 | if (!check_class_table(class)) { |
| 1645 | /* If not, release management class table */ | 1645 | /* If not, release management class table */ |
| 1646 | kfree(class); | 1646 | kfree(class); |
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 5e573bb18660..a5793c8f1590 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c | |||
| @@ -889,9 +889,9 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num) | |||
| 889 | static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | 889 | static void setup_hw_stats(struct ib_device *device, struct ib_port *port, |
| 890 | u8 port_num) | 890 | u8 port_num) |
| 891 | { | 891 | { |
| 892 | struct attribute_group *hsag = NULL; | 892 | struct attribute_group *hsag; |
| 893 | struct rdma_hw_stats *stats; | 893 | struct rdma_hw_stats *stats; |
| 894 | int i = 0, ret; | 894 | int i, ret; |
| 895 | 895 | ||
| 896 | stats = device->alloc_hw_stats(device, port_num); | 896 | stats = device->alloc_hw_stats(device, port_num); |
| 897 | 897 | ||
| @@ -899,19 +899,22 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | |||
| 899 | return; | 899 | return; |
| 900 | 900 | ||
| 901 | if (!stats->names || stats->num_counters <= 0) | 901 | if (!stats->names || stats->num_counters <= 0) |
| 902 | goto err; | 902 | goto err_free_stats; |
| 903 | 903 | ||
| 904 | /* | ||
| 905 | * Two extra attribute elements here, one for the lifespan entry and | ||
| 906 | * one to NULL terminate the list for the sysfs core code | ||
| 907 | */ | ||
| 904 | hsag = kzalloc(sizeof(*hsag) + | 908 | hsag = kzalloc(sizeof(*hsag) + |
| 905 | // 1 extra for the lifespan config entry | 909 | sizeof(void *) * (stats->num_counters + 2), |
| 906 | sizeof(void *) * (stats->num_counters + 1), | ||
| 907 | GFP_KERNEL); | 910 | GFP_KERNEL); |
| 908 | if (!hsag) | 911 | if (!hsag) |
| 909 | return; | 912 | goto err_free_stats; |
| 910 | 913 | ||
| 911 | ret = device->get_hw_stats(device, stats, port_num, | 914 | ret = device->get_hw_stats(device, stats, port_num, |
| 912 | stats->num_counters); | 915 | stats->num_counters); |
| 913 | if (ret != stats->num_counters) | 916 | if (ret != stats->num_counters) |
| 914 | goto err; | 917 | goto err_free_hsag; |
| 915 | 918 | ||
| 916 | stats->timestamp = jiffies; | 919 | stats->timestamp = jiffies; |
| 917 | 920 | ||
| @@ -922,10 +925,13 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | |||
| 922 | hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]); | 925 | hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]); |
| 923 | if (!hsag->attrs[i]) | 926 | if (!hsag->attrs[i]) |
| 924 | goto err; | 927 | goto err; |
| 928 | sysfs_attr_init(hsag->attrs[i]); | ||
| 925 | } | 929 | } |
| 926 | 930 | ||
| 927 | /* treat an error here as non-fatal */ | 931 | /* treat an error here as non-fatal */ |
| 928 | hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num); | 932 | hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num); |
| 933 | if (hsag->attrs[i]) | ||
| 934 | sysfs_attr_init(hsag->attrs[i]); | ||
| 929 | 935 | ||
| 930 | if (port) { | 936 | if (port) { |
| 931 | struct kobject *kobj = &port->kobj; | 937 | struct kobject *kobj = &port->kobj; |
| @@ -946,10 +952,12 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | |||
| 946 | return; | 952 | return; |
| 947 | 953 | ||
| 948 | err: | 954 | err: |
| 949 | kfree(stats); | ||
| 950 | for (; i >= 0; i--) | 955 | for (; i >= 0; i--) |
| 951 | kfree(hsag->attrs[i]); | 956 | kfree(hsag->attrs[i]); |
| 957 | err_free_hsag: | ||
| 952 | kfree(hsag); | 958 | kfree(hsag); |
| 959 | err_free_stats: | ||
| 960 | kfree(stats); | ||
| 953 | return; | 961 | return; |
| 954 | } | 962 | } |
| 955 | 963 | ||
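
The setup_hw_stats() hunk sizes the attribute array as num_counters + 2 so there is room for the extra lifespan entry and for the NULL terminator that the sysfs core walks to. A userspace toy (made-up names, not the kernel structures) of the same sizing and termination idea:

/* Allocate N counter slots, one lifespan slot and one NULL terminator. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	int num_counters = 4;  /* pretend hardware counters */
	char **attrs = calloc(num_counters + 2, sizeof(*attrs));
	int i;

	if (!attrs)
		return 1;
	for (i = 0; i < num_counters; i++) {
		char name[32];

		snprintf(name, sizeof(name), "counter%d", i);
		attrs[i] = strdup(name);
	}
	attrs[i] = strdup("lifespan"); /* slot num_counters              */
	/* attrs[num_counters + 1] stays NULL: the terminator            */

	for (i = 0; attrs[i]; i++)     /* consumers walk until the NULL  */
		printf("%s\n", attrs[i]);

	for (i = 0; attrs[i]; i++)
		free(attrs[i]);
	free(attrs);
	return 0;
}
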
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 1a8babb8ee3c..825021d1008b 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
| @@ -1747,7 +1747,7 @@ static int create_qp(struct ib_uverbs_file *file, | |||
| 1747 | struct ib_srq *srq = NULL; | 1747 | struct ib_srq *srq = NULL; |
| 1748 | struct ib_qp *qp; | 1748 | struct ib_qp *qp; |
| 1749 | char *buf; | 1749 | char *buf; |
| 1750 | struct ib_qp_init_attr attr; | 1750 | struct ib_qp_init_attr attr = {}; |
| 1751 | struct ib_uverbs_ex_create_qp_resp resp; | 1751 | struct ib_uverbs_ex_create_qp_resp resp; |
| 1752 | int ret; | 1752 | int ret; |
| 1753 | 1753 | ||
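
create_qp() now declares attr with an empty initializer so every field the function does not set explicitly reaches the driver as zero rather than stack garbage (the i40iw hunk further down does the same job with memset() on its stag info). A small userspace illustration with made-up types, assuming nothing about the real verbs structures:

/* Two equivalent ways to guarantee a fully zeroed automatic struct. */
#include <stdio.h>
#include <string.h>

struct toy_attr {
	int cap_send;
	int cap_recv;
	void *context;
};

int main(void)
{
	struct toy_attr zeroed = {0}; /* every member starts at 0/NULL    */
	struct toy_attr filled;       /* indeterminate until assigned     */

	memset(&filled, 0, sizeof(filled)); /* explicit equivalent        */

	zeroed.cap_send = 16;
	filled.cap_send = 16;

	printf("zeroed: send=%d recv=%d ctx=%p\n",
	       zeroed.cap_send, zeroed.cap_recv, (void *)zeroed.context);
	printf("memset: send=%d recv=%d ctx=%p\n",
	       filled.cap_send, filled.cap_recv, (void *)filled.context);
	return 0;
}
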
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 1d7d4cf442e3..6298f54b4137 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c | |||
| @@ -511,12 +511,16 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, | |||
| 511 | ah_attr->grh.dgid = sgid; | 511 | ah_attr->grh.dgid = sgid; |
| 512 | 512 | ||
| 513 | if (!rdma_cap_eth_ah(device, port_num)) { | 513 | if (!rdma_cap_eth_ah(device, port_num)) { |
| 514 | ret = ib_find_cached_gid_by_port(device, &dgid, | 514 | if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) { |
| 515 | IB_GID_TYPE_IB, | 515 | ret = ib_find_cached_gid_by_port(device, &dgid, |
| 516 | port_num, NULL, | 516 | IB_GID_TYPE_IB, |
| 517 | &gid_index); | 517 | port_num, NULL, |
| 518 | if (ret) | 518 | &gid_index); |
| 519 | return ret; | 519 | if (ret) |
| 520 | return ret; | ||
| 521 | } else { | ||
| 522 | gid_index = 0; | ||
| 523 | } | ||
| 520 | } | 524 | } |
| 521 | 525 | ||
| 522 | ah_attr->grh.sgid_index = (u8) gid_index; | 526 | ah_attr->grh.sgid_index = (u8) gid_index; |
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 6e7050ab9e16..14d7eeb09be6 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c | |||
| @@ -300,16 +300,15 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 300 | const struct cpumask *node_mask, | 300 | const struct cpumask *node_mask, |
| 301 | *proc_mask = tsk_cpus_allowed(current); | 301 | *proc_mask = tsk_cpus_allowed(current); |
| 302 | struct cpu_mask_set *set = &dd->affinity->proc; | 302 | struct cpu_mask_set *set = &dd->affinity->proc; |
| 303 | char buf[1024]; | ||
| 304 | 303 | ||
| 305 | /* | 304 | /* |
| 306 | * check whether process/context affinity has already | 305 | * check whether process/context affinity has already |
| 307 | * been set | 306 | * been set |
| 308 | */ | 307 | */ |
| 309 | if (cpumask_weight(proc_mask) == 1) { | 308 | if (cpumask_weight(proc_mask) == 1) { |
| 310 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask)); | 309 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl", |
| 311 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s", | 310 | current->pid, current->comm, |
| 312 | current->pid, current->comm, buf); | 311 | cpumask_pr_args(proc_mask)); |
| 313 | /* | 312 | /* |
| 314 | * Mark the pre-set CPU as used. This is atomic so we don't | 313 | * Mark the pre-set CPU as used. This is atomic so we don't |
| 315 | * need the lock | 314 | * need the lock |
| @@ -318,9 +317,9 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 318 | cpumask_set_cpu(cpu, &set->used); | 317 | cpumask_set_cpu(cpu, &set->used); |
| 319 | goto done; | 318 | goto done; |
| 320 | } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) { | 319 | } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) { |
| 321 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask)); | 320 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl", |
| 322 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s", | 321 | current->pid, current->comm, |
| 323 | current->pid, current->comm, buf); | 322 | cpumask_pr_args(proc_mask)); |
| 324 | goto done; | 323 | goto done; |
| 325 | } | 324 | } |
| 326 | 325 | ||
| @@ -356,8 +355,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 356 | cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ? | 355 | cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ? |
| 357 | &dd->affinity->rcv_intr.mask : | 356 | &dd->affinity->rcv_intr.mask : |
| 358 | &dd->affinity->rcv_intr.used)); | 357 | &dd->affinity->rcv_intr.used)); |
| 359 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs)); | 358 | hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl", |
| 360 | hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf); | 359 | cpumask_pr_args(intrs)); |
| 361 | 360 | ||
| 362 | /* | 361 | /* |
| 363 | * If we don't have a NUMA node requested, preference is towards | 362 | * If we don't have a NUMA node requested, preference is towards |
| @@ -366,18 +365,16 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 366 | if (node == -1) | 365 | if (node == -1) |
| 367 | node = dd->node; | 366 | node = dd->node; |
| 368 | node_mask = cpumask_of_node(node); | 367 | node_mask = cpumask_of_node(node); |
| 369 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask)); | 368 | hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node, |
| 370 | hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf); | 369 | cpumask_pr_args(node_mask)); |
| 371 | 370 | ||
| 372 | /* diff will hold all unused cpus */ | 371 | /* diff will hold all unused cpus */ |
| 373 | cpumask_andnot(diff, &set->mask, &set->used); | 372 | cpumask_andnot(diff, &set->mask, &set->used); |
| 374 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff)); | 373 | hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff)); |
| 375 | hfi1_cdbg(PROC, "unused CPUs (all) %s", buf); | ||
| 376 | 374 | ||
| 377 | /* get cpumask of available CPUs on preferred NUMA */ | 375 | /* get cpumask of available CPUs on preferred NUMA */ |
| 378 | cpumask_and(mask, diff, node_mask); | 376 | cpumask_and(mask, diff, node_mask); |
| 379 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask)); | 377 | hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask)); |
| 380 | hfi1_cdbg(PROC, "available cpus on NUMA %s", buf); | ||
| 381 | 378 | ||
| 382 | /* | 379 | /* |
| 383 | * At first, we don't want to place processes on the same | 380 | * At first, we don't want to place processes on the same |
| @@ -395,8 +392,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 395 | cpumask_andnot(diff, &set->mask, &set->used); | 392 | cpumask_andnot(diff, &set->mask, &set->used); |
| 396 | cpumask_andnot(mask, diff, node_mask); | 393 | cpumask_andnot(mask, diff, node_mask); |
| 397 | } | 394 | } |
| 398 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask)); | 395 | hfi1_cdbg(PROC, "possible CPUs for process %*pbl", |
| 399 | hfi1_cdbg(PROC, "possible CPUs for process %s", buf); | 396 | cpumask_pr_args(mask)); |
| 400 | 397 | ||
| 401 | cpu = cpumask_first(mask); | 398 | cpu = cpumask_first(mask); |
| 402 | if (cpu >= nr_cpu_ids) /* empty */ | 399 | if (cpu >= nr_cpu_ids) /* empty */ |
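
The affinity.c hunk drops the 1 KB scratch buffer because the kernel's printf already understands %*pbl, and cpumask_pr_args() expands to the (bit count, bitmap pointer) pair that specifier expects. A minimal toy module (not the hfi1 code) printing a cpumask directly:

/* Print the online cpumask as a range list without an intermediate buffer. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpumask.h>

static int __init pbl_demo_init(void)
{
	/* cpumask_pr_args() supplies nr_cpu_ids and the bitmap for %*pbl. */
	pr_info("online CPUs: %*pbl\n", cpumask_pr_args(cpu_online_mask));
	return 0;
}

static void __exit pbl_demo_exit(void)
{
}

module_init(pbl_demo_init);
module_exit(pbl_demo_exit);
MODULE_LICENSE("GPL");
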
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 3b876da745a1..f5de85178055 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
| @@ -1037,7 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *); | |||
| 1037 | static void dc_start(struct hfi1_devdata *); | 1037 | static void dc_start(struct hfi1_devdata *); |
| 1038 | static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, | 1038 | static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp, |
| 1039 | unsigned int *np); | 1039 | unsigned int *np); |
| 1040 | static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd); | 1040 | static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd); |
| 1041 | 1041 | ||
| 1042 | /* | 1042 | /* |
| 1043 | * Error interrupt table entry. This is used as input to the interrupt | 1043 | * Error interrupt table entry. This is used as input to the interrupt |
| @@ -6962,8 +6962,6 @@ void handle_link_down(struct work_struct *work) | |||
| 6962 | } | 6962 | } |
| 6963 | 6963 | ||
| 6964 | reset_neighbor_info(ppd); | 6964 | reset_neighbor_info(ppd); |
| 6965 | if (ppd->mgmt_allowed) | ||
| 6966 | remove_full_mgmt_pkey(ppd); | ||
| 6967 | 6965 | ||
| 6968 | /* disable the port */ | 6966 | /* disable the port */ |
| 6969 | clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); | 6967 | clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); |
| @@ -7070,12 +7068,16 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd) | |||
| 7070 | __func__, ppd->pkeys[2], FULL_MGMT_P_KEY); | 7068 | __func__, ppd->pkeys[2], FULL_MGMT_P_KEY); |
| 7071 | ppd->pkeys[2] = FULL_MGMT_P_KEY; | 7069 | ppd->pkeys[2] = FULL_MGMT_P_KEY; |
| 7072 | (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); | 7070 | (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); |
| 7071 | hfi1_event_pkey_change(ppd->dd, ppd->port); | ||
| 7073 | } | 7072 | } |
| 7074 | 7073 | ||
| 7075 | static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd) | 7074 | static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd) |
| 7076 | { | 7075 | { |
| 7077 | ppd->pkeys[2] = 0; | 7076 | if (ppd->pkeys[2] != 0) { |
| 7078 | (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); | 7077 | ppd->pkeys[2] = 0; |
| 7078 | (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); | ||
| 7079 | hfi1_event_pkey_change(ppd->dd, ppd->port); | ||
| 7080 | } | ||
| 7079 | } | 7081 | } |
| 7080 | 7082 | ||
| 7081 | /* | 7083 | /* |
| @@ -7832,8 +7834,8 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) | |||
| 7832 | * save first 2 flits in the packet that caused | 7834 | * save first 2 flits in the packet that caused |
| 7833 | * the error | 7835 | * the error |
| 7834 | */ | 7836 | */ |
| 7835 | dd->err_info_rcvport.packet_flit1 = hdr0; | 7837 | dd->err_info_rcvport.packet_flit1 = hdr0; |
| 7836 | dd->err_info_rcvport.packet_flit2 = hdr1; | 7838 | dd->err_info_rcvport.packet_flit2 = hdr1; |
| 7837 | } | 7839 | } |
| 7838 | switch (info) { | 7840 | switch (info) { |
| 7839 | case 1: | 7841 | case 1: |
| @@ -9168,6 +9170,13 @@ int start_link(struct hfi1_pportdata *ppd) | |||
| 9168 | return 0; | 9170 | return 0; |
| 9169 | } | 9171 | } |
| 9170 | 9172 | ||
| 9173 | /* | ||
| 9174 | * FULL_MGMT_P_KEY is cleared from the pkey table, so that the | ||
| 9175 | * pkey table can be configured properly if the HFI unit is connected | ||
| 9176 | * to switch port with MgmtAllowed=NO | ||
| 9177 | */ | ||
| 9178 | clear_full_mgmt_pkey(ppd); | ||
| 9179 | |||
| 9171 | return set_link_state(ppd, HLS_DN_POLL); | 9180 | return set_link_state(ppd, HLS_DN_POLL); |
| 9172 | } | 9181 | } |
| 9173 | 9182 | ||
| @@ -9777,7 +9786,7 @@ static void set_send_length(struct hfi1_pportdata *ppd) | |||
| 9777 | u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) | 9786 | u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2) |
| 9778 | & SEND_LEN_CHECK1_LEN_VL15_MASK) << | 9787 | & SEND_LEN_CHECK1_LEN_VL15_MASK) << |
| 9779 | SEND_LEN_CHECK1_LEN_VL15_SHIFT; | 9788 | SEND_LEN_CHECK1_LEN_VL15_SHIFT; |
| 9780 | int i; | 9789 | int i, j; |
| 9781 | u32 thres; | 9790 | u32 thres; |
| 9782 | 9791 | ||
| 9783 | for (i = 0; i < ppd->vls_supported; i++) { | 9792 | for (i = 0; i < ppd->vls_supported; i++) { |
| @@ -9801,7 +9810,10 @@ static void set_send_length(struct hfi1_pportdata *ppd) | |||
| 9801 | sc_mtu_to_threshold(dd->vld[i].sc, | 9810 | sc_mtu_to_threshold(dd->vld[i].sc, |
| 9802 | dd->vld[i].mtu, | 9811 | dd->vld[i].mtu, |
| 9803 | dd->rcd[0]->rcvhdrqentsize)); | 9812 | dd->rcd[0]->rcvhdrqentsize)); |
| 9804 | sc_set_cr_threshold(dd->vld[i].sc, thres); | 9813 | for (j = 0; j < INIT_SC_PER_VL; j++) |
| 9814 | sc_set_cr_threshold( | ||
| 9815 | pio_select_send_context_vl(dd, j, i), | ||
| 9816 | thres); | ||
| 9805 | } | 9817 | } |
| 9806 | thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), | 9818 | thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50), |
| 9807 | sc_mtu_to_threshold(dd->vld[15].sc, | 9819 | sc_mtu_to_threshold(dd->vld[15].sc, |
| @@ -11906,7 +11918,7 @@ static void update_synth_timer(unsigned long opaque) | |||
| 11906 | hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); | 11918 | hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); |
| 11907 | } | 11919 | } |
| 11908 | 11920 | ||
| 11909 | mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); | 11921 | mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); |
| 11910 | } | 11922 | } |
| 11911 | 11923 | ||
| 11912 | #define C_MAX_NAME 13 /* 12 chars + one for /0 */ | 11924 | #define C_MAX_NAME 13 /* 12 chars + one for /0 */ |
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 7a5b0e676cc7..c702a009608f 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c | |||
| @@ -203,6 +203,9 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, | |||
| 203 | 203 | ||
| 204 | switch (cmd) { | 204 | switch (cmd) { |
| 205 | case HFI1_IOCTL_ASSIGN_CTXT: | 205 | case HFI1_IOCTL_ASSIGN_CTXT: |
| 206 | if (uctxt) | ||
| 207 | return -EINVAL; | ||
| 208 | |||
| 206 | if (copy_from_user(&uinfo, | 209 | if (copy_from_user(&uinfo, |
| 207 | (struct hfi1_user_info __user *)arg, | 210 | (struct hfi1_user_info __user *)arg, |
| 208 | sizeof(uinfo))) | 211 | sizeof(uinfo))) |
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 5cc492e5776d..eed971ccd2a1 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c | |||
| @@ -1337,7 +1337,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd) | |||
| 1337 | dma_free_coherent(&dd->pcidev->dev, sizeof(u64), | 1337 | dma_free_coherent(&dd->pcidev->dev, sizeof(u64), |
| 1338 | (void *)dd->rcvhdrtail_dummy_kvaddr, | 1338 | (void *)dd->rcvhdrtail_dummy_kvaddr, |
| 1339 | dd->rcvhdrtail_dummy_physaddr); | 1339 | dd->rcvhdrtail_dummy_physaddr); |
| 1340 | dd->rcvhdrtail_dummy_kvaddr = NULL; | 1340 | dd->rcvhdrtail_dummy_kvaddr = NULL; |
| 1341 | } | 1341 | } |
| 1342 | 1342 | ||
| 1343 | for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) { | 1343 | for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) { |
| @@ -1383,7 +1383,7 @@ static void postinit_cleanup(struct hfi1_devdata *dd) | |||
| 1383 | static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 1383 | static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
| 1384 | { | 1384 | { |
| 1385 | int ret = 0, j, pidx, initfail; | 1385 | int ret = 0, j, pidx, initfail; |
| 1386 | struct hfi1_devdata *dd = NULL; | 1386 | struct hfi1_devdata *dd = ERR_PTR(-EINVAL); |
| 1387 | struct hfi1_pportdata *ppd; | 1387 | struct hfi1_pportdata *ppd; |
| 1388 | 1388 | ||
| 1389 | /* First, lock the non-writable module parameters */ | 1389 | /* First, lock the non-writable module parameters */ |
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index 219029576ba0..fca07a1d6c28 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c | |||
| @@ -78,6 +78,16 @@ static inline void clear_opa_smp_data(struct opa_smp *smp) | |||
| 78 | memset(data, 0, size); | 78 | memset(data, 0, size); |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port) | ||
| 82 | { | ||
| 83 | struct ib_event event; | ||
| 84 | |||
| 85 | event.event = IB_EVENT_PKEY_CHANGE; | ||
| 86 | event.device = &dd->verbs_dev.rdi.ibdev; | ||
| 87 | event.element.port_num = port; | ||
| 88 | ib_dispatch_event(&event); | ||
| 89 | } | ||
| 90 | |||
| 81 | static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) | 91 | static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len) |
| 82 | { | 92 | { |
| 83 | struct ib_mad_send_buf *send_buf; | 93 | struct ib_mad_send_buf *send_buf; |
| @@ -1418,15 +1428,10 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys) | |||
| 1418 | } | 1428 | } |
| 1419 | 1429 | ||
| 1420 | if (changed) { | 1430 | if (changed) { |
| 1421 | struct ib_event event; | ||
| 1422 | |||
| 1423 | (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); | 1431 | (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0); |
| 1424 | 1432 | hfi1_event_pkey_change(dd, port); | |
| 1425 | event.event = IB_EVENT_PKEY_CHANGE; | ||
| 1426 | event.device = &dd->verbs_dev.rdi.ibdev; | ||
| 1427 | event.element.port_num = port; | ||
| 1428 | ib_dispatch_event(&event); | ||
| 1429 | } | 1433 | } |
| 1434 | |||
| 1430 | return 0; | 1435 | return 0; |
| 1431 | } | 1436 | } |
| 1432 | 1437 | ||
diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h index 55ee08675333..8b734aaae88a 100644 --- a/drivers/infiniband/hw/hfi1/mad.h +++ b/drivers/infiniband/hw/hfi1/mad.h | |||
| @@ -434,4 +434,6 @@ struct sc2vlnt { | |||
| 434 | COUNTER_MASK(1, 3) | \ | 434 | COUNTER_MASK(1, 3) | \ |
| 435 | COUNTER_MASK(1, 4)) | 435 | COUNTER_MASK(1, 4)) |
| 436 | 436 | ||
| 437 | void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port); | ||
| 438 | |||
| 437 | #endif /* _HFI1_MAD_H */ | 439 | #endif /* _HFI1_MAD_H */ |
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index d5edb1afbb8f..d4022450b73f 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c | |||
| @@ -995,7 +995,7 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause) | |||
| 995 | /* counter is reset if occupancy count changes */ | 995 | /* counter is reset if occupancy count changes */ |
| 996 | if (reg != reg_prev) | 996 | if (reg != reg_prev) |
| 997 | loop = 0; | 997 | loop = 0; |
| 998 | if (loop > 500) { | 998 | if (loop > 50000) { |
| 999 | /* timed out - bounce the link */ | 999 | /* timed out - bounce the link */ |
| 1000 | dd_dev_err(dd, | 1000 | dd_dev_err(dd, |
| 1001 | "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n", | 1001 | "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n", |
| @@ -1798,6 +1798,21 @@ static void pio_map_rcu_callback(struct rcu_head *list) | |||
| 1798 | } | 1798 | } |
| 1799 | 1799 | ||
| 1800 | /* | 1800 | /* |
| 1801 | * Set credit return threshold for the kernel send context | ||
| 1802 | */ | ||
| 1803 | static void set_threshold(struct hfi1_devdata *dd, int scontext, int i) | ||
| 1804 | { | ||
| 1805 | u32 thres; | ||
| 1806 | |||
| 1807 | thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext], | ||
| 1808 | 50), | ||
| 1809 | sc_mtu_to_threshold(dd->kernel_send_context[scontext], | ||
| 1810 | dd->vld[i].mtu, | ||
| 1811 | dd->rcd[0]->rcvhdrqentsize)); | ||
| 1812 | sc_set_cr_threshold(dd->kernel_send_context[scontext], thres); | ||
| 1813 | } | ||
| 1814 | |||
| 1815 | /* | ||
| 1801 | * pio_map_init - called when #vls change | 1816 | * pio_map_init - called when #vls change |
| 1802 | * @dd: hfi1_devdata | 1817 | * @dd: hfi1_devdata |
| 1803 | * @port: port number | 1818 | * @port: port number |
| @@ -1872,11 +1887,16 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts) | |||
| 1872 | if (!newmap->map[i]) | 1887 | if (!newmap->map[i]) |
| 1873 | goto bail; | 1888 | goto bail; |
| 1874 | newmap->map[i]->mask = (1 << ilog2(sz)) - 1; | 1889 | newmap->map[i]->mask = (1 << ilog2(sz)) - 1; |
| 1875 | /* assign send contexts */ | 1890 | /* |
| 1891 | * assign send contexts and | ||
| 1892 | * adjust credit return threshold | ||
| 1893 | */ | ||
| 1876 | for (j = 0; j < sz; j++) { | 1894 | for (j = 0; j < sz; j++) { |
| 1877 | if (dd->kernel_send_context[scontext]) | 1895 | if (dd->kernel_send_context[scontext]) { |
| 1878 | newmap->map[i]->ksc[j] = | 1896 | newmap->map[i]->ksc[j] = |
| 1879 | dd->kernel_send_context[scontext]; | 1897 | dd->kernel_send_context[scontext]; |
| 1898 | set_threshold(dd, scontext, i); | ||
| 1899 | } | ||
| 1880 | if (++scontext >= first_scontext + | 1900 | if (++scontext >= first_scontext + |
| 1881 | vl_scontexts[i]) | 1901 | vl_scontexts[i]) |
| 1882 | /* wrap back to first send context */ | 1902 | /* wrap back to first send context */ |
diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c index 2441669f0817..9fb561682c66 100644 --- a/drivers/infiniband/hw/hfi1/qsfp.c +++ b/drivers/infiniband/hw/hfi1/qsfp.c | |||
| @@ -579,7 +579,8 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len) | |||
| 579 | 579 | ||
| 580 | if (ppd->qsfp_info.cache_valid) { | 580 | if (ppd->qsfp_info.cache_valid) { |
| 581 | if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS])) | 581 | if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS])) |
| 582 | sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]); | 582 | snprintf(lenstr, sizeof(lenstr), "%dM ", |
| 583 | cache[QSFP_MOD_LEN_OFFS]); | ||
| 583 | 584 | ||
| 584 | power_byte = cache[QSFP_MOD_PWR_OFFS]; | 585 | power_byte = cache[QSFP_MOD_PWR_OFFS]; |
| 585 | sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", | 586 | sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n", |
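
The qsfp_dump() change bounds the cable-length formatting with snprintf() so an unexpected value can never overrun lenstr. A userspace sketch of the same guarantee, with a deliberately undersized buffer and a made-up length value:

/* snprintf() writes at most size - 1 characters and always NUL-terminates. */
#include <stdio.h>

int main(void)
{
	char lenstr[6];
	int cable_len = 12345; /* hypothetical, larger than the buffer expects */

	snprintf(lenstr, sizeof(lenstr), "%dM ", cable_len);
	printf("formatted: \"%s\"\n", lenstr); /* truncated, never overflows */
	return 0;
}
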
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c index 79b2952c0dfb..4cfb13771897 100644 --- a/drivers/infiniband/hw/hfi1/trace.c +++ b/drivers/infiniband/hw/hfi1/trace.c | |||
| @@ -214,19 +214,6 @@ const char *print_u32_array( | |||
| 214 | return ret; | 214 | return ret; |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | const char *print_u64_array( | ||
| 218 | struct trace_seq *p, | ||
| 219 | u64 *arr, int len) | ||
| 220 | { | ||
| 221 | int i; | ||
| 222 | const char *ret = trace_seq_buffer_ptr(p); | ||
| 223 | |||
| 224 | for (i = 0; i < len; i++) | ||
| 225 | trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]); | ||
| 226 | trace_seq_putc(p, 0); | ||
| 227 | return ret; | ||
| 228 | } | ||
| 229 | |||
| 230 | __hfi1_trace_fn(PKT); | 217 | __hfi1_trace_fn(PKT); |
| 231 | __hfi1_trace_fn(PROC); | 218 | __hfi1_trace_fn(PROC); |
| 232 | __hfi1_trace_fn(SDMA); | 219 | __hfi1_trace_fn(SDMA); |
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 29f4795f866c..47ffd273ecbd 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c | |||
| @@ -183,7 +183,7 @@ struct user_sdma_iovec { | |||
| 183 | struct sdma_mmu_node *node; | 183 | struct sdma_mmu_node *node; |
| 184 | }; | 184 | }; |
| 185 | 185 | ||
| 186 | #define SDMA_CACHE_NODE_EVICT BIT(0) | 186 | #define SDMA_CACHE_NODE_EVICT 0 |
| 187 | 187 | ||
| 188 | struct sdma_mmu_node { | 188 | struct sdma_mmu_node { |
| 189 | struct mmu_rb_node rb; | 189 | struct mmu_rb_node rb; |
| @@ -1355,11 +1355,11 @@ static int set_txreq_header(struct user_sdma_request *req, | |||
| 1355 | */ | 1355 | */ |
| 1356 | SDMA_DBG(req, "TID offset %ubytes %uunits om%u", | 1356 | SDMA_DBG(req, "TID offset %ubytes %uunits om%u", |
| 1357 | req->tidoffset, req->tidoffset / req->omfactor, | 1357 | req->tidoffset, req->tidoffset / req->omfactor, |
| 1358 | !!(req->omfactor - KDETH_OM_SMALL)); | 1358 | req->omfactor != KDETH_OM_SMALL); |
| 1359 | KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET, | 1359 | KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET, |
| 1360 | req->tidoffset / req->omfactor); | 1360 | req->tidoffset / req->omfactor); |
| 1361 | KDETH_SET(hdr->kdeth.ver_tid_offset, OM, | 1361 | KDETH_SET(hdr->kdeth.ver_tid_offset, OM, |
| 1362 | !!(req->omfactor - KDETH_OM_SMALL)); | 1362 | req->omfactor != KDETH_OM_SMALL); |
| 1363 | } | 1363 | } |
| 1364 | done: | 1364 | done: |
| 1365 | trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, | 1365 | trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, |
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c index bc95c4112c61..d8fb056526f8 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.c +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c | |||
| @@ -92,11 +92,10 @@ void hfi1_put_txreq(struct verbs_txreq *tx) | |||
| 92 | 92 | ||
| 93 | struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, | 93 | struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, |
| 94 | struct rvt_qp *qp) | 94 | struct rvt_qp *qp) |
| 95 | __must_hold(&qp->s_lock) | ||
| 95 | { | 96 | { |
| 96 | struct verbs_txreq *tx = ERR_PTR(-EBUSY); | 97 | struct verbs_txreq *tx = ERR_PTR(-EBUSY); |
| 97 | unsigned long flags; | ||
| 98 | 98 | ||
| 99 | spin_lock_irqsave(&qp->s_lock, flags); | ||
| 100 | write_seqlock(&dev->iowait_lock); | 99 | write_seqlock(&dev->iowait_lock); |
| 101 | if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { | 100 | if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { |
| 102 | struct hfi1_qp_priv *priv; | 101 | struct hfi1_qp_priv *priv; |
| @@ -116,7 +115,6 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, | |||
| 116 | } | 115 | } |
| 117 | out: | 116 | out: |
| 118 | write_sequnlock(&dev->iowait_lock); | 117 | write_sequnlock(&dev->iowait_lock); |
| 119 | spin_unlock_irqrestore(&qp->s_lock, flags); | ||
| 120 | return tx; | 118 | return tx; |
| 121 | } | 119 | } |
| 122 | 120 | ||
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h index 1cf69b2fe4a5..a1d6e0807f97 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.h +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h | |||
| @@ -73,6 +73,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev, | |||
| 73 | 73 | ||
| 74 | static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, | 74 | static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev, |
| 75 | struct rvt_qp *qp) | 75 | struct rvt_qp *qp) |
| 76 | __must_hold(&qp->slock) | ||
| 76 | { | 77 | { |
| 77 | struct verbs_txreq *tx; | 78 | struct verbs_txreq *tx; |
| 78 | struct hfi1_qp_priv *priv = qp->priv; | 79 | struct hfi1_qp_priv *priv = qp->priv; |
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index 8b9532034558..b738acdb9b02 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h | |||
| @@ -113,6 +113,8 @@ | |||
| 113 | 113 | ||
| 114 | #define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types) | 114 | #define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types) |
| 115 | #define IW_CFG_FPM_QP_COUNT 32768 | 115 | #define IW_CFG_FPM_QP_COUNT 32768 |
| 116 | #define I40IW_MAX_PAGES_PER_FMR 512 | ||
| 117 | #define I40IW_MIN_PAGES_PER_FMR 1 | ||
| 116 | 118 | ||
| 117 | #define I40IW_MTU_TO_MSS 40 | 119 | #define I40IW_MTU_TO_MSS 40 |
| 118 | #define I40IW_DEFAULT_MSS 1460 | 120 | #define I40IW_DEFAULT_MSS 1460 |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 02a735b64208..33959ed14563 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c | |||
| @@ -79,6 +79,7 @@ static int i40iw_query_device(struct ib_device *ibdev, | |||
| 79 | props->max_qp_init_rd_atom = props->max_qp_rd_atom; | 79 | props->max_qp_init_rd_atom = props->max_qp_rd_atom; |
| 80 | props->atomic_cap = IB_ATOMIC_NONE; | 80 | props->atomic_cap = IB_ATOMIC_NONE; |
| 81 | props->max_map_per_fmr = 1; | 81 | props->max_map_per_fmr = 1; |
| 82 | props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR; | ||
| 82 | return 0; | 83 | return 0; |
| 83 | } | 84 | } |
| 84 | 85 | ||
| @@ -1527,7 +1528,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, | |||
| 1527 | mutex_lock(&iwdev->pbl_mutex); | 1528 | mutex_lock(&iwdev->pbl_mutex); |
| 1528 | status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt); | 1529 | status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt); |
| 1529 | mutex_unlock(&iwdev->pbl_mutex); | 1530 | mutex_unlock(&iwdev->pbl_mutex); |
| 1530 | if (!status) | 1531 | if (status) |
| 1531 | goto err1; | 1532 | goto err1; |
| 1532 | 1533 | ||
| 1533 | if (palloc->level != I40IW_LEVEL_1) | 1534 | if (palloc->level != I40IW_LEVEL_1) |
| @@ -2149,6 +2150,7 @@ static int i40iw_post_send(struct ib_qp *ibqp, | |||
| 2149 | struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev; | 2150 | struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev; |
| 2150 | struct i40iw_fast_reg_stag_info info; | 2151 | struct i40iw_fast_reg_stag_info info; |
| 2151 | 2152 | ||
| 2153 | memset(&info, 0, sizeof(info)); | ||
| 2152 | info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD; | 2154 | info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD; |
| 2153 | info.access_rights |= i40iw_get_user_access(flags); | 2155 | info.access_rights |= i40iw_get_user_access(flags); |
| 2154 | info.stag_key = reg_wr(ib_wr)->key & 0xff; | 2156 | info.stag_key = reg_wr(ib_wr)->key & 0xff; |
| @@ -2158,10 +2160,14 @@ static int i40iw_post_send(struct ib_qp *ibqp, | |||
| 2158 | info.addr_type = I40IW_ADDR_TYPE_VA_BASED; | 2160 | info.addr_type = I40IW_ADDR_TYPE_VA_BASED; |
| 2159 | info.va = (void *)(uintptr_t)iwmr->ibmr.iova; | 2161 | info.va = (void *)(uintptr_t)iwmr->ibmr.iova; |
| 2160 | info.total_len = iwmr->ibmr.length; | 2162 | info.total_len = iwmr->ibmr.length; |
| 2163 | info.reg_addr_pa = *(u64 *)palloc->level1.addr; | ||
| 2161 | info.first_pm_pbl_index = palloc->level1.idx; | 2164 | info.first_pm_pbl_index = palloc->level1.idx; |
| 2162 | info.local_fence = ib_wr->send_flags & IB_SEND_FENCE; | 2165 | info.local_fence = ib_wr->send_flags & IB_SEND_FENCE; |
| 2163 | info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED; | 2166 | info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED; |
| 2164 | 2167 | ||
| 2168 | if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR) | ||
| 2169 | info.chunk_size = 1; | ||
| 2170 | |||
| 2165 | if (page_shift == 21) | 2171 | if (page_shift == 21) |
| 2166 | info.page_size = 1; /* 2M page */ | 2172 | info.page_size = 1; /* 2M page */ |
| 2167 | 2173 | ||
| @@ -2327,13 +2333,16 @@ static int i40iw_req_notify_cq(struct ib_cq *ibcq, | |||
| 2327 | { | 2333 | { |
| 2328 | struct i40iw_cq *iwcq; | 2334 | struct i40iw_cq *iwcq; |
| 2329 | struct i40iw_cq_uk *ukcq; | 2335 | struct i40iw_cq_uk *ukcq; |
| 2330 | enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED; | 2336 | unsigned long flags; |
| 2337 | enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT; | ||
| 2331 | 2338 | ||
| 2332 | iwcq = (struct i40iw_cq *)ibcq; | 2339 | iwcq = (struct i40iw_cq *)ibcq; |
| 2333 | ukcq = &iwcq->sc_cq.cq_uk; | 2340 | ukcq = &iwcq->sc_cq.cq_uk; |
| 2334 | if (notify_flags == IB_CQ_NEXT_COMP) | 2341 | if (notify_flags == IB_CQ_SOLICITED) |
| 2335 | cq_notify = IW_CQ_COMPL_EVENT; | 2342 | cq_notify = IW_CQ_COMPL_SOLICITED; |
| 2343 | spin_lock_irqsave(&iwcq->lock, flags); | ||
| 2336 | ukcq->ops.iw_cq_request_notification(ukcq, cq_notify); | 2344 | ukcq->ops.iw_cq_request_notification(ukcq, cq_notify); |
| 2345 | spin_unlock_irqrestore(&iwcq->lock, flags); | ||
| 2337 | return 0; | 2346 | return 0; |
| 2338 | } | 2347 | } |
| 2339 | 2348 | ||
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 105246fba2e7..5fc623362731 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
| @@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, | |||
| 47 | 47 | ||
| 48 | ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); | 48 | ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); |
| 49 | ah->av.ib.g_slid = ah_attr->src_path_bits; | 49 | ah->av.ib.g_slid = ah_attr->src_path_bits; |
| 50 | ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28); | ||
| 50 | if (ah_attr->ah_flags & IB_AH_GRH) { | 51 | if (ah_attr->ah_flags & IB_AH_GRH) { |
| 51 | ah->av.ib.g_slid |= 0x80; | 52 | ah->av.ib.g_slid |= 0x80; |
| 52 | ah->av.ib.gid_index = ah_attr->grh.sgid_index; | 53 | ah->av.ib.gid_index = ah_attr->grh.sgid_index; |
| @@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, | |||
| 64 | !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support)) | 65 | !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support)) |
| 65 | --ah->av.ib.stat_rate; | 66 | --ah->av.ib.stat_rate; |
| 66 | } | 67 | } |
| 67 | ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28); | ||
| 68 | 68 | ||
| 69 | return &ah->ibah; | 69 | return &ah->ibah; |
| 70 | } | 70 | } |
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index d68f506c1922..9c2e53d28f98 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
| @@ -527,7 +527,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
| 527 | tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1); | 527 | tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1); |
| 528 | spin_unlock(&tun_qp->tx_lock); | 528 | spin_unlock(&tun_qp->tx_lock); |
| 529 | if (ret) | 529 | if (ret) |
| 530 | goto out; | 530 | goto end; |
| 531 | 531 | ||
| 532 | tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr); | 532 | tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr); |
| 533 | if (tun_qp->tx_ring[tun_tx_ix].ah) | 533 | if (tun_qp->tx_ring[tun_tx_ix].ah) |
| @@ -596,9 +596,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
| 596 | wr.wr.send_flags = IB_SEND_SIGNALED; | 596 | wr.wr.send_flags = IB_SEND_SIGNALED; |
| 597 | 597 | ||
| 598 | ret = ib_post_send(src_qp, &wr.wr, &bad_wr); | 598 | ret = ib_post_send(src_qp, &wr.wr, &bad_wr); |
| 599 | out: | 599 | if (!ret) |
| 600 | if (ret) | 600 | return 0; |
| 601 | ib_destroy_ah(ah); | 601 | out: |
| 602 | spin_lock(&tun_qp->tx_lock); | ||
| 603 | tun_qp->tx_ix_tail++; | ||
| 604 | spin_unlock(&tun_qp->tx_lock); | ||
| 605 | tun_qp->tx_ring[tun_tx_ix].ah = NULL; | ||
| 606 | end: | ||
| 607 | ib_destroy_ah(ah); | ||
| 602 | return ret; | 608 | return ret; |
| 603 | } | 609 | } |
| 604 | 610 | ||
| @@ -1326,9 +1332,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
| 1326 | 1332 | ||
| 1327 | 1333 | ||
| 1328 | ret = ib_post_send(send_qp, &wr.wr, &bad_wr); | 1334 | ret = ib_post_send(send_qp, &wr.wr, &bad_wr); |
| 1335 | if (!ret) | ||
| 1336 | return 0; | ||
| 1337 | |||
| 1338 | spin_lock(&sqp->tx_lock); | ||
| 1339 | sqp->tx_ix_tail++; | ||
| 1340 | spin_unlock(&sqp->tx_lock); | ||
| 1341 | sqp->tx_ring[wire_tx_ix].ah = NULL; | ||
| 1329 | out: | 1342 | out: |
| 1330 | if (ret) | 1343 | ib_destroy_ah(ah); |
| 1331 | ib_destroy_ah(ah); | ||
| 1332 | return ret; | 1344 | return ret; |
| 1333 | } | 1345 | } |
| 1334 | 1346 | ||
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index b01ef6eee6e8..42a46078d7d5 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
| @@ -505,9 +505,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
| 505 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; | 505 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; |
| 506 | else | 506 | else |
| 507 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A; | 507 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A; |
| 508 | if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) | ||
| 509 | props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; | ||
| 510 | } | 508 | } |
| 509 | if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) | ||
| 510 | props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; | ||
| 511 | 511 | ||
| 512 | props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; | 512 | props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; |
| 513 | 513 | ||
| @@ -1704,6 +1704,9 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, | |||
| 1704 | struct mlx4_dev *dev = (to_mdev(qp->device))->dev; | 1704 | struct mlx4_dev *dev = (to_mdev(qp->device))->dev; |
| 1705 | int is_bonded = mlx4_is_bonded(dev); | 1705 | int is_bonded = mlx4_is_bonded(dev); |
| 1706 | 1706 | ||
| 1707 | if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt) | ||
| 1708 | return ERR_PTR(-EINVAL); | ||
| 1709 | |||
| 1707 | if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) && | 1710 | if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) && |
| 1708 | (flow_attr->type != IB_FLOW_ATTR_NORMAL)) | 1711 | (flow_attr->type != IB_FLOW_ATTR_NORMAL)) |
| 1709 | return ERR_PTR(-EOPNOTSUPP); | 1712 | return ERR_PTR(-EOPNOTSUPP); |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6c5ac5d8f32f..29acda249612 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
| @@ -139,7 +139,7 @@ struct mlx4_ib_mr { | |||
| 139 | u32 max_pages; | 139 | u32 max_pages; |
| 140 | struct mlx4_mr mmr; | 140 | struct mlx4_mr mmr; |
| 141 | struct ib_umem *umem; | 141 | struct ib_umem *umem; |
| 142 | void *pages_alloc; | 142 | size_t page_map_size; |
| 143 | }; | 143 | }; |
| 144 | 144 | ||
| 145 | struct mlx4_ib_mw { | 145 | struct mlx4_ib_mw { |
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 631272172a0b..5d73989d9771 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
| @@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device, | |||
| 277 | struct mlx4_ib_mr *mr, | 277 | struct mlx4_ib_mr *mr, |
| 278 | int max_pages) | 278 | int max_pages) |
| 279 | { | 279 | { |
| 280 | int size = max_pages * sizeof(u64); | ||
| 281 | int add_size; | ||
| 282 | int ret; | 280 | int ret; |
| 283 | 281 | ||
| 284 | add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0); | 282 | /* Ensure that size is aligned to DMA cacheline |
| 283 | * requirements. | ||
| 284 | * max_pages is limited to MLX4_MAX_FAST_REG_PAGES | ||
| 285 | * so page_map_size will never cross PAGE_SIZE. | ||
| 286 | */ | ||
| 287 | mr->page_map_size = roundup(max_pages * sizeof(u64), | ||
| 288 | MLX4_MR_PAGES_ALIGN); | ||
| 285 | 289 | ||
| 286 | mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL); | 290 | /* Prevent cross page boundary allocation. */ |
| 287 | if (!mr->pages_alloc) | 291 | mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL); |
| 292 | if (!mr->pages) | ||
| 288 | return -ENOMEM; | 293 | return -ENOMEM; |
| 289 | 294 | ||
| 290 | mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN); | ||
| 291 | |||
| 292 | mr->page_map = dma_map_single(device->dma_device, mr->pages, | 295 | mr->page_map = dma_map_single(device->dma_device, mr->pages, |
| 293 | size, DMA_TO_DEVICE); | 296 | mr->page_map_size, DMA_TO_DEVICE); |
| 294 | 297 | ||
| 295 | if (dma_mapping_error(device->dma_device, mr->page_map)) { | 298 | if (dma_mapping_error(device->dma_device, mr->page_map)) { |
| 296 | ret = -ENOMEM; | 299 | ret = -ENOMEM; |
| @@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device, | |||
| 298 | } | 301 | } |
| 299 | 302 | ||
| 300 | return 0; | 303 | return 0; |
| 301 | err: | ||
| 302 | kfree(mr->pages_alloc); | ||
| 303 | 304 | ||
| 305 | err: | ||
| 306 | free_page((unsigned long)mr->pages); | ||
| 304 | return ret; | 307 | return ret; |
| 305 | } | 308 | } |
| 306 | 309 | ||
| @@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr) | |||
| 309 | { | 312 | { |
| 310 | if (mr->pages) { | 313 | if (mr->pages) { |
| 311 | struct ib_device *device = mr->ibmr.device; | 314 | struct ib_device *device = mr->ibmr.device; |
| 312 | int size = mr->max_pages * sizeof(u64); | ||
| 313 | 315 | ||
| 314 | dma_unmap_single(device->dma_device, mr->page_map, | 316 | dma_unmap_single(device->dma_device, mr->page_map, |
| 315 | size, DMA_TO_DEVICE); | 317 | mr->page_map_size, DMA_TO_DEVICE); |
| 316 | kfree(mr->pages_alloc); | 318 | free_page((unsigned long)mr->pages); |
| 317 | mr->pages = NULL; | 319 | mr->pages = NULL; |
| 318 | } | 320 | } |
| 319 | } | 321 | } |
| @@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, | |||
| 537 | mr->npages = 0; | 539 | mr->npages = 0; |
| 538 | 540 | ||
| 539 | ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map, | 541 | ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map, |
| 540 | sizeof(u64) * mr->max_pages, | 542 | mr->page_map_size, DMA_TO_DEVICE); |
| 541 | DMA_TO_DEVICE); | ||
| 542 | 543 | ||
| 543 | rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page); | 544 | rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page); |
| 544 | 545 | ||
| 545 | ib_dma_sync_single_for_device(ibmr->device, mr->page_map, | 546 | ib_dma_sync_single_for_device(ibmr->device, mr->page_map, |
| 546 | sizeof(u64) * mr->max_pages, | 547 | mr->page_map_size, DMA_TO_DEVICE); |
| 547 | DMA_TO_DEVICE); | ||
| 548 | 548 | ||
| 549 | return rc; | 549 | return rc; |
| 550 | } | 550 | } |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 81b0e1fbec1d..8db8405c1e99 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
| @@ -362,7 +362,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags) | |||
| 362 | sizeof (struct mlx4_wqe_raddr_seg); | 362 | sizeof (struct mlx4_wqe_raddr_seg); |
| 363 | case MLX4_IB_QPT_RC: | 363 | case MLX4_IB_QPT_RC: |
| 364 | return sizeof (struct mlx4_wqe_ctrl_seg) + | 364 | return sizeof (struct mlx4_wqe_ctrl_seg) + |
| 365 | sizeof (struct mlx4_wqe_atomic_seg) + | 365 | sizeof (struct mlx4_wqe_masked_atomic_seg) + |
| 366 | sizeof (struct mlx4_wqe_raddr_seg); | 366 | sizeof (struct mlx4_wqe_raddr_seg); |
| 367 | case MLX4_IB_QPT_SMI: | 367 | case MLX4_IB_QPT_SMI: |
| 368 | case MLX4_IB_QPT_GSI: | 368 | case MLX4_IB_QPT_GSI: |
| @@ -1191,8 +1191,10 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, | |||
| 1191 | { | 1191 | { |
| 1192 | err = create_qp_common(to_mdev(pd->device), pd, init_attr, | 1192 | err = create_qp_common(to_mdev(pd->device), pd, init_attr, |
| 1193 | udata, 0, &qp, gfp); | 1193 | udata, 0, &qp, gfp); |
| 1194 | if (err) | 1194 | if (err) { |
| 1195 | kfree(qp); | ||
| 1195 | return ERR_PTR(err); | 1196 | return ERR_PTR(err); |
| 1197 | } | ||
| 1196 | 1198 | ||
| 1197 | qp->ibqp.qp_num = qp->mqp.qpn; | 1199 | qp->ibqp.qp_num = qp->mqp.qpn; |
| 1198 | qp->xrcdn = xrcdn; | 1200 | qp->xrcdn = xrcdn; |
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index dabcc65bd65e..9c0e67bd2ba7 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
| @@ -822,7 +822,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, | |||
| 822 | int eqn; | 822 | int eqn; |
| 823 | int err; | 823 | int err; |
| 824 | 824 | ||
| 825 | if (entries < 0) | 825 | if (entries < 0 || |
| 826 | (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))) | ||
| 826 | return ERR_PTR(-EINVAL); | 827 | return ERR_PTR(-EINVAL); |
| 827 | 828 | ||
| 828 | if (check_cq_create_flags(attr->flags)) | 829 | if (check_cq_create_flags(attr->flags)) |
| @@ -1168,11 +1169,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) | |||
| 1168 | return -ENOSYS; | 1169 | return -ENOSYS; |
| 1169 | } | 1170 | } |
| 1170 | 1171 | ||
| 1171 | if (entries < 1) | 1172 | if (entries < 1 || |
| 1173 | entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) { | ||
| 1174 | mlx5_ib_warn(dev, "wrong entries number %d, max %d\n", | ||
| 1175 | entries, | ||
| 1176 | 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)); | ||
| 1172 | return -EINVAL; | 1177 | return -EINVAL; |
| 1178 | } | ||
| 1173 | 1179 | ||
| 1174 | entries = roundup_pow_of_two(entries + 1); | 1180 | entries = roundup_pow_of_two(entries + 1); |
| 1175 | if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) | 1181 | if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) |
| 1176 | return -EINVAL; | 1182 | return -EINVAL; |
| 1177 | 1183 | ||
| 1178 | if (entries == ibcq->cqe + 1) | 1184 | if (entries == ibcq->cqe + 1) |
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 1534af113058..364aab9f3c9e 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
| @@ -121,7 +121,7 @@ static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext, | |||
| 121 | pma_cnt_ext->port_xmit_data = | 121 | pma_cnt_ext->port_xmit_data = |
| 122 | cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets, | 122 | cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets, |
| 123 | transmitted_ib_multicast.octets) >> 2); | 123 | transmitted_ib_multicast.octets) >> 2); |
| 124 | pma_cnt_ext->port_xmit_data = | 124 | pma_cnt_ext->port_rcv_data = |
| 125 | cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets, | 125 | cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets, |
| 126 | received_ib_multicast.octets) >> 2); | 126 | received_ib_multicast.octets) >> 2); |
| 127 | pma_cnt_ext->port_xmit_packets = | 127 | pma_cnt_ext->port_xmit_packets = |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c72797cd9e4f..b48ad85315dc 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
| @@ -524,6 +524,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, | |||
| 524 | MLX5_CAP_ETH(dev->mdev, scatter_fcs)) | 524 | MLX5_CAP_ETH(dev->mdev, scatter_fcs)) |
| 525 | props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS; | 525 | props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS; |
| 526 | 526 | ||
| 527 | if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) | ||
| 528 | props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; | ||
| 529 | |||
| 527 | props->vendor_part_id = mdev->pdev->device; | 530 | props->vendor_part_id = mdev->pdev->device; |
| 528 | props->hw_ver = mdev->pdev->revision; | 531 | props->hw_ver = mdev->pdev->revision; |
| 529 | 532 | ||
| @@ -915,7 +918,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | |||
| 915 | num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; | 918 | num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; |
| 916 | gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; | 919 | gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; |
| 917 | resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); | 920 | resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); |
| 918 | resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); | 921 | if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) |
| 922 | resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); | ||
| 919 | resp.cache_line_size = L1_CACHE_BYTES; | 923 | resp.cache_line_size = L1_CACHE_BYTES; |
| 920 | resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); | 924 | resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); |
| 921 | resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); | 925 | resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); |
| @@ -988,7 +992,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | |||
| 988 | if (field_avail(typeof(resp), cqe_version, udata->outlen)) | 992 | if (field_avail(typeof(resp), cqe_version, udata->outlen)) |
| 989 | resp.response_length += sizeof(resp.cqe_version); | 993 | resp.response_length += sizeof(resp.cqe_version); |
| 990 | 994 | ||
| 991 | if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { | 995 | /* |
| 996 | * We don't want to expose information from the PCI bar that is located | ||
| 997 | * after 4096 bytes, so if the arch only supports larger pages, let's | ||
| 998 | * pretend we don't support reading the HCA's core clock. This is also | ||
| 999 | * forced by mmap function. | ||
| 1000 | */ | ||
| 1001 | if (PAGE_SIZE <= 4096 && | ||
| 1002 | field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { | ||
| 992 | resp.comp_mask |= | 1003 | resp.comp_mask |= |
| 993 | MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; | 1004 | MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; |
| 994 | resp.hca_core_clock_offset = | 1005 | resp.hca_core_clock_offset = |
| @@ -1798,7 +1809,7 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, | |||
| 1798 | { | 1809 | { |
| 1799 | struct mlx5_ib_dev *dev = | 1810 | struct mlx5_ib_dev *dev = |
| 1800 | container_of(device, struct mlx5_ib_dev, ib_dev.dev); | 1811 | container_of(device, struct mlx5_ib_dev, ib_dev.dev); |
| 1801 | return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev), | 1812 | return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev), |
| 1802 | fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); | 1813 | fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); |
| 1803 | } | 1814 | } |
| 1804 | 1815 | ||
| @@ -1866,14 +1877,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, | |||
| 1866 | break; | 1877 | break; |
| 1867 | 1878 | ||
| 1868 | case MLX5_DEV_EVENT_PORT_DOWN: | 1879 | case MLX5_DEV_EVENT_PORT_DOWN: |
| 1880 | case MLX5_DEV_EVENT_PORT_INITIALIZED: | ||
| 1869 | ibev.event = IB_EVENT_PORT_ERR; | 1881 | ibev.event = IB_EVENT_PORT_ERR; |
| 1870 | port = (u8)param; | 1882 | port = (u8)param; |
| 1871 | break; | 1883 | break; |
| 1872 | 1884 | ||
| 1873 | case MLX5_DEV_EVENT_PORT_INITIALIZED: | ||
| 1874 | /* not used by ULPs */ | ||
| 1875 | return; | ||
| 1876 | |||
| 1877 | case MLX5_DEV_EVENT_LID_CHANGE: | 1885 | case MLX5_DEV_EVENT_LID_CHANGE: |
| 1878 | ibev.event = IB_EVENT_LID_CHANGE; | 1886 | ibev.event = IB_EVENT_LID_CHANGE; |
| 1879 | port = (u8)param; | 1887 | port = (u8)param; |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 504117657d41..ce0a7ab35a22 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
| @@ -235,6 +235,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, | |||
| 235 | qp->rq.max_gs = 0; | 235 | qp->rq.max_gs = 0; |
| 236 | qp->rq.wqe_cnt = 0; | 236 | qp->rq.wqe_cnt = 0; |
| 237 | qp->rq.wqe_shift = 0; | 237 | qp->rq.wqe_shift = 0; |
| 238 | cap->max_recv_wr = 0; | ||
| 239 | cap->max_recv_sge = 0; | ||
| 238 | } else { | 240 | } else { |
| 239 | if (ucmd) { | 241 | if (ucmd) { |
| 240 | qp->rq.wqe_cnt = ucmd->rq_wqe_count; | 242 | qp->rq.wqe_cnt = ucmd->rq_wqe_count; |
| @@ -1851,13 +1853,15 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, | |||
| 1851 | static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | 1853 | static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, |
| 1852 | const struct ib_ah_attr *ah, | 1854 | const struct ib_ah_attr *ah, |
| 1853 | struct mlx5_qp_path *path, u8 port, int attr_mask, | 1855 | struct mlx5_qp_path *path, u8 port, int attr_mask, |
| 1854 | u32 path_flags, const struct ib_qp_attr *attr) | 1856 | u32 path_flags, const struct ib_qp_attr *attr, |
| 1857 | bool alt) | ||
| 1855 | { | 1858 | { |
| 1856 | enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port); | 1859 | enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port); |
| 1857 | int err; | 1860 | int err; |
| 1858 | 1861 | ||
| 1859 | if (attr_mask & IB_QP_PKEY_INDEX) | 1862 | if (attr_mask & IB_QP_PKEY_INDEX) |
| 1860 | path->pkey_index = attr->pkey_index; | 1863 | path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index : |
| 1864 | attr->pkey_index); | ||
| 1861 | 1865 | ||
| 1862 | if (ah->ah_flags & IB_AH_GRH) { | 1866 | if (ah->ah_flags & IB_AH_GRH) { |
| 1863 | if (ah->grh.sgid_index >= | 1867 | if (ah->grh.sgid_index >= |
| @@ -1877,9 +1881,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
| 1877 | ah->grh.sgid_index); | 1881 | ah->grh.sgid_index); |
| 1878 | path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4; | 1882 | path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4; |
| 1879 | } else { | 1883 | } else { |
| 1880 | path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; | 1884 | path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; |
| 1881 | path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : | 1885 | path->fl_free_ar |= |
| 1882 | 0; | 1886 | (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0; |
| 1883 | path->rlid = cpu_to_be16(ah->dlid); | 1887 | path->rlid = cpu_to_be16(ah->dlid); |
| 1884 | path->grh_mlid = ah->src_path_bits & 0x7f; | 1888 | path->grh_mlid = ah->src_path_bits & 0x7f; |
| 1885 | if (ah->ah_flags & IB_AH_GRH) | 1889 | if (ah->ah_flags & IB_AH_GRH) |
| @@ -1903,7 +1907,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
| 1903 | path->port = port; | 1907 | path->port = port; |
| 1904 | 1908 | ||
| 1905 | if (attr_mask & IB_QP_TIMEOUT) | 1909 | if (attr_mask & IB_QP_TIMEOUT) |
| 1906 | path->ackto_lt = attr->timeout << 3; | 1910 | path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3; |
| 1907 | 1911 | ||
| 1908 | if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt) | 1912 | if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt) |
| 1909 | return modify_raw_packet_eth_prio(dev->mdev, | 1913 | return modify_raw_packet_eth_prio(dev->mdev, |
| @@ -2264,7 +2268,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
| 2264 | context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num); | 2268 | context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num); |
| 2265 | 2269 | ||
| 2266 | if (attr_mask & IB_QP_PKEY_INDEX) | 2270 | if (attr_mask & IB_QP_PKEY_INDEX) |
| 2267 | context->pri_path.pkey_index = attr->pkey_index; | 2271 | context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index); |
| 2268 | 2272 | ||
| 2269 | /* todo implement counter_index functionality */ | 2273 | /* todo implement counter_index functionality */ |
| 2270 | 2274 | ||
| @@ -2277,7 +2281,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
| 2277 | if (attr_mask & IB_QP_AV) { | 2281 | if (attr_mask & IB_QP_AV) { |
| 2278 | err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path, | 2282 | err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path, |
| 2279 | attr_mask & IB_QP_PORT ? attr->port_num : qp->port, | 2283 | attr_mask & IB_QP_PORT ? attr->port_num : qp->port, |
| 2280 | attr_mask, 0, attr); | 2284 | attr_mask, 0, attr, false); |
| 2281 | if (err) | 2285 | if (err) |
| 2282 | goto out; | 2286 | goto out; |
| 2283 | } | 2287 | } |
| @@ -2288,7 +2292,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
| 2288 | if (attr_mask & IB_QP_ALT_PATH) { | 2292 | if (attr_mask & IB_QP_ALT_PATH) { |
| 2289 | err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, | 2293 | err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, |
| 2290 | &context->alt_path, | 2294 | &context->alt_path, |
| 2291 | attr->alt_port_num, attr_mask, 0, attr); | 2295 | attr->alt_port_num, |
| 2296 | attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT, | ||
| 2297 | 0, attr, true); | ||
| 2292 | if (err) | 2298 | if (err) |
| 2293 | goto out; | 2299 | goto out; |
| 2294 | } | 2300 | } |
| @@ -3326,10 +3332,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr) | |||
| 3326 | return MLX5_FENCE_MODE_SMALL_AND_FENCE; | 3332 | return MLX5_FENCE_MODE_SMALL_AND_FENCE; |
| 3327 | else | 3333 | else |
| 3328 | return fence; | 3334 | return fence; |
| 3329 | 3335 | } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) { | |
| 3330 | } else { | 3336 | return MLX5_FENCE_MODE_FENCE; |
| 3331 | return 0; | ||
| 3332 | } | 3337 | } |
| 3338 | |||
| 3339 | return 0; | ||
| 3333 | } | 3340 | } |
| 3334 | 3341 | ||
| 3335 | static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, | 3342 | static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, |
| @@ -4013,11 +4020,12 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
| 4013 | if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { | 4020 | if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { |
| 4014 | to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); | 4021 | to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); |
| 4015 | to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); | 4022 | to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); |
| 4016 | qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f; | 4023 | qp_attr->alt_pkey_index = |
| 4024 | be16_to_cpu(context->alt_path.pkey_index); | ||
| 4017 | qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; | 4025 | qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; |
| 4018 | } | 4026 | } |
| 4019 | 4027 | ||
| 4020 | qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f; | 4028 | qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index); |
| 4021 | qp_attr->port_num = context->pri_path.port; | 4029 | qp_attr->port_num = context->pri_path.port; |
| 4022 | 4030 | ||
| 4023 | /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ | 4031 | /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ |
| @@ -4079,17 +4087,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, | |||
| 4079 | qp_attr->cap.max_recv_sge = qp->rq.max_gs; | 4087 | qp_attr->cap.max_recv_sge = qp->rq.max_gs; |
| 4080 | 4088 | ||
| 4081 | if (!ibqp->uobject) { | 4089 | if (!ibqp->uobject) { |
| 4082 | qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; | 4090 | qp_attr->cap.max_send_wr = qp->sq.max_post; |
| 4083 | qp_attr->cap.max_send_sge = qp->sq.max_gs; | 4091 | qp_attr->cap.max_send_sge = qp->sq.max_gs; |
| 4092 | qp_init_attr->qp_context = ibqp->qp_context; | ||
| 4084 | } else { | 4093 | } else { |
| 4085 | qp_attr->cap.max_send_wr = 0; | 4094 | qp_attr->cap.max_send_wr = 0; |
| 4086 | qp_attr->cap.max_send_sge = 0; | 4095 | qp_attr->cap.max_send_sge = 0; |
| 4087 | } | 4096 | } |
| 4088 | 4097 | ||
| 4089 | /* We don't support inline sends for kernel QPs (yet), and we | 4098 | qp_init_attr->qp_type = ibqp->qp_type; |
| 4090 | * don't know what userspace's value should be. | 4099 | qp_init_attr->recv_cq = ibqp->recv_cq; |
| 4091 | */ | 4100 | qp_init_attr->send_cq = ibqp->send_cq; |
| 4092 | qp_attr->cap.max_inline_data = 0; | 4101 | qp_init_attr->srq = ibqp->srq; |
| 4102 | qp_attr->cap.max_inline_data = qp->max_inline_data; | ||
| 4093 | 4103 | ||
| 4094 | qp_init_attr->cap = qp_attr->cap; | 4104 | qp_init_attr->cap = qp_attr->cap; |
| 4095 | 4105 | ||
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index ff946d5f59e4..382466a90da7 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
| @@ -2178,6 +2178,11 @@ static ssize_t qib_write(struct file *fp, const char __user *data, | |||
| 2178 | 2178 | ||
| 2179 | switch (cmd.type) { | 2179 | switch (cmd.type) { |
| 2180 | case QIB_CMD_ASSIGN_CTXT: | 2180 | case QIB_CMD_ASSIGN_CTXT: |
| 2181 | if (rcd) { | ||
| 2182 | ret = -EINVAL; | ||
| 2183 | goto bail; | ||
| 2184 | } | ||
| 2185 | |||
| 2181 | ret = qib_assign_ctxt(fp, &cmd.cmd.user_info); | 2186 | ret = qib_assign_ctxt(fp, &cmd.cmd.user_info); |
| 2182 | if (ret) | 2187 | if (ret) |
| 2183 | goto bail; | 2188 | goto bail; |
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 7209fbc03ccb..a0b6ebee4d8a 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
| @@ -36,7 +36,6 @@ | |||
| 36 | #include <linux/dma-mapping.h> | 36 | #include <linux/dma-mapping.h> |
| 37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
| 38 | #include <linux/hugetlb.h> | 38 | #include <linux/hugetlb.h> |
| 39 | #include <linux/dma-attrs.h> | ||
| 40 | #include <linux/iommu.h> | 39 | #include <linux/iommu.h> |
| 41 | #include <linux/workqueue.h> | 40 | #include <linux/workqueue.h> |
| 42 | #include <linux/list.h> | 41 | #include <linux/list.h> |
| @@ -112,10 +111,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, | |||
| 112 | int i; | 111 | int i; |
| 113 | int flags; | 112 | int flags; |
| 114 | dma_addr_t pa; | 113 | dma_addr_t pa; |
| 115 | DEFINE_DMA_ATTRS(attrs); | ||
| 116 | |||
| 117 | if (dmasync) | ||
| 118 | dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); | ||
| 119 | 114 | ||
| 120 | if (!can_do_mlock()) | 115 | if (!can_do_mlock()) |
| 121 | return -EPERM; | 116 | return -EPERM; |
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 5fa4d4d81ee0..41ba7e9cadaa 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
| @@ -369,8 +369,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, | |||
| 369 | /* wrap to first map page, invert bit 0 */ | 369 | /* wrap to first map page, invert bit 0 */ |
| 370 | offset = qpt->incr | ((offset & 1) ^ 1); | 370 | offset = qpt->incr | ((offset & 1) ^ 1); |
| 371 | } | 371 | } |
| 372 | /* there can be no bits at shift and below */ | 372 | /* there can be no set bits in low-order QoS bits */ |
| 373 | WARN_ON(offset & (rdi->dparms.qos_shift - 1)); | 373 | WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1)); |
| 374 | qpn = mk_qpn(qpt, map, offset); | 374 | qpn = mk_qpn(qpt, map, offset); |
| 375 | } | 375 | } |
| 376 | 376 | ||
| @@ -502,6 +502,12 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp) | |||
| 502 | */ | 502 | */ |
| 503 | static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, | 503 | static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
| 504 | enum ib_qp_type type) | 504 | enum ib_qp_type type) |
| 505 | __releases(&qp->s_lock) | ||
| 506 | __releases(&qp->s_hlock) | ||
| 507 | __releases(&qp->r_lock) | ||
| 508 | __acquires(&qp->r_lock) | ||
| 509 | __acquires(&qp->s_hlock) | ||
| 510 | __acquires(&qp->s_lock) | ||
| 505 | { | 511 | { |
| 506 | if (qp->state != IB_QPS_RESET) { | 512 | if (qp->state != IB_QPS_RESET) { |
| 507 | qp->state = IB_QPS_RESET; | 513 | qp->state = IB_QPS_RESET; |
| @@ -570,12 +576,6 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, | |||
| 570 | qp->s_ssn = 1; | 576 | qp->s_ssn = 1; |
| 571 | qp->s_lsn = 0; | 577 | qp->s_lsn = 0; |
| 572 | qp->s_mig_state = IB_MIG_MIGRATED; | 578 | qp->s_mig_state = IB_MIG_MIGRATED; |
| 573 | if (qp->s_ack_queue) | ||
| 574 | memset( | ||
| 575 | qp->s_ack_queue, | ||
| 576 | 0, | ||
| 577 | rvt_max_atomic(rdi) * | ||
| 578 | sizeof(*qp->s_ack_queue)); | ||
| 579 | qp->r_head_ack_queue = 0; | 579 | qp->r_head_ack_queue = 0; |
| 580 | qp->s_tail_ack_queue = 0; | 580 | qp->s_tail_ack_queue = 0; |
| 581 | qp->s_num_rd_atomic = 0; | 581 | qp->s_num_rd_atomic = 0; |
| @@ -699,8 +699,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, | |||
| 699 | * initialization that is needed. | 699 | * initialization that is needed. |
| 700 | */ | 700 | */ |
| 701 | priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp); | 701 | priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp); |
| 702 | if (!priv) | 702 | if (IS_ERR(priv)) { |
| 703 | ret = priv; | ||
| 703 | goto bail_qp; | 704 | goto bail_qp; |
| 705 | } | ||
| 704 | qp->priv = priv; | 706 | qp->priv = priv; |
| 705 | qp->timeout_jiffies = | 707 | qp->timeout_jiffies = |
| 706 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / | 708 | usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / |
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index e1cc2cc42f25..30c4fda7a05a 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
| @@ -501,9 +501,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb) | |||
| 501 | !rdi->driver_f.quiesce_qp || | 501 | !rdi->driver_f.quiesce_qp || |
| 502 | !rdi->driver_f.notify_error_qp || | 502 | !rdi->driver_f.notify_error_qp || |
| 503 | !rdi->driver_f.mtu_from_qp || | 503 | !rdi->driver_f.mtu_from_qp || |
| 504 | !rdi->driver_f.mtu_to_path_mtu || | 504 | !rdi->driver_f.mtu_to_path_mtu) |
| 505 | !rdi->driver_f.shut_down_port || | ||
| 506 | !rdi->driver_f.cap_mask_chg) | ||
| 507 | return -EINVAL; | 505 | return -EINVAL; |
| 508 | break; | 506 | break; |
| 509 | 507 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index bab7db6fa9ab..4f7d9b48df64 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
| @@ -94,6 +94,7 @@ enum { | |||
| 94 | IPOIB_NEIGH_TBL_FLUSH = 12, | 94 | IPOIB_NEIGH_TBL_FLUSH = 12, |
| 95 | IPOIB_FLAG_DEV_ADDR_SET = 13, | 95 | IPOIB_FLAG_DEV_ADDR_SET = 13, |
| 96 | IPOIB_FLAG_DEV_ADDR_CTRL = 14, | 96 | IPOIB_FLAG_DEV_ADDR_CTRL = 14, |
| 97 | IPOIB_FLAG_GOING_DOWN = 15, | ||
| 97 | 98 | ||
| 98 | IPOIB_MAX_BACKOFF_SECONDS = 16, | 99 | IPOIB_MAX_BACKOFF_SECONDS = 16, |
| 99 | 100 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index b2f42835d76d..951d9abcca8b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
| @@ -1486,6 +1486,10 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr, | |||
| 1486 | { | 1486 | { |
| 1487 | struct net_device *dev = to_net_dev(d); | 1487 | struct net_device *dev = to_net_dev(d); |
| 1488 | int ret; | 1488 | int ret; |
| 1489 | struct ipoib_dev_priv *priv = netdev_priv(dev); | ||
| 1490 | |||
| 1491 | if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags)) | ||
| 1492 | return -EPERM; | ||
| 1489 | 1493 | ||
| 1490 | if (!rtnl_trylock()) | 1494 | if (!rtnl_trylock()) |
| 1491 | return restart_syscall(); | 1495 | return restart_syscall(); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 45c40a17d6a6..dc6d241b9406 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
| @@ -1015,7 +1015,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) | |||
| 1015 | if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL)) | 1015 | if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL)) |
| 1016 | return false; | 1016 | return false; |
| 1017 | 1017 | ||
| 1018 | netif_addr_lock(priv->dev); | 1018 | netif_addr_lock_bh(priv->dev); |
| 1019 | 1019 | ||
| 1020 | /* The subnet prefix may have changed, update it now so we won't have | 1020 | /* The subnet prefix may have changed, update it now so we won't have |
| 1021 | * to do it later | 1021 | * to do it later |
| @@ -1026,12 +1026,12 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) | |||
| 1026 | 1026 | ||
| 1027 | search_gid.global.interface_id = priv->local_gid.global.interface_id; | 1027 | search_gid.global.interface_id = priv->local_gid.global.interface_id; |
| 1028 | 1028 | ||
| 1029 | netif_addr_unlock(priv->dev); | 1029 | netif_addr_unlock_bh(priv->dev); |
| 1030 | 1030 | ||
| 1031 | err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB, | 1031 | err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB, |
| 1032 | priv->dev, &port, &index); | 1032 | priv->dev, &port, &index); |
| 1033 | 1033 | ||
| 1034 | netif_addr_lock(priv->dev); | 1034 | netif_addr_lock_bh(priv->dev); |
| 1035 | 1035 | ||
| 1036 | if (search_gid.global.interface_id != | 1036 | if (search_gid.global.interface_id != |
| 1037 | priv->local_gid.global.interface_id) | 1037 | priv->local_gid.global.interface_id) |
| @@ -1092,7 +1092,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) | |||
| 1092 | } | 1092 | } |
| 1093 | 1093 | ||
| 1094 | out: | 1094 | out: |
| 1095 | netif_addr_unlock(priv->dev); | 1095 | netif_addr_unlock_bh(priv->dev); |
| 1096 | 1096 | ||
| 1097 | return ret; | 1097 | return ret; |
| 1098 | } | 1098 | } |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 2d7c16346648..5f58c41ef787 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
| @@ -1206,7 +1206,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr) | |||
| 1206 | neigh = NULL; | 1206 | neigh = NULL; |
| 1207 | goto out_unlock; | 1207 | goto out_unlock; |
| 1208 | } | 1208 | } |
| 1209 | neigh->alive = jiffies; | 1209 | |
| 1210 | if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)) | ||
| 1211 | neigh->alive = jiffies; | ||
| 1210 | goto out_unlock; | 1212 | goto out_unlock; |
| 1211 | } | 1213 | } |
| 1212 | } | 1214 | } |
| @@ -1851,7 +1853,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) | |||
| 1851 | struct ipoib_dev_priv *child_priv; | 1853 | struct ipoib_dev_priv *child_priv; |
| 1852 | struct net_device *netdev = priv->dev; | 1854 | struct net_device *netdev = priv->dev; |
| 1853 | 1855 | ||
| 1854 | netif_addr_lock(netdev); | 1856 | netif_addr_lock_bh(netdev); |
| 1855 | 1857 | ||
| 1856 | memcpy(&priv->local_gid.global.interface_id, | 1858 | memcpy(&priv->local_gid.global.interface_id, |
| 1857 | &gid->global.interface_id, | 1859 | &gid->global.interface_id, |
| @@ -1859,7 +1861,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) | |||
| 1859 | memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid)); | 1861 | memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid)); |
| 1860 | clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); | 1862 | clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); |
| 1861 | 1863 | ||
| 1862 | netif_addr_unlock(netdev); | 1864 | netif_addr_unlock_bh(netdev); |
| 1863 | 1865 | ||
| 1864 | if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { | 1866 | if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { |
| 1865 | down_read(&priv->vlan_rwsem); | 1867 | down_read(&priv->vlan_rwsem); |
| @@ -1875,7 +1877,7 @@ static int ipoib_check_lladdr(struct net_device *dev, | |||
| 1875 | union ib_gid *gid = (union ib_gid *)(ss->__data + 4); | 1877 | union ib_gid *gid = (union ib_gid *)(ss->__data + 4); |
| 1876 | int ret = 0; | 1878 | int ret = 0; |
| 1877 | 1879 | ||
| 1878 | netif_addr_lock(dev); | 1880 | netif_addr_lock_bh(dev); |
| 1879 | 1881 | ||
| 1880 | /* Make sure the QPN, reserved and subnet prefix match the current | 1882 | /* Make sure the QPN, reserved and subnet prefix match the current |
| 1881 | * lladdr, it also makes sure the lladdr is unicast. | 1883 | * lladdr, it also makes sure the lladdr is unicast. |
| @@ -1885,7 +1887,7 @@ static int ipoib_check_lladdr(struct net_device *dev, | |||
| 1885 | gid->global.interface_id == 0) | 1887 | gid->global.interface_id == 0) |
| 1886 | ret = -EINVAL; | 1888 | ret = -EINVAL; |
| 1887 | 1889 | ||
| 1888 | netif_addr_unlock(dev); | 1890 | netif_addr_unlock_bh(dev); |
| 1889 | 1891 | ||
| 1890 | return ret; | 1892 | return ret; |
| 1891 | } | 1893 | } |
| @@ -2141,6 +2143,9 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) | |||
| 2141 | ib_unregister_event_handler(&priv->event_handler); | 2143 | ib_unregister_event_handler(&priv->event_handler); |
| 2142 | flush_workqueue(ipoib_workqueue); | 2144 | flush_workqueue(ipoib_workqueue); |
| 2143 | 2145 | ||
| 2146 | /* mark interface in the middle of destruction */ | ||
| 2147 | set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags); | ||
| 2148 | |||
| 2144 | rtnl_lock(); | 2149 | rtnl_lock(); |
| 2145 | dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); | 2150 | dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); |
| 2146 | rtnl_unlock(); | 2151 | rtnl_unlock(); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 82fbc9442608..d3394b6add24 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
| @@ -582,13 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work) | |||
| 582 | return; | 582 | return; |
| 583 | } | 583 | } |
| 584 | priv->local_lid = port_attr.lid; | 584 | priv->local_lid = port_attr.lid; |
| 585 | netif_addr_lock(dev); | 585 | netif_addr_lock_bh(dev); |
| 586 | 586 | ||
| 587 | if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { | 587 | if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { |
| 588 | netif_addr_unlock(dev); | 588 | netif_addr_unlock_bh(dev); |
| 589 | return; | 589 | return; |
| 590 | } | 590 | } |
| 591 | netif_addr_unlock(dev); | 591 | netif_addr_unlock_bh(dev); |
| 592 | 592 | ||
| 593 | spin_lock_irq(&priv->lock); | 593 | spin_lock_irq(&priv->lock); |
| 594 | if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) | 594 | if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 64a35595eab8..a2f9f29c6ab5 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
| @@ -131,6 +131,9 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) | |||
| 131 | 131 | ||
| 132 | ppriv = netdev_priv(pdev); | 132 | ppriv = netdev_priv(pdev); |
| 133 | 133 | ||
| 134 | if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags)) | ||
| 135 | return -EPERM; | ||
| 136 | |||
| 134 | snprintf(intf_name, sizeof intf_name, "%s.%04x", | 137 | snprintf(intf_name, sizeof intf_name, "%s.%04x", |
| 135 | ppriv->dev->name, pkey); | 138 | ppriv->dev->name, pkey); |
| 136 | priv = ipoib_intf_alloc(intf_name); | 139 | priv = ipoib_intf_alloc(intf_name); |
| @@ -183,6 +186,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) | |||
| 183 | 186 | ||
| 184 | ppriv = netdev_priv(pdev); | 187 | ppriv = netdev_priv(pdev); |
| 185 | 188 | ||
| 189 | if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags)) | ||
| 190 | return -EPERM; | ||
| 191 | |||
| 186 | if (!rtnl_trylock()) | 192 | if (!rtnl_trylock()) |
| 187 | return restart_syscall(); | 193 | return restart_syscall(); |
| 188 | 194 | ||
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 646de170ec12..3322ed750172 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
| @@ -1457,7 +1457,6 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch, | |||
| 1457 | { | 1457 | { |
| 1458 | unsigned int sg_offset = 0; | 1458 | unsigned int sg_offset = 0; |
| 1459 | 1459 | ||
| 1460 | state->desc = req->indirect_desc; | ||
| 1461 | state->fr.next = req->fr_list; | 1460 | state->fr.next = req->fr_list; |
| 1462 | state->fr.end = req->fr_list + ch->target->mr_per_cmd; | 1461 | state->fr.end = req->fr_list + ch->target->mr_per_cmd; |
| 1463 | state->sg = scat; | 1462 | state->sg = scat; |
| @@ -1489,7 +1488,6 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch, | |||
| 1489 | struct scatterlist *sg; | 1488 | struct scatterlist *sg; |
| 1490 | int i; | 1489 | int i; |
| 1491 | 1490 | ||
| 1492 | state->desc = req->indirect_desc; | ||
| 1493 | for_each_sg(scat, sg, count, i) { | 1491 | for_each_sg(scat, sg, count, i) { |
| 1494 | srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), | 1492 | srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), |
| 1495 | ib_sg_dma_len(dev->dev, sg), | 1493 | ib_sg_dma_len(dev->dev, sg), |
| @@ -1655,6 +1653,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, | |||
| 1655 | target->indirect_size, DMA_TO_DEVICE); | 1653 | target->indirect_size, DMA_TO_DEVICE); |
| 1656 | 1654 | ||
| 1657 | memset(&state, 0, sizeof(state)); | 1655 | memset(&state, 0, sizeof(state)); |
| 1656 | state.desc = req->indirect_desc; | ||
| 1658 | if (dev->use_fast_reg) | 1657 | if (dev->use_fast_reg) |
| 1659 | ret = srp_map_sg_fr(&state, ch, req, scat, count); | 1658 | ret = srp_map_sg_fr(&state, ch, req, scat, count); |
| 1660 | else if (dev->use_fmr) | 1659 | else if (dev->use_fmr) |
| @@ -3526,7 +3525,7 @@ static void srp_add_one(struct ib_device *device) | |||
| 3526 | int mr_page_shift, p; | 3525 | int mr_page_shift, p; |
| 3527 | u64 max_pages_per_mr; | 3526 | u64 max_pages_per_mr; |
| 3528 | 3527 | ||
| 3529 | srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL); | 3528 | srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL); |
| 3530 | if (!srp_dev) | 3529 | if (!srp_dev) |
| 3531 | return; | 3530 | return; |
| 3532 | 3531 | ||
| @@ -3586,8 +3585,6 @@ static void srp_add_one(struct ib_device *device) | |||
| 3586 | IB_ACCESS_REMOTE_WRITE); | 3585 | IB_ACCESS_REMOTE_WRITE); |
| 3587 | if (IS_ERR(srp_dev->global_mr)) | 3586 | if (IS_ERR(srp_dev->global_mr)) |
| 3588 | goto err_pd; | 3587 | goto err_pd; |
| 3589 | } else { | ||
| 3590 | srp_dev->global_mr = NULL; | ||
| 3591 | } | 3588 | } |
| 3592 | 3589 | ||
| 3593 | for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { | 3590 | for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { |
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index e68b20cba70b..4a4155640d51 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
| @@ -1638,8 +1638,7 @@ retry: | |||
| 1638 | */ | 1638 | */ |
| 1639 | qp_init->cap.max_send_wr = srp_sq_size / 2; | 1639 | qp_init->cap.max_send_wr = srp_sq_size / 2; |
| 1640 | qp_init->cap.max_rdma_ctxs = srp_sq_size / 2; | 1640 | qp_init->cap.max_rdma_ctxs = srp_sq_size / 2; |
| 1641 | qp_init->cap.max_send_sge = max(sdev->device->attrs.max_sge_rd, | 1641 | qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE; |
| 1642 | sdev->device->attrs.max_sge); | ||
| 1643 | qp_init->port_num = ch->sport->port; | 1642 | qp_init->port_num = ch->sport->port; |
| 1644 | 1643 | ||
| 1645 | ch->qp = ib_create_qp(sdev->pd, qp_init); | 1644 | ch->qp = ib_create_qp(sdev->pd, qp_init); |
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
index fee6bfd7ca21..389030487da7 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.h
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
| @@ -106,6 +106,7 @@ enum { | |||
| 106 | SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2, | 106 | SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2, |
| 107 | 107 | ||
| 108 | SRPT_DEF_SG_TABLESIZE = 128, | 108 | SRPT_DEF_SG_TABLESIZE = 128, |
| 109 | SRPT_DEF_SG_PER_WQE = 16, | ||
| 109 | 110 | ||
| 110 | MIN_SRPT_SQ_SIZE = 16, | 111 | MIN_SRPT_SQ_SIZE = 16, |
| 111 | DEF_SRPT_SQ_SIZE = 4096, | 112 | DEF_SRPT_SQ_SIZE = 4096, |
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 94b68213c50d..5f6b3bcab078 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
| @@ -1941,6 +1941,7 @@ static struct iommu_ops arm_smmu_ops = { | |||
| 1941 | .attach_dev = arm_smmu_attach_dev, | 1941 | .attach_dev = arm_smmu_attach_dev, |
| 1942 | .map = arm_smmu_map, | 1942 | .map = arm_smmu_map, |
| 1943 | .unmap = arm_smmu_unmap, | 1943 | .unmap = arm_smmu_unmap, |
| 1944 | .map_sg = default_iommu_map_sg, | ||
| 1944 | .iova_to_phys = arm_smmu_iova_to_phys, | 1945 | .iova_to_phys = arm_smmu_iova_to_phys, |
| 1945 | .add_device = arm_smmu_add_device, | 1946 | .add_device = arm_smmu_add_device, |
| 1946 | .remove_device = arm_smmu_remove_device, | 1947 | .remove_device = arm_smmu_remove_device, |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a644d0cec2d8..10700945994e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
| @@ -3222,11 +3222,6 @@ static int __init init_dmars(void) | |||
| 3222 | } | 3222 | } |
| 3223 | } | 3223 | } |
| 3224 | 3224 | ||
| 3225 | iommu_flush_write_buffer(iommu); | ||
| 3226 | iommu_set_root_entry(iommu); | ||
| 3227 | iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); | ||
| 3228 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); | ||
| 3229 | |||
| 3230 | if (!ecap_pass_through(iommu->ecap)) | 3225 | if (!ecap_pass_through(iommu->ecap)) |
| 3231 | hw_pass_through = 0; | 3226 | hw_pass_through = 0; |
| 3232 | #ifdef CONFIG_INTEL_IOMMU_SVM | 3227 | #ifdef CONFIG_INTEL_IOMMU_SVM |
| @@ -3235,6 +3230,18 @@ static int __init init_dmars(void) | |||
| 3235 | #endif | 3230 | #endif |
| 3236 | } | 3231 | } |
| 3237 | 3232 | ||
| 3233 | /* | ||
| 3234 | * Now that qi is enabled on all iommus, set the root entry and flush | ||
| 3235 | * caches. This is required on some Intel X58 chipsets, otherwise the | ||
| 3236 | * flush_context function will loop forever and the boot hangs. | ||
| 3237 | */ | ||
| 3238 | for_each_active_iommu(iommu, drhd) { | ||
| 3239 | iommu_flush_write_buffer(iommu); | ||
| 3240 | iommu_set_root_entry(iommu); | ||
| 3241 | iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); | ||
| 3242 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); | ||
| 3243 | } | ||
| 3244 | |||
| 3238 | if (iommu_pass_through) | 3245 | if (iommu_pass_through) |
| 3239 | iommu_identity_mapping |= IDENTMAP_ALL; | 3246 | iommu_identity_mapping |= IDENTMAP_ALL; |
| 3240 | 3247 | ||
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index c7d6156ff536..25b4627cb57f 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
| @@ -815,7 +815,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain, | |||
| 815 | dte_addr = virt_to_phys(rk_domain->dt); | 815 | dte_addr = virt_to_phys(rk_domain->dt); |
| 816 | for (i = 0; i < iommu->num_mmu; i++) { | 816 | for (i = 0; i < iommu->num_mmu; i++) { |
| 817 | rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr); | 817 | rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr); |
| 818 | rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); | 818 | rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE); |
| 819 | rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); | 819 | rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK); |
| 820 | } | 820 | } |
| 821 | 821 | ||
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 6bd881be24ea..5eb1f9e17a98 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
| @@ -41,6 +41,7 @@ | |||
| 41 | 41 | ||
| 42 | #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) | 42 | #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) |
| 43 | #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) | 43 | #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) |
| 44 | #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) | ||
| 44 | 45 | ||
| 45 | #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) | 46 | #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) |
| 46 | 47 | ||
| @@ -82,6 +83,7 @@ struct its_node { | |||
| 82 | u64 flags; | 83 | u64 flags; |
| 83 | u32 ite_size; | 84 | u32 ite_size; |
| 84 | u32 device_ids; | 85 | u32 device_ids; |
| 86 | int numa_node; | ||
| 85 | }; | 87 | }; |
| 86 | 88 | ||
| 87 | #define ITS_ITT_ALIGN SZ_256 | 89 | #define ITS_ITT_ALIGN SZ_256 |
| @@ -613,11 +615,23 @@ static void its_unmask_irq(struct irq_data *d) | |||
| 613 | static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | 615 | static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, |
| 614 | bool force) | 616 | bool force) |
| 615 | { | 617 | { |
| 616 | unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); | 618 | unsigned int cpu; |
| 619 | const struct cpumask *cpu_mask = cpu_online_mask; | ||
| 617 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | 620 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
| 618 | struct its_collection *target_col; | 621 | struct its_collection *target_col; |
| 619 | u32 id = its_get_event_id(d); | 622 | u32 id = its_get_event_id(d); |
| 620 | 623 | ||
| 624 | /* lpi cannot be routed to a redistributor that is on a foreign node */ | ||
| 625 | if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { | ||
| 626 | if (its_dev->its->numa_node >= 0) { | ||
| 627 | cpu_mask = cpumask_of_node(its_dev->its->numa_node); | ||
| 628 | if (!cpumask_intersects(mask_val, cpu_mask)) | ||
| 629 | return -EINVAL; | ||
| 630 | } | ||
| 631 | } | ||
| 632 | |||
| 633 | cpu = cpumask_any_and(mask_val, cpu_mask); | ||
| 634 | |||
| 621 | if (cpu >= nr_cpu_ids) | 635 | if (cpu >= nr_cpu_ids) |
| 622 | return -EINVAL; | 636 | return -EINVAL; |
| 623 | 637 | ||
| @@ -1101,6 +1115,16 @@ static void its_cpu_init_collection(void) | |||
| 1101 | list_for_each_entry(its, &its_nodes, entry) { | 1115 | list_for_each_entry(its, &its_nodes, entry) { |
| 1102 | u64 target; | 1116 | u64 target; |
| 1103 | 1117 | ||
| 1118 | /* avoid cross node collections and its mapping */ | ||
| 1119 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { | ||
| 1120 | struct device_node *cpu_node; | ||
| 1121 | |||
| 1122 | cpu_node = of_get_cpu_node(cpu, NULL); | ||
| 1123 | if (its->numa_node != NUMA_NO_NODE && | ||
| 1124 | its->numa_node != of_node_to_nid(cpu_node)) | ||
| 1125 | continue; | ||
| 1126 | } | ||
| 1127 | |||
| 1104 | /* | 1128 | /* |
| 1105 | * We now have to bind each collection to its target | 1129 | * We now have to bind each collection to its target |
| 1106 | * redistributor. | 1130 | * redistributor. |
| @@ -1351,9 +1375,14 @@ static void its_irq_domain_activate(struct irq_domain *domain, | |||
| 1351 | { | 1375 | { |
| 1352 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | 1376 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
| 1353 | u32 event = its_get_event_id(d); | 1377 | u32 event = its_get_event_id(d); |
| 1378 | const struct cpumask *cpu_mask = cpu_online_mask; | ||
| 1379 | |||
| 1380 | /* get the cpu_mask of local node */ | ||
| 1381 | if (its_dev->its->numa_node >= 0) | ||
| 1382 | cpu_mask = cpumask_of_node(its_dev->its->numa_node); | ||
| 1354 | 1383 | ||
| 1355 | /* Bind the LPI to the first possible CPU */ | 1384 | /* Bind the LPI to the first possible CPU */ |
| 1356 | its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask); | 1385 | its_dev->event_map.col_map[event] = cpumask_first(cpu_mask); |
| 1357 | 1386 | ||
| 1358 | /* Map the GIC IRQ and event to the device */ | 1387 | /* Map the GIC IRQ and event to the device */ |
| 1359 | its_send_mapvi(its_dev, d->hwirq, event); | 1388 | its_send_mapvi(its_dev, d->hwirq, event); |
| @@ -1443,6 +1472,13 @@ static void __maybe_unused its_enable_quirk_cavium_22375(void *data) | |||
| 1443 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; | 1472 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; |
| 1444 | } | 1473 | } |
| 1445 | 1474 | ||
| 1475 | static void __maybe_unused its_enable_quirk_cavium_23144(void *data) | ||
| 1476 | { | ||
| 1477 | struct its_node *its = data; | ||
| 1478 | |||
| 1479 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; | ||
| 1480 | } | ||
| 1481 | |||
| 1446 | static const struct gic_quirk its_quirks[] = { | 1482 | static const struct gic_quirk its_quirks[] = { |
| 1447 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 | 1483 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 |
| 1448 | { | 1484 | { |
| @@ -1452,6 +1488,14 @@ static const struct gic_quirk its_quirks[] = { | |||
| 1452 | .init = its_enable_quirk_cavium_22375, | 1488 | .init = its_enable_quirk_cavium_22375, |
| 1453 | }, | 1489 | }, |
| 1454 | #endif | 1490 | #endif |
| 1491 | #ifdef CONFIG_CAVIUM_ERRATUM_23144 | ||
| 1492 | { | ||
| 1493 | .desc = "ITS: Cavium erratum 23144", | ||
| 1494 | .iidr = 0xa100034c, /* ThunderX pass 1.x */ | ||
| 1495 | .mask = 0xffff0fff, | ||
| 1496 | .init = its_enable_quirk_cavium_23144, | ||
| 1497 | }, | ||
| 1498 | #endif | ||
| 1455 | { | 1499 | { |
| 1456 | } | 1500 | } |
| 1457 | }; | 1501 | }; |
| @@ -1514,6 +1558,7 @@ static int __init its_probe(struct device_node *node, | |||
| 1514 | its->base = its_base; | 1558 | its->base = its_base; |
| 1515 | its->phys_base = res.start; | 1559 | its->phys_base = res.start; |
| 1516 | its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; | 1560 | its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; |
| 1561 | its->numa_node = of_node_to_nid(node); | ||
| 1517 | 1562 | ||
| 1518 | its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); | 1563 | its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); |
| 1519 | if (!its->cmd_base) { | 1564 | if (!its->cmd_base) { |
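
The affinity path above now intersects the requested CPU mask with the mask of the ITS's own NUMA node when the Cavium 23144 erratum is active, and rejects requests that have no CPU on the local node. A minimal userspace sketch of that check, using plain bitmasks instead of struct cpumask; the 8-CPU, two-node topology below is an assumption for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 8-CPU system: CPUs 0-3 on node 0, CPUs 4-7 on node 1. */
static uint8_t cpus_on_node(int node)
{
    return node == 0 ? 0x0f : 0xf0;
}

/* Mirror of the kernel logic: reject an affinity request that shares no
 * CPU with the ITS's local node, otherwise pick the first CPU from the
 * intersection. Returns the chosen CPU number or -1. */
static int pick_target_cpu(uint8_t requested_mask, int its_node)
{
    uint8_t allowed = 0xff;            /* analogous to cpu_online_mask */

    if (its_node >= 0)
        allowed = cpus_on_node(its_node);

    if (!(requested_mask & allowed))
        return -1;                     /* -EINVAL in the driver */

    return __builtin_ctz(requested_mask & allowed);
}

int main(void)
{
    printf("%d\n", pick_target_cpu(0x30, 1));   /* CPU 4 */
    printf("%d\n", pick_target_cpu(0x03, 1));   /* -1: foreign node only */
    return 0;
}
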
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index fb042ba9a3db..2c5ba0e704bf 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
| @@ -155,7 +155,7 @@ static void gic_enable_redist(bool enable) | |||
| 155 | 155 | ||
| 156 | while (count--) { | 156 | while (count--) { |
| 157 | val = readl_relaxed(rbase + GICR_WAKER); | 157 | val = readl_relaxed(rbase + GICR_WAKER); |
| 158 | if (enable ^ (val & GICR_WAKER_ChildrenAsleep)) | 158 | if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) |
| 159 | break; | 159 | break; |
| 160 | cpu_relax(); | 160 | cpu_relax(); |
| 161 | udelay(1); | 161 | udelay(1); |
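
The single-character irq-gic-v3.c fix casts the ChildrenAsleep test to bool before XOR-ing it with enable; without the cast, a 0-or-1 value is XOR-ed against a multi-bit register field and the polling loop can break too early. A standalone illustration (the bit position used for ChildrenAsleep is assumed, not the real register layout):

#include <stdbool.h>
#include <stdio.h>

#define CHILDREN_ASLEEP (1U << 2)   /* assumed bit position for the sketch */

int main(void)
{
    unsigned int val = CHILDREN_ASLEEP;   /* redistributor still asleep */
    bool enable = true;                   /* we are trying to wake it */

    /* Buggy form: 1 ^ 4 == 5, which is non-zero, so the wait loop
     * would break even though the wake-up has not completed. */
    printf("without cast: %u\n", enable ^ (val & CHILDREN_ASLEEP));

    /* Fixed form: 1 ^ 1 == 0, so the loop keeps polling. */
    printf("with cast:    %u\n",
           (unsigned int)(enable ^ (bool)(val & CHILDREN_ASLEEP)));
    return 0;
}
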
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 3b5e10aa48ab..8a4adbeb2b8c 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c | |||
| @@ -746,6 +746,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq, | |||
| 746 | /* verify that it doesn't conflict with an IPI irq */ | 746 | /* verify that it doesn't conflict with an IPI irq */ |
| 747 | if (test_bit(spec->hwirq, ipi_resrv)) | 747 | if (test_bit(spec->hwirq, ipi_resrv)) |
| 748 | return -EBUSY; | 748 | return -EBUSY; |
| 749 | |||
| 750 | hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq); | ||
| 751 | |||
| 752 | return irq_domain_set_hwirq_and_chip(d, virq, hwirq, | ||
| 753 | &gic_level_irq_controller, | ||
| 754 | NULL); | ||
| 749 | } else { | 755 | } else { |
| 750 | base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs); | 756 | base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs); |
| 751 | if (base_hwirq == gic_shared_intrs) { | 757 | if (base_hwirq == gic_shared_intrs) { |
| @@ -867,10 +873,14 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq, | |||
| 867 | &gic_level_irq_controller, | 873 | &gic_level_irq_controller, |
| 868 | NULL); | 874 | NULL); |
| 869 | if (ret) | 875 | if (ret) |
| 870 | return ret; | 876 | goto error; |
| 871 | } | 877 | } |
| 872 | 878 | ||
| 873 | return 0; | 879 | return 0; |
| 880 | |||
| 881 | error: | ||
| 882 | irq_domain_free_irqs_parent(d, virq, nr_irqs); | ||
| 883 | return ret; | ||
| 874 | } | 884 | } |
| 875 | 885 | ||
| 876 | void gic_dev_domain_free(struct irq_domain *d, unsigned int virq, | 886 | void gic_dev_domain_free(struct irq_domain *d, unsigned int virq, |
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c index e7155db01d55..73addb4b625b 100644 --- a/drivers/irqchip/irq-pic32-evic.c +++ b/drivers/irqchip/irq-pic32-evic.c | |||
| @@ -91,7 +91,7 @@ static int pic32_set_type_edge(struct irq_data *data, | |||
| 91 | /* set polarity for external interrupts only */ | 91 | /* set polarity for external interrupts only */ |
| 92 | for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) { | 92 | for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) { |
| 93 | if (priv->ext_irqs[i] == data->hwirq) { | 93 | if (priv->ext_irqs[i] == data->hwirq) { |
| 94 | ret = pic32_set_ext_polarity(i + 1, flow_type); | 94 | ret = pic32_set_ext_polarity(i, flow_type); |
| 95 | if (ret) | 95 | if (ret) |
| 96 | return ret; | 96 | return ret; |
| 97 | } | 97 | } |
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index 3495d5d6547f..3bce44893021 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c | |||
| @@ -53,11 +53,12 @@ static void led_timer_function(unsigned long data) | |||
| 53 | 53 | ||
| 54 | if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) { | 54 | if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) { |
| 55 | led_set_brightness_nosleep(led_cdev, LED_OFF); | 55 | led_set_brightness_nosleep(led_cdev, LED_OFF); |
| 56 | led_cdev->flags &= ~LED_BLINK_SW; | ||
| 56 | return; | 57 | return; |
| 57 | } | 58 | } |
| 58 | 59 | ||
| 59 | if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) { | 60 | if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) { |
| 60 | led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP; | 61 | led_cdev->flags &= ~(LED_BLINK_ONESHOT_STOP | LED_BLINK_SW); |
| 61 | return; | 62 | return; |
| 62 | } | 63 | } |
| 63 | 64 | ||
| @@ -151,6 +152,7 @@ static void led_set_software_blink(struct led_classdev *led_cdev, | |||
| 151 | return; | 152 | return; |
| 152 | } | 153 | } |
| 153 | 154 | ||
| 155 | led_cdev->flags |= LED_BLINK_SW; | ||
| 154 | mod_timer(&led_cdev->blink_timer, jiffies + 1); | 156 | mod_timer(&led_cdev->blink_timer, jiffies + 1); |
| 155 | } | 157 | } |
| 156 | 158 | ||
| @@ -219,6 +221,7 @@ void led_stop_software_blink(struct led_classdev *led_cdev) | |||
| 219 | del_timer_sync(&led_cdev->blink_timer); | 221 | del_timer_sync(&led_cdev->blink_timer); |
| 220 | led_cdev->blink_delay_on = 0; | 222 | led_cdev->blink_delay_on = 0; |
| 221 | led_cdev->blink_delay_off = 0; | 223 | led_cdev->blink_delay_off = 0; |
| 224 | led_cdev->flags &= ~LED_BLINK_SW; | ||
| 222 | } | 225 | } |
| 223 | EXPORT_SYMBOL_GPL(led_stop_software_blink); | 226 | EXPORT_SYMBOL_GPL(led_stop_software_blink); |
| 224 | 227 | ||
| @@ -226,10 +229,10 @@ void led_set_brightness(struct led_classdev *led_cdev, | |||
| 226 | enum led_brightness brightness) | 229 | enum led_brightness brightness) |
| 227 | { | 230 | { |
| 228 | /* | 231 | /* |
| 229 | * In case blinking is on delay brightness setting | 232 | * If software blink is active, delay brightness setting |
| 230 | * until the next timer tick. | 233 | * until the next timer tick. |
| 231 | */ | 234 | */ |
| 232 | if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) { | 235 | if (led_cdev->flags & LED_BLINK_SW) { |
| 233 | /* | 236 | /* |
| 234 | * If we need to disable soft blinking delegate this to the | 237 | * If we need to disable soft blinking delegate this to the |
| 235 | * work queue task to avoid problems in case we are called | 238 | * work queue task to avoid problems in case we are called |
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c index 410c39c62dc7..c9f386213e9e 100644 --- a/drivers/leds/trigger/ledtrig-heartbeat.c +++ b/drivers/leds/trigger/ledtrig-heartbeat.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
| 20 | #include <linux/leds.h> | 20 | #include <linux/leds.h> |
| 21 | #include <linux/reboot.h> | 21 | #include <linux/reboot.h> |
| 22 | #include <linux/suspend.h> | ||
| 22 | #include "../leds.h" | 23 | #include "../leds.h" |
| 23 | 24 | ||
| 24 | static int panic_heartbeats; | 25 | static int panic_heartbeats; |
| @@ -154,6 +155,30 @@ static struct led_trigger heartbeat_led_trigger = { | |||
| 154 | .deactivate = heartbeat_trig_deactivate, | 155 | .deactivate = heartbeat_trig_deactivate, |
| 155 | }; | 156 | }; |
| 156 | 157 | ||
| 158 | static int heartbeat_pm_notifier(struct notifier_block *nb, | ||
| 159 | unsigned long pm_event, void *unused) | ||
| 160 | { | ||
| 161 | int rc; | ||
| 162 | |||
| 163 | switch (pm_event) { | ||
| 164 | case PM_SUSPEND_PREPARE: | ||
| 165 | case PM_HIBERNATION_PREPARE: | ||
| 166 | case PM_RESTORE_PREPARE: | ||
| 167 | led_trigger_unregister(&heartbeat_led_trigger); | ||
| 168 | break; | ||
| 169 | case PM_POST_SUSPEND: | ||
| 170 | case PM_POST_HIBERNATION: | ||
| 171 | case PM_POST_RESTORE: | ||
| 172 | rc = led_trigger_register(&heartbeat_led_trigger); | ||
| 173 | if (rc) | ||
| 174 | pr_err("could not re-register heartbeat trigger\n"); | ||
| 175 | break; | ||
| 176 | default: | ||
| 177 | break; | ||
| 178 | } | ||
| 179 | return NOTIFY_DONE; | ||
| 180 | } | ||
| 181 | |||
| 157 | static int heartbeat_reboot_notifier(struct notifier_block *nb, | 182 | static int heartbeat_reboot_notifier(struct notifier_block *nb, |
| 158 | unsigned long code, void *unused) | 183 | unsigned long code, void *unused) |
| 159 | { | 184 | { |
| @@ -168,6 +193,10 @@ static int heartbeat_panic_notifier(struct notifier_block *nb, | |||
| 168 | return NOTIFY_DONE; | 193 | return NOTIFY_DONE; |
| 169 | } | 194 | } |
| 170 | 195 | ||
| 196 | static struct notifier_block heartbeat_pm_nb = { | ||
| 197 | .notifier_call = heartbeat_pm_notifier, | ||
| 198 | }; | ||
| 199 | |||
| 171 | static struct notifier_block heartbeat_reboot_nb = { | 200 | static struct notifier_block heartbeat_reboot_nb = { |
| 172 | .notifier_call = heartbeat_reboot_notifier, | 201 | .notifier_call = heartbeat_reboot_notifier, |
| 173 | }; | 202 | }; |
| @@ -184,12 +213,14 @@ static int __init heartbeat_trig_init(void) | |||
| 184 | atomic_notifier_chain_register(&panic_notifier_list, | 213 | atomic_notifier_chain_register(&panic_notifier_list, |
| 185 | &heartbeat_panic_nb); | 214 | &heartbeat_panic_nb); |
| 186 | register_reboot_notifier(&heartbeat_reboot_nb); | 215 | register_reboot_notifier(&heartbeat_reboot_nb); |
| 216 | register_pm_notifier(&heartbeat_pm_nb); | ||
| 187 | } | 217 | } |
| 188 | return rc; | 218 | return rc; |
| 189 | } | 219 | } |
| 190 | 220 | ||
| 191 | static void __exit heartbeat_trig_exit(void) | 221 | static void __exit heartbeat_trig_exit(void) |
| 192 | { | 222 | { |
| 223 | unregister_pm_notifier(&heartbeat_pm_nb); | ||
| 193 | unregister_reboot_notifier(&heartbeat_reboot_nb); | 224 | unregister_reboot_notifier(&heartbeat_reboot_nb); |
| 194 | atomic_notifier_chain_unregister(&panic_notifier_list, | 225 | atomic_notifier_chain_unregister(&panic_notifier_list, |
| 195 | &heartbeat_panic_nb); | 226 | &heartbeat_panic_nb); |
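
The heartbeat trigger now steps aside across suspend and hibernation: a PM notifier unregisters the trigger on the *_PREPARE events and registers it again on the matching POST_* events. A minimal sketch of that symmetric notifier shape, with the PM events reduced to an enum and the trigger registration calls stubbed out:

#include <stdio.h>

enum pm_event {
    PM_SUSPEND_PREPARE, PM_HIBERNATION_PREPARE, PM_RESTORE_PREPARE,
    PM_POST_SUSPEND, PM_POST_HIBERNATION, PM_POST_RESTORE,
};

/* Stand-ins for led_trigger_unregister()/led_trigger_register(). */
static void trigger_unregister(void) { printf("trigger off\n"); }
static int  trigger_register(void)   { printf("trigger on\n"); return 0; }

static int heartbeat_pm_notifier(enum pm_event event)
{
    switch (event) {
    case PM_SUSPEND_PREPARE:
    case PM_HIBERNATION_PREPARE:
    case PM_RESTORE_PREPARE:
        trigger_unregister();            /* quiesce before sleeping */
        break;
    case PM_POST_SUSPEND:
    case PM_POST_HIBERNATION:
    case PM_POST_RESTORE:
        if (trigger_register())          /* bring it back on resume */
            fprintf(stderr, "could not re-register trigger\n");
        break;
    }
    return 0;
}

int main(void)
{
    heartbeat_pm_notifier(PM_SUSPEND_PREPARE);
    heartbeat_pm_notifier(PM_POST_SUSPEND);
    return 0;
}
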
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c index b73c6e7d28e4..6f2c8522e14a 100644 --- a/drivers/mcb/mcb-core.c +++ b/drivers/mcb/mcb-core.c | |||
| @@ -61,21 +61,36 @@ static int mcb_probe(struct device *dev) | |||
| 61 | struct mcb_driver *mdrv = to_mcb_driver(dev->driver); | 61 | struct mcb_driver *mdrv = to_mcb_driver(dev->driver); |
| 62 | struct mcb_device *mdev = to_mcb_device(dev); | 62 | struct mcb_device *mdev = to_mcb_device(dev); |
| 63 | const struct mcb_device_id *found_id; | 63 | const struct mcb_device_id *found_id; |
| 64 | struct module *carrier_mod; | ||
| 65 | int ret; | ||
| 64 | 66 | ||
| 65 | found_id = mcb_match_id(mdrv->id_table, mdev); | 67 | found_id = mcb_match_id(mdrv->id_table, mdev); |
| 66 | if (!found_id) | 68 | if (!found_id) |
| 67 | return -ENODEV; | 69 | return -ENODEV; |
| 68 | 70 | ||
| 69 | return mdrv->probe(mdev, found_id); | 71 | carrier_mod = mdev->dev.parent->driver->owner; |
| 72 | if (!try_module_get(carrier_mod)) | ||
| 73 | return -EINVAL; | ||
| 74 | |||
| 75 | get_device(dev); | ||
| 76 | ret = mdrv->probe(mdev, found_id); | ||
| 77 | if (ret) | ||
| 78 | module_put(carrier_mod); | ||
| 79 | |||
| 80 | return ret; | ||
| 70 | } | 81 | } |
| 71 | 82 | ||
| 72 | static int mcb_remove(struct device *dev) | 83 | static int mcb_remove(struct device *dev) |
| 73 | { | 84 | { |
| 74 | struct mcb_driver *mdrv = to_mcb_driver(dev->driver); | 85 | struct mcb_driver *mdrv = to_mcb_driver(dev->driver); |
| 75 | struct mcb_device *mdev = to_mcb_device(dev); | 86 | struct mcb_device *mdev = to_mcb_device(dev); |
| 87 | struct module *carrier_mod; | ||
| 76 | 88 | ||
| 77 | mdrv->remove(mdev); | 89 | mdrv->remove(mdev); |
| 78 | 90 | ||
| 91 | carrier_mod = mdev->dev.parent->driver->owner; | ||
| 92 | module_put(carrier_mod); | ||
| 93 | |||
| 79 | put_device(&mdev->dev); | 94 | put_device(&mdev->dev); |
| 80 | 95 | ||
| 81 | return 0; | 96 | return 0; |
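
The mcb-core change pins the carrier driver's module for the lifetime of a bound device: a reference is taken before probe, released again if probe fails, and released at remove. A hedged sketch of that pairing discipline, with a plain counter standing in for the module reference count and made-up function names:

#include <stdio.h>

struct carrier { int refcount; };

static int try_get(struct carrier *c) { c->refcount++; return 1; }
static void put(struct carrier *c)    { c->refcount--; }

/* probe_fails lets the example exercise the error path. */
static int probe_device(struct carrier *c, int probe_fails)
{
    if (!try_get(c))
        return -1;

    if (probe_fails) {
        put(c);                    /* undo the reference on failure */
        return -1;
    }
    return 0;                      /* reference is held until remove */
}

static void remove_device(struct carrier *c)
{
    put(c);                        /* balances the successful probe */
}

int main(void)
{
    struct carrier c = { 0 };

    probe_device(&c, 1);           /* failed probe */
    printf("after failed probe: %d\n", c.refcount);   /* 0 */

    probe_device(&c, 0);           /* successful probe */
    printf("while bound:        %d\n", c.refcount);   /* 1 */

    remove_device(&c);
    printf("after remove:       %d\n", c.refcount);   /* 0 */
    return 0;
}
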
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index d7723ce772b3..c04bc6afb965 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c | |||
| @@ -1274,8 +1274,6 @@ struct uvc_xu_control_mapping32 { | |||
| 1274 | static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, | 1274 | static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, |
| 1275 | const struct uvc_xu_control_mapping32 __user *up) | 1275 | const struct uvc_xu_control_mapping32 __user *up) |
| 1276 | { | 1276 | { |
| 1277 | struct uvc_menu_info __user *umenus; | ||
| 1278 | struct uvc_menu_info __user *kmenus; | ||
| 1279 | compat_caddr_t p; | 1277 | compat_caddr_t p; |
| 1280 | 1278 | ||
| 1281 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || | 1279 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
| @@ -1292,17 +1290,7 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, | |||
| 1292 | 1290 | ||
| 1293 | if (__get_user(p, &up->menu_info)) | 1291 | if (__get_user(p, &up->menu_info)) |
| 1294 | return -EFAULT; | 1292 | return -EFAULT; |
| 1295 | umenus = compat_ptr(p); | 1293 | kp->menu_info = compat_ptr(p); |
| 1296 | if (!access_ok(VERIFY_READ, umenus, kp->menu_count * sizeof(*umenus))) | ||
| 1297 | return -EFAULT; | ||
| 1298 | |||
| 1299 | kmenus = compat_alloc_user_space(kp->menu_count * sizeof(*kmenus)); | ||
| 1300 | if (kmenus == NULL) | ||
| 1301 | return -EFAULT; | ||
| 1302 | kp->menu_info = kmenus; | ||
| 1303 | |||
| 1304 | if (copy_in_user(kmenus, umenus, kp->menu_count * sizeof(*umenus))) | ||
| 1305 | return -EFAULT; | ||
| 1306 | 1294 | ||
| 1307 | return 0; | 1295 | return 0; |
| 1308 | } | 1296 | } |
| @@ -1310,10 +1298,6 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, | |||
| 1310 | static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp, | 1298 | static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp, |
| 1311 | struct uvc_xu_control_mapping32 __user *up) | 1299 | struct uvc_xu_control_mapping32 __user *up) |
| 1312 | { | 1300 | { |
| 1313 | struct uvc_menu_info __user *umenus; | ||
| 1314 | struct uvc_menu_info __user *kmenus = kp->menu_info; | ||
| 1315 | compat_caddr_t p; | ||
| 1316 | |||
| 1317 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || | 1301 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
| 1318 | __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) || | 1302 | __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) || |
| 1319 | __put_user(kp->menu_count, &up->menu_count)) | 1303 | __put_user(kp->menu_count, &up->menu_count)) |
| @@ -1322,16 +1306,6 @@ static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp, | |||
| 1322 | if (__clear_user(up->reserved, sizeof(up->reserved))) | 1306 | if (__clear_user(up->reserved, sizeof(up->reserved))) |
| 1323 | return -EFAULT; | 1307 | return -EFAULT; |
| 1324 | 1308 | ||
| 1325 | if (kp->menu_count == 0) | ||
| 1326 | return 0; | ||
| 1327 | |||
| 1328 | if (get_user(p, &up->menu_info)) | ||
| 1329 | return -EFAULT; | ||
| 1330 | umenus = compat_ptr(p); | ||
| 1331 | |||
| 1332 | if (copy_in_user(umenus, kmenus, kp->menu_count * sizeof(*umenus))) | ||
| 1333 | return -EFAULT; | ||
| 1334 | |||
| 1335 | return 0; | 1309 | return 0; |
| 1336 | } | 1310 | } |
| 1337 | 1311 | ||
| @@ -1346,8 +1320,6 @@ struct uvc_xu_control_query32 { | |||
| 1346 | static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, | 1320 | static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, |
| 1347 | const struct uvc_xu_control_query32 __user *up) | 1321 | const struct uvc_xu_control_query32 __user *up) |
| 1348 | { | 1322 | { |
| 1349 | u8 __user *udata; | ||
| 1350 | u8 __user *kdata; | ||
| 1351 | compat_caddr_t p; | 1323 | compat_caddr_t p; |
| 1352 | 1324 | ||
| 1353 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || | 1325 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
| @@ -1361,17 +1333,7 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, | |||
| 1361 | 1333 | ||
| 1362 | if (__get_user(p, &up->data)) | 1334 | if (__get_user(p, &up->data)) |
| 1363 | return -EFAULT; | 1335 | return -EFAULT; |
| 1364 | udata = compat_ptr(p); | 1336 | kp->data = compat_ptr(p); |
| 1365 | if (!access_ok(VERIFY_READ, udata, kp->size)) | ||
| 1366 | return -EFAULT; | ||
| 1367 | |||
| 1368 | kdata = compat_alloc_user_space(kp->size); | ||
| 1369 | if (kdata == NULL) | ||
| 1370 | return -EFAULT; | ||
| 1371 | kp->data = kdata; | ||
| 1372 | |||
| 1373 | if (copy_in_user(kdata, udata, kp->size)) | ||
| 1374 | return -EFAULT; | ||
| 1375 | 1337 | ||
| 1376 | return 0; | 1338 | return 0; |
| 1377 | } | 1339 | } |
| @@ -1379,26 +1341,10 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, | |||
| 1379 | static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp, | 1341 | static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp, |
| 1380 | struct uvc_xu_control_query32 __user *up) | 1342 | struct uvc_xu_control_query32 __user *up) |
| 1381 | { | 1343 | { |
| 1382 | u8 __user *udata; | ||
| 1383 | u8 __user *kdata = kp->data; | ||
| 1384 | compat_caddr_t p; | ||
| 1385 | |||
| 1386 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || | 1344 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
| 1387 | __copy_to_user(up, kp, offsetof(typeof(*up), data))) | 1345 | __copy_to_user(up, kp, offsetof(typeof(*up), data))) |
| 1388 | return -EFAULT; | 1346 | return -EFAULT; |
| 1389 | 1347 | ||
| 1390 | if (kp->size == 0) | ||
| 1391 | return 0; | ||
| 1392 | |||
| 1393 | if (get_user(p, &up->data)) | ||
| 1394 | return -EFAULT; | ||
| 1395 | udata = compat_ptr(p); | ||
| 1396 | if (!access_ok(VERIFY_READ, udata, kp->size)) | ||
| 1397 | return -EFAULT; | ||
| 1398 | |||
| 1399 | if (copy_in_user(udata, kdata, kp->size)) | ||
| 1400 | return -EFAULT; | ||
| 1401 | |||
| 1402 | return 0; | 1348 | return 0; |
| 1403 | } | 1349 | } |
| 1404 | 1350 | ||
| @@ -1408,47 +1354,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp, | |||
| 1408 | static long uvc_v4l2_compat_ioctl32(struct file *file, | 1354 | static long uvc_v4l2_compat_ioctl32(struct file *file, |
| 1409 | unsigned int cmd, unsigned long arg) | 1355 | unsigned int cmd, unsigned long arg) |
| 1410 | { | 1356 | { |
| 1357 | struct uvc_fh *handle = file->private_data; | ||
| 1411 | union { | 1358 | union { |
| 1412 | struct uvc_xu_control_mapping xmap; | 1359 | struct uvc_xu_control_mapping xmap; |
| 1413 | struct uvc_xu_control_query xqry; | 1360 | struct uvc_xu_control_query xqry; |
| 1414 | } karg; | 1361 | } karg; |
| 1415 | void __user *up = compat_ptr(arg); | 1362 | void __user *up = compat_ptr(arg); |
| 1416 | mm_segment_t old_fs; | ||
| 1417 | long ret; | 1363 | long ret; |
| 1418 | 1364 | ||
| 1419 | switch (cmd) { | 1365 | switch (cmd) { |
| 1420 | case UVCIOC_CTRL_MAP32: | 1366 | case UVCIOC_CTRL_MAP32: |
| 1421 | cmd = UVCIOC_CTRL_MAP; | ||
| 1422 | ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up); | 1367 | ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up); |
| 1368 | if (ret) | ||
| 1369 | return ret; | ||
| 1370 | ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap); | ||
| 1371 | if (ret) | ||
| 1372 | return ret; | ||
| 1373 | ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up); | ||
| 1374 | if (ret) | ||
| 1375 | return ret; | ||
| 1376 | |||
| 1423 | break; | 1377 | break; |
| 1424 | 1378 | ||
| 1425 | case UVCIOC_CTRL_QUERY32: | 1379 | case UVCIOC_CTRL_QUERY32: |
| 1426 | cmd = UVCIOC_CTRL_QUERY; | ||
| 1427 | ret = uvc_v4l2_get_xu_query(&karg.xqry, up); | 1380 | ret = uvc_v4l2_get_xu_query(&karg.xqry, up); |
| 1381 | if (ret) | ||
| 1382 | return ret; | ||
| 1383 | ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry); | ||
| 1384 | if (ret) | ||
| 1385 | return ret; | ||
| 1386 | ret = uvc_v4l2_put_xu_query(&karg.xqry, up); | ||
| 1387 | if (ret) | ||
| 1388 | return ret; | ||
| 1428 | break; | 1389 | break; |
| 1429 | 1390 | ||
| 1430 | default: | 1391 | default: |
| 1431 | return -ENOIOCTLCMD; | 1392 | return -ENOIOCTLCMD; |
| 1432 | } | 1393 | } |
| 1433 | 1394 | ||
| 1434 | old_fs = get_fs(); | ||
| 1435 | set_fs(KERNEL_DS); | ||
| 1436 | ret = video_ioctl2(file, cmd, (unsigned long)&karg); | ||
| 1437 | set_fs(old_fs); | ||
| 1438 | |||
| 1439 | if (ret < 0) | ||
| 1440 | return ret; | ||
| 1441 | |||
| 1442 | switch (cmd) { | ||
| 1443 | case UVCIOC_CTRL_MAP: | ||
| 1444 | ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up); | ||
| 1445 | break; | ||
| 1446 | |||
| 1447 | case UVCIOC_CTRL_QUERY: | ||
| 1448 | ret = uvc_v4l2_put_xu_query(&karg.xqry, up); | ||
| 1449 | break; | ||
| 1450 | } | ||
| 1451 | |||
| 1452 | return ret; | 1395 | return ret; |
| 1453 | } | 1396 | } |
| 1454 | #endif | 1397 | #endif |
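
After this rework the compat handlers only translate the 32-bit layout (the user pointer fields become kp->menu_info = compat_ptr(p) and kp->data = compat_ptr(p)) and then call the native handlers directly, instead of staging a second user copy via compat_alloc_user_space() and re-entering video_ioctl2() under set_fs(KERNEL_DS). A reduced sketch of that translate-then-call shape; the struct and handler names below are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* 32-bit user layout: the data pointer travels as a 32-bit value. */
struct query32 { uint32_t size; uint32_t data; };

/* Native layout used by the real handler. */
struct query   { uint32_t size; void *data; };

/* Stand-in for compat_ptr(): widen a 32-bit user address. */
static void *compat_ptr(uint32_t p) { return (void *)(uintptr_t)p; }

static int native_handler(struct query *q)
{
    printf("handling %u bytes at %p\n", (unsigned int)q->size, q->data);
    return 0;
}

/* Compat path: translate the 32-bit struct, then call the native
 * handler directly, with no intermediate user buffer. */
static int compat_handler(const struct query32 *up)
{
    struct query k = { .size = up->size, .data = compat_ptr(up->data) };

    return native_handler(&k);
}

int main(void)
{
    struct query32 up = { .size = 16, .data = 0x1000 };

    return compat_handler(&up);
}
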
diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c index ca94bded3386..8bef4331bd51 100644 --- a/drivers/media/v4l2-core/v4l2-mc.c +++ b/drivers/media/v4l2-core/v4l2-mc.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Media Controller ancillary functions | 2 | * Media Controller ancillary functions |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 4 | * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@kernel.org> |
| 5 | * Copyright (C) 2016 Shuah Khan <shuahkh@osg.samsung.com> | 5 | * Copyright (C) 2016 Shuah Khan <shuahkh@osg.samsung.com> |
| 6 | * Copyright (C) 2006-2010 Nokia Corporation | 6 | * Copyright (C) 2006-2010 Nokia Corporation |
| 7 | * Copyright (c) 2016 Intel Corporation. | 7 | * Copyright (c) 2016 Intel Corporation. |
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index af4884ba6b7c..15508df24e5d 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c | |||
| @@ -398,7 +398,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p) | |||
| 398 | gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, | 398 | gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, |
| 399 | GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay); | 399 | GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay); |
| 400 | gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, | 400 | gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4, |
| 401 | GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay); | 401 | GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay); |
| 402 | gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6, | 402 | gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6, |
| 403 | GPMC_CONFIG6_CYCLE2CYCLESAMECSEN, | 403 | GPMC_CONFIG6_CYCLE2CYCLESAMECSEN, |
| 404 | p->cycle2cyclesamecsen); | 404 | p->cycle2cyclesamecsen); |
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index eed254da63a8..641c1a566687 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c | |||
| @@ -730,7 +730,7 @@ static void mei_cl_wake_all(struct mei_cl *cl) | |||
| 730 | /* synchronized under device mutex */ | 730 | /* synchronized under device mutex */ |
| 731 | if (waitqueue_active(&cl->wait)) { | 731 | if (waitqueue_active(&cl->wait)) { |
| 732 | cl_dbg(dev, cl, "Waking up ctrl write clients!\n"); | 732 | cl_dbg(dev, cl, "Waking up ctrl write clients!\n"); |
| 733 | wake_up_interruptible(&cl->wait); | 733 | wake_up(&cl->wait); |
| 734 | } | 734 | } |
| 735 | } | 735 | } |
| 736 | 736 | ||
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index c984321d1881..5d438ad3ee32 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
| @@ -1276,7 +1276,7 @@ static int mmc_select_hs200(struct mmc_card *card) | |||
| 1276 | * switch to HS200 mode if bus width is set successfully. | 1276 | * switch to HS200 mode if bus width is set successfully. |
| 1277 | */ | 1277 | */ |
| 1278 | err = mmc_select_bus_width(card); | 1278 | err = mmc_select_bus_width(card); |
| 1279 | if (!err) { | 1279 | if (err >= 0) { |
| 1280 | val = EXT_CSD_TIMING_HS200 | | 1280 | val = EXT_CSD_TIMING_HS200 | |
| 1281 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; | 1281 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; |
| 1282 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 1282 | err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
| @@ -1583,7 +1583,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
| 1583 | } else if (mmc_card_hs(card)) { | 1583 | } else if (mmc_card_hs(card)) { |
| 1584 | /* Select the desired bus width optionally */ | 1584 | /* Select the desired bus width optionally */ |
| 1585 | err = mmc_select_bus_width(card); | 1585 | err = mmc_select_bus_width(card); |
| 1586 | if (!err) { | 1586 | if (err >= 0) { |
| 1587 | err = mmc_select_hs_ddr(card); | 1587 | err = mmc_select_hs_ddr(card); |
| 1588 | if (err) | 1588 | if (err) |
| 1589 | goto free_card; | 1589 | goto free_card; |
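
The two mmc.c hunks exist because mmc_select_bus_width() reports success with a positive value (the bus width it selected) rather than 0, so the old `if (!err)` treated a successful wide-bus switch as a failure; checking `err >= 0` accepts it. A tiny sketch of the distinction; the return convention is restated from the change, and the width value and errno below are assumptions:

#include <stdio.h>

#define EINVAL 22

/* Assumed convention: negative errno on failure, otherwise the width set. */
static int select_bus_width(int host_supports_wide)
{
    return host_supports_wide ? 8 : -EINVAL;
}

int main(void)
{
    int err = select_bus_width(1);       /* returns 8 */

    if (!err)
        printf("old check: would continue to HS200\n");
    else
        printf("old check: wide-bus success mistaken for failure\n");

    if (err >= 0)
        printf("new check: continue to HS200\n");
    return 0;
}
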
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 7fc8b7aa83f0..2ee4c21ec55e 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c | |||
| @@ -970,8 +970,8 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = { | |||
| 970 | [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, | 970 | [SDXC_CLK_400K] = { .output = 180, .sample = 180 }, |
| 971 | [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, | 971 | [SDXC_CLK_25M] = { .output = 180, .sample = 75 }, |
| 972 | [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, | 972 | [SDXC_CLK_50M] = { .output = 150, .sample = 120 }, |
| 973 | [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 }, | 973 | [SDXC_CLK_50M_DDR] = { .output = 54, .sample = 36 }, |
| 974 | [SDXC_CLK_50M_DDR_8BIT] = { .output = 90, .sample = 120 }, | 974 | [SDXC_CLK_50M_DDR_8BIT] = { .output = 72, .sample = 72 }, |
| 975 | }; | 975 | }; |
| 976 | 976 | ||
| 977 | static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, | 977 | static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, |
| @@ -1129,11 +1129,6 @@ static int sunxi_mmc_probe(struct platform_device *pdev) | |||
| 1129 | MMC_CAP_1_8V_DDR | | 1129 | MMC_CAP_1_8V_DDR | |
| 1130 | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; | 1130 | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; |
| 1131 | 1131 | ||
| 1132 | /* TODO MMC DDR is not working on A80 */ | ||
| 1133 | if (of_device_is_compatible(pdev->dev.of_node, | ||
| 1134 | "allwinner,sun9i-a80-mmc")) | ||
| 1135 | mmc->caps &= ~MMC_CAP_1_8V_DDR; | ||
| 1136 | |||
| 1137 | ret = mmc_of_parse(mmc); | 1132 | ret = mmc_of_parse(mmc); |
| 1138 | if (ret) | 1133 | if (ret) |
| 1139 | goto error_free_dma; | 1134 | goto error_free_dma; |
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 16baeb51b2bd..ef3618299494 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c | |||
| @@ -1147,11 +1147,17 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) | |||
| 1147 | */ | 1147 | */ |
| 1148 | static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev) | 1148 | static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev) |
| 1149 | { | 1149 | { |
| 1150 | struct kstat stat; | ||
| 1151 | int err, minor; | 1150 | int err, minor; |
| 1151 | struct path path; | ||
| 1152 | struct kstat stat; | ||
| 1152 | 1153 | ||
| 1153 | /* Probably this is an MTD character device node path */ | 1154 | /* Probably this is an MTD character device node path */ |
| 1154 | err = vfs_stat(mtd_dev, &stat); | 1155 | err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path); |
| 1156 | if (err) | ||
| 1157 | return ERR_PTR(err); | ||
| 1158 | |||
| 1159 | err = vfs_getattr(&path, &stat); | ||
| 1160 | path_put(&path); | ||
| 1155 | if (err) | 1161 | if (err) |
| 1156 | return ERR_PTR(err); | 1162 | return ERR_PTR(err); |
| 1157 | 1163 | ||
| @@ -1160,6 +1166,7 @@ static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev) | |||
| 1160 | return ERR_PTR(-EINVAL); | 1166 | return ERR_PTR(-EINVAL); |
| 1161 | 1167 | ||
| 1162 | minor = MINOR(stat.rdev); | 1168 | minor = MINOR(stat.rdev); |
| 1169 | |||
| 1163 | if (minor & 1) | 1170 | if (minor & 1) |
| 1164 | /* | 1171 | /* |
| 1165 | * Just do not think the "/dev/mtdrX" devices support is need, | 1172 | * Just do not think the "/dev/mtdrX" devices support is need, |
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index 5780dd1ba79d..ebf517271d29 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c | |||
| @@ -575,6 +575,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum, | |||
| 575 | int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0; | 575 | int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0; |
| 576 | struct ubi_volume *vol = ubi->volumes[idx]; | 576 | struct ubi_volume *vol = ubi->volumes[idx]; |
| 577 | struct ubi_vid_hdr *vid_hdr; | 577 | struct ubi_vid_hdr *vid_hdr; |
| 578 | uint32_t crc; | ||
| 578 | 579 | ||
| 579 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); | 580 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
| 580 | if (!vid_hdr) | 581 | if (!vid_hdr) |
| @@ -599,14 +600,8 @@ retry: | |||
| 599 | goto out_put; | 600 | goto out_put; |
| 600 | } | 601 | } |
| 601 | 602 | ||
| 602 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); | 603 | ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC); |
| 603 | err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); | ||
| 604 | if (err) { | ||
| 605 | up_read(&ubi->fm_eba_sem); | ||
| 606 | goto write_error; | ||
| 607 | } | ||
| 608 | 604 | ||
| 609 | data_size = offset + len; | ||
| 610 | mutex_lock(&ubi->buf_mutex); | 605 | mutex_lock(&ubi->buf_mutex); |
| 611 | memset(ubi->peb_buf + offset, 0xFF, len); | 606 | memset(ubi->peb_buf + offset, 0xFF, len); |
| 612 | 607 | ||
| @@ -621,6 +616,19 @@ retry: | |||
| 621 | 616 | ||
| 622 | memcpy(ubi->peb_buf + offset, buf, len); | 617 | memcpy(ubi->peb_buf + offset, buf, len); |
| 623 | 618 | ||
| 619 | data_size = offset + len; | ||
| 620 | crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size); | ||
| 621 | vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); | ||
| 622 | vid_hdr->copy_flag = 1; | ||
| 623 | vid_hdr->data_size = cpu_to_be32(data_size); | ||
| 624 | vid_hdr->data_crc = cpu_to_be32(crc); | ||
| 625 | err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); | ||
| 626 | if (err) { | ||
| 627 | mutex_unlock(&ubi->buf_mutex); | ||
| 628 | up_read(&ubi->fm_eba_sem); | ||
| 629 | goto write_error; | ||
| 630 | } | ||
| 631 | |||
| 624 | err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size); | 632 | err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size); |
| 625 | if (err) { | 633 | if (err) { |
| 626 | mutex_unlock(&ubi->buf_mutex); | 634 | mutex_unlock(&ubi->buf_mutex); |
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 348dbbcbedc8..a9e2cef7c95c 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c | |||
| @@ -302,6 +302,7 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm); | |||
| 302 | struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) | 302 | struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) |
| 303 | { | 303 | { |
| 304 | int error, ubi_num, vol_id; | 304 | int error, ubi_num, vol_id; |
| 305 | struct path path; | ||
| 305 | struct kstat stat; | 306 | struct kstat stat; |
| 306 | 307 | ||
| 307 | dbg_gen("open volume %s, mode %d", pathname, mode); | 308 | dbg_gen("open volume %s, mode %d", pathname, mode); |
| @@ -309,7 +310,12 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode) | |||
| 309 | if (!pathname || !*pathname) | 310 | if (!pathname || !*pathname) |
| 310 | return ERR_PTR(-EINVAL); | 311 | return ERR_PTR(-EINVAL); |
| 311 | 312 | ||
| 312 | error = vfs_stat(pathname, &stat); | 313 | error = kern_path(pathname, LOOKUP_FOLLOW, &path); |
| 314 | if (error) | ||
| 315 | return ERR_PTR(error); | ||
| 316 | |||
| 317 | error = vfs_getattr(&path, &stat); | ||
| 318 | path_put(&path); | ||
| 313 | if (error) | 319 | if (error) |
| 314 | return ERR_PTR(error); | 320 | return ERR_PTR(error); |
| 315 | 321 | ||
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index 16419f550eff..058460bdd5a6 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c | |||
| @@ -141,7 +141,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv) | |||
| 141 | priv->bus = bus; | 141 | priv->bus = bus; |
| 142 | bus->priv = priv; | 142 | bus->priv = priv; |
| 143 | bus->parent = priv->dev; | 143 | bus->parent = priv->dev; |
| 144 | bus->name = "Synopsys MII Bus", | 144 | bus->name = "Synopsys MII Bus"; |
| 145 | bus->read = &arc_mdio_read; | 145 | bus->read = &arc_mdio_read; |
| 146 | bus->write = &arc_mdio_write; | 146 | bus->write = &arc_mdio_write; |
| 147 | bus->reset = &arc_mdio_reset; | 147 | bus->reset = &arc_mdio_reset; |
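
The arc_emac_mdio fix replaces a stray comma with a semicolon after the bus->name assignment. The comma operator made the original legal C, which is why it went unnoticed, but the same habit inside an expression silently changes results. A short illustration:

#include <stdio.h>

int main(void)
{
    int a, b;

    /* Chained with the comma operator: both assignments still happen,
     * which is why the original driver code worked despite the typo. */
    a = 1,
    b = 2;
    printf("a=%d b=%d\n", a, b);

    /* But the comma operator discards its left operand's value, so the
     * same pattern inside an expression quietly changes the result. */
    int c = (a = 10, b);    /* c is b (2), not 10 */
    printf("c=%d\n", c);

    return 0;
}
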
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h index 8fc93c5f6abc..d02c4240b7df 100644 --- a/drivers/net/ethernet/atheros/alx/alx.h +++ b/drivers/net/ethernet/atheros/alx/alx.h | |||
| @@ -96,6 +96,10 @@ struct alx_priv { | |||
| 96 | unsigned int rx_ringsz; | 96 | unsigned int rx_ringsz; |
| 97 | unsigned int rxbuf_size; | 97 | unsigned int rxbuf_size; |
| 98 | 98 | ||
| 99 | struct page *rx_page; | ||
| 100 | unsigned int rx_page_offset; | ||
| 101 | unsigned int rx_frag_size; | ||
| 102 | |||
| 99 | struct napi_struct napi; | 103 | struct napi_struct napi; |
| 100 | struct alx_tx_queue txq; | 104 | struct alx_tx_queue txq; |
| 101 | struct alx_rx_queue rxq; | 105 | struct alx_rx_queue rxq; |
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 9fe8b5e310d1..c98acdc0d14f 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c | |||
| @@ -70,6 +70,35 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry) | |||
| 70 | } | 70 | } |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp) | ||
| 74 | { | ||
| 75 | struct sk_buff *skb; | ||
| 76 | struct page *page; | ||
| 77 | |||
| 78 | if (alx->rx_frag_size > PAGE_SIZE) | ||
| 79 | return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp); | ||
| 80 | |||
| 81 | page = alx->rx_page; | ||
| 82 | if (!page) { | ||
| 83 | alx->rx_page = page = alloc_page(gfp); | ||
| 84 | if (unlikely(!page)) | ||
| 85 | return NULL; | ||
| 86 | alx->rx_page_offset = 0; | ||
| 87 | } | ||
| 88 | |||
| 89 | skb = build_skb(page_address(page) + alx->rx_page_offset, | ||
| 90 | alx->rx_frag_size); | ||
| 91 | if (likely(skb)) { | ||
| 92 | alx->rx_page_offset += alx->rx_frag_size; | ||
| 93 | if (alx->rx_page_offset >= PAGE_SIZE) | ||
| 94 | alx->rx_page = NULL; | ||
| 95 | else | ||
| 96 | get_page(page); | ||
| 97 | } | ||
| 98 | return skb; | ||
| 99 | } | ||
| 100 | |||
| 101 | |||
| 73 | static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) | 102 | static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) |
| 74 | { | 103 | { |
| 75 | struct alx_rx_queue *rxq = &alx->rxq; | 104 | struct alx_rx_queue *rxq = &alx->rxq; |
| @@ -86,7 +115,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) | |||
| 86 | while (!cur_buf->skb && next != rxq->read_idx) { | 115 | while (!cur_buf->skb && next != rxq->read_idx) { |
| 87 | struct alx_rfd *rfd = &rxq->rfd[cur]; | 116 | struct alx_rfd *rfd = &rxq->rfd[cur]; |
| 88 | 117 | ||
| 89 | skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp); | 118 | skb = alx_alloc_skb(alx, gfp); |
| 90 | if (!skb) | 119 | if (!skb) |
| 91 | break; | 120 | break; |
| 92 | dma = dma_map_single(&alx->hw.pdev->dev, | 121 | dma = dma_map_single(&alx->hw.pdev->dev, |
| @@ -124,6 +153,7 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp) | |||
| 124 | alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); | 153 | alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur); |
| 125 | } | 154 | } |
| 126 | 155 | ||
| 156 | |||
| 127 | return count; | 157 | return count; |
| 128 | } | 158 | } |
| 129 | 159 | ||
| @@ -592,6 +622,11 @@ static void alx_free_rings(struct alx_priv *alx) | |||
| 592 | kfree(alx->txq.bufs); | 622 | kfree(alx->txq.bufs); |
| 593 | kfree(alx->rxq.bufs); | 623 | kfree(alx->rxq.bufs); |
| 594 | 624 | ||
| 625 | if (alx->rx_page) { | ||
| 626 | put_page(alx->rx_page); | ||
| 627 | alx->rx_page = NULL; | ||
| 628 | } | ||
| 629 | |||
| 595 | dma_free_coherent(&alx->hw.pdev->dev, | 630 | dma_free_coherent(&alx->hw.pdev->dev, |
| 596 | alx->descmem.size, | 631 | alx->descmem.size, |
| 597 | alx->descmem.virt, | 632 | alx->descmem.virt, |
| @@ -646,6 +681,7 @@ static int alx_request_irq(struct alx_priv *alx) | |||
| 646 | alx->dev->name, alx); | 681 | alx->dev->name, alx); |
| 647 | if (!err) | 682 | if (!err) |
| 648 | goto out; | 683 | goto out; |
| 684 | |||
| 649 | /* fall back to legacy interrupt */ | 685 | /* fall back to legacy interrupt */ |
| 650 | pci_disable_msi(alx->hw.pdev); | 686 | pci_disable_msi(alx->hw.pdev); |
| 651 | } | 687 | } |
| @@ -689,6 +725,7 @@ static int alx_init_sw(struct alx_priv *alx) | |||
| 689 | struct pci_dev *pdev = alx->hw.pdev; | 725 | struct pci_dev *pdev = alx->hw.pdev; |
| 690 | struct alx_hw *hw = &alx->hw; | 726 | struct alx_hw *hw = &alx->hw; |
| 691 | int err; | 727 | int err; |
| 728 | unsigned int head_size; | ||
| 692 | 729 | ||
| 693 | err = alx_identify_hw(alx); | 730 | err = alx_identify_hw(alx); |
| 694 | if (err) { | 731 | if (err) { |
| @@ -704,7 +741,12 @@ static int alx_init_sw(struct alx_priv *alx) | |||
| 704 | 741 | ||
| 705 | hw->smb_timer = 400; | 742 | hw->smb_timer = 400; |
| 706 | hw->mtu = alx->dev->mtu; | 743 | hw->mtu = alx->dev->mtu; |
| 744 | |||
| 707 | alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu); | 745 | alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu); |
| 746 | head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) + | ||
| 747 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
| 748 | alx->rx_frag_size = roundup_pow_of_two(head_size); | ||
| 749 | |||
| 708 | alx->tx_ringsz = 256; | 750 | alx->tx_ringsz = 256; |
| 709 | alx->rx_ringsz = 512; | 751 | alx->rx_ringsz = 512; |
| 710 | hw->imt = 200; | 752 | hw->imt = 200; |
| @@ -806,6 +848,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) | |||
| 806 | { | 848 | { |
| 807 | struct alx_priv *alx = netdev_priv(netdev); | 849 | struct alx_priv *alx = netdev_priv(netdev); |
| 808 | int max_frame = ALX_MAX_FRAME_LEN(mtu); | 850 | int max_frame = ALX_MAX_FRAME_LEN(mtu); |
| 851 | unsigned int head_size; | ||
| 809 | 852 | ||
| 810 | if ((max_frame < ALX_MIN_FRAME_SIZE) || | 853 | if ((max_frame < ALX_MIN_FRAME_SIZE) || |
| 811 | (max_frame > ALX_MAX_FRAME_SIZE)) | 854 | (max_frame > ALX_MAX_FRAME_SIZE)) |
| @@ -817,6 +860,9 @@ static int alx_change_mtu(struct net_device *netdev, int mtu) | |||
| 817 | netdev->mtu = mtu; | 860 | netdev->mtu = mtu; |
| 818 | alx->hw.mtu = mtu; | 861 | alx->hw.mtu = mtu; |
| 819 | alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); | 862 | alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); |
| 863 | head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) + | ||
| 864 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
| 865 | alx->rx_frag_size = roundup_pow_of_two(head_size); | ||
| 820 | netdev_update_features(netdev); | 866 | netdev_update_features(netdev); |
| 821 | if (netif_running(netdev)) | 867 | if (netif_running(netdev)) |
| 822 | alx_reinit(alx); | 868 | alx_reinit(alx); |
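
The alx changes carve receive buffers out of a shared page: head_size is the aligned buffer size plus the skb_shared_info footer, rx_frag_size is that rounded up to a power of two, and alx_alloc_skb() slices rx_frag_size bytes off rx_page until the page is used up. A standalone sketch of the size arithmetic; NET_SKB_PAD, the shared-info size and the cache-line alignment below are assumed stand-ins for the kernel constants:

#include <stdio.h>

#define PAGE_SIZE   4096u
#define NET_SKB_PAD 64u
#define SHARED_INFO 320u    /* rough stand-in for sizeof(struct skb_shared_info) */
#define SMP_CACHE   64u

static unsigned int skb_data_align(unsigned int x)
{
    return (x + SMP_CACHE - 1) & ~(SMP_CACHE - 1);
}

static unsigned int roundup_pow_of_two(unsigned int x)
{
    unsigned int p = 1;

    while (p < x)
        p <<= 1;
    return p;
}

int main(void)
{
    unsigned int rxbuf_size = 1536;      /* roughly a 1500-byte MTU frame */
    unsigned int head_size, rx_frag_size;

    head_size = skb_data_align(rxbuf_size + NET_SKB_PAD) +
                skb_data_align(SHARED_INFO);
    rx_frag_size = roundup_pow_of_two(head_size);

    printf("head_size=%u rx_frag_size=%u frags/page=%u\n",
           head_size, rx_frag_size,
           rx_frag_size <= PAGE_SIZE ? PAGE_SIZE / rx_frag_size : 1);
    return 0;
}
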
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 0a5b770cefaa..a59d55e25d5f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -12895,52 +12895,71 @@ static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add) | |||
| 12895 | return rc; | 12895 | return rc; |
| 12896 | } | 12896 | } |
| 12897 | 12897 | ||
| 12898 | int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) | 12898 | static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp) |
| 12899 | { | 12899 | { |
| 12900 | struct bnx2x_vlan_entry *vlan; | 12900 | struct bnx2x_vlan_entry *vlan; |
| 12901 | int rc = 0; | 12901 | int rc = 0; |
| 12902 | 12902 | ||
| 12903 | if (!bp->vlan_cnt) { | 12903 | /* Configure all non-configured entries */ |
| 12904 | DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n"); | ||
| 12905 | return 0; | ||
| 12906 | } | ||
| 12907 | |||
| 12908 | list_for_each_entry(vlan, &bp->vlan_reg, link) { | 12904 | list_for_each_entry(vlan, &bp->vlan_reg, link) { |
| 12909 | /* Prepare for cleanup in case of errors */ | 12905 | if (vlan->hw) |
| 12910 | if (rc) { | ||
| 12911 | vlan->hw = false; | ||
| 12912 | continue; | ||
| 12913 | } | ||
| 12914 | |||
| 12915 | if (!vlan->hw) | ||
| 12916 | continue; | 12906 | continue; |
| 12917 | 12907 | ||
| 12918 | DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid); | 12908 | if (bp->vlan_cnt >= bp->vlan_credit) |
| 12909 | return -ENOBUFS; | ||
| 12919 | 12910 | ||
| 12920 | rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); | 12911 | rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); |
| 12921 | if (rc) { | 12912 | if (rc) { |
| 12922 | BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid); | 12913 | BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid); |
| 12923 | vlan->hw = false; | 12914 | return rc; |
| 12924 | rc = -EINVAL; | ||
| 12925 | continue; | ||
| 12926 | } | 12915 | } |
| 12916 | |||
| 12917 | DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid); | ||
| 12918 | vlan->hw = true; | ||
| 12919 | bp->vlan_cnt++; | ||
| 12927 | } | 12920 | } |
| 12928 | 12921 | ||
| 12929 | return rc; | 12922 | return 0; |
| 12923 | } | ||
| 12924 | |||
| 12925 | static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode) | ||
| 12926 | { | ||
| 12927 | bool need_accept_any_vlan; | ||
| 12928 | |||
| 12929 | need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp); | ||
| 12930 | |||
| 12931 | if (bp->accept_any_vlan != need_accept_any_vlan) { | ||
| 12932 | bp->accept_any_vlan = need_accept_any_vlan; | ||
| 12933 | DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n", | ||
| 12934 | bp->accept_any_vlan ? "raised" : "cleared"); | ||
| 12935 | if (set_rx_mode) { | ||
| 12936 | if (IS_PF(bp)) | ||
| 12937 | bnx2x_set_rx_mode_inner(bp); | ||
| 12938 | else | ||
| 12939 | bnx2x_vfpf_storm_rx_mode(bp); | ||
| 12940 | } | ||
| 12941 | } | ||
| 12942 | } | ||
| 12943 | |||
| 12944 | int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) | ||
| 12945 | { | ||
| 12946 | struct bnx2x_vlan_entry *vlan; | ||
| 12947 | |||
| 12948 | /* The hw forgot all entries after reload */ | ||
| 12949 | list_for_each_entry(vlan, &bp->vlan_reg, link) | ||
| 12950 | vlan->hw = false; | ||
| 12951 | bp->vlan_cnt = 0; | ||
| 12952 | |||
| 12953 | /* Don't set rx mode here. Our caller will do it. */ | ||
| 12954 | bnx2x_vlan_configure(bp, false); | ||
| 12955 | |||
| 12956 | return 0; | ||
| 12930 | } | 12957 | } |
| 12931 | 12958 | ||
| 12932 | static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) | 12959 | static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) |
| 12933 | { | 12960 | { |
| 12934 | struct bnx2x *bp = netdev_priv(dev); | 12961 | struct bnx2x *bp = netdev_priv(dev); |
| 12935 | struct bnx2x_vlan_entry *vlan; | 12962 | struct bnx2x_vlan_entry *vlan; |
| 12936 | bool hw = false; | ||
| 12937 | int rc = 0; | ||
| 12938 | |||
| 12939 | if (!netif_running(bp->dev)) { | ||
| 12940 | DP(NETIF_MSG_IFUP, | ||
| 12941 | "Ignoring VLAN configuration the interface is down\n"); | ||
| 12942 | return -EFAULT; | ||
| 12943 | } | ||
| 12944 | 12963 | ||
| 12945 | DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); | 12964 | DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); |
| 12946 | 12965 | ||
| @@ -12948,93 +12967,47 @@ static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) | |||
| 12948 | if (!vlan) | 12967 | if (!vlan) |
| 12949 | return -ENOMEM; | 12968 | return -ENOMEM; |
| 12950 | 12969 | ||
| 12951 | bp->vlan_cnt++; | ||
| 12952 | if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) { | ||
| 12953 | DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n"); | ||
| 12954 | bp->accept_any_vlan = true; | ||
| 12955 | if (IS_PF(bp)) | ||
| 12956 | bnx2x_set_rx_mode_inner(bp); | ||
| 12957 | else | ||
| 12958 | bnx2x_vfpf_storm_rx_mode(bp); | ||
| 12959 | } else if (bp->vlan_cnt <= bp->vlan_credit) { | ||
| 12960 | rc = __bnx2x_vlan_configure_vid(bp, vid, true); | ||
| 12961 | hw = true; | ||
| 12962 | } | ||
| 12963 | |||
| 12964 | vlan->vid = vid; | 12970 | vlan->vid = vid; |
| 12965 | vlan->hw = hw; | 12971 | vlan->hw = false; |
| 12972 | list_add_tail(&vlan->link, &bp->vlan_reg); | ||
| 12966 | 12973 | ||
| 12967 | if (!rc) { | 12974 | if (netif_running(dev)) |
| 12968 | list_add(&vlan->link, &bp->vlan_reg); | 12975 | bnx2x_vlan_configure(bp, true); |
| 12969 | } else { | ||
| 12970 | bp->vlan_cnt--; | ||
| 12971 | kfree(vlan); | ||
| 12972 | } | ||
| 12973 | |||
| 12974 | DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc); | ||
| 12975 | 12976 | ||
| 12976 | return rc; | 12977 | return 0; |
| 12977 | } | 12978 | } |
| 12978 | 12979 | ||
| 12979 | static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) | 12980 | static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) |
| 12980 | { | 12981 | { |
| 12981 | struct bnx2x *bp = netdev_priv(dev); | 12982 | struct bnx2x *bp = netdev_priv(dev); |
| 12982 | struct bnx2x_vlan_entry *vlan; | 12983 | struct bnx2x_vlan_entry *vlan; |
| 12984 | bool found = false; | ||
| 12983 | int rc = 0; | 12985 | int rc = 0; |
| 12984 | 12986 | ||
| 12985 | if (!netif_running(bp->dev)) { | ||
| 12986 | DP(NETIF_MSG_IFUP, | ||
| 12987 | "Ignoring VLAN configuration the interface is down\n"); | ||
| 12988 | return -EFAULT; | ||
| 12989 | } | ||
| 12990 | |||
| 12991 | DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); | 12987 | DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); |
| 12992 | 12988 | ||
| 12993 | if (!bp->vlan_cnt) { | ||
| 12994 | BNX2X_ERR("Unable to kill VLAN %d\n", vid); | ||
| 12995 | return -EINVAL; | ||
| 12996 | } | ||
| 12997 | |||
| 12998 | list_for_each_entry(vlan, &bp->vlan_reg, link) | 12989 | list_for_each_entry(vlan, &bp->vlan_reg, link) |
| 12999 | if (vlan->vid == vid) | 12990 | if (vlan->vid == vid) { |
| 12991 | found = true; | ||
| 13000 | break; | 12992 | break; |
| 12993 | } | ||
| 13001 | 12994 | ||
| 13002 | if (vlan->vid != vid) { | 12995 | if (!found) { |
| 13003 | BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); | 12996 | BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); |
| 13004 | return -EINVAL; | 12997 | return -EINVAL; |
| 13005 | } | 12998 | } |
| 13006 | 12999 | ||
| 13007 | if (vlan->hw) | 13000 | if (netif_running(dev) && vlan->hw) { |
| 13008 | rc = __bnx2x_vlan_configure_vid(bp, vid, false); | 13001 | rc = __bnx2x_vlan_configure_vid(bp, vid, false); |
| 13002 | DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid); | ||
| 13003 | bp->vlan_cnt--; | ||
| 13004 | } | ||
| 13009 | 13005 | ||
| 13010 | list_del(&vlan->link); | 13006 | list_del(&vlan->link); |
| 13011 | kfree(vlan); | 13007 | kfree(vlan); |
| 13012 | 13008 | ||
| 13013 | bp->vlan_cnt--; | 13009 | if (netif_running(dev)) |
| 13014 | 13010 | bnx2x_vlan_configure(bp, true); | |
| 13015 | if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) { | ||
| 13016 | /* Configure all non-configured entries */ | ||
| 13017 | list_for_each_entry(vlan, &bp->vlan_reg, link) { | ||
| 13018 | if (vlan->hw) | ||
| 13019 | continue; | ||
| 13020 | |||
| 13021 | rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); | ||
| 13022 | if (rc) { | ||
| 13023 | BNX2X_ERR("Unable to config VLAN %d\n", | ||
| 13024 | vlan->vid); | ||
| 13025 | continue; | ||
| 13026 | } | ||
| 13027 | DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", | ||
| 13028 | vlan->vid); | ||
| 13029 | vlan->hw = true; | ||
| 13030 | } | ||
| 13031 | DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n"); | ||
| 13032 | bp->accept_any_vlan = false; | ||
| 13033 | if (IS_PF(bp)) | ||
| 13034 | bnx2x_set_rx_mode_inner(bp); | ||
| 13035 | else | ||
| 13036 | bnx2x_vfpf_storm_rx_mode(bp); | ||
| 13037 | } | ||
| 13038 | 13011 | ||
| 13039 | DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); | 13012 | DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); |
| 13040 | 13013 | ||
| @@ -13941,14 +13914,14 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
| 13941 | bp->doorbells = bnx2x_vf_doorbells(bp); | 13914 | bp->doorbells = bnx2x_vf_doorbells(bp); |
| 13942 | rc = bnx2x_vf_pci_alloc(bp); | 13915 | rc = bnx2x_vf_pci_alloc(bp); |
| 13943 | if (rc) | 13916 | if (rc) |
| 13944 | goto init_one_exit; | 13917 | goto init_one_freemem; |
| 13945 | } else { | 13918 | } else { |
| 13946 | doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); | 13919 | doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); |
| 13947 | if (doorbell_size > pci_resource_len(pdev, 2)) { | 13920 | if (doorbell_size > pci_resource_len(pdev, 2)) { |
| 13948 | dev_err(&bp->pdev->dev, | 13921 | dev_err(&bp->pdev->dev, |
| 13949 | "Cannot map doorbells, bar size too small, aborting\n"); | 13922 | "Cannot map doorbells, bar size too small, aborting\n"); |
| 13950 | rc = -ENOMEM; | 13923 | rc = -ENOMEM; |
| 13951 | goto init_one_exit; | 13924 | goto init_one_freemem; |
| 13952 | } | 13925 | } |
| 13953 | bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), | 13926 | bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), |
| 13954 | doorbell_size); | 13927 | doorbell_size); |
| @@ -13957,19 +13930,19 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
| 13957 | dev_err(&bp->pdev->dev, | 13930 | dev_err(&bp->pdev->dev, |
| 13958 | "Cannot map doorbell space, aborting\n"); | 13931 | "Cannot map doorbell space, aborting\n"); |
| 13959 | rc = -ENOMEM; | 13932 | rc = -ENOMEM; |
| 13960 | goto init_one_exit; | 13933 | goto init_one_freemem; |
| 13961 | } | 13934 | } |
| 13962 | 13935 | ||
| 13963 | if (IS_VF(bp)) { | 13936 | if (IS_VF(bp)) { |
| 13964 | rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); | 13937 | rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count); |
| 13965 | if (rc) | 13938 | if (rc) |
| 13966 | goto init_one_exit; | 13939 | goto init_one_freemem; |
| 13967 | } | 13940 | } |
| 13968 | 13941 | ||
| 13969 | /* Enable SRIOV if capability found in configuration space */ | 13942 | /* Enable SRIOV if capability found in configuration space */ |
| 13970 | rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); | 13943 | rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS); |
| 13971 | if (rc) | 13944 | if (rc) |
| 13972 | goto init_one_exit; | 13945 | goto init_one_freemem; |
| 13973 | 13946 | ||
| 13974 | /* calc qm_cid_count */ | 13947 | /* calc qm_cid_count */ |
| 13975 | bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); | 13948 | bp->qm_cid_count = bnx2x_set_qm_cid_count(bp); |
| @@ -13988,7 +13961,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
| 13988 | rc = bnx2x_set_int_mode(bp); | 13961 | rc = bnx2x_set_int_mode(bp); |
| 13989 | if (rc) { | 13962 | if (rc) { |
| 13990 | dev_err(&pdev->dev, "Cannot set interrupts\n"); | 13963 | dev_err(&pdev->dev, "Cannot set interrupts\n"); |
| 13991 | goto init_one_exit; | 13964 | goto init_one_freemem; |
| 13992 | } | 13965 | } |
| 13993 | BNX2X_DEV_INFO("set interrupts successfully\n"); | 13966 | BNX2X_DEV_INFO("set interrupts successfully\n"); |
| 13994 | 13967 | ||
| @@ -13996,7 +13969,7 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
| 13996 | rc = register_netdev(dev); | 13969 | rc = register_netdev(dev); |
| 13997 | if (rc) { | 13970 | if (rc) { |
| 13998 | dev_err(&pdev->dev, "Cannot register net device\n"); | 13971 | dev_err(&pdev->dev, "Cannot register net device\n"); |
| 13999 | goto init_one_exit; | 13972 | goto init_one_freemem; |
| 14000 | } | 13973 | } |
| 14001 | BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); | 13974 | BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name); |
| 14002 | 13975 | ||
| @@ -14029,6 +14002,9 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
| 14029 | 14002 | ||
| 14030 | return 0; | 14003 | return 0; |
| 14031 | 14004 | ||
| 14005 | init_one_freemem: | ||
| 14006 | bnx2x_free_mem_bp(bp); | ||
| 14007 | |||
| 14032 | init_one_exit: | 14008 | init_one_exit: |
| 14033 | bnx2x_disable_pcie_error_reporting(bp); | 14009 | bnx2x_disable_pcie_error_reporting(bp); |
| 14034 | 14010 | ||
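
The bnx2x rework keeps every registered VID on a software list, lets bnx2x_vlan_configure_vid_list() program unconfigured entries until the hardware credit is exhausted, and falls back to accept-any-VLAN when it cannot. A compact sketch of that credit-limited pass, with the list simplified to an array and the credit value assumed:

#include <stdbool.h>
#include <stdio.h>

#define MAX_VIDS 8

struct vlan_entry { unsigned short vid; bool hw; };

static struct vlan_entry vlans[MAX_VIDS];
static int vlan_count;                  /* entries on the software list */
static int vlan_hw_count;               /* entries programmed into hardware */
static const int vlan_credit = 2;       /* assumed hardware filter capacity */

/* Program every not-yet-configured entry until the credit runs out.
 * Returns 0 on success, -1 when the caller must accept any VLAN. */
static int configure_vid_list(void)
{
    for (int i = 0; i < vlan_count; i++) {
        if (vlans[i].hw)
            continue;
        if (vlan_hw_count >= vlan_credit)
            return -1;
        vlans[i].hw = true;             /* the HW command would go here */
        vlan_hw_count++;
    }
    return 0;
}

static void add_vid(unsigned short vid)
{
    vlans[vlan_count].vid = vid;
    vlans[vlan_count].hw = false;
    vlan_count++;

    bool accept_any_vlan = configure_vid_list() != 0;
    printf("after adding %u: accept_any_vlan=%d\n", vid, accept_any_vlan);
}

int main(void)
{
    add_vid(10);    /* fits in the credit */
    add_vid(20);    /* fits in the credit */
    add_vid(30);    /* exceeds it: fall back to accept-any */
    return 0;
}
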
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 72a2efff8e49..c777cde85ce4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -286,7 +286,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 286 | cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); | 286 | cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); |
| 287 | txr->tx_prod = prod; | 287 | txr->tx_prod = prod; |
| 288 | 288 | ||
| 289 | tx_buf->is_push = 1; | ||
| 289 | netdev_tx_sent_queue(txq, skb->len); | 290 | netdev_tx_sent_queue(txq, skb->len); |
| 291 | wmb(); /* Sync is_push and byte queue before pushing data */ | ||
| 290 | 292 | ||
| 291 | push_len = (length + sizeof(*tx_push) + 7) / 8; | 293 | push_len = (length + sizeof(*tx_push) + 7) / 8; |
| 292 | if (push_len > 16) { | 294 | if (push_len > 16) { |
| @@ -298,7 +300,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 298 | push_len); | 300 | push_len); |
| 299 | } | 301 | } |
| 300 | 302 | ||
| 301 | tx_buf->is_push = 1; | ||
| 302 | goto tx_done; | 303 | goto tx_done; |
| 303 | } | 304 | } |
| 304 | 305 | ||
| @@ -1112,19 +1113,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, | |||
| 1112 | if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) | 1113 | if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) |
| 1113 | skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); | 1114 | skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); |
| 1114 | 1115 | ||
| 1115 | if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { | 1116 | if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && |
| 1116 | netdev_features_t features = skb->dev->features; | 1117 | (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { |
| 1117 | u16 vlan_proto = tpa_info->metadata >> | 1118 | u16 vlan_proto = tpa_info->metadata >> |
| 1118 | RX_CMP_FLAGS2_METADATA_TPID_SFT; | 1119 | RX_CMP_FLAGS2_METADATA_TPID_SFT; |
| 1120 | u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK; | ||
| 1119 | 1121 | ||
| 1120 | if (((features & NETIF_F_HW_VLAN_CTAG_RX) && | 1122 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); |
| 1121 | vlan_proto == ETH_P_8021Q) || | ||
| 1122 | ((features & NETIF_F_HW_VLAN_STAG_RX) && | ||
| 1123 | vlan_proto == ETH_P_8021AD)) { | ||
| 1124 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), | ||
| 1125 | tpa_info->metadata & | ||
| 1126 | RX_CMP_FLAGS2_METADATA_VID_MASK); | ||
| 1127 | } | ||
| 1128 | } | 1123 | } |
| 1129 | 1124 | ||
| 1130 | skb_checksum_none_assert(skb); | 1125 | skb_checksum_none_assert(skb); |
| @@ -1277,19 +1272,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, | |||
| 1277 | 1272 | ||
| 1278 | skb->protocol = eth_type_trans(skb, dev); | 1273 | skb->protocol = eth_type_trans(skb, dev); |
| 1279 | 1274 | ||
| 1280 | if (rxcmp1->rx_cmp_flags2 & | 1275 | if ((rxcmp1->rx_cmp_flags2 & |
| 1281 | cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) { | 1276 | cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && |
| 1282 | netdev_features_t features = skb->dev->features; | 1277 | (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { |
| 1283 | u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); | 1278 | u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); |
| 1279 | u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK; | ||
| 1284 | u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; | 1280 | u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; |
| 1285 | 1281 | ||
| 1286 | if (((features & NETIF_F_HW_VLAN_CTAG_RX) && | 1282 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); |
| 1287 | vlan_proto == ETH_P_8021Q) || | ||
| 1288 | ((features & NETIF_F_HW_VLAN_STAG_RX) && | ||
| 1289 | vlan_proto == ETH_P_8021AD)) | ||
| 1290 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), | ||
| 1291 | meta_data & | ||
| 1292 | RX_CMP_FLAGS2_METADATA_VID_MASK); | ||
| 1293 | } | 1283 | } |
| 1294 | 1284 | ||
| 1295 | skb_checksum_none_assert(skb); | 1285 | skb_checksum_none_assert(skb); |
| @@ -5466,6 +5456,20 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, | |||
| 5466 | 5456 | ||
| 5467 | if (!bnxt_rfs_capable(bp)) | 5457 | if (!bnxt_rfs_capable(bp)) |
| 5468 | features &= ~NETIF_F_NTUPLE; | 5458 | features &= ~NETIF_F_NTUPLE; |
| 5459 | |||
| 5460 | /* Both CTAG and STAG VLAN acceleration on the RX side have to be | ||
| 5461 | * turned on or off together. | ||
| 5462 | */ | ||
| 5463 | if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != | ||
| 5464 | (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { | ||
| 5465 | if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) | ||
| 5466 | features &= ~(NETIF_F_HW_VLAN_CTAG_RX | | ||
| 5467 | NETIF_F_HW_VLAN_STAG_RX); | ||
| 5468 | else | ||
| 5469 | features |= NETIF_F_HW_VLAN_CTAG_RX | | ||
| 5470 | NETIF_F_HW_VLAN_STAG_RX; | ||
| 5471 | } | ||
| 5472 | |||
| 5469 | return features; | 5473 | return features; |
| 5470 | } | 5474 | } |
| 5471 | 5475 | ||
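
The bnxt_fix_features() hunk forces CTAG and STAG RX VLAN stripping to be enabled or disabled as a pair, snapping a mixed request to the currently active CTAG state. A hedged sketch of the same mask fixup over plain flag bits; the flag values are made up for the example and are not the kernel's NETIF_F_* constants.

    #include <stdio.h>
    #include <stdint.h>

    #define F_VLAN_CTAG_RX  (1u << 0)   /* illustrative values only */
    #define F_VLAN_STAG_RX  (1u << 1)
    #define F_BOTH          (F_VLAN_CTAG_RX | F_VLAN_STAG_RX)

    /* Mirror of the hunk: if the request does not carry both RX VLAN
     * offloads, align the pair with the device's current CTAG state. */
    static uint32_t fix_features(uint32_t dev_features, uint32_t features)
    {
        if ((features & F_BOTH) != F_BOTH) {
            if (dev_features & F_VLAN_CTAG_RX)
                features &= ~F_BOTH;
            else
                features |= F_BOTH;
        }
        return features;
    }

    int main(void)
    {
        printf("0x%x\n", fix_features(0, F_VLAN_CTAG_RX));              /* both set */
        printf("0x%x\n", fix_features(F_VLAN_CTAG_RX, F_VLAN_STAG_RX)); /* both cleared */
        return 0;
    }
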
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index a2cdfc1261dc..50812a1d67bd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | |||
| @@ -144,6 +144,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN | |||
| 144 | CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ | 144 | CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ |
| 145 | CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ | 145 | CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ |
| 146 | CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ | 146 | CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ |
| 147 | CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */ | ||
| 147 | CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ | 148 | CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ |
| 148 | CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ | 149 | CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ |
| 149 | CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ | 150 | CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ |
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 41b010645100..4edb98c3c6c7 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c | |||
| @@ -1195,7 +1195,7 @@ static int ethoc_probe(struct platform_device *pdev) | |||
| 1195 | priv->mdio = mdiobus_alloc(); | 1195 | priv->mdio = mdiobus_alloc(); |
| 1196 | if (!priv->mdio) { | 1196 | if (!priv->mdio) { |
| 1197 | ret = -ENOMEM; | 1197 | ret = -ENOMEM; |
| 1198 | goto free; | 1198 | goto free2; |
| 1199 | } | 1199 | } |
| 1200 | 1200 | ||
| 1201 | priv->mdio->name = "ethoc-mdio"; | 1201 | priv->mdio->name = "ethoc-mdio"; |
| @@ -1208,7 +1208,7 @@ static int ethoc_probe(struct platform_device *pdev) | |||
| 1208 | ret = mdiobus_register(priv->mdio); | 1208 | ret = mdiobus_register(priv->mdio); |
| 1209 | if (ret) { | 1209 | if (ret) { |
| 1210 | dev_err(&netdev->dev, "failed to register MDIO bus\n"); | 1210 | dev_err(&netdev->dev, "failed to register MDIO bus\n"); |
| 1211 | goto free; | 1211 | goto free2; |
| 1212 | } | 1212 | } |
| 1213 | 1213 | ||
| 1214 | ret = ethoc_mdio_probe(netdev); | 1214 | ret = ethoc_mdio_probe(netdev); |
| @@ -1241,9 +1241,10 @@ error2: | |||
| 1241 | error: | 1241 | error: |
| 1242 | mdiobus_unregister(priv->mdio); | 1242 | mdiobus_unregister(priv->mdio); |
| 1243 | mdiobus_free(priv->mdio); | 1243 | mdiobus_free(priv->mdio); |
| 1244 | free: | 1244 | free2: |
| 1245 | if (priv->clk) | 1245 | if (priv->clk) |
| 1246 | clk_disable_unprepare(priv->clk); | 1246 | clk_disable_unprepare(priv->clk); |
| 1247 | free: | ||
| 1247 | free_netdev(netdev); | 1248 | free_netdev(netdev); |
| 1248 | out: | 1249 | out: |
| 1249 | return ret; | 1250 | return ret; |
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 085f9125cf42..06f031715b57 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c | |||
| @@ -205,8 +205,10 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) | |||
| 205 | * re-adding ourselves to the poll list. | 205 | * re-adding ourselves to the poll list. |
| 206 | */ | 206 | */ |
| 207 | 207 | ||
| 208 | if (priv->tx_skb && !tx_ctrl_ct) | 208 | if (priv->tx_skb && !tx_ctrl_ct) { |
| 209 | nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); | ||
| 209 | napi_reschedule(napi); | 210 | napi_reschedule(napi); |
| 211 | } | ||
| 210 | } | 212 | } |
| 211 | 213 | ||
| 212 | return work_done; | 214 | return work_done; |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ca2cccc594fd..fea0f330ddbd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -1197,10 +1197,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) | |||
| 1197 | fec16_to_cpu(bdp->cbd_datlen), | 1197 | fec16_to_cpu(bdp->cbd_datlen), |
| 1198 | DMA_TO_DEVICE); | 1198 | DMA_TO_DEVICE); |
| 1199 | bdp->cbd_bufaddr = cpu_to_fec32(0); | 1199 | bdp->cbd_bufaddr = cpu_to_fec32(0); |
| 1200 | if (!skb) { | 1200 | if (!skb) |
| 1201 | bdp = fec_enet_get_nextdesc(bdp, &txq->bd); | 1201 | goto skb_done; |
| 1202 | continue; | ||
| 1203 | } | ||
| 1204 | 1202 | ||
| 1205 | /* Check for errors. */ | 1203 | /* Check for errors. */ |
| 1206 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | | 1204 | if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | |
| @@ -1239,7 +1237,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) | |||
| 1239 | 1237 | ||
| 1240 | /* Free the sk buffer associated with this last transmit */ | 1238 | /* Free the sk buffer associated with this last transmit */ |
| 1241 | dev_kfree_skb_any(skb); | 1239 | dev_kfree_skb_any(skb); |
| 1242 | 1240 | skb_done: | |
| 1243 | /* Make sure the update to bdp and tx_skbuff are performed | 1241 | /* Make sure the update to bdp and tx_skbuff are performed |
| 1244 | * before dirty_tx | 1242 | * before dirty_tx |
| 1245 | */ | 1243 | */ |
| @@ -2418,24 +2416,24 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) | |||
| 2418 | return -EOPNOTSUPP; | 2416 | return -EOPNOTSUPP; |
| 2419 | 2417 | ||
| 2420 | if (ec->rx_max_coalesced_frames > 255) { | 2418 | if (ec->rx_max_coalesced_frames > 255) { |
| 2421 | pr_err("Rx coalesced frames exceed hardware limiation"); | 2419 | pr_err("Rx coalesced frames exceed hardware limitation\n"); |
| 2422 | return -EINVAL; | 2420 | return -EINVAL; |
| 2423 | } | 2421 | } |
| 2424 | 2422 | ||
| 2425 | if (ec->tx_max_coalesced_frames > 255) { | 2423 | if (ec->tx_max_coalesced_frames > 255) { |
| 2426 | pr_err("Tx coalesced frame exceed hardware limiation"); | 2424 | pr_err("Tx coalesced frame exceed hardware limitation\n"); |
| 2427 | return -EINVAL; | 2425 | return -EINVAL; |
| 2428 | } | 2426 | } |
| 2429 | 2427 | ||
| 2430 | cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); | 2428 | cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); |
| 2431 | if (cycle > 0xFFFF) { | 2429 | if (cycle > 0xFFFF) { |
| 2432 | pr_err("Rx coalesed usec exceeed hardware limiation"); | 2430 | pr_err("Rx coalesced usec exceed hardware limitation\n"); |
| 2433 | return -EINVAL; | 2431 | return -EINVAL; |
| 2434 | } | 2432 | } |
| 2435 | 2433 | ||
| 2436 | cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); | 2434 | cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); |
| 2437 | if (cycle > 0xFFFF) { | 2435 | if (cycle > 0xFFFF) { |
| 2438 | pr_err("Rx coalesed usec exceeed hardware limiation"); | 2436 | pr_err("Rx coalesced usec exceed hardware limitation\n"); |
| 2439 | return -EINVAL; | 2437 | return -EINVAL; |
| 2440 | } | 2438 | } |
| 2441 | 2439 | ||
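
The fec hunk only corrects the wording of the error messages; the checks themselves bound the frame count to an 8-bit register and the converted microsecond value to a 16-bit timer field. A rough userspace sketch of that kind of range validation; the ITR clock rate is an assumption for the example, not the i.MX value.

    #include <stdio.h>
    #include <stdint.h>

    #define ASSUMED_ITR_CLK_HZ 66000000ULL  /* illustrative clock rate */

    /* Convert a microsecond target into timer ticks before checking it
     * against the 16-bit hardware field, as the driver does. */
    static uint64_t us_to_itr_cycles(uint32_t us)
    {
        return (uint64_t)us * ASSUMED_ITR_CLK_HZ / 1000000ULL;
    }

    static int check_coalesce(uint32_t max_frames, uint32_t usecs)
    {
        if (max_frames > 255) {                     /* 8-bit frame counter */
            fprintf(stderr, "coalesced frames exceed hardware limitation\n");
            return -1;
        }
        if (us_to_itr_cycles(usecs) > 0xFFFF) {     /* 16-bit timer field */
            fprintf(stderr, "coalesced usecs exceed hardware limitation\n");
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_coalesce(64, 100));    /* ok */
        printf("%d\n", check_coalesce(300, 100));   /* too many frames */
        return 0;
    }
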
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 7615e0668acb..2e6785b6e8be 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
| @@ -2440,7 +2440,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2440 | tx_queue->tx_ring_size); | 2440 | tx_queue->tx_ring_size); |
| 2441 | 2441 | ||
| 2442 | if (likely(!nr_frags)) { | 2442 | if (likely(!nr_frags)) { |
| 2443 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | 2443 | if (likely(!do_tstamp)) |
| 2444 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | ||
| 2444 | } else { | 2445 | } else { |
| 2445 | u32 lstatus_start = lstatus; | 2446 | u32 lstatus_start = lstatus; |
| 2446 | 2447 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 3d746c887873..67a648c7d3a9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | |||
| @@ -46,7 +46,6 @@ static u32 hns_nic_get_link(struct net_device *net_dev) | |||
| 46 | u32 link_stat = priv->link; | 46 | u32 link_stat = priv->link; |
| 47 | struct hnae_handle *h; | 47 | struct hnae_handle *h; |
| 48 | 48 | ||
| 49 | assert(priv && priv->ae_handle); | ||
| 50 | h = priv->ae_handle; | 49 | h = priv->ae_handle; |
| 51 | 50 | ||
| 52 | if (priv->phy) { | 51 | if (priv->phy) { |
| @@ -646,8 +645,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev, | |||
| 646 | { | 645 | { |
| 647 | struct hns_nic_priv *priv = netdev_priv(net_dev); | 646 | struct hns_nic_priv *priv = netdev_priv(net_dev); |
| 648 | 647 | ||
| 649 | assert(priv); | ||
| 650 | |||
| 651 | strncpy(drvinfo->version, HNAE_DRIVER_VERSION, | 648 | strncpy(drvinfo->version, HNAE_DRIVER_VERSION, |
| 652 | sizeof(drvinfo->version)); | 649 | sizeof(drvinfo->version)); |
| 653 | drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; | 650 | drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; |
| @@ -720,8 +717,6 @@ static int hns_set_pauseparam(struct net_device *net_dev, | |||
| 720 | struct hnae_handle *h; | 717 | struct hnae_handle *h; |
| 721 | struct hnae_ae_ops *ops; | 718 | struct hnae_ae_ops *ops; |
| 722 | 719 | ||
| 723 | assert(priv || priv->ae_handle); | ||
| 724 | |||
| 725 | h = priv->ae_handle; | 720 | h = priv->ae_handle; |
| 726 | ops = h->dev->ops; | 721 | ops = h->dev->ops; |
| 727 | 722 | ||
| @@ -780,8 +775,6 @@ static int hns_set_coalesce(struct net_device *net_dev, | |||
| 780 | struct hnae_ae_ops *ops; | 775 | struct hnae_ae_ops *ops; |
| 781 | int ret; | 776 | int ret; |
| 782 | 777 | ||
| 783 | assert(priv || priv->ae_handle); | ||
| 784 | |||
| 785 | ops = priv->ae_handle->dev->ops; | 778 | ops = priv->ae_handle->dev->ops; |
| 786 | 779 | ||
| 787 | if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs) | 780 | if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs) |
| @@ -1111,8 +1104,6 @@ void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd, | |||
| 1111 | struct hns_nic_priv *priv = netdev_priv(net_dev); | 1104 | struct hns_nic_priv *priv = netdev_priv(net_dev); |
| 1112 | struct hnae_ae_ops *ops; | 1105 | struct hnae_ae_ops *ops; |
| 1113 | 1106 | ||
| 1114 | assert(priv || priv->ae_handle); | ||
| 1115 | |||
| 1116 | ops = priv->ae_handle->dev->ops; | 1107 | ops = priv->ae_handle->dev->ops; |
| 1117 | 1108 | ||
| 1118 | cmd->version = HNS_CHIP_VERSION; | 1109 | cmd->version = HNS_CHIP_VERSION; |
| @@ -1135,8 +1126,6 @@ static int hns_get_regs_len(struct net_device *net_dev) | |||
| 1135 | struct hns_nic_priv *priv = netdev_priv(net_dev); | 1126 | struct hns_nic_priv *priv = netdev_priv(net_dev); |
| 1136 | struct hnae_ae_ops *ops; | 1127 | struct hnae_ae_ops *ops; |
| 1137 | 1128 | ||
| 1138 | assert(priv || priv->ae_handle); | ||
| 1139 | |||
| 1140 | ops = priv->ae_handle->dev->ops; | 1129 | ops = priv->ae_handle->dev->ops; |
| 1141 | if (!ops->get_regs_len) { | 1130 | if (!ops->get_regs_len) { |
| 1142 | netdev_err(net_dev, "ops->get_regs_len is null!\n"); | 1131 | netdev_err(net_dev, "ops->get_regs_len is null!\n"); |
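
Several of the assertions removed above had the form assert(priv || priv->ae_handle), which can never usefully fail: if priv is non-NULL the first operand already satisfies the OR, and if priv is NULL the second operand dereferences a NULL pointer before the assert can report anything. Dropping them (or writing && instead) is the right call. A small standalone illustration with a hypothetical struct:

    #include <assert.h>
    #include <stddef.h>

    struct priv { void *ae_handle; };

    int main(void)
    {
        struct priv p = { .ae_handle = NULL };
        struct priv *priv = &p;

        /* Always passes when priv != NULL, even though ae_handle is NULL:
         * the || short-circuits on the first operand. */
        assert(priv || priv->ae_handle);

        /* What the check presumably meant; this one would fire here. */
        /* assert(priv && priv->ae_handle); */

        return 0;
    }
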
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c index 01fccec632ec..466939f8f0cf 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.c +++ b/drivers/net/ethernet/marvell/mvneta_bm.c | |||
| @@ -189,6 +189,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, | |||
| 189 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | 189 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
| 190 | hwbm_pool->construct = mvneta_bm_construct; | 190 | hwbm_pool->construct = mvneta_bm_construct; |
| 191 | hwbm_pool->priv = new_pool; | 191 | hwbm_pool->priv = new_pool; |
| 192 | spin_lock_init(&hwbm_pool->lock); | ||
| 192 | 193 | ||
| 193 | /* Create new pool */ | 194 | /* Create new pool */ |
| 194 | err = mvneta_bm_pool_create(priv, new_pool); | 195 | err = mvneta_bm_pool_create(priv, new_pool); |
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c984462fad2a..4763252bbf85 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | |||
| @@ -133,6 +133,8 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) | |||
| 133 | static void mtk_phy_link_adjust(struct net_device *dev) | 133 | static void mtk_phy_link_adjust(struct net_device *dev) |
| 134 | { | 134 | { |
| 135 | struct mtk_mac *mac = netdev_priv(dev); | 135 | struct mtk_mac *mac = netdev_priv(dev); |
| 136 | u16 lcl_adv = 0, rmt_adv = 0; | ||
| 137 | u8 flowctrl; | ||
| 136 | u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | | 138 | u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | |
| 137 | MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | | 139 | MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | |
| 138 | MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | | 140 | MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | |
| @@ -150,11 +152,30 @@ static void mtk_phy_link_adjust(struct net_device *dev) | |||
| 150 | if (mac->phy_dev->link) | 152 | if (mac->phy_dev->link) |
| 151 | mcr |= MAC_MCR_FORCE_LINK; | 153 | mcr |= MAC_MCR_FORCE_LINK; |
| 152 | 154 | ||
| 153 | if (mac->phy_dev->duplex) | 155 | if (mac->phy_dev->duplex) { |
| 154 | mcr |= MAC_MCR_FORCE_DPX; | 156 | mcr |= MAC_MCR_FORCE_DPX; |
| 155 | 157 | ||
| 156 | if (mac->phy_dev->pause) | 158 | if (mac->phy_dev->pause) |
| 157 | mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC; | 159 | rmt_adv = LPA_PAUSE_CAP; |
| 160 | if (mac->phy_dev->asym_pause) | ||
| 161 | rmt_adv |= LPA_PAUSE_ASYM; | ||
| 162 | |||
| 163 | if (mac->phy_dev->advertising & ADVERTISED_Pause) | ||
| 164 | lcl_adv |= ADVERTISE_PAUSE_CAP; | ||
| 165 | if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause) | ||
| 166 | lcl_adv |= ADVERTISE_PAUSE_ASYM; | ||
| 167 | |||
| 168 | flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); | ||
| 169 | |||
| 170 | if (flowctrl & FLOW_CTRL_TX) | ||
| 171 | mcr |= MAC_MCR_FORCE_TX_FC; | ||
| 172 | if (flowctrl & FLOW_CTRL_RX) | ||
| 173 | mcr |= MAC_MCR_FORCE_RX_FC; | ||
| 174 | |||
| 175 | netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n", | ||
| 176 | flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled", | ||
| 177 | flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled"); | ||
| 178 | } | ||
| 158 | 179 | ||
| 159 | mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); | 180 | mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); |
| 160 | 181 | ||
| @@ -208,10 +229,16 @@ static int mtk_phy_connect(struct mtk_mac *mac) | |||
| 208 | u32 val, ge_mode; | 229 | u32 val, ge_mode; |
| 209 | 230 | ||
| 210 | np = of_parse_phandle(mac->of_node, "phy-handle", 0); | 231 | np = of_parse_phandle(mac->of_node, "phy-handle", 0); |
| 232 | if (!np && of_phy_is_fixed_link(mac->of_node)) | ||
| 233 | if (!of_phy_register_fixed_link(mac->of_node)) | ||
| 234 | np = of_node_get(mac->of_node); | ||
| 211 | if (!np) | 235 | if (!np) |
| 212 | return -ENODEV; | 236 | return -ENODEV; |
| 213 | 237 | ||
| 214 | switch (of_get_phy_mode(np)) { | 238 | switch (of_get_phy_mode(np)) { |
| 239 | case PHY_INTERFACE_MODE_RGMII_TXID: | ||
| 240 | case PHY_INTERFACE_MODE_RGMII_RXID: | ||
| 241 | case PHY_INTERFACE_MODE_RGMII_ID: | ||
| 215 | case PHY_INTERFACE_MODE_RGMII: | 242 | case PHY_INTERFACE_MODE_RGMII: |
| 216 | ge_mode = 0; | 243 | ge_mode = 0; |
| 217 | break; | 244 | break; |
| @@ -236,7 +263,8 @@ static int mtk_phy_connect(struct mtk_mac *mac) | |||
| 236 | mac->phy_dev->autoneg = AUTONEG_ENABLE; | 263 | mac->phy_dev->autoneg = AUTONEG_ENABLE; |
| 237 | mac->phy_dev->speed = 0; | 264 | mac->phy_dev->speed = 0; |
| 238 | mac->phy_dev->duplex = 0; | 265 | mac->phy_dev->duplex = 0; |
| 239 | mac->phy_dev->supported &= PHY_BASIC_FEATURES; | 266 | mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | |
| 267 | SUPPORTED_Asym_Pause; | ||
| 240 | mac->phy_dev->advertising = mac->phy_dev->supported | | 268 | mac->phy_dev->advertising = mac->phy_dev->supported | |
| 241 | ADVERTISED_Autoneg; | 269 | ADVERTISED_Autoneg; |
| 242 | phy_start_aneg(mac->phy_dev); | 270 | phy_start_aneg(mac->phy_dev); |
| @@ -280,7 +308,7 @@ static int mtk_mdio_init(struct mtk_eth *eth) | |||
| 280 | return 0; | 308 | return 0; |
| 281 | 309 | ||
| 282 | err_free_bus: | 310 | err_free_bus: |
| 283 | kfree(eth->mii_bus); | 311 | mdiobus_free(eth->mii_bus); |
| 284 | 312 | ||
| 285 | err_put_node: | 313 | err_put_node: |
| 286 | of_node_put(mii_np); | 314 | of_node_put(mii_np); |
| @@ -295,7 +323,7 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth) | |||
| 295 | 323 | ||
| 296 | mdiobus_unregister(eth->mii_bus); | 324 | mdiobus_unregister(eth->mii_bus); |
| 297 | of_node_put(eth->mii_bus->dev.of_node); | 325 | of_node_put(eth->mii_bus->dev.of_node); |
| 298 | kfree(eth->mii_bus); | 326 | mdiobus_free(eth->mii_bus); |
| 299 | } | 327 | } |
| 300 | 328 | ||
| 301 | static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) | 329 | static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) |
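
The mtk_eth_soc change stops forcing both pause directions whenever the PHY reports pause and instead resolves TX/RX flow control from the local and remote advertisements via mii_resolve_flowctrl_fdx(). The resolution itself is a small table from 802.3 autonegotiation; below is a hedged re-derivation in plain C following the same logic the driver now delegates to that helper. The bit values are made up for the sketch, not the kernel's mii.h/flow_ctrl definitions.

    #include <stdio.h>
    #include <stdint.h>

    #define ADV_PAUSE_CAP   0x1     /* illustrative values only */
    #define ADV_PAUSE_ASYM  0x2
    #define FC_TX           0x1
    #define FC_RX           0x2

    /* Full-duplex pause resolution from local and link-partner adverts. */
    static uint8_t resolve_flowctrl_fdx(uint16_t lcladv, uint16_t rmtadv)
    {
        uint8_t cap = 0;

        if (lcladv & rmtadv & ADV_PAUSE_CAP) {
            cap = FC_TX | FC_RX;            /* both sides symmetric */
        } else if (lcladv & rmtadv & ADV_PAUSE_ASYM) {
            if (lcladv & ADV_PAUSE_CAP)
                cap = FC_RX;                /* we honor received pause */
            else if (rmtadv & ADV_PAUSE_CAP)
                cap = FC_TX;                /* we may send pause frames */
        }
        return cap;
    }

    int main(void)
    {
        printf("sym/sym  -> 0x%x\n", resolve_flowctrl_fdx(ADV_PAUSE_CAP, ADV_PAUSE_CAP));
        printf("asym mix -> 0x%x\n",
               resolve_flowctrl_fdx(ADV_PAUSE_ASYM, ADV_PAUSE_CAP | ADV_PAUSE_ASYM));
        return 0;
    }
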
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index c761194bb323..fc95affaf76b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
| @@ -362,7 +362,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, | |||
| 362 | 362 | ||
| 363 | for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it)) | 363 | for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it)) |
| 364 | if (bitmap_iterator_test(&it)) | 364 | if (bitmap_iterator_test(&it)) |
| 365 | data[index++] = ((unsigned long *)&priv->stats)[i]; | 365 | data[index++] = ((unsigned long *)&dev->stats)[i]; |
| 366 | 366 | ||
| 367 | for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it)) | 367 | for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it)) |
| 368 | if (bitmap_iterator_test(&it)) | 368 | if (bitmap_iterator_test(&it)) |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 92e0624f4cf0..19ceced6736c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
| @@ -1296,15 +1296,16 @@ static void mlx4_en_tx_timeout(struct net_device *dev) | |||
| 1296 | } | 1296 | } |
| 1297 | 1297 | ||
| 1298 | 1298 | ||
| 1299 | static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev) | 1299 | static struct rtnl_link_stats64 * |
| 1300 | mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) | ||
| 1300 | { | 1301 | { |
| 1301 | struct mlx4_en_priv *priv = netdev_priv(dev); | 1302 | struct mlx4_en_priv *priv = netdev_priv(dev); |
| 1302 | 1303 | ||
| 1303 | spin_lock_bh(&priv->stats_lock); | 1304 | spin_lock_bh(&priv->stats_lock); |
| 1304 | memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats)); | 1305 | netdev_stats_to_stats64(stats, &dev->stats); |
| 1305 | spin_unlock_bh(&priv->stats_lock); | 1306 | spin_unlock_bh(&priv->stats_lock); |
| 1306 | 1307 | ||
| 1307 | return &priv->ret_stats; | 1308 | return stats; |
| 1308 | } | 1309 | } |
| 1309 | 1310 | ||
| 1310 | static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) | 1311 | static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) |
| @@ -1876,7 +1877,6 @@ static void mlx4_en_clear_stats(struct net_device *dev) | |||
| 1876 | if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) | 1877 | if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) |
| 1877 | en_dbg(HW, priv, "Failed dumping statistics\n"); | 1878 | en_dbg(HW, priv, "Failed dumping statistics\n"); |
| 1878 | 1879 | ||
| 1879 | memset(&priv->stats, 0, sizeof(priv->stats)); | ||
| 1880 | memset(&priv->pstats, 0, sizeof(priv->pstats)); | 1880 | memset(&priv->pstats, 0, sizeof(priv->pstats)); |
| 1881 | memset(&priv->pkstats, 0, sizeof(priv->pkstats)); | 1881 | memset(&priv->pkstats, 0, sizeof(priv->pkstats)); |
| 1882 | memset(&priv->port_stats, 0, sizeof(priv->port_stats)); | 1882 | memset(&priv->port_stats, 0, sizeof(priv->port_stats)); |
| @@ -1892,6 +1892,11 @@ static void mlx4_en_clear_stats(struct net_device *dev) | |||
| 1892 | priv->tx_ring[i]->bytes = 0; | 1892 | priv->tx_ring[i]->bytes = 0; |
| 1893 | priv->tx_ring[i]->packets = 0; | 1893 | priv->tx_ring[i]->packets = 0; |
| 1894 | priv->tx_ring[i]->tx_csum = 0; | 1894 | priv->tx_ring[i]->tx_csum = 0; |
| 1895 | priv->tx_ring[i]->tx_dropped = 0; | ||
| 1896 | priv->tx_ring[i]->queue_stopped = 0; | ||
| 1897 | priv->tx_ring[i]->wake_queue = 0; | ||
| 1898 | priv->tx_ring[i]->tso_packets = 0; | ||
| 1899 | priv->tx_ring[i]->xmit_more = 0; | ||
| 1895 | } | 1900 | } |
| 1896 | for (i = 0; i < priv->rx_ring_num; i++) { | 1901 | for (i = 0; i < priv->rx_ring_num; i++) { |
| 1897 | priv->rx_ring[i]->bytes = 0; | 1902 | priv->rx_ring[i]->bytes = 0; |
| @@ -2482,7 +2487,7 @@ static const struct net_device_ops mlx4_netdev_ops = { | |||
| 2482 | .ndo_stop = mlx4_en_close, | 2487 | .ndo_stop = mlx4_en_close, |
| 2483 | .ndo_start_xmit = mlx4_en_xmit, | 2488 | .ndo_start_xmit = mlx4_en_xmit, |
| 2484 | .ndo_select_queue = mlx4_en_select_queue, | 2489 | .ndo_select_queue = mlx4_en_select_queue, |
| 2485 | .ndo_get_stats = mlx4_en_get_stats, | 2490 | .ndo_get_stats64 = mlx4_en_get_stats64, |
| 2486 | .ndo_set_rx_mode = mlx4_en_set_rx_mode, | 2491 | .ndo_set_rx_mode = mlx4_en_set_rx_mode, |
| 2487 | .ndo_set_mac_address = mlx4_en_set_mac, | 2492 | .ndo_set_mac_address = mlx4_en_set_mac, |
| 2488 | .ndo_validate_addr = eth_validate_addr, | 2493 | .ndo_validate_addr = eth_validate_addr, |
| @@ -2514,7 +2519,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { | |||
| 2514 | .ndo_stop = mlx4_en_close, | 2519 | .ndo_stop = mlx4_en_close, |
| 2515 | .ndo_start_xmit = mlx4_en_xmit, | 2520 | .ndo_start_xmit = mlx4_en_xmit, |
| 2516 | .ndo_select_queue = mlx4_en_select_queue, | 2521 | .ndo_select_queue = mlx4_en_select_queue, |
| 2517 | .ndo_get_stats = mlx4_en_get_stats, | 2522 | .ndo_get_stats64 = mlx4_en_get_stats64, |
| 2518 | .ndo_set_rx_mode = mlx4_en_set_rx_mode, | 2523 | .ndo_set_rx_mode = mlx4_en_set_rx_mode, |
| 2519 | .ndo_set_mac_address = mlx4_en_set_mac, | 2524 | .ndo_set_mac_address = mlx4_en_set_mac, |
| 2520 | .ndo_validate_addr = eth_validate_addr, | 2525 | .ndo_validate_addr = eth_validate_addr, |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 20b6c2e678b8..5aa8b751f417 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c | |||
| @@ -152,8 +152,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
| 152 | struct mlx4_counter tmp_counter_stats; | 152 | struct mlx4_counter tmp_counter_stats; |
| 153 | struct mlx4_en_stat_out_mbox *mlx4_en_stats; | 153 | struct mlx4_en_stat_out_mbox *mlx4_en_stats; |
| 154 | struct mlx4_en_stat_out_flow_control_mbox *flowstats; | 154 | struct mlx4_en_stat_out_flow_control_mbox *flowstats; |
| 155 | struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]); | 155 | struct net_device *dev = mdev->pndev[port]; |
| 156 | struct net_device_stats *stats = &priv->stats; | 156 | struct mlx4_en_priv *priv = netdev_priv(dev); |
| 157 | struct net_device_stats *stats = &dev->stats; | ||
| 157 | struct mlx4_cmd_mailbox *mailbox; | 158 | struct mlx4_cmd_mailbox *mailbox; |
| 158 | u64 in_mod = reset << 8 | port; | 159 | u64 in_mod = reset << 8 | port; |
| 159 | int err; | 160 | int err; |
| @@ -188,6 +189,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
| 188 | } | 189 | } |
| 189 | stats->tx_packets = 0; | 190 | stats->tx_packets = 0; |
| 190 | stats->tx_bytes = 0; | 191 | stats->tx_bytes = 0; |
| 192 | stats->tx_dropped = 0; | ||
| 191 | priv->port_stats.tx_chksum_offload = 0; | 193 | priv->port_stats.tx_chksum_offload = 0; |
| 192 | priv->port_stats.queue_stopped = 0; | 194 | priv->port_stats.queue_stopped = 0; |
| 193 | priv->port_stats.wake_queue = 0; | 195 | priv->port_stats.wake_queue = 0; |
| @@ -199,6 +201,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
| 199 | 201 | ||
| 200 | stats->tx_packets += ring->packets; | 202 | stats->tx_packets += ring->packets; |
| 201 | stats->tx_bytes += ring->bytes; | 203 | stats->tx_bytes += ring->bytes; |
| 204 | stats->tx_dropped += ring->tx_dropped; | ||
| 202 | priv->port_stats.tx_chksum_offload += ring->tx_csum; | 205 | priv->port_stats.tx_chksum_offload += ring->tx_csum; |
| 203 | priv->port_stats.queue_stopped += ring->queue_stopped; | 206 | priv->port_stats.queue_stopped += ring->queue_stopped; |
| 204 | priv->port_stats.wake_queue += ring->wake_queue; | 207 | priv->port_stats.wake_queue += ring->wake_queue; |
| @@ -237,21 +240,12 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
| 237 | stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0, | 240 | stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0, |
| 238 | &mlx4_en_stats->MCAST_prio_1, | 241 | &mlx4_en_stats->MCAST_prio_1, |
| 239 | NUM_PRIORITIES); | 242 | NUM_PRIORITIES); |
| 240 | stats->collisions = 0; | ||
| 241 | stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) + | 243 | stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) + |
| 242 | sw_rx_dropped; | 244 | sw_rx_dropped; |
| 243 | stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); | 245 | stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); |
| 244 | stats->rx_over_errors = 0; | ||
| 245 | stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); | 246 | stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); |
| 246 | stats->rx_frame_errors = 0; | ||
| 247 | stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); | 247 | stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); |
| 248 | stats->rx_missed_errors = 0; | 248 | stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP); |
| 249 | stats->tx_aborted_errors = 0; | ||
| 250 | stats->tx_carrier_errors = 0; | ||
| 251 | stats->tx_fifo_errors = 0; | ||
| 252 | stats->tx_heartbeat_errors = 0; | ||
| 253 | stats->tx_window_errors = 0; | ||
| 254 | stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP); | ||
| 255 | 249 | ||
| 256 | /* RX stats */ | 250 | /* RX stats */ |
| 257 | priv->pkstats.rx_multicast_packets = stats->multicast; | 251 | priv->pkstats.rx_multicast_packets = stats->multicast; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index f6e61570cb2c..76aa4d27183c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
| @@ -726,12 +726,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 726 | bool inline_ok; | 726 | bool inline_ok; |
| 727 | u32 ring_cons; | 727 | u32 ring_cons; |
| 728 | 728 | ||
| 729 | if (!priv->port_up) | ||
| 730 | goto tx_drop; | ||
| 731 | |||
| 732 | tx_ind = skb_get_queue_mapping(skb); | 729 | tx_ind = skb_get_queue_mapping(skb); |
| 733 | ring = priv->tx_ring[tx_ind]; | 730 | ring = priv->tx_ring[tx_ind]; |
| 734 | 731 | ||
| 732 | if (!priv->port_up) | ||
| 733 | goto tx_drop; | ||
| 734 | |||
| 735 | /* fetch ring->cons far ahead before needing it to avoid stall */ | 735 | /* fetch ring->cons far ahead before needing it to avoid stall */ |
| 736 | ring_cons = ACCESS_ONCE(ring->cons); | 736 | ring_cons = ACCESS_ONCE(ring->cons); |
| 737 | 737 | ||
| @@ -1030,7 +1030,7 @@ tx_drop_unmap: | |||
| 1030 | 1030 | ||
| 1031 | tx_drop: | 1031 | tx_drop: |
| 1032 | dev_kfree_skb_any(skb); | 1032 | dev_kfree_skb_any(skb); |
| 1033 | priv->stats.tx_dropped++; | 1033 | ring->tx_dropped++; |
| 1034 | return NETDEV_TX_OK; | 1034 | return NETDEV_TX_OK; |
| 1035 | } | 1035 | } |
| 1036 | 1036 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index cc84e09f324a..467d47ed2c39 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
| @@ -270,6 +270,7 @@ struct mlx4_en_tx_ring { | |||
| 270 | unsigned long tx_csum; | 270 | unsigned long tx_csum; |
| 271 | unsigned long tso_packets; | 271 | unsigned long tso_packets; |
| 272 | unsigned long xmit_more; | 272 | unsigned long xmit_more; |
| 273 | unsigned int tx_dropped; | ||
| 273 | struct mlx4_bf bf; | 274 | struct mlx4_bf bf; |
| 274 | unsigned long queue_stopped; | 275 | unsigned long queue_stopped; |
| 275 | 276 | ||
| @@ -482,8 +483,6 @@ struct mlx4_en_priv { | |||
| 482 | struct mlx4_en_port_profile *prof; | 483 | struct mlx4_en_port_profile *prof; |
| 483 | struct net_device *dev; | 484 | struct net_device *dev; |
| 484 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; | 485 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; |
| 485 | struct net_device_stats stats; | ||
| 486 | struct net_device_stats ret_stats; | ||
| 487 | struct mlx4_en_port_state port_state; | 486 | struct mlx4_en_port_state port_state; |
| 488 | spinlock_t stats_lock; | 487 | spinlock_t stats_lock; |
| 489 | struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES]; | 488 | struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES]; |
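
Taken together, the mlx4_en changes drop the private net_device_stats copies, add a per-ring tx_dropped counter, and report through ndo_get_stats64 instead of ndo_get_stats. The underlying pattern is to bump cheap per-ring counters on the hot path and fold them into one view when statistics are read. A simplified, single-threaded sketch of that aggregation; the struct and function names are invented for the example.

    #include <stdio.h>

    #define NUM_TX_RINGS 4

    struct tx_ring {
        unsigned long packets;
        unsigned long bytes;
        unsigned long tx_dropped;   /* per-ring, bumped on the xmit path */
    };

    struct dev_stats {
        unsigned long tx_packets;
        unsigned long tx_bytes;
        unsigned long tx_dropped;
    };

    /* Fold the per-ring counters into one device-wide view, as the
     * DUMP_ETH_STATS path now does for tx_dropped next to packets/bytes. */
    static void fill_stats64(struct tx_ring rings[], struct dev_stats *s)
    {
        s->tx_packets = s->tx_bytes = s->tx_dropped = 0;
        for (int i = 0; i < NUM_TX_RINGS; i++) {
            s->tx_packets += rings[i].packets;
            s->tx_bytes   += rings[i].bytes;
            s->tx_dropped += rings[i].tx_dropped;
        }
    }

    int main(void)
    {
        struct tx_ring rings[NUM_TX_RINGS] = {
            { 10, 1500, 0 }, { 7, 900, 2 }, { 0, 0, 0 }, { 3, 200, 1 },
        };
        struct dev_stats s;

        fill_stats64(rings, &s);
        printf("tx: %lu pkts, %lu bytes, %lu dropped\n",
               s.tx_packets, s.tx_bytes, s.tx_dropped);
        return 0;
    }
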
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index fd4392999eee..f5c8d5db25a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
| @@ -3192,10 +3192,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) | |||
| 3192 | flush_workqueue(priv->wq); | 3192 | flush_workqueue(priv->wq); |
| 3193 | if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { | 3193 | if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { |
| 3194 | netif_device_detach(netdev); | 3194 | netif_device_detach(netdev); |
| 3195 | mutex_lock(&priv->state_lock); | 3195 | mlx5e_close(netdev); |
| 3196 | if (test_bit(MLX5E_STATE_OPENED, &priv->state)) | ||
| 3197 | mlx5e_close_locked(netdev); | ||
| 3198 | mutex_unlock(&priv->state_lock); | ||
| 3199 | } else { | 3196 | } else { |
| 3200 | unregister_netdev(netdev); | 3197 | unregister_netdev(netdev); |
| 3201 | } | 3198 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 229ab16fb8d3..b000ddc29553 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
| @@ -317,7 +317,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) | |||
| 317 | while ((sq->pc & wq->sz_m1) > sq->edge) | 317 | while ((sq->pc & wq->sz_m1) > sq->edge) |
| 318 | mlx5e_send_nop(sq, false); | 318 | mlx5e_send_nop(sq, false); |
| 319 | 319 | ||
| 320 | sq->bf_budget = bf ? sq->bf_budget - 1 : 0; | 320 | if (bf) |
| 321 | sq->bf_budget--; | ||
| 321 | 322 | ||
| 322 | sq->stats.packets++; | 323 | sq->stats.packets++; |
| 323 | sq->stats.bytes += num_bytes; | 324 | sq->stats.bytes += num_bytes; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index b84a6918a700..aebbd6ccb9fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
| @@ -383,7 +383,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, | |||
| 383 | match_v, | 383 | match_v, |
| 384 | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, | 384 | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, |
| 385 | 0, &dest); | 385 | 0, &dest); |
| 386 | if (IS_ERR_OR_NULL(flow_rule)) { | 386 | if (IS_ERR(flow_rule)) { |
| 387 | pr_warn( | 387 | pr_warn( |
| 388 | "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", | 388 | "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", |
| 389 | dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); | 389 | dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); |
| @@ -457,7 +457,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 457 | 457 | ||
| 458 | table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); | 458 | table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); |
| 459 | fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0); | 459 | fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0); |
| 460 | if (IS_ERR_OR_NULL(fdb)) { | 460 | if (IS_ERR(fdb)) { |
| 461 | err = PTR_ERR(fdb); | 461 | err = PTR_ERR(fdb); |
| 462 | esw_warn(dev, "Failed to create FDB Table err %d\n", err); | 462 | esw_warn(dev, "Failed to create FDB Table err %d\n", err); |
| 463 | goto out; | 463 | goto out; |
| @@ -474,7 +474,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 474 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); | 474 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); |
| 475 | eth_broadcast_addr(dmac); | 475 | eth_broadcast_addr(dmac); |
| 476 | g = mlx5_create_flow_group(fdb, flow_group_in); | 476 | g = mlx5_create_flow_group(fdb, flow_group_in); |
| 477 | if (IS_ERR_OR_NULL(g)) { | 477 | if (IS_ERR(g)) { |
| 478 | err = PTR_ERR(g); | 478 | err = PTR_ERR(g); |
| 479 | esw_warn(dev, "Failed to create flow group err(%d)\n", err); | 479 | esw_warn(dev, "Failed to create flow group err(%d)\n", err); |
| 480 | goto out; | 480 | goto out; |
| @@ -489,7 +489,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 489 | eth_zero_addr(dmac); | 489 | eth_zero_addr(dmac); |
| 490 | dmac[0] = 0x01; | 490 | dmac[0] = 0x01; |
| 491 | g = mlx5_create_flow_group(fdb, flow_group_in); | 491 | g = mlx5_create_flow_group(fdb, flow_group_in); |
| 492 | if (IS_ERR_OR_NULL(g)) { | 492 | if (IS_ERR(g)) { |
| 493 | err = PTR_ERR(g); | 493 | err = PTR_ERR(g); |
| 494 | esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); | 494 | esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); |
| 495 | goto out; | 495 | goto out; |
| @@ -506,7 +506,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 506 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); | 506 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); |
| 507 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); | 507 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); |
| 508 | g = mlx5_create_flow_group(fdb, flow_group_in); | 508 | g = mlx5_create_flow_group(fdb, flow_group_in); |
| 509 | if (IS_ERR_OR_NULL(g)) { | 509 | if (IS_ERR(g)) { |
| 510 | err = PTR_ERR(g); | 510 | err = PTR_ERR(g); |
| 511 | esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); | 511 | esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); |
| 512 | goto out; | 512 | goto out; |
| @@ -529,7 +529,7 @@ out: | |||
| 529 | } | 529 | } |
| 530 | } | 530 | } |
| 531 | 531 | ||
| 532 | kfree(flow_group_in); | 532 | kvfree(flow_group_in); |
| 533 | return err; | 533 | return err; |
| 534 | } | 534 | } |
| 535 | 535 | ||
| @@ -651,6 +651,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw, | |||
| 651 | esw_fdb_set_vport_rule(esw, | 651 | esw_fdb_set_vport_rule(esw, |
| 652 | mac, | 652 | mac, |
| 653 | vport_idx); | 653 | vport_idx); |
| 654 | iter_vaddr->mc_promisc = true; | ||
| 654 | break; | 655 | break; |
| 655 | case MLX5_ACTION_DEL: | 656 | case MLX5_ACTION_DEL: |
| 656 | if (!iter_vaddr) | 657 | if (!iter_vaddr) |
| @@ -1060,7 +1061,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1060 | return; | 1061 | return; |
| 1061 | 1062 | ||
| 1062 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); | 1063 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); |
| 1063 | if (IS_ERR_OR_NULL(acl)) { | 1064 | if (IS_ERR(acl)) { |
| 1064 | err = PTR_ERR(acl); | 1065 | err = PTR_ERR(acl); |
| 1065 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n", | 1066 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n", |
| 1066 | vport->vport, err); | 1067 | vport->vport, err); |
| @@ -1075,7 +1076,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1075 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); | 1076 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); |
| 1076 | 1077 | ||
| 1077 | vlan_grp = mlx5_create_flow_group(acl, flow_group_in); | 1078 | vlan_grp = mlx5_create_flow_group(acl, flow_group_in); |
| 1078 | if (IS_ERR_OR_NULL(vlan_grp)) { | 1079 | if (IS_ERR(vlan_grp)) { |
| 1079 | err = PTR_ERR(vlan_grp); | 1080 | err = PTR_ERR(vlan_grp); |
| 1080 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n", | 1081 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n", |
| 1081 | vport->vport, err); | 1082 | vport->vport, err); |
| @@ -1086,7 +1087,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1086 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); | 1087 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); |
| 1087 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); | 1088 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); |
| 1088 | drop_grp = mlx5_create_flow_group(acl, flow_group_in); | 1089 | drop_grp = mlx5_create_flow_group(acl, flow_group_in); |
| 1089 | if (IS_ERR_OR_NULL(drop_grp)) { | 1090 | if (IS_ERR(drop_grp)) { |
| 1090 | err = PTR_ERR(drop_grp); | 1091 | err = PTR_ERR(drop_grp); |
| 1091 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n", | 1092 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n", |
| 1092 | vport->vport, err); | 1093 | vport->vport, err); |
| @@ -1097,7 +1098,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1097 | vport->egress.drop_grp = drop_grp; | 1098 | vport->egress.drop_grp = drop_grp; |
| 1098 | vport->egress.allowed_vlans_grp = vlan_grp; | 1099 | vport->egress.allowed_vlans_grp = vlan_grp; |
| 1099 | out: | 1100 | out: |
| 1100 | kfree(flow_group_in); | 1101 | kvfree(flow_group_in); |
| 1101 | if (err && !IS_ERR_OR_NULL(vlan_grp)) | 1102 | if (err && !IS_ERR_OR_NULL(vlan_grp)) |
| 1102 | mlx5_destroy_flow_group(vlan_grp); | 1103 | mlx5_destroy_flow_group(vlan_grp); |
| 1103 | if (err && !IS_ERR_OR_NULL(acl)) | 1104 | if (err && !IS_ERR_OR_NULL(acl)) |
| @@ -1174,7 +1175,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1174 | return; | 1175 | return; |
| 1175 | 1176 | ||
| 1176 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); | 1177 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); |
| 1177 | if (IS_ERR_OR_NULL(acl)) { | 1178 | if (IS_ERR(acl)) { |
| 1178 | err = PTR_ERR(acl); | 1179 | err = PTR_ERR(acl); |
| 1179 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n", | 1180 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n", |
| 1180 | vport->vport, err); | 1181 | vport->vport, err); |
| @@ -1192,7 +1193,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1192 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); | 1193 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); |
| 1193 | 1194 | ||
| 1194 | g = mlx5_create_flow_group(acl, flow_group_in); | 1195 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1195 | if (IS_ERR_OR_NULL(g)) { | 1196 | if (IS_ERR(g)) { |
| 1196 | err = PTR_ERR(g); | 1197 | err = PTR_ERR(g); |
| 1197 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n", | 1198 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n", |
| 1198 | vport->vport, err); | 1199 | vport->vport, err); |
| @@ -1207,7 +1208,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1207 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); | 1208 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); |
| 1208 | 1209 | ||
| 1209 | g = mlx5_create_flow_group(acl, flow_group_in); | 1210 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1210 | if (IS_ERR_OR_NULL(g)) { | 1211 | if (IS_ERR(g)) { |
| 1211 | err = PTR_ERR(g); | 1212 | err = PTR_ERR(g); |
| 1212 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n", | 1213 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n", |
| 1213 | vport->vport, err); | 1214 | vport->vport, err); |
| @@ -1223,7 +1224,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1223 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); | 1224 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); |
| 1224 | 1225 | ||
| 1225 | g = mlx5_create_flow_group(acl, flow_group_in); | 1226 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1226 | if (IS_ERR_OR_NULL(g)) { | 1227 | if (IS_ERR(g)) { |
| 1227 | err = PTR_ERR(g); | 1228 | err = PTR_ERR(g); |
| 1228 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n", | 1229 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n", |
| 1229 | vport->vport, err); | 1230 | vport->vport, err); |
| @@ -1236,7 +1237,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1236 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); | 1237 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); |
| 1237 | 1238 | ||
| 1238 | g = mlx5_create_flow_group(acl, flow_group_in); | 1239 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1239 | if (IS_ERR_OR_NULL(g)) { | 1240 | if (IS_ERR(g)) { |
| 1240 | err = PTR_ERR(g); | 1241 | err = PTR_ERR(g); |
| 1241 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n", | 1242 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n", |
| 1242 | vport->vport, err); | 1243 | vport->vport, err); |
| @@ -1259,7 +1260,7 @@ out: | |||
| 1259 | mlx5_destroy_flow_table(vport->ingress.acl); | 1260 | mlx5_destroy_flow_table(vport->ingress.acl); |
| 1260 | } | 1261 | } |
| 1261 | 1262 | ||
| 1262 | kfree(flow_group_in); | 1263 | kvfree(flow_group_in); |
| 1263 | } | 1264 | } |
| 1264 | 1265 | ||
| 1265 | static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, | 1266 | static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, |
| @@ -1363,7 +1364,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, | |||
| 1363 | match_v, | 1364 | match_v, |
| 1364 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, | 1365 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, |
| 1365 | 0, NULL); | 1366 | 0, NULL); |
| 1366 | if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) { | 1367 | if (IS_ERR(vport->ingress.allow_rule)) { |
| 1367 | err = PTR_ERR(vport->ingress.allow_rule); | 1368 | err = PTR_ERR(vport->ingress.allow_rule); |
| 1368 | pr_warn("vport[%d] configure ingress allow rule, err(%d)\n", | 1369 | pr_warn("vport[%d] configure ingress allow rule, err(%d)\n", |
| 1369 | vport->vport, err); | 1370 | vport->vport, err); |
| @@ -1380,7 +1381,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, | |||
| 1380 | match_v, | 1381 | match_v, |
| 1381 | MLX5_FLOW_CONTEXT_ACTION_DROP, | 1382 | MLX5_FLOW_CONTEXT_ACTION_DROP, |
| 1382 | 0, NULL); | 1383 | 0, NULL); |
| 1383 | if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) { | 1384 | if (IS_ERR(vport->ingress.drop_rule)) { |
| 1384 | err = PTR_ERR(vport->ingress.drop_rule); | 1385 | err = PTR_ERR(vport->ingress.drop_rule); |
| 1385 | pr_warn("vport[%d] configure ingress drop rule, err(%d)\n", | 1386 | pr_warn("vport[%d] configure ingress drop rule, err(%d)\n", |
| 1386 | vport->vport, err); | 1387 | vport->vport, err); |
| @@ -1439,7 +1440,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, | |||
| 1439 | match_v, | 1440 | match_v, |
| 1440 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, | 1441 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, |
| 1441 | 0, NULL); | 1442 | 0, NULL); |
| 1442 | if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) { | 1443 | if (IS_ERR(vport->egress.allowed_vlan)) { |
| 1443 | err = PTR_ERR(vport->egress.allowed_vlan); | 1444 | err = PTR_ERR(vport->egress.allowed_vlan); |
| 1444 | pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n", | 1445 | pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n", |
| 1445 | vport->vport, err); | 1446 | vport->vport, err); |
| @@ -1457,7 +1458,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, | |||
| 1457 | match_v, | 1458 | match_v, |
| 1458 | MLX5_FLOW_CONTEXT_ACTION_DROP, | 1459 | MLX5_FLOW_CONTEXT_ACTION_DROP, |
| 1459 | 0, NULL); | 1460 | 0, NULL); |
| 1460 | if (IS_ERR_OR_NULL(vport->egress.drop_rule)) { | 1461 | if (IS_ERR(vport->egress.drop_rule)) { |
| 1461 | err = PTR_ERR(vport->egress.drop_rule); | 1462 | err = PTR_ERR(vport->egress.drop_rule); |
| 1462 | pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n", | 1463 | pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n", |
| 1463 | vport->vport, err); | 1464 | vport->vport, err); |
| @@ -1491,14 +1492,11 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, | |||
| 1491 | 1492 | ||
| 1492 | /* Sync with current vport context */ | 1493 | /* Sync with current vport context */ |
| 1493 | vport->enabled_events = enable_events; | 1494 | vport->enabled_events = enable_events; |
| 1494 | esw_vport_change_handle_locked(vport); | ||
| 1495 | |||
| 1496 | vport->enabled = true; | 1495 | vport->enabled = true; |
| 1497 | 1496 | ||
| 1498 | /* only PF is trusted by default */ | 1497 | /* only PF is trusted by default */ |
| 1499 | vport->trusted = (vport_num) ? false : true; | 1498 | vport->trusted = (vport_num) ? false : true; |
| 1500 | 1499 | esw_vport_change_handle_locked(vport); | |
| 1501 | arm_vport_context_events_cmd(esw->dev, vport_num, enable_events); | ||
| 1502 | 1500 | ||
| 1503 | esw->enabled_vports++; | 1501 | esw->enabled_vports++; |
| 1504 | esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); | 1502 | esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); |
| @@ -1728,11 +1726,24 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) | |||
| 1728 | (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) | 1726 | (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) |
| 1729 | #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) | 1727 | #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) |
| 1730 | 1728 | ||
| 1729 | static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) | ||
| 1730 | { | ||
| 1731 | ((u8 *)node_guid)[7] = mac[0]; | ||
| 1732 | ((u8 *)node_guid)[6] = mac[1]; | ||
| 1733 | ((u8 *)node_guid)[5] = mac[2]; | ||
| 1734 | ((u8 *)node_guid)[4] = 0xff; | ||
| 1735 | ((u8 *)node_guid)[3] = 0xfe; | ||
| 1736 | ((u8 *)node_guid)[2] = mac[3]; | ||
| 1737 | ((u8 *)node_guid)[1] = mac[4]; | ||
| 1738 | ((u8 *)node_guid)[0] = mac[5]; | ||
| 1739 | } | ||
| 1740 | |||
| 1731 | int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, | 1741 | int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, |
| 1732 | int vport, u8 mac[ETH_ALEN]) | 1742 | int vport, u8 mac[ETH_ALEN]) |
| 1733 | { | 1743 | { |
| 1734 | int err = 0; | ||
| 1735 | struct mlx5_vport *evport; | 1744 | struct mlx5_vport *evport; |
| 1745 | u64 node_guid; | ||
| 1746 | int err = 0; | ||
| 1736 | 1747 | ||
| 1737 | if (!ESW_ALLOWED(esw)) | 1748 | if (!ESW_ALLOWED(esw)) |
| 1738 | return -EPERM; | 1749 | return -EPERM; |
| @@ -1756,11 +1767,17 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, | |||
| 1756 | return err; | 1767 | return err; |
| 1757 | } | 1768 | } |
| 1758 | 1769 | ||
| 1770 | node_guid_gen_from_mac(&node_guid, mac); | ||
| 1771 | err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid); | ||
| 1772 | if (err) | ||
| 1773 | mlx5_core_warn(esw->dev, | ||
| 1774 | "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n", | ||
| 1775 | vport, err); | ||
| 1776 | |||
| 1759 | mutex_lock(&esw->state_lock); | 1777 | mutex_lock(&esw->state_lock); |
| 1760 | if (evport->enabled) | 1778 | if (evport->enabled) |
| 1761 | err = esw_vport_ingress_config(esw, evport); | 1779 | err = esw_vport_ingress_config(esw, evport); |
| 1762 | mutex_unlock(&esw->state_lock); | 1780 | mutex_unlock(&esw->state_lock); |
| 1763 | |||
| 1764 | return err; | 1781 | return err; |
| 1765 | } | 1782 | } |
| 1766 | 1783 | ||
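
The new node_guid_gen_from_mac() builds the vport node GUID by expanding the 48-bit MAC into a 64-bit identifier with an ff:fe filler between the OUI and device halves, stored byte-reversed into a u64 so the most significant GUID byte holds mac[0] (the same layout used for EUI-64-style identifiers, without toggling any address bits). A standalone sketch that mirrors the hunk and prints the resulting bytes; the sample MAC is arbitrary.

    #include <stdio.h>
    #include <stdint.h>

    #define ETH_ALEN 6

    /* Mirror of the hunk above: pack mac[0..5] around an ff:fe filler. */
    static void node_guid_gen_from_mac(uint64_t *node_guid, const uint8_t mac[ETH_ALEN])
    {
        ((uint8_t *)node_guid)[7] = mac[0];
        ((uint8_t *)node_guid)[6] = mac[1];
        ((uint8_t *)node_guid)[5] = mac[2];
        ((uint8_t *)node_guid)[4] = 0xff;
        ((uint8_t *)node_guid)[3] = 0xfe;
        ((uint8_t *)node_guid)[2] = mac[3];
        ((uint8_t *)node_guid)[1] = mac[4];
        ((uint8_t *)node_guid)[0] = mac[5];
    }

    int main(void)
    {
        const uint8_t mac[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
        uint64_t guid = 0;

        node_guid_gen_from_mac(&guid, mac);

        /* Most significant byte first: 00:1b:21:ff:fe:aa:bb:cc */
        for (int i = 7; i >= 0; i--)
            printf("%02x%s", ((uint8_t *)&guid)[i], i ? ":" : "\n");
        return 0;
    }
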
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 8b5f0b2c0d5c..e912a3d2505e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
| @@ -1292,8 +1292,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft) | |||
| 1292 | ft->id); | 1292 | ft->id); |
| 1293 | return err; | 1293 | return err; |
| 1294 | } | 1294 | } |
| 1295 | root->root_ft = new_root_ft; | ||
| 1296 | } | 1295 | } |
| 1296 | root->root_ft = new_root_ft; | ||
| 1297 | return 0; | 1297 | return 0; |
| 1298 | } | 1298 | } |
| 1299 | 1299 | ||
| @@ -1767,6 +1767,9 @@ static void cleanup_root_ns(struct mlx5_core_dev *dev) | |||
| 1767 | 1767 | ||
| 1768 | void mlx5_cleanup_fs(struct mlx5_core_dev *dev) | 1768 | void mlx5_cleanup_fs(struct mlx5_core_dev *dev) |
| 1769 | { | 1769 | { |
| 1770 | if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) | ||
| 1771 | return; | ||
| 1772 | |||
| 1770 | cleanup_root_ns(dev); | 1773 | cleanup_root_ns(dev); |
| 1771 | cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); | 1774 | cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); |
| 1772 | cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); | 1775 | cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); |
| @@ -1828,29 +1831,36 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) | |||
| 1828 | { | 1831 | { |
| 1829 | int err = 0; | 1832 | int err = 0; |
| 1830 | 1833 | ||
| 1834 | if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) | ||
| 1835 | return 0; | ||
| 1836 | |||
| 1831 | err = mlx5_init_fc_stats(dev); | 1837 | err = mlx5_init_fc_stats(dev); |
| 1832 | if (err) | 1838 | if (err) |
| 1833 | return err; | 1839 | return err; |
| 1834 | 1840 | ||
| 1835 | if (MLX5_CAP_GEN(dev, nic_flow_table)) { | 1841 | if (MLX5_CAP_GEN(dev, nic_flow_table) && |
| 1842 | MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) { | ||
| 1836 | err = init_root_ns(dev); | 1843 | err = init_root_ns(dev); |
| 1837 | if (err) | 1844 | if (err) |
| 1838 | goto err; | 1845 | goto err; |
| 1839 | } | 1846 | } |
| 1847 | |||
| 1840 | if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { | 1848 | if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { |
| 1841 | err = init_fdb_root_ns(dev); | 1849 | if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { |
| 1842 | if (err) | 1850 | err = init_fdb_root_ns(dev); |
| 1843 | goto err; | 1851 | if (err) |
| 1844 | } | 1852 | goto err; |
| 1845 | if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { | 1853 | } |
| 1846 | err = init_egress_acl_root_ns(dev); | 1854 | if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { |
| 1847 | if (err) | 1855 | err = init_egress_acl_root_ns(dev); |
| 1848 | goto err; | 1856 | if (err) |
| 1849 | } | 1857 | goto err; |
| 1850 | if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { | 1858 | } |
| 1851 | err = init_ingress_acl_root_ns(dev); | 1859 | if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { |
| 1852 | if (err) | 1860 | err = init_ingress_acl_root_ns(dev); |
| 1853 | goto err; | 1861 | if (err) |
| 1862 | goto err; | ||
| 1863 | } | ||
| 1854 | } | 1864 | } |
| 1855 | 1865 | ||
| 1856 | return 0; | 1866 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index b720a274220d..b82d65802d96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c | |||
| @@ -418,7 +418,7 @@ int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) | |||
| 418 | if (out.hdr.status) | 418 | if (out.hdr.status) |
| 419 | err = mlx5_cmd_status_to_err(&out.hdr); | 419 | err = mlx5_cmd_status_to_err(&out.hdr); |
| 420 | else | 420 | else |
| 421 | *xrcdn = be32_to_cpu(out.xrcdn); | 421 | *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff; |
| 422 | 422 | ||
| 423 | return err; | 423 | return err; |
| 424 | } | 424 | } |
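
The qp.c one-liner masks the returned XRC domain number to its low 24 bits, since the field shares a 32-bit word with other bits in the command output. A tiny illustration of pulling a 24-bit field out of a big-endian word; the raw value is made up, and ntohl() stands in for be32_to_cpu().

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>  /* ntohl()/htonl(), standing in for be32_to_cpu() */

    int main(void)
    {
        /* Pretend the firmware returned this big-endian dword: the top byte
         * carries unrelated bits, the low 24 bits are the xrcdn. */
        uint32_t raw_be = htonl(0xAB001234);

        uint32_t xrcdn_wrong = ntohl(raw_be);             /* 0xab001234 */
        uint32_t xrcdn_right = ntohl(raw_be) & 0xffffff;  /* 0x001234   */

        printf("without mask: 0x%08x\n", xrcdn_wrong);
        printf("with mask:    0x%06x\n", xrcdn_right);
        return 0;
    }
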
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index b69dadcfb897..daf44cd4c566 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c | |||
| @@ -508,6 +508,44 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) | |||
| 508 | } | 508 | } |
| 509 | EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); | 509 | EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); |
| 510 | 510 | ||
| 511 | int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, | ||
| 512 | u32 vport, u64 node_guid) | ||
| 513 | { | ||
| 514 | int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); | ||
| 515 | void *nic_vport_context; | ||
| 516 | u8 *guid; | ||
| 517 | void *in; | ||
| 518 | int err; | ||
| 519 | |||
| 520 | if (!vport) | ||
| 521 | return -EINVAL; | ||
| 522 | if (!MLX5_CAP_GEN(mdev, vport_group_manager)) | ||
| 523 | return -EACCES; | ||
| 524 | if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) | ||
| 525 | return -ENOTSUPP; | ||
| 526 | |||
| 527 | in = mlx5_vzalloc(inlen); | ||
| 528 | if (!in) | ||
| 529 | return -ENOMEM; | ||
| 530 | |||
| 531 | MLX5_SET(modify_nic_vport_context_in, in, | ||
| 532 | field_select.node_guid, 1); | ||
| 533 | MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); | ||
| 534 | MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport); | ||
| 535 | |||
| 536 | nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in, | ||
| 537 | in, nic_vport_context); | ||
| 538 | guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context, | ||
| 539 | node_guid); | ||
| 540 | MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid); | ||
| 541 | |||
| 542 | err = mlx5_modify_nic_vport_context(mdev, in, inlen); | ||
| 543 | |||
| 544 | kvfree(in); | ||
| 545 | |||
| 546 | return err; | ||
| 547 | } | ||
| 548 | |||
| 511 | int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, | 549 | int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, |
| 512 | u16 *qkey_viol_cntr) | 550 | u16 *qkey_viol_cntr) |
| 513 | { | 551 | { |
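
The new mlx5_modify_nic_vport_node_guid() above follows the usual modify-context shape: refuse early when the caps do not allow it, build a zeroed command, set a field_select bit so only node_guid is touched, write the value, execute, free. A userspace sketch of the field-select idea; the struct layout, bit name and helper names here are invented, not the mlx5 ifc definitions:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SEL_NODE_GUID (1u << 0)   /* invented field_select bit */

struct vport_ctx_cmd {
    uint32_t field_select;        /* which fields below are valid */
    uint16_t vport;
    uint64_t node_guid;
};

/* Stand-in for the device: applies only the selected fields. */
static uint64_t current_guid = 0x1111222233334444ull;
static int exec_modify(const struct vport_ctx_cmd *cmd)
{
    if (cmd->field_select & SEL_NODE_GUID)
        current_guid = cmd->node_guid;
    return 0;
}

static int modify_node_guid(uint16_t vport, uint64_t guid, int caps_ok)
{
    if (!vport)
        return -EINVAL;           /* vport 0 is not set through this path */
    if (!caps_ok)
        return -ENOTSUP;

    struct vport_ctx_cmd *cmd = calloc(1, sizeof(*cmd));
    if (!cmd)
        return -ENOMEM;

    cmd->field_select = SEL_NODE_GUID;   /* touch nothing else */
    cmd->vport = vport;
    cmd->node_guid = guid;

    int err = exec_modify(cmd);
    free(cmd);
    return err;
}

int main(void)
{
    int err = modify_node_guid(1, 0xabcdef0123456789ull, 1);
    printf("err=%d guid=0x%016llx\n", err, (unsigned long long)current_guid);
    return 0;
}
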
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 4a7273771028..6f9e3ddff4a8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
| @@ -247,15 +247,23 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) | |||
| 247 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); | 247 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); |
| 248 | } | 248 | } |
| 249 | 249 | ||
| 250 | static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) | 250 | static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
| 251 | u8 swid) | ||
| 251 | { | 252 | { |
| 252 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | ||
| 253 | char pspa_pl[MLXSW_REG_PSPA_LEN]; | 253 | char pspa_pl[MLXSW_REG_PSPA_LEN]; |
| 254 | 254 | ||
| 255 | mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); | 255 | mlxsw_reg_pspa_pack(pspa_pl, swid, local_port); |
| 256 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); | 256 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); |
| 257 | } | 257 | } |
| 258 | 258 | ||
| 259 | static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) | ||
| 260 | { | ||
| 261 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | ||
| 262 | |||
| 263 | return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port, | ||
| 264 | swid); | ||
| 265 | } | ||
| 266 | |||
| 259 | static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, | 267 | static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, |
| 260 | bool enable) | 268 | bool enable) |
| 261 | { | 269 | { |
| @@ -305,9 +313,9 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) | |||
| 305 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); | 313 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); |
| 306 | } | 314 | } |
| 307 | 315 | ||
| 308 | static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, | 316 | static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, |
| 309 | u8 local_port, u8 *p_module, | 317 | u8 local_port, u8 *p_module, |
| 310 | u8 *p_width, u8 *p_lane) | 318 | u8 *p_width, u8 *p_lane) |
| 311 | { | 319 | { |
| 312 | char pmlp_pl[MLXSW_REG_PMLP_LEN]; | 320 | char pmlp_pl[MLXSW_REG_PMLP_LEN]; |
| 313 | int err; | 321 | int err; |
| @@ -322,16 +330,6 @@ static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, | |||
| 322 | return 0; | 330 | return 0; |
| 323 | } | 331 | } |
| 324 | 332 | ||
| 325 | static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, | ||
| 326 | u8 local_port, u8 *p_module, | ||
| 327 | u8 *p_width) | ||
| 328 | { | ||
| 329 | u8 lane; | ||
| 330 | |||
| 331 | return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module, | ||
| 332 | p_width, &lane); | ||
| 333 | } | ||
| 334 | |||
| 335 | static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, | 333 | static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
| 336 | u8 module, u8 width, u8 lane) | 334 | u8 module, u8 width, u8 lane) |
| 337 | { | 335 | { |
| @@ -949,17 +947,11 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, | |||
| 949 | size_t len) | 947 | size_t len) |
| 950 | { | 948 | { |
| 951 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 949 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
| 952 | u8 module, width, lane; | 950 | u8 module = mlxsw_sp_port->mapping.module; |
| 951 | u8 width = mlxsw_sp_port->mapping.width; | ||
| 952 | u8 lane = mlxsw_sp_port->mapping.lane; | ||
| 953 | int err; | 953 | int err; |
| 954 | 954 | ||
| 955 | err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp, | ||
| 956 | mlxsw_sp_port->local_port, | ||
| 957 | &module, &width, &lane); | ||
| 958 | if (err) { | ||
| 959 | netdev_err(dev, "Failed to retrieve module information\n"); | ||
| 960 | return err; | ||
| 961 | } | ||
| 962 | |||
| 963 | if (!mlxsw_sp_port->split) | 955 | if (!mlxsw_sp_port->split) |
| 964 | err = snprintf(name, len, "p%d", module + 1); | 956 | err = snprintf(name, len, "p%d", module + 1); |
| 965 | else | 957 | else |
| @@ -1681,8 +1673,8 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) | |||
| 1681 | return 0; | 1673 | return 0; |
| 1682 | } | 1674 | } |
| 1683 | 1675 | ||
| 1684 | static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | 1676 | static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
| 1685 | bool split, u8 module, u8 width) | 1677 | bool split, u8 module, u8 width, u8 lane) |
| 1686 | { | 1678 | { |
| 1687 | struct mlxsw_sp_port *mlxsw_sp_port; | 1679 | struct mlxsw_sp_port *mlxsw_sp_port; |
| 1688 | struct net_device *dev; | 1680 | struct net_device *dev; |
| @@ -1697,6 +1689,9 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
| 1697 | mlxsw_sp_port->mlxsw_sp = mlxsw_sp; | 1689 | mlxsw_sp_port->mlxsw_sp = mlxsw_sp; |
| 1698 | mlxsw_sp_port->local_port = local_port; | 1690 | mlxsw_sp_port->local_port = local_port; |
| 1699 | mlxsw_sp_port->split = split; | 1691 | mlxsw_sp_port->split = split; |
| 1692 | mlxsw_sp_port->mapping.module = module; | ||
| 1693 | mlxsw_sp_port->mapping.width = width; | ||
| 1694 | mlxsw_sp_port->mapping.lane = lane; | ||
| 1700 | bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); | 1695 | bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); |
| 1701 | mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); | 1696 | mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); |
| 1702 | if (!mlxsw_sp_port->active_vlans) { | 1697 | if (!mlxsw_sp_port->active_vlans) { |
| @@ -1839,28 +1834,6 @@ err_port_active_vlans_alloc: | |||
| 1839 | return err; | 1834 | return err; |
| 1840 | } | 1835 | } |
| 1841 | 1836 | ||
| 1842 | static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | ||
| 1843 | bool split, u8 module, u8 width, u8 lane) | ||
| 1844 | { | ||
| 1845 | int err; | ||
| 1846 | |||
| 1847 | err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, | ||
| 1848 | lane); | ||
| 1849 | if (err) | ||
| 1850 | return err; | ||
| 1851 | |||
| 1852 | err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module, | ||
| 1853 | width); | ||
| 1854 | if (err) | ||
| 1855 | goto err_port_create; | ||
| 1856 | |||
| 1857 | return 0; | ||
| 1858 | |||
| 1859 | err_port_create: | ||
| 1860 | mlxsw_sp_port_module_unmap(mlxsw_sp, local_port); | ||
| 1861 | return err; | ||
| 1862 | } | ||
| 1863 | |||
| 1864 | static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) | 1837 | static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) |
| 1865 | { | 1838 | { |
| 1866 | struct net_device *dev = mlxsw_sp_port->dev; | 1839 | struct net_device *dev = mlxsw_sp_port->dev; |
| @@ -1909,8 +1882,8 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) | |||
| 1909 | 1882 | ||
| 1910 | static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) | 1883 | static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) |
| 1911 | { | 1884 | { |
| 1885 | u8 module, width, lane; | ||
| 1912 | size_t alloc_size; | 1886 | size_t alloc_size; |
| 1913 | u8 module, width; | ||
| 1914 | int i; | 1887 | int i; |
| 1915 | int err; | 1888 | int err; |
| 1916 | 1889 | ||
| @@ -1921,13 +1894,14 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) | |||
| 1921 | 1894 | ||
| 1922 | for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { | 1895 | for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { |
| 1923 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, | 1896 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, |
| 1924 | &width); | 1897 | &width, &lane); |
| 1925 | if (err) | 1898 | if (err) |
| 1926 | goto err_port_module_info_get; | 1899 | goto err_port_module_info_get; |
| 1927 | if (!width) | 1900 | if (!width) |
| 1928 | continue; | 1901 | continue; |
| 1929 | mlxsw_sp->port_to_module[i] = module; | 1902 | mlxsw_sp->port_to_module[i] = module; |
| 1930 | err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width); | 1903 | err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width, |
| 1904 | lane); | ||
| 1931 | if (err) | 1905 | if (err) |
| 1932 | goto err_port_create; | 1906 | goto err_port_create; |
| 1933 | } | 1907 | } |
| @@ -1948,12 +1922,85 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) | |||
| 1948 | return local_port - offset; | 1922 | return local_port - offset; |
| 1949 | } | 1923 | } |
| 1950 | 1924 | ||
| 1925 | static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, | ||
| 1926 | u8 module, unsigned int count) | ||
| 1927 | { | ||
| 1928 | u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; | ||
| 1929 | int err, i; | ||
| 1930 | |||
| 1931 | for (i = 0; i < count; i++) { | ||
| 1932 | err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module, | ||
| 1933 | width, i * width); | ||
| 1934 | if (err) | ||
| 1935 | goto err_port_module_map; | ||
| 1936 | } | ||
| 1937 | |||
| 1938 | for (i = 0; i < count; i++) { | ||
| 1939 | err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0); | ||
| 1940 | if (err) | ||
| 1941 | goto err_port_swid_set; | ||
| 1942 | } | ||
| 1943 | |||
| 1944 | for (i = 0; i < count; i++) { | ||
| 1945 | err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, | ||
| 1946 | module, width, i * width); | ||
| 1947 | if (err) | ||
| 1948 | goto err_port_create; | ||
| 1949 | } | ||
| 1950 | |||
| 1951 | return 0; | ||
| 1952 | |||
| 1953 | err_port_create: | ||
| 1954 | for (i--; i >= 0; i--) | ||
| 1955 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | ||
| 1956 | i = count; | ||
| 1957 | err_port_swid_set: | ||
| 1958 | for (i--; i >= 0; i--) | ||
| 1959 | __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, | ||
| 1960 | MLXSW_PORT_SWID_DISABLED_PORT); | ||
| 1961 | i = count; | ||
| 1962 | err_port_module_map: | ||
| 1963 | for (i--; i >= 0; i--) | ||
| 1964 | mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i); | ||
| 1965 | return err; | ||
| 1966 | } | ||
| 1967 | |||
| 1968 | static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, | ||
| 1969 | u8 base_port, unsigned int count) | ||
| 1970 | { | ||
| 1971 | u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; | ||
| 1972 | int i; | ||
| 1973 | |||
| 1974 | /* Split by four means we need to re-create two ports, otherwise | ||
| 1975 | * only one. | ||
| 1976 | */ | ||
| 1977 | count = count / 2; | ||
| 1978 | |||
| 1979 | for (i = 0; i < count; i++) { | ||
| 1980 | local_port = base_port + i * 2; | ||
| 1981 | module = mlxsw_sp->port_to_module[local_port]; | ||
| 1982 | |||
| 1983 | mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, | ||
| 1984 | 0); | ||
| 1985 | } | ||
| 1986 | |||
| 1987 | for (i = 0; i < count; i++) | ||
| 1988 | __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0); | ||
| 1989 | |||
| 1990 | for (i = 0; i < count; i++) { | ||
| 1991 | local_port = base_port + i * 2; | ||
| 1992 | module = mlxsw_sp->port_to_module[local_port]; | ||
| 1993 | |||
| 1994 | mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, | ||
| 1995 | width, 0); | ||
| 1996 | } | ||
| 1997 | } | ||
| 1998 | |||
| 1951 | static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, | 1999 | static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, |
| 1952 | unsigned int count) | 2000 | unsigned int count) |
| 1953 | { | 2001 | { |
| 1954 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); | 2002 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); |
| 1955 | struct mlxsw_sp_port *mlxsw_sp_port; | 2003 | struct mlxsw_sp_port *mlxsw_sp_port; |
| 1956 | u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; | ||
| 1957 | u8 module, cur_width, base_port; | 2004 | u8 module, cur_width, base_port; |
| 1958 | int i; | 2005 | int i; |
| 1959 | int err; | 2006 | int err; |
| @@ -1965,18 +2012,14 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, | |||
| 1965 | return -EINVAL; | 2012 | return -EINVAL; |
| 1966 | } | 2013 | } |
| 1967 | 2014 | ||
| 2015 | module = mlxsw_sp_port->mapping.module; | ||
| 2016 | cur_width = mlxsw_sp_port->mapping.width; | ||
| 2017 | |||
| 1968 | if (count != 2 && count != 4) { | 2018 | if (count != 2 && count != 4) { |
| 1969 | netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); | 2019 | netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); |
| 1970 | return -EINVAL; | 2020 | return -EINVAL; |
| 1971 | } | 2021 | } |
| 1972 | 2022 | ||
| 1973 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module, | ||
| 1974 | &cur_width); | ||
| 1975 | if (err) { | ||
| 1976 | netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n"); | ||
| 1977 | return err; | ||
| 1978 | } | ||
| 1979 | |||
| 1980 | if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { | 2023 | if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { |
| 1981 | netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); | 2024 | netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); |
| 1982 | return -EINVAL; | 2025 | return -EINVAL; |
| @@ -2001,25 +2044,16 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, | |||
| 2001 | for (i = 0; i < count; i++) | 2044 | for (i = 0; i < count; i++) |
| 2002 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | 2045 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); |
| 2003 | 2046 | ||
| 2004 | for (i = 0; i < count; i++) { | 2047 | err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); |
| 2005 | err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, | 2048 | if (err) { |
| 2006 | module, width, i * width); | 2049 | dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); |
| 2007 | if (err) { | 2050 | goto err_port_split_create; |
| 2008 | dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n"); | ||
| 2009 | goto err_port_create; | ||
| 2010 | } | ||
| 2011 | } | 2051 | } |
| 2012 | 2052 | ||
| 2013 | return 0; | 2053 | return 0; |
| 2014 | 2054 | ||
| 2015 | err_port_create: | 2055 | err_port_split_create: |
| 2016 | for (i--; i >= 0; i--) | 2056 | mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); |
| 2017 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | ||
| 2018 | for (i = 0; i < count / 2; i++) { | ||
| 2019 | module = mlxsw_sp->port_to_module[base_port + i * 2]; | ||
| 2020 | mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false, | ||
| 2021 | module, MLXSW_PORT_MODULE_MAX_WIDTH, 0); | ||
| 2022 | } | ||
| 2023 | return err; | 2057 | return err; |
| 2024 | } | 2058 | } |
| 2025 | 2059 | ||
| @@ -2027,10 +2061,9 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) | |||
| 2027 | { | 2061 | { |
| 2028 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); | 2062 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); |
| 2029 | struct mlxsw_sp_port *mlxsw_sp_port; | 2063 | struct mlxsw_sp_port *mlxsw_sp_port; |
| 2030 | u8 module, cur_width, base_port; | 2064 | u8 cur_width, base_port; |
| 2031 | unsigned int count; | 2065 | unsigned int count; |
| 2032 | int i; | 2066 | int i; |
| 2033 | int err; | ||
| 2034 | 2067 | ||
| 2035 | mlxsw_sp_port = mlxsw_sp->ports[local_port]; | 2068 | mlxsw_sp_port = mlxsw_sp->ports[local_port]; |
| 2036 | if (!mlxsw_sp_port) { | 2069 | if (!mlxsw_sp_port) { |
| @@ -2044,12 +2077,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) | |||
| 2044 | return -EINVAL; | 2077 | return -EINVAL; |
| 2045 | } | 2078 | } |
| 2046 | 2079 | ||
| 2047 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module, | 2080 | cur_width = mlxsw_sp_port->mapping.width; |
| 2048 | &cur_width); | ||
| 2049 | if (err) { | ||
| 2050 | netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n"); | ||
| 2051 | return err; | ||
| 2052 | } | ||
| 2053 | count = cur_width == 1 ? 4 : 2; | 2081 | count = cur_width == 1 ? 4 : 2; |
| 2054 | 2082 | ||
| 2055 | base_port = mlxsw_sp_cluster_base_port_get(local_port); | 2083 | base_port = mlxsw_sp_cluster_base_port_get(local_port); |
| @@ -2061,14 +2089,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) | |||
| 2061 | for (i = 0; i < count; i++) | 2089 | for (i = 0; i < count; i++) |
| 2062 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | 2090 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); |
| 2063 | 2091 | ||
| 2064 | for (i = 0; i < count / 2; i++) { | 2092 | mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); |
| 2065 | module = mlxsw_sp->port_to_module[base_port + i * 2]; | ||
| 2066 | err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false, | ||
| 2067 | module, MLXSW_PORT_MODULE_MAX_WIDTH, | ||
| 2068 | 0); | ||
| 2069 | if (err) | ||
| 2070 | dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n"); | ||
| 2071 | } | ||
| 2072 | 2093 | ||
| 2073 | return 0; | 2094 | return 0; |
| 2074 | } | 2095 | } |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index e2c022d3e2f3..13b30eaa13d4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
| @@ -229,6 +229,11 @@ struct mlxsw_sp_port { | |||
| 229 | struct ieee_maxrate *maxrate; | 229 | struct ieee_maxrate *maxrate; |
| 230 | struct ieee_pfc *pfc; | 230 | struct ieee_pfc *pfc; |
| 231 | } dcb; | 231 | } dcb; |
| 232 | struct { | ||
| 233 | u8 module; | ||
| 234 | u8 width; | ||
| 235 | u8 lane; | ||
| 236 | } mapping; | ||
| 232 | /* 802.1Q bridge VLANs */ | 237 | /* 802.1Q bridge VLANs */ |
| 233 | unsigned long *active_vlans; | 238 | unsigned long *active_vlans; |
| 234 | unsigned long *untagged_vlans; | 239 | unsigned long *untagged_vlans; |
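
The spectrum.c/.h hunks cache module, width and lane in a per-port mapping struct and rework port splitting into three passes (module map, swid set, netdev create), where a failure part-way through one pass unwinds that pass's completed items and then every earlier pass in reverse. A compact sketch of that multi-pass rollback shape with placeholder step names:

#include <stdio.h>

#define NPORTS 4

static int  step_map(int i)    { printf("map %d\n", i);    return 0; }
static int  step_swid(int i)   { printf("swid %d\n", i);   return i == 2 ? -1 : 0; }
static int  step_create(int i) { printf("create %d\n", i); return 0; }
static void undo_map(int i)    { printf("unmap %d\n", i); }
static void undo_swid(int i)   { printf("swid off %d\n", i); }
static void undo_create(int i) { printf("remove %d\n", i); }

/* Illustrative only: mirrors the err_port_create / err_port_swid_set /
 * err_port_module_map labels above. */
static int split_create(void)
{
    int i, err;

    for (i = 0; i < NPORTS; i++)
        if ((err = step_map(i)))
            goto err_map;
    for (i = 0; i < NPORTS; i++)
        if ((err = step_swid(i)))
            goto err_swid;
    for (i = 0; i < NPORTS; i++)
        if ((err = step_create(i)))
            goto err_create;
    return 0;

err_create:
    for (i--; i >= 0; i--)
        undo_create(i);
    i = NPORTS;
err_swid:
    for (i--; i >= 0; i--)
        undo_swid(i);
    i = NPORTS;
err_map:
    for (i--; i >= 0; i--)
        undo_map(i);
    return err;
}

int main(void) { return split_create() ? 1 : 0; }
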
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index cbf58e1f9333..21ec1c2df2c7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c | |||
| @@ -192,9 +192,10 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
| 192 | struct dcbx_app_priority_entry *p_tbl, | 192 | struct dcbx_app_priority_entry *p_tbl, |
| 193 | u32 pri_tc_tbl, int count, bool dcbx_enabled) | 193 | u32 pri_tc_tbl, int count, bool dcbx_enabled) |
| 194 | { | 194 | { |
| 195 | u8 tc, priority, priority_map; | 195 | u8 tc, priority_map; |
| 196 | enum dcbx_protocol_type type; | 196 | enum dcbx_protocol_type type; |
| 197 | u16 protocol_id; | 197 | u16 protocol_id; |
| 198 | int priority; | ||
| 198 | bool enable; | 199 | bool enable; |
| 199 | int i; | 200 | int i; |
| 200 | 201 | ||
| @@ -221,7 +222,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
| 221 | * indication, but we only got here if there was an | 222 | * indication, but we only got here if there was an |
| 222 | * app tlv for the protocol, so dcbx must be enabled. | 223 | * app tlv for the protocol, so dcbx must be enabled. |
| 223 | */ | 224 | */ |
| 224 | enable = !!(type == DCBX_PROTOCOL_ETH); | 225 | enable = !(type == DCBX_PROTOCOL_ETH); |
| 225 | 226 | ||
| 226 | qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, | 227 | qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, |
| 227 | priority, tc, type); | 228 | priority, tc, type); |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 089016f46f26..2d89e8c16b32 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c | |||
| @@ -155,12 +155,14 @@ void qed_resc_free(struct qed_dev *cdev) | |||
| 155 | } | 155 | } |
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | static int qed_init_qm_info(struct qed_hwfn *p_hwfn) | 158 | static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable) |
| 159 | { | 159 | { |
| 160 | u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0; | 160 | u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0; |
| 161 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; | 161 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; |
| 162 | struct init_qm_port_params *p_qm_port; | 162 | struct init_qm_port_params *p_qm_port; |
| 163 | u16 num_pqs, multi_cos_tcs = 1; | 163 | u16 num_pqs, multi_cos_tcs = 1; |
| 164 | u8 pf_wfq = qm_info->pf_wfq; | ||
| 165 | u32 pf_rl = qm_info->pf_rl; | ||
| 164 | u16 num_vfs = 0; | 166 | u16 num_vfs = 0; |
| 165 | 167 | ||
| 166 | #ifdef CONFIG_QED_SRIOV | 168 | #ifdef CONFIG_QED_SRIOV |
| @@ -182,23 +184,28 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) | |||
| 182 | 184 | ||
| 183 | /* PQs will be arranged as follows: First per-TC PQ then pure-LB queue. | 185 | /* PQs will be arranged as follows: First per-TC PQ then pure-LB queue. |
| 184 | */ | 186 | */ |
| 185 | qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) * | 187 | qm_info->qm_pq_params = kcalloc(num_pqs, |
| 186 | num_pqs, GFP_KERNEL); | 188 | sizeof(struct init_qm_pq_params), |
| 189 | b_sleepable ? GFP_KERNEL : GFP_ATOMIC); | ||
| 187 | if (!qm_info->qm_pq_params) | 190 | if (!qm_info->qm_pq_params) |
| 188 | goto alloc_err; | 191 | goto alloc_err; |
| 189 | 192 | ||
| 190 | qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) * | 193 | qm_info->qm_vport_params = kcalloc(num_vports, |
| 191 | num_vports, GFP_KERNEL); | 194 | sizeof(struct init_qm_vport_params), |
| 195 | b_sleepable ? GFP_KERNEL | ||
| 196 | : GFP_ATOMIC); | ||
| 192 | if (!qm_info->qm_vport_params) | 197 | if (!qm_info->qm_vport_params) |
| 193 | goto alloc_err; | 198 | goto alloc_err; |
| 194 | 199 | ||
| 195 | qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) * | 200 | qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS, |
| 196 | MAX_NUM_PORTS, GFP_KERNEL); | 201 | sizeof(struct init_qm_port_params), |
| 202 | b_sleepable ? GFP_KERNEL | ||
| 203 | : GFP_ATOMIC); | ||
| 197 | if (!qm_info->qm_port_params) | 204 | if (!qm_info->qm_port_params) |
| 198 | goto alloc_err; | 205 | goto alloc_err; |
| 199 | 206 | ||
| 200 | qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data), | 207 | qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data), |
| 201 | GFP_KERNEL); | 208 | b_sleepable ? GFP_KERNEL : GFP_ATOMIC); |
| 202 | if (!qm_info->wfq_data) | 209 | if (!qm_info->wfq_data) |
| 203 | goto alloc_err; | 210 | goto alloc_err; |
| 204 | 211 | ||
| @@ -264,10 +271,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) | |||
| 264 | for (i = 0; i < qm_info->num_vports; i++) | 271 | for (i = 0; i < qm_info->num_vports; i++) |
| 265 | qm_info->qm_vport_params[i].vport_wfq = 1; | 272 | qm_info->qm_vport_params[i].vport_wfq = 1; |
| 266 | 273 | ||
| 267 | qm_info->pf_wfq = 0; | ||
| 268 | qm_info->pf_rl = 0; | ||
| 269 | qm_info->vport_rl_en = 1; | 274 | qm_info->vport_rl_en = 1; |
| 270 | qm_info->vport_wfq_en = 1; | 275 | qm_info->vport_wfq_en = 1; |
| 276 | qm_info->pf_rl = pf_rl; | ||
| 277 | qm_info->pf_wfq = pf_wfq; | ||
| 271 | 278 | ||
| 272 | return 0; | 279 | return 0; |
| 273 | 280 | ||
| @@ -299,7 +306,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) | |||
| 299 | qed_qm_info_free(p_hwfn); | 306 | qed_qm_info_free(p_hwfn); |
| 300 | 307 | ||
| 301 | /* initialize qed's qm data structure */ | 308 | /* initialize qed's qm data structure */ |
| 302 | rc = qed_init_qm_info(p_hwfn); | 309 | rc = qed_init_qm_info(p_hwfn, false); |
| 303 | if (rc) | 310 | if (rc) |
| 304 | return rc; | 311 | return rc; |
| 305 | 312 | ||
| @@ -388,7 +395,7 @@ int qed_resc_alloc(struct qed_dev *cdev) | |||
| 388 | goto alloc_err; | 395 | goto alloc_err; |
| 389 | 396 | ||
| 390 | /* Prepare and process QM requirements */ | 397 | /* Prepare and process QM requirements */ |
| 391 | rc = qed_init_qm_info(p_hwfn); | 398 | rc = qed_init_qm_info(p_hwfn, true); |
| 392 | if (rc) | 399 | if (rc) |
| 393 | goto alloc_err; | 400 | goto alloc_err; |
| 394 | 401 | ||
| @@ -581,7 +588,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) | |||
| 581 | 588 | ||
| 582 | hw_mode |= 1 << MODE_ASIC; | 589 | hw_mode |= 1 << MODE_ASIC; |
| 583 | 590 | ||
| 591 | if (p_hwfn->cdev->num_hwfns > 1) | ||
| 592 | hw_mode |= 1 << MODE_100G; | ||
| 593 | |||
| 584 | p_hwfn->hw_info.hw_mode = hw_mode; | 594 | p_hwfn->hw_info.hw_mode = hw_mode; |
| 595 | |||
| 596 | DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP), | ||
| 597 | "Configuring function for hw_mode: 0x%08x\n", | ||
| 598 | p_hwfn->hw_info.hw_mode); | ||
| 585 | } | 599 | } |
| 586 | 600 | ||
| 587 | /* Init run time data for all PFs on an engine. */ | 601 | /* Init run time data for all PFs on an engine. */ |
| @@ -821,6 +835,11 @@ int qed_hw_init(struct qed_dev *cdev, | |||
| 821 | u32 load_code, param; | 835 | u32 load_code, param; |
| 822 | int rc, mfw_rc, i; | 836 | int rc, mfw_rc, i; |
| 823 | 837 | ||
| 838 | if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { | ||
| 839 | DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); | ||
| 840 | return -EINVAL; | ||
| 841 | } | ||
| 842 | |||
| 824 | if (IS_PF(cdev)) { | 843 | if (IS_PF(cdev)) { |
| 825 | rc = qed_init_fw_data(cdev, bin_fw_data); | 844 | rc = qed_init_fw_data(cdev, bin_fw_data); |
| 826 | if (rc != 0) | 845 | if (rc != 0) |
| @@ -2086,6 +2105,13 @@ void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate) | |||
| 2086 | { | 2105 | { |
| 2087 | int i; | 2106 | int i; |
| 2088 | 2107 | ||
| 2108 | if (cdev->num_hwfns > 1) { | ||
| 2109 | DP_VERBOSE(cdev, | ||
| 2110 | NETIF_MSG_LINK, | ||
| 2111 | "WFQ configuration is not supported for this device\n"); | ||
| 2112 | return; | ||
| 2113 | } | ||
| 2114 | |||
| 2089 | for_each_hwfn(cdev, i) { | 2115 | for_each_hwfn(cdev, i) { |
| 2090 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | 2116 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
| 2091 | 2117 | ||
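
The qed_dev.c hunk threads a b_sleepable flag down to qed_init_qm_info() so the same allocation code can run both from probe (sleeping allowed, GFP_KERNEL) and from the reconfiguration path that cannot sleep (GFP_ATOMIC), and it preserves pf_wfq/pf_rl across the re-init instead of zeroing them. A rough userspace analog of "caller says whether blocking is allowed"; the tiny static pool merely stands in for GFP_ATOMIC, which has no direct userspace equivalent:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for GFP_ATOMIC: a preallocated arena that never blocks. */
static unsigned char atomic_pool[4096];
static size_t atomic_used;

static void *ctx_zalloc(size_t n, bool may_sleep)
{
    if (may_sleep)
        return calloc(1, n);         /* like GFP_KERNEL: may block */
    if (atomic_used + n > sizeof(atomic_pool))
        return NULL;                 /* like GFP_ATOMIC: fail, don't wait */
    void *p = atomic_pool + atomic_used;
    atomic_used += n;
    memset(p, 0, n);
    return p;
}

int main(void)
{
    void *a = ctx_zalloc(128, true);   /* probe path: sleeping allowed */
    void *b = ctx_zalloc(128, false);  /* reconfig path: atomic context */

    printf("sleepable=%p atomic=%p\n", a, b);
    free(a);
    return 0;
}
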
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 8b22f87033ce..61cc6869fa65 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
| @@ -413,15 +413,17 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) | |||
| 413 | /* Fallthrough */ | 413 | /* Fallthrough */ |
| 414 | 414 | ||
| 415 | case QED_INT_MODE_MSI: | 415 | case QED_INT_MODE_MSI: |
| 416 | rc = pci_enable_msi(cdev->pdev); | 416 | if (cdev->num_hwfns == 1) { |
| 417 | if (!rc) { | 417 | rc = pci_enable_msi(cdev->pdev); |
| 418 | int_params->out.int_mode = QED_INT_MODE_MSI; | 418 | if (!rc) { |
| 419 | goto out; | 419 | int_params->out.int_mode = QED_INT_MODE_MSI; |
| 420 | } | 420 | goto out; |
| 421 | } | ||
| 421 | 422 | ||
| 422 | DP_NOTICE(cdev, "Failed to enable MSI\n"); | 423 | DP_NOTICE(cdev, "Failed to enable MSI\n"); |
| 423 | if (force_mode) | 424 | if (force_mode) |
| 424 | goto out; | 425 | goto out; |
| 426 | } | ||
| 425 | /* Fallthrough */ | 427 | /* Fallthrough */ |
| 426 | 428 | ||
| 427 | case QED_INT_MODE_INTA: | 429 | case QED_INT_MODE_INTA: |
| @@ -1103,6 +1105,39 @@ static int qed_get_port_type(u32 media_type) | |||
| 1103 | return port_type; | 1105 | return port_type; |
| 1104 | } | 1106 | } |
| 1105 | 1107 | ||
| 1108 | static int qed_get_link_data(struct qed_hwfn *hwfn, | ||
| 1109 | struct qed_mcp_link_params *params, | ||
| 1110 | struct qed_mcp_link_state *link, | ||
| 1111 | struct qed_mcp_link_capabilities *link_caps) | ||
| 1112 | { | ||
| 1113 | void *p; | ||
| 1114 | |||
| 1115 | if (!IS_PF(hwfn->cdev)) { | ||
| 1116 | qed_vf_get_link_params(hwfn, params); | ||
| 1117 | qed_vf_get_link_state(hwfn, link); | ||
| 1118 | qed_vf_get_link_caps(hwfn, link_caps); | ||
| 1119 | |||
| 1120 | return 0; | ||
| 1121 | } | ||
| 1122 | |||
| 1123 | p = qed_mcp_get_link_params(hwfn); | ||
| 1124 | if (!p) | ||
| 1125 | return -ENXIO; | ||
| 1126 | memcpy(params, p, sizeof(*params)); | ||
| 1127 | |||
| 1128 | p = qed_mcp_get_link_state(hwfn); | ||
| 1129 | if (!p) | ||
| 1130 | return -ENXIO; | ||
| 1131 | memcpy(link, p, sizeof(*link)); | ||
| 1132 | |||
| 1133 | p = qed_mcp_get_link_capabilities(hwfn); | ||
| 1134 | if (!p) | ||
| 1135 | return -ENXIO; | ||
| 1136 | memcpy(link_caps, p, sizeof(*link_caps)); | ||
| 1137 | |||
| 1138 | return 0; | ||
| 1139 | } | ||
| 1140 | |||
| 1106 | static void qed_fill_link(struct qed_hwfn *hwfn, | 1141 | static void qed_fill_link(struct qed_hwfn *hwfn, |
| 1107 | struct qed_link_output *if_link) | 1142 | struct qed_link_output *if_link) |
| 1108 | { | 1143 | { |
| @@ -1114,15 +1149,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn, | |||
| 1114 | memset(if_link, 0, sizeof(*if_link)); | 1149 | memset(if_link, 0, sizeof(*if_link)); |
| 1115 | 1150 | ||
| 1116 | /* Prepare source inputs */ | 1151 | /* Prepare source inputs */ |
| 1117 | if (IS_PF(hwfn->cdev)) { | 1152 | if (qed_get_link_data(hwfn, ¶ms, &link, &link_caps)) { |
| 1118 | memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params)); | 1153 | dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); |
| 1119 | memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); | 1154 | return; |
| 1120 | memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn), | ||
| 1121 | sizeof(link_caps)); | ||
| 1122 | } else { | ||
| 1123 | qed_vf_get_link_params(hwfn, ¶ms); | ||
| 1124 | qed_vf_get_link_state(hwfn, &link); | ||
| 1125 | qed_vf_get_link_caps(hwfn, &link_caps); | ||
| 1126 | } | 1155 | } |
| 1127 | 1156 | ||
| 1128 | /* Set the link parameters to pass to protocol driver */ | 1157 | /* Set the link parameters to pass to protocol driver */ |
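
The qed_main.c hunk replaces three back-to-back memcpy() calls from qed_mcp_get_link_*() with a helper that checks each returned pointer and bails out with -ENXIO, so a function with no published link data can no longer feed NULL into memcpy. The same shape in plain C, with an invented getter name:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct link_params { int speed; };

/* Hypothetical getter: may legitimately return NULL before link data
 * has been published. */
static const struct link_params *get_link_params(void)
{
    return NULL;
}

static int fill_link_params(struct link_params *out)
{
    const struct link_params *p = get_link_params();

    if (!p)
        return -ENXIO;      /* bail out instead of memcpy(out, NULL, ...) */
    memcpy(out, p, sizeof(*out));
    return 0;
}

int main(void)
{
    struct link_params lp;

    if (fill_link_params(&lp)) {
        fprintf(stderr, "no link data available\n");
        return 1;
    }
    printf("speed %d\n", lp.speed);
    return 0;
}
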
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index c8667c65e685..c90b2b6ad969 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h | |||
| @@ -12,11 +12,13 @@ | |||
| 12 | #include "qed_vf.h" | 12 | #include "qed_vf.h" |
| 13 | #define QED_VF_ARRAY_LENGTH (3) | 13 | #define QED_VF_ARRAY_LENGTH (3) |
| 14 | 14 | ||
| 15 | #ifdef CONFIG_QED_SRIOV | ||
| 15 | #define IS_VF(cdev) ((cdev)->b_is_vf) | 16 | #define IS_VF(cdev) ((cdev)->b_is_vf) |
| 16 | #define IS_PF(cdev) (!((cdev)->b_is_vf)) | 17 | #define IS_PF(cdev) (!((cdev)->b_is_vf)) |
| 17 | #ifdef CONFIG_QED_SRIOV | ||
| 18 | #define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info)) | 18 | #define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info)) |
| 19 | #else | 19 | #else |
| 20 | #define IS_VF(cdev) (0) | ||
| 21 | #define IS_PF(cdev) (1) | ||
| 20 | #define IS_PF_SRIOV(p_hwfn) (0) | 22 | #define IS_PF_SRIOV(p_hwfn) (0) |
| 21 | #endif | 23 | #endif |
| 22 | #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) | 24 | #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) |
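
Moving IS_VF()/IS_PF() under CONFIG_QED_SRIOV means non-SRIOV builds see the constants 0 and 1, so the compiler can drop VF-only branches entirely (and the next hunk lets qede omit the VF PCI ID in that configuration). A small illustration of config-gated macros constant-folding a branch away; the option name here is an example, not a real Kconfig symbol:

#include <stdio.h>

/* Flip this to exercise the other definition of IS_VF(). */
/* #define CONFIG_SRIOV_EXAMPLE 1 */

struct dev { int b_is_vf; };

#ifdef CONFIG_SRIOV_EXAMPLE
#define IS_VF(d)  ((d)->b_is_vf)
#else
#define IS_VF(d)  (0)     /* constant: compiler can delete IS_VF-only code */
#endif

int main(void)
{
    struct dev d = { .b_is_vf = 1 };

    (void)d;               /* only consulted when the option is on */
    if (IS_VF(&d))
        puts("VF path");   /* dead code when the option is off */
    else
        puts("PF path");
    return 0;
}
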
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 1bc75358cbc4..ad3cae3b7243 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c | |||
| @@ -230,7 +230,10 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) | |||
| 230 | case ETH_SS_PRIV_FLAGS: | 230 | case ETH_SS_PRIV_FLAGS: |
| 231 | return QEDE_PRI_FLAG_LEN; | 231 | return QEDE_PRI_FLAG_LEN; |
| 232 | case ETH_SS_TEST: | 232 | case ETH_SS_TEST: |
| 233 | return QEDE_ETHTOOL_TEST_MAX; | 233 | if (!IS_VF(edev)) |
| 234 | return QEDE_ETHTOOL_TEST_MAX; | ||
| 235 | else | ||
| 236 | return 0; | ||
| 234 | default: | 237 | default: |
| 235 | DP_VERBOSE(edev, QED_MSG_DEBUG, | 238 | DP_VERBOSE(edev, QED_MSG_DEBUG, |
| 236 | "Unsupported stringset 0x%08x\n", stringset); | 239 | "Unsupported stringset 0x%08x\n", stringset); |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 337e839ca586..5733d1888223 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c | |||
| @@ -87,7 +87,9 @@ static const struct pci_device_id qede_pci_tbl[] = { | |||
| 87 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF}, | 87 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF}, |
| 88 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF}, | 88 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF}, |
| 89 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, | 89 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, |
| 90 | #ifdef CONFIG_QED_SRIOV | ||
| 90 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, | 91 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, |
| 92 | #endif | ||
| 91 | { 0 } | 93 | { 0 } |
| 92 | }; | 94 | }; |
| 93 | 95 | ||
| @@ -1824,7 +1826,7 @@ static int qede_set_vf_rate(struct net_device *dev, int vfidx, | |||
| 1824 | { | 1826 | { |
| 1825 | struct qede_dev *edev = netdev_priv(dev); | 1827 | struct qede_dev *edev = netdev_priv(dev); |
| 1826 | 1828 | ||
| 1827 | return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate, | 1829 | return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate, |
| 1828 | max_tx_rate); | 1830 | max_tx_rate); |
| 1829 | } | 1831 | } |
| 1830 | 1832 | ||
| @@ -2091,6 +2093,29 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) | |||
| 2091 | edev->accept_any_vlan = false; | 2093 | edev->accept_any_vlan = false; |
| 2092 | } | 2094 | } |
| 2093 | 2095 | ||
| 2096 | int qede_set_features(struct net_device *dev, netdev_features_t features) | ||
| 2097 | { | ||
| 2098 | struct qede_dev *edev = netdev_priv(dev); | ||
| 2099 | netdev_features_t changes = features ^ dev->features; | ||
| 2100 | bool need_reload = false; | ||
| 2101 | |||
| 2102 | /* No action needed if hardware GRO is disabled during driver load */ | ||
| 2103 | if (changes & NETIF_F_GRO) { | ||
| 2104 | if (dev->features & NETIF_F_GRO) | ||
| 2105 | need_reload = !edev->gro_disable; | ||
| 2106 | else | ||
| 2107 | need_reload = edev->gro_disable; | ||
| 2108 | } | ||
| 2109 | |||
| 2110 | if (need_reload && netif_running(edev->ndev)) { | ||
| 2111 | dev->features = features; | ||
| 2112 | qede_reload(edev, NULL, NULL); | ||
| 2113 | return 1; | ||
| 2114 | } | ||
| 2115 | |||
| 2116 | return 0; | ||
| 2117 | } | ||
| 2118 | |||
| 2094 | #ifdef CONFIG_QEDE_VXLAN | 2119 | #ifdef CONFIG_QEDE_VXLAN |
| 2095 | static void qede_add_vxlan_port(struct net_device *dev, | 2120 | static void qede_add_vxlan_port(struct net_device *dev, |
| 2096 | sa_family_t sa_family, __be16 port) | 2121 | sa_family_t sa_family, __be16 port) |
| @@ -2175,6 +2200,7 @@ static const struct net_device_ops qede_netdev_ops = { | |||
| 2175 | #endif | 2200 | #endif |
| 2176 | .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, | 2201 | .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, |
| 2177 | .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, | 2202 | .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, |
| 2203 | .ndo_set_features = qede_set_features, | ||
| 2178 | .ndo_get_stats64 = qede_get_stats64, | 2204 | .ndo_get_stats64 = qede_get_stats64, |
| 2179 | #ifdef CONFIG_QED_SRIOV | 2205 | #ifdef CONFIG_QED_SRIOV |
| 2180 | .ndo_set_vf_link_state = qede_set_vf_link_state, | 2206 | .ndo_set_vf_link_state = qede_set_vf_link_state, |
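
The qede_main.c hunk adds an ndo_set_features handler that XORs the requested feature mask against the current one and only triggers a reload when the GRO bit is among the changed bits and disagrees with the current hardware GRO state. A minimal illustration of the XOR change-detection idiom; the flag values and decision helper are illustrative, not the driver's exact logic:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define F_GRO  (1u << 0)   /* illustrative bit, not the real NETIF_F_GRO value */
#define F_TSO  (1u << 1)

static bool gro_needs_reload(uint32_t cur, uint32_t req, bool hw_gro_disabled)
{
    uint32_t changed = cur ^ req;          /* bits that differ */

    if (!(changed & F_GRO))
        return false;                      /* GRO untouched: nothing to do */
    /* Reload only if the request disagrees with the hardware state. */
    return (req & F_GRO) ? hw_gro_disabled : !hw_gro_disabled;
}

int main(void)
{
    uint32_t cur = F_GRO | F_TSO;

    printf("toggle TSO only -> reload? %d\n",
           gro_needs_reload(cur, cur ^ F_TSO, false));
    printf("turn GRO off    -> reload? %d\n",
           gro_needs_reload(cur, cur & ~F_GRO, false));
    return 0;
}
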
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 83d72106471c..fd5d1c93b55b 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
| @@ -4846,7 +4846,6 @@ static void ql_eeh_close(struct net_device *ndev) | |||
| 4846 | } | 4846 | } |
| 4847 | 4847 | ||
| 4848 | /* Disabling the timer */ | 4848 | /* Disabling the timer */ |
| 4849 | del_timer_sync(&qdev->timer); | ||
| 4850 | ql_cancel_all_work_sync(qdev); | 4849 | ql_cancel_all_work_sync(qdev); |
| 4851 | 4850 | ||
| 4852 | for (i = 0; i < qdev->rss_ring_count; i++) | 4851 | for (i = 0; i < qdev->rss_ring_count; i++) |
| @@ -4873,6 +4872,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, | |||
| 4873 | return PCI_ERS_RESULT_CAN_RECOVER; | 4872 | return PCI_ERS_RESULT_CAN_RECOVER; |
| 4874 | case pci_channel_io_frozen: | 4873 | case pci_channel_io_frozen: |
| 4875 | netif_device_detach(ndev); | 4874 | netif_device_detach(ndev); |
| 4875 | del_timer_sync(&qdev->timer); | ||
| 4876 | if (netif_running(ndev)) | 4876 | if (netif_running(ndev)) |
| 4877 | ql_eeh_close(ndev); | 4877 | ql_eeh_close(ndev); |
| 4878 | pci_disable_device(pdev); | 4878 | pci_disable_device(pdev); |
| @@ -4880,6 +4880,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, | |||
| 4880 | case pci_channel_io_perm_failure: | 4880 | case pci_channel_io_perm_failure: |
| 4881 | dev_err(&pdev->dev, | 4881 | dev_err(&pdev->dev, |
| 4882 | "%s: pci_channel_io_perm_failure.\n", __func__); | 4882 | "%s: pci_channel_io_perm_failure.\n", __func__); |
| 4883 | del_timer_sync(&qdev->timer); | ||
| 4883 | ql_eeh_close(ndev); | 4884 | ql_eeh_close(ndev); |
| 4884 | set_bit(QL_EEH_FATAL, &qdev->flags); | 4885 | set_bit(QL_EEH_FATAL, &qdev->flags); |
| 4885 | return PCI_ERS_RESULT_DISCONNECT; | 4886 | return PCI_ERS_RESULT_DISCONNECT; |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 1681084cc96f..1f309127457d 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
| @@ -619,6 +619,17 @@ fail: | |||
| 619 | return rc; | 619 | return rc; |
| 620 | } | 620 | } |
| 621 | 621 | ||
| 622 | static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) | ||
| 623 | { | ||
| 624 | struct efx_channel *channel; | ||
| 625 | struct efx_tx_queue *tx_queue; | ||
| 626 | |||
| 627 | /* All our existing PIO buffers went away */ | ||
| 628 | efx_for_each_channel(channel, efx) | ||
| 629 | efx_for_each_channel_tx_queue(tx_queue, channel) | ||
| 630 | tx_queue->piobuf = NULL; | ||
| 631 | } | ||
| 632 | |||
| 622 | #else /* !EFX_USE_PIO */ | 633 | #else /* !EFX_USE_PIO */ |
| 623 | 634 | ||
| 624 | static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) | 635 | static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n) |
| @@ -635,6 +646,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx) | |||
| 635 | { | 646 | { |
| 636 | } | 647 | } |
| 637 | 648 | ||
| 649 | static void efx_ef10_forget_old_piobufs(struct efx_nic *efx) | ||
| 650 | { | ||
| 651 | } | ||
| 652 | |||
| 638 | #endif /* EFX_USE_PIO */ | 653 | #endif /* EFX_USE_PIO */ |
| 639 | 654 | ||
| 640 | static void efx_ef10_remove(struct efx_nic *efx) | 655 | static void efx_ef10_remove(struct efx_nic *efx) |
| @@ -1018,6 +1033,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx) | |||
| 1018 | nic_data->must_realloc_vis = true; | 1033 | nic_data->must_realloc_vis = true; |
| 1019 | nic_data->must_restore_filters = true; | 1034 | nic_data->must_restore_filters = true; |
| 1020 | nic_data->must_restore_piobufs = true; | 1035 | nic_data->must_restore_piobufs = true; |
| 1036 | efx_ef10_forget_old_piobufs(efx); | ||
| 1021 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; | 1037 | nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID; |
| 1022 | 1038 | ||
| 1023 | /* Driver-created vswitches and vports must be re-created */ | 1039 | /* Driver-created vswitches and vports must be re-created */ |
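
The ef10.c hunk adds efx_ef10_forget_old_piobufs(), called when MC allocations are reset, so tx_queue->piobuf pointers cached before an MC reboot cannot be written through afterwards; the !EFX_USE_PIO build gets an empty stub so the caller need not care. A small sketch of invalidating cached per-queue pointers on reset, with generic names rather than the sfc API:

#include <stddef.h>
#include <stdio.h>

#define NQUEUES 3

struct txq { char *fast_buf; };            /* cached device resource */
static struct txq queues[NQUEUES];
static char backing[NQUEUES][64];          /* pretend device buffers */

static void attach_buffers(void)
{
    for (int i = 0; i < NQUEUES; i++)
        queues[i].fast_buf = backing[i];
}

/* On a device reset the old buffers are gone; drop every cached pointer
 * so later TX paths fall back to the slow path instead of using stale
 * mappings. */
static void forget_buffers(void)
{
    for (int i = 0; i < NQUEUES; i++)
        queues[i].fast_buf = NULL;
}

int main(void)
{
    attach_buffers();
    forget_buffers();                  /* reset path */
    printf("queue 0 buf after reset: %p\n", (void *)queues[0].fast_buf);
    return 0;
}
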
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 0705ec869487..097f363f1630 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
| @@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx) | |||
| 1726 | 1726 | ||
| 1727 | #ifdef CONFIG_RFS_ACCEL | 1727 | #ifdef CONFIG_RFS_ACCEL |
| 1728 | if (efx->type->offload_features & NETIF_F_NTUPLE) { | 1728 | if (efx->type->offload_features & NETIF_F_NTUPLE) { |
| 1729 | efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters, | 1729 | struct efx_channel *channel; |
| 1730 | sizeof(*efx->rps_flow_id), | 1730 | int i, success = 1; |
| 1731 | GFP_KERNEL); | 1731 | |
| 1732 | if (!efx->rps_flow_id) { | 1732 | efx_for_each_channel(channel, efx) { |
| 1733 | channel->rps_flow_id = | ||
| 1734 | kcalloc(efx->type->max_rx_ip_filters, | ||
| 1735 | sizeof(*channel->rps_flow_id), | ||
| 1736 | GFP_KERNEL); | ||
| 1737 | if (!channel->rps_flow_id) | ||
| 1738 | success = 0; | ||
| 1739 | else | ||
| 1740 | for (i = 0; | ||
| 1741 | i < efx->type->max_rx_ip_filters; | ||
| 1742 | ++i) | ||
| 1743 | channel->rps_flow_id[i] = | ||
| 1744 | RPS_FLOW_ID_INVALID; | ||
| 1745 | } | ||
| 1746 | |||
| 1747 | if (!success) { | ||
| 1748 | efx_for_each_channel(channel, efx) | ||
| 1749 | kfree(channel->rps_flow_id); | ||
| 1733 | efx->type->filter_table_remove(efx); | 1750 | efx->type->filter_table_remove(efx); |
| 1734 | rc = -ENOMEM; | 1751 | rc = -ENOMEM; |
| 1735 | goto out_unlock; | 1752 | goto out_unlock; |
| 1736 | } | 1753 | } |
| 1754 | |||
| 1755 | efx->rps_expire_index = efx->rps_expire_channel = 0; | ||
| 1737 | } | 1756 | } |
| 1738 | #endif | 1757 | #endif |
| 1739 | out_unlock: | 1758 | out_unlock: |
| @@ -1744,7 +1763,10 @@ out_unlock: | |||
| 1744 | static void efx_remove_filters(struct efx_nic *efx) | 1763 | static void efx_remove_filters(struct efx_nic *efx) |
| 1745 | { | 1764 | { |
| 1746 | #ifdef CONFIG_RFS_ACCEL | 1765 | #ifdef CONFIG_RFS_ACCEL |
| 1747 | kfree(efx->rps_flow_id); | 1766 | struct efx_channel *channel; |
| 1767 | |||
| 1768 | efx_for_each_channel(channel, efx) | ||
| 1769 | kfree(channel->rps_flow_id); | ||
| 1748 | #endif | 1770 | #endif |
| 1749 | down_write(&efx->filter_sem); | 1771 | down_write(&efx->filter_sem); |
| 1750 | efx->type->filter_table_remove(efx); | 1772 | efx->type->filter_table_remove(efx); |
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 7f295c4d7b80..2a9228a6e4a0 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c | |||
| @@ -189,11 +189,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap) | |||
| 189 | 189 | ||
| 190 | case MC_CMD_MEDIA_XFP: | 190 | case MC_CMD_MEDIA_XFP: |
| 191 | case MC_CMD_MEDIA_SFP_PLUS: | 191 | case MC_CMD_MEDIA_SFP_PLUS: |
| 192 | result |= SUPPORTED_FIBRE; | ||
| 193 | break; | ||
| 194 | |||
| 195 | case MC_CMD_MEDIA_QSFP_PLUS: | 192 | case MC_CMD_MEDIA_QSFP_PLUS: |
| 196 | result |= SUPPORTED_FIBRE; | 193 | result |= SUPPORTED_FIBRE; |
| 194 | if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) | ||
| 195 | result |= SUPPORTED_1000baseT_Full; | ||
| 196 | if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) | ||
| 197 | result |= SUPPORTED_10000baseT_Full; | ||
| 197 | if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) | 198 | if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) |
| 198 | result |= SUPPORTED_40000baseCR4_Full; | 199 | result |= SUPPORTED_40000baseCR4_Full; |
| 199 | break; | 200 | break; |
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 38c422321cda..d13ddf9703ff 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h | |||
| @@ -403,6 +403,8 @@ enum efx_sync_events_state { | |||
| 403 | * @event_test_cpu: Last CPU to handle interrupt or test event for this channel | 403 | * @event_test_cpu: Last CPU to handle interrupt or test event for this channel |
| 404 | * @irq_count: Number of IRQs since last adaptive moderation decision | 404 | * @irq_count: Number of IRQs since last adaptive moderation decision |
| 405 | * @irq_mod_score: IRQ moderation score | 405 | * @irq_mod_score: IRQ moderation score |
| 406 | * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, | ||
| 407 | * indexed by filter ID | ||
| 406 | * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors | 408 | * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors |
| 407 | * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors | 409 | * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors |
| 408 | * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors | 410 | * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors |
| @@ -446,6 +448,8 @@ struct efx_channel { | |||
| 446 | unsigned int irq_mod_score; | 448 | unsigned int irq_mod_score; |
| 447 | #ifdef CONFIG_RFS_ACCEL | 449 | #ifdef CONFIG_RFS_ACCEL |
| 448 | unsigned int rfs_filters_added; | 450 | unsigned int rfs_filters_added; |
| 451 | #define RPS_FLOW_ID_INVALID 0xFFFFFFFF | ||
| 452 | u32 *rps_flow_id; | ||
| 449 | #endif | 453 | #endif |
| 450 | 454 | ||
| 451 | unsigned n_rx_tobe_disc; | 455 | unsigned n_rx_tobe_disc; |
| @@ -889,9 +893,9 @@ struct vfdi_status; | |||
| 889 | * @filter_sem: Filter table rw_semaphore, for freeing the table | 893 | * @filter_sem: Filter table rw_semaphore, for freeing the table |
| 890 | * @filter_lock: Filter table lock, for mere content changes | 894 | * @filter_lock: Filter table lock, for mere content changes |
| 891 | * @filter_state: Architecture-dependent filter table state | 895 | * @filter_state: Architecture-dependent filter table state |
| 892 | * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS, | 896 | * @rps_expire_channel: Next channel to check for expiry |
| 893 | * indexed by filter ID | 897 | * @rps_expire_index: Next index to check for expiry in |
| 894 | * @rps_expire_index: Next index to check for expiry in @rps_flow_id | 898 | * @rps_expire_channel's @rps_flow_id |
| 895 | * @active_queues: Count of RX and TX queues that haven't been flushed and drained. | 899 | * @active_queues: Count of RX and TX queues that haven't been flushed and drained. |
| 896 | * @rxq_flush_pending: Count of number of receive queues that need to be flushed. | 900 | * @rxq_flush_pending: Count of number of receive queues that need to be flushed. |
| 897 | * Decremented when the efx_flush_rx_queue() is called. | 901 | * Decremented when the efx_flush_rx_queue() is called. |
| @@ -1035,7 +1039,7 @@ struct efx_nic { | |||
| 1035 | spinlock_t filter_lock; | 1039 | spinlock_t filter_lock; |
| 1036 | void *filter_state; | 1040 | void *filter_state; |
| 1037 | #ifdef CONFIG_RFS_ACCEL | 1041 | #ifdef CONFIG_RFS_ACCEL |
| 1038 | u32 *rps_flow_id; | 1042 | unsigned int rps_expire_channel; |
| 1039 | unsigned int rps_expire_index; | 1043 | unsigned int rps_expire_index; |
| 1040 | #endif | 1044 | #endif |
| 1041 | 1045 | ||
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 8956995b2fe7..02b0b5272c14 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c | |||
| @@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | |||
| 842 | struct efx_nic *efx = netdev_priv(net_dev); | 842 | struct efx_nic *efx = netdev_priv(net_dev); |
| 843 | struct efx_channel *channel; | 843 | struct efx_channel *channel; |
| 844 | struct efx_filter_spec spec; | 844 | struct efx_filter_spec spec; |
| 845 | const __be16 *ports; | 845 | struct flow_keys fk; |
| 846 | __be16 ether_type; | ||
| 847 | int nhoff; | ||
| 848 | int rc; | 846 | int rc; |
| 849 | 847 | ||
| 850 | /* The core RPS/RFS code has already parsed and validated | 848 | if (flow_id == RPS_FLOW_ID_INVALID) |
| 851 | * VLAN, IP and transport headers. We assume they are in the | 849 | return -EINVAL; |
| 852 | * header area. | ||
| 853 | */ | ||
| 854 | |||
| 855 | if (skb->protocol == htons(ETH_P_8021Q)) { | ||
| 856 | const struct vlan_hdr *vh = | ||
| 857 | (const struct vlan_hdr *)skb->data; | ||
| 858 | 850 | ||
| 859 | /* We can't filter on the IP 5-tuple and the vlan | 851 | if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) |
| 860 | * together, so just strip the vlan header and filter | 852 | return -EPROTONOSUPPORT; |
| 861 | * on the IP part. | ||
| 862 | */ | ||
| 863 | EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh)); | ||
| 864 | ether_type = vh->h_vlan_encapsulated_proto; | ||
| 865 | nhoff = sizeof(struct vlan_hdr); | ||
| 866 | } else { | ||
| 867 | ether_type = skb->protocol; | ||
| 868 | nhoff = 0; | ||
| 869 | } | ||
| 870 | 853 | ||
| 871 | if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6)) | 854 | if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) |
| 855 | return -EPROTONOSUPPORT; | ||
| 856 | if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) | ||
| 872 | return -EPROTONOSUPPORT; | 857 | return -EPROTONOSUPPORT; |
| 873 | 858 | ||
| 874 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, | 859 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, |
| @@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | |||
| 878 | EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | | 863 | EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO | |
| 879 | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | | 864 | EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT | |
| 880 | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; | 865 | EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT; |
| 881 | spec.ether_type = ether_type; | 866 | spec.ether_type = fk.basic.n_proto; |
| 882 | 867 | spec.ip_proto = fk.basic.ip_proto; | |
| 883 | if (ether_type == htons(ETH_P_IP)) { | 868 | |
| 884 | const struct iphdr *ip = | 869 | if (fk.basic.n_proto == htons(ETH_P_IP)) { |
| 885 | (const struct iphdr *)(skb->data + nhoff); | 870 | spec.rem_host[0] = fk.addrs.v4addrs.src; |
| 886 | 871 | spec.loc_host[0] = fk.addrs.v4addrs.dst; | |
| 887 | EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip)); | ||
| 888 | if (ip_is_fragment(ip)) | ||
| 889 | return -EPROTONOSUPPORT; | ||
| 890 | spec.ip_proto = ip->protocol; | ||
| 891 | spec.rem_host[0] = ip->saddr; | ||
| 892 | spec.loc_host[0] = ip->daddr; | ||
| 893 | EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4); | ||
| 894 | ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); | ||
| 895 | } else { | 872 | } else { |
| 896 | const struct ipv6hdr *ip6 = | 873 | memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr)); |
| 897 | (const struct ipv6hdr *)(skb->data + nhoff); | 874 | memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr)); |
| 898 | |||
| 899 | EFX_BUG_ON_PARANOID(skb_headlen(skb) < | ||
| 900 | nhoff + sizeof(*ip6) + 4); | ||
| 901 | spec.ip_proto = ip6->nexthdr; | ||
| 902 | memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr)); | ||
| 903 | memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr)); | ||
| 904 | ports = (const __be16 *)(ip6 + 1); | ||
| 905 | } | 875 | } |
| 906 | 876 | ||
| 907 | spec.rem_port = ports[0]; | 877 | spec.rem_port = fk.ports.src; |
| 908 | spec.loc_port = ports[1]; | 878 | spec.loc_port = fk.ports.dst; |
| 909 | 879 | ||
| 910 | rc = efx->type->filter_rfs_insert(efx, &spec); | 880 | rc = efx->type->filter_rfs_insert(efx, &spec); |
| 911 | if (rc < 0) | 881 | if (rc < 0) |
| 912 | return rc; | 882 | return rc; |
| 913 | 883 | ||
| 914 | /* Remember this so we can check whether to expire the filter later */ | 884 | /* Remember this so we can check whether to expire the filter later */ |
| 915 | efx->rps_flow_id[rc] = flow_id; | 885 | channel = efx_get_channel(efx, rxq_index); |
| 916 | channel = efx_get_channel(efx, skb_get_rx_queue(skb)); | 886 | channel->rps_flow_id[rc] = flow_id; |
| 917 | ++channel->rfs_filters_added; | 887 | ++channel->rfs_filters_added; |
| 918 | 888 | ||
| 919 | if (ether_type == htons(ETH_P_IP)) | 889 | if (spec.ether_type == htons(ETH_P_IP)) |
| 920 | netif_info(efx, rx_status, efx->net_dev, | 890 | netif_info(efx, rx_status, efx->net_dev, |
| 921 | "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", | 891 | "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", |
| 922 | (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", | 892 | (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", |
| 923 | spec.rem_host, ntohs(ports[0]), spec.loc_host, | 893 | spec.rem_host, ntohs(spec.rem_port), spec.loc_host, |
| 924 | ntohs(ports[1]), rxq_index, flow_id, rc); | 894 | ntohs(spec.loc_port), rxq_index, flow_id, rc); |
| 925 | else | 895 | else |
| 926 | netif_info(efx, rx_status, efx->net_dev, | 896 | netif_info(efx, rx_status, efx->net_dev, |
| 927 | "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", | 897 | "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", |
| 928 | (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", | 898 | (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", |
| 929 | spec.rem_host, ntohs(ports[0]), spec.loc_host, | 899 | spec.rem_host, ntohs(spec.rem_port), spec.loc_host, |
| 930 | ntohs(ports[1]), rxq_index, flow_id, rc); | 900 | ntohs(spec.loc_port), rxq_index, flow_id, rc); |
| 931 | 901 | ||
| 932 | return rc; | 902 | return rc; |
| 933 | } | 903 | } |
| @@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, | |||
| 935 | bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) | 905 | bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota) |
| 936 | { | 906 | { |
| 937 | bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); | 907 | bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index); |
| 938 | unsigned int index, size; | 908 | unsigned int channel_idx, index, size; |
| 939 | u32 flow_id; | 909 | u32 flow_id; |
| 940 | 910 | ||
| 941 | if (!spin_trylock_bh(&efx->filter_lock)) | 911 | if (!spin_trylock_bh(&efx->filter_lock)) |
| 942 | return false; | 912 | return false; |
| 943 | 913 | ||
| 944 | expire_one = efx->type->filter_rfs_expire_one; | 914 | expire_one = efx->type->filter_rfs_expire_one; |
| 915 | channel_idx = efx->rps_expire_channel; | ||
| 945 | index = efx->rps_expire_index; | 916 | index = efx->rps_expire_index; |
| 946 | size = efx->type->max_rx_ip_filters; | 917 | size = efx->type->max_rx_ip_filters; |
| 947 | while (quota--) { | 918 | while (quota--) { |
| 948 | flow_id = efx->rps_flow_id[index]; | 919 | struct efx_channel *channel = efx_get_channel(efx, channel_idx); |
| 949 | if (expire_one(efx, flow_id, index)) | 920 | flow_id = channel->rps_flow_id[index]; |
| 921 | |||
| 922 | if (flow_id != RPS_FLOW_ID_INVALID && | ||
| 923 | expire_one(efx, flow_id, index)) { | ||
| 950 | netif_info(efx, rx_status, efx->net_dev, | 924 | netif_info(efx, rx_status, efx->net_dev, |
| 951 | "expired filter %d [flow %u]\n", | 925 | "expired filter %d [queue %u flow %u]\n", |
| 952 | index, flow_id); | 926 | index, channel_idx, flow_id); |
| 953 | if (++index == size) | 927 | channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID; |
| 928 | } | ||
| 929 | if (++index == size) { | ||
| 930 | if (++channel_idx == efx->n_channels) | ||
| 931 | channel_idx = 0; | ||
| 954 | index = 0; | 932 | index = 0; |
| 933 | } | ||
| 955 | } | 934 | } |
| 935 | efx->rps_expire_channel = channel_idx; | ||
| 956 | efx->rps_expire_index = index; | 936 | efx->rps_expire_index = index; |
| 957 | 937 | ||
| 958 | spin_unlock_bh(&efx->filter_lock); | 938 | spin_unlock_bh(&efx->filter_lock); |
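
The rx.c hunk switches efx_filter_rfs() to the generic flow dissector and moves the flow-ID table into each channel, with RPS_FLOW_ID_INVALID marking unused slots; __efx_filter_rfs_expire() now walks (channel, index) pairs round-robin under a quota, wrapping both indices and resuming where it left off. A compact sketch of that quota-bounded scan with a sentinel; the table contents and sizes are fabricated:

#include <stdint.h>
#include <stdio.h>

#define INVALID_ID  0xFFFFFFFFu
#define NCHAN 2
#define NSLOT 4

static uint32_t flow_id[NCHAN][NSLOT] = {
    { 7, INVALID_ID, 9, INVALID_ID },
    { INVALID_ID, 3, INVALID_ID, 5 },
};

/* Persisted scan position, like rps_expire_channel / rps_expire_index. */
static unsigned int next_chan, next_slot;

/* Pretend every valid entry is stale; a real driver would ask the HW. */
static void try_expire(unsigned int c, unsigned int s)
{
    if (flow_id[c][s] == INVALID_ID)
        return;
    printf("expired filter %u [queue %u flow %u]\n", s, c, flow_id[c][s]);
    flow_id[c][s] = INVALID_ID;
}

static void expire_some(unsigned int quota)
{
    while (quota--) {
        try_expire(next_chan, next_slot);
        if (++next_slot == NSLOT) {       /* wrap slot, then channel */
            next_slot = 0;
            if (++next_chan == NCHAN)
                next_chan = 0;
        }
    }
}

int main(void)
{
    expire_some(5);   /* first pass covers part of the table */
    expire_some(5);   /* next call resumes where the last one stopped */
    return 0;
}
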
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 4f7283d05588..44da877d2483 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | |||
| @@ -156,7 +156,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw, | |||
| 156 | struct netdev_hw_addr *ha; | 156 | struct netdev_hw_addr *ha; |
| 157 | 157 | ||
| 158 | netdev_for_each_uc_addr(ha, dev) { | 158 | netdev_for_each_uc_addr(ha, dev) { |
| 159 | dwmac4_set_umac_addr(ioaddr, ha->addr, reg); | 159 | dwmac4_set_umac_addr(hw, ha->addr, reg); |
| 160 | reg++; | 160 | reg++; |
| 161 | } | 161 | } |
| 162 | } | 162 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index eac45d0c75e2..a473c182c91d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -3450,8 +3450,6 @@ int stmmac_resume(struct device *dev) | |||
| 3450 | if (!netif_running(ndev)) | 3450 | if (!netif_running(ndev)) |
| 3451 | return 0; | 3451 | return 0; |
| 3452 | 3452 | ||
| 3453 | spin_lock_irqsave(&priv->lock, flags); | ||
| 3454 | |||
| 3455 | /* Power Down bit, in the PM register, is cleared | 3453 | /* Power Down bit, in the PM register, is cleared |
| 3456 | * automatically as soon as a magic packet or a Wake-up frame | 3454 | * automatically as soon as a magic packet or a Wake-up frame |
| 3457 | * is received. Anyway, it's better to manually clear | 3455 | * is received. Anyway, it's better to manually clear |
| @@ -3459,7 +3457,9 @@ int stmmac_resume(struct device *dev) | |||
| 3459 | * from other devices (e.g. serial console). | 3457 | * from other devices (e.g. serial console). |
| 3460 | */ | 3458 | */ |
| 3461 | if (device_may_wakeup(priv->device)) { | 3459 | if (device_may_wakeup(priv->device)) { |
| 3460 | spin_lock_irqsave(&priv->lock, flags); | ||
| 3462 | priv->hw->mac->pmt(priv->hw, 0); | 3461 | priv->hw->mac->pmt(priv->hw, 0); |
| 3462 | spin_unlock_irqrestore(&priv->lock, flags); | ||
| 3463 | priv->irq_wake = 0; | 3463 | priv->irq_wake = 0; |
| 3464 | } else { | 3464 | } else { |
| 3465 | pinctrl_pm_select_default_state(priv->device); | 3465 | pinctrl_pm_select_default_state(priv->device); |
| @@ -3473,6 +3473,8 @@ int stmmac_resume(struct device *dev) | |||
| 3473 | 3473 | ||
| 3474 | netif_device_attach(ndev); | 3474 | netif_device_attach(ndev); |
| 3475 | 3475 | ||
| 3476 | spin_lock_irqsave(&priv->lock, flags); | ||
| 3477 | |||
| 3476 | priv->cur_rx = 0; | 3478 | priv->cur_rx = 0; |
| 3477 | priv->dirty_rx = 0; | 3479 | priv->dirty_rx = 0; |
| 3478 | priv->dirty_tx = 0; | 3480 | priv->dirty_tx = 0; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 3f83c369f56c..ec295851812b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | |||
| @@ -297,7 +297,7 @@ int stmmac_mdio_register(struct net_device *ndev) | |||
| 297 | return -ENOMEM; | 297 | return -ENOMEM; |
| 298 | 298 | ||
| 299 | if (mdio_bus_data->irqs) | 299 | if (mdio_bus_data->irqs) |
| 300 | memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq)); | 300 | memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq)); |
| 301 | 301 | ||
| 302 | #ifdef CONFIG_OF | 302 | #ifdef CONFIG_OF |
| 303 | if (priv->device->of_node) | 303 | if (priv->device->of_node) |
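The stmmac_mdio change is a classic wrong-source memcpy(): sizeof(new_bus->irq) bytes were copied starting at the platform-data struct itself instead of at the irqs array it points to. A minimal illustration with made-up types (the real definitions live in the stmmac platform headers):

#include <string.h>

struct demo_mdio_platdata {
        int bus_id;
        const int *irqs;                /* one entry per PHY address */
};

struct demo_mii_bus {
        int irq[32];
};

static void copy_phy_irqs(struct demo_mii_bus *bus,
                          const struct demo_mdio_platdata *pdata)
{
        /* source is the array the platform data points to;
         * length is the size of the destination array */
        memcpy(bus->irq, pdata->irqs, sizeof(bus->irq));
}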
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 4b08a2f52b3e..e6bb0ecb12c7 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
| @@ -1339,7 +1339,7 @@ static int cpsw_ndo_open(struct net_device *ndev) | |||
| 1339 | if (priv->coal_intvl != 0) { | 1339 | if (priv->coal_intvl != 0) { |
| 1340 | struct ethtool_coalesce coal; | 1340 | struct ethtool_coalesce coal; |
| 1341 | 1341 | ||
| 1342 | coal.rx_coalesce_usecs = (priv->coal_intvl << 4); | 1342 | coal.rx_coalesce_usecs = priv->coal_intvl; |
| 1343 | cpsw_set_coalesce(ndev, &coal); | 1343 | cpsw_set_coalesce(ndev, &coal); |
| 1344 | } | 1344 | } |
| 1345 | 1345 | ||
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index a0f64cba86ba..2ace126533cd 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
| @@ -990,7 +990,7 @@ static void team_port_disable(struct team *team, | |||
| 990 | #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ | 990 | #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ |
| 991 | NETIF_F_RXCSUM | NETIF_F_ALL_TSO) | 991 | NETIF_F_RXCSUM | NETIF_F_ALL_TSO) |
| 992 | 992 | ||
| 993 | static void __team_compute_features(struct team *team) | 993 | static void ___team_compute_features(struct team *team) |
| 994 | { | 994 | { |
| 995 | struct team_port *port; | 995 | struct team_port *port; |
| 996 | u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; | 996 | u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL; |
| @@ -1021,15 +1021,20 @@ static void __team_compute_features(struct team *team) | |||
| 1021 | team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; | 1021 | team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; |
| 1022 | if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) | 1022 | if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM)) |
| 1023 | team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; | 1023 | team->dev->priv_flags |= IFF_XMIT_DST_RELEASE; |
| 1024 | } | ||
| 1024 | 1025 | ||
| 1026 | static void __team_compute_features(struct team *team) | ||
| 1027 | { | ||
| 1028 | ___team_compute_features(team); | ||
| 1025 | netdev_change_features(team->dev); | 1029 | netdev_change_features(team->dev); |
| 1026 | } | 1030 | } |
| 1027 | 1031 | ||
| 1028 | static void team_compute_features(struct team *team) | 1032 | static void team_compute_features(struct team *team) |
| 1029 | { | 1033 | { |
| 1030 | mutex_lock(&team->lock); | 1034 | mutex_lock(&team->lock); |
| 1031 | __team_compute_features(team); | 1035 | ___team_compute_features(team); |
| 1032 | mutex_unlock(&team->lock); | 1036 | mutex_unlock(&team->lock); |
| 1037 | netdev_change_features(team->dev); | ||
| 1033 | } | 1038 | } |
| 1034 | 1039 | ||
| 1035 | static int team_port_enter(struct team *team, struct team_port *port) | 1040 | static int team_port_enter(struct team *team, struct team_port *port) |
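The team hunk splits __team_compute_features() so that netdev_change_features() is no longer called while team->lock is held; the recomputation stays under the mutex and the notification moves outside it (presumably to avoid re-entering the team lock from the feature-change path). The resulting shape, with placeholder names:

#include <linux/mutex.h>
#include <linux/netdevice.h>

struct demo_team {
        struct mutex lock;
        struct net_device *dev;
};

static void ___demo_compute(struct demo_team *t)
{
        /* recompute t->dev->vlan_features, hw_enc_features, priv_flags, ... */
}

static void demo_compute_features(struct demo_team *t)
{
        mutex_lock(&t->lock);
        ___demo_compute(t);             /* state changes under the lock */
        mutex_unlock(&t->lock);

        netdev_change_features(t->dev); /* notify outside the lock */
}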
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 36cd7f016a8d..9bbe0161a2f4 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c | |||
| @@ -473,7 +473,7 @@ static void read_bulk_callback(struct urb *urb) | |||
| 473 | goto goon; | 473 | goto goon; |
| 474 | } | 474 | } |
| 475 | 475 | ||
| 476 | if (!count || count < 4) | 476 | if (count < 4) |
| 477 | goto goon; | 477 | goto goon; |
| 478 | 478 | ||
| 479 | rx_status = buf[count - 2]; | 479 | rx_status = buf[count - 2]; |
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index d9d2806a47b1..dc989a8b5afb 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c | |||
| @@ -61,6 +61,8 @@ | |||
| 61 | #define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ | 61 | #define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \ |
| 62 | SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3) | 62 | SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3) |
| 63 | 63 | ||
| 64 | #define CARRIER_CHECK_DELAY (2 * HZ) | ||
| 65 | |||
| 64 | struct smsc95xx_priv { | 66 | struct smsc95xx_priv { |
| 65 | u32 mac_cr; | 67 | u32 mac_cr; |
| 66 | u32 hash_hi; | 68 | u32 hash_hi; |
| @@ -69,6 +71,9 @@ struct smsc95xx_priv { | |||
| 69 | spinlock_t mac_cr_lock; | 71 | spinlock_t mac_cr_lock; |
| 70 | u8 features; | 72 | u8 features; |
| 71 | u8 suspend_flags; | 73 | u8 suspend_flags; |
| 74 | bool link_ok; | ||
| 75 | struct delayed_work carrier_check; | ||
| 76 | struct usbnet *dev; | ||
| 72 | }; | 77 | }; |
| 73 | 78 | ||
| 74 | static bool turbo_mode = true; | 79 | static bool turbo_mode = true; |
| @@ -624,6 +629,44 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb) | |||
| 624 | intdata); | 629 | intdata); |
| 625 | } | 630 | } |
| 626 | 631 | ||
| 632 | static void set_carrier(struct usbnet *dev, bool link) | ||
| 633 | { | ||
| 634 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); | ||
| 635 | |||
| 636 | if (pdata->link_ok == link) | ||
| 637 | return; | ||
| 638 | |||
| 639 | pdata->link_ok = link; | ||
| 640 | |||
| 641 | if (link) | ||
| 642 | usbnet_link_change(dev, 1, 0); | ||
| 643 | else | ||
| 644 | usbnet_link_change(dev, 0, 0); | ||
| 645 | } | ||
| 646 | |||
| 647 | static void check_carrier(struct work_struct *work) | ||
| 648 | { | ||
| 649 | struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv, | ||
| 650 | carrier_check.work); | ||
| 651 | struct usbnet *dev = pdata->dev; | ||
| 652 | int ret; | ||
| 653 | |||
| 654 | if (pdata->suspend_flags != 0) | ||
| 655 | return; | ||
| 656 | |||
| 657 | ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR); | ||
| 658 | if (ret < 0) { | ||
| 659 | netdev_warn(dev->net, "Failed to read MII_BMSR\n"); | ||
| 660 | return; | ||
| 661 | } | ||
| 662 | if (ret & BMSR_LSTATUS) | ||
| 663 | set_carrier(dev, 1); | ||
| 664 | else | ||
| 665 | set_carrier(dev, 0); | ||
| 666 | |||
| 667 | schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); | ||
| 668 | } | ||
| 669 | |||
| 627 | /* Enable or disable Tx & Rx checksum offload engines */ | 670 | /* Enable or disable Tx & Rx checksum offload engines */ |
| 628 | static int smsc95xx_set_features(struct net_device *netdev, | 671 | static int smsc95xx_set_features(struct net_device *netdev, |
| 629 | netdev_features_t features) | 672 | netdev_features_t features) |
| @@ -1165,13 +1208,20 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 1165 | dev->net->flags |= IFF_MULTICAST; | 1208 | dev->net->flags |= IFF_MULTICAST; |
| 1166 | dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; | 1209 | dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; |
| 1167 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; | 1210 | dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; |
| 1211 | |||
| 1212 | pdata->dev = dev; | ||
| 1213 | INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier); | ||
| 1214 | schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); | ||
| 1215 | |||
| 1168 | return 0; | 1216 | return 0; |
| 1169 | } | 1217 | } |
| 1170 | 1218 | ||
| 1171 | static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) | 1219 | static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf) |
| 1172 | { | 1220 | { |
| 1173 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); | 1221 | struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]); |
| 1222 | |||
| 1174 | if (pdata) { | 1223 | if (pdata) { |
| 1224 | cancel_delayed_work(&pdata->carrier_check); | ||
| 1175 | netif_dbg(dev, ifdown, dev->net, "free pdata\n"); | 1225 | netif_dbg(dev, ifdown, dev->net, "free pdata\n"); |
| 1176 | kfree(pdata); | 1226 | kfree(pdata); |
| 1177 | pdata = NULL; | 1227 | pdata = NULL; |
| @@ -1695,6 +1745,7 @@ static int smsc95xx_resume(struct usb_interface *intf) | |||
| 1695 | 1745 | ||
| 1696 | /* do this first to ensure it's cleared even in error case */ | 1746 | /* do this first to ensure it's cleared even in error case */ |
| 1697 | pdata->suspend_flags = 0; | 1747 | pdata->suspend_flags = 0; |
| 1748 | schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY); | ||
| 1698 | 1749 | ||
| 1699 | if (suspend_flags & SUSPEND_ALLMODES) { | 1750 | if (suspend_flags & SUSPEND_ALLMODES) { |
| 1700 | /* clear wake-up sources */ | 1751 | /* clear wake-up sources */ |
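The smsc95xx hunks add a self-rescheduling delayed work item that polls the PHY's BMSR every CARRIER_CHECK_DELAY and reports changes through usbnet_link_change(); it is armed at bind and resume and cancelled at unbind. A bare skeleton of that pattern (placeholder names; cancel_delayed_work_sync() is used here for simplicity, while the driver itself uses cancel_delayed_work()):

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define DEMO_CHECK_DELAY (2 * HZ)

struct demo_priv {
        struct delayed_work carrier_check;
};

static void demo_carrier_poll(struct work_struct *work)
{
        struct demo_priv *p = container_of(work, struct demo_priv,
                                           carrier_check.work);

        /* ... read the PHY status and report the link state here ... */

        schedule_delayed_work(&p->carrier_check, DEMO_CHECK_DELAY);
}

static void demo_start_poll(struct demo_priv *p)
{
        INIT_DELAYED_WORK(&p->carrier_check, demo_carrier_poll);
        schedule_delayed_work(&p->carrier_check, DEMO_CHECK_DELAY);
}

static void demo_stop_poll(struct demo_priv *p)
{
        cancel_delayed_work_sync(&p->carrier_check);
}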
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 49d84e540343..e0638e556fe7 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -1925,24 +1925,11 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
| 1925 | 1925 | ||
| 1926 | virtio_device_ready(vdev); | 1926 | virtio_device_ready(vdev); |
| 1927 | 1927 | ||
| 1928 | /* Last of all, set up some receive buffers. */ | ||
| 1929 | for (i = 0; i < vi->curr_queue_pairs; i++) { | ||
| 1930 | try_fill_recv(vi, &vi->rq[i], GFP_KERNEL); | ||
| 1931 | |||
| 1932 | /* If we didn't even get one input buffer, we're useless. */ | ||
| 1933 | if (vi->rq[i].vq->num_free == | ||
| 1934 | virtqueue_get_vring_size(vi->rq[i].vq)) { | ||
| 1935 | free_unused_bufs(vi); | ||
| 1936 | err = -ENOMEM; | ||
| 1937 | goto free_recv_bufs; | ||
| 1938 | } | ||
| 1939 | } | ||
| 1940 | |||
| 1941 | vi->nb.notifier_call = &virtnet_cpu_callback; | 1928 | vi->nb.notifier_call = &virtnet_cpu_callback; |
| 1942 | err = register_hotcpu_notifier(&vi->nb); | 1929 | err = register_hotcpu_notifier(&vi->nb); |
| 1943 | if (err) { | 1930 | if (err) { |
| 1944 | pr_debug("virtio_net: registering cpu notifier failed\n"); | 1931 | pr_debug("virtio_net: registering cpu notifier failed\n"); |
| 1945 | goto free_recv_bufs; | 1932 | goto free_unregister_netdev; |
| 1946 | } | 1933 | } |
| 1947 | 1934 | ||
| 1948 | /* Assume link up if device can't report link status, | 1935 | /* Assume link up if device can't report link status, |
| @@ -1960,10 +1947,9 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
| 1960 | 1947 | ||
| 1961 | return 0; | 1948 | return 0; |
| 1962 | 1949 | ||
| 1963 | free_recv_bufs: | 1950 | free_unregister_netdev: |
| 1964 | vi->vdev->config->reset(vdev); | 1951 | vi->vdev->config->reset(vdev); |
| 1965 | 1952 | ||
| 1966 | free_receive_bufs(vi); | ||
| 1967 | unregister_netdev(dev); | 1953 | unregister_netdev(dev); |
| 1968 | free_vqs: | 1954 | free_vqs: |
| 1969 | cancel_delayed_work_sync(&vi->refill); | 1955 | cancel_delayed_work_sync(&vi->refill); |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index db8022ae415b..08885bc8d6db 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
| @@ -1369,7 +1369,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, | |||
| 1369 | rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; | 1369 | rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; |
| 1370 | 1370 | ||
| 1371 | segCnt = rcdlro->segCnt; | 1371 | segCnt = rcdlro->segCnt; |
| 1372 | BUG_ON(segCnt <= 1); | 1372 | WARN_ON_ONCE(segCnt == 0); |
| 1373 | mss = rcdlro->mss; | 1373 | mss = rcdlro->mss; |
| 1374 | if (unlikely(segCnt <= 1)) | 1374 | if (unlikely(segCnt <= 1)) |
| 1375 | segCnt = 0; | 1375 | segCnt = 0; |
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index c4825392d64b..3d2b64e63408 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
| @@ -69,10 +69,10 @@ | |||
| 69 | /* | 69 | /* |
| 70 | * Version numbers | 70 | * Version numbers |
| 71 | */ | 71 | */ |
| 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k" | 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.8.0-k" |
| 73 | 73 | ||
| 74 | /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ | 74 | /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ |
| 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040700 | 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040800 |
| 76 | 76 | ||
| 77 | #if defined(CONFIG_PCI_MSI) | 77 | #if defined(CONFIG_PCI_MSI) |
| 78 | /* RSS only makes sense if MSI-X is supported. */ | 78 | /* RSS only makes sense if MSI-X is supported. */ |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 8ff30c3bdfce..f999db2f97b4 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -3086,6 +3086,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, | |||
| 3086 | if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) | 3086 | if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) |
| 3087 | conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; | 3087 | conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL; |
| 3088 | 3088 | ||
| 3089 | if (tb[IFLA_MTU]) | ||
| 3090 | conf.mtu = nla_get_u32(tb[IFLA_MTU]); | ||
| 3091 | |||
| 3089 | err = vxlan_dev_configure(src_net, dev, &conf); | 3092 | err = vxlan_dev_configure(src_net, dev, &conf); |
| 3090 | switch (err) { | 3093 | switch (err) { |
| 3091 | case -ENODEV: | 3094 | case -ENODEV: |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index d0631b6cfd53..62f475e31077 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | |||
| @@ -2540,12 +2540,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, | |||
| 2540 | const u8 *mac, struct station_info *sinfo) | 2540 | const u8 *mac, struct station_info *sinfo) |
| 2541 | { | 2541 | { |
| 2542 | struct brcmf_if *ifp = netdev_priv(ndev); | 2542 | struct brcmf_if *ifp = netdev_priv(ndev); |
| 2543 | struct brcmf_scb_val_le scb_val; | ||
| 2543 | s32 err = 0; | 2544 | s32 err = 0; |
| 2544 | struct brcmf_sta_info_le sta_info_le; | 2545 | struct brcmf_sta_info_le sta_info_le; |
| 2545 | u32 sta_flags; | 2546 | u32 sta_flags; |
| 2546 | u32 is_tdls_peer; | 2547 | u32 is_tdls_peer; |
| 2547 | s32 total_rssi; | 2548 | s32 total_rssi; |
| 2548 | s32 count_rssi; | 2549 | s32 count_rssi; |
| 2550 | int rssi; | ||
| 2549 | u32 i; | 2551 | u32 i; |
| 2550 | 2552 | ||
| 2551 | brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac); | 2553 | brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac); |
| @@ -2629,6 +2631,20 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, | |||
| 2629 | sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); | 2631 | sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); |
| 2630 | total_rssi /= count_rssi; | 2632 | total_rssi /= count_rssi; |
| 2631 | sinfo->signal = total_rssi; | 2633 | sinfo->signal = total_rssi; |
| 2634 | } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED, | ||
| 2635 | &ifp->vif->sme_state)) { | ||
| 2636 | memset(&scb_val, 0, sizeof(scb_val)); | ||
| 2637 | err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI, | ||
| 2638 | &scb_val, sizeof(scb_val)); | ||
| 2639 | if (err) { | ||
| 2640 | brcmf_err("Could not get rssi (%d)\n", err); | ||
| 2641 | goto done; | ||
| 2642 | } else { | ||
| 2643 | rssi = le32_to_cpu(scb_val.val); | ||
| 2644 | sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); | ||
| 2645 | sinfo->signal = rssi; | ||
| 2646 | brcmf_dbg(CONN, "RSSI %d dBm\n", rssi); | ||
| 2647 | } | ||
| 2632 | } | 2648 | } |
| 2633 | } | 2649 | } |
| 2634 | done: | 2650 | done: |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 68f1ce02f4bf..2b9a2bc429d6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c | |||
| @@ -1157,6 +1157,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) | |||
| 1157 | brcmu_pkt_buf_free_skb(skb); | 1157 | brcmu_pkt_buf_free_skb(skb); |
| 1158 | return; | 1158 | return; |
| 1159 | } | 1159 | } |
| 1160 | |||
| 1161 | skb->protocol = eth_type_trans(skb, ifp->ndev); | ||
| 1160 | brcmf_netif_rx(ifp, skb); | 1162 | brcmf_netif_rx(ifp, skb); |
| 1161 | } | 1163 | } |
| 1162 | 1164 | ||
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 9ed0ed1bf514..4dd5adcdd29b 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -2776,6 +2776,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, | |||
| 2776 | if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || | 2776 | if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || |
| 2777 | !info->attrs[HWSIM_ATTR_FLAGS] || | 2777 | !info->attrs[HWSIM_ATTR_FLAGS] || |
| 2778 | !info->attrs[HWSIM_ATTR_COOKIE] || | 2778 | !info->attrs[HWSIM_ATTR_COOKIE] || |
| 2779 | !info->attrs[HWSIM_ATTR_SIGNAL] || | ||
| 2779 | !info->attrs[HWSIM_ATTR_TX_INFO]) | 2780 | !info->attrs[HWSIM_ATTR_TX_INFO]) |
| 2780 | goto out; | 2781 | goto out; |
| 2781 | 2782 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 0f48048b8654..3a0faa8fe9d4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c | |||
| @@ -54,7 +54,7 @@ EXPORT_SYMBOL(channel5g_80m); | |||
| 54 | void rtl_addr_delay(u32 addr) | 54 | void rtl_addr_delay(u32 addr) |
| 55 | { | 55 | { |
| 56 | if (addr == 0xfe) | 56 | if (addr == 0xfe) |
| 57 | msleep(50); | 57 | mdelay(50); |
| 58 | else if (addr == 0xfd) | 58 | else if (addr == 0xfd) |
| 59 | msleep(5); | 59 | msleep(5); |
| 60 | else if (addr == 0xfc) | 60 | else if (addr == 0xfc) |
| @@ -75,7 +75,7 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr, | |||
| 75 | rtl_addr_delay(addr); | 75 | rtl_addr_delay(addr); |
| 76 | } else { | 76 | } else { |
| 77 | rtl_set_rfreg(hw, rfpath, addr, mask, data); | 77 | rtl_set_rfreg(hw, rfpath, addr, mask, data); |
| 78 | usleep_range(1, 2); | 78 | udelay(1); |
| 79 | } | 79 | } |
| 80 | } | 80 | } |
| 81 | EXPORT_SYMBOL(rtl_rfreg_delay); | 81 | EXPORT_SYMBOL(rtl_rfreg_delay); |
| @@ -86,7 +86,7 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data) | |||
| 86 | rtl_addr_delay(addr); | 86 | rtl_addr_delay(addr); |
| 87 | } else { | 87 | } else { |
| 88 | rtl_set_bbreg(hw, addr, MASKDWORD, data); | 88 | rtl_set_bbreg(hw, addr, MASKDWORD, data); |
| 89 | usleep_range(1, 2); | 89 | udelay(1); |
| 90 | } | 90 | } |
| 91 | } | 91 | } |
| 92 | EXPORT_SYMBOL(rtl_bb_delay); | 92 | EXPORT_SYMBOL(rtl_bb_delay); |
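The rtlwifi hunk swaps msleep(50) for mdelay(50) and usleep_range(1, 2) for udelay(1). The distinction is that the sleeping variants may schedule and therefore cannot be used in atomic context; the switch only makes sense if these register paths can be reached from such a context, which is an assumption here rather than something visible in the diff. A trivial, purely illustrative helper:

#include <linux/delay.h>
#include <linux/types.h>

static void demo_settle_delay(bool atomic_ctx)
{
        if (atomic_ctx)
                mdelay(50);     /* busy-waits; usable where sleeping is not */
        else
                msleep(50);     /* may schedule; process context only */
}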
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 78dca3193ca4..befac5b19490 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -1679,9 +1679,14 @@ static int nvme_pci_enable(struct nvme_dev *dev) | |||
| 1679 | 1679 | ||
| 1680 | static void nvme_dev_unmap(struct nvme_dev *dev) | 1680 | static void nvme_dev_unmap(struct nvme_dev *dev) |
| 1681 | { | 1681 | { |
| 1682 | struct pci_dev *pdev = to_pci_dev(dev->dev); | ||
| 1683 | int bars; | ||
| 1684 | |||
| 1682 | if (dev->bar) | 1685 | if (dev->bar) |
| 1683 | iounmap(dev->bar); | 1686 | iounmap(dev->bar); |
| 1684 | pci_release_regions(to_pci_dev(dev->dev)); | 1687 | |
| 1688 | bars = pci_select_bars(pdev, IORESOURCE_MEM); | ||
| 1689 | pci_release_selected_regions(pdev, bars); | ||
| 1685 | } | 1690 | } |
| 1686 | 1691 | ||
| 1687 | static void nvme_pci_disable(struct nvme_dev *dev) | 1692 | static void nvme_pci_disable(struct nvme_dev *dev) |
| @@ -1924,7 +1929,7 @@ static int nvme_dev_map(struct nvme_dev *dev) | |||
| 1924 | 1929 | ||
| 1925 | return 0; | 1930 | return 0; |
| 1926 | release: | 1931 | release: |
| 1927 | pci_release_regions(pdev); | 1932 | pci_release_selected_regions(pdev, bars); |
| 1928 | return -ENODEV; | 1933 | return -ENODEV; |
| 1929 | } | 1934 | } |
| 1930 | 1935 | ||
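The nvme change makes the unmap path symmetric with the map path: it releases exactly the memory BARs picked by pci_select_bars(IORESOURCE_MEM) instead of calling pci_release_regions() on everything. A generic sketch of that pairing, with "demo" names as placeholders:

#include <linux/io.h>
#include <linux/pci.h>

static int demo_map(struct pci_dev *pdev, void __iomem **bar)
{
        int bars = pci_select_bars(pdev, IORESOURCE_MEM);

        if (pci_request_selected_regions(pdev, bars, "demo"))
                return -ENODEV;

        *bar = pci_ioremap_bar(pdev, 0);
        if (!*bar) {
                pci_release_selected_regions(pdev, bars);
                return -ENODEV;
        }
        return 0;
}

static void demo_unmap(struct pci_dev *pdev, void __iomem *bar)
{
        if (bar)
                iounmap(bar);
        pci_release_selected_regions(pdev,
                                     pci_select_bars(pdev, IORESOURCE_MEM));
}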
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 14f2f8c7c260..33daffc4392c 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
| @@ -395,7 +395,7 @@ static int unflatten_dt_nodes(const void *blob, | |||
| 395 | struct device_node **nodepp) | 395 | struct device_node **nodepp) |
| 396 | { | 396 | { |
| 397 | struct device_node *root; | 397 | struct device_node *root; |
| 398 | int offset = 0, depth = 0; | 398 | int offset = 0, depth = 0, initial_depth = 0; |
| 399 | #define FDT_MAX_DEPTH 64 | 399 | #define FDT_MAX_DEPTH 64 |
| 400 | unsigned int fpsizes[FDT_MAX_DEPTH]; | 400 | unsigned int fpsizes[FDT_MAX_DEPTH]; |
| 401 | struct device_node *nps[FDT_MAX_DEPTH]; | 401 | struct device_node *nps[FDT_MAX_DEPTH]; |
| @@ -405,11 +405,22 @@ static int unflatten_dt_nodes(const void *blob, | |||
| 405 | if (nodepp) | 405 | if (nodepp) |
| 406 | *nodepp = NULL; | 406 | *nodepp = NULL; |
| 407 | 407 | ||
| 408 | /* | ||
| 409 | * We're unflattening device sub-tree if @dad is valid. There are | ||
| 410 | * possibly multiple nodes in the first level of depth. We need to | ||
| 411 | * set @depth to 1 to make fdt_next_node() happy as it bails | ||
| 412 | * immediately when negative @depth is found. Otherwise, the device | ||
| 413 | * nodes except the first one won't be unflattened successfully. | ||
| 414 | */ | ||
| 415 | if (dad) | ||
| 416 | depth = initial_depth = 1; | ||
| 417 | |||
| 408 | root = dad; | 418 | root = dad; |
| 409 | fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0; | 419 | fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0; |
| 410 | nps[depth] = dad; | 420 | nps[depth] = dad; |
| 421 | |||
| 411 | for (offset = 0; | 422 | for (offset = 0; |
| 412 | offset >= 0 && depth >= 0; | 423 | offset >= 0 && depth >= initial_depth; |
| 413 | offset = fdt_next_node(blob, offset, &depth)) { | 424 | offset = fdt_next_node(blob, offset, &depth)) { |
| 414 | if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH)) | 425 | if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH)) |
| 415 | continue; | 426 | continue; |
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index e7bfc175b8e1..6ec743faabe8 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
| @@ -386,13 +386,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) | |||
| 386 | EXPORT_SYMBOL_GPL(of_irq_to_resource); | 386 | EXPORT_SYMBOL_GPL(of_irq_to_resource); |
| 387 | 387 | ||
| 388 | /** | 388 | /** |
| 389 | * of_irq_get - Decode a node's IRQ and return it as a Linux irq number | 389 | * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number |
| 390 | * @dev: pointer to device tree node | 390 | * @dev: pointer to device tree node |
| 391 | * @index: zero-based index of the irq | 391 | * @index: zero-based index of the IRQ |
| 392 | * | ||
| 393 | * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain | ||
| 394 | * is not yet created. | ||
| 395 | * | 392 | * |
| 393 | * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or | ||
| 394 | * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case | ||
| 395 | * of any other failure. | ||
| 396 | */ | 396 | */ |
| 397 | int of_irq_get(struct device_node *dev, int index) | 397 | int of_irq_get(struct device_node *dev, int index) |
| 398 | { | 398 | { |
| @@ -413,12 +413,13 @@ int of_irq_get(struct device_node *dev, int index) | |||
| 413 | EXPORT_SYMBOL_GPL(of_irq_get); | 413 | EXPORT_SYMBOL_GPL(of_irq_get); |
| 414 | 414 | ||
| 415 | /** | 415 | /** |
| 416 | * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number | 416 | * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number |
| 417 | * @dev: pointer to device tree node | 417 | * @dev: pointer to device tree node |
| 418 | * @name: irq name | 418 | * @name: IRQ name |
| 419 | * | 419 | * |
| 420 | * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain | 420 | * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or |
| 421 | * is not yet created, or error code in case of any other failure. | 421 | * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case |
| 422 | * of any other failure. | ||
| 422 | */ | 423 | */ |
| 423 | int of_irq_get_byname(struct device_node *dev, const char *name) | 424 | int of_irq_get_byname(struct device_node *dev, const char *name) |
| 424 | { | 425 | { |
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index ed01c0172e4a..216648233874 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c | |||
| @@ -127,8 +127,15 @@ static int __init __reserved_mem_alloc_size(unsigned long node, | |||
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | /* Need adjust the alignment to satisfy the CMA requirement */ | 129 | /* Need adjust the alignment to satisfy the CMA requirement */ |
| 130 | if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool")) | 130 | if (IS_ENABLED(CONFIG_CMA) |
| 131 | align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order)); | 131 | && of_flat_dt_is_compatible(node, "shared-dma-pool") |
| 132 | && of_get_flat_dt_prop(node, "reusable", NULL) | ||
| 133 | && !of_get_flat_dt_prop(node, "no-map", NULL)) { | ||
| 134 | unsigned long order = | ||
| 135 | max_t(unsigned long, MAX_ORDER - 1, pageblock_order); | ||
| 136 | |||
| 137 | align = max(align, (phys_addr_t)PAGE_SIZE << order); | ||
| 138 | } | ||
| 132 | 139 | ||
| 133 | prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); | 140 | prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); |
| 134 | if (prop) { | 141 | if (prop) { |
diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c index dfbab61a1b47..1fa3a3219c45 100644 --- a/drivers/pci/vc.c +++ b/drivers/pci/vc.c | |||
| @@ -221,9 +221,9 @@ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos, | |||
| 221 | else | 221 | else |
| 222 | pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL, | 222 | pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL, |
| 223 | *(u16 *)buf); | 223 | *(u16 *)buf); |
| 224 | buf += 2; | 224 | buf += 4; |
| 225 | } | 225 | } |
| 226 | len += 2; | 226 | len += 4; |
| 227 | 227 | ||
| 228 | /* | 228 | /* |
| 229 | * If we have any Low Priority VCs and a VC Arbitration Table Offset | 229 | * If we have any Low Priority VCs and a VC Arbitration Table Offset |
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index f2d01d4d9364..140436a046c0 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c | |||
| @@ -950,17 +950,14 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu) | |||
| 950 | 950 | ||
| 951 | /* For SPIs, we need to track the affinity per IRQ */ | 951 | /* For SPIs, we need to track the affinity per IRQ */ |
| 952 | if (using_spi) { | 952 | if (using_spi) { |
| 953 | if (i >= pdev->num_resources) { | 953 | if (i >= pdev->num_resources) |
| 954 | of_node_put(dn); | ||
| 955 | break; | 954 | break; |
| 956 | } | ||
| 957 | 955 | ||
| 958 | irqs[i] = cpu; | 956 | irqs[i] = cpu; |
| 959 | } | 957 | } |
| 960 | 958 | ||
| 961 | /* Keep track of the CPUs containing this PMU type */ | 959 | /* Keep track of the CPUs containing this PMU type */ |
| 962 | cpumask_set_cpu(cpu, &pmu->supported_cpus); | 960 | cpumask_set_cpu(cpu, &pmu->supported_cpus); |
| 963 | of_node_put(dn); | ||
| 964 | i++; | 961 | i++; |
| 965 | } while (1); | 962 | } while (1); |
| 966 | 963 | ||
| @@ -995,9 +992,6 @@ int arm_pmu_device_probe(struct platform_device *pdev, | |||
| 995 | 992 | ||
| 996 | armpmu_init(pmu); | 993 | armpmu_init(pmu); |
| 997 | 994 | ||
| 998 | if (!__oprofile_cpu_pmu) | ||
| 999 | __oprofile_cpu_pmu = pmu; | ||
| 1000 | |||
| 1001 | pmu->plat_device = pdev; | 995 | pmu->plat_device = pdev; |
| 1002 | 996 | ||
| 1003 | if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { | 997 | if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) { |
| @@ -1016,8 +1010,8 @@ int arm_pmu_device_probe(struct platform_device *pdev, | |||
| 1016 | if (!ret) | 1010 | if (!ret) |
| 1017 | ret = init_fn(pmu); | 1011 | ret = init_fn(pmu); |
| 1018 | } else { | 1012 | } else { |
| 1019 | ret = probe_current_pmu(pmu, probe_table); | ||
| 1020 | cpumask_setall(&pmu->supported_cpus); | 1013 | cpumask_setall(&pmu->supported_cpus); |
| 1014 | ret = probe_current_pmu(pmu, probe_table); | ||
| 1021 | } | 1015 | } |
| 1022 | 1016 | ||
| 1023 | if (ret) { | 1017 | if (ret) { |
| @@ -1033,6 +1027,9 @@ int arm_pmu_device_probe(struct platform_device *pdev, | |||
| 1033 | if (ret) | 1027 | if (ret) |
| 1034 | goto out_destroy; | 1028 | goto out_destroy; |
| 1035 | 1029 | ||
| 1030 | if (!__oprofile_cpu_pmu) | ||
| 1031 | __oprofile_cpu_pmu = pmu; | ||
| 1032 | |||
| 1036 | pr_info("enabled with %s PMU driver, %d counters available\n", | 1033 | pr_info("enabled with %s PMU driver, %d counters available\n", |
| 1037 | pmu->name, pmu->num_events); | 1034 | pmu->name, pmu->num_events); |
| 1038 | 1035 | ||
| @@ -1043,6 +1040,7 @@ out_destroy: | |||
| 1043 | out_free: | 1040 | out_free: |
| 1044 | pr_info("%s: failed to register PMU devices!\n", | 1041 | pr_info("%s: failed to register PMU devices!\n", |
| 1045 | of_node_full_name(node)); | 1042 | of_node_full_name(node)); |
| 1043 | kfree(pmu->irq_affinity); | ||
| 1046 | kfree(pmu); | 1044 | kfree(pmu); |
| 1047 | return ret; | 1045 | return ret; |
| 1048 | } | 1046 | } |
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c index cc093ebfda94..8b851f718123 100644 --- a/drivers/phy/phy-exynos-mipi-video.c +++ b/drivers/phy/phy-exynos-mipi-video.c | |||
| @@ -233,8 +233,12 @@ static inline int __is_running(const struct exynos_mipi_phy_desc *data, | |||
| 233 | struct exynos_mipi_video_phy *state) | 233 | struct exynos_mipi_video_phy *state) |
| 234 | { | 234 | { |
| 235 | u32 val; | 235 | u32 val; |
| 236 | int ret; | ||
| 237 | |||
| 238 | ret = regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val); | ||
| 239 | if (ret) | ||
| 240 | return 0; | ||
| 236 | 241 | ||
| 237 | regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val); | ||
| 238 | return val & data->resetn_val; | 242 | return val & data->resetn_val; |
| 239 | } | 243 | } |
| 240 | 244 | ||
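The exynos-mipi-video hunk stops ignoring the return value of regmap_read(); on failure the PHY is reported as not running rather than testing a possibly uninitialized value. The same check in generic form (not the driver's __is_running()):

#include <linux/regmap.h>

static int demo_is_running(struct regmap *map, unsigned int reg,
                           unsigned int mask)
{
        unsigned int val;

        if (regmap_read(map, reg, &val))
                return 0;       /* read failed: treat as not running */

        return !!(val & mask);
}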
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index 0a477d24cf76..bf46844dc387 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c | |||
| @@ -293,11 +293,18 @@ static int ti_pipe3_init(struct phy *x) | |||
| 293 | ret = ti_pipe3_dpll_wait_lock(phy); | 293 | ret = ti_pipe3_dpll_wait_lock(phy); |
| 294 | } | 294 | } |
| 295 | 295 | ||
| 296 | /* Program the DPLL only if not locked */ | 296 | /* SATA has issues if re-programmed when locked */ |
| 297 | val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); | 297 | val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); |
| 298 | if (!(val & PLL_LOCK)) | 298 | if ((val & PLL_LOCK) && of_device_is_compatible(phy->dev->of_node, |
| 299 | if (ti_pipe3_dpll_program(phy)) | 299 | "ti,phy-pipe3-sata")) |
| 300 | return -EINVAL; | 300 | return ret; |
| 301 | |||
| 302 | /* Program the DPLL */ | ||
| 303 | ret = ti_pipe3_dpll_program(phy); | ||
| 304 | if (ret) { | ||
| 305 | ti_pipe3_disable_clocks(phy); | ||
| 306 | return -EINVAL; | ||
| 307 | } | ||
| 301 | 308 | ||
| 302 | return ret; | 309 | return ret; |
| 303 | } | 310 | } |
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c index 6b6af6cba454..d9b10a39a2cf 100644 --- a/drivers/phy/phy-twl4030-usb.c +++ b/drivers/phy/phy-twl4030-usb.c | |||
| @@ -463,7 +463,8 @@ static int twl4030_phy_power_on(struct phy *phy) | |||
| 463 | twl4030_usb_set_mode(twl, twl->usb_mode); | 463 | twl4030_usb_set_mode(twl, twl->usb_mode); |
| 464 | if (twl->usb_mode == T2_USB_MODE_ULPI) | 464 | if (twl->usb_mode == T2_USB_MODE_ULPI) |
| 465 | twl4030_i2c_access(twl, 0); | 465 | twl4030_i2c_access(twl, 0); |
| 466 | schedule_delayed_work(&twl->id_workaround_work, 0); | 466 | twl->linkstat = MUSB_UNKNOWN; |
| 467 | schedule_delayed_work(&twl->id_workaround_work, HZ); | ||
| 467 | 468 | ||
| 468 | return 0; | 469 | return 0; |
| 469 | } | 470 | } |
| @@ -537,6 +538,7 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl) | |||
| 537 | struct twl4030_usb *twl = _twl; | 538 | struct twl4030_usb *twl = _twl; |
| 538 | enum musb_vbus_id_status status; | 539 | enum musb_vbus_id_status status; |
| 539 | bool status_changed = false; | 540 | bool status_changed = false; |
| 541 | int err; | ||
| 540 | 542 | ||
| 541 | status = twl4030_usb_linkstat(twl); | 543 | status = twl4030_usb_linkstat(twl); |
| 542 | 544 | ||
| @@ -567,7 +569,9 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl) | |||
| 567 | pm_runtime_mark_last_busy(twl->dev); | 569 | pm_runtime_mark_last_busy(twl->dev); |
| 568 | pm_runtime_put_autosuspend(twl->dev); | 570 | pm_runtime_put_autosuspend(twl->dev); |
| 569 | } | 571 | } |
| 570 | musb_mailbox(status); | 572 | err = musb_mailbox(status); |
| 573 | if (err) | ||
| 574 | twl->linkstat = MUSB_UNKNOWN; | ||
| 571 | } | 575 | } |
| 572 | 576 | ||
| 573 | /* don't schedule during sleep - irq works right then */ | 577 | /* don't schedule during sleep - irq works right then */ |
| @@ -595,7 +599,8 @@ static int twl4030_phy_init(struct phy *phy) | |||
| 595 | struct twl4030_usb *twl = phy_get_drvdata(phy); | 599 | struct twl4030_usb *twl = phy_get_drvdata(phy); |
| 596 | 600 | ||
| 597 | pm_runtime_get_sync(twl->dev); | 601 | pm_runtime_get_sync(twl->dev); |
| 598 | schedule_delayed_work(&twl->id_workaround_work, 0); | 602 | twl->linkstat = MUSB_UNKNOWN; |
| 603 | schedule_delayed_work(&twl->id_workaround_work, HZ); | ||
| 599 | pm_runtime_mark_last_busy(twl->dev); | 604 | pm_runtime_mark_last_busy(twl->dev); |
| 600 | pm_runtime_put_autosuspend(twl->dev); | 605 | pm_runtime_put_autosuspend(twl->dev); |
| 601 | 606 | ||
| @@ -763,7 +768,8 @@ static int twl4030_usb_remove(struct platform_device *pdev) | |||
| 763 | if (cable_present(twl->linkstat)) | 768 | if (cable_present(twl->linkstat)) |
| 764 | pm_runtime_put_noidle(twl->dev); | 769 | pm_runtime_put_noidle(twl->dev); |
| 765 | pm_runtime_mark_last_busy(twl->dev); | 770 | pm_runtime_mark_last_busy(twl->dev); |
| 766 | pm_runtime_put_sync_suspend(twl->dev); | 771 | pm_runtime_dont_use_autosuspend(&pdev->dev); |
| 772 | pm_runtime_put_sync(twl->dev); | ||
| 767 | pm_runtime_disable(twl->dev); | 773 | pm_runtime_disable(twl->dev); |
| 768 | 774 | ||
| 769 | /* autogate 60MHz ULPI clock, | 775 | /* autogate 60MHz ULPI clock, |
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 207b13b618cf..a607655d7830 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c | |||
| @@ -1256,9 +1256,10 @@ static void mtk_eint_irq_handler(struct irq_desc *desc) | |||
| 1256 | const struct mtk_desc_pin *pin; | 1256 | const struct mtk_desc_pin *pin; |
| 1257 | 1257 | ||
| 1258 | chained_irq_enter(chip, desc); | 1258 | chained_irq_enter(chip, desc); |
| 1259 | for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) { | 1259 | for (eint_num = 0; |
| 1260 | eint_num < pctl->devdata->ap_num; | ||
| 1261 | eint_num += 32, reg += 4) { | ||
| 1260 | status = readl(reg); | 1262 | status = readl(reg); |
| 1261 | reg += 4; | ||
| 1262 | while (status) { | 1263 | while (status) { |
| 1263 | offset = __ffs(status); | 1264 | offset = __ffs(status); |
| 1264 | index = eint_num + offset; | 1265 | index = eint_num + offset; |
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index ccbfc325c778..38faceff2f08 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c | |||
| @@ -854,7 +854,7 @@ static int nmk_gpio_get_dir(struct gpio_chip *chip, unsigned offset) | |||
| 854 | 854 | ||
| 855 | clk_enable(nmk_chip->clk); | 855 | clk_enable(nmk_chip->clk); |
| 856 | 856 | ||
| 857 | dir = !!(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset)); | 857 | dir = !(readl(nmk_chip->addr + NMK_GPIO_DIR) & BIT(offset)); |
| 858 | 858 | ||
| 859 | clk_disable(nmk_chip->clk); | 859 | clk_disable(nmk_chip->clk); |
| 860 | 860 | ||
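The nomadik one-character fix turns a normalization (!!) into an inversion (!). That only makes sense if a set bit in NMK_GPIO_DIR means "output" while gpio_chip::get_direction() must return 1 for inputs and 0 for outputs; the register polarity is inferred from the fix rather than stated in the diff. Spelled out:

#include <linux/bitops.h>

static int demo_get_dir(unsigned int dir_reg_val, unsigned int offset)
{
        return !(dir_reg_val & BIT(offset));    /* 1 = input, 0 = output */
}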
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index c06bb85c2839..3ec0025d19e7 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
| @@ -103,7 +103,6 @@ config DELL_SMBIOS | |||
| 103 | 103 | ||
| 104 | config DELL_LAPTOP | 104 | config DELL_LAPTOP |
| 105 | tristate "Dell Laptop Extras" | 105 | tristate "Dell Laptop Extras" |
| 106 | depends on X86 | ||
| 107 | depends on DELL_SMBIOS | 106 | depends on DELL_SMBIOS |
| 108 | depends on DMI | 107 | depends on DMI |
| 109 | depends on BACKLIGHT_CLASS_DEVICE | 108 | depends on BACKLIGHT_CLASS_DEVICE |
| @@ -505,7 +504,7 @@ config THINKPAD_ACPI_HOTKEY_POLL | |||
| 505 | 504 | ||
| 506 | config SENSORS_HDAPS | 505 | config SENSORS_HDAPS |
| 507 | tristate "Thinkpad Hard Drive Active Protection System (hdaps)" | 506 | tristate "Thinkpad Hard Drive Active Protection System (hdaps)" |
| 508 | depends on INPUT && X86 | 507 | depends on INPUT |
| 509 | select INPUT_POLLDEV | 508 | select INPUT_POLLDEV |
| 510 | default n | 509 | default n |
| 511 | help | 510 | help |
| @@ -749,7 +748,7 @@ config TOSHIBA_WMI | |||
| 749 | 748 | ||
| 750 | config ACPI_CMPC | 749 | config ACPI_CMPC |
| 751 | tristate "CMPC Laptop Extras" | 750 | tristate "CMPC Laptop Extras" |
| 752 | depends on X86 && ACPI | 751 | depends on ACPI |
| 753 | depends on RFKILL || RFKILL=n | 752 | depends on RFKILL || RFKILL=n |
| 754 | select INPUT | 753 | select INPUT |
| 755 | select BACKLIGHT_CLASS_DEVICE | 754 | select BACKLIGHT_CLASS_DEVICE |
| @@ -848,7 +847,7 @@ config INTEL_IMR | |||
| 848 | 847 | ||
| 849 | config INTEL_PMC_CORE | 848 | config INTEL_PMC_CORE |
| 850 | bool "Intel PMC Core driver" | 849 | bool "Intel PMC Core driver" |
| 851 | depends on X86 && PCI | 850 | depends on PCI |
| 852 | ---help--- | 851 | ---help--- |
| 853 | The Intel Platform Controller Hub for Intel Core SoCs provides access | 852 | The Intel Platform Controller Hub for Intel Core SoCs provides access |
| 854 | to Power Management Controller registers via a PCI interface. This | 853 | to Power Management Controller registers via a PCI interface. This |
| @@ -860,7 +859,7 @@ config INTEL_PMC_CORE | |||
| 860 | 859 | ||
| 861 | config IBM_RTL | 860 | config IBM_RTL |
| 862 | tristate "Device driver to enable PRTL support" | 861 | tristate "Device driver to enable PRTL support" |
| 863 | depends on X86 && PCI | 862 | depends on PCI |
| 864 | ---help--- | 863 | ---help--- |
| 865 | Enable support for IBM Premium Real Time Mode (PRTM). | 864 | Enable support for IBM Premium Real Time Mode (PRTM). |
| 866 | This module will allow you to enter and exit PRTM in the BIOS via | 865 | This module will allow you to enter and exit PRTM in the BIOS via |
| @@ -894,7 +893,6 @@ config XO15_EBOOK | |||
| 894 | 893 | ||
| 895 | config SAMSUNG_LAPTOP | 894 | config SAMSUNG_LAPTOP |
| 896 | tristate "Samsung Laptop driver" | 895 | tristate "Samsung Laptop driver" |
| 897 | depends on X86 | ||
| 898 | depends on RFKILL || RFKILL = n | 896 | depends on RFKILL || RFKILL = n |
| 899 | depends on ACPI_VIDEO || ACPI_VIDEO = n | 897 | depends on ACPI_VIDEO || ACPI_VIDEO = n |
| 900 | depends on BACKLIGHT_CLASS_DEVICE | 898 | depends on BACKLIGHT_CLASS_DEVICE |
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 4a23fbc66b71..d1a091b93192 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c | |||
| @@ -567,6 +567,7 @@ static void ideapad_sysfs_exit(struct ideapad_private *priv) | |||
| 567 | static const struct key_entry ideapad_keymap[] = { | 567 | static const struct key_entry ideapad_keymap[] = { |
| 568 | { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } }, | 568 | { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } }, |
| 569 | { KE_KEY, 7, { KEY_CAMERA } }, | 569 | { KE_KEY, 7, { KEY_CAMERA } }, |
| 570 | { KE_KEY, 8, { KEY_MICMUTE } }, | ||
| 570 | { KE_KEY, 11, { KEY_F16 } }, | 571 | { KE_KEY, 11, { KEY_F16 } }, |
| 571 | { KE_KEY, 13, { KEY_WLAN } }, | 572 | { KE_KEY, 13, { KEY_WLAN } }, |
| 572 | { KE_KEY, 16, { KEY_PROG1 } }, | 573 | { KE_KEY, 16, { KEY_PROG1 } }, |
| @@ -809,6 +810,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) | |||
| 809 | break; | 810 | break; |
| 810 | case 13: | 811 | case 13: |
| 811 | case 11: | 812 | case 11: |
| 813 | case 8: | ||
| 812 | case 7: | 814 | case 7: |
| 813 | case 6: | 815 | case 6: |
| 814 | ideapad_input_report(priv, vpc_bit); | 816 | ideapad_input_report(priv, vpc_bit); |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index c3bfa1fe95bf..b65ce7519411 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
| @@ -2043,6 +2043,7 @@ static int hotkey_autosleep_ack; | |||
| 2043 | 2043 | ||
| 2044 | static u32 hotkey_orig_mask; /* events the BIOS had enabled */ | 2044 | static u32 hotkey_orig_mask; /* events the BIOS had enabled */ |
| 2045 | static u32 hotkey_all_mask; /* all events supported in fw */ | 2045 | static u32 hotkey_all_mask; /* all events supported in fw */ |
| 2046 | static u32 hotkey_adaptive_all_mask; /* all adaptive events supported in fw */ | ||
| 2046 | static u32 hotkey_reserved_mask; /* events better left disabled */ | 2047 | static u32 hotkey_reserved_mask; /* events better left disabled */ |
| 2047 | static u32 hotkey_driver_mask; /* events needed by the driver */ | 2048 | static u32 hotkey_driver_mask; /* events needed by the driver */ |
| 2048 | static u32 hotkey_user_mask; /* events visible to userspace */ | 2049 | static u32 hotkey_user_mask; /* events visible to userspace */ |
| @@ -2742,6 +2743,17 @@ static ssize_t hotkey_all_mask_show(struct device *dev, | |||
| 2742 | 2743 | ||
| 2743 | static DEVICE_ATTR_RO(hotkey_all_mask); | 2744 | static DEVICE_ATTR_RO(hotkey_all_mask); |
| 2744 | 2745 | ||
| 2746 | /* sysfs hotkey all_mask ----------------------------------------------- */ | ||
| 2747 | static ssize_t hotkey_adaptive_all_mask_show(struct device *dev, | ||
| 2748 | struct device_attribute *attr, | ||
| 2749 | char *buf) | ||
| 2750 | { | ||
| 2751 | return snprintf(buf, PAGE_SIZE, "0x%08x\n", | ||
| 2752 | hotkey_adaptive_all_mask | hotkey_source_mask); | ||
| 2753 | } | ||
| 2754 | |||
| 2755 | static DEVICE_ATTR_RO(hotkey_adaptive_all_mask); | ||
| 2756 | |||
| 2745 | /* sysfs hotkey recommended_mask --------------------------------------- */ | 2757 | /* sysfs hotkey recommended_mask --------------------------------------- */ |
| 2746 | static ssize_t hotkey_recommended_mask_show(struct device *dev, | 2758 | static ssize_t hotkey_recommended_mask_show(struct device *dev, |
| 2747 | struct device_attribute *attr, | 2759 | struct device_attribute *attr, |
| @@ -2985,6 +2997,7 @@ static struct attribute *hotkey_attributes[] __initdata = { | |||
| 2985 | &dev_attr_wakeup_hotunplug_complete.attr, | 2997 | &dev_attr_wakeup_hotunplug_complete.attr, |
| 2986 | &dev_attr_hotkey_mask.attr, | 2998 | &dev_attr_hotkey_mask.attr, |
| 2987 | &dev_attr_hotkey_all_mask.attr, | 2999 | &dev_attr_hotkey_all_mask.attr, |
| 3000 | &dev_attr_hotkey_adaptive_all_mask.attr, | ||
| 2988 | &dev_attr_hotkey_recommended_mask.attr, | 3001 | &dev_attr_hotkey_recommended_mask.attr, |
| 2989 | #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL | 3002 | #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL |
| 2990 | &dev_attr_hotkey_source_mask.attr, | 3003 | &dev_attr_hotkey_source_mask.attr, |
| @@ -3321,20 +3334,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
| 3321 | if (!tp_features.hotkey) | 3334 | if (!tp_features.hotkey) |
| 3322 | return 1; | 3335 | return 1; |
| 3323 | 3336 | ||
| 3324 | /* | ||
| 3325 | * Check if we have an adaptive keyboard, like on the | ||
| 3326 | * Lenovo Carbon X1 2014 (2nd Gen). | ||
| 3327 | */ | ||
| 3328 | if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { | ||
| 3329 | if ((hkeyv >> 8) == 2) { | ||
| 3330 | tp_features.has_adaptive_kbd = true; | ||
| 3331 | res = sysfs_create_group(&tpacpi_pdev->dev.kobj, | ||
| 3332 | &adaptive_kbd_attr_group); | ||
| 3333 | if (res) | ||
| 3334 | goto err_exit; | ||
| 3335 | } | ||
| 3336 | } | ||
| 3337 | |||
| 3338 | quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable, | 3337 | quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable, |
| 3339 | ARRAY_SIZE(tpacpi_hotkey_qtable)); | 3338 | ARRAY_SIZE(tpacpi_hotkey_qtable)); |
| 3340 | 3339 | ||
| @@ -3357,30 +3356,70 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) | |||
| 3357 | A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking | 3356 | A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking |
| 3358 | for HKEY interface version 0x100 */ | 3357 | for HKEY interface version 0x100 */ |
| 3359 | if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { | 3358 | if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { |
| 3360 | if ((hkeyv >> 8) != 1) { | 3359 | vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, |
| 3361 | pr_err("unknown version of the HKEY interface: 0x%x\n", | 3360 | "firmware HKEY interface version: 0x%x\n", |
| 3362 | hkeyv); | 3361 | hkeyv); |
| 3363 | pr_err("please report this to %s\n", TPACPI_MAIL); | 3362 | |
| 3364 | } else { | 3363 | switch (hkeyv >> 8) { |
| 3364 | case 1: | ||
| 3365 | /* | 3365 | /* |
| 3366 | * MHKV 0x100 in A31, R40, R40e, | 3366 | * MHKV 0x100 in A31, R40, R40e, |
| 3367 | * T4x, X31, and later | 3367 | * T4x, X31, and later |
| 3368 | */ | 3368 | */ |
| 3369 | vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, | ||
| 3370 | "firmware HKEY interface version: 0x%x\n", | ||
| 3371 | hkeyv); | ||
| 3372 | 3369 | ||
| 3373 | /* Paranoia check AND init hotkey_all_mask */ | 3370 | /* Paranoia check AND init hotkey_all_mask */ |
| 3374 | if (!acpi_evalf(hkey_handle, &hotkey_all_mask, | 3371 | if (!acpi_evalf(hkey_handle, &hotkey_all_mask, |
| 3375 | "MHKA", "qd")) { | 3372 | "MHKA", "qd")) { |
| 3376 | pr_err("missing MHKA handler, " | 3373 | pr_err("missing MHKA handler, please report this to %s\n", |
| 3377 | "please report this to %s\n", | ||
| 3378 | TPACPI_MAIL); | 3374 | TPACPI_MAIL); |
| 3379 | /* Fallback: pre-init for FN+F3,F4,F12 */ | 3375 | /* Fallback: pre-init for FN+F3,F4,F12 */ |
| 3380 | hotkey_all_mask = 0x080cU; | 3376 | hotkey_all_mask = 0x080cU; |
| 3381 | } else { | 3377 | } else { |
| 3382 | tp_features.hotkey_mask = 1; | 3378 | tp_features.hotkey_mask = 1; |
| 3383 | } | 3379 | } |
| 3380 | break; | ||
| 3381 | |||
| 3382 | case 2: | ||
| 3383 | /* | ||
| 3384 | * MHKV 0x200 in X1, T460s, X260, T560, X1 Tablet (2016) | ||
| 3385 | */ | ||
| 3386 | |||
| 3387 | /* Paranoia check AND init hotkey_all_mask */ | ||
| 3388 | if (!acpi_evalf(hkey_handle, &hotkey_all_mask, | ||
| 3389 | "MHKA", "dd", 1)) { | ||
| 3390 | pr_err("missing MHKA handler, please report this to %s\n", | ||
| 3391 | TPACPI_MAIL); | ||
| 3392 | /* Fallback: pre-init for FN+F3,F4,F12 */ | ||
| 3393 | hotkey_all_mask = 0x080cU; | ||
| 3394 | } else { | ||
| 3395 | tp_features.hotkey_mask = 1; | ||
| 3396 | } | ||
| 3397 | |||
| 3398 | /* | ||
| 3399 | * Check if we have an adaptive keyboard, like on the | ||
| 3400 | * Lenovo Carbon X1 2014 (2nd Gen). | ||
| 3401 | */ | ||
| 3402 | if (acpi_evalf(hkey_handle, &hotkey_adaptive_all_mask, | ||
| 3403 | "MHKA", "dd", 2)) { | ||
| 3404 | if (hotkey_adaptive_all_mask != 0) { | ||
| 3405 | tp_features.has_adaptive_kbd = true; | ||
| 3406 | res = sysfs_create_group( | ||
| 3407 | &tpacpi_pdev->dev.kobj, | ||
| 3408 | &adaptive_kbd_attr_group); | ||
| 3409 | if (res) | ||
| 3410 | goto err_exit; | ||
| 3411 | } | ||
| 3412 | } else { | ||
| 3413 | tp_features.has_adaptive_kbd = false; | ||
| 3414 | hotkey_adaptive_all_mask = 0x0U; | ||
| 3415 | } | ||
| 3416 | break; | ||
| 3417 | |||
| 3418 | default: | ||
| 3419 | pr_err("unknown version of the HKEY interface: 0x%x\n", | ||
| 3420 | hkeyv); | ||
| 3421 | pr_err("please report this to %s\n", TPACPI_MAIL); | ||
| 3422 | break; | ||
| 3384 | } | 3423 | } |
| 3385 | } | 3424 | } |
| 3386 | 3425 | ||
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 579fd65299a0..d637c933c8a9 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c | |||
| @@ -208,14 +208,10 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) | |||
| 208 | break; | 208 | break; |
| 209 | 209 | ||
| 210 | case PTP_SYS_OFFSET: | 210 | case PTP_SYS_OFFSET: |
| 211 | sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL); | 211 | sysoff = memdup_user((void __user *)arg, sizeof(*sysoff)); |
| 212 | if (!sysoff) { | 212 | if (IS_ERR(sysoff)) { |
| 213 | err = -ENOMEM; | 213 | err = PTR_ERR(sysoff); |
| 214 | break; | 214 | sysoff = NULL; |
| 215 | } | ||
| 216 | if (copy_from_user(sysoff, (void __user *)arg, | ||
| 217 | sizeof(*sysoff))) { | ||
| 218 | err = -EFAULT; | ||
| 219 | break; | 215 | break; |
| 220 | } | 216 | } |
| 221 | if (sysoff->n_samples > PTP_MAX_SAMPLES) { | 217 | if (sysoff->n_samples > PTP_MAX_SAMPLES) { |
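The ptp hunk adopts the memdup_user() idiom: one call replaces the kmalloc() + copy_from_user() pair and reports failure through ERR_PTR() (-ENOMEM or -EFAULT). An illustrative helper, not the real ptp_ioctl():

#include <linux/err.h>
#include <linux/ptp_clock.h>
#include <linux/slab.h>
#include <linux/string.h>

static long demo_copy_sysoff(void __user *uarg, struct ptp_sys_offset **out)
{
        struct ptp_sys_offset *sysoff = memdup_user(uarg, sizeof(*sysoff));

        if (IS_ERR(sysoff))
                return PTR_ERR(sysoff);

        *out = sysoff;          /* the caller kfree()s it when done */
        return 0;
}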
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index dba3843c53b8..ed337a8c34ab 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c | |||
| @@ -457,7 +457,8 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state) | |||
| 457 | { | 457 | { |
| 458 | int err; | 458 | int err; |
| 459 | 459 | ||
| 460 | if (!pwm) | 460 | if (!pwm || !state || !state->period || |
| 461 | state->duty_cycle > state->period) | ||
| 461 | return -EINVAL; | 462 | return -EINVAL; |
| 462 | 463 | ||
| 463 | if (!memcmp(state, &pwm->state, sizeof(*state))) | 464 | if (!memcmp(state, &pwm->state, sizeof(*state))) |
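The pwm core hunk adds basic sanity checks to pwm_apply_state(): a NULL state, a zero period, or a duty cycle larger than the period now fails with -EINVAL before reaching the driver. An illustrative caller building a state that passes those checks:

#include <linux/pwm.h>

static int demo_set_half_duty(struct pwm_device *pwm)
{
        struct pwm_state state;

        pwm_get_state(pwm, &state);
        state.period = 1000000;                 /* 1 ms, in nanoseconds */
        state.duty_cycle = state.period / 2;    /* never exceeds the period */
        state.enabled = true;

        return pwm_apply_state(pwm, &state);
}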
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c index f994c7eaf41c..14fc011faa32 100644 --- a/drivers/pwm/pwm-atmel-hlcdc.c +++ b/drivers/pwm/pwm-atmel-hlcdc.c | |||
| @@ -272,7 +272,7 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev) | |||
| 272 | chip->chip.of_pwm_n_cells = 3; | 272 | chip->chip.of_pwm_n_cells = 3; |
| 273 | chip->chip.can_sleep = 1; | 273 | chip->chip.can_sleep = 1; |
| 274 | 274 | ||
| 275 | ret = pwmchip_add(&chip->chip); | 275 | ret = pwmchip_add_with_polarity(&chip->chip, PWM_POLARITY_INVERSED); |
| 276 | if (ret) { | 276 | if (ret) { |
| 277 | clk_disable_unprepare(hlcdc->periph_clk); | 277 | clk_disable_unprepare(hlcdc->periph_clk); |
| 278 | return ret; | 278 | return ret; |
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index d98599249a05..01695d48dd54 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c | |||
| @@ -152,7 +152,7 @@ static ssize_t enable_store(struct device *child, | |||
| 152 | goto unlock; | 152 | goto unlock; |
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | pwm_apply_state(pwm, &state); | 155 | ret = pwm_apply_state(pwm, &state); |
| 156 | 156 | ||
| 157 | unlock: | 157 | unlock: |
| 158 | mutex_unlock(&export->lock); | 158 | mutex_unlock(&export->lock); |
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index 56a17ec5b5ef..526bf23dcb49 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c | |||
| @@ -140,6 +140,19 @@ static const struct regulator_ops rpm_smps_ldo_ops = { | |||
| 140 | .enable = rpm_reg_enable, | 140 | .enable = rpm_reg_enable, |
| 141 | .disable = rpm_reg_disable, | 141 | .disable = rpm_reg_disable, |
| 142 | .is_enabled = rpm_reg_is_enabled, | 142 | .is_enabled = rpm_reg_is_enabled, |
| 143 | .list_voltage = regulator_list_voltage_linear_range, | ||
| 144 | |||
| 145 | .get_voltage = rpm_reg_get_voltage, | ||
| 146 | .set_voltage = rpm_reg_set_voltage, | ||
| 147 | |||
| 148 | .set_load = rpm_reg_set_load, | ||
| 149 | }; | ||
| 150 | |||
| 151 | static const struct regulator_ops rpm_smps_ldo_ops_fixed = { | ||
| 152 | .enable = rpm_reg_enable, | ||
| 153 | .disable = rpm_reg_disable, | ||
| 154 | .is_enabled = rpm_reg_is_enabled, | ||
| 155 | .list_voltage = regulator_list_voltage_linear_range, | ||
| 143 | 156 | ||
| 144 | .get_voltage = rpm_reg_get_voltage, | 157 | .get_voltage = rpm_reg_get_voltage, |
| 145 | .set_voltage = rpm_reg_set_voltage, | 158 | .set_voltage = rpm_reg_set_voltage, |
| @@ -247,7 +260,7 @@ static const struct regulator_desc pm8941_nldo = { | |||
| 247 | static const struct regulator_desc pm8941_lnldo = { | 260 | static const struct regulator_desc pm8941_lnldo = { |
| 248 | .fixed_uV = 1740000, | 261 | .fixed_uV = 1740000, |
| 249 | .n_voltages = 1, | 262 | .n_voltages = 1, |
| 250 | .ops = &rpm_smps_ldo_ops, | 263 | .ops = &rpm_smps_ldo_ops_fixed, |
| 251 | }; | 264 | }; |
| 252 | 265 | ||
| 253 | static const struct regulator_desc pm8941_switch = { | 266 | static const struct regulator_desc pm8941_switch = { |
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c index 572816e30095..c139890c1514 100644 --- a/drivers/regulator/tps51632-regulator.c +++ b/drivers/regulator/tps51632-regulator.c | |||
| @@ -94,11 +94,14 @@ static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev, | |||
| 94 | int ramp_delay) | 94 | int ramp_delay) |
| 95 | { | 95 | { |
| 96 | struct tps51632_chip *tps = rdev_get_drvdata(rdev); | 96 | struct tps51632_chip *tps = rdev_get_drvdata(rdev); |
| 97 | int bit = ramp_delay/6000; | 97 | int bit; |
| 98 | int ret; | 98 | int ret; |
| 99 | 99 | ||
| 100 | if (bit) | 100 | if (ramp_delay == 0) |
| 101 | bit--; | 101 | bit = 0; |
| 102 | else | ||
| 103 | bit = DIV_ROUND_UP(ramp_delay, 6000) - 1; | ||
| 104 | |||
| 102 | ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit)); | 105 | ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit)); |
| 103 | if (ret < 0) | 106 | if (ret < 0) |
| 104 | dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret); | 107 | dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret); |
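The tps51632 change rounds the requested ramp delay up instead of down. A worked example, assuming (as the divisor suggests) 6000 uV/us per slew-rate step: a request of 7000 now selects DIV_ROUND_UP(7000, 6000) - 1 = bit 1, i.e. the next step up, where the old ramp_delay / 6000 arithmetic picked bit 0; a request of 0 still maps to bit 0. DIV_ROUND_UP(a, b) expands to ((a) + (b) - 1) / (b):

#include <linux/kernel.h>

static int demo_ramp_to_bit(int ramp_delay)
{
        if (ramp_delay == 0)
                return 0;

        return DIV_ROUND_UP(ramp_delay, 6000) - 1;
}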
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index d4c285688ce9..3ddc85e6efd6 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c | |||
| @@ -1122,7 +1122,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp, | |||
| 1122 | } else { | 1122 | } else { |
| 1123 | struct scsi_cmnd *SCp; | 1123 | struct scsi_cmnd *SCp; |
| 1124 | 1124 | ||
| 1125 | SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG); | 1125 | SCp = SDp->current_cmnd; |
| 1126 | if(unlikely(SCp == NULL)) { | 1126 | if(unlikely(SCp == NULL)) { |
| 1127 | sdev_printk(KERN_ERR, SDp, | 1127 | sdev_printk(KERN_ERR, SDp, |
| 1128 | "no saved request for untagged cmd\n"); | 1128 | "no saved request for untagged cmd\n"); |
| @@ -1826,7 +1826,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *) | |||
| 1826 | slot->tag, slot); | 1826 | slot->tag, slot); |
| 1827 | } else { | 1827 | } else { |
| 1828 | slot->tag = SCSI_NO_TAG; | 1828 | slot->tag = SCSI_NO_TAG; |
| 1829 | /* must populate current_cmnd for scsi_host_find_tag to work */ | 1829 | /* save current command for reselection */ |
| 1830 | SCp->device->current_cmnd = SCp; | 1830 | SCp->device->current_cmnd = SCp; |
| 1831 | } | 1831 | } |
| 1832 | /* sanity check: some of the commands generated by the mid-layer | 1832 | /* sanity check: some of the commands generated by the mid-layer |
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 8f90d9e77104..969c312de1be 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h | |||
| @@ -621,6 +621,11 @@ struct aac_driver_ident | |||
| 621 | #define AAC_QUIRK_SCSI_32 0x0020 | 621 | #define AAC_QUIRK_SCSI_32 0x0020 |
| 622 | 622 | ||
| 623 | /* | 623 | /* |
| 624 | * SRC based adapters support the AifReqEvent functions | ||
| 625 | */ | ||
| 626 | #define AAC_QUIRK_SRC 0x0040 | ||
| 627 | |||
| 628 | /* | ||
| 624 | * The adapter interface specs all queues to be located in the same | 629 | * The adapter interface specs all queues to be located in the same |
| 625 | * physically contiguous block. The host structure that defines the | 630 | * physically contiguous block. The host structure that defines the |
| 626 | * communication queues will assume they are each a separate physically | 631 | * communication queues will assume they are each a separate physically |
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index a943bd230bc2..79871f3519ff 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
| @@ -236,10 +236,10 @@ static struct aac_driver_ident aac_drivers[] = { | |||
| 236 | { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ | 236 | { aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */ |
| 237 | { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ | 237 | { aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */ |
| 238 | { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */ | 238 | { aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */ |
| 239 | { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */ | 239 | { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */ |
| 240 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */ | 240 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */ |
| 241 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */ | 241 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */ |
| 242 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */ | 242 | { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */ |
| 243 | }; | 243 | }; |
| 244 | 244 | ||
| 245 | /** | 245 | /** |
| @@ -1299,7 +1299,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1299 | else | 1299 | else |
| 1300 | shost->this_id = shost->max_id; | 1300 | shost->this_id = shost->max_id; |
| 1301 | 1301 | ||
| 1302 | aac_intr_normal(aac, 0, 2, 0, NULL); | 1302 | if (aac_drivers[index].quirks & AAC_QUIRK_SRC) |
| 1303 | aac_intr_normal(aac, 0, 2, 0, NULL); | ||
| 1303 | 1304 | ||
| 1304 | /* | 1305 | /* |
| 1305 | * dmb - we may need to move the setting of these parms somewhere else once | 1306 | * dmb - we may need to move the setting of these parms somewhere else once |
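
The two aacraid hunks above introduce AAC_QUIRK_SRC (0x0040) and gate the early aac_intr_normal() call in aac_probe_one() on it, so only the SRC-based PMC boards get the AIF request-event kick at probe time. A small, self-contained sketch of that quirk-flag pattern; the table entries and the probe behaviour here are reduced stand-ins, not the driver's real data structures:

    #include <stdio.h>

    #define AAC_QUIRK_SRC   0x0040          /* value taken from the aacraid.h hunk */

    struct driver_ident {
            const char      *label;
            unsigned int    quirks;
    };

    static void probe_one(const struct driver_ident *id)
    {
            /* Only SRC-based adapters support the AifReqEvent functions */
            if (id->quirks & AAC_QUIRK_SRC)
                    printf("%-26s: issuing AIF enable interrupt\n", id->label);
            else
                    printf("%-26s: skipping AIF enable\n", id->label);
    }

    int main(void)
    {
            const struct driver_ident table[] = {
                    { "Adaptec Rocket Catch All", 0 },
                    { "PMC Series 6 (Tupelo)",    AAC_QUIRK_SRC },
                    { "PMC Series 7 (Denali)",    AAC_QUIRK_SRC },
            };
            unsigned int i;

            for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                    probe_one(&table[i]);
            return 0;
    }
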
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 6a4df5a315e9..6bff13e7afc7 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
| @@ -7975,13 +7975,14 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, | |||
| 7975 | ActiveCableEventData = | 7975 | ActiveCableEventData = |
| 7976 | (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; | 7976 | (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData; |
| 7977 | if (ActiveCableEventData->ReasonCode == | 7977 | if (ActiveCableEventData->ReasonCode == |
| 7978 | MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) | 7978 | MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) { |
| 7979 | pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d", | 7979 | pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d", |
| 7980 | ioc->name, ActiveCableEventData->ReceptacleID); | 7980 | ioc->name, ActiveCableEventData->ReceptacleID); |
| 7981 | pr_info("cannot be powered and devices connected to this active cable"); | 7981 | pr_info("cannot be powered and devices connected to this active cable"); |
| 7982 | pr_info("will not be seen. This active cable"); | 7982 | pr_info("will not be seen. This active cable"); |
| 7983 | pr_info("requires %d mW of power", | 7983 | pr_info("requires %d mW of power", |
| 7984 | ActiveCableEventData->ActiveCablePowerRequirement); | 7984 | ActiveCableEventData->ActiveCablePowerRequirement); |
| 7985 | } | ||
| 7985 | break; | 7986 | break; |
| 7986 | 7987 | ||
| 7987 | default: /* ignore the rest */ | 7988 | default: /* ignore the rest */ |
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 3408578b08d6..ff41c310c900 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
| @@ -230,6 +230,7 @@ static struct { | |||
| 230 | {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, | 230 | {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, |
| 231 | {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, | 231 | {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, |
| 232 | {"Promise", "", NULL, BLIST_SPARSELUN}, | 232 | {"Promise", "", NULL, BLIST_SPARSELUN}, |
| 233 | {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES}, | ||
| 233 | {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, | 234 | {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, |
| 234 | {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024}, | 235 | {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024}, |
| 235 | {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, | 236 | {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, |
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index a8b610eaa0ca..106a6adbd6f1 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
| @@ -1128,7 +1128,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn) | |||
| 1128 | */ | 1128 | */ |
| 1129 | void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q) | 1129 | void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q) |
| 1130 | { | 1130 | { |
| 1131 | scmd->device->host->host_failed--; | ||
| 1132 | scmd->eh_eflags = 0; | 1131 | scmd->eh_eflags = 0; |
| 1133 | list_move_tail(&scmd->eh_entry, done_q); | 1132 | list_move_tail(&scmd->eh_entry, done_q); |
| 1134 | } | 1133 | } |
| @@ -2227,6 +2226,9 @@ int scsi_error_handler(void *data) | |||
| 2227 | else | 2226 | else |
| 2228 | scsi_unjam_host(shost); | 2227 | scsi_unjam_host(shost); |
| 2229 | 2228 | ||
| 2229 | /* All scmds have been handled */ | ||
| 2230 | shost->host_failed = 0; | ||
| 2231 | |||
| 2230 | /* | 2232 | /* |
| 2231 | * Note - if the above fails completely, the action is to take | 2233 | * Note - if the above fails completely, the action is to take |
| 2232 | * individual devices offline and flush the queue of any | 2234 | * individual devices offline and flush the queue of any |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b2e332af0f51..c71344aebdbb 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -821,9 +821,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
| 821 | } | 821 | } |
| 822 | 822 | ||
| 823 | /* | 823 | /* |
| 824 | * If we finished all bytes in the request we are done now. | 824 | * special case: failed zero length commands always need to |
| 825 | * drop down into the retry code. Otherwise, if we finished | ||
| 826 | * all bytes in the request we are done now. | ||
| 825 | */ | 827 | */ |
| 826 | if (!scsi_end_request(req, error, good_bytes, 0)) | 828 | if (!(blk_rq_bytes(req) == 0 && error) && |
| 829 | !scsi_end_request(req, error, good_bytes, 0)) | ||
| 827 | return; | 830 | return; |
| 828 | 831 | ||
| 829 | /* | 832 | /* |
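
The scsi_lib.c hunk above adds a guard so that a failed zero-length command is never completed at this point but falls through to the retry handling further down in scsi_io_completion(). A tiny model of that predicate, under the assumption that only the request size and error code matter for this decision:

    #include <stdbool.h>
    #include <stdio.h>

    /* true  -> try to finish the request here (the old unconditional path)
     * false -> failed zero-length command, drop into the retry code instead */
    static bool finish_request_here(unsigned int rq_bytes, int error)
    {
            return !(rq_bytes == 0 && error);
    }

    int main(void)
    {
            printf("4096 bytes, no error : %d\n", finish_request_here(4096, 0));
            printf("4096 bytes, EIO      : %d\n", finish_request_here(4096, -5));
            printf("0 bytes,    no error : %d\n", finish_request_here(0, 0));
            printf("0 bytes,    EIO      : %d\n", finish_request_here(0, -5));
            return 0;
    }
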
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 428c03ef02b2..60bff78e9ead 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -1398,11 +1398,15 @@ static int media_not_present(struct scsi_disk *sdkp, | |||
| 1398 | **/ | 1398 | **/ |
| 1399 | static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) | 1399 | static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) |
| 1400 | { | 1400 | { |
| 1401 | struct scsi_disk *sdkp = scsi_disk(disk); | 1401 | struct scsi_disk *sdkp = scsi_disk_get(disk); |
| 1402 | struct scsi_device *sdp = sdkp->device; | 1402 | struct scsi_device *sdp; |
| 1403 | struct scsi_sense_hdr *sshdr = NULL; | 1403 | struct scsi_sense_hdr *sshdr = NULL; |
| 1404 | int retval; | 1404 | int retval; |
| 1405 | 1405 | ||
| 1406 | if (!sdkp) | ||
| 1407 | return 0; | ||
| 1408 | |||
| 1409 | sdp = sdkp->device; | ||
| 1406 | SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); | 1410 | SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n")); |
| 1407 | 1411 | ||
| 1408 | /* | 1412 | /* |
| @@ -1459,6 +1463,7 @@ out: | |||
| 1459 | kfree(sshdr); | 1463 | kfree(sshdr); |
| 1460 | retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0; | 1464 | retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0; |
| 1461 | sdp->changed = 0; | 1465 | sdp->changed = 0; |
| 1466 | scsi_disk_put(sdkp); | ||
| 1462 | return retval; | 1467 | return retval; |
| 1463 | } | 1468 | } |
| 1464 | 1469 | ||
| @@ -2862,10 +2867,10 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
| 2862 | if (sdkp->opt_xfer_blocks && | 2867 | if (sdkp->opt_xfer_blocks && |
| 2863 | sdkp->opt_xfer_blocks <= dev_max && | 2868 | sdkp->opt_xfer_blocks <= dev_max && |
| 2864 | sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && | 2869 | sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && |
| 2865 | sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE) | 2870 | logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) { |
| 2866 | rw_max = q->limits.io_opt = | 2871 | q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); |
| 2867 | sdkp->opt_xfer_blocks * sdp->sector_size; | 2872 | rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); |
| 2868 | else | 2873 | } else |
| 2869 | rw_max = BLK_DEF_MAX_SECTORS; | 2874 | rw_max = BLK_DEF_MAX_SECTORS; |
| 2870 | 2875 | ||
| 2871 | /* Combine with controller limits */ | 2876 | /* Combine with controller limits */ |
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 654630bb7d0e..765a6f1ac1b7 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h | |||
| @@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo | |||
| 151 | return blocks << (ilog2(sdev->sector_size) - 9); | 151 | return blocks << (ilog2(sdev->sector_size) - 9); |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks) | ||
| 155 | { | ||
| 156 | return blocks * sdev->sector_size; | ||
| 157 | } | ||
| 158 | |||
| 154 | /* | 159 | /* |
| 155 | * A DIF-capable target device can be formatted with different | 160 | * A DIF-capable target device can be formatted with different |
| 156 | * protection schemes. Currently 0 through 3 are defined: | 161 | * protection schemes. Currently 0 through 3 are defined: |
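
The sd.h and sd.c hunks above add a logical_to_bytes() helper and use it together with the existing logical_to_sectors() when validating the optimal transfer size, so the PAGE_SIZE comparison is done in bytes while rw_max is expressed in 512-byte sectors. A standalone sketch of the two conversions, assuming a power-of-two logical sector size as the shift-based helper requires; the sample sizes are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    /* Integer log2 of a power-of-two value (stand-in for the kernel's ilog2()) */
    static unsigned int ilog2_u32(uint32_t v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    static uint64_t logical_to_sectors(uint32_t sector_size, uint64_t blocks)
    {
            return blocks << (ilog2_u32(sector_size) - 9);  /* 512-byte sectors */
    }

    static uint64_t logical_to_bytes(uint32_t sector_size, uint64_t blocks)
    {
            return blocks * sector_size;
    }

    int main(void)
    {
            const uint32_t sector_sizes[] = { 512, 4096 };
            const uint64_t opt_xfer_blocks = 8;             /* illustrative value */
            unsigned int i;

            for (i = 0; i < sizeof(sector_sizes) / sizeof(sector_sizes[0]); i++)
                    printf("sector=%u: %llu blocks -> %llu bytes, %llu sectors\n",
                           sector_sizes[i],
                           (unsigned long long)opt_xfer_blocks,
                           (unsigned long long)logical_to_bytes(sector_sizes[i],
                                                                opt_xfer_blocks),
                           (unsigned long long)logical_to_sectors(sector_sizes[i],
                                                                  opt_xfer_blocks));
            return 0;
    }
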
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c index 1ddd9e2309b6..cf007f3b83ec 100644 --- a/drivers/spi/spi-sun4i.c +++ b/drivers/spi/spi-sun4i.c | |||
| @@ -173,13 +173,17 @@ static int sun4i_spi_transfer_one(struct spi_master *master, | |||
| 173 | { | 173 | { |
| 174 | struct sun4i_spi *sspi = spi_master_get_devdata(master); | 174 | struct sun4i_spi *sspi = spi_master_get_devdata(master); |
| 175 | unsigned int mclk_rate, div, timeout; | 175 | unsigned int mclk_rate, div, timeout; |
| 176 | unsigned int start, end, tx_time; | ||
| 176 | unsigned int tx_len = 0; | 177 | unsigned int tx_len = 0; |
| 177 | int ret = 0; | 178 | int ret = 0; |
| 178 | u32 reg; | 179 | u32 reg; |
| 179 | 180 | ||
| 180 | /* We don't support transfer larger than the FIFO */ | 181 | /* We don't support transfer larger than the FIFO */ |
| 181 | if (tfr->len > SUN4I_FIFO_DEPTH) | 182 | if (tfr->len > SUN4I_FIFO_DEPTH) |
| 182 | return -EINVAL; | 183 | return -EMSGSIZE; |
| 184 | |||
| 185 | if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH) | ||
| 186 | return -EMSGSIZE; | ||
| 183 | 187 | ||
| 184 | reinit_completion(&sspi->done); | 188 | reinit_completion(&sspi->done); |
| 185 | sspi->tx_buf = tfr->tx_buf; | 189 | sspi->tx_buf = tfr->tx_buf; |
| @@ -269,8 +273,12 @@ static int sun4i_spi_transfer_one(struct spi_master *master, | |||
| 269 | sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len)); | 273 | sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len)); |
| 270 | sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len)); | 274 | sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len)); |
| 271 | 275 | ||
| 272 | /* Fill the TX FIFO */ | 276 | /* |
| 273 | sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH); | 277 | * Fill the TX FIFO |
| 278 | * Filling the FIFO fully causes timeout for some reason | ||
| 279 | * at least on spi2 on A10s | ||
| 280 | */ | ||
| 281 | sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1); | ||
| 274 | 282 | ||
| 275 | /* Enable the interrupts */ | 283 | /* Enable the interrupts */ |
| 276 | sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC); | 284 | sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC); |
| @@ -279,9 +287,16 @@ static int sun4i_spi_transfer_one(struct spi_master *master, | |||
| 279 | reg = sun4i_spi_read(sspi, SUN4I_CTL_REG); | 287 | reg = sun4i_spi_read(sspi, SUN4I_CTL_REG); |
| 280 | sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH); | 288 | sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH); |
| 281 | 289 | ||
| 290 | tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U); | ||
| 291 | start = jiffies; | ||
| 282 | timeout = wait_for_completion_timeout(&sspi->done, | 292 | timeout = wait_for_completion_timeout(&sspi->done, |
| 283 | msecs_to_jiffies(1000)); | 293 | msecs_to_jiffies(tx_time)); |
| 294 | end = jiffies; | ||
| 284 | if (!timeout) { | 295 | if (!timeout) { |
| 296 | dev_warn(&master->dev, | ||
| 297 | "%s: timeout transferring %u bytes@%iHz for %i(%i)ms", | ||
| 298 | dev_name(&spi->dev), tfr->len, tfr->speed_hz, | ||
| 299 | jiffies_to_msecs(end - start), tx_time); | ||
| 285 | ret = -ETIMEDOUT; | 300 | ret = -ETIMEDOUT; |
| 286 | goto out; | 301 | goto out; |
| 287 | } | 302 | } |
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index 42e2c4bd690a..7fce79a60608 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c | |||
| @@ -160,6 +160,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master, | |||
| 160 | { | 160 | { |
| 161 | struct sun6i_spi *sspi = spi_master_get_devdata(master); | 161 | struct sun6i_spi *sspi = spi_master_get_devdata(master); |
| 162 | unsigned int mclk_rate, div, timeout; | 162 | unsigned int mclk_rate, div, timeout; |
| 163 | unsigned int start, end, tx_time; | ||
| 163 | unsigned int tx_len = 0; | 164 | unsigned int tx_len = 0; |
| 164 | int ret = 0; | 165 | int ret = 0; |
| 165 | u32 reg; | 166 | u32 reg; |
| @@ -269,9 +270,16 @@ static int sun6i_spi_transfer_one(struct spi_master *master, | |||
| 269 | reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG); | 270 | reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG); |
| 270 | sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH); | 271 | sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH); |
| 271 | 272 | ||
| 273 | tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U); | ||
| 274 | start = jiffies; | ||
| 272 | timeout = wait_for_completion_timeout(&sspi->done, | 275 | timeout = wait_for_completion_timeout(&sspi->done, |
| 273 | msecs_to_jiffies(1000)); | 276 | msecs_to_jiffies(tx_time)); |
| 277 | end = jiffies; | ||
| 274 | if (!timeout) { | 278 | if (!timeout) { |
| 279 | dev_warn(&master->dev, | ||
| 280 | "%s: timeout transferring %u bytes@%iHz for %i(%i)ms", | ||
| 281 | dev_name(&spi->dev), tfr->len, tfr->speed_hz, | ||
| 282 | jiffies_to_msecs(end - start), tx_time); | ||
| 275 | ret = -ETIMEDOUT; | 283 | ret = -ETIMEDOUT; |
| 276 | goto out; | 284 | goto out; |
| 277 | } | 285 | } |
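
The sun4i and sun6i hunks above both replace the fixed 1000 ms completion wait with a timeout scaled to the transfer: roughly twice the nominal wire time (len * 8 bits at speed_hz), with a 100 ms floor. A minimal sketch of that calculation; the transfer lengths and clock rates below are only examples:

    #include <stdio.h>

    static unsigned int max_u(unsigned int a, unsigned int b)
    {
            return a > b ? a : b;
    }

    /* Mirrors: tx_time = max(len * 8 * 2 / (speed_hz / 1000), 100U)  [ms]
     * Assumes speed_hz >= 1000, as the original expression does. */
    static unsigned int spi_tx_time_ms(unsigned int len, unsigned int speed_hz)
    {
            return max_u(len * 8 * 2 / (speed_hz / 1000), 100U);
    }

    int main(void)
    {
            printf("64 B @ 100 kHz -> %u ms\n", spi_tx_time_ms(64, 100000));
            printf("64 B @ 1 kHz   -> %u ms\n", spi_tx_time_ms(64, 1000));
            printf("64 B @ 10 MHz  -> %u ms\n", spi_tx_time_ms(64, 10000000));
            return 0;
    }

Slow transfers now get a timeout proportional to their real duration, while fast ones keep a generous 100 ms minimum.
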
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 443f664534e1..29ea8d2f9824 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c | |||
| @@ -646,6 +646,13 @@ free_master: | |||
| 646 | 646 | ||
| 647 | static int ti_qspi_remove(struct platform_device *pdev) | 647 | static int ti_qspi_remove(struct platform_device *pdev) |
| 648 | { | 648 | { |
| 649 | struct ti_qspi *qspi = platform_get_drvdata(pdev); | ||
| 650 | int rc; | ||
| 651 | |||
| 652 | rc = spi_master_suspend(qspi->master); | ||
| 653 | if (rc) | ||
| 654 | return rc; | ||
| 655 | |||
| 649 | pm_runtime_put_sync(&pdev->dev); | 656 | pm_runtime_put_sync(&pdev->dev); |
| 650 | pm_runtime_disable(&pdev->dev); | 657 | pm_runtime_disable(&pdev->dev); |
| 651 | 658 | ||
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index bbfee53cfcf5..845e49a52430 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | |||
| @@ -2521,12 +2521,13 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) | |||
| 2521 | return 0; | 2521 | return 0; |
| 2522 | 2522 | ||
| 2523 | failed: | 2523 | failed: |
| 2524 | if (ni) | 2524 | if (ni) { |
| 2525 | lnet_ni_decref(ni); | 2525 | lnet_ni_decref(ni); |
| 2526 | rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni); | ||
| 2527 | rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni); | ||
| 2528 | } | ||
| 2526 | 2529 | ||
| 2527 | rej.ibr_version = version; | 2530 | rej.ibr_version = version; |
| 2528 | rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni); | ||
| 2529 | rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni); | ||
| 2530 | kiblnd_reject(cmid, &rej); | 2531 | kiblnd_reject(cmid, &rej); |
| 2531 | 2532 | ||
| 2532 | return -ECONNREFUSED; | 2533 | return -ECONNREFUSED; |
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c index c17870cddb5b..fbce1f7e68ca 100644 --- a/drivers/staging/rtl8188eu/core/rtw_efuse.c +++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c | |||
| @@ -102,7 +102,7 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf) | |||
| 102 | if (!efuseTbl) | 102 | if (!efuseTbl) |
| 103 | return; | 103 | return; |
| 104 | 104 | ||
| 105 | eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(*eFuseWord)); | 105 | eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16)); |
| 106 | if (!eFuseWord) { | 106 | if (!eFuseWord) { |
| 107 | DBG_88E("%s: alloc eFuseWord fail!\n", __func__); | 107 | DBG_88E("%s: alloc eFuseWord fail!\n", __func__); |
| 108 | goto eFuseWord_failed; | 108 | goto eFuseWord_failed; |
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c index 87ea3b844951..363f3a34ddce 100644 --- a/drivers/staging/rtl8188eu/hal/usb_halinit.c +++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c | |||
| @@ -2072,7 +2072,8 @@ void rtl8188eu_set_hal_ops(struct adapter *adapt) | |||
| 2072 | { | 2072 | { |
| 2073 | struct hal_ops *halfunc = &adapt->HalFunc; | 2073 | struct hal_ops *halfunc = &adapt->HalFunc; |
| 2074 | 2074 | ||
| 2075 | adapt->HalData = kzalloc(sizeof(*adapt->HalData), GFP_KERNEL); | 2075 | |
| 2076 | adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL); | ||
| 2076 | if (!adapt->HalData) | 2077 | if (!adapt->HalData) |
| 2077 | DBG_88E("cant not alloc memory for HAL DATA\n"); | 2078 | DBG_88E("cant not alloc memory for HAL DATA\n"); |
| 2078 | 2079 | ||
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 6ceac4f2d4b2..5b4b47ed948b 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c | |||
| @@ -857,14 +857,6 @@ __cpufreq_cooling_register(struct device_node *np, | |||
| 857 | goto free_power_table; | 857 | goto free_power_table; |
| 858 | } | 858 | } |
| 859 | 859 | ||
| 860 | snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", | ||
| 861 | cpufreq_dev->id); | ||
| 862 | |||
| 863 | cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, | ||
| 864 | &cpufreq_cooling_ops); | ||
| 865 | if (IS_ERR(cool_dev)) | ||
| 866 | goto remove_idr; | ||
| 867 | |||
| 868 | /* Fill freq-table in descending order of frequencies */ | 860 | /* Fill freq-table in descending order of frequencies */ |
| 869 | for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) { | 861 | for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) { |
| 870 | freq = find_next_max(table, freq); | 862 | freq = find_next_max(table, freq); |
| @@ -877,6 +869,14 @@ __cpufreq_cooling_register(struct device_node *np, | |||
| 877 | pr_debug("%s: freq:%u KHz\n", __func__, freq); | 869 | pr_debug("%s: freq:%u KHz\n", __func__, freq); |
| 878 | } | 870 | } |
| 879 | 871 | ||
| 872 | snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", | ||
| 873 | cpufreq_dev->id); | ||
| 874 | |||
| 875 | cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, | ||
| 876 | &cpufreq_cooling_ops); | ||
| 877 | if (IS_ERR(cool_dev)) | ||
| 878 | goto remove_idr; | ||
| 879 | |||
| 880 | cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0]; | 880 | cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0]; |
| 881 | cpufreq_dev->cool_dev = cool_dev; | 881 | cpufreq_dev->cool_dev = cool_dev; |
| 882 | 882 | ||
diff --git a/drivers/thermal/int340x_thermal/int3406_thermal.c b/drivers/thermal/int340x_thermal/int3406_thermal.c index 13d431cbd29e..a578cd257db4 100644 --- a/drivers/thermal/int340x_thermal/int3406_thermal.c +++ b/drivers/thermal/int340x_thermal/int3406_thermal.c | |||
| @@ -177,7 +177,7 @@ static int int3406_thermal_probe(struct platform_device *pdev) | |||
| 177 | return -ENODEV; | 177 | return -ENODEV; |
| 178 | d->raw_bd = bd; | 178 | d->raw_bd = bd; |
| 179 | 179 | ||
| 180 | ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br); | 180 | ret = acpi_video_get_levels(ACPI_COMPANION(&pdev->dev), &d->br, NULL); |
| 181 | if (ret) | 181 | if (ret) |
| 182 | return ret; | 182 | return ret; |
| 183 | 183 | ||
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig index 82c4d2e45319..95103054c0e4 100644 --- a/drivers/tty/Kconfig +++ b/drivers/tty/Kconfig | |||
| @@ -120,17 +120,6 @@ config UNIX98_PTYS | |||
| 120 | All modern Linux systems use the Unix98 ptys. Say Y unless | 120 | All modern Linux systems use the Unix98 ptys. Say Y unless |
| 121 | you're on an embedded system and want to conserve memory. | 121 | you're on an embedded system and want to conserve memory. |
| 122 | 122 | ||
| 123 | config DEVPTS_MULTIPLE_INSTANCES | ||
| 124 | bool "Support multiple instances of devpts" | ||
| 125 | depends on UNIX98_PTYS | ||
| 126 | default n | ||
| 127 | ---help--- | ||
| 128 | Enable support for multiple instances of devpts filesystem. | ||
| 129 | If you want to have isolated PTY namespaces (eg: in containers), | ||
| 130 | say Y here. Otherwise, say N. If enabled, each mount of devpts | ||
| 131 | filesystem with the '-o newinstance' option will create an | ||
| 132 | independent PTY namespace. | ||
| 133 | |||
| 134 | config LEGACY_PTYS | 123 | config LEGACY_PTYS |
| 135 | bool "Legacy (BSD) PTY support" | 124 | bool "Legacy (BSD) PTY support" |
| 136 | default y | 125 | default y |
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index dd4b8417e7f4..f856c4544eea 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c | |||
| @@ -668,7 +668,7 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) | |||
| 668 | else | 668 | else |
| 669 | fsi = tty->link->driver_data; | 669 | fsi = tty->link->driver_data; |
| 670 | devpts_kill_index(fsi, tty->index); | 670 | devpts_kill_index(fsi, tty->index); |
| 671 | devpts_put_ref(fsi); | 671 | devpts_release(fsi); |
| 672 | } | 672 | } |
| 673 | 673 | ||
| 674 | static const struct tty_operations ptm_unix98_ops = { | 674 | static const struct tty_operations ptm_unix98_ops = { |
| @@ -733,10 +733,11 @@ static int ptmx_open(struct inode *inode, struct file *filp) | |||
| 733 | if (retval) | 733 | if (retval) |
| 734 | return retval; | 734 | return retval; |
| 735 | 735 | ||
| 736 | fsi = devpts_get_ref(inode, filp); | 736 | fsi = devpts_acquire(filp); |
| 737 | retval = -ENODEV; | 737 | if (IS_ERR(fsi)) { |
| 738 | if (!fsi) | 738 | retval = PTR_ERR(fsi); |
| 739 | goto out_free_file; | 739 | goto out_free_file; |
| 740 | } | ||
| 740 | 741 | ||
| 741 | /* find a device that is not in use. */ | 742 | /* find a device that is not in use. */ |
| 742 | mutex_lock(&devpts_mutex); | 743 | mutex_lock(&devpts_mutex); |
| @@ -745,7 +746,7 @@ static int ptmx_open(struct inode *inode, struct file *filp) | |||
| 745 | 746 | ||
| 746 | retval = index; | 747 | retval = index; |
| 747 | if (index < 0) | 748 | if (index < 0) |
| 748 | goto out_put_ref; | 749 | goto out_put_fsi; |
| 749 | 750 | ||
| 750 | 751 | ||
| 751 | mutex_lock(&tty_mutex); | 752 | mutex_lock(&tty_mutex); |
| @@ -789,8 +790,8 @@ err_release: | |||
| 789 | return retval; | 790 | return retval; |
| 790 | out: | 791 | out: |
| 791 | devpts_kill_index(fsi, index); | 792 | devpts_kill_index(fsi, index); |
| 792 | out_put_ref: | 793 | out_put_fsi: |
| 793 | devpts_put_ref(fsi); | 794 | devpts_release(fsi); |
| 794 | out_free_file: | 795 | out_free_file: |
| 795 | tty_free_file(filp); | 796 | tty_free_file(filp); |
| 796 | return retval; | 797 | return retval; |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 6dc810bce295..944a6dca0fcb 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
| @@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 44 | /* Creative SB Audigy 2 NX */ | 44 | /* Creative SB Audigy 2 NX */ |
| 45 | { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, | 45 | { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 46 | 46 | ||
| 47 | /* USB3503 */ | ||
| 48 | { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
| 49 | |||
| 47 | /* Microsoft Wireless Laser Mouse 6000 Receiver */ | 50 | /* Microsoft Wireless Laser Mouse 6000 Receiver */ |
| 48 | { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME }, | 51 | { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 49 | 52 | ||
| @@ -173,6 +176,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 173 | /* MAYA44USB sound device */ | 176 | /* MAYA44USB sound device */ |
| 174 | { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME }, | 177 | { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 175 | 178 | ||
| 179 | /* ASUS Base Station(T100) */ | ||
| 180 | { USB_DEVICE(0x0b05, 0x17e0), .driver_info = | ||
| 181 | USB_QUIRK_IGNORE_REMOTE_WAKEUP }, | ||
| 182 | |||
| 176 | /* Action Semiconductor flash disk */ | 183 | /* Action Semiconductor flash disk */ |
| 177 | { USB_DEVICE(0x10d6, 0x2200), .driver_info = | 184 | { USB_DEVICE(0x10d6, 0x2200), .driver_info = |
| 178 | USB_QUIRK_STRING_FETCH_255 }, | 185 | USB_QUIRK_STRING_FETCH_255 }, |
| @@ -188,26 +195,22 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 188 | { USB_DEVICE(0x1908, 0x1315), .driver_info = | 195 | { USB_DEVICE(0x1908, 0x1315), .driver_info = |
| 189 | USB_QUIRK_HONOR_BNUMINTERFACES }, | 196 | USB_QUIRK_HONOR_BNUMINTERFACES }, |
| 190 | 197 | ||
| 191 | /* INTEL VALUE SSD */ | ||
| 192 | { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
| 193 | |||
| 194 | /* USB3503 */ | ||
| 195 | { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
| 196 | |||
| 197 | /* ASUS Base Station(T100) */ | ||
| 198 | { USB_DEVICE(0x0b05, 0x17e0), .driver_info = | ||
| 199 | USB_QUIRK_IGNORE_REMOTE_WAKEUP }, | ||
| 200 | |||
| 201 | /* Protocol and OTG Electrical Test Device */ | 198 | /* Protocol and OTG Electrical Test Device */ |
| 202 | { USB_DEVICE(0x1a0a, 0x0200), .driver_info = | 199 | { USB_DEVICE(0x1a0a, 0x0200), .driver_info = |
| 203 | USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, | 200 | USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, |
| 204 | 201 | ||
| 202 | /* Acer C120 LED Projector */ | ||
| 203 | { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM }, | ||
| 204 | |||
| 205 | /* Blackmagic Design Intensity Shuttle */ | 205 | /* Blackmagic Design Intensity Shuttle */ |
| 206 | { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM }, | 206 | { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM }, |
| 207 | 207 | ||
| 208 | /* Blackmagic Design UltraStudio SDI */ | 208 | /* Blackmagic Design UltraStudio SDI */ |
| 209 | { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM }, | 209 | { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM }, |
| 210 | 210 | ||
| 211 | /* INTEL VALUE SSD */ | ||
| 212 | { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
| 213 | |||
| 211 | { } /* terminating entry must be last */ | 214 | { } /* terminating entry must be last */ |
| 212 | }; | 215 | }; |
| 213 | 216 | ||
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 3c58d633ce80..dec0b21fc626 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h | |||
| @@ -64,6 +64,17 @@ | |||
| 64 | DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \ | 64 | DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \ |
| 65 | dev_name(hsotg->dev), ##__VA_ARGS__) | 65 | dev_name(hsotg->dev), ##__VA_ARGS__) |
| 66 | 66 | ||
| 67 | #ifdef CONFIG_MIPS | ||
| 68 | /* | ||
| 69 | * There are some MIPS machines that can run in either big-endian | ||
| 70 | * or little-endian mode and that use the dwc2 register without | ||
| 71 | * a byteswap in both ways. | ||
| 72 | * Unlike other architectures, MIPS apparently does not require a | ||
| 73 | * barrier before the __raw_writel() to synchronize with DMA but does | ||
| 74 | * require the barrier after the __raw_writel() to serialize a set of | ||
| 75 | * writes. This set of operations was added specifically for MIPS and | ||
| 76 | * should only be used there. | ||
| 77 | */ | ||
| 67 | static inline u32 dwc2_readl(const void __iomem *addr) | 78 | static inline u32 dwc2_readl(const void __iomem *addr) |
| 68 | { | 79 | { |
| 69 | u32 value = __raw_readl(addr); | 80 | u32 value = __raw_readl(addr); |
| @@ -90,6 +101,22 @@ static inline void dwc2_writel(u32 value, void __iomem *addr) | |||
| 90 | pr_info("INFO:: wrote %08x to %p\n", value, addr); | 101 | pr_info("INFO:: wrote %08x to %p\n", value, addr); |
| 91 | #endif | 102 | #endif |
| 92 | } | 103 | } |
| 104 | #else | ||
| 105 | /* Normal architectures just use readl/write */ | ||
| 106 | static inline u32 dwc2_readl(const void __iomem *addr) | ||
| 107 | { | ||
| 108 | return readl(addr); | ||
| 109 | } | ||
| 110 | |||
| 111 | static inline void dwc2_writel(u32 value, void __iomem *addr) | ||
| 112 | { | ||
| 113 | writel(value, addr); | ||
| 114 | |||
| 115 | #ifdef DWC2_LOG_WRITES | ||
| 116 | pr_info("info:: wrote %08x to %p\n", value, addr); | ||
| 117 | #endif | ||
| 118 | } | ||
| 119 | #endif | ||
| 93 | 120 | ||
| 94 | /* Maximum number of Endpoints/HostChannels */ | 121 | /* Maximum number of Endpoints/HostChannels */ |
| 95 | #define MAX_EPS_CHANNELS 16 | 122 | #define MAX_EPS_CHANNELS 16 |
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 4c5e3005e1dc..26cf09d0fe3c 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c | |||
| @@ -1018,7 +1018,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg, | |||
| 1018 | return 1; | 1018 | return 1; |
| 1019 | } | 1019 | } |
| 1020 | 1020 | ||
| 1021 | static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value); | 1021 | static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now); |
| 1022 | 1022 | ||
| 1023 | /** | 1023 | /** |
| 1024 | * get_ep_head - return the first request on the endpoint | 1024 | * get_ep_head - return the first request on the endpoint |
| @@ -1094,7 +1094,7 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg, | |||
| 1094 | case USB_ENDPOINT_HALT: | 1094 | case USB_ENDPOINT_HALT: |
| 1095 | halted = ep->halted; | 1095 | halted = ep->halted; |
| 1096 | 1096 | ||
| 1097 | dwc2_hsotg_ep_sethalt(&ep->ep, set); | 1097 | dwc2_hsotg_ep_sethalt(&ep->ep, set, true); |
| 1098 | 1098 | ||
| 1099 | ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0); | 1099 | ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0); |
| 1100 | if (ret) { | 1100 | if (ret) { |
| @@ -2948,8 +2948,13 @@ static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) | |||
| 2948 | * dwc2_hsotg_ep_sethalt - set halt on a given endpoint | 2948 | * dwc2_hsotg_ep_sethalt - set halt on a given endpoint |
| 2949 | * @ep: The endpoint to set halt. | 2949 | * @ep: The endpoint to set halt. |
| 2950 | * @value: Set or unset the halt. | 2950 | * @value: Set or unset the halt. |
| 2951 | * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if | ||
| 2952 | * the endpoint is busy processing requests. | ||
| 2953 | * | ||
| 2954 | * We need to stall the endpoint immediately if request comes from set_feature | ||
| 2955 | * protocol command handler. | ||
| 2951 | */ | 2956 | */ |
| 2952 | static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value) | 2957 | static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now) |
| 2953 | { | 2958 | { |
| 2954 | struct dwc2_hsotg_ep *hs_ep = our_ep(ep); | 2959 | struct dwc2_hsotg_ep *hs_ep = our_ep(ep); |
| 2955 | struct dwc2_hsotg *hs = hs_ep->parent; | 2960 | struct dwc2_hsotg *hs = hs_ep->parent; |
| @@ -2969,6 +2974,17 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value) | |||
| 2969 | return 0; | 2974 | return 0; |
| 2970 | } | 2975 | } |
| 2971 | 2976 | ||
| 2977 | if (hs_ep->isochronous) { | ||
| 2978 | dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name); | ||
| 2979 | return -EINVAL; | ||
| 2980 | } | ||
| 2981 | |||
| 2982 | if (!now && value && !list_empty(&hs_ep->queue)) { | ||
| 2983 | dev_dbg(hs->dev, "%s request is pending, cannot halt\n", | ||
| 2984 | ep->name); | ||
| 2985 | return -EAGAIN; | ||
| 2986 | } | ||
| 2987 | |||
| 2972 | if (hs_ep->dir_in) { | 2988 | if (hs_ep->dir_in) { |
| 2973 | epreg = DIEPCTL(index); | 2989 | epreg = DIEPCTL(index); |
| 2974 | epctl = dwc2_readl(hs->regs + epreg); | 2990 | epctl = dwc2_readl(hs->regs + epreg); |
| @@ -3020,7 +3036,7 @@ static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value) | |||
| 3020 | int ret = 0; | 3036 | int ret = 0; |
| 3021 | 3037 | ||
| 3022 | spin_lock_irqsave(&hs->lock, flags); | 3038 | spin_lock_irqsave(&hs->lock, flags); |
| 3023 | ret = dwc2_hsotg_ep_sethalt(ep, value); | 3039 | ret = dwc2_hsotg_ep_sethalt(ep, value, false); |
| 3024 | spin_unlock_irqrestore(&hs->lock, flags); | 3040 | spin_unlock_irqrestore(&hs->lock, flags); |
| 3025 | 3041 | ||
| 3026 | return ret; | 3042 | return ret; |
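
The dwc2 gadget hunks above thread a new "now" flag through dwc2_hsotg_ep_sethalt(): a halt requested from the SET_FEATURE handler is applied immediately, while one coming through the gadget API (sethalt_lock, now = false) is refused with -EAGAIN when requests are still queued, and isochronous endpoints reject halts outright. A reduced decision-logic sketch of that policy; only the return-value selection is modelled, not the endpoint-control register writes:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int ep_sethalt(bool isochronous, bool set, bool queue_empty, bool now)
    {
            if (isochronous)
                    return -EINVAL;     /* isoc endpoints cannot be halted */
            if (!now && set && !queue_empty)
                    return -EAGAIN;     /* requests pending, caller retries later */
            return 0;                   /* proceed to program the stall bit */
    }

    int main(void)
    {
            printf("API halt, queue busy   : %d\n", ep_sethalt(false, true, false, false));
            printf("SET_FEATURE halt, busy : %d\n", ep_sethalt(false, true, false, true));
            printf("isochronous endpoint   : %d\n", ep_sethalt(true,  true, true,  true));
            printf("API halt, queue empty  : %d\n", ep_sethalt(false, true, true,  false));
            return 0;
    }
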
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 7ddf9449a063..654050684f4f 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h | |||
| @@ -402,6 +402,7 @@ | |||
| 402 | #define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f) | 402 | #define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f) |
| 403 | #define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F) | 403 | #define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F) |
| 404 | #define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11) | 404 | #define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11) |
| 405 | #define DWC3_DEPCMD_CLEARPENDIN (1 << 11) | ||
| 405 | #define DWC3_DEPCMD_CMDACT (1 << 10) | 406 | #define DWC3_DEPCMD_CMDACT (1 << 10) |
| 406 | #define DWC3_DEPCMD_CMDIOC (1 << 8) | 407 | #define DWC3_DEPCMD_CMDIOC (1 << 8) |
| 407 | 408 | ||
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index dd5cb5577dca..2f1fb7e7aa54 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c | |||
| @@ -128,12 +128,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev) | |||
| 128 | 128 | ||
| 129 | platform_set_drvdata(pdev, exynos); | 129 | platform_set_drvdata(pdev, exynos); |
| 130 | 130 | ||
| 131 | ret = dwc3_exynos_register_phys(exynos); | ||
| 132 | if (ret) { | ||
| 133 | dev_err(dev, "couldn't register PHYs\n"); | ||
| 134 | return ret; | ||
| 135 | } | ||
| 136 | |||
| 137 | exynos->dev = dev; | 131 | exynos->dev = dev; |
| 138 | 132 | ||
| 139 | exynos->clk = devm_clk_get(dev, "usbdrd30"); | 133 | exynos->clk = devm_clk_get(dev, "usbdrd30"); |
| @@ -183,20 +177,29 @@ static int dwc3_exynos_probe(struct platform_device *pdev) | |||
| 183 | goto err3; | 177 | goto err3; |
| 184 | } | 178 | } |
| 185 | 179 | ||
| 180 | ret = dwc3_exynos_register_phys(exynos); | ||
| 181 | if (ret) { | ||
| 182 | dev_err(dev, "couldn't register PHYs\n"); | ||
| 183 | goto err4; | ||
| 184 | } | ||
| 185 | |||
| 186 | if (node) { | 186 | if (node) { |
| 187 | ret = of_platform_populate(node, NULL, NULL, dev); | 187 | ret = of_platform_populate(node, NULL, NULL, dev); |
| 188 | if (ret) { | 188 | if (ret) { |
| 189 | dev_err(dev, "failed to add dwc3 core\n"); | 189 | dev_err(dev, "failed to add dwc3 core\n"); |
| 190 | goto err4; | 190 | goto err5; |
| 191 | } | 191 | } |
| 192 | } else { | 192 | } else { |
| 193 | dev_err(dev, "no device node, failed to add dwc3 core\n"); | 193 | dev_err(dev, "no device node, failed to add dwc3 core\n"); |
| 194 | ret = -ENODEV; | 194 | ret = -ENODEV; |
| 195 | goto err4; | 195 | goto err5; |
| 196 | } | 196 | } |
| 197 | 197 | ||
| 198 | return 0; | 198 | return 0; |
| 199 | 199 | ||
| 200 | err5: | ||
| 201 | platform_device_unregister(exynos->usb2_phy); | ||
| 202 | platform_device_unregister(exynos->usb3_phy); | ||
| 200 | err4: | 203 | err4: |
| 201 | regulator_disable(exynos->vdd10); | 204 | regulator_disable(exynos->vdd10); |
| 202 | err3: | 205 | err3: |
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c index 5c0adb9c6fb2..50d6ae6f88bc 100644 --- a/drivers/usb/dwc3/dwc3-st.c +++ b/drivers/usb/dwc3/dwc3-st.c | |||
| @@ -129,12 +129,18 @@ static int st_dwc3_drd_init(struct st_dwc3 *dwc3_data) | |||
| 129 | switch (dwc3_data->dr_mode) { | 129 | switch (dwc3_data->dr_mode) { |
| 130 | case USB_DR_MODE_PERIPHERAL: | 130 | case USB_DR_MODE_PERIPHERAL: |
| 131 | 131 | ||
| 132 | val &= ~(USB3_FORCE_VBUSVALID | USB3_DELAY_VBUSVALID | 132 | val &= ~(USB3_DELAY_VBUSVALID |
| 133 | | USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3) | 133 | | USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3) |
| 134 | | USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2 | 134 | | USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2 |
| 135 | | USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2); | 135 | | USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2); |
| 136 | 136 | ||
| 137 | val |= USB3_DEVICE_NOT_HOST; | 137 | /* |
| 138 | * USB3_PORT2_FORCE_VBUSVALID When '1' and when | ||
| 139 | * USB3_PORT2_DEVICE_NOT_HOST = 1, forces VBUSVLDEXT2 input | ||
| 140 | * of the pico PHY to 1. | ||
| 141 | */ | ||
| 142 | |||
| 143 | val |= USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID; | ||
| 138 | break; | 144 | break; |
| 139 | 145 | ||
| 140 | case USB_DR_MODE_HOST: | 146 | case USB_DR_MODE_HOST: |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 9a7d0bd15dc3..07248ff1be5c 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
| @@ -347,6 +347,28 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep, | |||
| 347 | return ret; | 347 | return ret; |
| 348 | } | 348 | } |
| 349 | 349 | ||
| 350 | static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep) | ||
| 351 | { | ||
| 352 | struct dwc3 *dwc = dep->dwc; | ||
| 353 | struct dwc3_gadget_ep_cmd_params params; | ||
| 354 | u32 cmd = DWC3_DEPCMD_CLEARSTALL; | ||
| 355 | |||
| 356 | /* | ||
| 357 | * As of core revision 2.60a the recommended programming model | ||
| 358 | * is to set the ClearPendIN bit when issuing a Clear Stall EP | ||
| 359 | * command for IN endpoints. This is to prevent an issue where | ||
| 360 | * some (non-compliant) hosts may not send ACK TPs for pending | ||
| 361 | * IN transfers due to a mishandled error condition. Synopsys | ||
| 362 | * STAR 9000614252. | ||
| 363 | */ | ||
| 364 | if (dep->direction && (dwc->revision >= DWC3_REVISION_260A)) | ||
| 365 | cmd |= DWC3_DEPCMD_CLEARPENDIN; | ||
| 366 | |||
| 367 | memset(¶ms, 0, sizeof(params)); | ||
| 368 | |||
| 369 | return dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, ¶ms); | ||
| 370 | } | ||
| 371 | |||
| 350 | static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, | 372 | static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep, |
| 351 | struct dwc3_trb *trb) | 373 | struct dwc3_trb *trb) |
| 352 | { | 374 | { |
| @@ -1314,8 +1336,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) | |||
| 1314 | else | 1336 | else |
| 1315 | dep->flags |= DWC3_EP_STALL; | 1337 | dep->flags |= DWC3_EP_STALL; |
| 1316 | } else { | 1338 | } else { |
| 1317 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, | 1339 | ret = dwc3_send_clear_stall_ep_cmd(dep); |
| 1318 | DWC3_DEPCMD_CLEARSTALL, ¶ms); | ||
| 1319 | if (ret) | 1340 | if (ret) |
| 1320 | dev_err(dwc->dev, "failed to clear STALL on %s\n", | 1341 | dev_err(dwc->dev, "failed to clear STALL on %s\n", |
| 1321 | dep->name); | 1342 | dep->name); |
| @@ -2247,7 +2268,6 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) | |||
| 2247 | 2268 | ||
| 2248 | for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { | 2269 | for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) { |
| 2249 | struct dwc3_ep *dep; | 2270 | struct dwc3_ep *dep; |
| 2250 | struct dwc3_gadget_ep_cmd_params params; | ||
| 2251 | int ret; | 2271 | int ret; |
| 2252 | 2272 | ||
| 2253 | dep = dwc->eps[epnum]; | 2273 | dep = dwc->eps[epnum]; |
| @@ -2259,9 +2279,7 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc) | |||
| 2259 | 2279 | ||
| 2260 | dep->flags &= ~DWC3_EP_STALL; | 2280 | dep->flags &= ~DWC3_EP_STALL; |
| 2261 | 2281 | ||
| 2262 | memset(¶ms, 0, sizeof(params)); | 2282 | ret = dwc3_send_clear_stall_ep_cmd(dep); |
| 2263 | ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, | ||
| 2264 | DWC3_DEPCMD_CLEARSTALL, ¶ms); | ||
| 2265 | WARN_ON_ONCE(ret); | 2283 | WARN_ON_ONCE(ret); |
| 2266 | } | 2284 | } |
| 2267 | } | 2285 | } |
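
The dwc3 hunks above factor the Clear Stall endpoint command into dwc3_send_clear_stall_ep_cmd() and, per the added comment, OR in DWC3_DEPCMD_CLEARPENDIN (bit 11) for IN endpoints on core revision 2.60a and later. A small sketch of just that command-word assembly; the CLEARSTALL opcode value and the revision encoding are not shown in the quoted hunks, so they appear below only as hypothetical placeholders:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEPCMD_CLEARPENDIN      (1u << 11)  /* bit position from the core.h hunk */

    /* Placeholders: the real opcode and revision constants live elsewhere
     * in the driver and are not part of the quoted diff. */
    #define DEPCMD_CLEARSTALL_OPCODE        0x1u
    #define REVISION_260A                   0x260au

    static uint32_t build_clear_stall_cmd(bool is_in_ep, uint32_t revision)
    {
            uint32_t cmd = DEPCMD_CLEARSTALL_OPCODE;

            if (is_in_ep && revision >= REVISION_260A)
                    cmd |= DEPCMD_CLEARPENDIN;
            return cmd;
    }

    int main(void)
    {
            printf("OUT ep, new core : 0x%03x\n", build_clear_stall_cmd(false, REVISION_260A));
            printf("IN  ep, old core : 0x%03x\n", build_clear_stall_cmd(true,  REVISION_260A - 1));
            printf("IN  ep, new core : 0x%03x\n", build_clear_stall_cmd(true,  REVISION_260A));
            return 0;
    }
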
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index d67de0d22a2b..eb648485a58c 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c | |||
| @@ -1868,14 +1868,19 @@ unknown: | |||
| 1868 | } | 1868 | } |
| 1869 | break; | 1869 | break; |
| 1870 | } | 1870 | } |
| 1871 | req->length = value; | 1871 | |
| 1872 | req->context = cdev; | 1872 | if (value >= 0) { |
| 1873 | req->zero = value < w_length; | 1873 | req->length = value; |
| 1874 | value = composite_ep0_queue(cdev, req, GFP_ATOMIC); | 1874 | req->context = cdev; |
| 1875 | if (value < 0) { | 1875 | req->zero = value < w_length; |
| 1876 | DBG(cdev, "ep_queue --> %d\n", value); | 1876 | value = composite_ep0_queue(cdev, req, |
| 1877 | req->status = 0; | 1877 | GFP_ATOMIC); |
| 1878 | composite_setup_complete(gadget->ep0, req); | 1878 | if (value < 0) { |
| 1879 | DBG(cdev, "ep_queue --> %d\n", value); | ||
| 1880 | req->status = 0; | ||
| 1881 | composite_setup_complete(gadget->ep0, | ||
| 1882 | req); | ||
| 1883 | } | ||
| 1879 | } | 1884 | } |
| 1880 | return value; | 1885 | return value; |
| 1881 | } | 1886 | } |
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index b6f60ca8a035..70cf3477f951 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c | |||
| @@ -1401,6 +1401,7 @@ static const struct usb_gadget_driver configfs_driver_template = { | |||
| 1401 | .owner = THIS_MODULE, | 1401 | .owner = THIS_MODULE, |
| 1402 | .name = "configfs-gadget", | 1402 | .name = "configfs-gadget", |
| 1403 | }, | 1403 | }, |
| 1404 | .match_existing_only = 1, | ||
| 1404 | }; | 1405 | }; |
| 1405 | 1406 | ||
| 1406 | static struct config_group *gadgets_make( | 1407 | static struct config_group *gadgets_make( |
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 73515d54e1cc..cc33d2667408 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
| @@ -2051,7 +2051,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, | |||
| 2051 | 2051 | ||
| 2052 | if (len < sizeof(*d) || | 2052 | if (len < sizeof(*d) || |
| 2053 | d->bFirstInterfaceNumber >= ffs->interfaces_count || | 2053 | d->bFirstInterfaceNumber >= ffs->interfaces_count || |
| 2054 | d->Reserved1) | 2054 | !d->Reserved1) |
| 2055 | return -EINVAL; | 2055 | return -EINVAL; |
| 2056 | for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) | 2056 | for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) |
| 2057 | if (d->Reserved2[i]) | 2057 | if (d->Reserved2[i]) |
| @@ -2729,6 +2729,7 @@ static int _ffs_func_bind(struct usb_configuration *c, | |||
| 2729 | func->ffs->ss_descs_count; | 2729 | func->ffs->ss_descs_count; |
| 2730 | 2730 | ||
| 2731 | int fs_len, hs_len, ss_len, ret, i; | 2731 | int fs_len, hs_len, ss_len, ret, i; |
| 2732 | struct ffs_ep *eps_ptr; | ||
| 2732 | 2733 | ||
| 2733 | /* Make it a single chunk, less management later on */ | 2734 | /* Make it a single chunk, less management later on */ |
| 2734 | vla_group(d); | 2735 | vla_group(d); |
| @@ -2777,12 +2778,9 @@ static int _ffs_func_bind(struct usb_configuration *c, | |||
| 2777 | ffs->raw_descs_length); | 2778 | ffs->raw_descs_length); |
| 2778 | 2779 | ||
| 2779 | memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz); | 2780 | memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz); |
| 2780 | for (ret = ffs->eps_count; ret; --ret) { | 2781 | eps_ptr = vla_ptr(vlabuf, d, eps); |
| 2781 | struct ffs_ep *ptr; | 2782 | for (i = 0; i < ffs->eps_count; i++) |
| 2782 | 2783 | eps_ptr[i].num = -1; | |
| 2783 | ptr = vla_ptr(vlabuf, d, eps); | ||
| 2784 | ptr[ret].num = -1; | ||
| 2785 | } | ||
| 2786 | 2784 | ||
| 2787 | /* Save pointers | 2785 | /* Save pointers |
| 2788 | * d_eps == vlabuf, func->eps used to kfree vlabuf later | 2786 | * d_eps == vlabuf, func->eps used to kfree vlabuf later |
| @@ -2851,7 +2849,7 @@ static int _ffs_func_bind(struct usb_configuration *c, | |||
| 2851 | goto error; | 2849 | goto error; |
| 2852 | 2850 | ||
| 2853 | func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table); | 2851 | func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table); |
| 2854 | if (c->cdev->use_os_string) | 2852 | if (c->cdev->use_os_string) { |
| 2855 | for (i = 0; i < ffs->interfaces_count; ++i) { | 2853 | for (i = 0; i < ffs->interfaces_count; ++i) { |
| 2856 | struct usb_os_desc *desc; | 2854 | struct usb_os_desc *desc; |
| 2857 | 2855 | ||
| @@ -2862,13 +2860,15 @@ static int _ffs_func_bind(struct usb_configuration *c, | |||
| 2862 | vla_ptr(vlabuf, d, ext_compat) + i * 16; | 2860 | vla_ptr(vlabuf, d, ext_compat) + i * 16; |
| 2863 | INIT_LIST_HEAD(&desc->ext_prop); | 2861 | INIT_LIST_HEAD(&desc->ext_prop); |
| 2864 | } | 2862 | } |
| 2865 | ret = ffs_do_os_descs(ffs->ms_os_descs_count, | 2863 | ret = ffs_do_os_descs(ffs->ms_os_descs_count, |
| 2866 | vla_ptr(vlabuf, d, raw_descs) + | 2864 | vla_ptr(vlabuf, d, raw_descs) + |
| 2867 | fs_len + hs_len + ss_len, | 2865 | fs_len + hs_len + ss_len, |
| 2868 | d_raw_descs__sz - fs_len - hs_len - ss_len, | 2866 | d_raw_descs__sz - fs_len - hs_len - |
| 2869 | __ffs_func_bind_do_os_desc, func); | 2867 | ss_len, |
| 2870 | if (unlikely(ret < 0)) | 2868 | __ffs_func_bind_do_os_desc, func); |
| 2871 | goto error; | 2869 | if (unlikely(ret < 0)) |
| 2870 | goto error; | ||
| 2871 | } | ||
| 2872 | func->function.os_desc_n = | 2872 | func->function.os_desc_n = |
| 2873 | c->cdev->use_os_string ? ffs->interfaces_count : 0; | 2873 | c->cdev->use_os_string ? ffs->interfaces_count : 0; |
| 2874 | 2874 | ||
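
The f_fs.c hunk above rewrites the endpoint-initialisation loop: the old form counted ret from eps_count down to 1 and wrote eps[ret], which, for an array of eps_count elements, never touched element 0 and wrote one slot past the last element, while the new form indexes 0..eps_count-1 through a single eps_ptr. A compact demonstration of the two index ranges; the count of 4 is arbitrary:

    #include <stdio.h>

    #define EPS_COUNT 4

    int main(void)
    {
            /* one spare slot at the end so the old loop's overflow is visible */
            int old_hits[EPS_COUNT + 1] = { 0 };
            int new_hits[EPS_COUNT + 1] = { 0 };
            int i, ret;

            for (ret = EPS_COUNT; ret; --ret)   /* old loop: indexes 1..EPS_COUNT */
                    old_hits[ret]++;

            for (i = 0; i < EPS_COUNT; i++)     /* new loop: indexes 0..EPS_COUNT-1 */
                    new_hits[i]++;

            for (i = 0; i <= EPS_COUNT; i++)
                    printf("index %d: old loop wrote %d time(s), new loop wrote %d\n",
                           i, old_hits[i], new_hits[i]);
            return 0;
    }
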
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index c45104e3a64b..64706a789580 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c | |||
| @@ -161,14 +161,6 @@ static struct usb_endpoint_descriptor hs_ep_out_desc = { | |||
| 161 | .wMaxPacketSize = cpu_to_le16(512) | 161 | .wMaxPacketSize = cpu_to_le16(512) |
| 162 | }; | 162 | }; |
| 163 | 163 | ||
| 164 | static struct usb_qualifier_descriptor dev_qualifier = { | ||
| 165 | .bLength = sizeof(dev_qualifier), | ||
| 166 | .bDescriptorType = USB_DT_DEVICE_QUALIFIER, | ||
| 167 | .bcdUSB = cpu_to_le16(0x0200), | ||
| 168 | .bDeviceClass = USB_CLASS_PRINTER, | ||
| 169 | .bNumConfigurations = 1 | ||
| 170 | }; | ||
| 171 | |||
| 172 | static struct usb_descriptor_header *hs_printer_function[] = { | 164 | static struct usb_descriptor_header *hs_printer_function[] = { |
| 173 | (struct usb_descriptor_header *) &intf_desc, | 165 | (struct usb_descriptor_header *) &intf_desc, |
| 174 | (struct usb_descriptor_header *) &hs_ep_in_desc, | 166 | (struct usb_descriptor_header *) &hs_ep_in_desc, |
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index 35fe3c80cfc0..197f73386fac 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c | |||
| @@ -1445,16 +1445,18 @@ static void usbg_drop_tpg(struct se_portal_group *se_tpg) | |||
| 1445 | for (i = 0; i < TPG_INSTANCES; ++i) | 1445 | for (i = 0; i < TPG_INSTANCES; ++i) |
| 1446 | if (tpg_instances[i].tpg == tpg) | 1446 | if (tpg_instances[i].tpg == tpg) |
| 1447 | break; | 1447 | break; |
| 1448 | if (i < TPG_INSTANCES) | 1448 | if (i < TPG_INSTANCES) { |
| 1449 | tpg_instances[i].tpg = NULL; | 1449 | tpg_instances[i].tpg = NULL; |
| 1450 | opts = container_of(tpg_instances[i].func_inst, | 1450 | opts = container_of(tpg_instances[i].func_inst, |
| 1451 | struct f_tcm_opts, func_inst); | 1451 | struct f_tcm_opts, func_inst); |
| 1452 | mutex_lock(&opts->dep_lock); | 1452 | mutex_lock(&opts->dep_lock); |
| 1453 | if (opts->has_dep) | 1453 | if (opts->has_dep) |
| 1454 | module_put(opts->dependent); | 1454 | module_put(opts->dependent); |
| 1455 | else | 1455 | else |
| 1456 | configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item); | 1456 | configfs_undepend_item_unlocked( |
| 1457 | mutex_unlock(&opts->dep_lock); | 1457 | &opts->func_inst.group.cg_item); |
| 1458 | mutex_unlock(&opts->dep_lock); | ||
| 1459 | } | ||
| 1458 | mutex_unlock(&tpg_instances_lock); | 1460 | mutex_unlock(&tpg_instances_lock); |
| 1459 | 1461 | ||
| 1460 | kfree(tpg); | 1462 | kfree(tpg); |
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 186d4b162524..cd214ec8a601 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c | |||
| @@ -598,18 +598,6 @@ static struct usb_gadget_strings *fn_strings[] = { | |||
| 598 | NULL, | 598 | NULL, |
| 599 | }; | 599 | }; |
| 600 | 600 | ||
| 601 | static struct usb_qualifier_descriptor devqual_desc = { | ||
| 602 | .bLength = sizeof devqual_desc, | ||
| 603 | .bDescriptorType = USB_DT_DEVICE_QUALIFIER, | ||
| 604 | |||
| 605 | .bcdUSB = cpu_to_le16(0x200), | ||
| 606 | .bDeviceClass = USB_CLASS_MISC, | ||
| 607 | .bDeviceSubClass = 0x02, | ||
| 608 | .bDeviceProtocol = 0x01, | ||
| 609 | .bNumConfigurations = 1, | ||
| 610 | .bRESERVED = 0, | ||
| 611 | }; | ||
| 612 | |||
| 613 | static struct usb_interface_assoc_descriptor iad_desc = { | 601 | static struct usb_interface_assoc_descriptor iad_desc = { |
| 614 | .bLength = sizeof iad_desc, | 602 | .bLength = sizeof iad_desc, |
| 615 | .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, | 603 | .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, |
| @@ -1292,6 +1280,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr) | |||
| 1292 | 1280 | ||
| 1293 | if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { | 1281 | if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { |
| 1294 | struct cntrl_cur_lay3 c; | 1282 | struct cntrl_cur_lay3 c; |
| 1283 | memset(&c, 0, sizeof(struct cntrl_cur_lay3)); | ||
| 1295 | 1284 | ||
| 1296 | if (entity_id == USB_IN_CLK_ID) | 1285 | if (entity_id == USB_IN_CLK_ID) |
| 1297 | c.dCUR = p_srate; | 1286 | c.dCUR = p_srate; |
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c index d62683017cf3..990df221c629 100644 --- a/drivers/usb/gadget/function/storage_common.c +++ b/drivers/usb/gadget/function/storage_common.c | |||
| @@ -83,9 +83,7 @@ EXPORT_SYMBOL_GPL(fsg_fs_function); | |||
| 83 | * USB 2.0 devices need to expose both high speed and full speed | 83 | * USB 2.0 devices need to expose both high speed and full speed |
| 84 | * descriptors, unless they only run at full speed. | 84 | * descriptors, unless they only run at full speed. |
| 85 | * | 85 | * |
| 86 | * That means alternate endpoint descriptors (bigger packets) | 86 | * That means alternate endpoint descriptors (bigger packets). |
| 87 | * and a "device qualifier" ... plus more construction options | ||
| 88 | * for the configuration descriptor. | ||
| 89 | */ | 87 | */ |
| 90 | struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = { | 88 | struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = { |
| 91 | .bLength = USB_DT_ENDPOINT_SIZE, | 89 | .bLength = USB_DT_ENDPOINT_SIZE, |
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index e64479f882a5..aa3707bdebb4 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c | |||
| @@ -938,8 +938,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) | |||
| 938 | struct usb_ep *ep = dev->gadget->ep0; | 938 | struct usb_ep *ep = dev->gadget->ep0; |
| 939 | struct usb_request *req = dev->req; | 939 | struct usb_request *req = dev->req; |
| 940 | 940 | ||
| 941 | if ((retval = setup_req (ep, req, 0)) == 0) | 941 | if ((retval = setup_req (ep, req, 0)) == 0) { |
| 942 | retval = usb_ep_queue (ep, req, GFP_ATOMIC); | 942 | spin_unlock_irq (&dev->lock); |
| 943 | retval = usb_ep_queue (ep, req, GFP_KERNEL); | ||
| 944 | spin_lock_irq (&dev->lock); | ||
| 945 | } | ||
| 943 | dev->state = STATE_DEV_CONNECTED; | 946 | dev->state = STATE_DEV_CONNECTED; |
| 944 | 947 | ||
| 945 | /* assume that was SET_CONFIGURATION */ | 948 | /* assume that was SET_CONFIGURATION */ |
| @@ -1457,8 +1460,11 @@ delegate: | |||
| 1457 | w_length); | 1460 | w_length); |
| 1458 | if (value < 0) | 1461 | if (value < 0) |
| 1459 | break; | 1462 | break; |
| 1463 | |||
| 1464 | spin_unlock (&dev->lock); | ||
| 1460 | value = usb_ep_queue (gadget->ep0, dev->req, | 1465 | value = usb_ep_queue (gadget->ep0, dev->req, |
| 1461 | GFP_ATOMIC); | 1466 | GFP_KERNEL); |
| 1467 | spin_lock (&dev->lock); | ||
| 1462 | if (value < 0) { | 1468 | if (value < 0) { |
| 1463 | clean_req (gadget->ep0, dev->req); | 1469 | clean_req (gadget->ep0, dev->req); |
| 1464 | break; | 1470 | break; |
| @@ -1481,11 +1487,14 @@ delegate: | |||
| 1481 | if (value >= 0 && dev->state != STATE_DEV_SETUP) { | 1487 | if (value >= 0 && dev->state != STATE_DEV_SETUP) { |
| 1482 | req->length = value; | 1488 | req->length = value; |
| 1483 | req->zero = value < w_length; | 1489 | req->zero = value < w_length; |
| 1484 | value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC); | 1490 | |
| 1491 | spin_unlock (&dev->lock); | ||
| 1492 | value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL); | ||
| 1485 | if (value < 0) { | 1493 | if (value < 0) { |
| 1486 | DBG (dev, "ep_queue --> %d\n", value); | 1494 | DBG (dev, "ep_queue --> %d\n", value); |
| 1487 | req->status = 0; | 1495 | req->status = 0; |
| 1488 | } | 1496 | } |
| 1497 | return value; | ||
| 1489 | } | 1498 | } |
| 1490 | 1499 | ||
| 1491 | /* device stalls when value < 0 */ | 1500 | /* device stalls when value < 0 */ |
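The inode.c hunks all follow one shape: release dev->lock around usb_ep_queue() so the call may sleep, which in turn allows GFP_KERNEL instead of GFP_ATOMIC. A hedged sketch of that unlock/queue/relock pattern, assuming the caller holds dev->lock with interrupts disabled as in ep0_read() (the helper name is hypothetical):

    /* Sketch only, not the driver's exact code. */
    static int queue_ep0_unlocked(struct dev_data *dev, struct usb_ep *ep,
                                  struct usb_request *req)
    {
            int value;

            spin_unlock_irq(&dev->lock);
            /* may sleep now that the spinlock is dropped */
            value = usb_ep_queue(ep, req, GFP_KERNEL);
            spin_lock_irq(&dev->lock);
            return value;
    }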
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c index 6e8300d6a737..e1b2dcebdc2e 100644 --- a/drivers/usb/gadget/udc/udc-core.c +++ b/drivers/usb/gadget/udc/udc-core.c | |||
| @@ -603,11 +603,15 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver) | |||
| 603 | } | 603 | } |
| 604 | } | 604 | } |
| 605 | 605 | ||
| 606 | list_add_tail(&driver->pending, &gadget_driver_pending_list); | 606 | if (!driver->match_existing_only) { |
| 607 | pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n", | 607 | list_add_tail(&driver->pending, &gadget_driver_pending_list); |
| 608 | driver->function); | 608 | pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n", |
| 609 | driver->function); | ||
| 610 | ret = 0; | ||
| 611 | } | ||
| 612 | |||
| 609 | mutex_unlock(&udc_lock); | 613 | mutex_unlock(&udc_lock); |
| 610 | return 0; | 614 | return ret; |
| 611 | found: | 615 | found: |
| 612 | ret = udc_bind_to_driver(udc, driver); | 616 | ret = udc_bind_to_driver(udc, driver); |
| 613 | mutex_unlock(&udc_lock); | 617 | mutex_unlock(&udc_lock); |
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index ae1b6e69eb96..a962b89b65a6 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
| @@ -368,6 +368,15 @@ static void ehci_shutdown(struct usb_hcd *hcd) | |||
| 368 | { | 368 | { |
| 369 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | 369 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); |
| 370 | 370 | ||
| 371 | /** | ||
| 372 | * Protect the system from crashing at system shutdown in cases where | ||
| 373 | * the usb host has not yet been added by the OTG controller driver. | ||
| 374 | * Since ehci_setup() has not been done yet, stop accessing registers | ||
| 375 | * or variables initialized in ehci_setup(). | ||
| 376 | */ | ||
| 377 | if (!ehci->sbrn) | ||
| 378 | return; | ||
| 379 | |||
| 371 | spin_lock_irq(&ehci->lock); | 380 | spin_lock_irq(&ehci->lock); |
| 372 | ehci->shutdown = true; | 381 | ehci->shutdown = true; |
| 373 | ehci->rh_state = EHCI_RH_STOPPING; | 382 | ehci->rh_state = EHCI_RH_STOPPING; |
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index ffc90295a95f..74f62d68f013 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
| @@ -872,15 +872,23 @@ int ehci_hub_control( | |||
| 872 | ) { | 872 | ) { |
| 873 | struct ehci_hcd *ehci = hcd_to_ehci (hcd); | 873 | struct ehci_hcd *ehci = hcd_to_ehci (hcd); |
| 874 | int ports = HCS_N_PORTS (ehci->hcs_params); | 874 | int ports = HCS_N_PORTS (ehci->hcs_params); |
| 875 | u32 __iomem *status_reg = &ehci->regs->port_status[ | 875 | u32 __iomem *status_reg, *hostpc_reg; |
| 876 | (wIndex & 0xff) - 1]; | ||
| 877 | u32 __iomem *hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1]; | ||
| 878 | u32 temp, temp1, status; | 876 | u32 temp, temp1, status; |
| 879 | unsigned long flags; | 877 | unsigned long flags; |
| 880 | int retval = 0; | 878 | int retval = 0; |
| 881 | unsigned selector; | 879 | unsigned selector; |
| 882 | 880 | ||
| 883 | /* | 881 | /* |
| 882 | * Avoid underflow while calculating (wIndex & 0xff) - 1. | ||
| 883 | * The compiler might deduce that wIndex can never be 0 and then | ||
| 884 | * optimize away the tests for !wIndex below. | ||
| 885 | */ | ||
| 886 | temp = wIndex & 0xff; | ||
| 887 | temp -= (temp > 0); | ||
| 888 | status_reg = &ehci->regs->port_status[temp]; | ||
| 889 | hostpc_reg = &ehci->regs->hostpc[temp]; | ||
| 890 | |||
| 891 | /* | ||
| 884 | * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. | 892 | * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. |
| 885 | * HCS_INDICATOR may say we can change LEDs to off/amber/green. | 893 | * HCS_INDICATOR may say we can change LEDs to off/amber/green. |
| 886 | * (track current state ourselves) ... blink for diagnostics, | 894 | * (track current state ourselves) ... blink for diagnostics, |
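The rewritten index calculation sidesteps an unsigned underflow when wIndex is 0 and, just as importantly, does not let the compiler infer that wIndex must be non-zero and silently drop the later !wIndex checks. A standalone illustration of the clamp, compilable on its own:

    #include <stdio.h>

    int main(void)
    {
            unsigned int samples[] = { 0, 1, 2, 0x1ff };
            unsigned int i;

            for (i = 0; i < 4; i++) {
                    unsigned int temp = samples[i] & 0xff;

                    /* Subtract 1 only when temp > 0, so wIndex 0 maps to
                     * slot 0 instead of wrapping around to UINT_MAX. */
                    temp -= (temp > 0);
                    printf("wIndex=%#x -> port slot %u\n", samples[i], temp);
            }
            return 0;
    }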
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c index d3afc89d00f5..2f8d3af811ce 100644 --- a/drivers/usb/host/ehci-msm.c +++ b/drivers/usb/host/ehci-msm.c | |||
| @@ -179,22 +179,32 @@ static int ehci_msm_remove(struct platform_device *pdev) | |||
| 179 | static int ehci_msm_pm_suspend(struct device *dev) | 179 | static int ehci_msm_pm_suspend(struct device *dev) |
| 180 | { | 180 | { |
| 181 | struct usb_hcd *hcd = dev_get_drvdata(dev); | 181 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
| 182 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||
| 182 | bool do_wakeup = device_may_wakeup(dev); | 183 | bool do_wakeup = device_may_wakeup(dev); |
| 183 | 184 | ||
| 184 | dev_dbg(dev, "ehci-msm PM suspend\n"); | 185 | dev_dbg(dev, "ehci-msm PM suspend\n"); |
| 185 | 186 | ||
| 186 | return ehci_suspend(hcd, do_wakeup); | 187 | /* Only call ehci_suspend if ehci_setup has been done */ |
| 188 | if (ehci->sbrn) | ||
| 189 | return ehci_suspend(hcd, do_wakeup); | ||
| 190 | |||
| 191 | return 0; | ||
| 187 | } | 192 | } |
| 188 | 193 | ||
| 189 | static int ehci_msm_pm_resume(struct device *dev) | 194 | static int ehci_msm_pm_resume(struct device *dev) |
| 190 | { | 195 | { |
| 191 | struct usb_hcd *hcd = dev_get_drvdata(dev); | 196 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
| 197 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); | ||
| 192 | 198 | ||
| 193 | dev_dbg(dev, "ehci-msm PM resume\n"); | 199 | dev_dbg(dev, "ehci-msm PM resume\n"); |
| 194 | ehci_resume(hcd, false); | 200 | |
| 201 | /* Only call ehci_resume if ehci_setup has been done */ | ||
| 202 | if (ehci->sbrn) | ||
| 203 | ehci_resume(hcd, false); | ||
| 195 | 204 | ||
| 196 | return 0; | 205 | return 0; |
| 197 | } | 206 | } |
| 207 | |||
| 198 | #else | 208 | #else |
| 199 | #define ehci_msm_pm_suspend NULL | 209 | #define ehci_msm_pm_suspend NULL |
| 200 | #define ehci_msm_pm_resume NULL | 210 | #define ehci_msm_pm_resume NULL |
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c index 4031b372008e..9a3d7db5be57 100644 --- a/drivers/usb/host/ehci-tegra.c +++ b/drivers/usb/host/ehci-tegra.c | |||
| @@ -81,15 +81,23 @@ static int tegra_reset_usb_controller(struct platform_device *pdev) | |||
| 81 | struct usb_hcd *hcd = platform_get_drvdata(pdev); | 81 | struct usb_hcd *hcd = platform_get_drvdata(pdev); |
| 82 | struct tegra_ehci_hcd *tegra = | 82 | struct tegra_ehci_hcd *tegra = |
| 83 | (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv; | 83 | (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv; |
| 84 | bool has_utmi_pad_registers = false; | ||
| 84 | 85 | ||
| 85 | phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0); | 86 | phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0); |
| 86 | if (!phy_np) | 87 | if (!phy_np) |
| 87 | return -ENOENT; | 88 | return -ENOENT; |
| 88 | 89 | ||
| 90 | if (of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers")) | ||
| 91 | has_utmi_pad_registers = true; | ||
| 92 | |||
| 89 | if (!usb1_reset_attempted) { | 93 | if (!usb1_reset_attempted) { |
| 90 | struct reset_control *usb1_reset; | 94 | struct reset_control *usb1_reset; |
| 91 | 95 | ||
| 92 | usb1_reset = of_reset_control_get(phy_np, "usb"); | 96 | if (!has_utmi_pad_registers) |
| 97 | usb1_reset = of_reset_control_get(phy_np, "utmi-pads"); | ||
| 98 | else | ||
| 99 | usb1_reset = tegra->rst; | ||
| 100 | |||
| 93 | if (IS_ERR(usb1_reset)) { | 101 | if (IS_ERR(usb1_reset)) { |
| 94 | dev_warn(&pdev->dev, | 102 | dev_warn(&pdev->dev, |
| 95 | "can't get utmi-pads reset from the PHY\n"); | 103 | "can't get utmi-pads reset from the PHY\n"); |
| @@ -99,13 +107,15 @@ static int tegra_reset_usb_controller(struct platform_device *pdev) | |||
| 99 | reset_control_assert(usb1_reset); | 107 | reset_control_assert(usb1_reset); |
| 100 | udelay(1); | 108 | udelay(1); |
| 101 | reset_control_deassert(usb1_reset); | 109 | reset_control_deassert(usb1_reset); |
| 110 | |||
| 111 | if (!has_utmi_pad_registers) | ||
| 112 | reset_control_put(usb1_reset); | ||
| 102 | } | 113 | } |
| 103 | 114 | ||
| 104 | reset_control_put(usb1_reset); | ||
| 105 | usb1_reset_attempted = true; | 115 | usb1_reset_attempted = true; |
| 106 | } | 116 | } |
| 107 | 117 | ||
| 108 | if (!of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers")) { | 118 | if (!has_utmi_pad_registers) { |
| 109 | reset_control_assert(tegra->rst); | 119 | reset_control_assert(tegra->rst); |
| 110 | udelay(1); | 120 | udelay(1); |
| 111 | reset_control_deassert(tegra->rst); | 121 | reset_control_deassert(tegra->rst); |
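The tegra hunk picks the reset line based on whether the PHY node carries its own UTMI pad registers, and only puts the reset handle when it was obtained here; tegra->rst stays owned by the driver. A hedged sketch of that ownership rule (error handling trimmed, same property and API names as the hunk):

    struct reset_control *rst;
    bool has_utmi_pads;

    has_utmi_pads = of_property_read_bool(phy_np,
                                          "nvidia,has-utmi-pad-registers");
    /* borrow the driver's own reset, or take one from the PHY node */
    rst = has_utmi_pads ? tegra->rst
                        : of_reset_control_get(phy_np, "utmi-pads");
    if (!IS_ERR(rst)) {
            reset_control_assert(rst);
            udelay(1);
            reset_control_deassert(rst);
            if (!has_utmi_pads)
                    reset_control_put(rst);  /* put only what we took here */
    }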
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index d029bbe9eb36..641fed609911 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c | |||
| @@ -183,7 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed) | |||
| 183 | { | 183 | { |
| 184 | int branch; | 184 | int branch; |
| 185 | 185 | ||
| 186 | ed->state = ED_OPER; | ||
| 187 | ed->ed_prev = NULL; | 186 | ed->ed_prev = NULL; |
| 188 | ed->ed_next = NULL; | 187 | ed->ed_next = NULL; |
| 189 | ed->hwNextED = 0; | 188 | ed->hwNextED = 0; |
| @@ -259,6 +258,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed) | |||
| 259 | /* the HC may not see the schedule updates yet, but if it does | 258 | /* the HC may not see the schedule updates yet, but if it does |
| 260 | * then they'll be properly ordered. | 259 | * then they'll be properly ordered. |
| 261 | */ | 260 | */ |
| 261 | |||
| 262 | ed->state = ED_OPER; | ||
| 262 | return 0; | 263 | return 0; |
| 263 | } | 264 | } |
| 264 | 265 | ||
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 48672fac7ff3..c10972fcc8e4 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | /* Device for a quirk */ | 37 | /* Device for a quirk */ |
| 38 | #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 | 38 | #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 |
| 39 | #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 | 39 | #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 |
| 40 | #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009 | ||
| 40 | #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400 | 41 | #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400 |
| 41 | 42 | ||
| 42 | #define PCI_VENDOR_ID_ETRON 0x1b6f | 43 | #define PCI_VENDOR_ID_ETRON 0x1b6f |
| @@ -114,6 +115,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
| 114 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; | 115 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; |
| 115 | } | 116 | } |
| 116 | 117 | ||
| 118 | if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && | ||
| 119 | pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009) | ||
| 120 | xhci->quirks |= XHCI_BROKEN_STREAMS; | ||
| 121 | |||
| 117 | if (pdev->vendor == PCI_VENDOR_ID_NEC) | 122 | if (pdev->vendor == PCI_VENDOR_ID_NEC) |
| 118 | xhci->quirks |= XHCI_NEC_HOST; | 123 | xhci->quirks |= XHCI_NEC_HOST; |
| 119 | 124 | ||
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 676ea458148b..1f3f981fe7f8 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c | |||
| @@ -196,6 +196,9 @@ static int xhci_plat_probe(struct platform_device *pdev) | |||
| 196 | ret = clk_prepare_enable(clk); | 196 | ret = clk_prepare_enable(clk); |
| 197 | if (ret) | 197 | if (ret) |
| 198 | goto put_hcd; | 198 | goto put_hcd; |
| 199 | } else if (PTR_ERR(clk) == -EPROBE_DEFER) { | ||
| 200 | ret = -EPROBE_DEFER; | ||
| 201 | goto put_hcd; | ||
| 199 | } | 202 | } |
| 200 | 203 | ||
| 201 | xhci = hcd_to_xhci(hcd); | 204 | xhci = hcd_to_xhci(hcd); |
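The xhci-plat change distinguishes a clock whose provider has not probed yet from a clock that simply is not described: -EPROBE_DEFER is propagated so the probe is retried later, while any other error keeps the old "clock is optional" behaviour. Condensed from the hunk above:

    /* optional clock: absent is fine, "not ready yet" is not */
    clk = devm_clk_get(&pdev->dev, NULL);
    if (!IS_ERR(clk)) {
            ret = clk_prepare_enable(clk);
            if (ret)
                    goto put_hcd;
    } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
            ret = -EPROBE_DEFER;    /* provider not bound yet, retry probe */
            goto put_hcd;
    }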
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 52deae4b7eac..d7d502578d79 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
| @@ -290,6 +290,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) | |||
| 290 | 290 | ||
| 291 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); | 291 | temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
| 292 | xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; | 292 | xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; |
| 293 | |||
| 294 | /* | ||
| 295 | * Writing the CMD_RING_ABORT bit should cause a cmd completion event, | ||
| 296 | * however on some host hw the CMD_RING_RUNNING bit is correctly cleared | ||
| 297 | * but the completion event in never sent. Use the cmd timeout timer to | ||
| 298 | * handle those cases. Use twice the time to cover the bit polling retry | ||
| 299 | */ | ||
| 300 | mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT)); | ||
| 293 | xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, | 301 | xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, |
| 294 | &xhci->op_regs->cmd_ring); | 302 | &xhci->op_regs->cmd_ring); |
| 295 | 303 | ||
| @@ -314,6 +322,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) | |||
| 314 | 322 | ||
| 315 | xhci_err(xhci, "Stopped the command ring failed, " | 323 | xhci_err(xhci, "Stopped the command ring failed, " |
| 316 | "maybe the host is dead\n"); | 324 | "maybe the host is dead\n"); |
| 325 | del_timer(&xhci->cmd_timer); | ||
| 317 | xhci->xhc_state |= XHCI_STATE_DYING; | 326 | xhci->xhc_state |= XHCI_STATE_DYING; |
| 318 | xhci_quiesce(xhci); | 327 | xhci_quiesce(xhci); |
| 319 | xhci_halt(xhci); | 328 | xhci_halt(xhci); |
| @@ -1246,22 +1255,21 @@ void xhci_handle_command_timeout(unsigned long data) | |||
| 1246 | int ret; | 1255 | int ret; |
| 1247 | unsigned long flags; | 1256 | unsigned long flags; |
| 1248 | u64 hw_ring_state; | 1257 | u64 hw_ring_state; |
| 1249 | struct xhci_command *cur_cmd = NULL; | 1258 | bool second_timeout = false; |
| 1250 | xhci = (struct xhci_hcd *) data; | 1259 | xhci = (struct xhci_hcd *) data; |
| 1251 | 1260 | ||
| 1252 | /* mark this command to be cancelled */ | 1261 | /* mark this command to be cancelled */ |
| 1253 | spin_lock_irqsave(&xhci->lock, flags); | 1262 | spin_lock_irqsave(&xhci->lock, flags); |
| 1254 | if (xhci->current_cmd) { | 1263 | if (xhci->current_cmd) { |
| 1255 | cur_cmd = xhci->current_cmd; | 1264 | if (xhci->current_cmd->status == COMP_CMD_ABORT) |
| 1256 | cur_cmd->status = COMP_CMD_ABORT; | 1265 | second_timeout = true; |
| 1266 | xhci->current_cmd->status = COMP_CMD_ABORT; | ||
| 1257 | } | 1267 | } |
| 1258 | 1268 | ||
| 1259 | |||
| 1260 | /* Make sure command ring is running before aborting it */ | 1269 | /* Make sure command ring is running before aborting it */ |
| 1261 | hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); | 1270 | hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
| 1262 | if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && | 1271 | if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) && |
| 1263 | (hw_ring_state & CMD_RING_RUNNING)) { | 1272 | (hw_ring_state & CMD_RING_RUNNING)) { |
| 1264 | |||
| 1265 | spin_unlock_irqrestore(&xhci->lock, flags); | 1273 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 1266 | xhci_dbg(xhci, "Command timeout\n"); | 1274 | xhci_dbg(xhci, "Command timeout\n"); |
| 1267 | ret = xhci_abort_cmd_ring(xhci); | 1275 | ret = xhci_abort_cmd_ring(xhci); |
| @@ -1273,6 +1281,15 @@ void xhci_handle_command_timeout(unsigned long data) | |||
| 1273 | } | 1281 | } |
| 1274 | return; | 1282 | return; |
| 1275 | } | 1283 | } |
| 1284 | |||
| 1285 | /* command ring failed to restart, or host removed. Bail out */ | ||
| 1286 | if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) { | ||
| 1287 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
| 1288 | xhci_dbg(xhci, "command timed out twice, ring start fail?\n"); | ||
| 1289 | xhci_cleanup_command_queue(xhci); | ||
| 1290 | return; | ||
| 1291 | } | ||
| 1292 | |||
| 1276 | /* command timeout on stopped ring, ring can't be aborted */ | 1293 | /* command timeout on stopped ring, ring can't be aborted */ |
| 1277 | xhci_dbg(xhci, "Command timeout on stopped ring\n"); | 1294 | xhci_dbg(xhci, "Command timeout on stopped ring\n"); |
| 1278 | xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd); | 1295 | xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd); |
| @@ -2721,7 +2738,8 @@ hw_died: | |||
| 2721 | writel(irq_pending, &xhci->ir_set->irq_pending); | 2738 | writel(irq_pending, &xhci->ir_set->irq_pending); |
| 2722 | } | 2739 | } |
| 2723 | 2740 | ||
| 2724 | if (xhci->xhc_state & XHCI_STATE_DYING) { | 2741 | if (xhci->xhc_state & XHCI_STATE_DYING || |
| 2742 | xhci->xhc_state & XHCI_STATE_HALTED) { | ||
| 2725 | xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " | 2743 | xhci_dbg(xhci, "xHCI dying, ignoring interrupt. " |
| 2726 | "Shouldn't IRQs be disabled?\n"); | 2744 | "Shouldn't IRQs be disabled?\n"); |
| 2727 | /* Clear the event handler busy flag (RW1C); | 2745 | /* Clear the event handler busy flag (RW1C); |
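The command-ring changes add two layers of defence: arm the command timer for twice the normal timeout when an abort is written (for hosts that clear CMD_RING_RUNNING but never post the completion event), and bail out of the timeout handler when the same command times out a second time. The second-timeout detection, condensed from the hunk with locking omitted:

    bool second_timeout = false;

    if (xhci->current_cmd) {
            /* already marked aborted -> the abort itself never completed */
            if (xhci->current_cmd->status == COMP_CMD_ABORT)
                    second_timeout = true;
            xhci->current_cmd->status = COMP_CMD_ABORT;
    }

    if (second_timeout || (xhci->xhc_state & XHCI_STATE_REMOVING)) {
            xhci_cleanup_command_queue(xhci);       /* give up cleanly */
            return;
    }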
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index fa7e1ef36cd9..f2f9518c53ab 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
| @@ -685,20 +685,23 @@ void xhci_stop(struct usb_hcd *hcd) | |||
| 685 | u32 temp; | 685 | u32 temp; |
| 686 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 686 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 687 | 687 | ||
| 688 | if (xhci->xhc_state & XHCI_STATE_HALTED) | ||
| 689 | return; | ||
| 690 | |||
| 691 | mutex_lock(&xhci->mutex); | 688 | mutex_lock(&xhci->mutex); |
| 692 | spin_lock_irq(&xhci->lock); | ||
| 693 | xhci->xhc_state |= XHCI_STATE_HALTED; | ||
| 694 | xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; | ||
| 695 | 689 | ||
| 696 | /* Make sure the xHC is halted for a USB3 roothub | 690 | if (!(xhci->xhc_state & XHCI_STATE_HALTED)) { |
| 697 | * (xhci_stop() could be called as part of failed init). | 691 | spin_lock_irq(&xhci->lock); |
| 698 | */ | 692 | |
| 699 | xhci_halt(xhci); | 693 | xhci->xhc_state |= XHCI_STATE_HALTED; |
| 700 | xhci_reset(xhci); | 694 | xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; |
| 701 | spin_unlock_irq(&xhci->lock); | 695 | xhci_halt(xhci); |
| 696 | xhci_reset(xhci); | ||
| 697 | |||
| 698 | spin_unlock_irq(&xhci->lock); | ||
| 699 | } | ||
| 700 | |||
| 701 | if (!usb_hcd_is_primary_hcd(hcd)) { | ||
| 702 | mutex_unlock(&xhci->mutex); | ||
| 703 | return; | ||
| 704 | } | ||
| 702 | 705 | ||
| 703 | xhci_cleanup_msix(xhci); | 706 | xhci_cleanup_msix(xhci); |
| 704 | 707 | ||
| @@ -4886,7 +4889,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) | |||
| 4886 | xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); | 4889 | xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2); |
| 4887 | xhci_print_registers(xhci); | 4890 | xhci_print_registers(xhci); |
| 4888 | 4891 | ||
| 4889 | xhci->quirks = quirks; | 4892 | xhci->quirks |= quirks; |
| 4890 | 4893 | ||
| 4891 | get_quirks(dev, xhci); | 4894 | get_quirks(dev, xhci); |
| 4892 | 4895 | ||
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 39fd95833eb8..f824336def5c 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
| @@ -1090,29 +1090,6 @@ void musb_stop(struct musb *musb) | |||
| 1090 | musb_platform_try_idle(musb, 0); | 1090 | musb_platform_try_idle(musb, 0); |
| 1091 | } | 1091 | } |
| 1092 | 1092 | ||
| 1093 | static void musb_shutdown(struct platform_device *pdev) | ||
| 1094 | { | ||
| 1095 | struct musb *musb = dev_to_musb(&pdev->dev); | ||
| 1096 | unsigned long flags; | ||
| 1097 | |||
| 1098 | pm_runtime_get_sync(musb->controller); | ||
| 1099 | |||
| 1100 | musb_host_cleanup(musb); | ||
| 1101 | musb_gadget_cleanup(musb); | ||
| 1102 | |||
| 1103 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1104 | musb_platform_disable(musb); | ||
| 1105 | musb_generic_disable(musb); | ||
| 1106 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1107 | |||
| 1108 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
| 1109 | musb_platform_exit(musb); | ||
| 1110 | |||
| 1111 | pm_runtime_put(musb->controller); | ||
| 1112 | /* FIXME power down */ | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | |||
| 1116 | /*-------------------------------------------------------------------------*/ | 1093 | /*-------------------------------------------------------------------------*/ |
| 1117 | 1094 | ||
| 1118 | /* | 1095 | /* |
| @@ -1702,7 +1679,7 @@ EXPORT_SYMBOL_GPL(musb_dma_completion); | |||
| 1702 | #define use_dma 0 | 1679 | #define use_dma 0 |
| 1703 | #endif | 1680 | #endif |
| 1704 | 1681 | ||
| 1705 | static void (*musb_phy_callback)(enum musb_vbus_id_status status); | 1682 | static int (*musb_phy_callback)(enum musb_vbus_id_status status); |
| 1706 | 1683 | ||
| 1707 | /* | 1684 | /* |
| 1708 | * musb_mailbox - optional phy notifier function | 1685 | * musb_mailbox - optional phy notifier function |
| @@ -1711,11 +1688,12 @@ static void (*musb_phy_callback)(enum musb_vbus_id_status status); | |||
| 1711 | * Optionally gets called from the USB PHY. Note that the USB PHY must be | 1688 | * Optionally gets called from the USB PHY. Note that the USB PHY must be |
| 1712 | * disabled at the point the phy_callback is registered or unregistered. | 1689 | * disabled at the point the phy_callback is registered or unregistered. |
| 1713 | */ | 1690 | */ |
| 1714 | void musb_mailbox(enum musb_vbus_id_status status) | 1691 | int musb_mailbox(enum musb_vbus_id_status status) |
| 1715 | { | 1692 | { |
| 1716 | if (musb_phy_callback) | 1693 | if (musb_phy_callback) |
| 1717 | musb_phy_callback(status); | 1694 | return musb_phy_callback(status); |
| 1718 | 1695 | ||
| 1696 | return -ENODEV; | ||
| 1719 | }; | 1697 | }; |
| 1720 | EXPORT_SYMBOL_GPL(musb_mailbox); | 1698 | EXPORT_SYMBOL_GPL(musb_mailbox); |
| 1721 | 1699 | ||
| @@ -2028,11 +2006,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
| 2028 | musb_readl = musb_default_readl; | 2006 | musb_readl = musb_default_readl; |
| 2029 | musb_writel = musb_default_writel; | 2007 | musb_writel = musb_default_writel; |
| 2030 | 2008 | ||
| 2031 | /* We need musb_read/write functions initialized for PM */ | ||
| 2032 | pm_runtime_use_autosuspend(musb->controller); | ||
| 2033 | pm_runtime_set_autosuspend_delay(musb->controller, 200); | ||
| 2034 | pm_runtime_enable(musb->controller); | ||
| 2035 | |||
| 2036 | /* The musb_platform_init() call: | 2009 | /* The musb_platform_init() call: |
| 2037 | * - adjusts musb->mregs | 2010 | * - adjusts musb->mregs |
| 2038 | * - sets the musb->isr | 2011 | * - sets the musb->isr |
| @@ -2134,6 +2107,16 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
| 2134 | if (musb->ops->phy_callback) | 2107 | if (musb->ops->phy_callback) |
| 2135 | musb_phy_callback = musb->ops->phy_callback; | 2108 | musb_phy_callback = musb->ops->phy_callback; |
| 2136 | 2109 | ||
| 2110 | /* | ||
| 2111 | * We need musb_read/write functions initialized for PM. | ||
| 2112 | * Note that at least 2430 glue needs autosuspend delay | ||
| 2113 | * somewhere above 300 ms for the hardware to idle properly | ||
| 2114 | * after disconnecting the cable in host mode. Let's use | ||
| 2115 | * 500 ms for some margin. | ||
| 2116 | */ | ||
| 2117 | pm_runtime_use_autosuspend(musb->controller); | ||
| 2118 | pm_runtime_set_autosuspend_delay(musb->controller, 500); | ||
| 2119 | pm_runtime_enable(musb->controller); | ||
| 2137 | pm_runtime_get_sync(musb->controller); | 2120 | pm_runtime_get_sync(musb->controller); |
| 2138 | 2121 | ||
| 2139 | status = usb_phy_init(musb->xceiv); | 2122 | status = usb_phy_init(musb->xceiv); |
| @@ -2237,13 +2220,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
| 2237 | if (status) | 2220 | if (status) |
| 2238 | goto fail5; | 2221 | goto fail5; |
| 2239 | 2222 | ||
| 2240 | pm_runtime_put(musb->controller); | 2223 | pm_runtime_mark_last_busy(musb->controller); |
| 2241 | 2224 | pm_runtime_put_autosuspend(musb->controller); | |
| 2242 | /* | ||
| 2243 | * For why this is currently needed, see commit 3e43a0725637 | ||
| 2244 | * ("usb: musb: core: add pm_runtime_irq_safe()") | ||
| 2245 | */ | ||
| 2246 | pm_runtime_irq_safe(musb->controller); | ||
| 2247 | 2225 | ||
| 2248 | return 0; | 2226 | return 0; |
| 2249 | 2227 | ||
| @@ -2265,7 +2243,9 @@ fail2_5: | |||
| 2265 | usb_phy_shutdown(musb->xceiv); | 2243 | usb_phy_shutdown(musb->xceiv); |
| 2266 | 2244 | ||
| 2267 | err_usb_phy_init: | 2245 | err_usb_phy_init: |
| 2246 | pm_runtime_dont_use_autosuspend(musb->controller); | ||
| 2268 | pm_runtime_put_sync(musb->controller); | 2247 | pm_runtime_put_sync(musb->controller); |
| 2248 | pm_runtime_disable(musb->controller); | ||
| 2269 | 2249 | ||
| 2270 | fail2: | 2250 | fail2: |
| 2271 | if (musb->irq_wake) | 2251 | if (musb->irq_wake) |
| @@ -2273,7 +2253,6 @@ fail2: | |||
| 2273 | musb_platform_exit(musb); | 2253 | musb_platform_exit(musb); |
| 2274 | 2254 | ||
| 2275 | fail1: | 2255 | fail1: |
| 2276 | pm_runtime_disable(musb->controller); | ||
| 2277 | dev_err(musb->controller, | 2256 | dev_err(musb->controller, |
| 2278 | "musb_init_controller failed with status %d\n", status); | 2257 | "musb_init_controller failed with status %d\n", status); |
| 2279 | 2258 | ||
| @@ -2312,6 +2291,7 @@ static int musb_remove(struct platform_device *pdev) | |||
| 2312 | { | 2291 | { |
| 2313 | struct device *dev = &pdev->dev; | 2292 | struct device *dev = &pdev->dev; |
| 2314 | struct musb *musb = dev_to_musb(dev); | 2293 | struct musb *musb = dev_to_musb(dev); |
| 2294 | unsigned long flags; | ||
| 2315 | 2295 | ||
| 2316 | /* this gets called on rmmod. | 2296 | /* this gets called on rmmod. |
| 2317 | * - Host mode: host may still be active | 2297 | * - Host mode: host may still be active |
| @@ -2319,17 +2299,26 @@ static int musb_remove(struct platform_device *pdev) | |||
| 2319 | * - OTG mode: both roles are deactivated (or never-activated) | 2299 | * - OTG mode: both roles are deactivated (or never-activated) |
| 2320 | */ | 2300 | */ |
| 2321 | musb_exit_debugfs(musb); | 2301 | musb_exit_debugfs(musb); |
| 2322 | musb_shutdown(pdev); | ||
| 2323 | musb_phy_callback = NULL; | ||
| 2324 | |||
| 2325 | if (musb->dma_controller) | ||
| 2326 | musb_dma_controller_destroy(musb->dma_controller); | ||
| 2327 | |||
| 2328 | usb_phy_shutdown(musb->xceiv); | ||
| 2329 | 2302 | ||
| 2330 | cancel_work_sync(&musb->irq_work); | 2303 | cancel_work_sync(&musb->irq_work); |
| 2331 | cancel_delayed_work_sync(&musb->finish_resume_work); | 2304 | cancel_delayed_work_sync(&musb->finish_resume_work); |
| 2332 | cancel_delayed_work_sync(&musb->deassert_reset_work); | 2305 | cancel_delayed_work_sync(&musb->deassert_reset_work); |
| 2306 | pm_runtime_get_sync(musb->controller); | ||
| 2307 | musb_host_cleanup(musb); | ||
| 2308 | musb_gadget_cleanup(musb); | ||
| 2309 | spin_lock_irqsave(&musb->lock, flags); | ||
| 2310 | musb_platform_disable(musb); | ||
| 2311 | musb_generic_disable(musb); | ||
| 2312 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 2313 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | ||
| 2314 | pm_runtime_dont_use_autosuspend(musb->controller); | ||
| 2315 | pm_runtime_put_sync(musb->controller); | ||
| 2316 | pm_runtime_disable(musb->controller); | ||
| 2317 | musb_platform_exit(musb); | ||
| 2318 | musb_phy_callback = NULL; | ||
| 2319 | if (musb->dma_controller) | ||
| 2320 | musb_dma_controller_destroy(musb->dma_controller); | ||
| 2321 | usb_phy_shutdown(musb->xceiv); | ||
| 2333 | musb_free(musb); | 2322 | musb_free(musb); |
| 2334 | device_init_wakeup(dev, 0); | 2323 | device_init_wakeup(dev, 0); |
| 2335 | return 0; | 2324 | return 0; |
| @@ -2429,7 +2418,8 @@ static void musb_restore_context(struct musb *musb) | |||
| 2429 | musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe); | 2418 | musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe); |
| 2430 | musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe); | 2419 | musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe); |
| 2431 | musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe); | 2420 | musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe); |
| 2432 | musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl); | 2421 | if (musb->context.devctl & MUSB_DEVCTL_SESSION) |
| 2422 | musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl); | ||
| 2433 | 2423 | ||
| 2434 | for (i = 0; i < musb->config->num_eps; ++i) { | 2424 | for (i = 0; i < musb->config->num_eps; ++i) { |
| 2435 | struct musb_hw_ep *hw_ep; | 2425 | struct musb_hw_ep *hw_ep; |
| @@ -2612,7 +2602,6 @@ static struct platform_driver musb_driver = { | |||
| 2612 | }, | 2602 | }, |
| 2613 | .probe = musb_probe, | 2603 | .probe = musb_probe, |
| 2614 | .remove = musb_remove, | 2604 | .remove = musb_remove, |
| 2615 | .shutdown = musb_shutdown, | ||
| 2616 | }; | 2605 | }; |
| 2617 | 2606 | ||
| 2618 | module_platform_driver(musb_driver); | 2607 | module_platform_driver(musb_driver); |
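Taken together, the musb_core changes move the controller to ordinary autosuspend-based runtime PM: enable it only once the glue's phy_callback is registered, use a 500 ms autosuspend delay, and tear everything down symmetrically in musb_remove() instead of a separate shutdown hook. A condensed sketch of the init/remove pairing, with dev standing for musb->controller:

    /* init path */
    pm_runtime_use_autosuspend(dev);
    pm_runtime_set_autosuspend_delay(dev, 500);     /* ms, see the comment above */
    pm_runtime_enable(dev);
    pm_runtime_get_sync(dev);
    /* ... bring up the hardware ... */
    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);

    /* remove path mirrors it */
    pm_runtime_get_sync(dev);
    /* ... quiesce and tear down the hardware ... */
    pm_runtime_dont_use_autosuspend(dev);
    pm_runtime_put_sync(dev);
    pm_runtime_disable(dev);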
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index b6afe9e43305..b55a776b03eb 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h | |||
| @@ -215,7 +215,7 @@ struct musb_platform_ops { | |||
| 215 | dma_addr_t *dma_addr, u32 *len); | 215 | dma_addr_t *dma_addr, u32 *len); |
| 216 | void (*pre_root_reset_end)(struct musb *musb); | 216 | void (*pre_root_reset_end)(struct musb *musb); |
| 217 | void (*post_root_reset_end)(struct musb *musb); | 217 | void (*post_root_reset_end)(struct musb *musb); |
| 218 | void (*phy_callback)(enum musb_vbus_id_status status); | 218 | int (*phy_callback)(enum musb_vbus_id_status status); |
| 219 | }; | 219 | }; |
| 220 | 220 | ||
| 221 | /* | 221 | /* |
| @@ -312,6 +312,7 @@ struct musb { | |||
| 312 | struct work_struct irq_work; | 312 | struct work_struct irq_work; |
| 313 | struct delayed_work deassert_reset_work; | 313 | struct delayed_work deassert_reset_work; |
| 314 | struct delayed_work finish_resume_work; | 314 | struct delayed_work finish_resume_work; |
| 315 | struct delayed_work gadget_work; | ||
| 315 | u16 hwvers; | 316 | u16 hwvers; |
| 316 | 317 | ||
| 317 | u16 intrrxe; | 318 | u16 intrrxe; |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 152865b36522..af2a3a7addf9 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
| @@ -1656,6 +1656,20 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) | |||
| 1656 | return usb_phy_set_power(musb->xceiv, mA); | 1656 | return usb_phy_set_power(musb->xceiv, mA); |
| 1657 | } | 1657 | } |
| 1658 | 1658 | ||
| 1659 | static void musb_gadget_work(struct work_struct *work) | ||
| 1660 | { | ||
| 1661 | struct musb *musb; | ||
| 1662 | unsigned long flags; | ||
| 1663 | |||
| 1664 | musb = container_of(work, struct musb, gadget_work.work); | ||
| 1665 | pm_runtime_get_sync(musb->controller); | ||
| 1666 | spin_lock_irqsave(&musb->lock, flags); | ||
| 1667 | musb_pullup(musb, musb->softconnect); | ||
| 1668 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 1669 | pm_runtime_mark_last_busy(musb->controller); | ||
| 1670 | pm_runtime_put_autosuspend(musb->controller); | ||
| 1671 | } | ||
| 1672 | |||
| 1659 | static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) | 1673 | static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) |
| 1660 | { | 1674 | { |
| 1661 | struct musb *musb = gadget_to_musb(gadget); | 1675 | struct musb *musb = gadget_to_musb(gadget); |
| @@ -1663,20 +1677,16 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) | |||
| 1663 | 1677 | ||
| 1664 | is_on = !!is_on; | 1678 | is_on = !!is_on; |
| 1665 | 1679 | ||
| 1666 | pm_runtime_get_sync(musb->controller); | ||
| 1667 | |||
| 1668 | /* NOTE: this assumes we are sensing vbus; we'd rather | 1680 | /* NOTE: this assumes we are sensing vbus; we'd rather |
| 1669 | * not pullup unless the B-session is active. | 1681 | * not pullup unless the B-session is active. |
| 1670 | */ | 1682 | */ |
| 1671 | spin_lock_irqsave(&musb->lock, flags); | 1683 | spin_lock_irqsave(&musb->lock, flags); |
| 1672 | if (is_on != musb->softconnect) { | 1684 | if (is_on != musb->softconnect) { |
| 1673 | musb->softconnect = is_on; | 1685 | musb->softconnect = is_on; |
| 1674 | musb_pullup(musb, is_on); | 1686 | schedule_delayed_work(&musb->gadget_work, 0); |
| 1675 | } | 1687 | } |
| 1676 | spin_unlock_irqrestore(&musb->lock, flags); | 1688 | spin_unlock_irqrestore(&musb->lock, flags); |
| 1677 | 1689 | ||
| 1678 | pm_runtime_put(musb->controller); | ||
| 1679 | |||
| 1680 | return 0; | 1690 | return 0; |
| 1681 | } | 1691 | } |
| 1682 | 1692 | ||
| @@ -1845,7 +1855,7 @@ int musb_gadget_setup(struct musb *musb) | |||
| 1845 | #elif IS_ENABLED(CONFIG_USB_MUSB_GADGET) | 1855 | #elif IS_ENABLED(CONFIG_USB_MUSB_GADGET) |
| 1846 | musb->g.is_otg = 0; | 1856 | musb->g.is_otg = 0; |
| 1847 | #endif | 1857 | #endif |
| 1848 | 1858 | INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work); | |
| 1849 | musb_g_init_endpoints(musb); | 1859 | musb_g_init_endpoints(musb); |
| 1850 | 1860 | ||
| 1851 | musb->is_active = 0; | 1861 | musb->is_active = 0; |
| @@ -1866,6 +1876,8 @@ void musb_gadget_cleanup(struct musb *musb) | |||
| 1866 | { | 1876 | { |
| 1867 | if (musb->port_mode == MUSB_PORT_MODE_HOST) | 1877 | if (musb->port_mode == MUSB_PORT_MODE_HOST) |
| 1868 | return; | 1878 | return; |
| 1879 | |||
| 1880 | cancel_delayed_work_sync(&musb->gadget_work); | ||
| 1869 | usb_del_gadget_udc(&musb->g); | 1881 | usb_del_gadget_udc(&musb->g); |
| 1870 | } | 1882 | } |
| 1871 | 1883 | ||
| @@ -1914,8 +1926,8 @@ static int musb_gadget_start(struct usb_gadget *g, | |||
| 1914 | if (musb->xceiv->last_event == USB_EVENT_ID) | 1926 | if (musb->xceiv->last_event == USB_EVENT_ID) |
| 1915 | musb_platform_set_vbus(musb, 1); | 1927 | musb_platform_set_vbus(musb, 1); |
| 1916 | 1928 | ||
| 1917 | if (musb->xceiv->last_event == USB_EVENT_NONE) | 1929 | pm_runtime_mark_last_busy(musb->controller); |
| 1918 | pm_runtime_put(musb->controller); | 1930 | pm_runtime_put_autosuspend(musb->controller); |
| 1919 | 1931 | ||
| 1920 | return 0; | 1932 | return 0; |
| 1921 | 1933 | ||
| @@ -1934,8 +1946,7 @@ static int musb_gadget_stop(struct usb_gadget *g) | |||
| 1934 | struct musb *musb = gadget_to_musb(g); | 1946 | struct musb *musb = gadget_to_musb(g); |
| 1935 | unsigned long flags; | 1947 | unsigned long flags; |
| 1936 | 1948 | ||
| 1937 | if (musb->xceiv->last_event == USB_EVENT_NONE) | 1949 | pm_runtime_get_sync(musb->controller); |
| 1938 | pm_runtime_get_sync(musb->controller); | ||
| 1939 | 1950 | ||
| 1940 | /* | 1951 | /* |
| 1941 | * REVISIT always use otg_set_peripheral() here too; | 1952 | * REVISIT always use otg_set_peripheral() here too; |
| @@ -1963,7 +1974,8 @@ static int musb_gadget_stop(struct usb_gadget *g) | |||
| 1963 | * that currently misbehaves. | 1974 | * that currently misbehaves. |
| 1964 | */ | 1975 | */ |
| 1965 | 1976 | ||
| 1966 | pm_runtime_put(musb->controller); | 1977 | pm_runtime_mark_last_busy(musb->controller); |
| 1978 | pm_runtime_put_autosuspend(musb->controller); | ||
| 1967 | 1979 | ||
| 1968 | return 0; | 1980 | return 0; |
| 1969 | } | 1981 | } |
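musb_gadget_pullup() may be called in atomic context, so the sleeping pm_runtime_get_sync() is pushed into a delayed work item: the callback only records the new softconnect value under the lock and schedules gadget_work, which later toggles the pullup with runtime PM held. The split, condensed from the hunks:

    /* atomic context: record the request, defer the sleeping part */
    spin_lock_irqsave(&musb->lock, flags);
    if (is_on != musb->softconnect) {
            musb->softconnect = is_on;
            schedule_delayed_work(&musb->gadget_work, 0);
    }
    spin_unlock_irqrestore(&musb->lock, flags);

    /* process context (the work item): sleeping calls are fine here */
    pm_runtime_get_sync(musb->controller);
    spin_lock_irqsave(&musb->lock, flags);
    musb_pullup(musb, musb->softconnect);
    spin_unlock_irqrestore(&musb->lock, flags);
    pm_runtime_mark_last_busy(musb->controller);
    pm_runtime_put_autosuspend(musb->controller);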
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 2f8ad7f1f482..d227a71d85e1 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c | |||
| @@ -434,7 +434,13 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb, | |||
| 434 | } | 434 | } |
| 435 | } | 435 | } |
| 436 | 436 | ||
| 437 | if (qh != NULL && qh->is_ready) { | 437 | /* |
| 438 | * The pipe must be broken if the current urb->status is set, so don't | ||
| 439 | * start the next urb. | ||
| 440 | * TODO: to minimize the risk of regression, only check urb->status | ||
| 441 | * for RX, until we have a test case to understand the behavior of TX. | ||
| 442 | */ | ||
| 443 | if ((!status || !is_in) && qh && qh->is_ready) { | ||
| 438 | dev_dbg(musb->controller, "... next ep%d %cX urb %p\n", | 444 | dev_dbg(musb->controller, "... next ep%d %cX urb %p\n", |
| 439 | hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); | 445 | hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); |
| 440 | musb_start_urb(musb, is_in, qh); | 446 | musb_start_urb(musb, is_in, qh); |
| @@ -594,14 +600,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum) | |||
| 594 | musb_writew(ep->regs, MUSB_TXCSR, 0); | 600 | musb_writew(ep->regs, MUSB_TXCSR, 0); |
| 595 | 601 | ||
| 596 | /* scrub all previous state, clearing toggle */ | 602 | /* scrub all previous state, clearing toggle */ |
| 597 | } else { | ||
| 598 | csr = musb_readw(ep->regs, MUSB_RXCSR); | ||
| 599 | if (csr & MUSB_RXCSR_RXPKTRDY) | ||
| 600 | WARNING("rx%d, packet/%d ready?\n", ep->epnum, | ||
| 601 | musb_readw(ep->regs, MUSB_RXCOUNT)); | ||
| 602 | |||
| 603 | musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); | ||
| 604 | } | 603 | } |
| 604 | csr = musb_readw(ep->regs, MUSB_RXCSR); | ||
| 605 | if (csr & MUSB_RXCSR_RXPKTRDY) | ||
| 606 | WARNING("rx%d, packet/%d ready?\n", ep->epnum, | ||
| 607 | musb_readw(ep->regs, MUSB_RXCOUNT)); | ||
| 608 | |||
| 609 | musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); | ||
| 605 | 610 | ||
| 606 | /* target addr and (for multipoint) hub addr/port */ | 611 | /* target addr and (for multipoint) hub addr/port */ |
| 607 | if (musb->is_multipoint) { | 612 | if (musb->is_multipoint) { |
| @@ -627,7 +632,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum) | |||
| 627 | ep->rx_reinit = 0; | 632 | ep->rx_reinit = 0; |
| 628 | } | 633 | } |
| 629 | 634 | ||
| 630 | static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma, | 635 | static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma, |
| 631 | struct musb_hw_ep *hw_ep, struct musb_qh *qh, | 636 | struct musb_hw_ep *hw_ep, struct musb_qh *qh, |
| 632 | struct urb *urb, u32 offset, | 637 | struct urb *urb, u32 offset, |
| 633 | u32 *length, u8 *mode) | 638 | u32 *length, u8 *mode) |
| @@ -664,23 +669,18 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma, | |||
| 664 | } | 669 | } |
| 665 | channel->desired_mode = *mode; | 670 | channel->desired_mode = *mode; |
| 666 | musb_writew(epio, MUSB_TXCSR, csr); | 671 | musb_writew(epio, MUSB_TXCSR, csr); |
| 667 | |||
| 668 | return 0; | ||
| 669 | } | 672 | } |
| 670 | 673 | ||
| 671 | static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma, | 674 | static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma, |
| 672 | struct musb_hw_ep *hw_ep, | 675 | struct musb_hw_ep *hw_ep, |
| 673 | struct musb_qh *qh, | 676 | struct musb_qh *qh, |
| 674 | struct urb *urb, | 677 | struct urb *urb, |
| 675 | u32 offset, | 678 | u32 offset, |
| 676 | u32 *length, | 679 | u32 *length, |
| 677 | u8 *mode) | 680 | u8 *mode) |
| 678 | { | 681 | { |
| 679 | struct dma_channel *channel = hw_ep->tx_channel; | 682 | struct dma_channel *channel = hw_ep->tx_channel; |
| 680 | 683 | ||
| 681 | if (!is_cppi_enabled(hw_ep->musb) && !tusb_dma_omap(hw_ep->musb)) | ||
| 682 | return -ENODEV; | ||
| 683 | |||
| 684 | channel->actual_len = 0; | 684 | channel->actual_len = 0; |
| 685 | 685 | ||
| 686 | /* | 686 | /* |
| @@ -688,8 +688,6 @@ static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma, | |||
| 688 | * to identify the zero-length-final-packet case. | 688 | * to identify the zero-length-final-packet case. |
| 689 | */ | 689 | */ |
| 690 | *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0; | 690 | *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0; |
| 691 | |||
| 692 | return 0; | ||
| 693 | } | 691 | } |
| 694 | 692 | ||
| 695 | static bool musb_tx_dma_program(struct dma_controller *dma, | 693 | static bool musb_tx_dma_program(struct dma_controller *dma, |
| @@ -699,15 +697,14 @@ static bool musb_tx_dma_program(struct dma_controller *dma, | |||
| 699 | struct dma_channel *channel = hw_ep->tx_channel; | 697 | struct dma_channel *channel = hw_ep->tx_channel; |
| 700 | u16 pkt_size = qh->maxpacket; | 698 | u16 pkt_size = qh->maxpacket; |
| 701 | u8 mode; | 699 | u8 mode; |
| 702 | int res; | ||
| 703 | 700 | ||
| 704 | if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb)) | 701 | if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb)) |
| 705 | res = musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, | 702 | musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset, |
| 706 | offset, &length, &mode); | 703 | &length, &mode); |
| 704 | else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb)) | ||
| 705 | musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset, | ||
| 706 | &length, &mode); | ||
| 707 | else | 707 | else |
| 708 | res = musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, | ||
| 709 | offset, &length, &mode); | ||
| 710 | if (res) | ||
| 711 | return false; | 708 | return false; |
| 712 | 709 | ||
| 713 | qh->segsize = length; | 710 | qh->segsize = length; |
| @@ -995,9 +992,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep, | |||
| 995 | if (is_in) { | 992 | if (is_in) { |
| 996 | dma = is_dma_capable() ? ep->rx_channel : NULL; | 993 | dma = is_dma_capable() ? ep->rx_channel : NULL; |
| 997 | 994 | ||
| 998 | /* clear nak timeout bit */ | 995 | /* |
| 996 | * Need to stop the transaction by clearing REQPKT first | ||
| 997 | * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED | ||
| 998 | * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2 | ||
| 999 | */ | ||
| 999 | rx_csr = musb_readw(epio, MUSB_RXCSR); | 1000 | rx_csr = musb_readw(epio, MUSB_RXCSR); |
| 1000 | rx_csr |= MUSB_RXCSR_H_WZC_BITS; | 1001 | rx_csr |= MUSB_RXCSR_H_WZC_BITS; |
| 1002 | rx_csr &= ~MUSB_RXCSR_H_REQPKT; | ||
| 1003 | musb_writew(epio, MUSB_RXCSR, rx_csr); | ||
| 1001 | rx_csr &= ~MUSB_RXCSR_DATAERROR; | 1004 | rx_csr &= ~MUSB_RXCSR_DATAERROR; |
| 1002 | musb_writew(epio, MUSB_RXCSR, rx_csr); | 1005 | musb_writew(epio, MUSB_RXCSR, rx_csr); |
| 1003 | 1006 | ||
| @@ -1551,7 +1554,7 @@ static int musb_rx_dma_iso_cppi41(struct dma_controller *dma, | |||
| 1551 | struct urb *urb, | 1554 | struct urb *urb, |
| 1552 | size_t len) | 1555 | size_t len) |
| 1553 | { | 1556 | { |
| 1554 | struct dma_channel *channel = hw_ep->tx_channel; | 1557 | struct dma_channel *channel = hw_ep->rx_channel; |
| 1555 | void __iomem *epio = hw_ep->regs; | 1558 | void __iomem *epio = hw_ep->regs; |
| 1556 | dma_addr_t *buf; | 1559 | dma_addr_t *buf; |
| 1557 | u32 length, res; | 1560 | u32 length, res; |
| @@ -1870,6 +1873,9 @@ void musb_host_rx(struct musb *musb, u8 epnum) | |||
| 1870 | status = -EPROTO; | 1873 | status = -EPROTO; |
| 1871 | musb_writeb(epio, MUSB_RXINTERVAL, 0); | 1874 | musb_writeb(epio, MUSB_RXINTERVAL, 0); |
| 1872 | 1875 | ||
| 1876 | rx_csr &= ~MUSB_RXCSR_H_ERROR; | ||
| 1877 | musb_writew(epio, MUSB_RXCSR, rx_csr); | ||
| 1878 | |||
| 1873 | } else if (rx_csr & MUSB_RXCSR_DATAERROR) { | 1879 | } else if (rx_csr & MUSB_RXCSR_DATAERROR) { |
| 1874 | 1880 | ||
| 1875 | if (USB_ENDPOINT_XFER_ISOC != qh->type) { | 1881 | if (USB_ENDPOINT_XFER_ISOC != qh->type) { |
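The bulk NAK-timeout fix is a write-ordering detail: REQPKT must be cleared in one RXCSR write to stop the transaction before the NAK-timeout (DATAERROR) indication is cleared in a second write. Condensed from the hunk:

    rx_csr = musb_readw(epio, MUSB_RXCSR);
    rx_csr |= MUSB_RXCSR_H_WZC_BITS;

    /* first write: stop the transaction */
    rx_csr &= ~MUSB_RXCSR_H_REQPKT;
    musb_writew(epio, MUSB_RXCSR, rx_csr);

    /* second write: only now clear the NAK timeout indication */
    rx_csr &= ~MUSB_RXCSR_DATAERROR;
    musb_writew(epio, MUSB_RXCSR, rx_csr);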
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index c84e0322c108..0b4cec940386 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c | |||
| @@ -49,97 +49,14 @@ struct omap2430_glue { | |||
| 49 | enum musb_vbus_id_status status; | 49 | enum musb_vbus_id_status status; |
| 50 | struct work_struct omap_musb_mailbox_work; | 50 | struct work_struct omap_musb_mailbox_work; |
| 51 | struct device *control_otghs; | 51 | struct device *control_otghs; |
| 52 | bool cable_connected; | ||
| 53 | bool enabled; | ||
| 54 | bool powered; | ||
| 52 | }; | 55 | }; |
| 53 | #define glue_to_musb(g) platform_get_drvdata(g->musb) | 56 | #define glue_to_musb(g) platform_get_drvdata(g->musb) |
| 54 | 57 | ||
| 55 | static struct omap2430_glue *_glue; | 58 | static struct omap2430_glue *_glue; |
| 56 | 59 | ||
| 57 | static struct timer_list musb_idle_timer; | ||
| 58 | |||
| 59 | static void musb_do_idle(unsigned long _musb) | ||
| 60 | { | ||
| 61 | struct musb *musb = (void *)_musb; | ||
| 62 | unsigned long flags; | ||
| 63 | u8 power; | ||
| 64 | u8 devctl; | ||
| 65 | |||
| 66 | spin_lock_irqsave(&musb->lock, flags); | ||
| 67 | |||
| 68 | switch (musb->xceiv->otg->state) { | ||
| 69 | case OTG_STATE_A_WAIT_BCON: | ||
| 70 | |||
| 71 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 72 | if (devctl & MUSB_DEVCTL_BDEVICE) { | ||
| 73 | musb->xceiv->otg->state = OTG_STATE_B_IDLE; | ||
| 74 | MUSB_DEV_MODE(musb); | ||
| 75 | } else { | ||
| 76 | musb->xceiv->otg->state = OTG_STATE_A_IDLE; | ||
| 77 | MUSB_HST_MODE(musb); | ||
| 78 | } | ||
| 79 | break; | ||
| 80 | case OTG_STATE_A_SUSPEND: | ||
| 81 | /* finish RESUME signaling? */ | ||
| 82 | if (musb->port1_status & MUSB_PORT_STAT_RESUME) { | ||
| 83 | power = musb_readb(musb->mregs, MUSB_POWER); | ||
| 84 | power &= ~MUSB_POWER_RESUME; | ||
| 85 | dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power); | ||
| 86 | musb_writeb(musb->mregs, MUSB_POWER, power); | ||
| 87 | musb->is_active = 1; | ||
| 88 | musb->port1_status &= ~(USB_PORT_STAT_SUSPEND | ||
| 89 | | MUSB_PORT_STAT_RESUME); | ||
| 90 | musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; | ||
| 91 | usb_hcd_poll_rh_status(musb->hcd); | ||
| 92 | /* NOTE: it might really be A_WAIT_BCON ... */ | ||
| 93 | musb->xceiv->otg->state = OTG_STATE_A_HOST; | ||
| 94 | } | ||
| 95 | break; | ||
| 96 | case OTG_STATE_A_HOST: | ||
| 97 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 98 | if (devctl & MUSB_DEVCTL_BDEVICE) | ||
| 99 | musb->xceiv->otg->state = OTG_STATE_B_IDLE; | ||
| 100 | else | ||
| 101 | musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON; | ||
| 102 | default: | ||
| 103 | break; | ||
| 104 | } | ||
| 105 | spin_unlock_irqrestore(&musb->lock, flags); | ||
| 106 | } | ||
| 107 | |||
| 108 | |||
| 109 | static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout) | ||
| 110 | { | ||
| 111 | unsigned long default_timeout = jiffies + msecs_to_jiffies(3); | ||
| 112 | static unsigned long last_timer; | ||
| 113 | |||
| 114 | if (timeout == 0) | ||
| 115 | timeout = default_timeout; | ||
| 116 | |||
| 117 | /* Never idle if active, or when VBUS timeout is not set as host */ | ||
| 118 | if (musb->is_active || ((musb->a_wait_bcon == 0) | ||
| 119 | && (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) { | ||
| 120 | dev_dbg(musb->controller, "%s active, deleting timer\n", | ||
| 121 | usb_otg_state_string(musb->xceiv->otg->state)); | ||
| 122 | del_timer(&musb_idle_timer); | ||
| 123 | last_timer = jiffies; | ||
| 124 | return; | ||
| 125 | } | ||
| 126 | |||
| 127 | if (time_after(last_timer, timeout)) { | ||
| 128 | if (!timer_pending(&musb_idle_timer)) | ||
| 129 | last_timer = timeout; | ||
| 130 | else { | ||
| 131 | dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n"); | ||
| 132 | return; | ||
| 133 | } | ||
| 134 | } | ||
| 135 | last_timer = timeout; | ||
| 136 | |||
| 137 | dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n", | ||
| 138 | usb_otg_state_string(musb->xceiv->otg->state), | ||
| 139 | (unsigned long)jiffies_to_msecs(timeout - jiffies)); | ||
| 140 | mod_timer(&musb_idle_timer, timeout); | ||
| 141 | } | ||
| 142 | |||
| 143 | static void omap2430_musb_set_vbus(struct musb *musb, int is_on) | 60 | static void omap2430_musb_set_vbus(struct musb *musb, int is_on) |
| 144 | { | 61 | { |
| 145 | struct usb_otg *otg = musb->xceiv->otg; | 62 | struct usb_otg *otg = musb->xceiv->otg; |
| @@ -205,16 +122,6 @@ static void omap2430_musb_set_vbus(struct musb *musb, int is_on) | |||
| 205 | musb_readb(musb->mregs, MUSB_DEVCTL)); | 122 | musb_readb(musb->mregs, MUSB_DEVCTL)); |
| 206 | } | 123 | } |
| 207 | 124 | ||
| 208 | static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode) | ||
| 209 | { | ||
| 210 | u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | ||
| 211 | |||
| 212 | devctl |= MUSB_DEVCTL_SESSION; | ||
| 213 | musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); | ||
| 214 | |||
| 215 | return 0; | ||
| 216 | } | ||
| 217 | |||
| 218 | static inline void omap2430_low_level_exit(struct musb *musb) | 125 | static inline void omap2430_low_level_exit(struct musb *musb) |
| 219 | { | 126 | { |
| 220 | u32 l; | 127 | u32 l; |
| @@ -234,22 +141,63 @@ static inline void omap2430_low_level_init(struct musb *musb) | |||
| 234 | musb_writel(musb->mregs, OTG_FORCESTDBY, l); | 141 | musb_writel(musb->mregs, OTG_FORCESTDBY, l); |
| 235 | } | 142 | } |
| 236 | 143 | ||
| 237 | static void omap2430_musb_mailbox(enum musb_vbus_id_status status) | 144 | /* |
| 145 | * We can get multiple cable events so we need to keep track | ||
| 146 | * of the power state. Only keep power enabled if USB cable is | ||
| 147 | * connected and a gadget is started. | ||
| 148 | */ | ||
| 149 | static void omap2430_set_power(struct musb *musb, bool enabled, bool cable) | ||
| 150 | { | ||
| 151 | struct device *dev = musb->controller; | ||
| 152 | struct omap2430_glue *glue = dev_get_drvdata(dev->parent); | ||
| 153 | bool power_up; | ||
| 154 | int res; | ||
| 155 | |||
| 156 | if (glue->enabled != enabled) | ||
| 157 | glue->enabled = enabled; | ||
| 158 | |||
| 159 | if (glue->cable_connected != cable) | ||
| 160 | glue->cable_connected = cable; | ||
| 161 | |||
| 162 | power_up = glue->enabled && glue->cable_connected; | ||
| 163 | if (power_up == glue->powered) { | ||
| 164 | dev_warn(musb->controller, "power state already %i\n", | ||
| 165 | power_up); | ||
| 166 | return; | ||
| 167 | } | ||
| 168 | |||
| 169 | glue->powered = power_up; | ||
| 170 | |||
| 171 | if (power_up) { | ||
| 172 | res = pm_runtime_get_sync(musb->controller); | ||
| 173 | if (res < 0) { | ||
| 174 | dev_err(musb->controller, "could not enable: %i", res); | ||
| 175 | glue->powered = false; | ||
| 176 | } | ||
| 177 | } else { | ||
| 178 | pm_runtime_mark_last_busy(musb->controller); | ||
| 179 | pm_runtime_put_autosuspend(musb->controller); | ||
| 180 | } | ||
| 181 | } | ||
| 182 | |||
| 183 | static int omap2430_musb_mailbox(enum musb_vbus_id_status status) | ||
| 238 | { | 184 | { |
| 239 | struct omap2430_glue *glue = _glue; | 185 | struct omap2430_glue *glue = _glue; |
| 240 | 186 | ||
| 241 | if (!glue) { | 187 | if (!glue) { |
| 242 | pr_err("%s: musb core is not yet initialized\n", __func__); | 188 | pr_err("%s: musb core is not yet initialized\n", __func__); |
| 243 | return; | 189 | return -EPROBE_DEFER; |
| 244 | } | 190 | } |
| 245 | glue->status = status; | 191 | glue->status = status; |
| 246 | 192 | ||
| 247 | if (!glue_to_musb(glue)) { | 193 | if (!glue_to_musb(glue)) { |
| 248 | pr_err("%s: musb core is not yet ready\n", __func__); | 194 | pr_err("%s: musb core is not yet ready\n", __func__); |
| 249 | return; | 195 | return -EPROBE_DEFER; |
| 250 | } | 196 | } |
| 251 | 197 | ||
| 252 | schedule_work(&glue->omap_musb_mailbox_work); | 198 | schedule_work(&glue->omap_musb_mailbox_work); |
| 199 | |||
| 200 | return 0; | ||
| 253 | } | 201 | } |
| 254 | 202 | ||
| 255 | static void omap_musb_set_mailbox(struct omap2430_glue *glue) | 203 | static void omap_musb_set_mailbox(struct omap2430_glue *glue) |
| @@ -259,6 +207,13 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue) | |||
| 259 | struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); | 207 | struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); |
| 260 | struct omap_musb_board_data *data = pdata->board_data; | 208 | struct omap_musb_board_data *data = pdata->board_data; |
| 261 | struct usb_otg *otg = musb->xceiv->otg; | 209 | struct usb_otg *otg = musb->xceiv->otg; |
| 210 | bool cable_connected; | ||
| 211 | |||
| 212 | cable_connected = ((glue->status == MUSB_ID_GROUND) || | ||
| 213 | (glue->status == MUSB_VBUS_VALID)); | ||
| 214 | |||
| 215 | if (cable_connected) | ||
| 216 | omap2430_set_power(musb, glue->enabled, cable_connected); | ||
| 262 | 217 | ||
| 263 | switch (glue->status) { | 218 | switch (glue->status) { |
| 264 | case MUSB_ID_GROUND: | 219 | case MUSB_ID_GROUND: |
| @@ -268,7 +223,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue) | |||
| 268 | musb->xceiv->otg->state = OTG_STATE_A_IDLE; | 223 | musb->xceiv->otg->state = OTG_STATE_A_IDLE; |
| 269 | musb->xceiv->last_event = USB_EVENT_ID; | 224 | musb->xceiv->last_event = USB_EVENT_ID; |
| 270 | if (musb->gadget_driver) { | 225 | if (musb->gadget_driver) { |
| 271 | pm_runtime_get_sync(dev); | ||
| 272 | omap_control_usb_set_mode(glue->control_otghs, | 226 | omap_control_usb_set_mode(glue->control_otghs, |
| 273 | USB_MODE_HOST); | 227 | USB_MODE_HOST); |
| 274 | omap2430_musb_set_vbus(musb, 1); | 228 | omap2430_musb_set_vbus(musb, 1); |
| @@ -281,8 +235,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue) | |||
| 281 | otg->default_a = false; | 235 | otg->default_a = false; |
| 282 | musb->xceiv->otg->state = OTG_STATE_B_IDLE; | 236 | musb->xceiv->otg->state = OTG_STATE_B_IDLE; |
| 283 | musb->xceiv->last_event = USB_EVENT_VBUS; | 237 | musb->xceiv->last_event = USB_EVENT_VBUS; |
| 284 | if (musb->gadget_driver) | ||
| 285 | pm_runtime_get_sync(dev); | ||
| 286 | omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE); | 238 | omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE); |
| 287 | break; | 239 | break; |
| 288 | 240 | ||
| @@ -291,11 +243,8 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue) | |||
| 291 | dev_dbg(dev, "VBUS Disconnect\n"); | 243 | dev_dbg(dev, "VBUS Disconnect\n"); |
| 292 | 244 | ||
| 293 | musb->xceiv->last_event = USB_EVENT_NONE; | 245 | musb->xceiv->last_event = USB_EVENT_NONE; |
| 294 | if (musb->gadget_driver) { | 246 | if (musb->gadget_driver) |
| 295 | omap2430_musb_set_vbus(musb, 0); | 247 | omap2430_musb_set_vbus(musb, 0); |
| 296 | pm_runtime_mark_last_busy(dev); | ||
| 297 | pm_runtime_put_autosuspend(dev); | ||
| 298 | } | ||
| 299 | 248 | ||
| 300 | if (data->interface_type == MUSB_INTERFACE_UTMI) | 249 | if (data->interface_type == MUSB_INTERFACE_UTMI) |
| 301 | otg_set_vbus(musb->xceiv->otg, 0); | 250 | otg_set_vbus(musb->xceiv->otg, 0); |
| @@ -307,6 +256,9 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue) | |||
| 307 | dev_dbg(dev, "ID float\n"); | 256 | dev_dbg(dev, "ID float\n"); |
| 308 | } | 257 | } |
| 309 | 258 | ||
| 259 | if (!cable_connected) | ||
| 260 | omap2430_set_power(musb, glue->enabled, cable_connected); | ||
| 261 | |||
| 310 | atomic_notifier_call_chain(&musb->xceiv->notifier, | 262 | atomic_notifier_call_chain(&musb->xceiv->notifier, |
| 311 | musb->xceiv->last_event, NULL); | 263 | musb->xceiv->last_event, NULL); |
| 312 | } | 264 | } |
| @@ -316,13 +268,8 @@ static void omap_musb_mailbox_work(struct work_struct *mailbox_work) | |||
| 316 | { | 268 | { |
| 317 | struct omap2430_glue *glue = container_of(mailbox_work, | 269 | struct omap2430_glue *glue = container_of(mailbox_work, |
| 318 | struct omap2430_glue, omap_musb_mailbox_work); | 270 | struct omap2430_glue, omap_musb_mailbox_work); |
| 319 | struct musb *musb = glue_to_musb(glue); | ||
| 320 | struct device *dev = musb->controller; | ||
| 321 | 271 | ||
| 322 | pm_runtime_get_sync(dev); | ||
| 323 | omap_musb_set_mailbox(glue); | 272 | omap_musb_set_mailbox(glue); |
| 324 | pm_runtime_mark_last_busy(dev); | ||
| 325 | pm_runtime_put_autosuspend(dev); | ||
| 326 | } | 273 | } |
| 327 | 274 | ||
| 328 | static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci) | 275 | static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci) |
| @@ -389,23 +336,7 @@ static int omap2430_musb_init(struct musb *musb) | |||
| 389 | return PTR_ERR(musb->phy); | 336 | return PTR_ERR(musb->phy); |
| 390 | } | 337 | } |
| 391 | musb->isr = omap2430_musb_interrupt; | 338 | musb->isr = omap2430_musb_interrupt; |
| 392 | 339 | phy_init(musb->phy); | |
| 393 | /* | ||
| 394 | * Enable runtime PM for musb parent (this driver). We can't | ||
| 395 | * do it earlier as struct musb is not yet allocated and we | ||
| 396 | * need to touch the musb registers for runtime PM. | ||
| 397 | */ | ||
| 398 | pm_runtime_enable(glue->dev); | ||
| 399 | status = pm_runtime_get_sync(glue->dev); | ||
| 400 | if (status < 0) | ||
| 401 | goto err1; | ||
| 402 | |||
| 403 | status = pm_runtime_get_sync(dev); | ||
| 404 | if (status < 0) { | ||
| 405 | dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status); | ||
| 406 | pm_runtime_put_sync(glue->dev); | ||
| 407 | goto err1; | ||
| 408 | } | ||
| 409 | 340 | ||
| 410 | l = musb_readl(musb->mregs, OTG_INTERFSEL); | 341 | l = musb_readl(musb->mregs, OTG_INTERFSEL); |
| 411 | 342 | ||
| @@ -427,20 +358,10 @@ static int omap2430_musb_init(struct musb *musb) | |||
| 427 | musb_readl(musb->mregs, OTG_INTERFSEL), | 358 | musb_readl(musb->mregs, OTG_INTERFSEL), |
| 428 | musb_readl(musb->mregs, OTG_SIMENABLE)); | 359 | musb_readl(musb->mregs, OTG_SIMENABLE)); |
| 429 | 360 | ||
| 430 | setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); | ||
| 431 | |||
| 432 | if (glue->status != MUSB_UNKNOWN) | 361 | if (glue->status != MUSB_UNKNOWN) |
| 433 | omap_musb_set_mailbox(glue); | 362 | omap_musb_set_mailbox(glue); |
| 434 | 363 | ||
| 435 | phy_init(musb->phy); | ||
| 436 | phy_power_on(musb->phy); | ||
| 437 | |||
| 438 | pm_runtime_put_noidle(musb->controller); | ||
| 439 | pm_runtime_put_noidle(glue->dev); | ||
| 440 | return 0; | 364 | return 0; |
| 441 | |||
| 442 | err1: | ||
| 443 | return status; | ||
| 444 | } | 365 | } |
| 445 | 366 | ||
| 446 | static void omap2430_musb_enable(struct musb *musb) | 367 | static void omap2430_musb_enable(struct musb *musb) |
| @@ -452,6 +373,11 @@ static void omap2430_musb_enable(struct musb *musb) | |||
| 452 | struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); | 373 | struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev); |
| 453 | struct omap_musb_board_data *data = pdata->board_data; | 374 | struct omap_musb_board_data *data = pdata->board_data; |
| 454 | 375 | ||
| 376 | if (!WARN_ON(!musb->phy)) | ||
| 377 | phy_power_on(musb->phy); | ||
| 378 | |||
| 379 | omap2430_set_power(musb, true, glue->cable_connected); | ||
| 380 | |||
| 455 | switch (glue->status) { | 381 | switch (glue->status) { |
| 456 | 382 | ||
| 457 | case MUSB_ID_GROUND: | 383 | case MUSB_ID_GROUND: |
| @@ -487,18 +413,25 @@ static void omap2430_musb_disable(struct musb *musb) | |||
| 487 | struct device *dev = musb->controller; | 413 | struct device *dev = musb->controller; |
| 488 | struct omap2430_glue *glue = dev_get_drvdata(dev->parent); | 414 | struct omap2430_glue *glue = dev_get_drvdata(dev->parent); |
| 489 | 415 | ||
| 416 | if (!WARN_ON(!musb->phy)) | ||
| 417 | phy_power_off(musb->phy); | ||
| 418 | |||
| 490 | if (glue->status != MUSB_UNKNOWN) | 419 | if (glue->status != MUSB_UNKNOWN) |
| 491 | omap_control_usb_set_mode(glue->control_otghs, | 420 | omap_control_usb_set_mode(glue->control_otghs, |
| 492 | USB_MODE_DISCONNECT); | 421 | USB_MODE_DISCONNECT); |
| 422 | |||
| 423 | omap2430_set_power(musb, false, glue->cable_connected); | ||
| 493 | } | 424 | } |
| 494 | 425 | ||
| 495 | static int omap2430_musb_exit(struct musb *musb) | 426 | static int omap2430_musb_exit(struct musb *musb) |
| 496 | { | 427 | { |
| 497 | del_timer_sync(&musb_idle_timer); | 428 | struct device *dev = musb->controller; |
| 429 | struct omap2430_glue *glue = dev_get_drvdata(dev->parent); | ||
| 498 | 430 | ||
| 499 | omap2430_low_level_exit(musb); | 431 | omap2430_low_level_exit(musb); |
| 500 | phy_power_off(musb->phy); | ||
| 501 | phy_exit(musb->phy); | 432 | phy_exit(musb->phy); |
| 433 | musb->phy = NULL; | ||
| 434 | cancel_work_sync(&glue->omap_musb_mailbox_work); | ||
| 502 | 435 | ||
| 503 | return 0; | 436 | return 0; |
| 504 | } | 437 | } |
| @@ -512,9 +445,6 @@ static const struct musb_platform_ops omap2430_ops = { | |||
| 512 | .init = omap2430_musb_init, | 445 | .init = omap2430_musb_init, |
| 513 | .exit = omap2430_musb_exit, | 446 | .exit = omap2430_musb_exit, |
| 514 | 447 | ||
| 515 | .set_mode = omap2430_musb_set_mode, | ||
| 516 | .try_idle = omap2430_musb_try_idle, | ||
| 517 | |||
| 518 | .set_vbus = omap2430_musb_set_vbus, | 448 | .set_vbus = omap2430_musb_set_vbus, |
| 519 | 449 | ||
| 520 | .enable = omap2430_musb_enable, | 450 | .enable = omap2430_musb_enable, |
| @@ -639,11 +569,9 @@ static int omap2430_probe(struct platform_device *pdev) | |||
| 639 | goto err2; | 569 | goto err2; |
| 640 | } | 570 | } |
| 641 | 571 | ||
| 642 | /* | 572 | pm_runtime_enable(glue->dev); |
| 643 | * Note that we cannot enable PM runtime yet for this | 573 | pm_runtime_use_autosuspend(glue->dev); |
| 644 | * driver as we need struct musb initialized first. | 574 | pm_runtime_set_autosuspend_delay(glue->dev, 500); |
| 645 | * See omap2430_musb_init above. | ||
| 646 | */ | ||
| 647 | 575 | ||
| 648 | ret = platform_device_add(musb); | 576 | ret = platform_device_add(musb); |
| 649 | if (ret) { | 577 | if (ret) { |
| @@ -662,12 +590,14 @@ err0: | |||
| 662 | 590 | ||
| 663 | static int omap2430_remove(struct platform_device *pdev) | 591 | static int omap2430_remove(struct platform_device *pdev) |
| 664 | { | 592 | { |
| 665 | struct omap2430_glue *glue = platform_get_drvdata(pdev); | 593 | struct omap2430_glue *glue = platform_get_drvdata(pdev); |
| 594 | struct musb *musb = glue_to_musb(glue); | ||
| 666 | 595 | ||
| 667 | pm_runtime_get_sync(glue->dev); | 596 | pm_runtime_get_sync(glue->dev); |
| 668 | cancel_work_sync(&glue->omap_musb_mailbox_work); | ||
| 669 | platform_device_unregister(glue->musb); | 597 | platform_device_unregister(glue->musb); |
| 598 | omap2430_set_power(musb, false, false); | ||
| 670 | pm_runtime_put_sync(glue->dev); | 599 | pm_runtime_put_sync(glue->dev); |
| 600 | pm_runtime_dont_use_autosuspend(glue->dev); | ||
| 671 | pm_runtime_disable(glue->dev); | 601 | pm_runtime_disable(glue->dev); |
| 672 | 602 | ||
| 673 | return 0; | 603 | return 0; |
| @@ -680,12 +610,13 @@ static int omap2430_runtime_suspend(struct device *dev) | |||
| 680 | struct omap2430_glue *glue = dev_get_drvdata(dev); | 610 | struct omap2430_glue *glue = dev_get_drvdata(dev); |
| 681 | struct musb *musb = glue_to_musb(glue); | 611 | struct musb *musb = glue_to_musb(glue); |
| 682 | 612 | ||
| 683 | if (musb) { | 613 | if (!musb) |
| 684 | musb->context.otg_interfsel = musb_readl(musb->mregs, | 614 | return 0; |
| 685 | OTG_INTERFSEL); | ||
| 686 | 615 | ||
| 687 | omap2430_low_level_exit(musb); | 616 | musb->context.otg_interfsel = musb_readl(musb->mregs, |
| 688 | } | 617 | OTG_INTERFSEL); |
| 618 | |||
| 619 | omap2430_low_level_exit(musb); | ||
| 689 | 620 | ||
| 690 | return 0; | 621 | return 0; |
| 691 | } | 622 | } |
| @@ -696,7 +627,7 @@ static int omap2430_runtime_resume(struct device *dev) | |||
| 696 | struct musb *musb = glue_to_musb(glue); | 627 | struct musb *musb = glue_to_musb(glue); |
| 697 | 628 | ||
| 698 | if (!musb) | 629 | if (!musb) |
| 699 | return -EPROBE_DEFER; | 630 | return 0; |
| 700 | 631 | ||
| 701 | omap2430_low_level_init(musb); | 632 | omap2430_low_level_init(musb); |
| 702 | musb_writel(musb->mregs, OTG_INTERFSEL, | 633 | musb_writel(musb->mregs, OTG_INTERFSEL, |
| @@ -738,18 +669,8 @@ static struct platform_driver omap2430_driver = { | |||
| 738 | }, | 669 | }, |
| 739 | }; | 670 | }; |
| 740 | 671 | ||
| 672 | module_platform_driver(omap2430_driver); | ||
| 673 | |||
| 741 | MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer"); | 674 | MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer"); |
| 742 | MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); | 675 | MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); |
| 743 | MODULE_LICENSE("GPL v2"); | 676 | MODULE_LICENSE("GPL v2"); |
| 744 | |||
| 745 | static int __init omap2430_init(void) | ||
| 746 | { | ||
| 747 | return platform_driver_register(&omap2430_driver); | ||
| 748 | } | ||
| 749 | subsys_initcall(omap2430_init); | ||
| 750 | |||
| 751 | static void __exit omap2430_exit(void) | ||
| 752 | { | ||
| 753 | platform_driver_unregister(&omap2430_driver); | ||
| 754 | } | ||
| 755 | module_exit(omap2430_exit); | ||
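The omap2430 hunks above move PHY power handling into the enable/disable callbacks, drop the init-time pm_runtime_get_sync() dance in favour of autosuspend-based runtime PM configured at probe time, and replace the hand-rolled subsys_initcall()/module_exit() pair with module_platform_driver(). A minimal sketch of that probe/remove pattern — all `demo_glue_*` names are hypothetical and error handling is omitted — could look like:

```c
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hypothetical probe: enable runtime PM with a 500 ms autosuspend delay,
 * as the patched omap2430 glue now does, instead of holding a permanent
 * runtime PM reference taken during init.
 */
static int demo_glue_probe(struct platform_device *pdev)
{
	pm_runtime_enable(&pdev->dev);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
	return 0;
}

static int demo_glue_remove(struct platform_device *pdev)
{
	/* Balance probe: wake the device, then tear runtime PM back down. */
	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct platform_driver demo_glue_driver = {
	.probe	= demo_glue_probe,
	.remove	= demo_glue_remove,
	.driver	= {
		.name = "demo-glue",
	},
};
module_platform_driver(demo_glue_driver);

MODULE_LICENSE("GPL v2");
```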
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c index fdab4232cfbf..76500515dd8b 100644 --- a/drivers/usb/musb/sunxi.c +++ b/drivers/usb/musb/sunxi.c | |||
| @@ -80,7 +80,8 @@ static struct musb *sunxi_musb; | |||
| 80 | 80 | ||
| 81 | struct sunxi_glue { | 81 | struct sunxi_glue { |
| 82 | struct device *dev; | 82 | struct device *dev; |
| 83 | struct platform_device *musb; | 83 | struct musb *musb; |
| 84 | struct platform_device *musb_pdev; | ||
| 84 | struct clk *clk; | 85 | struct clk *clk; |
| 85 | struct reset_control *rst; | 86 | struct reset_control *rst; |
| 86 | struct phy *phy; | 87 | struct phy *phy; |
| @@ -102,7 +103,7 @@ static void sunxi_musb_work(struct work_struct *work) | |||
| 102 | return; | 103 | return; |
| 103 | 104 | ||
| 104 | if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) { | 105 | if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) { |
| 105 | struct musb *musb = platform_get_drvdata(glue->musb); | 106 | struct musb *musb = glue->musb; |
| 106 | unsigned long flags; | 107 | unsigned long flags; |
| 107 | u8 devctl; | 108 | u8 devctl; |
| 108 | 109 | ||
| @@ -112,7 +113,7 @@ static void sunxi_musb_work(struct work_struct *work) | |||
| 112 | if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) { | 113 | if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) { |
| 113 | set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); | 114 | set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); |
| 114 | musb->xceiv->otg->default_a = 1; | 115 | musb->xceiv->otg->default_a = 1; |
| 115 | musb->xceiv->otg->state = OTG_STATE_A_IDLE; | 116 | musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; |
| 116 | MUSB_HST_MODE(musb); | 117 | MUSB_HST_MODE(musb); |
| 117 | devctl |= MUSB_DEVCTL_SESSION; | 118 | devctl |= MUSB_DEVCTL_SESSION; |
| 118 | } else { | 119 | } else { |
| @@ -145,10 +146,12 @@ static void sunxi_musb_set_vbus(struct musb *musb, int is_on) | |||
| 145 | { | 146 | { |
| 146 | struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent); | 147 | struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent); |
| 147 | 148 | ||
| 148 | if (is_on) | 149 | if (is_on) { |
| 149 | set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); | 150 | set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); |
| 150 | else | 151 | musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; |
| 152 | } else { | ||
| 151 | clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); | 153 | clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); |
| 154 | } | ||
| 152 | 155 | ||
| 153 | schedule_work(&glue->work); | 156 | schedule_work(&glue->work); |
| 154 | } | 157 | } |
| @@ -264,15 +267,6 @@ static int sunxi_musb_init(struct musb *musb) | |||
| 264 | if (ret) | 267 | if (ret) |
| 265 | goto error_unregister_notifier; | 268 | goto error_unregister_notifier; |
| 266 | 269 | ||
| 267 | if (musb->port_mode == MUSB_PORT_MODE_HOST) { | ||
| 268 | ret = phy_power_on(glue->phy); | ||
| 269 | if (ret) | ||
| 270 | goto error_phy_exit; | ||
| 271 | set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags); | ||
| 272 | /* Stop musb work from turning vbus off again */ | ||
| 273 | set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); | ||
| 274 | } | ||
| 275 | |||
| 276 | musb->isr = sunxi_musb_interrupt; | 270 | musb->isr = sunxi_musb_interrupt; |
| 277 | 271 | ||
| 278 | /* Stop the musb-core from doing runtime pm (not supported on sunxi) */ | 272 | /* Stop the musb-core from doing runtime pm (not supported on sunxi) */ |
| @@ -280,8 +274,6 @@ static int sunxi_musb_init(struct musb *musb) | |||
| 280 | 274 | ||
| 281 | return 0; | 275 | return 0; |
| 282 | 276 | ||
| 283 | error_phy_exit: | ||
| 284 | phy_exit(glue->phy); | ||
| 285 | error_unregister_notifier: | 277 | error_unregister_notifier: |
| 286 | if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE) | 278 | if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE) |
| 287 | extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST, | 279 | extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST, |
| @@ -323,10 +315,31 @@ static int sunxi_musb_exit(struct musb *musb) | |||
| 323 | return 0; | 315 | return 0; |
| 324 | } | 316 | } |
| 325 | 317 | ||
| 318 | static int sunxi_set_mode(struct musb *musb, u8 mode) | ||
| 319 | { | ||
| 320 | struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent); | ||
| 321 | int ret; | ||
| 322 | |||
| 323 | if (mode == MUSB_HOST) { | ||
| 324 | ret = phy_power_on(glue->phy); | ||
| 325 | if (ret) | ||
| 326 | return ret; | ||
| 327 | |||
| 328 | set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags); | ||
| 329 | /* Stop musb work from turning vbus off again */ | ||
| 330 | set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags); | ||
| 331 | musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; | ||
| 332 | } | ||
| 333 | |||
| 334 | return 0; | ||
| 335 | } | ||
| 336 | |||
| 326 | static void sunxi_musb_enable(struct musb *musb) | 337 | static void sunxi_musb_enable(struct musb *musb) |
| 327 | { | 338 | { |
| 328 | struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent); | 339 | struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent); |
| 329 | 340 | ||
| 341 | glue->musb = musb; | ||
| 342 | |||
| 330 | /* musb_core does not call us in a balanced manner */ | 343 | /* musb_core does not call us in a balanced manner */ |
| 331 | if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags)) | 344 | if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags)) |
| 332 | return; | 345 | return; |
| @@ -569,6 +582,7 @@ static const struct musb_platform_ops sunxi_musb_ops = { | |||
| 569 | .exit = sunxi_musb_exit, | 582 | .exit = sunxi_musb_exit, |
| 570 | .enable = sunxi_musb_enable, | 583 | .enable = sunxi_musb_enable, |
| 571 | .disable = sunxi_musb_disable, | 584 | .disable = sunxi_musb_disable, |
| 585 | .set_mode = sunxi_set_mode, | ||
| 572 | .fifo_offset = sunxi_musb_fifo_offset, | 586 | .fifo_offset = sunxi_musb_fifo_offset, |
| 573 | .ep_offset = sunxi_musb_ep_offset, | 587 | .ep_offset = sunxi_musb_ep_offset, |
| 574 | .busctl_offset = sunxi_musb_busctl_offset, | 588 | .busctl_offset = sunxi_musb_busctl_offset, |
| @@ -721,9 +735,9 @@ static int sunxi_musb_probe(struct platform_device *pdev) | |||
| 721 | pinfo.data = &pdata; | 735 | pinfo.data = &pdata; |
| 722 | pinfo.size_data = sizeof(pdata); | 736 | pinfo.size_data = sizeof(pdata); |
| 723 | 737 | ||
| 724 | glue->musb = platform_device_register_full(&pinfo); | 738 | glue->musb_pdev = platform_device_register_full(&pinfo); |
| 725 | if (IS_ERR(glue->musb)) { | 739 | if (IS_ERR(glue->musb_pdev)) { |
| 726 | ret = PTR_ERR(glue->musb); | 740 | ret = PTR_ERR(glue->musb_pdev); |
| 727 | dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret); | 741 | dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret); |
| 728 | goto err_unregister_usb_phy; | 742 | goto err_unregister_usb_phy; |
| 729 | } | 743 | } |
| @@ -740,7 +754,7 @@ static int sunxi_musb_remove(struct platform_device *pdev) | |||
| 740 | struct sunxi_glue *glue = platform_get_drvdata(pdev); | 754 | struct sunxi_glue *glue = platform_get_drvdata(pdev); |
| 741 | struct platform_device *usb_phy = glue->usb_phy; | 755 | struct platform_device *usb_phy = glue->usb_phy; |
| 742 | 756 | ||
| 743 | platform_device_unregister(glue->musb); /* Frees glue ! */ | 757 | platform_device_unregister(glue->musb_pdev); |
| 744 | usb_phy_generic_unregister(usb_phy); | 758 | usb_phy_generic_unregister(usb_phy); |
| 745 | 759 | ||
| 746 | return 0; | 760 | return 0; |
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c index 24e2b3cf1867..a72e8d670adc 100644 --- a/drivers/usb/phy/phy-twl6030-usb.c +++ b/drivers/usb/phy/phy-twl6030-usb.c | |||
| @@ -97,6 +97,9 @@ struct twl6030_usb { | |||
| 97 | 97 | ||
| 98 | struct regulator *usb3v3; | 98 | struct regulator *usb3v3; |
| 99 | 99 | ||
| 100 | /* used to check initial cable status after probe */ | ||
| 101 | struct delayed_work get_status_work; | ||
| 102 | |||
| 100 | /* used to set vbus, in atomic path */ | 103 | /* used to set vbus, in atomic path */ |
| 101 | struct work_struct set_vbus_work; | 104 | struct work_struct set_vbus_work; |
| 102 | 105 | ||
| @@ -227,12 +230,16 @@ static irqreturn_t twl6030_usb_irq(int irq, void *_twl) | |||
| 227 | twl->asleep = 1; | 230 | twl->asleep = 1; |
| 228 | status = MUSB_VBUS_VALID; | 231 | status = MUSB_VBUS_VALID; |
| 229 | twl->linkstat = status; | 232 | twl->linkstat = status; |
| 230 | musb_mailbox(status); | 233 | ret = musb_mailbox(status); |
| 234 | if (ret) | ||
| 235 | twl->linkstat = MUSB_UNKNOWN; | ||
| 231 | } else { | 236 | } else { |
| 232 | if (twl->linkstat != MUSB_UNKNOWN) { | 237 | if (twl->linkstat != MUSB_UNKNOWN) { |
| 233 | status = MUSB_VBUS_OFF; | 238 | status = MUSB_VBUS_OFF; |
| 234 | twl->linkstat = status; | 239 | twl->linkstat = status; |
| 235 | musb_mailbox(status); | 240 | ret = musb_mailbox(status); |
| 241 | if (ret) | ||
| 242 | twl->linkstat = MUSB_UNKNOWN; | ||
| 236 | if (twl->asleep) { | 243 | if (twl->asleep) { |
| 237 | regulator_disable(twl->usb3v3); | 244 | regulator_disable(twl->usb3v3); |
| 238 | twl->asleep = 0; | 245 | twl->asleep = 0; |
| @@ -264,7 +271,9 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl) | |||
| 264 | twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET); | 271 | twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET); |
| 265 | status = MUSB_ID_GROUND; | 272 | status = MUSB_ID_GROUND; |
| 266 | twl->linkstat = status; | 273 | twl->linkstat = status; |
| 267 | musb_mailbox(status); | 274 | ret = musb_mailbox(status); |
| 275 | if (ret) | ||
| 276 | twl->linkstat = MUSB_UNKNOWN; | ||
| 268 | } else { | 277 | } else { |
| 269 | twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR); | 278 | twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR); |
| 270 | twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET); | 279 | twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET); |
| @@ -274,6 +283,15 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl) | |||
| 274 | return IRQ_HANDLED; | 283 | return IRQ_HANDLED; |
| 275 | } | 284 | } |
| 276 | 285 | ||
| 286 | static void twl6030_status_work(struct work_struct *work) | ||
| 287 | { | ||
| 288 | struct twl6030_usb *twl = container_of(work, struct twl6030_usb, | ||
| 289 | get_status_work.work); | ||
| 290 | |||
| 291 | twl6030_usb_irq(twl->irq2, twl); | ||
| 292 | twl6030_usbotg_irq(twl->irq1, twl); | ||
| 293 | } | ||
| 294 | |||
| 277 | static int twl6030_enable_irq(struct twl6030_usb *twl) | 295 | static int twl6030_enable_irq(struct twl6030_usb *twl) |
| 278 | { | 296 | { |
| 279 | twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET); | 297 | twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET); |
| @@ -284,8 +302,6 @@ static int twl6030_enable_irq(struct twl6030_usb *twl) | |||
| 284 | REG_INT_MSK_LINE_C); | 302 | REG_INT_MSK_LINE_C); |
| 285 | twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK, | 303 | twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK, |
| 286 | REG_INT_MSK_STS_C); | 304 | REG_INT_MSK_STS_C); |
| 287 | twl6030_usb_irq(twl->irq2, twl); | ||
| 288 | twl6030_usbotg_irq(twl->irq1, twl); | ||
| 289 | 305 | ||
| 290 | return 0; | 306 | return 0; |
| 291 | } | 307 | } |
| @@ -371,6 +387,7 @@ static int twl6030_usb_probe(struct platform_device *pdev) | |||
| 371 | dev_warn(&pdev->dev, "could not create sysfs file\n"); | 387 | dev_warn(&pdev->dev, "could not create sysfs file\n"); |
| 372 | 388 | ||
| 373 | INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work); | 389 | INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work); |
| 390 | INIT_DELAYED_WORK(&twl->get_status_work, twl6030_status_work); | ||
| 374 | 391 | ||
| 375 | status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq, | 392 | status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq, |
| 376 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT, | 393 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT, |
| @@ -395,6 +412,7 @@ static int twl6030_usb_probe(struct platform_device *pdev) | |||
| 395 | 412 | ||
| 396 | twl->asleep = 0; | 413 | twl->asleep = 0; |
| 397 | twl6030_enable_irq(twl); | 414 | twl6030_enable_irq(twl); |
| 415 | schedule_delayed_work(&twl->get_status_work, HZ); | ||
| 398 | dev_info(&pdev->dev, "Initialized TWL6030 USB module\n"); | 416 | dev_info(&pdev->dev, "Initialized TWL6030 USB module\n"); |
| 399 | 417 | ||
| 400 | return 0; | 418 | return 0; |
| @@ -404,6 +422,7 @@ static int twl6030_usb_remove(struct platform_device *pdev) | |||
| 404 | { | 422 | { |
| 405 | struct twl6030_usb *twl = platform_get_drvdata(pdev); | 423 | struct twl6030_usb *twl = platform_get_drvdata(pdev); |
| 406 | 424 | ||
| 425 | cancel_delayed_work(&twl->get_status_work); | ||
| 407 | twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK, | 426 | twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK, |
| 408 | REG_INT_MSK_LINE_C); | 427 | REG_INT_MSK_LINE_C); |
| 409 | twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK, | 428 | twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK, |
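The twl6030 change stops calling the IRQ handlers synchronously from twl6030_enable_irq() and instead defers the initial cable-status check by one second (HZ jiffies) via delayed work; it also falls back to MUSB_UNKNOWN whenever musb_mailbox() fails. A minimal sketch of that deferred-status pattern, with hypothetical `demo_*` names standing in for the driver's structures:

```c
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct demo_phy {
	/* runs once, shortly after probe, to pick up the initial cable state */
	struct delayed_work get_status_work;
};

static void demo_status_work(struct work_struct *work)
{
	struct demo_phy *phy = container_of(work, struct demo_phy,
					    get_status_work.work);
	/* poll the hardware and report the link state here */
	(void)phy;
}

static void demo_phy_start(struct demo_phy *phy)
{
	INIT_DELAYED_WORK(&phy->get_status_work, demo_status_work);
	/* defer the first status check by one second */
	schedule_delayed_work(&phy->get_status_work, HZ);
}

static void demo_phy_stop(struct demo_phy *phy)
{
	cancel_delayed_work(&phy->get_status_work);
}
```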
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 2eddbe538cda..5608af4a369d 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c | |||
| @@ -2007,6 +2007,7 @@ static void mos7720_release(struct usb_serial *serial) | |||
| 2007 | urblist_entry) | 2007 | urblist_entry) |
| 2008 | usb_unlink_urb(urbtrack->urb); | 2008 | usb_unlink_urb(urbtrack->urb); |
| 2009 | spin_unlock_irqrestore(&mos_parport->listlock, flags); | 2009 | spin_unlock_irqrestore(&mos_parport->listlock, flags); |
| 2010 | parport_del_port(mos_parport->pp); | ||
| 2010 | 2011 | ||
| 2011 | kref_put(&mos_parport->ref_count, destroy_mos_parport); | 2012 | kref_put(&mos_parport->ref_count, destroy_mos_parport); |
| 2012 | } | 2013 | } |
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 4d49fce406e1..5ef014ba6ae8 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c | |||
| @@ -836,6 +836,7 @@ static int uas_slave_configure(struct scsi_device *sdev) | |||
| 836 | if (devinfo->flags & US_FL_BROKEN_FUA) | 836 | if (devinfo->flags & US_FL_BROKEN_FUA) |
| 837 | sdev->broken_fua = 1; | 837 | sdev->broken_fua = 1; |
| 838 | 838 | ||
| 839 | scsi_change_queue_depth(sdev, devinfo->qdepth - 2); | ||
| 839 | return 0; | 840 | return 0; |
| 840 | } | 841 | } |
| 841 | 842 | ||
| @@ -848,7 +849,6 @@ static struct scsi_host_template uas_host_template = { | |||
| 848 | .slave_configure = uas_slave_configure, | 849 | .slave_configure = uas_slave_configure, |
| 849 | .eh_abort_handler = uas_eh_abort_handler, | 850 | .eh_abort_handler = uas_eh_abort_handler, |
| 850 | .eh_bus_reset_handler = uas_eh_bus_reset_handler, | 851 | .eh_bus_reset_handler = uas_eh_bus_reset_handler, |
| 851 | .can_queue = MAX_CMNDS, | ||
| 852 | .this_id = -1, | 852 | .this_id = -1, |
| 853 | .sg_tablesize = SG_NONE, | 853 | .sg_tablesize = SG_NONE, |
| 854 | .skip_settle_delay = 1, | 854 | .skip_settle_delay = 1, |
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index fca51105974e..2e0450bec1b1 100644 --- a/drivers/usb/usbip/vhci_hcd.c +++ b/drivers/usb/usbip/vhci_hcd.c | |||
| @@ -941,7 +941,7 @@ static void vhci_stop(struct usb_hcd *hcd) | |||
| 941 | 941 | ||
| 942 | static int vhci_get_frame_number(struct usb_hcd *hcd) | 942 | static int vhci_get_frame_number(struct usb_hcd *hcd) |
| 943 | { | 943 | { |
| 944 | pr_err("Not yet implemented\n"); | 944 | dev_err_ratelimited(&hcd->self.root_hub->dev, "Not yet implemented\n"); |
| 945 | return 0; | 945 | return 0; |
| 946 | } | 946 | } |
| 947 | 947 | ||
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 93601407dab8..688691d9058d 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c | |||
| @@ -749,7 +749,8 @@ static int vfio_vpd_config_write(struct vfio_pci_device *vdev, int pos, | |||
| 749 | if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4) | 749 | if (pci_write_vpd(pdev, addr & ~PCI_VPD_ADDR_F, 4, &data) != 4) |
| 750 | return count; | 750 | return count; |
| 751 | } else { | 751 | } else { |
| 752 | if (pci_read_vpd(pdev, addr, 4, &data) != 4) | 752 | data = 0; |
| 753 | if (pci_read_vpd(pdev, addr, 4, &data) < 0) | ||
| 753 | return count; | 754 | return count; |
| 754 | *pdata = cpu_to_le32(data); | 755 | *pdata = cpu_to_le32(data); |
| 755 | } | 756 | } |
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index e9ea3fef144a..15ecfc9c5f6c 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c | |||
| @@ -228,9 +228,9 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd) | |||
| 228 | 228 | ||
| 229 | static void vfio_intx_disable(struct vfio_pci_device *vdev) | 229 | static void vfio_intx_disable(struct vfio_pci_device *vdev) |
| 230 | { | 230 | { |
| 231 | vfio_intx_set_signal(vdev, -1); | ||
| 232 | vfio_virqfd_disable(&vdev->ctx[0].unmask); | 231 | vfio_virqfd_disable(&vdev->ctx[0].unmask); |
| 233 | vfio_virqfd_disable(&vdev->ctx[0].mask); | 232 | vfio_virqfd_disable(&vdev->ctx[0].mask); |
| 233 | vfio_intx_set_signal(vdev, -1); | ||
| 234 | vdev->irq_type = VFIO_PCI_NUM_IRQS; | 234 | vdev->irq_type = VFIO_PCI_NUM_IRQS; |
| 235 | vdev->num_ctx = 0; | 235 | vdev->num_ctx = 0; |
| 236 | kfree(vdev->ctx); | 236 | kfree(vdev->ctx); |
| @@ -401,13 +401,13 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) | |||
| 401 | struct pci_dev *pdev = vdev->pdev; | 401 | struct pci_dev *pdev = vdev->pdev; |
| 402 | int i; | 402 | int i; |
| 403 | 403 | ||
| 404 | vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); | ||
| 405 | |||
| 406 | for (i = 0; i < vdev->num_ctx; i++) { | 404 | for (i = 0; i < vdev->num_ctx; i++) { |
| 407 | vfio_virqfd_disable(&vdev->ctx[i].unmask); | 405 | vfio_virqfd_disable(&vdev->ctx[i].unmask); |
| 408 | vfio_virqfd_disable(&vdev->ctx[i].mask); | 406 | vfio_virqfd_disable(&vdev->ctx[i].mask); |
| 409 | } | 407 | } |
| 410 | 408 | ||
| 409 | vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); | ||
| 410 | |||
| 411 | if (msix) { | 411 | if (msix) { |
| 412 | pci_disable_msix(vdev->pdev); | 412 | pci_disable_msix(vdev->pdev); |
| 413 | kfree(vdev->msix); | 413 | kfree(vdev->msix); |
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 15a65823aad9..2ba19424e4a1 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
| @@ -515,7 +515,7 @@ static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova, | |||
| 515 | unsigned long pfn, long npage, int prot) | 515 | unsigned long pfn, long npage, int prot) |
| 516 | { | 516 | { |
| 517 | long i; | 517 | long i; |
| 518 | int ret; | 518 | int ret = 0; |
| 519 | 519 | ||
| 520 | for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) { | 520 | for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) { |
| 521 | ret = iommu_map(domain->domain, iova, | 521 | ret = iommu_map(domain->domain, iova, |
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c index 8ea531d2652c..bbfe7e2d4332 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c | |||
| @@ -51,8 +51,8 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core) | |||
| 51 | { | 51 | { |
| 52 | void __iomem *base = core->base; | 52 | void __iomem *base = core->base; |
| 53 | const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ | 53 | const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ |
| 54 | const unsigned ss_scl_high = 4000; /* ns */ | 54 | const unsigned ss_scl_high = 4600; /* ns */ |
| 55 | const unsigned ss_scl_low = 4700; /* ns */ | 55 | const unsigned ss_scl_low = 5400; /* ns */ |
| 56 | const unsigned fs_scl_high = 600; /* ns */ | 56 | const unsigned fs_scl_high = 600; /* ns */ |
| 57 | const unsigned fs_scl_low = 1300; /* ns */ | 57 | const unsigned fs_scl_low = 1300; /* ns */ |
| 58 | const unsigned sda_hold = 1000; /* ns */ | 58 | const unsigned sda_hold = 1000; /* ns */ |
| @@ -442,7 +442,7 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core, | |||
| 442 | 442 | ||
| 443 | c = (ptr[1] >> 6) & 0x3; | 443 | c = (ptr[1] >> 6) & 0x3; |
| 444 | m = (ptr[1] >> 4) & 0x3; | 444 | m = (ptr[1] >> 4) & 0x3; |
| 445 | r = (ptr[1] >> 0) & 0x3; | 445 | r = (ptr[1] >> 0) & 0xf; |
| 446 | 446 | ||
| 447 | itc = (ptr[2] >> 7) & 0x1; | 447 | itc = (ptr[2] >> 7) & 0x1; |
| 448 | ec = (ptr[2] >> 4) & 0x7; | 448 | ec = (ptr[2] >> 4) & 0x7; |
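The second hdmi5_core.c hunk widens the mask for the R field: in the AVI InfoFrame data byte that carries C, M and R (ptr[1] here), C sits in bits 7:6, M in bits 5:4, and R — which CEA-861 defines as four bits (R3..R0) — in bits 3:0, so masking it with 0x3 dropped its upper two bits. A small standalone extraction example with a made-up byte value:

```c
#include <stdio.h>
#include <stdint.h>

/* Decode colorimetry (C), picture aspect (M) and active format aspect
 * ratio (R) from the AVI InfoFrame byte that packs them together.
 */
int main(void)
{
	uint8_t byte1 = 0xAB;			/* C=2, M=2, R=0xB */
	unsigned c = (byte1 >> 6) & 0x3;	/* bits 7:6 */
	unsigned m = (byte1 >> 4) & 0x3;	/* bits 5:4 */
	unsigned r = (byte1 >> 0) & 0xf;	/* bits 3:0 - four bits, not two */

	printf("C=%u M=%u R=%u\n", c, m, r);	/* prints C=2 M=2 R=11 */
	return 0;
}
```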
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index b54f26c55dfd..b4b3e256491b 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
| @@ -746,7 +746,7 @@ config ALIM7101_WDT | |||
| 746 | 746 | ||
| 747 | config EBC_C384_WDT | 747 | config EBC_C384_WDT |
| 748 | tristate "WinSystems EBC-C384 Watchdog Timer" | 748 | tristate "WinSystems EBC-C384 Watchdog Timer" |
| 749 | depends on X86 && ISA | 749 | depends on X86 && ISA_BUS_API |
| 750 | select WATCHDOG_CORE | 750 | select WATCHDOG_CORE |
| 751 | help | 751 | help |
| 752 | Enables watchdog timer support for the watchdog timer on the | 752 | Enables watchdog timer support for the watchdog timer on the |
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index d46839f51e73..e4db19e88ab1 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
| @@ -151,8 +151,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq); | |||
| 151 | static void balloon_process(struct work_struct *work); | 151 | static void balloon_process(struct work_struct *work); |
| 152 | static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); | 152 | static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); |
| 153 | 153 | ||
| 154 | static void release_memory_resource(struct resource *resource); | ||
| 155 | |||
| 156 | /* When ballooning out (allocating memory to return to Xen) we don't really | 154 | /* When ballooning out (allocating memory to return to Xen) we don't really |
| 157 | want the kernel to try too hard since that can trigger the oom killer. */ | 155 | want the kernel to try too hard since that can trigger the oom killer. */ |
| 158 | #define GFP_BALLOON \ | 156 | #define GFP_BALLOON \ |
| @@ -248,6 +246,19 @@ static enum bp_state update_schedule(enum bp_state state) | |||
| 248 | } | 246 | } |
| 249 | 247 | ||
| 250 | #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG | 248 | #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG |
| 249 | static void release_memory_resource(struct resource *resource) | ||
| 250 | { | ||
| 251 | if (!resource) | ||
| 252 | return; | ||
| 253 | |||
| 254 | /* | ||
| 255 | * No need to reset region to identity mapped since we now | ||
| 256 | * know that no I/O can be in this region | ||
| 257 | */ | ||
| 258 | release_resource(resource); | ||
| 259 | kfree(resource); | ||
| 260 | } | ||
| 261 | |||
| 251 | static struct resource *additional_memory_resource(phys_addr_t size) | 262 | static struct resource *additional_memory_resource(phys_addr_t size) |
| 252 | { | 263 | { |
| 253 | struct resource *res; | 264 | struct resource *res; |
| @@ -286,19 +297,6 @@ static struct resource *additional_memory_resource(phys_addr_t size) | |||
| 286 | return res; | 297 | return res; |
| 287 | } | 298 | } |
| 288 | 299 | ||
| 289 | static void release_memory_resource(struct resource *resource) | ||
| 290 | { | ||
| 291 | if (!resource) | ||
| 292 | return; | ||
| 293 | |||
| 294 | /* | ||
| 295 | * No need to reset region to identity mapped since we now | ||
| 296 | * know that no I/O can be in this region | ||
| 297 | */ | ||
| 298 | release_resource(resource); | ||
| 299 | kfree(resource); | ||
| 300 | } | ||
| 301 | |||
| 302 | static enum bp_state reserve_additional_memory(void) | 300 | static enum bp_state reserve_additional_memory(void) |
| 303 | { | 301 | { |
| 304 | long credit; | 302 | long credit; |
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index 8e67336f8ddd..6a25533da237 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c | |||
| @@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size, | |||
| 183 | field_start = OFFSET(cfg_entry); | 183 | field_start = OFFSET(cfg_entry); |
| 184 | field_end = OFFSET(cfg_entry) + field->size; | 184 | field_end = OFFSET(cfg_entry) + field->size; |
| 185 | 185 | ||
| 186 | if ((req_start >= field_start && req_start < field_end) | 186 | if (req_end > field_start && field_end > req_start) { |
| 187 | || (req_end > field_start && req_end <= field_end)) { | ||
| 188 | err = conf_space_read(dev, cfg_entry, field_start, | 187 | err = conf_space_read(dev, cfg_entry, field_start, |
| 189 | &tmp_val); | 188 | &tmp_val); |
| 190 | if (err) | 189 | if (err) |
| @@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value) | |||
| 230 | field_start = OFFSET(cfg_entry); | 229 | field_start = OFFSET(cfg_entry); |
| 231 | field_end = OFFSET(cfg_entry) + field->size; | 230 | field_end = OFFSET(cfg_entry) + field->size; |
| 232 | 231 | ||
| 233 | if ((req_start >= field_start && req_start < field_end) | 232 | if (req_end > field_start && field_end > req_start) { |
| 234 | || (req_end > field_start && req_end <= field_end)) { | ||
| 235 | tmp_val = 0; | 233 | tmp_val = 0; |
| 236 | 234 | ||
| 237 | err = xen_pcibk_config_read(dev, field_start, | 235 | err = xen_pcibk_config_read(dev, field_start, |
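Both conf_space.c hunks replace a two-clause range test with the standard half-open interval overlap check, `req_end > field_start && field_end > req_start`. The old form only matched when one of the request's end points fell inside the field, so a request that completely covered a field slipped past it. A small standalone comparison with made-up offsets:

```c
#include <stdio.h>

/* Old test: matches only if the request starts or ends inside the field. */
static int old_overlap(unsigned req_start, unsigned req_end,
		       unsigned field_start, unsigned field_end)
{
	return (req_start >= field_start && req_start < field_end) ||
	       (req_end > field_start && req_end <= field_end);
}

/* New test: true whenever the half-open ranges [req_start, req_end) and
 * [field_start, field_end) intersect at all.
 */
static int new_overlap(unsigned req_start, unsigned req_end,
		       unsigned field_start, unsigned field_end)
{
	return req_end > field_start && field_end > req_start;
}

int main(void)
{
	/* A 12-byte access starting at 0 fully covers a field at [4, 8). */
	printf("old: %d\n", old_overlap(0, 12, 4, 8));	/* 0 - missed */
	printf("new: %d\n", new_overlap(0, 12, 4, 8));	/* 1 - caught */
	return 0;
}
```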
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index ad3d17d29c81..9ead1c2ff1dd 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c | |||
| @@ -145,7 +145,7 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data) | |||
| 145 | /* A write to obtain the length must happen as a 32-bit write. | 145 | /* A write to obtain the length must happen as a 32-bit write. |
| 146 | * This does not (yet) support writing individual bytes | 146 | * This does not (yet) support writing individual bytes |
| 147 | */ | 147 | */ |
| 148 | if (value == ~PCI_ROM_ADDRESS_ENABLE) | 148 | if ((value | ~PCI_ROM_ADDRESS_MASK) == ~0U) |
| 149 | bar->which = 1; | 149 | bar->which = 1; |
| 150 | else { | 150 | else { |
| 151 | u32 tmpval; | 151 | u32 tmpval; |
| @@ -225,38 +225,42 @@ static inline void read_dev_bar(struct pci_dev *dev, | |||
| 225 | (PCI_BASE_ADDRESS_SPACE_MEMORY | | 225 | (PCI_BASE_ADDRESS_SPACE_MEMORY | |
| 226 | PCI_BASE_ADDRESS_MEM_TYPE_64))) { | 226 | PCI_BASE_ADDRESS_MEM_TYPE_64))) { |
| 227 | bar_info->val = res[pos - 1].start >> 32; | 227 | bar_info->val = res[pos - 1].start >> 32; |
| 228 | bar_info->len_val = res[pos - 1].end >> 32; | 228 | bar_info->len_val = -resource_size(&res[pos - 1]) >> 32; |
| 229 | return; | 229 | return; |
| 230 | } | 230 | } |
| 231 | } | 231 | } |
| 232 | 232 | ||
| 233 | if (!res[pos].flags || | ||
| 234 | (res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET | | ||
| 235 | IORESOURCE_BUSY))) | ||
| 236 | return; | ||
| 237 | |||
| 233 | bar_info->val = res[pos].start | | 238 | bar_info->val = res[pos].start | |
| 234 | (res[pos].flags & PCI_REGION_FLAG_MASK); | 239 | (res[pos].flags & PCI_REGION_FLAG_MASK); |
| 235 | bar_info->len_val = resource_size(&res[pos]); | 240 | bar_info->len_val = -resource_size(&res[pos]) | |
| 241 | (res[pos].flags & PCI_REGION_FLAG_MASK); | ||
| 236 | } | 242 | } |
| 237 | 243 | ||
| 238 | static void *bar_init(struct pci_dev *dev, int offset) | 244 | static void *bar_init(struct pci_dev *dev, int offset) |
| 239 | { | 245 | { |
| 240 | struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL); | 246 | struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL); |
| 241 | 247 | ||
| 242 | if (!bar) | 248 | if (!bar) |
| 243 | return ERR_PTR(-ENOMEM); | 249 | return ERR_PTR(-ENOMEM); |
| 244 | 250 | ||
| 245 | read_dev_bar(dev, bar, offset, ~0); | 251 | read_dev_bar(dev, bar, offset, ~0); |
| 246 | bar->which = 0; | ||
| 247 | 252 | ||
| 248 | return bar; | 253 | return bar; |
| 249 | } | 254 | } |
| 250 | 255 | ||
| 251 | static void *rom_init(struct pci_dev *dev, int offset) | 256 | static void *rom_init(struct pci_dev *dev, int offset) |
| 252 | { | 257 | { |
| 253 | struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL); | 258 | struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL); |
| 254 | 259 | ||
| 255 | if (!bar) | 260 | if (!bar) |
| 256 | return ERR_PTR(-ENOMEM); | 261 | return ERR_PTR(-ENOMEM); |
| 257 | 262 | ||
| 258 | read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE); | 263 | read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE); |
| 259 | bar->which = 0; | ||
| 260 | 264 | ||
| 261 | return bar; | 265 | return bar; |
| 262 | } | 266 | } |
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index f0d268b97d19..a439548de785 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h | |||
| @@ -70,9 +70,13 @@ struct autofs_info { | |||
| 70 | }; | 70 | }; |
| 71 | 71 | ||
| 72 | #define AUTOFS_INF_EXPIRING (1<<0) /* dentry in the process of expiring */ | 72 | #define AUTOFS_INF_EXPIRING (1<<0) /* dentry in the process of expiring */ |
| 73 | #define AUTOFS_INF_NO_RCU (1<<1) /* the dentry is being considered | 73 | #define AUTOFS_INF_WANT_EXPIRE (1<<1) /* the dentry is being considered |
| 74 | * for expiry, so RCU_walk is | 74 | * for expiry, so RCU_walk is |
| 75 | * not permitted | 75 | * not permitted. If it progresses to |
| 76 | * actual expiry attempt, the flag is | ||
| 77 | * not cleared when EXPIRING is set - | ||
| 78 | * in that case it gets cleared only | ||
| 79 | * when it comes to clearing EXPIRING. | ||
| 76 | */ | 80 | */ |
| 77 | #define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */ | 81 | #define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */ |
| 78 | 82 | ||
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index 9510d8d2e9cd..b493909e7492 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c | |||
| @@ -316,19 +316,17 @@ struct dentry *autofs4_expire_direct(struct super_block *sb, | |||
| 316 | if (ino->flags & AUTOFS_INF_PENDING) | 316 | if (ino->flags & AUTOFS_INF_PENDING) |
| 317 | goto out; | 317 | goto out; |
| 318 | if (!autofs4_direct_busy(mnt, root, timeout, do_now)) { | 318 | if (!autofs4_direct_busy(mnt, root, timeout, do_now)) { |
| 319 | ino->flags |= AUTOFS_INF_NO_RCU; | 319 | ino->flags |= AUTOFS_INF_WANT_EXPIRE; |
| 320 | spin_unlock(&sbi->fs_lock); | 320 | spin_unlock(&sbi->fs_lock); |
| 321 | synchronize_rcu(); | 321 | synchronize_rcu(); |
| 322 | spin_lock(&sbi->fs_lock); | 322 | spin_lock(&sbi->fs_lock); |
| 323 | if (!autofs4_direct_busy(mnt, root, timeout, do_now)) { | 323 | if (!autofs4_direct_busy(mnt, root, timeout, do_now)) { |
| 324 | ino->flags |= AUTOFS_INF_EXPIRING; | 324 | ino->flags |= AUTOFS_INF_EXPIRING; |
| 325 | smp_mb(); | ||
| 326 | ino->flags &= ~AUTOFS_INF_NO_RCU; | ||
| 327 | init_completion(&ino->expire_complete); | 325 | init_completion(&ino->expire_complete); |
| 328 | spin_unlock(&sbi->fs_lock); | 326 | spin_unlock(&sbi->fs_lock); |
| 329 | return root; | 327 | return root; |
| 330 | } | 328 | } |
| 331 | ino->flags &= ~AUTOFS_INF_NO_RCU; | 329 | ino->flags &= ~AUTOFS_INF_WANT_EXPIRE; |
| 332 | } | 330 | } |
| 333 | out: | 331 | out: |
| 334 | spin_unlock(&sbi->fs_lock); | 332 | spin_unlock(&sbi->fs_lock); |
| @@ -446,7 +444,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb, | |||
| 446 | while ((dentry = get_next_positive_subdir(dentry, root))) { | 444 | while ((dentry = get_next_positive_subdir(dentry, root))) { |
| 447 | spin_lock(&sbi->fs_lock); | 445 | spin_lock(&sbi->fs_lock); |
| 448 | ino = autofs4_dentry_ino(dentry); | 446 | ino = autofs4_dentry_ino(dentry); |
| 449 | if (ino->flags & AUTOFS_INF_NO_RCU) | 447 | if (ino->flags & AUTOFS_INF_WANT_EXPIRE) |
| 450 | expired = NULL; | 448 | expired = NULL; |
| 451 | else | 449 | else |
| 452 | expired = should_expire(dentry, mnt, timeout, how); | 450 | expired = should_expire(dentry, mnt, timeout, how); |
| @@ -455,7 +453,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb, | |||
| 455 | continue; | 453 | continue; |
| 456 | } | 454 | } |
| 457 | ino = autofs4_dentry_ino(expired); | 455 | ino = autofs4_dentry_ino(expired); |
| 458 | ino->flags |= AUTOFS_INF_NO_RCU; | 456 | ino->flags |= AUTOFS_INF_WANT_EXPIRE; |
| 459 | spin_unlock(&sbi->fs_lock); | 457 | spin_unlock(&sbi->fs_lock); |
| 460 | synchronize_rcu(); | 458 | synchronize_rcu(); |
| 461 | spin_lock(&sbi->fs_lock); | 459 | spin_lock(&sbi->fs_lock); |
| @@ -465,7 +463,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb, | |||
| 465 | goto found; | 463 | goto found; |
| 466 | } | 464 | } |
| 467 | 465 | ||
| 468 | ino->flags &= ~AUTOFS_INF_NO_RCU; | 466 | ino->flags &= ~AUTOFS_INF_WANT_EXPIRE; |
| 469 | if (expired != dentry) | 467 | if (expired != dentry) |
| 470 | dput(expired); | 468 | dput(expired); |
| 471 | spin_unlock(&sbi->fs_lock); | 469 | spin_unlock(&sbi->fs_lock); |
| @@ -475,17 +473,8 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb, | |||
| 475 | found: | 473 | found: |
| 476 | pr_debug("returning %p %pd\n", expired, expired); | 474 | pr_debug("returning %p %pd\n", expired, expired); |
| 477 | ino->flags |= AUTOFS_INF_EXPIRING; | 475 | ino->flags |= AUTOFS_INF_EXPIRING; |
| 478 | smp_mb(); | ||
| 479 | ino->flags &= ~AUTOFS_INF_NO_RCU; | ||
| 480 | init_completion(&ino->expire_complete); | 476 | init_completion(&ino->expire_complete); |
| 481 | spin_unlock(&sbi->fs_lock); | 477 | spin_unlock(&sbi->fs_lock); |
| 482 | spin_lock(&sbi->lookup_lock); | ||
| 483 | spin_lock(&expired->d_parent->d_lock); | ||
| 484 | spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED); | ||
| 485 | list_move(&expired->d_parent->d_subdirs, &expired->d_child); | ||
| 486 | spin_unlock(&expired->d_lock); | ||
| 487 | spin_unlock(&expired->d_parent->d_lock); | ||
| 488 | spin_unlock(&sbi->lookup_lock); | ||
| 489 | return expired; | 478 | return expired; |
| 490 | } | 479 | } |
| 491 | 480 | ||
| @@ -496,7 +485,7 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk) | |||
| 496 | int status; | 485 | int status; |
| 497 | 486 | ||
| 498 | /* Block on any pending expire */ | 487 | /* Block on any pending expire */ |
| 499 | if (!(ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU))) | 488 | if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE)) |
| 500 | return 0; | 489 | return 0; |
| 501 | if (rcu_walk) | 490 | if (rcu_walk) |
| 502 | return -ECHILD; | 491 | return -ECHILD; |
| @@ -554,7 +543,7 @@ int autofs4_expire_run(struct super_block *sb, | |||
| 554 | ino = autofs4_dentry_ino(dentry); | 543 | ino = autofs4_dentry_ino(dentry); |
| 555 | /* avoid rapid-fire expire attempts if expiry fails */ | 544 | /* avoid rapid-fire expire attempts if expiry fails */ |
| 556 | ino->last_used = now; | 545 | ino->last_used = now; |
| 557 | ino->flags &= ~AUTOFS_INF_EXPIRING; | 546 | ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE); |
| 558 | complete_all(&ino->expire_complete); | 547 | complete_all(&ino->expire_complete); |
| 559 | spin_unlock(&sbi->fs_lock); | 548 | spin_unlock(&sbi->fs_lock); |
| 560 | 549 | ||
| @@ -583,7 +572,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt, | |||
| 583 | spin_lock(&sbi->fs_lock); | 572 | spin_lock(&sbi->fs_lock); |
| 584 | /* avoid rapid-fire expire attempts if expiry fails */ | 573 | /* avoid rapid-fire expire attempts if expiry fails */ |
| 585 | ino->last_used = now; | 574 | ino->last_used = now; |
| 586 | ino->flags &= ~AUTOFS_INF_EXPIRING; | 575 | ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE); |
| 587 | complete_all(&ino->expire_complete); | 576 | complete_all(&ino->expire_complete); |
| 588 | spin_unlock(&sbi->fs_lock); | 577 | spin_unlock(&sbi->fs_lock); |
| 589 | dput(dentry); | 578 | dput(dentry); |
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 78bd80298528..3767f6641af1 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c | |||
| @@ -458,7 +458,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk) | |||
| 458 | */ | 458 | */ |
| 459 | struct inode *inode; | 459 | struct inode *inode; |
| 460 | 460 | ||
| 461 | if (ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU)) | 461 | if (ino->flags & AUTOFS_INF_WANT_EXPIRE) |
| 462 | return 0; | 462 | return 0; |
| 463 | if (d_mountpoint(dentry)) | 463 | if (d_mountpoint(dentry)) |
| 464 | return 0; | 464 | return 0; |
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 0146d911f468..631f1554c87b 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c | |||
| @@ -66,11 +66,12 @@ static int autofs4_write(struct autofs_sb_info *sbi, | |||
| 66 | set_fs(KERNEL_DS); | 66 | set_fs(KERNEL_DS); |
| 67 | 67 | ||
| 68 | mutex_lock(&sbi->pipe_mutex); | 68 | mutex_lock(&sbi->pipe_mutex); |
| 69 | wr = __vfs_write(file, data, bytes, &file->f_pos); | 69 | while (bytes) { |
| 70 | while (bytes && wr) { | 70 | wr = __vfs_write(file, data, bytes, &file->f_pos); |
| 71 | if (wr <= 0) | ||
| 72 | break; | ||
| 71 | data += wr; | 73 | data += wr; |
| 72 | bytes -= wr; | 74 | bytes -= wr; |
| 73 | wr = __vfs_write(file, data, bytes, &file->f_pos); | ||
| 74 | } | 75 | } |
| 75 | mutex_unlock(&sbi->pipe_mutex); | 76 | mutex_unlock(&sbi->pipe_mutex); |
| 76 | 77 | ||
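The autofs4_write() rework folds the duplicated __vfs_write() call into a single loop and bails out as soon as a write returns zero or an error; the old loop only stopped on a zero return and would keep adjusting the buffer pointer and byte count with a negative value on failure. The same shape in a plain userspace "write it all" helper:

```c
#include <errno.h>
#include <stddef.h>
#include <unistd.h>

/* Write the whole buffer, retrying on short writes; returns 0 on success
 * or -1 if write() fails or makes no progress.
 */
static int write_all(int fd, const char *data, size_t bytes)
{
	while (bytes) {
		ssize_t wr = write(fd, data, bytes);

		if (wr <= 0) {
			if (wr < 0 && errno == EINTR)
				continue;	/* interrupted, just retry */
			return -1;		/* error or no progress */
		}
		data  += wr;
		bytes -= wr;
	}
	return 0;
}
```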
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index e158b22ef32f..a7a28110dc80 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
| @@ -2275,7 +2275,7 @@ static int elf_core_dump(struct coredump_params *cprm) | |||
| 2275 | goto end_coredump; | 2275 | goto end_coredump; |
| 2276 | 2276 | ||
| 2277 | /* Align to page */ | 2277 | /* Align to page */ |
| 2278 | if (!dump_skip(cprm, dataoff - cprm->file->f_pos)) | 2278 | if (!dump_skip(cprm, dataoff - cprm->pos)) |
| 2279 | goto end_coredump; | 2279 | goto end_coredump; |
| 2280 | 2280 | ||
| 2281 | for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; | 2281 | for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; |
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 71ade0e556b7..203589311bf8 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c | |||
| @@ -1787,7 +1787,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) | |||
| 1787 | goto end_coredump; | 1787 | goto end_coredump; |
| 1788 | } | 1788 | } |
| 1789 | 1789 | ||
| 1790 | if (!dump_skip(cprm, dataoff - cprm->file->f_pos)) | 1790 | if (!dump_skip(cprm, dataoff - cprm->pos)) |
| 1791 | goto end_coredump; | 1791 | goto end_coredump; |
| 1792 | 1792 | ||
| 1793 | if (!elf_fdpic_dump_segments(cprm)) | 1793 | if (!elf_fdpic_dump_segments(cprm)) |
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index b677a6ea6001..7706c8dc5fa6 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c | |||
| @@ -2645,7 +2645,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state, | |||
| 2645 | * This algorithm is recursive because the amount of used stack space | 2645 | * This algorithm is recursive because the amount of used stack space |
| 2646 | * is very small and the max recursion depth is limited. | 2646 | * is very small and the max recursion depth is limited. |
| 2647 | */ | 2647 | */ |
| 2648 | indent_add = sprintf(buf, "%c-%llu(%s/%llu/%d)", | 2648 | indent_add = sprintf(buf, "%c-%llu(%s/%llu/%u)", |
| 2649 | btrfsic_get_block_type(state, block), | 2649 | btrfsic_get_block_type(state, block), |
| 2650 | block->logical_bytenr, block->dev_state->name, | 2650 | block->logical_bytenr, block->dev_state->name, |
| 2651 | block->dev_bytenr, block->mirror_num); | 2651 | block->dev_bytenr, block->mirror_num); |
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 427c36b430a6..a85cf7d23309 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
| @@ -1373,7 +1373,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, | |||
| 1373 | 1373 | ||
| 1374 | if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) { | 1374 | if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) { |
| 1375 | BUG_ON(tm->slot != 0); | 1375 | BUG_ON(tm->slot != 0); |
| 1376 | eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start); | 1376 | eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start, |
| 1377 | eb->len); | ||
| 1377 | if (!eb_rewin) { | 1378 | if (!eb_rewin) { |
| 1378 | btrfs_tree_read_unlock_blocking(eb); | 1379 | btrfs_tree_read_unlock_blocking(eb); |
| 1379 | free_extent_buffer(eb); | 1380 | free_extent_buffer(eb); |
| @@ -1454,7 +1455,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq) | |||
| 1454 | } else if (old_root) { | 1455 | } else if (old_root) { |
| 1455 | btrfs_tree_read_unlock(eb_root); | 1456 | btrfs_tree_read_unlock(eb_root); |
| 1456 | free_extent_buffer(eb_root); | 1457 | free_extent_buffer(eb_root); |
| 1457 | eb = alloc_dummy_extent_buffer(root->fs_info, logical); | 1458 | eb = alloc_dummy_extent_buffer(root->fs_info, logical, |
| 1459 | root->nodesize); | ||
| 1458 | } else { | 1460 | } else { |
| 1459 | btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK); | 1461 | btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK); |
| 1460 | eb = btrfs_clone_extent_buffer(eb_root); | 1462 | eb = btrfs_clone_extent_buffer(eb_root); |
| @@ -1552,6 +1554,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, | |||
| 1552 | trans->transid, root->fs_info->generation); | 1554 | trans->transid, root->fs_info->generation); |
| 1553 | 1555 | ||
| 1554 | if (!should_cow_block(trans, root, buf)) { | 1556 | if (!should_cow_block(trans, root, buf)) { |
| 1557 | trans->dirty = true; | ||
| 1555 | *cow_ret = buf; | 1558 | *cow_ret = buf; |
| 1556 | return 0; | 1559 | return 0; |
| 1557 | } | 1560 | } |
| @@ -1783,10 +1786,12 @@ static noinline int generic_bin_search(struct extent_buffer *eb, | |||
| 1783 | if (!err) { | 1786 | if (!err) { |
| 1784 | tmp = (struct btrfs_disk_key *)(kaddr + offset - | 1787 | tmp = (struct btrfs_disk_key *)(kaddr + offset - |
| 1785 | map_start); | 1788 | map_start); |
| 1786 | } else { | 1789 | } else if (err == 1) { |
| 1787 | read_extent_buffer(eb, &unaligned, | 1790 | read_extent_buffer(eb, &unaligned, |
| 1788 | offset, sizeof(unaligned)); | 1791 | offset, sizeof(unaligned)); |
| 1789 | tmp = &unaligned; | 1792 | tmp = &unaligned; |
| 1793 | } else { | ||
| 1794 | return err; | ||
| 1790 | } | 1795 | } |
| 1791 | 1796 | ||
| 1792 | } else { | 1797 | } else { |
| @@ -2510,6 +2515,8 @@ read_block_for_search(struct btrfs_trans_handle *trans, | |||
| 2510 | if (!btrfs_buffer_uptodate(tmp, 0, 0)) | 2515 | if (!btrfs_buffer_uptodate(tmp, 0, 0)) |
| 2511 | ret = -EIO; | 2516 | ret = -EIO; |
| 2512 | free_extent_buffer(tmp); | 2517 | free_extent_buffer(tmp); |
| 2518 | } else { | ||
| 2519 | ret = PTR_ERR(tmp); | ||
| 2513 | } | 2520 | } |
| 2514 | return ret; | 2521 | return ret; |
| 2515 | } | 2522 | } |
| @@ -2773,8 +2780,10 @@ again: | |||
| 2773 | * then we don't want to set the path blocking, | 2780 | * then we don't want to set the path blocking, |
| 2774 | * so we test it here | 2781 | * so we test it here |
| 2775 | */ | 2782 | */ |
| 2776 | if (!should_cow_block(trans, root, b)) | 2783 | if (!should_cow_block(trans, root, b)) { |
| 2784 | trans->dirty = true; | ||
| 2777 | goto cow_done; | 2785 | goto cow_done; |
| 2786 | } | ||
| 2778 | 2787 | ||
| 2779 | /* | 2788 | /* |
| 2780 | * must have write locks on this node and the | 2789 | * must have write locks on this node and the |
| @@ -2823,6 +2832,8 @@ cow_done: | |||
| 2823 | } | 2832 | } |
| 2824 | 2833 | ||
| 2825 | ret = key_search(b, key, level, &prev_cmp, &slot); | 2834 | ret = key_search(b, key, level, &prev_cmp, &slot); |
| 2835 | if (ret < 0) | ||
| 2836 | goto done; | ||
| 2826 | 2837 | ||
| 2827 | if (level != 0) { | 2838 | if (level != 0) { |
| 2828 | int dec = 0; | 2839 | int dec = 0; |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 101c3cfd3f7c..4274a7bfdaed 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
| @@ -2518,7 +2518,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache); | |||
| 2518 | int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, | 2518 | int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, |
| 2519 | struct btrfs_root *root, unsigned long count); | 2519 | struct btrfs_root *root, unsigned long count); |
| 2520 | int btrfs_async_run_delayed_refs(struct btrfs_root *root, | 2520 | int btrfs_async_run_delayed_refs(struct btrfs_root *root, |
| 2521 | unsigned long count, int wait); | 2521 | unsigned long count, u64 transid, int wait); |
| 2522 | int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len); | 2522 | int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len); |
| 2523 | int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, | 2523 | int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, |
| 2524 | struct btrfs_root *root, u64 bytenr, | 2524 | struct btrfs_root *root, u64 bytenr, |
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 61561c2a3f96..d3aaabbfada0 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c | |||
| @@ -1606,15 +1606,23 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode) | |||
| 1606 | return 0; | 1606 | return 0; |
| 1607 | } | 1607 | } |
| 1608 | 1608 | ||
| 1609 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, | 1609 | bool btrfs_readdir_get_delayed_items(struct inode *inode, |
| 1610 | struct list_head *del_list) | 1610 | struct list_head *ins_list, |
| 1611 | struct list_head *del_list) | ||
| 1611 | { | 1612 | { |
| 1612 | struct btrfs_delayed_node *delayed_node; | 1613 | struct btrfs_delayed_node *delayed_node; |
| 1613 | struct btrfs_delayed_item *item; | 1614 | struct btrfs_delayed_item *item; |
| 1614 | 1615 | ||
| 1615 | delayed_node = btrfs_get_delayed_node(inode); | 1616 | delayed_node = btrfs_get_delayed_node(inode); |
| 1616 | if (!delayed_node) | 1617 | if (!delayed_node) |
| 1617 | return; | 1618 | return false; |
| 1619 | |||
| 1620 | /* | ||
| 1621 | * We can only do one readdir with delayed items at a time because of | ||
| 1622 | * item->readdir_list. | ||
| 1623 | */ | ||
| 1624 | inode_unlock_shared(inode); | ||
| 1625 | inode_lock(inode); | ||
| 1618 | 1626 | ||
| 1619 | mutex_lock(&delayed_node->mutex); | 1627 | mutex_lock(&delayed_node->mutex); |
| 1620 | item = __btrfs_first_delayed_insertion_item(delayed_node); | 1628 | item = __btrfs_first_delayed_insertion_item(delayed_node); |
| @@ -1641,10 +1649,13 @@ void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, | |||
| 1641 | * requeue or dequeue this delayed node. | 1649 | * requeue or dequeue this delayed node. |
| 1642 | */ | 1650 | */ |
| 1643 | atomic_dec(&delayed_node->refs); | 1651 | atomic_dec(&delayed_node->refs); |
| 1652 | |||
| 1653 | return true; | ||
| 1644 | } | 1654 | } |
| 1645 | 1655 | ||
| 1646 | void btrfs_put_delayed_items(struct list_head *ins_list, | 1656 | void btrfs_readdir_put_delayed_items(struct inode *inode, |
| 1647 | struct list_head *del_list) | 1657 | struct list_head *ins_list, |
| 1658 | struct list_head *del_list) | ||
| 1648 | { | 1659 | { |
| 1649 | struct btrfs_delayed_item *curr, *next; | 1660 | struct btrfs_delayed_item *curr, *next; |
| 1650 | 1661 | ||
| @@ -1659,6 +1670,12 @@ void btrfs_put_delayed_items(struct list_head *ins_list, | |||
| 1659 | if (atomic_dec_and_test(&curr->refs)) | 1670 | if (atomic_dec_and_test(&curr->refs)) |
| 1660 | kfree(curr); | 1671 | kfree(curr); |
| 1661 | } | 1672 | } |
| 1673 | |||
| 1674 | /* | ||
| 1675 | * The VFS is going to do up_read(), so we need to downgrade back to a | ||
| 1676 | * read lock. | ||
| 1677 | */ | ||
| 1678 | downgrade_write(&inode->i_rwsem); | ||
| 1662 | } | 1679 | } |
| 1663 | 1680 | ||
| 1664 | int btrfs_should_delete_dir_index(struct list_head *del_list, | 1681 | int btrfs_should_delete_dir_index(struct list_head *del_list, |
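The readdir helpers above trade the shared inode lock the VFS took for an exclusive one (only one readdir can use item->readdir_list at a time) and then downgrade back before returning, so the caller's eventual up_read() still balances. A minimal rwsem sketch of that upgrade/downgrade shape, with a hypothetical lock and critical section:

```c
#include <linux/rwsem.h>

static DECLARE_RWSEM(demo_sem);

/* Caller enters holding demo_sem for read, much like the VFS holds
 * i_rwsem shared around ->iterate_shared().
 */
static void demo_exclusive_section(void)
{
	/* Trade the shared lock for an exclusive one. Note this is not an
	 * atomic upgrade: another writer may slip in between the two calls.
	 */
	up_read(&demo_sem);
	down_write(&demo_sem);

	/* ... exclusive work, e.g. splicing items onto a private list ... */

	/* Hand back a read lock so the caller's up_read() stays balanced. */
	downgrade_write(&demo_sem);
}
```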
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index 0167853c84ae..2495b3d4075f 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h | |||
| @@ -137,10 +137,12 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); | |||
| 137 | void btrfs_destroy_delayed_inodes(struct btrfs_root *root); | 137 | void btrfs_destroy_delayed_inodes(struct btrfs_root *root); |
| 138 | 138 | ||
| 139 | /* Used for readdir() */ | 139 | /* Used for readdir() */ |
| 140 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, | 140 | bool btrfs_readdir_get_delayed_items(struct inode *inode, |
| 141 | struct list_head *del_list); | 141 | struct list_head *ins_list, |
| 142 | void btrfs_put_delayed_items(struct list_head *ins_list, | 142 | struct list_head *del_list); |
| 143 | struct list_head *del_list); | 143 | void btrfs_readdir_put_delayed_items(struct inode *inode, |
| 144 | struct list_head *ins_list, | ||
| 145 | struct list_head *del_list); | ||
| 144 | int btrfs_should_delete_dir_index(struct list_head *del_list, | 146 | int btrfs_should_delete_dir_index(struct list_head *del_list, |
| 145 | u64 index); | 147 | u64 index); |
| 146 | int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, | 148 | int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 6628fca9f4ed..60ce1190307b 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
| @@ -1098,7 +1098,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr) | |||
| 1098 | struct inode *btree_inode = root->fs_info->btree_inode; | 1098 | struct inode *btree_inode = root->fs_info->btree_inode; |
| 1099 | 1099 | ||
| 1100 | buf = btrfs_find_create_tree_block(root, bytenr); | 1100 | buf = btrfs_find_create_tree_block(root, bytenr); |
| 1101 | if (!buf) | 1101 | if (IS_ERR(buf)) |
| 1102 | return; | 1102 | return; |
| 1103 | read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, | 1103 | read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, |
| 1104 | buf, 0, WAIT_NONE, btree_get_extent, 0); | 1104 | buf, 0, WAIT_NONE, btree_get_extent, 0); |
| @@ -1114,7 +1114,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, | |||
| 1114 | int ret; | 1114 | int ret; |
| 1115 | 1115 | ||
| 1116 | buf = btrfs_find_create_tree_block(root, bytenr); | 1116 | buf = btrfs_find_create_tree_block(root, bytenr); |
| 1117 | if (!buf) | 1117 | if (IS_ERR(buf)) |
| 1118 | return 0; | 1118 | return 0; |
| 1119 | 1119 | ||
| 1120 | set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags); | 1120 | set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags); |
| @@ -1147,7 +1147,8 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, | |||
| 1147 | u64 bytenr) | 1147 | u64 bytenr) |
| 1148 | { | 1148 | { |
| 1149 | if (btrfs_test_is_dummy_root(root)) | 1149 | if (btrfs_test_is_dummy_root(root)) |
| 1150 | return alloc_test_extent_buffer(root->fs_info, bytenr); | 1150 | return alloc_test_extent_buffer(root->fs_info, bytenr, |
| 1151 | root->nodesize); | ||
| 1151 | return alloc_extent_buffer(root->fs_info, bytenr); | 1152 | return alloc_extent_buffer(root->fs_info, bytenr); |
| 1152 | } | 1153 | } |
| 1153 | 1154 | ||
| @@ -1171,8 +1172,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, | |||
| 1171 | int ret; | 1172 | int ret; |
| 1172 | 1173 | ||
| 1173 | buf = btrfs_find_create_tree_block(root, bytenr); | 1174 | buf = btrfs_find_create_tree_block(root, bytenr); |
| 1174 | if (!buf) | 1175 | if (IS_ERR(buf)) |
| 1175 | return ERR_PTR(-ENOMEM); | 1176 | return buf; |
| 1176 | 1177 | ||
| 1177 | ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); | 1178 | ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid); |
| 1178 | if (ret) { | 1179 | if (ret) { |
| @@ -1314,14 +1315,16 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info, | |||
| 1314 | 1315 | ||
| 1315 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | 1316 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
| 1316 | /* Should only be used by the testing infrastructure */ | 1317 | /* Should only be used by the testing infrastructure */ |
| 1317 | struct btrfs_root *btrfs_alloc_dummy_root(void) | 1318 | struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize) |
| 1318 | { | 1319 | { |
| 1319 | struct btrfs_root *root; | 1320 | struct btrfs_root *root; |
| 1320 | 1321 | ||
| 1321 | root = btrfs_alloc_root(NULL, GFP_KERNEL); | 1322 | root = btrfs_alloc_root(NULL, GFP_KERNEL); |
| 1322 | if (!root) | 1323 | if (!root) |
| 1323 | return ERR_PTR(-ENOMEM); | 1324 | return ERR_PTR(-ENOMEM); |
| 1324 | __setup_root(4096, 4096, 4096, root, NULL, 1); | 1325 | /* We don't use the stripesize in selftest, set it as sectorsize */ |
| 1326 | __setup_root(nodesize, sectorsize, sectorsize, root, NULL, | ||
| 1327 | BTRFS_ROOT_TREE_OBJECTID); | ||
| 1325 | set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state); | 1328 | set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state); |
| 1326 | root->alloc_bytenr = 0; | 1329 | root->alloc_bytenr = 0; |
| 1327 | 1330 | ||
| @@ -1803,6 +1806,13 @@ static int cleaner_kthread(void *arg) | |||
| 1803 | if (btrfs_need_cleaner_sleep(root)) | 1806 | if (btrfs_need_cleaner_sleep(root)) |
| 1804 | goto sleep; | 1807 | goto sleep; |
| 1805 | 1808 | ||
| 1809 | /* | ||
| 1810 | * Do not do anything if we might cause open_ctree() to block | ||
| 1811 | * before we have finished mounting the filesystem. | ||
| 1812 | */ | ||
| 1813 | if (!root->fs_info->open) | ||
| 1814 | goto sleep; | ||
| 1815 | |||
| 1806 | if (!mutex_trylock(&root->fs_info->cleaner_mutex)) | 1816 | if (!mutex_trylock(&root->fs_info->cleaner_mutex)) |
| 1807 | goto sleep; | 1817 | goto sleep; |
| 1808 | 1818 | ||
| @@ -2517,7 +2527,6 @@ int open_ctree(struct super_block *sb, | |||
| 2517 | int num_backups_tried = 0; | 2527 | int num_backups_tried = 0; |
| 2518 | int backup_index = 0; | 2528 | int backup_index = 0; |
| 2519 | int max_active; | 2529 | int max_active; |
| 2520 | bool cleaner_mutex_locked = false; | ||
| 2521 | 2530 | ||
| 2522 | tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); | 2531 | tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); |
| 2523 | chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL); | 2532 | chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL); |
| @@ -2797,7 +2806,7 @@ int open_ctree(struct super_block *sb, | |||
| 2797 | 2806 | ||
| 2798 | nodesize = btrfs_super_nodesize(disk_super); | 2807 | nodesize = btrfs_super_nodesize(disk_super); |
| 2799 | sectorsize = btrfs_super_sectorsize(disk_super); | 2808 | sectorsize = btrfs_super_sectorsize(disk_super); |
| 2800 | stripesize = btrfs_super_stripesize(disk_super); | 2809 | stripesize = sectorsize; |
| 2801 | fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); | 2810 | fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); |
| 2802 | fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); | 2811 | fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); |
| 2803 | 2812 | ||
| @@ -2996,13 +3005,6 @@ retry_root_backup: | |||
| 2996 | goto fail_sysfs; | 3005 | goto fail_sysfs; |
| 2997 | } | 3006 | } |
| 2998 | 3007 | ||
| 2999 | /* | ||
| 3000 | * Hold the cleaner_mutex thread here so that we don't block | ||
| 3001 | * for a long time on btrfs_recover_relocation. cleaner_kthread | ||
| 3002 | * will wait for us to finish mounting the filesystem. | ||
| 3003 | */ | ||
| 3004 | mutex_lock(&fs_info->cleaner_mutex); | ||
| 3005 | cleaner_mutex_locked = true; | ||
| 3006 | fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, | 3008 | fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, |
| 3007 | "btrfs-cleaner"); | 3009 | "btrfs-cleaner"); |
| 3008 | if (IS_ERR(fs_info->cleaner_kthread)) | 3010 | if (IS_ERR(fs_info->cleaner_kthread)) |
| @@ -3062,8 +3064,10 @@ retry_root_backup: | |||
| 3062 | ret = btrfs_cleanup_fs_roots(fs_info); | 3064 | ret = btrfs_cleanup_fs_roots(fs_info); |
| 3063 | if (ret) | 3065 | if (ret) |
| 3064 | goto fail_qgroup; | 3066 | goto fail_qgroup; |
| 3065 | /* We locked cleaner_mutex before creating cleaner_kthread. */ | 3067 | |
| 3068 | mutex_lock(&fs_info->cleaner_mutex); | ||
| 3066 | ret = btrfs_recover_relocation(tree_root); | 3069 | ret = btrfs_recover_relocation(tree_root); |
| 3070 | mutex_unlock(&fs_info->cleaner_mutex); | ||
| 3067 | if (ret < 0) { | 3071 | if (ret < 0) { |
| 3068 | btrfs_warn(fs_info, "failed to recover relocation: %d", | 3072 | btrfs_warn(fs_info, "failed to recover relocation: %d", |
| 3069 | ret); | 3073 | ret); |
| @@ -3071,8 +3075,6 @@ retry_root_backup: | |||
| 3071 | goto fail_qgroup; | 3075 | goto fail_qgroup; |
| 3072 | } | 3076 | } |
| 3073 | } | 3077 | } |
| 3074 | mutex_unlock(&fs_info->cleaner_mutex); | ||
| 3075 | cleaner_mutex_locked = false; | ||
| 3076 | 3078 | ||
| 3077 | location.objectid = BTRFS_FS_TREE_OBJECTID; | 3079 | location.objectid = BTRFS_FS_TREE_OBJECTID; |
| 3078 | location.type = BTRFS_ROOT_ITEM_KEY; | 3080 | location.type = BTRFS_ROOT_ITEM_KEY; |
| @@ -3186,10 +3188,6 @@ fail_cleaner: | |||
| 3186 | filemap_write_and_wait(fs_info->btree_inode->i_mapping); | 3188 | filemap_write_and_wait(fs_info->btree_inode->i_mapping); |
| 3187 | 3189 | ||
| 3188 | fail_sysfs: | 3190 | fail_sysfs: |
| 3189 | if (cleaner_mutex_locked) { | ||
| 3190 | mutex_unlock(&fs_info->cleaner_mutex); | ||
| 3191 | cleaner_mutex_locked = false; | ||
| 3192 | } | ||
| 3193 | btrfs_sysfs_remove_mounted(fs_info); | 3191 | btrfs_sysfs_remove_mounted(fs_info); |
| 3194 | 3192 | ||
| 3195 | fail_fsdev_sysfs: | 3193 | fail_fsdev_sysfs: |
| @@ -4130,6 +4128,16 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, | |||
| 4130 | * Hint to catch really bogus numbers, bitflips or so, more exact checks are | 4128 | * Hint to catch really bogus numbers, bitflips or so, more exact checks are |
| 4131 | * done later | 4129 | * done later |
| 4132 | */ | 4130 | */ |
| 4131 | if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) { | ||
| 4132 | btrfs_err(fs_info, "bytes_used is too small %llu", | ||
| 4133 | btrfs_super_bytes_used(sb)); | ||
| 4134 | ret = -EINVAL; | ||
| 4135 | } | ||
| 4136 | if (!is_power_of_2(btrfs_super_stripesize(sb))) { | ||
| 4137 | btrfs_err(fs_info, "invalid stripesize %u", | ||
| 4138 | btrfs_super_stripesize(sb)); | ||
| 4139 | ret = -EINVAL; | ||
| 4140 | } | ||
| 4133 | if (btrfs_super_num_devices(sb) > (1UL << 31)) | 4141 | if (btrfs_super_num_devices(sb) > (1UL << 31)) |
| 4134 | printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n", | 4142 | printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n", |
| 4135 | btrfs_super_num_devices(sb)); | 4143 | btrfs_super_num_devices(sb)); |
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 8e79d0070bcf..acba821499a9 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h | |||
| @@ -90,7 +90,7 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, | |||
| 90 | void btrfs_free_fs_root(struct btrfs_root *root); | 90 | void btrfs_free_fs_root(struct btrfs_root *root); |
| 91 | 91 | ||
| 92 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | 92 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
| 93 | struct btrfs_root *btrfs_alloc_dummy_root(void); | 93 | struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize); |
| 94 | #endif | 94 | #endif |
| 95 | 95 | ||
| 96 | /* | 96 | /* |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a400951e8678..82b912a293ab 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -2042,6 +2042,11 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, | |||
| 2042 | struct btrfs_bio *bbio = NULL; | 2042 | struct btrfs_bio *bbio = NULL; |
| 2043 | 2043 | ||
| 2044 | 2044 | ||
| 2045 | /* | ||
| 2046 | * Avoid races with device replace and make sure our bbio has devices | ||
| 2047 | * associated to its stripes that don't go away while we are discarding. | ||
| 2048 | */ | ||
| 2049 | btrfs_bio_counter_inc_blocked(root->fs_info); | ||
| 2045 | /* Tell the block device(s) that the sectors can be discarded */ | 2050 | /* Tell the block device(s) that the sectors can be discarded */ |
| 2046 | ret = btrfs_map_block(root->fs_info, REQ_DISCARD, | 2051 | ret = btrfs_map_block(root->fs_info, REQ_DISCARD, |
| 2047 | bytenr, &num_bytes, &bbio, 0); | 2052 | bytenr, &num_bytes, &bbio, 0); |
| @@ -2074,6 +2079,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, | |||
| 2074 | } | 2079 | } |
| 2075 | btrfs_put_bbio(bbio); | 2080 | btrfs_put_bbio(bbio); |
| 2076 | } | 2081 | } |
| 2082 | btrfs_bio_counter_dec(root->fs_info); | ||
| 2077 | 2083 | ||
| 2078 | if (actual_bytes) | 2084 | if (actual_bytes) |
| 2079 | *actual_bytes = discarded_bytes; | 2085 | *actual_bytes = discarded_bytes; |
| @@ -2829,6 +2835,7 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans, | |||
| 2829 | 2835 | ||
| 2830 | struct async_delayed_refs { | 2836 | struct async_delayed_refs { |
| 2831 | struct btrfs_root *root; | 2837 | struct btrfs_root *root; |
| 2838 | u64 transid; | ||
| 2832 | int count; | 2839 | int count; |
| 2833 | int error; | 2840 | int error; |
| 2834 | int sync; | 2841 | int sync; |
| @@ -2844,6 +2851,10 @@ static void delayed_ref_async_start(struct btrfs_work *work) | |||
| 2844 | 2851 | ||
| 2845 | async = container_of(work, struct async_delayed_refs, work); | 2852 | async = container_of(work, struct async_delayed_refs, work); |
| 2846 | 2853 | ||
| 2854 | /* if the commit is already started, we don't need to wait here */ | ||
| 2855 | if (btrfs_transaction_blocked(async->root->fs_info)) | ||
| 2856 | goto done; | ||
| 2857 | |||
| 2847 | trans = btrfs_join_transaction(async->root); | 2858 | trans = btrfs_join_transaction(async->root); |
| 2848 | if (IS_ERR(trans)) { | 2859 | if (IS_ERR(trans)) { |
| 2849 | async->error = PTR_ERR(trans); | 2860 | async->error = PTR_ERR(trans); |
| @@ -2855,10 +2866,15 @@ static void delayed_ref_async_start(struct btrfs_work *work) | |||
| 2855 | * wait on delayed refs | 2866 | * wait on delayed refs |
| 2856 | */ | 2867 | */ |
| 2857 | trans->sync = true; | 2868 | trans->sync = true; |
| 2869 | |||
| 2870 | /* Don't bother flushing if we got into a different transaction */ | ||
| 2871 | if (trans->transid > async->transid) | ||
| 2872 | goto end; | ||
| 2873 | |||
| 2858 | ret = btrfs_run_delayed_refs(trans, async->root, async->count); | 2874 | ret = btrfs_run_delayed_refs(trans, async->root, async->count); |
| 2859 | if (ret) | 2875 | if (ret) |
| 2860 | async->error = ret; | 2876 | async->error = ret; |
| 2861 | 2877 | end: | |
| 2862 | ret = btrfs_end_transaction(trans, async->root); | 2878 | ret = btrfs_end_transaction(trans, async->root); |
| 2863 | if (ret && !async->error) | 2879 | if (ret && !async->error) |
| 2864 | async->error = ret; | 2880 | async->error = ret; |
| @@ -2870,7 +2886,7 @@ done: | |||
| 2870 | } | 2886 | } |
| 2871 | 2887 | ||
| 2872 | int btrfs_async_run_delayed_refs(struct btrfs_root *root, | 2888 | int btrfs_async_run_delayed_refs(struct btrfs_root *root, |
| 2873 | unsigned long count, int wait) | 2889 | unsigned long count, u64 transid, int wait) |
| 2874 | { | 2890 | { |
| 2875 | struct async_delayed_refs *async; | 2891 | struct async_delayed_refs *async; |
| 2876 | int ret; | 2892 | int ret; |
| @@ -2882,6 +2898,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root, | |||
| 2882 | async->root = root->fs_info->tree_root; | 2898 | async->root = root->fs_info->tree_root; |
| 2883 | async->count = count; | 2899 | async->count = count; |
| 2884 | async->error = 0; | 2900 | async->error = 0; |
| 2901 | async->transid = transid; | ||
| 2885 | if (wait) | 2902 | if (wait) |
| 2886 | async->sync = 1; | 2903 | async->sync = 1; |
| 2887 | else | 2904 | else |
| @@ -8010,8 +8027,9 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, | |||
| 8010 | struct extent_buffer *buf; | 8027 | struct extent_buffer *buf; |
| 8011 | 8028 | ||
| 8012 | buf = btrfs_find_create_tree_block(root, bytenr); | 8029 | buf = btrfs_find_create_tree_block(root, bytenr); |
| 8013 | if (!buf) | 8030 | if (IS_ERR(buf)) |
| 8014 | return ERR_PTR(-ENOMEM); | 8031 | return buf; |
| 8032 | |||
| 8015 | btrfs_set_header_generation(buf, trans->transid); | 8033 | btrfs_set_header_generation(buf, trans->transid); |
| 8016 | btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level); | 8034 | btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level); |
| 8017 | btrfs_tree_lock(buf); | 8035 | btrfs_tree_lock(buf); |
| @@ -8038,7 +8056,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, | |||
| 8038 | set_extent_dirty(&trans->transaction->dirty_pages, buf->start, | 8056 | set_extent_dirty(&trans->transaction->dirty_pages, buf->start, |
| 8039 | buf->start + buf->len - 1, GFP_NOFS); | 8057 | buf->start + buf->len - 1, GFP_NOFS); |
| 8040 | } | 8058 | } |
| 8041 | trans->blocks_used++; | 8059 | trans->dirty = true; |
| 8042 | /* this returns a buffer locked for blocking */ | 8060 | /* this returns a buffer locked for blocking */ |
| 8043 | return buf; | 8061 | return buf; |
| 8044 | } | 8062 | } |
| @@ -8653,8 +8671,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans, | |||
| 8653 | next = btrfs_find_tree_block(root->fs_info, bytenr); | 8671 | next = btrfs_find_tree_block(root->fs_info, bytenr); |
| 8654 | if (!next) { | 8672 | if (!next) { |
| 8655 | next = btrfs_find_create_tree_block(root, bytenr); | 8673 | next = btrfs_find_create_tree_block(root, bytenr); |
| 8656 | if (!next) | 8674 | if (IS_ERR(next)) |
| 8657 | return -ENOMEM; | 8675 | return PTR_ERR(next); |
| 8676 | |||
| 8658 | btrfs_set_buffer_lockdep_class(root->root_key.objectid, next, | 8677 | btrfs_set_buffer_lockdep_class(root->root_key.objectid, next, |
| 8659 | level - 1); | 8678 | level - 1); |
| 8660 | reada = 1; | 8679 | reada = 1; |
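btrfs_discard_extent() above (and repair_io_failure() in the next file) now brackets btrfs_map_block() with btrfs_bio_counter_inc_blocked()/btrfs_bio_counter_dec(), so a running device replace cannot retire the devices the bbio points at while they are in use. A much-simplified sketch of that bracket shape; the gate and the mapping work are modelled by hypothetical stubs, and the real counter is a per-cpu counter whose "blocked" variant also waits while a replace holds it:

        #include <linux/atomic.h>
        #include <linux/types.h>

        /* Hypothetical stand-in for fs_info->bio_counter. */
        struct io_gate { atomic_t inflight; };

        static void io_gate_enter(struct io_gate *g) { atomic_inc(&g->inflight); }
        static void io_gate_exit(struct io_gate *g)  { atomic_dec(&g->inflight); }

        /* Hypothetical stub for the map + discard work on the range. */
        static int map_and_discard(u64 start, u64 len) { return 0; }

        static int discard_range(struct io_gate *gate, u64 start, u64 len)
        {
                int ret;

                io_gate_enter(gate);            /* keep device replace out */
                ret = map_and_discard(start, len);
                io_gate_exit(gate);             /* paired on every path */
                return ret;
        }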
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3cd57825c75f..75533adef998 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
| @@ -2025,9 +2025,16 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, | |||
| 2025 | bio->bi_iter.bi_size = 0; | 2025 | bio->bi_iter.bi_size = 0; |
| 2026 | map_length = length; | 2026 | map_length = length; |
| 2027 | 2027 | ||
| 2028 | /* | ||
| 2029 | * Avoid races with device replace and make sure our bbio has devices | ||
| 2030 | * associated to its stripes that don't go away while we are doing the | ||
| 2031 | * read repair operation. | ||
| 2032 | */ | ||
| 2033 | btrfs_bio_counter_inc_blocked(fs_info); | ||
| 2028 | ret = btrfs_map_block(fs_info, WRITE, logical, | 2034 | ret = btrfs_map_block(fs_info, WRITE, logical, |
| 2029 | &map_length, &bbio, mirror_num); | 2035 | &map_length, &bbio, mirror_num); |
| 2030 | if (ret) { | 2036 | if (ret) { |
| 2037 | btrfs_bio_counter_dec(fs_info); | ||
| 2031 | bio_put(bio); | 2038 | bio_put(bio); |
| 2032 | return -EIO; | 2039 | return -EIO; |
| 2033 | } | 2040 | } |
| @@ -2037,6 +2044,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, | |||
| 2037 | dev = bbio->stripes[mirror_num-1].dev; | 2044 | dev = bbio->stripes[mirror_num-1].dev; |
| 2038 | btrfs_put_bbio(bbio); | 2045 | btrfs_put_bbio(bbio); |
| 2039 | if (!dev || !dev->bdev || !dev->writeable) { | 2046 | if (!dev || !dev->bdev || !dev->writeable) { |
| 2047 | btrfs_bio_counter_dec(fs_info); | ||
| 2040 | bio_put(bio); | 2048 | bio_put(bio); |
| 2041 | return -EIO; | 2049 | return -EIO; |
| 2042 | } | 2050 | } |
| @@ -2045,6 +2053,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, | |||
| 2045 | 2053 | ||
| 2046 | if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) { | 2054 | if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) { |
| 2047 | /* try to remap that extent elsewhere? */ | 2055 | /* try to remap that extent elsewhere? */ |
| 2056 | btrfs_bio_counter_dec(fs_info); | ||
| 2048 | bio_put(bio); | 2057 | bio_put(bio); |
| 2049 | btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); | 2058 | btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); |
| 2050 | return -EIO; | 2059 | return -EIO; |
| @@ -2054,6 +2063,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, | |||
| 2054 | "read error corrected: ino %llu off %llu (dev %s sector %llu)", | 2063 | "read error corrected: ino %llu off %llu (dev %s sector %llu)", |
| 2055 | btrfs_ino(inode), start, | 2064 | btrfs_ino(inode), start, |
| 2056 | rcu_str_deref(dev->name), sector); | 2065 | rcu_str_deref(dev->name), sector); |
| 2066 | btrfs_bio_counter_dec(fs_info); | ||
| 2057 | bio_put(bio); | 2067 | bio_put(bio); |
| 2058 | return 0; | 2068 | return 0; |
| 2059 | } | 2069 | } |
| @@ -4718,16 +4728,16 @@ err: | |||
| 4718 | } | 4728 | } |
| 4719 | 4729 | ||
| 4720 | struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, | 4730 | struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, |
| 4721 | u64 start) | 4731 | u64 start, u32 nodesize) |
| 4722 | { | 4732 | { |
| 4723 | unsigned long len; | 4733 | unsigned long len; |
| 4724 | 4734 | ||
| 4725 | if (!fs_info) { | 4735 | if (!fs_info) { |
| 4726 | /* | 4736 | /* |
| 4727 | * Called only from tests that don't always have a fs_info | 4737 | * Called only from tests that don't always have a fs_info |
| 4728 | * available, but we know that nodesize is 4096 | 4738 | * available |
| 4729 | */ | 4739 | */ |
| 4730 | len = 4096; | 4740 | len = nodesize; |
| 4731 | } else { | 4741 | } else { |
| 4732 | len = fs_info->tree_root->nodesize; | 4742 | len = fs_info->tree_root->nodesize; |
| 4733 | } | 4743 | } |
| @@ -4823,7 +4833,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, | |||
| 4823 | 4833 | ||
| 4824 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | 4834 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
| 4825 | struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, | 4835 | struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, |
| 4826 | u64 start) | 4836 | u64 start, u32 nodesize) |
| 4827 | { | 4837 | { |
| 4828 | struct extent_buffer *eb, *exists = NULL; | 4838 | struct extent_buffer *eb, *exists = NULL; |
| 4829 | int ret; | 4839 | int ret; |
| @@ -4831,7 +4841,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, | |||
| 4831 | eb = find_extent_buffer(fs_info, start); | 4841 | eb = find_extent_buffer(fs_info, start); |
| 4832 | if (eb) | 4842 | if (eb) |
| 4833 | return eb; | 4843 | return eb; |
| 4834 | eb = alloc_dummy_extent_buffer(fs_info, start); | 4844 | eb = alloc_dummy_extent_buffer(fs_info, start, nodesize); |
| 4835 | if (!eb) | 4845 | if (!eb) |
| 4836 | return NULL; | 4846 | return NULL; |
| 4837 | eb->fs_info = fs_info; | 4847 | eb->fs_info = fs_info; |
| @@ -4882,18 +4892,25 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, | |||
| 4882 | int uptodate = 1; | 4892 | int uptodate = 1; |
| 4883 | int ret; | 4893 | int ret; |
| 4884 | 4894 | ||
| 4895 | if (!IS_ALIGNED(start, fs_info->tree_root->sectorsize)) { | ||
| 4896 | btrfs_err(fs_info, "bad tree block start %llu", start); | ||
| 4897 | return ERR_PTR(-EINVAL); | ||
| 4898 | } | ||
| 4899 | |||
| 4885 | eb = find_extent_buffer(fs_info, start); | 4900 | eb = find_extent_buffer(fs_info, start); |
| 4886 | if (eb) | 4901 | if (eb) |
| 4887 | return eb; | 4902 | return eb; |
| 4888 | 4903 | ||
| 4889 | eb = __alloc_extent_buffer(fs_info, start, len); | 4904 | eb = __alloc_extent_buffer(fs_info, start, len); |
| 4890 | if (!eb) | 4905 | if (!eb) |
| 4891 | return NULL; | 4906 | return ERR_PTR(-ENOMEM); |
| 4892 | 4907 | ||
| 4893 | for (i = 0; i < num_pages; i++, index++) { | 4908 | for (i = 0; i < num_pages; i++, index++) { |
| 4894 | p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL); | 4909 | p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL); |
| 4895 | if (!p) | 4910 | if (!p) { |
| 4911 | exists = ERR_PTR(-ENOMEM); | ||
| 4896 | goto free_eb; | 4912 | goto free_eb; |
| 4913 | } | ||
| 4897 | 4914 | ||
| 4898 | spin_lock(&mapping->private_lock); | 4915 | spin_lock(&mapping->private_lock); |
| 4899 | if (PagePrivate(p)) { | 4916 | if (PagePrivate(p)) { |
| @@ -4938,8 +4955,10 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, | |||
| 4938 | set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); | 4955 | set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); |
| 4939 | again: | 4956 | again: |
| 4940 | ret = radix_tree_preload(GFP_NOFS); | 4957 | ret = radix_tree_preload(GFP_NOFS); |
| 4941 | if (ret) | 4958 | if (ret) { |
| 4959 | exists = ERR_PTR(ret); | ||
| 4942 | goto free_eb; | 4960 | goto free_eb; |
| 4961 | } | ||
| 4943 | 4962 | ||
| 4944 | spin_lock(&fs_info->buffer_lock); | 4963 | spin_lock(&fs_info->buffer_lock); |
| 4945 | ret = radix_tree_insert(&fs_info->buffer_radix, | 4964 | ret = radix_tree_insert(&fs_info->buffer_radix, |
| @@ -5323,6 +5342,11 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv, | |||
| 5323 | return ret; | 5342 | return ret; |
| 5324 | } | 5343 | } |
| 5325 | 5344 | ||
| 5345 | /* | ||
| 5346 | * return 0 if the item is found within a page. | ||
| 5347 | * return 1 if the item spans two pages. | ||
| 5348 | * return -EINVAL otherwise. | ||
| 5349 | */ | ||
| 5326 | int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, | 5350 | int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, |
| 5327 | unsigned long min_len, char **map, | 5351 | unsigned long min_len, char **map, |
| 5328 | unsigned long *map_start, | 5352 | unsigned long *map_start, |
| @@ -5337,7 +5361,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, | |||
| 5337 | PAGE_SHIFT; | 5361 | PAGE_SHIFT; |
| 5338 | 5362 | ||
| 5339 | if (i != end_i) | 5363 | if (i != end_i) |
| 5340 | return -EINVAL; | 5364 | return 1; |
| 5341 | 5365 | ||
| 5342 | if (i == 0) { | 5366 | if (i == 0) { |
| 5343 | offset = start_offset; | 5367 | offset = start_offset; |
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 1baf19c9b79d..c0c1c4fef6ce 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h | |||
| @@ -348,7 +348,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, | |||
| 348 | struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, | 348 | struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, |
| 349 | u64 start, unsigned long len); | 349 | u64 start, unsigned long len); |
| 350 | struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, | 350 | struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, |
| 351 | u64 start); | 351 | u64 start, u32 nodesize); |
| 352 | struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src); | 352 | struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src); |
| 353 | struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, | 353 | struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, |
| 354 | u64 start); | 354 | u64 start); |
| @@ -468,5 +468,5 @@ noinline u64 find_lock_delalloc_range(struct inode *inode, | |||
| 468 | u64 *end, u64 max_bytes); | 468 | u64 *end, u64 max_bytes); |
| 469 | #endif | 469 | #endif |
| 470 | struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, | 470 | struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, |
| 471 | u64 start); | 471 | u64 start, u32 nodesize); |
| 472 | #endif | 472 | #endif |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index e0c9bd3fb02d..2234e88cf674 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
| @@ -1534,30 +1534,30 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, | |||
| 1534 | reserve_bytes = round_up(write_bytes + sector_offset, | 1534 | reserve_bytes = round_up(write_bytes + sector_offset, |
| 1535 | root->sectorsize); | 1535 | root->sectorsize); |
| 1536 | 1536 | ||
| 1537 | if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | | ||
| 1538 | BTRFS_INODE_PREALLOC)) && | ||
| 1539 | check_can_nocow(inode, pos, &write_bytes) > 0) { | ||
| 1540 | /* | ||
| 1541 | * For nodata cow case, no need to reserve | ||
| 1542 | * data space. | ||
| 1543 | */ | ||
| 1544 | only_release_metadata = true; | ||
| 1545 | /* | ||
| 1546 | * our prealloc extent may be smaller than | ||
| 1547 | * write_bytes, so scale down. | ||
| 1548 | */ | ||
| 1549 | num_pages = DIV_ROUND_UP(write_bytes + offset, | ||
| 1550 | PAGE_SIZE); | ||
| 1551 | reserve_bytes = round_up(write_bytes + sector_offset, | ||
| 1552 | root->sectorsize); | ||
| 1553 | goto reserve_metadata; | ||
| 1554 | } | ||
| 1555 | |||
| 1556 | ret = btrfs_check_data_free_space(inode, pos, write_bytes); | 1537 | ret = btrfs_check_data_free_space(inode, pos, write_bytes); |
| 1557 | if (ret < 0) | 1538 | if (ret < 0) { |
| 1558 | break; | 1539 | if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | |
| 1540 | BTRFS_INODE_PREALLOC)) && | ||
| 1541 | check_can_nocow(inode, pos, &write_bytes) > 0) { | ||
| 1542 | /* | ||
| 1543 | * For nodata cow case, no need to reserve | ||
| 1544 | * data space. | ||
| 1545 | */ | ||
| 1546 | only_release_metadata = true; | ||
| 1547 | /* | ||
| 1548 | * our prealloc extent may be smaller than | ||
| 1549 | * write_bytes, so scale down. | ||
| 1550 | */ | ||
| 1551 | num_pages = DIV_ROUND_UP(write_bytes + offset, | ||
| 1552 | PAGE_SIZE); | ||
| 1553 | reserve_bytes = round_up(write_bytes + | ||
| 1554 | sector_offset, | ||
| 1555 | root->sectorsize); | ||
| 1556 | } else { | ||
| 1557 | break; | ||
| 1558 | } | ||
| 1559 | } | ||
| 1559 | 1560 | ||
| 1560 | reserve_metadata: | ||
| 1561 | ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes); | 1561 | ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes); |
| 1562 | if (ret) { | 1562 | if (ret) { |
| 1563 | if (!only_release_metadata) | 1563 | if (!only_release_metadata) |
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index c6dc1183f542..69d270f6602c 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
| @@ -29,7 +29,7 @@ | |||
| 29 | #include "inode-map.h" | 29 | #include "inode-map.h" |
| 30 | #include "volumes.h" | 30 | #include "volumes.h" |
| 31 | 31 | ||
| 32 | #define BITS_PER_BITMAP (PAGE_SIZE * 8) | 32 | #define BITS_PER_BITMAP (PAGE_SIZE * 8UL) |
| 33 | #define MAX_CACHE_BYTES_PER_GIG SZ_32K | 33 | #define MAX_CACHE_BYTES_PER_GIG SZ_32K |
| 34 | 34 | ||
| 35 | struct btrfs_trim_range { | 35 | struct btrfs_trim_range { |
| @@ -1415,11 +1415,11 @@ static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl, | |||
| 1415 | u64 offset) | 1415 | u64 offset) |
| 1416 | { | 1416 | { |
| 1417 | u64 bitmap_start; | 1417 | u64 bitmap_start; |
| 1418 | u32 bytes_per_bitmap; | 1418 | u64 bytes_per_bitmap; |
| 1419 | 1419 | ||
| 1420 | bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; | 1420 | bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; |
| 1421 | bitmap_start = offset - ctl->start; | 1421 | bitmap_start = offset - ctl->start; |
| 1422 | bitmap_start = div_u64(bitmap_start, bytes_per_bitmap); | 1422 | bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); |
| 1423 | bitmap_start *= bytes_per_bitmap; | 1423 | bitmap_start *= bytes_per_bitmap; |
| 1424 | bitmap_start += ctl->start; | 1424 | bitmap_start += ctl->start; |
| 1425 | 1425 | ||
| @@ -1638,10 +1638,10 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) | |||
| 1638 | u64 bitmap_bytes; | 1638 | u64 bitmap_bytes; |
| 1639 | u64 extent_bytes; | 1639 | u64 extent_bytes; |
| 1640 | u64 size = block_group->key.offset; | 1640 | u64 size = block_group->key.offset; |
| 1641 | u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; | 1641 | u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; |
| 1642 | u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg); | 1642 | u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); |
| 1643 | 1643 | ||
| 1644 | max_bitmaps = max_t(u32, max_bitmaps, 1); | 1644 | max_bitmaps = max_t(u64, max_bitmaps, 1); |
| 1645 | 1645 | ||
| 1646 | ASSERT(ctl->total_bitmaps <= max_bitmaps); | 1646 | ASSERT(ctl->total_bitmaps <= max_bitmaps); |
| 1647 | 1647 | ||
| @@ -1660,7 +1660,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) | |||
| 1660 | * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as | 1660 | * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as |
| 1661 | * we add more bitmaps. | 1661 | * we add more bitmaps. |
| 1662 | */ | 1662 | */ |
| 1663 | bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE; | 1663 | bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit; |
| 1664 | 1664 | ||
| 1665 | if (bitmap_bytes >= max_bytes) { | 1665 | if (bitmap_bytes >= max_bytes) { |
| 1666 | ctl->extents_thresh = 0; | 1666 | ctl->extents_thresh = 0; |
| @@ -3662,7 +3662,7 @@ have_info: | |||
| 3662 | if (tmp->offset + tmp->bytes < offset) | 3662 | if (tmp->offset + tmp->bytes < offset) |
| 3663 | break; | 3663 | break; |
| 3664 | if (offset + bytes < tmp->offset) { | 3664 | if (offset + bytes < tmp->offset) { |
| 3665 | n = rb_prev(&info->offset_index); | 3665 | n = rb_prev(&tmp->offset_index); |
| 3666 | continue; | 3666 | continue; |
| 3667 | } | 3667 | } |
| 3668 | info = tmp; | 3668 | info = tmp; |
| @@ -3676,7 +3676,7 @@ have_info: | |||
| 3676 | if (offset + bytes < tmp->offset) | 3676 | if (offset + bytes < tmp->offset) |
| 3677 | break; | 3677 | break; |
| 3678 | if (tmp->offset + tmp->bytes < offset) { | 3678 | if (tmp->offset + tmp->bytes < offset) { |
| 3679 | n = rb_next(&info->offset_index); | 3679 | n = rb_next(&tmp->offset_index); |
| 3680 | continue; | 3680 | continue; |
| 3681 | } | 3681 | } |
| 3682 | info = tmp; | 3682 | info = tmp; |
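The free-space-cache.c changes widen BITS_PER_BITMAP to unsigned long and the per-bitmap byte counts to u64 because PAGE_SIZE * 8 * ctl->unit overflows 32-bit arithmetic on large-page configurations. A small standalone illustration of the promotion issue, using a hypothetical 64K page size and 64K sectorsize:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t page_size = 64 * 1024;   /* hypothetical 64K pages */
                uint32_t unit = 64 * 1024;        /* hypothetical 64K sectorsize */

                /* All-32-bit arithmetic: 524288 * 65536 = 2^35, truncated to 0. */
                uint32_t bad = (page_size * 8) * unit;

                /* Promoting one operand keeps the full 2^35 product. */
                uint64_t good = (uint64_t)(page_size * 8) * unit;

                printf("u32: %u, u64: %llu\n", bad, (unsigned long long)good);
                return 0;       /* prints "u32: 0, u64: 34359738368" */
        }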
diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c index aae520b2aee5..a97fdc156a03 100644 --- a/fs/btrfs/hash.c +++ b/fs/btrfs/hash.c | |||
| @@ -24,6 +24,11 @@ int __init btrfs_hash_init(void) | |||
| 24 | return PTR_ERR_OR_ZERO(tfm); | 24 | return PTR_ERR_OR_ZERO(tfm); |
| 25 | } | 25 | } |
| 26 | 26 | ||
| 27 | const char* btrfs_crc32c_impl(void) | ||
| 28 | { | ||
| 29 | return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); | ||
| 30 | } | ||
| 31 | |||
| 27 | void btrfs_hash_exit(void) | 32 | void btrfs_hash_exit(void) |
| 28 | { | 33 | { |
| 29 | crypto_free_shash(tfm); | 34 | crypto_free_shash(tfm); |
diff --git a/fs/btrfs/hash.h b/fs/btrfs/hash.h index 118a2316e5d3..c3a2ec554361 100644 --- a/fs/btrfs/hash.h +++ b/fs/btrfs/hash.h | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | int __init btrfs_hash_init(void); | 22 | int __init btrfs_hash_init(void); |
| 23 | 23 | ||
| 24 | void btrfs_hash_exit(void); | 24 | void btrfs_hash_exit(void); |
| 25 | const char* btrfs_crc32c_impl(void); | ||
| 25 | 26 | ||
| 26 | u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length); | 27 | u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length); |
| 27 | 28 | ||
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 270499598ed4..4421954720b8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -3271,7 +3271,16 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) | |||
| 3271 | /* grab metadata reservation from transaction handle */ | 3271 | /* grab metadata reservation from transaction handle */ |
| 3272 | if (reserve) { | 3272 | if (reserve) { |
| 3273 | ret = btrfs_orphan_reserve_metadata(trans, inode); | 3273 | ret = btrfs_orphan_reserve_metadata(trans, inode); |
| 3274 | BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */ | 3274 | ASSERT(!ret); |
| 3275 | if (ret) { | ||
| 3276 | atomic_dec(&root->orphan_inodes); | ||
| 3277 | clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, | ||
| 3278 | &BTRFS_I(inode)->runtime_flags); | ||
| 3279 | if (insert) | ||
| 3280 | clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, | ||
| 3281 | &BTRFS_I(inode)->runtime_flags); | ||
| 3282 | return ret; | ||
| 3283 | } | ||
| 3275 | } | 3284 | } |
| 3276 | 3285 | ||
| 3277 | /* insert an orphan item to track this unlinked/truncated file */ | 3286 | /* insert an orphan item to track this unlinked/truncated file */ |
| @@ -4549,6 +4558,7 @@ delete: | |||
| 4549 | BUG_ON(ret); | 4558 | BUG_ON(ret); |
| 4550 | if (btrfs_should_throttle_delayed_refs(trans, root)) | 4559 | if (btrfs_should_throttle_delayed_refs(trans, root)) |
| 4551 | btrfs_async_run_delayed_refs(root, | 4560 | btrfs_async_run_delayed_refs(root, |
| 4561 | trans->transid, | ||
| 4552 | trans->delayed_ref_updates * 2, 0); | 4562 | trans->delayed_ref_updates * 2, 0); |
| 4553 | if (be_nice) { | 4563 | if (be_nice) { |
| 4554 | if (truncate_space_check(trans, root, | 4564 | if (truncate_space_check(trans, root, |
| @@ -5748,6 +5758,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) | |||
| 5748 | int name_len; | 5758 | int name_len; |
| 5749 | int is_curr = 0; /* ctx->pos points to the current index? */ | 5759 | int is_curr = 0; /* ctx->pos points to the current index? */ |
| 5750 | bool emitted; | 5760 | bool emitted; |
| 5761 | bool put = false; | ||
| 5751 | 5762 | ||
| 5752 | /* FIXME, use a real flag for deciding about the key type */ | 5763 | /* FIXME, use a real flag for deciding about the key type */ |
| 5753 | if (root->fs_info->tree_root == root) | 5764 | if (root->fs_info->tree_root == root) |
| @@ -5765,7 +5776,8 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) | |||
| 5765 | if (key_type == BTRFS_DIR_INDEX_KEY) { | 5776 | if (key_type == BTRFS_DIR_INDEX_KEY) { |
| 5766 | INIT_LIST_HEAD(&ins_list); | 5777 | INIT_LIST_HEAD(&ins_list); |
| 5767 | INIT_LIST_HEAD(&del_list); | 5778 | INIT_LIST_HEAD(&del_list); |
| 5768 | btrfs_get_delayed_items(inode, &ins_list, &del_list); | 5779 | put = btrfs_readdir_get_delayed_items(inode, &ins_list, |
| 5780 | &del_list); | ||
| 5769 | } | 5781 | } |
| 5770 | 5782 | ||
| 5771 | key.type = key_type; | 5783 | key.type = key_type; |
| @@ -5912,8 +5924,8 @@ next: | |||
| 5912 | nopos: | 5924 | nopos: |
| 5913 | ret = 0; | 5925 | ret = 0; |
| 5914 | err: | 5926 | err: |
| 5915 | if (key_type == BTRFS_DIR_INDEX_KEY) | 5927 | if (put) |
| 5916 | btrfs_put_delayed_items(&ins_list, &del_list); | 5928 | btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list); |
| 5917 | btrfs_free_path(path); | 5929 | btrfs_free_path(path); |
| 5918 | return ret; | 5930 | return ret; |
| 5919 | } | 5931 | } |
| @@ -6979,7 +6991,18 @@ insert: | |||
| 6979 | * existing will always be non-NULL, since there must be | 6991 | * existing will always be non-NULL, since there must be |
| 6980 | * extent causing the -EEXIST. | 6992 | * extent causing the -EEXIST. |
| 6981 | */ | 6993 | */ |
| 6982 | if (start >= extent_map_end(existing) || | 6994 | if (existing->start == em->start && |
| 6995 | extent_map_end(existing) == extent_map_end(em) && | ||
| 6996 | em->block_start == existing->block_start) { | ||
| 6997 | /* | ||
| 6998 | * these two extents are the same, it happens | ||
| 6999 | * with inlines especially | ||
| 7000 | */ | ||
| 7001 | free_extent_map(em); | ||
| 7002 | em = existing; | ||
| 7003 | err = 0; | ||
| 7004 | |||
| 7005 | } else if (start >= extent_map_end(existing) || | ||
| 6983 | start <= existing->start) { | 7006 | start <= existing->start) { |
| 6984 | /* | 7007 | /* |
| 6985 | * The existing extent map is the one nearest to | 7008 | * The existing extent map is the one nearest to |
| @@ -10514,7 +10537,7 @@ static const struct inode_operations btrfs_dir_ro_inode_operations = { | |||
| 10514 | static const struct file_operations btrfs_dir_file_operations = { | 10537 | static const struct file_operations btrfs_dir_file_operations = { |
| 10515 | .llseek = generic_file_llseek, | 10538 | .llseek = generic_file_llseek, |
| 10516 | .read = generic_read_dir, | 10539 | .read = generic_read_dir, |
| 10517 | .iterate = btrfs_real_readdir, | 10540 | .iterate_shared = btrfs_real_readdir, |
| 10518 | .unlocked_ioctl = btrfs_ioctl, | 10541 | .unlocked_ioctl = btrfs_ioctl, |
| 10519 | #ifdef CONFIG_COMPAT | 10542 | #ifdef CONFIG_COMPAT |
| 10520 | .compat_ioctl = btrfs_compat_ioctl, | 10543 | .compat_ioctl = btrfs_compat_ioctl, |
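The readdir path in the inode.c hunk above records in a bool whether btrfs_readdir_get_delayed_items() actually took its references, and releases only when it did, rather than re-testing the key type at cleanup time. A sketch of that take-and-remember shape; every helper name below is a hypothetical stand-in for the btrfs readdir helpers:

        #include <linux/fs.h>
        #include <linux/types.h>

        /* Hypothetical stubs standing in for the delayed-item helpers. */
        static bool get_items(struct inode *inode)    { return true; }
        static void put_items(struct inode *inode)    { }
        static int  emit_entries(struct inode *inode) { return 0; }

        static int do_readdir(struct inode *inode, bool wants_delayed)
        {
                bool put = false;
                int ret;

                if (wants_delayed)
                        put = get_items(inode); /* may legitimately fail */

                ret = emit_entries(inode);

                if (put)                        /* release only what we took */
                        put_items(inode);
                return ret;
        }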
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 559170464d7c..aca8264f4a49 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c | |||
| @@ -718,12 +718,13 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, | |||
| 718 | return count; | 718 | return count; |
| 719 | } | 719 | } |
| 720 | 720 | ||
| 721 | void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, | 721 | int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, |
| 722 | const u64 range_start, const u64 range_len) | 722 | const u64 range_start, const u64 range_len) |
| 723 | { | 723 | { |
| 724 | struct btrfs_root *root; | 724 | struct btrfs_root *root; |
| 725 | struct list_head splice; | 725 | struct list_head splice; |
| 726 | int done; | 726 | int done; |
| 727 | int total_done = 0; | ||
| 727 | 728 | ||
| 728 | INIT_LIST_HEAD(&splice); | 729 | INIT_LIST_HEAD(&splice); |
| 729 | 730 | ||
| @@ -742,6 +743,7 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, | |||
| 742 | done = btrfs_wait_ordered_extents(root, nr, | 743 | done = btrfs_wait_ordered_extents(root, nr, |
| 743 | range_start, range_len); | 744 | range_start, range_len); |
| 744 | btrfs_put_fs_root(root); | 745 | btrfs_put_fs_root(root); |
| 746 | total_done += done; | ||
| 745 | 747 | ||
| 746 | spin_lock(&fs_info->ordered_root_lock); | 748 | spin_lock(&fs_info->ordered_root_lock); |
| 747 | if (nr != -1) { | 749 | if (nr != -1) { |
| @@ -752,6 +754,8 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, | |||
| 752 | list_splice_tail(&splice, &fs_info->ordered_roots); | 754 | list_splice_tail(&splice, &fs_info->ordered_roots); |
| 753 | spin_unlock(&fs_info->ordered_root_lock); | 755 | spin_unlock(&fs_info->ordered_root_lock); |
| 754 | mutex_unlock(&fs_info->ordered_operations_mutex); | 756 | mutex_unlock(&fs_info->ordered_operations_mutex); |
| 757 | |||
| 758 | return total_done; | ||
| 755 | } | 759 | } |
| 756 | 760 | ||
| 757 | /* | 761 | /* |
| @@ -964,6 +968,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, | |||
| 964 | struct rb_node *prev = NULL; | 968 | struct rb_node *prev = NULL; |
| 965 | struct btrfs_ordered_extent *test; | 969 | struct btrfs_ordered_extent *test; |
| 966 | int ret = 1; | 970 | int ret = 1; |
| 971 | u64 orig_offset = offset; | ||
| 967 | 972 | ||
| 968 | spin_lock_irq(&tree->lock); | 973 | spin_lock_irq(&tree->lock); |
| 969 | if (ordered) { | 974 | if (ordered) { |
| @@ -979,7 +984,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, | |||
| 979 | 984 | ||
| 980 | /* truncate file */ | 985 | /* truncate file */ |
| 981 | if (disk_i_size > i_size) { | 986 | if (disk_i_size > i_size) { |
| 982 | BTRFS_I(inode)->disk_i_size = i_size; | 987 | BTRFS_I(inode)->disk_i_size = orig_offset; |
| 983 | ret = 0; | 988 | ret = 0; |
| 984 | goto out; | 989 | goto out; |
| 985 | } | 990 | } |
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 2049c9be85ee..451507776ff5 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h | |||
| @@ -199,7 +199,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, | |||
| 199 | u32 *sum, int len); | 199 | u32 *sum, int len); |
| 200 | int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, | 200 | int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, |
| 201 | const u64 range_start, const u64 range_len); | 201 | const u64 range_start, const u64 range_len); |
| 202 | void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, | 202 | int btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, |
| 203 | const u64 range_start, const u64 range_len); | 203 | const u64 range_start, const u64 range_len); |
| 204 | void btrfs_get_logged_extents(struct inode *inode, | 204 | void btrfs_get_logged_extents(struct inode *inode, |
| 205 | struct list_head *logged_list, | 205 | struct list_head *logged_list, |
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index 298631eaee78..8428db7cd88f 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c | |||
| @@ -761,12 +761,14 @@ static void __reada_start_machine(struct btrfs_fs_info *fs_info) | |||
| 761 | 761 | ||
| 762 | do { | 762 | do { |
| 763 | enqueued = 0; | 763 | enqueued = 0; |
| 764 | mutex_lock(&fs_devices->device_list_mutex); | ||
| 764 | list_for_each_entry(device, &fs_devices->devices, dev_list) { | 765 | list_for_each_entry(device, &fs_devices->devices, dev_list) { |
| 765 | if (atomic_read(&device->reada_in_flight) < | 766 | if (atomic_read(&device->reada_in_flight) < |
| 766 | MAX_IN_FLIGHT) | 767 | MAX_IN_FLIGHT) |
| 767 | enqueued += reada_start_machine_dev(fs_info, | 768 | enqueued += reada_start_machine_dev(fs_info, |
| 768 | device); | 769 | device); |
| 769 | } | 770 | } |
| 771 | mutex_unlock(&fs_devices->device_list_mutex); | ||
| 770 | total += enqueued; | 772 | total += enqueued; |
| 771 | } while (enqueued && total < 10000); | 773 | } while (enqueued && total < 10000); |
| 772 | 774 | ||
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 46d847f66e4b..70427ef66b04 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
| @@ -3582,6 +3582,46 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, | |||
| 3582 | */ | 3582 | */ |
| 3583 | scrub_pause_on(fs_info); | 3583 | scrub_pause_on(fs_info); |
| 3584 | ret = btrfs_inc_block_group_ro(root, cache); | 3584 | ret = btrfs_inc_block_group_ro(root, cache); |
| 3585 | if (!ret && is_dev_replace) { | ||
| 3586 | /* | ||
| 3587 | * If we are doing a device replace wait for any tasks | ||
| 3588 | * that started delalloc right before we set the block | ||
| 3589 | * group to RO mode, as they might have just allocated | ||
| 3590 | * an extent from it or decided they could do a nocow | ||
| 3591 | * write. And if any such tasks did that, wait for their | ||
| 3592 | * ordered extents to complete and then commit the | ||
| 3593 | * current transaction, so that we can later see the new | ||
| 3594 | * extent items in the extent tree - the ordered extents | ||
| 3595 | * create delayed data references (for cow writes) when | ||
| 3596 | * they complete, which will be run and insert the | ||
| 3597 | * corresponding extent items into the extent tree when | ||
| 3598 | * we commit the transaction they used when running | ||
| 3599 | * inode.c:btrfs_finish_ordered_io(). We later use | ||
| 3600 | * the commit root of the extent tree to find extents | ||
| 3601 | * to copy from the srcdev into the tgtdev, and we don't | ||
| 3602 | * want to miss any new extents. | ||
| 3603 | */ | ||
| 3604 | btrfs_wait_block_group_reservations(cache); | ||
| 3605 | btrfs_wait_nocow_writers(cache); | ||
| 3606 | ret = btrfs_wait_ordered_roots(fs_info, -1, | ||
| 3607 | cache->key.objectid, | ||
| 3608 | cache->key.offset); | ||
| 3609 | if (ret > 0) { | ||
| 3610 | struct btrfs_trans_handle *trans; | ||
| 3611 | |||
| 3612 | trans = btrfs_join_transaction(root); | ||
| 3613 | if (IS_ERR(trans)) | ||
| 3614 | ret = PTR_ERR(trans); | ||
| 3615 | else | ||
| 3616 | ret = btrfs_commit_transaction(trans, | ||
| 3617 | root); | ||
| 3618 | if (ret) { | ||
| 3619 | scrub_pause_off(fs_info); | ||
| 3620 | btrfs_put_block_group(cache); | ||
| 3621 | break; | ||
| 3622 | } | ||
| 3623 | } | ||
| 3624 | } | ||
| 3585 | scrub_pause_off(fs_info); | 3625 | scrub_pause_off(fs_info); |
| 3586 | 3626 | ||
| 3587 | if (ret == 0) { | 3627 | if (ret == 0) { |
| @@ -3602,9 +3642,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, | |||
| 3602 | break; | 3642 | break; |
| 3603 | } | 3643 | } |
| 3604 | 3644 | ||
| 3645 | btrfs_dev_replace_lock(&fs_info->dev_replace, 1); | ||
| 3605 | dev_replace->cursor_right = found_key.offset + length; | 3646 | dev_replace->cursor_right = found_key.offset + length; |
| 3606 | dev_replace->cursor_left = found_key.offset; | 3647 | dev_replace->cursor_left = found_key.offset; |
| 3607 | dev_replace->item_needs_writeback = 1; | 3648 | dev_replace->item_needs_writeback = 1; |
| 3649 | btrfs_dev_replace_unlock(&fs_info->dev_replace, 1); | ||
| 3608 | ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, | 3650 | ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length, |
| 3609 | found_key.offset, cache, is_dev_replace); | 3651 | found_key.offset, cache, is_dev_replace); |
| 3610 | 3652 | ||
| @@ -3640,6 +3682,11 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, | |||
| 3640 | 3682 | ||
| 3641 | scrub_pause_off(fs_info); | 3683 | scrub_pause_off(fs_info); |
| 3642 | 3684 | ||
| 3685 | btrfs_dev_replace_lock(&fs_info->dev_replace, 1); | ||
| 3686 | dev_replace->cursor_left = dev_replace->cursor_right; | ||
| 3687 | dev_replace->item_needs_writeback = 1; | ||
| 3688 | btrfs_dev_replace_unlock(&fs_info->dev_replace, 1); | ||
| 3689 | |||
| 3643 | if (ro_set) | 3690 | if (ro_set) |
| 3644 | btrfs_dec_block_group_ro(root, cache); | 3691 | btrfs_dec_block_group_ro(root, cache); |
| 3645 | 3692 | ||
| @@ -3677,9 +3724,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx, | |||
| 3677 | ret = -ENOMEM; | 3724 | ret = -ENOMEM; |
| 3678 | break; | 3725 | break; |
| 3679 | } | 3726 | } |
| 3680 | |||
| 3681 | dev_replace->cursor_left = dev_replace->cursor_right; | ||
| 3682 | dev_replace->item_needs_writeback = 1; | ||
| 3683 | skip: | 3727 | skip: |
| 3684 | key.offset = found_key.offset + length; | 3728 | key.offset = found_key.offset + length; |
| 3685 | btrfs_release_path(path); | 3729 | btrfs_release_path(path); |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 4e59a91a11e0..60e7179ed4b7 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
| @@ -235,7 +235,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, | |||
| 235 | trans->aborted = errno; | 235 | trans->aborted = errno; |
| 236 | /* Nothing used. The other threads that have joined this | 236 | /* Nothing used. The other threads that have joined this |
| 237 | * transaction may be able to continue. */ | 237 | * transaction may be able to continue. */ |
| 238 | if (!trans->blocks_used && list_empty(&trans->new_bgs)) { | 238 | if (!trans->dirty && list_empty(&trans->new_bgs)) { |
| 239 | const char *errstr; | 239 | const char *errstr; |
| 240 | 240 | ||
| 241 | errstr = btrfs_decode_error(errno); | 241 | errstr = btrfs_decode_error(errno); |
| @@ -1807,6 +1807,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) | |||
| 1807 | } | 1807 | } |
| 1808 | } | 1808 | } |
| 1809 | sb->s_flags &= ~MS_RDONLY; | 1809 | sb->s_flags &= ~MS_RDONLY; |
| 1810 | |||
| 1811 | fs_info->open = 1; | ||
| 1810 | } | 1812 | } |
| 1811 | out: | 1813 | out: |
| 1812 | wake_up_process(fs_info->transaction_kthread); | 1814 | wake_up_process(fs_info->transaction_kthread); |
| @@ -2303,7 +2305,7 @@ static void btrfs_interface_exit(void) | |||
| 2303 | 2305 | ||
| 2304 | static void btrfs_print_mod_info(void) | 2306 | static void btrfs_print_mod_info(void) |
| 2305 | { | 2307 | { |
| 2306 | printk(KERN_INFO "Btrfs loaded" | 2308 | printk(KERN_INFO "Btrfs loaded, crc32c=%s" |
| 2307 | #ifdef CONFIG_BTRFS_DEBUG | 2309 | #ifdef CONFIG_BTRFS_DEBUG |
| 2308 | ", debug=on" | 2310 | ", debug=on" |
| 2309 | #endif | 2311 | #endif |
| @@ -2313,33 +2315,48 @@ static void btrfs_print_mod_info(void) | |||
| 2313 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY | 2315 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
| 2314 | ", integrity-checker=on" | 2316 | ", integrity-checker=on" |
| 2315 | #endif | 2317 | #endif |
| 2316 | "\n"); | 2318 | "\n", |
| 2319 | btrfs_crc32c_impl()); | ||
| 2317 | } | 2320 | } |
| 2318 | 2321 | ||
| 2319 | static int btrfs_run_sanity_tests(void) | 2322 | static int btrfs_run_sanity_tests(void) |
| 2320 | { | 2323 | { |
| 2321 | int ret; | 2324 | int ret, i; |
| 2322 | 2325 | u32 sectorsize, nodesize; | |
| 2326 | u32 test_sectorsize[] = { | ||
| 2327 | PAGE_SIZE, | ||
| 2328 | }; | ||
| 2323 | ret = btrfs_init_test_fs(); | 2329 | ret = btrfs_init_test_fs(); |
| 2324 | if (ret) | 2330 | if (ret) |
| 2325 | return ret; | 2331 | return ret; |
| 2326 | 2332 | for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) { | |
| 2327 | ret = btrfs_test_free_space_cache(); | 2333 | sectorsize = test_sectorsize[i]; |
| 2328 | if (ret) | 2334 | for (nodesize = sectorsize; |
| 2329 | goto out; | 2335 | nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE; |
| 2330 | ret = btrfs_test_extent_buffer_operations(); | 2336 | nodesize <<= 1) { |
| 2331 | if (ret) | 2337 | pr_info("BTRFS: selftest: sectorsize: %u nodesize: %u\n", |
| 2332 | goto out; | 2338 | sectorsize, nodesize); |
| 2333 | ret = btrfs_test_extent_io(); | 2339 | ret = btrfs_test_free_space_cache(sectorsize, nodesize); |
| 2334 | if (ret) | 2340 | if (ret) |
| 2335 | goto out; | 2341 | goto out; |
| 2336 | ret = btrfs_test_inodes(); | 2342 | ret = btrfs_test_extent_buffer_operations(sectorsize, |
| 2337 | if (ret) | 2343 | nodesize); |
| 2338 | goto out; | 2344 | if (ret) |
| 2339 | ret = btrfs_test_qgroups(); | 2345 | goto out; |
| 2340 | if (ret) | 2346 | ret = btrfs_test_extent_io(sectorsize, nodesize); |
| 2341 | goto out; | 2347 | if (ret) |
| 2342 | ret = btrfs_test_free_space_tree(); | 2348 | goto out; |
| 2349 | ret = btrfs_test_inodes(sectorsize, nodesize); | ||
| 2350 | if (ret) | ||
| 2351 | goto out; | ||
| 2352 | ret = btrfs_test_qgroups(sectorsize, nodesize); | ||
| 2353 | if (ret) | ||
| 2354 | goto out; | ||
| 2355 | ret = btrfs_test_free_space_tree(sectorsize, nodesize); | ||
| 2356 | if (ret) | ||
| 2357 | goto out; | ||
| 2358 | } | ||
| 2359 | } | ||
| 2343 | out: | 2360 | out: |
| 2344 | btrfs_destroy_test_fs(); | 2361 | btrfs_destroy_test_fs(); |
| 2345 | return ret; | 2362 | return ret; |
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index f54bf450bad3..02223f3f78f4 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c | |||
| @@ -68,7 +68,7 @@ int btrfs_init_test_fs(void) | |||
| 68 | if (IS_ERR(test_mnt)) { | 68 | if (IS_ERR(test_mnt)) { |
| 69 | printk(KERN_ERR "btrfs: cannot mount test file system\n"); | 69 | printk(KERN_ERR "btrfs: cannot mount test file system\n"); |
| 70 | unregister_filesystem(&test_type); | 70 | unregister_filesystem(&test_type); |
| 71 | return ret; | 71 | return PTR_ERR(test_mnt); |
| 72 | } | 72 | } |
| 73 | return 0; | 73 | return 0; |
| 74 | } | 74 | } |
| @@ -175,7 +175,7 @@ void btrfs_free_dummy_root(struct btrfs_root *root) | |||
| 175 | } | 175 | } |
| 176 | 176 | ||
| 177 | struct btrfs_block_group_cache * | 177 | struct btrfs_block_group_cache * |
| 178 | btrfs_alloc_dummy_block_group(unsigned long length) | 178 | btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize) |
| 179 | { | 179 | { |
| 180 | struct btrfs_block_group_cache *cache; | 180 | struct btrfs_block_group_cache *cache; |
| 181 | 181 | ||
| @@ -192,8 +192,8 @@ btrfs_alloc_dummy_block_group(unsigned long length) | |||
| 192 | cache->key.objectid = 0; | 192 | cache->key.objectid = 0; |
| 193 | cache->key.offset = length; | 193 | cache->key.offset = length; |
| 194 | cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; | 194 | cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; |
| 195 | cache->sectorsize = 4096; | 195 | cache->sectorsize = sectorsize; |
| 196 | cache->full_stripe_len = 4096; | 196 | cache->full_stripe_len = sectorsize; |
| 197 | 197 | ||
| 198 | INIT_LIST_HEAD(&cache->list); | 198 | INIT_LIST_HEAD(&cache->list); |
| 199 | INIT_LIST_HEAD(&cache->cluster_list); | 199 | INIT_LIST_HEAD(&cache->cluster_list); |
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h index 054b8c73c951..66fb6b701eb7 100644 --- a/fs/btrfs/tests/btrfs-tests.h +++ b/fs/btrfs/tests/btrfs-tests.h | |||
| @@ -26,27 +26,28 @@ | |||
| 26 | struct btrfs_root; | 26 | struct btrfs_root; |
| 27 | struct btrfs_trans_handle; | 27 | struct btrfs_trans_handle; |
| 28 | 28 | ||
| 29 | int btrfs_test_free_space_cache(void); | 29 | int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize); |
| 30 | int btrfs_test_extent_buffer_operations(void); | 30 | int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize); |
| 31 | int btrfs_test_extent_io(void); | 31 | int btrfs_test_extent_io(u32 sectorsize, u32 nodesize); |
| 32 | int btrfs_test_inodes(void); | 32 | int btrfs_test_inodes(u32 sectorsize, u32 nodesize); |
| 33 | int btrfs_test_qgroups(void); | 33 | int btrfs_test_qgroups(u32 sectorsize, u32 nodesize); |
| 34 | int btrfs_test_free_space_tree(void); | 34 | int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize); |
| 35 | int btrfs_init_test_fs(void); | 35 | int btrfs_init_test_fs(void); |
| 36 | void btrfs_destroy_test_fs(void); | 36 | void btrfs_destroy_test_fs(void); |
| 37 | struct inode *btrfs_new_test_inode(void); | 37 | struct inode *btrfs_new_test_inode(void); |
| 38 | struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void); | 38 | struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void); |
| 39 | void btrfs_free_dummy_root(struct btrfs_root *root); | 39 | void btrfs_free_dummy_root(struct btrfs_root *root); |
| 40 | struct btrfs_block_group_cache * | 40 | struct btrfs_block_group_cache * |
| 41 | btrfs_alloc_dummy_block_group(unsigned long length); | 41 | btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize); |
| 42 | void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache); | 42 | void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache); |
| 43 | void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans); | 43 | void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans); |
| 44 | #else | 44 | #else |
| 45 | static inline int btrfs_test_free_space_cache(void) | 45 | static inline int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize) |
| 46 | { | 46 | { |
| 47 | return 0; | 47 | return 0; |
| 48 | } | 48 | } |
| 49 | static inline int btrfs_test_extent_buffer_operations(void) | 49 | static inline int btrfs_test_extent_buffer_operations(u32 sectorsize, |
| 50 | u32 nodesize) | ||
| 50 | { | 51 | { |
| 51 | return 0; | 52 | return 0; |
| 52 | } | 53 | } |
| @@ -57,19 +58,19 @@ static inline int btrfs_init_test_fs(void) | |||
| 57 | static inline void btrfs_destroy_test_fs(void) | 58 | static inline void btrfs_destroy_test_fs(void) |
| 58 | { | 59 | { |
| 59 | } | 60 | } |
| 60 | static inline int btrfs_test_extent_io(void) | 61 | static inline int btrfs_test_extent_io(u32 sectorsize, u32 nodesize) |
| 61 | { | 62 | { |
| 62 | return 0; | 63 | return 0; |
| 63 | } | 64 | } |
| 64 | static inline int btrfs_test_inodes(void) | 65 | static inline int btrfs_test_inodes(u32 sectorsize, u32 nodesize) |
| 65 | { | 66 | { |
| 66 | return 0; | 67 | return 0; |
| 67 | } | 68 | } |
| 68 | static inline int btrfs_test_qgroups(void) | 69 | static inline int btrfs_test_qgroups(u32 sectorsize, u32 nodesize) |
| 69 | { | 70 | { |
| 70 | return 0; | 71 | return 0; |
| 71 | } | 72 | } |
| 72 | static inline int btrfs_test_free_space_tree(void) | 73 | static inline int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize) |
| 73 | { | 74 | { |
| 74 | return 0; | 75 | return 0; |
| 75 | } | 76 | } |
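
The stubs used when the sanity tests are configured out (CONFIG_BTRFS_FS_RUN_SANITY_TESTS unset) have to track the real prototypes exactly, otherwise callers that now pass (sectorsize, nodesize) would fail to build with the tests compiled out. The shape of the pattern, reduced to an illustrative sketch with invented names:

        /* Reduced sketch of the stub pattern: one prototype, two bodies,
         * selected at compile time. The names here are illustrative only. */
        #ifdef CONFIG_EXAMPLE_SELF_TESTS
        int example_test(u32 sectorsize, u32 nodesize);   /* real code elsewhere */
        #else
        static inline int example_test(u32 sectorsize, u32 nodesize)
        {
                return 0;   /* tests compiled out: report success, do nothing */
        }
        #endif
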
diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c index f51963a8f929..4f8cbd1ec5ee 100644 --- a/fs/btrfs/tests/extent-buffer-tests.c +++ b/fs/btrfs/tests/extent-buffer-tests.c | |||
| @@ -22,7 +22,7 @@ | |||
| 22 | #include "../extent_io.h" | 22 | #include "../extent_io.h" |
| 23 | #include "../disk-io.h" | 23 | #include "../disk-io.h" |
| 24 | 24 | ||
| 25 | static int test_btrfs_split_item(void) | 25 | static int test_btrfs_split_item(u32 sectorsize, u32 nodesize) |
| 26 | { | 26 | { |
| 27 | struct btrfs_path *path; | 27 | struct btrfs_path *path; |
| 28 | struct btrfs_root *root; | 28 | struct btrfs_root *root; |
| @@ -40,7 +40,7 @@ static int test_btrfs_split_item(void) | |||
| 40 | 40 | ||
| 41 | test_msg("Running btrfs_split_item tests\n"); | 41 | test_msg("Running btrfs_split_item tests\n"); |
| 42 | 42 | ||
| 43 | root = btrfs_alloc_dummy_root(); | 43 | root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 44 | if (IS_ERR(root)) { | 44 | if (IS_ERR(root)) { |
| 45 | test_msg("Could not allocate root\n"); | 45 | test_msg("Could not allocate root\n"); |
| 46 | return PTR_ERR(root); | 46 | return PTR_ERR(root); |
| @@ -53,7 +53,8 @@ static int test_btrfs_split_item(void) | |||
| 53 | return -ENOMEM; | 53 | return -ENOMEM; |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096); | 56 | path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, nodesize, |
| 57 | nodesize); | ||
| 57 | if (!eb) { | 58 | if (!eb) { |
| 58 | test_msg("Could not allocate dummy buffer\n"); | 59 | test_msg("Could not allocate dummy buffer\n"); |
| 59 | ret = -ENOMEM; | 60 | ret = -ENOMEM; |
| @@ -222,8 +223,8 @@ out: | |||
| 222 | return ret; | 223 | return ret; |
| 223 | } | 224 | } |
| 224 | 225 | ||
| 225 | int btrfs_test_extent_buffer_operations(void) | 226 | int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize) |
| 226 | { | 227 | { |
| 227 | test_msg("Running extent buffer operation tests"); | 228 | test_msg("Running extent buffer operation tests\n"); |
| 228 | return test_btrfs_split_item(); | 229 | return test_btrfs_split_item(sectorsize, nodesize); |
| 229 | } | 230 | } |
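
In the split-item test the dummy leaf is now allocated at the node size being exercised rather than a literal 4096, so 16K or 64K metadata blocks get a correctly sized buffer. A trimmed sketch of the updated allocation (argument meaning follows the hunk above; the error path is illustrative):

        /* Allocate the dummy leaf at the metadata block size being tested. */
        path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
        if (!eb) {
                test_msg("Could not allocate dummy buffer\n");
                return -ENOMEM;
        }
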
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c index 55724607f79b..d19ab0317283 100644 --- a/fs/btrfs/tests/extent-io-tests.c +++ b/fs/btrfs/tests/extent-io-tests.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
| 22 | #include <linux/sizes.h> | 22 | #include <linux/sizes.h> |
| 23 | #include "btrfs-tests.h" | 23 | #include "btrfs-tests.h" |
| 24 | #include "../ctree.h" | ||
| 24 | #include "../extent_io.h" | 25 | #include "../extent_io.h" |
| 25 | 26 | ||
| 26 | #define PROCESS_UNLOCK (1 << 0) | 27 | #define PROCESS_UNLOCK (1 << 0) |
| @@ -65,7 +66,7 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end, | |||
| 65 | return count; | 66 | return count; |
| 66 | } | 67 | } |
| 67 | 68 | ||
| 68 | static int test_find_delalloc(void) | 69 | static int test_find_delalloc(u32 sectorsize) |
| 69 | { | 70 | { |
| 70 | struct inode *inode; | 71 | struct inode *inode; |
| 71 | struct extent_io_tree tmp; | 72 | struct extent_io_tree tmp; |
| @@ -113,7 +114,7 @@ static int test_find_delalloc(void) | |||
| 113 | * |--- delalloc ---| | 114 | * |--- delalloc ---| |
| 114 | * |--- search ---| | 115 | * |--- search ---| |
| 115 | */ | 116 | */ |
| 116 | set_extent_delalloc(&tmp, 0, 4095, NULL); | 117 | set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL); |
| 117 | start = 0; | 118 | start = 0; |
| 118 | end = 0; | 119 | end = 0; |
| 119 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, | 120 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, |
| @@ -122,9 +123,9 @@ static int test_find_delalloc(void) | |||
| 122 | test_msg("Should have found at least one delalloc\n"); | 123 | test_msg("Should have found at least one delalloc\n"); |
| 123 | goto out_bits; | 124 | goto out_bits; |
| 124 | } | 125 | } |
| 125 | if (start != 0 || end != 4095) { | 126 | if (start != 0 || end != (sectorsize - 1)) { |
| 126 | test_msg("Expected start 0 end 4095, got start %Lu end %Lu\n", | 127 | test_msg("Expected start 0 end %u, got start %llu end %llu\n", |
| 127 | start, end); | 128 | sectorsize - 1, start, end); |
| 128 | goto out_bits; | 129 | goto out_bits; |
| 129 | } | 130 | } |
| 130 | unlock_extent(&tmp, start, end); | 131 | unlock_extent(&tmp, start, end); |
| @@ -144,7 +145,7 @@ static int test_find_delalloc(void) | |||
| 144 | test_msg("Couldn't find the locked page\n"); | 145 | test_msg("Couldn't find the locked page\n"); |
| 145 | goto out_bits; | 146 | goto out_bits; |
| 146 | } | 147 | } |
| 147 | set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL); | 148 | set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL); |
| 148 | start = test_start; | 149 | start = test_start; |
| 149 | end = 0; | 150 | end = 0; |
| 150 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, | 151 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, |
| @@ -172,7 +173,7 @@ static int test_find_delalloc(void) | |||
| 172 | * |--- delalloc ---| | 173 | * |--- delalloc ---| |
| 173 | * |--- search ---| | 174 | * |--- search ---| |
| 174 | */ | 175 | */ |
| 175 | test_start = max_bytes + 4096; | 176 | test_start = max_bytes + sectorsize; |
| 176 | locked_page = find_lock_page(inode->i_mapping, test_start >> | 177 | locked_page = find_lock_page(inode->i_mapping, test_start >> |
| 177 | PAGE_SHIFT); | 178 | PAGE_SHIFT); |
| 178 | if (!locked_page) { | 179 | if (!locked_page) { |
| @@ -272,6 +273,16 @@ out: | |||
| 272 | return ret; | 273 | return ret; |
| 273 | } | 274 | } |
| 274 | 275 | ||
| 276 | /** | ||
| 277 | * test_bit_in_byte - Determine whether a bit is set in a byte | ||
| 278 | * @nr: bit number to test | ||
| 279 | * @addr: Address to start counting from | ||
| 280 | */ | ||
| 281 | static inline int test_bit_in_byte(int nr, const u8 *addr) | ||
| 282 | { | ||
| 283 | return 1UL & (addr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1))); | ||
| 284 | } | ||
| 285 | |||
| 275 | static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, | 286 | static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, |
| 276 | unsigned long len) | 287 | unsigned long len) |
| 277 | { | 288 | { |
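
test_bit_in_byte() indexes the bitmap as raw bytes, while the kernel's test_bit() operates on native-endian unsigned longs; on big-endian machines such as ppc64 the two indexings disagree, which is the apparent motivation for the byte-wise helper. A stand-alone demonstration, with the kernel pieces replaced by plain C:

        /* User-space demonstration of the byte-indexed bit test added above.
         * Only standard C is used; BITS_PER_BYTE is defined locally. */
        #include <stdio.h>
        #include <stdint.h>

        #define BITS_PER_BYTE 8

        static inline int test_bit_in_byte(int nr, const uint8_t *addr)
        {
                return 1UL & (addr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1)));
        }

        int main(void)
        {
                uint8_t bitmap[2] = { 0x01, 0x80 };     /* bit 0 and bit 15 set */

                printf("bit 0:  %d\n", test_bit_in_byte(0, bitmap));    /* 1 */
                printf("bit 8:  %d\n", test_bit_in_byte(8, bitmap));    /* 0 */
                printf("bit 15: %d\n", test_bit_in_byte(15, bitmap));   /* 1 */
                return 0;
        }
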
| @@ -298,25 +309,29 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, | |||
| 298 | return -EINVAL; | 309 | return -EINVAL; |
| 299 | } | 310 | } |
| 300 | 311 | ||
| 301 | bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, | 312 | /* Straddling pages test */ |
| 302 | sizeof(long) * BITS_PER_BYTE); | 313 | if (len > PAGE_SIZE) { |
| 303 | extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0, | 314 | bitmap_set(bitmap, |
| 304 | sizeof(long) * BITS_PER_BYTE); | 315 | (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, |
| 305 | if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { | 316 | sizeof(long) * BITS_PER_BYTE); |
| 306 | test_msg("Setting straddling pages failed\n"); | 317 | extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0, |
| 307 | return -EINVAL; | 318 | sizeof(long) * BITS_PER_BYTE); |
| 308 | } | 319 | if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { |
| 320 | test_msg("Setting straddling pages failed\n"); | ||
| 321 | return -EINVAL; | ||
| 322 | } | ||
| 309 | 323 | ||
| 310 | bitmap_set(bitmap, 0, len * BITS_PER_BYTE); | 324 | bitmap_set(bitmap, 0, len * BITS_PER_BYTE); |
| 311 | bitmap_clear(bitmap, | 325 | bitmap_clear(bitmap, |
| 312 | (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, | 326 | (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, |
| 313 | sizeof(long) * BITS_PER_BYTE); | 327 | sizeof(long) * BITS_PER_BYTE); |
| 314 | extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE); | 328 | extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE); |
| 315 | extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0, | 329 | extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0, |
| 316 | sizeof(long) * BITS_PER_BYTE); | 330 | sizeof(long) * BITS_PER_BYTE); |
| 317 | if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { | 331 | if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { |
| 318 | test_msg("Clearing straddling pages failed\n"); | 332 | test_msg("Clearing straddling pages failed\n"); |
| 319 | return -EINVAL; | 333 | return -EINVAL; |
| 334 | } | ||
| 320 | } | 335 | } |
| 321 | 336 | ||
| 322 | /* | 337 | /* |
| @@ -333,7 +348,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, | |||
| 333 | for (i = 0; i < len * BITS_PER_BYTE; i++) { | 348 | for (i = 0; i < len * BITS_PER_BYTE; i++) { |
| 334 | int bit, bit1; | 349 | int bit, bit1; |
| 335 | 350 | ||
| 336 | bit = !!test_bit(i, bitmap); | 351 | bit = !!test_bit_in_byte(i, (u8 *)bitmap); |
| 337 | bit1 = !!extent_buffer_test_bit(eb, 0, i); | 352 | bit1 = !!extent_buffer_test_bit(eb, 0, i); |
| 338 | if (bit1 != bit) { | 353 | if (bit1 != bit) { |
| 339 | test_msg("Testing bit pattern failed\n"); | 354 | test_msg("Testing bit pattern failed\n"); |
| @@ -351,15 +366,22 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, | |||
| 351 | return 0; | 366 | return 0; |
| 352 | } | 367 | } |
| 353 | 368 | ||
| 354 | static int test_eb_bitmaps(void) | 369 | static int test_eb_bitmaps(u32 sectorsize, u32 nodesize) |
| 355 | { | 370 | { |
| 356 | unsigned long len = PAGE_SIZE * 4; | 371 | unsigned long len; |
| 357 | unsigned long *bitmap; | 372 | unsigned long *bitmap; |
| 358 | struct extent_buffer *eb; | 373 | struct extent_buffer *eb; |
| 359 | int ret; | 374 | int ret; |
| 360 | 375 | ||
| 361 | test_msg("Running extent buffer bitmap tests\n"); | 376 | test_msg("Running extent buffer bitmap tests\n"); |
| 362 | 377 | ||
| 378 | /* | ||
| 379 | * In ppc64, sectorsize can be 64K, thus 4 * 64K will be larger than | ||
| 380 | * BTRFS_MAX_METADATA_BLOCKSIZE. | ||
| 381 | */ | ||
| 382 | len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE) | ||
| 383 | ? sectorsize * 4 : sectorsize; | ||
| 384 | |||
| 363 | bitmap = kmalloc(len, GFP_KERNEL); | 385 | bitmap = kmalloc(len, GFP_KERNEL); |
| 364 | if (!bitmap) { | 386 | if (!bitmap) { |
| 365 | test_msg("Couldn't allocate test bitmap\n"); | 387 | test_msg("Couldn't allocate test bitmap\n"); |
| @@ -379,7 +401,7 @@ static int test_eb_bitmaps(void) | |||
| 379 | 401 | ||
| 380 | /* Do it over again with an extent buffer which isn't page-aligned. */ | 402 | /* Do it over again with an extent buffer which isn't page-aligned. */ |
| 381 | free_extent_buffer(eb); | 403 | free_extent_buffer(eb); |
| 382 | eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len); | 404 | eb = __alloc_dummy_extent_buffer(NULL, nodesize / 2, len); |
| 383 | if (!eb) { | 405 | if (!eb) { |
| 384 | test_msg("Couldn't allocate test extent buffer\n"); | 406 | test_msg("Couldn't allocate test extent buffer\n"); |
| 385 | kfree(bitmap); | 407 | kfree(bitmap); |
| @@ -393,17 +415,17 @@ out: | |||
| 393 | return ret; | 415 | return ret; |
| 394 | } | 416 | } |
| 395 | 417 | ||
| 396 | int btrfs_test_extent_io(void) | 418 | int btrfs_test_extent_io(u32 sectorsize, u32 nodesize) |
| 397 | { | 419 | { |
| 398 | int ret; | 420 | int ret; |
| 399 | 421 | ||
| 400 | test_msg("Running extent I/O tests\n"); | 422 | test_msg("Running extent I/O tests\n"); |
| 401 | 423 | ||
| 402 | ret = test_find_delalloc(); | 424 | ret = test_find_delalloc(sectorsize); |
| 403 | if (ret) | 425 | if (ret) |
| 404 | goto out; | 426 | goto out; |
| 405 | 427 | ||
| 406 | ret = test_eb_bitmaps(); | 428 | ret = test_eb_bitmaps(sectorsize, nodesize); |
| 407 | out: | 429 | out: |
| 408 | test_msg("Extent I/O tests finished\n"); | 430 | test_msg("Extent I/O tests finished\n"); |
| 409 | return ret; | 431 | return ret; |
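
Two sizing decisions in the bitmap test above are easiest to see with concrete numbers: the buffer length is capped by BTRFS_MAX_METADATA_BLOCKSIZE (64K in the kernel), and the page-straddling sub-test only makes sense when that length spans more than one page. A self-contained arithmetic sketch, with the constants written out explicitly:

        /* Worked numbers for the extent buffer bitmap test sizing. */
        #include <stdio.h>

        #define BTRFS_MAX_METADATA_BLOCKSIZE (64 * 1024)

        static unsigned long pick_len(unsigned long sectorsize)
        {
                /* Same selection as the hunk above: four sectors while that
                 * still fits under the metadata limit, else one sector. */
                return (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
                        ? sectorsize * 4 : sectorsize;
        }

        int main(void)
        {
                /* x86-64: 4K sectors, 4K pages -> len 16K, straddles pages. */
                unsigned long len = pick_len(4096);
                printf("4K sectors:  len=%lu, straddling test runs: %d\n",
                       len, len > 4096);

                /* ppc64: 64K sectors, 64K pages -> len 64K, a single page, so
                 * the straddling sub-test is skipped by the len > PAGE_SIZE
                 * guard added above. */
                len = pick_len(64 * 1024);
                printf("64K sectors: len=%lu, straddling test runs: %d\n",
                       len, len > 64 * 1024);
                return 0;
        }
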
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c index 0eeb8f3d6b67..3956bb2ff84c 100644 --- a/fs/btrfs/tests/free-space-tests.c +++ b/fs/btrfs/tests/free-space-tests.c | |||
| @@ -22,7 +22,7 @@ | |||
| 22 | #include "../disk-io.h" | 22 | #include "../disk-io.h" |
| 23 | #include "../free-space-cache.h" | 23 | #include "../free-space-cache.h" |
| 24 | 24 | ||
| 25 | #define BITS_PER_BITMAP (PAGE_SIZE * 8) | 25 | #define BITS_PER_BITMAP (PAGE_SIZE * 8UL) |
| 26 | 26 | ||
| 27 | /* | 27 | /* |
| 28 | * This test just does basic sanity checking, making sure we can add an extent | 28 | * This test just does basic sanity checking, making sure we can add an extent |
| @@ -99,7 +99,8 @@ static int test_extents(struct btrfs_block_group_cache *cache) | |||
| 99 | return 0; | 99 | return 0; |
| 100 | } | 100 | } |
| 101 | 101 | ||
| 102 | static int test_bitmaps(struct btrfs_block_group_cache *cache) | 102 | static int test_bitmaps(struct btrfs_block_group_cache *cache, |
| 103 | u32 sectorsize) | ||
| 103 | { | 104 | { |
| 104 | u64 next_bitmap_offset; | 105 | u64 next_bitmap_offset; |
| 105 | int ret; | 106 | int ret; |
| @@ -139,7 +140,7 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache) | |||
| 139 | * The first bitmap we have starts at offset 0 so the next one is just | 140 | * The first bitmap we have starts at offset 0 so the next one is just |
| 140 | * at the end of the first bitmap. | 141 | * at the end of the first bitmap. |
| 141 | */ | 142 | */ |
| 142 | next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096); | 143 | next_bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize); |
| 143 | 144 | ||
| 144 | /* Test a bit straddling two bitmaps */ | 145 | /* Test a bit straddling two bitmaps */ |
| 145 | ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M, | 146 | ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M, |
| @@ -167,9 +168,10 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache) | |||
| 167 | } | 168 | } |
| 168 | 169 | ||
| 169 | /* This is the high grade jackassery */ | 170 | /* This is the high grade jackassery */ |
| 170 | static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache) | 171 | static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache, |
| 172 | u32 sectorsize) | ||
| 171 | { | 173 | { |
| 172 | u64 bitmap_offset = (u64)(BITS_PER_BITMAP * 4096); | 174 | u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize); |
| 173 | int ret; | 175 | int ret; |
| 174 | 176 | ||
| 175 | test_msg("Running bitmap and extent tests\n"); | 177 | test_msg("Running bitmap and extent tests\n"); |
| @@ -401,7 +403,8 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache) | |||
| 401 | * requests. | 403 | * requests. |
| 402 | */ | 404 | */ |
| 403 | static int | 405 | static int |
| 404 | test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | 406 | test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache, |
| 407 | u32 sectorsize) | ||
| 405 | { | 408 | { |
| 406 | int ret; | 409 | int ret; |
| 407 | u64 offset; | 410 | u64 offset; |
| @@ -539,7 +542,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 539 | * The goal is to test that the bitmap entry space stealing doesn't | 542 | * The goal is to test that the bitmap entry space stealing doesn't |
| 540 | * steal this space region. | 543 | * steal this space region. |
| 541 | */ | 544 | */ |
| 542 | ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, 4096); | 545 | ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, sectorsize); |
| 543 | if (ret) { | 546 | if (ret) { |
| 544 | test_msg("Error adding free space: %d\n", ret); | 547 | test_msg("Error adding free space: %d\n", ret); |
| 545 | return ret; | 548 | return ret; |
| @@ -597,8 +600,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 597 | return -ENOENT; | 600 | return -ENOENT; |
| 598 | } | 601 | } |
| 599 | 602 | ||
| 600 | if (cache->free_space_ctl->free_space != (SZ_1M + 4096)) { | 603 | if (cache->free_space_ctl->free_space != (SZ_1M + sectorsize)) { |
| 601 | test_msg("Cache free space is not 1Mb + 4Kb\n"); | 604 | test_msg("Cache free space is not 1Mb + %u\n", sectorsize); |
| 602 | return -EINVAL; | 605 | return -EINVAL; |
| 603 | } | 606 | } |
| 604 | 607 | ||
| @@ -611,22 +614,25 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 611 | return -EINVAL; | 614 | return -EINVAL; |
| 612 | } | 615 | } |
| 613 | 616 | ||
| 614 | /* All that remains is a 4Kb free space region in a bitmap. Confirm. */ | 617 | /* |
| 618 | * All that remains is a sectorsize free space region in a bitmap. | ||
| 619 | * Confirm. | ||
| 620 | */ | ||
| 615 | ret = check_num_extents_and_bitmaps(cache, 1, 1); | 621 | ret = check_num_extents_and_bitmaps(cache, 1, 1); |
| 616 | if (ret) | 622 | if (ret) |
| 617 | return ret; | 623 | return ret; |
| 618 | 624 | ||
| 619 | if (cache->free_space_ctl->free_space != 4096) { | 625 | if (cache->free_space_ctl->free_space != sectorsize) { |
| 620 | test_msg("Cache free space is not 4Kb\n"); | 626 | test_msg("Cache free space is not %u\n", sectorsize); |
| 621 | return -EINVAL; | 627 | return -EINVAL; |
| 622 | } | 628 | } |
| 623 | 629 | ||
| 624 | offset = btrfs_find_space_for_alloc(cache, | 630 | offset = btrfs_find_space_for_alloc(cache, |
| 625 | 0, 4096, 0, | 631 | 0, sectorsize, 0, |
| 626 | &max_extent_size); | 632 | &max_extent_size); |
| 627 | if (offset != (SZ_128M + SZ_16M)) { | 633 | if (offset != (SZ_128M + SZ_16M)) { |
| 628 | test_msg("Failed to allocate 4Kb from space cache, returned offset is: %llu\n", | 634 | test_msg("Failed to allocate %u, returned offset : %llu\n", |
| 629 | offset); | 635 | sectorsize, offset); |
| 630 | return -EINVAL; | 636 | return -EINVAL; |
| 631 | } | 637 | } |
| 632 | 638 | ||
| @@ -733,7 +739,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 733 | * The goal is to test that the bitmap entry space stealing doesn't | 739 | * The goal is to test that the bitmap entry space stealing doesn't |
| 734 | * steal this space region. | 740 | * steal this space region. |
| 735 | */ | 741 | */ |
| 736 | ret = btrfs_add_free_space(cache, SZ_32M, 8192); | 742 | ret = btrfs_add_free_space(cache, SZ_32M, 2 * sectorsize); |
| 737 | if (ret) { | 743 | if (ret) { |
| 738 | test_msg("Error adding free space: %d\n", ret); | 744 | test_msg("Error adding free space: %d\n", ret); |
| 739 | return ret; | 745 | return ret; |
| @@ -757,7 +763,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 757 | 763 | ||
| 758 | /* | 764 | /* |
| 759 | * Confirm that our extent entry didn't steal all free space from the | 765 | * Confirm that our extent entry didn't steal all free space from the |

| 760 | * bitmap, because of the small 8Kb free space region. | 766 | * bitmap, because of the small 2 * sectorsize free space region. |
| 761 | */ | 767 | */ |
| 762 | ret = check_num_extents_and_bitmaps(cache, 2, 1); | 768 | ret = check_num_extents_and_bitmaps(cache, 2, 1); |
| 763 | if (ret) | 769 | if (ret) |
| @@ -783,8 +789,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 783 | return -ENOENT; | 789 | return -ENOENT; |
| 784 | } | 790 | } |
| 785 | 791 | ||
| 786 | if (cache->free_space_ctl->free_space != (SZ_1M + 8192)) { | 792 | if (cache->free_space_ctl->free_space != (SZ_1M + 2 * sectorsize)) { |
| 787 | test_msg("Cache free space is not 1Mb + 8Kb\n"); | 793 | test_msg("Cache free space is not 1Mb + %u\n", 2 * sectorsize); |
| 788 | return -EINVAL; | 794 | return -EINVAL; |
| 789 | } | 795 | } |
| 790 | 796 | ||
| @@ -796,21 +802,25 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 796 | return -EINVAL; | 802 | return -EINVAL; |
| 797 | } | 803 | } |
| 798 | 804 | ||
| 799 | /* All that remains is a 8Kb free space region in a bitmap. Confirm. */ | 805 | /* |
| 806 | * All that remains is 2 * sectorsize free space region | ||
| 807 | * in a bitmap. Confirm. | ||
| 808 | */ | ||
| 800 | ret = check_num_extents_and_bitmaps(cache, 1, 1); | 809 | ret = check_num_extents_and_bitmaps(cache, 1, 1); |
| 801 | if (ret) | 810 | if (ret) |
| 802 | return ret; | 811 | return ret; |
| 803 | 812 | ||
| 804 | if (cache->free_space_ctl->free_space != 8192) { | 813 | if (cache->free_space_ctl->free_space != 2 * sectorsize) { |
| 805 | test_msg("Cache free space is not 8Kb\n"); | 814 | test_msg("Cache free space is not %u\n", 2 * sectorsize); |
| 806 | return -EINVAL; | 815 | return -EINVAL; |
| 807 | } | 816 | } |
| 808 | 817 | ||
| 809 | offset = btrfs_find_space_for_alloc(cache, | 818 | offset = btrfs_find_space_for_alloc(cache, |
| 810 | 0, 8192, 0, | 819 | 0, 2 * sectorsize, 0, |
| 811 | &max_extent_size); | 820 | &max_extent_size); |
| 812 | if (offset != SZ_32M) { | 821 | if (offset != SZ_32M) { |
| 813 | test_msg("Failed to allocate 8Kb from space cache, returned offset is: %llu\n", | 822 | test_msg("Failed to allocate %u, offset: %llu\n", |
| 823 | 2 * sectorsize, | ||
| 814 | offset); | 824 | offset); |
| 815 | return -EINVAL; | 825 | return -EINVAL; |
| 816 | } | 826 | } |
| @@ -825,7 +835,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 825 | return 0; | 835 | return 0; |
| 826 | } | 836 | } |
| 827 | 837 | ||
| 828 | int btrfs_test_free_space_cache(void) | 838 | int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize) |
| 829 | { | 839 | { |
| 830 | struct btrfs_block_group_cache *cache; | 840 | struct btrfs_block_group_cache *cache; |
| 831 | struct btrfs_root *root = NULL; | 841 | struct btrfs_root *root = NULL; |
| @@ -833,13 +843,19 @@ int btrfs_test_free_space_cache(void) | |||
| 833 | 843 | ||
| 834 | test_msg("Running btrfs free space cache tests\n"); | 844 | test_msg("Running btrfs free space cache tests\n"); |
| 835 | 845 | ||
| 836 | cache = btrfs_alloc_dummy_block_group(1024 * 1024 * 1024); | 846 | /* |
| 847 | * For ppc64 (with 64k page size), bytes per bitmap might be | ||
| 848 | * larger than 1G. To make bitmap test available in ppc64, | ||
| 849 | * alloc dummy block group whose size cross bitmaps. | ||
| 850 | */ | ||
| 851 | cache = btrfs_alloc_dummy_block_group(BITS_PER_BITMAP * sectorsize | ||
| 852 | + PAGE_SIZE, sectorsize); | ||
| 837 | if (!cache) { | 853 | if (!cache) { |
| 838 | test_msg("Couldn't run the tests\n"); | 854 | test_msg("Couldn't run the tests\n"); |
| 839 | return 0; | 855 | return 0; |
| 840 | } | 856 | } |
| 841 | 857 | ||
| 842 | root = btrfs_alloc_dummy_root(); | 858 | root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 843 | if (IS_ERR(root)) { | 859 | if (IS_ERR(root)) { |
| 844 | ret = PTR_ERR(root); | 860 | ret = PTR_ERR(root); |
| 845 | goto out; | 861 | goto out; |
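
The comment above is clearer with the numbers written out: one free-space bitmap covers BITS_PER_BITMAP * sectorsize bytes, with BITS_PER_BITMAP = PAGE_SIZE * 8, which is why a fixed 1G dummy block group can no longer cross a bitmap boundary on 64K-page machines (the UL suffix added earlier in this file presumably just keeps that product in unsigned long arithmetic). A stand-alone calculation:

        /* Bytes covered by one free-space bitmap, plain C stand-in for the
         * BITS_PER_BITMAP macro used in the test. */
        #include <stdio.h>

        static unsigned long long bitmap_bytes(unsigned long page_size,
                                               unsigned long sectorsize)
        {
                unsigned long long bits = (unsigned long long)page_size * 8;

                return bits * sectorsize;
        }

        int main(void)
        {
                /* 4K pages, 4K sectors: 32768 bits * 4K = 128 MiB per bitmap,
                 * so a 1G block group spans several bitmaps. */
                printf("4K/4K:   %llu MiB per bitmap\n",
                       bitmap_bytes(4096, 4096) >> 20);

                /* 64K pages, 64K sectors: 524288 bits * 64K = 32 GiB per
                 * bitmap; a 1G group never crosses one, hence the new group
                 * size of BITS_PER_BITMAP * sectorsize + PAGE_SIZE above. */
                printf("64K/64K: %llu GiB per bitmap\n",
                       bitmap_bytes(65536, 65536) >> 30);
                return 0;
        }
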
| @@ -855,14 +871,14 @@ int btrfs_test_free_space_cache(void) | |||
| 855 | ret = test_extents(cache); | 871 | ret = test_extents(cache); |
| 856 | if (ret) | 872 | if (ret) |
| 857 | goto out; | 873 | goto out; |
| 858 | ret = test_bitmaps(cache); | 874 | ret = test_bitmaps(cache, sectorsize); |
| 859 | if (ret) | 875 | if (ret) |
| 860 | goto out; | 876 | goto out; |
| 861 | ret = test_bitmaps_and_extents(cache); | 877 | ret = test_bitmaps_and_extents(cache, sectorsize); |
| 862 | if (ret) | 878 | if (ret) |
| 863 | goto out; | 879 | goto out; |
| 864 | 880 | ||
| 865 | ret = test_steal_space_from_bitmap_to_extent(cache); | 881 | ret = test_steal_space_from_bitmap_to_extent(cache, sectorsize); |
| 866 | out: | 882 | out: |
| 867 | btrfs_free_dummy_block_group(cache); | 883 | btrfs_free_dummy_block_group(cache); |
| 868 | btrfs_free_dummy_root(root); | 884 | btrfs_free_dummy_root(root); |
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c index 7cea4462acd5..aac507085ab0 100644 --- a/fs/btrfs/tests/free-space-tree-tests.c +++ b/fs/btrfs/tests/free-space-tree-tests.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | * Boston, MA 021110-1307, USA. | 16 | * Boston, MA 021110-1307, USA. |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #include <linux/types.h> | ||
| 19 | #include "btrfs-tests.h" | 20 | #include "btrfs-tests.h" |
| 20 | #include "../ctree.h" | 21 | #include "../ctree.h" |
| 21 | #include "../disk-io.h" | 22 | #include "../disk-io.h" |
| @@ -30,7 +31,7 @@ struct free_space_extent { | |||
| 30 | * The test cases align their operations to this in order to hit some of the | 31 | * The test cases align their operations to this in order to hit some of the |
| 31 | * edge cases in the bitmap code. | 32 | * edge cases in the bitmap code. |
| 32 | */ | 33 | */ |
| 33 | #define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * 4096) | 34 | #define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE) |
| 34 | 35 | ||
| 35 | static int __check_free_space_extents(struct btrfs_trans_handle *trans, | 36 | static int __check_free_space_extents(struct btrfs_trans_handle *trans, |
| 36 | struct btrfs_fs_info *fs_info, | 37 | struct btrfs_fs_info *fs_info, |
| @@ -439,7 +440,8 @@ typedef int (*test_func_t)(struct btrfs_trans_handle *, | |||
| 439 | struct btrfs_block_group_cache *, | 440 | struct btrfs_block_group_cache *, |
| 440 | struct btrfs_path *); | 441 | struct btrfs_path *); |
| 441 | 442 | ||
| 442 | static int run_test(test_func_t test_func, int bitmaps) | 443 | static int run_test(test_func_t test_func, int bitmaps, |
| 444 | u32 sectorsize, u32 nodesize) | ||
| 443 | { | 445 | { |
| 444 | struct btrfs_root *root = NULL; | 446 | struct btrfs_root *root = NULL; |
| 445 | struct btrfs_block_group_cache *cache = NULL; | 447 | struct btrfs_block_group_cache *cache = NULL; |
| @@ -447,7 +449,7 @@ static int run_test(test_func_t test_func, int bitmaps) | |||
| 447 | struct btrfs_path *path = NULL; | 449 | struct btrfs_path *path = NULL; |
| 448 | int ret; | 450 | int ret; |
| 449 | 451 | ||
| 450 | root = btrfs_alloc_dummy_root(); | 452 | root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 451 | if (IS_ERR(root)) { | 453 | if (IS_ERR(root)) { |
| 452 | test_msg("Couldn't allocate dummy root\n"); | 454 | test_msg("Couldn't allocate dummy root\n"); |
| 453 | ret = PTR_ERR(root); | 455 | ret = PTR_ERR(root); |
| @@ -466,7 +468,8 @@ static int run_test(test_func_t test_func, int bitmaps) | |||
| 466 | root->fs_info->free_space_root = root; | 468 | root->fs_info->free_space_root = root; |
| 467 | root->fs_info->tree_root = root; | 469 | root->fs_info->tree_root = root; |
| 468 | 470 | ||
| 469 | root->node = alloc_test_extent_buffer(root->fs_info, 4096); | 471 | root->node = alloc_test_extent_buffer(root->fs_info, |
| 472 | nodesize, nodesize); | ||
| 470 | if (!root->node) { | 473 | if (!root->node) { |
| 471 | test_msg("Couldn't allocate dummy buffer\n"); | 474 | test_msg("Couldn't allocate dummy buffer\n"); |
| 472 | ret = -ENOMEM; | 475 | ret = -ENOMEM; |
| @@ -474,9 +477,9 @@ static int run_test(test_func_t test_func, int bitmaps) | |||
| 474 | } | 477 | } |
| 475 | btrfs_set_header_level(root->node, 0); | 478 | btrfs_set_header_level(root->node, 0); |
| 476 | btrfs_set_header_nritems(root->node, 0); | 479 | btrfs_set_header_nritems(root->node, 0); |
| 477 | root->alloc_bytenr += 8192; | 480 | root->alloc_bytenr += 2 * nodesize; |
| 478 | 481 | ||
| 479 | cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE); | 482 | cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE, sectorsize); |
| 480 | if (!cache) { | 483 | if (!cache) { |
| 481 | test_msg("Couldn't allocate dummy block group cache\n"); | 484 | test_msg("Couldn't allocate dummy block group cache\n"); |
| 482 | ret = -ENOMEM; | 485 | ret = -ENOMEM; |
| @@ -534,17 +537,18 @@ out: | |||
| 534 | return ret; | 537 | return ret; |
| 535 | } | 538 | } |
| 536 | 539 | ||
| 537 | static int run_test_both_formats(test_func_t test_func) | 540 | static int run_test_both_formats(test_func_t test_func, |
| 541 | u32 sectorsize, u32 nodesize) | ||
| 538 | { | 542 | { |
| 539 | int ret; | 543 | int ret; |
| 540 | 544 | ||
| 541 | ret = run_test(test_func, 0); | 545 | ret = run_test(test_func, 0, sectorsize, nodesize); |
| 542 | if (ret) | 546 | if (ret) |
| 543 | return ret; | 547 | return ret; |
| 544 | return run_test(test_func, 1); | 548 | return run_test(test_func, 1, sectorsize, nodesize); |
| 545 | } | 549 | } |
| 546 | 550 | ||
| 547 | int btrfs_test_free_space_tree(void) | 551 | int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize) |
| 548 | { | 552 | { |
| 549 | test_func_t tests[] = { | 553 | test_func_t tests[] = { |
| 550 | test_empty_block_group, | 554 | test_empty_block_group, |
| @@ -561,9 +565,11 @@ int btrfs_test_free_space_tree(void) | |||
| 561 | 565 | ||
| 562 | test_msg("Running free space tree tests\n"); | 566 | test_msg("Running free space tree tests\n"); |
| 563 | for (i = 0; i < ARRAY_SIZE(tests); i++) { | 567 | for (i = 0; i < ARRAY_SIZE(tests); i++) { |
| 564 | int ret = run_test_both_formats(tests[i]); | 568 | int ret = run_test_both_formats(tests[i], sectorsize, |
| 569 | nodesize); | ||
| 565 | if (ret) { | 570 | if (ret) { |
| 566 | test_msg("%pf failed\n", tests[i]); | 571 | test_msg("%pf : sectorsize %u failed\n", |
| 572 | tests[i], sectorsize); | ||
| 567 | return ret; | 573 | return ret; |
| 568 | } | 574 | } |
| 569 | } | 575 | } |
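
Each free-space-tree case is driven once with the free space stored as extents and once as bitmaps for the given geometry, and failures are now reported together with the sector size, which is what the %pf message change is about. A reduced sketch of the dispatch, mirroring the hunk above with unrelated details dropped:

        /* Reduced mirror of run_test_both_formats(): the second argument to
         * run_test() selects the on-disk format (0 = extents, 1 = bitmaps). */
        static int run_test_both_formats(test_func_t test_func,
                                         u32 sectorsize, u32 nodesize)
        {
                int ret;

                ret = run_test(test_func, 0, sectorsize, nodesize);  /* extents */
                if (ret)
                        return ret;
                return run_test(test_func, 1, sectorsize, nodesize); /* bitmaps */
        }
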
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index 8a25fe8b7c45..29648c0a39f1 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | * Boston, MA 021110-1307, USA. | 16 | * Boston, MA 021110-1307, USA. |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #include <linux/types.h> | ||
| 19 | #include "btrfs-tests.h" | 20 | #include "btrfs-tests.h" |
| 20 | #include "../ctree.h" | 21 | #include "../ctree.h" |
| 21 | #include "../btrfs_inode.h" | 22 | #include "../btrfs_inode.h" |
| @@ -86,19 +87,19 @@ static void insert_inode_item_key(struct btrfs_root *root) | |||
| 86 | * diagram of how the extents will look though this may not be possible we still | 87 | * diagram of how the extents will look though this may not be possible we still |
| 87 | * want to make sure everything acts normally (the last number is not inclusive) | 88 | * want to make sure everything acts normally (the last number is not inclusive) |
| 88 | * | 89 | * |
| 89 | * [0 - 5][5 - 6][6 - 10][10 - 4096][ 4096 - 8192 ][8192 - 12288] | 90 | * [0 - 5][5 - 6][ 6 - 4096 ][ 4096 - 4100][4100 - 8195][8195 - 12291] |
| 90 | * [hole ][inline][ hole ][ regular ][regular1 split][ hole ] | 91 | * [hole ][inline][hole but no extent][ hole ][ regular ][regular1 split] |
| 91 | * | 92 | * |
| 92 | * [ 12288 - 20480][20480 - 24576][ 24576 - 28672 ][28672 - 36864][36864 - 45056] | 93 | * [12291 - 16387][16387 - 24579][24579 - 28675][ 28675 - 32771][32771 - 36867 ] |
| 93 | * [regular1 split][ prealloc1 ][prealloc1 written][ prealloc1 ][ compressed ] | 94 | * [ hole ][regular1 split][ prealloc ][ prealloc1 ][prealloc1 written] |
| 94 | * | 95 | * |
| 95 | * [45056 - 49152][49152-53248][53248-61440][61440-65536][ 65536+81920 ] | 96 | * [36867 - 45059][45059 - 53251][53251 - 57347][57347 - 61443][61443- 69635] |
| 96 | * [ compressed1 ][ regular ][compressed1][ regular ][ hole but no extent] | 97 | * [ prealloc1 ][ compressed ][ compressed1 ][ regular ][ compressed1] |
| 97 | * | 98 | * |
| 98 | * [81920-86016] | 99 | * [69635-73731][ 73731 - 86019 ][86019-90115] |
| 99 | * [ regular ] | 100 | * [ regular ][ hole but no extent][ regular ] |
| 100 | */ | 101 | */ |
| 101 | static void setup_file_extents(struct btrfs_root *root) | 102 | static void setup_file_extents(struct btrfs_root *root, u32 sectorsize) |
| 102 | { | 103 | { |
| 103 | int slot = 0; | 104 | int slot = 0; |
| 104 | u64 disk_bytenr = SZ_1M; | 105 | u64 disk_bytenr = SZ_1M; |
| @@ -119,7 +120,7 @@ static void setup_file_extents(struct btrfs_root *root) | |||
| 119 | insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0, | 120 | insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0, |
| 120 | slot); | 121 | slot); |
| 121 | slot++; | 122 | slot++; |
| 122 | offset = 4096; | 123 | offset = sectorsize; |
| 123 | 124 | ||
| 124 | /* Now another hole */ | 125 | /* Now another hole */ |
| 125 | insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0, | 126 | insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0, |
| @@ -128,99 +129,106 @@ static void setup_file_extents(struct btrfs_root *root) | |||
| 128 | offset += 4; | 129 | offset += 4; |
| 129 | 130 | ||
| 130 | /* Now for a regular extent */ | 131 | /* Now for a regular extent */ |
| 131 | insert_extent(root, offset, 4095, 4095, 0, disk_bytenr, 4096, | 132 | insert_extent(root, offset, sectorsize - 1, sectorsize - 1, 0, |
| 132 | BTRFS_FILE_EXTENT_REG, 0, slot); | 133 | disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); |
| 133 | slot++; | 134 | slot++; |
| 134 | disk_bytenr += 4096; | 135 | disk_bytenr += sectorsize; |
| 135 | offset += 4095; | 136 | offset += sectorsize - 1; |
| 136 | 137 | ||
| 137 | /* | 138 | /* |
| 138 | * Now for 3 extents that were split from a hole punch so we test | 139 | * Now for 3 extents that were split from a hole punch so we test |
| 139 | * offsets properly. | 140 | * offsets properly. |
| 140 | */ | 141 | */ |
| 141 | insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384, | 142 | insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr, |
| 142 | BTRFS_FILE_EXTENT_REG, 0, slot); | 143 | 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); |
| 143 | slot++; | 144 | slot++; |
| 144 | offset += 4096; | 145 | offset += sectorsize; |
| 145 | insert_extent(root, offset, 4096, 4096, 0, 0, 0, BTRFS_FILE_EXTENT_REG, | 146 | insert_extent(root, offset, sectorsize, sectorsize, 0, 0, 0, |
| 146 | 0, slot); | 147 | BTRFS_FILE_EXTENT_REG, 0, slot); |
| 147 | slot++; | 148 | slot++; |
| 148 | offset += 4096; | 149 | offset += sectorsize; |
| 149 | insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384, | 150 | insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize, |
| 151 | 2 * sectorsize, disk_bytenr, 4 * sectorsize, | ||
| 150 | BTRFS_FILE_EXTENT_REG, 0, slot); | 152 | BTRFS_FILE_EXTENT_REG, 0, slot); |
| 151 | slot++; | 153 | slot++; |
| 152 | offset += 8192; | 154 | offset += 2 * sectorsize; |
| 153 | disk_bytenr += 16384; | 155 | disk_bytenr += 4 * sectorsize; |
| 154 | 156 | ||
| 155 | /* Now for an unwritten prealloc extent */ | 157 | /* Now for an unwritten prealloc extent */ |
| 156 | insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096, | 158 | insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr, |
| 157 | BTRFS_FILE_EXTENT_PREALLOC, 0, slot); | 159 | sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot); |
| 158 | slot++; | 160 | slot++; |
| 159 | offset += 4096; | 161 | offset += sectorsize; |
| 160 | 162 | ||
| 161 | /* | 163 | /* |
| 162 | * We want to jack up disk_bytenr a little more so the em stuff doesn't | 164 | * We want to jack up disk_bytenr a little more so the em stuff doesn't |
| 163 | * merge our records. | 165 | * merge our records. |
| 164 | */ | 166 | */ |
| 165 | disk_bytenr += 8192; | 167 | disk_bytenr += 2 * sectorsize; |
| 166 | 168 | ||
| 167 | /* | 169 | /* |
| 168 | * Now for a partially written prealloc extent, basically the same as | 170 | * Now for a partially written prealloc extent, basically the same as |
| 169 | * the hole punch example above. Ram_bytes never changes when you mark | 171 | * the hole punch example above. Ram_bytes never changes when you mark |
| 170 | * extents written btw. | 172 | * extents written btw. |
| 171 | */ | 173 | */ |
| 172 | insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384, | 174 | insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr, |
| 173 | BTRFS_FILE_EXTENT_PREALLOC, 0, slot); | 175 | 4 * sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot); |
| 174 | slot++; | 176 | slot++; |
| 175 | offset += 4096; | 177 | offset += sectorsize; |
| 176 | insert_extent(root, offset, 4096, 16384, 4096, disk_bytenr, 16384, | 178 | insert_extent(root, offset, sectorsize, 4 * sectorsize, sectorsize, |
| 177 | BTRFS_FILE_EXTENT_REG, 0, slot); | 179 | disk_bytenr, 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, |
| 180 | slot); | ||
| 178 | slot++; | 181 | slot++; |
| 179 | offset += 4096; | 182 | offset += sectorsize; |
| 180 | insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384, | 183 | insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize, |
| 184 | 2 * sectorsize, disk_bytenr, 4 * sectorsize, | ||
| 181 | BTRFS_FILE_EXTENT_PREALLOC, 0, slot); | 185 | BTRFS_FILE_EXTENT_PREALLOC, 0, slot); |
| 182 | slot++; | 186 | slot++; |
| 183 | offset += 8192; | 187 | offset += 2 * sectorsize; |
| 184 | disk_bytenr += 16384; | 188 | disk_bytenr += 4 * sectorsize; |
| 185 | 189 | ||
| 186 | /* Now a normal compressed extent */ | 190 | /* Now a normal compressed extent */ |
| 187 | insert_extent(root, offset, 8192, 8192, 0, disk_bytenr, 4096, | 191 | insert_extent(root, offset, 2 * sectorsize, 2 * sectorsize, 0, |
| 188 | BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); | 192 | disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, |
| 193 | BTRFS_COMPRESS_ZLIB, slot); | ||
| 189 | slot++; | 194 | slot++; |
| 190 | offset += 8192; | 195 | offset += 2 * sectorsize; |
| 191 | /* No merges */ | 196 | /* No merges */ |
| 192 | disk_bytenr += 8192; | 197 | disk_bytenr += 2 * sectorsize; |
| 193 | 198 | ||
| 194 | /* Now a split compressed extent */ | 199 | /* Now a split compressed extent */ |
| 195 | insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 4096, | 200 | insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr, |
| 196 | BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); | 201 | sectorsize, BTRFS_FILE_EXTENT_REG, |
| 202 | BTRFS_COMPRESS_ZLIB, slot); | ||
| 197 | slot++; | 203 | slot++; |
| 198 | offset += 4096; | 204 | offset += sectorsize; |
| 199 | insert_extent(root, offset, 4096, 4096, 0, disk_bytenr + 4096, 4096, | 205 | insert_extent(root, offset, sectorsize, sectorsize, 0, |
| 206 | disk_bytenr + sectorsize, sectorsize, | ||
| 200 | BTRFS_FILE_EXTENT_REG, 0, slot); | 207 | BTRFS_FILE_EXTENT_REG, 0, slot); |
| 201 | slot++; | 208 | slot++; |
| 202 | offset += 4096; | 209 | offset += sectorsize; |
| 203 | insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 4096, | 210 | insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize, |
| 211 | 2 * sectorsize, disk_bytenr, sectorsize, | ||
| 204 | BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); | 212 | BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); |
| 205 | slot++; | 213 | slot++; |
| 206 | offset += 8192; | 214 | offset += 2 * sectorsize; |
| 207 | disk_bytenr += 8192; | 215 | disk_bytenr += 2 * sectorsize; |
| 208 | 216 | ||
| 209 | /* Now extents that have a hole but no hole extent */ | 217 | /* Now extents that have a hole but no hole extent */ |
| 210 | insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096, | 218 | insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr, |
| 211 | BTRFS_FILE_EXTENT_REG, 0, slot); | 219 | sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); |
| 212 | slot++; | 220 | slot++; |
| 213 | offset += 16384; | 221 | offset += 4 * sectorsize; |
| 214 | disk_bytenr += 4096; | 222 | disk_bytenr += sectorsize; |
| 215 | insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096, | 223 | insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr, |
| 216 | BTRFS_FILE_EXTENT_REG, 0, slot); | 224 | sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); |
| 217 | } | 225 | } |
| 218 | 226 | ||
| 219 | static unsigned long prealloc_only = 0; | 227 | static unsigned long prealloc_only = 0; |
| 220 | static unsigned long compressed_only = 0; | 228 | static unsigned long compressed_only = 0; |
| 221 | static unsigned long vacancy_only = 0; | 229 | static unsigned long vacancy_only = 0; |
| 222 | 230 | ||
| 223 | static noinline int test_btrfs_get_extent(void) | 231 | static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) |
| 224 | { | 232 | { |
| 225 | struct inode *inode = NULL; | 233 | struct inode *inode = NULL; |
| 226 | struct btrfs_root *root = NULL; | 234 | struct btrfs_root *root = NULL; |
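
The updated layout diagram in setup_file_extents() is parameterized by sectorsize; plugging in the historical 4096 reproduces the byte ranges shown in the comment. Below is a stand-alone recomputation of the first few boundaries, advancing the offset the same way the hunk above does (offsets only; the extent types follow the diagram):

        /* Recompute the leading boundaries of the test file layout for a
         * given sector size. */
        #include <stdio.h>

        int main(void)
        {
                unsigned long long sectorsize = 4096, offset = 0;

                printf("hole        [%llu - %llu]\n", offset, offset + 5);
                offset += 5;                            /* 5-byte hole */
                printf("inline      [%llu - %llu]\n", offset, offset + 1);
                offset = sectorsize;                    /* jump to first sector */
                printf("hole        [%llu - %llu]\n", offset, offset + 4);
                offset += 4;
                printf("regular     [%llu - %llu]\n", offset, offset + sectorsize - 1);
                offset += sectorsize - 1;
                printf("reg1 split  [%llu - %llu]\n", offset, offset + sectorsize);
                offset += sectorsize;
                printf("hole        [%llu - %llu]\n", offset, offset + sectorsize);
                /* ...and so on; with sectorsize = 4096 these match the
                 * [0-5][5-6][4096-4100][4100-8195][8195-12291][12291-16387]
                 * ranges in the updated comment. */
                return 0;
        }
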
| @@ -240,7 +248,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 240 | BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; | 248 | BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; |
| 241 | BTRFS_I(inode)->location.offset = 0; | 249 | BTRFS_I(inode)->location.offset = 0; |
| 242 | 250 | ||
| 243 | root = btrfs_alloc_dummy_root(); | 251 | root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 244 | if (IS_ERR(root)) { | 252 | if (IS_ERR(root)) { |
| 245 | test_msg("Couldn't allocate root\n"); | 253 | test_msg("Couldn't allocate root\n"); |
| 246 | goto out; | 254 | goto out; |
| @@ -256,7 +264,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 256 | goto out; | 264 | goto out; |
| 257 | } | 265 | } |
| 258 | 266 | ||
| 259 | root->node = alloc_dummy_extent_buffer(NULL, 4096); | 267 | root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize); |
| 260 | if (!root->node) { | 268 | if (!root->node) { |
| 261 | test_msg("Couldn't allocate dummy buffer\n"); | 269 | test_msg("Couldn't allocate dummy buffer\n"); |
| 262 | goto out; | 270 | goto out; |
| @@ -273,7 +281,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 273 | 281 | ||
| 274 | /* First with no extents */ | 282 | /* First with no extents */ |
| 275 | BTRFS_I(inode)->root = root; | 283 | BTRFS_I(inode)->root = root; |
| 276 | em = btrfs_get_extent(inode, NULL, 0, 0, 4096, 0); | 284 | em = btrfs_get_extent(inode, NULL, 0, 0, sectorsize, 0); |
| 277 | if (IS_ERR(em)) { | 285 | if (IS_ERR(em)) { |
| 278 | em = NULL; | 286 | em = NULL; |
| 279 | test_msg("Got an error when we shouldn't have\n"); | 287 | test_msg("Got an error when we shouldn't have\n"); |
| @@ -295,7 +303,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 295 | * setup_file_extents, so if you change anything there you need to | 303 | * setup_file_extents, so if you change anything there you need to |
| 296 | * update the comment and update the expected values below. | 304 | * update the comment and update the expected values below. |
| 297 | */ | 305 | */ |
| 298 | setup_file_extents(root); | 306 | setup_file_extents(root, sectorsize); |
| 299 | 307 | ||
| 300 | em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0); | 308 | em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0); |
| 301 | if (IS_ERR(em)) { | 309 | if (IS_ERR(em)) { |
| @@ -318,7 +326,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 318 | offset = em->start + em->len; | 326 | offset = em->start + em->len; |
| 319 | free_extent_map(em); | 327 | free_extent_map(em); |
| 320 | 328 | ||
| 321 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 329 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 322 | if (IS_ERR(em)) { | 330 | if (IS_ERR(em)) { |
| 323 | test_msg("Got an error when we shouldn't have\n"); | 331 | test_msg("Got an error when we shouldn't have\n"); |
| 324 | goto out; | 332 | goto out; |
| @@ -327,7 +335,8 @@ static noinline int test_btrfs_get_extent(void) | |||
| 327 | test_msg("Expected an inline, got %llu\n", em->block_start); | 335 | test_msg("Expected an inline, got %llu\n", em->block_start); |
| 328 | goto out; | 336 | goto out; |
| 329 | } | 337 | } |
| 330 | if (em->start != offset || em->len != 4091) { | 338 | |
| 339 | if (em->start != offset || em->len != (sectorsize - 5)) { | ||
| 331 | test_msg("Unexpected extent wanted start %llu len 1, got start " | 340 | test_msg("Unexpected extent wanted start %llu len 1, got start " |
| 332 | "%llu len %llu\n", offset, em->start, em->len); | 341 | "%llu len %llu\n", offset, em->start, em->len); |
| 333 | goto out; | 342 | goto out; |
| @@ -344,7 +353,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 344 | offset = em->start + em->len; | 353 | offset = em->start + em->len; |
| 345 | free_extent_map(em); | 354 | free_extent_map(em); |
| 346 | 355 | ||
| 347 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 356 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 348 | if (IS_ERR(em)) { | 357 | if (IS_ERR(em)) { |
| 349 | test_msg("Got an error when we shouldn't have\n"); | 358 | test_msg("Got an error when we shouldn't have\n"); |
| 350 | goto out; | 359 | goto out; |
| @@ -366,7 +375,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 366 | free_extent_map(em); | 375 | free_extent_map(em); |
| 367 | 376 | ||
| 368 | /* Regular extent */ | 377 | /* Regular extent */ |
| 369 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 378 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 370 | if (IS_ERR(em)) { | 379 | if (IS_ERR(em)) { |
| 371 | test_msg("Got an error when we shouldn't have\n"); | 380 | test_msg("Got an error when we shouldn't have\n"); |
| 372 | goto out; | 381 | goto out; |
| @@ -375,7 +384,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 375 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 384 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 376 | goto out; | 385 | goto out; |
| 377 | } | 386 | } |
| 378 | if (em->start != offset || em->len != 4095) { | 387 | if (em->start != offset || em->len != sectorsize - 1) { |
| 379 | test_msg("Unexpected extent wanted start %llu len 4095, got " | 388 | test_msg("Unexpected extent wanted start %llu len 4095, got " |
| 380 | "start %llu len %llu\n", offset, em->start, em->len); | 389 | "start %llu len %llu\n", offset, em->start, em->len); |
| 381 | goto out; | 390 | goto out; |
| @@ -393,7 +402,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 393 | free_extent_map(em); | 402 | free_extent_map(em); |
| 394 | 403 | ||
| 395 | /* The next 3 are split extents */ | 404 | /* The next 3 are split extents */ |
| 396 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 405 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 397 | if (IS_ERR(em)) { | 406 | if (IS_ERR(em)) { |
| 398 | test_msg("Got an error when we shouldn't have\n"); | 407 | test_msg("Got an error when we shouldn't have\n"); |
| 399 | goto out; | 408 | goto out; |
| @@ -402,9 +411,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 402 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 411 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 403 | goto out; | 412 | goto out; |
| 404 | } | 413 | } |
| 405 | if (em->start != offset || em->len != 4096) { | 414 | if (em->start != offset || em->len != sectorsize) { |
| 406 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 415 | test_msg("Unexpected extent start %llu len %u, " |
| 407 | "start %llu len %llu\n", offset, em->start, em->len); | 416 | "got start %llu len %llu\n", |
| 417 | offset, sectorsize, em->start, em->len); | ||
| 408 | goto out; | 418 | goto out; |
| 409 | } | 419 | } |
| 410 | if (em->flags != 0) { | 420 | if (em->flags != 0) { |
| @@ -421,7 +431,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 421 | offset = em->start + em->len; | 431 | offset = em->start + em->len; |
| 422 | free_extent_map(em); | 432 | free_extent_map(em); |
| 423 | 433 | ||
| 424 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 434 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 425 | if (IS_ERR(em)) { | 435 | if (IS_ERR(em)) { |
| 426 | test_msg("Got an error when we shouldn't have\n"); | 436 | test_msg("Got an error when we shouldn't have\n"); |
| 427 | goto out; | 437 | goto out; |
| @@ -430,9 +440,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 430 | test_msg("Expected a hole, got %llu\n", em->block_start); | 440 | test_msg("Expected a hole, got %llu\n", em->block_start); |
| 431 | goto out; | 441 | goto out; |
| 432 | } | 442 | } |
| 433 | if (em->start != offset || em->len != 4096) { | 443 | if (em->start != offset || em->len != sectorsize) { |
| 434 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 444 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 435 | "start %llu len %llu\n", offset, em->start, em->len); | 445 | "got start %llu len %llu\n", |
| 446 | offset, sectorsize, em->start, em->len); | ||
| 436 | goto out; | 447 | goto out; |
| 437 | } | 448 | } |
| 438 | if (em->flags != 0) { | 449 | if (em->flags != 0) { |
| @@ -442,7 +453,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 442 | offset = em->start + em->len; | 453 | offset = em->start + em->len; |
| 443 | free_extent_map(em); | 454 | free_extent_map(em); |
| 444 | 455 | ||
| 445 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 456 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 446 | if (IS_ERR(em)) { | 457 | if (IS_ERR(em)) { |
| 447 | test_msg("Got an error when we shouldn't have\n"); | 458 | test_msg("Got an error when we shouldn't have\n"); |
| 448 | goto out; | 459 | goto out; |
| @@ -451,9 +462,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 451 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 462 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 452 | goto out; | 463 | goto out; |
| 453 | } | 464 | } |
| 454 | if (em->start != offset || em->len != 8192) { | 465 | if (em->start != offset || em->len != 2 * sectorsize) { |
| 455 | test_msg("Unexpected extent wanted start %llu len 8192, got " | 466 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 456 | "start %llu len %llu\n", offset, em->start, em->len); | 467 | "got start %llu len %llu\n", |
| 468 | offset, 2 * sectorsize, em->start, em->len); | ||
| 457 | goto out; | 469 | goto out; |
| 458 | } | 470 | } |
| 459 | if (em->flags != 0) { | 471 | if (em->flags != 0) { |
| @@ -475,7 +487,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 475 | free_extent_map(em); | 487 | free_extent_map(em); |
| 476 | 488 | ||
| 477 | /* Prealloc extent */ | 489 | /* Prealloc extent */ |
| 478 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 490 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 479 | if (IS_ERR(em)) { | 491 | if (IS_ERR(em)) { |
| 480 | test_msg("Got an error when we shouldn't have\n"); | 492 | test_msg("Got an error when we shouldn't have\n"); |
| 481 | goto out; | 493 | goto out; |
| @@ -484,9 +496,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 484 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 496 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 485 | goto out; | 497 | goto out; |
| 486 | } | 498 | } |
| 487 | if (em->start != offset || em->len != 4096) { | 499 | if (em->start != offset || em->len != sectorsize) { |
| 488 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 500 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 489 | "start %llu len %llu\n", offset, em->start, em->len); | 501 | "got start %llu len %llu\n", |
| 502 | offset, sectorsize, em->start, em->len); | ||
| 490 | goto out; | 503 | goto out; |
| 491 | } | 504 | } |
| 492 | if (em->flags != prealloc_only) { | 505 | if (em->flags != prealloc_only) { |
| @@ -503,7 +516,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 503 | free_extent_map(em); | 516 | free_extent_map(em); |
| 504 | 517 | ||
| 505 | /* The next 3 are a half written prealloc extent */ | 518 | /* The next 3 are a half written prealloc extent */ |
| 506 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 519 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 507 | if (IS_ERR(em)) { | 520 | if (IS_ERR(em)) { |
| 508 | test_msg("Got an error when we shouldn't have\n"); | 521 | test_msg("Got an error when we shouldn't have\n"); |
| 509 | goto out; | 522 | goto out; |
| @@ -512,9 +525,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 512 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 525 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 513 | goto out; | 526 | goto out; |
| 514 | } | 527 | } |
| 515 | if (em->start != offset || em->len != 4096) { | 528 | if (em->start != offset || em->len != sectorsize) { |
| 516 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 529 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 517 | "start %llu len %llu\n", offset, em->start, em->len); | 530 | "got start %llu len %llu\n", |
| 531 | offset, sectorsize, em->start, em->len); | ||
| 518 | goto out; | 532 | goto out; |
| 519 | } | 533 | } |
| 520 | if (em->flags != prealloc_only) { | 534 | if (em->flags != prealloc_only) { |
| @@ -532,7 +546,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 532 | offset = em->start + em->len; | 546 | offset = em->start + em->len; |
| 533 | free_extent_map(em); | 547 | free_extent_map(em); |
| 534 | 548 | ||
| 535 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 549 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 536 | if (IS_ERR(em)) { | 550 | if (IS_ERR(em)) { |
| 537 | test_msg("Got an error when we shouldn't have\n"); | 551 | test_msg("Got an error when we shouldn't have\n"); |
| 538 | goto out; | 552 | goto out; |
| @@ -541,9 +555,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 541 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 555 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 542 | goto out; | 556 | goto out; |
| 543 | } | 557 | } |
| 544 | if (em->start != offset || em->len != 4096) { | 558 | if (em->start != offset || em->len != sectorsize) { |
| 545 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 559 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 546 | "start %llu len %llu\n", offset, em->start, em->len); | 560 | "got start %llu len %llu\n", |
| 561 | offset, sectorsize, em->start, em->len); | ||
| 547 | goto out; | 562 | goto out; |
| 548 | } | 563 | } |
| 549 | if (em->flags != 0) { | 564 | if (em->flags != 0) { |
| @@ -564,7 +579,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 564 | offset = em->start + em->len; | 579 | offset = em->start + em->len; |
| 565 | free_extent_map(em); | 580 | free_extent_map(em); |
| 566 | 581 | ||
| 567 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 582 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 568 | if (IS_ERR(em)) { | 583 | if (IS_ERR(em)) { |
| 569 | test_msg("Got an error when we shouldn't have\n"); | 584 | test_msg("Got an error when we shouldn't have\n"); |
| 570 | goto out; | 585 | goto out; |
| @@ -573,9 +588,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 573 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 588 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 574 | goto out; | 589 | goto out; |
| 575 | } | 590 | } |
| 576 | if (em->start != offset || em->len != 8192) { | 591 | if (em->start != offset || em->len != 2 * sectorsize) { |
| 577 | test_msg("Unexpected extent wanted start %llu len 8192, got " | 592 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 578 | "start %llu len %llu\n", offset, em->start, em->len); | 593 | "got start %llu len %llu\n", |
| 594 | offset, 2 * sectorsize, em->start, em->len); | ||
| 579 | goto out; | 595 | goto out; |
| 580 | } | 596 | } |
| 581 | if (em->flags != prealloc_only) { | 597 | if (em->flags != prealloc_only) { |
| @@ -598,7 +614,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 598 | free_extent_map(em); | 614 | free_extent_map(em); |
| 599 | 615 | ||
| 600 | /* Now for the compressed extent */ | 616 | /* Now for the compressed extent */ |
| 601 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 617 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 602 | if (IS_ERR(em)) { | 618 | if (IS_ERR(em)) { |
| 603 | test_msg("Got an error when we shouldn't have\n"); | 619 | test_msg("Got an error when we shouldn't have\n"); |
| 604 | goto out; | 620 | goto out; |
| @@ -607,9 +623,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 607 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 623 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 608 | goto out; | 624 | goto out; |
| 609 | } | 625 | } |
| 610 | if (em->start != offset || em->len != 8192) { | 626 | if (em->start != offset || em->len != 2 * sectorsize) { |
| 611 | test_msg("Unexpected extent wanted start %llu len 8192, got " | 627 | test_msg("Unexpected extent wanted start %llu len %u," |
| 612 | "start %llu len %llu\n", offset, em->start, em->len); | 628 | "got start %llu len %llu\n", |
| 629 | offset, 2 * sectorsize, em->start, em->len); | ||
| 613 | goto out; | 630 | goto out; |
| 614 | } | 631 | } |
| 615 | if (em->flags != compressed_only) { | 632 | if (em->flags != compressed_only) { |
| @@ -631,7 +648,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 631 | free_extent_map(em); | 648 | free_extent_map(em); |
| 632 | 649 | ||
| 633 | /* Split compressed extent */ | 650 | /* Split compressed extent */ |
| 634 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 651 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 635 | if (IS_ERR(em)) { | 652 | if (IS_ERR(em)) { |
| 636 | test_msg("Got an error when we shouldn't have\n"); | 653 | test_msg("Got an error when we shouldn't have\n"); |
| 637 | goto out; | 654 | goto out; |
| @@ -640,9 +657,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 640 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 657 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 641 | goto out; | 658 | goto out; |
| 642 | } | 659 | } |
| 643 | if (em->start != offset || em->len != 4096) { | 660 | if (em->start != offset || em->len != sectorsize) { |
| 644 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 661 | test_msg("Unexpected extent wanted start %llu len %u," |
| 645 | "start %llu len %llu\n", offset, em->start, em->len); | 662 | "got start %llu len %llu\n", |
| 663 | offset, sectorsize, em->start, em->len); | ||
| 646 | goto out; | 664 | goto out; |
| 647 | } | 665 | } |
| 648 | if (em->flags != compressed_only) { | 666 | if (em->flags != compressed_only) { |
| @@ -665,7 +683,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 665 | offset = em->start + em->len; | 683 | offset = em->start + em->len; |
| 666 | free_extent_map(em); | 684 | free_extent_map(em); |
| 667 | 685 | ||
| 668 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 686 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 669 | if (IS_ERR(em)) { | 687 | if (IS_ERR(em)) { |
| 670 | test_msg("Got an error when we shouldn't have\n"); | 688 | test_msg("Got an error when we shouldn't have\n"); |
| 671 | goto out; | 689 | goto out; |
| @@ -674,9 +692,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 674 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 692 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 675 | goto out; | 693 | goto out; |
| 676 | } | 694 | } |
| 677 | if (em->start != offset || em->len != 4096) { | 695 | if (em->start != offset || em->len != sectorsize) { |
| 678 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 696 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 679 | "start %llu len %llu\n", offset, em->start, em->len); | 697 | "got start %llu len %llu\n", |
| 698 | offset, sectorsize, em->start, em->len); | ||
| 680 | goto out; | 699 | goto out; |
| 681 | } | 700 | } |
| 682 | if (em->flags != 0) { | 701 | if (em->flags != 0) { |
| @@ -691,7 +710,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 691 | offset = em->start + em->len; | 710 | offset = em->start + em->len; |
| 692 | free_extent_map(em); | 711 | free_extent_map(em); |
| 693 | 712 | ||
| 694 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 713 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 695 | if (IS_ERR(em)) { | 714 | if (IS_ERR(em)) { |
| 696 | test_msg("Got an error when we shouldn't have\n"); | 715 | test_msg("Got an error when we shouldn't have\n"); |
| 697 | goto out; | 716 | goto out; |
| @@ -701,9 +720,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 701 | disk_bytenr, em->block_start); | 720 | disk_bytenr, em->block_start); |
| 702 | goto out; | 721 | goto out; |
| 703 | } | 722 | } |
| 704 | if (em->start != offset || em->len != 8192) { | 723 | if (em->start != offset || em->len != 2 * sectorsize) { |
| 705 | test_msg("Unexpected extent wanted start %llu len 8192, got " | 724 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 706 | "start %llu len %llu\n", offset, em->start, em->len); | 725 | "got start %llu len %llu\n", |
| 726 | offset, 2 * sectorsize, em->start, em->len); | ||
| 707 | goto out; | 727 | goto out; |
| 708 | } | 728 | } |
| 709 | if (em->flags != compressed_only) { | 729 | if (em->flags != compressed_only) { |
| @@ -725,7 +745,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 725 | free_extent_map(em); | 745 | free_extent_map(em); |
| 726 | 746 | ||
| 727 | /* A hole between regular extents but no hole extent */ | 747 | /* A hole between regular extents but no hole extent */ |
| 728 | em = btrfs_get_extent(inode, NULL, 0, offset + 6, 4096, 0); | 748 | em = btrfs_get_extent(inode, NULL, 0, offset + 6, sectorsize, 0); |
| 729 | if (IS_ERR(em)) { | 749 | if (IS_ERR(em)) { |
| 730 | test_msg("Got an error when we shouldn't have\n"); | 750 | test_msg("Got an error when we shouldn't have\n"); |
| 731 | goto out; | 751 | goto out; |
| @@ -734,9 +754,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 734 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 754 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 735 | goto out; | 755 | goto out; |
| 736 | } | 756 | } |
| 737 | if (em->start != offset || em->len != 4096) { | 757 | if (em->start != offset || em->len != sectorsize) { |
| 738 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 758 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 739 | "start %llu len %llu\n", offset, em->start, em->len); | 759 | "got start %llu len %llu\n", |
| 760 | offset, sectorsize, em->start, em->len); | ||
| 740 | goto out; | 761 | goto out; |
| 741 | } | 762 | } |
| 742 | if (em->flags != 0) { | 763 | if (em->flags != 0) { |
| @@ -765,9 +786,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 765 | * length of the actual hole, if this changes we'll have to change this | 786 | * length of the actual hole, if this changes we'll have to change this |
| 766 | * test. | 787 | * test. |
| 767 | */ | 788 | */ |
| 768 | if (em->start != offset || em->len != 12288) { | 789 | if (em->start != offset || em->len != 3 * sectorsize) { |
| 769 | test_msg("Unexpected extent wanted start %llu len 12288, got " | 790 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 770 | "start %llu len %llu\n", offset, em->start, em->len); | 791 | "got start %llu len %llu\n", |
| 792 | offset, 3 * sectorsize, em->start, em->len); | ||
| 771 | goto out; | 793 | goto out; |
| 772 | } | 794 | } |
| 773 | if (em->flags != vacancy_only) { | 795 | if (em->flags != vacancy_only) { |
| @@ -783,7 +805,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 783 | offset = em->start + em->len; | 805 | offset = em->start + em->len; |
| 784 | free_extent_map(em); | 806 | free_extent_map(em); |
| 785 | 807 | ||
| 786 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 808 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 787 | if (IS_ERR(em)) { | 809 | if (IS_ERR(em)) { |
| 788 | test_msg("Got an error when we shouldn't have\n"); | 810 | test_msg("Got an error when we shouldn't have\n"); |
| 789 | goto out; | 811 | goto out; |
| @@ -792,9 +814,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 792 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 814 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 793 | goto out; | 815 | goto out; |
| 794 | } | 816 | } |
| 795 | if (em->start != offset || em->len != 4096) { | 817 | if (em->start != offset || em->len != sectorsize) { |
| 796 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 818 | test_msg("Unexpected extent wanted start %llu len %u," |
| 797 | "start %llu len %llu\n", offset, em->start, em->len); | 819 | "got start %llu len %llu\n", |
| 820 | offset, sectorsize, em->start, em->len); | ||
| 798 | goto out; | 821 | goto out; |
| 799 | } | 822 | } |
| 800 | if (em->flags != 0) { | 823 | if (em->flags != 0) { |
| @@ -815,7 +838,7 @@ out: | |||
| 815 | return ret; | 838 | return ret; |
| 816 | } | 839 | } |
| 817 | 840 | ||
| 818 | static int test_hole_first(void) | 841 | static int test_hole_first(u32 sectorsize, u32 nodesize) |
| 819 | { | 842 | { |
| 820 | struct inode *inode = NULL; | 843 | struct inode *inode = NULL; |
| 821 | struct btrfs_root *root = NULL; | 844 | struct btrfs_root *root = NULL; |
| @@ -832,7 +855,7 @@ static int test_hole_first(void) | |||
| 832 | BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; | 855 | BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; |
| 833 | BTRFS_I(inode)->location.offset = 0; | 856 | BTRFS_I(inode)->location.offset = 0; |
| 834 | 857 | ||
| 835 | root = btrfs_alloc_dummy_root(); | 858 | root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 836 | if (IS_ERR(root)) { | 859 | if (IS_ERR(root)) { |
| 837 | test_msg("Couldn't allocate root\n"); | 860 | test_msg("Couldn't allocate root\n"); |
| 838 | goto out; | 861 | goto out; |
| @@ -844,7 +867,7 @@ static int test_hole_first(void) | |||
| 844 | goto out; | 867 | goto out; |
| 845 | } | 868 | } |
| 846 | 869 | ||
| 847 | root->node = alloc_dummy_extent_buffer(NULL, 4096); | 870 | root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize); |
| 848 | if (!root->node) { | 871 | if (!root->node) { |
| 849 | test_msg("Couldn't allocate dummy buffer\n"); | 872 | test_msg("Couldn't allocate dummy buffer\n"); |
| 850 | goto out; | 873 | goto out; |
| @@ -861,9 +884,9 @@ static int test_hole_first(void) | |||
| 861 | * btrfs_get_extent. | 884 | * btrfs_get_extent. |
| 862 | */ | 885 | */ |
| 863 | insert_inode_item_key(root); | 886 | insert_inode_item_key(root); |
| 864 | insert_extent(root, 4096, 4096, 4096, 0, 4096, 4096, | 887 | insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize, |
| 865 | BTRFS_FILE_EXTENT_REG, 0, 1); | 888 | sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1); |
| 866 | em = btrfs_get_extent(inode, NULL, 0, 0, 8192, 0); | 889 | em = btrfs_get_extent(inode, NULL, 0, 0, 2 * sectorsize, 0); |
| 867 | if (IS_ERR(em)) { | 890 | if (IS_ERR(em)) { |
| 868 | test_msg("Got an error when we shouldn't have\n"); | 891 | test_msg("Got an error when we shouldn't have\n"); |
| 869 | goto out; | 892 | goto out; |
| @@ -872,9 +895,10 @@ static int test_hole_first(void) | |||
| 872 | test_msg("Expected a hole, got %llu\n", em->block_start); | 895 | test_msg("Expected a hole, got %llu\n", em->block_start); |
| 873 | goto out; | 896 | goto out; |
| 874 | } | 897 | } |
| 875 | if (em->start != 0 || em->len != 4096) { | 898 | if (em->start != 0 || em->len != sectorsize) { |
| 876 | test_msg("Unexpected extent wanted start 0 len 4096, got start " | 899 | test_msg("Unexpected extent wanted start 0 len %u, " |
| 877 | "%llu len %llu\n", em->start, em->len); | 900 | "got start %llu len %llu\n", |
| 901 | sectorsize, em->start, em->len); | ||
| 878 | goto out; | 902 | goto out; |
| 879 | } | 903 | } |
| 880 | if (em->flags != vacancy_only) { | 904 | if (em->flags != vacancy_only) { |
| @@ -884,18 +908,19 @@ static int test_hole_first(void) | |||
| 884 | } | 908 | } |
| 885 | free_extent_map(em); | 909 | free_extent_map(em); |
| 886 | 910 | ||
| 887 | em = btrfs_get_extent(inode, NULL, 0, 4096, 8192, 0); | 911 | em = btrfs_get_extent(inode, NULL, 0, sectorsize, 2 * sectorsize, 0); |
| 888 | if (IS_ERR(em)) { | 912 | if (IS_ERR(em)) { |
| 889 | test_msg("Got an error when we shouldn't have\n"); | 913 | test_msg("Got an error when we shouldn't have\n"); |
| 890 | goto out; | 914 | goto out; |
| 891 | } | 915 | } |
| 892 | if (em->block_start != 4096) { | 916 | if (em->block_start != sectorsize) { |
| 893 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 917 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 894 | goto out; | 918 | goto out; |
| 895 | } | 919 | } |
| 896 | if (em->start != 4096 || em->len != 4096) { | 920 | if (em->start != sectorsize || em->len != sectorsize) { |
| 897 | test_msg("Unexpected extent wanted start 4096 len 4096, got " | 921 | test_msg("Unexpected extent wanted start %u len %u, " |
| 898 | "start %llu len %llu\n", em->start, em->len); | 922 | "got start %llu len %llu\n", |
| 923 | sectorsize, sectorsize, em->start, em->len); | ||
| 899 | goto out; | 924 | goto out; |
| 900 | } | 925 | } |
| 901 | if (em->flags != 0) { | 926 | if (em->flags != 0) { |
| @@ -912,7 +937,7 @@ out: | |||
| 912 | return ret; | 937 | return ret; |
| 913 | } | 938 | } |
| 914 | 939 | ||
| 915 | static int test_extent_accounting(void) | 940 | static int test_extent_accounting(u32 sectorsize, u32 nodesize) |
| 916 | { | 941 | { |
| 917 | struct inode *inode = NULL; | 942 | struct inode *inode = NULL; |
| 918 | struct btrfs_root *root = NULL; | 943 | struct btrfs_root *root = NULL; |
| @@ -924,7 +949,7 @@ static int test_extent_accounting(void) | |||
| 924 | return ret; | 949 | return ret; |
| 925 | } | 950 | } |
| 926 | 951 | ||
| 927 | root = btrfs_alloc_dummy_root(); | 952 | root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 928 | if (IS_ERR(root)) { | 953 | if (IS_ERR(root)) { |
| 929 | test_msg("Couldn't allocate root\n"); | 954 | test_msg("Couldn't allocate root\n"); |
| 930 | goto out; | 955 | goto out; |
| @@ -954,10 +979,11 @@ static int test_extent_accounting(void) | |||
| 954 | goto out; | 979 | goto out; |
| 955 | } | 980 | } |
| 956 | 981 | ||
| 957 | /* [BTRFS_MAX_EXTENT_SIZE][4k] */ | 982 | /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */ |
| 958 | BTRFS_I(inode)->outstanding_extents++; | 983 | BTRFS_I(inode)->outstanding_extents++; |
| 959 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE, | 984 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE, |
| 960 | BTRFS_MAX_EXTENT_SIZE + 4095, NULL); | 985 | BTRFS_MAX_EXTENT_SIZE + sectorsize - 1, |
| 986 | NULL); | ||
| 961 | if (ret) { | 987 | if (ret) { |
| 962 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | 988 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); |
| 963 | goto out; | 989 | goto out; |
| @@ -969,10 +995,10 @@ static int test_extent_accounting(void) | |||
| 969 | goto out; | 995 | goto out; |
| 970 | } | 996 | } |
| 971 | 997 | ||
| 972 | /* [BTRFS_MAX_EXTENT_SIZE/2][4K HOLE][the rest] */ | 998 | /* [BTRFS_MAX_EXTENT_SIZE/2][sectorsize HOLE][the rest] */ |
| 973 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, | 999 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, |
| 974 | BTRFS_MAX_EXTENT_SIZE >> 1, | 1000 | BTRFS_MAX_EXTENT_SIZE >> 1, |
| 975 | (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, | 1001 | (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1, |
| 976 | EXTENT_DELALLOC | EXTENT_DIRTY | | 1002 | EXTENT_DELALLOC | EXTENT_DIRTY | |
| 977 | EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0, | 1003 | EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0, |
| 978 | NULL, GFP_KERNEL); | 1004 | NULL, GFP_KERNEL); |
| @@ -987,10 +1013,11 @@ static int test_extent_accounting(void) | |||
| 987 | goto out; | 1013 | goto out; |
| 988 | } | 1014 | } |
| 989 | 1015 | ||
| 990 | /* [BTRFS_MAX_EXTENT_SIZE][4K] */ | 1016 | /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */ |
| 991 | BTRFS_I(inode)->outstanding_extents++; | 1017 | BTRFS_I(inode)->outstanding_extents++; |
| 992 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1, | 1018 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1, |
| 993 | (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, | 1019 | (BTRFS_MAX_EXTENT_SIZE >> 1) |
| 1020 | + sectorsize - 1, | ||
| 994 | NULL); | 1021 | NULL); |
| 995 | if (ret) { | 1022 | if (ret) { |
| 996 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | 1023 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); |
| @@ -1004,16 +1031,17 @@ static int test_extent_accounting(void) | |||
| 1004 | } | 1031 | } |
| 1005 | 1032 | ||
| 1006 | /* | 1033 | /* |
| 1007 | * [BTRFS_MAX_EXTENT_SIZE+4K][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4K] | 1034 | * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize] |
| 1008 | * | 1035 | * |
| 1009 | * I'm artificially adding 2 to outstanding_extents because in the | 1036 | * I'm artificially adding 2 to outstanding_extents because in the |
| 1010 | * buffered IO case we'd add things up as we go, but I don't feel like | 1037 | * buffered IO case we'd add things up as we go, but I don't feel like |
| 1011 | * doing that here, this isn't the interesting case we want to test. | 1038 | * doing that here, this isn't the interesting case we want to test. |
| 1012 | */ | 1039 | */ |
| 1013 | BTRFS_I(inode)->outstanding_extents += 2; | 1040 | BTRFS_I(inode)->outstanding_extents += 2; |
| 1014 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 8192, | 1041 | ret = btrfs_set_extent_delalloc(inode, |
| 1015 | (BTRFS_MAX_EXTENT_SIZE << 1) + 12287, | 1042 | BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize, |
| 1016 | NULL); | 1043 | (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1, |
| 1044 | NULL); | ||
| 1017 | if (ret) { | 1045 | if (ret) { |
| 1018 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | 1046 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); |
| 1019 | goto out; | 1047 | goto out; |
| @@ -1025,10 +1053,13 @@ static int test_extent_accounting(void) | |||
| 1025 | goto out; | 1053 | goto out; |
| 1026 | } | 1054 | } |
| 1027 | 1055 | ||
| 1028 | /* [BTRFS_MAX_EXTENT_SIZE+4k][4k][BTRFS_MAX_EXTENT_SIZE+4k] */ | 1056 | /* |
| 1057 | * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize][BTRFS_MAX_EXTENT_SIZE+sectorsize] | ||
| 1058 | */ | ||
| 1029 | BTRFS_I(inode)->outstanding_extents++; | 1059 | BTRFS_I(inode)->outstanding_extents++; |
| 1030 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, | 1060 | ret = btrfs_set_extent_delalloc(inode, |
| 1031 | BTRFS_MAX_EXTENT_SIZE+8191, NULL); | 1061 | BTRFS_MAX_EXTENT_SIZE + sectorsize, |
| 1062 | BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL); | ||
| 1032 | if (ret) { | 1063 | if (ret) { |
| 1033 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | 1064 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); |
| 1034 | goto out; | 1065 | goto out; |
| @@ -1042,8 +1073,8 @@ static int test_extent_accounting(void) | |||
| 1042 | 1073 | ||
| 1043 | /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */ | 1074 | /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */ |
| 1044 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, | 1075 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, |
| 1045 | BTRFS_MAX_EXTENT_SIZE+4096, | 1076 | BTRFS_MAX_EXTENT_SIZE + sectorsize, |
| 1046 | BTRFS_MAX_EXTENT_SIZE+8191, | 1077 | BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, |
| 1047 | EXTENT_DIRTY | EXTENT_DELALLOC | | 1078 | EXTENT_DIRTY | EXTENT_DELALLOC | |
| 1048 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, | 1079 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, |
| 1049 | NULL, GFP_KERNEL); | 1080 | NULL, GFP_KERNEL); |
| @@ -1063,8 +1094,9 @@ static int test_extent_accounting(void) | |||
| 1063 | * might fail and I'd rather satisfy my paranoia at this point. | 1094 | * might fail and I'd rather satisfy my paranoia at this point. |
| 1064 | */ | 1095 | */ |
| 1065 | BTRFS_I(inode)->outstanding_extents++; | 1096 | BTRFS_I(inode)->outstanding_extents++; |
| 1066 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, | 1097 | ret = btrfs_set_extent_delalloc(inode, |
| 1067 | BTRFS_MAX_EXTENT_SIZE+8191, NULL); | 1098 | BTRFS_MAX_EXTENT_SIZE + sectorsize, |
| 1099 | BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL); | ||
| 1068 | if (ret) { | 1100 | if (ret) { |
| 1069 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | 1101 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); |
| 1070 | goto out; | 1102 | goto out; |
| @@ -1103,7 +1135,7 @@ out: | |||
| 1103 | return ret; | 1135 | return ret; |
| 1104 | } | 1136 | } |
| 1105 | 1137 | ||
| 1106 | int btrfs_test_inodes(void) | 1138 | int btrfs_test_inodes(u32 sectorsize, u32 nodesize) |
| 1107 | { | 1139 | { |
| 1108 | int ret; | 1140 | int ret; |
| 1109 | 1141 | ||
| @@ -1112,13 +1144,13 @@ int btrfs_test_inodes(void) | |||
| 1112 | set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only); | 1144 | set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only); |
| 1113 | 1145 | ||
| 1114 | test_msg("Running btrfs_get_extent tests\n"); | 1146 | test_msg("Running btrfs_get_extent tests\n"); |
| 1115 | ret = test_btrfs_get_extent(); | 1147 | ret = test_btrfs_get_extent(sectorsize, nodesize); |
| 1116 | if (ret) | 1148 | if (ret) |
| 1117 | return ret; | 1149 | return ret; |
| 1118 | test_msg("Running hole first btrfs_get_extent test\n"); | 1150 | test_msg("Running hole first btrfs_get_extent test\n"); |
| 1119 | ret = test_hole_first(); | 1151 | ret = test_hole_first(sectorsize, nodesize); |
| 1120 | if (ret) | 1152 | if (ret) |
| 1121 | return ret; | 1153 | return ret; |
| 1122 | test_msg("Running outstanding_extents tests\n"); | 1154 | test_msg("Running outstanding_extents tests\n"); |
| 1123 | return test_extent_accounting(); | 1155 | return test_extent_accounting(sectorsize, nodesize); |
| 1124 | } | 1156 | } |
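The inode test hunks above replace the hardcoded 4096/8192/12288 byte counts with multiples of the sector size passed in by the caller, so the same assertions hold on subpage and large-sector configurations. As a rough standalone sketch (plain C, not the kernel test code; the geometries below are examples), the inclusive-end arithmetic the updated delalloc ranges rely on is simply start + n * sectorsize - 1:

#include <stdint.h>
#include <stdio.h>

/* Inclusive end of an n-sector range starting at 'start'; this is the
 * arithmetic used in place of literals such as BTRFS_MAX_EXTENT_SIZE + 4095. */
static uint64_t range_end(uint64_t start, uint32_t sectorsize, unsigned int nsectors)
{
	return start + (uint64_t)nsectors * sectorsize - 1;
}

int main(void)
{
	/* Example geometries; the real harness derives these from the page size. */
	static const uint32_t sectorsizes[] = { 4096, 16384, 65536 };
	unsigned int i;

	for (i = 0; i < sizeof(sectorsizes) / sizeof(sectorsizes[0]); i++) {
		uint32_t ss = sectorsizes[i];

		printf("sectorsize=%u: one sector ends at %llu, three sectors at %llu\n",
		       ss,
		       (unsigned long long)range_end(0, ss, 1),
		       (unsigned long long)range_end(0, ss, 3));
	}
	return 0;
}

With a 4096-byte sector this reproduces the old literals (4095, 8191, 12287), which is why the substitution in the hunks is mechanical.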
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c index 8aa4ded31326..57a12c0d680b 100644 --- a/fs/btrfs/tests/qgroup-tests.c +++ b/fs/btrfs/tests/qgroup-tests.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | * Boston, MA 021110-1307, USA. | 16 | * Boston, MA 021110-1307, USA. |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #include <linux/types.h> | ||
| 19 | #include "btrfs-tests.h" | 20 | #include "btrfs-tests.h" |
| 20 | #include "../ctree.h" | 21 | #include "../ctree.h" |
| 21 | #include "../transaction.h" | 22 | #include "../transaction.h" |
| @@ -216,7 +217,8 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr, | |||
| 216 | return ret; | 217 | return ret; |
| 217 | } | 218 | } |
| 218 | 219 | ||
| 219 | static int test_no_shared_qgroup(struct btrfs_root *root) | 220 | static int test_no_shared_qgroup(struct btrfs_root *root, |
| 221 | u32 sectorsize, u32 nodesize) | ||
| 220 | { | 222 | { |
| 221 | struct btrfs_trans_handle trans; | 223 | struct btrfs_trans_handle trans; |
| 222 | struct btrfs_fs_info *fs_info = root->fs_info; | 224 | struct btrfs_fs_info *fs_info = root->fs_info; |
| @@ -227,7 +229,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root) | |||
| 227 | btrfs_init_dummy_trans(&trans); | 229 | btrfs_init_dummy_trans(&trans); |
| 228 | 230 | ||
| 229 | test_msg("Qgroup basic add\n"); | 231 | test_msg("Qgroup basic add\n"); |
| 230 | ret = btrfs_create_qgroup(NULL, fs_info, 5); | 232 | ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FS_TREE_OBJECTID); |
| 231 | if (ret) { | 233 | if (ret) { |
| 232 | test_msg("Couldn't create a qgroup %d\n", ret); | 234 | test_msg("Couldn't create a qgroup %d\n", ret); |
| 233 | return ret; | 235 | return ret; |
| @@ -238,18 +240,19 @@ static int test_no_shared_qgroup(struct btrfs_root *root) | |||
| 238 | * we can only call btrfs_qgroup_account_extent() directly to test | 240 | * we can only call btrfs_qgroup_account_extent() directly to test |
| 239 | * quota. | 241 | * quota. |
| 240 | */ | 242 | */ |
| 241 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); | 243 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots); |
| 242 | if (ret) { | 244 | if (ret) { |
| 243 | ulist_free(old_roots); | 245 | ulist_free(old_roots); |
| 244 | test_msg("Couldn't find old roots: %d\n", ret); | 246 | test_msg("Couldn't find old roots: %d\n", ret); |
| 245 | return ret; | 247 | return ret; |
| 246 | } | 248 | } |
| 247 | 249 | ||
| 248 | ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5); | 250 | ret = insert_normal_tree_ref(root, nodesize, nodesize, 0, |
| 251 | BTRFS_FS_TREE_OBJECTID); | ||
| 249 | if (ret) | 252 | if (ret) |
| 250 | return ret; | 253 | return ret; |
| 251 | 254 | ||
| 252 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); | 255 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots); |
| 253 | if (ret) { | 256 | if (ret) { |
| 254 | ulist_free(old_roots); | 257 | ulist_free(old_roots); |
| 255 | ulist_free(new_roots); | 258 | ulist_free(new_roots); |
| @@ -257,32 +260,33 @@ static int test_no_shared_qgroup(struct btrfs_root *root) | |||
| 257 | return ret; | 260 | return ret; |
| 258 | } | 261 | } |
| 259 | 262 | ||
| 260 | ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, | 263 | ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize, |
| 261 | old_roots, new_roots); | 264 | nodesize, old_roots, new_roots); |
| 262 | if (ret) { | 265 | if (ret) { |
| 263 | test_msg("Couldn't account space for a qgroup %d\n", ret); | 266 | test_msg("Couldn't account space for a qgroup %d\n", ret); |
| 264 | return ret; | 267 | return ret; |
| 265 | } | 268 | } |
| 266 | 269 | ||
| 267 | if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) { | 270 | if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, |
| 271 | nodesize, nodesize)) { | ||
| 268 | test_msg("Qgroup counts didn't match expected values\n"); | 272 | test_msg("Qgroup counts didn't match expected values\n"); |
| 269 | return -EINVAL; | 273 | return -EINVAL; |
| 270 | } | 274 | } |
| 271 | old_roots = NULL; | 275 | old_roots = NULL; |
| 272 | new_roots = NULL; | 276 | new_roots = NULL; |
| 273 | 277 | ||
| 274 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); | 278 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots); |
| 275 | if (ret) { | 279 | if (ret) { |
| 276 | ulist_free(old_roots); | 280 | ulist_free(old_roots); |
| 277 | test_msg("Couldn't find old roots: %d\n", ret); | 281 | test_msg("Couldn't find old roots: %d\n", ret); |
| 278 | return ret; | 282 | return ret; |
| 279 | } | 283 | } |
| 280 | 284 | ||
| 281 | ret = remove_extent_item(root, 4096, 4096); | 285 | ret = remove_extent_item(root, nodesize, nodesize); |
| 282 | if (ret) | 286 | if (ret) |
| 283 | return -EINVAL; | 287 | return -EINVAL; |
| 284 | 288 | ||
| 285 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); | 289 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots); |
| 286 | if (ret) { | 290 | if (ret) { |
| 287 | ulist_free(old_roots); | 291 | ulist_free(old_roots); |
| 288 | ulist_free(new_roots); | 292 | ulist_free(new_roots); |
| @@ -290,14 +294,14 @@ static int test_no_shared_qgroup(struct btrfs_root *root) | |||
| 290 | return ret; | 294 | return ret; |
| 291 | } | 295 | } |
| 292 | 296 | ||
| 293 | ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, | 297 | ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize, |
| 294 | old_roots, new_roots); | 298 | nodesize, old_roots, new_roots); |
| 295 | if (ret) { | 299 | if (ret) { |
| 296 | test_msg("Couldn't account space for a qgroup %d\n", ret); | 300 | test_msg("Couldn't account space for a qgroup %d\n", ret); |
| 297 | return -EINVAL; | 301 | return -EINVAL; |
| 298 | } | 302 | } |
| 299 | 303 | ||
| 300 | if (btrfs_verify_qgroup_counts(fs_info, 5, 0, 0)) { | 304 | if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, 0, 0)) { |
| 301 | test_msg("Qgroup counts didn't match expected values\n"); | 305 | test_msg("Qgroup counts didn't match expected values\n"); |
| 302 | return -EINVAL; | 306 | return -EINVAL; |
| 303 | } | 307 | } |
| @@ -310,7 +314,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root) | |||
| 310 | * right, also remove one of the roots and make sure the exclusive count is | 314 | * right, also remove one of the roots and make sure the exclusive count is |
| 311 | * adjusted properly. | 315 | * adjusted properly. |
| 312 | */ | 316 | */ |
| 313 | static int test_multiple_refs(struct btrfs_root *root) | 317 | static int test_multiple_refs(struct btrfs_root *root, |
| 318 | u32 sectorsize, u32 nodesize) | ||
| 314 | { | 319 | { |
| 315 | struct btrfs_trans_handle trans; | 320 | struct btrfs_trans_handle trans; |
| 316 | struct btrfs_fs_info *fs_info = root->fs_info; | 321 | struct btrfs_fs_info *fs_info = root->fs_info; |
| @@ -322,25 +327,29 @@ static int test_multiple_refs(struct btrfs_root *root) | |||
| 322 | 327 | ||
| 323 | test_msg("Qgroup multiple refs test\n"); | 328 | test_msg("Qgroup multiple refs test\n"); |
| 324 | 329 | ||
| 325 | /* We have 5 created already from the previous test */ | 330 | /* |
| 326 | ret = btrfs_create_qgroup(NULL, fs_info, 256); | 331 | * We have BTRFS_FS_TREE_OBJECTID created already from the |
| 332 | * previous test. | ||
| 333 | */ | ||
| 334 | ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FIRST_FREE_OBJECTID); | ||
| 327 | if (ret) { | 335 | if (ret) { |
| 328 | test_msg("Couldn't create a qgroup %d\n", ret); | 336 | test_msg("Couldn't create a qgroup %d\n", ret); |
| 329 | return ret; | 337 | return ret; |
| 330 | } | 338 | } |
| 331 | 339 | ||
| 332 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); | 340 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots); |
| 333 | if (ret) { | 341 | if (ret) { |
| 334 | ulist_free(old_roots); | 342 | ulist_free(old_roots); |
| 335 | test_msg("Couldn't find old roots: %d\n", ret); | 343 | test_msg("Couldn't find old roots: %d\n", ret); |
| 336 | return ret; | 344 | return ret; |
| 337 | } | 345 | } |
| 338 | 346 | ||
| 339 | ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5); | 347 | ret = insert_normal_tree_ref(root, nodesize, nodesize, 0, |
| 348 | BTRFS_FS_TREE_OBJECTID); | ||
| 340 | if (ret) | 349 | if (ret) |
| 341 | return ret; | 350 | return ret; |
| 342 | 351 | ||
| 343 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); | 352 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots); |
| 344 | if (ret) { | 353 | if (ret) { |
| 345 | ulist_free(old_roots); | 354 | ulist_free(old_roots); |
| 346 | ulist_free(new_roots); | 355 | ulist_free(new_roots); |
| @@ -348,30 +357,32 @@ static int test_multiple_refs(struct btrfs_root *root) | |||
| 348 | return ret; | 357 | return ret; |
| 349 | } | 358 | } |
| 350 | 359 | ||
| 351 | ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, | 360 | ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize, |
| 352 | old_roots, new_roots); | 361 | nodesize, old_roots, new_roots); |
| 353 | if (ret) { | 362 | if (ret) { |
| 354 | test_msg("Couldn't account space for a qgroup %d\n", ret); | 363 | test_msg("Couldn't account space for a qgroup %d\n", ret); |
| 355 | return ret; | 364 | return ret; |
| 356 | } | 365 | } |
| 357 | 366 | ||
| 358 | if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) { | 367 | if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, |
| 368 | nodesize, nodesize)) { | ||
| 359 | test_msg("Qgroup counts didn't match expected values\n"); | 369 | test_msg("Qgroup counts didn't match expected values\n"); |
| 360 | return -EINVAL; | 370 | return -EINVAL; |
| 361 | } | 371 | } |
| 362 | 372 | ||
| 363 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); | 373 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots); |
| 364 | if (ret) { | 374 | if (ret) { |
| 365 | ulist_free(old_roots); | 375 | ulist_free(old_roots); |
| 366 | test_msg("Couldn't find old roots: %d\n", ret); | 376 | test_msg("Couldn't find old roots: %d\n", ret); |
| 367 | return ret; | 377 | return ret; |
| 368 | } | 378 | } |
| 369 | 379 | ||
| 370 | ret = add_tree_ref(root, 4096, 4096, 0, 256); | 380 | ret = add_tree_ref(root, nodesize, nodesize, 0, |
| 381 | BTRFS_FIRST_FREE_OBJECTID); | ||
| 371 | if (ret) | 382 | if (ret) |
| 372 | return ret; | 383 | return ret; |
| 373 | 384 | ||
| 374 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); | 385 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots); |
| 375 | if (ret) { | 386 | if (ret) { |
| 376 | ulist_free(old_roots); | 387 | ulist_free(old_roots); |
| 377 | ulist_free(new_roots); | 388 | ulist_free(new_roots); |
| @@ -379,35 +390,38 @@ static int test_multiple_refs(struct btrfs_root *root) | |||
| 379 | return ret; | 390 | return ret; |
| 380 | } | 391 | } |
| 381 | 392 | ||
| 382 | ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, | 393 | ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize, |
| 383 | old_roots, new_roots); | 394 | nodesize, old_roots, new_roots); |
| 384 | if (ret) { | 395 | if (ret) { |
| 385 | test_msg("Couldn't account space for a qgroup %d\n", ret); | 396 | test_msg("Couldn't account space for a qgroup %d\n", ret); |
| 386 | return ret; | 397 | return ret; |
| 387 | } | 398 | } |
| 388 | 399 | ||
| 389 | if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 0)) { | 400 | if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, |
| 401 | nodesize, 0)) { | ||
| 390 | test_msg("Qgroup counts didn't match expected values\n"); | 402 | test_msg("Qgroup counts didn't match expected values\n"); |
| 391 | return -EINVAL; | 403 | return -EINVAL; |
| 392 | } | 404 | } |
| 393 | 405 | ||
| 394 | if (btrfs_verify_qgroup_counts(fs_info, 256, 4096, 0)) { | 406 | if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID, |
| 407 | nodesize, 0)) { | ||
| 395 | test_msg("Qgroup counts didn't match expected values\n"); | 408 | test_msg("Qgroup counts didn't match expected values\n"); |
| 396 | return -EINVAL; | 409 | return -EINVAL; |
| 397 | } | 410 | } |
| 398 | 411 | ||
| 399 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); | 412 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots); |
| 400 | if (ret) { | 413 | if (ret) { |
| 401 | ulist_free(old_roots); | 414 | ulist_free(old_roots); |
| 402 | test_msg("Couldn't find old roots: %d\n", ret); | 415 | test_msg("Couldn't find old roots: %d\n", ret); |
| 403 | return ret; | 416 | return ret; |
| 404 | } | 417 | } |
| 405 | 418 | ||
| 406 | ret = remove_extent_ref(root, 4096, 4096, 0, 256); | 419 | ret = remove_extent_ref(root, nodesize, nodesize, 0, |
| 420 | BTRFS_FIRST_FREE_OBJECTID); | ||
| 407 | if (ret) | 421 | if (ret) |
| 408 | return ret; | 422 | return ret; |
| 409 | 423 | ||
| 410 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); | 424 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots); |
| 411 | if (ret) { | 425 | if (ret) { |
| 412 | ulist_free(old_roots); | 426 | ulist_free(old_roots); |
| 413 | ulist_free(new_roots); | 427 | ulist_free(new_roots); |
| @@ -415,19 +429,21 @@ static int test_multiple_refs(struct btrfs_root *root) | |||
| 415 | return ret; | 429 | return ret; |
| 416 | } | 430 | } |
| 417 | 431 | ||
| 418 | ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, | 432 | ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize, |
| 419 | old_roots, new_roots); | 433 | nodesize, old_roots, new_roots); |
| 420 | if (ret) { | 434 | if (ret) { |
| 421 | test_msg("Couldn't account space for a qgroup %d\n", ret); | 435 | test_msg("Couldn't account space for a qgroup %d\n", ret); |
| 422 | return ret; | 436 | return ret; |
| 423 | } | 437 | } |
| 424 | 438 | ||
| 425 | if (btrfs_verify_qgroup_counts(fs_info, 256, 0, 0)) { | 439 | if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID, |
| 440 | 0, 0)) { | ||
| 426 | test_msg("Qgroup counts didn't match expected values\n"); | 441 | test_msg("Qgroup counts didn't match expected values\n"); |
| 427 | return -EINVAL; | 442 | return -EINVAL; |
| 428 | } | 443 | } |
| 429 | 444 | ||
| 430 | if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) { | 445 | if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, |
| 446 | nodesize, nodesize)) { | ||
| 431 | test_msg("Qgroup counts didn't match expected values\n"); | 447 | test_msg("Qgroup counts didn't match expected values\n"); |
| 432 | return -EINVAL; | 448 | return -EINVAL; |
| 433 | } | 449 | } |
| @@ -435,13 +451,13 @@ static int test_multiple_refs(struct btrfs_root *root) | |||
| 435 | return 0; | 451 | return 0; |
| 436 | } | 452 | } |
| 437 | 453 | ||
| 438 | int btrfs_test_qgroups(void) | 454 | int btrfs_test_qgroups(u32 sectorsize, u32 nodesize) |
| 439 | { | 455 | { |
| 440 | struct btrfs_root *root; | 456 | struct btrfs_root *root; |
| 441 | struct btrfs_root *tmp_root; | 457 | struct btrfs_root *tmp_root; |
| 442 | int ret = 0; | 458 | int ret = 0; |
| 443 | 459 | ||
| 444 | root = btrfs_alloc_dummy_root(); | 460 | root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 445 | if (IS_ERR(root)) { | 461 | if (IS_ERR(root)) { |
| 446 | test_msg("Couldn't allocate root\n"); | 462 | test_msg("Couldn't allocate root\n"); |
| 447 | return PTR_ERR(root); | 463 | return PTR_ERR(root); |
| @@ -468,7 +484,8 @@ int btrfs_test_qgroups(void) | |||
| 468 | * Can't use bytenr 0, some things freak out | 484 | * Can't use bytenr 0, some things freak out |
| 469 | * *cough*backref walking code*cough* | 485 | * *cough*backref walking code*cough* |
| 470 | */ | 486 | */ |
| 471 | root->node = alloc_test_extent_buffer(root->fs_info, 4096); | 487 | root->node = alloc_test_extent_buffer(root->fs_info, nodesize, |
| 488 | nodesize); | ||
| 472 | if (!root->node) { | 489 | if (!root->node) { |
| 473 | test_msg("Couldn't allocate dummy buffer\n"); | 490 | test_msg("Couldn't allocate dummy buffer\n"); |
| 474 | ret = -ENOMEM; | 491 | ret = -ENOMEM; |
| @@ -476,16 +493,16 @@ int btrfs_test_qgroups(void) | |||
| 476 | } | 493 | } |
| 477 | btrfs_set_header_level(root->node, 0); | 494 | btrfs_set_header_level(root->node, 0); |
| 478 | btrfs_set_header_nritems(root->node, 0); | 495 | btrfs_set_header_nritems(root->node, 0); |
| 479 | root->alloc_bytenr += 8192; | 496 | root->alloc_bytenr += 2 * nodesize; |
| 480 | 497 | ||
| 481 | tmp_root = btrfs_alloc_dummy_root(); | 498 | tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 482 | if (IS_ERR(tmp_root)) { | 499 | if (IS_ERR(tmp_root)) { |
| 483 | test_msg("Couldn't allocate a fs root\n"); | 500 | test_msg("Couldn't allocate a fs root\n"); |
| 484 | ret = PTR_ERR(tmp_root); | 501 | ret = PTR_ERR(tmp_root); |
| 485 | goto out; | 502 | goto out; |
| 486 | } | 503 | } |
| 487 | 504 | ||
| 488 | tmp_root->root_key.objectid = 5; | 505 | tmp_root->root_key.objectid = BTRFS_FS_TREE_OBJECTID; |
| 489 | root->fs_info->fs_root = tmp_root; | 506 | root->fs_info->fs_root = tmp_root; |
| 490 | ret = btrfs_insert_fs_root(root->fs_info, tmp_root); | 507 | ret = btrfs_insert_fs_root(root->fs_info, tmp_root); |
| 491 | if (ret) { | 508 | if (ret) { |
| @@ -493,14 +510,14 @@ int btrfs_test_qgroups(void) | |||
| 493 | goto out; | 510 | goto out; |
| 494 | } | 511 | } |
| 495 | 512 | ||
| 496 | tmp_root = btrfs_alloc_dummy_root(); | 513 | tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 497 | if (IS_ERR(tmp_root)) { | 514 | if (IS_ERR(tmp_root)) { |
| 498 | test_msg("Couldn't allocate a fs root\n"); | 515 | test_msg("Couldn't allocate a fs root\n"); |
| 499 | ret = PTR_ERR(tmp_root); | 516 | ret = PTR_ERR(tmp_root); |
| 500 | goto out; | 517 | goto out; |
| 501 | } | 518 | } |
| 502 | 519 | ||
| 503 | tmp_root->root_key.objectid = 256; | 520 | tmp_root->root_key.objectid = BTRFS_FIRST_FREE_OBJECTID; |
| 504 | ret = btrfs_insert_fs_root(root->fs_info, tmp_root); | 521 | ret = btrfs_insert_fs_root(root->fs_info, tmp_root); |
| 505 | if (ret) { | 522 | if (ret) { |
| 506 | test_msg("Couldn't insert fs root %d\n", ret); | 523 | test_msg("Couldn't insert fs root %d\n", ret); |
| @@ -508,10 +525,10 @@ int btrfs_test_qgroups(void) | |||
| 508 | } | 525 | } |
| 509 | 526 | ||
| 510 | test_msg("Running qgroup tests\n"); | 527 | test_msg("Running qgroup tests\n"); |
| 511 | ret = test_no_shared_qgroup(root); | 528 | ret = test_no_shared_qgroup(root, sectorsize, nodesize); |
| 512 | if (ret) | 529 | if (ret) |
| 513 | goto out; | 530 | goto out; |
| 514 | ret = test_multiple_refs(root); | 531 | ret = test_multiple_refs(root, sectorsize, nodesize); |
| 515 | out: | 532 | out: |
| 516 | btrfs_free_dummy_root(root); | 533 | btrfs_free_dummy_root(root); |
| 517 | return ret; | 534 | return ret; |
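The qgroup hunks above switch the test extents from a fixed 4096 bytes to the metadata nodesize and replace the magic root ids 5 and 256 with the named BTRFS_FS_TREE_OBJECTID and BTRFS_FIRST_FREE_OBJECTID. The expected counts follow a simple rule: a tree block adds nodesize to the referenced bytes of every root that points at it, and to the exclusive bytes only while a single root does. A toy model of that rule (illustrative only; the struct and function names are invented and the nodesize is an example):

#include <stdint.h>
#include <stdio.h>

/* Toy model of the counters the test verifies. */
struct qgroup_counts {
	uint64_t rfer;	/* bytes referenced by this root */
	uint64_t excl;	/* bytes referenced by this root only */
};

/* Account one tree block of 'nodesize' bytes for a root that holds
 * 'this_root_refs' references while 'nr_roots' roots reference it in total. */
static struct qgroup_counts account(uint32_t nodesize, int nr_roots, int this_root_refs)
{
	struct qgroup_counts c = { 0, 0 };

	if (this_root_refs) {
		c.rfer = nodesize;
		c.excl = (nr_roots == 1) ? nodesize : 0;
	}
	return c;
}

int main(void)
{
	uint32_t nodesize = 16384;	/* example geometry */
	struct qgroup_counts fs_tree, first_free;

	/* Single owner: fully exclusive, matching the nodesize/nodesize check. */
	fs_tree = account(nodesize, 1, 1);
	printf("one owner:  rfer=%llu excl=%llu\n",
	       (unsigned long long)fs_tree.rfer, (unsigned long long)fs_tree.excl);

	/* A second root takes a reference: both see it referenced, neither exclusive. */
	fs_tree = account(nodesize, 2, 1);
	first_free = account(nodesize, 2, 1);
	printf("two owners: fs_tree excl=%llu first_free excl=%llu\n",
	       (unsigned long long)fs_tree.excl, (unsigned long long)first_free.excl);
	return 0;
}

That is the progression the test asserts: nodesize/nodesize for one owner, nodesize/0 once the second root adds a reference, and back to nodesize/nodesize after the extra reference is dropped.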
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index f6e24cb423ae..948aa186b353 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
| @@ -818,6 +818,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, | |||
| 818 | { | 818 | { |
| 819 | struct btrfs_transaction *cur_trans = trans->transaction; | 819 | struct btrfs_transaction *cur_trans = trans->transaction; |
| 820 | struct btrfs_fs_info *info = root->fs_info; | 820 | struct btrfs_fs_info *info = root->fs_info; |
| 821 | u64 transid = trans->transid; | ||
| 821 | unsigned long cur = trans->delayed_ref_updates; | 822 | unsigned long cur = trans->delayed_ref_updates; |
| 822 | int lock = (trans->type != TRANS_JOIN_NOLOCK); | 823 | int lock = (trans->type != TRANS_JOIN_NOLOCK); |
| 823 | int err = 0; | 824 | int err = 0; |
| @@ -905,7 +906,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, | |||
| 905 | 906 | ||
| 906 | kmem_cache_free(btrfs_trans_handle_cachep, trans); | 907 | kmem_cache_free(btrfs_trans_handle_cachep, trans); |
| 907 | if (must_run_delayed_refs) { | 908 | if (must_run_delayed_refs) { |
| 908 | btrfs_async_run_delayed_refs(root, cur, | 909 | btrfs_async_run_delayed_refs(root, cur, transid, |
| 909 | must_run_delayed_refs == 1); | 910 | must_run_delayed_refs == 1); |
| 910 | } | 911 | } |
| 911 | return err; | 912 | return err; |
| @@ -1311,11 +1312,6 @@ int btrfs_defrag_root(struct btrfs_root *root) | |||
| 1311 | return ret; | 1312 | return ret; |
| 1312 | } | 1313 | } |
| 1313 | 1314 | ||
| 1314 | /* Bisesctability fixup, remove in 4.8 */ | ||
| 1315 | #ifndef btrfs_std_error | ||
| 1316 | #define btrfs_std_error btrfs_handle_fs_error | ||
| 1317 | #endif | ||
| 1318 | |||
| 1319 | /* | 1315 | /* |
| 1320 | * Do all special snapshot related qgroup dirty hack. | 1316 | * Do all special snapshot related qgroup dirty hack. |
| 1321 | * | 1317 | * |
| @@ -1385,7 +1381,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans, | |||
| 1385 | switch_commit_roots(trans->transaction, fs_info); | 1381 | switch_commit_roots(trans->transaction, fs_info); |
| 1386 | ret = btrfs_write_and_wait_transaction(trans, src); | 1382 | ret = btrfs_write_and_wait_transaction(trans, src); |
| 1387 | if (ret) | 1383 | if (ret) |
| 1388 | btrfs_std_error(fs_info, ret, | 1384 | btrfs_handle_fs_error(fs_info, ret, |
| 1389 | "Error while writing out transaction for qgroup"); | 1385 | "Error while writing out transaction for qgroup"); |
| 1390 | 1386 | ||
| 1391 | out: | 1387 | out: |
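The transaction.c change above copies trans->transid into a local before the handle is returned to the slab cache, so the later btrfs_async_run_delayed_refs() call no longer reads freed memory. A minimal standalone illustration of that pattern (plain C, with malloc/free standing in for the kmem cache and invented names):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Cut-down stand-in for the transaction handle; only the fields needed
 * to show the pattern are included. */
struct handle {
	uint64_t transid;
	unsigned long delayed_ref_updates;
};

static void async_run_delayed_refs(unsigned long count, uint64_t transid)
{
	printf("running %lu delayed refs for transaction %llu\n",
	       count, (unsigned long long)transid);
}

static void end_transaction(struct handle *trans, int must_run_delayed_refs)
{
	/* Copy everything still needed while the handle is valid... */
	uint64_t transid = trans->transid;
	unsigned long cur = trans->delayed_ref_updates;

	free(trans);	/* ...because after this point it must not be touched. */

	if (must_run_delayed_refs)
		async_run_delayed_refs(cur, transid);
}

int main(void)
{
	struct handle *trans = malloc(sizeof(*trans));

	if (!trans)
		return 1;
	trans->transid = 42;
	trans->delayed_ref_updates = 3;
	end_transaction(trans, 1);
	return 0;
}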
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 9fe0ec2bf0fe..c5abee4f01ad 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h | |||
| @@ -110,7 +110,6 @@ struct btrfs_trans_handle { | |||
| 110 | u64 chunk_bytes_reserved; | 110 | u64 chunk_bytes_reserved; |
| 111 | unsigned long use_count; | 111 | unsigned long use_count; |
| 112 | unsigned long blocks_reserved; | 112 | unsigned long blocks_reserved; |
| 113 | unsigned long blocks_used; | ||
| 114 | unsigned long delayed_ref_updates; | 113 | unsigned long delayed_ref_updates; |
| 115 | struct btrfs_transaction *transaction; | 114 | struct btrfs_transaction *transaction; |
| 116 | struct btrfs_block_rsv *block_rsv; | 115 | struct btrfs_block_rsv *block_rsv; |
| @@ -121,6 +120,7 @@ struct btrfs_trans_handle { | |||
| 121 | bool can_flush_pending_bgs; | 120 | bool can_flush_pending_bgs; |
| 122 | bool reloc_reserved; | 121 | bool reloc_reserved; |
| 123 | bool sync; | 122 | bool sync; |
| 123 | bool dirty; | ||
| 124 | unsigned int type; | 124 | unsigned int type; |
| 125 | /* | 125 | /* |
| 126 | * this root is only needed to validate that the root passed to | 126 | * this root is only needed to validate that the root passed to |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index b7665af471d8..c05f69a8ec42 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
| @@ -2422,8 +2422,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, | |||
| 2422 | root_owner = btrfs_header_owner(parent); | 2422 | root_owner = btrfs_header_owner(parent); |
| 2423 | 2423 | ||
| 2424 | next = btrfs_find_create_tree_block(root, bytenr); | 2424 | next = btrfs_find_create_tree_block(root, bytenr); |
| 2425 | if (!next) | 2425 | if (IS_ERR(next)) |
| 2426 | return -ENOMEM; | 2426 | return PTR_ERR(next); |
| 2427 | 2427 | ||
| 2428 | if (*level == 1) { | 2428 | if (*level == 1) { |
| 2429 | ret = wc->process_func(root, next, wc, ptr_gen); | 2429 | ret = wc->process_func(root, next, wc, ptr_gen); |
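The tree-log.c hunk reflects btrfs_find_create_tree_block() switching from returning NULL on failure to returning an ERR_PTR-encoded errno, so the caller propagates PTR_ERR(next) instead of assuming -ENOMEM. A simplified standalone sketch of that calling convention (the helpers below are cut-down stand-ins for the kernel's linux/err.h macros, and the lookup function is invented):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for ERR_PTR()/PTR_ERR()/IS_ERR(): an errno in [-4095, -1]
 * is carried in the top of the pointer space instead of returning NULL. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
	return (void *)(intptr_t)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)(intptr_t)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* A lookup that used to return NULL on failure and now reports the
 * specific error through the returned pointer. */
static void *find_create_block(int fail)
{
	static int block;

	return fail ? ERR_PTR(-ENOMEM) : (void *)&block;
}

int main(void)
{
	void *next = find_create_block(1);

	/* The caller changes from "if (!next) return -ENOMEM;" to: */
	if (IS_ERR(next)) {
		printf("lookup failed: %ld\n", PTR_ERR(next));
		return 1;
	}
	return 0;
}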
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index bdc62561ede8..589f128173b1 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -2761,6 +2761,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, | |||
| 2761 | u64 dev_extent_len = 0; | 2761 | u64 dev_extent_len = 0; |
| 2762 | u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; | 2762 | u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID; |
| 2763 | int i, ret = 0; | 2763 | int i, ret = 0; |
| 2764 | struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; | ||
| 2764 | 2765 | ||
| 2765 | /* Just in case */ | 2766 | /* Just in case */ |
| 2766 | root = root->fs_info->chunk_root; | 2767 | root = root->fs_info->chunk_root; |
| @@ -2787,12 +2788,19 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, | |||
| 2787 | check_system_chunk(trans, extent_root, map->type); | 2788 | check_system_chunk(trans, extent_root, map->type); |
| 2788 | unlock_chunks(root->fs_info->chunk_root); | 2789 | unlock_chunks(root->fs_info->chunk_root); |
| 2789 | 2790 | ||
| 2791 | /* | ||
| 2792 | * Take the device list mutex to prevent races with the final phase of | ||
| 2793 | * a device replace operation that replaces the device object associated | ||
| 2794 | * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()). | ||
| 2795 | */ | ||
| 2796 | mutex_lock(&fs_devices->device_list_mutex); | ||
| 2790 | for (i = 0; i < map->num_stripes; i++) { | 2797 | for (i = 0; i < map->num_stripes; i++) { |
| 2791 | struct btrfs_device *device = map->stripes[i].dev; | 2798 | struct btrfs_device *device = map->stripes[i].dev; |
| 2792 | ret = btrfs_free_dev_extent(trans, device, | 2799 | ret = btrfs_free_dev_extent(trans, device, |
| 2793 | map->stripes[i].physical, | 2800 | map->stripes[i].physical, |
| 2794 | &dev_extent_len); | 2801 | &dev_extent_len); |
| 2795 | if (ret) { | 2802 | if (ret) { |
| 2803 | mutex_unlock(&fs_devices->device_list_mutex); | ||
| 2796 | btrfs_abort_transaction(trans, root, ret); | 2804 | btrfs_abort_transaction(trans, root, ret); |
| 2797 | goto out; | 2805 | goto out; |
| 2798 | } | 2806 | } |
| @@ -2811,11 +2819,14 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, | |||
| 2811 | if (map->stripes[i].dev) { | 2819 | if (map->stripes[i].dev) { |
| 2812 | ret = btrfs_update_device(trans, map->stripes[i].dev); | 2820 | ret = btrfs_update_device(trans, map->stripes[i].dev); |
| 2813 | if (ret) { | 2821 | if (ret) { |
| 2822 | mutex_unlock(&fs_devices->device_list_mutex); | ||
| 2814 | btrfs_abort_transaction(trans, root, ret); | 2823 | btrfs_abort_transaction(trans, root, ret); |
| 2815 | goto out; | 2824 | goto out; |
| 2816 | } | 2825 | } |
| 2817 | } | 2826 | } |
| 2818 | } | 2827 | } |
| 2828 | mutex_unlock(&fs_devices->device_list_mutex); | ||
| 2829 | |||
| 2819 | ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset); | 2830 | ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset); |
| 2820 | if (ret) { | 2831 | if (ret) { |
| 2821 | btrfs_abort_transaction(trans, root, ret); | 2832 | btrfs_abort_transaction(trans, root, ret); |
| @@ -4230,6 +4241,7 @@ int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) | |||
| 4230 | if (IS_ERR(uuid_root)) { | 4241 | if (IS_ERR(uuid_root)) { |
| 4231 | ret = PTR_ERR(uuid_root); | 4242 | ret = PTR_ERR(uuid_root); |
| 4232 | btrfs_abort_transaction(trans, tree_root, ret); | 4243 | btrfs_abort_transaction(trans, tree_root, ret); |
| 4244 | btrfs_end_transaction(trans, tree_root); | ||
| 4233 | return ret; | 4245 | return ret; |
| 4234 | } | 4246 | } |
| 4235 | 4247 | ||
| @@ -4682,12 +4694,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | |||
| 4682 | 4694 | ||
| 4683 | if (type & BTRFS_BLOCK_GROUP_RAID5) { | 4695 | if (type & BTRFS_BLOCK_GROUP_RAID5) { |
| 4684 | raid_stripe_len = find_raid56_stripe_len(ndevs - 1, | 4696 | raid_stripe_len = find_raid56_stripe_len(ndevs - 1, |
| 4685 | btrfs_super_stripesize(info->super_copy)); | 4697 | extent_root->stripesize); |
| 4686 | data_stripes = num_stripes - 1; | 4698 | data_stripes = num_stripes - 1; |
| 4687 | } | 4699 | } |
| 4688 | if (type & BTRFS_BLOCK_GROUP_RAID6) { | 4700 | if (type & BTRFS_BLOCK_GROUP_RAID6) { |
| 4689 | raid_stripe_len = find_raid56_stripe_len(ndevs - 2, | 4701 | raid_stripe_len = find_raid56_stripe_len(ndevs - 2, |
| 4690 | btrfs_super_stripesize(info->super_copy)); | 4702 | extent_root->stripesize); |
| 4691 | data_stripes = num_stripes - 2; | 4703 | data_stripes = num_stripes - 2; |
| 4692 | } | 4704 | } |
| 4693 | 4705 | ||
| @@ -5762,20 +5774,17 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, | |||
| 5762 | } | 5774 | } |
| 5763 | } | 5775 | } |
| 5764 | if (found) { | 5776 | if (found) { |
| 5765 | if (physical_of_found + map->stripe_len <= | 5777 | struct btrfs_bio_stripe *tgtdev_stripe = |
| 5766 | dev_replace->cursor_left) { | 5778 | bbio->stripes + num_stripes; |
| 5767 | struct btrfs_bio_stripe *tgtdev_stripe = | ||
| 5768 | bbio->stripes + num_stripes; | ||
| 5769 | 5779 | ||
| 5770 | tgtdev_stripe->physical = physical_of_found; | 5780 | tgtdev_stripe->physical = physical_of_found; |
| 5771 | tgtdev_stripe->length = | 5781 | tgtdev_stripe->length = |
| 5772 | bbio->stripes[index_srcdev].length; | 5782 | bbio->stripes[index_srcdev].length; |
| 5773 | tgtdev_stripe->dev = dev_replace->tgtdev; | 5783 | tgtdev_stripe->dev = dev_replace->tgtdev; |
| 5774 | bbio->tgtdev_map[index_srcdev] = num_stripes; | 5784 | bbio->tgtdev_map[index_srcdev] = num_stripes; |
| 5775 | 5785 | ||
| 5776 | tgtdev_indexes++; | 5786 | tgtdev_indexes++; |
| 5777 | num_stripes++; | 5787 | num_stripes++; |
| 5778 | } | ||
| 5779 | } | 5788 | } |
| 5780 | } | 5789 | } |
| 5781 | 5790 | ||
| @@ -6250,27 +6259,23 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, | |||
| 6250 | return dev; | 6259 | return dev; |
| 6251 | } | 6260 | } |
| 6252 | 6261 | ||
| 6253 | static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, | 6262 | /* Return -EIO if any error, otherwise return 0. */ |
| 6254 | struct extent_buffer *leaf, | 6263 | static int btrfs_check_chunk_valid(struct btrfs_root *root, |
| 6255 | struct btrfs_chunk *chunk) | 6264 | struct extent_buffer *leaf, |
| 6265 | struct btrfs_chunk *chunk, u64 logical) | ||
| 6256 | { | 6266 | { |
| 6257 | struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; | ||
| 6258 | struct map_lookup *map; | ||
| 6259 | struct extent_map *em; | ||
| 6260 | u64 logical; | ||
| 6261 | u64 length; | 6267 | u64 length; |
| 6262 | u64 stripe_len; | 6268 | u64 stripe_len; |
| 6263 | u64 devid; | 6269 | u16 num_stripes; |
| 6264 | u8 uuid[BTRFS_UUID_SIZE]; | 6270 | u16 sub_stripes; |
| 6265 | int num_stripes; | 6271 | u64 type; |
| 6266 | int ret; | ||
| 6267 | int i; | ||
| 6268 | 6272 | ||
| 6269 | logical = key->offset; | ||
| 6270 | length = btrfs_chunk_length(leaf, chunk); | 6273 | length = btrfs_chunk_length(leaf, chunk); |
| 6271 | stripe_len = btrfs_chunk_stripe_len(leaf, chunk); | 6274 | stripe_len = btrfs_chunk_stripe_len(leaf, chunk); |
| 6272 | num_stripes = btrfs_chunk_num_stripes(leaf, chunk); | 6275 | num_stripes = btrfs_chunk_num_stripes(leaf, chunk); |
| 6273 | /* Validation check */ | 6276 | sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); |
| 6277 | type = btrfs_chunk_type(leaf, chunk); | ||
| 6278 | |||
| 6274 | if (!num_stripes) { | 6279 | if (!num_stripes) { |
| 6275 | btrfs_err(root->fs_info, "invalid chunk num_stripes: %u", | 6280 | btrfs_err(root->fs_info, "invalid chunk num_stripes: %u", |
| 6276 | num_stripes); | 6281 | num_stripes); |
| @@ -6281,6 +6286,11 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, | |||
| 6281 | "invalid chunk logical %llu", logical); | 6286 | "invalid chunk logical %llu", logical); |
| 6282 | return -EIO; | 6287 | return -EIO; |
| 6283 | } | 6288 | } |
| 6289 | if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) { | ||
| 6290 | btrfs_err(root->fs_info, "invalid chunk sectorsize %u", | ||
| 6291 | btrfs_chunk_sector_size(leaf, chunk)); | ||
| 6292 | return -EIO; | ||
| 6293 | } | ||
| 6284 | if (!length || !IS_ALIGNED(length, root->sectorsize)) { | 6294 | if (!length || !IS_ALIGNED(length, root->sectorsize)) { |
| 6285 | btrfs_err(root->fs_info, | 6295 | btrfs_err(root->fs_info, |
| 6286 | "invalid chunk length %llu", length); | 6296 | "invalid chunk length %llu", length); |
| @@ -6292,13 +6302,54 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, | |||
| 6292 | return -EIO; | 6302 | return -EIO; |
| 6293 | } | 6303 | } |
| 6294 | if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) & | 6304 | if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) & |
| 6295 | btrfs_chunk_type(leaf, chunk)) { | 6305 | type) { |
| 6296 | btrfs_err(root->fs_info, "unrecognized chunk type: %llu", | 6306 | btrfs_err(root->fs_info, "unrecognized chunk type: %llu", |
| 6297 | ~(BTRFS_BLOCK_GROUP_TYPE_MASK | | 6307 | ~(BTRFS_BLOCK_GROUP_TYPE_MASK | |
| 6298 | BTRFS_BLOCK_GROUP_PROFILE_MASK) & | 6308 | BTRFS_BLOCK_GROUP_PROFILE_MASK) & |
| 6299 | btrfs_chunk_type(leaf, chunk)); | 6309 | btrfs_chunk_type(leaf, chunk)); |
| 6300 | return -EIO; | 6310 | return -EIO; |
| 6301 | } | 6311 | } |
| 6312 | if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) || | ||
| 6313 | (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) || | ||
| 6314 | (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) || | ||
| 6315 | (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) || | ||
| 6316 | (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) || | ||
| 6317 | ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && | ||
| 6318 | num_stripes != 1)) { | ||
| 6319 | btrfs_err(root->fs_info, | ||
| 6320 | "invalid num_stripes:sub_stripes %u:%u for profile %llu", | ||
| 6321 | num_stripes, sub_stripes, | ||
| 6322 | type & BTRFS_BLOCK_GROUP_PROFILE_MASK); | ||
| 6323 | return -EIO; | ||
| 6324 | } | ||
| 6325 | |||
| 6326 | return 0; | ||
| 6327 | } | ||
| 6328 | |||
| 6329 | static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, | ||
| 6330 | struct extent_buffer *leaf, | ||
| 6331 | struct btrfs_chunk *chunk) | ||
| 6332 | { | ||
| 6333 | struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; | ||
| 6334 | struct map_lookup *map; | ||
| 6335 | struct extent_map *em; | ||
| 6336 | u64 logical; | ||
| 6337 | u64 length; | ||
| 6338 | u64 stripe_len; | ||
| 6339 | u64 devid; | ||
| 6340 | u8 uuid[BTRFS_UUID_SIZE]; | ||
| 6341 | int num_stripes; | ||
| 6342 | int ret; | ||
| 6343 | int i; | ||
| 6344 | |||
| 6345 | logical = key->offset; | ||
| 6346 | length = btrfs_chunk_length(leaf, chunk); | ||
| 6347 | stripe_len = btrfs_chunk_stripe_len(leaf, chunk); | ||
| 6348 | num_stripes = btrfs_chunk_num_stripes(leaf, chunk); | ||
| 6349 | |||
| 6350 | ret = btrfs_check_chunk_valid(root, leaf, chunk, logical); | ||
| 6351 | if (ret) | ||
| 6352 | return ret; | ||
| 6302 | 6353 | ||
| 6303 | read_lock(&map_tree->map_tree.lock); | 6354 | read_lock(&map_tree->map_tree.lock); |
| 6304 | em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); | 6355 | em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); |
| @@ -6546,6 +6597,7 @@ int btrfs_read_sys_array(struct btrfs_root *root) | |||
| 6546 | u32 array_size; | 6597 | u32 array_size; |
| 6547 | u32 len = 0; | 6598 | u32 len = 0; |
| 6548 | u32 cur_offset; | 6599 | u32 cur_offset; |
| 6600 | u64 type; | ||
| 6549 | struct btrfs_key key; | 6601 | struct btrfs_key key; |
| 6550 | 6602 | ||
| 6551 | ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize); | 6603 | ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize); |
| @@ -6555,8 +6607,8 @@ int btrfs_read_sys_array(struct btrfs_root *root) | |||
| 6555 | * overallocate but we can keep it as-is, only the first page is used. | 6607 | * overallocate but we can keep it as-is, only the first page is used. |
| 6556 | */ | 6608 | */ |
| 6557 | sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET); | 6609 | sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET); |
| 6558 | if (!sb) | 6610 | if (IS_ERR(sb)) |
| 6559 | return -ENOMEM; | 6611 | return PTR_ERR(sb); |
| 6560 | set_extent_buffer_uptodate(sb); | 6612 | set_extent_buffer_uptodate(sb); |
| 6561 | btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); | 6613 | btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0); |
| 6562 | /* | 6614 | /* |
| @@ -6612,6 +6664,15 @@ int btrfs_read_sys_array(struct btrfs_root *root) | |||
| 6612 | break; | 6664 | break; |
| 6613 | } | 6665 | } |
| 6614 | 6666 | ||
| 6667 | type = btrfs_chunk_type(sb, chunk); | ||
| 6668 | if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { | ||
| 6669 | btrfs_err(root->fs_info, | ||
| 6670 | "invalid chunk type %llu in sys_array at offset %u", | ||
| 6671 | type, cur_offset); | ||
| 6672 | ret = -EIO; | ||
| 6673 | break; | ||
| 6674 | } | ||
| 6675 | |||
| 6615 | len = btrfs_chunk_item_size(num_stripes); | 6676 | len = btrfs_chunk_item_size(num_stripes); |
| 6616 | if (cur_offset + len > array_size) | 6677 | if (cur_offset + len > array_size) |
| 6617 | goto out_short_read; | 6678 | goto out_short_read; |
| @@ -6630,12 +6691,14 @@ int btrfs_read_sys_array(struct btrfs_root *root) | |||
| 6630 | sb_array_offset += len; | 6691 | sb_array_offset += len; |
| 6631 | cur_offset += len; | 6692 | cur_offset += len; |
| 6632 | } | 6693 | } |
| 6694 | clear_extent_buffer_uptodate(sb); | ||
| 6633 | free_extent_buffer_stale(sb); | 6695 | free_extent_buffer_stale(sb); |
| 6634 | return ret; | 6696 | return ret; |
| 6635 | 6697 | ||
| 6636 | out_short_read: | 6698 | out_short_read: |
| 6637 | printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n", | 6699 | printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n", |
| 6638 | len, cur_offset); | 6700 | len, cur_offset); |
| 6701 | clear_extent_buffer_uptodate(sb); | ||
| 6639 | free_extent_buffer_stale(sb); | 6702 | free_extent_buffer_stale(sb); |
| 6640 | return -EIO; | 6703 | return -EIO; |
| 6641 | } | 6704 | } |
| @@ -6648,6 +6711,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root) | |||
| 6648 | struct btrfs_key found_key; | 6711 | struct btrfs_key found_key; |
| 6649 | int ret; | 6712 | int ret; |
| 6650 | int slot; | 6713 | int slot; |
| 6714 | u64 total_dev = 0; | ||
| 6651 | 6715 | ||
| 6652 | root = root->fs_info->chunk_root; | 6716 | root = root->fs_info->chunk_root; |
| 6653 | 6717 | ||
| @@ -6689,6 +6753,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root) | |||
| 6689 | ret = read_one_dev(root, leaf, dev_item); | 6753 | ret = read_one_dev(root, leaf, dev_item); |
| 6690 | if (ret) | 6754 | if (ret) |
| 6691 | goto error; | 6755 | goto error; |
| 6756 | total_dev++; | ||
| 6692 | } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { | 6757 | } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { |
| 6693 | struct btrfs_chunk *chunk; | 6758 | struct btrfs_chunk *chunk; |
| 6694 | chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); | 6759 | chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); |
| @@ -6698,6 +6763,28 @@ int btrfs_read_chunk_tree(struct btrfs_root *root) | |||
| 6698 | } | 6763 | } |
| 6699 | path->slots[0]++; | 6764 | path->slots[0]++; |
| 6700 | } | 6765 | } |
| 6766 | |||
| 6767 | /* | ||
| 6768 | * After loading chunk tree, we've got all device information, | ||
| 6769 | * do another round of validation checks. | ||
| 6770 | */ | ||
| 6771 | if (total_dev != root->fs_info->fs_devices->total_devices) { | ||
| 6772 | btrfs_err(root->fs_info, | ||
| 6773 | "super_num_devices %llu mismatch with num_devices %llu found here", | ||
| 6774 | btrfs_super_num_devices(root->fs_info->super_copy), | ||
| 6775 | total_dev); | ||
| 6776 | ret = -EINVAL; | ||
| 6777 | goto error; | ||
| 6778 | } | ||
| 6779 | if (btrfs_super_total_bytes(root->fs_info->super_copy) < | ||
| 6780 | root->fs_info->fs_devices->total_rw_bytes) { | ||
| 6781 | btrfs_err(root->fs_info, | ||
| 6782 | "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", | ||
| 6783 | btrfs_super_total_bytes(root->fs_info->super_copy), | ||
| 6784 | root->fs_info->fs_devices->total_rw_bytes); | ||
| 6785 | ret = -EINVAL; | ||
| 6786 | goto error; | ||
| 6787 | } | ||
| 6701 | ret = 0; | 6788 | ret = 0; |
| 6702 | error: | 6789 | error: |
| 6703 | unlock_chunks(root); | 6790 | unlock_chunks(root); |
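The btrfs_check_chunk_valid() helper introduced above centralizes the per-profile stripe sanity checks (RAID10 wants sub_stripes == 2, RAID6 at least three stripes, DUP at most two, and a plain "single" chunk exactly one). A minimal userspace sketch of the same rule set, using made-up BG_* flag values rather than the real BTRFS_BLOCK_GROUP_* constants:

    /* Illustrative model only; flag values are invented. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BG_RAID1  (1ULL << 0)
    #define BG_RAID5  (1ULL << 1)
    #define BG_RAID6  (1ULL << 2)
    #define BG_RAID10 (1ULL << 3)
    #define BG_DUP    (1ULL << 4)
    #define BG_PROFILE_MASK (BG_RAID1 | BG_RAID5 | BG_RAID6 | BG_RAID10 | BG_DUP)

    static bool stripe_counts_valid(uint64_t type, uint16_t num_stripes,
                                    uint16_t sub_stripes)
    {
            if ((type & BG_RAID10) && sub_stripes != 2)
                    return false;
            if ((type & BG_RAID1) && num_stripes < 1)
                    return false;
            if ((type & BG_RAID5) && num_stripes < 2)
                    return false;
            if ((type & BG_RAID6) && num_stripes < 3)
                    return false;
            if ((type & BG_DUP) && num_stripes > 2)
                    return false;
            /* no profile bit set: a "single" chunk has exactly one stripe */
            if ((type & BG_PROFILE_MASK) == 0 && num_stripes != 1)
                    return false;
            return true;
    }

    int main(void)
    {
            printf("%d\n", stripe_counts_valid(BG_RAID10, 4, 2)); /* 1 */
            printf("%d\n", stripe_counts_valid(BG_RAID10, 4, 1)); /* 0 */
            return 0;
    }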
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index 861d611b8c05..ce5f345d70f5 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c | |||
| @@ -380,7 +380,7 @@ static void cachefiles_sync_cache(struct fscache_cache *_cache) | |||
| 380 | * check whether the backing cache is consistent with FS-Cache | 380 | * check whether the backing cache is consistent with FS-Cache |
| 381 | * - called by FS-Cache when it evaluates whether the cache needs to be invalidated | 381 | * - called by FS-Cache when it evaluates whether the cache needs to be invalidated |
| 382 | */ | 382 | */ |
| 383 | static bool cachefiles_check_consistency(struct fscache_operation *op) | 383 | static int cachefiles_check_consistency(struct fscache_operation *op) |
| 384 | { | 384 | { |
| 385 | struct cachefiles_object *object; | 385 | struct cachefiles_object *object; |
| 386 | struct cachefiles_cache *cache; | 386 | struct cachefiles_cache *cache; |
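The cachefiles change above is purely a type fix: the consistency hook is stored in a table of function pointers, so its return type has to match the slot's declaration exactly. A small sketch of the general point, with invented types rather than the real fscache_operation interface:

    #include <stdio.h>

    struct op;                              /* opaque operation handle */

    struct cache_ops {
            int (*check_consistency)(struct op *op);  /* slot declares int */
    };

    /* The implementation must therefore return int too; declaring it bool
     * makes the assignment a mismatched function-pointer type even if it
     * happens to work on common ABIs. */
    static int my_check_consistency(struct op *op)
    {
            (void)op;
            return 0;                       /* 0 == consistent */
    }

    static const struct cache_ops ops = {
            .check_consistency = my_check_consistency,
    };

    int main(void)
    {
            printf("consistent: %d\n", ops.check_consistency(NULL) == 0);
            return 0;
    }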
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index eeb71e5de27a..26a9d10d75e9 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c | |||
| @@ -276,8 +276,10 @@ static void finish_read(struct ceph_osd_request *req) | |||
| 276 | for (i = 0; i < num_pages; i++) { | 276 | for (i = 0; i < num_pages; i++) { |
| 277 | struct page *page = osd_data->pages[i]; | 277 | struct page *page = osd_data->pages[i]; |
| 278 | 278 | ||
| 279 | if (rc < 0 && rc != -ENOENT) | 279 | if (rc < 0 && rc != -ENOENT) { |
| 280 | ceph_fscache_readpage_cancel(inode, page); | ||
| 280 | goto unlock; | 281 | goto unlock; |
| 282 | } | ||
| 281 | if (bytes < (int)PAGE_SIZE) { | 283 | if (bytes < (int)PAGE_SIZE) { |
| 282 | /* zero (remainder of) page */ | 284 | /* zero (remainder of) page */ |
| 283 | int s = bytes < 0 ? 0 : bytes; | 285 | int s = bytes < 0 ? 0 : bytes; |
| @@ -535,8 +537,6 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) | |||
| 535 | CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) | 537 | CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb)) |
| 536 | set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); | 538 | set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC); |
| 537 | 539 | ||
| 538 | ceph_readpage_to_fscache(inode, page); | ||
| 539 | |||
| 540 | set_page_writeback(page); | 540 | set_page_writeback(page); |
| 541 | err = ceph_osdc_writepages(osdc, ceph_vino(inode), | 541 | err = ceph_osdc_writepages(osdc, ceph_vino(inode), |
| 542 | &ci->i_layout, snapc, | 542 | &ci->i_layout, snapc, |
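The finish_read() hunk above makes the error path cancel the page's pending fscache store before unlocking it, so a failed OSD read never leaves stale data queued for the local cache. A simplified userspace model of that "cancel before unlock" ordering, with stand-in page and cache types:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct page {
            bool locked;
            bool cache_pending;     /* a store to the local cache is queued */
    };

    static void cache_cancel(struct page *p)
    {
            /* drop the queued store so the cache never sees a page whose
             * read actually failed */
            p->cache_pending = false;
    }

    static void unlock_page(struct page *p) { p->locked = false; }

    static void finish_read_one(struct page *p, int rc)
    {
            if (rc < 0 && rc != -ENOENT) {
                    cache_cancel(p);        /* must precede the unlock */
                    goto unlock;
            }
            /* ... copy data, mark the page up to date ... */
    unlock:
            unlock_page(p);
    }

    int main(void)
    {
            struct page p = { .locked = true, .cache_pending = true };

            finish_read_one(&p, -EIO);
            printf("pending=%d locked=%d\n", p.cache_pending, p.locked);
            return 0;
    }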
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c index c052b5bf219b..238c55b01723 100644 --- a/fs/ceph/cache.c +++ b/fs/ceph/cache.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include "cache.h" | 25 | #include "cache.h" |
| 26 | 26 | ||
| 27 | struct ceph_aux_inode { | 27 | struct ceph_aux_inode { |
| 28 | u64 version; | ||
| 28 | struct timespec mtime; | 29 | struct timespec mtime; |
| 29 | loff_t size; | 30 | loff_t size; |
| 30 | }; | 31 | }; |
| @@ -69,15 +70,8 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc) | |||
| 69 | fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index, | 70 | fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index, |
| 70 | &ceph_fscache_fsid_object_def, | 71 | &ceph_fscache_fsid_object_def, |
| 71 | fsc, true); | 72 | fsc, true); |
| 72 | 73 | if (!fsc->fscache) | |
| 73 | if (fsc->fscache == NULL) { | ||
| 74 | pr_err("Unable to resgister fsid: %p fscache cookie", fsc); | 74 | pr_err("Unable to resgister fsid: %p fscache cookie", fsc); |
| 75 | return 0; | ||
| 76 | } | ||
| 77 | |||
| 78 | fsc->revalidate_wq = alloc_workqueue("ceph-revalidate", 0, 1); | ||
| 79 | if (fsc->revalidate_wq == NULL) | ||
| 80 | return -ENOMEM; | ||
| 81 | 75 | ||
| 82 | return 0; | 76 | return 0; |
| 83 | } | 77 | } |
| @@ -105,6 +99,7 @@ static uint16_t ceph_fscache_inode_get_aux(const void *cookie_netfs_data, | |||
| 105 | const struct inode* inode = &ci->vfs_inode; | 99 | const struct inode* inode = &ci->vfs_inode; |
| 106 | 100 | ||
| 107 | memset(&aux, 0, sizeof(aux)); | 101 | memset(&aux, 0, sizeof(aux)); |
| 102 | aux.version = ci->i_version; | ||
| 108 | aux.mtime = inode->i_mtime; | 103 | aux.mtime = inode->i_mtime; |
| 109 | aux.size = i_size_read(inode); | 104 | aux.size = i_size_read(inode); |
| 110 | 105 | ||
| @@ -131,6 +126,7 @@ static enum fscache_checkaux ceph_fscache_inode_check_aux( | |||
| 131 | return FSCACHE_CHECKAUX_OBSOLETE; | 126 | return FSCACHE_CHECKAUX_OBSOLETE; |
| 132 | 127 | ||
| 133 | memset(&aux, 0, sizeof(aux)); | 128 | memset(&aux, 0, sizeof(aux)); |
| 129 | aux.version = ci->i_version; | ||
| 134 | aux.mtime = inode->i_mtime; | 130 | aux.mtime = inode->i_mtime; |
| 135 | aux.size = i_size_read(inode); | 131 | aux.size = i_size_read(inode); |
| 136 | 132 | ||
| @@ -181,32 +177,26 @@ static const struct fscache_cookie_def ceph_fscache_inode_object_def = { | |||
| 181 | .now_uncached = ceph_fscache_inode_now_uncached, | 177 | .now_uncached = ceph_fscache_inode_now_uncached, |
| 182 | }; | 178 | }; |
| 183 | 179 | ||
| 184 | void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc, | 180 | void ceph_fscache_register_inode_cookie(struct inode *inode) |
| 185 | struct ceph_inode_info* ci) | ||
| 186 | { | 181 | { |
| 187 | struct inode* inode = &ci->vfs_inode; | 182 | struct ceph_inode_info *ci = ceph_inode(inode); |
| 183 | struct ceph_fs_client *fsc = ceph_inode_to_client(inode); | ||
| 188 | 184 | ||
| 189 | /* No caching for filesystem */ | 185 | /* No caching for filesystem */ |
| 190 | if (fsc->fscache == NULL) | 186 | if (fsc->fscache == NULL) |
| 191 | return; | 187 | return; |
| 192 | 188 | ||
| 193 | /* Only cache for regular files that are read only */ | 189 | /* Only cache for regular files that are read only */ |
| 194 | if ((ci->vfs_inode.i_mode & S_IFREG) == 0) | 190 | if (!S_ISREG(inode->i_mode)) |
| 195 | return; | 191 | return; |
| 196 | 192 | ||
| 197 | /* Avoid multiple racing open requests */ | 193 | inode_lock_nested(inode, I_MUTEX_CHILD); |
| 198 | inode_lock(inode); | 194 | if (!ci->fscache) { |
| 199 | 195 | ci->fscache = fscache_acquire_cookie(fsc->fscache, | |
| 200 | if (ci->fscache) | 196 | &ceph_fscache_inode_object_def, |
| 201 | goto done; | 197 | ci, false); |
| 202 | 198 | } | |
| 203 | ci->fscache = fscache_acquire_cookie(fsc->fscache, | ||
| 204 | &ceph_fscache_inode_object_def, | ||
| 205 | ci, true); | ||
| 206 | fscache_check_consistency(ci->fscache); | ||
| 207 | done: | ||
| 208 | inode_unlock(inode); | 199 | inode_unlock(inode); |
| 209 | |||
| 210 | } | 200 | } |
| 211 | 201 | ||
| 212 | void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) | 202 | void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) |
| @@ -222,6 +212,34 @@ void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) | |||
| 222 | fscache_relinquish_cookie(cookie, 0); | 212 | fscache_relinquish_cookie(cookie, 0); |
| 223 | } | 213 | } |
| 224 | 214 | ||
| 215 | static bool ceph_fscache_can_enable(void *data) | ||
| 216 | { | ||
| 217 | struct inode *inode = data; | ||
| 218 | return !inode_is_open_for_write(inode); | ||
| 219 | } | ||
| 220 | |||
| 221 | void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp) | ||
| 222 | { | ||
| 223 | struct ceph_inode_info *ci = ceph_inode(inode); | ||
| 224 | |||
| 225 | if (!fscache_cookie_valid(ci->fscache)) | ||
| 226 | return; | ||
| 227 | |||
| 228 | if (inode_is_open_for_write(inode)) { | ||
| 229 | dout("fscache_file_set_cookie %p %p disabling cache\n", | ||
| 230 | inode, filp); | ||
| 231 | fscache_disable_cookie(ci->fscache, false); | ||
| 232 | fscache_uncache_all_inode_pages(ci->fscache, inode); | ||
| 233 | } else { | ||
| 234 | fscache_enable_cookie(ci->fscache, ceph_fscache_can_enable, | ||
| 235 | inode); | ||
| 236 | if (fscache_cookie_enabled(ci->fscache)) { | ||
| 237 | dout("fscache_file_set_cookie %p %p enabing cache\n", | ||
| 238 | inode, filp); | ||
| 239 | } | ||
| 240 | } | ||
| 241 | } | ||
| 242 | |||
| 225 | static void ceph_vfs_readpage_complete(struct page *page, void *data, int error) | 243 | static void ceph_vfs_readpage_complete(struct page *page, void *data, int error) |
| 226 | { | 244 | { |
| 227 | if (!error) | 245 | if (!error) |
| @@ -238,8 +256,7 @@ static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int | |||
| 238 | 256 | ||
| 239 | static inline bool cache_valid(struct ceph_inode_info *ci) | 257 | static inline bool cache_valid(struct ceph_inode_info *ci) |
| 240 | { | 258 | { |
| 241 | return ((ceph_caps_issued(ci) & CEPH_CAP_FILE_CACHE) && | 259 | return ci->i_fscache_gen == ci->i_rdcache_gen; |
| 242 | (ci->i_fscache_gen == ci->i_rdcache_gen)); | ||
| 243 | } | 260 | } |
| 244 | 261 | ||
| 245 | 262 | ||
| @@ -332,69 +349,27 @@ void ceph_invalidate_fscache_page(struct inode* inode, struct page *page) | |||
| 332 | 349 | ||
| 333 | void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc) | 350 | void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc) |
| 334 | { | 351 | { |
| 335 | if (fsc->revalidate_wq) | ||
| 336 | destroy_workqueue(fsc->revalidate_wq); | ||
| 337 | |||
| 338 | fscache_relinquish_cookie(fsc->fscache, 0); | 352 | fscache_relinquish_cookie(fsc->fscache, 0); |
| 339 | fsc->fscache = NULL; | 353 | fsc->fscache = NULL; |
| 340 | } | 354 | } |
| 341 | 355 | ||
| 342 | static void ceph_revalidate_work(struct work_struct *work) | 356 | /* |
| 343 | { | 357 | * caller should hold CEPH_CAP_FILE_{RD,CACHE} |
| 344 | int issued; | 358 | */ |
| 345 | u32 orig_gen; | 359 | void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci) |
| 346 | struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, | ||
| 347 | i_revalidate_work); | ||
| 348 | struct inode *inode = &ci->vfs_inode; | ||
| 349 | |||
| 350 | spin_lock(&ci->i_ceph_lock); | ||
| 351 | issued = __ceph_caps_issued(ci, NULL); | ||
| 352 | orig_gen = ci->i_rdcache_gen; | ||
| 353 | spin_unlock(&ci->i_ceph_lock); | ||
| 354 | |||
| 355 | if (!(issued & CEPH_CAP_FILE_CACHE)) { | ||
| 356 | dout("revalidate_work lost cache before validation %p\n", | ||
| 357 | inode); | ||
| 358 | goto out; | ||
| 359 | } | ||
| 360 | |||
| 361 | if (!fscache_check_consistency(ci->fscache)) | ||
| 362 | fscache_invalidate(ci->fscache); | ||
| 363 | |||
| 364 | spin_lock(&ci->i_ceph_lock); | ||
| 365 | /* Update the new valid generation (backwards sanity check too) */ | ||
| 366 | if (orig_gen > ci->i_fscache_gen) { | ||
| 367 | ci->i_fscache_gen = orig_gen; | ||
| 368 | } | ||
| 369 | spin_unlock(&ci->i_ceph_lock); | ||
| 370 | |||
| 371 | out: | ||
| 372 | iput(&ci->vfs_inode); | ||
| 373 | } | ||
| 374 | |||
| 375 | void ceph_queue_revalidate(struct inode *inode) | ||
| 376 | { | 360 | { |
| 377 | struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); | 361 | if (cache_valid(ci)) |
| 378 | struct ceph_inode_info *ci = ceph_inode(inode); | ||
| 379 | |||
| 380 | if (fsc->revalidate_wq == NULL || ci->fscache == NULL) | ||
| 381 | return; | 362 | return; |
| 382 | 363 | ||
| 383 | ihold(inode); | 364 | /* reuse i_truncate_mutex. There should be no pending |
| 384 | 365 | * truncate while the caller holds CEPH_CAP_FILE_RD */ | |
| 385 | if (queue_work(ceph_sb_to_client(inode->i_sb)->revalidate_wq, | 366 | mutex_lock(&ci->i_truncate_mutex); |
| 386 | &ci->i_revalidate_work)) { | 367 | if (!cache_valid(ci)) { |
| 387 | dout("ceph_queue_revalidate %p\n", inode); | 368 | if (fscache_check_consistency(ci->fscache)) |
| 388 | } else { | 369 | fscache_invalidate(ci->fscache); |
| 389 | dout("ceph_queue_revalidate %p failed\n)", inode); | 370 | spin_lock(&ci->i_ceph_lock); |
| 390 | iput(inode); | 371 | ci->i_fscache_gen = ci->i_rdcache_gen; |
| 372 | spin_unlock(&ci->i_ceph_lock); | ||
| 391 | } | 373 | } |
| 392 | } | 374 | mutex_unlock(&ci->i_truncate_mutex); |
| 393 | |||
| 394 | void ceph_fscache_inode_init(struct ceph_inode_info *ci) | ||
| 395 | { | ||
| 396 | ci->fscache = NULL; | ||
| 397 | /* The first load is verifed cookie open time */ | ||
| 398 | ci->i_fscache_gen = 1; | ||
| 399 | INIT_WORK(&ci->i_revalidate_work, ceph_revalidate_work); | ||
| 400 | } | 375 | } |
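ceph/cache.c now folds i_version into the fscache auxiliary data, so a cached object whose version, mtime or size no longer matches is reported obsolete. The fragment below models that comparison in plain C; the structure only loosely mirrors ceph_aux_inode and is not the kernel layout:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    struct aux_inode {
            uint64_t version;
            struct timespec mtime;
            long long size;
    };

    static void aux_fill(struct aux_inode *aux, uint64_t version,
                         time_t mtime_sec, long long size)
    {
            /* memset first so padding bytes compare predictably, the same
             * trick the kernel helpers use before memcmp */
            memset(aux, 0, sizeof(*aux));
            aux->version = version;
            aux->mtime.tv_sec = mtime_sec;
            aux->size = size;
    }

    /* true when the cached object may still be used */
    static bool aux_check(const struct aux_inode *cached,
                          const struct aux_inode *live)
    {
            return memcmp(cached, live, sizeof(*cached)) == 0;
    }

    int main(void)
    {
            struct aux_inode cached, live;

            aux_fill(&cached, 5, 100, 4096);
            aux_fill(&live, 6, 100, 4096);          /* i_version moved on */
            printf("obsolete=%d\n", !aux_check(&cached, &live));  /* 1 */
            return 0;
    }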
diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h index 5ac591bd012b..7e72c7594f0c 100644 --- a/fs/ceph/cache.h +++ b/fs/ceph/cache.h | |||
| @@ -34,10 +34,10 @@ void ceph_fscache_unregister(void); | |||
| 34 | int ceph_fscache_register_fs(struct ceph_fs_client* fsc); | 34 | int ceph_fscache_register_fs(struct ceph_fs_client* fsc); |
| 35 | void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc); | 35 | void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc); |
| 36 | 36 | ||
| 37 | void ceph_fscache_inode_init(struct ceph_inode_info *ci); | 37 | void ceph_fscache_register_inode_cookie(struct inode *inode); |
| 38 | void ceph_fscache_register_inode_cookie(struct ceph_fs_client* fsc, | ||
| 39 | struct ceph_inode_info* ci); | ||
| 40 | void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci); | 38 | void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci); |
| 39 | void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp); | ||
| 40 | void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci); | ||
| 41 | 41 | ||
| 42 | int ceph_readpage_from_fscache(struct inode *inode, struct page *page); | 42 | int ceph_readpage_from_fscache(struct inode *inode, struct page *page); |
| 43 | int ceph_readpages_from_fscache(struct inode *inode, | 43 | int ceph_readpages_from_fscache(struct inode *inode, |
| @@ -46,12 +46,11 @@ int ceph_readpages_from_fscache(struct inode *inode, | |||
| 46 | unsigned *nr_pages); | 46 | unsigned *nr_pages); |
| 47 | void ceph_readpage_to_fscache(struct inode *inode, struct page *page); | 47 | void ceph_readpage_to_fscache(struct inode *inode, struct page *page); |
| 48 | void ceph_invalidate_fscache_page(struct inode* inode, struct page *page); | 48 | void ceph_invalidate_fscache_page(struct inode* inode, struct page *page); |
| 49 | void ceph_queue_revalidate(struct inode *inode); | ||
| 50 | 49 | ||
| 51 | static inline void ceph_fscache_update_objectsize(struct inode *inode) | 50 | static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci) |
| 52 | { | 51 | { |
| 53 | struct ceph_inode_info *ci = ceph_inode(inode); | 52 | ci->fscache = NULL; |
| 54 | fscache_attr_changed(ci->fscache); | 53 | ci->i_fscache_gen = 0; |
| 55 | } | 54 | } |
| 56 | 55 | ||
| 57 | static inline void ceph_fscache_invalidate(struct inode *inode) | 56 | static inline void ceph_fscache_invalidate(struct inode *inode) |
| @@ -88,6 +87,11 @@ static inline void ceph_fscache_readpages_cancel(struct inode *inode, | |||
| 88 | return fscache_readpages_cancel(ci->fscache, pages); | 87 | return fscache_readpages_cancel(ci->fscache, pages); |
| 89 | } | 88 | } |
| 90 | 89 | ||
| 90 | static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci) | ||
| 91 | { | ||
| 92 | ci->i_fscache_gen = ci->i_rdcache_gen - 1; | ||
| 93 | } | ||
| 94 | |||
| 91 | #else | 95 | #else |
| 92 | 96 | ||
| 93 | static inline int ceph_fscache_register(void) | 97 | static inline int ceph_fscache_register(void) |
| @@ -112,8 +116,20 @@ static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci) | |||
| 112 | { | 116 | { |
| 113 | } | 117 | } |
| 114 | 118 | ||
| 115 | static inline void ceph_fscache_register_inode_cookie(struct ceph_fs_client* parent_fsc, | 119 | static inline void ceph_fscache_register_inode_cookie(struct inode *inode) |
| 116 | struct ceph_inode_info* ci) | 120 | { |
| 121 | } | ||
| 122 | |||
| 123 | static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) | ||
| 124 | { | ||
| 125 | } | ||
| 126 | |||
| 127 | static inline void ceph_fscache_file_set_cookie(struct inode *inode, | ||
| 128 | struct file *filp) | ||
| 129 | { | ||
| 130 | } | ||
| 131 | |||
| 132 | static inline void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci) | ||
| 117 | { | 133 | { |
| 118 | } | 134 | } |
| 119 | 135 | ||
| @@ -141,10 +157,6 @@ static inline void ceph_readpage_to_fscache(struct inode *inode, | |||
| 141 | { | 157 | { |
| 142 | } | 158 | } |
| 143 | 159 | ||
| 144 | static inline void ceph_fscache_update_objectsize(struct inode *inode) | ||
| 145 | { | ||
| 146 | } | ||
| 147 | |||
| 148 | static inline void ceph_fscache_invalidate(struct inode *inode) | 160 | static inline void ceph_fscache_invalidate(struct inode *inode) |
| 149 | { | 161 | { |
| 150 | } | 162 | } |
| @@ -154,10 +166,6 @@ static inline void ceph_invalidate_fscache_page(struct inode *inode, | |||
| 154 | { | 166 | { |
| 155 | } | 167 | } |
| 156 | 168 | ||
| 157 | static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci) | ||
| 158 | { | ||
| 159 | } | ||
| 160 | |||
| 161 | static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp) | 169 | static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp) |
| 162 | { | 170 | { |
| 163 | return 1; | 171 | return 1; |
| @@ -173,7 +181,7 @@ static inline void ceph_fscache_readpages_cancel(struct inode *inode, | |||
| 173 | { | 181 | { |
| 174 | } | 182 | } |
| 175 | 183 | ||
| 176 | static inline void ceph_queue_revalidate(struct inode *inode) | 184 | static inline void ceph_disable_fscache_readpage(struct ceph_inode_info *ci) |
| 177 | { | 185 | { |
| 178 | } | 186 | } |
| 179 | 187 | ||
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index c17b5d76d75e..6f60d0a3d0f9 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
| @@ -2393,6 +2393,9 @@ again: | |||
| 2393 | snap_rwsem_locked = true; | 2393 | snap_rwsem_locked = true; |
| 2394 | } | 2394 | } |
| 2395 | *got = need | (have & want); | 2395 | *got = need | (have & want); |
| 2396 | if ((need & CEPH_CAP_FILE_RD) && | ||
| 2397 | !(*got & CEPH_CAP_FILE_CACHE)) | ||
| 2398 | ceph_disable_fscache_readpage(ci); | ||
| 2396 | __take_cap_refs(ci, *got, true); | 2399 | __take_cap_refs(ci, *got, true); |
| 2397 | ret = 1; | 2400 | ret = 1; |
| 2398 | } | 2401 | } |
| @@ -2554,6 +2557,9 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, | |||
| 2554 | break; | 2557 | break; |
| 2555 | } | 2558 | } |
| 2556 | 2559 | ||
| 2560 | if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE)) | ||
| 2561 | ceph_fscache_revalidate_cookie(ci); | ||
| 2562 | |||
| 2557 | *got = _got; | 2563 | *got = _got; |
| 2558 | return 0; | 2564 | return 0; |
| 2559 | } | 2565 | } |
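caps.c ties the fscache transitions directly to cap acquisition: a read grant without the cache cap disables readpage caching, while a grant carrying both triggers cookie revalidation. A compact sketch of that decision, using stand-in flag values rather than the real CEPH_CAP_* bits:

    #include <stdio.h>

    #define CAP_FILE_RD    (1 << 0)         /* stand-in for CEPH_CAP_FILE_RD */
    #define CAP_FILE_CACHE (1 << 1)         /* stand-in for CEPH_CAP_FILE_CACHE */

    enum fscache_action { FSCACHE_NONE, FSCACHE_DISABLE, FSCACHE_REVALIDATE };

    static enum fscache_action caps_to_action(int need, int got)
    {
            if ((need & CAP_FILE_RD) && !(got & CAP_FILE_CACHE))
                    return FSCACHE_DISABLE;         /* reads bypass the cache */
            if ((got & CAP_FILE_RD) && (got & CAP_FILE_CACHE))
                    return FSCACHE_REVALIDATE;      /* safe to check consistency */
            return FSCACHE_NONE;
    }

    int main(void)
    {
            printf("%d\n", caps_to_action(CAP_FILE_RD, CAP_FILE_RD));
            printf("%d\n", caps_to_action(CAP_FILE_RD,
                                          CAP_FILE_RD | CAP_FILE_CACHE));
            return 0;
    }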
| @@ -2795,7 +2801,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc, | |||
| 2795 | bool writeback = false; | 2801 | bool writeback = false; |
| 2796 | bool queue_trunc = false; | 2802 | bool queue_trunc = false; |
| 2797 | bool queue_invalidate = false; | 2803 | bool queue_invalidate = false; |
| 2798 | bool queue_revalidate = false; | ||
| 2799 | bool deleted_inode = false; | 2804 | bool deleted_inode = false; |
| 2800 | bool fill_inline = false; | 2805 | bool fill_inline = false; |
| 2801 | 2806 | ||
| @@ -2837,8 +2842,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc, | |||
| 2837 | ci->i_rdcache_revoking = ci->i_rdcache_gen; | 2842 | ci->i_rdcache_revoking = ci->i_rdcache_gen; |
| 2838 | } | 2843 | } |
| 2839 | } | 2844 | } |
| 2840 | |||
| 2841 | ceph_fscache_invalidate(inode); | ||
| 2842 | } | 2845 | } |
| 2843 | 2846 | ||
| 2844 | /* side effects now are allowed */ | 2847 | /* side effects now are allowed */ |
| @@ -2880,11 +2883,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc, | |||
| 2880 | } | 2883 | } |
| 2881 | } | 2884 | } |
| 2882 | 2885 | ||
| 2883 | /* Do we need to revalidate our fscache cookie. Don't bother on the | ||
| 2884 | * first cache cap as we already validate at cookie creation time. */ | ||
| 2885 | if ((issued & CEPH_CAP_FILE_CACHE) && ci->i_rdcache_gen > 1) | ||
| 2886 | queue_revalidate = true; | ||
| 2887 | |||
| 2888 | if (newcaps & CEPH_CAP_ANY_RD) { | 2886 | if (newcaps & CEPH_CAP_ANY_RD) { |
| 2889 | /* ctime/mtime/atime? */ | 2887 | /* ctime/mtime/atime? */ |
| 2890 | ceph_decode_timespec(&mtime, &grant->mtime); | 2888 | ceph_decode_timespec(&mtime, &grant->mtime); |
| @@ -2993,11 +2991,8 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc, | |||
| 2993 | if (fill_inline) | 2991 | if (fill_inline) |
| 2994 | ceph_fill_inline_data(inode, NULL, inline_data, inline_len); | 2992 | ceph_fill_inline_data(inode, NULL, inline_data, inline_len); |
| 2995 | 2993 | ||
| 2996 | if (queue_trunc) { | 2994 | if (queue_trunc) |
| 2997 | ceph_queue_vmtruncate(inode); | 2995 | ceph_queue_vmtruncate(inode); |
| 2998 | ceph_queue_revalidate(inode); | ||
| 2999 | } else if (queue_revalidate) | ||
| 3000 | ceph_queue_revalidate(inode); | ||
| 3001 | 2996 | ||
| 3002 | if (writeback) | 2997 | if (writeback) |
| 3003 | /* | 2998 | /* |
| @@ -3199,10 +3194,8 @@ static void handle_cap_trunc(struct inode *inode, | |||
| 3199 | truncate_seq, truncate_size, size); | 3194 | truncate_seq, truncate_size, size); |
| 3200 | spin_unlock(&ci->i_ceph_lock); | 3195 | spin_unlock(&ci->i_ceph_lock); |
| 3201 | 3196 | ||
| 3202 | if (queue_trunc) { | 3197 | if (queue_trunc) |
| 3203 | ceph_queue_vmtruncate(inode); | 3198 | ceph_queue_vmtruncate(inode); |
| 3204 | ceph_fscache_invalidate(inode); | ||
| 3205 | } | ||
| 3206 | } | 3199 | } |
| 3207 | 3200 | ||
| 3208 | /* | 3201 | /* |
diff --git a/fs/ceph/file.c b/fs/ceph/file.c index a888df6f2d71..ce2f5795e44b 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c | |||
| @@ -137,23 +137,11 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode) | |||
| 137 | { | 137 | { |
| 138 | struct ceph_file_info *cf; | 138 | struct ceph_file_info *cf; |
| 139 | int ret = 0; | 139 | int ret = 0; |
| 140 | struct ceph_inode_info *ci = ceph_inode(inode); | ||
| 141 | struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); | ||
| 142 | struct ceph_mds_client *mdsc = fsc->mdsc; | ||
| 143 | 140 | ||
| 144 | switch (inode->i_mode & S_IFMT) { | 141 | switch (inode->i_mode & S_IFMT) { |
| 145 | case S_IFREG: | 142 | case S_IFREG: |
| 146 | /* First file open request creates the cookie, we want to keep | 143 | ceph_fscache_register_inode_cookie(inode); |
| 147 | * this cookie around for the filetime of the inode as not to | 144 | ceph_fscache_file_set_cookie(inode, file); |
| 148 | * have to worry about fscache register / revoke / operation | ||
| 149 | * races. | ||
| 150 | * | ||
| 151 | * Also, if we know the operation is going to invalidate data | ||
| 152 | * (non readonly) just nuke the cache right away. | ||
| 153 | */ | ||
| 154 | ceph_fscache_register_inode_cookie(mdsc->fsc, ci); | ||
| 155 | if ((fmode & CEPH_FILE_MODE_WR)) | ||
| 156 | ceph_fscache_invalidate(inode); | ||
| 157 | case S_IFDIR: | 145 | case S_IFDIR: |
| 158 | dout("init_file %p %p 0%o (regular)\n", inode, file, | 146 | dout("init_file %p %p 0%o (regular)\n", inode, file, |
| 159 | inode->i_mode); | 147 | inode->i_mode); |
| @@ -1349,7 +1337,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
| 1349 | } | 1337 | } |
| 1350 | 1338 | ||
| 1351 | retry_snap: | 1339 | retry_snap: |
| 1352 | if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) { | 1340 | if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) { |
| 1353 | err = -ENOSPC; | 1341 | err = -ENOSPC; |
| 1354 | goto out; | 1342 | goto out; |
| 1355 | } | 1343 | } |
| @@ -1407,7 +1395,6 @@ retry_snap: | |||
| 1407 | iov_iter_advance(from, written); | 1395 | iov_iter_advance(from, written); |
| 1408 | ceph_put_snap_context(snapc); | 1396 | ceph_put_snap_context(snapc); |
| 1409 | } else { | 1397 | } else { |
| 1410 | loff_t old_size = i_size_read(inode); | ||
| 1411 | /* | 1398 | /* |
| 1412 | * No need to acquire the i_truncate_mutex. Because | 1399 | * No need to acquire the i_truncate_mutex. Because |
| 1413 | * the MDS revokes Fwb caps before sending truncate | 1400 | * the MDS revokes Fwb caps before sending truncate |
| @@ -1418,8 +1405,6 @@ retry_snap: | |||
| 1418 | written = generic_perform_write(file, from, pos); | 1405 | written = generic_perform_write(file, from, pos); |
| 1419 | if (likely(written >= 0)) | 1406 | if (likely(written >= 0)) |
| 1420 | iocb->ki_pos = pos + written; | 1407 | iocb->ki_pos = pos + written; |
| 1421 | if (i_size_read(inode) > old_size) | ||
| 1422 | ceph_fscache_update_objectsize(inode); | ||
| 1423 | inode_unlock(inode); | 1408 | inode_unlock(inode); |
| 1424 | } | 1409 | } |
| 1425 | 1410 | ||
| @@ -1440,7 +1425,7 @@ retry_snap: | |||
| 1440 | ceph_put_cap_refs(ci, got); | 1425 | ceph_put_cap_refs(ci, got); |
| 1441 | 1426 | ||
| 1442 | if (written >= 0) { | 1427 | if (written >= 0) { |
| 1443 | if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL)) | 1428 | if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL)) |
| 1444 | iocb->ki_flags |= IOCB_DSYNC; | 1429 | iocb->ki_flags |= IOCB_DSYNC; |
| 1445 | 1430 | ||
| 1446 | written = generic_write_sync(iocb, written); | 1431 | written = generic_write_sync(iocb, written); |
| @@ -1672,8 +1657,8 @@ static long ceph_fallocate(struct file *file, int mode, | |||
| 1672 | goto unlock; | 1657 | goto unlock; |
| 1673 | } | 1658 | } |
| 1674 | 1659 | ||
| 1675 | if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) && | 1660 | if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) && |
| 1676 | !(mode & FALLOC_FL_PUNCH_HOLE)) { | 1661 | !(mode & FALLOC_FL_PUNCH_HOLE)) { |
| 1677 | ret = -ENOSPC; | 1662 | ret = -ENOSPC; |
| 1678 | goto unlock; | 1663 | goto unlock; |
| 1679 | } | 1664 | } |
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 0130a8592191..0168b49fb6ad 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
| @@ -103,7 +103,6 @@ struct ceph_fs_client { | |||
| 103 | 103 | ||
| 104 | #ifdef CONFIG_CEPH_FSCACHE | 104 | #ifdef CONFIG_CEPH_FSCACHE |
| 105 | struct fscache_cookie *fscache; | 105 | struct fscache_cookie *fscache; |
| 106 | struct workqueue_struct *revalidate_wq; | ||
| 107 | #endif | 106 | #endif |
| 108 | }; | 107 | }; |
| 109 | 108 | ||
| @@ -360,8 +359,7 @@ struct ceph_inode_info { | |||
| 360 | 359 | ||
| 361 | #ifdef CONFIG_CEPH_FSCACHE | 360 | #ifdef CONFIG_CEPH_FSCACHE |
| 362 | struct fscache_cookie *fscache; | 361 | struct fscache_cookie *fscache; |
| 363 | u32 i_fscache_gen; /* sequence, for delayed fscache validate */ | 362 | u32 i_fscache_gen; |
| 364 | struct work_struct i_revalidate_work; | ||
| 365 | #endif | 363 | #endif |
| 366 | struct inode vfs_inode; /* at end */ | 364 | struct inode vfs_inode; /* at end */ |
| 367 | }; | 365 | }; |
diff --git a/fs/coredump.c b/fs/coredump.c index 38a7ab87e10a..281b768000e6 100644 --- a/fs/coredump.c +++ b/fs/coredump.c | |||
| @@ -794,6 +794,7 @@ int dump_emit(struct coredump_params *cprm, const void *addr, int nr) | |||
| 794 | return 0; | 794 | return 0; |
| 795 | file->f_pos = pos; | 795 | file->f_pos = pos; |
| 796 | cprm->written += n; | 796 | cprm->written += n; |
| 797 | cprm->pos += n; | ||
| 797 | nr -= n; | 798 | nr -= n; |
| 798 | } | 799 | } |
| 799 | return 1; | 800 | return 1; |
| @@ -808,6 +809,7 @@ int dump_skip(struct coredump_params *cprm, size_t nr) | |||
| 808 | if (dump_interrupted() || | 809 | if (dump_interrupted() || |
| 809 | file->f_op->llseek(file, nr, SEEK_CUR) < 0) | 810 | file->f_op->llseek(file, nr, SEEK_CUR) < 0) |
| 810 | return 0; | 811 | return 0; |
| 812 | cprm->pos += nr; | ||
| 811 | return 1; | 813 | return 1; |
| 812 | } else { | 814 | } else { |
| 813 | while (nr > PAGE_SIZE) { | 815 | while (nr > PAGE_SIZE) { |
| @@ -822,7 +824,7 @@ EXPORT_SYMBOL(dump_skip); | |||
| 822 | 824 | ||
| 823 | int dump_align(struct coredump_params *cprm, int align) | 825 | int dump_align(struct coredump_params *cprm, int align) |
| 824 | { | 826 | { |
| 825 | unsigned mod = cprm->file->f_pos & (align - 1); | 827 | unsigned mod = cprm->pos & (align - 1); |
| 826 | if (align & (align - 1)) | 828 | if (align & (align - 1)) |
| 827 | return 0; | 829 | return 0; |
| 828 | return mod ? dump_skip(cprm, align - mod) : 1; | 830 | return mod ? dump_skip(cprm, align - mod) : 1; |
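The coredump hunks keep a byte counter in cprm->pos so dump_align() no longer reads file->f_pos, which does not advance when the core is piped to a user-space helper. Below is a self-contained model of emit/skip/align driven by an explicit position counter; the struct name is only loosely borrowed from the kernel:

    #include <stdio.h>

    struct dump_params {
            unsigned long long pos;         /* bytes produced so far */
            FILE *out;
    };

    static int dump_emit(struct dump_params *p, const void *buf, size_t n)
    {
            if (fwrite(buf, 1, n, p->out) != n)
                    return 0;
            p->pos += n;                    /* advance our own counter */
            return 1;
    }

    static int dump_skip(struct dump_params *p, size_t n)
    {
            static const char zeros[64];

            while (n) {
                    size_t chunk = n < sizeof(zeros) ? n : sizeof(zeros);

                    if (!dump_emit(p, zeros, chunk))
                            return 0;
                    n -= chunk;
            }
            return 1;
    }

    static int dump_align(struct dump_params *p, unsigned int align)
    {
            unsigned int mod = p->pos & (align - 1);

            if (align & (align - 1))        /* must be a power of two */
                    return 0;
            return mod ? dump_skip(p, align - mod) : 1;
    }

    int main(void)
    {
            struct dump_params p = { 0, stdout };

            dump_emit(&p, "hdr", 3);
            dump_align(&p, 4);              /* pads a single zero byte */
            fprintf(stderr, "pos=%llu\n", p.pos);   /* 4 */
            return 0;
    }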
diff --git a/fs/dcache.c b/fs/dcache.c index ad4a542e9bab..d6847d7b123d 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
| @@ -507,6 +507,44 @@ void d_drop(struct dentry *dentry) | |||
| 507 | } | 507 | } |
| 508 | EXPORT_SYMBOL(d_drop); | 508 | EXPORT_SYMBOL(d_drop); |
| 509 | 509 | ||
| 510 | static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent) | ||
| 511 | { | ||
| 512 | struct dentry *next; | ||
| 513 | /* | ||
| 514 | * Inform d_walk() and shrink_dentry_list() that we are no longer | ||
| 515 | * attached to the dentry tree | ||
| 516 | */ | ||
| 517 | dentry->d_flags |= DCACHE_DENTRY_KILLED; | ||
| 518 | if (unlikely(list_empty(&dentry->d_child))) | ||
| 519 | return; | ||
| 520 | __list_del_entry(&dentry->d_child); | ||
| 521 | /* | ||
| 522 | * Cursors can move around the list of children. While we'd been | ||
| 523 | * a normal list member, it didn't matter - ->d_child.next would've | ||
| 524 | * been updated. However, from now on it won't be and for the | ||
| 525 | * things like d_walk() it might end up with a nasty surprise. | ||
| 526 | * Normally d_walk() doesn't care about cursors moving around - | ||
| 527 | * ->d_lock on parent prevents that and since a cursor has no children | ||
| 528 | * of its own, we get through it without ever unlocking the parent. | ||
| 529 | * There is one exception, though - if we ascend from a child that | ||
| 530 | * gets killed as soon as we unlock it, the next sibling is found | ||
| 531 | * using the value left in its ->d_child.next. And if _that_ | ||
| 532 | * pointed to a cursor, and cursor got moved (e.g. by lseek()) | ||
| 533 | * before d_walk() regains parent->d_lock, we'll end up skipping | ||
| 534 | * everything the cursor had been moved past. | ||
| 535 | * | ||
| 536 | * Solution: make sure that the pointer left behind in ->d_child.next | ||
| 537 | * points to something that won't be moving around. I.e. skip the | ||
| 538 | * cursors. | ||
| 539 | */ | ||
| 540 | while (dentry->d_child.next != &parent->d_subdirs) { | ||
| 541 | next = list_entry(dentry->d_child.next, struct dentry, d_child); | ||
| 542 | if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR))) | ||
| 543 | break; | ||
| 544 | dentry->d_child.next = next->d_child.next; | ||
| 545 | } | ||
| 546 | } | ||
| 547 | |||
| 510 | static void __dentry_kill(struct dentry *dentry) | 548 | static void __dentry_kill(struct dentry *dentry) |
| 511 | { | 549 | { |
| 512 | struct dentry *parent = NULL; | 550 | struct dentry *parent = NULL; |
| @@ -532,12 +570,7 @@ static void __dentry_kill(struct dentry *dentry) | |||
| 532 | } | 570 | } |
| 533 | /* if it was on the hash then remove it */ | 571 | /* if it was on the hash then remove it */ |
| 534 | __d_drop(dentry); | 572 | __d_drop(dentry); |
| 535 | __list_del_entry(&dentry->d_child); | 573 | dentry_unlist(dentry, parent); |
| 536 | /* | ||
| 537 | * Inform d_walk() that we are no longer attached to the | ||
| 538 | * dentry tree | ||
| 539 | */ | ||
| 540 | dentry->d_flags |= DCACHE_DENTRY_KILLED; | ||
| 541 | if (parent) | 574 | if (parent) |
| 542 | spin_unlock(&parent->d_lock); | 575 | spin_unlock(&parent->d_lock); |
| 543 | dentry_iput(dentry); | 576 | dentry_iput(dentry); |
| @@ -1203,6 +1236,9 @@ resume: | |||
| 1203 | struct dentry *dentry = list_entry(tmp, struct dentry, d_child); | 1236 | struct dentry *dentry = list_entry(tmp, struct dentry, d_child); |
| 1204 | next = tmp->next; | 1237 | next = tmp->next; |
| 1205 | 1238 | ||
| 1239 | if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR)) | ||
| 1240 | continue; | ||
| 1241 | |||
| 1206 | spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); | 1242 | spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); |
| 1207 | 1243 | ||
| 1208 | ret = enter(data, dentry); | 1244 | ret = enter(data, dentry); |
| @@ -1636,7 +1672,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) | |||
| 1636 | struct dentry *dentry = __d_alloc(parent->d_sb, name); | 1672 | struct dentry *dentry = __d_alloc(parent->d_sb, name); |
| 1637 | if (!dentry) | 1673 | if (!dentry) |
| 1638 | return NULL; | 1674 | return NULL; |
| 1639 | 1675 | dentry->d_flags |= DCACHE_RCUACCESS; | |
| 1640 | spin_lock(&parent->d_lock); | 1676 | spin_lock(&parent->d_lock); |
| 1641 | /* | 1677 | /* |
| 1642 | * don't need child lock because it is not subject | 1678 | * don't need child lock because it is not subject |
| @@ -1651,6 +1687,16 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) | |||
| 1651 | } | 1687 | } |
| 1652 | EXPORT_SYMBOL(d_alloc); | 1688 | EXPORT_SYMBOL(d_alloc); |
| 1653 | 1689 | ||
| 1690 | struct dentry *d_alloc_cursor(struct dentry * parent) | ||
| 1691 | { | ||
| 1692 | struct dentry *dentry = __d_alloc(parent->d_sb, NULL); | ||
| 1693 | if (dentry) { | ||
| 1694 | dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR; | ||
| 1695 | dentry->d_parent = dget(parent); | ||
| 1696 | } | ||
| 1697 | return dentry; | ||
| 1698 | } | ||
| 1699 | |||
| 1654 | /** | 1700 | /** |
| 1655 | * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems) | 1701 | * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems) |
| 1656 | * @sb: the superblock | 1702 | * @sb: the superblock |
| @@ -2358,7 +2404,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b) | |||
| 2358 | { | 2404 | { |
| 2359 | BUG_ON(!d_unhashed(entry)); | 2405 | BUG_ON(!d_unhashed(entry)); |
| 2360 | hlist_bl_lock(b); | 2406 | hlist_bl_lock(b); |
| 2361 | entry->d_flags |= DCACHE_RCUACCESS; | ||
| 2362 | hlist_bl_add_head_rcu(&entry->d_hash, b); | 2407 | hlist_bl_add_head_rcu(&entry->d_hash, b); |
| 2363 | hlist_bl_unlock(b); | 2408 | hlist_bl_unlock(b); |
| 2364 | } | 2409 | } |
| @@ -2458,7 +2503,6 @@ retry: | |||
| 2458 | rcu_read_unlock(); | 2503 | rcu_read_unlock(); |
| 2459 | goto retry; | 2504 | goto retry; |
| 2460 | } | 2505 | } |
| 2461 | rcu_read_unlock(); | ||
| 2462 | /* | 2506 | /* |
| 2463 | * No changes for the parent since the beginning of d_lookup(). | 2507 | * No changes for the parent since the beginning of d_lookup(). |
| 2464 | * Since all removals from the chain happen with hlist_bl_lock(), | 2508 | * Since all removals from the chain happen with hlist_bl_lock(), |
| @@ -2471,8 +2515,6 @@ retry: | |||
| 2471 | continue; | 2515 | continue; |
| 2472 | if (dentry->d_parent != parent) | 2516 | if (dentry->d_parent != parent) |
| 2473 | continue; | 2517 | continue; |
| 2474 | if (d_unhashed(dentry)) | ||
| 2475 | continue; | ||
| 2476 | if (parent->d_flags & DCACHE_OP_COMPARE) { | 2518 | if (parent->d_flags & DCACHE_OP_COMPARE) { |
| 2477 | int tlen = dentry->d_name.len; | 2519 | int tlen = dentry->d_name.len; |
| 2478 | const char *tname = dentry->d_name.name; | 2520 | const char *tname = dentry->d_name.name; |
| @@ -2484,9 +2526,18 @@ retry: | |||
| 2484 | if (dentry_cmp(dentry, str, len)) | 2526 | if (dentry_cmp(dentry, str, len)) |
| 2485 | continue; | 2527 | continue; |
| 2486 | } | 2528 | } |
| 2487 | dget(dentry); | ||
| 2488 | hlist_bl_unlock(b); | 2529 | hlist_bl_unlock(b); |
| 2489 | /* somebody is doing lookup for it right now; wait for it */ | 2530 | /* now we can try to grab a reference */ |
| 2531 | if (!lockref_get_not_dead(&dentry->d_lockref)) { | ||
| 2532 | rcu_read_unlock(); | ||
| 2533 | goto retry; | ||
| 2534 | } | ||
| 2535 | |||
| 2536 | rcu_read_unlock(); | ||
| 2537 | /* | ||
| 2538 | * somebody is likely to be still doing lookup for it; | ||
| 2539 | * wait for them to finish | ||
| 2540 | */ | ||
| 2490 | spin_lock(&dentry->d_lock); | 2541 | spin_lock(&dentry->d_lock); |
| 2491 | d_wait_lookup(dentry); | 2542 | d_wait_lookup(dentry); |
| 2492 | /* | 2543 | /* |
| @@ -2517,6 +2568,7 @@ retry: | |||
| 2517 | dput(new); | 2568 | dput(new); |
| 2518 | return dentry; | 2569 | return dentry; |
| 2519 | } | 2570 | } |
| 2571 | rcu_read_unlock(); | ||
| 2520 | /* we can't take ->d_lock here; it's OK, though. */ | 2572 | /* we can't take ->d_lock here; it's OK, though. */ |
| 2521 | new->d_flags |= DCACHE_PAR_LOOKUP; | 2573 | new->d_flags |= DCACHE_PAR_LOOKUP; |
| 2522 | new->d_wait = wq; | 2574 | new->d_wait = wq; |
| @@ -2843,6 +2895,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target, | |||
| 2843 | /* ... and switch them in the tree */ | 2895 | /* ... and switch them in the tree */ |
| 2844 | if (IS_ROOT(dentry)) { | 2896 | if (IS_ROOT(dentry)) { |
| 2845 | /* splicing a tree */ | 2897 | /* splicing a tree */ |
| 2898 | dentry->d_flags |= DCACHE_RCUACCESS; | ||
| 2846 | dentry->d_parent = target->d_parent; | 2899 | dentry->d_parent = target->d_parent; |
| 2847 | target->d_parent = target; | 2900 | target->d_parent = target; |
| 2848 | list_del_init(&target->d_child); | 2901 | list_del_init(&target->d_child); |
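The dcache change adds cursor dentries (DCACHE_DENTRY_CURSOR) and teaches both d_walk() and the new dentry_unlist() to step over them, so a concurrently moving lseek() cursor can never make a directory walk skip real children. The list sketch below reproduces the "leave ->next pointing at a non-cursor" rule with ordinary userspace structures; it is a model of the idea, not the kernel list API:

    #include <stdbool.h>
    #include <stdio.h>

    struct node {
            struct node *next, *prev;
            bool cursor;            /* stand-in for DCACHE_DENTRY_CURSOR */
            int id;
    };

    static void list_init(struct node *head)
    {
            head->next = head->prev = head;
    }

    static void list_add_tail(struct node *head, struct node *n)
    {
            n->prev = head->prev;
            n->next = head;
            head->prev->next = n;
            head->prev = n;
    }

    /* Unlink n from its parent's child list but, like dentry_unlist(),
     * make sure the stale n->next left behind never points at a movable
     * cursor, so a walker resuming through it cannot be misled. */
    static void unlist(struct node *head, struct node *n)
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
            while (n->next != head && n->next->cursor)
                    n->next = n->next->next;
    }

    int main(void)
    {
            struct node head;
            struct node a = { .id = 1 };
            struct node cur = { .cursor = true };
            struct node b = { .id = 2 };

            list_init(&head);
            list_add_tail(&head, &a);
            list_add_tail(&head, &cur);
            list_add_tail(&head, &b);

            unlist(&head, &a);
            printf("resume at id=%d\n", a.next->id);        /* 2 */
            return 0;
    }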
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index 9c1c9a01b7e5..592059f88e04 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c | |||
| @@ -127,7 +127,6 @@ static int open_proxy_open(struct inode *inode, struct file *filp) | |||
| 127 | r = real_fops->open(inode, filp); | 127 | r = real_fops->open(inode, filp); |
| 128 | 128 | ||
| 129 | out: | 129 | out: |
| 130 | fops_put(real_fops); | ||
| 131 | debugfs_use_file_finish(srcu_idx); | 130 | debugfs_use_file_finish(srcu_idx); |
| 132 | return r; | 131 | return r; |
| 133 | } | 132 | } |
| @@ -262,8 +261,10 @@ static int full_proxy_open(struct inode *inode, struct file *filp) | |||
| 262 | 261 | ||
| 263 | if (real_fops->open) { | 262 | if (real_fops->open) { |
| 264 | r = real_fops->open(inode, filp); | 263 | r = real_fops->open(inode, filp); |
| 265 | 264 | if (r) { | |
| 266 | if (filp->f_op != proxy_fops) { | 265 | replace_fops(filp, d_inode(dentry)->i_fop); |
| 266 | goto free_proxy; | ||
| 267 | } else if (filp->f_op != proxy_fops) { | ||
| 267 | /* No protection against file removal anymore. */ | 268 | /* No protection against file removal anymore. */ |
| 268 | WARN(1, "debugfs file owner replaced proxy fops: %pd", | 269 | WARN(1, "debugfs file owner replaced proxy fops: %pd", |
| 269 | dentry); | 270 | dentry); |
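The debugfs fix makes full_proxy_open() roll back when the wrapped ->open() fails: the original file_operations are restored and the proxy is freed instead of being leaked. A rough model of that rollback with invented types (the real code also handles module references and SRCU protection):

    #include <stdio.h>
    #include <stdlib.h>

    struct fops { int (*open)(void); };
    struct file { const struct fops *f_op; };

    static int failing_open(void) { return -1; }    /* pretend the device is gone */
    static const struct fops real_fops = { .open = failing_open };

    static int proxy_open(struct file *filp, const struct fops *orig)
    {
            struct fops *proxy = malloc(sizeof(*proxy));
            int r;

            if (!proxy)
                    return -1;
            *proxy = real_fops;             /* wrap the real operations */
            filp->f_op = proxy;

            r = real_fops.open();
            if (r) {
                    /* open failed: put the original fops back and drop the
                     * proxy so a later release never dereferences it */
                    filp->f_op = orig;
                    free(proxy);
            }
            return r;
    }

    int main(void)
    {
            static const struct fops inode_fops = { .open = failing_open };
            struct file f = { .f_op = NULL };
            int r = proxy_open(&f, &inode_fops);

            printf("r=%d restored=%d\n", r, f.f_op == &inode_fops);
            return 0;
    }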
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 0b2954d7172d..37c134a132c7 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c | |||
| @@ -95,8 +95,6 @@ static struct ctl_table pty_root_table[] = { | |||
| 95 | 95 | ||
| 96 | static DEFINE_MUTEX(allocated_ptys_lock); | 96 | static DEFINE_MUTEX(allocated_ptys_lock); |
| 97 | 97 | ||
| 98 | static struct vfsmount *devpts_mnt; | ||
| 99 | |||
| 100 | struct pts_mount_opts { | 98 | struct pts_mount_opts { |
| 101 | int setuid; | 99 | int setuid; |
| 102 | int setgid; | 100 | int setgid; |
| @@ -104,7 +102,7 @@ struct pts_mount_opts { | |||
| 104 | kgid_t gid; | 102 | kgid_t gid; |
| 105 | umode_t mode; | 103 | umode_t mode; |
| 106 | umode_t ptmxmode; | 104 | umode_t ptmxmode; |
| 107 | int newinstance; | 105 | int reserve; |
| 108 | int max; | 106 | int max; |
| 109 | }; | 107 | }; |
| 110 | 108 | ||
| @@ -117,11 +115,9 @@ static const match_table_t tokens = { | |||
| 117 | {Opt_uid, "uid=%u"}, | 115 | {Opt_uid, "uid=%u"}, |
| 118 | {Opt_gid, "gid=%u"}, | 116 | {Opt_gid, "gid=%u"}, |
| 119 | {Opt_mode, "mode=%o"}, | 117 | {Opt_mode, "mode=%o"}, |
| 120 | #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES | ||
| 121 | {Opt_ptmxmode, "ptmxmode=%o"}, | 118 | {Opt_ptmxmode, "ptmxmode=%o"}, |
| 122 | {Opt_newinstance, "newinstance"}, | 119 | {Opt_newinstance, "newinstance"}, |
| 123 | {Opt_max, "max=%d"}, | 120 | {Opt_max, "max=%d"}, |
| 124 | #endif | ||
| 125 | {Opt_err, NULL} | 121 | {Opt_err, NULL} |
| 126 | }; | 122 | }; |
| 127 | 123 | ||
| @@ -137,15 +133,48 @@ static inline struct pts_fs_info *DEVPTS_SB(struct super_block *sb) | |||
| 137 | return sb->s_fs_info; | 133 | return sb->s_fs_info; |
| 138 | } | 134 | } |
| 139 | 135 | ||
| 140 | static inline struct super_block *pts_sb_from_inode(struct inode *inode) | 136 | struct pts_fs_info *devpts_acquire(struct file *filp) |
| 141 | { | 137 | { |
| 142 | #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES | 138 | struct pts_fs_info *result; |
| 143 | if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) | 139 | struct path path; |
| 144 | return inode->i_sb; | 140 | struct super_block *sb; |
| 145 | #endif | 141 | int err; |
| 146 | if (!devpts_mnt) | 142 | |
| 147 | return NULL; | 143 | path = filp->f_path; |
| 148 | return devpts_mnt->mnt_sb; | 144 | path_get(&path); |
| 145 | |||
| 146 | /* Has the devpts filesystem already been found? */ | ||
| 147 | sb = path.mnt->mnt_sb; | ||
| 148 | if (sb->s_magic != DEVPTS_SUPER_MAGIC) { | ||
| 149 | /* Is a devpts filesystem at "pts" in the same directory? */ | ||
| 150 | err = path_pts(&path); | ||
| 151 | if (err) { | ||
| 152 | result = ERR_PTR(err); | ||
| 153 | goto out; | ||
| 154 | } | ||
| 155 | |||
| 156 | /* Is the path the root of a devpts filesystem? */ | ||
| 157 | result = ERR_PTR(-ENODEV); | ||
| 158 | sb = path.mnt->mnt_sb; | ||
| 159 | if ((sb->s_magic != DEVPTS_SUPER_MAGIC) || | ||
| 160 | (path.mnt->mnt_root != sb->s_root)) | ||
| 161 | goto out; | ||
| 162 | } | ||
| 163 | |||
| 164 | /* | ||
| 165 | * pty code needs to hold extra references in case of last /dev/tty close | ||
| 166 | */ | ||
| 167 | atomic_inc(&sb->s_active); | ||
| 168 | result = DEVPTS_SB(sb); | ||
| 169 | |||
| 170 | out: | ||
| 171 | path_put(&path); | ||
| 172 | return result; | ||
| 173 | } | ||
| 174 | |||
| 175 | void devpts_release(struct pts_fs_info *fsi) | ||
| 176 | { | ||
| 177 | deactivate_super(fsi->sb); | ||
| 149 | } | 178 | } |
| 150 | 179 | ||
| 151 | #define PARSE_MOUNT 0 | 180 | #define PARSE_MOUNT 0 |
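devpts_acquire() resolves the pts instance from the opener's own mount: if the file does not already sit on a devpts superblock, it looks for a "pts" directory next to it and insists that path is the root of a devpts mount. From userspace the analogous question ("is this path a devpts mount?") can be asked with statfs(); the snippet below is an illustration of that check, not the kernel logic:

    #include <stdio.h>
    #include <sys/vfs.h>

    #ifndef DEVPTS_SUPER_MAGIC
    #define DEVPTS_SUPER_MAGIC 0x1cd1      /* from linux/magic.h */
    #endif

    static int is_devpts(const char *path)
    {
            struct statfs st;

            if (statfs(path, &st) != 0)
                    return 0;
            return st.f_type == DEVPTS_SUPER_MAGIC;
    }

    int main(void)
    {
            /* typically 1 for /dev/pts on a running system, 0 for /tmp */
            printf("/dev/pts: %d\n", is_devpts("/dev/pts"));
            printf("/tmp:     %d\n", is_devpts("/tmp"));
            return 0;
    }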
| @@ -154,9 +183,7 @@ static inline struct super_block *pts_sb_from_inode(struct inode *inode) | |||
| 154 | /* | 183 | /* |
| 155 | * parse_mount_options(): | 184 | * parse_mount_options(): |
| 156 | * Set @opts to mount options specified in @data. If an option is not | 185 | * Set @opts to mount options specified in @data. If an option is not |
| 157 | * specified in @data, set it to its default value. The exception is | 186 | * specified in @data, set it to its default value. |
| 158 | * 'newinstance' option which can only be set/cleared on a mount (i.e. | ||
| 159 | * cannot be changed during remount). | ||
| 160 | * | 187 | * |
| 161 | * Note: @data may be NULL (in which case all options are set to default). | 188 | * Note: @data may be NULL (in which case all options are set to default). |
| 162 | */ | 189 | */ |
| @@ -174,9 +201,12 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts) | |||
| 174 | opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; | 201 | opts->ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; |
| 175 | opts->max = NR_UNIX98_PTY_MAX; | 202 | opts->max = NR_UNIX98_PTY_MAX; |
| 176 | 203 | ||
| 177 | /* newinstance makes sense only on initial mount */ | 204 | /* Only allow instances mounted from the initial mount |
| 205 | * namespace to tap the reserve pool of ptys. | ||
| 206 | */ | ||
| 178 | if (op == PARSE_MOUNT) | 207 | if (op == PARSE_MOUNT) |
| 179 | opts->newinstance = 0; | 208 | opts->reserve = |
| 209 | (current->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns); | ||
| 180 | 210 | ||
| 181 | while ((p = strsep(&data, ",")) != NULL) { | 211 | while ((p = strsep(&data, ",")) != NULL) { |
| 182 | substring_t args[MAX_OPT_ARGS]; | 212 | substring_t args[MAX_OPT_ARGS]; |
| @@ -211,16 +241,12 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts) | |||
| 211 | return -EINVAL; | 241 | return -EINVAL; |
| 212 | opts->mode = option & S_IALLUGO; | 242 | opts->mode = option & S_IALLUGO; |
| 213 | break; | 243 | break; |
| 214 | #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES | ||
| 215 | case Opt_ptmxmode: | 244 | case Opt_ptmxmode: |
| 216 | if (match_octal(&args[0], &option)) | 245 | if (match_octal(&args[0], &option)) |
| 217 | return -EINVAL; | 246 | return -EINVAL; |
| 218 | opts->ptmxmode = option & S_IALLUGO; | 247 | opts->ptmxmode = option & S_IALLUGO; |
| 219 | break; | 248 | break; |
| 220 | case Opt_newinstance: | 249 | case Opt_newinstance: |
| 221 | /* newinstance makes sense only on initial mount */ | ||
| 222 | if (op == PARSE_MOUNT) | ||
| 223 | opts->newinstance = 1; | ||
| 224 | break; | 250 | break; |
| 225 | case Opt_max: | 251 | case Opt_max: |
| 226 | if (match_int(&args[0], &option) || | 252 | if (match_int(&args[0], &option) || |
| @@ -228,7 +254,6 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts) | |||
| 228 | return -EINVAL; | 254 | return -EINVAL; |
| 229 | opts->max = option; | 255 | opts->max = option; |
| 230 | break; | 256 | break; |
| 231 | #endif | ||
| 232 | default: | 257 | default: |
| 233 | pr_err("called with bogus options\n"); | 258 | pr_err("called with bogus options\n"); |
| 234 | return -EINVAL; | 259 | return -EINVAL; |
| @@ -238,7 +263,6 @@ static int parse_mount_options(char *data, int op, struct pts_mount_opts *opts) | |||
| 238 | return 0; | 263 | return 0; |
| 239 | } | 264 | } |
| 240 | 265 | ||
| 241 | #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES | ||
| 242 | static int mknod_ptmx(struct super_block *sb) | 266 | static int mknod_ptmx(struct super_block *sb) |
| 243 | { | 267 | { |
| 244 | int mode; | 268 | int mode; |
| @@ -305,12 +329,6 @@ static void update_ptmx_mode(struct pts_fs_info *fsi) | |||
| 305 | inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode; | 329 | inode->i_mode = S_IFCHR|fsi->mount_opts.ptmxmode; |
| 306 | } | 330 | } |
| 307 | } | 331 | } |
| 308 | #else | ||
| 309 | static inline void update_ptmx_mode(struct pts_fs_info *fsi) | ||
| 310 | { | ||
| 311 | return; | ||
| 312 | } | ||
| 313 | #endif | ||
| 314 | 332 | ||
| 315 | static int devpts_remount(struct super_block *sb, int *flags, char *data) | 333 | static int devpts_remount(struct super_block *sb, int *flags, char *data) |
| 316 | { | 334 | { |
| @@ -344,11 +362,9 @@ static int devpts_show_options(struct seq_file *seq, struct dentry *root) | |||
| 344 | seq_printf(seq, ",gid=%u", | 362 | seq_printf(seq, ",gid=%u", |
| 345 | from_kgid_munged(&init_user_ns, opts->gid)); | 363 | from_kgid_munged(&init_user_ns, opts->gid)); |
| 346 | seq_printf(seq, ",mode=%03o", opts->mode); | 364 | seq_printf(seq, ",mode=%03o", opts->mode); |
| 347 | #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES | ||
| 348 | seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode); | 365 | seq_printf(seq, ",ptmxmode=%03o", opts->ptmxmode); |
| 349 | if (opts->max < NR_UNIX98_PTY_MAX) | 366 | if (opts->max < NR_UNIX98_PTY_MAX) |
| 350 | seq_printf(seq, ",max=%d", opts->max); | 367 | seq_printf(seq, ",max=%d", opts->max); |
| 351 | #endif | ||
| 352 | 368 | ||
| 353 | return 0; | 369 | return 0; |
| 354 | } | 370 | } |
| @@ -410,40 +426,11 @@ fail: | |||
| 410 | return -ENOMEM; | 426 | return -ENOMEM; |
| 411 | } | 427 | } |
| 412 | 428 | ||
| 413 | #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES | ||
| 414 | static int compare_init_pts_sb(struct super_block *s, void *p) | ||
| 415 | { | ||
| 416 | if (devpts_mnt) | ||
| 417 | return devpts_mnt->mnt_sb == s; | ||
| 418 | return 0; | ||
| 419 | } | ||
| 420 | |||
| 421 | /* | 429 | /* |
| 422 | * devpts_mount() | 430 | * devpts_mount() |
| 423 | * | 431 | * |
| 424 | * If the '-o newinstance' mount option was specified, mount a new | 432 | * Mount a new (private) instance of devpts. PTYs created in this |
| 425 | * (private) instance of devpts. PTYs created in this instance are | 433 | * instance are independent of the PTYs in other devpts instances. |
| 426 | * independent of the PTYs in other devpts instances. | ||
| 427 | * | ||
| 428 | * If the '-o newinstance' option was not specified, mount/remount the | ||
| 429 | * initial kernel mount of devpts. This type of mount gives the | ||
| 430 | * legacy, single-instance semantics. | ||
| 431 | * | ||
| 432 | * The 'newinstance' option is needed to support multiple namespace | ||
| 433 | * semantics in devpts while preserving backward compatibility of the | ||
| 434 | * current 'single-namespace' semantics. i.e all mounts of devpts | ||
| 435 | * without the 'newinstance' mount option should bind to the initial | ||
| 436 | * kernel mount, like mount_single(). | ||
| 437 | * | ||
| 438 | * Mounts with 'newinstance' option create a new, private namespace. | ||
| 439 | * | ||
| 440 | * NOTE: | ||
| 441 | * | ||
| 442 | * For single-mount semantics, devpts cannot use mount_single(), | ||
| 443 | * because mount_single()/sget() find and use the super-block from | ||
| 444 | * the most recent mount of devpts. But that recent mount may be a | ||
| 445 | * 'newinstance' mount and mount_single() would pick the newinstance | ||
| 446 | * super-block instead of the initial super-block. | ||
| 447 | */ | 434 | */ |
| 448 | static struct dentry *devpts_mount(struct file_system_type *fs_type, | 435 | static struct dentry *devpts_mount(struct file_system_type *fs_type, |
| 449 | int flags, const char *dev_name, void *data) | 436 | int flags, const char *dev_name, void *data) |
| @@ -456,18 +443,7 @@ static struct dentry *devpts_mount(struct file_system_type *fs_type, | |||
| 456 | if (error) | 443 | if (error) |
| 457 | return ERR_PTR(error); | 444 | return ERR_PTR(error); |
| 458 | 445 | ||
| 459 | /* Require newinstance for all user namespace mounts to ensure | 446 | s = sget(fs_type, NULL, set_anon_super, flags, NULL); |
| 460 | * the mount options are not changed. | ||
| 461 | */ | ||
| 462 | if ((current_user_ns() != &init_user_ns) && !opts.newinstance) | ||
| 463 | return ERR_PTR(-EINVAL); | ||
| 464 | |||
| 465 | if (opts.newinstance) | ||
| 466 | s = sget(fs_type, NULL, set_anon_super, flags, NULL); | ||
| 467 | else | ||
| 468 | s = sget(fs_type, compare_init_pts_sb, set_anon_super, flags, | ||
| 469 | NULL); | ||
| 470 | |||
| 471 | if (IS_ERR(s)) | 447 | if (IS_ERR(s)) |
| 472 | return ERR_CAST(s); | 448 | return ERR_CAST(s); |
| 473 | 449 | ||
| @@ -491,18 +467,6 @@ out_undo_sget: | |||
| 491 | return ERR_PTR(error); | 467 | return ERR_PTR(error); |
| 492 | } | 468 | } |
| 493 | 469 | ||
| 494 | #else | ||
| 495 | /* | ||
| 496 | * This supports only the legacy single-instance semantics (no | ||
| 497 | * multiple-instance semantics) | ||
| 498 | */ | ||
| 499 | static struct dentry *devpts_mount(struct file_system_type *fs_type, int flags, | ||
| 500 | const char *dev_name, void *data) | ||
| 501 | { | ||
| 502 | return mount_single(fs_type, flags, data, devpts_fill_super); | ||
| 503 | } | ||
| 504 | #endif | ||
| 505 | |||
| 506 | static void devpts_kill_sb(struct super_block *sb) | 470 | static void devpts_kill_sb(struct super_block *sb) |
| 507 | { | 471 | { |
| 508 | struct pts_fs_info *fsi = DEVPTS_SB(sb); | 472 | struct pts_fs_info *fsi = DEVPTS_SB(sb); |
| @@ -516,9 +480,7 @@ static struct file_system_type devpts_fs_type = { | |||
| 516 | .name = "devpts", | 480 | .name = "devpts", |
| 517 | .mount = devpts_mount, | 481 | .mount = devpts_mount, |
| 518 | .kill_sb = devpts_kill_sb, | 482 | .kill_sb = devpts_kill_sb, |
| 519 | #ifdef CONFIG_DEVPTS_MULTIPLE_INSTANCES | ||
| 520 | .fs_flags = FS_USERNS_MOUNT | FS_USERNS_DEV_MOUNT, | 483 | .fs_flags = FS_USERNS_MOUNT | FS_USERNS_DEV_MOUNT, |
| 521 | #endif | ||
| 522 | }; | 484 | }; |
| 523 | 485 | ||
| 524 | /* | 486 | /* |
| @@ -531,16 +493,13 @@ int devpts_new_index(struct pts_fs_info *fsi) | |||
| 531 | int index; | 493 | int index; |
| 532 | int ida_ret; | 494 | int ida_ret; |
| 533 | 495 | ||
| 534 | if (!fsi) | ||
| 535 | return -ENODEV; | ||
| 536 | |||
| 537 | retry: | 496 | retry: |
| 538 | if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) | 497 | if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) |
| 539 | return -ENOMEM; | 498 | return -ENOMEM; |
| 540 | 499 | ||
| 541 | mutex_lock(&allocated_ptys_lock); | 500 | mutex_lock(&allocated_ptys_lock); |
| 542 | if (pty_count >= pty_limit - | 501 | if (pty_count >= (pty_limit - |
| 543 | (fsi->mount_opts.newinstance ? pty_reserve : 0)) { | 502 | (fsi->mount_opts.reserve ? 0 : pty_reserve))) { |
| 544 | mutex_unlock(&allocated_ptys_lock); | 503 | mutex_unlock(&allocated_ptys_lock); |
| 545 | return -ENOSPC; | 504 | return -ENOSPC; |
| 546 | } | 505 | } |
| @@ -571,30 +530,6 @@ void devpts_kill_index(struct pts_fs_info *fsi, int idx) | |||
| 571 | mutex_unlock(&allocated_ptys_lock); | 530 | mutex_unlock(&allocated_ptys_lock); |
| 572 | } | 531 | } |
| 573 | 532 | ||
| 574 | /* | ||
| 575 | * pty code needs to hold extra references in case of last /dev/tty close | ||
| 576 | */ | ||
| 577 | struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file) | ||
| 578 | { | ||
| 579 | struct super_block *sb; | ||
| 580 | struct pts_fs_info *fsi; | ||
| 581 | |||
| 582 | sb = pts_sb_from_inode(ptmx_inode); | ||
| 583 | if (!sb) | ||
| 584 | return NULL; | ||
| 585 | fsi = DEVPTS_SB(sb); | ||
| 586 | if (!fsi) | ||
| 587 | return NULL; | ||
| 588 | |||
| 589 | atomic_inc(&sb->s_active); | ||
| 590 | return fsi; | ||
| 591 | } | ||
| 592 | |||
| 593 | void devpts_put_ref(struct pts_fs_info *fsi) | ||
| 594 | { | ||
| 595 | deactivate_super(fsi->sb); | ||
| 596 | } | ||
| 597 | |||
| 598 | /** | 533 | /** |
| 599 | * devpts_pty_new -- create a new inode in /dev/pts/ | 534 | * devpts_pty_new -- create a new inode in /dev/pts/ |
| 600 | * @ptmx_inode: inode of the master | 535 | * @ptmx_inode: inode of the master |
| @@ -607,16 +542,12 @@ void devpts_put_ref(struct pts_fs_info *fsi) | |||
| 607 | struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv) | 542 | struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv) |
| 608 | { | 543 | { |
| 609 | struct dentry *dentry; | 544 | struct dentry *dentry; |
| 610 | struct super_block *sb; | 545 | struct super_block *sb = fsi->sb; |
| 611 | struct inode *inode; | 546 | struct inode *inode; |
| 612 | struct dentry *root; | 547 | struct dentry *root; |
| 613 | struct pts_mount_opts *opts; | 548 | struct pts_mount_opts *opts; |
| 614 | char s[12]; | 549 | char s[12]; |
| 615 | 550 | ||
| 616 | if (!fsi) | ||
| 617 | return ERR_PTR(-ENODEV); | ||
| 618 | |||
| 619 | sb = fsi->sb; | ||
| 620 | root = sb->s_root; | 551 | root = sb->s_root; |
| 621 | opts = &fsi->mount_opts; | 552 | opts = &fsi->mount_opts; |
| 622 | 553 | ||
| @@ -676,20 +607,8 @@ void devpts_pty_kill(struct dentry *dentry) | |||
| 676 | static int __init init_devpts_fs(void) | 607 | static int __init init_devpts_fs(void) |
| 677 | { | 608 | { |
| 678 | int err = register_filesystem(&devpts_fs_type); | 609 | int err = register_filesystem(&devpts_fs_type); |
| 679 | struct ctl_table_header *table; | ||
| 680 | |||
| 681 | if (!err) { | 610 | if (!err) { |
| 682 | struct vfsmount *mnt; | 611 | register_sysctl_table(pty_root_table); |
| 683 | |||
| 684 | table = register_sysctl_table(pty_root_table); | ||
| 685 | mnt = kern_mount(&devpts_fs_type); | ||
| 686 | if (IS_ERR(mnt)) { | ||
| 687 | err = PTR_ERR(mnt); | ||
| 688 | unregister_filesystem(&devpts_fs_type); | ||
| 689 | unregister_sysctl_table(table); | ||
| 690 | } else { | ||
| 691 | devpts_mnt = mnt; | ||
| 692 | } | ||
| 693 | } | 612 | } |
| 694 | return err; | 613 | return err; |
| 695 | } | 614 | } |
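
The devpts hunks above drop the CONFIG_DEVPTS_MULTIPLE_INSTANCES split, make every mount a private instance via a plain sget(), and rework the limit test in devpts_new_index() so that only mounts without the new mount_opts.reserve flag must leave pty_reserve slots free. A minimal user-space sketch of that arithmetic, assuming the same variable meanings (names below are illustrative, not the kernel symbols):

#include <stdbool.h>

/* A mount allowed to dip into the reserved pool ("reserve") sees the
 * full limit; any other mount must leave pty_reserve ptys unallocated. */
static bool pty_slot_available(int pty_count, int pty_limit,
                               int pty_reserve, bool reserve)
{
        int effective_limit = pty_limit - (reserve ? 0 : pty_reserve);

        return pty_count < effective_limit;   /* else -ENOSPC in the kernel */
}
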
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c index 866bb18efefe..e818f5ac7a26 100644 --- a/fs/ecryptfs/kthread.c +++ b/fs/ecryptfs/kthread.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 26 | #include <linux/wait.h> | 26 | #include <linux/wait.h> |
| 27 | #include <linux/mount.h> | 27 | #include <linux/mount.h> |
| 28 | #include <linux/file.h> | ||
| 28 | #include "ecryptfs_kernel.h" | 29 | #include "ecryptfs_kernel.h" |
| 29 | 30 | ||
| 30 | struct ecryptfs_open_req { | 31 | struct ecryptfs_open_req { |
| @@ -147,7 +148,7 @@ int ecryptfs_privileged_open(struct file **lower_file, | |||
| 147 | flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR; | 148 | flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR; |
| 148 | (*lower_file) = dentry_open(&req.path, flags, cred); | 149 | (*lower_file) = dentry_open(&req.path, flags, cred); |
| 149 | if (!IS_ERR(*lower_file)) | 150 | if (!IS_ERR(*lower_file)) |
| 150 | goto out; | 151 | goto have_file; |
| 151 | if ((flags & O_ACCMODE) == O_RDONLY) { | 152 | if ((flags & O_ACCMODE) == O_RDONLY) { |
| 152 | rc = PTR_ERR((*lower_file)); | 153 | rc = PTR_ERR((*lower_file)); |
| 153 | goto out; | 154 | goto out; |
| @@ -165,8 +166,16 @@ int ecryptfs_privileged_open(struct file **lower_file, | |||
| 165 | mutex_unlock(&ecryptfs_kthread_ctl.mux); | 166 | mutex_unlock(&ecryptfs_kthread_ctl.mux); |
| 166 | wake_up(&ecryptfs_kthread_ctl.wait); | 167 | wake_up(&ecryptfs_kthread_ctl.wait); |
| 167 | wait_for_completion(&req.done); | 168 | wait_for_completion(&req.done); |
| 168 | if (IS_ERR(*lower_file)) | 169 | if (IS_ERR(*lower_file)) { |
| 169 | rc = PTR_ERR(*lower_file); | 170 | rc = PTR_ERR(*lower_file); |
| 171 | goto out; | ||
| 172 | } | ||
| 173 | have_file: | ||
| 174 | if ((*lower_file)->f_op->mmap == NULL) { | ||
| 175 | fput(*lower_file); | ||
| 176 | *lower_file = NULL; | ||
| 177 | rc = -EMEDIUMTYPE; | ||
| 178 | } | ||
| 170 | out: | 179 | out: |
| 171 | return rc; | 180 | return rc; |
| 172 | } | 181 | } |
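
The ecryptfs change above funnels both open paths through the new have_file: label and rejects lower files whose f_op lacks ->mmap, returning -EMEDIUMTYPE. A hedged user-space analogue of that sanity check, probing mmap() directly since file_operations can only be inspected inside the kernel (EMEDIUMTYPE is Linux-specific):

#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

/* Open a file and verify its backing object can actually be memory-mapped,
 * mirroring the intent of the f_op->mmap check; reject it otherwise with
 * the same errno the kernel patch uses. Returns an fd or a negative errno. */
static int open_mappable(const char *path)
{
        int fd = open(path, O_RDONLY);
        void *p;

        if (fd < 0)
                return -errno;
        p = mmap(NULL, 1, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED) {
                close(fd);
                return -EMEDIUMTYPE;
        }
        munmap(p, 1);
        return fd;
}
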
diff --git a/fs/fscache/page.c b/fs/fscache/page.c index 3078b679fcd1..c8c4f79c7ce1 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c | |||
| @@ -887,6 +887,8 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie) | |||
| 887 | put_page(results[i]); | 887 | put_page(results[i]); |
| 888 | } | 888 | } |
| 889 | 889 | ||
| 890 | wake_up_bit(&cookie->flags, 0); | ||
| 891 | |||
| 890 | _leave(""); | 892 | _leave(""); |
| 891 | } | 893 | } |
| 892 | 894 | ||
diff --git a/fs/internal.h b/fs/internal.h index b71deeecea17..f57ced528cde 100644 --- a/fs/internal.h +++ b/fs/internal.h | |||
| @@ -130,6 +130,7 @@ extern int invalidate_inodes(struct super_block *, bool); | |||
| 130 | extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); | 130 | extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); |
| 131 | extern int d_set_mounted(struct dentry *dentry); | 131 | extern int d_set_mounted(struct dentry *dentry); |
| 132 | extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc); | 132 | extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc); |
| 133 | extern struct dentry *d_alloc_cursor(struct dentry *); | ||
| 133 | 134 | ||
| 134 | /* | 135 | /* |
| 135 | * read_write.c | 136 | * read_write.c |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index b31852f76f46..e3ca4b4cac84 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
| @@ -2329,18 +2329,10 @@ void *jbd2_alloc(size_t size, gfp_t flags) | |||
| 2329 | 2329 | ||
| 2330 | BUG_ON(size & (size-1)); /* Must be a power of 2 */ | 2330 | BUG_ON(size & (size-1)); /* Must be a power of 2 */ |
| 2331 | 2331 | ||
| 2332 | flags |= __GFP_REPEAT; | 2332 | if (size < PAGE_SIZE) |
| 2333 | if (size == PAGE_SIZE) | ||
| 2334 | ptr = (void *)__get_free_pages(flags, 0); | ||
| 2335 | else if (size > PAGE_SIZE) { | ||
| 2336 | int order = get_order(size); | ||
| 2337 | |||
| 2338 | if (order < 3) | ||
| 2339 | ptr = (void *)__get_free_pages(flags, order); | ||
| 2340 | else | ||
| 2341 | ptr = vmalloc(size); | ||
| 2342 | } else | ||
| 2343 | ptr = kmem_cache_alloc(get_slab(size), flags); | 2333 | ptr = kmem_cache_alloc(get_slab(size), flags); |
| 2334 | else | ||
| 2335 | ptr = (void *)__get_free_pages(flags, get_order(size)); | ||
| 2344 | 2336 | ||
| 2345 | /* Check alignment; SLUB has gotten this wrong in the past, | 2337 | /* Check alignment; SLUB has gotten this wrong in the past, |
| 2346 | * and this can lead to user data corruption! */ | 2338 | * and this can lead to user data corruption! */ |
| @@ -2351,20 +2343,10 @@ void *jbd2_alloc(size_t size, gfp_t flags) | |||
| 2351 | 2343 | ||
| 2352 | void jbd2_free(void *ptr, size_t size) | 2344 | void jbd2_free(void *ptr, size_t size) |
| 2353 | { | 2345 | { |
| 2354 | if (size == PAGE_SIZE) { | 2346 | if (size < PAGE_SIZE) |
| 2355 | free_pages((unsigned long)ptr, 0); | 2347 | kmem_cache_free(get_slab(size), ptr); |
| 2356 | return; | 2348 | else |
| 2357 | } | 2349 | free_pages((unsigned long)ptr, get_order(size)); |
| 2358 | if (size > PAGE_SIZE) { | ||
| 2359 | int order = get_order(size); | ||
| 2360 | |||
| 2361 | if (order < 3) | ||
| 2362 | free_pages((unsigned long)ptr, order); | ||
| 2363 | else | ||
| 2364 | vfree(ptr); | ||
| 2365 | return; | ||
| 2366 | } | ||
| 2367 | kmem_cache_free(get_slab(size), ptr); | ||
| 2368 | }; | 2350 | }; |
| 2369 | 2351 | ||
| 2370 | /* | 2352 | /* |
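
The jbd2 hunks collapse the old three-way allocation strategy (slab / pages / vmalloc) into two cases: sub-page sizes come from the slab, everything else from whole pages. A rough user-space analogue of the same dispatch, with malloc standing in for kmem_cache and aligned_alloc for __get_free_pages:

#include <stdlib.h>
#include <unistd.h>

static void *buf_alloc(size_t size)
{
        size_t page = (size_t)sysconf(_SC_PAGESIZE);

        if (size < page)
                return malloc(size);            /* slab-like path */
        /* round up to whole pages; the kernel instead rounds to a
         * power-of-two number of pages via get_order() */
        return aligned_alloc(page, (size + page - 1) / page * page);
}

static void buf_free(void *ptr)
{
        free(ptr);      /* both allocation paths above free cleanly here */
}
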
diff --git a/fs/libfs.c b/fs/libfs.c index 3db2721144c2..cedeacbae303 100644 --- a/fs/libfs.c +++ b/fs/libfs.c | |||
| @@ -71,9 +71,7 @@ EXPORT_SYMBOL(simple_lookup); | |||
| 71 | 71 | ||
| 72 | int dcache_dir_open(struct inode *inode, struct file *file) | 72 | int dcache_dir_open(struct inode *inode, struct file *file) |
| 73 | { | 73 | { |
| 74 | static struct qstr cursor_name = QSTR_INIT(".", 1); | 74 | file->private_data = d_alloc_cursor(file->f_path.dentry); |
| 75 | |||
| 76 | file->private_data = d_alloc(file->f_path.dentry, &cursor_name); | ||
| 77 | 75 | ||
| 78 | return file->private_data ? 0 : -ENOMEM; | 76 | return file->private_data ? 0 : -ENOMEM; |
| 79 | } | 77 | } |
diff --git a/fs/namei.c b/fs/namei.c index 4c4f95ac8aa5..70580ab1445c 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
| @@ -1416,21 +1416,28 @@ static void follow_mount(struct path *path) | |||
| 1416 | } | 1416 | } |
| 1417 | } | 1417 | } |
| 1418 | 1418 | ||
| 1419 | static int path_parent_directory(struct path *path) | ||
| 1420 | { | ||
| 1421 | struct dentry *old = path->dentry; | ||
| 1422 | /* rare case of legitimate dget_parent()... */ | ||
| 1423 | path->dentry = dget_parent(path->dentry); | ||
| 1424 | dput(old); | ||
| 1425 | if (unlikely(!path_connected(path))) | ||
| 1426 | return -ENOENT; | ||
| 1427 | return 0; | ||
| 1428 | } | ||
| 1429 | |||
| 1419 | static int follow_dotdot(struct nameidata *nd) | 1430 | static int follow_dotdot(struct nameidata *nd) |
| 1420 | { | 1431 | { |
| 1421 | while(1) { | 1432 | while(1) { |
| 1422 | struct dentry *old = nd->path.dentry; | ||
| 1423 | |||
| 1424 | if (nd->path.dentry == nd->root.dentry && | 1433 | if (nd->path.dentry == nd->root.dentry && |
| 1425 | nd->path.mnt == nd->root.mnt) { | 1434 | nd->path.mnt == nd->root.mnt) { |
| 1426 | break; | 1435 | break; |
| 1427 | } | 1436 | } |
| 1428 | if (nd->path.dentry != nd->path.mnt->mnt_root) { | 1437 | if (nd->path.dentry != nd->path.mnt->mnt_root) { |
| 1429 | /* rare case of legitimate dget_parent()... */ | 1438 | int ret = path_parent_directory(&nd->path); |
| 1430 | nd->path.dentry = dget_parent(nd->path.dentry); | 1439 | if (ret) |
| 1431 | dput(old); | 1440 | return ret; |
| 1432 | if (unlikely(!path_connected(&nd->path))) | ||
| 1433 | return -ENOENT; | ||
| 1434 | break; | 1441 | break; |
| 1435 | } | 1442 | } |
| 1436 | if (!follow_up(&nd->path)) | 1443 | if (!follow_up(&nd->path)) |
| @@ -2514,6 +2521,34 @@ struct dentry *lookup_one_len_unlocked(const char *name, | |||
| 2514 | } | 2521 | } |
| 2515 | EXPORT_SYMBOL(lookup_one_len_unlocked); | 2522 | EXPORT_SYMBOL(lookup_one_len_unlocked); |
| 2516 | 2523 | ||
| 2524 | #ifdef CONFIG_UNIX98_PTYS | ||
| 2525 | int path_pts(struct path *path) | ||
| 2526 | { | ||
| 2527 | /* Find something mounted on "pts" in the same directory as | ||
| 2528 | * the input path. | ||
| 2529 | */ | ||
| 2530 | struct dentry *child, *parent; | ||
| 2531 | struct qstr this; | ||
| 2532 | int ret; | ||
| 2533 | |||
| 2534 | ret = path_parent_directory(path); | ||
| 2535 | if (ret) | ||
| 2536 | return ret; | ||
| 2537 | |||
| 2538 | parent = path->dentry; | ||
| 2539 | this.name = "pts"; | ||
| 2540 | this.len = 3; | ||
| 2541 | child = d_hash_and_lookup(parent, &this); | ||
| 2542 | if (!child) | ||
| 2543 | return -ENOENT; | ||
| 2544 | |||
| 2545 | path->dentry = child; | ||
| 2546 | dput(parent); | ||
| 2547 | follow_mount(path); | ||
| 2548 | return 0; | ||
| 2549 | } | ||
| 2550 | #endif | ||
| 2551 | |||
| 2517 | int user_path_at_empty(int dfd, const char __user *name, unsigned flags, | 2552 | int user_path_at_empty(int dfd, const char __user *name, unsigned flags, |
| 2518 | struct path *path, int *empty) | 2553 | struct path *path, int *empty) |
| 2519 | { | 2554 | { |
| @@ -2995,9 +3030,13 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry, | |||
| 2995 | } | 3030 | } |
| 2996 | if (*opened & FILE_CREATED) | 3031 | if (*opened & FILE_CREATED) |
| 2997 | fsnotify_create(dir, dentry); | 3032 | fsnotify_create(dir, dentry); |
| 2998 | path->dentry = dentry; | 3033 | if (unlikely(d_is_negative(dentry))) { |
| 2999 | path->mnt = nd->path.mnt; | 3034 | error = -ENOENT; |
| 3000 | return 1; | 3035 | } else { |
| 3036 | path->dentry = dentry; | ||
| 3037 | path->mnt = nd->path.mnt; | ||
| 3038 | return 1; | ||
| 3039 | } | ||
| 3001 | } | 3040 | } |
| 3002 | } | 3041 | } |
| 3003 | dput(dentry); | 3042 | dput(dentry); |
| @@ -3166,9 +3205,7 @@ static int do_last(struct nameidata *nd, | |||
| 3166 | int acc_mode = op->acc_mode; | 3205 | int acc_mode = op->acc_mode; |
| 3167 | unsigned seq; | 3206 | unsigned seq; |
| 3168 | struct inode *inode; | 3207 | struct inode *inode; |
| 3169 | struct path save_parent = { .dentry = NULL, .mnt = NULL }; | ||
| 3170 | struct path path; | 3208 | struct path path; |
| 3171 | bool retried = false; | ||
| 3172 | int error; | 3209 | int error; |
| 3173 | 3210 | ||
| 3174 | nd->flags &= ~LOOKUP_PARENT; | 3211 | nd->flags &= ~LOOKUP_PARENT; |
| @@ -3211,7 +3248,6 @@ static int do_last(struct nameidata *nd, | |||
| 3211 | return -EISDIR; | 3248 | return -EISDIR; |
| 3212 | } | 3249 | } |
| 3213 | 3250 | ||
| 3214 | retry_lookup: | ||
| 3215 | if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) { | 3251 | if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) { |
| 3216 | error = mnt_want_write(nd->path.mnt); | 3252 | error = mnt_want_write(nd->path.mnt); |
| 3217 | if (!error) | 3253 | if (!error) |
| @@ -3263,6 +3299,10 @@ retry_lookup: | |||
| 3263 | got_write = false; | 3299 | got_write = false; |
| 3264 | } | 3300 | } |
| 3265 | 3301 | ||
| 3302 | error = follow_managed(&path, nd); | ||
| 3303 | if (unlikely(error < 0)) | ||
| 3304 | return error; | ||
| 3305 | |||
| 3266 | if (unlikely(d_is_negative(path.dentry))) { | 3306 | if (unlikely(d_is_negative(path.dentry))) { |
| 3267 | path_to_nameidata(&path, nd); | 3307 | path_to_nameidata(&path, nd); |
| 3268 | return -ENOENT; | 3308 | return -ENOENT; |
| @@ -3278,10 +3318,6 @@ retry_lookup: | |||
| 3278 | return -EEXIST; | 3318 | return -EEXIST; |
| 3279 | } | 3319 | } |
| 3280 | 3320 | ||
| 3281 | error = follow_managed(&path, nd); | ||
| 3282 | if (unlikely(error < 0)) | ||
| 3283 | return error; | ||
| 3284 | |||
| 3285 | seq = 0; /* out of RCU mode, so the value doesn't matter */ | 3321 | seq = 0; /* out of RCU mode, so the value doesn't matter */ |
| 3286 | inode = d_backing_inode(path.dentry); | 3322 | inode = d_backing_inode(path.dentry); |
| 3287 | finish_lookup: | 3323 | finish_lookup: |
| @@ -3292,23 +3328,14 @@ finish_lookup: | |||
| 3292 | if (unlikely(error)) | 3328 | if (unlikely(error)) |
| 3293 | return error; | 3329 | return error; |
| 3294 | 3330 | ||
| 3295 | if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) { | 3331 | path_to_nameidata(&path, nd); |
| 3296 | path_to_nameidata(&path, nd); | ||
| 3297 | } else { | ||
| 3298 | save_parent.dentry = nd->path.dentry; | ||
| 3299 | save_parent.mnt = mntget(path.mnt); | ||
| 3300 | nd->path.dentry = path.dentry; | ||
| 3301 | |||
| 3302 | } | ||
| 3303 | nd->inode = inode; | 3332 | nd->inode = inode; |
| 3304 | nd->seq = seq; | 3333 | nd->seq = seq; |
| 3305 | /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */ | 3334 | /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */ |
| 3306 | finish_open: | 3335 | finish_open: |
| 3307 | error = complete_walk(nd); | 3336 | error = complete_walk(nd); |
| 3308 | if (error) { | 3337 | if (error) |
| 3309 | path_put(&save_parent); | ||
| 3310 | return error; | 3338 | return error; |
| 3311 | } | ||
| 3312 | audit_inode(nd->name, nd->path.dentry, 0); | 3339 | audit_inode(nd->name, nd->path.dentry, 0); |
| 3313 | error = -EISDIR; | 3340 | error = -EISDIR; |
| 3314 | if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry)) | 3341 | if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry)) |
| @@ -3331,13 +3358,9 @@ finish_open_created: | |||
| 3331 | goto out; | 3358 | goto out; |
| 3332 | BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ | 3359 | BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ |
| 3333 | error = vfs_open(&nd->path, file, current_cred()); | 3360 | error = vfs_open(&nd->path, file, current_cred()); |
| 3334 | if (!error) { | 3361 | if (error) |
| 3335 | *opened |= FILE_OPENED; | ||
| 3336 | } else { | ||
| 3337 | if (error == -EOPENSTALE) | ||
| 3338 | goto stale_open; | ||
| 3339 | goto out; | 3362 | goto out; |
| 3340 | } | 3363 | *opened |= FILE_OPENED; |
| 3341 | opened: | 3364 | opened: |
| 3342 | error = open_check_o_direct(file); | 3365 | error = open_check_o_direct(file); |
| 3343 | if (!error) | 3366 | if (!error) |
| @@ -3353,26 +3376,7 @@ out: | |||
| 3353 | } | 3376 | } |
| 3354 | if (got_write) | 3377 | if (got_write) |
| 3355 | mnt_drop_write(nd->path.mnt); | 3378 | mnt_drop_write(nd->path.mnt); |
| 3356 | path_put(&save_parent); | ||
| 3357 | return error; | 3379 | return error; |
| 3358 | |||
| 3359 | stale_open: | ||
| 3360 | /* If no saved parent or already retried then can't retry */ | ||
| 3361 | if (!save_parent.dentry || retried) | ||
| 3362 | goto out; | ||
| 3363 | |||
| 3364 | BUG_ON(save_parent.dentry != dir); | ||
| 3365 | path_put(&nd->path); | ||
| 3366 | nd->path = save_parent; | ||
| 3367 | nd->inode = dir->d_inode; | ||
| 3368 | save_parent.mnt = NULL; | ||
| 3369 | save_parent.dentry = NULL; | ||
| 3370 | if (got_write) { | ||
| 3371 | mnt_drop_write(nd->path.mnt); | ||
| 3372 | got_write = false; | ||
| 3373 | } | ||
| 3374 | retried = true; | ||
| 3375 | goto retry_lookup; | ||
| 3376 | } | 3380 | } |
| 3377 | 3381 | ||
| 3378 | static int do_tmpfile(struct nameidata *nd, unsigned flags, | 3382 | static int do_tmpfile(struct nameidata *nd, unsigned flags, |
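
The namei.c changes factor the dget_parent() step into path_parent_directory() and add path_pts(), which steps up one level and looks for a "pts" entry mounted beside the starting path (as /dev/pts sits beside /dev/ptmx). A hedged user-space sketch of the same idea, using plain path manipulation rather than dentry lookups:

#include <libgen.h>
#include <limits.h>
#include <stdio.h>
#include <sys/stat.h>

/* Resolve the "pts" directory that lives next to the given path,
 * e.g. "/dev/ptmx" -> "/dev/pts"; returns -1 if no such directory. */
static int find_sibling_pts(const char *path, char *out, size_t outlen)
{
        char tmp[PATH_MAX];
        struct stat st;

        snprintf(tmp, sizeof(tmp), "%s", path);
        snprintf(out, outlen, "%s/pts", dirname(tmp));
        if (stat(out, &st) != 0 || !S_ISDIR(st.st_mode))
                return -1;              /* -ENOENT in the kernel version */
        return 0;
}
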
diff --git a/fs/namespace.c b/fs/namespace.c index 4fb1691b4355..783004af5707 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
| @@ -2409,8 +2409,10 @@ static int do_new_mount(struct path *path, const char *fstype, int flags, | |||
| 2409 | mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV; | 2409 | mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV; |
| 2410 | } | 2410 | } |
| 2411 | if (type->fs_flags & FS_USERNS_VISIBLE) { | 2411 | if (type->fs_flags & FS_USERNS_VISIBLE) { |
| 2412 | if (!fs_fully_visible(type, &mnt_flags)) | 2412 | if (!fs_fully_visible(type, &mnt_flags)) { |
| 2413 | put_filesystem(type); | ||
| 2413 | return -EPERM; | 2414 | return -EPERM; |
| 2415 | } | ||
| 2414 | } | 2416 | } |
| 2415 | } | 2417 | } |
| 2416 | 2418 | ||
| @@ -3245,6 +3247,10 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags) | |||
| 3245 | if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC) | 3247 | if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC) |
| 3246 | mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC); | 3248 | mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC); |
| 3247 | 3249 | ||
| 3250 | /* Don't miss readonly hidden in the superblock flags */ | ||
| 3251 | if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY) | ||
| 3252 | mnt_flags |= MNT_LOCK_READONLY; | ||
| 3253 | |||
| 3248 | /* Verify the mount flags are equal to or more permissive | 3254 | /* Verify the mount flags are equal to or more permissive |
| 3249 | * than the proposed new mount. | 3255 | * than the proposed new mount. |
| 3250 | */ | 3256 | */ |
| @@ -3271,7 +3277,7 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags) | |||
| 3271 | list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { | 3277 | list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) { |
| 3272 | struct inode *inode = child->mnt_mountpoint->d_inode; | 3278 | struct inode *inode = child->mnt_mountpoint->d_inode; |
| 3273 | /* Only worry about locked mounts */ | 3279 | /* Only worry about locked mounts */ |
| 3274 | if (!(mnt_flags & MNT_LOCKED)) | 3280 | if (!(child->mnt.mnt_flags & MNT_LOCKED)) |
| 3275 | continue; | 3281 | continue; |
| 3276 | /* Is the directory permanently empty? */ | 3282 | if (!is_empty_dir_inode(inode)) |
| 3277 | if (!is_empty_dir_inode(inode)) | 3283 | if (!is_empty_dir_inode(inode)) |
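
The namespace.c hunks make fs_fully_visible() treat a read-only superblock as a locked read-only restriction and test MNT_LOCKED on each child mount rather than on the parent's flags. Behaviourally, an existing instance only qualifies if every restriction locked on it is also carried by the proposed mount; a hedged sketch of that subset test, with placeholder flag semantics rather than the kernel's per-flag checks:

#include <stdbool.h>

/* 'locked' holds the restriction bits locked on the existing mount,
 * 'proposed' the restriction bits of the new mount: the new mount may
 * only add restrictions, never drop locked ones. */
static bool locked_restrictions_carried(unsigned int locked,
                                        unsigned int proposed)
{
        return (locked & ~proposed) == 0;
}
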
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index e55b5242614d..31f3df193bdb 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c | |||
| @@ -290,7 +290,7 @@ out_free_buf: | |||
| 290 | return error; | 290 | return error; |
| 291 | } | 291 | } |
| 292 | 292 | ||
| 293 | #define NFSD_MDS_PR_KEY 0x0100000000000000 | 293 | #define NFSD_MDS_PR_KEY 0x0100000000000000ULL |
| 294 | 294 | ||
| 295 | /* | 295 | /* |
| 296 | * We use the client ID as a unique key for the reservations. | 296 | * We use the client ID as a unique key for the reservations. |
diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c index 1580ea6fd64d..d08cd88155c7 100644 --- a/fs/nfsd/nfs2acl.c +++ b/fs/nfsd/nfs2acl.c | |||
| @@ -104,22 +104,21 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp, | |||
| 104 | goto out; | 104 | goto out; |
| 105 | 105 | ||
| 106 | inode = d_inode(fh->fh_dentry); | 106 | inode = d_inode(fh->fh_dentry); |
| 107 | if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) { | ||
| 108 | error = -EOPNOTSUPP; | ||
| 109 | goto out_errno; | ||
| 110 | } | ||
| 111 | 107 | ||
| 112 | error = fh_want_write(fh); | 108 | error = fh_want_write(fh); |
| 113 | if (error) | 109 | if (error) |
| 114 | goto out_errno; | 110 | goto out_errno; |
| 115 | 111 | ||
| 116 | error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS); | 112 | fh_lock(fh); |
| 113 | |||
| 114 | error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access); | ||
| 117 | if (error) | 115 | if (error) |
| 118 | goto out_drop_write; | 116 | goto out_drop_lock; |
| 119 | error = inode->i_op->set_acl(inode, argp->acl_default, | 117 | error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default); |
| 120 | ACL_TYPE_DEFAULT); | ||
| 121 | if (error) | 118 | if (error) |
| 122 | goto out_drop_write; | 119 | goto out_drop_lock; |
| 120 | |||
| 121 | fh_unlock(fh); | ||
| 123 | 122 | ||
| 124 | fh_drop_write(fh); | 123 | fh_drop_write(fh); |
| 125 | 124 | ||
| @@ -131,7 +130,8 @@ out: | |||
| 131 | posix_acl_release(argp->acl_access); | 130 | posix_acl_release(argp->acl_access); |
| 132 | posix_acl_release(argp->acl_default); | 131 | posix_acl_release(argp->acl_default); |
| 133 | return nfserr; | 132 | return nfserr; |
| 134 | out_drop_write: | 133 | out_drop_lock: |
| 134 | fh_unlock(fh); | ||
| 135 | fh_drop_write(fh); | 135 | fh_drop_write(fh); |
| 136 | out_errno: | 136 | out_errno: |
| 137 | nfserr = nfserrno(error); | 137 | nfserr = nfserrno(error); |
diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c index 01df4cd7c753..0c890347cde3 100644 --- a/fs/nfsd/nfs3acl.c +++ b/fs/nfsd/nfs3acl.c | |||
| @@ -95,22 +95,20 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp, | |||
| 95 | goto out; | 95 | goto out; |
| 96 | 96 | ||
| 97 | inode = d_inode(fh->fh_dentry); | 97 | inode = d_inode(fh->fh_dentry); |
| 98 | if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) { | ||
| 99 | error = -EOPNOTSUPP; | ||
| 100 | goto out_errno; | ||
| 101 | } | ||
| 102 | 98 | ||
| 103 | error = fh_want_write(fh); | 99 | error = fh_want_write(fh); |
| 104 | if (error) | 100 | if (error) |
| 105 | goto out_errno; | 101 | goto out_errno; |
| 106 | 102 | ||
| 107 | error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS); | 103 | fh_lock(fh); |
| 104 | |||
| 105 | error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access); | ||
| 108 | if (error) | 106 | if (error) |
| 109 | goto out_drop_write; | 107 | goto out_drop_lock; |
| 110 | error = inode->i_op->set_acl(inode, argp->acl_default, | 108 | error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default); |
| 111 | ACL_TYPE_DEFAULT); | ||
| 112 | 109 | ||
| 113 | out_drop_write: | 110 | out_drop_lock: |
| 111 | fh_unlock(fh); | ||
| 114 | fh_drop_write(fh); | 112 | fh_drop_write(fh); |
| 115 | out_errno: | 113 | out_errno: |
| 116 | nfserr = nfserrno(error); | 114 | nfserr = nfserrno(error); |
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c index 6adabd6049b7..71292a0d6f09 100644 --- a/fs/nfsd/nfs4acl.c +++ b/fs/nfsd/nfs4acl.c | |||
| @@ -770,9 +770,6 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp, | |||
| 770 | dentry = fhp->fh_dentry; | 770 | dentry = fhp->fh_dentry; |
| 771 | inode = d_inode(dentry); | 771 | inode = d_inode(dentry); |
| 772 | 772 | ||
| 773 | if (!inode->i_op->set_acl || !IS_POSIXACL(inode)) | ||
| 774 | return nfserr_attrnotsupp; | ||
| 775 | |||
| 776 | if (S_ISDIR(inode->i_mode)) | 773 | if (S_ISDIR(inode->i_mode)) |
| 777 | flags = NFS4_ACL_DIR; | 774 | flags = NFS4_ACL_DIR; |
| 778 | 775 | ||
| @@ -782,16 +779,19 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp, | |||
| 782 | if (host_error < 0) | 779 | if (host_error < 0) |
| 783 | goto out_nfserr; | 780 | goto out_nfserr; |
| 784 | 781 | ||
| 785 | host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS); | 782 | fh_lock(fhp); |
| 783 | |||
| 784 | host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl); | ||
| 786 | if (host_error < 0) | 785 | if (host_error < 0) |
| 787 | goto out_release; | 786 | goto out_drop_lock; |
| 788 | 787 | ||
| 789 | if (S_ISDIR(inode->i_mode)) { | 788 | if (S_ISDIR(inode->i_mode)) { |
| 790 | host_error = inode->i_op->set_acl(inode, dpacl, | 789 | host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl); |
| 791 | ACL_TYPE_DEFAULT); | ||
| 792 | } | 790 | } |
| 793 | 791 | ||
| 794 | out_release: | 792 | out_drop_lock: |
| 793 | fh_unlock(fhp); | ||
| 794 | |||
| 795 | posix_acl_release(pacl); | 795 | posix_acl_release(pacl); |
| 796 | posix_acl_release(dpacl); | 796 | posix_acl_release(dpacl); |
| 797 | out_nfserr: | 797 | out_nfserr: |
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 7389cb1d7409..04c68d900324 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c | |||
| @@ -710,22 +710,6 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc | |||
| 710 | } | 710 | } |
| 711 | } | 711 | } |
| 712 | 712 | ||
| 713 | static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args) | ||
| 714 | { | ||
| 715 | struct rpc_xprt *xprt; | ||
| 716 | |||
| 717 | if (args->protocol != XPRT_TRANSPORT_BC_TCP) | ||
| 718 | return rpc_create(args); | ||
| 719 | |||
| 720 | xprt = args->bc_xprt->xpt_bc_xprt; | ||
| 721 | if (xprt) { | ||
| 722 | xprt_get(xprt); | ||
| 723 | return rpc_create_xprt(args, xprt); | ||
| 724 | } | ||
| 725 | |||
| 726 | return rpc_create(args); | ||
| 727 | } | ||
| 728 | |||
| 729 | static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses) | 713 | static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses) |
| 730 | { | 714 | { |
| 731 | int maxtime = max_cb_time(clp->net); | 715 | int maxtime = max_cb_time(clp->net); |
| @@ -768,7 +752,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c | |||
| 768 | args.authflavor = ses->se_cb_sec.flavor; | 752 | args.authflavor = ses->se_cb_sec.flavor; |
| 769 | } | 753 | } |
| 770 | /* Create RPC client */ | 754 | /* Create RPC client */ |
| 771 | client = create_backchannel_client(&args); | 755 | client = rpc_create(&args); |
| 772 | if (IS_ERR(client)) { | 756 | if (IS_ERR(client)) { |
| 773 | dprintk("NFSD: couldn't create callback client: %ld\n", | 757 | dprintk("NFSD: couldn't create callback client: %ld\n", |
| 774 | PTR_ERR(client)); | 758 | PTR_ERR(client)); |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index f5f82e145018..70d0b9b33031 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
| @@ -3480,12 +3480,17 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, | |||
| 3480 | } | 3480 | } |
| 3481 | 3481 | ||
| 3482 | static struct nfs4_ol_stateid * | 3482 | static struct nfs4_ol_stateid * |
| 3483 | init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, | 3483 | init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open) |
| 3484 | struct nfsd4_open *open) | ||
| 3485 | { | 3484 | { |
| 3486 | 3485 | ||
| 3487 | struct nfs4_openowner *oo = open->op_openowner; | 3486 | struct nfs4_openowner *oo = open->op_openowner; |
| 3488 | struct nfs4_ol_stateid *retstp = NULL; | 3487 | struct nfs4_ol_stateid *retstp = NULL; |
| 3488 | struct nfs4_ol_stateid *stp; | ||
| 3489 | |||
| 3490 | stp = open->op_stp; | ||
| 3491 | /* We are moving these outside of the spinlocks to avoid the warnings */ | ||
| 3492 | mutex_init(&stp->st_mutex); | ||
| 3493 | mutex_lock(&stp->st_mutex); | ||
| 3489 | 3494 | ||
| 3490 | spin_lock(&oo->oo_owner.so_client->cl_lock); | 3495 | spin_lock(&oo->oo_owner.so_client->cl_lock); |
| 3491 | spin_lock(&fp->fi_lock); | 3496 | spin_lock(&fp->fi_lock); |
| @@ -3493,6 +3498,8 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, | |||
| 3493 | retstp = nfsd4_find_existing_open(fp, open); | 3498 | retstp = nfsd4_find_existing_open(fp, open); |
| 3494 | if (retstp) | 3499 | if (retstp) |
| 3495 | goto out_unlock; | 3500 | goto out_unlock; |
| 3501 | |||
| 3502 | open->op_stp = NULL; | ||
| 3496 | atomic_inc(&stp->st_stid.sc_count); | 3503 | atomic_inc(&stp->st_stid.sc_count); |
| 3497 | stp->st_stid.sc_type = NFS4_OPEN_STID; | 3504 | stp->st_stid.sc_type = NFS4_OPEN_STID; |
| 3498 | INIT_LIST_HEAD(&stp->st_locks); | 3505 | INIT_LIST_HEAD(&stp->st_locks); |
| @@ -3502,14 +3509,19 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, | |||
| 3502 | stp->st_access_bmap = 0; | 3509 | stp->st_access_bmap = 0; |
| 3503 | stp->st_deny_bmap = 0; | 3510 | stp->st_deny_bmap = 0; |
| 3504 | stp->st_openstp = NULL; | 3511 | stp->st_openstp = NULL; |
| 3505 | init_rwsem(&stp->st_rwsem); | ||
| 3506 | list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); | 3512 | list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); |
| 3507 | list_add(&stp->st_perfile, &fp->fi_stateids); | 3513 | list_add(&stp->st_perfile, &fp->fi_stateids); |
| 3508 | 3514 | ||
| 3509 | out_unlock: | 3515 | out_unlock: |
| 3510 | spin_unlock(&fp->fi_lock); | 3516 | spin_unlock(&fp->fi_lock); |
| 3511 | spin_unlock(&oo->oo_owner.so_client->cl_lock); | 3517 | spin_unlock(&oo->oo_owner.so_client->cl_lock); |
| 3512 | return retstp; | 3518 | if (retstp) { |
| 3519 | mutex_lock(&retstp->st_mutex); | ||
| 3520 | /* To keep mutex tracking happy */ | ||
| 3521 | mutex_unlock(&stp->st_mutex); | ||
| 3522 | stp = retstp; | ||
| 3523 | } | ||
| 3524 | return stp; | ||
| 3513 | } | 3525 | } |
| 3514 | 3526 | ||
| 3515 | /* | 3527 | /* |
| @@ -4305,7 +4317,6 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf | |||
| 4305 | struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; | 4317 | struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; |
| 4306 | struct nfs4_file *fp = NULL; | 4318 | struct nfs4_file *fp = NULL; |
| 4307 | struct nfs4_ol_stateid *stp = NULL; | 4319 | struct nfs4_ol_stateid *stp = NULL; |
| 4308 | struct nfs4_ol_stateid *swapstp = NULL; | ||
| 4309 | struct nfs4_delegation *dp = NULL; | 4320 | struct nfs4_delegation *dp = NULL; |
| 4310 | __be32 status; | 4321 | __be32 status; |
| 4311 | 4322 | ||
| @@ -4335,32 +4346,28 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf | |||
| 4335 | */ | 4346 | */ |
| 4336 | if (stp) { | 4347 | if (stp) { |
| 4337 | /* Stateid was found, this is an OPEN upgrade */ | 4348 | /* Stateid was found, this is an OPEN upgrade */ |
| 4338 | down_read(&stp->st_rwsem); | 4349 | mutex_lock(&stp->st_mutex); |
| 4339 | status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); | 4350 | status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); |
| 4340 | if (status) { | 4351 | if (status) { |
| 4341 | up_read(&stp->st_rwsem); | 4352 | mutex_unlock(&stp->st_mutex); |
| 4342 | goto out; | 4353 | goto out; |
| 4343 | } | 4354 | } |
| 4344 | } else { | 4355 | } else { |
| 4345 | stp = open->op_stp; | 4356 | /* stp is returned locked. */ |
| 4346 | open->op_stp = NULL; | 4357 | stp = init_open_stateid(fp, open); |
| 4347 | swapstp = init_open_stateid(stp, fp, open); | 4358 | /* See if we lost the race to some other thread */ |
| 4348 | if (swapstp) { | 4359 | if (stp->st_access_bmap != 0) { |
| 4349 | nfs4_put_stid(&stp->st_stid); | ||
| 4350 | stp = swapstp; | ||
| 4351 | down_read(&stp->st_rwsem); | ||
| 4352 | status = nfs4_upgrade_open(rqstp, fp, current_fh, | 4360 | status = nfs4_upgrade_open(rqstp, fp, current_fh, |
| 4353 | stp, open); | 4361 | stp, open); |
| 4354 | if (status) { | 4362 | if (status) { |
| 4355 | up_read(&stp->st_rwsem); | 4363 | mutex_unlock(&stp->st_mutex); |
| 4356 | goto out; | 4364 | goto out; |
| 4357 | } | 4365 | } |
| 4358 | goto upgrade_out; | 4366 | goto upgrade_out; |
| 4359 | } | 4367 | } |
| 4360 | down_read(&stp->st_rwsem); | ||
| 4361 | status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open); | 4368 | status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open); |
| 4362 | if (status) { | 4369 | if (status) { |
| 4363 | up_read(&stp->st_rwsem); | 4370 | mutex_unlock(&stp->st_mutex); |
| 4364 | release_open_stateid(stp); | 4371 | release_open_stateid(stp); |
| 4365 | goto out; | 4372 | goto out; |
| 4366 | } | 4373 | } |
| @@ -4372,7 +4379,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf | |||
| 4372 | } | 4379 | } |
| 4373 | upgrade_out: | 4380 | upgrade_out: |
| 4374 | nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); | 4381 | nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); |
| 4375 | up_read(&stp->st_rwsem); | 4382 | mutex_unlock(&stp->st_mutex); |
| 4376 | 4383 | ||
| 4377 | if (nfsd4_has_session(&resp->cstate)) { | 4384 | if (nfsd4_has_session(&resp->cstate)) { |
| 4378 | if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { | 4385 | if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) { |
| @@ -4977,12 +4984,12 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_ | |||
| 4977 | * revoked delegations are kept only for free_stateid. | 4984 | * revoked delegations are kept only for free_stateid. |
| 4978 | */ | 4985 | */ |
| 4979 | return nfserr_bad_stateid; | 4986 | return nfserr_bad_stateid; |
| 4980 | down_write(&stp->st_rwsem); | 4987 | mutex_lock(&stp->st_mutex); |
| 4981 | status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); | 4988 | status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); |
| 4982 | if (status == nfs_ok) | 4989 | if (status == nfs_ok) |
| 4983 | status = nfs4_check_fh(current_fh, &stp->st_stid); | 4990 | status = nfs4_check_fh(current_fh, &stp->st_stid); |
| 4984 | if (status != nfs_ok) | 4991 | if (status != nfs_ok) |
| 4985 | up_write(&stp->st_rwsem); | 4992 | mutex_unlock(&stp->st_mutex); |
| 4986 | return status; | 4993 | return status; |
| 4987 | } | 4994 | } |
| 4988 | 4995 | ||
| @@ -5030,7 +5037,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs | |||
| 5030 | return status; | 5037 | return status; |
| 5031 | oo = openowner(stp->st_stateowner); | 5038 | oo = openowner(stp->st_stateowner); |
| 5032 | if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { | 5039 | if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { |
| 5033 | up_write(&stp->st_rwsem); | 5040 | mutex_unlock(&stp->st_mutex); |
| 5034 | nfs4_put_stid(&stp->st_stid); | 5041 | nfs4_put_stid(&stp->st_stid); |
| 5035 | return nfserr_bad_stateid; | 5042 | return nfserr_bad_stateid; |
| 5036 | } | 5043 | } |
| @@ -5062,12 +5069,12 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
| 5062 | oo = openowner(stp->st_stateowner); | 5069 | oo = openowner(stp->st_stateowner); |
| 5063 | status = nfserr_bad_stateid; | 5070 | status = nfserr_bad_stateid; |
| 5064 | if (oo->oo_flags & NFS4_OO_CONFIRMED) { | 5071 | if (oo->oo_flags & NFS4_OO_CONFIRMED) { |
| 5065 | up_write(&stp->st_rwsem); | 5072 | mutex_unlock(&stp->st_mutex); |
| 5066 | goto put_stateid; | 5073 | goto put_stateid; |
| 5067 | } | 5074 | } |
| 5068 | oo->oo_flags |= NFS4_OO_CONFIRMED; | 5075 | oo->oo_flags |= NFS4_OO_CONFIRMED; |
| 5069 | nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); | 5076 | nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); |
| 5070 | up_write(&stp->st_rwsem); | 5077 | mutex_unlock(&stp->st_mutex); |
| 5071 | dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n", | 5078 | dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n", |
| 5072 | __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid)); | 5079 | __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid)); |
| 5073 | 5080 | ||
| @@ -5143,7 +5150,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp, | |||
| 5143 | nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); | 5150 | nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); |
| 5144 | status = nfs_ok; | 5151 | status = nfs_ok; |
| 5145 | put_stateid: | 5152 | put_stateid: |
| 5146 | up_write(&stp->st_rwsem); | 5153 | mutex_unlock(&stp->st_mutex); |
| 5147 | nfs4_put_stid(&stp->st_stid); | 5154 | nfs4_put_stid(&stp->st_stid); |
| 5148 | out: | 5155 | out: |
| 5149 | nfsd4_bump_seqid(cstate, status); | 5156 | nfsd4_bump_seqid(cstate, status); |
| @@ -5196,7 +5203,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
| 5196 | if (status) | 5203 | if (status) |
| 5197 | goto out; | 5204 | goto out; |
| 5198 | nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); | 5205 | nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); |
| 5199 | up_write(&stp->st_rwsem); | 5206 | mutex_unlock(&stp->st_mutex); |
| 5200 | 5207 | ||
| 5201 | nfsd4_close_open_stateid(stp); | 5208 | nfsd4_close_open_stateid(stp); |
| 5202 | 5209 | ||
| @@ -5422,7 +5429,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, | |||
| 5422 | stp->st_access_bmap = 0; | 5429 | stp->st_access_bmap = 0; |
| 5423 | stp->st_deny_bmap = open_stp->st_deny_bmap; | 5430 | stp->st_deny_bmap = open_stp->st_deny_bmap; |
| 5424 | stp->st_openstp = open_stp; | 5431 | stp->st_openstp = open_stp; |
| 5425 | init_rwsem(&stp->st_rwsem); | 5432 | mutex_init(&stp->st_mutex); |
| 5426 | list_add(&stp->st_locks, &open_stp->st_locks); | 5433 | list_add(&stp->st_locks, &open_stp->st_locks); |
| 5427 | list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); | 5434 | list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); |
| 5428 | spin_lock(&fp->fi_lock); | 5435 | spin_lock(&fp->fi_lock); |
| @@ -5591,7 +5598,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
| 5591 | &open_stp, nn); | 5598 | &open_stp, nn); |
| 5592 | if (status) | 5599 | if (status) |
| 5593 | goto out; | 5600 | goto out; |
| 5594 | up_write(&open_stp->st_rwsem); | 5601 | mutex_unlock(&open_stp->st_mutex); |
| 5595 | open_sop = openowner(open_stp->st_stateowner); | 5602 | open_sop = openowner(open_stp->st_stateowner); |
| 5596 | status = nfserr_bad_stateid; | 5603 | status = nfserr_bad_stateid; |
| 5597 | if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, | 5604 | if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, |
| @@ -5600,7 +5607,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
| 5600 | status = lookup_or_create_lock_state(cstate, open_stp, lock, | 5607 | status = lookup_or_create_lock_state(cstate, open_stp, lock, |
| 5601 | &lock_stp, &new); | 5608 | &lock_stp, &new); |
| 5602 | if (status == nfs_ok) | 5609 | if (status == nfs_ok) |
| 5603 | down_write(&lock_stp->st_rwsem); | 5610 | mutex_lock(&lock_stp->st_mutex); |
| 5604 | } else { | 5611 | } else { |
| 5605 | status = nfs4_preprocess_seqid_op(cstate, | 5612 | status = nfs4_preprocess_seqid_op(cstate, |
| 5606 | lock->lk_old_lock_seqid, | 5613 | lock->lk_old_lock_seqid, |
| @@ -5704,7 +5711,7 @@ out: | |||
| 5704 | seqid_mutating_err(ntohl(status))) | 5711 | seqid_mutating_err(ntohl(status))) |
| 5705 | lock_sop->lo_owner.so_seqid++; | 5712 | lock_sop->lo_owner.so_seqid++; |
| 5706 | 5713 | ||
| 5707 | up_write(&lock_stp->st_rwsem); | 5714 | mutex_unlock(&lock_stp->st_mutex); |
| 5708 | 5715 | ||
| 5709 | /* | 5716 | /* |
| 5710 | * If this is a new, never-before-used stateid, and we are | 5717 | * If this is a new, never-before-used stateid, and we are |
| @@ -5874,7 +5881,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
| 5874 | fput: | 5881 | fput: |
| 5875 | fput(filp); | 5882 | fput(filp); |
| 5876 | put_stateid: | 5883 | put_stateid: |
| 5877 | up_write(&stp->st_rwsem); | 5884 | mutex_unlock(&stp->st_mutex); |
| 5878 | nfs4_put_stid(&stp->st_stid); | 5885 | nfs4_put_stid(&stp->st_stid); |
| 5879 | out: | 5886 | out: |
| 5880 | nfsd4_bump_seqid(cstate, status); | 5887 | nfsd4_bump_seqid(cstate, status); |
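
The nfs4state.c changes replace st_rwsem with st_mutex and move stateid setup into init_open_stateid(), which now returns with the stateid's mutex held: the new object's mutex is taken before it is published under the spinlocks, and if another thread already installed an equivalent stateid the winner's mutex is taken and ours released. A self-contained pthread sketch of that handoff pattern, with a toy single-slot table and placeholder types:

#include <pthread.h>
#include <stddef.h>

struct stateid {
        pthread_mutex_t mutex;
        int data;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct stateid *installed;       /* toy "table": at most one entry */

/* Returns a stateid with its mutex held: either the one passed in
 * (now published) or a previously installed one if we lost the race. */
static struct stateid *init_stateid(struct stateid *stp)
{
        struct stateid *existing;

        pthread_mutex_init(&stp->mutex, NULL);
        pthread_mutex_lock(&stp->mutex);        /* lock before publishing */

        pthread_mutex_lock(&table_lock);
        existing = installed;
        if (!existing)
                installed = stp;
        pthread_mutex_unlock(&table_lock);

        if (existing) {                         /* lost the race */
                pthread_mutex_lock(&existing->mutex);
                pthread_mutex_unlock(&stp->mutex);
                return existing;
        }
        return stp;
}
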
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 986e51e5ceac..64053eadeb81 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h | |||
| @@ -535,7 +535,7 @@ struct nfs4_ol_stateid { | |||
| 535 | unsigned char st_access_bmap; | 535 | unsigned char st_access_bmap; |
| 536 | unsigned char st_deny_bmap; | 536 | unsigned char st_deny_bmap; |
| 537 | struct nfs4_ol_stateid *st_openstp; | 537 | struct nfs4_ol_stateid *st_openstp; |
| 538 | struct rw_semaphore st_rwsem; | 538 | struct mutex st_mutex; |
| 539 | }; | 539 | }; |
| 540 | 540 | ||
| 541 | static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s) | 541 | static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s) |
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 809bd2de7ad0..e9fd241b9a0a 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c | |||
| @@ -439,7 +439,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp) | |||
| 439 | if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC) | 439 | if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC) |
| 440 | return 0; | 440 | return 0; |
| 441 | bytes = le16_to_cpu(sbp->s_bytes); | 441 | bytes = le16_to_cpu(sbp->s_bytes); |
| 442 | if (bytes > BLOCK_SIZE) | 442 | if (bytes < sumoff + 4 || bytes > BLOCK_SIZE) |
| 443 | return 0; | 443 | return 0; |
| 444 | crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp, | 444 | crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp, |
| 445 | sumoff); | 445 | sumoff); |
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile index e27e6527912b..4342c7ee7d20 100644 --- a/fs/ocfs2/Makefile +++ b/fs/ocfs2/Makefile | |||
| @@ -1,7 +1,5 @@ | |||
| 1 | ccflags-y := -Ifs/ocfs2 | 1 | ccflags-y := -Ifs/ocfs2 |
| 2 | 2 | ||
| 3 | ccflags-y += -DCATCH_BH_JBD_RACES | ||
| 4 | |||
| 5 | obj-$(CONFIG_OCFS2_FS) += \ | 3 | obj-$(CONFIG_OCFS2_FS) += \ |
| 6 | ocfs2.o \ | 4 | ocfs2.o \ |
| 7 | ocfs2_stackglue.o | 5 | ocfs2_stackglue.o |
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c index fe50ded1b4ce..498641eed2db 100644 --- a/fs/ocfs2/buffer_head_io.c +++ b/fs/ocfs2/buffer_head_io.c | |||
| @@ -139,11 +139,16 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block, | |||
| 139 | 139 | ||
| 140 | lock_buffer(bh); | 140 | lock_buffer(bh); |
| 141 | if (buffer_jbd(bh)) { | 141 | if (buffer_jbd(bh)) { |
| 142 | #ifdef CATCH_BH_JBD_RACES | ||
| 142 | mlog(ML_ERROR, | 143 | mlog(ML_ERROR, |
| 143 | "block %llu had the JBD bit set " | 144 | "block %llu had the JBD bit set " |
| 144 | "while I was in lock_buffer!", | 145 | "while I was in lock_buffer!", |
| 145 | (unsigned long long)bh->b_blocknr); | 146 | (unsigned long long)bh->b_blocknr); |
| 146 | BUG(); | 147 | BUG(); |
| 148 | #else | ||
| 149 | unlock_buffer(bh); | ||
| 150 | continue; | ||
| 151 | #endif | ||
| 147 | } | 152 | } |
| 148 | 153 | ||
| 149 | clear_buffer_uptodate(bh); | 154 | clear_buffer_uptodate(bh); |
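
Taken together, the two ocfs2 hunks invert a debugging default: the Makefile stops defining CATCH_BH_JBD_RACES, so the BUG() on a JBD-managed buffer becomes opt-in and the stock build simply unlocks the buffer and skips it. The general compile-time toggle pattern, as a trivial user-space sketch:

#include <stdio.h>
#include <stdlib.h>

/* Strict builds (compiled with -DSTRICT_CHECKS) crash loudly on an
 * unexpected state; normal builds skip the item and carry on. */
static int handle_unexpected_state(void)
{
#ifdef STRICT_CHECKS
        fprintf(stderr, "unexpected state\n");
        abort();
#else
        return -1;      /* caller treats this as "skip and continue" */
#endif
}
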
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 22f0253a3567..c2a6b0894022 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c | |||
| @@ -405,12 +405,21 @@ static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev, | |||
| 405 | err = ovl_create_upper(dentry, inode, &stat, link, hardlink); | 405 | err = ovl_create_upper(dentry, inode, &stat, link, hardlink); |
| 406 | } else { | 406 | } else { |
| 407 | const struct cred *old_cred; | 407 | const struct cred *old_cred; |
| 408 | struct cred *override_cred; | ||
| 408 | 409 | ||
| 409 | old_cred = ovl_override_creds(dentry->d_sb); | 410 | old_cred = ovl_override_creds(dentry->d_sb); |
| 410 | 411 | ||
| 411 | err = ovl_create_over_whiteout(dentry, inode, &stat, link, | 412 | err = -ENOMEM; |
| 412 | hardlink); | 413 | override_cred = prepare_creds(); |
| 414 | if (override_cred) { | ||
| 415 | override_cred->fsuid = old_cred->fsuid; | ||
| 416 | override_cred->fsgid = old_cred->fsgid; | ||
| 417 | put_cred(override_creds(override_cred)); | ||
| 418 | put_cred(override_cred); | ||
| 413 | 419 | ||
| 420 | err = ovl_create_over_whiteout(dentry, inode, &stat, | ||
| 421 | link, hardlink); | ||
| 422 | } | ||
| 414 | revert_creds(old_cred); | 423 | revert_creds(old_cred); |
| 415 | } | 424 | } |
| 416 | 425 | ||
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 0ed7c4012437..1dbeab6cf96e 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c | |||
| @@ -238,41 +238,27 @@ out: | |||
| 238 | return err; | 238 | return err; |
| 239 | } | 239 | } |
| 240 | 240 | ||
| 241 | static bool ovl_need_xattr_filter(struct dentry *dentry, | ||
| 242 | enum ovl_path_type type) | ||
| 243 | { | ||
| 244 | if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER) | ||
| 245 | return S_ISDIR(dentry->d_inode->i_mode); | ||
| 246 | else | ||
| 247 | return false; | ||
| 248 | } | ||
| 249 | |||
| 250 | ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode, | 241 | ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode, |
| 251 | const char *name, void *value, size_t size) | 242 | const char *name, void *value, size_t size) |
| 252 | { | 243 | { |
| 253 | struct path realpath; | 244 | struct dentry *realdentry = ovl_dentry_real(dentry); |
| 254 | enum ovl_path_type type = ovl_path_real(dentry, &realpath); | ||
| 255 | 245 | ||
| 256 | if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name)) | 246 | if (ovl_is_private_xattr(name)) |
| 257 | return -ENODATA; | 247 | return -ENODATA; |
| 258 | 248 | ||
| 259 | return vfs_getxattr(realpath.dentry, name, value, size); | 249 | return vfs_getxattr(realdentry, name, value, size); |
| 260 | } | 250 | } |
| 261 | 251 | ||
| 262 | ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) | 252 | ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) |
| 263 | { | 253 | { |
| 264 | struct path realpath; | 254 | struct dentry *realdentry = ovl_dentry_real(dentry); |
| 265 | enum ovl_path_type type = ovl_path_real(dentry, &realpath); | ||
| 266 | ssize_t res; | 255 | ssize_t res; |
| 267 | int off; | 256 | int off; |
| 268 | 257 | ||
| 269 | res = vfs_listxattr(realpath.dentry, list, size); | 258 | res = vfs_listxattr(realdentry, list, size); |
| 270 | if (res <= 0 || size == 0) | 259 | if (res <= 0 || size == 0) |
| 271 | return res; | 260 | return res; |
| 272 | 261 | ||
| 273 | if (!ovl_need_xattr_filter(dentry, type)) | ||
| 274 | return res; | ||
| 275 | |||
| 276 | /* filter out private xattrs */ | 262 | /* filter out private xattrs */ |
| 277 | for (off = 0; off < res;) { | 263 | for (off = 0; off < res;) { |
| 278 | char *s = list + off; | 264 | char *s = list + off; |
| @@ -302,7 +288,7 @@ int ovl_removexattr(struct dentry *dentry, const char *name) | |||
| 302 | goto out; | 288 | goto out; |
| 303 | 289 | ||
| 304 | err = -ENODATA; | 290 | err = -ENODATA; |
| 305 | if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name)) | 291 | if (ovl_is_private_xattr(name)) |
| 306 | goto out_drop_write; | 292 | goto out_drop_write; |
| 307 | 293 | ||
| 308 | if (!OVL_TYPE_UPPER(type)) { | 294 | if (!OVL_TYPE_UPPER(type)) { |
diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 8a4a266beff3..edc452c2a563 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c | |||
| @@ -820,39 +820,43 @@ posix_acl_xattr_get(const struct xattr_handler *handler, | |||
| 820 | return error; | 820 | return error; |
| 821 | } | 821 | } |
| 822 | 822 | ||
| 823 | static int | 823 | int |
| 824 | posix_acl_xattr_set(const struct xattr_handler *handler, | 824 | set_posix_acl(struct inode *inode, int type, struct posix_acl *acl) |
| 825 | struct dentry *unused, struct inode *inode, | ||
| 826 | const char *name, const void *value, | ||
| 827 | size_t size, int flags) | ||
| 828 | { | 825 | { |
| 829 | struct posix_acl *acl = NULL; | ||
| 830 | int ret; | ||
| 831 | |||
| 832 | if (!IS_POSIXACL(inode)) | 826 | if (!IS_POSIXACL(inode)) |
| 833 | return -EOPNOTSUPP; | 827 | return -EOPNOTSUPP; |
| 834 | if (!inode->i_op->set_acl) | 828 | if (!inode->i_op->set_acl) |
| 835 | return -EOPNOTSUPP; | 829 | return -EOPNOTSUPP; |
| 836 | 830 | ||
| 837 | if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode)) | 831 | if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode)) |
| 838 | return value ? -EACCES : 0; | 832 | return acl ? -EACCES : 0; |
| 839 | if (!inode_owner_or_capable(inode)) | 833 | if (!inode_owner_or_capable(inode)) |
| 840 | return -EPERM; | 834 | return -EPERM; |
| 841 | 835 | ||
| 836 | if (acl) { | ||
| 837 | int ret = posix_acl_valid(acl); | ||
| 838 | if (ret) | ||
| 839 | return ret; | ||
| 840 | } | ||
| 841 | return inode->i_op->set_acl(inode, acl, type); | ||
| 842 | } | ||
| 843 | EXPORT_SYMBOL(set_posix_acl); | ||
| 844 | |||
| 845 | static int | ||
| 846 | posix_acl_xattr_set(const struct xattr_handler *handler, | ||
| 847 | struct dentry *unused, struct inode *inode, | ||
| 848 | const char *name, const void *value, | ||
| 849 | size_t size, int flags) | ||
| 850 | { | ||
| 851 | struct posix_acl *acl = NULL; | ||
| 852 | int ret; | ||
| 853 | |||
| 842 | if (value) { | 854 | if (value) { |
| 843 | acl = posix_acl_from_xattr(&init_user_ns, value, size); | 855 | acl = posix_acl_from_xattr(&init_user_ns, value, size); |
| 844 | if (IS_ERR(acl)) | 856 | if (IS_ERR(acl)) |
| 845 | return PTR_ERR(acl); | 857 | return PTR_ERR(acl); |
| 846 | |||
| 847 | if (acl) { | ||
| 848 | ret = posix_acl_valid(acl); | ||
| 849 | if (ret) | ||
| 850 | goto out; | ||
| 851 | } | ||
| 852 | } | 858 | } |
| 853 | 859 | ret = set_posix_acl(inode, handler->flags, acl); | |
| 854 | ret = inode->i_op->set_acl(inode, acl, handler->flags); | ||
| 855 | out: | ||
| 856 | posix_acl_release(acl); | 860 | posix_acl_release(acl); |
| 857 | return ret; | 861 | return ret; |
| 858 | } | 862 | } |
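
The posix_acl.c change hoists the checks out of posix_acl_xattr_set() into an exported set_posix_acl() helper, which is what the nfsd ACL hunks earlier in this diff switch to. A condensed, non-authoritative sketch of the resulting caller pattern, using only symbols that appear in the hunks above (a kernel fragment, not runnable on its own):

        /* the IS_POSIXACL()/->set_acl/posix_acl_valid() checks now live
         * inside set_posix_acl(), so callers only take the inode lock */
        fh_lock(fh);
        error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
        if (!error)
                error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
        fh_unlock(fh);
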
diff --git a/fs/proc/root.c b/fs/proc/root.c index 55bc7d6c8aac..06702783bf40 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c | |||
| @@ -121,6 +121,13 @@ static struct dentry *proc_mount(struct file_system_type *fs_type, | |||
| 121 | if (IS_ERR(sb)) | 121 | if (IS_ERR(sb)) |
| 122 | return ERR_CAST(sb); | 122 | return ERR_CAST(sb); |
| 123 | 123 | ||
| 124 | /* | ||
| 125 | * procfs isn't actually a stacking filesystem; however, there is | ||
| 126 | * too much magic going on inside it to permit stacking things on | ||
| 127 | * top of it | ||
| 128 | */ | ||
| 129 | sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; | ||
| 130 | |||
| 124 | if (!proc_parse_options(options, ns)) { | 131 | if (!proc_parse_options(options, ns)) { |
| 125 | deactivate_locked_super(sb); | 132 | deactivate_locked_super(sb); |
| 126 | return ERR_PTR(-EINVAL); | 133 | return ERR_PTR(-EINVAL); |
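
proc_mount() now advertises the maximum stack depth so that stacking filesystems refuse to use procfs as a lower layer. A non-authoritative sketch of the kind of check this trips in a typical stacking filesystem's mount path (kernel fragment, not runnable on its own):

        /* typical stacking-fs pattern: depth is one more than the lower
         * layer's, capped at FILESYSTEM_MAX_STACK_DEPTH -- with procfs
         * already reporting the cap, this test always fails */
        sb->s_stack_depth = lower_sb->s_stack_depth + 1;
        if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH)
                return -EINVAL;
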
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index b8f2d1e8c645..c72c16c5a60f 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c | |||
| @@ -1393,7 +1393,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
| 1393 | unsigned long safe_mask = 0; | 1393 | unsigned long safe_mask = 0; |
| 1394 | unsigned int commit_max_age = (unsigned int)-1; | 1394 | unsigned int commit_max_age = (unsigned int)-1; |
| 1395 | struct reiserfs_journal *journal = SB_JOURNAL(s); | 1395 | struct reiserfs_journal *journal = SB_JOURNAL(s); |
| 1396 | char *new_opts = kstrdup(arg, GFP_KERNEL); | 1396 | char *new_opts; |
| 1397 | int err; | 1397 | int err; |
| 1398 | char *qf_names[REISERFS_MAXQUOTAS]; | 1398 | char *qf_names[REISERFS_MAXQUOTAS]; |
| 1399 | unsigned int qfmt = 0; | 1399 | unsigned int qfmt = 0; |
| @@ -1401,6 +1401,10 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
| 1401 | int i; | 1401 | int i; |
| 1402 | #endif | 1402 | #endif |
| 1403 | 1403 | ||
| 1404 | new_opts = kstrdup(arg, GFP_KERNEL); | ||
| 1405 | if (arg && !new_opts) | ||
| 1406 | return -ENOMEM; | ||
| 1407 | |||
| 1404 | sync_filesystem(s); | 1408 | sync_filesystem(s); |
| 1405 | reiserfs_write_lock(s); | 1409 | reiserfs_write_lock(s); |
| 1406 | 1410 | ||
| @@ -1546,7 +1550,8 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) | |||
| 1546 | } | 1550 | } |
| 1547 | 1551 | ||
| 1548 | out_ok_unlocked: | 1552 | out_ok_unlocked: |
| 1549 | replace_mount_options(s, new_opts); | 1553 | if (new_opts) |
| 1554 | replace_mount_options(s, new_opts); | ||
| 1550 | return 0; | 1555 | return 0; |
| 1551 | 1556 | ||
| 1552 | out_err_unlock: | 1557 | out_err_unlock: |
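
The reiserfs_remount() fix defers the option-string copy and only fails when a non-NULL argument could not be duplicated, since kstrdup(NULL) legitimately returns NULL. A user-space analogue of the same guard (strdup(NULL) is undefined, hence the explicit conditional):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int save_remount_options(const char *arg, char **saved)
{
        char *copy = arg ? strdup(arg) : NULL;

        if (arg && !copy)
                return -ENOMEM;         /* copy failed, bail out early */
        *saved = copy;                  /* may legitimately stay NULL */
        return 0;
}
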
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 08316972ff93..7bbf420d1289 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c | |||
| @@ -52,6 +52,7 @@ | |||
| 52 | #include "ubifs.h" | 52 | #include "ubifs.h" |
| 53 | #include <linux/mount.h> | 53 | #include <linux/mount.h> |
| 54 | #include <linux/slab.h> | 54 | #include <linux/slab.h> |
| 55 | #include <linux/migrate.h> | ||
| 55 | 56 | ||
| 56 | static int read_block(struct inode *inode, void *addr, unsigned int block, | 57 | static int read_block(struct inode *inode, void *addr, unsigned int block, |
| 57 | struct ubifs_data_node *dn) | 58 | struct ubifs_data_node *dn) |
| @@ -1452,6 +1453,26 @@ static int ubifs_set_page_dirty(struct page *page) | |||
| 1452 | return ret; | 1453 | return ret; |
| 1453 | } | 1454 | } |
| 1454 | 1455 | ||
| 1456 | #ifdef CONFIG_MIGRATION | ||
| 1457 | static int ubifs_migrate_page(struct address_space *mapping, | ||
| 1458 | struct page *newpage, struct page *page, enum migrate_mode mode) | ||
| 1459 | { | ||
| 1460 | int rc; | ||
| 1461 | |||
| 1462 | rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); | ||
| 1463 | if (rc != MIGRATEPAGE_SUCCESS) | ||
| 1464 | return rc; | ||
| 1465 | |||
| 1466 | if (PagePrivate(page)) { | ||
| 1467 | ClearPagePrivate(page); | ||
| 1468 | SetPagePrivate(newpage); | ||
| 1469 | } | ||
| 1470 | |||
| 1471 | migrate_page_copy(newpage, page); | ||
| 1472 | return MIGRATEPAGE_SUCCESS; | ||
| 1473 | } | ||
| 1474 | #endif | ||
| 1475 | |||
| 1455 | static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags) | 1476 | static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags) |
| 1456 | { | 1477 | { |
| 1457 | /* | 1478 | /* |
| @@ -1591,6 +1612,9 @@ const struct address_space_operations ubifs_file_address_operations = { | |||
| 1591 | .write_end = ubifs_write_end, | 1612 | .write_end = ubifs_write_end, |
| 1592 | .invalidatepage = ubifs_invalidatepage, | 1613 | .invalidatepage = ubifs_invalidatepage, |
| 1593 | .set_page_dirty = ubifs_set_page_dirty, | 1614 | .set_page_dirty = ubifs_set_page_dirty, |
| 1615 | #ifdef CONFIG_MIGRATION | ||
| 1616 | .migratepage = ubifs_migrate_page, | ||
| 1617 | #endif | ||
| 1594 | .releasepage = ubifs_releasepage, | 1618 | .releasepage = ubifs_releasepage, |
| 1595 | }; | 1619 | }; |
| 1596 | 1620 | ||
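UBIFS uses PG_private as its own marker with no private data attached, so its migratepage hook has to carry the flag from the old page to the new one by hand before migrate_page_copy(). An address space with no per-page private state could presumably point the hook at the generic helper instead; a minimal sketch of that simpler case (the aops name is hypothetical):

        static const struct address_space_operations example_aops = {
        #ifdef CONFIG_MIGRATION
                .migratepage    = migrate_page, /* generic helper from mm/migrate.c */
        #endif
        };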
diff --git a/fs/udf/partition.c b/fs/udf/partition.c index 5f861ed287c3..888c364b2fe9 100644 --- a/fs/udf/partition.c +++ b/fs/udf/partition.c | |||
| @@ -295,7 +295,8 @@ static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block, | |||
| 295 | map = &UDF_SB(sb)->s_partmaps[partition]; | 295 | map = &UDF_SB(sb)->s_partmaps[partition]; |
| 296 | /* map to sparable/physical partition desc */ | 296 | /* map to sparable/physical partition desc */ |
| 297 | phyblock = udf_get_pblock(sb, eloc.logicalBlockNum, | 297 | phyblock = udf_get_pblock(sb, eloc.logicalBlockNum, |
| 298 | map->s_partition_num, ext_offset + offset); | 298 | map->s_type_specific.s_metadata.s_phys_partition_ref, |
| 299 | ext_offset + offset); | ||
| 299 | } | 300 | } |
| 300 | 301 | ||
| 301 | brelse(epos.bh); | 302 | brelse(epos.bh); |
| @@ -317,14 +318,18 @@ uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block, | |||
| 317 | mdata = &map->s_type_specific.s_metadata; | 318 | mdata = &map->s_type_specific.s_metadata; |
| 318 | inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe; | 319 | inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe; |
| 319 | 320 | ||
| 320 | /* We shouldn't mount such media... */ | 321 | if (!inode) |
| 321 | BUG_ON(!inode); | 322 | return 0xFFFFFFFF; |
| 323 | |||
| 322 | retblk = udf_try_read_meta(inode, block, partition, offset); | 324 | retblk = udf_try_read_meta(inode, block, partition, offset); |
| 323 | if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) { | 325 | if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) { |
| 324 | udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n"); | 326 | udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n"); |
| 325 | if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) { | 327 | if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) { |
| 326 | mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb, | 328 | mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb, |
| 327 | mdata->s_mirror_file_loc, map->s_partition_num); | 329 | mdata->s_mirror_file_loc, |
| 330 | mdata->s_phys_partition_ref); | ||
| 331 | if (IS_ERR(mdata->s_mirror_fe)) | ||
| 332 | mdata->s_mirror_fe = NULL; | ||
| 328 | mdata->s_flags |= MF_MIRROR_FE_LOADED; | 333 | mdata->s_flags |= MF_MIRROR_FE_LOADED; |
| 329 | } | 334 | } |
| 330 | 335 | ||
diff --git a/fs/udf/super.c b/fs/udf/super.c index 5e2c8c814e1b..4942549e7dc8 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c | |||
| @@ -951,13 +951,13 @@ out2: | |||
| 951 | } | 951 | } |
| 952 | 952 | ||
| 953 | struct inode *udf_find_metadata_inode_efe(struct super_block *sb, | 953 | struct inode *udf_find_metadata_inode_efe(struct super_block *sb, |
| 954 | u32 meta_file_loc, u32 partition_num) | 954 | u32 meta_file_loc, u32 partition_ref) |
| 955 | { | 955 | { |
| 956 | struct kernel_lb_addr addr; | 956 | struct kernel_lb_addr addr; |
| 957 | struct inode *metadata_fe; | 957 | struct inode *metadata_fe; |
| 958 | 958 | ||
| 959 | addr.logicalBlockNum = meta_file_loc; | 959 | addr.logicalBlockNum = meta_file_loc; |
| 960 | addr.partitionReferenceNum = partition_num; | 960 | addr.partitionReferenceNum = partition_ref; |
| 961 | 961 | ||
| 962 | metadata_fe = udf_iget_special(sb, &addr); | 962 | metadata_fe = udf_iget_special(sb, &addr); |
| 963 | 963 | ||
| @@ -974,7 +974,8 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb, | |||
| 974 | return metadata_fe; | 974 | return metadata_fe; |
| 975 | } | 975 | } |
| 976 | 976 | ||
| 977 | static int udf_load_metadata_files(struct super_block *sb, int partition) | 977 | static int udf_load_metadata_files(struct super_block *sb, int partition, |
| 978 | int type1_index) | ||
| 978 | { | 979 | { |
| 979 | struct udf_sb_info *sbi = UDF_SB(sb); | 980 | struct udf_sb_info *sbi = UDF_SB(sb); |
| 980 | struct udf_part_map *map; | 981 | struct udf_part_map *map; |
| @@ -984,20 +985,21 @@ static int udf_load_metadata_files(struct super_block *sb, int partition) | |||
| 984 | 985 | ||
| 985 | map = &sbi->s_partmaps[partition]; | 986 | map = &sbi->s_partmaps[partition]; |
| 986 | mdata = &map->s_type_specific.s_metadata; | 987 | mdata = &map->s_type_specific.s_metadata; |
| 988 | mdata->s_phys_partition_ref = type1_index; | ||
| 987 | 989 | ||
| 988 | /* metadata address */ | 990 | /* metadata address */ |
| 989 | udf_debug("Metadata file location: block = %d part = %d\n", | 991 | udf_debug("Metadata file location: block = %d part = %d\n", |
| 990 | mdata->s_meta_file_loc, map->s_partition_num); | 992 | mdata->s_meta_file_loc, mdata->s_phys_partition_ref); |
| 991 | 993 | ||
| 992 | fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc, | 994 | fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc, |
| 993 | map->s_partition_num); | 995 | mdata->s_phys_partition_ref); |
| 994 | if (IS_ERR(fe)) { | 996 | if (IS_ERR(fe)) { |
| 995 | /* mirror file entry */ | 997 | /* mirror file entry */ |
| 996 | udf_debug("Mirror metadata file location: block = %d part = %d\n", | 998 | udf_debug("Mirror metadata file location: block = %d part = %d\n", |
| 997 | mdata->s_mirror_file_loc, map->s_partition_num); | 999 | mdata->s_mirror_file_loc, mdata->s_phys_partition_ref); |
| 998 | 1000 | ||
| 999 | fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc, | 1001 | fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc, |
| 1000 | map->s_partition_num); | 1002 | mdata->s_phys_partition_ref); |
| 1001 | 1003 | ||
| 1002 | if (IS_ERR(fe)) { | 1004 | if (IS_ERR(fe)) { |
| 1003 | udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n"); | 1005 | udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n"); |
| @@ -1015,7 +1017,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition) | |||
| 1015 | */ | 1017 | */ |
| 1016 | if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) { | 1018 | if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) { |
| 1017 | addr.logicalBlockNum = mdata->s_bitmap_file_loc; | 1019 | addr.logicalBlockNum = mdata->s_bitmap_file_loc; |
| 1018 | addr.partitionReferenceNum = map->s_partition_num; | 1020 | addr.partitionReferenceNum = mdata->s_phys_partition_ref; |
| 1019 | 1021 | ||
| 1020 | udf_debug("Bitmap file location: block = %d part = %d\n", | 1022 | udf_debug("Bitmap file location: block = %d part = %d\n", |
| 1021 | addr.logicalBlockNum, addr.partitionReferenceNum); | 1023 | addr.logicalBlockNum, addr.partitionReferenceNum); |
| @@ -1283,7 +1285,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block) | |||
| 1283 | p = (struct partitionDesc *)bh->b_data; | 1285 | p = (struct partitionDesc *)bh->b_data; |
| 1284 | partitionNumber = le16_to_cpu(p->partitionNumber); | 1286 | partitionNumber = le16_to_cpu(p->partitionNumber); |
| 1285 | 1287 | ||
| 1286 | /* First scan for TYPE1, SPARABLE and METADATA partitions */ | 1288 | /* First scan for TYPE1 and SPARABLE partitions */ |
| 1287 | for (i = 0; i < sbi->s_partitions; i++) { | 1289 | for (i = 0; i < sbi->s_partitions; i++) { |
| 1288 | map = &sbi->s_partmaps[i]; | 1290 | map = &sbi->s_partmaps[i]; |
| 1289 | udf_debug("Searching map: (%d == %d)\n", | 1291 | udf_debug("Searching map: (%d == %d)\n", |
| @@ -1333,7 +1335,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block) | |||
| 1333 | goto out_bh; | 1335 | goto out_bh; |
| 1334 | 1336 | ||
| 1335 | if (map->s_partition_type == UDF_METADATA_MAP25) { | 1337 | if (map->s_partition_type == UDF_METADATA_MAP25) { |
| 1336 | ret = udf_load_metadata_files(sb, i); | 1338 | ret = udf_load_metadata_files(sb, i, type1_idx); |
| 1337 | if (ret < 0) { | 1339 | if (ret < 0) { |
| 1338 | udf_err(sb, "error loading MetaData partition map %d\n", | 1340 | udf_err(sb, "error loading MetaData partition map %d\n", |
| 1339 | i); | 1341 | i); |
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h index 27b5335730c9..c13875d669c0 100644 --- a/fs/udf/udf_sb.h +++ b/fs/udf/udf_sb.h | |||
| @@ -61,6 +61,11 @@ struct udf_meta_data { | |||
| 61 | __u32 s_bitmap_file_loc; | 61 | __u32 s_bitmap_file_loc; |
| 62 | __u32 s_alloc_unit_size; | 62 | __u32 s_alloc_unit_size; |
| 63 | __u16 s_align_unit_size; | 63 | __u16 s_align_unit_size; |
| 64 | /* | ||
| 65 | * Partition Reference Number of the associated physical / sparable | ||
| 66 | * partition | ||
| 67 | */ | ||
| 68 | __u16 s_phys_partition_ref; | ||
| 64 | int s_flags; | 69 | int s_flags; |
| 65 | struct inode *s_metadata_fe; | 70 | struct inode *s_metadata_fe; |
| 66 | struct inode *s_mirror_fe; | 71 | struct inode *s_mirror_fe; |
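The metadata, mirror and bitmap file entries of a UDF_METADATA_MAP25 map are addressed within the underlying physical/sparable (type 1) partition rather than within the metadata map itself, so the lookups above now carry the partition reference saved at mount time in s_phys_partition_ref. Roughly, building such an address now looks like this (mdata stands for the map's s_type_specific.s_metadata, as in the hunks above):

        struct kernel_lb_addr addr = {
                .logicalBlockNum        = mdata->s_meta_file_loc,
                /* resolve in the physical/sparable partition, not the metadata map */
                .partitionReferenceNum  = mdata->s_phys_partition_ref,
        };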
diff --git a/include/acpi/video.h b/include/acpi/video.h index 70a41f742037..5731ccb42585 100644 --- a/include/acpi/video.h +++ b/include/acpi/video.h | |||
| @@ -51,7 +51,8 @@ extern void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type); | |||
| 51 | */ | 51 | */ |
| 52 | extern bool acpi_video_handles_brightness_key_presses(void); | 52 | extern bool acpi_video_handles_brightness_key_presses(void); |
| 53 | extern int acpi_video_get_levels(struct acpi_device *device, | 53 | extern int acpi_video_get_levels(struct acpi_device *device, |
| 54 | struct acpi_video_device_brightness **dev_br); | 54 | struct acpi_video_device_brightness **dev_br, |
| 55 | int *pmax_level); | ||
| 55 | #else | 56 | #else |
| 56 | static inline int acpi_video_register(void) { return 0; } | 57 | static inline int acpi_video_register(void) { return 0; } |
| 57 | static inline void acpi_video_unregister(void) { return; } | 58 | static inline void acpi_video_unregister(void) { return; } |
| @@ -72,7 +73,8 @@ static inline bool acpi_video_handles_brightness_key_presses(void) | |||
| 72 | return false; | 73 | return false; |
| 73 | } | 74 | } |
| 74 | static inline int acpi_video_get_levels(struct acpi_device *device, | 75 | static inline int acpi_video_get_levels(struct acpi_device *device, |
| 75 | struct acpi_video_device_brightness **dev_br) | 76 | struct acpi_video_device_brightness **dev_br, |
| 77 | int *pmax_level) | ||
| 76 | { | 78 | { |
| 77 | return -ENODEV; | 79 | return -ENODEV; |
| 78 | } | 80 | } |
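acpi_video_get_levels() gains an extra out parameter, apparently reporting the highest brightness level discovered while the table is built, so existing callers pick up one more argument. A hedged sketch of an updated call site ('device' and the error handling are illustrative):

        struct acpi_video_device_brightness *br = NULL;
        int max_level = 0;
        int err;

        err = acpi_video_get_levels(device, &br, &max_level);
        if (err)
                return err;
        /* br holds the discovered levels; max_level the brightest one reported */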
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 6bd05700d8c9..05f05f17a7c2 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h | |||
| @@ -22,37 +22,33 @@ | |||
| 22 | #include <asm-generic/qspinlock_types.h> | 22 | #include <asm-generic/qspinlock_types.h> |
| 23 | 23 | ||
| 24 | /** | 24 | /** |
| 25 | * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock | ||
| 26 | * @lock : Pointer to queued spinlock structure | ||
| 27 | * | ||
| 28 | * There is a very slight possibility of live-lock if the lockers keep coming | ||
| 29 | * and the waiter is just unfortunate enough to not see any unlock state. | ||
| 30 | */ | ||
| 31 | #ifndef queued_spin_unlock_wait | ||
| 32 | extern void queued_spin_unlock_wait(struct qspinlock *lock); | ||
| 33 | #endif | ||
| 34 | |||
| 35 | /** | ||
| 25 | * queued_spin_is_locked - is the spinlock locked? | 36 | * queued_spin_is_locked - is the spinlock locked? |
| 26 | * @lock: Pointer to queued spinlock structure | 37 | * @lock: Pointer to queued spinlock structure |
| 27 | * Return: 1 if it is locked, 0 otherwise | 38 | * Return: 1 if it is locked, 0 otherwise |
| 28 | */ | 39 | */ |
| 40 | #ifndef queued_spin_is_locked | ||
| 29 | static __always_inline int queued_spin_is_locked(struct qspinlock *lock) | 41 | static __always_inline int queued_spin_is_locked(struct qspinlock *lock) |
| 30 | { | 42 | { |
| 31 | /* | 43 | /* |
| 32 | * queued_spin_lock_slowpath() can ACQUIRE the lock before | 44 | * See queued_spin_unlock_wait(). |
| 33 | * issuing the unordered store that sets _Q_LOCKED_VAL. | ||
| 34 | * | ||
| 35 | * See both smp_cond_acquire() sites for more detail. | ||
| 36 | * | ||
| 37 | * This however means that in code like: | ||
| 38 | * | ||
| 39 | * spin_lock(A) spin_lock(B) | ||
| 40 | * spin_unlock_wait(B) spin_is_locked(A) | ||
| 41 | * do_something() do_something() | ||
| 42 | * | ||
| 43 | * Both CPUs can end up running do_something() because the store | ||
| 44 | * setting _Q_LOCKED_VAL will pass through the loads in | ||
| 45 | * spin_unlock_wait() and/or spin_is_locked(). | ||
| 46 | * | 45 | * |
| 47 | * Avoid this by issuing a full memory barrier between the spin_lock() | 46 | * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL |
| 48 | * and the loads in spin_unlock_wait() and spin_is_locked(). | 47 | * isn't immediately observable. |
| 49 | * | ||
| 50 | * Note that regular mutual exclusion doesn't care about this | ||
| 51 | * delayed store. | ||
| 52 | */ | 48 | */ |
| 53 | smp_mb(); | 49 | return atomic_read(&lock->val); |
| 54 | return atomic_read(&lock->val) & _Q_LOCKED_MASK; | ||
| 55 | } | 50 | } |
| 51 | #endif | ||
| 56 | 52 | ||
| 57 | /** | 53 | /** |
| 58 | * queued_spin_value_unlocked - is the spinlock structure unlocked? | 54 | * queued_spin_value_unlocked - is the spinlock structure unlocked? |
| @@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock) | |||
| 122 | } | 118 | } |
| 123 | #endif | 119 | #endif |
| 124 | 120 | ||
| 125 | /** | ||
| 126 | * queued_spin_unlock_wait - wait until current lock holder releases the lock | ||
| 127 | * @lock : Pointer to queued spinlock structure | ||
| 128 | * | ||
| 129 | * There is a very slight possibility of live-lock if the lockers keep coming | ||
| 130 | * and the waiter is just unfortunate enough to not see any unlock state. | ||
| 131 | */ | ||
| 132 | static inline void queued_spin_unlock_wait(struct qspinlock *lock) | ||
| 133 | { | ||
| 134 | /* See queued_spin_is_locked() */ | ||
| 135 | smp_mb(); | ||
| 136 | while (atomic_read(&lock->val) & _Q_LOCKED_MASK) | ||
| 137 | cpu_relax(); | ||
| 138 | } | ||
| 139 | |||
| 140 | #ifndef virt_spin_lock | 121 | #ifndef virt_spin_lock |
| 141 | static __always_inline bool virt_spin_lock(struct qspinlock *lock) | 122 | static __always_inline bool virt_spin_lock(struct qspinlock *lock) |
| 142 | { | 123 | { |
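queued_spin_unlock_wait() moves out of line (a fully ordered implementation now lives in kernel/locking/qspinlock.c) and becomes overridable, while queued_spin_is_locked() no longer needs the full barrier because any non-zero lock word counts as locked. An architecture that still wanted its own inline wait loop could presumably override the hook as below; the body shown is just the shape of the old generic loop, not the new slow-path code:

        /* hypothetical arch override; defining the name makes the generic
         * "#ifndef queued_spin_unlock_wait" extern declaration stand down */
        #define queued_spin_unlock_wait queued_spin_unlock_wait
        static inline void queued_spin_unlock_wait(struct qspinlock *lock)
        {
                smp_mb();       /* order against the preceding lock acquisition */
                while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
                        cpu_relax();
        }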
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 576e4639ca60..314b3caa701c 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h | |||
| @@ -65,6 +65,7 @@ struct coredump_params { | |||
| 65 | unsigned long limit; | 65 | unsigned long limit; |
| 66 | unsigned long mm_flags; | 66 | unsigned long mm_flags; |
| 67 | loff_t written; | 67 | loff_t written; |
| 68 | loff_t pos; | ||
| 68 | }; | 69 | }; |
| 69 | 70 | ||
| 70 | /* | 71 | /* |
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 19b14862d3e0..1b3b6e155392 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h | |||
| @@ -279,6 +279,11 @@ struct ceph_osd_client { | |||
| 279 | struct workqueue_struct *notify_wq; | 279 | struct workqueue_struct *notify_wq; |
| 280 | }; | 280 | }; |
| 281 | 281 | ||
| 282 | static inline bool ceph_osdmap_flag(struct ceph_osd_client *osdc, int flag) | ||
| 283 | { | ||
| 284 | return osdc->osdmap->flags & flag; | ||
| 285 | } | ||
| 286 | |||
| 282 | extern int ceph_osdc_setup(void); | 287 | extern int ceph_osdc_setup(void); |
| 283 | extern void ceph_osdc_cleanup(void); | 288 | extern void ceph_osdc_cleanup(void); |
| 284 | 289 | ||
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index ddc426b22d81..9ccf4dbe55f8 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h | |||
| @@ -189,11 +189,6 @@ static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd) | |||
| 189 | return !ceph_osd_is_up(map, osd); | 189 | return !ceph_osd_is_up(map, osd); |
| 190 | } | 190 | } |
| 191 | 191 | ||
| 192 | static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag) | ||
| 193 | { | ||
| 194 | return map && (map->flags & flag); | ||
| 195 | } | ||
| 196 | |||
| 197 | extern char *ceph_osdmap_state_str(char *str, int len, int state); | 192 | extern char *ceph_osdmap_state_str(char *str, int len, int state); |
| 198 | extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd); | 193 | extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd); |
| 199 | 194 | ||
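ceph_osdmap_flag() now takes the osd_client instead of a bare map and drops the NULL-map tolerance, which presumably relies on the client always owning an osdmap. Call sites change shape roughly like this (the -ENOSPC reaction is illustrative; real callers tend to pause or queue the request instead):

        if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
            ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR))
                return -ENOSPC;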
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 0c72204c75fc..fb39d5add173 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h | |||
| @@ -25,7 +25,7 @@ | |||
| 25 | #define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ | 25 | #define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ |
| 26 | #define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */ | 26 | #define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */ |
| 27 | #define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */ | 27 | #define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */ |
| 28 | #define CLK_IS_ROOT BIT(4) /* Deprecated: Don't use */ | 28 | /* unused */ |
| 29 | #define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */ | 29 | #define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */ |
| 30 | #define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */ | 30 | #define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */ |
| 31 | #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ | 31 | #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ |
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 786ad32631a6..07b83d32f66c 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
| @@ -152,6 +152,8 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev); | |||
| 152 | extern int cpuidle_play_dead(void); | 152 | extern int cpuidle_play_dead(void); |
| 153 | 153 | ||
| 154 | extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); | 154 | extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); |
| 155 | static inline struct cpuidle_device *cpuidle_get_device(void) | ||
| 156 | {return __this_cpu_read(cpuidle_devices); } | ||
| 155 | #else | 157 | #else |
| 156 | static inline void disable_cpuidle(void) { } | 158 | static inline void disable_cpuidle(void) { } |
| 157 | static inline bool cpuidle_not_available(struct cpuidle_driver *drv, | 159 | static inline bool cpuidle_not_available(struct cpuidle_driver *drv, |
| @@ -187,6 +189,7 @@ static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } | |||
| 187 | static inline int cpuidle_play_dead(void) {return -ENODEV; } | 189 | static inline int cpuidle_play_dead(void) {return -ENODEV; } |
| 188 | static inline struct cpuidle_driver *cpuidle_get_cpu_driver( | 190 | static inline struct cpuidle_driver *cpuidle_get_cpu_driver( |
| 189 | struct cpuidle_device *dev) {return NULL; } | 191 | struct cpuidle_device *dev) {return NULL; } |
| 192 | static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; } | ||
| 190 | #endif | 193 | #endif |
| 191 | 194 | ||
| 192 | #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND) | 195 | #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND) |
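cpuidle_get_device() wraps the per-CPU cpuidle_devices read so callers such as the idle loop no longer touch the variable directly, and the !CONFIG_CPU_IDLE stub keeps those callers compiling. A minimal sketch of the intended pattern (the consumer function is hypothetical):

        struct cpuidle_device *dev = cpuidle_get_device();
        struct cpuidle_driver *drv = dev ? cpuidle_get_cpu_driver(dev) : NULL;

        if (dev && drv)
                example_enter_idle(drv, dev);   /* hypothetical consumer of the pair */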
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 484c8792da82..f53fa055021a 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
| @@ -212,6 +212,7 @@ struct dentry_operations { | |||
| 212 | #define DCACHE_OP_REAL 0x08000000 | 212 | #define DCACHE_OP_REAL 0x08000000 |
| 213 | 213 | ||
| 214 | #define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */ | 214 | #define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */ |
| 215 | #define DCACHE_DENTRY_CURSOR 0x20000000 | ||
| 215 | 216 | ||
| 216 | extern seqlock_t rename_lock; | 217 | extern seqlock_t rename_lock; |
| 217 | 218 | ||
| @@ -575,5 +576,17 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry, | |||
| 575 | return inode; | 576 | return inode; |
| 576 | } | 577 | } |
| 577 | 578 | ||
| 579 | /** | ||
| 580 | * d_real_inode - Return the real inode | ||
| 581 | * @dentry: The dentry to query | ||
| 582 | * | ||
| 583 | * If dentry is on an union/overlay, then return the underlying, real inode. | ||
| 583 | * If dentry is on a union/overlay, then return the underlying, real inode. | ||
| 584 | * Otherwise return d_inode(). | ||
| 585 | */ | ||
| 586 | static inline struct inode *d_real_inode(struct dentry *dentry) | ||
| 587 | { | ||
| 588 | return d_backing_inode(d_real(dentry)); | ||
| 589 | } | ||
| 590 | |||
| 578 | 591 | ||
| 579 | #endif /* __LINUX_DCACHE_H */ | 592 | #endif /* __LINUX_DCACHE_H */ |
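d_real_inode() hands callers the real backing inode in one step, which matters to code that must treat a file reached through an overlay and the same file reached through its lower layer as identical. A small illustrative helper (the function name is hypothetical):

        static bool example_same_backing_file(const struct path *a,
                                              const struct path *b)
        {
                /* compares the real, underlying inodes even across an overlay */
                return d_real_inode(a->dentry) == d_real_inode(b->dentry);
        }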
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h index 5871f292b596..277ab9af9ac2 100644 --- a/include/linux/devpts_fs.h +++ b/include/linux/devpts_fs.h | |||
| @@ -15,13 +15,12 @@ | |||
| 15 | 15 | ||
| 16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
| 17 | 17 | ||
| 18 | struct pts_fs_info; | ||
| 19 | |||
| 20 | #ifdef CONFIG_UNIX98_PTYS | 18 | #ifdef CONFIG_UNIX98_PTYS |
| 21 | 19 | ||
| 22 | /* Look up a pts fs info and get a ref to it */ | 20 | struct pts_fs_info; |
| 23 | struct pts_fs_info *devpts_get_ref(struct inode *, struct file *); | 21 | |
| 24 | void devpts_put_ref(struct pts_fs_info *); | 22 | struct pts_fs_info *devpts_acquire(struct file *); |
| 23 | void devpts_release(struct pts_fs_info *); | ||
| 25 | 24 | ||
| 26 | int devpts_new_index(struct pts_fs_info *); | 25 | int devpts_new_index(struct pts_fs_info *); |
| 27 | void devpts_kill_index(struct pts_fs_info *, int); | 26 | void devpts_kill_index(struct pts_fs_info *, int); |
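devpts_get_ref()/devpts_put_ref() become devpts_acquire()/devpts_release(), and the inode argument disappears since the struct file alone is enough. A sketch of how a ptmx-style opener would use the renamed pair, assuming devpts_acquire() reports failure via ERR_PTR:

        struct pts_fs_info *fsi;
        int index;

        fsi = devpts_acquire(filp);
        if (IS_ERR(fsi))
                return PTR_ERR(fsi);

        index = devpts_new_index(fsi);
        if (index < 0) {
                devpts_release(fsi);
                return index;
        }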
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 3fe90d494edb..4551c6f2a6c4 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h | |||
| @@ -112,19 +112,24 @@ struct dma_buf_ops { | |||
| 112 | * @file: file pointer used for sharing buffers across, and for refcounting. | 112 | * @file: file pointer used for sharing buffers across, and for refcounting. |
| 113 | * @attachments: list of dma_buf_attachment that denotes all devices attached. | 113 | * @attachments: list of dma_buf_attachment that denotes all devices attached. |
| 114 | * @ops: dma_buf_ops associated with this buffer object. | 114 | * @ops: dma_buf_ops associated with this buffer object. |
| 115 | * @lock: used internally to serialize list manipulation, attach/detach and vmap/unmap | ||
| 116 | * @vmapping_counter: used internally to refcnt the vmaps | ||
| 117 | * @vmap_ptr: the current vmap ptr if vmapping_counter > 0 | ||
| 115 | * @exp_name: name of the exporter; useful for debugging. | 118 | * @exp_name: name of the exporter; useful for debugging. |
| 116 | * @owner: pointer to exporter module; used for refcounting when exporter is a | 119 | * @owner: pointer to exporter module; used for refcounting when exporter is a |
| 117 | * kernel module. | 120 | * kernel module. |
| 118 | * @list_node: node for dma_buf accounting and debugging. | 121 | * @list_node: node for dma_buf accounting and debugging. |
| 119 | * @priv: exporter specific private data for this buffer object. | 122 | * @priv: exporter specific private data for this buffer object. |
| 120 | * @resv: reservation object linked to this dma-buf | 123 | * @resv: reservation object linked to this dma-buf |
| 124 | * @poll: for userspace poll support | ||
| 125 | * @cb_excl: for userspace poll support | ||
| 126 | * @cb_shared: for userspace poll support | ||
| 121 | */ | 127 | */ |
| 122 | struct dma_buf { | 128 | struct dma_buf { |
| 123 | size_t size; | 129 | size_t size; |
| 124 | struct file *file; | 130 | struct file *file; |
| 125 | struct list_head attachments; | 131 | struct list_head attachments; |
| 126 | const struct dma_buf_ops *ops; | 132 | const struct dma_buf_ops *ops; |
| 127 | /* mutex to serialize list manipulation, attach/detach and vmap/unmap */ | ||
| 128 | struct mutex lock; | 133 | struct mutex lock; |
| 129 | unsigned vmapping_counter; | 134 | unsigned vmapping_counter; |
| 130 | void *vmap_ptr; | 135 | void *vmap_ptr; |
| @@ -188,9 +193,11 @@ struct dma_buf_export_info { | |||
| 188 | 193 | ||
| 189 | /** | 194 | /** |
| 190 | * helper macro for exporters; zeros and fills in most common values | 195 | * helper macro for exporters; zeros and fills in most common values |
| 196 | * | ||
| 197 | * @name: export-info name | ||
| 191 | */ | 198 | */ |
| 192 | #define DEFINE_DMA_BUF_EXPORT_INFO(a) \ | 199 | #define DEFINE_DMA_BUF_EXPORT_INFO(name) \ |
| 193 | struct dma_buf_export_info a = { .exp_name = KBUILD_MODNAME, \ | 200 | struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \ |
| 194 | .owner = THIS_MODULE } | 201 | .owner = THIS_MODULE } |
| 195 | 202 | ||
| 196 | /** | 203 | /** |
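Renaming the macro parameter from 'a' to 'name' only affects readability (plus the new kernel-doc line); exporters keep using it the same way. Typical usage, with the ops and object names as placeholders for the exporter's own state:

        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
        struct dma_buf *dmabuf;

        exp_info.ops   = &example_dmabuf_ops;   /* exporter's dma_buf_ops */
        exp_info.size  = obj_size;
        exp_info.flags = O_RDWR;
        exp_info.priv  = example_obj;           /* exporter-private object */

        dmabuf = dma_buf_export(&exp_info);
        if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);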
diff --git a/include/linux/efi.h b/include/linux/efi.h index c2db3ca22217..f196dd0b0f2f 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
| @@ -1005,7 +1005,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm, | |||
| 1005 | /* Iterate through an efi_memory_map */ | 1005 | /* Iterate through an efi_memory_map */ |
| 1006 | #define for_each_efi_memory_desc_in_map(m, md) \ | 1006 | #define for_each_efi_memory_desc_in_map(m, md) \ |
| 1007 | for ((md) = (m)->map; \ | 1007 | for ((md) = (m)->map; \ |
| 1008 | (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \ | 1008 | ((void *)(md) + (m)->desc_size) <= (m)->map_end; \ |
| 1009 | (md) = (void *)(md) + (m)->desc_size) | 1009 | (md) = (void *)(md) + (m)->desc_size) |
| 1010 | 1010 | ||
| 1011 | /** | 1011 | /** |
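The old upper bound subtracted one descriptor from map_end before comparing, which can wrap for an empty map and let the loop body run on garbage; the new bound simply requires a whole descriptor to fit below map_end, so an empty map iterates zero times. Iteration is unchanged for callers, e.g. (a minimal sketch assuming a populated struct efi_memory_map):

        static u64 example_count_conventional_pages(struct efi_memory_map *map)
        {
                efi_memory_desc_t *md;
                u64 pages = 0;

                for_each_efi_memory_desc_in_map(map, md)
                        if (md->type == EFI_CONVENTIONAL_MEMORY)
                                pages += md->num_pages;

                return pages;
        }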
diff --git a/include/linux/fence.h b/include/linux/fence.h index 2b17698b60b8..2056e9fd0138 100644 --- a/include/linux/fence.h +++ b/include/linux/fence.h | |||
| @@ -49,6 +49,8 @@ struct fence_cb; | |||
| 49 | * @timestamp: Timestamp when the fence was signaled. | 49 | * @timestamp: Timestamp when the fence was signaled. |
| 50 | * @status: Optional, only valid if < 0, must be set before calling | 50 | * @status: Optional, only valid if < 0, must be set before calling |
| 51 | * fence_signal, indicates that the fence has completed with an error. | 51 | * fence_signal, indicates that the fence has completed with an error. |
| 52 | * @child_list: list of children fences | ||
| 53 | * @active_list: list of active fences | ||
| 52 | * | 54 | * |
| 53 | * the flags member must be manipulated and read using the appropriate | 55 | * the flags member must be manipulated and read using the appropriate |
| 54 | * atomic ops (bit_*), so taking the spinlock will not be needed most | 56 | * atomic ops (bit_*), so taking the spinlock will not be needed most |
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 604e1526cd00..13ba552e6c09 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h | |||
| @@ -241,7 +241,7 @@ struct fscache_cache_ops { | |||
| 241 | 241 | ||
| 242 | /* check the consistency between the backing cache and the FS-Cache | 242 | /* check the consistency between the backing cache and the FS-Cache |
| 243 | * cookie */ | 243 | * cookie */ |
| 244 | bool (*check_consistency)(struct fscache_operation *op); | 244 | int (*check_consistency)(struct fscache_operation *op); |
| 245 | 245 | ||
| 246 | /* store the updated auxiliary data on an object */ | 246 | /* store the updated auxiliary data on an object */ |
| 247 | void (*update_object)(struct fscache_object *object); | 247 | void (*update_object)(struct fscache_object *object); |
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index d029ffac0d69..99403b19092f 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h | |||
| @@ -223,6 +223,8 @@ struct st_sensor_settings { | |||
| 223 | * @get_irq_data_ready: Function to get the IRQ used for data ready signal. | 223 | * @get_irq_data_ready: Function to get the IRQ used for data ready signal. |
| 224 | * @tf: Transfer function structure used by I/O operations. | 224 | * @tf: Transfer function structure used by I/O operations. |
| 225 | * @tb: Transfer buffers and mutex used by I/O operations. | 225 | * @tb: Transfer buffers and mutex used by I/O operations. |
| 226 | * @hw_irq_trigger: if we're using the hardware interrupt on the sensor. | ||
| 227 | * @hw_timestamp: Latest timestamp from the interrupt handler, when in use. | ||
| 226 | */ | 228 | */ |
| 227 | struct st_sensor_data { | 229 | struct st_sensor_data { |
| 228 | struct device *dev; | 230 | struct device *dev; |
| @@ -247,6 +249,9 @@ struct st_sensor_data { | |||
| 247 | 249 | ||
| 248 | const struct st_sensor_transfer_function *tf; | 250 | const struct st_sensor_transfer_function *tf; |
| 249 | struct st_sensor_transfer_buffer tb; | 251 | struct st_sensor_transfer_buffer tb; |
| 252 | |||
| 253 | bool hw_irq_trigger; | ||
| 254 | s64 hw_timestamp; | ||
| 250 | }; | 255 | }; |
| 251 | 256 | ||
| 252 | #ifdef CONFIG_IIO_BUFFER | 257 | #ifdef CONFIG_IIO_BUFFER |
| @@ -260,7 +265,8 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, | |||
| 260 | const struct iio_trigger_ops *trigger_ops); | 265 | const struct iio_trigger_ops *trigger_ops); |
| 261 | 266 | ||
| 262 | void st_sensors_deallocate_trigger(struct iio_dev *indio_dev); | 267 | void st_sensors_deallocate_trigger(struct iio_dev *indio_dev); |
| 263 | 268 | int st_sensors_validate_device(struct iio_trigger *trig, | |
| 269 | struct iio_dev *indio_dev); | ||
| 264 | #else | 270 | #else |
| 265 | static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev, | 271 | static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev, |
| 266 | const struct iio_trigger_ops *trigger_ops) | 272 | const struct iio_trigger_ops *trigger_ops) |
| @@ -271,6 +277,7 @@ static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev) | |||
| 271 | { | 277 | { |
| 272 | return; | 278 | return; |
| 273 | } | 279 | } |
| 280 | #define st_sensors_validate_device NULL | ||
| 274 | #endif | 281 | #endif |
| 275 | 282 | ||
| 276 | int st_sensors_init_sensor(struct iio_dev *indio_dev, | 283 | int st_sensors_init_sensor(struct iio_dev *indio_dev, |
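st_sensors_validate_device() appears intended to let the shared ST trigger code refuse to attach a sensor's hardware trigger to some other device's iio_dev, and the '#define ... NULL' fallback keeps driver initializers valid when CONFIG_IIO_BUFFER is off. Drivers would typically wire it into their trigger ops (the ops name is illustrative):

        static const struct iio_trigger_ops st_example_trigger_ops = {
                .validate_device = st_sensors_validate_device,
        };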
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index f2cb8d45513d..f8834f820ec2 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
| @@ -190,7 +190,7 @@ extern struct task_group root_task_group; | |||
| 190 | #define INIT_TASK(tsk) \ | 190 | #define INIT_TASK(tsk) \ |
| 191 | { \ | 191 | { \ |
| 192 | .state = 0, \ | 192 | .state = 0, \ |
| 193 | .stack = &init_thread_info, \ | 193 | .stack = init_stack, \ |
| 194 | .usage = ATOMIC_INIT(2), \ | 194 | .usage = ATOMIC_INIT(2), \ |
| 195 | .flags = PF_KTHREAD, \ | 195 | .flags = PF_KTHREAD, \ |
| 196 | .prio = MAX_PRIO-20, \ | 196 | .prio = MAX_PRIO-20, \ |
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index bfbd707de390..dc493e0f0ff7 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
| @@ -305,12 +305,12 @@ | |||
| 305 | #define ICC_SGI1R_AFFINITY_1_SHIFT 16 | 305 | #define ICC_SGI1R_AFFINITY_1_SHIFT 16 |
| 306 | #define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) | 306 | #define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) |
| 307 | #define ICC_SGI1R_SGI_ID_SHIFT 24 | 307 | #define ICC_SGI1R_SGI_ID_SHIFT 24 |
| 308 | #define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT) | 308 | #define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT) |
| 309 | #define ICC_SGI1R_AFFINITY_2_SHIFT 32 | 309 | #define ICC_SGI1R_AFFINITY_2_SHIFT 32 |
| 310 | #define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) | 310 | #define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) |
| 311 | #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 | 311 | #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 |
| 312 | #define ICC_SGI1R_AFFINITY_3_SHIFT 48 | 312 | #define ICC_SGI1R_AFFINITY_3_SHIFT 48 |
| 313 | #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) | 313 | #define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) |
| 314 | 314 | ||
| 315 | #include <asm/arch_gicv3.h> | 315 | #include <asm/arch_gicv3.h> |
| 316 | 316 | ||
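Two distinct fixes here: the SGI INTID field is only four bits wide, so its mask shrinks to 0xf, and the AFFINITY_2/3 masks were previously built with AFFINITY_1's shift and therefore covered the wrong bits. A hypothetical helper showing how the corrected definitions compose an ICC_SGI1R_EL1 value:

        static u64 example_pack_sgi1r(u64 aff3, u64 aff2, u64 aff1,
                                      u64 sgi, u64 target_list)
        {
                return (aff3 << ICC_SGI1R_AFFINITY_3_SHIFT) |
                       (aff2 << ICC_SGI1R_AFFINITY_2_SHIFT) |
                       ((sgi & 0xf) << ICC_SGI1R_SGI_ID_SHIFT) |
                       (aff1 << ICC_SGI1R_AFFINITY_1_SHIFT) |
                       (target_list & 0xffff);
        }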
diff --git a/include/linux/isa.h b/include/linux/isa.h index 5ab85281230b..f2d0258414cf 100644 --- a/include/linux/isa.h +++ b/include/linux/isa.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | #define __LINUX_ISA_H | 6 | #define __LINUX_ISA_H |
| 7 | 7 | ||
| 8 | #include <linux/device.h> | 8 | #include <linux/device.h> |
| 9 | #include <linux/errno.h> | ||
| 9 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
| 10 | 11 | ||
| 11 | struct isa_driver { | 12 | struct isa_driver { |
| @@ -22,13 +23,13 @@ struct isa_driver { | |||
| 22 | 23 | ||
| 23 | #define to_isa_driver(x) container_of((x), struct isa_driver, driver) | 24 | #define to_isa_driver(x) container_of((x), struct isa_driver, driver) |
| 24 | 25 | ||
| 25 | #ifdef CONFIG_ISA | 26 | #ifdef CONFIG_ISA_BUS_API |
| 26 | int isa_register_driver(struct isa_driver *, unsigned int); | 27 | int isa_register_driver(struct isa_driver *, unsigned int); |
| 27 | void isa_unregister_driver(struct isa_driver *); | 28 | void isa_unregister_driver(struct isa_driver *); |
| 28 | #else | 29 | #else |
| 29 | static inline int isa_register_driver(struct isa_driver *d, unsigned int i) | 30 | static inline int isa_register_driver(struct isa_driver *d, unsigned int i) |
| 30 | { | 31 | { |
| 31 | return 0; | 32 | return -ENODEV; |
| 32 | } | 33 | } |
| 33 | 34 | ||
| 34 | static inline void isa_unregister_driver(struct isa_driver *d) | 35 | static inline void isa_unregister_driver(struct isa_driver *d) |
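Besides moving the API behind CONFIG_ISA_BUS_API, the stub now returns -ENODEV instead of 0, so a driver whose registration is compiled out no longer claims success. Registration itself is unchanged; the driver struct and device count below are placeholders:

        static int __init example_isa_init(void)
        {
                /* -ENODEV here when CONFIG_ISA_BUS_API is not enabled */
                return isa_register_driver(&example_isa_driver, EXAMPLE_MAX_DEVS);
        }

        static void __exit example_isa_exit(void)
        {
                isa_unregister_driver(&example_isa_driver);
        }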
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index 0536524bb9eb..68904469fba1 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h | |||
| @@ -117,13 +117,18 @@ struct module; | |||
| 117 | 117 | ||
| 118 | #include <linux/atomic.h> | 118 | #include <linux/atomic.h> |
| 119 | 119 | ||
| 120 | #ifdef HAVE_JUMP_LABEL | ||
| 121 | |||
| 120 | static inline int static_key_count(struct static_key *key) | 122 | static inline int static_key_count(struct static_key *key) |
| 121 | { | 123 | { |
| 122 | return atomic_read(&key->enabled); | 124 | /* |
| 125 | * -1 means the first static_key_slow_inc() is in progress. | ||
| 126 | * static_key_enabled() must return true, so return 1 here. | ||
| 127 | */ | ||
| 128 | int n = atomic_read(&key->enabled); | ||
| 129 | return n >= 0 ? n : 1; | ||
| 123 | } | 130 | } |
| 124 | 131 | ||
| 125 | #ifdef HAVE_JUMP_LABEL | ||
| 126 | |||
| 127 | #define JUMP_TYPE_FALSE 0UL | 132 | #define JUMP_TYPE_FALSE 0UL |
| 128 | #define JUMP_TYPE_TRUE 1UL | 133 | #define JUMP_TYPE_TRUE 1UL |
| 129 | #define JUMP_TYPE_MASK 1UL | 134 | #define JUMP_TYPE_MASK 1UL |
| @@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod); | |||
| 162 | 167 | ||
| 163 | #else /* !HAVE_JUMP_LABEL */ | 168 | #else /* !HAVE_JUMP_LABEL */ |
| 164 | 169 | ||
| 170 | static inline int static_key_count(struct static_key *key) | ||
| 171 | { | ||
| 172 | return atomic_read(&key->enabled); | ||
| 173 | } | ||
| 174 | |||
| 165 | static __always_inline void jump_label_init(void) | 175 | static __always_inline void jump_label_init(void) |
| 166 | { | 176 | { |
| 167 | static_key_initialized = true; | 177 | static_key_initialized = true; |
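With jump labels, key->enabled is briefly -1 while the first static_key_slow_inc() runs, and static_key_count() now reports that transient state as 1, so anything built on static_key_enabled() cannot momentarily see the key as off. A minimal reader sketch (the key is hypothetical):

        static struct static_key example_key = STATIC_KEY_INIT_FALSE;

        static bool example_feature_active(void)
        {
                /* static_key_enabled() is defined in terms of static_key_count() */
                return static_key_enabled(&example_key);
        }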
diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 611927f5870d..ac4b3c46a84d 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h | |||
| @@ -59,14 +59,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object); | |||
| 59 | 59 | ||
| 60 | void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); | 60 | void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); |
| 61 | void kasan_kfree_large(const void *ptr); | 61 | void kasan_kfree_large(const void *ptr); |
| 62 | void kasan_kfree(void *ptr); | 62 | void kasan_poison_kfree(void *ptr); |
| 63 | void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, | 63 | void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, |
| 64 | gfp_t flags); | 64 | gfp_t flags); |
| 65 | void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); | 65 | void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); |
| 66 | 66 | ||
| 67 | void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); | 67 | void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); |
| 68 | bool kasan_slab_free(struct kmem_cache *s, void *object); | 68 | bool kasan_slab_free(struct kmem_cache *s, void *object); |
| 69 | void kasan_poison_slab_free(struct kmem_cache *s, void *object); | ||
| 70 | 69 | ||
| 71 | struct kasan_cache { | 70 | struct kasan_cache { |
| 72 | int alloc_meta_offset; | 71 | int alloc_meta_offset; |
| @@ -76,6 +75,9 @@ struct kasan_cache { | |||
| 76 | int kasan_module_alloc(void *addr, size_t size); | 75 | int kasan_module_alloc(void *addr, size_t size); |
| 77 | void kasan_free_shadow(const struct vm_struct *vm); | 76 | void kasan_free_shadow(const struct vm_struct *vm); |
| 78 | 77 | ||
| 78 | size_t ksize(const void *); | ||
| 79 | static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); } | ||
| 80 | |||
| 79 | #else /* CONFIG_KASAN */ | 81 | #else /* CONFIG_KASAN */ |
| 80 | 82 | ||
| 81 | static inline void kasan_unpoison_shadow(const void *address, size_t size) {} | 83 | static inline void kasan_unpoison_shadow(const void *address, size_t size) {} |
| @@ -102,7 +104,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache, | |||
| 102 | 104 | ||
| 103 | static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} | 105 | static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} |
| 104 | static inline void kasan_kfree_large(const void *ptr) {} | 106 | static inline void kasan_kfree_large(const void *ptr) {} |
| 105 | static inline void kasan_kfree(void *ptr) {} | 107 | static inline void kasan_poison_kfree(void *ptr) {} |
| 106 | static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, | 108 | static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, |
| 107 | size_t size, gfp_t flags) {} | 109 | size_t size, gfp_t flags) {} |
| 108 | static inline void kasan_krealloc(const void *object, size_t new_size, | 110 | static inline void kasan_krealloc(const void *object, size_t new_size, |
| @@ -114,11 +116,12 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object) | |||
| 114 | { | 116 | { |
| 115 | return false; | 117 | return false; |
| 116 | } | 118 | } |
| 117 | static inline void kasan_poison_slab_free(struct kmem_cache *s, void *object) {} | ||
| 118 | 119 | ||
| 119 | static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } | 120 | static inline int kasan_module_alloc(void *addr, size_t size) { return 0; } |
| 120 | static inline void kasan_free_shadow(const struct vm_struct *vm) {} | 121 | static inline void kasan_free_shadow(const struct vm_struct *vm) {} |
| 121 | 122 | ||
| 123 | static inline void kasan_unpoison_slab(const void *ptr) { } | ||
| 124 | |||
| 122 | #endif /* CONFIG_KASAN */ | 125 | #endif /* CONFIG_KASAN */ |
| 123 | 126 | ||
| 124 | #endif /* LINUX_KASAN_H */ | 127 | #endif /* LINUX_KASAN_H */ |
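kasan_kfree() is renamed to kasan_poison_kfree() and gains a counterpart, kasan_unpoison_slab(), which leans on ksize() to re-arm the whole allocation. That pairing suits a cache that parks objects and hands them out again later; a rough sketch with hypothetical helper names:

        static void example_park_element(void *element)
        {
                kasan_poison_kfree(element);    /* accesses now trip KASAN */
        }

        static void *example_reissue_element(void *element)
        {
                kasan_unpoison_slab(element);   /* ksize()-based, covers the allocation */
                return element;
        }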
diff --git a/include/linux/leds.h b/include/linux/leds.h index d2b13066e781..e5e7f2e80a54 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h | |||
| @@ -42,15 +42,16 @@ struct led_classdev { | |||
| 42 | #define LED_UNREGISTERING (1 << 1) | 42 | #define LED_UNREGISTERING (1 << 1) |
| 43 | /* Upper 16 bits reflect control information */ | 43 | /* Upper 16 bits reflect control information */ |
| 44 | #define LED_CORE_SUSPENDRESUME (1 << 16) | 44 | #define LED_CORE_SUSPENDRESUME (1 << 16) |
| 45 | #define LED_BLINK_ONESHOT (1 << 17) | 45 | #define LED_BLINK_SW (1 << 17) |
| 46 | #define LED_BLINK_ONESHOT_STOP (1 << 18) | 46 | #define LED_BLINK_ONESHOT (1 << 18) |
| 47 | #define LED_BLINK_INVERT (1 << 19) | 47 | #define LED_BLINK_ONESHOT_STOP (1 << 19) |
| 48 | #define LED_BLINK_BRIGHTNESS_CHANGE (1 << 20) | 48 | #define LED_BLINK_INVERT (1 << 20) |
| 49 | #define LED_BLINK_DISABLE (1 << 21) | 49 | #define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21) |
| 50 | #define LED_SYSFS_DISABLE (1 << 22) | 50 | #define LED_BLINK_DISABLE (1 << 22) |
| 51 | #define LED_DEV_CAP_FLASH (1 << 23) | 51 | #define LED_SYSFS_DISABLE (1 << 23) |
| 52 | #define LED_HW_PLUGGABLE (1 << 24) | 52 | #define LED_DEV_CAP_FLASH (1 << 24) |
| 53 | #define LED_PANIC_INDICATOR (1 << 25) | 53 | #define LED_HW_PLUGGABLE (1 << 25) |
| 54 | #define LED_PANIC_INDICATOR (1 << 26) | ||
| 54 | 55 | ||
| 55 | /* Set LED brightness level | 56 | /* Set LED brightness level |
| 56 | * Must not sleep. Use brightness_set_blocking for drivers | 57 | * Must not sleep. Use brightness_set_blocking for drivers |
| @@ -72,8 +73,8 @@ struct led_classdev { | |||
| 72 | * and if both are zero then a sensible default should be chosen. | 73 | * and if both are zero then a sensible default should be chosen. |
| 73 | * The call should adjust the timings in that case and if it can't | 74 | * The call should adjust the timings in that case and if it can't |
| 74 | * match the values specified exactly. | 75 | * match the values specified exactly. |
| 75 | * Deactivate blinking again when the brightness is set to a fixed | 76 | * Deactivate blinking again when the brightness is set to LED_OFF |
| 76 | * value via the brightness_set() callback. | 77 | * via the brightness_set() callback. |
| 77 | */ | 78 | */ |
| 78 | int (*blink_set)(struct led_classdev *led_cdev, | 79 | int (*blink_set)(struct led_classdev *led_cdev, |
| 79 | unsigned long *delay_on, | 80 | unsigned long *delay_on, |
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 035abdf62cfe..73a48479892d 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
| @@ -1240,8 +1240,6 @@ struct mlx5_destroy_psv_out { | |||
| 1240 | u8 rsvd[8]; | 1240 | u8 rsvd[8]; |
| 1241 | }; | 1241 | }; |
| 1242 | 1242 | ||
| 1243 | #define MLX5_CMD_OP_MAX 0x920 | ||
| 1244 | |||
| 1245 | enum { | 1243 | enum { |
| 1246 | VPORT_STATE_DOWN = 0x0, | 1244 | VPORT_STATE_DOWN = 0x0, |
| 1247 | VPORT_STATE_UP = 0x1, | 1245 | VPORT_STATE_UP = 0x1, |
| @@ -1369,6 +1367,12 @@ enum mlx5_cap_type { | |||
| 1369 | #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ | 1367 | #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ |
| 1370 | MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) | 1368 | MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) |
| 1371 | 1369 | ||
| 1370 | #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ | ||
| 1371 | MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) | ||
| 1372 | |||
| 1373 | #define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \ | ||
| 1374 | MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap) | ||
| 1375 | |||
| 1372 | #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ | 1376 | #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ |
| 1373 | MLX5_GET(flow_table_eswitch_cap, \ | 1377 | MLX5_GET(flow_table_eswitch_cap, \ |
| 1374 | mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) | 1378 | mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) |
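MLX5_CAP_FLOWTABLE_NIC_RX()/_MAX() are shorthands that select the NIC receive property block out of the flow-table capabilities. A hedged example of gating a feature on them; 'ft_support' is assumed to be one of the flow_table_properties fields reachable through the macro:

        if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_support))
                return -EOPNOTSUPP;     /* NIC RX flow tables not supported */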
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 9a05cd7e5890..e955a2859009 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
| @@ -205,7 +205,8 @@ enum { | |||
| 205 | MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, | 205 | MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, |
| 206 | MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, | 206 | MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, |
| 207 | MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, | 207 | MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, |
| 208 | MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c | 208 | MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, |
| 209 | MLX5_CMD_OP_MAX | ||
| 209 | }; | 210 | }; |
| 210 | 211 | ||
| 211 | struct mlx5_ifc_flow_table_fields_supported_bits { | 212 | struct mlx5_ifc_flow_table_fields_supported_bits { |
| @@ -500,7 +501,9 @@ struct mlx5_ifc_e_switch_cap_bits { | |||
| 500 | u8 vport_svlan_insert[0x1]; | 501 | u8 vport_svlan_insert[0x1]; |
| 501 | u8 vport_cvlan_insert_if_not_exist[0x1]; | 502 | u8 vport_cvlan_insert_if_not_exist[0x1]; |
| 502 | u8 vport_cvlan_insert_overwrite[0x1]; | 503 | u8 vport_cvlan_insert_overwrite[0x1]; |
| 503 | u8 reserved_at_5[0x1b]; | 504 | u8 reserved_at_5[0x19]; |
| 505 | u8 nic_vport_node_guid_modify[0x1]; | ||
| 506 | u8 nic_vport_port_guid_modify[0x1]; | ||
| 504 | 507 | ||
| 505 | u8 reserved_at_20[0x7e0]; | 508 | u8 reserved_at_20[0x7e0]; |
| 506 | }; | 509 | }; |
| @@ -4583,7 +4586,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits { | |||
| 4583 | }; | 4586 | }; |
| 4584 | 4587 | ||
| 4585 | struct mlx5_ifc_modify_nic_vport_field_select_bits { | 4588 | struct mlx5_ifc_modify_nic_vport_field_select_bits { |
| 4586 | u8 reserved_at_0[0x19]; | 4589 | u8 reserved_at_0[0x16]; |
| 4590 | u8 node_guid[0x1]; | ||
| 4591 | u8 port_guid[0x1]; | ||
| 4592 | u8 reserved_at_18[0x1]; | ||
| 4587 | u8 mtu[0x1]; | 4593 | u8 mtu[0x1]; |
| 4588 | u8 change_event[0x1]; | 4594 | u8 change_event[0x1]; |
| 4589 | u8 promisc[0x1]; | 4595 | u8 promisc[0x1]; |
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 64221027bf1f..ab310819ac36 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h | |||
| @@ -172,6 +172,7 @@ enum { | |||
| 172 | enum { | 172 | enum { |
| 173 | MLX5_FENCE_MODE_NONE = 0 << 5, | 173 | MLX5_FENCE_MODE_NONE = 0 << 5, |
| 174 | MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5, | 174 | MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5, |
| 175 | MLX5_FENCE_MODE_FENCE = 2 << 5, | ||
| 175 | MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5, | 176 | MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5, |
| 176 | MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5, | 177 | MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5, |
| 177 | }; | 178 | }; |
| @@ -460,10 +461,9 @@ struct mlx5_core_qp { | |||
| 460 | }; | 461 | }; |
| 461 | 462 | ||
| 462 | struct mlx5_qp_path { | 463 | struct mlx5_qp_path { |
| 463 | u8 fl; | 464 | u8 fl_free_ar; |
| 464 | u8 rsvd3; | 465 | u8 rsvd3; |
| 465 | u8 free_ar; | 466 | __be16 pkey_index; |
| 466 | u8 pkey_index; | ||
| 467 | u8 rsvd0; | 467 | u8 rsvd0; |
| 468 | u8 grh_mlid; | 468 | u8 grh_mlid; |
| 469 | __be16 rlid; | 469 | __be16 rlid; |
| @@ -560,6 +560,7 @@ struct mlx5_modify_qp_mbox_in { | |||
| 560 | __be32 optparam; | 560 | __be32 optparam; |
| 561 | u8 rsvd0[4]; | 561 | u8 rsvd0[4]; |
| 562 | struct mlx5_qp_context ctx; | 562 | struct mlx5_qp_context ctx; |
| 563 | u8 rsvd2[16]; | ||
| 563 | }; | 564 | }; |
| 564 | 565 | ||
| 565 | struct mlx5_modify_qp_mbox_out { | 566 | struct mlx5_modify_qp_mbox_out { |
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 301da4a5e6bf..6c16c198f680 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h | |||
| @@ -50,6 +50,8 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); | |||
| 50 | int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, | 50 | int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, |
| 51 | u64 *system_image_guid); | 51 | u64 *system_image_guid); |
| 52 | int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); | 52 | int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); |
| 53 | int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, | ||
| 54 | u32 vport, u64 node_guid); | ||
| 53 | int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, | 55 | int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, |
| 54 | u16 *qkey_viol_cntr); | 56 | u16 *qkey_viol_cntr); |
| 55 | int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, | 57 | int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 5df5feb49575..ece042dfe23c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -602,7 +602,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) | |||
| 602 | } | 602 | } |
| 603 | 603 | ||
| 604 | void do_set_pte(struct vm_area_struct *vma, unsigned long address, | 604 | void do_set_pte(struct vm_area_struct *vma, unsigned long address, |
| 605 | struct page *page, pte_t *pte, bool write, bool anon, bool old); | 605 | struct page *page, pte_t *pte, bool write, bool anon); |
| 606 | #endif | 606 | #endif |
| 607 | 607 | ||
| 608 | /* | 608 | /* |
diff --git a/include/linux/namei.h b/include/linux/namei.h index ec5ec2818a28..d3d0398f2a1b 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h | |||
| @@ -45,6 +45,8 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; | |||
| 45 | #define LOOKUP_ROOT 0x2000 | 45 | #define LOOKUP_ROOT 0x2000 |
| 46 | #define LOOKUP_EMPTY 0x4000 | 46 | #define LOOKUP_EMPTY 0x4000 |
| 47 | 47 | ||
| 48 | extern int path_pts(struct path *path); | ||
| 49 | |||
| 48 | extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty); | 50 | extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty); |
| 49 | 51 | ||
| 50 | static inline int user_path_at(int dfd, const char __user *name, unsigned flags, | 52 | static inline int user_path_at(int dfd, const char __user *name, unsigned flags, |
diff --git a/include/linux/of.h b/include/linux/of.h index c7292e8ea080..74eb28cadbef 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
| @@ -614,7 +614,7 @@ static inline struct device_node *of_parse_phandle(const struct device_node *np, | |||
| 614 | return NULL; | 614 | return NULL; |
| 615 | } | 615 | } |
| 616 | 616 | ||
| 617 | static inline int of_parse_phandle_with_args(struct device_node *np, | 617 | static inline int of_parse_phandle_with_args(const struct device_node *np, |
| 618 | const char *list_name, | 618 | const char *list_name, |
| 619 | const char *cells_name, | 619 | const char *cells_name, |
| 620 | int index, | 620 | int index, |
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index f6e9e85164e8..b969e9443962 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h | |||
| @@ -8,7 +8,7 @@ struct pci_dev; | |||
| 8 | struct of_phandle_args; | 8 | struct of_phandle_args; |
| 9 | struct device_node; | 9 | struct device_node; |
| 10 | 10 | ||
| 11 | #ifdef CONFIG_OF | 11 | #ifdef CONFIG_OF_PCI |
| 12 | int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq); | 12 | int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq); |
| 13 | struct device_node *of_pci_find_child_device(struct device_node *parent, | 13 | struct device_node *of_pci_find_child_device(struct device_node *parent, |
| 14 | unsigned int devfn); | 14 | unsigned int devfn); |
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index ad2f67054372..c201060e0c6d 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h | |||
| @@ -31,6 +31,13 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem); | |||
| 31 | int of_reserved_mem_device_init(struct device *dev); | 31 | int of_reserved_mem_device_init(struct device *dev); |
| 32 | void of_reserved_mem_device_release(struct device *dev); | 32 | void of_reserved_mem_device_release(struct device *dev); |
| 33 | 33 | ||
| 34 | int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, | ||
| 35 | phys_addr_t align, | ||
| 36 | phys_addr_t start, | ||
| 37 | phys_addr_t end, | ||
| 38 | bool nomap, | ||
| 39 | phys_addr_t *res_base); | ||
| 40 | |||
| 34 | void fdt_init_reserved_mem(void); | 41 | void fdt_init_reserved_mem(void); |
| 35 | void fdt_reserved_mem_save_node(unsigned long node, const char *uname, | 42 | void fdt_reserved_mem_save_node(unsigned long node, const char *uname, |
| 36 | phys_addr_t base, phys_addr_t size); | 43 | phys_addr_t base, phys_addr_t size); |
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index bf268fa92c5b..fec40271339f 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h | |||
| @@ -46,33 +46,62 @@ extern struct page_ext_operations page_idle_ops; | |||
| 46 | 46 | ||
| 47 | static inline bool page_is_young(struct page *page) | 47 | static inline bool page_is_young(struct page *page) |
| 48 | { | 48 | { |
| 49 | return test_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); | 49 | struct page_ext *page_ext = lookup_page_ext(page); |
| 50 | |||
| 51 | if (unlikely(!page_ext)) | ||
| 52 | return false; | ||
| 53 | |||
| 54 | return test_bit(PAGE_EXT_YOUNG, &page_ext->flags); | ||
| 50 | } | 55 | } |
| 51 | 56 | ||
| 52 | static inline void set_page_young(struct page *page) | 57 | static inline void set_page_young(struct page *page) |
| 53 | { | 58 | { |
| 54 | set_bit(PAGE_EXT_YOUNG, &lookup_page_ext(page)->flags); | 59 | struct page_ext *page_ext = lookup_page_ext(page); |
| 60 | |||
| 61 | if (unlikely(!page_ext)) | ||
| 62 | return; | ||
| 63 | |||
| 64 | set_bit(PAGE_EXT_YOUNG, &page_ext->flags); | ||
| 55 | } | 65 | } |
| 56 | 66 | ||
| 57 | static inline bool test_and_clear_page_young(struct page *page) | 67 | static inline bool test_and_clear_page_young(struct page *page) |
| 58 | { | 68 | { |
| 59 | return test_and_clear_bit(PAGE_EXT_YOUNG, | 69 | struct page_ext *page_ext = lookup_page_ext(page); |
| 60 | &lookup_page_ext(page)->flags); | 70 | |
| 71 | if (unlikely(!page_ext)) | ||
| 72 | return false; | ||
| 73 | |||
| 74 | return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags); | ||
| 61 | } | 75 | } |
| 62 | 76 | ||
| 63 | static inline bool page_is_idle(struct page *page) | 77 | static inline bool page_is_idle(struct page *page) |
| 64 | { | 78 | { |
| 65 | return test_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); | 79 | struct page_ext *page_ext = lookup_page_ext(page); |
| 80 | |||
| 81 | if (unlikely(!page_ext)) | ||
| 82 | return false; | ||
| 83 | |||
| 84 | return test_bit(PAGE_EXT_IDLE, &page_ext->flags); | ||
| 66 | } | 85 | } |
| 67 | 86 | ||
| 68 | static inline void set_page_idle(struct page *page) | 87 | static inline void set_page_idle(struct page *page) |
| 69 | { | 88 | { |
| 70 | set_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); | 89 | struct page_ext *page_ext = lookup_page_ext(page); |
| 90 | |||
| 91 | if (unlikely(!page_ext)) | ||
| 92 | return; | ||
| 93 | |||
| 94 | set_bit(PAGE_EXT_IDLE, &page_ext->flags); | ||
| 71 | } | 95 | } |
| 72 | 96 | ||
| 73 | static inline void clear_page_idle(struct page *page) | 97 | static inline void clear_page_idle(struct page *page) |
| 74 | { | 98 | { |
| 75 | clear_bit(PAGE_EXT_IDLE, &lookup_page_ext(page)->flags); | 99 | struct page_ext *page_ext = lookup_page_ext(page); |
| 100 | |||
| 101 | if (unlikely(!page_ext)) | ||
| 102 | return; | ||
| 103 | |||
| 104 | clear_bit(PAGE_EXT_IDLE, &page_ext->flags); | ||
| 76 | } | 105 | } |
| 77 | #endif /* CONFIG_64BIT */ | 106 | #endif /* CONFIG_64BIT */ |
| 78 | 107 | ||
diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 17018f3c066e..908b67c847cd 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h | |||
| @@ -235,6 +235,9 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns, | |||
| 235 | if (!pwm) | 235 | if (!pwm) |
| 236 | return -EINVAL; | 236 | return -EINVAL; |
| 237 | 237 | ||
| 238 | if (duty_ns < 0 || period_ns < 0) | ||
| 239 | return -EINVAL; | ||
| 240 | |||
| 238 | pwm_get_state(pwm, &state); | 241 | pwm_get_state(pwm, &state); |
| 239 | if (state.duty_cycle == duty_ns && state.period == period_ns) | 242 | if (state.duty_cycle == duty_ns && state.period == period_ns) |
| 240 | return 0; | 243 | return 0; |
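With the added range check, negative arguments now fail fast with -EINVAL before any PWM state is read. An illustrative caller, assuming a pwm_device has already been requested (function name is made up):

#include <linux/pwm.h>
#include <linux/printk.h>

static int pwm_negative_args_demo(struct pwm_device *pwm)
{
        /* duty_ns < 0: rejected up front after this change */
        int ret = pwm_config(pwm, -1, 1000000);

        if (ret)
                pr_warn("pwm_config() rejected negative duty cycle: %d\n", ret);
        return ret;
}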
diff --git a/include/linux/reservation.h b/include/linux/reservation.h index 49d057655d62..b0f305e77b7f 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h | |||
| @@ -49,12 +49,27 @@ extern struct ww_class reservation_ww_class; | |||
| 49 | extern struct lock_class_key reservation_seqcount_class; | 49 | extern struct lock_class_key reservation_seqcount_class; |
| 50 | extern const char reservation_seqcount_string[]; | 50 | extern const char reservation_seqcount_string[]; |
| 51 | 51 | ||
| 52 | /** | ||
| 53 | * struct reservation_object_list - a list of shared fences | ||
| 54 | * @rcu: for internal use | ||
| 55 | * @shared_count: table of shared fences | ||
| 56 | * @shared_max: for growing shared fence table | ||
| 57 | * @shared: shared fence table | ||
| 58 | */ | ||
| 52 | struct reservation_object_list { | 59 | struct reservation_object_list { |
| 53 | struct rcu_head rcu; | 60 | struct rcu_head rcu; |
| 54 | u32 shared_count, shared_max; | 61 | u32 shared_count, shared_max; |
| 55 | struct fence __rcu *shared[]; | 62 | struct fence __rcu *shared[]; |
| 56 | }; | 63 | }; |
| 57 | 64 | ||
| 65 | /** | ||
| 66 | * struct reservation_object - a reservation object manages fences for a buffer | ||
| 67 | * @lock: update side lock | ||
| 68 | * @seq: sequence count for managing RCU read-side synchronization | ||
| 69 | * @fence_excl: the exclusive fence, if there is one currently | ||
| 70 | * @fence: list of current shared fences | ||
| 71 | * @staged: staged copy of shared fences for RCU updates | ||
| 72 | */ | ||
| 58 | struct reservation_object { | 73 | struct reservation_object { |
| 59 | struct ww_mutex lock; | 74 | struct ww_mutex lock; |
| 60 | seqcount_t seq; | 75 | seqcount_t seq; |
| @@ -68,6 +83,10 @@ struct reservation_object { | |||
| 68 | #define reservation_object_assert_held(obj) \ | 83 | #define reservation_object_assert_held(obj) \ |
| 69 | lockdep_assert_held(&(obj)->lock.base) | 84 | lockdep_assert_held(&(obj)->lock.base) |
| 70 | 85 | ||
| 86 | /** | ||
| 87 | * reservation_object_init - initialize a reservation object | ||
| 88 | * @obj: the reservation object | ||
| 89 | */ | ||
| 71 | static inline void | 90 | static inline void |
| 72 | reservation_object_init(struct reservation_object *obj) | 91 | reservation_object_init(struct reservation_object *obj) |
| 73 | { | 92 | { |
| @@ -79,6 +98,10 @@ reservation_object_init(struct reservation_object *obj) | |||
| 79 | obj->staged = NULL; | 98 | obj->staged = NULL; |
| 80 | } | 99 | } |
| 81 | 100 | ||
| 101 | /** | ||
| 102 | * reservation_object_fini - destroys a reservation object | ||
| 103 | * @obj: the reservation object | ||
| 104 | */ | ||
| 82 | static inline void | 105 | static inline void |
| 83 | reservation_object_fini(struct reservation_object *obj) | 106 | reservation_object_fini(struct reservation_object *obj) |
| 84 | { | 107 | { |
| @@ -106,6 +129,14 @@ reservation_object_fini(struct reservation_object *obj) | |||
| 106 | ww_mutex_destroy(&obj->lock); | 129 | ww_mutex_destroy(&obj->lock); |
| 107 | } | 130 | } |
| 108 | 131 | ||
| 132 | /** | ||
| 133 | * reservation_object_get_list - get the reservation object's | ||
| 134 | * shared fence list, with update-side lock held | ||
| 135 | * @obj: the reservation object | ||
| 136 | * | ||
| 137 | * Returns the shared fence list. Does NOT take references to | ||
| 138 | * the fence. The obj->lock must be held. | ||
| 139 | */ | ||
| 109 | static inline struct reservation_object_list * | 140 | static inline struct reservation_object_list * |
| 110 | reservation_object_get_list(struct reservation_object *obj) | 141 | reservation_object_get_list(struct reservation_object *obj) |
| 111 | { | 142 | { |
| @@ -113,6 +144,17 @@ reservation_object_get_list(struct reservation_object *obj) | |||
| 113 | reservation_object_held(obj)); | 144 | reservation_object_held(obj)); |
| 114 | } | 145 | } |
| 115 | 146 | ||
| 147 | /** | ||
| 148 | * reservation_object_get_excl - get the reservation object's | ||
| 149 | * exclusive fence, with update-side lock held | ||
| 150 | * @obj: the reservation object | ||
| 151 | * | ||
| 152 | * Returns the exclusive fence (if any). Does NOT take a | ||
| 153 | * reference. The obj->lock must be held. | ||
| 154 | * | ||
| 155 | * RETURNS | ||
| 156 | * The exclusive fence or NULL | ||
| 157 | */ | ||
| 116 | static inline struct fence * | 158 | static inline struct fence * |
| 117 | reservation_object_get_excl(struct reservation_object *obj) | 159 | reservation_object_get_excl(struct reservation_object *obj) |
| 118 | { | 160 | { |
| @@ -120,6 +162,17 @@ reservation_object_get_excl(struct reservation_object *obj) | |||
| 120 | reservation_object_held(obj)); | 162 | reservation_object_held(obj)); |
| 121 | } | 163 | } |
| 122 | 164 | ||
| 165 | /** | ||
| 166 | * reservation_object_get_excl_rcu - get the reservation object's | ||
| 167 | * exclusive fence, without lock held. | ||
| 168 | * @obj: the reservation object | ||
| 169 | * | ||
| 170 | * If there is an exclusive fence, this atomically increments its | ||
| 171 | * reference count and returns it. | ||
| 172 | * | ||
| 173 | * RETURNS | ||
| 174 | * The exclusive fence or NULL if none | ||
| 175 | */ | ||
| 123 | static inline struct fence * | 176 | static inline struct fence * |
| 124 | reservation_object_get_excl_rcu(struct reservation_object *obj) | 177 | reservation_object_get_excl_rcu(struct reservation_object *obj) |
| 125 | { | 178 | { |
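The new kernel-doc spells out the reference and locking rules; a short usage sketch of the RCU variant, which does take a reference that the caller must drop (the wrapper function is illustrative, not from the patch):

#include <linux/reservation.h>
#include <linux/fence.h>
#include <linux/sched.h>

static long wait_for_excl_fence(struct reservation_object *obj)
{
        struct fence *excl = reservation_object_get_excl_rcu(obj);
        long ret = 0;

        if (excl) {
                ret = fence_wait_timeout(excl, true, MAX_SCHEDULE_TIMEOUT);
                fence_put(excl);        /* get_excl_rcu() took a reference */
        }
        return ret;
}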
diff --git a/include/linux/sched.h b/include/linux/sched.h index 6e42ada26345..253538f29ade 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -3007,7 +3007,7 @@ static inline int object_is_on_stack(void *obj) | |||
| 3007 | return (obj >= stack) && (obj < (stack + THREAD_SIZE)); | 3007 | return (obj >= stack) && (obj < (stack + THREAD_SIZE)); |
| 3008 | } | 3008 | } |
| 3009 | 3009 | ||
| 3010 | extern void thread_info_cache_init(void); | 3010 | extern void thread_stack_cache_init(void); |
| 3011 | 3011 | ||
| 3012 | #ifdef CONFIG_DEBUG_STACK_USAGE | 3012 | #ifdef CONFIG_DEBUG_STACK_USAGE |
| 3013 | static inline unsigned long stack_not_used(struct task_struct *p) | 3013 | static inline unsigned long stack_not_used(struct task_struct *p) |
diff --git a/include/linux/sctp.h b/include/linux/sctp.h index dacb5e711994..de1f64318fc4 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h | |||
| @@ -765,6 +765,8 @@ struct sctp_info { | |||
| 765 | __u8 sctpi_s_disable_fragments; | 765 | __u8 sctpi_s_disable_fragments; |
| 766 | __u8 sctpi_s_v4mapped; | 766 | __u8 sctpi_s_v4mapped; |
| 767 | __u8 sctpi_s_frag_interleave; | 767 | __u8 sctpi_s_frag_interleave; |
| 768 | __u32 sctpi_s_type; | ||
| 769 | __u32 __reserved3; | ||
| 768 | }; | 770 | }; |
| 769 | 771 | ||
| 770 | struct sctp_infox { | 772 | struct sctp_infox { |
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 7973a821ac58..ead97654c4e9 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h | |||
| @@ -277,7 +277,10 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s) | |||
| 277 | 277 | ||
| 278 | static inline int raw_read_seqcount_latch(seqcount_t *s) | 278 | static inline int raw_read_seqcount_latch(seqcount_t *s) |
| 279 | { | 279 | { |
| 280 | return lockless_dereference(s)->sequence; | 280 | int seq = READ_ONCE(s->sequence); |
| 281 | /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */ | ||
| 282 | smp_read_barrier_depends(); | ||
| 283 | return seq; | ||
| 281 | } | 284 | } |
| 282 | 285 | ||
| 283 | /** | 286 | /** |
| @@ -331,7 +334,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s) | |||
| 331 | * unsigned seq, idx; | 334 | * unsigned seq, idx; |
| 332 | * | 335 | * |
| 333 | * do { | 336 | * do { |
| 334 | * seq = lockless_dereference(latch)->seq; | 337 | * seq = raw_read_seqcount_latch(&latch->seq); |
| 335 | * | 338 | * |
| 336 | * idx = seq & 0x01; | 339 | * idx = seq & 0x01; |
| 337 | * entry = data_query(latch->data[idx], ...); | 340 | * entry = data_query(latch->data[idx], ...); |
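The second hunk updates the in-comment example to use the helper; a self-contained reader-side sketch of the latch pattern (the struct and its payload are illustrative):

#include <linux/seqlock.h>

struct latch_demo {
        seqcount_t      seq;
        int             data[2];
};

/* Retry until a stable copy of the currently active slot has been read. */
static int latch_demo_read(struct latch_demo *latch)
{
        unsigned int seq, idx;
        int value;

        do {
                seq = raw_read_seqcount_latch(&latch->seq);
                idx = seq & 0x01;
                value = READ_ONCE(latch->data[idx]);
        } while (read_seqcount_retry(&latch->seq, seq));

        return value;
}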
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 19c659d1c0f8..b6810c92b8bb 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
| @@ -137,8 +137,6 @@ struct rpc_create_args { | |||
| 137 | #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9) | 137 | #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9) |
| 138 | 138 | ||
| 139 | struct rpc_clnt *rpc_create(struct rpc_create_args *args); | 139 | struct rpc_clnt *rpc_create(struct rpc_create_args *args); |
| 140 | struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, | ||
| 141 | struct rpc_xprt *xprt); | ||
| 142 | struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, | 140 | struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, |
| 143 | const struct rpc_program *, u32); | 141 | const struct rpc_program *, u32); |
| 144 | struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); | 142 | struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); |
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index b7dabc4baafd..79ba50856707 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h | |||
| @@ -84,6 +84,7 @@ struct svc_xprt { | |||
| 84 | 84 | ||
| 85 | struct net *xpt_net; | 85 | struct net *xpt_net; |
| 86 | struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */ | 86 | struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */ |
| 87 | struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */ | ||
| 87 | }; | 88 | }; |
| 88 | 89 | ||
| 89 | static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) | 90 | static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u) |
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 5aa3834619a8..5e3e1b63dbb3 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
| @@ -297,6 +297,7 @@ struct xprt_create { | |||
| 297 | size_t addrlen; | 297 | size_t addrlen; |
| 298 | const char *servername; | 298 | const char *servername; |
| 299 | struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ | 299 | struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ |
| 300 | struct rpc_xprt_switch *bc_xps; | ||
| 300 | unsigned int flags; | 301 | unsigned int flags; |
| 301 | }; | 302 | }; |
| 302 | 303 | ||
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index e45abe7db9a6..ee517bef0db0 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
| @@ -335,6 +335,8 @@ struct thermal_genl_event { | |||
| 335 | * @get_trend: a pointer to a function that reads the sensor temperature trend. | 335 | * @get_trend: a pointer to a function that reads the sensor temperature trend. |
| 336 | * @set_emul_temp: a pointer to a function that sets sensor emulated | 336 | * @set_emul_temp: a pointer to a function that sets sensor emulated |
| 337 | * temperature. | 337 | * temperature. |
| 338 | * @set_trip_temp: a pointer to a function that sets the trip temperature on | ||
| 339 | * hardware. | ||
| 338 | */ | 340 | */ |
| 339 | struct thermal_zone_of_device_ops { | 341 | struct thermal_zone_of_device_ops { |
| 340 | int (*get_temp)(void *, int *); | 342 | int (*get_temp)(void *, int *); |
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index 37dbacf84849..816b7543f81b 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h | |||
| @@ -21,6 +21,9 @@ static inline int do_sys_settimeofday(const struct timespec *tv, | |||
| 21 | struct timespec64 ts64; | 21 | struct timespec64 ts64; |
| 22 | 22 | ||
| 23 | if (!tv) | 23 | if (!tv) |
| 24 | return do_sys_settimeofday64(NULL, tz); | ||
| 25 | |||
| 26 | if (!timespec_valid(tv)) | ||
| 24 | return -EINVAL; | 27 | return -EINVAL; |
| 25 | 28 | ||
| 26 | ts64 = timespec_to_timespec64(*tv); | 29 | ts64 = timespec_to_timespec64(*tv); |
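The timekeeping change restores the documented settimeofday() behaviour that a NULL tv with a valid tz only updates the timezone instead of failing. A userspace-side illustration:

#include <stddef.h>
#include <sys/time.h>

int set_timezone_only(int minutes_west)
{
        struct timezone tz = {
                .tz_minuteswest = minutes_west,
                .tz_dsttime     = 0,
        };

        /* tv == NULL: with the fix above this no longer returns EINVAL */
        return settimeofday(NULL, &tz);
}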
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 457651bf45b0..fefe8b06a63d 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h | |||
| @@ -1034,6 +1034,8 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget) | |||
| 1034 | * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL, | 1034 | * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL, |
| 1035 | * this driver will be bound to any available UDC. | 1035 | * this driver will be bound to any available UDC. |
| 1036 | * @pending: UDC core private data used for deferred probe of this driver. | 1036 | * @pending: UDC core private data used for deferred probe of this driver. |
| 1037 | * @match_existing_only: If udc is not found, return an error and don't add this | ||
| 1038 | * gadget driver to the list of pending drivers | ||
| 1037 | * | 1039 | * |
| 1038 | * Devices are disabled till a gadget driver successfully bind()s, which | 1040 | * Devices are disabled till a gadget driver successfully bind()s, which |
| 1039 | * means the driver will handle setup() requests needed to enumerate (and | 1041 | * means the driver will handle setup() requests needed to enumerate (and |
| @@ -1097,6 +1099,7 @@ struct usb_gadget_driver { | |||
| 1097 | 1099 | ||
| 1098 | char *udc_name; | 1100 | char *udc_name; |
| 1099 | struct list_head pending; | 1101 | struct list_head pending; |
| 1102 | unsigned match_existing_only:1; | ||
| 1100 | }; | 1103 | }; |
| 1101 | 1104 | ||
| 1102 | 1105 | ||
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index 0b3da40a525e..d315c8907869 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h | |||
| @@ -142,10 +142,11 @@ enum musb_vbus_id_status { | |||
| 142 | }; | 142 | }; |
| 143 | 143 | ||
| 144 | #if IS_ENABLED(CONFIG_USB_MUSB_HDRC) | 144 | #if IS_ENABLED(CONFIG_USB_MUSB_HDRC) |
| 145 | void musb_mailbox(enum musb_vbus_id_status status); | 145 | int musb_mailbox(enum musb_vbus_id_status status); |
| 146 | #else | 146 | #else |
| 147 | static inline void musb_mailbox(enum musb_vbus_id_status status) | 147 | static inline int musb_mailbox(enum musb_vbus_id_status status) |
| 148 | { | 148 | { |
| 149 | return 0; | ||
| 149 | } | 150 | } |
| 150 | #endif | 151 | #endif |
| 151 | 152 | ||
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h index 98a938aabdfb..7a8d6037a4bb 100644 --- a/include/media/v4l2-mc.h +++ b/include/media/v4l2-mc.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * v4l2-mc.h - Media Controller V4L2 types and prototypes | 2 | * v4l2-mc.h - Media Controller V4L2 types and prototypes |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com> | 4 | * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@kernel.org> |
| 5 | * Copyright (C) 2006-2010 Nokia Corporation | 5 | * Copyright (C) 2006-2010 Nokia Corporation |
| 6 | * Copyright (c) 2016 Intel Corporation. | 6 | * Copyright (c) 2016 Intel Corporation. |
| 7 | * | 7 | * |
diff --git a/include/net/compat.h b/include/net/compat.h index 48103cf94e97..13de0ccaa059 100644 --- a/include/net/compat.h +++ b/include/net/compat.h | |||
| @@ -42,6 +42,7 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *); | |||
| 42 | 42 | ||
| 43 | int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, | 43 | int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, |
| 44 | struct sockaddr __user **, struct iovec **); | 44 | struct sockaddr __user **, struct iovec **); |
| 45 | struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval); | ||
| 45 | asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *, | 46 | asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *, |
| 46 | unsigned int); | 47 | unsigned int); |
| 47 | asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *, | 48 | asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *, |
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index d325c81332e3..43a5a0e4524c 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h | |||
| @@ -63,6 +63,8 @@ struct ip6_tnl_encap_ops { | |||
| 63 | u8 *protocol, struct flowi6 *fl6); | 63 | u8 *protocol, struct flowi6 *fl6); |
| 64 | }; | 64 | }; |
| 65 | 65 | ||
| 66 | #ifdef CONFIG_INET | ||
| 67 | |||
| 66 | extern const struct ip6_tnl_encap_ops __rcu * | 68 | extern const struct ip6_tnl_encap_ops __rcu * |
| 67 | ip6tun_encaps[MAX_IPTUN_ENCAP_OPS]; | 69 | ip6tun_encaps[MAX_IPTUN_ENCAP_OPS]; |
| 68 | 70 | ||
| @@ -138,7 +140,6 @@ struct net *ip6_tnl_get_link_net(const struct net_device *dev); | |||
| 138 | int ip6_tnl_get_iflink(const struct net_device *dev); | 140 | int ip6_tnl_get_iflink(const struct net_device *dev); |
| 139 | int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu); | 141 | int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu); |
| 140 | 142 | ||
| 141 | #ifdef CONFIG_INET | ||
| 142 | static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, | 143 | static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, |
| 143 | struct net_device *dev) | 144 | struct net_device *dev) |
| 144 | { | 145 | { |
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index af4c10ebb241..cd6018a9ee24 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
| @@ -1232,7 +1232,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp); | |||
| 1232 | const char *ip_vs_state_name(__u16 proto, int state); | 1232 | const char *ip_vs_state_name(__u16 proto, int state); |
| 1233 | 1233 | ||
| 1234 | void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp); | 1234 | void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp); |
| 1235 | int ip_vs_check_template(struct ip_vs_conn *ct); | 1235 | int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest); |
| 1236 | void ip_vs_random_dropentry(struct netns_ipvs *ipvs); | 1236 | void ip_vs_random_dropentry(struct netns_ipvs *ipvs); |
| 1237 | int ip_vs_conn_init(void); | 1237 | int ip_vs_conn_init(void); |
| 1238 | void ip_vs_conn_cleanup(void); | 1238 | void ip_vs_conn_cleanup(void); |
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index 9c5638ad872e..0dbce55437f2 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h | |||
| @@ -28,8 +28,8 @@ struct nf_queue_handler { | |||
| 28 | struct nf_hook_ops *ops); | 28 | struct nf_hook_ops *ops); |
| 29 | }; | 29 | }; |
| 30 | 30 | ||
| 31 | void nf_register_queue_handler(const struct nf_queue_handler *qh); | 31 | void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh); |
| 32 | void nf_unregister_queue_handler(void); | 32 | void nf_unregister_queue_handler(struct net *net); |
| 33 | void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); | 33 | void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); |
| 34 | 34 | ||
| 35 | void nf_queue_entry_get_refs(struct nf_queue_entry *entry); | 35 | void nf_queue_entry_get_refs(struct nf_queue_entry *entry); |
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h index 38aa4983e2a9..36d723579af2 100644 --- a/include/net/netns/netfilter.h +++ b/include/net/netns/netfilter.h | |||
| @@ -5,11 +5,13 @@ | |||
| 5 | 5 | ||
| 6 | struct proc_dir_entry; | 6 | struct proc_dir_entry; |
| 7 | struct nf_logger; | 7 | struct nf_logger; |
| 8 | struct nf_queue_handler; | ||
| 8 | 9 | ||
| 9 | struct netns_nf { | 10 | struct netns_nf { |
| 10 | #if defined CONFIG_PROC_FS | 11 | #if defined CONFIG_PROC_FS |
| 11 | struct proc_dir_entry *proc_netfilter; | 12 | struct proc_dir_entry *proc_netfilter; |
| 12 | #endif | 13 | #endif |
| 14 | const struct nf_queue_handler __rcu *queue_handler; | ||
| 13 | const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO]; | 15 | const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO]; |
| 14 | #ifdef CONFIG_SYSCTL | 16 | #ifdef CONFIG_SYSCTL |
| 15 | struct ctl_table_header *nf_log_dir_header; | 17 | struct ctl_table_header *nf_log_dir_header; |
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 0f7efa88f210..3722dda0199d 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h | |||
| @@ -392,16 +392,20 @@ struct tc_cls_u32_offload { | |||
| 392 | }; | 392 | }; |
| 393 | }; | 393 | }; |
| 394 | 394 | ||
| 395 | static inline bool tc_should_offload(struct net_device *dev, u32 flags) | 395 | static inline bool tc_should_offload(const struct net_device *dev, |
| 396 | const struct tcf_proto *tp, u32 flags) | ||
| 396 | { | 397 | { |
| 398 | const struct Qdisc *sch = tp->q; | ||
| 399 | const struct Qdisc_class_ops *cops = sch->ops->cl_ops; | ||
| 400 | |||
| 397 | if (!(dev->features & NETIF_F_HW_TC)) | 401 | if (!(dev->features & NETIF_F_HW_TC)) |
| 398 | return false; | 402 | return false; |
| 399 | |||
| 400 | if (flags & TCA_CLS_FLAGS_SKIP_HW) | 403 | if (flags & TCA_CLS_FLAGS_SKIP_HW) |
| 401 | return false; | 404 | return false; |
| 402 | |||
| 403 | if (!dev->netdev_ops->ndo_setup_tc) | 405 | if (!dev->netdev_ops->ndo_setup_tc) |
| 404 | return false; | 406 | return false; |
| 407 | if (cops && cops->tcf_cl_offload) | ||
| 408 | return cops->tcf_cl_offload(tp->classid); | ||
| 405 | 409 | ||
| 406 | return true; | 410 | return true; |
| 407 | } | 411 | } |
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 401038d2f9b8..fea53f4d92ca 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h | |||
| @@ -61,6 +61,7 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound) | |||
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | struct qdisc_watchdog { | 63 | struct qdisc_watchdog { |
| 64 | u64 last_expires; | ||
| 64 | struct hrtimer timer; | 65 | struct hrtimer timer; |
| 65 | struct Qdisc *qdisc; | 66 | struct Qdisc *qdisc; |
| 66 | }; | 67 | }; |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index a1fd76c22a59..62d553184e91 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
| @@ -168,6 +168,7 @@ struct Qdisc_class_ops { | |||
| 168 | 168 | ||
| 169 | /* Filter manipulation */ | 169 | /* Filter manipulation */ |
| 170 | struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long); | 170 | struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long); |
| 171 | bool (*tcf_cl_offload)(u32 classid); | ||
| 171 | unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, | 172 | unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, |
| 172 | u32 classid); | 173 | u32 classid); |
| 173 | void (*unbind_tcf)(struct Qdisc *, unsigned long); | 174 | void (*unbind_tcf)(struct Qdisc *, unsigned long); |
| @@ -691,9 +692,11 @@ static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch) | |||
| 691 | /* we can reuse ->gso_skb because peek isn't called for root qdiscs */ | 692 | /* we can reuse ->gso_skb because peek isn't called for root qdiscs */ |
| 692 | if (!sch->gso_skb) { | 693 | if (!sch->gso_skb) { |
| 693 | sch->gso_skb = sch->dequeue(sch); | 694 | sch->gso_skb = sch->dequeue(sch); |
| 694 | if (sch->gso_skb) | 695 | if (sch->gso_skb) { |
| 695 | /* it's still part of the queue */ | 696 | /* it's still part of the queue */ |
| 697 | qdisc_qstats_backlog_inc(sch, sch->gso_skb); | ||
| 696 | sch->q.qlen++; | 698 | sch->q.qlen++; |
| 699 | } | ||
| 697 | } | 700 | } |
| 698 | 701 | ||
| 699 | return sch->gso_skb; | 702 | return sch->gso_skb; |
| @@ -706,6 +709,7 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch) | |||
| 706 | 709 | ||
| 707 | if (skb) { | 710 | if (skb) { |
| 708 | sch->gso_skb = NULL; | 711 | sch->gso_skb = NULL; |
| 712 | qdisc_qstats_backlog_dec(sch, skb); | ||
| 709 | sch->q.qlen--; | 713 | sch->q.qlen--; |
| 710 | } else { | 714 | } else { |
| 711 | skb = sch->dequeue(sch); | 715 | skb = sch->dequeue(sch); |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 432bed510369..7e440d41487a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
| @@ -217,10 +217,10 @@ enum ib_device_cap_flags { | |||
| 217 | IB_DEVICE_CROSS_CHANNEL = (1 << 27), | 217 | IB_DEVICE_CROSS_CHANNEL = (1 << 27), |
| 218 | IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), | 218 | IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), |
| 219 | IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), | 219 | IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), |
| 220 | IB_DEVICE_ON_DEMAND_PAGING = (1 << 31), | 220 | IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31), |
| 221 | IB_DEVICE_SG_GAPS_REG = (1ULL << 32), | 221 | IB_DEVICE_SG_GAPS_REG = (1ULL << 32), |
| 222 | IB_DEVICE_VIRTUAL_FUNCTION = ((u64)1 << 33), | 222 | IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33), |
| 223 | IB_DEVICE_RAW_SCATTER_FCS = ((u64)1 << 34), | 223 | IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34), |
| 224 | }; | 224 | }; |
| 225 | 225 | ||
| 226 | enum ib_signature_prot_cap { | 226 | enum ib_signature_prot_cap { |
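The switch to 1ULL avoids a 32-bit shift whose result turns negative and then sign-extends when widened into the 64-bit capability mask, clobbering the bits above 31. A standalone illustration (userspace-style; the broken value is what common two's-complement ABIs produce):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* (1 << 31) overflows int; it typically becomes INT_MIN and
         * sign-extends to 0xffffffff80000000 in a 64-bit flags word. */
        uint64_t sign_extended = (uint64_t)(int)(1u << 31);
        uint64_t intended      = 1ULL << 31;

        printf("%#llx vs %#llx\n",
               (unsigned long long)sign_extended,
               (unsigned long long)intended);
        return 0;
}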
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h index 16274e2133cd..9c9a27d42aaa 100644 --- a/include/rdma/rdma_vt.h +++ b/include/rdma/rdma_vt.h | |||
| @@ -203,7 +203,9 @@ struct rvt_driver_provided { | |||
| 203 | 203 | ||
| 204 | /* | 204 | /* |
| 205 | * Allocate a private queue pair data structure for driver specific | 205 | * Allocate a private queue pair data structure for driver specific |
| 206 | * information which is opaque to rdmavt. | 206 | * information which is opaque to rdmavt. Errors are returned via |
| 207 | * ERR_PTR(err). The driver is free to return NULL or a valid | ||
| 208 | * pointer. | ||
| 207 | */ | 209 | */ |
| 208 | void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp, | 210 | void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
| 209 | gfp_t gfp); | 211 | gfp_t gfp); |
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 23c6960e94a4..2bdd1e3e7007 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h | |||
| @@ -118,7 +118,7 @@ struct btrfs_ioctl_vol_args_v2 { | |||
| 118 | }; | 118 | }; |
| 119 | union { | 119 | union { |
| 120 | char name[BTRFS_SUBVOL_NAME_MAX + 1]; | 120 | char name[BTRFS_SUBVOL_NAME_MAX + 1]; |
| 121 | u64 devid; | 121 | __u64 devid; |
| 122 | }; | 122 | }; |
| 123 | }; | 123 | }; |
| 124 | 124 | ||
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 9222db8ccccc..5f030b46cff4 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h | |||
| @@ -1353,6 +1353,15 @@ enum ethtool_link_mode_bit_indices { | |||
| 1353 | ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, | 1353 | ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT = 28, |
| 1354 | ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, | 1354 | ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT = 29, |
| 1355 | ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, | 1355 | ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT = 30, |
| 1356 | ETHTOOL_LINK_MODE_25000baseCR_Full_BIT = 31, | ||
| 1357 | ETHTOOL_LINK_MODE_25000baseKR_Full_BIT = 32, | ||
| 1358 | ETHTOOL_LINK_MODE_25000baseSR_Full_BIT = 33, | ||
| 1359 | ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT = 34, | ||
| 1360 | ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT = 35, | ||
| 1361 | ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT = 36, | ||
| 1362 | ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT = 37, | ||
| 1363 | ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT = 38, | ||
| 1364 | ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT = 39, | ||
| 1356 | 1365 | ||
| 1357 | /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit | 1366 | /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit |
| 1358 | * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* | 1367 | * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* |
| @@ -1361,7 +1370,7 @@ enum ethtool_link_mode_bit_indices { | |||
| 1361 | */ | 1370 | */ |
| 1362 | 1371 | ||
| 1363 | __ETHTOOL_LINK_MODE_LAST | 1372 | __ETHTOOL_LINK_MODE_LAST |
| 1364 | = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, | 1373 | = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, |
| 1365 | }; | 1374 | }; |
| 1366 | 1375 | ||
| 1367 | #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ | 1376 | #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ |
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h index ca1054dd8249..72a04a0e8cce 100644 --- a/include/uapi/linux/gtp.h +++ b/include/uapi/linux/gtp.h | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | #ifndef _UAPI_LINUX_GTP_H_ | 1 | #ifndef _UAPI_LINUX_GTP_H_ |
| 2 | #define _UAPI_LINUX_GTP_H__ | 2 | #define _UAPI_LINUX_GTP_H_ |
| 3 | 3 | ||
| 4 | enum gtp_genl_cmds { | 4 | enum gtp_genl_cmds { |
| 5 | GTP_CMD_NEWPDP, | 5 | GTP_CMD_NEWPDP, |
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index eba5914ba5d1..f4297c8a42fe 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h | |||
| @@ -145,6 +145,8 @@ enum { | |||
| 145 | TCA_POLICE_PEAKRATE, | 145 | TCA_POLICE_PEAKRATE, |
| 146 | TCA_POLICE_AVRATE, | 146 | TCA_POLICE_AVRATE, |
| 147 | TCA_POLICE_RESULT, | 147 | TCA_POLICE_RESULT, |
| 148 | TCA_POLICE_TM, | ||
| 149 | TCA_POLICE_PAD, | ||
| 148 | __TCA_POLICE_MAX | 150 | __TCA_POLICE_MAX |
| 149 | #define TCA_POLICE_RESULT TCA_POLICE_RESULT | 151 | #define TCA_POLICE_RESULT TCA_POLICE_RESULT |
| 150 | }; | 152 | }; |
| @@ -173,7 +175,7 @@ enum { | |||
| 173 | TCA_U32_DIVISOR, | 175 | TCA_U32_DIVISOR, |
| 174 | TCA_U32_SEL, | 176 | TCA_U32_SEL, |
| 175 | TCA_U32_POLICE, | 177 | TCA_U32_POLICE, |
| 176 | TCA_U32_ACT, | 178 | TCA_U32_ACT, |
| 177 | TCA_U32_INDEV, | 179 | TCA_U32_INDEV, |
| 178 | TCA_U32_PCNT, | 180 | TCA_U32_PCNT, |
| 179 | TCA_U32_MARK, | 181 | TCA_U32_MARK, |
diff --git a/include/uapi/sound/Kbuild b/include/uapi/sound/Kbuild index a7f27704f980..691984cb0b91 100644 --- a/include/uapi/sound/Kbuild +++ b/include/uapi/sound/Kbuild | |||
| @@ -1,5 +1,6 @@ | |||
| 1 | # UAPI Header export list | 1 | # UAPI Header export list |
| 2 | header-y += asequencer.h | 2 | header-y += asequencer.h |
| 3 | header-y += asoc.h | ||
| 3 | header-y += asound.h | 4 | header-y += asound.h |
| 4 | header-y += asound_fm.h | 5 | header-y += asound_fm.h |
| 5 | header-y += compress_offload.h | 6 | header-y += compress_offload.h |
| @@ -10,3 +11,5 @@ header-y += hdsp.h | |||
| 10 | header-y += hdspm.h | 11 | header-y += hdspm.h |
| 11 | header-y += sb16_csp.h | 12 | header-y += sb16_csp.h |
| 12 | header-y += sfnt_info.h | 13 | header-y += sfnt_info.h |
| 14 | header-y += tlv.h | ||
| 15 | header-y += usb_stream.h | ||
diff --git a/init/main.c b/init/main.c index 4c17fda5c2ff..eae02aa03c9e 100644 --- a/init/main.c +++ b/init/main.c | |||
| @@ -453,7 +453,7 @@ void __init __weak smp_setup_processor_id(void) | |||
| 453 | } | 453 | } |
| 454 | 454 | ||
| 455 | # if THREAD_SIZE >= PAGE_SIZE | 455 | # if THREAD_SIZE >= PAGE_SIZE |
| 456 | void __init __weak thread_info_cache_init(void) | 456 | void __init __weak thread_stack_cache_init(void) |
| 457 | { | 457 | { |
| 458 | } | 458 | } |
| 459 | #endif | 459 | #endif |
| @@ -627,7 +627,7 @@ asmlinkage __visible void __init start_kernel(void) | |||
| 627 | /* Should be run before the first non-init thread is created */ | 627 | /* Should be run before the first non-init thread is created */ |
| 628 | init_espfix_bsp(); | 628 | init_espfix_bsp(); |
| 629 | #endif | 629 | #endif |
| 630 | thread_info_cache_init(); | 630 | thread_stack_cache_init(); |
| 631 | cred_init(); | 631 | cred_init(); |
| 632 | fork_init(); | 632 | fork_init(); |
| 633 | proc_caches_init(); | 633 | proc_caches_init(); |
| @@ -708,11 +708,13 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn) | |||
| 708 | { | 708 | { |
| 709 | struct blacklist_entry *entry; | 709 | struct blacklist_entry *entry; |
| 710 | char fn_name[KSYM_SYMBOL_LEN]; | 710 | char fn_name[KSYM_SYMBOL_LEN]; |
| 711 | unsigned long addr; | ||
| 711 | 712 | ||
| 712 | if (list_empty(&blacklisted_initcalls)) | 713 | if (list_empty(&blacklisted_initcalls)) |
| 713 | return false; | 714 | return false; |
| 714 | 715 | ||
| 715 | sprint_symbol_no_offset(fn_name, (unsigned long)fn); | 716 | addr = (unsigned long) dereference_function_descriptor(fn); |
| 717 | sprint_symbol_no_offset(fn_name, addr); | ||
| 716 | 718 | ||
| 717 | list_for_each_entry(entry, &blacklisted_initcalls, next) { | 719 | list_for_each_entry(entry, &blacklisted_initcalls, next) { |
| 718 | if (!strcmp(fn_name, entry->buf)) { | 720 | if (!strcmp(fn_name, entry->buf)) { |
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index 04be7021f848..318858edb1cd 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c | |||
| @@ -365,7 +365,6 @@ static struct file_system_type bpf_fs_type = { | |||
| 365 | .name = "bpf", | 365 | .name = "bpf", |
| 366 | .mount = bpf_mount, | 366 | .mount = bpf_mount, |
| 367 | .kill_sb = kill_litter_super, | 367 | .kill_sb = kill_litter_super, |
| 368 | .fs_flags = FS_USERNS_MOUNT, | ||
| 369 | }; | 368 | }; |
| 370 | 369 | ||
| 371 | MODULE_ALIAS_FS("bpf"); | 370 | MODULE_ALIAS_FS("bpf"); |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 274450efea90..9c51ec3f0f44 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -3862,10 +3862,8 @@ static void _free_event(struct perf_event *event) | |||
| 3862 | if (event->ctx) | 3862 | if (event->ctx) |
| 3863 | put_ctx(event->ctx); | 3863 | put_ctx(event->ctx); |
| 3864 | 3864 | ||
| 3865 | if (event->pmu) { | 3865 | exclusive_event_destroy(event); |
| 3866 | exclusive_event_destroy(event); | 3866 | module_put(event->pmu->module); |
| 3867 | module_put(event->pmu->module); | ||
| 3868 | } | ||
| 3869 | 3867 | ||
| 3870 | call_rcu(&event->rcu_head, free_event_rcu); | 3868 | call_rcu(&event->rcu_head, free_event_rcu); |
| 3871 | } | 3869 | } |
diff --git a/kernel/fork.c b/kernel/fork.c index 5c2c355aa97f..4a7ec0c6c88c 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -148,18 +148,18 @@ static inline void free_task_struct(struct task_struct *tsk) | |||
| 148 | } | 148 | } |
| 149 | #endif | 149 | #endif |
| 150 | 150 | ||
| 151 | void __weak arch_release_thread_info(struct thread_info *ti) | 151 | void __weak arch_release_thread_stack(unsigned long *stack) |
| 152 | { | 152 | { |
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | #ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR | 155 | #ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR |
| 156 | 156 | ||
| 157 | /* | 157 | /* |
| 158 | * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a | 158 | * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a |
| 159 | * kmemcache based allocator. | 159 | * kmemcache based allocator. |
| 160 | */ | 160 | */ |
| 161 | # if THREAD_SIZE >= PAGE_SIZE | 161 | # if THREAD_SIZE >= PAGE_SIZE |
| 162 | static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, | 162 | static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, |
| 163 | int node) | 163 | int node) |
| 164 | { | 164 | { |
| 165 | struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP, | 165 | struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP, |
| @@ -172,33 +172,33 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, | |||
| 172 | return page ? page_address(page) : NULL; | 172 | return page ? page_address(page) : NULL; |
| 173 | } | 173 | } |
| 174 | 174 | ||
| 175 | static inline void free_thread_info(struct thread_info *ti) | 175 | static inline void free_thread_stack(unsigned long *stack) |
| 176 | { | 176 | { |
| 177 | struct page *page = virt_to_page(ti); | 177 | struct page *page = virt_to_page(stack); |
| 178 | 178 | ||
| 179 | memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK, | 179 | memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK, |
| 180 | -(1 << THREAD_SIZE_ORDER)); | 180 | -(1 << THREAD_SIZE_ORDER)); |
| 181 | __free_kmem_pages(page, THREAD_SIZE_ORDER); | 181 | __free_kmem_pages(page, THREAD_SIZE_ORDER); |
| 182 | } | 182 | } |
| 183 | # else | 183 | # else |
| 184 | static struct kmem_cache *thread_info_cache; | 184 | static struct kmem_cache *thread_stack_cache; |
| 185 | 185 | ||
| 186 | static struct thread_info *alloc_thread_info_node(struct task_struct *tsk, | 186 | static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, |
| 187 | int node) | 187 | int node) |
| 188 | { | 188 | { |
| 189 | return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node); | 189 | return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node); |
| 190 | } | 190 | } |
| 191 | 191 | ||
| 192 | static void free_thread_info(struct thread_info *ti) | 192 | static void free_thread_stack(unsigned long *stack) |
| 193 | { | 193 | { |
| 194 | kmem_cache_free(thread_info_cache, ti); | 194 | kmem_cache_free(thread_stack_cache, stack); |
| 195 | } | 195 | } |
| 196 | 196 | ||
| 197 | void thread_info_cache_init(void) | 197 | void thread_stack_cache_init(void) |
| 198 | { | 198 | { |
| 199 | thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, | 199 | thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE, |
| 200 | THREAD_SIZE, 0, NULL); | 200 | THREAD_SIZE, 0, NULL); |
| 201 | BUG_ON(thread_info_cache == NULL); | 201 | BUG_ON(thread_stack_cache == NULL); |
| 202 | } | 202 | } |
| 203 | # endif | 203 | # endif |
| 204 | #endif | 204 | #endif |
| @@ -221,9 +221,9 @@ struct kmem_cache *vm_area_cachep; | |||
| 221 | /* SLAB cache for mm_struct structures (tsk->mm) */ | 221 | /* SLAB cache for mm_struct structures (tsk->mm) */ |
| 222 | static struct kmem_cache *mm_cachep; | 222 | static struct kmem_cache *mm_cachep; |
| 223 | 223 | ||
| 224 | static void account_kernel_stack(struct thread_info *ti, int account) | 224 | static void account_kernel_stack(unsigned long *stack, int account) |
| 225 | { | 225 | { |
| 226 | struct zone *zone = page_zone(virt_to_page(ti)); | 226 | struct zone *zone = page_zone(virt_to_page(stack)); |
| 227 | 227 | ||
| 228 | mod_zone_page_state(zone, NR_KERNEL_STACK, account); | 228 | mod_zone_page_state(zone, NR_KERNEL_STACK, account); |
| 229 | } | 229 | } |
| @@ -231,8 +231,8 @@ static void account_kernel_stack(struct thread_info *ti, int account) | |||
| 231 | void free_task(struct task_struct *tsk) | 231 | void free_task(struct task_struct *tsk) |
| 232 | { | 232 | { |
| 233 | account_kernel_stack(tsk->stack, -1); | 233 | account_kernel_stack(tsk->stack, -1); |
| 234 | arch_release_thread_info(tsk->stack); | 234 | arch_release_thread_stack(tsk->stack); |
| 235 | free_thread_info(tsk->stack); | 235 | free_thread_stack(tsk->stack); |
| 236 | rt_mutex_debug_task_free(tsk); | 236 | rt_mutex_debug_task_free(tsk); |
| 237 | ftrace_graph_exit_task(tsk); | 237 | ftrace_graph_exit_task(tsk); |
| 238 | put_seccomp_filter(tsk); | 238 | put_seccomp_filter(tsk); |
| @@ -343,7 +343,7 @@ void set_task_stack_end_magic(struct task_struct *tsk) | |||
| 343 | static struct task_struct *dup_task_struct(struct task_struct *orig, int node) | 343 | static struct task_struct *dup_task_struct(struct task_struct *orig, int node) |
| 344 | { | 344 | { |
| 345 | struct task_struct *tsk; | 345 | struct task_struct *tsk; |
| 346 | struct thread_info *ti; | 346 | unsigned long *stack; |
| 347 | int err; | 347 | int err; |
| 348 | 348 | ||
| 349 | if (node == NUMA_NO_NODE) | 349 | if (node == NUMA_NO_NODE) |
| @@ -352,15 +352,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) | |||
| 352 | if (!tsk) | 352 | if (!tsk) |
| 353 | return NULL; | 353 | return NULL; |
| 354 | 354 | ||
| 355 | ti = alloc_thread_info_node(tsk, node); | 355 | stack = alloc_thread_stack_node(tsk, node); |
| 356 | if (!ti) | 356 | if (!stack) |
| 357 | goto free_tsk; | 357 | goto free_tsk; |
| 358 | 358 | ||
| 359 | err = arch_dup_task_struct(tsk, orig); | 359 | err = arch_dup_task_struct(tsk, orig); |
| 360 | if (err) | 360 | if (err) |
| 361 | goto free_ti; | 361 | goto free_stack; |
| 362 | 362 | ||
| 363 | tsk->stack = ti; | 363 | tsk->stack = stack; |
| 364 | #ifdef CONFIG_SECCOMP | 364 | #ifdef CONFIG_SECCOMP |
| 365 | /* | 365 | /* |
| 366 | * We must handle setting up seccomp filters once we're under | 366 | * We must handle setting up seccomp filters once we're under |
| @@ -392,14 +392,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) | |||
| 392 | tsk->task_frag.page = NULL; | 392 | tsk->task_frag.page = NULL; |
| 393 | tsk->wake_q.next = NULL; | 393 | tsk->wake_q.next = NULL; |
| 394 | 394 | ||
| 395 | account_kernel_stack(ti, 1); | 395 | account_kernel_stack(stack, 1); |
| 396 | 396 | ||
| 397 | kcov_task_init(tsk); | 397 | kcov_task_init(tsk); |
| 398 | 398 | ||
| 399 | return tsk; | 399 | return tsk; |
| 400 | 400 | ||
| 401 | free_ti: | 401 | free_stack: |
| 402 | free_thread_info(ti); | 402 | free_thread_stack(stack); |
| 403 | free_tsk: | 403 | free_tsk: |
| 404 | free_task_struct(tsk); | 404 | free_task_struct(tsk); |
| 405 | return NULL; | 405 | return NULL; |
diff --git a/kernel/futex.c b/kernel/futex.c index ee25f5ba4aca..33664f70e2d2 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
| @@ -469,7 +469,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) | |||
| 469 | { | 469 | { |
| 470 | unsigned long address = (unsigned long)uaddr; | 470 | unsigned long address = (unsigned long)uaddr; |
| 471 | struct mm_struct *mm = current->mm; | 471 | struct mm_struct *mm = current->mm; |
| 472 | struct page *page; | 472 | struct page *page, *tail; |
| 473 | struct address_space *mapping; | 473 | struct address_space *mapping; |
| 474 | int err, ro = 0; | 474 | int err, ro = 0; |
| 475 | 475 | ||
| @@ -530,7 +530,15 @@ again: | |||
| 530 | * considered here and page lock forces unnecessarily serialization | 530 | * considered here and page lock forces unnecessarily serialization |
| 531 | * From this point on, mapping will be re-verified if necessary and | 531 | * From this point on, mapping will be re-verified if necessary and |
| 532 | * page lock will be acquired only if it is unavoidable | 532 | * page lock will be acquired only if it is unavoidable |
| 533 | */ | 533 | * |
| 534 | * Mapping checks require the head page for any compound page so the | ||
| 535 | * head page and mapping are looked up now. For anonymous pages, it | ||
| 536 | * does not matter if the page splits in the future as the key is | ||
| 537 | * based on the address. For filesystem-backed pages, the tail is | ||
| 538 | * required as the index of the page determines the key. For | ||
| 539 | * base pages, there is no tail page and tail == page. | ||
| 540 | */ | ||
| 541 | tail = page; | ||
| 534 | page = compound_head(page); | 542 | page = compound_head(page); |
| 535 | mapping = READ_ONCE(page->mapping); | 543 | mapping = READ_ONCE(page->mapping); |
| 536 | 544 | ||
| @@ -654,7 +662,7 @@ again: | |||
| 654 | 662 | ||
| 655 | key->both.offset |= FUT_OFF_INODE; /* inode-based key */ | 663 | key->both.offset |= FUT_OFF_INODE; /* inode-based key */ |
| 656 | key->shared.inode = inode; | 664 | key->shared.inode = inode; |
| 657 | key->shared.pgoff = basepage_index(page); | 665 | key->shared.pgoff = basepage_index(tail); |
| 658 | rcu_read_unlock(); | 666 | rcu_read_unlock(); |
| 659 | } | 667 | } |
| 660 | 668 | ||
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c index c42742208e5e..89b49f6773f0 100644 --- a/kernel/irq/ipi.c +++ b/kernel/irq/ipi.c | |||
| @@ -125,7 +125,7 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest) | |||
| 125 | 125 | ||
| 126 | domain = data->domain; | 126 | domain = data->domain; |
| 127 | if (WARN_ON(domain == NULL)) | 127 | if (WARN_ON(domain == NULL)) |
| 128 | return; | 128 | return -EINVAL; |
| 129 | 129 | ||
| 130 | if (!irq_domain_is_ipi(domain)) { | 130 | if (!irq_domain_is_ipi(domain)) { |
| 131 | pr_warn("Trying to destroy a non IPI domain!\n"); | 131 | pr_warn("Trying to destroy a non IPI domain!\n"); |
diff --git a/kernel/jump_label.c b/kernel/jump_label.c index 05254eeb4b4e..4b353e0be121 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c | |||
| @@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key); | |||
| 58 | 58 | ||
| 59 | void static_key_slow_inc(struct static_key *key) | 59 | void static_key_slow_inc(struct static_key *key) |
| 60 | { | 60 | { |
| 61 | int v, v1; | ||
| 62 | |||
| 61 | STATIC_KEY_CHECK_USE(); | 63 | STATIC_KEY_CHECK_USE(); |
| 62 | if (atomic_inc_not_zero(&key->enabled)) | 64 | |
| 63 | return; | 65 | /* |
| 66 | * Careful if we get concurrent static_key_slow_inc() calls; | ||
| 67 | * later calls must wait for the first one to _finish_ the | ||
| 68 | * jump_label_update() process. At the same time, however, | ||
| 69 | * the jump_label_update() call below wants to see | ||
| 70 | * static_key_enabled(&key) for jumps to be updated properly. | ||
| 71 | * | ||
| 72 | * So give a special meaning to negative key->enabled: it sends | ||
| 73 | * static_key_slow_inc() down the slow path, and it is non-zero | ||
| 74 | * so it counts as "enabled" in jump_label_update(). Note that | ||
| 75 | * atomic_inc_unless_negative() checks >= 0, so roll our own. | ||
| 76 | */ | ||
| 77 | for (v = atomic_read(&key->enabled); v > 0; v = v1) { | ||
| 78 | v1 = atomic_cmpxchg(&key->enabled, v, v + 1); | ||
| 79 | if (likely(v1 == v)) | ||
| 80 | return; | ||
| 81 | } | ||
| 64 | 82 | ||
| 65 | jump_label_lock(); | 83 | jump_label_lock(); |
| 66 | if (atomic_inc_return(&key->enabled) == 1) | 84 | if (atomic_read(&key->enabled) == 0) { |
| 85 | atomic_set(&key->enabled, -1); | ||
| 67 | jump_label_update(key); | 86 | jump_label_update(key); |
| 87 | atomic_set(&key->enabled, 1); | ||
| 88 | } else { | ||
| 89 | atomic_inc(&key->enabled); | ||
| 90 | } | ||
| 68 | jump_label_unlock(); | 91 | jump_label_unlock(); |
| 69 | } | 92 | } |
| 70 | EXPORT_SYMBOL_GPL(static_key_slow_inc); | 93 | EXPORT_SYMBOL_GPL(static_key_slow_inc); |
| @@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc); | |||
| 72 | static void __static_key_slow_dec(struct static_key *key, | 95 | static void __static_key_slow_dec(struct static_key *key, |
| 73 | unsigned long rate_limit, struct delayed_work *work) | 96 | unsigned long rate_limit, struct delayed_work *work) |
| 74 | { | 97 | { |
| 98 | /* | ||
| 99 | * The negative count check is valid even when a negative | ||
| 100 | * key->enabled is in use by static_key_slow_inc(); a | ||
| 101 | * __static_key_slow_dec() before the first static_key_slow_inc() | ||
| 102 | * returns is unbalanced, because all other static_key_slow_inc() | ||
| 103 | * instances block while the update is in progress. | ||
| 104 | */ | ||
| 75 | if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) { | 105 | if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) { |
| 76 | WARN(atomic_read(&key->enabled) < 0, | 106 | WARN(atomic_read(&key->enabled) < 0, |
| 77 | "jump label: negative count!\n"); | 107 | "jump label: negative count!\n"); |
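The loop at the top of static_key_slow_inc() is an "increment only if strictly positive" idiom; atomic_inc_unless_negative() cannot be used because a count of 0 must also take the locked slow path. A standalone sketch of the idiom (helper name is made up):

#include <linux/atomic.h>
#include <linux/types.h>

/* Returns true if a strictly positive counter was incremented; 0 or
 * negative values are left for the caller's slow path. */
static bool inc_if_positive(atomic_t *v)
{
        int cur, seen;

        for (cur = atomic_read(v); cur > 0; cur = seen) {
                seen = atomic_cmpxchg(v, cur, cur + 1);
                if (likely(seen == cur))
                        return true;
        }
        return false;
}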
diff --git a/kernel/kcov.c b/kernel/kcov.c index a02f2dddd1d7..8d44b3fea9d0 100644 --- a/kernel/kcov.c +++ b/kernel/kcov.c | |||
| @@ -264,7 +264,12 @@ static const struct file_operations kcov_fops = { | |||
| 264 | 264 | ||
| 265 | static int __init kcov_init(void) | 265 | static int __init kcov_init(void) |
| 266 | { | 266 | { |
| 267 | if (!debugfs_create_file("kcov", 0600, NULL, NULL, &kcov_fops)) { | 267 | /* |
| 268 | * The kcov debugfs file won't ever get removed and thus, | ||
| 269 | * there is no need to protect it against removal races. The | ||
| 270 | * use of debugfs_create_file_unsafe() is actually safe here. | ||
| 271 | */ | ||
| 272 | if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) { | ||
| 268 | pr_err("failed to create kcov in debugfs\n"); | 273 | pr_err("failed to create kcov in debugfs\n"); |
| 269 | return -ENOMEM; | 274 | return -ENOMEM; |
| 270 | } | 275 | } |
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c index 3ef3736002d8..9c951fade415 100644 --- a/kernel/locking/mutex-debug.c +++ b/kernel/locking/mutex-debug.c | |||
| @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter) | |||
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, | 51 | void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, |
| 52 | struct thread_info *ti) | 52 | struct task_struct *task) |
| 53 | { | 53 | { |
| 54 | SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); | 54 | SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock)); |
| 55 | 55 | ||
| 56 | /* Mark the current thread as blocked on the lock: */ | 56 | /* Mark the current thread as blocked on the lock: */ |
| 57 | ti->task->blocked_on = waiter; | 57 | task->blocked_on = waiter; |
| 58 | } | 58 | } |
| 59 | 59 | ||
| 60 | void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, | 60 | void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, |
| 61 | struct thread_info *ti) | 61 | struct task_struct *task) |
| 62 | { | 62 | { |
| 63 | DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); | 63 | DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list)); |
| 64 | DEBUG_LOCKS_WARN_ON(waiter->task != ti->task); | 64 | DEBUG_LOCKS_WARN_ON(waiter->task != task); |
| 65 | DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter); | 65 | DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter); |
| 66 | ti->task->blocked_on = NULL; | 66 | task->blocked_on = NULL; |
| 67 | 67 | ||
| 68 | list_del_init(&waiter->list); | 68 | list_del_init(&waiter->list); |
| 69 | waiter->task = NULL; | 69 | waiter->task = NULL; |
diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h index 0799fd3e4cfa..d06ae3bb46c5 100644 --- a/kernel/locking/mutex-debug.h +++ b/kernel/locking/mutex-debug.h | |||
| @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock, | |||
| 20 | extern void debug_mutex_free_waiter(struct mutex_waiter *waiter); | 20 | extern void debug_mutex_free_waiter(struct mutex_waiter *waiter); |
| 21 | extern void debug_mutex_add_waiter(struct mutex *lock, | 21 | extern void debug_mutex_add_waiter(struct mutex *lock, |
| 22 | struct mutex_waiter *waiter, | 22 | struct mutex_waiter *waiter, |
| 23 | struct thread_info *ti); | 23 | struct task_struct *task); |
| 24 | extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, | 24 | extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, |
| 25 | struct thread_info *ti); | 25 | struct task_struct *task); |
| 26 | extern void debug_mutex_unlock(struct mutex *lock); | 26 | extern void debug_mutex_unlock(struct mutex *lock); |
| 27 | extern void debug_mutex_init(struct mutex *lock, const char *name, | 27 | extern void debug_mutex_init(struct mutex *lock, const char *name, |
| 28 | struct lock_class_key *key); | 28 | struct lock_class_key *key); |
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index e364b424b019..a70b90db3909 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c | |||
| @@ -486,9 +486,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx) | |||
| 486 | if (!hold_ctx) | 486 | if (!hold_ctx) |
| 487 | return 0; | 487 | return 0; |
| 488 | 488 | ||
| 489 | if (unlikely(ctx == hold_ctx)) | ||
| 490 | return -EALREADY; | ||
| 491 | |||
| 492 | if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && | 489 | if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && |
| 493 | (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { | 490 | (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { |
| 494 | #ifdef CONFIG_DEBUG_MUTEXES | 491 | #ifdef CONFIG_DEBUG_MUTEXES |
| @@ -514,6 +511,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
| 514 | unsigned long flags; | 511 | unsigned long flags; |
| 515 | int ret; | 512 | int ret; |
| 516 | 513 | ||
| 514 | if (use_ww_ctx) { | ||
| 515 | struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); | ||
| 516 | if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) | ||
| 517 | return -EALREADY; | ||
| 518 | } | ||
| 519 | |||
| 517 | preempt_disable(); | 520 | preempt_disable(); |
| 518 | mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); | 521 | mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); |
| 519 | 522 | ||
| @@ -534,7 +537,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
| 534 | goto skip_wait; | 537 | goto skip_wait; |
| 535 | 538 | ||
| 536 | debug_mutex_lock_common(lock, &waiter); | 539 | debug_mutex_lock_common(lock, &waiter); |
| 537 | debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); | 540 | debug_mutex_add_waiter(lock, &waiter, task); |
| 538 | 541 | ||
| 539 | /* add waiting tasks to the end of the waitqueue (FIFO): */ | 542 | /* add waiting tasks to the end of the waitqueue (FIFO): */ |
| 540 | list_add_tail(&waiter.list, &lock->wait_list); | 543 | list_add_tail(&waiter.list, &lock->wait_list); |
| @@ -581,7 +584,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
| 581 | } | 584 | } |
| 582 | __set_task_state(task, TASK_RUNNING); | 585 | __set_task_state(task, TASK_RUNNING); |
| 583 | 586 | ||
| 584 | mutex_remove_waiter(lock, &waiter, current_thread_info()); | 587 | mutex_remove_waiter(lock, &waiter, task); |
| 585 | /* set it to 0 if there are no waiters left: */ | 588 | /* set it to 0 if there are no waiters left: */ |
| 586 | if (likely(list_empty(&lock->wait_list))) | 589 | if (likely(list_empty(&lock->wait_list))) |
| 587 | atomic_set(&lock->count, 0); | 590 | atomic_set(&lock->count, 0); |
| @@ -602,7 +605,7 @@ skip_wait: | |||
| 602 | return 0; | 605 | return 0; |
| 603 | 606 | ||
| 604 | err: | 607 | err: |
| 605 | mutex_remove_waiter(lock, &waiter, task_thread_info(task)); | 608 | mutex_remove_waiter(lock, &waiter, task); |
| 606 | spin_unlock_mutex(&lock->wait_lock, flags); | 609 | spin_unlock_mutex(&lock->wait_lock, flags); |
| 607 | debug_mutex_free_waiter(&waiter); | 610 | debug_mutex_free_waiter(&waiter); |
| 608 | mutex_release(&lock->dep_map, 1, ip); | 611 | mutex_release(&lock->dep_map, 1, ip); |
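With the check moved into __mutex_lock_common(), locking a ww_mutex twice with the same acquire context is reported as -EALREADY on every path rather than blocking. An illustrative caller (names are made up):

#include <linux/ww_mutex.h>
#include <linux/errno.h>
#include <linux/bug.h>

static int ww_double_lock_demo(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret = ww_mutex_lock(lock, ctx);

        if (ret)
                return ret;     /* e.g. -EDEADLK under contention */

        /* a second lock with the same ctx must be flagged, not deadlock */
        WARN_ON(ww_mutex_lock(lock, ctx) != -EALREADY);

        ww_mutex_unlock(lock);
        return 0;
}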
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h index 5cda397607f2..a68bae5e852a 100644 --- a/kernel/locking/mutex.h +++ b/kernel/locking/mutex.h | |||
| @@ -13,7 +13,7 @@ | |||
| 13 | do { spin_lock(lock); (void)(flags); } while (0) | 13 | do { spin_lock(lock); (void)(flags); } while (0) |
| 14 | #define spin_unlock_mutex(lock, flags) \ | 14 | #define spin_unlock_mutex(lock, flags) \ |
| 15 | do { spin_unlock(lock); (void)(flags); } while (0) | 15 | do { spin_unlock(lock); (void)(flags); } while (0) |
| 16 | #define mutex_remove_waiter(lock, waiter, ti) \ | 16 | #define mutex_remove_waiter(lock, waiter, task) \ |
| 17 | __list_del((waiter)->list.prev, (waiter)->list.next) | 17 | __list_del((waiter)->list.prev, (waiter)->list.next) |
| 18 | 18 | ||
| 19 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER | 19 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index ce2f75e32ae1..5fc8c311b8fe 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c | |||
| @@ -267,6 +267,66 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock, | |||
| 267 | #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath | 267 | #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath |
| 268 | #endif | 268 | #endif |
| 269 | 269 | ||
| 270 | /* | ||
| 271 | * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before | ||
| 272 | * issuing an _unordered_ store to set _Q_LOCKED_VAL. | ||
| 273 | * | ||
| 274 | * This means that the store can be delayed, but no later than the | ||
| 275 | * store-release from the unlock. This means that simply observing | ||
| 276 | * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired. | ||
| 277 | * | ||
| 278 | * There are two paths that can issue the unordered store: | ||
| 279 | * | ||
| 280 | * (1) clear_pending_set_locked(): *,1,0 -> *,0,1 | ||
| 281 | * | ||
| 282 | * (2) set_locked(): t,0,0 -> t,0,1 ; t != 0 | ||
| 283 | * atomic_cmpxchg_relaxed(): t,0,0 -> 0,0,1 | ||
| 284 | * | ||
| 285 | * However, in both cases we have other !0 state we've set before to queue | ||
| 286 | * ourselves: | ||
| 287 | * | ||
| 288 | * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our | ||
| 289 | * load is constrained by that ACQUIRE to not pass before that, and thus must | ||
| 290 | * observe the store. | ||
| 291 | * | ||
| 292 | * For (2) we have a more interesting scenario. We enqueue ourselves using | ||
| 293 | * xchg_tail(), which ends up being a RELEASE. This in itself is not | ||
| 294 | * sufficient, however that is followed by an smp_cond_acquire() on the same | ||
| 295 | * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and | ||
| 296 | * guarantees we must observe that store. | ||
| 297 | * | ||
| 298 | * Therefore both cases have other !0 state that is observable before the | ||
| 299 | * unordered locked byte store comes through. This means we can use that to | ||
| 300 | * wait for the lock store, and then wait for an unlock. | ||
| 301 | */ | ||
| 302 | #ifndef queued_spin_unlock_wait | ||
| 303 | void queued_spin_unlock_wait(struct qspinlock *lock) | ||
| 304 | { | ||
| 305 | u32 val; | ||
| 306 | |||
| 307 | for (;;) { | ||
| 308 | val = atomic_read(&lock->val); | ||
| 309 | |||
| 310 | if (!val) /* not locked, we're done */ | ||
| 311 | goto done; | ||
| 312 | |||
| 313 | if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */ | ||
| 314 | break; | ||
| 315 | |||
| 316 | /* not locked, but pending, wait until we observe the lock */ | ||
| 317 | cpu_relax(); | ||
| 318 | } | ||
| 319 | |||
| 320 | /* any unlock is good */ | ||
| 321 | while (atomic_read(&lock->val) & _Q_LOCKED_MASK) | ||
| 322 | cpu_relax(); | ||
| 323 | |||
| 324 | done: | ||
| 325 | smp_rmb(); /* CTRL + RMB -> ACQUIRE */ | ||
| 326 | } | ||
| 327 | EXPORT_SYMBOL(queued_spin_unlock_wait); | ||
| 328 | #endif | ||
| 329 | |||
| 270 | #endif /* _GEN_PV_LOCK_SLOWPATH */ | 330 | #endif /* _GEN_PV_LOCK_SLOWPATH */ |
| 271 | 331 | ||
| 272 | /** | 332 | /** |
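The comment block added above reasons about why a plain load of the lock word is enough once some non-zero state is visible. As a rough illustration of the control flow only (not the kernel code; Q_LOCKED_MASK below is a made-up stand-in for _Q_LOCKED_MASK), a standalone C11 sketch of the same two-phase wait might look like this:

#include <stdatomic.h>
#include <stdio.h>

#define Q_LOCKED_MASK   0x000000ffu     /* stands in for _Q_LOCKED_MASK */

static _Atomic unsigned int lock_val;

static void unlock_wait(void)
{
        unsigned int val;

        for (;;) {
                val = atomic_load_explicit(&lock_val, memory_order_relaxed);

                if (!val)                       /* not locked, we're done */
                        goto done;

                if (val & Q_LOCKED_MASK)        /* locked, go wait for unlock */
                        break;

                /* pending but not yet locked: wait for the locked byte store */
        }

        /* any unlock is good */
        while (atomic_load_explicit(&lock_val, memory_order_relaxed) & Q_LOCKED_MASK)
                ;

done:
        atomic_thread_fence(memory_order_acquire);      /* CTRL + fence -> ACQUIRE */
}

int main(void)
{
        atomic_store(&lock_val, 0);     /* demo: lock word is clear */
        unlock_wait();                  /* returns immediately */
        puts("observed unlocked");
        return 0;
}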
diff --git a/kernel/power/process.c b/kernel/power/process.c index df058bed53ce..0c2ee9761d57 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
| @@ -146,6 +146,18 @@ int freeze_processes(void) | |||
| 146 | if (!error && !oom_killer_disable()) | 146 | if (!error && !oom_killer_disable()) |
| 147 | error = -EBUSY; | 147 | error = -EBUSY; |
| 148 | 148 | ||
| 149 | /* | ||
| 150 | * There is a hard to fix race between oom_reaper kernel thread | ||
| 151 | * and oom_killer_disable. oom_reaper calls exit_oom_victim | ||
| 152 | * before the victim reaches exit_mm so try to freeze all the tasks | ||
| 153 | * again and catch such a left over task. | ||
| 154 | */ | ||
| 155 | if (!error) { | ||
| 156 | pr_info("Double checking all user space processes after OOM killer disable... "); | ||
| 157 | error = try_to_freeze_tasks(true); | ||
| 158 | pr_cont("\n"); | ||
| 159 | } | ||
| 160 | |||
| 149 | if (error) | 161 | if (error) |
| 150 | thaw_processes(); | 162 | thaw_processes(); |
| 151 | return error; | 163 | return error; |
diff --git a/kernel/relay.c b/kernel/relay.c index 074994bcfa9b..04d7cf3ef8cf 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
| @@ -614,6 +614,7 @@ free_bufs: | |||
| 614 | 614 | ||
| 615 | kref_put(&chan->kref, relay_destroy_channel); | 615 | kref_put(&chan->kref, relay_destroy_channel); |
| 616 | mutex_unlock(&relay_channels_mutex); | 616 | mutex_unlock(&relay_channels_mutex); |
| 617 | kfree(chan); | ||
| 617 | return NULL; | 618 | return NULL; |
| 618 | } | 619 | } |
| 619 | EXPORT_SYMBOL_GPL(relay_open); | 620 | EXPORT_SYMBOL_GPL(relay_open); |
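The one-line relay_open() change above plugs a leak on the error path: the channel object itself was never freed. A minimal userspace C sketch of that goto-style cleanup pattern (hypothetical names, not the relay code):

#include <stdlib.h>

struct chan {
        void *buf;
};

static struct chan *chan_open(size_t bufsz)
{
        struct chan *chan = calloc(1, sizeof(*chan));

        if (!chan)
                return NULL;

        chan->buf = malloc(bufsz);
        if (!chan->buf)
                goto free_bufs;

        return chan;

free_bufs:
        /* undo any partial setup here, then ... */
        free(chan);     /* ... also free the channel object itself (the fix) */
        return NULL;
}

int main(void)
{
        struct chan *c = chan_open(4096);

        if (c) {
                free(c->buf);
                free(c);
        }
        return 0;
}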
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 7f2cae4620c7..51d7105f529a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -1536,7 +1536,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p) | |||
| 1536 | for (;;) { | 1536 | for (;;) { |
| 1537 | /* Any allowed, online CPU? */ | 1537 | /* Any allowed, online CPU? */ |
| 1538 | for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) { | 1538 | for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) { |
| 1539 | if (!cpu_active(dest_cpu)) | 1539 | if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu)) |
| 1540 | continue; | ||
| 1541 | if (!cpu_online(dest_cpu)) | ||
| 1540 | continue; | 1542 | continue; |
| 1541 | goto out; | 1543 | goto out; |
| 1542 | } | 1544 | } |
| @@ -2253,9 +2255,11 @@ int sysctl_numa_balancing(struct ctl_table *table, int write, | |||
| 2253 | #endif | 2255 | #endif |
| 2254 | #endif | 2256 | #endif |
| 2255 | 2257 | ||
| 2258 | #ifdef CONFIG_SCHEDSTATS | ||
| 2259 | |||
| 2256 | DEFINE_STATIC_KEY_FALSE(sched_schedstats); | 2260 | DEFINE_STATIC_KEY_FALSE(sched_schedstats); |
| 2261 | static bool __initdata __sched_schedstats = false; | ||
| 2257 | 2262 | ||
| 2258 | #ifdef CONFIG_SCHEDSTATS | ||
| 2259 | static void set_schedstats(bool enabled) | 2263 | static void set_schedstats(bool enabled) |
| 2260 | { | 2264 | { |
| 2261 | if (enabled) | 2265 | if (enabled) |
| @@ -2278,11 +2282,16 @@ static int __init setup_schedstats(char *str) | |||
| 2278 | if (!str) | 2282 | if (!str) |
| 2279 | goto out; | 2283 | goto out; |
| 2280 | 2284 | ||
| 2285 | /* | ||
| 2286 | * This code is called before jump labels have been set up, so we can't | ||
| 2287 | * change the static branch directly just yet. Instead set a temporary | ||
| 2288 | * variable so init_schedstats() can do it later. | ||
| 2289 | */ | ||
| 2281 | if (!strcmp(str, "enable")) { | 2290 | if (!strcmp(str, "enable")) { |
| 2282 | set_schedstats(true); | 2291 | __sched_schedstats = true; |
| 2283 | ret = 1; | 2292 | ret = 1; |
| 2284 | } else if (!strcmp(str, "disable")) { | 2293 | } else if (!strcmp(str, "disable")) { |
| 2285 | set_schedstats(false); | 2294 | __sched_schedstats = false; |
| 2286 | ret = 1; | 2295 | ret = 1; |
| 2287 | } | 2296 | } |
| 2288 | out: | 2297 | out: |
| @@ -2293,6 +2302,11 @@ out: | |||
| 2293 | } | 2302 | } |
| 2294 | __setup("schedstats=", setup_schedstats); | 2303 | __setup("schedstats=", setup_schedstats); |
| 2295 | 2304 | ||
| 2305 | static void __init init_schedstats(void) | ||
| 2306 | { | ||
| 2307 | set_schedstats(__sched_schedstats); | ||
| 2308 | } | ||
| 2309 | |||
| 2296 | #ifdef CONFIG_PROC_SYSCTL | 2310 | #ifdef CONFIG_PROC_SYSCTL |
| 2297 | int sysctl_schedstats(struct ctl_table *table, int write, | 2311 | int sysctl_schedstats(struct ctl_table *table, int write, |
| 2298 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2312 | void __user *buffer, size_t *lenp, loff_t *ppos) |
| @@ -2313,8 +2327,10 @@ int sysctl_schedstats(struct ctl_table *table, int write, | |||
| 2313 | set_schedstats(state); | 2327 | set_schedstats(state); |
| 2314 | return err; | 2328 | return err; |
| 2315 | } | 2329 | } |
| 2316 | #endif | 2330 | #endif /* CONFIG_PROC_SYSCTL */ |
| 2317 | #endif | 2331 | #else /* !CONFIG_SCHEDSTATS */ |
| 2332 | static inline void init_schedstats(void) {} | ||
| 2333 | #endif /* CONFIG_SCHEDSTATS */ | ||
| 2318 | 2334 | ||
| 2319 | /* | 2335 | /* |
| 2320 | * fork()/clone()-time setup: | 2336 | * fork()/clone()-time setup: |
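The schedstats changes above split boot-time handling in two: the early __setup() parser only records the request in __sched_schedstats, and init_schedstats() applies it once jump labels are usable. A small standalone C sketch of that parse-early/apply-later pattern (illustrative names, not kernel code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool schedstats_requested;       /* stands in for __sched_schedstats */
static bool schedstats_enabled;         /* stands in for the static branch */

/* Early parser: too early to flip the real switch, only record the wish. */
static int setup_schedstats(const char *str)
{
        if (!str)
                return 0;
        if (!strcmp(str, "enable"))
                schedstats_requested = true;
        else if (!strcmp(str, "disable"))
                schedstats_requested = false;
        else
                return 0;
        return 1;
}

/* Later init: the machinery (jump labels in the kernel) is ready now. */
static void init_schedstats(void)
{
        schedstats_enabled = schedstats_requested;
}

int main(void)
{
        setup_schedstats("enable");     /* early: remember the request */
        init_schedstats();              /* later: apply it */
        printf("schedstats %s\n", schedstats_enabled ? "on" : "off");
        return 0;
}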
| @@ -2521,10 +2537,9 @@ void wake_up_new_task(struct task_struct *p) | |||
| 2521 | */ | 2537 | */ |
| 2522 | set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); | 2538 | set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0)); |
| 2523 | #endif | 2539 | #endif |
| 2524 | /* Post initialize new task's util average when its cfs_rq is set */ | 2540 | rq = __task_rq_lock(p, &rf); |
| 2525 | post_init_entity_util_avg(&p->se); | 2541 | post_init_entity_util_avg(&p->se); |
| 2526 | 2542 | ||
| 2527 | rq = __task_rq_lock(p, &rf); | ||
| 2528 | activate_task(rq, p, 0); | 2543 | activate_task(rq, p, 0); |
| 2529 | p->on_rq = TASK_ON_RQ_QUEUED; | 2544 | p->on_rq = TASK_ON_RQ_QUEUED; |
| 2530 | trace_sched_wakeup_new(p); | 2545 | trace_sched_wakeup_new(p); |
| @@ -3156,7 +3171,8 @@ static noinline void __schedule_bug(struct task_struct *prev) | |||
| 3156 | static inline void schedule_debug(struct task_struct *prev) | 3171 | static inline void schedule_debug(struct task_struct *prev) |
| 3157 | { | 3172 | { |
| 3158 | #ifdef CONFIG_SCHED_STACK_END_CHECK | 3173 | #ifdef CONFIG_SCHED_STACK_END_CHECK |
| 3159 | BUG_ON(task_stack_end_corrupted(prev)); | 3174 | if (task_stack_end_corrupted(prev)) |
| 3175 | panic("corrupted stack end detected inside scheduler\n"); | ||
| 3160 | #endif | 3176 | #endif |
| 3161 | 3177 | ||
| 3162 | if (unlikely(in_atomic_preempt_off())) { | 3178 | if (unlikely(in_atomic_preempt_off())) { |
| @@ -5133,14 +5149,16 @@ void show_state_filter(unsigned long state_filter) | |||
| 5133 | /* | 5149 | /* |
| 5134 | * reset the NMI-timeout, listing all files on a slow | 5150 | * reset the NMI-timeout, listing all files on a slow |
| 5135 | * console might take a lot of time: | 5151 | * console might take a lot of time: |
| 5152 | * Also, reset softlockup watchdogs on all CPUs, because | ||
| 5153 | * another CPU might be blocked waiting for us to process | ||
| 5154 | * an IPI. | ||
| 5136 | */ | 5155 | */ |
| 5137 | touch_nmi_watchdog(); | 5156 | touch_nmi_watchdog(); |
| 5157 | touch_all_softlockup_watchdogs(); | ||
| 5138 | if (!state_filter || (p->state & state_filter)) | 5158 | if (!state_filter || (p->state & state_filter)) |
| 5139 | sched_show_task(p); | 5159 | sched_show_task(p); |
| 5140 | } | 5160 | } |
| 5141 | 5161 | ||
| 5142 | touch_all_softlockup_watchdogs(); | ||
| 5143 | |||
| 5144 | #ifdef CONFIG_SCHED_DEBUG | 5162 | #ifdef CONFIG_SCHED_DEBUG |
| 5145 | if (!state_filter) | 5163 | if (!state_filter) |
| 5146 | sysrq_sched_debug_show(); | 5164 | sysrq_sched_debug_show(); |
| @@ -7487,6 +7505,8 @@ void __init sched_init(void) | |||
| 7487 | #endif | 7505 | #endif |
| 7488 | init_sched_fair_class(); | 7506 | init_sched_fair_class(); |
| 7489 | 7507 | ||
| 7508 | init_schedstats(); | ||
| 7509 | |||
| 7490 | scheduler_running = 1; | 7510 | scheduler_running = 1; |
| 7491 | } | 7511 | } |
| 7492 | 7512 | ||
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index cf905f655ba1..0368c393a336 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c | |||
| @@ -427,19 +427,12 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) | |||
| 427 | SPLIT_NS(p->se.vruntime), | 427 | SPLIT_NS(p->se.vruntime), |
| 428 | (long long)(p->nvcsw + p->nivcsw), | 428 | (long long)(p->nvcsw + p->nivcsw), |
| 429 | p->prio); | 429 | p->prio); |
| 430 | #ifdef CONFIG_SCHEDSTATS | 430 | |
| 431 | if (schedstat_enabled()) { | ||
| 432 | SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld", | ||
| 433 | SPLIT_NS(p->se.statistics.wait_sum), | ||
| 434 | SPLIT_NS(p->se.sum_exec_runtime), | ||
| 435 | SPLIT_NS(p->se.statistics.sum_sleep_runtime)); | ||
| 436 | } | ||
| 437 | #else | ||
| 438 | SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld", | 431 | SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld", |
| 439 | 0LL, 0L, | 432 | SPLIT_NS(schedstat_val(p, se.statistics.wait_sum)), |
| 440 | SPLIT_NS(p->se.sum_exec_runtime), | 433 | SPLIT_NS(p->se.sum_exec_runtime), |
| 441 | 0LL, 0L); | 434 | SPLIT_NS(schedstat_val(p, se.statistics.sum_sleep_runtime))); |
| 442 | #endif | 435 | |
| 443 | #ifdef CONFIG_NUMA_BALANCING | 436 | #ifdef CONFIG_NUMA_BALANCING |
| 444 | SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p)); | 437 | SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p)); |
| 445 | #endif | 438 | #endif |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 218f8e83db73..bdcbeea90c95 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
| @@ -2904,6 +2904,23 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq) | |||
| 2904 | } | 2904 | } |
| 2905 | } | 2905 | } |
| 2906 | 2906 | ||
| 2907 | /* | ||
| 2908 | * Unsigned subtract and clamp on underflow. | ||
| 2909 | * | ||
| 2910 | * Explicitly do a load-store to ensure the intermediate value never hits | ||
| 2911 | * memory. This allows lockless observations without ever seeing the negative | ||
| 2912 | * values. | ||
| 2913 | */ | ||
| 2914 | #define sub_positive(_ptr, _val) do { \ | ||
| 2915 | typeof(_ptr) ptr = (_ptr); \ | ||
| 2916 | typeof(*ptr) val = (_val); \ | ||
| 2917 | typeof(*ptr) res, var = READ_ONCE(*ptr); \ | ||
| 2918 | res = var - val; \ | ||
| 2919 | if (res > var) \ | ||
| 2920 | res = 0; \ | ||
| 2921 | WRITE_ONCE(*ptr, res); \ | ||
| 2922 | } while (0) | ||
| 2923 | |||
| 2907 | /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */ | 2924 | /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */ |
| 2908 | static inline int | 2925 | static inline int |
| 2909 | update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) | 2926 | update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) |
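sub_positive() above subtracts on an unsigned field while clamping at zero, using READ_ONCE()/WRITE_ONCE() so lockless readers never observe a wrapped-around intermediate value. A standalone GNU C sketch of the same idea (plain loads and stores instead of the kernel accessors, single-threaded demo only):

#include <stdio.h>

#define sub_positive(_ptr, _val) do {                           \
        typeof(_ptr) ptr = (_ptr);                              \
        typeof(*ptr) val = (_val);                              \
        typeof(*ptr) res, var = *ptr;                           \
        res = var - val;                                        \
        if (res > var)          /* unsigned wrap => underflow */ \
                res = 0;                                        \
        *ptr = res;                                             \
} while (0)

int main(void)
{
        unsigned long load_avg = 100;

        sub_positive(&load_avg, 30UL);  /* 70 */
        sub_positive(&load_avg, 200UL); /* would underflow: clamped to 0 */
        printf("load_avg = %lu\n", load_avg);
        return 0;
}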
| @@ -2913,15 +2930,15 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq) | |||
| 2913 | 2930 | ||
| 2914 | if (atomic_long_read(&cfs_rq->removed_load_avg)) { | 2931 | if (atomic_long_read(&cfs_rq->removed_load_avg)) { |
| 2915 | s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0); | 2932 | s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0); |
| 2916 | sa->load_avg = max_t(long, sa->load_avg - r, 0); | 2933 | sub_positive(&sa->load_avg, r); |
| 2917 | sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0); | 2934 | sub_positive(&sa->load_sum, r * LOAD_AVG_MAX); |
| 2918 | removed_load = 1; | 2935 | removed_load = 1; |
| 2919 | } | 2936 | } |
| 2920 | 2937 | ||
| 2921 | if (atomic_long_read(&cfs_rq->removed_util_avg)) { | 2938 | if (atomic_long_read(&cfs_rq->removed_util_avg)) { |
| 2922 | long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0); | 2939 | long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0); |
| 2923 | sa->util_avg = max_t(long, sa->util_avg - r, 0); | 2940 | sub_positive(&sa->util_avg, r); |
| 2924 | sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0); | 2941 | sub_positive(&sa->util_sum, r * LOAD_AVG_MAX); |
| 2925 | removed_util = 1; | 2942 | removed_util = 1; |
| 2926 | } | 2943 | } |
| 2927 | 2944 | ||
| @@ -2994,10 +3011,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s | |||
| 2994 | &se->avg, se->on_rq * scale_load_down(se->load.weight), | 3011 | &se->avg, se->on_rq * scale_load_down(se->load.weight), |
| 2995 | cfs_rq->curr == se, NULL); | 3012 | cfs_rq->curr == se, NULL); |
| 2996 | 3013 | ||
| 2997 | cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0); | 3014 | sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg); |
| 2998 | cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0); | 3015 | sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum); |
| 2999 | cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0); | 3016 | sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg); |
| 3000 | cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0); | 3017 | sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum); |
| 3001 | 3018 | ||
| 3002 | cfs_rq_util_change(cfs_rq); | 3019 | cfs_rq_util_change(cfs_rq); |
| 3003 | } | 3020 | } |
| @@ -3246,7 +3263,7 @@ static inline void check_schedstat_required(void) | |||
| 3246 | trace_sched_stat_iowait_enabled() || | 3263 | trace_sched_stat_iowait_enabled() || |
| 3247 | trace_sched_stat_blocked_enabled() || | 3264 | trace_sched_stat_blocked_enabled() || |
| 3248 | trace_sched_stat_runtime_enabled()) { | 3265 | trace_sched_stat_runtime_enabled()) { |
| 3249 | pr_warn_once("Scheduler tracepoints stat_sleep, stat_iowait, " | 3266 | printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, " |
| 3250 | "stat_blocked and stat_runtime require the " | 3267 | "stat_blocked and stat_runtime require the " |
| 3251 | "kernel parameter schedstats=enabled or " | 3268 | "kernel parameter schedstats=enabled or " |
| 3252 | "kernel.sched_schedstats=1\n"); | 3269 | "kernel.sched_schedstats=1\n"); |
| @@ -4185,6 +4202,26 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq) | |||
| 4185 | if (!cfs_bandwidth_used()) | 4202 | if (!cfs_bandwidth_used()) |
| 4186 | return; | 4203 | return; |
| 4187 | 4204 | ||
| 4205 | /* Synchronize hierarchical throttle counter: */ | ||
| 4206 | if (unlikely(!cfs_rq->throttle_uptodate)) { | ||
| 4207 | struct rq *rq = rq_of(cfs_rq); | ||
| 4208 | struct cfs_rq *pcfs_rq; | ||
| 4209 | struct task_group *tg; | ||
| 4210 | |||
| 4211 | cfs_rq->throttle_uptodate = 1; | ||
| 4212 | |||
| 4213 | /* Get closest up-to-date node, because leaves go first: */ | ||
| 4214 | for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) { | ||
| 4215 | pcfs_rq = tg->cfs_rq[cpu_of(rq)]; | ||
| 4216 | if (pcfs_rq->throttle_uptodate) | ||
| 4217 | break; | ||
| 4218 | } | ||
| 4219 | if (tg) { | ||
| 4220 | cfs_rq->throttle_count = pcfs_rq->throttle_count; | ||
| 4221 | cfs_rq->throttled_clock_task = rq_clock_task(rq); | ||
| 4222 | } | ||
| 4223 | } | ||
| 4224 | |||
| 4188 | /* an active group must be handled by the update_curr()->put() path */ | 4225 | /* an active group must be handled by the update_curr()->put() path */ |
| 4189 | if (!cfs_rq->runtime_enabled || cfs_rq->curr) | 4226 | if (!cfs_rq->runtime_enabled || cfs_rq->curr) |
| 4190 | return; | 4227 | return; |
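The throttle_uptodate logic above lazily copies throttle_count from the nearest ancestor cfs_rq that is already in sync, because leaves can be reached before their parents. A tiny standalone C sketch of that walk-up-to-the-nearest-initialized-ancestor pattern (hypothetical struct, not the scheduler code):

#include <stdio.h>

struct node {
        struct node *parent;
        int up_to_date;         /* stands in for cfs_rq->throttle_uptodate */
        int throttle_count;
};

static void sync_from_ancestor(struct node *n)
{
        struct node *p;

        if (n->up_to_date)
                return;

        n->up_to_date = 1;

        /* leaves are visited first, so find the closest up-to-date ancestor */
        for (p = n->parent; p; p = p->parent)
                if (p->up_to_date)
                        break;

        if (p)
                n->throttle_count = p->throttle_count;
}

int main(void)
{
        struct node root = { .up_to_date = 1, .throttle_count = 2 };
        struct node mid  = { .parent = &root };
        struct node leaf = { .parent = &mid };

        sync_from_ancestor(&leaf);
        printf("leaf throttle_count = %d\n", leaf.throttle_count);     /* 2 */
        return 0;
}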
| @@ -4500,15 +4537,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) | |||
| 4500 | 4537 | ||
| 4501 | /* Don't dequeue parent if it has other entities besides us */ | 4538 | /* Don't dequeue parent if it has other entities besides us */ |
| 4502 | if (cfs_rq->load.weight) { | 4539 | if (cfs_rq->load.weight) { |
| 4540 | /* Avoid re-evaluating load for this entity: */ | ||
| 4541 | se = parent_entity(se); | ||
| 4503 | /* | 4542 | /* |
| 4504 | * Bias pick_next to pick a task from this cfs_rq, as | 4543 | * Bias pick_next to pick a task from this cfs_rq, as |
| 4505 | * p is sleeping when it is within its sched_slice. | 4544 | * p is sleeping when it is within its sched_slice. |
| 4506 | */ | 4545 | */ |
| 4507 | if (task_sleep && parent_entity(se)) | 4546 | if (task_sleep && se && !throttled_hierarchy(cfs_rq)) |
| 4508 | set_next_buddy(parent_entity(se)); | 4547 | set_next_buddy(se); |
| 4509 | |||
| 4510 | /* avoid re-evaluating load for this entity */ | ||
| 4511 | se = parent_entity(se); | ||
| 4512 | break; | 4548 | break; |
| 4513 | } | 4549 | } |
| 4514 | flags |= DEQUEUE_SLEEP; | 4550 | flags |= DEQUEUE_SLEEP; |
| @@ -8496,8 +8532,9 @@ void free_fair_sched_group(struct task_group *tg) | |||
| 8496 | 8532 | ||
| 8497 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | 8533 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
| 8498 | { | 8534 | { |
| 8499 | struct cfs_rq *cfs_rq; | ||
| 8500 | struct sched_entity *se; | 8535 | struct sched_entity *se; |
| 8536 | struct cfs_rq *cfs_rq; | ||
| 8537 | struct rq *rq; | ||
| 8501 | int i; | 8538 | int i; |
| 8502 | 8539 | ||
| 8503 | tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); | 8540 | tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL); |
| @@ -8512,6 +8549,8 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | |||
| 8512 | init_cfs_bandwidth(tg_cfs_bandwidth(tg)); | 8549 | init_cfs_bandwidth(tg_cfs_bandwidth(tg)); |
| 8513 | 8550 | ||
| 8514 | for_each_possible_cpu(i) { | 8551 | for_each_possible_cpu(i) { |
| 8552 | rq = cpu_rq(i); | ||
| 8553 | |||
| 8515 | cfs_rq = kzalloc_node(sizeof(struct cfs_rq), | 8554 | cfs_rq = kzalloc_node(sizeof(struct cfs_rq), |
| 8516 | GFP_KERNEL, cpu_to_node(i)); | 8555 | GFP_KERNEL, cpu_to_node(i)); |
| 8517 | if (!cfs_rq) | 8556 | if (!cfs_rq) |
| @@ -8525,7 +8564,10 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | |||
| 8525 | init_cfs_rq(cfs_rq); | 8564 | init_cfs_rq(cfs_rq); |
| 8526 | init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); | 8565 | init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); |
| 8527 | init_entity_runnable_average(se); | 8566 | init_entity_runnable_average(se); |
| 8567 | |||
| 8568 | raw_spin_lock_irq(&rq->lock); | ||
| 8528 | post_init_entity_util_avg(se); | 8569 | post_init_entity_util_avg(se); |
| 8570 | raw_spin_unlock_irq(&rq->lock); | ||
| 8529 | } | 8571 | } |
| 8530 | 8572 | ||
| 8531 | return 1; | 8573 | return 1; |
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index bd12c6c714ec..c5aeedf4e93a 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c | |||
| @@ -127,7 +127,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev, | |||
| 127 | */ | 127 | */ |
| 128 | static void cpuidle_idle_call(void) | 128 | static void cpuidle_idle_call(void) |
| 129 | { | 129 | { |
| 130 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 130 | struct cpuidle_device *dev = cpuidle_get_device(); |
| 131 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | 131 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
| 132 | int next_state, entered_state; | 132 | int next_state, entered_state; |
| 133 | 133 | ||
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 72f1f3087b04..7cbeb92a1cb9 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
| @@ -437,7 +437,7 @@ struct cfs_rq { | |||
| 437 | 437 | ||
| 438 | u64 throttled_clock, throttled_clock_task; | 438 | u64 throttled_clock, throttled_clock_task; |
| 439 | u64 throttled_clock_task_time; | 439 | u64 throttled_clock_task_time; |
| 440 | int throttled, throttle_count; | 440 | int throttled, throttle_count, throttle_uptodate; |
| 441 | struct list_head throttled_list; | 441 | struct list_head throttled_list; |
| 442 | #endif /* CONFIG_CFS_BANDWIDTH */ | 442 | #endif /* CONFIG_CFS_BANDWIDTH */ |
| 443 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | 443 | #endif /* CONFIG_FAIR_GROUP_SCHED */ |
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index 70b3b6a20fb0..78955cbea31c 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h | |||
| @@ -33,6 +33,8 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) | |||
| 33 | # define schedstat_inc(rq, field) do { if (schedstat_enabled()) { (rq)->field++; } } while (0) | 33 | # define schedstat_inc(rq, field) do { if (schedstat_enabled()) { (rq)->field++; } } while (0) |
| 34 | # define schedstat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0) | 34 | # define schedstat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0) |
| 35 | # define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0) | 35 | # define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0) |
| 36 | # define schedstat_val(rq, field) ((schedstat_enabled()) ? (rq)->field : 0) | ||
| 37 | |||
| 36 | #else /* !CONFIG_SCHEDSTATS */ | 38 | #else /* !CONFIG_SCHEDSTATS */ |
| 37 | static inline void | 39 | static inline void |
| 38 | rq_sched_info_arrive(struct rq *rq, unsigned long long delta) | 40 | rq_sched_info_arrive(struct rq *rq, unsigned long long delta) |
| @@ -47,6 +49,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta) | |||
| 47 | # define schedstat_inc(rq, field) do { } while (0) | 49 | # define schedstat_inc(rq, field) do { } while (0) |
| 48 | # define schedstat_add(rq, field, amt) do { } while (0) | 50 | # define schedstat_add(rq, field, amt) do { } while (0) |
| 49 | # define schedstat_set(var, val) do { } while (0) | 51 | # define schedstat_set(var, val) do { } while (0) |
| 52 | # define schedstat_val(rq, field) 0 | ||
| 50 | #endif | 53 | #endif |
| 51 | 54 | ||
| 52 | #ifdef CONFIG_SCHED_INFO | 55 | #ifdef CONFIG_SCHED_INFO |
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 8c7392c4fdbd..e99df0ff1d42 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c | |||
| @@ -425,6 +425,7 @@ void destroy_hrtimer_on_stack(struct hrtimer *timer) | |||
| 425 | { | 425 | { |
| 426 | debug_object_free(timer, &hrtimer_debug_descr); | 426 | debug_object_free(timer, &hrtimer_debug_descr); |
| 427 | } | 427 | } |
| 428 | EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack); | ||
| 428 | 429 | ||
| 429 | #else | 430 | #else |
| 430 | static inline void debug_hrtimer_init(struct hrtimer *timer) { } | 431 | static inline void debug_hrtimer_init(struct hrtimer *timer) { } |
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 780bcbe1d4de..720b7bb01d43 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c | |||
| @@ -198,7 +198,7 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5) | |||
| 198 | if (unlikely(index >= array->map.max_entries)) | 198 | if (unlikely(index >= array->map.max_entries)) |
| 199 | return -E2BIG; | 199 | return -E2BIG; |
| 200 | 200 | ||
| 201 | file = (struct file *)array->ptrs[index]; | 201 | file = READ_ONCE(array->ptrs[index]); |
| 202 | if (unlikely(!file)) | 202 | if (unlikely(!file)) |
| 203 | return -ENOENT; | 203 | return -ENOENT; |
| 204 | 204 | ||
| @@ -247,7 +247,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size) | |||
| 247 | if (unlikely(index >= array->map.max_entries)) | 247 | if (unlikely(index >= array->map.max_entries)) |
| 248 | return -E2BIG; | 248 | return -E2BIG; |
| 249 | 249 | ||
| 250 | file = (struct file *)array->ptrs[index]; | 250 | file = READ_ONCE(array->ptrs[index]); |
| 251 | if (unlikely(!file)) | 251 | if (unlikely(!file)) |
| 252 | return -ENOENT; | 252 | return -ENOENT; |
| 253 | 253 | ||
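Both bpf_trace hunks above replace a plain cast with READ_ONCE() so the pointer slot is loaded exactly once before the NULL check and the use. A standalone GNU C sketch of why that matters (READ_ONCE open-coded; struct and names are made up):

#include <stdio.h>

#define READ_ONCE(x)    (*(const volatile typeof(x) *)&(x))

struct file_like {
        const char *name;
};

static struct file_like *slots[4];      /* stands in for array->ptrs[] */

static const char *slot_name(unsigned int index)
{
        struct file_like *f;

        if (index >= 4)
                return NULL;

        /* one load into a local: the NULL check and the use see the same value */
        f = READ_ONCE(slots[index]);
        if (!f)
                return NULL;

        return f->name;
}

int main(void)
{
        static struct file_like f = { .name = "event0" };
        const char *name;

        slots[1] = &f;
        name = slot_name(1);
        printf("%s\n", name ? name : "(empty)");
        return 0;
}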
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index f96f0383f6c6..ad1d6164e946 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c | |||
| @@ -36,6 +36,10 @@ struct trace_bprintk_fmt { | |||
| 36 | static inline struct trace_bprintk_fmt *lookup_format(const char *fmt) | 36 | static inline struct trace_bprintk_fmt *lookup_format(const char *fmt) |
| 37 | { | 37 | { |
| 38 | struct trace_bprintk_fmt *pos; | 38 | struct trace_bprintk_fmt *pos; |
| 39 | |||
| 40 | if (!fmt) | ||
| 41 | return ERR_PTR(-EINVAL); | ||
| 42 | |||
| 39 | list_for_each_entry(pos, &trace_bprintk_fmt_list, list) { | 43 | list_for_each_entry(pos, &trace_bprintk_fmt_list, list) { |
| 40 | if (!strcmp(pos->fmt, fmt)) | 44 | if (!strcmp(pos->fmt, fmt)) |
| 41 | return pos; | 45 | return pos; |
| @@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end) | |||
| 57 | for (iter = start; iter < end; iter++) { | 61 | for (iter = start; iter < end; iter++) { |
| 58 | struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter); | 62 | struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter); |
| 59 | if (tb_fmt) { | 63 | if (tb_fmt) { |
| 60 | *iter = tb_fmt->fmt; | 64 | if (!IS_ERR(tb_fmt)) |
| 65 | *iter = tb_fmt->fmt; | ||
| 61 | continue; | 66 | continue; |
| 62 | } | 67 | } |
| 63 | 68 | ||
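The lookup_format() fix above returns ERR_PTR(-EINVAL) for a NULL format and makes the caller test IS_ERR() before dereferencing. A standalone C sketch of that errno-in-a-pointer idiom (simplified ERR_PTR/IS_ERR, illustrative table):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_ERRNO       4095

static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Returns the stored format, NULL if not present, or ERR_PTR(-EINVAL)
 * for a NULL argument - mirroring the lookup_format() contract. */
static const char *lookup(const char *fmt, const char *table[], int n)
{
        int i;

        if (!fmt)
                return ERR_PTR(-EINVAL);

        for (i = 0; i < n; i++)
                if (table[i] && !strcmp(table[i], fmt))
                        return table[i];

        return NULL;
}

int main(void)
{
        const char *table[] = { "%s\n", "%d\n" };
        const char *hit = lookup(NULL, table, 2);

        if (IS_ERR(hit))
                puts("caller must check IS_ERR() before using the pointer");
        return 0;
}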
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 77d7d034bac3..b9cfdbfae9aa 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -1841,6 +1841,9 @@ config TEST_BITMAP | |||
| 1841 | 1841 | ||
| 1842 | If unsure, say N. | 1842 | If unsure, say N. |
| 1843 | 1843 | ||
| 1844 | config TEST_UUID | ||
| 1845 | tristate "Test functions located in the uuid module at runtime" | ||
| 1846 | |||
| 1844 | config TEST_RHASHTABLE | 1847 | config TEST_RHASHTABLE |
| 1845 | tristate "Perform selftest on resizable hash table" | 1848 | tristate "Perform selftest on resizable hash table" |
| 1846 | default n | 1849 | default n |
diff --git a/lib/Makefile b/lib/Makefile index 499fb354d627..ff6a7a6c6395 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
| @@ -58,6 +58,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o | |||
| 58 | obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o | 58 | obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o |
| 59 | obj-$(CONFIG_TEST_PRINTF) += test_printf.o | 59 | obj-$(CONFIG_TEST_PRINTF) += test_printf.o |
| 60 | obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o | 60 | obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o |
| 61 | obj-$(CONFIG_TEST_UUID) += test_uuid.o | ||
| 61 | 62 | ||
| 62 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) | 63 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) |
| 63 | CFLAGS_kobject.o += -DDEBUG | 64 | CFLAGS_kobject.o += -DDEBUG |
diff --git a/lib/test_uuid.c b/lib/test_uuid.c new file mode 100644 index 000000000000..547d3127a3cf --- /dev/null +++ b/lib/test_uuid.c | |||
| @@ -0,0 +1,133 @@ | |||
| 1 | /* | ||
| 2 | * Test cases for lib/uuid.c module. | ||
| 3 | */ | ||
| 4 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 5 | |||
| 6 | #include <linux/init.h> | ||
| 7 | #include <linux/kernel.h> | ||
| 8 | #include <linux/module.h> | ||
| 9 | #include <linux/string.h> | ||
| 10 | #include <linux/uuid.h> | ||
| 11 | |||
| 12 | struct test_uuid_data { | ||
| 13 | const char *uuid; | ||
| 14 | uuid_le le; | ||
| 15 | uuid_be be; | ||
| 16 | }; | ||
| 17 | |||
| 18 | static const struct test_uuid_data test_uuid_test_data[] = { | ||
| 19 | { | ||
| 20 | .uuid = "c33f4995-3701-450e-9fbf-206a2e98e576", | ||
| 21 | .le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76), | ||
| 22 | .be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76), | ||
| 23 | }, | ||
| 24 | { | ||
| 25 | .uuid = "64b4371c-77c1-48f9-8221-29f054fc023b", | ||
| 26 | .le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b), | ||
| 27 | .be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b), | ||
| 28 | }, | ||
| 29 | { | ||
| 30 | .uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84", | ||
| 31 | .le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84), | ||
| 32 | .be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84), | ||
| 33 | }, | ||
| 34 | }; | ||
| 35 | |||
| 36 | static const char * const test_uuid_wrong_data[] = { | ||
| 37 | "c33f4995-3701-450e-9fbf206a2e98e576 ", /* no hyphen(s) */ | ||
| 38 | "64b4371c-77c1-48f9-8221-29f054XX023b", /* invalid character(s) */ | ||
| 39 | "0cb4ddff-a545-4401-9d06-688af53e", /* not enough data */ | ||
| 40 | }; | ||
| 41 | |||
| 42 | static unsigned total_tests __initdata; | ||
| 43 | static unsigned failed_tests __initdata; | ||
| 44 | |||
| 45 | static void __init test_uuid_failed(const char *prefix, bool wrong, bool be, | ||
| 46 | const char *data, const char *actual) | ||
| 47 | { | ||
| 48 | pr_err("%s test #%u %s %s data: '%s'\n", | ||
| 49 | prefix, | ||
| 50 | total_tests, | ||
| 51 | wrong ? "passed on wrong" : "failed on", | ||
| 52 | be ? "BE" : "LE", | ||
| 53 | data); | ||
| 54 | if (actual && *actual) | ||
| 55 | pr_err("%s test #%u actual data: '%s'\n", | ||
| 56 | prefix, | ||
| 57 | total_tests, | ||
| 58 | actual); | ||
| 59 | failed_tests++; | ||
| 60 | } | ||
| 61 | |||
| 62 | static void __init test_uuid_test(const struct test_uuid_data *data) | ||
| 63 | { | ||
| 64 | uuid_le le; | ||
| 65 | uuid_be be; | ||
| 66 | char buf[48]; | ||
| 67 | |||
| 68 | /* LE */ | ||
| 69 | total_tests++; | ||
| 70 | if (uuid_le_to_bin(data->uuid, &le)) | ||
| 71 | test_uuid_failed("conversion", false, false, data->uuid, NULL); | ||
| 72 | |||
| 73 | total_tests++; | ||
| 74 | if (uuid_le_cmp(data->le, le)) { | ||
| 75 | sprintf(buf, "%pUl", &le); | ||
| 76 | test_uuid_failed("cmp", false, false, data->uuid, buf); | ||
| 77 | } | ||
| 78 | |||
| 79 | /* BE */ | ||
| 80 | total_tests++; | ||
| 81 | if (uuid_be_to_bin(data->uuid, &be)) | ||
| 82 | test_uuid_failed("conversion", false, true, data->uuid, NULL); | ||
| 83 | |||
| 84 | total_tests++; | ||
| 85 | if (uuid_be_cmp(data->be, be)) { | ||
| 86 | sprintf(buf, "%pUb", &be); | ||
| 87 | test_uuid_failed("cmp", false, true, data->uuid, buf); | ||
| 88 | } | ||
| 89 | } | ||
| 90 | |||
| 91 | static void __init test_uuid_wrong(const char *data) | ||
| 92 | { | ||
| 93 | uuid_le le; | ||
| 94 | uuid_be be; | ||
| 95 | |||
| 96 | /* LE */ | ||
| 97 | total_tests++; | ||
| 98 | if (!uuid_le_to_bin(data, &le)) | ||
| 99 | test_uuid_failed("negative", true, false, data, NULL); | ||
| 100 | |||
| 101 | /* BE */ | ||
| 102 | total_tests++; | ||
| 103 | if (!uuid_be_to_bin(data, &be)) | ||
| 104 | test_uuid_failed("negative", true, true, data, NULL); | ||
| 105 | } | ||
| 106 | |||
| 107 | static int __init test_uuid_init(void) | ||
| 108 | { | ||
| 109 | unsigned int i; | ||
| 110 | |||
| 111 | for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++) | ||
| 112 | test_uuid_test(&test_uuid_test_data[i]); | ||
| 113 | |||
| 114 | for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++) | ||
| 115 | test_uuid_wrong(test_uuid_wrong_data[i]); | ||
| 116 | |||
| 117 | if (failed_tests == 0) | ||
| 118 | pr_info("all %u tests passed\n", total_tests); | ||
| 119 | else | ||
| 120 | pr_err("failed %u out of %u tests\n", failed_tests, total_tests); | ||
| 121 | |||
| 122 | return failed_tests ? -EINVAL : 0; | ||
| 123 | } | ||
| 124 | module_init(test_uuid_init); | ||
| 125 | |||
| 126 | static void __exit test_uuid_exit(void) | ||
| 127 | { | ||
| 128 | /* do nothing */ | ||
| 129 | } | ||
| 130 | module_exit(test_uuid_exit); | ||
| 131 | |||
| 132 | MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); | ||
| 133 | MODULE_LICENSE("Dual BSD/GPL"); | ||
diff --git a/lib/uuid.c b/lib/uuid.c index e116ae5fa00f..37687af77ff8 100644 --- a/lib/uuid.c +++ b/lib/uuid.c | |||
| @@ -106,8 +106,8 @@ static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16]) | |||
| 106 | return -EINVAL; | 106 | return -EINVAL; |
| 107 | 107 | ||
| 108 | for (i = 0; i < 16; i++) { | 108 | for (i = 0; i < 16; i++) { |
| 109 | int hi = hex_to_bin(uuid[si[i]] + 0); | 109 | int hi = hex_to_bin(uuid[si[i] + 0]); |
| 110 | int lo = hex_to_bin(uuid[si[i]] + 1); | 110 | int lo = hex_to_bin(uuid[si[i] + 1]); |
| 111 | 111 | ||
| 112 | b[ei[i]] = (hi << 4) | lo; | 112 | b[ei[i]] = (hi << 4) | lo; |
| 113 | } | 113 | } |
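The lib/uuid.c fix above moves the "+ 0"/"+ 1" inside the array index: the offsets select adjacent characters in the UUID string, not adjacent values of the character that was read. A standalone C sketch of the corrected indexing (straight byte order only; the real code also applies an ei[] table to reorder bytes for uuid_le):

#include <stdio.h>

static int hex_to_bin(char ch)
{
        if (ch >= '0' && ch <= '9')
                return ch - '0';
        if (ch >= 'a' && ch <= 'f')
                return ch - 'a' + 10;
        return -1;
}

int main(void)
{
        /* string offsets of the 16 hex pairs in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx */
        static const int si[16] = { 0, 2, 4, 6, 9, 11, 14, 16,
                                    19, 21, 24, 26, 28, 30, 32, 34 };
        const char *uuid = "c33f4995-3701-450e-9fbf-206a2e98e576";
        unsigned char b[16];
        int i;

        for (i = 0; i < 16; i++) {
                int hi = hex_to_bin(uuid[si[i] + 0]);   /* offset the index, then read */
                int lo = hex_to_bin(uuid[si[i] + 1]);

                b[i] = (hi << 4) | lo;
        }

        printf("first byte: %02x\n", b[0]);     /* c3 */
        return 0;
}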
diff --git a/mm/compaction.c b/mm/compaction.c index 1427366ad673..79bfe0e06907 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
| @@ -441,25 +441,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, | |||
| 441 | 441 | ||
| 442 | /* Found a free page, break it into order-0 pages */ | 442 | /* Found a free page, break it into order-0 pages */ |
| 443 | isolated = split_free_page(page); | 443 | isolated = split_free_page(page); |
| 444 | if (!isolated) | ||
| 445 | break; | ||
| 446 | |||
| 444 | total_isolated += isolated; | 447 | total_isolated += isolated; |
| 448 | cc->nr_freepages += isolated; | ||
| 445 | for (i = 0; i < isolated; i++) { | 449 | for (i = 0; i < isolated; i++) { |
| 446 | list_add(&page->lru, freelist); | 450 | list_add(&page->lru, freelist); |
| 447 | page++; | 451 | page++; |
| 448 | } | 452 | } |
| 449 | 453 | if (!strict && cc->nr_migratepages <= cc->nr_freepages) { | |
| 450 | /* If a page was split, advance to the end of it */ | 454 | blockpfn += isolated; |
| 451 | if (isolated) { | 455 | break; |
| 452 | cc->nr_freepages += isolated; | ||
| 453 | if (!strict && | ||
| 454 | cc->nr_migratepages <= cc->nr_freepages) { | ||
| 455 | blockpfn += isolated; | ||
| 456 | break; | ||
| 457 | } | ||
| 458 | |||
| 459 | blockpfn += isolated - 1; | ||
| 460 | cursor += isolated - 1; | ||
| 461 | continue; | ||
| 462 | } | 456 | } |
| 457 | /* Advance to the end of split page */ | ||
| 458 | blockpfn += isolated - 1; | ||
| 459 | cursor += isolated - 1; | ||
| 460 | continue; | ||
| 463 | 461 | ||
| 464 | isolate_fail: | 462 | isolate_fail: |
| 465 | if (strict) | 463 | if (strict) |
| @@ -469,6 +467,9 @@ isolate_fail: | |||
| 469 | 467 | ||
| 470 | } | 468 | } |
| 471 | 469 | ||
| 470 | if (locked) | ||
| 471 | spin_unlock_irqrestore(&cc->zone->lock, flags); | ||
| 472 | |||
| 472 | /* | 473 | /* |
| 473 | * There is a tiny chance that we have read bogus compound_order(), | 474 | * There is a tiny chance that we have read bogus compound_order(), |
| 474 | * so be careful to not go outside of the pageblock. | 475 | * so be careful to not go outside of the pageblock. |
| @@ -490,9 +491,6 @@ isolate_fail: | |||
| 490 | if (strict && blockpfn < end_pfn) | 491 | if (strict && blockpfn < end_pfn) |
| 491 | total_isolated = 0; | 492 | total_isolated = 0; |
| 492 | 493 | ||
| 493 | if (locked) | ||
| 494 | spin_unlock_irqrestore(&cc->zone->lock, flags); | ||
| 495 | |||
| 496 | /* Update the pageblock-skip if the whole pageblock was scanned */ | 494 | /* Update the pageblock-skip if the whole pageblock was scanned */ |
| 497 | if (blockpfn == end_pfn) | 495 | if (blockpfn == end_pfn) |
| 498 | update_pageblock_skip(cc, valid_page, total_isolated, false); | 496 | update_pageblock_skip(cc, valid_page, total_isolated, false); |
| @@ -1011,6 +1009,7 @@ static void isolate_freepages(struct compact_control *cc) | |||
| 1011 | block_end_pfn = block_start_pfn, | 1009 | block_end_pfn = block_start_pfn, |
| 1012 | block_start_pfn -= pageblock_nr_pages, | 1010 | block_start_pfn -= pageblock_nr_pages, |
| 1013 | isolate_start_pfn = block_start_pfn) { | 1011 | isolate_start_pfn = block_start_pfn) { |
| 1012 | unsigned long isolated; | ||
| 1014 | 1013 | ||
| 1015 | /* | 1014 | /* |
| 1016 | * This can iterate a massively long zone without finding any | 1015 | * This can iterate a massively long zone without finding any |
| @@ -1035,8 +1034,12 @@ static void isolate_freepages(struct compact_control *cc) | |||
| 1035 | continue; | 1034 | continue; |
| 1036 | 1035 | ||
| 1037 | /* Found a block suitable for isolating free pages from. */ | 1036 | /* Found a block suitable for isolating free pages from. */ |
| 1038 | isolate_freepages_block(cc, &isolate_start_pfn, | 1037 | isolated = isolate_freepages_block(cc, &isolate_start_pfn, |
| 1039 | block_end_pfn, freelist, false); | 1038 | block_end_pfn, freelist, false); |
| 1039 | /* If isolation failed early, do not continue needlessly */ | ||
| 1040 | if (!isolated && isolate_start_pfn < block_end_pfn && | ||
| 1041 | cc->nr_migratepages > cc->nr_freepages) | ||
| 1042 | break; | ||
| 1040 | 1043 | ||
| 1041 | /* | 1044 | /* |
| 1042 | * If we isolated enough freepages, or aborted due to async | 1045 | * If we isolated enough freepages, or aborted due to async |
diff --git a/mm/fadvise.c b/mm/fadvise.c index b8024fa7101d..6c707bfe02fd 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c | |||
| @@ -126,6 +126,17 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice) | |||
| 126 | */ | 126 | */ |
| 127 | start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT; | 127 | start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT; |
| 128 | end_index = (endbyte >> PAGE_SHIFT); | 128 | end_index = (endbyte >> PAGE_SHIFT); |
| 129 | if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) { | ||
| 130 | /* First page is tricky as 0 - 1 = -1, but pgoff_t | ||
| 131 | * is unsigned, so the end_index >= start_index | ||
| 132 | * check below would be true and we'll discard the whole | ||
| 133 | * file cache which is not what was asked. | ||
| 134 | */ | ||
| 135 | if (end_index == 0) | ||
| 136 | break; | ||
| 137 | |||
| 138 | end_index--; | ||
| 139 | } | ||
| 129 | 140 | ||
| 130 | if (end_index >= start_index) { | 141 | if (end_index >= start_index) { |
| 131 | unsigned long count = invalidate_mapping_pages(mapping, | 142 | unsigned long count = invalidate_mapping_pages(mapping, |
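The fadvise hunk above guards against an unsigned underflow: when the range ends inside the first page, end_index is 0 and decrementing it would wrap, making the whole file cache eligible for invalidation. A standalone C sketch of the guarded computation (4 KiB page size assumed):

#include <stdio.h>

#define PAGE_SHIFT      12                      /* assume 4 KiB pages */
#define PAGE_MASK       (~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
        unsigned long endbyte = 100;                            /* ends inside page 0 */
        unsigned long end_index = endbyte >> PAGE_SHIFT;        /* 0 */

        if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
                /* endbyte is not the last byte of a page: end_index must become
                 * inclusive, but 0 - 1 would wrap the unsigned index */
                if (end_index == 0) {
                        puts("range ends in the first page: nothing to invalidate");
                        return 0;
                }
                end_index--;
        }

        printf("end_index = %lu\n", end_index);
        return 0;
}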
diff --git a/mm/filemap.c b/mm/filemap.c index 00ae878b2a38..20f3b1f33f0e 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
| @@ -2186,7 +2186,7 @@ repeat: | |||
| 2186 | if (file->f_ra.mmap_miss > 0) | 2186 | if (file->f_ra.mmap_miss > 0) |
| 2187 | file->f_ra.mmap_miss--; | 2187 | file->f_ra.mmap_miss--; |
| 2188 | addr = address + (page->index - vmf->pgoff) * PAGE_SIZE; | 2188 | addr = address + (page->index - vmf->pgoff) * PAGE_SIZE; |
| 2189 | do_set_pte(vma, addr, page, pte, false, false, true); | 2189 | do_set_pte(vma, addr, page, pte, false, false); |
| 2190 | unlock_page(page); | 2190 | unlock_page(page); |
| 2191 | goto next; | 2191 | goto next; |
| 2192 | unlock: | 2192 | unlock: |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index d26162e81fea..c1f3c0be150a 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
| @@ -832,8 +832,27 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg) | |||
| 832 | * Only the process that called mmap() has reserves for | 832 | * Only the process that called mmap() has reserves for |
| 833 | * private mappings. | 833 | * private mappings. |
| 834 | */ | 834 | */ |
| 835 | if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) | 835 | if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { |
| 836 | return true; | 836 | /* |
| 837 | * Like the shared case above, a hole punch or truncate | ||
| 838 | * could have been performed on the private mapping. | ||
| 839 | * Examine the value of chg to determine if reserves | ||
| 840 | * actually exist or were previously consumed. | ||
| 841 | * Very Subtle - The value of chg comes from a previous | ||
| 842 | * call to vma_needs_reserves(). The reserve map for | ||
| 843 | * private mappings has different (opposite) semantics | ||
| 844 | * than that of shared mappings. vma_needs_reserves() | ||
| 845 | * has already taken this difference in semantics into | ||
| 846 | * account. Therefore, the meaning of chg is the same | ||
| 847 | * as in the shared case above. Code could easily be | ||
| 848 | * combined, but keeping it separate draws attention to | ||
| 849 | * subtle differences. | ||
| 850 | */ | ||
| 851 | if (chg) | ||
| 852 | return false; | ||
| 853 | else | ||
| 854 | return true; | ||
| 855 | } | ||
| 837 | 856 | ||
| 838 | return false; | 857 | return false; |
| 839 | } | 858 | } |
| @@ -1011,6 +1030,7 @@ static void destroy_compound_gigantic_page(struct page *page, | |||
| 1011 | int nr_pages = 1 << order; | 1030 | int nr_pages = 1 << order; |
| 1012 | struct page *p = page + 1; | 1031 | struct page *p = page + 1; |
| 1013 | 1032 | ||
| 1033 | atomic_set(compound_mapcount_ptr(page), 0); | ||
| 1014 | for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { | 1034 | for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) { |
| 1015 | clear_compound_head(p); | 1035 | clear_compound_head(p); |
| 1016 | set_page_refcounted(p); | 1036 | set_page_refcounted(p); |
| @@ -1816,6 +1836,25 @@ static long __vma_reservation_common(struct hstate *h, | |||
| 1816 | 1836 | ||
| 1817 | if (vma->vm_flags & VM_MAYSHARE) | 1837 | if (vma->vm_flags & VM_MAYSHARE) |
| 1818 | return ret; | 1838 | return ret; |
| 1839 | else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) { | ||
| 1840 | /* | ||
| 1841 | * In most cases, reserves always exist for private mappings. | ||
| 1842 | * However, a file associated with mapping could have been | ||
| 1843 | * hole punched or truncated after reserves were consumed. | ||
| 1844 | * A subsequent fault on such a range will not use reserves. | ||
| 1845 | * Subtle - The reserve map for private mappings has the | ||
| 1846 | * opposite meaning than that of shared mappings. If NO | ||
| 1847 | * entry is in the reserve map, it means a reservation exists. | ||
| 1848 | * If an entry exists in the reserve map, it means the | ||
| 1849 | * reservation has already been consumed. As a result, the | ||
| 1850 | * return value of this routine is the opposite of the | ||
| 1851 | * value returned from reserve map manipulation routines above. | ||
| 1852 | */ | ||
| 1853 | if (ret) | ||
| 1854 | return 0; | ||
| 1855 | else | ||
| 1856 | return 1; | ||
| 1857 | } | ||
| 1819 | else | 1858 | else |
| 1820 | return ret < 0 ? ret : 0; | 1859 | return ret < 0 ? ret : 0; |
| 1821 | } | 1860 | } |
| @@ -4190,7 +4229,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) | |||
| 4190 | if (saddr) { | 4229 | if (saddr) { |
| 4191 | spte = huge_pte_offset(svma->vm_mm, saddr); | 4230 | spte = huge_pte_offset(svma->vm_mm, saddr); |
| 4192 | if (spte) { | 4231 | if (spte) { |
| 4193 | mm_inc_nr_pmds(mm); | ||
| 4194 | get_page(virt_to_page(spte)); | 4232 | get_page(virt_to_page(spte)); |
| 4195 | break; | 4233 | break; |
| 4196 | } | 4234 | } |
| @@ -4205,9 +4243,9 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) | |||
| 4205 | if (pud_none(*pud)) { | 4243 | if (pud_none(*pud)) { |
| 4206 | pud_populate(mm, pud, | 4244 | pud_populate(mm, pud, |
| 4207 | (pmd_t *)((unsigned long)spte & PAGE_MASK)); | 4245 | (pmd_t *)((unsigned long)spte & PAGE_MASK)); |
| 4246 | mm_inc_nr_pmds(mm); | ||
| 4208 | } else { | 4247 | } else { |
| 4209 | put_page(virt_to_page(spte)); | 4248 | put_page(virt_to_page(spte)); |
| 4210 | mm_inc_nr_pmds(mm); | ||
| 4211 | } | 4249 | } |
| 4212 | spin_unlock(ptl); | 4250 | spin_unlock(ptl); |
| 4213 | out: | 4251 | out: |
diff --git a/mm/internal.h b/mm/internal.h index a37e5b6f9d25..2524ec880e24 100644 --- a/mm/internal.h +++ b/mm/internal.h | |||
| @@ -24,7 +24,8 @@ | |||
| 24 | */ | 24 | */ |
| 25 | #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\ | 25 | #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\ |
| 26 | __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ | 26 | __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ |
| 27 | __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC) | 27 | __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\ |
| 28 | __GFP_ATOMIC) | ||
| 28 | 29 | ||
| 29 | /* The GFP flags allowed during early boot */ | 30 | /* The GFP flags allowed during early boot */ |
| 30 | #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS)) | 31 | #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS)) |
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 18b6a2b8d183..6845f9294696 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
| @@ -508,7 +508,7 @@ void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags) | |||
| 508 | kasan_kmalloc(cache, object, cache->object_size, flags); | 508 | kasan_kmalloc(cache, object, cache->object_size, flags); |
| 509 | } | 509 | } |
| 510 | 510 | ||
| 511 | void kasan_poison_slab_free(struct kmem_cache *cache, void *object) | 511 | static void kasan_poison_slab_free(struct kmem_cache *cache, void *object) |
| 512 | { | 512 | { |
| 513 | unsigned long size = cache->object_size; | 513 | unsigned long size = cache->object_size; |
| 514 | unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE); | 514 | unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE); |
| @@ -626,7 +626,7 @@ void kasan_krealloc(const void *object, size_t size, gfp_t flags) | |||
| 626 | kasan_kmalloc(page->slab_cache, object, size, flags); | 626 | kasan_kmalloc(page->slab_cache, object, size, flags); |
| 627 | } | 627 | } |
| 628 | 628 | ||
| 629 | void kasan_kfree(void *ptr) | 629 | void kasan_poison_kfree(void *ptr) |
| 630 | { | 630 | { |
| 631 | struct page *page; | 631 | struct page *page; |
| 632 | 632 | ||
| @@ -636,7 +636,7 @@ void kasan_kfree(void *ptr) | |||
| 636 | kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), | 636 | kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page), |
| 637 | KASAN_FREE_PAGE); | 637 | KASAN_FREE_PAGE); |
| 638 | else | 638 | else |
| 639 | kasan_slab_free(page->slab_cache, ptr); | 639 | kasan_poison_slab_free(page->slab_cache, ptr); |
| 640 | } | 640 | } |
| 641 | 641 | ||
| 642 | void kasan_kfree_large(const void *ptr) | 642 | void kasan_kfree_large(const void *ptr) |
| @@ -763,8 +763,8 @@ static int kasan_mem_notifier(struct notifier_block *nb, | |||
| 763 | 763 | ||
| 764 | static int __init kasan_memhotplug_init(void) | 764 | static int __init kasan_memhotplug_init(void) |
| 765 | { | 765 | { |
| 766 | pr_err("WARNING: KASAN doesn't support memory hot-add\n"); | 766 | pr_info("WARNING: KASAN doesn't support memory hot-add\n"); |
| 767 | pr_err("Memory hot-add will be disabled\n"); | 767 | pr_info("Memory hot-add will be disabled\n"); |
| 768 | 768 | ||
| 769 | hotplug_memory_notifier(kasan_mem_notifier, 0); | 769 | hotplug_memory_notifier(kasan_mem_notifier, 0); |
| 770 | 770 | ||
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index e6429926e957..04320d3adbef 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
| @@ -307,8 +307,10 @@ static void hex_dump_object(struct seq_file *seq, | |||
| 307 | len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE); | 307 | len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE); |
| 308 | 308 | ||
| 309 | seq_printf(seq, " hex dump (first %zu bytes):\n", len); | 309 | seq_printf(seq, " hex dump (first %zu bytes):\n", len); |
| 310 | kasan_disable_current(); | ||
| 310 | seq_hex_dump(seq, " ", DUMP_PREFIX_NONE, HEX_ROW_SIZE, | 311 | seq_hex_dump(seq, " ", DUMP_PREFIX_NONE, HEX_ROW_SIZE, |
| 311 | HEX_GROUP_SIZE, ptr, len, HEX_ASCII); | 312 | HEX_GROUP_SIZE, ptr, len, HEX_ASCII); |
| 313 | kasan_enable_current(); | ||
| 312 | } | 314 | } |
| 313 | 315 | ||
| 314 | /* | 316 | /* |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 925b431f3f03..ac8664db3823 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -1608,7 +1608,7 @@ static void memcg_oom_recover(struct mem_cgroup *memcg) | |||
| 1608 | 1608 | ||
| 1609 | static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) | 1609 | static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) |
| 1610 | { | 1610 | { |
| 1611 | if (!current->memcg_may_oom || current->memcg_in_oom) | 1611 | if (!current->memcg_may_oom) |
| 1612 | return; | 1612 | return; |
| 1613 | /* | 1613 | /* |
| 1614 | * We are in the middle of the charge context here, so we | 1614 | * We are in the middle of the charge context here, so we |
| @@ -2896,6 +2896,7 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) | |||
| 2896 | * ordering is imposed by list_lru_node->lock taken by | 2896 | * ordering is imposed by list_lru_node->lock taken by |
| 2897 | * memcg_drain_all_list_lrus(). | 2897 | * memcg_drain_all_list_lrus(). |
| 2898 | */ | 2898 | */ |
| 2899 | rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */ | ||
| 2899 | css_for_each_descendant_pre(css, &memcg->css) { | 2900 | css_for_each_descendant_pre(css, &memcg->css) { |
| 2900 | child = mem_cgroup_from_css(css); | 2901 | child = mem_cgroup_from_css(css); |
| 2901 | BUG_ON(child->kmemcg_id != kmemcg_id); | 2902 | BUG_ON(child->kmemcg_id != kmemcg_id); |
| @@ -2903,6 +2904,8 @@ static void memcg_offline_kmem(struct mem_cgroup *memcg) | |||
| 2903 | if (!memcg->use_hierarchy) | 2904 | if (!memcg->use_hierarchy) |
| 2904 | break; | 2905 | break; |
| 2905 | } | 2906 | } |
| 2907 | rcu_read_unlock(); | ||
| 2908 | |||
| 2906 | memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); | 2909 | memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id); |
| 2907 | 2910 | ||
| 2908 | memcg_free_cache_id(kmemcg_id); | 2911 | memcg_free_cache_id(kmemcg_id); |
| @@ -4200,7 +4203,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) | |||
| 4200 | return &memcg->css; | 4203 | return &memcg->css; |
| 4201 | fail: | 4204 | fail: |
| 4202 | mem_cgroup_free(memcg); | 4205 | mem_cgroup_free(memcg); |
| 4203 | return NULL; | 4206 | return ERR_PTR(-ENOMEM); |
| 4204 | } | 4207 | } |
| 4205 | 4208 | ||
| 4206 | static int | 4209 | static int |
| @@ -5541,6 +5544,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) | |||
| 5541 | struct mem_cgroup *memcg; | 5544 | struct mem_cgroup *memcg; |
| 5542 | unsigned int nr_pages; | 5545 | unsigned int nr_pages; |
| 5543 | bool compound; | 5546 | bool compound; |
| 5547 | unsigned long flags; | ||
| 5544 | 5548 | ||
| 5545 | VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); | 5549 | VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); |
| 5546 | VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); | 5550 | VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); |
| @@ -5571,10 +5575,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) | |||
| 5571 | 5575 | ||
| 5572 | commit_charge(newpage, memcg, false); | 5576 | commit_charge(newpage, memcg, false); |
| 5573 | 5577 | ||
| 5574 | local_irq_disable(); | 5578 | local_irq_save(flags); |
| 5575 | mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); | 5579 | mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); |
| 5576 | memcg_check_events(memcg, newpage); | 5580 | memcg_check_events(memcg, newpage); |
| 5577 | local_irq_enable(); | 5581 | local_irq_restore(flags); |
| 5578 | } | 5582 | } |
| 5579 | 5583 | ||
| 5580 | DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); | 5584 | DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); |
diff --git a/mm/memory.c b/mm/memory.c index 15322b73636b..cd1f29e4897e 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -2877,7 +2877,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address, | |||
| 2877 | * vm_ops->map_pages. | 2877 | * vm_ops->map_pages. |
| 2878 | */ | 2878 | */ |
| 2879 | void do_set_pte(struct vm_area_struct *vma, unsigned long address, | 2879 | void do_set_pte(struct vm_area_struct *vma, unsigned long address, |
| 2880 | struct page *page, pte_t *pte, bool write, bool anon, bool old) | 2880 | struct page *page, pte_t *pte, bool write, bool anon) |
| 2881 | { | 2881 | { |
| 2882 | pte_t entry; | 2882 | pte_t entry; |
| 2883 | 2883 | ||
| @@ -2885,8 +2885,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address, | |||
| 2885 | entry = mk_pte(page, vma->vm_page_prot); | 2885 | entry = mk_pte(page, vma->vm_page_prot); |
| 2886 | if (write) | 2886 | if (write) |
| 2887 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); | 2887 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
| 2888 | if (old) | ||
| 2889 | entry = pte_mkold(entry); | ||
| 2890 | if (anon) { | 2888 | if (anon) { |
| 2891 | inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); | 2889 | inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES); |
| 2892 | page_add_new_anon_rmap(page, vma, address, false); | 2890 | page_add_new_anon_rmap(page, vma, address, false); |
| @@ -2900,16 +2898,8 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address, | |||
| 2900 | update_mmu_cache(vma, address, pte); | 2898 | update_mmu_cache(vma, address, pte); |
| 2901 | } | 2899 | } |
| 2902 | 2900 | ||
| 2903 | /* | ||
| 2904 | * If architecture emulates "accessed" or "young" bit without HW support, | ||
| 2905 | * there is no much gain with fault_around. | ||
| 2906 | */ | ||
| 2907 | static unsigned long fault_around_bytes __read_mostly = | 2901 | static unsigned long fault_around_bytes __read_mostly = |
| 2908 | #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | ||
| 2909 | PAGE_SIZE; | ||
| 2910 | #else | ||
| 2911 | rounddown_pow_of_two(65536); | 2902 | rounddown_pow_of_two(65536); |
| 2912 | #endif | ||
| 2913 | 2903 | ||
| 2914 | #ifdef CONFIG_DEBUG_FS | 2904 | #ifdef CONFIG_DEBUG_FS |
| 2915 | static int fault_around_bytes_get(void *data, u64 *val) | 2905 | static int fault_around_bytes_get(void *data, u64 *val) |
| @@ -3032,20 +3022,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3032 | */ | 3022 | */ |
| 3033 | if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { | 3023 | if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) { |
| 3034 | pte = pte_offset_map_lock(mm, pmd, address, &ptl); | 3024 | pte = pte_offset_map_lock(mm, pmd, address, &ptl); |
| 3035 | if (!pte_same(*pte, orig_pte)) | ||
| 3036 | goto unlock_out; | ||
| 3037 | do_fault_around(vma, address, pte, pgoff, flags); | 3025 | do_fault_around(vma, address, pte, pgoff, flags); |
| 3038 | /* Check if the fault is handled by faultaround */ | 3026 | if (!pte_same(*pte, orig_pte)) |
| 3039 | if (!pte_same(*pte, orig_pte)) { | ||
| 3040 | /* | ||
| 3041 | * Faultaround produce old pte, but the pte we've | ||
| 3042 | * handler fault for should be young. | ||
| 3043 | */ | ||
| 3044 | pte_t entry = pte_mkyoung(*pte); | ||
| 3045 | if (ptep_set_access_flags(vma, address, pte, entry, 0)) | ||
| 3046 | update_mmu_cache(vma, address, pte); | ||
| 3047 | goto unlock_out; | 3027 | goto unlock_out; |
| 3048 | } | ||
| 3049 | pte_unmap_unlock(pte, ptl); | 3028 | pte_unmap_unlock(pte, ptl); |
| 3050 | } | 3029 | } |
| 3051 | 3030 | ||
| @@ -3060,7 +3039,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3060 | put_page(fault_page); | 3039 | put_page(fault_page); |
| 3061 | return ret; | 3040 | return ret; |
| 3062 | } | 3041 | } |
| 3063 | do_set_pte(vma, address, fault_page, pte, false, false, false); | 3042 | do_set_pte(vma, address, fault_page, pte, false, false); |
| 3064 | unlock_page(fault_page); | 3043 | unlock_page(fault_page); |
| 3065 | unlock_out: | 3044 | unlock_out: |
| 3066 | pte_unmap_unlock(pte, ptl); | 3045 | pte_unmap_unlock(pte, ptl); |
| @@ -3111,7 +3090,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3111 | } | 3090 | } |
| 3112 | goto uncharge_out; | 3091 | goto uncharge_out; |
| 3113 | } | 3092 | } |
| 3114 | do_set_pte(vma, address, new_page, pte, true, true, false); | 3093 | do_set_pte(vma, address, new_page, pte, true, true); |
| 3115 | mem_cgroup_commit_charge(new_page, memcg, false, false); | 3094 | mem_cgroup_commit_charge(new_page, memcg, false, false); |
| 3116 | lru_cache_add_active_or_unevictable(new_page, vma); | 3095 | lru_cache_add_active_or_unevictable(new_page, vma); |
| 3117 | pte_unmap_unlock(pte, ptl); | 3096 | pte_unmap_unlock(pte, ptl); |
| @@ -3164,7 +3143,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 3164 | put_page(fault_page); | 3143 | put_page(fault_page); |
| 3165 | return ret; | 3144 | return ret; |
| 3166 | } | 3145 | } |
| 3167 | do_set_pte(vma, address, fault_page, pte, true, false, false); | 3146 | do_set_pte(vma, address, fault_page, pte, true, false); |
| 3168 | pte_unmap_unlock(pte, ptl); | 3147 | pte_unmap_unlock(pte, ptl); |
| 3169 | 3148 | ||
| 3170 | if (set_page_dirty(fault_page)) | 3149 | if (set_page_dirty(fault_page)) |
diff --git a/mm/mempool.c b/mm/mempool.c index 9e075f829d0d..8f65464da5de 100644 --- a/mm/mempool.c +++ b/mm/mempool.c | |||
| @@ -104,20 +104,16 @@ static inline void poison_element(mempool_t *pool, void *element) | |||
| 104 | 104 | ||
| 105 | static void kasan_poison_element(mempool_t *pool, void *element) | 105 | static void kasan_poison_element(mempool_t *pool, void *element) |
| 106 | { | 106 | { |
| 107 | if (pool->alloc == mempool_alloc_slab) | 107 | if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) |
| 108 | kasan_poison_slab_free(pool->pool_data, element); | 108 | kasan_poison_kfree(element); |
| 109 | if (pool->alloc == mempool_kmalloc) | ||
| 110 | kasan_kfree(element); | ||
| 111 | if (pool->alloc == mempool_alloc_pages) | 109 | if (pool->alloc == mempool_alloc_pages) |
| 112 | kasan_free_pages(element, (unsigned long)pool->pool_data); | 110 | kasan_free_pages(element, (unsigned long)pool->pool_data); |
| 113 | } | 111 | } |
| 114 | 112 | ||
| 115 | static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags) | 113 | static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags) |
| 116 | { | 114 | { |
| 117 | if (pool->alloc == mempool_alloc_slab) | 115 | if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) |
| 118 | kasan_slab_alloc(pool->pool_data, element, flags); | 116 | kasan_unpoison_slab(element); |
| 119 | if (pool->alloc == mempool_kmalloc) | ||
| 120 | kasan_krealloc(element, (size_t)pool->pool_data, flags); | ||
| 121 | if (pool->alloc == mempool_alloc_pages) | 117 | if (pool->alloc == mempool_alloc_pages) |
| 122 | kasan_alloc_pages(element, (unsigned long)pool->pool_data); | 118 | kasan_alloc_pages(element, (unsigned long)pool->pool_data); |
| 123 | } | 119 | } |
diff --git a/mm/migrate.c b/mm/migrate.c index 9baf41c877ff..bd3fdc202e8b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
| @@ -431,6 +431,7 @@ int migrate_page_move_mapping(struct address_space *mapping, | |||
| 431 | 431 | ||
| 432 | return MIGRATEPAGE_SUCCESS; | 432 | return MIGRATEPAGE_SUCCESS; |
| 433 | } | 433 | } |
| 434 | EXPORT_SYMBOL(migrate_page_move_mapping); | ||
| 434 | 435 | ||
| 435 | /* | 436 | /* |
| 436 | * The expected number of remaining references is the same as that | 437 | * The expected number of remaining references is the same as that |
| @@ -586,6 +587,7 @@ void migrate_page_copy(struct page *newpage, struct page *page) | |||
| 586 | 587 | ||
| 587 | mem_cgroup_migrate(page, newpage); | 588 | mem_cgroup_migrate(page, newpage); |
| 588 | } | 589 | } |
| 590 | EXPORT_SYMBOL(migrate_page_copy); | ||
| 589 | 591 | ||
| 590 | /************************************************************ | 592 | /************************************************************ |
| 591 | * Migration functions | 593 | * Migration functions |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index dfb1ab61fb23..ddf74487f848 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
| @@ -474,13 +474,8 @@ static bool __oom_reap_task(struct task_struct *tsk) | |||
| 474 | p = find_lock_task_mm(tsk); | 474 | p = find_lock_task_mm(tsk); |
| 475 | if (!p) | 475 | if (!p) |
| 476 | goto unlock_oom; | 476 | goto unlock_oom; |
| 477 | |||
| 478 | mm = p->mm; | 477 | mm = p->mm; |
| 479 | if (!atomic_inc_not_zero(&mm->mm_users)) { | 478 | atomic_inc(&mm->mm_users); |
| 480 | task_unlock(p); | ||
| 481 | goto unlock_oom; | ||
| 482 | } | ||
| 483 | |||
| 484 | task_unlock(p); | 479 | task_unlock(p); |
| 485 | 480 | ||
| 486 | if (!down_read_trylock(&mm->mmap_sem)) { | 481 | if (!down_read_trylock(&mm->mmap_sem)) { |
| @@ -625,8 +620,6 @@ void try_oom_reaper(struct task_struct *tsk) | |||
| 625 | if (atomic_read(&mm->mm_users) > 1) { | 620 | if (atomic_read(&mm->mm_users) > 1) { |
| 626 | rcu_read_lock(); | 621 | rcu_read_lock(); |
| 627 | for_each_process(p) { | 622 | for_each_process(p) { |
| 628 | bool exiting; | ||
| 629 | |||
| 630 | if (!process_shares_mm(p, mm)) | 623 | if (!process_shares_mm(p, mm)) |
| 631 | continue; | 624 | continue; |
| 632 | if (fatal_signal_pending(p)) | 625 | if (fatal_signal_pending(p)) |
| @@ -636,10 +629,7 @@ void try_oom_reaper(struct task_struct *tsk) | |||
| 636 | * If the task is exiting make sure the whole thread group | 629 | * If the task is exiting make sure the whole thread group |
| 637 | * is exiting and cannot acces mm anymore. | 630 | * is exiting and cannot acces mm anymore. |
| 638 | */ | 631 | */ |
| 639 | spin_lock_irq(&p->sighand->siglock); | 632 | if (signal_group_exit(p->signal)) |
| 640 | exiting = signal_group_exit(p->signal); | ||
| 641 | spin_unlock_irq(&p->sighand->siglock); | ||
| 642 | if (exiting) | ||
| 643 | continue; | 633 | continue; |
| 644 | 634 | ||
| 645 | /* Give up */ | 635 | /* Give up */ |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index b9956fdee8f5..e2481949494c 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
| @@ -373,8 +373,9 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc) | |||
| 373 | struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc); | 373 | struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc); |
| 374 | unsigned long bytes = vm_dirty_bytes; | 374 | unsigned long bytes = vm_dirty_bytes; |
| 375 | unsigned long bg_bytes = dirty_background_bytes; | 375 | unsigned long bg_bytes = dirty_background_bytes; |
| 376 | unsigned long ratio = vm_dirty_ratio; | 376 | /* convert ratios to per-PAGE_SIZE for higher precision */ |
| 377 | unsigned long bg_ratio = dirty_background_ratio; | 377 | unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100; |
| 378 | unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100; | ||
| 378 | unsigned long thresh; | 379 | unsigned long thresh; |
| 379 | unsigned long bg_thresh; | 380 | unsigned long bg_thresh; |
| 380 | struct task_struct *tsk; | 381 | struct task_struct *tsk; |
| @@ -386,26 +387,28 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc) | |||
| 386 | /* | 387 | /* |
| 387 | * The byte settings can't be applied directly to memcg | 388 | * The byte settings can't be applied directly to memcg |
| 388 | * domains. Convert them to ratios by scaling against | 389 | * domains. Convert them to ratios by scaling against |
| 389 | * globally available memory. | 390 | * globally available memory. As the ratios are in |
| 391 | * per-PAGE_SIZE, they can be obtained by dividing bytes by | ||
| 392 | * number of pages. | ||
| 390 | */ | 393 | */ |
| 391 | if (bytes) | 394 | if (bytes) |
| 392 | ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 / | 395 | ratio = min(DIV_ROUND_UP(bytes, global_avail), |
| 393 | global_avail, 100UL); | 396 | PAGE_SIZE); |
| 394 | if (bg_bytes) | 397 | if (bg_bytes) |
| 395 | bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 / | 398 | bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail), |
| 396 | global_avail, 100UL); | 399 | PAGE_SIZE); |
| 397 | bytes = bg_bytes = 0; | 400 | bytes = bg_bytes = 0; |
| 398 | } | 401 | } |
| 399 | 402 | ||
| 400 | if (bytes) | 403 | if (bytes) |
| 401 | thresh = DIV_ROUND_UP(bytes, PAGE_SIZE); | 404 | thresh = DIV_ROUND_UP(bytes, PAGE_SIZE); |
| 402 | else | 405 | else |
| 403 | thresh = (ratio * available_memory) / 100; | 406 | thresh = (ratio * available_memory) / PAGE_SIZE; |
| 404 | 407 | ||
| 405 | if (bg_bytes) | 408 | if (bg_bytes) |
| 406 | bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE); | 409 | bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE); |
| 407 | else | 410 | else |
| 408 | bg_thresh = (bg_ratio * available_memory) / 100; | 411 | bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE; |
| 409 | 412 | ||
| 410 | if (bg_thresh >= thresh) | 413 | if (bg_thresh >= thresh) |
| 411 | bg_thresh = thresh / 2; | 414 | bg_thresh = thresh / 2; |
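The domain_dirty_limits() change above switches the internal ratio representation from whole percentages to per-PAGE_SIZE fractions, which mainly matters when vm_dirty_bytes / dirty_background_bytes are converted into ratios for a memcg domain: a percentage can only express multiples of 1% of available memory. A minimal userspace sketch of the arithmetic, with made-up numbers (PAGE_SIZE and DIV_ROUND_UP are redefined locally here, not taken from the kernel):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long global_avail = 4000000;		/* pages, ~15 GiB (assumed) */
	unsigned long available_memory = global_avail;	/* pages */
	unsigned long bytes = 200UL << 20;		/* per-domain limit: 200 MiB */

	/* old scheme: ratio is a whole percentage (granularity 1/100) */
	unsigned long old_ratio = DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 / global_avail;
	if (old_ratio > 100)
		old_ratio = 100;
	unsigned long old_thresh = old_ratio * available_memory / 100;

	/* new scheme: ratio is per-PAGE_SIZE (granularity 1/4096) */
	unsigned long new_ratio = DIV_ROUND_UP(bytes, global_avail);
	if (new_ratio > PAGE_SIZE)
		new_ratio = PAGE_SIZE;
	unsigned long new_thresh = new_ratio * available_memory / PAGE_SIZE;

	printf("requested limit: %lu pages\n", bytes / PAGE_SIZE);	/* 51200 */
	printf("old thresh:      %lu pages (ratio %lu%%)\n", old_thresh, old_ratio);
	printf("new thresh:      %lu pages (ratio %lu/4096)\n", new_thresh, new_ratio);
	return 0;
}

With these illustrative numbers the old conversion rounds a 200 MiB limit down to 1% of available memory (about 156 MiB), while the per-PAGE_SIZE ratio lands within roughly 1% of the requested value.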
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f8f3bfc435ee..6903b695ebae 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -656,6 +656,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page, | |||
| 656 | return; | 656 | return; |
| 657 | 657 | ||
| 658 | page_ext = lookup_page_ext(page); | 658 | page_ext = lookup_page_ext(page); |
| 659 | if (unlikely(!page_ext)) | ||
| 660 | return; | ||
| 661 | |||
| 659 | __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); | 662 | __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); |
| 660 | 663 | ||
| 661 | INIT_LIST_HEAD(&page->lru); | 664 | INIT_LIST_HEAD(&page->lru); |
| @@ -673,6 +676,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page, | |||
| 673 | return; | 676 | return; |
| 674 | 677 | ||
| 675 | page_ext = lookup_page_ext(page); | 678 | page_ext = lookup_page_ext(page); |
| 679 | if (unlikely(!page_ext)) | ||
| 680 | return; | ||
| 681 | |||
| 676 | __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); | 682 | __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags); |
| 677 | 683 | ||
| 678 | set_page_private(page, 0); | 684 | set_page_private(page, 0); |
| @@ -2609,11 +2615,12 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, | |||
| 2609 | page = list_last_entry(list, struct page, lru); | 2615 | page = list_last_entry(list, struct page, lru); |
| 2610 | else | 2616 | else |
| 2611 | page = list_first_entry(list, struct page, lru); | 2617 | page = list_first_entry(list, struct page, lru); |
| 2612 | } while (page && check_new_pcp(page)); | ||
| 2613 | 2618 | ||
| 2614 | __dec_zone_state(zone, NR_ALLOC_BATCH); | 2619 | __dec_zone_state(zone, NR_ALLOC_BATCH); |
| 2615 | list_del(&page->lru); | 2620 | list_del(&page->lru); |
| 2616 | pcp->count--; | 2621 | pcp->count--; |
| 2622 | |||
| 2623 | } while (check_new_pcp(page)); | ||
| 2617 | } else { | 2624 | } else { |
| 2618 | /* | 2625 | /* |
| 2619 | * We most definitely don't want callers attempting to | 2626 | * We most definitely don't want callers attempting to |
| @@ -3023,6 +3030,7 @@ reset_fair: | |||
| 3023 | apply_fair = false; | 3030 | apply_fair = false; |
| 3024 | fair_skipped = false; | 3031 | fair_skipped = false; |
| 3025 | reset_alloc_batches(ac->preferred_zoneref->zone); | 3032 | reset_alloc_batches(ac->preferred_zoneref->zone); |
| 3033 | z = ac->preferred_zoneref; | ||
| 3026 | goto zonelist_scan; | 3034 | goto zonelist_scan; |
| 3027 | } | 3035 | } |
| 3028 | 3036 | ||
| @@ -3596,6 +3604,17 @@ retry: | |||
| 3596 | */ | 3604 | */ |
| 3597 | alloc_flags = gfp_to_alloc_flags(gfp_mask); | 3605 | alloc_flags = gfp_to_alloc_flags(gfp_mask); |
| 3598 | 3606 | ||
| 3607 | /* | ||
| 3608 | * Reset the zonelist iterators if memory policies can be ignored. | ||
| 3609 | * These allocations are high priority and system rather than user | ||
| 3610 | * orientated. | ||
| 3611 | */ | ||
| 3612 | if ((alloc_flags & ALLOC_NO_WATERMARKS) || !(alloc_flags & ALLOC_CPUSET)) { | ||
| 3613 | ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); | ||
| 3614 | ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, | ||
| 3615 | ac->high_zoneidx, ac->nodemask); | ||
| 3616 | } | ||
| 3617 | |||
| 3599 | /* This is the last chance, in general, before the goto nopage. */ | 3618 | /* This is the last chance, in general, before the goto nopage. */ |
| 3600 | page = get_page_from_freelist(gfp_mask, order, | 3619 | page = get_page_from_freelist(gfp_mask, order, |
| 3601 | alloc_flags & ~ALLOC_NO_WATERMARKS, ac); | 3620 | alloc_flags & ~ALLOC_NO_WATERMARKS, ac); |
| @@ -3604,12 +3623,6 @@ retry: | |||
| 3604 | 3623 | ||
| 3605 | /* Allocate without watermarks if the context allows */ | 3624 | /* Allocate without watermarks if the context allows */ |
| 3606 | if (alloc_flags & ALLOC_NO_WATERMARKS) { | 3625 | if (alloc_flags & ALLOC_NO_WATERMARKS) { |
| 3607 | /* | ||
| 3608 | * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds | ||
| 3609 | * the allocation is high priority and these type of | ||
| 3610 | * allocations are system rather than user orientated | ||
| 3611 | */ | ||
| 3612 | ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); | ||
| 3613 | page = get_page_from_freelist(gfp_mask, order, | 3626 | page = get_page_from_freelist(gfp_mask, order, |
| 3614 | ALLOC_NO_WATERMARKS, ac); | 3627 | ALLOC_NO_WATERMARKS, ac); |
| 3615 | if (page) | 3628 | if (page) |
| @@ -3808,7 +3821,11 @@ retry_cpuset: | |||
| 3808 | /* Dirty zone balancing only done in the fast path */ | 3821 | /* Dirty zone balancing only done in the fast path */ |
| 3809 | ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); | 3822 | ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); |
| 3810 | 3823 | ||
| 3811 | /* The preferred zone is used for statistics later */ | 3824 | /* |
| 3825 | * The preferred zone is used for statistics but crucially it is | ||
| 3826 | * also used as the starting point for the zonelist iterator. It | ||
| 3827 | * may get reset for allocations that ignore memory policies. | ||
| 3828 | */ | ||
| 3812 | ac.preferred_zoneref = first_zones_zonelist(ac.zonelist, | 3829 | ac.preferred_zoneref = first_zones_zonelist(ac.zonelist, |
| 3813 | ac.high_zoneidx, ac.nodemask); | 3830 | ac.high_zoneidx, ac.nodemask); |
| 3814 | if (!ac.preferred_zoneref) { | 3831 | if (!ac.preferred_zoneref) { |
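The buffered_rmqueue() hunk earlier in this file moves the list_del()/pcp->count-- inside the retry loop: if a page fails check_new_pcp() while still sitting on the pcp list, the next iteration would pick up the very same page again and the loop could spin forever. A generic userspace sketch of that pattern (not kernel code):

#include <stdio.h>

struct node {
	int id;
	int bad;		/* stands in for check_new_pcp() failing */
	struct node *next;
};

/* Pop entries until a good one is found; a rejected entry must be unlinked
 * first, otherwise every iteration re-examines the same bad entry. */
static struct node *take_first_good(struct node **list)
{
	struct node *n;

	do {
		n = *list;
		if (!n)
			return NULL;
		*list = n->next;	/* unlink before validating */
		if (n->bad) {
			printf("discarding bad node %d\n", n->id);
			n = NULL;	/* force another iteration */
		}
	} while (!n);

	return n;
}

int main(void)
{
	struct node c = { 3, 0, NULL };
	struct node b = { 2, 1, &c };
	struct node a = { 1, 1, &b };
	struct node *list = &a;

	struct node *good = take_first_good(&list);
	if (good)
		printf("got node %d\n", good->id);
	return 0;
}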
diff --git a/mm/page_owner.c b/mm/page_owner.c index 792b56da13d8..fedeba88c9cb 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c | |||
| @@ -55,6 +55,8 @@ void __reset_page_owner(struct page *page, unsigned int order) | |||
| 55 | 55 | ||
| 56 | for (i = 0; i < (1 << order); i++) { | 56 | for (i = 0; i < (1 << order); i++) { |
| 57 | page_ext = lookup_page_ext(page + i); | 57 | page_ext = lookup_page_ext(page + i); |
| 58 | if (unlikely(!page_ext)) | ||
| 59 | continue; | ||
| 58 | __clear_bit(PAGE_EXT_OWNER, &page_ext->flags); | 60 | __clear_bit(PAGE_EXT_OWNER, &page_ext->flags); |
| 59 | } | 61 | } |
| 60 | } | 62 | } |
| @@ -62,6 +64,7 @@ void __reset_page_owner(struct page *page, unsigned int order) | |||
| 62 | void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) | 64 | void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) |
| 63 | { | 65 | { |
| 64 | struct page_ext *page_ext = lookup_page_ext(page); | 66 | struct page_ext *page_ext = lookup_page_ext(page); |
| 67 | |||
| 65 | struct stack_trace trace = { | 68 | struct stack_trace trace = { |
| 66 | .nr_entries = 0, | 69 | .nr_entries = 0, |
| 67 | .max_entries = ARRAY_SIZE(page_ext->trace_entries), | 70 | .max_entries = ARRAY_SIZE(page_ext->trace_entries), |
| @@ -69,6 +72,9 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) | |||
| 69 | .skip = 3, | 72 | .skip = 3, |
| 70 | }; | 73 | }; |
| 71 | 74 | ||
| 75 | if (unlikely(!page_ext)) | ||
| 76 | return; | ||
| 77 | |||
| 72 | save_stack_trace(&trace); | 78 | save_stack_trace(&trace); |
| 73 | 79 | ||
| 74 | page_ext->order = order; | 80 | page_ext->order = order; |
| @@ -82,6 +88,8 @@ void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask) | |||
| 82 | void __set_page_owner_migrate_reason(struct page *page, int reason) | 88 | void __set_page_owner_migrate_reason(struct page *page, int reason) |
| 83 | { | 89 | { |
| 84 | struct page_ext *page_ext = lookup_page_ext(page); | 90 | struct page_ext *page_ext = lookup_page_ext(page); |
| 91 | if (unlikely(!page_ext)) | ||
| 92 | return; | ||
| 85 | 93 | ||
| 86 | page_ext->last_migrate_reason = reason; | 94 | page_ext->last_migrate_reason = reason; |
| 87 | } | 95 | } |
| @@ -89,6 +97,12 @@ void __set_page_owner_migrate_reason(struct page *page, int reason) | |||
| 89 | gfp_t __get_page_owner_gfp(struct page *page) | 97 | gfp_t __get_page_owner_gfp(struct page *page) |
| 90 | { | 98 | { |
| 91 | struct page_ext *page_ext = lookup_page_ext(page); | 99 | struct page_ext *page_ext = lookup_page_ext(page); |
| 100 | if (unlikely(!page_ext)) | ||
| 101 | /* | ||
| 102 | * The caller just returns 0 if no valid gfp | ||
| 103 | * So return 0 here too. | ||
| 104 | */ | ||
| 105 | return 0; | ||
| 92 | 106 | ||
| 93 | return page_ext->gfp_mask; | 107 | return page_ext->gfp_mask; |
| 94 | } | 108 | } |
| @@ -99,6 +113,9 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage) | |||
| 99 | struct page_ext *new_ext = lookup_page_ext(newpage); | 113 | struct page_ext *new_ext = lookup_page_ext(newpage); |
| 100 | int i; | 114 | int i; |
| 101 | 115 | ||
| 116 | if (unlikely(!old_ext || !new_ext)) | ||
| 117 | return; | ||
| 118 | |||
| 102 | new_ext->order = old_ext->order; | 119 | new_ext->order = old_ext->order; |
| 103 | new_ext->gfp_mask = old_ext->gfp_mask; | 120 | new_ext->gfp_mask = old_ext->gfp_mask; |
| 104 | new_ext->nr_entries = old_ext->nr_entries; | 121 | new_ext->nr_entries = old_ext->nr_entries; |
| @@ -190,8 +207,15 @@ void __dump_page_owner(struct page *page) | |||
| 190 | .nr_entries = page_ext->nr_entries, | 207 | .nr_entries = page_ext->nr_entries, |
| 191 | .entries = &page_ext->trace_entries[0], | 208 | .entries = &page_ext->trace_entries[0], |
| 192 | }; | 209 | }; |
| 193 | gfp_t gfp_mask = page_ext->gfp_mask; | 210 | gfp_t gfp_mask; |
| 194 | int mt = gfpflags_to_migratetype(gfp_mask); | 211 | int mt; |
| 212 | |||
| 213 | if (unlikely(!page_ext)) { | ||
| 214 | pr_alert("There is not page extension available.\n"); | ||
| 215 | return; | ||
| 216 | } | ||
| 217 | gfp_mask = page_ext->gfp_mask; | ||
| 218 | mt = gfpflags_to_migratetype(gfp_mask); | ||
| 195 | 219 | ||
| 196 | if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) { | 220 | if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) { |
| 197 | pr_alert("page_owner info is not active (free page?)\n"); | 221 | pr_alert("page_owner info is not active (free page?)\n"); |
| @@ -251,6 +275,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos) | |||
| 251 | } | 275 | } |
| 252 | 276 | ||
| 253 | page_ext = lookup_page_ext(page); | 277 | page_ext = lookup_page_ext(page); |
| 278 | if (unlikely(!page_ext)) | ||
| 279 | continue; | ||
| 254 | 280 | ||
| 255 | /* | 281 | /* |
| 256 | * Some pages could be missed by concurrent allocation or free, | 282 | * Some pages could be missed by concurrent allocation or free, |
| @@ -317,6 +343,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) | |||
| 317 | continue; | 343 | continue; |
| 318 | 344 | ||
| 319 | page_ext = lookup_page_ext(page); | 345 | page_ext = lookup_page_ext(page); |
| 346 | if (unlikely(!page_ext)) | ||
| 347 | continue; | ||
| 320 | 348 | ||
| 321 | /* Maybe overraping zone */ | 349 | /* Maybe overraping zone */ |
| 322 | if (test_bit(PAGE_EXT_OWNER, &page_ext->flags)) | 350 | if (test_bit(PAGE_EXT_OWNER, &page_ext->flags)) |
diff --git a/mm/page_poison.c b/mm/page_poison.c index 1eae5fad2446..2e647c65916b 100644 --- a/mm/page_poison.c +++ b/mm/page_poison.c | |||
| @@ -54,6 +54,9 @@ static inline void set_page_poison(struct page *page) | |||
| 54 | struct page_ext *page_ext; | 54 | struct page_ext *page_ext; |
| 55 | 55 | ||
| 56 | page_ext = lookup_page_ext(page); | 56 | page_ext = lookup_page_ext(page); |
| 57 | if (unlikely(!page_ext)) | ||
| 58 | return; | ||
| 59 | |||
| 57 | __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); | 60 | __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); |
| 58 | } | 61 | } |
| 59 | 62 | ||
| @@ -62,6 +65,9 @@ static inline void clear_page_poison(struct page *page) | |||
| 62 | struct page_ext *page_ext; | 65 | struct page_ext *page_ext; |
| 63 | 66 | ||
| 64 | page_ext = lookup_page_ext(page); | 67 | page_ext = lookup_page_ext(page); |
| 68 | if (unlikely(!page_ext)) | ||
| 69 | return; | ||
| 70 | |||
| 65 | __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); | 71 | __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); |
| 66 | } | 72 | } |
| 67 | 73 | ||
| @@ -70,7 +76,7 @@ bool page_is_poisoned(struct page *page) | |||
| 70 | struct page_ext *page_ext; | 76 | struct page_ext *page_ext; |
| 71 | 77 | ||
| 72 | page_ext = lookup_page_ext(page); | 78 | page_ext = lookup_page_ext(page); |
| 73 | if (!page_ext) | 79 | if (unlikely(!page_ext)) |
| 74 | return false; | 80 | return false; |
| 75 | 81 | ||
| 76 | return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); | 82 | return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags); |
diff --git a/mm/percpu.c b/mm/percpu.c index 0c59684f1ff2..9903830aaebb 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
| @@ -112,7 +112,7 @@ struct pcpu_chunk { | |||
| 112 | int map_used; /* # of map entries used before the sentry */ | 112 | int map_used; /* # of map entries used before the sentry */ |
| 113 | int map_alloc; /* # of map entries allocated */ | 113 | int map_alloc; /* # of map entries allocated */ |
| 114 | int *map; /* allocation map */ | 114 | int *map; /* allocation map */ |
| 115 | struct work_struct map_extend_work;/* async ->map[] extension */ | 115 | struct list_head map_extend_list;/* on pcpu_map_extend_chunks */ |
| 116 | 116 | ||
| 117 | void *data; /* chunk data */ | 117 | void *data; /* chunk data */ |
| 118 | int first_free; /* no free below this */ | 118 | int first_free; /* no free below this */ |
| @@ -162,10 +162,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk; | |||
| 162 | static int pcpu_reserved_chunk_limit; | 162 | static int pcpu_reserved_chunk_limit; |
| 163 | 163 | ||
| 164 | static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */ | 164 | static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */ |
| 165 | static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */ | 165 | static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */ |
| 166 | 166 | ||
| 167 | static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ | 167 | static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ |
| 168 | 168 | ||
| 169 | /* chunks which need their map areas extended, protected by pcpu_lock */ | ||
| 170 | static LIST_HEAD(pcpu_map_extend_chunks); | ||
| 171 | |||
| 169 | /* | 172 | /* |
| 170 | * The number of empty populated pages, protected by pcpu_lock. The | 173 | * The number of empty populated pages, protected by pcpu_lock. The |
| 171 | * reserved chunk doesn't contribute to the count. | 174 | * reserved chunk doesn't contribute to the count. |
| @@ -395,13 +398,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic) | |||
| 395 | { | 398 | { |
| 396 | int margin, new_alloc; | 399 | int margin, new_alloc; |
| 397 | 400 | ||
| 401 | lockdep_assert_held(&pcpu_lock); | ||
| 402 | |||
| 398 | if (is_atomic) { | 403 | if (is_atomic) { |
| 399 | margin = 3; | 404 | margin = 3; |
| 400 | 405 | ||
| 401 | if (chunk->map_alloc < | 406 | if (chunk->map_alloc < |
| 402 | chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW && | 407 | chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) { |
| 403 | pcpu_async_enabled) | 408 | if (list_empty(&chunk->map_extend_list)) { |
| 404 | schedule_work(&chunk->map_extend_work); | 409 | list_add_tail(&chunk->map_extend_list, |
| 410 | &pcpu_map_extend_chunks); | ||
| 411 | pcpu_schedule_balance_work(); | ||
| 412 | } | ||
| 413 | } | ||
| 405 | } else { | 414 | } else { |
| 406 | margin = PCPU_ATOMIC_MAP_MARGIN_HIGH; | 415 | margin = PCPU_ATOMIC_MAP_MARGIN_HIGH; |
| 407 | } | 416 | } |
| @@ -435,6 +444,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc) | |||
| 435 | size_t old_size = 0, new_size = new_alloc * sizeof(new[0]); | 444 | size_t old_size = 0, new_size = new_alloc * sizeof(new[0]); |
| 436 | unsigned long flags; | 445 | unsigned long flags; |
| 437 | 446 | ||
| 447 | lockdep_assert_held(&pcpu_alloc_mutex); | ||
| 448 | |||
| 438 | new = pcpu_mem_zalloc(new_size); | 449 | new = pcpu_mem_zalloc(new_size); |
| 439 | if (!new) | 450 | if (!new) |
| 440 | return -ENOMEM; | 451 | return -ENOMEM; |
| @@ -467,20 +478,6 @@ out_unlock: | |||
| 467 | return 0; | 478 | return 0; |
| 468 | } | 479 | } |
| 469 | 480 | ||
| 470 | static void pcpu_map_extend_workfn(struct work_struct *work) | ||
| 471 | { | ||
| 472 | struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk, | ||
| 473 | map_extend_work); | ||
| 474 | int new_alloc; | ||
| 475 | |||
| 476 | spin_lock_irq(&pcpu_lock); | ||
| 477 | new_alloc = pcpu_need_to_extend(chunk, false); | ||
| 478 | spin_unlock_irq(&pcpu_lock); | ||
| 479 | |||
| 480 | if (new_alloc) | ||
| 481 | pcpu_extend_area_map(chunk, new_alloc); | ||
| 482 | } | ||
| 483 | |||
| 484 | /** | 481 | /** |
| 485 | * pcpu_fit_in_area - try to fit the requested allocation in a candidate area | 482 | * pcpu_fit_in_area - try to fit the requested allocation in a candidate area |
| 486 | * @chunk: chunk the candidate area belongs to | 483 | * @chunk: chunk the candidate area belongs to |
| @@ -740,7 +737,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void) | |||
| 740 | chunk->map_used = 1; | 737 | chunk->map_used = 1; |
| 741 | 738 | ||
| 742 | INIT_LIST_HEAD(&chunk->list); | 739 | INIT_LIST_HEAD(&chunk->list); |
| 743 | INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn); | 740 | INIT_LIST_HEAD(&chunk->map_extend_list); |
| 744 | chunk->free_size = pcpu_unit_size; | 741 | chunk->free_size = pcpu_unit_size; |
| 745 | chunk->contig_hint = pcpu_unit_size; | 742 | chunk->contig_hint = pcpu_unit_size; |
| 746 | 743 | ||
| @@ -895,6 +892,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, | |||
| 895 | return NULL; | 892 | return NULL; |
| 896 | } | 893 | } |
| 897 | 894 | ||
| 895 | if (!is_atomic) | ||
| 896 | mutex_lock(&pcpu_alloc_mutex); | ||
| 897 | |||
| 898 | spin_lock_irqsave(&pcpu_lock, flags); | 898 | spin_lock_irqsave(&pcpu_lock, flags); |
| 899 | 899 | ||
| 900 | /* serve reserved allocations from the reserved chunk if available */ | 900 | /* serve reserved allocations from the reserved chunk if available */ |
| @@ -967,12 +967,9 @@ restart: | |||
| 967 | if (is_atomic) | 967 | if (is_atomic) |
| 968 | goto fail; | 968 | goto fail; |
| 969 | 969 | ||
| 970 | mutex_lock(&pcpu_alloc_mutex); | ||
| 971 | |||
| 972 | if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { | 970 | if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { |
| 973 | chunk = pcpu_create_chunk(); | 971 | chunk = pcpu_create_chunk(); |
| 974 | if (!chunk) { | 972 | if (!chunk) { |
| 975 | mutex_unlock(&pcpu_alloc_mutex); | ||
| 976 | err = "failed to allocate new chunk"; | 973 | err = "failed to allocate new chunk"; |
| 977 | goto fail; | 974 | goto fail; |
| 978 | } | 975 | } |
| @@ -983,7 +980,6 @@ restart: | |||
| 983 | spin_lock_irqsave(&pcpu_lock, flags); | 980 | spin_lock_irqsave(&pcpu_lock, flags); |
| 984 | } | 981 | } |
| 985 | 982 | ||
| 986 | mutex_unlock(&pcpu_alloc_mutex); | ||
| 987 | goto restart; | 983 | goto restart; |
| 988 | 984 | ||
| 989 | area_found: | 985 | area_found: |
| @@ -993,8 +989,6 @@ area_found: | |||
| 993 | if (!is_atomic) { | 989 | if (!is_atomic) { |
| 994 | int page_start, page_end, rs, re; | 990 | int page_start, page_end, rs, re; |
| 995 | 991 | ||
| 996 | mutex_lock(&pcpu_alloc_mutex); | ||
| 997 | |||
| 998 | page_start = PFN_DOWN(off); | 992 | page_start = PFN_DOWN(off); |
| 999 | page_end = PFN_UP(off + size); | 993 | page_end = PFN_UP(off + size); |
| 1000 | 994 | ||
| @@ -1005,7 +999,6 @@ area_found: | |||
| 1005 | 999 | ||
| 1006 | spin_lock_irqsave(&pcpu_lock, flags); | 1000 | spin_lock_irqsave(&pcpu_lock, flags); |
| 1007 | if (ret) { | 1001 | if (ret) { |
| 1008 | mutex_unlock(&pcpu_alloc_mutex); | ||
| 1009 | pcpu_free_area(chunk, off, &occ_pages); | 1002 | pcpu_free_area(chunk, off, &occ_pages); |
| 1010 | err = "failed to populate"; | 1003 | err = "failed to populate"; |
| 1011 | goto fail_unlock; | 1004 | goto fail_unlock; |
| @@ -1045,6 +1038,8 @@ fail: | |||
| 1045 | /* see the flag handling in pcpu_blance_workfn() */ | 1038 | /* see the flag handling in pcpu_blance_workfn() */ |
| 1046 | pcpu_atomic_alloc_failed = true; | 1039 | pcpu_atomic_alloc_failed = true; |
| 1047 | pcpu_schedule_balance_work(); | 1040 | pcpu_schedule_balance_work(); |
| 1041 | } else { | ||
| 1042 | mutex_unlock(&pcpu_alloc_mutex); | ||
| 1048 | } | 1043 | } |
| 1049 | return NULL; | 1044 | return NULL; |
| 1050 | } | 1045 | } |
| @@ -1129,6 +1124,7 @@ static void pcpu_balance_workfn(struct work_struct *work) | |||
| 1129 | if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) | 1124 | if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) |
| 1130 | continue; | 1125 | continue; |
| 1131 | 1126 | ||
| 1127 | list_del_init(&chunk->map_extend_list); | ||
| 1132 | list_move(&chunk->list, &to_free); | 1128 | list_move(&chunk->list, &to_free); |
| 1133 | } | 1129 | } |
| 1134 | 1130 | ||
| @@ -1146,6 +1142,25 @@ static void pcpu_balance_workfn(struct work_struct *work) | |||
| 1146 | pcpu_destroy_chunk(chunk); | 1142 | pcpu_destroy_chunk(chunk); |
| 1147 | } | 1143 | } |
| 1148 | 1144 | ||
| 1145 | /* service chunks which requested async area map extension */ | ||
| 1146 | do { | ||
| 1147 | int new_alloc = 0; | ||
| 1148 | |||
| 1149 | spin_lock_irq(&pcpu_lock); | ||
| 1150 | |||
| 1151 | chunk = list_first_entry_or_null(&pcpu_map_extend_chunks, | ||
| 1152 | struct pcpu_chunk, map_extend_list); | ||
| 1153 | if (chunk) { | ||
| 1154 | list_del_init(&chunk->map_extend_list); | ||
| 1155 | new_alloc = pcpu_need_to_extend(chunk, false); | ||
| 1156 | } | ||
| 1157 | |||
| 1158 | spin_unlock_irq(&pcpu_lock); | ||
| 1159 | |||
| 1160 | if (new_alloc) | ||
| 1161 | pcpu_extend_area_map(chunk, new_alloc); | ||
| 1162 | } while (chunk); | ||
| 1163 | |||
| 1149 | /* | 1164 | /* |
| 1150 | * Ensure there are certain number of free populated pages for | 1165 | * Ensure there are certain number of free populated pages for |
| 1151 | * atomic allocs. Fill up from the most packed so that atomic | 1166 | * atomic allocs. Fill up from the most packed so that atomic |
| @@ -1644,7 +1659,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |||
| 1644 | */ | 1659 | */ |
| 1645 | schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); | 1660 | schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); |
| 1646 | INIT_LIST_HEAD(&schunk->list); | 1661 | INIT_LIST_HEAD(&schunk->list); |
| 1647 | INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn); | 1662 | INIT_LIST_HEAD(&schunk->map_extend_list); |
| 1648 | schunk->base_addr = base_addr; | 1663 | schunk->base_addr = base_addr; |
| 1649 | schunk->map = smap; | 1664 | schunk->map = smap; |
| 1650 | schunk->map_alloc = ARRAY_SIZE(smap); | 1665 | schunk->map_alloc = ARRAY_SIZE(smap); |
| @@ -1673,7 +1688,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |||
| 1673 | if (dyn_size) { | 1688 | if (dyn_size) { |
| 1674 | dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); | 1689 | dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); |
| 1675 | INIT_LIST_HEAD(&dchunk->list); | 1690 | INIT_LIST_HEAD(&dchunk->list); |
| 1676 | INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn); | 1691 | INIT_LIST_HEAD(&dchunk->map_extend_list); |
| 1677 | dchunk->base_addr = base_addr; | 1692 | dchunk->base_addr = base_addr; |
| 1678 | dchunk->map = dmap; | 1693 | dchunk->map = dmap; |
| 1679 | dchunk->map_alloc = ARRAY_SIZE(dmap); | 1694 | dchunk->map_alloc = ARRAY_SIZE(dmap); |
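The mm/percpu.c rework above replaces the per-chunk map_extend_work item with a single list (pcpu_map_extend_chunks) drained by the balance worker, and widens pcpu_alloc_mutex so map extension runs under the same mutex as the rest of the non-atomic path. A userspace analogy of the queue-and-drain part is sketched below, with hypothetical names; it is not the kernel code. Build with -pthread.

#include <stdio.h>
#include <pthread.h>

struct chunk {
	int id;
	int queued;			/* like list_empty(&chunk->map_extend_list) */
	struct chunk *next_pending;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct chunk *pending;		/* single global "needs extension" list */

/* Atomic-allocation fast path: note the chunk and let the worker do the work. */
static void request_extend(struct chunk *c)
{
	pthread_mutex_lock(&lock);
	if (!c->queued) {		/* duplicate requests are no-ops */
		c->queued = 1;
		c->next_pending = pending;
		pending = c;
	}
	pthread_mutex_unlock(&lock);
}

/* The "balance worker": pop and service one chunk at a time. */
static void balance_work(void)
{
	for (;;) {
		struct chunk *c;

		pthread_mutex_lock(&lock);
		c = pending;
		if (c) {
			pending = c->next_pending;
			c->queued = 0;
		}
		pthread_mutex_unlock(&lock);

		if (!c)
			break;
		printf("extending area map of chunk %d\n", c->id);
	}
}

int main(void)
{
	struct chunk a = { 1, 0, NULL };
	struct chunk b = { 2, 0, NULL };

	request_extend(&a);
	request_extend(&b);
	request_extend(&a);	/* already queued, ignored */
	balance_work();
	return 0;
}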
diff --git a/mm/shmem.c b/mm/shmem.c index a36144909b28..24463b67b6ef 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
| @@ -2227,7 +2227,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, | |||
| 2227 | /* Remove the !PageUptodate pages we added */ | 2227 | /* Remove the !PageUptodate pages we added */ |
| 2228 | shmem_undo_range(inode, | 2228 | shmem_undo_range(inode, |
| 2229 | (loff_t)start << PAGE_SHIFT, | 2229 | (loff_t)start << PAGE_SHIFT, |
| 2230 | (loff_t)index << PAGE_SHIFT, true); | 2230 | ((loff_t)index << PAGE_SHIFT) - 1, true); |
| 2231 | goto undone; | 2231 | goto undone; |
| 2232 | } | 2232 | } |
| 2233 | 2233 | ||
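The shmem_fallocate() fix above is an inclusive-end issue: shmem_undo_range() takes lend as the last byte to drop, so passing the first byte of page `index` removed one page more than this fallocate had actually instantiated. A small sketch of the offset arithmetic with illustrative values:

#include <stdio.h>

#define PAGE_SHIFT	12

int main(void)
{
	unsigned long start = 2, index = 5;	/* assume pages 2..4 were added */

	long long lstart = (long long)start << PAGE_SHIFT;
	long long old_lend = (long long)index << PAGE_SHIFT;		/* first byte of page 5 */
	long long new_lend = ((long long)index << PAGE_SHIFT) - 1;	/* last byte of page 4 */

	/* the undo range covers page indices lstart>>PAGE_SHIFT .. lend>>PAGE_SHIFT */
	printf("old undo range: pages %lld..%lld (one too many)\n",
	       lstart >> PAGE_SHIFT, old_lend >> PAGE_SHIFT);
	printf("new undo range: pages %lld..%lld\n",
	       lstart >> PAGE_SHIFT, new_lend >> PAGE_SHIFT);
	return 0;
}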
diff --git a/mm/swap.c b/mm/swap.c --- a/mm/swap.c +++ b/mm/swap.c | |||
| @@ -242,7 +242,7 @@ void rotate_reclaimable_page(struct page *page) | |||
| 242 | get_page(page); | 242 | get_page(page); |
| 243 | local_irq_save(flags); | 243 | local_irq_save(flags); |
| 244 | pvec = this_cpu_ptr(&lru_rotate_pvecs); | 244 | pvec = this_cpu_ptr(&lru_rotate_pvecs); |
| 245 | if (!pagevec_add(pvec, page)) | 245 | if (!pagevec_add(pvec, page) || PageCompound(page)) |
| 246 | pagevec_move_tail(pvec); | 246 | pagevec_move_tail(pvec); |
| 247 | local_irq_restore(flags); | 247 | local_irq_restore(flags); |
| 248 | } | 248 | } |
| @@ -296,7 +296,7 @@ void activate_page(struct page *page) | |||
| 296 | struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); | 296 | struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); |
| 297 | 297 | ||
| 298 | get_page(page); | 298 | get_page(page); |
| 299 | if (!pagevec_add(pvec, page)) | 299 | if (!pagevec_add(pvec, page) || PageCompound(page)) |
| 300 | pagevec_lru_move_fn(pvec, __activate_page, NULL); | 300 | pagevec_lru_move_fn(pvec, __activate_page, NULL); |
| 301 | put_cpu_var(activate_page_pvecs); | 301 | put_cpu_var(activate_page_pvecs); |
| 302 | } | 302 | } |
| @@ -391,9 +391,8 @@ static void __lru_cache_add(struct page *page) | |||
| 391 | struct pagevec *pvec = &get_cpu_var(lru_add_pvec); | 391 | struct pagevec *pvec = &get_cpu_var(lru_add_pvec); |
| 392 | 392 | ||
| 393 | get_page(page); | 393 | get_page(page); |
| 394 | if (!pagevec_space(pvec)) | 394 | if (!pagevec_add(pvec, page) || PageCompound(page)) |
| 395 | __pagevec_lru_add(pvec); | 395 | __pagevec_lru_add(pvec); |
| 396 | pagevec_add(pvec, page); | ||
| 397 | put_cpu_var(lru_add_pvec); | 396 | put_cpu_var(lru_add_pvec); |
| 398 | } | 397 | } |
| 399 | 398 | ||
| @@ -628,7 +627,7 @@ void deactivate_file_page(struct page *page) | |||
| 628 | if (likely(get_page_unless_zero(page))) { | 627 | if (likely(get_page_unless_zero(page))) { |
| 629 | struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs); | 628 | struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs); |
| 630 | 629 | ||
| 631 | if (!pagevec_add(pvec, page)) | 630 | if (!pagevec_add(pvec, page) || PageCompound(page)) |
| 632 | pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); | 631 | pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); |
| 633 | put_cpu_var(lru_deactivate_file_pvecs); | 632 | put_cpu_var(lru_deactivate_file_pvecs); |
| 634 | } | 633 | } |
| @@ -648,7 +647,7 @@ void deactivate_page(struct page *page) | |||
| 648 | struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); | 647 | struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); |
| 649 | 648 | ||
| 650 | get_page(page); | 649 | get_page(page); |
| 651 | if (!pagevec_add(pvec, page)) | 650 | if (!pagevec_add(pvec, page) || PageCompound(page)) |
| 652 | pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); | 651 | pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); |
| 653 | put_cpu_var(lru_deactivate_pvecs); | 652 | put_cpu_var(lru_deactivate_pvecs); |
| 654 | } | 653 | } |
| @@ -667,6 +666,24 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy) | |||
| 667 | 666 | ||
| 668 | static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); | 667 | static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); |
| 669 | 668 | ||
| 669 | /* | ||
| 670 | * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM | ||
| 671 | * workqueue, aiding in getting memory freed. | ||
| 672 | */ | ||
| 673 | static struct workqueue_struct *lru_add_drain_wq; | ||
| 674 | |||
| 675 | static int __init lru_init(void) | ||
| 676 | { | ||
| 677 | lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0); | ||
| 678 | |||
| 679 | if (WARN(!lru_add_drain_wq, | ||
| 680 | "Failed to create workqueue lru_add_drain_wq")) | ||
| 681 | return -ENOMEM; | ||
| 682 | |||
| 683 | return 0; | ||
| 684 | } | ||
| 685 | early_initcall(lru_init); | ||
| 686 | |||
| 670 | void lru_add_drain_all(void) | 687 | void lru_add_drain_all(void) |
| 671 | { | 688 | { |
| 672 | static DEFINE_MUTEX(lock); | 689 | static DEFINE_MUTEX(lock); |
| @@ -686,7 +703,7 @@ void lru_add_drain_all(void) | |||
| 686 | pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) || | 703 | pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) || |
| 687 | need_activate_page_drain(cpu)) { | 704 | need_activate_page_drain(cpu)) { |
| 688 | INIT_WORK(work, lru_add_drain_per_cpu); | 705 | INIT_WORK(work, lru_add_drain_per_cpu); |
| 689 | schedule_work_on(cpu, work); | 706 | queue_work_on(cpu, lru_add_drain_wq, work); |
| 690 | cpumask_set_cpu(cpu, &has_work); | 707 | cpumask_set_cpu(cpu, &has_work); |
| 691 | } | 708 | } |
| 692 | } | 709 | } |
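The mm/swap.c hunks share one rule change: a page is still added to the per-CPU pagevec, but the pagevec is drained immediately when it is full or when the page is compound (a THP), rather than letting huge pages linger in the batch; lru_add_drain_all() also moves to its own WQ_MEM_RECLAIM workqueue so draining can make progress during reclaim. A tiny sketch of the flush rule only (BATCH and the names are illustrative):

#include <stdio.h>
#include <stdbool.h>

#define BATCH	14	/* roughly PAGEVEC_SIZE, illustrative */

struct batch {
	int nr;
	int items[BATCH];
};

/* Mirrors pagevec_add(): add the item and report whether space remains. */
static bool batch_add(struct batch *b, int item)
{
	b->items[b->nr++] = item;
	return b->nr < BATCH;
}

static void batch_flush(struct batch *b)
{
	printf("draining %d items\n", b->nr);
	b->nr = 0;
}

/* The new rule: drain when the batch is full OR the item is "compound". */
static void cache_add(struct batch *b, int item, bool compound)
{
	if (!batch_add(b, item) || compound)
		batch_flush(b);
}

int main(void)
{
	struct batch b = { 0 };
	int i;

	for (i = 0; i < 5; i++)
		cache_add(&b, i, false);	/* stays batched */
	cache_add(&b, 100, true);		/* huge page: drained at once */
	return 0;
}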
diff --git a/mm/swap_state.c b/mm/swap_state.c index 0d457e7db8d6..c99463ac02fb 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
| @@ -252,7 +252,10 @@ static inline void free_swap_cache(struct page *page) | |||
| 252 | void free_page_and_swap_cache(struct page *page) | 252 | void free_page_and_swap_cache(struct page *page) |
| 253 | { | 253 | { |
| 254 | free_swap_cache(page); | 254 | free_swap_cache(page); |
| 255 | put_page(page); | 255 | if (is_huge_zero_page(page)) |
| 256 | put_huge_zero_page(); | ||
| 257 | else | ||
| 258 | put_page(page); | ||
| 256 | } | 259 | } |
| 257 | 260 | ||
| 258 | /* | 261 | /* |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index cf7ad1a53be0..e11475cdeb7a 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
| @@ -1105,7 +1105,7 @@ EXPORT_SYMBOL_GPL(vm_unmap_aliases); | |||
| 1105 | */ | 1105 | */ |
| 1106 | void vm_unmap_ram(const void *mem, unsigned int count) | 1106 | void vm_unmap_ram(const void *mem, unsigned int count) |
| 1107 | { | 1107 | { |
| 1108 | unsigned long size = count << PAGE_SHIFT; | 1108 | unsigned long size = (unsigned long)count << PAGE_SHIFT; |
| 1109 | unsigned long addr = (unsigned long)mem; | 1109 | unsigned long addr = (unsigned long)mem; |
| 1110 | 1110 | ||
| 1111 | BUG_ON(!addr); | 1111 | BUG_ON(!addr); |
| @@ -1140,7 +1140,7 @@ EXPORT_SYMBOL(vm_unmap_ram); | |||
| 1140 | */ | 1140 | */ |
| 1141 | void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) | 1141 | void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot) |
| 1142 | { | 1142 | { |
| 1143 | unsigned long size = count << PAGE_SHIFT; | 1143 | unsigned long size = (unsigned long)count << PAGE_SHIFT; |
| 1144 | unsigned long addr; | 1144 | unsigned long addr; |
| 1145 | void *mem; | 1145 | void *mem; |
| 1146 | 1146 | ||
| @@ -1574,14 +1574,15 @@ void *vmap(struct page **pages, unsigned int count, | |||
| 1574 | unsigned long flags, pgprot_t prot) | 1574 | unsigned long flags, pgprot_t prot) |
| 1575 | { | 1575 | { |
| 1576 | struct vm_struct *area; | 1576 | struct vm_struct *area; |
| 1577 | unsigned long size; /* In bytes */ | ||
| 1577 | 1578 | ||
| 1578 | might_sleep(); | 1579 | might_sleep(); |
| 1579 | 1580 | ||
| 1580 | if (count > totalram_pages) | 1581 | if (count > totalram_pages) |
| 1581 | return NULL; | 1582 | return NULL; |
| 1582 | 1583 | ||
| 1583 | area = get_vm_area_caller((count << PAGE_SHIFT), flags, | 1584 | size = (unsigned long)count << PAGE_SHIFT; |
| 1584 | __builtin_return_address(0)); | 1585 | area = get_vm_area_caller(size, flags, __builtin_return_address(0)); |
| 1585 | if (!area) | 1586 | if (!area) |
| 1586 | return NULL; | 1587 | return NULL; |
| 1587 | 1588 | ||
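All three mm/vmalloc.c hunks fix the same integer-promotion trap: `count` is an unsigned int, so `count << PAGE_SHIFT` is evaluated in 32-bit arithmetic and wraps once the requested mapping reaches 1M pages (4 GiB), even though the result is stored in an unsigned long. A standalone demonstration (assumes an LP64 host and PAGE_SHIFT of 12):

#include <stdio.h>

#define PAGE_SHIFT	12

int main(void)
{
	unsigned int count = 1U << 20;	/* 1M pages = 4 GiB worth of mappings */

	unsigned long wrapped = count << PAGE_SHIFT;			/* shift done in 32 bits */
	unsigned long correct = (unsigned long)count << PAGE_SHIFT;	/* shift done in 64 bits */

	printf("without cast: %lu bytes\n", wrapped);	/* 0 */
	printf("with cast:    %lu bytes\n", correct);	/* 4294967296 */
	return 0;
}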
diff --git a/mm/vmstat.c b/mm/vmstat.c index 77e42ef388c2..cb2a67bb4158 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c | |||
| @@ -1061,6 +1061,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m, | |||
| 1061 | continue; | 1061 | continue; |
| 1062 | 1062 | ||
| 1063 | page_ext = lookup_page_ext(page); | 1063 | page_ext = lookup_page_ext(page); |
| 1064 | if (unlikely(!page_ext)) | ||
| 1065 | continue; | ||
| 1064 | 1066 | ||
| 1065 | if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) | 1067 | if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) |
| 1066 | continue; | 1068 | continue; |
diff --git a/mm/z3fold.c b/mm/z3fold.c index 34917d55d311..8f9e89ca1d31 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c | |||
| @@ -412,7 +412,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) | |||
| 412 | /* HEADLESS page stored */ | 412 | /* HEADLESS page stored */ |
| 413 | bud = HEADLESS; | 413 | bud = HEADLESS; |
| 414 | } else { | 414 | } else { |
| 415 | bud = (handle - zhdr->first_num) & BUDDY_MASK; | 415 | bud = handle_to_buddy(handle); |
| 416 | 416 | ||
| 417 | switch (bud) { | 417 | switch (bud) { |
| 418 | case FIRST: | 418 | case FIRST: |
| @@ -572,15 +572,19 @@ next: | |||
| 572 | pool->pages_nr--; | 572 | pool->pages_nr--; |
| 573 | spin_unlock(&pool->lock); | 573 | spin_unlock(&pool->lock); |
| 574 | return 0; | 574 | return 0; |
| 575 | } else if (zhdr->first_chunks != 0 && | 575 | } else if (!test_bit(PAGE_HEADLESS, &page->private)) { |
| 576 | zhdr->last_chunks != 0 && zhdr->middle_chunks != 0) { | 576 | if (zhdr->first_chunks != 0 && |
| 577 | /* Full, add to buddied list */ | 577 | zhdr->last_chunks != 0 && |
| 578 | list_add(&zhdr->buddy, &pool->buddied); | 578 | zhdr->middle_chunks != 0) { |
| 579 | } else if (!test_bit(PAGE_HEADLESS, &page->private)) { | 579 | /* Full, add to buddied list */ |
| 580 | z3fold_compact_page(zhdr); | 580 | list_add(&zhdr->buddy, &pool->buddied); |
| 581 | /* add to unbuddied list */ | 581 | } else { |
| 582 | freechunks = num_free_chunks(zhdr); | 582 | z3fold_compact_page(zhdr); |
| 583 | list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); | 583 | /* add to unbuddied list */ |
| 584 | freechunks = num_free_chunks(zhdr); | ||
| 585 | list_add(&zhdr->buddy, | ||
| 586 | &pool->unbuddied[freechunks]); | ||
| 587 | } | ||
| 584 | } | 588 | } |
| 585 | 589 | ||
| 586 | /* add to beginning of LRU */ | 590 | /* add to beginning of LRU */ |
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index a1e273af6fc8..82a116ba590e 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
| @@ -290,6 +290,10 @@ static void vlan_sync_address(struct net_device *dev, | |||
| 290 | if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr)) | 290 | if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr)) |
| 291 | return; | 291 | return; |
| 292 | 292 | ||
| 293 | /* vlan continues to inherit address of lower device */ | ||
| 294 | if (vlan_dev_inherit_address(vlandev, dev)) | ||
| 295 | goto out; | ||
| 296 | |||
| 293 | /* vlan address was different from the old address and is equal to | 297 | /* vlan address was different from the old address and is equal to |
| 294 | * the new address */ | 298 | * the new address */ |
| 295 | if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && | 299 | if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) && |
| @@ -302,6 +306,7 @@ static void vlan_sync_address(struct net_device *dev, | |||
| 302 | !ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) | 306 | !ether_addr_equal(vlandev->dev_addr, dev->dev_addr)) |
| 303 | dev_uc_add(dev, vlandev->dev_addr); | 307 | dev_uc_add(dev, vlandev->dev_addr); |
| 304 | 308 | ||
| 309 | out: | ||
| 305 | ether_addr_copy(vlan->real_dev_addr, dev->dev_addr); | 310 | ether_addr_copy(vlan->real_dev_addr, dev->dev_addr); |
| 306 | } | 311 | } |
| 307 | 312 | ||
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index 9d010a09ab98..cc1557978066 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h | |||
| @@ -109,6 +109,8 @@ int vlan_check_real_dev(struct net_device *real_dev, | |||
| 109 | void vlan_setup(struct net_device *dev); | 109 | void vlan_setup(struct net_device *dev); |
| 110 | int register_vlan_dev(struct net_device *dev); | 110 | int register_vlan_dev(struct net_device *dev); |
| 111 | void unregister_vlan_dev(struct net_device *dev, struct list_head *head); | 111 | void unregister_vlan_dev(struct net_device *dev, struct list_head *head); |
| 112 | bool vlan_dev_inherit_address(struct net_device *dev, | ||
| 113 | struct net_device *real_dev); | ||
| 112 | 114 | ||
| 113 | static inline u32 vlan_get_ingress_priority(struct net_device *dev, | 115 | static inline u32 vlan_get_ingress_priority(struct net_device *dev, |
| 114 | u16 vlan_tci) | 116 | u16 vlan_tci) |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index e7e62570bdb8..86ae75b77390 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
| @@ -245,6 +245,17 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result) | |||
| 245 | strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23); | 245 | strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23); |
| 246 | } | 246 | } |
| 247 | 247 | ||
| 248 | bool vlan_dev_inherit_address(struct net_device *dev, | ||
| 249 | struct net_device *real_dev) | ||
| 250 | { | ||
| 251 | if (dev->addr_assign_type != NET_ADDR_STOLEN) | ||
| 252 | return false; | ||
| 253 | |||
| 254 | ether_addr_copy(dev->dev_addr, real_dev->dev_addr); | ||
| 255 | call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); | ||
| 256 | return true; | ||
| 257 | } | ||
| 258 | |||
| 248 | static int vlan_dev_open(struct net_device *dev) | 259 | static int vlan_dev_open(struct net_device *dev) |
| 249 | { | 260 | { |
| 250 | struct vlan_dev_priv *vlan = vlan_dev_priv(dev); | 261 | struct vlan_dev_priv *vlan = vlan_dev_priv(dev); |
| @@ -255,7 +266,8 @@ static int vlan_dev_open(struct net_device *dev) | |||
| 255 | !(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) | 266 | !(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) |
| 256 | return -ENETDOWN; | 267 | return -ENETDOWN; |
| 257 | 268 | ||
| 258 | if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) { | 269 | if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) && |
| 270 | !vlan_dev_inherit_address(dev, real_dev)) { | ||
| 259 | err = dev_uc_add(real_dev, dev->dev_addr); | 271 | err = dev_uc_add(real_dev, dev->dev_addr); |
| 260 | if (err < 0) | 272 | if (err < 0) |
| 261 | goto out; | 273 | goto out; |
| @@ -560,8 +572,10 @@ static int vlan_dev_init(struct net_device *dev) | |||
| 560 | /* ipv6 shared card related stuff */ | 572 | /* ipv6 shared card related stuff */ |
| 561 | dev->dev_id = real_dev->dev_id; | 573 | dev->dev_id = real_dev->dev_id; |
| 562 | 574 | ||
| 563 | if (is_zero_ether_addr(dev->dev_addr)) | 575 | if (is_zero_ether_addr(dev->dev_addr)) { |
| 564 | eth_hw_addr_inherit(dev, real_dev); | 576 | ether_addr_copy(dev->dev_addr, real_dev->dev_addr); |
| 577 | dev->addr_assign_type = NET_ADDR_STOLEN; | ||
| 578 | } | ||
| 565 | if (is_zero_ether_addr(dev->broadcast)) | 579 | if (is_zero_ether_addr(dev->broadcast)) |
| 566 | memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); | 580 | memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); |
| 567 | 581 | ||
diff --git a/net/atm/signaling.c b/net/atm/signaling.c index 4fd6af47383a..adb6e3d21b1e 100644 --- a/net/atm/signaling.c +++ b/net/atm/signaling.c | |||
| @@ -124,7 +124,7 @@ as_indicate_complete: | |||
| 124 | break; | 124 | break; |
| 125 | case as_addparty: | 125 | case as_addparty: |
| 126 | case as_dropparty: | 126 | case as_dropparty: |
| 127 | sk->sk_err_soft = msg->reply; | 127 | sk->sk_err_soft = -msg->reply; |
| 128 | /* < 0 failure, otherwise ep_ref */ | 128 | /* < 0 failure, otherwise ep_ref */ |
| 129 | clear_bit(ATM_VF_WAITING, &vcc->flags); | 129 | clear_bit(ATM_VF_WAITING, &vcc->flags); |
| 130 | break; | 130 | break; |
diff --git a/net/atm/svc.c b/net/atm/svc.c index 3fa0a9ee98d1..878563a8354d 100644 --- a/net/atm/svc.c +++ b/net/atm/svc.c | |||
| @@ -546,7 +546,7 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr, | |||
| 546 | schedule(); | 546 | schedule(); |
| 547 | } | 547 | } |
| 548 | finish_wait(sk_sleep(sk), &wait); | 548 | finish_wait(sk_sleep(sk), &wait); |
| 549 | error = xchg(&sk->sk_err_soft, 0); | 549 | error = -xchg(&sk->sk_err_soft, 0); |
| 550 | out: | 550 | out: |
| 551 | release_sock(sk); | 551 | release_sock(sk); |
| 552 | return error; | 552 | return error; |
| @@ -573,7 +573,7 @@ static int svc_dropparty(struct socket *sock, int ep_ref) | |||
| 573 | error = -EUNATCH; | 573 | error = -EUNATCH; |
| 574 | goto out; | 574 | goto out; |
| 575 | } | 575 | } |
| 576 | error = xchg(&sk->sk_err_soft, 0); | 576 | error = -xchg(&sk->sk_err_soft, 0); |
| 577 | out: | 577 | out: |
| 578 | release_sock(sk); | 578 | release_sock(sk); |
| 579 | return error; | 579 | return error; |
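The net/atm changes are a sign-convention fix: on failure the signaling reply in msg->reply carries a negative errno, while sk->sk_err_soft (like sk_err) is expected to hold a positive errno, so the value is negated when stored and negated again when svc_addparty()/svc_dropparty() consume it. A minimal sketch of the error-path convention only (illustrative, not the kernel structures):

#include <stdio.h>
#include <errno.h>

static int sk_err_soft;			/* holds a positive errno, 0 = none */

/* signaling side: a reply < 0 is a failure reported as a negative errno */
static void record_reply(int reply)
{
	if (reply < 0)
		sk_err_soft = -reply;	/* e.g. -EBUSY -> EBUSY */
}

/* socket side: hand the caller a conventional negative errno */
static int consume_error(void)
{
	int err = sk_err_soft;		/* xchg(&sk->sk_err_soft, 0) in the kernel */

	sk_err_soft = 0;
	return -err;
}

int main(void)
{
	record_reply(-EBUSY);
	printf("caller sees %d (-EBUSY is %d)\n", consume_error(), -EBUSY);
	return 0;
}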
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index dcea4f4c62b3..c18080ad4085 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c | |||
| @@ -279,6 +279,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) | |||
| 279 | * change from under us. | 279 | * change from under us. |
| 280 | */ | 280 | */ |
| 281 | list_for_each_entry(v, &vg->vlan_list, vlist) { | 281 | list_for_each_entry(v, &vg->vlan_list, vlist) { |
| 282 | if (!br_vlan_should_use(v)) | ||
| 283 | continue; | ||
| 282 | f = __br_fdb_get(br, br->dev->dev_addr, v->vid); | 284 | f = __br_fdb_get(br, br->dev->dev_addr, v->vid); |
| 283 | if (f && f->is_local && !f->dst) | 285 | if (f && f->is_local && !f->dst) |
| 284 | fdb_delete_local(br, NULL, f); | 286 | fdb_delete_local(br, NULL, f); |
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 0160d7d09a1e..89469592076c 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
| @@ -1276,9 +1276,9 @@ static bool target_should_be_paused(struct ceph_osd_client *osdc, | |||
| 1276 | const struct ceph_osd_request_target *t, | 1276 | const struct ceph_osd_request_target *t, |
| 1277 | struct ceph_pg_pool_info *pi) | 1277 | struct ceph_pg_pool_info *pi) |
| 1278 | { | 1278 | { |
| 1279 | bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD); | 1279 | bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); |
| 1280 | bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) || | 1280 | bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || |
| 1281 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || | 1281 | ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || |
| 1282 | __pool_full(pi); | 1282 | __pool_full(pi); |
| 1283 | 1283 | ||
| 1284 | WARN_ON(pi->id != t->base_oloc.pool); | 1284 | WARN_ON(pi->id != t->base_oloc.pool); |
| @@ -1303,8 +1303,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc, | |||
| 1303 | bool force_resend = false; | 1303 | bool force_resend = false; |
| 1304 | bool need_check_tiering = false; | 1304 | bool need_check_tiering = false; |
| 1305 | bool need_resend = false; | 1305 | bool need_resend = false; |
| 1306 | bool sort_bitwise = ceph_osdmap_flag(osdc->osdmap, | 1306 | bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE); |
| 1307 | CEPH_OSDMAP_SORTBITWISE); | ||
| 1308 | enum calc_target_result ct_res; | 1307 | enum calc_target_result ct_res; |
| 1309 | int ret; | 1308 | int ret; |
| 1310 | 1309 | ||
| @@ -1540,9 +1539,9 @@ static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg) | |||
| 1540 | */ | 1539 | */ |
| 1541 | msg->hdr.data_off = cpu_to_le16(req->r_data_offset); | 1540 | msg->hdr.data_off = cpu_to_le16(req->r_data_offset); |
| 1542 | 1541 | ||
| 1543 | dout("%s req %p oid %*pE oid_len %d front %zu data %u\n", __func__, | 1542 | dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__, |
| 1544 | req, req->r_t.target_oid.name_len, req->r_t.target_oid.name, | 1543 | req, req->r_t.target_oid.name, req->r_t.target_oid.name_len, |
| 1545 | req->r_t.target_oid.name_len, msg->front.iov_len, data_len); | 1544 | msg->front.iov_len, data_len); |
| 1546 | } | 1545 | } |
| 1547 | 1546 | ||
| 1548 | /* | 1547 | /* |
| @@ -1590,9 +1589,9 @@ static void maybe_request_map(struct ceph_osd_client *osdc) | |||
| 1590 | verify_osdc_locked(osdc); | 1589 | verify_osdc_locked(osdc); |
| 1591 | WARN_ON(!osdc->osdmap->epoch); | 1590 | WARN_ON(!osdc->osdmap->epoch); |
| 1592 | 1591 | ||
| 1593 | if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || | 1592 | if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || |
| 1594 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) || | 1593 | ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) || |
| 1595 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) { | 1594 | ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { |
| 1596 | dout("%s osdc %p continuous\n", __func__, osdc); | 1595 | dout("%s osdc %p continuous\n", __func__, osdc); |
| 1597 | continuous = true; | 1596 | continuous = true; |
| 1598 | } else { | 1597 | } else { |
| @@ -1629,19 +1628,19 @@ again: | |||
| 1629 | } | 1628 | } |
| 1630 | 1629 | ||
| 1631 | if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && | 1630 | if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && |
| 1632 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) { | 1631 | ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) { |
| 1633 | dout("req %p pausewr\n", req); | 1632 | dout("req %p pausewr\n", req); |
| 1634 | req->r_t.paused = true; | 1633 | req->r_t.paused = true; |
| 1635 | maybe_request_map(osdc); | 1634 | maybe_request_map(osdc); |
| 1636 | } else if ((req->r_flags & CEPH_OSD_FLAG_READ) && | 1635 | } else if ((req->r_flags & CEPH_OSD_FLAG_READ) && |
| 1637 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) { | 1636 | ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { |
| 1638 | dout("req %p pauserd\n", req); | 1637 | dout("req %p pauserd\n", req); |
| 1639 | req->r_t.paused = true; | 1638 | req->r_t.paused = true; |
| 1640 | maybe_request_map(osdc); | 1639 | maybe_request_map(osdc); |
| 1641 | } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && | 1640 | } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && |
| 1642 | !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY | | 1641 | !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY | |
| 1643 | CEPH_OSD_FLAG_FULL_FORCE)) && | 1642 | CEPH_OSD_FLAG_FULL_FORCE)) && |
| 1644 | (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || | 1643 | (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || |
| 1645 | pool_full(osdc, req->r_t.base_oloc.pool))) { | 1644 | pool_full(osdc, req->r_t.base_oloc.pool))) { |
| 1646 | dout("req %p full/pool_full\n", req); | 1645 | dout("req %p full/pool_full\n", req); |
| 1647 | pr_warn_ratelimited("FULL or reached pool quota\n"); | 1646 | pr_warn_ratelimited("FULL or reached pool quota\n"); |
| @@ -2280,7 +2279,7 @@ static void send_linger_ping(struct ceph_osd_linger_request *lreq) | |||
| 2280 | struct ceph_osd_request *req = lreq->ping_req; | 2279 | struct ceph_osd_request *req = lreq->ping_req; |
| 2281 | struct ceph_osd_req_op *op = &req->r_ops[0]; | 2280 | struct ceph_osd_req_op *op = &req->r_ops[0]; |
| 2282 | 2281 | ||
| 2283 | if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) { | 2282 | if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) { |
| 2284 | dout("%s PAUSERD\n", __func__); | 2283 | dout("%s PAUSERD\n", __func__); |
| 2285 | return; | 2284 | return; |
| 2286 | } | 2285 | } |
| @@ -2893,6 +2892,9 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) | |||
| 2893 | dout("req %p tid %llu cb\n", req, req->r_tid); | 2892 | dout("req %p tid %llu cb\n", req, req->r_tid); |
| 2894 | __complete_request(req); | 2893 | __complete_request(req); |
| 2895 | } | 2894 | } |
| 2895 | if (m.flags & CEPH_OSD_FLAG_ONDISK) | ||
| 2896 | complete_all(&req->r_safe_completion); | ||
| 2897 | ceph_osdc_put_request(req); | ||
| 2896 | } else { | 2898 | } else { |
| 2897 | if (req->r_unsafe_callback) { | 2899 | if (req->r_unsafe_callback) { |
| 2898 | dout("req %p tid %llu unsafe-cb\n", req, req->r_tid); | 2900 | dout("req %p tid %llu unsafe-cb\n", req, req->r_tid); |
| @@ -2901,10 +2903,7 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg) | |||
| 2901 | WARN_ON(1); | 2903 | WARN_ON(1); |
| 2902 | } | 2904 | } |
| 2903 | } | 2905 | } |
| 2904 | if (m.flags & CEPH_OSD_FLAG_ONDISK) | ||
| 2905 | complete_all(&req->r_safe_completion); | ||
| 2906 | 2906 | ||
| 2907 | ceph_osdc_put_request(req); | ||
| 2908 | return; | 2907 | return; |
| 2909 | 2908 | ||
| 2910 | fail_request: | 2909 | fail_request: |
| @@ -3050,7 +3049,7 @@ static int handle_one_map(struct ceph_osd_client *osdc, | |||
| 3050 | bool skipped_map = false; | 3049 | bool skipped_map = false; |
| 3051 | bool was_full; | 3050 | bool was_full; |
| 3052 | 3051 | ||
| 3053 | was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL); | 3052 | was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL); |
| 3054 | set_pool_was_full(osdc); | 3053 | set_pool_was_full(osdc); |
| 3055 | 3054 | ||
| 3056 | if (incremental) | 3055 | if (incremental) |
| @@ -3088,7 +3087,7 @@ static int handle_one_map(struct ceph_osd_client *osdc, | |||
| 3088 | osdc->osdmap = newmap; | 3087 | osdc->osdmap = newmap; |
| 3089 | } | 3088 | } |
| 3090 | 3089 | ||
| 3091 | was_full &= !ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL); | 3090 | was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL); |
| 3092 | scan_requests(&osdc->homeless_osd, skipped_map, was_full, true, | 3091 | scan_requests(&osdc->homeless_osd, skipped_map, was_full, true, |
| 3093 | need_resend, need_resend_linger); | 3092 | need_resend, need_resend_linger); |
| 3094 | 3093 | ||
| @@ -3174,9 +3173,9 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg) | |||
| 3174 | if (ceph_check_fsid(osdc->client, &fsid) < 0) | 3173 | if (ceph_check_fsid(osdc->client, &fsid) < 0) |
| 3175 | goto bad; | 3174 | goto bad; |
| 3176 | 3175 | ||
| 3177 | was_pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD); | 3176 | was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); |
| 3178 | was_pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) || | 3177 | was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || |
| 3179 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || | 3178 | ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || |
| 3180 | have_pool_full(osdc); | 3179 | have_pool_full(osdc); |
| 3181 | 3180 | ||
| 3182 | /* incremental maps */ | 3181 | /* incremental maps */ |
| @@ -3238,9 +3237,9 @@ done: | |||
| 3238 | * we find out when we are no longer full and stop returning | 3237 | * we find out when we are no longer full and stop returning |
| 3239 | * ENOSPC. | 3238 | * ENOSPC. |
| 3240 | */ | 3239 | */ |
| 3241 | pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD); | 3240 | pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD); |
| 3242 | pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) || | 3241 | pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) || |
| 3243 | ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) || | 3242 | ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || |
| 3244 | have_pool_full(osdc); | 3243 | have_pool_full(osdc); |
| 3245 | if (was_pauserd || was_pausewr || pauserd || pausewr) | 3244 | if (was_pauserd || was_pausewr || pauserd || pausewr) |
| 3246 | maybe_request_map(osdc); | 3245 | maybe_request_map(osdc); |
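Throughout the osd_client.c hunks above, call sites stop passing osdc->osdmap into ceph_osdmap_flag() and pass the client itself. A minimal userspace sketch of why such an accessor takes the containing object rather than a raw map pointer (all names below are invented for the illustration, not the Ceph code): the dereference of the current map then lives in exactly one place, which can later grow locking or RCU protection without touching every caller.

/* Illustrative sketch only -- not the Ceph code. */
#include <stdbool.h>
#include <stdio.h>

struct osdmap_sketch     { unsigned int flags; };
struct osd_client_sketch { struct osdmap_sketch *osdmap; };

#define SKETCH_MAP_FULL 0x1              /* hypothetical flag bit */

/* Callers never dereference client->osdmap themselves. */
static bool sketch_osdmap_flag(struct osd_client_sketch *c, unsigned int flag)
{
        return c->osdmap->flags & flag;
}

int main(void)
{
        struct osdmap_sketch map = { .flags = SKETCH_MAP_FULL };
        struct osd_client_sketch client = { .osdmap = &map };

        if (sketch_osdmap_flag(&client, SKETCH_MAP_FULL))
                printf("map reports FULL\n");
        return 0;
}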
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index cde52e94732f..03062bb763b3 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c | |||
| @@ -1778,8 +1778,8 @@ int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap, | |||
| 1778 | raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name, | 1778 | raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name, |
| 1779 | oid->name_len); | 1779 | oid->name_len); |
| 1780 | 1780 | ||
| 1781 | dout("%s %*pE -> raw_pgid %llu.%x\n", __func__, oid->name_len, | 1781 | dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name, |
| 1782 | oid->name, raw_pgid->pool, raw_pgid->seed); | 1782 | raw_pgid->pool, raw_pgid->seed); |
| 1783 | return 0; | 1783 | return 0; |
| 1784 | } | 1784 | } |
| 1785 | EXPORT_SYMBOL(ceph_object_locator_to_pg); | 1785 | EXPORT_SYMBOL(ceph_object_locator_to_pg); |
diff --git a/net/compat.c b/net/compat.c index 5cfd26a0006f..1cd2ec046164 100644 --- a/net/compat.c +++ b/net/compat.c | |||
| @@ -309,8 +309,8 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) | |||
| 309 | __scm_destroy(scm); | 309 | __scm_destroy(scm); |
| 310 | } | 310 | } |
| 311 | 311 | ||
| 312 | static int do_set_attach_filter(struct socket *sock, int level, int optname, | 312 | /* allocate a 64-bit sock_fprog on the user stack for duration of syscall. */ |
| 313 | char __user *optval, unsigned int optlen) | 313 | struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval) |
| 314 | { | 314 | { |
| 315 | struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval; | 315 | struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval; |
| 316 | struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog)); | 316 | struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog)); |
| @@ -323,6 +323,19 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname, | |||
| 323 | __get_user(ptr, &fprog32->filter) || | 323 | __get_user(ptr, &fprog32->filter) || |
| 324 | __put_user(len, &kfprog->len) || | 324 | __put_user(len, &kfprog->len) || |
| 325 | __put_user(compat_ptr(ptr), &kfprog->filter)) | 325 | __put_user(compat_ptr(ptr), &kfprog->filter)) |
| 326 | return NULL; | ||
| 327 | |||
| 328 | return kfprog; | ||
| 329 | } | ||
| 330 | EXPORT_SYMBOL_GPL(get_compat_bpf_fprog); | ||
| 331 | |||
| 332 | static int do_set_attach_filter(struct socket *sock, int level, int optname, | ||
| 333 | char __user *optval, unsigned int optlen) | ||
| 334 | { | ||
| 335 | struct sock_fprog __user *kfprog; | ||
| 336 | |||
| 337 | kfprog = get_compat_bpf_fprog(optval); | ||
| 338 | if (!kfprog) | ||
| 326 | return -EFAULT; | 339 | return -EFAULT; |
| 327 | 340 | ||
| 328 | return sock_setsockopt(sock, level, optname, (char __user *)kfprog, | 341 | return sock_setsockopt(sock, level, optname, (char __user *)kfprog, |
| @@ -354,7 +367,8 @@ static int do_set_sock_timeout(struct socket *sock, int level, | |||
| 354 | static int compat_sock_setsockopt(struct socket *sock, int level, int optname, | 367 | static int compat_sock_setsockopt(struct socket *sock, int level, int optname, |
| 355 | char __user *optval, unsigned int optlen) | 368 | char __user *optval, unsigned int optlen) |
| 356 | { | 369 | { |
| 357 | if (optname == SO_ATTACH_FILTER) | 370 | if (optname == SO_ATTACH_FILTER || |
| 371 | optname == SO_ATTACH_REUSEPORT_CBPF) | ||
| 358 | return do_set_attach_filter(sock, level, optname, | 372 | return do_set_attach_filter(sock, level, optname, |
| 359 | optval, optlen); | 373 | optval, optlen); |
| 360 | if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) | 374 | if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) |
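The net/compat.c hunk factors the 32-bit-to-native filter conversion out of do_set_attach_filter() into an exported get_compat_bpf_fprog() helper, so the same conversion can be reused (here for SO_ATTACH_REUSEPORT_CBPF, and by af_packet further down). A reduced userspace sketch of the conversion idea, assuming only that the compat layout carries a 16-bit length and a 32-bit pointer value; the type and field names are illustrative, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t compat_uptr;            /* 32-bit user pointer, as an integer */

struct compat_fprog { uint16_t len; compat_uptr filter; };
struct native_fprog { uint16_t len; void *filter; };

/* Widen the 32-bit layout into the native one. */
static struct native_fprog to_native(const struct compat_fprog *c)
{
        struct native_fprog n = {
                .len    = c->len,
                .filter = (void *)(uintptr_t)c->filter,
        };
        return n;
}

int main(void)
{
        struct compat_fprog c = { .len = 4, .filter = 0x1000 };
        struct native_fprog n = to_native(&c);

        printf("len=%u filter=%p\n", (unsigned)n.len, n.filter);
        return 0;
}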
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index f96ee8b9478d..be873e4e3125 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c | |||
| @@ -47,6 +47,7 @@ nla_put_failure: | |||
| 47 | * @xstats_type: TLV type for backward compatibility xstats TLV | 47 | * @xstats_type: TLV type for backward compatibility xstats TLV |
| 48 | * @lock: statistics lock | 48 | * @lock: statistics lock |
| 49 | * @d: dumping handle | 49 | * @d: dumping handle |
| 50 | * @padattr: padding attribute | ||
| 50 | * | 51 | * |
| 51 | * Initializes the dumping handle, grabs the statistic lock and appends | 52 | * Initializes the dumping handle, grabs the statistic lock and appends |
| 52 | * an empty TLV header to the socket buffer for use a container for all | 53 | * an empty TLV header to the socket buffer for use a container for all |
| @@ -87,6 +88,7 @@ EXPORT_SYMBOL(gnet_stats_start_copy_compat); | |||
| 87 | * @type: TLV type for top level statistic TLV | 88 | * @type: TLV type for top level statistic TLV |
| 88 | * @lock: statistics lock | 89 | * @lock: statistics lock |
| 89 | * @d: dumping handle | 90 | * @d: dumping handle |
| 91 | * @padattr: padding attribute | ||
| 90 | * | 92 | * |
| 91 | * Initializes the dumping handle, grabs the statistic lock and appends | 93 | * Initializes the dumping handle, grabs the statistic lock and appends |
| 92 | * an empty TLV header to the socket buffer for use a container for all | 94 | * an empty TLV header to the socket buffer for use a container for all |
diff --git a/net/core/hwbm.c b/net/core/hwbm.c index 941c28486896..2cab489ae62e 100644 --- a/net/core/hwbm.c +++ b/net/core/hwbm.c | |||
| @@ -55,18 +55,21 @@ int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp) | |||
| 55 | spin_lock_irqsave(&bm_pool->lock, flags); | 55 | spin_lock_irqsave(&bm_pool->lock, flags); |
| 56 | if (bm_pool->buf_num == bm_pool->size) { | 56 | if (bm_pool->buf_num == bm_pool->size) { |
| 57 | pr_warn("pool already filled\n"); | 57 | pr_warn("pool already filled\n"); |
| 58 | spin_unlock_irqrestore(&bm_pool->lock, flags); | ||
| 58 | return bm_pool->buf_num; | 59 | return bm_pool->buf_num; |
| 59 | } | 60 | } |
| 60 | 61 | ||
| 61 | if (buf_num + bm_pool->buf_num > bm_pool->size) { | 62 | if (buf_num + bm_pool->buf_num > bm_pool->size) { |
| 62 | pr_warn("cannot allocate %d buffers for pool\n", | 63 | pr_warn("cannot allocate %d buffers for pool\n", |
| 63 | buf_num); | 64 | buf_num); |
| 65 | spin_unlock_irqrestore(&bm_pool->lock, flags); | ||
| 64 | return 0; | 66 | return 0; |
| 65 | } | 67 | } |
| 66 | 68 | ||
| 67 | if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) { | 69 | if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) { |
| 68 | pr_warn("Adding %d buffers to the %d current buffers will overflow\n", | 70 | pr_warn("Adding %d buffers to the %d current buffers will overflow\n", |
| 69 | buf_num, bm_pool->buf_num); | 71 | buf_num, bm_pool->buf_num); |
| 72 | spin_unlock_irqrestore(&bm_pool->lock, flags); | ||
| 70 | return 0; | 73 | return 0; |
| 71 | } | 74 | } |
| 72 | 75 | ||
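The hwbm_pool_add() hunk adds a spin_unlock_irqrestore() in front of each early return so the pool lock is never leaked. A minimal sketch of the same release-on-every-exit-path rule, with a pthread mutex standing in for the spinlock and a single exit label instead of repeated unlocks (names invented; build with -pthread).

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int buf_num, pool_size = 8;

static int pool_add(unsigned int n)
{
        int ret;

        pthread_mutex_lock(&pool_lock);
        if (buf_num == pool_size) {
                ret = buf_num;          /* already full */
                goto out;               /* never return with the lock held */
        }
        if (n + buf_num > pool_size) {
                ret = 0;                /* request does not fit */
                goto out;
        }
        buf_num += n;
        ret = buf_num;
out:
        pthread_mutex_unlock(&pool_lock);
        return ret;
}

int main(void)
{
        printf("%d\n", pool_add(3));    /* 3 */
        printf("%d\n", pool_add(100));  /* 0, but the lock is still released */
        return 0;
}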
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 2b3f76fe65f4..7a0b616557ab 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/jiffies.h> | 24 | #include <linux/jiffies.h> |
| 25 | #include <linux/pm_runtime.h> | 25 | #include <linux/pm_runtime.h> |
| 26 | #include <linux/of.h> | 26 | #include <linux/of.h> |
| 27 | #include <linux/of_net.h> | ||
| 27 | 28 | ||
| 28 | #include "net-sysfs.h" | 29 | #include "net-sysfs.h" |
| 29 | 30 | ||
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 8604ae245960..8b02df0d354d 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
| @@ -2245,10 +2245,8 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) | |||
| 2245 | hrtimer_set_expires(&t.timer, spin_until); | 2245 | hrtimer_set_expires(&t.timer, spin_until); |
| 2246 | 2246 | ||
| 2247 | remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer)); | 2247 | remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer)); |
| 2248 | if (remaining <= 0) { | 2248 | if (remaining <= 0) |
| 2249 | pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); | 2249 | goto out; |
| 2250 | return; | ||
| 2251 | } | ||
| 2252 | 2250 | ||
| 2253 | start_time = ktime_get(); | 2251 | start_time = ktime_get(); |
| 2254 | if (remaining < 100000) { | 2252 | if (remaining < 100000) { |
| @@ -2273,7 +2271,9 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until) | |||
| 2273 | } | 2271 | } |
| 2274 | 2272 | ||
| 2275 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); | 2273 | pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time)); |
| 2274 | out: | ||
| 2276 | pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); | 2275 | pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); |
| 2276 | destroy_hrtimer_on_stack(&t.timer); | ||
| 2277 | } | 2277 | } |
| 2278 | 2278 | ||
| 2279 | static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) | 2279 | static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev) |
diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index ca207dbf673b..116187b5c267 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c | |||
| @@ -1289,8 +1289,8 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla, | |||
| 1289 | nl802154_dev_addr_policy)) | 1289 | nl802154_dev_addr_policy)) |
| 1290 | return -EINVAL; | 1290 | return -EINVAL; |
| 1291 | 1291 | ||
| 1292 | if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] && | 1292 | if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] || |
| 1293 | !attrs[NL802154_DEV_ADDR_ATTR_MODE] && | 1293 | !attrs[NL802154_DEV_ADDR_ATTR_MODE] || |
| 1294 | !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] || | 1294 | !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] || |
| 1295 | attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])) | 1295 | attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])) |
| 1296 | return -EINVAL; | 1296 | return -EINVAL; |
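The nl802154 fix swaps && for || so that a device-address attribute set is rejected when any mandatory attribute is missing, not only when all of them are. A reduced illustration of the distinction (struct and field names invented).

#include <stdbool.h>
#include <stdio.h>

struct req { bool has_pan_id, has_mode, has_addr; };

static int validate(const struct req *r)
{
        /* any missing mandatory field -> reject */
        if (!r->has_pan_id || !r->has_mode || !r->has_addr)
                return -1;
        return 0;
}

int main(void)
{
        struct req r = { .has_pan_id = true, .has_mode = false, .has_addr = true };

        /* with &&, this half-filled request would wrongly pass */
        printf("validate: %d\n", validate(&r));
        return 0;
}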
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 377424ea17a4..d39e9e47a26e 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
| @@ -1681,6 +1681,14 @@ static __net_init int inet_init_net(struct net *net) | |||
| 1681 | */ | 1681 | */ |
| 1682 | net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1); | 1682 | net->ipv4.ping_group_range.range[0] = make_kgid(&init_user_ns, 1); |
| 1683 | net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0); | 1683 | net->ipv4.ping_group_range.range[1] = make_kgid(&init_user_ns, 0); |
| 1684 | |||
| 1685 | /* Default values for sysctl-controlled parameters. | ||
| 1686 | * We set them here, in case sysctl is not compiled. | ||
| 1687 | */ | ||
| 1688 | net->ipv4.sysctl_ip_default_ttl = IPDEFTTL; | ||
| 1689 | net->ipv4.sysctl_ip_dynaddr = 0; | ||
| 1690 | net->ipv4.sysctl_ip_early_demux = 1; | ||
| 1691 | |||
| 1684 | return 0; | 1692 | return 0; |
| 1685 | } | 1693 | } |
| 1686 | 1694 | ||
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index bb0419582b8d..1cb67de106fe 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c | |||
| @@ -999,10 +999,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) | |||
| 999 | if (!net->ipv4.sysctl_local_reserved_ports) | 999 | if (!net->ipv4.sysctl_local_reserved_ports) |
| 1000 | goto err_ports; | 1000 | goto err_ports; |
| 1001 | 1001 | ||
| 1002 | net->ipv4.sysctl_ip_default_ttl = IPDEFTTL; | ||
| 1003 | net->ipv4.sysctl_ip_dynaddr = 0; | ||
| 1004 | net->ipv4.sysctl_ip_early_demux = 1; | ||
| 1005 | |||
| 1006 | return 0; | 1002 | return 0; |
| 1007 | 1003 | ||
| 1008 | err_ports: | 1004 | err_ports: |
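Taken together, the af_inet.c and sysctl_net_ipv4.c hunks move the defaults for ip_default_ttl, ip_dynaddr and ip_early_demux from the sysctl registration path into per-netns init, so the values exist even when sysctl support is compiled out. A sketch of that shape with an invented config struct (IPDEFTTL is 64 in the kernel headers).

#include <stdio.h>

#define IPDEFTTL 64

struct netns_cfg { int ip_default_ttl, ip_dynaddr, ip_early_demux; };

/* always runs: every namespace gets usable defaults */
static void netns_init(struct netns_cfg *c)
{
        c->ip_default_ttl = IPDEFTTL;
        c->ip_dynaddr = 0;
        c->ip_early_demux = 1;
}

#ifdef WITH_SYSCTL
/* the optional layer only exposes/overrides the values, it no longer owns them */
static void sysctl_register(struct netns_cfg *c) { (void)c; }
#endif

int main(void)
{
        struct netns_cfg cfg;

        netns_init(&cfg);
#ifdef WITH_SYSCTL
        sysctl_register(&cfg);
#endif
        printf("ttl=%d early_demux=%d\n", cfg.ip_default_ttl, cfg.ip_early_demux);
        return 0;
}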
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index d56c0559b477..0ff31d97d485 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
| @@ -1618,12 +1618,12 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
| 1618 | } | 1618 | } |
| 1619 | } | 1619 | } |
| 1620 | 1620 | ||
| 1621 | if (rcu_access_pointer(sk->sk_filter)) { | 1621 | if (rcu_access_pointer(sk->sk_filter) && |
| 1622 | if (udp_lib_checksum_complete(skb)) | 1622 | udp_lib_checksum_complete(skb)) |
| 1623 | goto csum_error; | 1623 | goto csum_error; |
| 1624 | if (sk_filter(sk, skb)) | 1624 | |
| 1625 | goto drop; | 1625 | if (sk_filter(sk, skb)) |
| 1626 | } | 1626 | goto drop; |
| 1627 | 1627 | ||
| 1628 | udp_csum_pull_header(skb); | 1628 | udp_csum_pull_header(skb); |
| 1629 | if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { | 1629 | if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { |
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 3f8411328de5..2343e4f2e0bf 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig | |||
| @@ -232,6 +232,15 @@ config IPV6_GRE | |||
| 232 | 232 | ||
| 233 | Saying M here will produce a module called ip6_gre. If unsure, say N. | 233 | Saying M here will produce a module called ip6_gre. If unsure, say N. |
| 234 | 234 | ||
| 235 | config IPV6_FOU | ||
| 236 | tristate | ||
| 237 | default NET_FOU && IPV6 | ||
| 238 | |||
| 239 | config IPV6_FOU_TUNNEL | ||
| 240 | tristate | ||
| 241 | default NET_FOU_IP_TUNNELS && IPV6_FOU | ||
| 242 | select IPV6_TUNNEL | ||
| 243 | |||
| 235 | config IPV6_MULTIPLE_TABLES | 244 | config IPV6_MULTIPLE_TABLES |
| 236 | bool "IPv6: Multiple Routing Tables" | 245 | bool "IPv6: Multiple Routing Tables" |
| 237 | select FIB_RULES | 246 | select FIB_RULES |
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index 7ec3129c9ace..6d8ea099213e 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile | |||
| @@ -42,7 +42,7 @@ obj-$(CONFIG_IPV6_VTI) += ip6_vti.o | |||
| 42 | obj-$(CONFIG_IPV6_SIT) += sit.o | 42 | obj-$(CONFIG_IPV6_SIT) += sit.o |
| 43 | obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o | 43 | obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o |
| 44 | obj-$(CONFIG_IPV6_GRE) += ip6_gre.o | 44 | obj-$(CONFIG_IPV6_GRE) += ip6_gre.o |
| 45 | obj-$(CONFIG_NET_FOU) += fou6.o | 45 | obj-$(CONFIG_IPV6_FOU) += fou6.o |
| 46 | 46 | ||
| 47 | obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o | 47 | obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o |
| 48 | obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload) | 48 | obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload) |
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c index c972d0b52579..9ea249b9451e 100644 --- a/net/ipv6/fou6.c +++ b/net/ipv6/fou6.c | |||
| @@ -69,7 +69,7 @@ int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, | |||
| 69 | } | 69 | } |
| 70 | EXPORT_SYMBOL(gue6_build_header); | 70 | EXPORT_SYMBOL(gue6_build_header); |
| 71 | 71 | ||
| 72 | #ifdef CONFIG_NET_FOU_IP_TUNNELS | 72 | #if IS_ENABLED(CONFIG_IPV6_FOU_TUNNEL) |
| 73 | 73 | ||
| 74 | static const struct ip6_tnl_encap_ops fou_ip6tun_ops = { | 74 | static const struct ip6_tnl_encap_ops fou_ip6tun_ops = { |
| 75 | .encap_hlen = fou_encap_hlen, | 75 | .encap_hlen = fou_encap_hlen, |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index af503f518278..fdc9de276ab1 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
| @@ -712,6 +712,7 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) | |||
| 712 | fl6->daddr = p->raddr; | 712 | fl6->daddr = p->raddr; |
| 713 | fl6->flowi6_oif = p->link; | 713 | fl6->flowi6_oif = p->link; |
| 714 | fl6->flowlabel = 0; | 714 | fl6->flowlabel = 0; |
| 715 | fl6->flowi6_proto = IPPROTO_GRE; | ||
| 715 | 716 | ||
| 716 | if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS)) | 717 | if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS)) |
| 717 | fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo; | 718 | fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo; |
| @@ -1027,6 +1028,8 @@ static int ip6gre_tunnel_init_common(struct net_device *dev) | |||
| 1027 | 1028 | ||
| 1028 | dev->hard_header_len = LL_MAX_HEADER + t_hlen; | 1029 | dev->hard_header_len = LL_MAX_HEADER + t_hlen; |
| 1029 | dev->mtu = ETH_DATA_LEN - t_hlen; | 1030 | dev->mtu = ETH_DATA_LEN - t_hlen; |
| 1031 | if (dev->type == ARPHRD_ETHER) | ||
| 1032 | dev->mtu -= ETH_HLEN; | ||
| 1030 | if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) | 1033 | if (!(tunnel->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) |
| 1031 | dev->mtu -= 8; | 1034 | dev->mtu -= 8; |
| 1032 | 1035 | ||
| @@ -1253,6 +1256,8 @@ static int ip6gre_tap_init(struct net_device *dev) | |||
| 1253 | if (ret) | 1256 | if (ret) |
| 1254 | return ret; | 1257 | return ret; |
| 1255 | 1258 | ||
| 1259 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | ||
| 1260 | |||
| 1256 | tunnel = netdev_priv(dev); | 1261 | tunnel = netdev_priv(dev); |
| 1257 | 1262 | ||
| 1258 | ip6gre_tnl_link_config(tunnel, 1); | 1263 | ip6gre_tnl_link_config(tunnel, 1); |
| @@ -1286,6 +1291,7 @@ static void ip6gre_tap_setup(struct net_device *dev) | |||
| 1286 | 1291 | ||
| 1287 | dev->features |= NETIF_F_NETNS_LOCAL; | 1292 | dev->features |= NETIF_F_NETNS_LOCAL; |
| 1288 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; | 1293 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
| 1294 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | ||
| 1289 | } | 1295 | } |
| 1290 | 1296 | ||
| 1291 | static bool ip6gre_netlink_encap_parms(struct nlattr *data[], | 1297 | static bool ip6gre_netlink_encap_parms(struct nlattr *data[], |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index cbf127ae7c67..635b8d340cdb 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -1071,17 +1071,12 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, | |||
| 1071 | const struct in6_addr *final_dst) | 1071 | const struct in6_addr *final_dst) |
| 1072 | { | 1072 | { |
| 1073 | struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); | 1073 | struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); |
| 1074 | int err; | ||
| 1075 | 1074 | ||
| 1076 | dst = ip6_sk_dst_check(sk, dst, fl6); | 1075 | dst = ip6_sk_dst_check(sk, dst, fl6); |
| 1076 | if (!dst) | ||
| 1077 | dst = ip6_dst_lookup_flow(sk, fl6, final_dst); | ||
| 1077 | 1078 | ||
| 1078 | err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6); | 1079 | return dst; |
| 1079 | if (err) | ||
| 1080 | return ERR_PTR(err); | ||
| 1081 | if (final_dst) | ||
| 1082 | fl6->daddr = *final_dst; | ||
| 1083 | |||
| 1084 | return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); | ||
| 1085 | } | 1080 | } |
| 1086 | EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); | 1081 | EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); |
| 1087 | 1082 | ||
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c index 6989c70ae29f..4a84b5ad9ecb 100644 --- a/net/ipv6/netfilter/nf_dup_ipv6.c +++ b/net/ipv6/netfilter/nf_dup_ipv6.c | |||
| @@ -33,6 +33,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb, | |||
| 33 | fl6.daddr = *gw; | 33 | fl6.daddr = *gw; |
| 34 | fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) | | 34 | fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) | |
| 35 | (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]); | 35 | (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]); |
| 36 | fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH; | ||
| 36 | dst = ip6_route_output(net, NULL, &fl6); | 37 | dst = ip6_route_output(net, NULL, &fl6); |
| 37 | if (dst->error) { | 38 | if (dst->error) { |
| 38 | dst_release(dst); | 39 | dst_release(dst); |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 79e33e02f11a..f36c2d076fce 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -1721,7 +1721,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) | |||
| 1721 | destp = ntohs(inet->inet_dport); | 1721 | destp = ntohs(inet->inet_dport); |
| 1722 | srcp = ntohs(inet->inet_sport); | 1722 | srcp = ntohs(inet->inet_sport); |
| 1723 | 1723 | ||
| 1724 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) { | 1724 | if (icsk->icsk_pending == ICSK_TIME_RETRANS || |
| 1725 | icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || | ||
| 1726 | icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { | ||
| 1725 | timer_active = 1; | 1727 | timer_active = 1; |
| 1726 | timer_expires = icsk->icsk_timeout; | 1728 | timer_expires = icsk->icsk_timeout; |
| 1727 | } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { | 1729 | } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 2da1896af934..f421c9f23c5b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
| @@ -653,12 +653,12 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
| 653 | } | 653 | } |
| 654 | } | 654 | } |
| 655 | 655 | ||
| 656 | if (rcu_access_pointer(sk->sk_filter)) { | 656 | if (rcu_access_pointer(sk->sk_filter) && |
| 657 | if (udp_lib_checksum_complete(skb)) | 657 | udp_lib_checksum_complete(skb)) |
| 658 | goto csum_error; | 658 | goto csum_error; |
| 659 | if (sk_filter(sk, skb)) | 659 | |
| 660 | goto drop; | 660 | if (sk_filter(sk, skb)) |
| 661 | } | 661 | goto drop; |
| 662 | 662 | ||
| 663 | udp_csum_pull_header(skb); | 663 | udp_csum_pull_header(skb); |
| 664 | if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { | 664 | if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 6edfa9980314..1e40dacaa137 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
| @@ -1581,7 +1581,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
| 1581 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ | 1581 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ |
| 1582 | tunnel->encap = encap; | 1582 | tunnel->encap = encap; |
| 1583 | if (encap == L2TP_ENCAPTYPE_UDP) { | 1583 | if (encap == L2TP_ENCAPTYPE_UDP) { |
| 1584 | struct udp_tunnel_sock_cfg udp_cfg; | 1584 | struct udp_tunnel_sock_cfg udp_cfg = { }; |
| 1585 | 1585 | ||
| 1586 | udp_cfg.sk_user_data = tunnel; | 1586 | udp_cfg.sk_user_data = tunnel; |
| 1587 | udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP; | 1587 | udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP; |
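The l2tp_core.c change initializes the on-stack udp_tunnel_sock_cfg with an empty-brace initializer before filling in selected members, so anything the caller does not set is deterministically zero rather than stack garbage. A small sketch of why that matters when the consumer treats zero/NULL as "unused" (struct invented for the example; { 0 } is the plain-C spelling of the kernel's { }).

#include <stdio.h>

struct tunnel_cfg {
        void *user_data;
        int   encap_type;
        void (*encap_destroy)(void);    /* optional hook; NULL means none */
};

static void setup_tunnel(const struct tunnel_cfg *cfg)
{
        if (cfg->encap_destroy)         /* only safe if "unset" really is NULL */
                cfg->encap_destroy();
        printf("encap_type=%d\n", cfg->encap_type);
}

int main(void)
{
        struct tunnel_cfg cfg = { 0 };  /* members left unset are zeroed */

        cfg.user_data = NULL;
        cfg.encap_type = 3;
        setup_tunnel(&cfg);
        return 0;
}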
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index c6f5df1bed12..6c54e03fe9c1 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
| @@ -128,6 +128,7 @@ static inline struct sock *l2tp_ip6_bind_lookup(struct net *net, | |||
| 128 | */ | 128 | */ |
| 129 | static int l2tp_ip6_recv(struct sk_buff *skb) | 129 | static int l2tp_ip6_recv(struct sk_buff *skb) |
| 130 | { | 130 | { |
| 131 | struct net *net = dev_net(skb->dev); | ||
| 131 | struct sock *sk; | 132 | struct sock *sk; |
| 132 | u32 session_id; | 133 | u32 session_id; |
| 133 | u32 tunnel_id; | 134 | u32 tunnel_id; |
| @@ -154,7 +155,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb) | |||
| 154 | } | 155 | } |
| 155 | 156 | ||
| 156 | /* Ok, this is a data packet. Lookup the session. */ | 157 | /* Ok, this is a data packet. Lookup the session. */ |
| 157 | session = l2tp_session_find(&init_net, NULL, session_id); | 158 | session = l2tp_session_find(net, NULL, session_id); |
| 158 | if (session == NULL) | 159 | if (session == NULL) |
| 159 | goto discard; | 160 | goto discard; |
| 160 | 161 | ||
| @@ -188,14 +189,14 @@ pass_up: | |||
| 188 | goto discard; | 189 | goto discard; |
| 189 | 190 | ||
| 190 | tunnel_id = ntohl(*(__be32 *) &skb->data[4]); | 191 | tunnel_id = ntohl(*(__be32 *) &skb->data[4]); |
| 191 | tunnel = l2tp_tunnel_find(&init_net, tunnel_id); | 192 | tunnel = l2tp_tunnel_find(net, tunnel_id); |
| 192 | if (tunnel != NULL) | 193 | if (tunnel != NULL) |
| 193 | sk = tunnel->sock; | 194 | sk = tunnel->sock; |
| 194 | else { | 195 | else { |
| 195 | struct ipv6hdr *iph = ipv6_hdr(skb); | 196 | struct ipv6hdr *iph = ipv6_hdr(skb); |
| 196 | 197 | ||
| 197 | read_lock_bh(&l2tp_ip6_lock); | 198 | read_lock_bh(&l2tp_ip6_lock); |
| 198 | sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr, | 199 | sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, |
| 199 | 0, tunnel_id); | 200 | 0, tunnel_id); |
| 200 | read_unlock_bh(&l2tp_ip6_lock); | 201 | read_unlock_bh(&l2tp_ip6_lock); |
| 201 | } | 202 | } |
| @@ -263,6 +264,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
| 263 | struct inet_sock *inet = inet_sk(sk); | 264 | struct inet_sock *inet = inet_sk(sk); |
| 264 | struct ipv6_pinfo *np = inet6_sk(sk); | 265 | struct ipv6_pinfo *np = inet6_sk(sk); |
| 265 | struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr; | 266 | struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr; |
| 267 | struct net *net = sock_net(sk); | ||
| 266 | __be32 v4addr = 0; | 268 | __be32 v4addr = 0; |
| 267 | int addr_type; | 269 | int addr_type; |
| 268 | int err; | 270 | int err; |
| @@ -286,7 +288,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
| 286 | 288 | ||
| 287 | err = -EADDRINUSE; | 289 | err = -EADDRINUSE; |
| 288 | read_lock_bh(&l2tp_ip6_lock); | 290 | read_lock_bh(&l2tp_ip6_lock); |
| 289 | if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr, | 291 | if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, |
| 290 | sk->sk_bound_dev_if, addr->l2tp_conn_id)) | 292 | sk->sk_bound_dev_if, addr->l2tp_conn_id)) |
| 291 | goto out_in_use; | 293 | goto out_in_use; |
| 292 | read_unlock_bh(&l2tp_ip6_lock); | 294 | read_unlock_bh(&l2tp_ip6_lock); |
| @@ -456,7 +458,7 @@ static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb) | |||
| 456 | return 0; | 458 | return 0; |
| 457 | 459 | ||
| 458 | drop: | 460 | drop: |
| 459 | IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS); | 461 | IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); |
| 460 | kfree_skb(skb); | 462 | kfree_skb(skb); |
| 461 | return -1; | 463 | return -1; |
| 462 | } | 464 | } |
diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c index 5dba899131b3..182470847fcf 100644 --- a/net/lapb/lapb_in.c +++ b/net/lapb/lapb_in.c | |||
| @@ -444,10 +444,9 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb, | |||
| 444 | break; | 444 | break; |
| 445 | 445 | ||
| 446 | case LAPB_FRMR: | 446 | case LAPB_FRMR: |
| 447 | lapb_dbg(1, "(%p) S3 RX FRMR(%d) %02X %02X %02X %02X %02X\n", | 447 | lapb_dbg(1, "(%p) S3 RX FRMR(%d) %5ph\n", |
| 448 | lapb->dev, frame->pf, | 448 | lapb->dev, frame->pf, |
| 449 | skb->data[0], skb->data[1], skb->data[2], | 449 | skb->data); |
| 450 | skb->data[3], skb->data[4]); | ||
| 451 | lapb_establish_data_link(lapb); | 450 | lapb_establish_data_link(lapb); |
| 452 | lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev); | 451 | lapb_dbg(0, "(%p) S3 -> S1\n", lapb->dev); |
| 453 | lapb_requeue_frames(lapb); | 452 | lapb_requeue_frames(lapb); |
diff --git a/net/lapb/lapb_out.c b/net/lapb/lapb_out.c index ba4d015bd1a6..482c94d9d958 100644 --- a/net/lapb/lapb_out.c +++ b/net/lapb/lapb_out.c | |||
| @@ -148,9 +148,7 @@ void lapb_transmit_buffer(struct lapb_cb *lapb, struct sk_buff *skb, int type) | |||
| 148 | } | 148 | } |
| 149 | } | 149 | } |
| 150 | 150 | ||
| 151 | lapb_dbg(2, "(%p) S%d TX %02X %02X %02X\n", | 151 | lapb_dbg(2, "(%p) S%d TX %3ph\n", lapb->dev, lapb->state, skb->data); |
| 152 | lapb->dev, lapb->state, | ||
| 153 | skb->data[0], skb->data[1], skb->data[2]); | ||
| 154 | 152 | ||
| 155 | if (!lapb_data_transmit(lapb, skb)) | 153 | if (!lapb_data_transmit(lapb, skb)) |
| 156 | kfree_skb(skb); | 154 | kfree_skb(skb); |
diff --git a/net/lapb/lapb_subr.c b/net/lapb/lapb_subr.c index 9d0a426eccbb..3c1914df641f 100644 --- a/net/lapb/lapb_subr.c +++ b/net/lapb/lapb_subr.c | |||
| @@ -113,9 +113,7 @@ int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb, | |||
| 113 | { | 113 | { |
| 114 | frame->type = LAPB_ILLEGAL; | 114 | frame->type = LAPB_ILLEGAL; |
| 115 | 115 | ||
| 116 | lapb_dbg(2, "(%p) S%d RX %02X %02X %02X\n", | 116 | lapb_dbg(2, "(%p) S%d RX %3ph\n", lapb->dev, lapb->state, skb->data); |
| 117 | lapb->dev, lapb->state, | ||
| 118 | skb->data[0], skb->data[1], skb->data[2]); | ||
| 119 | 117 | ||
| 120 | /* We always need to look at 2 bytes, sometimes we need | 118 | /* We always need to look at 2 bytes, sometimes we need |
| 121 | * to look at 3 and those cases are handled below. | 119 | * to look at 3 and those cases are handled below. |
| @@ -284,10 +282,9 @@ void lapb_transmit_frmr(struct lapb_cb *lapb) | |||
| 284 | dptr++; | 282 | dptr++; |
| 285 | *dptr++ = lapb->frmr_type; | 283 | *dptr++ = lapb->frmr_type; |
| 286 | 284 | ||
| 287 | lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X %02X %02X\n", | 285 | lapb_dbg(1, "(%p) S%d TX FRMR %5ph\n", |
| 288 | lapb->dev, lapb->state, | 286 | lapb->dev, lapb->state, |
| 289 | skb->data[1], skb->data[2], skb->data[3], | 287 | &skb->data[1]); |
| 290 | skb->data[4], skb->data[5]); | ||
| 291 | } else { | 288 | } else { |
| 292 | dptr = skb_put(skb, 4); | 289 | dptr = skb_put(skb, 4); |
| 293 | *dptr++ = LAPB_FRMR; | 290 | *dptr++ = LAPB_FRMR; |
| @@ -299,9 +296,8 @@ void lapb_transmit_frmr(struct lapb_cb *lapb) | |||
| 299 | dptr++; | 296 | dptr++; |
| 300 | *dptr++ = lapb->frmr_type; | 297 | *dptr++ = lapb->frmr_type; |
| 301 | 298 | ||
| 302 | lapb_dbg(1, "(%p) S%d TX FRMR %02X %02X %02X\n", | 299 | lapb_dbg(1, "(%p) S%d TX FRMR %3ph\n", |
| 303 | lapb->dev, lapb->state, skb->data[1], | 300 | lapb->dev, lapb->state, &skb->data[1]); |
| 304 | skb->data[2], skb->data[3]); | ||
| 305 | } | 301 | } |
| 306 | 302 | ||
| 307 | lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE); | 303 | lapb_transmit_buffer(lapb, skb, LAPB_RESPONSE); |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 4c6404e1ad6e..21b1fdf5d01d 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
| @@ -161,6 +161,10 @@ void mesh_sta_cleanup(struct sta_info *sta) | |||
| 161 | del_timer_sync(&sta->mesh->plink_timer); | 161 | del_timer_sync(&sta->mesh->plink_timer); |
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | /* make sure no readers can access nexthop sta from here on */ | ||
| 165 | mesh_path_flush_by_nexthop(sta); | ||
| 166 | synchronize_net(); | ||
| 167 | |||
| 164 | if (changed) | 168 | if (changed) |
| 165 | ieee80211_mbss_info_change_notify(sdata, changed); | 169 | ieee80211_mbss_info_change_notify(sdata, changed); |
| 166 | } | 170 | } |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index c8b8ccc370eb..78b0ef32dddd 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
| @@ -280,7 +280,7 @@ struct ieee80211_fast_tx { | |||
| 280 | u8 sa_offs, da_offs, pn_offs; | 280 | u8 sa_offs, da_offs, pn_offs; |
| 281 | u8 band; | 281 | u8 band; |
| 282 | u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV + | 282 | u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV + |
| 283 | sizeof(rfc1042_header)]; | 283 | sizeof(rfc1042_header)] __aligned(2); |
| 284 | 284 | ||
| 285 | struct rcu_head rcu_head; | 285 | struct rcu_head rcu_head; |
| 286 | }; | 286 | }; |
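The sta_info.h hunk adds a two-byte alignment attribute to the prebuilt fast-xmit header buffer, presumably so 16-bit accesses into it stay naturally aligned. Illustration of the attribute on an invented struct, using the GCC/Clang spelling (C11 _Alignas is the portable alternative).

#include <stdint.h>
#include <stdio.h>

struct frame_tmpl {
        uint8_t flags;
        /* keep 16-bit loads/stores into the header naturally aligned */
        uint8_t hdr[32] __attribute__((aligned(2)));
};

int main(void)
{
        struct frame_tmpl t;

        printf("hdr 2-byte aligned: %d\n", (int)(((uintptr_t)t.hdr % 2) == 0));
        return 0;
}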
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 2cb3c626cd43..096a45103f14 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c | |||
| @@ -762,7 +762,7 @@ static int expire_quiescent_template(struct netns_ipvs *ipvs, | |||
| 762 | * If available, return 1, otherwise invalidate this connection | 762 | * If available, return 1, otherwise invalidate this connection |
| 763 | * template and return 0. | 763 | * template and return 0. |
| 764 | */ | 764 | */ |
| 765 | int ip_vs_check_template(struct ip_vs_conn *ct) | 765 | int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest) |
| 766 | { | 766 | { |
| 767 | struct ip_vs_dest *dest = ct->dest; | 767 | struct ip_vs_dest *dest = ct->dest; |
| 768 | struct netns_ipvs *ipvs = ct->ipvs; | 768 | struct netns_ipvs *ipvs = ct->ipvs; |
| @@ -772,7 +772,8 @@ int ip_vs_check_template(struct ip_vs_conn *ct) | |||
| 772 | */ | 772 | */ |
| 773 | if ((dest == NULL) || | 773 | if ((dest == NULL) || |
| 774 | !(dest->flags & IP_VS_DEST_F_AVAILABLE) || | 774 | !(dest->flags & IP_VS_DEST_F_AVAILABLE) || |
| 775 | expire_quiescent_template(ipvs, dest)) { | 775 | expire_quiescent_template(ipvs, dest) || |
| 776 | (cdest && (dest != cdest))) { | ||
| 776 | IP_VS_DBG_BUF(9, "check_template: dest not available for " | 777 | IP_VS_DBG_BUF(9, "check_template: dest not available for " |
| 777 | "protocol %s s:%s:%d v:%s:%d " | 778 | "protocol %s s:%s:%d v:%s:%d " |
| 778 | "-> d:%s:%d\n", | 779 | "-> d:%s:%d\n", |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 1207f20d24e4..2c1b498a7a27 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
| @@ -321,7 +321,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
| 321 | 321 | ||
| 322 | /* Check if a template already exists */ | 322 | /* Check if a template already exists */ |
| 323 | ct = ip_vs_ct_in_get(¶m); | 323 | ct = ip_vs_ct_in_get(¶m); |
| 324 | if (!ct || !ip_vs_check_template(ct)) { | 324 | if (!ct || !ip_vs_check_template(ct, NULL)) { |
| 325 | struct ip_vs_scheduler *sched; | 325 | struct ip_vs_scheduler *sched; |
| 326 | 326 | ||
| 327 | /* | 327 | /* |
| @@ -1154,7 +1154,8 @@ struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc, | |||
| 1154 | vport, ¶m) < 0) | 1154 | vport, ¶m) < 0) |
| 1155 | return NULL; | 1155 | return NULL; |
| 1156 | ct = ip_vs_ct_in_get(¶m); | 1156 | ct = ip_vs_ct_in_get(¶m); |
| 1157 | if (!ct) { | 1157 | /* check if template exists and points to the same dest */ |
| 1158 | if (!ct || !ip_vs_check_template(ct, dest)) { | ||
| 1158 | ct = ip_vs_conn_new(¶m, dest->af, daddr, dport, | 1159 | ct = ip_vs_conn_new(¶m, dest->af, daddr, dport, |
| 1159 | IP_VS_CONN_F_TEMPLATE, dest, 0); | 1160 | IP_VS_CONN_F_TEMPLATE, dest, 0); |
| 1160 | if (!ct) { | 1161 | if (!ct) { |
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index 883c691ec8d0..19efeba02abb 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c | |||
| @@ -632,6 +632,7 @@ static int __init nf_conntrack_ftp_init(void) | |||
| 632 | if (ret) { | 632 | if (ret) { |
| 633 | pr_err("failed to register helper for pf: %d port: %d\n", | 633 | pr_err("failed to register helper for pf: %d port: %d\n", |
| 634 | ftp[i][j].tuple.src.l3num, ports[i]); | 634 | ftp[i][j].tuple.src.l3num, ports[i]); |
| 635 | ports_c = i; | ||
| 635 | nf_conntrack_ftp_fini(); | 636 | nf_conntrack_ftp_fini(); |
| 636 | return ret; | 637 | return ret; |
| 637 | } | 638 | } |
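This one-line "ports_c = i;" fix, repeated below for the irc, sane, sip and tftp helpers, records how many helpers were successfully registered before calling the _fini() routine, so cleanup unwinds only those. Generic sketch of the partial-unwind pattern (all names invented).

#include <stdio.h>

#define NPORTS 4

static unsigned int ports_c;            /* how many entries are live */

static int  register_one(unsigned int i)   { return i == 2 ? -1 : 0; }
static void unregister_one(unsigned int i) { printf("unregister %u\n", i); }

static void helpers_fini(void)
{
        for (unsigned int i = 0; i < ports_c; i++)
                unregister_one(i);
}

static int helpers_init(void)
{
        for (unsigned int i = 0; i < NPORTS; i++) {
                if (register_one(i)) {
                        ports_c = i;    /* only i entries were registered */
                        helpers_fini();
                        return -1;
                }
        }
        ports_c = NPORTS;
        return 0;
}

int main(void)
{
        printf("init: %d\n", helpers_init());
        return 0;
}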
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index f703adb7e5f7..196cb39649e1 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c | |||
| @@ -361,9 +361,10 @@ EXPORT_SYMBOL_GPL(nf_ct_helper_log); | |||
| 361 | 361 | ||
| 362 | int nf_conntrack_helper_register(struct nf_conntrack_helper *me) | 362 | int nf_conntrack_helper_register(struct nf_conntrack_helper *me) |
| 363 | { | 363 | { |
| 364 | int ret = 0; | 364 | struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) }; |
| 365 | struct nf_conntrack_helper *cur; | ||
| 366 | unsigned int h = helper_hash(&me->tuple); | 365 | unsigned int h = helper_hash(&me->tuple); |
| 366 | struct nf_conntrack_helper *cur; | ||
| 367 | int ret = 0; | ||
| 367 | 368 | ||
| 368 | BUG_ON(me->expect_policy == NULL); | 369 | BUG_ON(me->expect_policy == NULL); |
| 369 | BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES); | 370 | BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES); |
| @@ -371,9 +372,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me) | |||
| 371 | 372 | ||
| 372 | mutex_lock(&nf_ct_helper_mutex); | 373 | mutex_lock(&nf_ct_helper_mutex); |
| 373 | hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) { | 374 | hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) { |
| 374 | if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 && | 375 | if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) { |
| 375 | cur->tuple.src.l3num == me->tuple.src.l3num && | ||
| 376 | cur->tuple.dst.protonum == me->tuple.dst.protonum) { | ||
| 377 | ret = -EEXIST; | 376 | ret = -EEXIST; |
| 378 | goto out; | 377 | goto out; |
| 379 | } | 378 | } |
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index 8b6da2719600..f97ac61d2536 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c | |||
| @@ -271,6 +271,7 @@ static int __init nf_conntrack_irc_init(void) | |||
| 271 | if (ret) { | 271 | if (ret) { |
| 272 | pr_err("failed to register helper for pf: %u port: %u\n", | 272 | pr_err("failed to register helper for pf: %u port: %u\n", |
| 273 | irc[i].tuple.src.l3num, ports[i]); | 273 | irc[i].tuple.src.l3num, ports[i]); |
| 274 | ports_c = i; | ||
| 274 | nf_conntrack_irc_fini(); | 275 | nf_conntrack_irc_fini(); |
| 275 | return ret; | 276 | return ret; |
| 276 | } | 277 | } |
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c index 7523a575f6d1..3fcbaab83b3d 100644 --- a/net/netfilter/nf_conntrack_sane.c +++ b/net/netfilter/nf_conntrack_sane.c | |||
| @@ -223,6 +223,7 @@ static int __init nf_conntrack_sane_init(void) | |||
| 223 | if (ret) { | 223 | if (ret) { |
| 224 | pr_err("failed to register helper for pf: %d port: %d\n", | 224 | pr_err("failed to register helper for pf: %d port: %d\n", |
| 225 | sane[i][j].tuple.src.l3num, ports[i]); | 225 | sane[i][j].tuple.src.l3num, ports[i]); |
| 226 | ports_c = i; | ||
| 226 | nf_conntrack_sane_fini(); | 227 | nf_conntrack_sane_fini(); |
| 227 | return ret; | 228 | return ret; |
| 228 | } | 229 | } |
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 3e06402739e0..f72ba5587588 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
| @@ -1669,6 +1669,7 @@ static int __init nf_conntrack_sip_init(void) | |||
| 1669 | if (ret) { | 1669 | if (ret) { |
| 1670 | pr_err("failed to register helper for pf: %u port: %u\n", | 1670 | pr_err("failed to register helper for pf: %u port: %u\n", |
| 1671 | sip[i][j].tuple.src.l3num, ports[i]); | 1671 | sip[i][j].tuple.src.l3num, ports[i]); |
| 1672 | ports_c = i; | ||
| 1672 | nf_conntrack_sip_fini(); | 1673 | nf_conntrack_sip_fini(); |
| 1673 | return ret; | 1674 | return ret; |
| 1674 | } | 1675 | } |
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index f87e84ebcec3..c026c472ea80 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
| @@ -487,8 +487,6 @@ static struct ctl_table nf_ct_sysctl_table[] = { | |||
| 487 | { } | 487 | { } |
| 488 | }; | 488 | }; |
| 489 | 489 | ||
| 490 | #define NET_NF_CONNTRACK_MAX 2089 | ||
| 491 | |||
| 492 | static struct ctl_table nf_ct_netfilter_table[] = { | 490 | static struct ctl_table nf_ct_netfilter_table[] = { |
| 493 | { | 491 | { |
| 494 | .procname = "nf_conntrack_max", | 492 | .procname = "nf_conntrack_max", |
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c index 36f964066461..2e65b5430fba 100644 --- a/net/netfilter/nf_conntrack_tftp.c +++ b/net/netfilter/nf_conntrack_tftp.c | |||
| @@ -142,6 +142,7 @@ static int __init nf_conntrack_tftp_init(void) | |||
| 142 | if (ret) { | 142 | if (ret) { |
| 143 | pr_err("failed to register helper for pf: %u port: %u\n", | 143 | pr_err("failed to register helper for pf: %u port: %u\n", |
| 144 | tftp[i][j].tuple.src.l3num, ports[i]); | 144 | tftp[i][j].tuple.src.l3num, ports[i]); |
| 145 | ports_c = i; | ||
| 145 | nf_conntrack_tftp_fini(); | 146 | nf_conntrack_tftp_fini(); |
| 146 | return ret; | 147 | return ret; |
| 147 | } | 148 | } |
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index 5baa8e24e6ac..b19ad20a705c 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c | |||
| @@ -26,23 +26,21 @@ | |||
| 26 | * Once the queue is registered it must reinject all packets it | 26 | * Once the queue is registered it must reinject all packets it |
| 27 | * receives, no matter what. | 27 | * receives, no matter what. |
| 28 | */ | 28 | */ |
| 29 | static const struct nf_queue_handler __rcu *queue_handler __read_mostly; | ||
| 30 | 29 | ||
| 31 | /* return EBUSY when somebody else is registered, return EEXIST if the | 30 | /* return EBUSY when somebody else is registered, return EEXIST if the |
| 32 | * same handler is registered, return 0 in case of success. */ | 31 | * same handler is registered, return 0 in case of success. */ |
| 33 | void nf_register_queue_handler(const struct nf_queue_handler *qh) | 32 | void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh) |
| 34 | { | 33 | { |
| 35 | /* should never happen, we only have one queueing backend in kernel */ | 34 | /* should never happen, we only have one queueing backend in kernel */ |
| 36 | WARN_ON(rcu_access_pointer(queue_handler)); | 35 | WARN_ON(rcu_access_pointer(net->nf.queue_handler)); |
| 37 | rcu_assign_pointer(queue_handler, qh); | 36 | rcu_assign_pointer(net->nf.queue_handler, qh); |
| 38 | } | 37 | } |
| 39 | EXPORT_SYMBOL(nf_register_queue_handler); | 38 | EXPORT_SYMBOL(nf_register_queue_handler); |
| 40 | 39 | ||
| 41 | /* The caller must flush their queue before this */ | 40 | /* The caller must flush their queue before this */ |
| 42 | void nf_unregister_queue_handler(void) | 41 | void nf_unregister_queue_handler(struct net *net) |
| 43 | { | 42 | { |
| 44 | RCU_INIT_POINTER(queue_handler, NULL); | 43 | RCU_INIT_POINTER(net->nf.queue_handler, NULL); |
| 45 | synchronize_rcu(); | ||
| 46 | } | 44 | } |
| 47 | EXPORT_SYMBOL(nf_unregister_queue_handler); | 45 | EXPORT_SYMBOL(nf_unregister_queue_handler); |
| 48 | 46 | ||
| @@ -103,7 +101,7 @@ void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops) | |||
| 103 | const struct nf_queue_handler *qh; | 101 | const struct nf_queue_handler *qh; |
| 104 | 102 | ||
| 105 | rcu_read_lock(); | 103 | rcu_read_lock(); |
| 106 | qh = rcu_dereference(queue_handler); | 104 | qh = rcu_dereference(net->nf.queue_handler); |
| 107 | if (qh) | 105 | if (qh) |
| 108 | qh->nf_hook_drop(net, ops); | 106 | qh->nf_hook_drop(net, ops); |
| 109 | rcu_read_unlock(); | 107 | rcu_read_unlock(); |
| @@ -122,9 +120,10 @@ int nf_queue(struct sk_buff *skb, | |||
| 122 | struct nf_queue_entry *entry = NULL; | 120 | struct nf_queue_entry *entry = NULL; |
| 123 | const struct nf_afinfo *afinfo; | 121 | const struct nf_afinfo *afinfo; |
| 124 | const struct nf_queue_handler *qh; | 122 | const struct nf_queue_handler *qh; |
| 123 | struct net *net = state->net; | ||
| 125 | 124 | ||
| 126 | /* QUEUE == DROP if no one is waiting, to be safe. */ | 125 | /* QUEUE == DROP if no one is waiting, to be safe. */ |
| 127 | qh = rcu_dereference(queue_handler); | 126 | qh = rcu_dereference(net->nf.queue_handler); |
| 128 | if (!qh) { | 127 | if (!qh) { |
| 129 | status = -ESRCH; | 128 | status = -ESRCH; |
| 130 | goto err; | 129 | goto err; |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 4d292b933b5c..7b7aa871a174 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -2647,6 +2647,8 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk, | |||
| 2647 | /* Only accept unspec with dump */ | 2647 | /* Only accept unspec with dump */ |
| 2648 | if (nfmsg->nfgen_family == NFPROTO_UNSPEC) | 2648 | if (nfmsg->nfgen_family == NFPROTO_UNSPEC) |
| 2649 | return -EAFNOSUPPORT; | 2649 | return -EAFNOSUPPORT; |
| 2650 | if (!nla[NFTA_SET_TABLE]) | ||
| 2651 | return -EINVAL; | ||
| 2650 | 2652 | ||
| 2651 | set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); | 2653 | set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); |
| 2652 | if (IS_ERR(set)) | 2654 | if (IS_ERR(set)) |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index aa93877ab6e2..5d36a0926b4a 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
| @@ -557,7 +557,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
| 557 | 557 | ||
| 558 | if (entskb->tstamp.tv64) { | 558 | if (entskb->tstamp.tv64) { |
| 559 | struct nfqnl_msg_packet_timestamp ts; | 559 | struct nfqnl_msg_packet_timestamp ts; |
| 560 | struct timespec64 kts = ktime_to_timespec64(skb->tstamp); | 560 | struct timespec64 kts = ktime_to_timespec64(entskb->tstamp); |
| 561 | 561 | ||
| 562 | ts.sec = cpu_to_be64(kts.tv_sec); | 562 | ts.sec = cpu_to_be64(kts.tv_sec); |
| 563 | ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC); | 563 | ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC); |
| @@ -1482,21 +1482,29 @@ static int __net_init nfnl_queue_net_init(struct net *net) | |||
| 1482 | net->nf.proc_netfilter, &nfqnl_file_ops)) | 1482 | net->nf.proc_netfilter, &nfqnl_file_ops)) |
| 1483 | return -ENOMEM; | 1483 | return -ENOMEM; |
| 1484 | #endif | 1484 | #endif |
| 1485 | nf_register_queue_handler(net, &nfqh); | ||
| 1485 | return 0; | 1486 | return 0; |
| 1486 | } | 1487 | } |
| 1487 | 1488 | ||
| 1488 | static void __net_exit nfnl_queue_net_exit(struct net *net) | 1489 | static void __net_exit nfnl_queue_net_exit(struct net *net) |
| 1489 | { | 1490 | { |
| 1491 | nf_unregister_queue_handler(net); | ||
| 1490 | #ifdef CONFIG_PROC_FS | 1492 | #ifdef CONFIG_PROC_FS |
| 1491 | remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); | 1493 | remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); |
| 1492 | #endif | 1494 | #endif |
| 1493 | } | 1495 | } |
| 1494 | 1496 | ||
| 1497 | static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list) | ||
| 1498 | { | ||
| 1499 | synchronize_rcu(); | ||
| 1500 | } | ||
| 1501 | |||
| 1495 | static struct pernet_operations nfnl_queue_net_ops = { | 1502 | static struct pernet_operations nfnl_queue_net_ops = { |
| 1496 | .init = nfnl_queue_net_init, | 1503 | .init = nfnl_queue_net_init, |
| 1497 | .exit = nfnl_queue_net_exit, | 1504 | .exit = nfnl_queue_net_exit, |
| 1498 | .id = &nfnl_queue_net_id, | 1505 | .exit_batch = nfnl_queue_net_exit_batch, |
| 1499 | .size = sizeof(struct nfnl_queue_net), | 1506 | .id = &nfnl_queue_net_id, |
| 1507 | .size = sizeof(struct nfnl_queue_net), | ||
| 1500 | }; | 1508 | }; |
| 1501 | 1509 | ||
| 1502 | static int __init nfnetlink_queue_init(void) | 1510 | static int __init nfnetlink_queue_init(void) |
| @@ -1517,7 +1525,6 @@ static int __init nfnetlink_queue_init(void) | |||
| 1517 | } | 1525 | } |
| 1518 | 1526 | ||
| 1519 | register_netdevice_notifier(&nfqnl_dev_notifier); | 1527 | register_netdevice_notifier(&nfqnl_dev_notifier); |
| 1520 | nf_register_queue_handler(&nfqh); | ||
| 1521 | return status; | 1528 | return status; |
| 1522 | 1529 | ||
| 1523 | cleanup_netlink_notifier: | 1530 | cleanup_netlink_notifier: |
| @@ -1529,7 +1536,6 @@ out: | |||
| 1529 | 1536 | ||
| 1530 | static void __exit nfnetlink_queue_fini(void) | 1537 | static void __exit nfnetlink_queue_fini(void) |
| 1531 | { | 1538 | { |
| 1532 | nf_unregister_queue_handler(); | ||
| 1533 | unregister_netdevice_notifier(&nfqnl_dev_notifier); | 1539 | unregister_netdevice_notifier(&nfqnl_dev_notifier); |
| 1534 | nfnetlink_subsys_unregister(&nfqnl_subsys); | 1540 | nfnetlink_subsys_unregister(&nfqnl_subsys); |
| 1535 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); | 1541 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); |
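Across the nf_queue.c and nfnetlink_queue.c hunks, the single module-global queue_handler pointer becomes a per-network-namespace field (net->nf.queue_handler) registered from the pernet init hook, with an exit_batch synchronize_rcu() replacing the one in the unregister path. A reduced sketch of the structural change only, a handler pointer scoped to a context object instead of a global (types invented).

#include <stdio.h>

struct handler { const char *name; };

struct net_ctx {
        const struct handler *queue_handler;    /* was: one static global */
};

static void register_handler(struct net_ctx *net, const struct handler *h)
{
        net->queue_handler = h;                 /* scoped to this context */
}

static void unregister_handler(struct net_ctx *net)
{
        net->queue_handler = NULL;
}

int main(void)
{
        static const struct handler nfqh = { "nfnetlink_queue" };
        struct net_ctx a = { 0 }, b = { 0 };

        register_handler(&a, &nfqh);            /* only "a" sees the handler */
        printf("a=%s b=%s\n",
               a.queue_handler ? a.queue_handler->name : "(none)",
               b.queue_handler ? b.queue_handler->name : "(none)");
        unregister_handler(&a);
        return 0;
}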
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index c69c892231d7..2675d580c490 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
| @@ -612,7 +612,7 @@ int xt_compat_check_entry_offsets(const void *base, const char *elems, | |||
| 612 | return -EINVAL; | 612 | return -EINVAL; |
| 613 | 613 | ||
| 614 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && | 614 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && |
| 615 | target_offset + sizeof(struct compat_xt_standard_target) != next_offset) | 615 | COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset) |
| 616 | return -EINVAL; | 616 | return -EINVAL; |
| 617 | 617 | ||
| 618 | /* compat_xt_entry match has less strict aligment requirements, | 618 | /* compat_xt_entry match has less strict aligment requirements, |
| @@ -694,7 +694,7 @@ int xt_check_entry_offsets(const void *base, | |||
| 694 | return -EINVAL; | 694 | return -EINVAL; |
| 695 | 695 | ||
| 696 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && | 696 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && |
| 697 | target_offset + sizeof(struct xt_standard_target) != next_offset) | 697 | XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset) |
| 698 | return -EINVAL; | 698 | return -EINVAL; |
| 699 | 699 | ||
| 700 | return xt_check_entry_match(elems, base + target_offset, | 700 | return xt_check_entry_match(elems, base + target_offset, |
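Both x_tables.c checks now wrap the expected standard-target size in XT_ALIGN()/COMPAT_XT_ALIGN() before comparing it against next_offset, since entries in the ruleset blob are padded to the structure alignment. Sketch of the usual round-up-to-alignment macro such helpers are built on (alignment assumed to be a power of two).

#include <stdio.h>

/* round x up to the next multiple of a (a must be a power of two) */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int raw = 30, padded = ALIGN_UP(raw, 8u);

        printf("raw=%u padded=%u\n", raw, padded);      /* 30 -> 32 */
        return 0;
}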
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 879185fe183f..9a3eb7a0ebf4 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
| @@ -137,11 +137,23 @@ static bool is_flow_key_valid(const struct sw_flow_key *key) | |||
| 137 | return !!key->eth.type; | 137 | return !!key->eth.type; |
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr, | ||
| 141 | __be16 ethertype) | ||
| 142 | { | ||
| 143 | if (skb->ip_summed == CHECKSUM_COMPLETE) { | ||
| 144 | __be16 diff[] = { ~(hdr->h_proto), ethertype }; | ||
| 145 | |||
| 146 | skb->csum = ~csum_partial((char *)diff, sizeof(diff), | ||
| 147 | ~skb->csum); | ||
| 148 | } | ||
| 149 | |||
| 150 | hdr->h_proto = ethertype; | ||
| 151 | } | ||
| 152 | |||
| 140 | static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, | 153 | static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, |
| 141 | const struct ovs_action_push_mpls *mpls) | 154 | const struct ovs_action_push_mpls *mpls) |
| 142 | { | 155 | { |
| 143 | __be32 *new_mpls_lse; | 156 | __be32 *new_mpls_lse; |
| 144 | struct ethhdr *hdr; | ||
| 145 | 157 | ||
| 146 | /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */ | 158 | /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */ |
| 147 | if (skb->encapsulation) | 159 | if (skb->encapsulation) |
| @@ -160,9 +172,7 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, | |||
| 160 | 172 | ||
| 161 | skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN); | 173 | skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN); |
| 162 | 174 | ||
| 163 | hdr = eth_hdr(skb); | 175 | update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype); |
| 164 | hdr->h_proto = mpls->mpls_ethertype; | ||
| 165 | |||
| 166 | if (!skb->inner_protocol) | 176 | if (!skb->inner_protocol) |
| 167 | skb_set_inner_protocol(skb, skb->protocol); | 177 | skb_set_inner_protocol(skb, skb->protocol); |
| 168 | skb->protocol = mpls->mpls_ethertype; | 178 | skb->protocol = mpls->mpls_ethertype; |
| @@ -193,7 +203,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key, | |||
| 193 | * field correctly in the presence of VLAN tags. | 203 | * field correctly in the presence of VLAN tags. |
| 194 | */ | 204 | */ |
| 195 | hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN); | 205 | hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN); |
| 196 | hdr->h_proto = ethertype; | 206 | update_ethertype(skb, hdr, ethertype); |
| 197 | if (eth_p_mpls(skb->protocol)) | 207 | if (eth_p_mpls(skb->protocol)) |
| 198 | skb->protocol = ethertype; | 208 | skb->protocol = ethertype; |
| 199 | 209 | ||
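The new update_ethertype() helper folds the old and new EtherType into skb->csum whenever the device supplied CHECKSUM_COMPLETE, so rewriting the header for MPLS push/pop keeps the cached checksum valid. A userspace sketch of the underlying RFC 1624-style incremental update of a 16-bit one's-complement sum when one word changes (data values invented; 0x8847 is the MPLS unicast EtherType).

#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* one's-complement sum over 16-bit words (host order throughout) */
static uint16_t csum16(const uint16_t *w, int n)
{
        uint32_t sum = 0;

        while (n--)
                sum += *w++;
        return fold(sum);
}

int main(void)
{
        uint16_t data[4] = { 0x0800, 0x1234, 0xabcd, 0x0042 };
        uint16_t before = csum16(data, 4);
        uint16_t oldv = data[0], newv = 0x8847;

        data[0] = newv;

        /* incremental update: add ~old and new into the running sum */
        uint16_t updated = fold((uint32_t)before + (uint16_t)~oldv + newv);

        printf("recomputed=%04x incremental=%04x\n",
               (unsigned)csum16(data, 4), (unsigned)updated);
        return 0;
}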
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 4040eb92d9c9..9bff6ef16fa7 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -93,6 +93,7 @@ | |||
| 93 | #include <net/inet_common.h> | 93 | #include <net/inet_common.h> |
| 94 | #endif | 94 | #endif |
| 95 | #include <linux/bpf.h> | 95 | #include <linux/bpf.h> |
| 96 | #include <net/compat.h> | ||
| 96 | 97 | ||
| 97 | #include "internal.h" | 98 | #include "internal.h" |
| 98 | 99 | ||
| @@ -3940,6 +3941,27 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, | |||
| 3940 | } | 3941 | } |
| 3941 | 3942 | ||
| 3942 | 3943 | ||
| 3944 | #ifdef CONFIG_COMPAT | ||
| 3945 | static int compat_packet_setsockopt(struct socket *sock, int level, int optname, | ||
| 3946 | char __user *optval, unsigned int optlen) | ||
| 3947 | { | ||
| 3948 | struct packet_sock *po = pkt_sk(sock->sk); | ||
| 3949 | |||
| 3950 | if (level != SOL_PACKET) | ||
| 3951 | return -ENOPROTOOPT; | ||
| 3952 | |||
| 3953 | if (optname == PACKET_FANOUT_DATA && | ||
| 3954 | po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) { | ||
| 3955 | optval = (char __user *)get_compat_bpf_fprog(optval); | ||
| 3956 | if (!optval) | ||
| 3957 | return -EFAULT; | ||
| 3958 | optlen = sizeof(struct sock_fprog); | ||
| 3959 | } | ||
| 3960 | |||
| 3961 | return packet_setsockopt(sock, level, optname, optval, optlen); | ||
| 3962 | } | ||
| 3963 | #endif | ||
| 3964 | |||
| 3943 | static int packet_notifier(struct notifier_block *this, | 3965 | static int packet_notifier(struct notifier_block *this, |
| 3944 | unsigned long msg, void *ptr) | 3966 | unsigned long msg, void *ptr) |
| 3945 | { | 3967 | { |
| @@ -4416,6 +4438,9 @@ static const struct proto_ops packet_ops = { | |||
| 4416 | .shutdown = sock_no_shutdown, | 4438 | .shutdown = sock_no_shutdown, |
| 4417 | .setsockopt = packet_setsockopt, | 4439 | .setsockopt = packet_setsockopt, |
| 4418 | .getsockopt = packet_getsockopt, | 4440 | .getsockopt = packet_getsockopt, |
| 4441 | #ifdef CONFIG_COMPAT | ||
| 4442 | .compat_setsockopt = compat_packet_setsockopt, | ||
| 4443 | #endif | ||
| 4419 | .sendmsg = packet_sendmsg, | 4444 | .sendmsg = packet_sendmsg, |
| 4420 | .recvmsg = packet_recvmsg, | 4445 | .recvmsg = packet_recvmsg, |
| 4421 | .mmap = packet_mmap, | 4446 | .mmap = packet_mmap, |
diff --git a/net/rds/rds.h b/net/rds/rds.h index 80256b08eac0..387df5f32e49 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h | |||
| @@ -74,6 +74,7 @@ enum { | |||
| 74 | RDS_CONN_CONNECTING, | 74 | RDS_CONN_CONNECTING, |
| 75 | RDS_CONN_DISCONNECTING, | 75 | RDS_CONN_DISCONNECTING, |
| 76 | RDS_CONN_UP, | 76 | RDS_CONN_UP, |
| 77 | RDS_CONN_RESETTING, | ||
| 77 | RDS_CONN_ERROR, | 78 | RDS_CONN_ERROR, |
| 78 | }; | 79 | }; |
| 79 | 80 | ||
| @@ -813,6 +814,7 @@ void rds_connect_worker(struct work_struct *); | |||
| 813 | void rds_shutdown_worker(struct work_struct *); | 814 | void rds_shutdown_worker(struct work_struct *); |
| 814 | void rds_send_worker(struct work_struct *); | 815 | void rds_send_worker(struct work_struct *); |
| 815 | void rds_recv_worker(struct work_struct *); | 816 | void rds_recv_worker(struct work_struct *); |
| 817 | void rds_connect_path_complete(struct rds_connection *conn, int curr); | ||
| 816 | void rds_connect_complete(struct rds_connection *conn); | 818 | void rds_connect_complete(struct rds_connection *conn); |
| 817 | 819 | ||
| 818 | /* transport.c */ | 820 | /* transport.c */ |
diff --git a/net/rds/recv.c b/net/rds/recv.c index c0be1ecd11c9..8413f6c99e13 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c | |||
| @@ -561,5 +561,7 @@ void rds_inc_info_copy(struct rds_incoming *inc, | |||
| 561 | minfo.fport = inc->i_hdr.h_dport; | 561 | minfo.fport = inc->i_hdr.h_dport; |
| 562 | } | 562 | } |
| 563 | 563 | ||
| 564 | minfo.flags = 0; | ||
| 565 | |||
| 564 | rds_info_copy(iter, &minfo, sizeof(minfo)); | 566 | rds_info_copy(iter, &minfo, sizeof(minfo)); |
| 565 | } | 567 | } |
diff --git a/net/rds/send.c b/net/rds/send.c index c9cdb358ea88..b1962f8e30f7 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
| @@ -99,6 +99,7 @@ void rds_send_reset(struct rds_connection *conn) | |||
| 99 | list_splice_init(&conn->c_retrans, &conn->c_send_queue); | 99 | list_splice_init(&conn->c_retrans, &conn->c_send_queue); |
| 100 | spin_unlock_irqrestore(&conn->c_lock, flags); | 100 | spin_unlock_irqrestore(&conn->c_lock, flags); |
| 101 | } | 101 | } |
| 102 | EXPORT_SYMBOL_GPL(rds_send_reset); | ||
| 102 | 103 | ||
| 103 | static int acquire_in_xmit(struct rds_connection *conn) | 104 | static int acquire_in_xmit(struct rds_connection *conn) |
| 104 | { | 105 | { |
diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 86187dad1440..74ee126a6fe6 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c | |||
| @@ -126,9 +126,81 @@ void rds_tcp_restore_callbacks(struct socket *sock, | |||
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | /* | 128 | /* |
| 129 | * This is the only path that sets tc->t_sock. Send and receive trust that | 129 | * rds_tcp_reset_callbacks() switches to the new sock and |
| 130 | * it is set. The RDS_CONN_UP bit protects those paths from being | 130 | * returns the existing tc->t_sock. |
| 131 | * called while it isn't set. | 131 | * |
| 132 | * The only functions that set tc->t_sock are rds_tcp_set_callbacks | ||
| 133 | * and rds_tcp_reset_callbacks. Send and receive trust that | ||
| 134 | * it is set. The absence of the RDS_CONN_UP bit protects those paths | ||
| 135 | * from being called while it isn't set. | ||
| 136 | */ | ||
| 137 | void rds_tcp_reset_callbacks(struct socket *sock, | ||
| 138 | struct rds_connection *conn) | ||
| 139 | { | ||
| 140 | struct rds_tcp_connection *tc = conn->c_transport_data; | ||
| 141 | struct socket *osock = tc->t_sock; | ||
| 142 | |||
| 143 | if (!osock) | ||
| 144 | goto newsock; | ||
| 145 | |||
| 146 | /* Need to resolve a duelling SYN between peers. | ||
| 147 | * We have an outstanding SYN to this peer, which may | ||
| 148 | * potentially have transitioned to the RDS_CONN_UP state, | ||
| 149 | * so we must quiesce any send threads before resetting | ||
| 150 | * c_transport_data. We quiesce these threads by setting | ||
| 151 | * c_state to something other than RDS_CONN_UP, and then | ||
| 152 | * waiting for any existing threads in rds_send_xmit to | ||
| 153 | * complete release_in_xmit(). (Subsequent threads entering | ||
| 154 | * rds_send_xmit() will bail on !rds_conn_up().) | ||
| 155 | * | ||
| 156 | * However an incoming syn-ack at this point would end up | ||
| 157 | * marking the conn as RDS_CONN_UP, and would again permit | ||
| 158 | * rds_send_xmit() threads through, so ideally we would | ||
| 159 | * synchronize on RDS_CONN_UP after lock_sock(), but cannot | ||
| 160 | * do that: waiting on !RDS_IN_XMIT after lock_sock() may | ||
| 161 | * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT | ||
| 162 | * would not get set. As a result, we set c_state to | ||
| 163 | * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change | ||
| 164 | * cannot mark rds_conn_path_up() in the window before lock_sock() | ||
| 165 | */ | ||
| 166 | atomic_set(&conn->c_state, RDS_CONN_RESETTING); | ||
| 167 | wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags)); | ||
| 168 | lock_sock(osock->sk); | ||
| 169 | /* reset receive side state for rds_tcp_data_recv() for osock */ | ||
| 170 | if (tc->t_tinc) { | ||
| 171 | rds_inc_put(&tc->t_tinc->ti_inc); | ||
| 172 | tc->t_tinc = NULL; | ||
| 173 | } | ||
| 174 | tc->t_tinc_hdr_rem = sizeof(struct rds_header); | ||
| 175 | tc->t_tinc_data_rem = 0; | ||
| 176 | tc->t_sock = NULL; | ||
| 177 | |||
| 178 | write_lock_bh(&osock->sk->sk_callback_lock); | ||
| 179 | |||
| 180 | osock->sk->sk_user_data = NULL; | ||
| 181 | osock->sk->sk_data_ready = tc->t_orig_data_ready; | ||
| 182 | osock->sk->sk_write_space = tc->t_orig_write_space; | ||
| 183 | osock->sk->sk_state_change = tc->t_orig_state_change; | ||
| 184 | write_unlock_bh(&osock->sk->sk_callback_lock); | ||
| 185 | release_sock(osock->sk); | ||
| 186 | sock_release(osock); | ||
| 187 | newsock: | ||
| 188 | rds_send_reset(conn); | ||
| 189 | lock_sock(sock->sk); | ||
| 190 | write_lock_bh(&sock->sk->sk_callback_lock); | ||
| 191 | tc->t_sock = sock; | ||
| 192 | sock->sk->sk_user_data = conn; | ||
| 193 | sock->sk->sk_data_ready = rds_tcp_data_ready; | ||
| 194 | sock->sk->sk_write_space = rds_tcp_write_space; | ||
| 195 | sock->sk->sk_state_change = rds_tcp_state_change; | ||
| 196 | |||
| 197 | write_unlock_bh(&sock->sk->sk_callback_lock); | ||
| 198 | release_sock(sock->sk); | ||
| 199 | } | ||
| 200 | |||
| 201 | /* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments | ||
| 202 | * above rds_tcp_reset_callbacks for notes about synchronization | ||
| 203 | * with data path | ||
| 132 | */ | 204 | */ |
| 133 | void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn) | 205 | void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn) |
| 134 | { | 206 | { |
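The RDS_CONN_RESETTING quiesce above works because the transmit path re-checks the connection state after claiming the RDS_IN_XMIT bit, so waiting for !RDS_IN_XMIT with c_state set to anything other than RDS_CONN_UP drains all senders. Roughly, the gate in rds_send_xmit() looks like this (simplified sketch based on net/rds/send.c; details may differ):

        /* Only one thread may transmit on a connection at a time. */
        if (!acquire_in_xmit(conn))
                return -ENOMEM;

        /* Re-check after taking RDS_IN_XMIT: once c_state leaves
         * RDS_CONN_UP (e.g. it was set to RDS_CONN_RESETTING), new
         * senders bail out here and existing ones release the bit.
         */
        if (!rds_conn_up(conn)) {
                release_in_xmit(conn);
                return 0;
        }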
diff --git a/net/rds/tcp.h b/net/rds/tcp.h index 41c228300525..ec0602b0dc24 100644 --- a/net/rds/tcp.h +++ b/net/rds/tcp.h | |||
| @@ -50,6 +50,7 @@ struct rds_tcp_statistics { | |||
| 50 | void rds_tcp_tune(struct socket *sock); | 50 | void rds_tcp_tune(struct socket *sock); |
| 51 | void rds_tcp_nonagle(struct socket *sock); | 51 | void rds_tcp_nonagle(struct socket *sock); |
| 52 | void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn); | 52 | void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn); |
| 53 | void rds_tcp_reset_callbacks(struct socket *sock, struct rds_connection *conn); | ||
| 53 | void rds_tcp_restore_callbacks(struct socket *sock, | 54 | void rds_tcp_restore_callbacks(struct socket *sock, |
| 54 | struct rds_tcp_connection *tc); | 55 | struct rds_tcp_connection *tc); |
| 55 | u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc); | 56 | u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc); |
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index fb82e0a0bf89..fba13d0305fb 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c | |||
| @@ -60,7 +60,7 @@ void rds_tcp_state_change(struct sock *sk) | |||
| 60 | case TCP_SYN_RECV: | 60 | case TCP_SYN_RECV: |
| 61 | break; | 61 | break; |
| 62 | case TCP_ESTABLISHED: | 62 | case TCP_ESTABLISHED: |
| 63 | rds_connect_complete(conn); | 63 | rds_connect_path_complete(conn, RDS_CONN_CONNECTING); |
| 64 | break; | 64 | break; |
| 65 | case TCP_CLOSE_WAIT: | 65 | case TCP_CLOSE_WAIT: |
| 66 | case TCP_CLOSE: | 66 | case TCP_CLOSE: |
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 4bf4befe5066..686b1d03a558 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c | |||
| @@ -78,7 +78,6 @@ int rds_tcp_accept_one(struct socket *sock) | |||
| 78 | struct inet_sock *inet; | 78 | struct inet_sock *inet; |
| 79 | struct rds_tcp_connection *rs_tcp = NULL; | 79 | struct rds_tcp_connection *rs_tcp = NULL; |
| 80 | int conn_state; | 80 | int conn_state; |
| 81 | struct sock *nsk; | ||
| 82 | 81 | ||
| 83 | if (!sock) /* module unload or netns delete in progress */ | 82 | if (!sock) /* module unload or netns delete in progress */ |
| 84 | return -ENETUNREACH; | 83 | return -ENETUNREACH; |
| @@ -136,26 +135,21 @@ int rds_tcp_accept_one(struct socket *sock) | |||
| 136 | !conn->c_outgoing) { | 135 | !conn->c_outgoing) { |
| 137 | goto rst_nsk; | 136 | goto rst_nsk; |
| 138 | } else { | 137 | } else { |
| 139 | atomic_set(&conn->c_state, RDS_CONN_CONNECTING); | 138 | rds_tcp_reset_callbacks(new_sock, conn); |
| 140 | wait_event(conn->c_waitq, | ||
| 141 | !test_bit(RDS_IN_XMIT, &conn->c_flags)); | ||
| 142 | rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp); | ||
| 143 | conn->c_outgoing = 0; | 139 | conn->c_outgoing = 0; |
| 140 | /* rds_connect_path_complete() marks RDS_CONN_UP */ | ||
| 141 | rds_connect_path_complete(conn, RDS_CONN_DISCONNECTING); | ||
| 144 | } | 142 | } |
| 143 | } else { | ||
| 144 | rds_tcp_set_callbacks(new_sock, conn); | ||
| 145 | rds_connect_path_complete(conn, RDS_CONN_CONNECTING); | ||
| 145 | } | 146 | } |
| 146 | rds_tcp_set_callbacks(new_sock, conn); | ||
| 147 | rds_connect_complete(conn); /* marks RDS_CONN_UP */ | ||
| 148 | new_sock = NULL; | 147 | new_sock = NULL; |
| 149 | ret = 0; | 148 | ret = 0; |
| 150 | goto out; | 149 | goto out; |
| 151 | rst_nsk: | 150 | rst_nsk: |
| 152 | /* reset the newly returned accept sock and bail */ | 151 | /* reset the newly returned accept sock and bail */ |
| 153 | nsk = new_sock->sk; | 152 | kernel_sock_shutdown(new_sock, SHUT_RDWR); |
| 154 | rds_tcp_stats_inc(s_tcp_listen_closed_stale); | ||
| 155 | nsk->sk_user_data = NULL; | ||
| 156 | nsk->sk_prot->disconnect(nsk, 0); | ||
| 157 | tcp_done(nsk); | ||
| 158 | new_sock = NULL; | ||
| 159 | ret = 0; | 153 | ret = 0; |
| 160 | out: | 154 | out: |
| 161 | if (rs_tcp) | 155 | if (rs_tcp) |
diff --git a/net/rds/threads.c b/net/rds/threads.c index 454aa6d23327..4a323045719b 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c | |||
| @@ -71,9 +71,9 @@ | |||
| 71 | struct workqueue_struct *rds_wq; | 71 | struct workqueue_struct *rds_wq; |
| 72 | EXPORT_SYMBOL_GPL(rds_wq); | 72 | EXPORT_SYMBOL_GPL(rds_wq); |
| 73 | 73 | ||
| 74 | void rds_connect_complete(struct rds_connection *conn) | 74 | void rds_connect_path_complete(struct rds_connection *conn, int curr) |
| 75 | { | 75 | { |
| 76 | if (!rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP)) { | 76 | if (!rds_conn_transition(conn, curr, RDS_CONN_UP)) { |
| 77 | printk(KERN_WARNING "%s: Cannot transition to state UP, " | 77 | printk(KERN_WARNING "%s: Cannot transition to state UP, " |
| 78 | "current state is %d\n", | 78 | "current state is %d\n", |
| 79 | __func__, | 79 | __func__, |
| @@ -90,6 +90,12 @@ void rds_connect_complete(struct rds_connection *conn) | |||
| 90 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 90 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); |
| 91 | queue_delayed_work(rds_wq, &conn->c_recv_w, 0); | 91 | queue_delayed_work(rds_wq, &conn->c_recv_w, 0); |
| 92 | } | 92 | } |
| 93 | EXPORT_SYMBOL_GPL(rds_connect_path_complete); | ||
| 94 | |||
| 95 | void rds_connect_complete(struct rds_connection *conn) | ||
| 96 | { | ||
| 97 | rds_connect_path_complete(conn, RDS_CONN_CONNECTING); | ||
| 98 | } | ||
| 93 | EXPORT_SYMBOL_GPL(rds_connect_complete); | 99 | EXPORT_SYMBOL_GPL(rds_connect_complete); |
| 94 | 100 | ||
| 95 | /* | 101 | /* |
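rds_conn_transition(), which rds_connect_path_complete() relies on, is essentially an atomic compare-and-swap on the connection state; that is why passing the expected current state (RDS_CONN_CONNECTING for an active connect, RDS_CONN_DISCONNECTING for the accept-side reset) is enough to make the transition race-free. A rough equivalent, assuming c_state is an atomic_t as declared in net/rds/rds.h:

/* Illustrative only: succeed only if the state is still 'curr'. */
static inline int conn_transition(struct rds_connection *conn, int curr, int nxt)
{
        return atomic_cmpxchg(&conn->c_state, curr, nxt) == curr;
}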
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 6b726a046a7d..bab56ed649ba 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c | |||
| @@ -1162,9 +1162,7 @@ static int rxkad_init(void) | |||
| 1162 | /* pin the cipher we need so that the crypto layer doesn't invoke | 1162 | /* pin the cipher we need so that the crypto layer doesn't invoke |
| 1163 | * keventd to go get it */ | 1163 | * keventd to go get it */ |
| 1164 | rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); | 1164 | rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); |
| 1165 | if (IS_ERR(rxkad_ci)) | 1165 | return PTR_ERR_OR_ZERO(rxkad_ci); |
| 1166 | return PTR_ERR(rxkad_ci); | ||
| 1167 | return 0; | ||
| 1168 | } | 1166 | } |
| 1169 | 1167 | ||
| 1170 | /* | 1168 | /* |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 330f14e302e8..c557789765dc 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
| @@ -38,7 +38,7 @@ struct tcf_police { | |||
| 38 | bool peak_present; | 38 | bool peak_present; |
| 39 | }; | 39 | }; |
| 40 | #define to_police(pc) \ | 40 | #define to_police(pc) \ |
| 41 | container_of(pc, struct tcf_police, common) | 41 | container_of(pc->priv, struct tcf_police, common) |
| 42 | 42 | ||
| 43 | #define POL_TAB_MASK 15 | 43 | #define POL_TAB_MASK 15 |
| 44 | 44 | ||
| @@ -119,14 +119,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla, | |||
| 119 | struct nlattr *est, struct tc_action *a, | 119 | struct nlattr *est, struct tc_action *a, |
| 120 | int ovr, int bind) | 120 | int ovr, int bind) |
| 121 | { | 121 | { |
| 122 | unsigned int h; | ||
| 123 | int ret = 0, err; | 122 | int ret = 0, err; |
| 124 | struct nlattr *tb[TCA_POLICE_MAX + 1]; | 123 | struct nlattr *tb[TCA_POLICE_MAX + 1]; |
| 125 | struct tc_police *parm; | 124 | struct tc_police *parm; |
| 126 | struct tcf_police *police; | 125 | struct tcf_police *police; |
| 127 | struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; | 126 | struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; |
| 128 | struct tc_action_net *tn = net_generic(net, police_net_id); | 127 | struct tc_action_net *tn = net_generic(net, police_net_id); |
| 129 | struct tcf_hashinfo *hinfo = tn->hinfo; | ||
| 130 | int size; | 128 | int size; |
| 131 | 129 | ||
| 132 | if (nla == NULL) | 130 | if (nla == NULL) |
| @@ -145,7 +143,7 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla, | |||
| 145 | 143 | ||
| 146 | if (parm->index) { | 144 | if (parm->index) { |
| 147 | if (tcf_hash_search(tn, a, parm->index)) { | 145 | if (tcf_hash_search(tn, a, parm->index)) { |
| 148 | police = to_police(a->priv); | 146 | police = to_police(a); |
| 149 | if (bind) { | 147 | if (bind) { |
| 150 | police->tcf_bindcnt += 1; | 148 | police->tcf_bindcnt += 1; |
| 151 | police->tcf_refcnt += 1; | 149 | police->tcf_refcnt += 1; |
| @@ -156,16 +154,15 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla, | |||
| 156 | /* not replacing */ | 154 | /* not replacing */ |
| 157 | return -EEXIST; | 155 | return -EEXIST; |
| 158 | } | 156 | } |
| 157 | } else { | ||
| 158 | ret = tcf_hash_create(tn, parm->index, NULL, a, | ||
| 159 | sizeof(*police), bind, false); | ||
| 160 | if (ret) | ||
| 161 | return ret; | ||
| 162 | ret = ACT_P_CREATED; | ||
| 159 | } | 163 | } |
| 160 | 164 | ||
| 161 | police = kzalloc(sizeof(*police), GFP_KERNEL); | 165 | police = to_police(a); |
| 162 | if (police == NULL) | ||
| 163 | return -ENOMEM; | ||
| 164 | ret = ACT_P_CREATED; | ||
| 165 | police->tcf_refcnt = 1; | ||
| 166 | spin_lock_init(&police->tcf_lock); | ||
| 167 | if (bind) | ||
| 168 | police->tcf_bindcnt = 1; | ||
| 169 | override: | 166 | override: |
| 170 | if (parm->rate.rate) { | 167 | if (parm->rate.rate) { |
| 171 | err = -ENOMEM; | 168 | err = -ENOMEM; |
| @@ -237,14 +234,8 @@ override: | |||
| 237 | return ret; | 234 | return ret; |
| 238 | 235 | ||
| 239 | police->tcfp_t_c = ktime_get_ns(); | 236 | police->tcfp_t_c = ktime_get_ns(); |
| 240 | police->tcf_index = parm->index ? parm->index : | 237 | tcf_hash_insert(tn, a); |
| 241 | tcf_hash_new_index(tn); | ||
| 242 | h = tcf_hash(police->tcf_index, POL_TAB_MASK); | ||
| 243 | spin_lock_bh(&hinfo->lock); | ||
| 244 | hlist_add_head(&police->tcf_head, &hinfo->htab[h]); | ||
| 245 | spin_unlock_bh(&hinfo->lock); | ||
| 246 | 238 | ||
| 247 | a->priv = police; | ||
| 248 | return ret; | 239 | return ret; |
| 249 | 240 | ||
| 250 | failure_unlock: | 241 | failure_unlock: |
| @@ -253,7 +244,7 @@ failure: | |||
| 253 | qdisc_put_rtab(P_tab); | 244 | qdisc_put_rtab(P_tab); |
| 254 | qdisc_put_rtab(R_tab); | 245 | qdisc_put_rtab(R_tab); |
| 255 | if (ret == ACT_P_CREATED) | 246 | if (ret == ACT_P_CREATED) |
| 256 | kfree(police); | 247 | tcf_hash_cleanup(a, est); |
| 257 | return err; | 248 | return err; |
| 258 | } | 249 | } |
| 259 | 250 | ||
| @@ -268,6 +259,7 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a, | |||
| 268 | spin_lock(&police->tcf_lock); | 259 | spin_lock(&police->tcf_lock); |
| 269 | 260 | ||
| 270 | bstats_update(&police->tcf_bstats, skb); | 261 | bstats_update(&police->tcf_bstats, skb); |
| 262 | tcf_lastuse_update(&police->tcf_tm); | ||
| 271 | 263 | ||
| 272 | if (police->tcfp_ewma_rate && | 264 | if (police->tcfp_ewma_rate && |
| 273 | police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { | 265 | police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { |
| @@ -327,6 +319,7 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) | |||
| 327 | .refcnt = police->tcf_refcnt - ref, | 319 | .refcnt = police->tcf_refcnt - ref, |
| 328 | .bindcnt = police->tcf_bindcnt - bind, | 320 | .bindcnt = police->tcf_bindcnt - bind, |
| 329 | }; | 321 | }; |
| 322 | struct tcf_t t; | ||
| 330 | 323 | ||
| 331 | if (police->rate_present) | 324 | if (police->rate_present) |
| 332 | psched_ratecfg_getrate(&opt.rate, &police->rate); | 325 | psched_ratecfg_getrate(&opt.rate, &police->rate); |
| @@ -340,6 +333,13 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) | |||
| 340 | if (police->tcfp_ewma_rate && | 333 | if (police->tcfp_ewma_rate && |
| 341 | nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate)) | 334 | nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate)) |
| 342 | goto nla_put_failure; | 335 | goto nla_put_failure; |
| 336 | |||
| 337 | t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install); | ||
| 338 | t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse); | ||
| 339 | t.expires = jiffies_to_clock_t(police->tcf_tm.expires); | ||
| 340 | if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD)) | ||
| 341 | goto nla_put_failure; | ||
| 342 | |||
| 343 | return skb->len; | 343 | return skb->len; |
| 344 | 344 | ||
| 345 | nla_put_failure: | 345 | nla_put_failure: |
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 730aacafc22d..b3b7978f4182 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
| @@ -171,7 +171,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie) | |||
| 171 | struct tc_cls_flower_offload offload = {0}; | 171 | struct tc_cls_flower_offload offload = {0}; |
| 172 | struct tc_to_netdev tc; | 172 | struct tc_to_netdev tc; |
| 173 | 173 | ||
| 174 | if (!tc_should_offload(dev, 0)) | 174 | if (!tc_should_offload(dev, tp, 0)) |
| 175 | return; | 175 | return; |
| 176 | 176 | ||
| 177 | offload.command = TC_CLSFLOWER_DESTROY; | 177 | offload.command = TC_CLSFLOWER_DESTROY; |
| @@ -194,7 +194,7 @@ static void fl_hw_replace_filter(struct tcf_proto *tp, | |||
| 194 | struct tc_cls_flower_offload offload = {0}; | 194 | struct tc_cls_flower_offload offload = {0}; |
| 195 | struct tc_to_netdev tc; | 195 | struct tc_to_netdev tc; |
| 196 | 196 | ||
| 197 | if (!tc_should_offload(dev, flags)) | 197 | if (!tc_should_offload(dev, tp, flags)) |
| 198 | return; | 198 | return; |
| 199 | 199 | ||
| 200 | offload.command = TC_CLSFLOWER_REPLACE; | 200 | offload.command = TC_CLSFLOWER_REPLACE; |
| @@ -216,7 +216,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) | |||
| 216 | struct tc_cls_flower_offload offload = {0}; | 216 | struct tc_cls_flower_offload offload = {0}; |
| 217 | struct tc_to_netdev tc; | 217 | struct tc_to_netdev tc; |
| 218 | 218 | ||
| 219 | if (!tc_should_offload(dev, 0)) | 219 | if (!tc_should_offload(dev, tp, 0)) |
| 220 | return; | 220 | return; |
| 221 | 221 | ||
| 222 | offload.command = TC_CLSFLOWER_STATS; | 222 | offload.command = TC_CLSFLOWER_STATS; |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 079b43b3c5d2..ffe593efe930 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
| @@ -440,7 +440,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle) | |||
| 440 | offload.type = TC_SETUP_CLSU32; | 440 | offload.type = TC_SETUP_CLSU32; |
| 441 | offload.cls_u32 = &u32_offload; | 441 | offload.cls_u32 = &u32_offload; |
| 442 | 442 | ||
| 443 | if (tc_should_offload(dev, 0)) { | 443 | if (tc_should_offload(dev, tp, 0)) { |
| 444 | offload.cls_u32->command = TC_CLSU32_DELETE_KNODE; | 444 | offload.cls_u32->command = TC_CLSU32_DELETE_KNODE; |
| 445 | offload.cls_u32->knode.handle = handle; | 445 | offload.cls_u32->knode.handle = handle; |
| 446 | dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, | 446 | dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, |
| @@ -457,20 +457,21 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, | |||
| 457 | struct tc_to_netdev offload; | 457 | struct tc_to_netdev offload; |
| 458 | int err; | 458 | int err; |
| 459 | 459 | ||
| 460 | if (!tc_should_offload(dev, tp, flags)) | ||
| 461 | return tc_skip_sw(flags) ? -EINVAL : 0; | ||
| 462 | |||
| 460 | offload.type = TC_SETUP_CLSU32; | 463 | offload.type = TC_SETUP_CLSU32; |
| 461 | offload.cls_u32 = &u32_offload; | 464 | offload.cls_u32 = &u32_offload; |
| 462 | 465 | ||
| 463 | if (tc_should_offload(dev, flags)) { | 466 | offload.cls_u32->command = TC_CLSU32_NEW_HNODE; |
| 464 | offload.cls_u32->command = TC_CLSU32_NEW_HNODE; | 467 | offload.cls_u32->hnode.divisor = h->divisor; |
| 465 | offload.cls_u32->hnode.divisor = h->divisor; | 468 | offload.cls_u32->hnode.handle = h->handle; |
| 466 | offload.cls_u32->hnode.handle = h->handle; | 469 | offload.cls_u32->hnode.prio = h->prio; |
| 467 | offload.cls_u32->hnode.prio = h->prio; | ||
| 468 | 470 | ||
| 469 | err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, | 471 | err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, |
| 470 | tp->protocol, &offload); | 472 | tp->protocol, &offload); |
| 471 | if (tc_skip_sw(flags)) | 473 | if (tc_skip_sw(flags)) |
| 472 | return err; | 474 | return err; |
| 473 | } | ||
| 474 | 475 | ||
| 475 | return 0; | 476 | return 0; |
| 476 | } | 477 | } |
| @@ -484,7 +485,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h) | |||
| 484 | offload.type = TC_SETUP_CLSU32; | 485 | offload.type = TC_SETUP_CLSU32; |
| 485 | offload.cls_u32 = &u32_offload; | 486 | offload.cls_u32 = &u32_offload; |
| 486 | 487 | ||
| 487 | if (tc_should_offload(dev, 0)) { | 488 | if (tc_should_offload(dev, tp, 0)) { |
| 488 | offload.cls_u32->command = TC_CLSU32_DELETE_HNODE; | 489 | offload.cls_u32->command = TC_CLSU32_DELETE_HNODE; |
| 489 | offload.cls_u32->hnode.divisor = h->divisor; | 490 | offload.cls_u32->hnode.divisor = h->divisor; |
| 490 | offload.cls_u32->hnode.handle = h->handle; | 491 | offload.cls_u32->hnode.handle = h->handle; |
| @@ -507,27 +508,28 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, | |||
| 507 | offload.type = TC_SETUP_CLSU32; | 508 | offload.type = TC_SETUP_CLSU32; |
| 508 | offload.cls_u32 = &u32_offload; | 509 | offload.cls_u32 = &u32_offload; |
| 509 | 510 | ||
| 510 | if (tc_should_offload(dev, flags)) { | 511 | if (!tc_should_offload(dev, tp, flags)) |
| 511 | offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE; | 512 | return tc_skip_sw(flags) ? -EINVAL : 0; |
| 512 | offload.cls_u32->knode.handle = n->handle; | 513 | |
| 513 | offload.cls_u32->knode.fshift = n->fshift; | 514 | offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE; |
| 515 | offload.cls_u32->knode.handle = n->handle; | ||
| 516 | offload.cls_u32->knode.fshift = n->fshift; | ||
| 514 | #ifdef CONFIG_CLS_U32_MARK | 517 | #ifdef CONFIG_CLS_U32_MARK |
| 515 | offload.cls_u32->knode.val = n->val; | 518 | offload.cls_u32->knode.val = n->val; |
| 516 | offload.cls_u32->knode.mask = n->mask; | 519 | offload.cls_u32->knode.mask = n->mask; |
| 517 | #else | 520 | #else |
| 518 | offload.cls_u32->knode.val = 0; | 521 | offload.cls_u32->knode.val = 0; |
| 519 | offload.cls_u32->knode.mask = 0; | 522 | offload.cls_u32->knode.mask = 0; |
| 520 | #endif | 523 | #endif |
| 521 | offload.cls_u32->knode.sel = &n->sel; | 524 | offload.cls_u32->knode.sel = &n->sel; |
| 522 | offload.cls_u32->knode.exts = &n->exts; | 525 | offload.cls_u32->knode.exts = &n->exts; |
| 523 | if (n->ht_down) | 526 | if (n->ht_down) |
| 524 | offload.cls_u32->knode.link_handle = n->ht_down->handle; | 527 | offload.cls_u32->knode.link_handle = n->ht_down->handle; |
| 525 | 528 | ||
| 526 | err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, | 529 | err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, |
| 527 | tp->protocol, &offload); | 530 | tp->protocol, &offload); |
| 528 | if (tc_skip_sw(flags)) | 531 | if (tc_skip_sw(flags)) |
| 529 | return err; | 532 | return err; |
| 530 | } | ||
| 531 | 533 | ||
| 532 | return 0; | 534 | return 0; |
| 533 | } | 535 | } |
| @@ -863,7 +865,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
| 863 | if (tb[TCA_U32_FLAGS]) { | 865 | if (tb[TCA_U32_FLAGS]) { |
| 864 | flags = nla_get_u32(tb[TCA_U32_FLAGS]); | 866 | flags = nla_get_u32(tb[TCA_U32_FLAGS]); |
| 865 | if (!tc_flags_valid(flags)) | 867 | if (!tc_flags_valid(flags)) |
| 866 | return err; | 868 | return -EINVAL; |
| 867 | } | 869 | } |
| 868 | 870 | ||
| 869 | n = (struct tc_u_knode *)*arg; | 871 | n = (struct tc_u_knode *)*arg; |
| @@ -921,11 +923,17 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
| 921 | ht->divisor = divisor; | 923 | ht->divisor = divisor; |
| 922 | ht->handle = handle; | 924 | ht->handle = handle; |
| 923 | ht->prio = tp->prio; | 925 | ht->prio = tp->prio; |
| 926 | |||
| 927 | err = u32_replace_hw_hnode(tp, ht, flags); | ||
| 928 | if (err) { | ||
| 929 | kfree(ht); | ||
| 930 | return err; | ||
| 931 | } | ||
| 932 | |||
| 924 | RCU_INIT_POINTER(ht->next, tp_c->hlist); | 933 | RCU_INIT_POINTER(ht->next, tp_c->hlist); |
| 925 | rcu_assign_pointer(tp_c->hlist, ht); | 934 | rcu_assign_pointer(tp_c->hlist, ht); |
| 926 | *arg = (unsigned long)ht; | 935 | *arg = (unsigned long)ht; |
| 927 | 936 | ||
| 928 | u32_replace_hw_hnode(tp, ht, flags); | ||
| 929 | return 0; | 937 | return 0; |
| 930 | } | 938 | } |
| 931 | 939 | ||
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 64f71a2155f3..ddf047df5361 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
| @@ -607,6 +607,10 @@ void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool thr | |||
| 607 | if (throttle) | 607 | if (throttle) |
| 608 | qdisc_throttled(wd->qdisc); | 608 | qdisc_throttled(wd->qdisc); |
| 609 | 609 | ||
| 610 | if (wd->last_expires == expires) | ||
| 611 | return; | ||
| 612 | |||
| 613 | wd->last_expires = expires; | ||
| 610 | hrtimer_start(&wd->timer, | 614 | hrtimer_start(&wd->timer, |
| 611 | ns_to_ktime(expires), | 615 | ns_to_ktime(expires), |
| 612 | HRTIMER_MODE_ABS_PINNED); | 616 | HRTIMER_MODE_ABS_PINNED); |
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index a63e879e8975..bf8af2c43c2c 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c | |||
| @@ -375,6 +375,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 375 | cl->deficit = cl->quantum; | 375 | cl->deficit = cl->quantum; |
| 376 | } | 376 | } |
| 377 | 377 | ||
| 378 | qdisc_qstats_backlog_inc(sch, skb); | ||
| 378 | sch->q.qlen++; | 379 | sch->q.qlen++; |
| 379 | return err; | 380 | return err; |
| 380 | } | 381 | } |
| @@ -407,6 +408,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch) | |||
| 407 | 408 | ||
| 408 | bstats_update(&cl->bstats, skb); | 409 | bstats_update(&cl->bstats, skb); |
| 409 | qdisc_bstats_update(sch, skb); | 410 | qdisc_bstats_update(sch, skb); |
| 411 | qdisc_qstats_backlog_dec(sch, skb); | ||
| 410 | sch->q.qlen--; | 412 | sch->q.qlen--; |
| 411 | return skb; | 413 | return skb; |
| 412 | } | 414 | } |
| @@ -428,6 +430,7 @@ static unsigned int drr_drop(struct Qdisc *sch) | |||
| 428 | if (cl->qdisc->ops->drop) { | 430 | if (cl->qdisc->ops->drop) { |
| 429 | len = cl->qdisc->ops->drop(cl->qdisc); | 431 | len = cl->qdisc->ops->drop(cl->qdisc); |
| 430 | if (len > 0) { | 432 | if (len > 0) { |
| 433 | sch->qstats.backlog -= len; | ||
| 431 | sch->q.qlen--; | 434 | sch->q.qlen--; |
| 432 | if (cl->qdisc->q.qlen == 0) | 435 | if (cl->qdisc->q.qlen == 0) |
| 433 | list_del(&cl->alist); | 436 | list_del(&cl->alist); |
| @@ -463,6 +466,7 @@ static void drr_reset_qdisc(struct Qdisc *sch) | |||
| 463 | qdisc_reset(cl->qdisc); | 466 | qdisc_reset(cl->qdisc); |
| 464 | } | 467 | } |
| 465 | } | 468 | } |
| 469 | sch->qstats.backlog = 0; | ||
| 466 | sch->q.qlen = 0; | 470 | sch->q.qlen = 0; |
| 467 | } | 471 | } |
| 468 | 472 | ||
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 6883a8971562..da250b2e06ae 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c | |||
| @@ -199,6 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 199 | unsigned int idx, prev_backlog, prev_qlen; | 199 | unsigned int idx, prev_backlog, prev_qlen; |
| 200 | struct fq_codel_flow *flow; | 200 | struct fq_codel_flow *flow; |
| 201 | int uninitialized_var(ret); | 201 | int uninitialized_var(ret); |
| 202 | unsigned int pkt_len; | ||
| 202 | bool memory_limited; | 203 | bool memory_limited; |
| 203 | 204 | ||
| 204 | idx = fq_codel_classify(skb, sch, &ret); | 205 | idx = fq_codel_classify(skb, sch, &ret); |
| @@ -230,6 +231,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 230 | prev_backlog = sch->qstats.backlog; | 231 | prev_backlog = sch->qstats.backlog; |
| 231 | prev_qlen = sch->q.qlen; | 232 | prev_qlen = sch->q.qlen; |
| 232 | 233 | ||
| 234 | /* save this packet length as it might be dropped by fq_codel_drop() */ | ||
| 235 | pkt_len = qdisc_pkt_len(skb); | ||
| 233 | /* fq_codel_drop() is quite expensive, as it performs a linear search | 236 | /* fq_codel_drop() is quite expensive, as it performs a linear search |
| 234 | * in q->backlogs[] to find a fat flow. | 237 | * in q->backlogs[] to find a fat flow. |
| 235 | * So instead of dropping a single packet, drop half of its backlog | 238 | * So instead of dropping a single packet, drop half of its backlog |
| @@ -237,14 +240,23 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 237 | */ | 240 | */ |
| 238 | ret = fq_codel_drop(sch, q->drop_batch_size); | 241 | ret = fq_codel_drop(sch, q->drop_batch_size); |
| 239 | 242 | ||
| 240 | q->drop_overlimit += prev_qlen - sch->q.qlen; | 243 | prev_qlen -= sch->q.qlen; |
| 244 | prev_backlog -= sch->qstats.backlog; | ||
| 245 | q->drop_overlimit += prev_qlen; | ||
| 241 | if (memory_limited) | 246 | if (memory_limited) |
| 242 | q->drop_overmemory += prev_qlen - sch->q.qlen; | 247 | q->drop_overmemory += prev_qlen; |
| 243 | /* As we dropped packet(s), better let upper stack know this */ | ||
| 244 | qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen, | ||
| 245 | prev_backlog - sch->qstats.backlog); | ||
| 246 | 248 | ||
| 247 | return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS; | 249 | /* As we dropped packet(s), better let upper stack know this. |
| 250 | * If we dropped a packet for this flow, return NET_XMIT_CN, | ||
| 251 | * but in this case, our parents won't increase their backlogs. | ||
| 252 | */ | ||
| 253 | if (ret == idx) { | ||
| 254 | qdisc_tree_reduce_backlog(sch, prev_qlen - 1, | ||
| 255 | prev_backlog - pkt_len); | ||
| 256 | return NET_XMIT_CN; | ||
| 257 | } | ||
| 258 | qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog); | ||
| 259 | return NET_XMIT_SUCCESS; | ||
| 248 | } | 260 | } |
| 249 | 261 | ||
| 250 | /* This is the specific function called from codel_dequeue() | 262 | /* This is the specific function called from codel_dequeue() |
| @@ -649,7 +661,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl, | |||
| 649 | qs.backlog = q->backlogs[idx]; | 661 | qs.backlog = q->backlogs[idx]; |
| 650 | qs.drops = flow->dropped; | 662 | qs.drops = flow->dropped; |
| 651 | } | 663 | } |
| 652 | if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0) | 664 | if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0) |
| 653 | return -1; | 665 | return -1; |
| 654 | if (idx < q->flows_cnt) | 666 | if (idx < q->flows_cnt) |
| 655 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); | 667 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); |
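To make the fq_codel accounting above concrete: suppose fq_codel_drop() removed three packets totalling 4200 bytes and one of them was the packet just enqueued (1500 bytes, so ret == idx). The parent qdiscs never accounted for that packet, so it must not be subtracted from their counters. Illustrative numbers only:

        unsigned int dropped_qlen    = 3;    /* prev_qlen after the subtraction    */
        unsigned int dropped_backlog = 4200; /* prev_backlog after the subtraction */
        unsigned int pkt_len         = 1500; /* the packet we tried to enqueue     */

        /* Report only 2 packets / 2700 bytes upstream; the function then
         * returns NET_XMIT_CN so the caller knows its own packet was dropped.
         */
        qdisc_tree_reduce_backlog(sch, dropped_qlen - 1, dropped_backlog - pkt_len);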
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 269dd71b3828..f9e0e9c03d0a 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
| @@ -49,6 +49,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) | |||
| 49 | { | 49 | { |
| 50 | q->gso_skb = skb; | 50 | q->gso_skb = skb; |
| 51 | q->qstats.requeues++; | 51 | q->qstats.requeues++; |
| 52 | qdisc_qstats_backlog_inc(q, skb); | ||
| 52 | q->q.qlen++; /* it's still part of the queue */ | 53 | q->q.qlen++; /* it's still part of the queue */ |
| 53 | __netif_schedule(q); | 54 | __netif_schedule(q); |
| 54 | 55 | ||
| @@ -92,6 +93,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, | |||
| 92 | txq = skb_get_tx_queue(txq->dev, skb); | 93 | txq = skb_get_tx_queue(txq->dev, skb); |
| 93 | if (!netif_xmit_frozen_or_stopped(txq)) { | 94 | if (!netif_xmit_frozen_or_stopped(txq)) { |
| 94 | q->gso_skb = NULL; | 95 | q->gso_skb = NULL; |
| 96 | qdisc_qstats_backlog_dec(q, skb); | ||
| 95 | q->q.qlen--; | 97 | q->q.qlen--; |
| 96 | } else | 98 | } else |
| 97 | skb = NULL; | 99 | skb = NULL; |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index d783d7cc3348..1ac9f9f03fe3 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
| @@ -1529,6 +1529,7 @@ hfsc_reset_qdisc(struct Qdisc *sch) | |||
| 1529 | q->eligible = RB_ROOT; | 1529 | q->eligible = RB_ROOT; |
| 1530 | INIT_LIST_HEAD(&q->droplist); | 1530 | INIT_LIST_HEAD(&q->droplist); |
| 1531 | qdisc_watchdog_cancel(&q->watchdog); | 1531 | qdisc_watchdog_cancel(&q->watchdog); |
| 1532 | sch->qstats.backlog = 0; | ||
| 1532 | sch->q.qlen = 0; | 1533 | sch->q.qlen = 0; |
| 1533 | } | 1534 | } |
| 1534 | 1535 | ||
| @@ -1559,14 +1560,6 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) | |||
| 1559 | struct hfsc_sched *q = qdisc_priv(sch); | 1560 | struct hfsc_sched *q = qdisc_priv(sch); |
| 1560 | unsigned char *b = skb_tail_pointer(skb); | 1561 | unsigned char *b = skb_tail_pointer(skb); |
| 1561 | struct tc_hfsc_qopt qopt; | 1562 | struct tc_hfsc_qopt qopt; |
| 1562 | struct hfsc_class *cl; | ||
| 1563 | unsigned int i; | ||
| 1564 | |||
| 1565 | sch->qstats.backlog = 0; | ||
| 1566 | for (i = 0; i < q->clhash.hashsize; i++) { | ||
| 1567 | hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) | ||
| 1568 | sch->qstats.backlog += cl->qdisc->qstats.backlog; | ||
| 1569 | } | ||
| 1570 | 1563 | ||
| 1571 | qopt.defcls = q->defcls; | 1564 | qopt.defcls = q->defcls; |
| 1572 | if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) | 1565 | if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) |
| @@ -1604,6 +1597,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 1604 | if (cl->qdisc->q.qlen == 1) | 1597 | if (cl->qdisc->q.qlen == 1) |
| 1605 | set_active(cl, qdisc_pkt_len(skb)); | 1598 | set_active(cl, qdisc_pkt_len(skb)); |
| 1606 | 1599 | ||
| 1600 | qdisc_qstats_backlog_inc(sch, skb); | ||
| 1607 | sch->q.qlen++; | 1601 | sch->q.qlen++; |
| 1608 | 1602 | ||
| 1609 | return NET_XMIT_SUCCESS; | 1603 | return NET_XMIT_SUCCESS; |
| @@ -1672,6 +1666,7 @@ hfsc_dequeue(struct Qdisc *sch) | |||
| 1672 | 1666 | ||
| 1673 | qdisc_unthrottled(sch); | 1667 | qdisc_unthrottled(sch); |
| 1674 | qdisc_bstats_update(sch, skb); | 1668 | qdisc_bstats_update(sch, skb); |
| 1669 | qdisc_qstats_backlog_dec(sch, skb); | ||
| 1675 | sch->q.qlen--; | 1670 | sch->q.qlen--; |
| 1676 | 1671 | ||
| 1677 | return skb; | 1672 | return skb; |
| @@ -1695,6 +1690,7 @@ hfsc_drop(struct Qdisc *sch) | |||
| 1695 | } | 1690 | } |
| 1696 | cl->qstats.drops++; | 1691 | cl->qstats.drops++; |
| 1697 | qdisc_qstats_drop(sch); | 1692 | qdisc_qstats_drop(sch); |
| 1693 | sch->qstats.backlog -= len; | ||
| 1698 | sch->q.qlen--; | 1694 | sch->q.qlen--; |
| 1699 | return len; | 1695 | return len; |
| 1700 | } | 1696 | } |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index f6bf5818ed4d..d4b4218af6b1 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
| @@ -928,17 +928,10 @@ ok: | |||
| 928 | } | 928 | } |
| 929 | } | 929 | } |
| 930 | qdisc_qstats_overlimit(sch); | 930 | qdisc_qstats_overlimit(sch); |
| 931 | if (likely(next_event > q->now)) { | 931 | if (likely(next_event > q->now)) |
| 932 | if (!test_bit(__QDISC_STATE_DEACTIVATED, | 932 | qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true); |
| 933 | &qdisc_root_sleeping(q->watchdog.qdisc)->state)) { | 933 | else |
| 934 | ktime_t time = ns_to_ktime(next_event); | ||
| 935 | qdisc_throttled(q->watchdog.qdisc); | ||
| 936 | hrtimer_start(&q->watchdog.timer, time, | ||
| 937 | HRTIMER_MODE_ABS_PINNED); | ||
| 938 | } | ||
| 939 | } else { | ||
| 940 | schedule_work(&q->work); | 934 | schedule_work(&q->work); |
| 941 | } | ||
| 942 | fin: | 935 | fin: |
| 943 | return skb; | 936 | return skb; |
| 944 | } | 937 | } |
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 10adbc617905..8fe6999b642a 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c | |||
| @@ -27,6 +27,11 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid) | |||
| 27 | return TC_H_MIN(classid) + 1; | 27 | return TC_H_MIN(classid) + 1; |
| 28 | } | 28 | } |
| 29 | 29 | ||
| 30 | static bool ingress_cl_offload(u32 classid) | ||
| 31 | { | ||
| 32 | return true; | ||
| 33 | } | ||
| 34 | |||
| 30 | static unsigned long ingress_bind_filter(struct Qdisc *sch, | 35 | static unsigned long ingress_bind_filter(struct Qdisc *sch, |
| 31 | unsigned long parent, u32 classid) | 36 | unsigned long parent, u32 classid) |
| 32 | { | 37 | { |
| @@ -86,6 +91,7 @@ static const struct Qdisc_class_ops ingress_class_ops = { | |||
| 86 | .put = ingress_put, | 91 | .put = ingress_put, |
| 87 | .walk = ingress_walk, | 92 | .walk = ingress_walk, |
| 88 | .tcf_chain = ingress_find_tcf, | 93 | .tcf_chain = ingress_find_tcf, |
| 94 | .tcf_cl_offload = ingress_cl_offload, | ||
| 89 | .bind_tcf = ingress_bind_filter, | 95 | .bind_tcf = ingress_bind_filter, |
| 90 | .unbind_tcf = ingress_put, | 96 | .unbind_tcf = ingress_put, |
| 91 | }; | 97 | }; |
| @@ -110,6 +116,11 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid) | |||
| 110 | } | 116 | } |
| 111 | } | 117 | } |
| 112 | 118 | ||
| 119 | static bool clsact_cl_offload(u32 classid) | ||
| 120 | { | ||
| 121 | return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS); | ||
| 122 | } | ||
| 123 | |||
| 113 | static unsigned long clsact_bind_filter(struct Qdisc *sch, | 124 | static unsigned long clsact_bind_filter(struct Qdisc *sch, |
| 114 | unsigned long parent, u32 classid) | 125 | unsigned long parent, u32 classid) |
| 115 | { | 126 | { |
| @@ -158,6 +169,7 @@ static const struct Qdisc_class_ops clsact_class_ops = { | |||
| 158 | .put = ingress_put, | 169 | .put = ingress_put, |
| 159 | .walk = ingress_walk, | 170 | .walk = ingress_walk, |
| 160 | .tcf_chain = clsact_find_tcf, | 171 | .tcf_chain = clsact_find_tcf, |
| 172 | .tcf_cl_offload = clsact_cl_offload, | ||
| 161 | .bind_tcf = clsact_bind_filter, | 173 | .bind_tcf = clsact_bind_filter, |
| 162 | .unbind_tcf = ingress_put, | 174 | .unbind_tcf = ingress_put, |
| 163 | }; | 175 | }; |
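The tcf_cl_offload hooks added here feed the tc_should_offload(dev, tp, flags) checks seen earlier in cls_u32 and cls_flower: passing tp lets the helper ask the owning qdisc whether this classifier chain may be offloaded at all (for clsact, only the ingress minor). A plausible shape for that helper, sketched on the assumption that it lives in include/net/pkt_cls.h; the real definition may differ:

static inline bool tc_should_offload(const struct net_device *dev,
                                     const struct tcf_proto *tp, u32 flags)
{
        const struct Qdisc_class_ops *cops = tp->q->ops->cl_ops;

        if (!(dev->features & NETIF_F_HW_TC))
                return false;
        if (flags & TCA_CLS_FLAGS_SKIP_HW)
                return false;
        if (!dev->netdev_ops->ndo_setup_tc)
                return false;
        /* Let the qdisc veto offload for classes it cannot offload. */
        if (cops && cops->tcf_cl_offload)
                return cops->tcf_cl_offload(tp->classid);

        return true;
}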
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index fee1b15506b2..4b0a82191bc4 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
| @@ -85,6 +85,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 85 | 85 | ||
| 86 | ret = qdisc_enqueue(skb, qdisc); | 86 | ret = qdisc_enqueue(skb, qdisc); |
| 87 | if (ret == NET_XMIT_SUCCESS) { | 87 | if (ret == NET_XMIT_SUCCESS) { |
| 88 | qdisc_qstats_backlog_inc(sch, skb); | ||
| 88 | sch->q.qlen++; | 89 | sch->q.qlen++; |
| 89 | return NET_XMIT_SUCCESS; | 90 | return NET_XMIT_SUCCESS; |
| 90 | } | 91 | } |
| @@ -117,6 +118,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch) | |||
| 117 | struct sk_buff *skb = qdisc_dequeue_peeked(qdisc); | 118 | struct sk_buff *skb = qdisc_dequeue_peeked(qdisc); |
| 118 | if (skb) { | 119 | if (skb) { |
| 119 | qdisc_bstats_update(sch, skb); | 120 | qdisc_bstats_update(sch, skb); |
| 121 | qdisc_qstats_backlog_dec(sch, skb); | ||
| 120 | sch->q.qlen--; | 122 | sch->q.qlen--; |
| 121 | return skb; | 123 | return skb; |
| 122 | } | 124 | } |
| @@ -135,6 +137,7 @@ static unsigned int prio_drop(struct Qdisc *sch) | |||
| 135 | for (prio = q->bands-1; prio >= 0; prio--) { | 137 | for (prio = q->bands-1; prio >= 0; prio--) { |
| 136 | qdisc = q->queues[prio]; | 138 | qdisc = q->queues[prio]; |
| 137 | if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) { | 139 | if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) { |
| 140 | sch->qstats.backlog -= len; | ||
| 138 | sch->q.qlen--; | 141 | sch->q.qlen--; |
| 139 | return len; | 142 | return len; |
| 140 | } | 143 | } |
| @@ -151,6 +154,7 @@ prio_reset(struct Qdisc *sch) | |||
| 151 | 154 | ||
| 152 | for (prio = 0; prio < q->bands; prio++) | 155 | for (prio = 0; prio < q->bands; prio++) |
| 153 | qdisc_reset(q->queues[prio]); | 156 | qdisc_reset(q->queues[prio]); |
| 157 | sch->qstats.backlog = 0; | ||
| 154 | sch->q.qlen = 0; | 158 | sch->q.qlen = 0; |
| 155 | } | 159 | } |
| 156 | 160 | ||
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 8d2d8d953432..f18857febdad 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c | |||
| @@ -1235,8 +1235,10 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 1235 | cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid); | 1235 | cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid); |
| 1236 | err = qfq_change_agg(sch, cl, cl->agg->class_weight, | 1236 | err = qfq_change_agg(sch, cl, cl->agg->class_weight, |
| 1237 | qdisc_pkt_len(skb)); | 1237 | qdisc_pkt_len(skb)); |
| 1238 | if (err) | 1238 | if (err) { |
| 1239 | return err; | 1239 | cl->qstats.drops++; |
| 1240 | return qdisc_drop(skb, sch); | ||
| 1241 | } | ||
| 1240 | } | 1242 | } |
| 1241 | 1243 | ||
| 1242 | err = qdisc_enqueue(skb, cl->qdisc); | 1244 | err = qdisc_enqueue(skb, cl->qdisc); |
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 8c0508c0e287..91578bdd378c 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
| @@ -97,6 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 97 | 97 | ||
| 98 | ret = qdisc_enqueue(skb, child); | 98 | ret = qdisc_enqueue(skb, child); |
| 99 | if (likely(ret == NET_XMIT_SUCCESS)) { | 99 | if (likely(ret == NET_XMIT_SUCCESS)) { |
| 100 | qdisc_qstats_backlog_inc(sch, skb); | ||
| 100 | sch->q.qlen++; | 101 | sch->q.qlen++; |
| 101 | } else if (net_xmit_drop_count(ret)) { | 102 | } else if (net_xmit_drop_count(ret)) { |
| 102 | q->stats.pdrop++; | 103 | q->stats.pdrop++; |
| @@ -118,6 +119,7 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch) | |||
| 118 | skb = child->dequeue(child); | 119 | skb = child->dequeue(child); |
| 119 | if (skb) { | 120 | if (skb) { |
| 120 | qdisc_bstats_update(sch, skb); | 121 | qdisc_bstats_update(sch, skb); |
| 122 | qdisc_qstats_backlog_dec(sch, skb); | ||
| 121 | sch->q.qlen--; | 123 | sch->q.qlen--; |
| 122 | } else { | 124 | } else { |
| 123 | if (!red_is_idling(&q->vars)) | 125 | if (!red_is_idling(&q->vars)) |
| @@ -143,6 +145,7 @@ static unsigned int red_drop(struct Qdisc *sch) | |||
| 143 | if (child->ops->drop && (len = child->ops->drop(child)) > 0) { | 145 | if (child->ops->drop && (len = child->ops->drop(child)) > 0) { |
| 144 | q->stats.other++; | 146 | q->stats.other++; |
| 145 | qdisc_qstats_drop(sch); | 147 | qdisc_qstats_drop(sch); |
| 148 | sch->qstats.backlog -= len; | ||
| 146 | sch->q.qlen--; | 149 | sch->q.qlen--; |
| 147 | return len; | 150 | return len; |
| 148 | } | 151 | } |
| @@ -158,6 +161,7 @@ static void red_reset(struct Qdisc *sch) | |||
| 158 | struct red_sched_data *q = qdisc_priv(sch); | 161 | struct red_sched_data *q = qdisc_priv(sch); |
| 159 | 162 | ||
| 160 | qdisc_reset(q->qdisc); | 163 | qdisc_reset(q->qdisc); |
| 164 | sch->qstats.backlog = 0; | ||
| 161 | sch->q.qlen = 0; | 165 | sch->q.qlen = 0; |
| 162 | red_restart(&q->vars); | 166 | red_restart(&q->vars); |
| 163 | } | 167 | } |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 83b90b584fae..3161e491990b 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
| @@ -207,6 +207,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 207 | return ret; | 207 | return ret; |
| 208 | } | 208 | } |
| 209 | 209 | ||
| 210 | qdisc_qstats_backlog_inc(sch, skb); | ||
| 210 | sch->q.qlen++; | 211 | sch->q.qlen++; |
| 211 | return NET_XMIT_SUCCESS; | 212 | return NET_XMIT_SUCCESS; |
| 212 | } | 213 | } |
| @@ -217,6 +218,7 @@ static unsigned int tbf_drop(struct Qdisc *sch) | |||
| 217 | unsigned int len = 0; | 218 | unsigned int len = 0; |
| 218 | 219 | ||
| 219 | if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { | 220 | if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { |
| 221 | sch->qstats.backlog -= len; | ||
| 220 | sch->q.qlen--; | 222 | sch->q.qlen--; |
| 221 | qdisc_qstats_drop(sch); | 223 | qdisc_qstats_drop(sch); |
| 222 | } | 224 | } |
| @@ -263,6 +265,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch) | |||
| 263 | q->t_c = now; | 265 | q->t_c = now; |
| 264 | q->tokens = toks; | 266 | q->tokens = toks; |
| 265 | q->ptokens = ptoks; | 267 | q->ptokens = ptoks; |
| 268 | qdisc_qstats_backlog_dec(sch, skb); | ||
| 266 | sch->q.qlen--; | 269 | sch->q.qlen--; |
| 267 | qdisc_unthrottled(sch); | 270 | qdisc_unthrottled(sch); |
| 268 | qdisc_bstats_update(sch, skb); | 271 | qdisc_bstats_update(sch, skb); |
| @@ -294,6 +297,7 @@ static void tbf_reset(struct Qdisc *sch) | |||
| 294 | struct tbf_sched_data *q = qdisc_priv(sch); | 297 | struct tbf_sched_data *q = qdisc_priv(sch); |
| 295 | 298 | ||
| 296 | qdisc_reset(q->qdisc); | 299 | qdisc_reset(q->qdisc); |
| 300 | sch->qstats.backlog = 0; | ||
| 297 | sch->q.qlen = 0; | 301 | sch->q.qlen = 0; |
| 298 | q->t_c = ktime_get_ns(); | 302 | q->t_c = ktime_get_ns(); |
| 299 | q->tokens = q->buffer; | 303 | q->tokens = q->buffer; |
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index 8e3e769dc9ea..1ce724b87618 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c | |||
| @@ -356,6 +356,9 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p) | |||
| 356 | if (cb->args[4] < cb->args[1]) | 356 | if (cb->args[4] < cb->args[1]) |
| 357 | goto next; | 357 | goto next; |
| 358 | 358 | ||
| 359 | if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs)) | ||
| 360 | goto next; | ||
| 361 | |||
| 359 | if (r->sdiag_family != AF_UNSPEC && | 362 | if (r->sdiag_family != AF_UNSPEC && |
| 360 | sk->sk_family != r->sdiag_family) | 363 | sk->sk_family != r->sdiag_family) |
| 361 | goto next; | 364 | goto next; |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 777d0324594a..67154b848aa9 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -4220,6 +4220,7 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc, | |||
| 4220 | info->sctpi_s_disable_fragments = sp->disable_fragments; | 4220 | info->sctpi_s_disable_fragments = sp->disable_fragments; |
| 4221 | info->sctpi_s_v4mapped = sp->v4mapped; | 4221 | info->sctpi_s_v4mapped = sp->v4mapped; |
| 4222 | info->sctpi_s_frag_interleave = sp->frag_interleave; | 4222 | info->sctpi_s_frag_interleave = sp->frag_interleave; |
| 4223 | info->sctpi_s_type = sp->type; | ||
| 4223 | 4224 | ||
| 4224 | return 0; | 4225 | return 0; |
| 4225 | } | 4226 | } |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 06b4df9faaa1..2808d550d273 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
| @@ -446,16 +446,27 @@ out_no_rpciod: | |||
| 446 | return ERR_PTR(err); | 446 | return ERR_PTR(err); |
| 447 | } | 447 | } |
| 448 | 448 | ||
| 449 | struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, | 449 | static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, |
| 450 | struct rpc_xprt *xprt) | 450 | struct rpc_xprt *xprt) |
| 451 | { | 451 | { |
| 452 | struct rpc_clnt *clnt = NULL; | 452 | struct rpc_clnt *clnt = NULL; |
| 453 | struct rpc_xprt_switch *xps; | 453 | struct rpc_xprt_switch *xps; |
| 454 | 454 | ||
| 455 | xps = xprt_switch_alloc(xprt, GFP_KERNEL); | 455 | if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) { |
| 456 | if (xps == NULL) | 456 | WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP); |
| 457 | return ERR_PTR(-ENOMEM); | 457 | xps = args->bc_xprt->xpt_bc_xps; |
| 458 | 458 | xprt_switch_get(xps); | |
| 459 | } else { | ||
| 460 | xps = xprt_switch_alloc(xprt, GFP_KERNEL); | ||
| 461 | if (xps == NULL) { | ||
| 462 | xprt_put(xprt); | ||
| 463 | return ERR_PTR(-ENOMEM); | ||
| 464 | } | ||
| 465 | if (xprt->bc_xprt) { | ||
| 466 | xprt_switch_get(xps); | ||
| 467 | xprt->bc_xprt->xpt_bc_xps = xps; | ||
| 468 | } | ||
| 469 | } | ||
| 459 | clnt = rpc_new_client(args, xps, xprt, NULL); | 470 | clnt = rpc_new_client(args, xps, xprt, NULL); |
| 460 | if (IS_ERR(clnt)) | 471 | if (IS_ERR(clnt)) |
| 461 | return clnt; | 472 | return clnt; |
| @@ -483,7 +494,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args, | |||
| 483 | 494 | ||
| 484 | return clnt; | 495 | return clnt; |
| 485 | } | 496 | } |
| 486 | EXPORT_SYMBOL_GPL(rpc_create_xprt); | ||
| 487 | 497 | ||
| 488 | /** | 498 | /** |
| 489 | * rpc_create - create an RPC client and transport with one call | 499 | * rpc_create - create an RPC client and transport with one call |
| @@ -509,6 +519,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) | |||
| 509 | }; | 519 | }; |
| 510 | char servername[48]; | 520 | char servername[48]; |
| 511 | 521 | ||
| 522 | if (args->bc_xprt) { | ||
| 523 | WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP); | ||
| 524 | xprt = args->bc_xprt->xpt_bc_xprt; | ||
| 525 | if (xprt) { | ||
| 526 | xprt_get(xprt); | ||
| 527 | return rpc_create_xprt(args, xprt); | ||
| 528 | } | ||
| 529 | } | ||
| 530 | |||
| 512 | if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS) | 531 | if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS) |
| 513 | xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS; | 532 | xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS; |
| 514 | if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT) | 533 | if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT) |
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index f5572e31d518..4f01f63102ee 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
| @@ -136,6 +136,8 @@ static void svc_xprt_free(struct kref *kref) | |||
| 136 | /* See comment on corresponding get in xs_setup_bc_tcp(): */ | 136 | /* See comment on corresponding get in xs_setup_bc_tcp(): */ |
| 137 | if (xprt->xpt_bc_xprt) | 137 | if (xprt->xpt_bc_xprt) |
| 138 | xprt_put(xprt->xpt_bc_xprt); | 138 | xprt_put(xprt->xpt_bc_xprt); |
| 139 | if (xprt->xpt_bc_xps) | ||
| 140 | xprt_switch_put(xprt->xpt_bc_xps); | ||
| 139 | xprt->xpt_ops->xpo_free(xprt); | 141 | xprt->xpt_ops->xpo_free(xprt); |
| 140 | module_put(owner); | 142 | module_put(owner); |
| 141 | } | 143 | } |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 2d3e0c42361e..7e2b2fa189c3 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -3057,6 +3057,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args) | |||
| 3057 | return xprt; | 3057 | return xprt; |
| 3058 | 3058 | ||
| 3059 | args->bc_xprt->xpt_bc_xprt = NULL; | 3059 | args->bc_xprt->xpt_bc_xprt = NULL; |
| 3060 | args->bc_xprt->xpt_bc_xps = NULL; | ||
| 3060 | xprt_put(xprt); | 3061 | xprt_put(xprt); |
| 3061 | ret = ERR_PTR(-EINVAL); | 3062 | ret = ERR_PTR(-EINVAL); |
| 3062 | out_err: | 3063 | out_err: |
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 4dfc5c14f8c3..3ad9fab1985f 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c | |||
| @@ -346,9 +346,15 @@ static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg, | |||
| 346 | struct nlattr **attrs) | 346 | struct nlattr **attrs) |
| 347 | { | 347 | { |
| 348 | struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1]; | 348 | struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1]; |
| 349 | int err; | ||
| 350 | |||
| 351 | if (!attrs[TIPC_NLA_BEARER]) | ||
| 352 | return -EINVAL; | ||
| 349 | 353 | ||
| 350 | nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER], | 354 | err = nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, |
| 351 | NULL); | 355 | attrs[TIPC_NLA_BEARER], NULL); |
| 356 | if (err) | ||
| 357 | return err; | ||
| 352 | 358 | ||
| 353 | return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME, | 359 | return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME, |
| 354 | nla_data(bearer[TIPC_NLA_BEARER_NAME]), | 360 | nla_data(bearer[TIPC_NLA_BEARER_NAME]), |
| @@ -460,14 +466,31 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg, | |||
| 460 | struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; | 466 | struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; |
| 461 | struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; | 467 | struct nlattr *prop[TIPC_NLA_PROP_MAX + 1]; |
| 462 | struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; | 468 | struct nlattr *stats[TIPC_NLA_STATS_MAX + 1]; |
| 469 | int err; | ||
| 463 | 470 | ||
| 464 | nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL); | 471 | if (!attrs[TIPC_NLA_LINK]) |
| 472 | return -EINVAL; | ||
| 465 | 473 | ||
| 466 | nla_parse_nested(prop, TIPC_NLA_PROP_MAX, link[TIPC_NLA_LINK_PROP], | 474 | err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], |
| 467 | NULL); | 475 | NULL); |
| 476 | if (err) | ||
| 477 | return err; | ||
| 478 | |||
| 479 | if (!link[TIPC_NLA_LINK_PROP]) | ||
| 480 | return -EINVAL; | ||
| 468 | 481 | ||
| 469 | nla_parse_nested(stats, TIPC_NLA_STATS_MAX, link[TIPC_NLA_LINK_STATS], | 482 | err = nla_parse_nested(prop, TIPC_NLA_PROP_MAX, |
| 470 | NULL); | 483 | link[TIPC_NLA_LINK_PROP], NULL); |
| 484 | if (err) | ||
| 485 | return err; | ||
| 486 | |||
| 487 | if (!link[TIPC_NLA_LINK_STATS]) | ||
| 488 | return -EINVAL; | ||
| 489 | |||
| 490 | err = nla_parse_nested(stats, TIPC_NLA_STATS_MAX, | ||
| 491 | link[TIPC_NLA_LINK_STATS], NULL); | ||
| 492 | if (err) | ||
| 493 | return err; | ||
| 471 | 494 | ||
| 472 | name = (char *)TLV_DATA(msg->req); | 495 | name = (char *)TLV_DATA(msg->req); |
| 473 | if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0) | 496 | if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0) |
| @@ -569,12 +592,20 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg, | |||
| 569 | { | 592 | { |
| 570 | struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; | 593 | struct nlattr *link[TIPC_NLA_LINK_MAX + 1]; |
| 571 | struct tipc_link_info link_info; | 594 | struct tipc_link_info link_info; |
| 595 | int err; | ||
| 572 | 596 | ||
| 573 | nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL); | 597 | if (!attrs[TIPC_NLA_LINK]) |
| 598 | return -EINVAL; | ||
| 599 | |||
| 600 | err = nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], | ||
| 601 | NULL); | ||
| 602 | if (err) | ||
| 603 | return err; | ||
| 574 | 604 | ||
| 575 | link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); | 605 | link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); |
| 576 | link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); | 606 | link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); |
| 577 | strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME])); | 607 | nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]), |
| 608 | TIPC_MAX_LINK_NAME); | ||
| 578 | 609 | ||
| 579 | return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO, | 610 | return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO, |
| 580 | &link_info, sizeof(link_info)); | 611 | &link_info, sizeof(link_info)); |
| @@ -758,12 +789,23 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg, | |||
| 758 | u32 node, depth, type, lowbound, upbound; | 789 | u32 node, depth, type, lowbound, upbound; |
| 759 | static const char * const scope_str[] = {"", " zone", " cluster", | 790 | static const char * const scope_str[] = {"", " zone", " cluster", |
| 760 | " node"}; | 791 | " node"}; |
| 792 | int err; | ||
| 761 | 793 | ||
| 762 | nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX, | 794 | if (!attrs[TIPC_NLA_NAME_TABLE]) |
| 763 | attrs[TIPC_NLA_NAME_TABLE], NULL); | 795 | return -EINVAL; |
| 764 | 796 | ||
| 765 | nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, nt[TIPC_NLA_NAME_TABLE_PUBL], | 797 | err = nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX, |
| 766 | NULL); | 798 | attrs[TIPC_NLA_NAME_TABLE], NULL); |
| 799 | if (err) | ||
| 800 | return err; | ||
| 801 | |||
| 802 | if (!nt[TIPC_NLA_NAME_TABLE_PUBL]) | ||
| 803 | return -EINVAL; | ||
| 804 | |||
| 805 | err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, | ||
| 806 | nt[TIPC_NLA_NAME_TABLE_PUBL], NULL); | ||
| 807 | if (err) | ||
| 808 | return err; | ||
| 767 | 809 | ||
| 768 | ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); | 810 | ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req); |
| 769 | 811 | ||
| @@ -815,8 +857,15 @@ static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, | |||
| 815 | { | 857 | { |
| 816 | u32 type, lower, upper; | 858 | u32 type, lower, upper; |
| 817 | struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1]; | 859 | struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1]; |
| 860 | int err; | ||
| 818 | 861 | ||
| 819 | nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], NULL); | 862 | if (!attrs[TIPC_NLA_PUBL]) |
| 863 | return -EINVAL; | ||
| 864 | |||
| 865 | err = nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], | ||
| 866 | NULL); | ||
| 867 | if (err) | ||
| 868 | return err; | ||
| 820 | 869 | ||
| 821 | type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]); | 870 | type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]); |
| 822 | lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]); | 871 | lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]); |
| @@ -876,7 +925,13 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg, | |||
| 876 | u32 sock_ref; | 925 | u32 sock_ref; |
| 877 | struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; | 926 | struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; |
| 878 | 927 | ||
| 879 | nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], NULL); | 928 | if (!attrs[TIPC_NLA_SOCK]) |
| 929 | return -EINVAL; | ||
| 930 | |||
| 931 | err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], | ||
| 932 | NULL); | ||
| 933 | if (err) | ||
| 934 | return err; | ||
| 880 | 935 | ||
| 881 | sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); | 936 | sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); |
| 882 | tipc_tlv_sprintf(msg->rep, "%u:", sock_ref); | 937 | tipc_tlv_sprintf(msg->rep, "%u:", sock_ref); |
| @@ -917,9 +972,15 @@ static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg, | |||
| 917 | struct nlattr **attrs) | 972 | struct nlattr **attrs) |
| 918 | { | 973 | { |
| 919 | struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1]; | 974 | struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1]; |
| 975 | int err; | ||
| 976 | |||
| 977 | if (!attrs[TIPC_NLA_MEDIA]) | ||
| 978 | return -EINVAL; | ||
| 920 | 979 | ||
| 921 | nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA], | 980 | err = nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA], |
| 922 | NULL); | 981 | NULL); |
| 982 | if (err) | ||
| 983 | return err; | ||
| 923 | 984 | ||
| 924 | return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME, | 985 | return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME, |
| 925 | nla_data(media[TIPC_NLA_MEDIA_NAME]), | 986 | nla_data(media[TIPC_NLA_MEDIA_NAME]), |
| @@ -931,8 +992,15 @@ static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg, | |||
| 931 | { | 992 | { |
| 932 | struct tipc_node_info node_info; | 993 | struct tipc_node_info node_info; |
| 933 | struct nlattr *node[TIPC_NLA_NODE_MAX + 1]; | 994 | struct nlattr *node[TIPC_NLA_NODE_MAX + 1]; |
| 995 | int err; | ||
| 934 | 996 | ||
| 935 | nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], NULL); | 997 | if (!attrs[TIPC_NLA_NODE]) |
| 998 | return -EINVAL; | ||
| 999 | |||
| 1000 | err = nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], | ||
| 1001 | NULL); | ||
| 1002 | if (err) | ||
| 1003 | return err; | ||
| 936 | 1004 | ||
| 937 | node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR])); | 1005 | node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR])); |
| 938 | node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP])); | 1006 | node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP])); |
| @@ -971,8 +1039,16 @@ static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg, | |||
| 971 | { | 1039 | { |
| 972 | __be32 id; | 1040 | __be32 id; |
| 973 | struct nlattr *net[TIPC_NLA_NET_MAX + 1]; | 1041 | struct nlattr *net[TIPC_NLA_NET_MAX + 1]; |
| 1042 | int err; | ||
| 1043 | |||
| 1044 | if (!attrs[TIPC_NLA_NET]) | ||
| 1045 | return -EINVAL; | ||
| 1046 | |||
| 1047 | err = nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], | ||
| 1048 | NULL); | ||
| 1049 | if (err) | ||
| 1050 | return err; | ||
| 974 | 1051 | ||
| 975 | nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], NULL); | ||
| 976 | id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID])); | 1052 | id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID])); |
| 977 | 1053 | ||
| 978 | return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id)); | 1054 | return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id)); |
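
Every hunk in net/tipc/netlink_compat.c above applies the same two-step hardening: return -EINVAL if the expected top-level attribute is missing, then propagate the return value of nla_parse_nested() instead of discarding it. Here is that control flow modelled in plain userspace C so it compiles on its own; parse_nested() and struct attr are stand-ins for the netlink API, not real kernel interfaces:

#include <errno.h>
#include <stdio.h>

struct attr { int present; const char *data; };

/* stand-in for nla_parse_nested(): 0 on success, -EINVAL on bad input */
static int parse_nested(struct attr *out, const struct attr *in)
{
    if (!in->data)
        return -EINVAL;
    out->present = 1;
    out->data = in->data;
    return 0;
}

static int dump_bearer(const struct attr *attrs)
{
    struct attr bearer = { 0 };
    int err;

    if (!attrs->present)                 /* top-level attribute missing */
        return -EINVAL;

    err = parse_nested(&bearer, attrs);  /* never ignore the return value */
    if (err)
        return err;

    printf("bearer: %s\n", bearer.data);
    return 0;
}

int main(void)
{
    struct attr missing = { 0, NULL };
    struct attr ok = { 1, "eth:data0" };

    printf("missing -> %d\n", dump_bearer(&missing));
    printf("ok      -> %d\n", dump_bearer(&ok));
    return 0;
}

Both checks matter: skipping the presence test dereferences a NULL attribute, and skipping the error check would use a table that was never filled in.
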
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 80aa6a3e6817..735362c26c8e 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i) | |||
| 315 | &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { | 315 | &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { |
| 316 | struct dentry *dentry = unix_sk(s)->path.dentry; | 316 | struct dentry *dentry = unix_sk(s)->path.dentry; |
| 317 | 317 | ||
| 318 | if (dentry && d_backing_inode(dentry) == i) { | 318 | if (dentry && d_real_inode(dentry) == i) { |
| 319 | sock_hold(s); | 319 | sock_hold(s); |
| 320 | goto found; | 320 | goto found; |
| 321 | } | 321 | } |
| @@ -911,7 +911,7 @@ static struct sock *unix_find_other(struct net *net, | |||
| 911 | err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path); | 911 | err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path); |
| 912 | if (err) | 912 | if (err) |
| 913 | goto fail; | 913 | goto fail; |
| 914 | inode = d_backing_inode(path.dentry); | 914 | inode = d_real_inode(path.dentry); |
| 915 | err = inode_permission(inode, MAY_WRITE); | 915 | err = inode_permission(inode, MAY_WRITE); |
| 916 | if (err) | 916 | if (err) |
| 917 | goto put_fail; | 917 | goto put_fail; |
| @@ -1048,7 +1048,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
| 1048 | goto out_up; | 1048 | goto out_up; |
| 1049 | } | 1049 | } |
| 1050 | addr->hash = UNIX_HASH_SIZE; | 1050 | addr->hash = UNIX_HASH_SIZE; |
| 1051 | hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1); | 1051 | hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1); |
| 1052 | spin_lock(&unix_table_lock); | 1052 | spin_lock(&unix_table_lock); |
| 1053 | u->path = u_path; | 1053 | u->path = u_path; |
| 1054 | list = &unix_socket_table[hash]; | 1054 | list = &unix_socket_table[hash]; |
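
The af_unix change makes path-based socket lookup use d_real_inode() instead of d_backing_inode(), so a socket bound inside an overlayfs mount is found by the same inode that connect() later resolves. For context, a minimal bind/connect pair over a filesystem path; the path is hypothetical and error handling is kept short:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

int main(void)
{
    const char *path = "/tmp/demo.sock";   /* hypothetical path */
    int srv = socket(AF_UNIX, SOCK_STREAM, 0);
    int cli = socket(AF_UNIX, SOCK_STREAM, 0);
    struct sockaddr_un addr;

    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);

    unlink(path);
    if (bind(srv, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
        listen(srv, 1) < 0) {
        perror("bind/listen");
        return 1;
    }
    /* connect() resolves the path to an inode and looks up the bound
     * socket by that inode; the patch above makes this lookup use the
     * underlying "real" inode so it also works on overlayfs mounts. */
    if (connect(cli, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        perror("connect");
        return 1;
    }
    puts("connected");
    close(cli);
    close(srv);
    unlink(path);
    return 0;
}
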
diff --git a/net/wireless/core.c b/net/wireless/core.c index d25c82bc1bbe..ecca3896b9f7 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
| @@ -363,8 +363,6 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, | |||
| 363 | WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel); | 363 | WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel); |
| 364 | WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch); | 364 | WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch); |
| 365 | WARN_ON(ops->add_tx_ts && !ops->del_tx_ts); | 365 | WARN_ON(ops->add_tx_ts && !ops->del_tx_ts); |
| 366 | WARN_ON(ops->set_tx_power && !ops->get_tx_power); | ||
| 367 | WARN_ON(ops->set_antenna && !ops->get_antenna); | ||
| 368 | 366 | ||
| 369 | alloc_size = sizeof(*rdev) + sizeof_priv; | 367 | alloc_size = sizeof(*rdev) + sizeof_priv; |
| 370 | 368 | ||
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 6250b1cfcde5..dbb2738e356a 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c | |||
| @@ -958,8 +958,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, | |||
| 958 | return private(dev, iwr, cmd, info, handler); | 958 | return private(dev, iwr, cmd, info, handler); |
| 959 | } | 959 | } |
| 960 | /* Old driver API : call driver ioctl handler */ | 960 | /* Old driver API : call driver ioctl handler */ |
| 961 | if (dev->netdev_ops->ndo_do_ioctl) | 961 | if (dev->netdev_ops->ndo_do_ioctl) { |
| 962 | return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); | 962 | #ifdef CONFIG_COMPAT |
| 963 | if (info->flags & IW_REQUEST_FLAG_COMPAT) { | ||
| 964 | int ret = 0; | ||
| 965 | struct iwreq iwr_lcl; | ||
| 966 | struct compat_iw_point *iwp_compat = (void *) &iwr->u.data; | ||
| 967 | |||
| 968 | memcpy(&iwr_lcl, iwr, sizeof(struct iwreq)); | ||
| 969 | iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer); | ||
| 970 | iwr_lcl.u.data.length = iwp_compat->length; | ||
| 971 | iwr_lcl.u.data.flags = iwp_compat->flags; | ||
| 972 | |||
| 973 | ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd); | ||
| 974 | |||
| 975 | iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer); | ||
| 976 | iwp_compat->length = iwr_lcl.u.data.length; | ||
| 977 | iwp_compat->flags = iwr_lcl.u.data.flags; | ||
| 978 | |||
| 979 | return ret; | ||
| 980 | } else | ||
| 981 | #endif | ||
| 982 | return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); | ||
| 983 | } | ||
| 963 | return -EOPNOTSUPP; | 984 | return -EOPNOTSUPP; |
| 964 | } | 985 | } |
| 965 | 986 | ||
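
The wext hunk handles 32-bit callers on a 64-bit kernel by copying the iwreq into a local native struct, widening the compat pointer and copying length/flags in, calling the driver handler, and then writing length/flags back out. A toy model of that copy-in/copy-out shape, compilable in userspace; the struct layouts are illustrative only (in the real compat ABI the pointer field is 32 bits wide and compat_ptr()/ptr_to_compat() perform the width change):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct compat_point { uintptr_t pointer; uint16_t length; uint16_t flags; };
struct native_point { void     *pointer; uint16_t length; uint16_t flags; };

/* stand-in for the driver's ndo_do_ioctl handler */
static int handler(struct native_point *p)
{
    p->length = (uint16_t)strlen((const char *)p->pointer);
    return 0;
}

int main(void)
{
    char essid[] = "lab-net";
    struct compat_point user = { (uintptr_t)essid, sizeof(essid), 0 };
    struct native_point local;
    int ret;

    /* copy in: build a native request from the compat view */
    local.pointer = (void *)user.pointer;
    local.length  = user.length;
    local.flags   = user.flags;

    ret = handler(&local);

    /* copy out: reflect updated length/flags back to the compat view */
    user.length = local.length;
    user.flags  = local.flags;

    printf("ret=%d length=%u flags=%u\n", ret,
           (unsigned)user.length, (unsigned)user.flags);
    return 0;
}
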
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 6750595bd7b8..4904ced676d4 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
| @@ -2454,6 +2454,7 @@ sub process { | |||
| 2454 | 2454 | ||
| 2455 | # Check for git id commit length and improperly formed commit descriptions | 2455 | # Check for git id commit length and improperly formed commit descriptions |
| 2456 | if ($in_commit_log && !$commit_log_possible_stack_dump && | 2456 | if ($in_commit_log && !$commit_log_possible_stack_dump && |
| 2457 | $line !~ /^\s*(?:Link|Patchwork|http|BugLink):/i && | ||
| 2457 | ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i || | 2458 | ($line =~ /\bcommit\s+[0-9a-f]{5,}\b/i || |
| 2458 | ($line =~ /\b[0-9a-f]{12,40}\b/i && | 2459 | ($line =~ /\b[0-9a-f]{12,40}\b/i && |
| 2459 | $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i && | 2460 | $line !~ /[\<\[][0-9a-f]{12,40}[\>\]]/i && |
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index a9155077feef..fec75786f75b 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c | |||
| @@ -384,7 +384,7 @@ static void do_of_entry_multi(void *symval, struct module *mod) | |||
| 384 | len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*", | 384 | len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*", |
| 385 | (*type)[0] ? *type : "*"); | 385 | (*type)[0] ? *type : "*"); |
| 386 | 386 | ||
| 387 | if (compatible[0]) | 387 | if ((*compatible)[0]) |
| 388 | sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "", | 388 | sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "", |
| 389 | *compatible); | 389 | *compatible); |
| 390 | 390 | ||
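
In do_of_entry_multi() the surrounding code already tests (*name)[0] and (*type)[0], because these variables are pointers to the fixed-size strings inside the table entry; compatible needs the same dereference, since compatible[0] is the array itself and is always truthy once it decays to a pointer. A self-contained demo of the difference (names and sizes are made up):

#include <stdio.h>

int main(void)
{
    char compat_buf[2][128] = { "", "vendor,device" };
    /* file2alias-style handle: pointer to a fixed-size char array */
    char (*compatible)[128] = &compat_buf[0];     /* points at "" */

    /* Wrong test: compatible[0] has type char[128], which decays to a
     * non-NULL pointer, so this branch is taken even for "". */
    if (compatible[0])
        printf("buggy check says: non-empty\n");

    /* Correct test: dereference first, then look at the first character. */
    if ((*compatible)[0])
        printf("fixed check says: non-empty\n");
    else
        printf("fixed check says: empty\n");

    return 0;
}
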
diff --git a/security/keys/compat.c b/security/keys/compat.c index c8783b3b628c..36c80bf5b89c 100644 --- a/security/keys/compat.c +++ b/security/keys/compat.c | |||
| @@ -134,7 +134,7 @@ COMPAT_SYSCALL_DEFINE5(keyctl, u32, option, | |||
| 134 | 134 | ||
| 135 | case KEYCTL_DH_COMPUTE: | 135 | case KEYCTL_DH_COMPUTE: |
| 136 | return keyctl_dh_compute(compat_ptr(arg2), compat_ptr(arg3), | 136 | return keyctl_dh_compute(compat_ptr(arg2), compat_ptr(arg3), |
| 137 | arg4); | 137 | arg4, compat_ptr(arg5)); |
| 138 | 138 | ||
| 139 | default: | 139 | default: |
| 140 | return -EOPNOTSUPP; | 140 | return -EOPNOTSUPP; |
diff --git a/security/keys/dh.c b/security/keys/dh.c index 880505a4b9f1..531ed2ec132f 100644 --- a/security/keys/dh.c +++ b/security/keys/dh.c | |||
| @@ -78,7 +78,8 @@ error: | |||
| 78 | } | 78 | } |
| 79 | 79 | ||
| 80 | long keyctl_dh_compute(struct keyctl_dh_params __user *params, | 80 | long keyctl_dh_compute(struct keyctl_dh_params __user *params, |
| 81 | char __user *buffer, size_t buflen) | 81 | char __user *buffer, size_t buflen, |
| 82 | void __user *reserved) | ||
| 82 | { | 83 | { |
| 83 | long ret; | 84 | long ret; |
| 84 | MPI base, private, prime, result; | 85 | MPI base, private, prime, result; |
| @@ -97,6 +98,11 @@ long keyctl_dh_compute(struct keyctl_dh_params __user *params, | |||
| 97 | goto out; | 98 | goto out; |
| 98 | } | 99 | } |
| 99 | 100 | ||
| 101 | if (reserved) { | ||
| 102 | ret = -EINVAL; | ||
| 103 | goto out; | ||
| 104 | } | ||
| 105 | |||
| 100 | keylen = mpi_from_key(pcopy.prime, buflen, &prime); | 106 | keylen = mpi_from_key(pcopy.prime, buflen, &prime); |
| 101 | if (keylen < 0 || !prime) { | 107 | if (keylen < 0 || !prime) { |
| 102 | /* buflen == 0 may be used to query the required buffer size, | 108 | /* buflen == 0 may be used to query the required buffer size, |
diff --git a/security/keys/internal.h b/security/keys/internal.h index 8ec7a528365d..a705a7d92ad7 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h | |||
| @@ -260,10 +260,11 @@ static inline long keyctl_get_persistent(uid_t uid, key_serial_t destring) | |||
| 260 | 260 | ||
| 261 | #ifdef CONFIG_KEY_DH_OPERATIONS | 261 | #ifdef CONFIG_KEY_DH_OPERATIONS |
| 262 | extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *, | 262 | extern long keyctl_dh_compute(struct keyctl_dh_params __user *, char __user *, |
| 263 | size_t); | 263 | size_t, void __user *); |
| 264 | #else | 264 | #else |
| 265 | static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params, | 265 | static inline long keyctl_dh_compute(struct keyctl_dh_params __user *params, |
| 266 | char __user *buffer, size_t buflen) | 266 | char __user *buffer, size_t buflen, |
| 267 | void __user *reserved) | ||
| 267 | { | 268 | { |
| 268 | return -EOPNOTSUPP; | 269 | return -EOPNOTSUPP; |
| 269 | } | 270 | } |
diff --git a/security/keys/key.c b/security/keys/key.c index bd5a272f28a6..346fbf201c22 100644 --- a/security/keys/key.c +++ b/security/keys/key.c | |||
| @@ -597,7 +597,7 @@ int key_reject_and_link(struct key *key, | |||
| 597 | 597 | ||
| 598 | mutex_unlock(&key_construction_mutex); | 598 | mutex_unlock(&key_construction_mutex); |
| 599 | 599 | ||
| 600 | if (keyring) | 600 | if (keyring && link_ret == 0) |
| 601 | __key_link_end(keyring, &key->index_key, edit); | 601 | __key_link_end(keyring, &key->index_key, edit); |
| 602 | 602 | ||
| 603 | /* wake up anyone waiting for a key to be constructed */ | 603 | /* wake up anyone waiting for a key to be constructed */ |
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 3b135a0af344..d580ad06b792 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c | |||
| @@ -1688,8 +1688,8 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3, | |||
| 1688 | 1688 | ||
| 1689 | case KEYCTL_DH_COMPUTE: | 1689 | case KEYCTL_DH_COMPUTE: |
| 1690 | return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2, | 1690 | return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2, |
| 1691 | (char __user *) arg3, | 1691 | (char __user *) arg3, (size_t) arg4, |
| 1692 | (size_t) arg4); | 1692 | (void __user *) arg5); |
| 1693 | 1693 | ||
| 1694 | default: | 1694 | default: |
| 1695 | return -EOPNOTSUPP; | 1695 | return -EOPNOTSUPP; |
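
The keyctl changes thread a fifth, reserved argument through KEYCTL_DH_COMPUTE and reject anything non-NULL, which keeps the slot available for future use without breaking existing callers. A small sketch of that API-evolution pattern; the function and names are illustrative, not the kernel's:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* The new trailing argument is reserved and must be NULL today so it can
 * carry extra parameters later. */
static long dh_compute(const void *params, char *buffer, size_t buflen,
                       void *reserved)
{
    if (reserved)
        return -EINVAL;   /* reject anything a future ABI might define */

    /* ... perform the computation into buffer ... */
    (void)params; (void)buffer; (void)buflen;
    return 0;
}

int main(void)
{
    char out[32];
    int dummy_params = 0;

    printf("reserved=NULL  -> %ld\n",
           dh_compute(&dummy_params, out, sizeof(out), NULL));
    printf("reserved=set   -> %ld\n",
           dh_compute(&dummy_params, out, sizeof(out), out));
    return 0;
}
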
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c index c0f8f613f1f1..172dacd925f5 100644 --- a/sound/drivers/dummy.c +++ b/sound/drivers/dummy.c | |||
| @@ -420,6 +420,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream) | |||
| 420 | 420 | ||
| 421 | static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm) | 421 | static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm) |
| 422 | { | 422 | { |
| 423 | hrtimer_cancel(&dpcm->timer); | ||
| 423 | tasklet_kill(&dpcm->tasklet); | 424 | tasklet_kill(&dpcm->tasklet); |
| 424 | } | 425 | } |
| 425 | 426 | ||
diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c index 87041ddd29cb..47a358fab132 100644 --- a/sound/hda/hdac_regmap.c +++ b/sound/hda/hdac_regmap.c | |||
| @@ -444,7 +444,7 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg, | |||
| 444 | err = reg_raw_write(codec, reg, val); | 444 | err = reg_raw_write(codec, reg, val); |
| 445 | if (err == -EAGAIN) { | 445 | if (err == -EAGAIN) { |
| 446 | err = snd_hdac_power_up_pm(codec); | 446 | err = snd_hdac_power_up_pm(codec); |
| 447 | if (!err) | 447 | if (err >= 0) |
| 448 | err = reg_raw_write(codec, reg, val); | 448 | err = reg_raw_write(codec, reg, val); |
| 449 | snd_hdac_power_down_pm(codec); | 449 | snd_hdac_power_down_pm(codec); |
| 450 | } | 450 | } |
| @@ -470,7 +470,7 @@ static int __snd_hdac_regmap_read_raw(struct hdac_device *codec, | |||
| 470 | err = reg_raw_read(codec, reg, val, uncached); | 470 | err = reg_raw_read(codec, reg, val, uncached); |
| 471 | if (err == -EAGAIN) { | 471 | if (err == -EAGAIN) { |
| 472 | err = snd_hdac_power_up_pm(codec); | 472 | err = snd_hdac_power_up_pm(codec); |
| 473 | if (!err) | 473 | if (err >= 0) |
| 474 | err = reg_raw_read(codec, reg, val, uncached); | 474 | err = reg_raw_read(codec, reg, val, uncached); |
| 475 | snd_hdac_power_down_pm(codec); | 475 | snd_hdac_power_down_pm(codec); |
| 476 | } | 476 | } |
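
The hdac_regmap fix accounts for the power-up helper reporting success as any non-negative value (1 typically meaning the device was already active), so the retry must test err >= 0 rather than !err. A toy model of that retry path with invented return values:

#include <stdio.h>

static int power_up(void)  { return 1; }     /* success: already active */
static int raw_write(void) { return -11; }   /* first try: -EAGAIN */

int main(void)
{
    int err = raw_write();

    if (err == -11) {
        err = power_up();
        if (err >= 0)          /* "if (!err)" would treat 1 as failure */
            err = 0;           /* retry the write; assume it succeeds here */
    }
    printf("final err=%d\n", err);
    return 0;
}
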
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 9a0d1445ca5c..94089fc71884 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
| @@ -365,8 +365,11 @@ enum { | |||
| 365 | 365 | ||
| 366 | #define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170) | 366 | #define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170) |
| 367 | #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70) | 367 | #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70) |
| 368 | #define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171) | ||
| 369 | #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71) | ||
| 368 | #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) | 370 | #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) |
| 369 | #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) | 371 | #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \ |
| 372 | IS_KBL(pci) || IS_KBL_LP(pci) | ||
| 370 | 373 | ||
| 371 | static char *driver_short_names[] = { | 374 | static char *driver_short_names[] = { |
| 372 | [AZX_DRIVER_ICH] = "HDA Intel", | 375 | [AZX_DRIVER_ICH] = "HDA Intel", |
| @@ -2181,6 +2184,12 @@ static const struct pci_device_id azx_ids[] = { | |||
| 2181 | /* Sunrise Point-LP */ | 2184 | /* Sunrise Point-LP */ |
| 2182 | { PCI_DEVICE(0x8086, 0x9d70), | 2185 | { PCI_DEVICE(0x8086, 0x9d70), |
| 2183 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, | 2186 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, |
| 2187 | /* Kabylake */ | ||
| 2188 | { PCI_DEVICE(0x8086, 0xa171), | ||
| 2189 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, | ||
| 2190 | /* Kabylake-LP */ | ||
| 2191 | { PCI_DEVICE(0x8086, 0x9d71), | ||
| 2192 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, | ||
| 2184 | /* Broxton-P(Apollolake) */ | 2193 | /* Broxton-P(Apollolake) */ |
| 2185 | { PCI_DEVICE(0x8086, 0x5a98), | 2194 | { PCI_DEVICE(0x8086, 0x5a98), |
| 2186 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, | 2195 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, |
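
One detail worth noting in the hda_intel hunk: as displayed, the IS_SKL_PLUS() continuation places the new IS_KBL()/IS_KBL_LP() terms outside the closing parenthesis, so the macro no longer expands to a single parenthesized expression. That is harmless in a bare if (IS_SKL_PLUS(pci)) but changes meaning under negation or a lower-precedence operator, as the small demo below shows (macros and values are invented):

#include <stdio.h>

#define IS_A(x)  ((x) == 1)
#define IS_B(x)  ((x) == 2)
/* continuation left outside the parentheses, like the hunk above: */
#define IS_ANY_BAD(x)  (IS_A(x)) || IS_B(x)
/* fully parenthesized variant: */
#define IS_ANY_OK(x)   (IS_A(x) || IS_B(x))

int main(void)
{
    int x = 2;

    /* !IS_ANY_BAD(x) expands to  !(IS_A(x)) || IS_B(x)  -- the ! binds to
     * the first term only, so the result is 1 even though x matches B. */
    printf("unparenthesized: %d\n", !IS_ANY_BAD(x));
    printf("parenthesized:   %d\n", !IS_ANY_OK(x));
    return 0;
}
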
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c index 17fd81736d3d..0621920f7617 100644 --- a/sound/pci/hda/hda_tegra.c +++ b/sound/pci/hda/hda_tegra.c | |||
| @@ -115,20 +115,20 @@ static int substream_free_pages(struct azx *chip, | |||
| 115 | /* | 115 | /* |
| 116 | * Register access ops. Tegra HDA register access is DWORD only. | 116 | * Register access ops. Tegra HDA register access is DWORD only. |
| 117 | */ | 117 | */ |
| 118 | static void hda_tegra_writel(u32 value, u32 *addr) | 118 | static void hda_tegra_writel(u32 value, u32 __iomem *addr) |
| 119 | { | 119 | { |
| 120 | writel(value, addr); | 120 | writel(value, addr); |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | static u32 hda_tegra_readl(u32 *addr) | 123 | static u32 hda_tegra_readl(u32 __iomem *addr) |
| 124 | { | 124 | { |
| 125 | return readl(addr); | 125 | return readl(addr); |
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | static void hda_tegra_writew(u16 value, u16 *addr) | 128 | static void hda_tegra_writew(u16 value, u16 __iomem *addr) |
| 129 | { | 129 | { |
| 130 | unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; | 130 | unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; |
| 131 | void *dword_addr = (void *)((unsigned long)(addr) & ~0x3); | 131 | void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3); |
| 132 | u32 v; | 132 | u32 v; |
| 133 | 133 | ||
| 134 | v = readl(dword_addr); | 134 | v = readl(dword_addr); |
| @@ -137,20 +137,20 @@ static void hda_tegra_writew(u16 value, u16 *addr) | |||
| 137 | writel(v, dword_addr); | 137 | writel(v, dword_addr); |
| 138 | } | 138 | } |
| 139 | 139 | ||
| 140 | static u16 hda_tegra_readw(u16 *addr) | 140 | static u16 hda_tegra_readw(u16 __iomem *addr) |
| 141 | { | 141 | { |
| 142 | unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; | 142 | unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; |
| 143 | void *dword_addr = (void *)((unsigned long)(addr) & ~0x3); | 143 | void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3); |
| 144 | u32 v; | 144 | u32 v; |
| 145 | 145 | ||
| 146 | v = readl(dword_addr); | 146 | v = readl(dword_addr); |
| 147 | return (v >> shift) & 0xffff; | 147 | return (v >> shift) & 0xffff; |
| 148 | } | 148 | } |
| 149 | 149 | ||
| 150 | static void hda_tegra_writeb(u8 value, u8 *addr) | 150 | static void hda_tegra_writeb(u8 value, u8 __iomem *addr) |
| 151 | { | 151 | { |
| 152 | unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; | 152 | unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; |
| 153 | void *dword_addr = (void *)((unsigned long)(addr) & ~0x3); | 153 | void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3); |
| 154 | u32 v; | 154 | u32 v; |
| 155 | 155 | ||
| 156 | v = readl(dword_addr); | 156 | v = readl(dword_addr); |
| @@ -159,10 +159,10 @@ static void hda_tegra_writeb(u8 value, u8 *addr) | |||
| 159 | writel(v, dword_addr); | 159 | writel(v, dword_addr); |
| 160 | } | 160 | } |
| 161 | 161 | ||
| 162 | static u8 hda_tegra_readb(u8 *addr) | 162 | static u8 hda_tegra_readb(u8 __iomem *addr) |
| 163 | { | 163 | { |
| 164 | unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; | 164 | unsigned int shift = ((unsigned long)(addr) & 0x3) << 3; |
| 165 | void *dword_addr = (void *)((unsigned long)(addr) & ~0x3); | 165 | void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3); |
| 166 | u32 v; | 166 | u32 v; |
| 167 | 167 | ||
| 168 | v = readl(dword_addr); | 168 | v = readl(dword_addr); |
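
The Tegra helpers above exist because the controller only accepts 32-bit accesses, so byte and half-word reads/writes are emulated by read-modify-write of the containing dword; the __iomem annotations simply give sparse the correct pointer types. The arithmetic, modelled against an ordinary uint32_t instead of a real MMIO register:

#include <stdint.h>
#include <stdio.h>

static uint32_t reg;                       /* fake 32-bit register */

static void write16(uintptr_t addr, uint16_t value)
{
    unsigned int shift = (addr & 0x3) << 3;          /* 0 or 16 */
    uint32_t v = reg;                                /* readl() */

    v &= ~(0xffffu << shift);                        /* clear the half-word */
    v |= (uint32_t)value << shift;                   /* merge the new value */
    reg = v;                                         /* writel() */
}

static uint16_t read16(uintptr_t addr)
{
    unsigned int shift = (addr & 0x3) << 3;

    return (reg >> shift) & 0xffff;
}

int main(void)
{
    write16(0x0, 0x1234);   /* low half-word */
    write16(0x2, 0xabcd);   /* high half-word */
    printf("reg=0x%08x low=0x%04x high=0x%04x\n",
           (unsigned)reg, (unsigned)read16(0x0), (unsigned)read16(0x2));
    return 0;
}
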
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index d53c25e7a1c1..900bfbc3368c 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -346,6 +346,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) | |||
| 346 | case 0x10ec0234: | 346 | case 0x10ec0234: |
| 347 | case 0x10ec0274: | 347 | case 0x10ec0274: |
| 348 | case 0x10ec0294: | 348 | case 0x10ec0294: |
| 349 | case 0x10ec0700: | ||
| 350 | case 0x10ec0701: | ||
| 351 | case 0x10ec0703: | ||
| 349 | alc_update_coef_idx(codec, 0x10, 1<<15, 0); | 352 | alc_update_coef_idx(codec, 0x10, 1<<15, 0); |
| 350 | break; | 353 | break; |
| 351 | case 0x10ec0662: | 354 | case 0x10ec0662: |
| @@ -2655,6 +2658,7 @@ enum { | |||
| 2655 | ALC269_TYPE_ALC256, | 2658 | ALC269_TYPE_ALC256, |
| 2656 | ALC269_TYPE_ALC225, | 2659 | ALC269_TYPE_ALC225, |
| 2657 | ALC269_TYPE_ALC294, | 2660 | ALC269_TYPE_ALC294, |
| 2661 | ALC269_TYPE_ALC700, | ||
| 2658 | }; | 2662 | }; |
| 2659 | 2663 | ||
| 2660 | /* | 2664 | /* |
| @@ -2686,6 +2690,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec) | |||
| 2686 | case ALC269_TYPE_ALC256: | 2690 | case ALC269_TYPE_ALC256: |
| 2687 | case ALC269_TYPE_ALC225: | 2691 | case ALC269_TYPE_ALC225: |
| 2688 | case ALC269_TYPE_ALC294: | 2692 | case ALC269_TYPE_ALC294: |
| 2693 | case ALC269_TYPE_ALC700: | ||
| 2689 | ssids = alc269_ssids; | 2694 | ssids = alc269_ssids; |
| 2690 | break; | 2695 | break; |
| 2691 | default: | 2696 | default: |
| @@ -3618,13 +3623,20 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, | |||
| 3618 | static void alc_headset_mode_unplugged(struct hda_codec *codec) | 3623 | static void alc_headset_mode_unplugged(struct hda_codec *codec) |
| 3619 | { | 3624 | { |
| 3620 | static struct coef_fw coef0255[] = { | 3625 | static struct coef_fw coef0255[] = { |
| 3621 | WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */ | ||
| 3622 | WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */ | 3626 | WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */ |
| 3623 | UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/ | 3627 | UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/ |
| 3624 | WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */ | 3628 | WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */ |
| 3625 | WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */ | 3629 | WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */ |
| 3626 | {} | 3630 | {} |
| 3627 | }; | 3631 | }; |
| 3632 | static struct coef_fw coef0255_1[] = { | ||
| 3633 | WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */ | ||
| 3634 | {} | ||
| 3635 | }; | ||
| 3636 | static struct coef_fw coef0256[] = { | ||
| 3637 | WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */ | ||
| 3638 | {} | ||
| 3639 | }; | ||
| 3628 | static struct coef_fw coef0233[] = { | 3640 | static struct coef_fw coef0233[] = { |
| 3629 | WRITE_COEF(0x1b, 0x0c0b), | 3641 | WRITE_COEF(0x1b, 0x0c0b), |
| 3630 | WRITE_COEF(0x45, 0xc429), | 3642 | WRITE_COEF(0x45, 0xc429), |
| @@ -3677,7 +3689,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) | |||
| 3677 | 3689 | ||
| 3678 | switch (codec->core.vendor_id) { | 3690 | switch (codec->core.vendor_id) { |
| 3679 | case 0x10ec0255: | 3691 | case 0x10ec0255: |
| 3692 | alc_process_coef_fw(codec, coef0255_1); | ||
| 3693 | alc_process_coef_fw(codec, coef0255); | ||
| 3694 | break; | ||
| 3680 | case 0x10ec0256: | 3695 | case 0x10ec0256: |
| 3696 | alc_process_coef_fw(codec, coef0256); | ||
| 3681 | alc_process_coef_fw(codec, coef0255); | 3697 | alc_process_coef_fw(codec, coef0255); |
| 3682 | break; | 3698 | break; |
| 3683 | case 0x10ec0233: | 3699 | case 0x10ec0233: |
| @@ -3896,6 +3912,12 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) | |||
| 3896 | WRITE_COEFEX(0x57, 0x03, 0x8ea6), | 3912 | WRITE_COEFEX(0x57, 0x03, 0x8ea6), |
| 3897 | {} | 3913 | {} |
| 3898 | }; | 3914 | }; |
| 3915 | static struct coef_fw coef0256[] = { | ||
| 3916 | WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */ | ||
| 3917 | WRITE_COEF(0x1b, 0x0c6b), | ||
| 3918 | WRITE_COEFEX(0x57, 0x03, 0x8ea6), | ||
| 3919 | {} | ||
| 3920 | }; | ||
| 3899 | static struct coef_fw coef0233[] = { | 3921 | static struct coef_fw coef0233[] = { |
| 3900 | WRITE_COEF(0x45, 0xd429), | 3922 | WRITE_COEF(0x45, 0xd429), |
| 3901 | WRITE_COEF(0x1b, 0x0c2b), | 3923 | WRITE_COEF(0x1b, 0x0c2b), |
| @@ -3936,9 +3958,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) | |||
| 3936 | 3958 | ||
| 3937 | switch (codec->core.vendor_id) { | 3959 | switch (codec->core.vendor_id) { |
| 3938 | case 0x10ec0255: | 3960 | case 0x10ec0255: |
| 3939 | case 0x10ec0256: | ||
| 3940 | alc_process_coef_fw(codec, coef0255); | 3961 | alc_process_coef_fw(codec, coef0255); |
| 3941 | break; | 3962 | break; |
| 3963 | case 0x10ec0256: | ||
| 3964 | alc_process_coef_fw(codec, coef0256); | ||
| 3965 | break; | ||
| 3942 | case 0x10ec0233: | 3966 | case 0x10ec0233: |
| 3943 | case 0x10ec0283: | 3967 | case 0x10ec0283: |
| 3944 | alc_process_coef_fw(codec, coef0233); | 3968 | alc_process_coef_fw(codec, coef0233); |
| @@ -3978,6 +4002,12 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) | |||
| 3978 | WRITE_COEFEX(0x57, 0x03, 0x8ea6), | 4002 | WRITE_COEFEX(0x57, 0x03, 0x8ea6), |
| 3979 | {} | 4003 | {} |
| 3980 | }; | 4004 | }; |
| 4005 | static struct coef_fw coef0256[] = { | ||
| 4006 | WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */ | ||
| 4007 | WRITE_COEF(0x1b, 0x0c6b), | ||
| 4008 | WRITE_COEFEX(0x57, 0x03, 0x8ea6), | ||
| 4009 | {} | ||
| 4010 | }; | ||
| 3981 | static struct coef_fw coef0233[] = { | 4011 | static struct coef_fw coef0233[] = { |
| 3982 | WRITE_COEF(0x45, 0xe429), | 4012 | WRITE_COEF(0x45, 0xe429), |
| 3983 | WRITE_COEF(0x1b, 0x0c2b), | 4013 | WRITE_COEF(0x1b, 0x0c2b), |
| @@ -4018,9 +4048,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) | |||
| 4018 | 4048 | ||
| 4019 | switch (codec->core.vendor_id) { | 4049 | switch (codec->core.vendor_id) { |
| 4020 | case 0x10ec0255: | 4050 | case 0x10ec0255: |
| 4021 | case 0x10ec0256: | ||
| 4022 | alc_process_coef_fw(codec, coef0255); | 4051 | alc_process_coef_fw(codec, coef0255); |
| 4023 | break; | 4052 | break; |
| 4053 | case 0x10ec0256: | ||
| 4054 | alc_process_coef_fw(codec, coef0256); | ||
| 4055 | break; | ||
| 4024 | case 0x10ec0233: | 4056 | case 0x10ec0233: |
| 4025 | case 0x10ec0283: | 4057 | case 0x10ec0283: |
| 4026 | alc_process_coef_fw(codec, coef0233); | 4058 | alc_process_coef_fw(codec, coef0233); |
| @@ -4266,7 +4298,7 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec, | |||
| 4266 | static void alc255_set_default_jack_type(struct hda_codec *codec) | 4298 | static void alc255_set_default_jack_type(struct hda_codec *codec) |
| 4267 | { | 4299 | { |
| 4268 | /* Set to iphone type */ | 4300 | /* Set to iphone type */ |
| 4269 | static struct coef_fw fw[] = { | 4301 | static struct coef_fw alc255fw[] = { |
| 4270 | WRITE_COEF(0x1b, 0x880b), | 4302 | WRITE_COEF(0x1b, 0x880b), |
| 4271 | WRITE_COEF(0x45, 0xd089), | 4303 | WRITE_COEF(0x45, 0xd089), |
| 4272 | WRITE_COEF(0x1b, 0x080b), | 4304 | WRITE_COEF(0x1b, 0x080b), |
| @@ -4274,7 +4306,22 @@ static void alc255_set_default_jack_type(struct hda_codec *codec) | |||
| 4274 | WRITE_COEF(0x1b, 0x0c0b), | 4306 | WRITE_COEF(0x1b, 0x0c0b), |
| 4275 | {} | 4307 | {} |
| 4276 | }; | 4308 | }; |
| 4277 | alc_process_coef_fw(codec, fw); | 4309 | static struct coef_fw alc256fw[] = { |
| 4310 | WRITE_COEF(0x1b, 0x884b), | ||
| 4311 | WRITE_COEF(0x45, 0xd089), | ||
| 4312 | WRITE_COEF(0x1b, 0x084b), | ||
| 4313 | WRITE_COEF(0x46, 0x0004), | ||
| 4314 | WRITE_COEF(0x1b, 0x0c4b), | ||
| 4315 | {} | ||
| 4316 | }; | ||
| 4317 | switch (codec->core.vendor_id) { | ||
| 4318 | case 0x10ec0255: | ||
| 4319 | alc_process_coef_fw(codec, alc255fw); | ||
| 4320 | break; | ||
| 4321 | case 0x10ec0256: | ||
| 4322 | alc_process_coef_fw(codec, alc256fw); | ||
| 4323 | break; | ||
| 4324 | } | ||
| 4278 | msleep(30); | 4325 | msleep(30); |
| 4279 | } | 4326 | } |
| 4280 | 4327 | ||
| @@ -5587,6 +5634,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 5587 | SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK), | 5634 | SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK), |
| 5588 | SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), | 5635 | SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), |
| 5589 | SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), | 5636 | SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), |
| 5637 | SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460), | ||
| 5590 | SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460), | 5638 | SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460), |
| 5591 | SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), | 5639 | SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), |
| 5592 | SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), | 5640 | SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), |
| @@ -5602,6 +5650,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 5602 | SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), | 5650 | SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), |
| 5603 | SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK), | 5651 | SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK), |
| 5604 | SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), | 5652 | SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE), |
| 5653 | SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460), | ||
| 5654 | SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460), | ||
| 5605 | SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 5655 | SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
| 5606 | SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), | 5656 | SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), |
| 5607 | SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), | 5657 | SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), |
| @@ -5775,11 +5825,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
| 5775 | {0x12, 0x90a60180}, | 5825 | {0x12, 0x90a60180}, |
| 5776 | {0x14, 0x90170130}, | 5826 | {0x14, 0x90170130}, |
| 5777 | {0x21, 0x02211040}), | 5827 | {0x21, 0x02211040}), |
| 5828 | SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | ||
| 5829 | {0x12, 0x90a60180}, | ||
| 5830 | {0x14, 0x90170120}, | ||
| 5831 | {0x21, 0x02211030}), | ||
| 5778 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | 5832 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
| 5779 | {0x12, 0x90a60160}, | 5833 | {0x12, 0x90a60160}, |
| 5780 | {0x14, 0x90170120}, | 5834 | {0x14, 0x90170120}, |
| 5781 | {0x21, 0x02211030}), | 5835 | {0x21, 0x02211030}), |
| 5782 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | 5836 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
| 5837 | {0x12, 0x90a60170}, | ||
| 5838 | {0x14, 0x90170120}, | ||
| 5839 | {0x21, 0x02211030}), | ||
| 5840 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | ||
| 5783 | ALC256_STANDARD_PINS), | 5841 | ALC256_STANDARD_PINS), |
| 5784 | SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, | 5842 | SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, |
| 5785 | {0x12, 0x90a60130}, | 5843 | {0x12, 0x90a60130}, |
| @@ -6053,6 +6111,14 @@ static int patch_alc269(struct hda_codec *codec) | |||
| 6053 | case 0x10ec0294: | 6111 | case 0x10ec0294: |
| 6054 | spec->codec_variant = ALC269_TYPE_ALC294; | 6112 | spec->codec_variant = ALC269_TYPE_ALC294; |
| 6055 | break; | 6113 | break; |
| 6114 | case 0x10ec0700: | ||
| 6115 | case 0x10ec0701: | ||
| 6116 | case 0x10ec0703: | ||
| 6117 | spec->codec_variant = ALC269_TYPE_ALC700; | ||
| 6118 | spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ | ||
| 6119 | alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */ | ||
| 6120 | break; | ||
| 6121 | |||
| 6056 | } | 6122 | } |
| 6057 | 6123 | ||
| 6058 | if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) { | 6124 | if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) { |
| @@ -7008,6 +7074,9 @@ static const struct hda_device_id snd_hda_id_realtek[] = { | |||
| 7008 | HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662), | 7074 | HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662), |
| 7009 | HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662), | 7075 | HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662), |
| 7010 | HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680), | 7076 | HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680), |
| 7077 | HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269), | ||
| 7078 | HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269), | ||
| 7079 | HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269), | ||
| 7011 | HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882), | 7080 | HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882), |
| 7012 | HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880), | 7081 | HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880), |
| 7013 | HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882), | 7082 | HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882), |
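
Most of the patch_realtek additions follow the driver's table-driven style: a sentinel-terminated list of coefficient writes per codec, selected by vendor ID in a switch. A compilable miniature of that shape; the register/value pairs are copied from the 0x1b/0x45 writes above purely for flavour, and the sentinel and dispatch details are simplified:

#include <stdio.h>

struct reg_write { unsigned short idx; unsigned short val; };

static const struct reg_write tab_255[] = { {0x1b, 0x0c0b}, {0x45, 0xd089}, {0, 0} };
static const struct reg_write tab_256[] = { {0x1b, 0x0c4b}, {0x45, 0xd089}, {0, 0} };

static void process(const struct reg_write *t)
{
    for (; t->idx; t++)                    /* stop at the all-zero sentinel */
        printf("  write coef 0x%02x = 0x%04x\n",
               (unsigned)t->idx, (unsigned)t->val);
}

static void set_jack_type(unsigned int vendor_id)
{
    switch (vendor_id) {
    case 0x10ec0255: process(tab_255); break;
    case 0x10ec0256: process(tab_256); break;
    default: break;                        /* other codecs: nothing to do */
    }
}

int main(void)
{
    puts("ALC255:"); set_jack_type(0x10ec0255);
    puts("ALC256:"); set_jack_type(0x10ec0256);
    return 0;
}
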
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c index bbf69d248ec5..9f53020c3269 100644 --- a/tools/perf/util/data-convert-bt.c +++ b/tools/perf/util/data-convert-bt.c | |||
| @@ -204,6 +204,44 @@ static unsigned long long adjust_signedness(unsigned long long value_int, int si | |||
| 204 | return (value_int & value_mask) | ~value_mask; | 204 | return (value_int & value_mask) | ~value_mask; |
| 205 | } | 205 | } |
| 206 | 206 | ||
| 207 | static int string_set_value(struct bt_ctf_field *field, const char *string) | ||
| 208 | { | ||
| 209 | char *buffer = NULL; | ||
| 210 | size_t len = strlen(string), i, p; | ||
| 211 | int err; | ||
| 212 | |||
| 213 | for (i = p = 0; i < len; i++, p++) { | ||
| 214 | if (isprint(string[i])) { | ||
| 215 | if (!buffer) | ||
| 216 | continue; | ||
| 217 | buffer[p] = string[i]; | ||
| 218 | } else { | ||
| 219 | char numstr[5]; | ||
| 220 | |||
| 221 | snprintf(numstr, sizeof(numstr), "\\x%02x", | ||
| 222 | (unsigned int)(string[i]) & 0xff); | ||
| 223 | |||
| 224 | if (!buffer) { | ||
| 225 | buffer = zalloc(i + (len - i) * 4 + 2); | ||
| 226 | if (!buffer) { | ||
| 227 | pr_err("failed to set unprintable string '%s'\n", string); | ||
| 228 | return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING"); | ||
| 229 | } | ||
| 230 | if (i > 0) | ||
| 231 | strncpy(buffer, string, i); | ||
| 232 | } | ||
| 233 | strncat(buffer + p, numstr, 4); | ||
| 234 | p += 3; | ||
| 235 | } | ||
| 236 | } | ||
| 237 | |||
| 238 | if (!buffer) | ||
| 239 | return bt_ctf_field_string_set_value(field, string); | ||
| 240 | err = bt_ctf_field_string_set_value(field, buffer); | ||
| 241 | free(buffer); | ||
| 242 | return err; | ||
| 243 | } | ||
| 244 | |||
| 207 | static int add_tracepoint_field_value(struct ctf_writer *cw, | 245 | static int add_tracepoint_field_value(struct ctf_writer *cw, |
| 208 | struct bt_ctf_event_class *event_class, | 246 | struct bt_ctf_event_class *event_class, |
| 209 | struct bt_ctf_event *event, | 247 | struct bt_ctf_event *event, |
| @@ -270,8 +308,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw, | |||
| 270 | } | 308 | } |
| 271 | 309 | ||
| 272 | if (flags & FIELD_IS_STRING) | 310 | if (flags & FIELD_IS_STRING) |
| 273 | ret = bt_ctf_field_string_set_value(field, | 311 | ret = string_set_value(field, data + offset + i * len); |
| 274 | data + offset + i * len); | ||
| 275 | else { | 312 | else { |
| 276 | unsigned long long value_int; | 313 | unsigned long long value_int; |
| 277 | 314 | ||
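
string_set_value() escapes non-printable bytes as \xNN before handing the string to the CTF writer, allocating a bigger buffer only once an unprintable byte is actually seen. A standalone version of the same escaping, simplified to always allocate the worst case up front:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy printable bytes through and expand everything else to \xNN so the
 * consumer only ever sees a printable C string. */
static char *escape_unprintable(const char *s)
{
    size_t len = strlen(s), i, p = 0;
    char *out = malloc(len * 4 + 1);       /* worst case: 4 bytes per input byte */

    if (!out)
        return NULL;
    for (i = 0; i < len; i++) {
        if (isprint((unsigned char)s[i])) {
            out[p++] = s[i];
        } else {
            snprintf(out + p, 5, "\\x%02x", (unsigned char)s[i]);
            p += 4;
        }
    }
    out[p] = '\0';
    return out;
}

int main(void)
{
    char *e = escape_unprintable("id\x01\x7f-ok");

    if (e) {
        printf("%s\n", e);                 /* prints: id\x01\x7f-ok */
        free(e);
    }
    return 0;
}
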
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index f6fcc6832949..9b141f12329e 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c | |||
| @@ -673,6 +673,8 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, | |||
| 673 | int err; | 673 | int err; |
| 674 | union perf_event *event; | 674 | union perf_event *event; |
| 675 | 675 | ||
| 676 | if (symbol_conf.kptr_restrict) | ||
| 677 | return -1; | ||
| 676 | if (map == NULL) | 678 | if (map == NULL) |
| 677 | return -1; | 679 | return -1; |
| 678 | 680 | ||
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 20f9cb32b703..54c4ff2b1cee 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
| @@ -1933,17 +1933,17 @@ int setup_intlist(struct intlist **list, const char *list_str, | |||
| 1933 | static bool symbol__read_kptr_restrict(void) | 1933 | static bool symbol__read_kptr_restrict(void) |
| 1934 | { | 1934 | { |
| 1935 | bool value = false; | 1935 | bool value = false; |
| 1936 | FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r"); | ||
| 1936 | 1937 | ||
| 1937 | if (geteuid() != 0) { | 1938 | if (fp != NULL) { |
| 1938 | FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r"); | 1939 | char line[8]; |
| 1939 | if (fp != NULL) { | ||
| 1940 | char line[8]; | ||
| 1941 | 1940 | ||
| 1942 | if (fgets(line, sizeof(line), fp) != NULL) | 1941 | if (fgets(line, sizeof(line), fp) != NULL) |
| 1943 | value = atoi(line) != 0; | 1942 | value = (geteuid() != 0) ? |
| 1943 | (atoi(line) != 0) : | ||
| 1944 | (atoi(line) == 2); | ||
| 1944 | 1945 | ||
| 1945 | fclose(fp); | 1946 | fclose(fp); |
| 1946 | } | ||
| 1947 | } | 1947 | } |
| 1948 | 1948 | ||
| 1949 | return value; | 1949 | return value; |
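
The perf change reads /proc/sys/kernel/kptr_restrict unconditionally and treats kernel symbols as unusable for non-root whenever the value is non-zero, and for root only when it is 2 (which hides pointers from everyone). The same decision as a tiny standalone program:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static bool kptr_is_restricted(void)
{
    bool value = false;
    FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");

    if (fp != NULL) {
        char line[8];

        if (fgets(line, sizeof(line), fp) != NULL)
            value = (geteuid() != 0) ? (atoi(line) != 0)
                                     : (atoi(line) == 2);
        fclose(fp);
    }
    return value;
}

int main(void)
{
    printf("kptr_restrict blocks symbol resolution: %s\n",
           kptr_is_restricted() ? "yes" : "no");
    return 0;
}
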
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc index c2b61c4fda11..0bf5085281f3 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc | |||
| @@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then | |||
| 23 | exit_unsupported | 23 | exit_unsupported |
| 24 | fi | 24 | fi |
| 25 | 25 | ||
| 26 | reset_tracer | 26 | if [ ! -f events/sched/sched_process_fork/hist ]; then |
| 27 | do_reset | ||
| 28 | |||
| 29 | FEATURE=`grep hist events/sched/sched_process_fork/trigger` | ||
| 30 | if [ -z "$FEATURE" ]; then | ||
| 31 | echo "hist trigger is not supported" | 27 | echo "hist trigger is not supported" |
| 32 | exit_unsupported | 28 | exit_unsupported |
| 33 | fi | 29 | fi |
| 34 | 30 | ||
| 31 | reset_tracer | ||
| 32 | do_reset | ||
| 33 | |||
| 35 | echo "Test histogram with execname modifier" | 34 | echo "Test histogram with execname modifier" |
| 36 | 35 | ||
| 37 | echo 'hist:keys=common_pid.execname' > events/sched/sched_process_fork/trigger | 36 | echo 'hist:keys=common_pid.execname' > events/sched/sched_process_fork/trigger |
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc index b2902d42a537..a00184cd9c95 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc | |||
| @@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then | |||
| 23 | exit_unsupported | 23 | exit_unsupported |
| 24 | fi | 24 | fi |
| 25 | 25 | ||
| 26 | reset_tracer | 26 | if [ ! -f events/sched/sched_process_fork/hist ]; then |
| 27 | do_reset | ||
| 28 | |||
| 29 | FEATURE=`grep hist events/sched/sched_process_fork/trigger` | ||
| 30 | if [ -z "$FEATURE" ]; then | ||
| 31 | echo "hist trigger is not supported" | 27 | echo "hist trigger is not supported" |
| 32 | exit_unsupported | 28 | exit_unsupported |
| 33 | fi | 29 | fi |
| 34 | 30 | ||
| 31 | reset_tracer | ||
| 32 | do_reset | ||
| 33 | |||
| 35 | echo "Test histogram basic tigger" | 34 | echo "Test histogram basic tigger" |
| 36 | 35 | ||
| 37 | echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger | 36 | echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger |
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc index 03c4a46561fc..3478b00ead57 100644 --- a/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc +++ b/tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc | |||
| @@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then | |||
| 23 | exit_unsupported | 23 | exit_unsupported |
| 24 | fi | 24 | fi |
| 25 | 25 | ||
| 26 | reset_tracer | 26 | if [ ! -f events/sched/sched_process_fork/hist ]; then |
| 27 | do_reset | ||
| 28 | |||
| 29 | FEATURE=`grep hist events/sched/sched_process_fork/trigger` | ||
| 30 | if [ -z "$FEATURE" ]; then | ||
| 31 | echo "hist trigger is not supported" | 27 | echo "hist trigger is not supported" |
| 32 | exit_unsupported | 28 | exit_unsupported |
| 33 | fi | 29 | fi |
| 34 | 30 | ||
| 31 | reset_tracer | ||
| 32 | do_reset | ||
| 33 | |||
| 35 | reset_trigger | 34 | reset_trigger |
| 36 | 35 | ||
| 37 | echo "Test histogram multiple tiggers" | 36 | echo "Test histogram multiple tiggers" |
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c index 96ba386b1b7b..4a8217448f20 100644 --- a/tools/testing/selftests/net/reuseport_bpf.c +++ b/tools/testing/selftests/net/reuseport_bpf.c | |||
| @@ -111,9 +111,9 @@ static void attach_ebpf(int fd, uint16_t mod) | |||
| 111 | memset(&attr, 0, sizeof(attr)); | 111 | memset(&attr, 0, sizeof(attr)); |
| 112 | attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; | 112 | attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; |
| 113 | attr.insn_cnt = ARRAY_SIZE(prog); | 113 | attr.insn_cnt = ARRAY_SIZE(prog); |
| 114 | attr.insns = (uint64_t)prog; | 114 | attr.insns = (unsigned long) &prog; |
| 115 | attr.license = (uint64_t)bpf_license; | 115 | attr.license = (unsigned long) &bpf_license; |
| 116 | attr.log_buf = (uint64_t)bpf_log_buf; | 116 | attr.log_buf = (unsigned long) &bpf_log_buf; |
| 117 | attr.log_size = sizeof(bpf_log_buf); | 117 | attr.log_size = sizeof(bpf_log_buf); |
| 118 | attr.log_level = 1; | 118 | attr.log_level = 1; |
| 119 | attr.kern_version = 0; | 119 | attr.kern_version = 0; |
| @@ -351,8 +351,8 @@ static void test_filter_no_reuseport(const struct test_params p) | |||
| 351 | memset(&eprog, 0, sizeof(eprog)); | 351 | memset(&eprog, 0, sizeof(eprog)); |
| 352 | eprog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; | 352 | eprog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; |
| 353 | eprog.insn_cnt = ARRAY_SIZE(ecode); | 353 | eprog.insn_cnt = ARRAY_SIZE(ecode); |
| 354 | eprog.insns = (uint64_t)ecode; | 354 | eprog.insns = (unsigned long) &ecode; |
| 355 | eprog.license = (uint64_t)bpf_license; | 355 | eprog.license = (unsigned long) &bpf_license; |
| 356 | eprog.kern_version = 0; | 356 | eprog.kern_version = 0; |
| 357 | 357 | ||
| 358 | memset(&cprog, 0, sizeof(cprog)); | 358 | memset(&cprog, 0, sizeof(cprog)); |
diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c index 932ff577ffc0..00c4f65d12da 100644 --- a/tools/testing/selftests/vm/compaction_test.c +++ b/tools/testing/selftests/vm/compaction_test.c | |||
| @@ -136,7 +136,7 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size) | |||
| 136 | printf("No of huge pages allocated = %d\n", | 136 | printf("No of huge pages allocated = %d\n", |
| 137 | (atoi(nr_hugepages))); | 137 | (atoi(nr_hugepages))); |
| 138 | 138 | ||
| 139 | if (write(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages)) | 139 | if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages)) |
| 140 | != strlen(initial_nr_hugepages)) { | 140 | != strlen(initial_nr_hugepages)) { |
| 141 | perror("Failed to write to /proc/sys/vm/nr_hugepages\n"); | 141 | perror("Failed to write to /proc/sys/vm/nr_hugepages\n"); |
| 142 | goto close_fd; | 142 | goto close_fd; |
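
The compaction_test fix writes strlen(initial_nr_hugepages) bytes rather than sizeof(), since the array is 100 bytes of mostly NULs and the subsequent length comparison could never match. The difference in one small program (stdout stands in for the sysctl file descriptor):

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    char initial_nr_hugepages[100] = "128";   /* value read back earlier */
    int fd = 1;                               /* stdout instead of the sysctl fd */
    ssize_t n;

    /* sizeof() would push the whole 100-byte buffer, NULs included, at
     * the kernel and the comparison below could never succeed. */
    n = write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages));

    if (n != (ssize_t)strlen(initial_nr_hugepages))
        perror("short write");
    else
        fprintf(stderr, "\nwrote %zd byte(s)\n", n);
    return 0;
}
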
diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile index 6ba745529833..6173adae9f08 100644 --- a/tools/virtio/ringtest/Makefile +++ b/tools/virtio/ringtest/Makefile | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | all: | 1 | all: |
| 2 | 2 | ||
| 3 | all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder | 3 | all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder noring |
| 4 | 4 | ||
| 5 | CFLAGS += -Wall | 5 | CFLAGS += -Wall |
| 6 | CFLAGS += -pthread -O2 -ggdb | 6 | CFLAGS += -pthread -O2 -ggdb |
| @@ -15,11 +15,13 @@ ring: ring.o main.o | |||
| 15 | virtio_ring_0_9: virtio_ring_0_9.o main.o | 15 | virtio_ring_0_9: virtio_ring_0_9.o main.o |
| 16 | virtio_ring_poll: virtio_ring_poll.o main.o | 16 | virtio_ring_poll: virtio_ring_poll.o main.o |
| 17 | virtio_ring_inorder: virtio_ring_inorder.o main.o | 17 | virtio_ring_inorder: virtio_ring_inorder.o main.o |
| 18 | noring: noring.o main.o | ||
| 18 | clean: | 19 | clean: |
| 19 | -rm main.o | 20 | -rm main.o |
| 20 | -rm ring.o ring | 21 | -rm ring.o ring |
| 21 | -rm virtio_ring_0_9.o virtio_ring_0_9 | 22 | -rm virtio_ring_0_9.o virtio_ring_0_9 |
| 22 | -rm virtio_ring_poll.o virtio_ring_poll | 23 | -rm virtio_ring_poll.o virtio_ring_poll |
| 23 | -rm virtio_ring_inorder.o virtio_ring_inorder | 24 | -rm virtio_ring_inorder.o virtio_ring_inorder |
| 25 | -rm noring.o noring | ||
| 24 | 26 | ||
| 25 | .PHONY: all clean | 27 | .PHONY: all clean |
diff --git a/tools/virtio/ringtest/README b/tools/virtio/ringtest/README index 34e94c46104f..d83707a336c9 100644 --- a/tools/virtio/ringtest/README +++ b/tools/virtio/ringtest/README | |||
| @@ -1,2 +1,6 @@ | |||
| 1 | Partial implementation of various ring layouts, useful to tune virtio design. | 1 | Partial implementation of various ring layouts, useful to tune virtio design. |
| 2 | Uses shared memory heavily. | 2 | Uses shared memory heavily. |
| 3 | |||
| 4 | Typical use: | ||
| 5 | |||
| 6 | # sh run-on-all.sh perf stat -r 10 --log-fd 1 -- ./ring | ||
diff --git a/tools/virtio/ringtest/noring.c b/tools/virtio/ringtest/noring.c new file mode 100644 index 000000000000..eda2f4824130 --- /dev/null +++ b/tools/virtio/ringtest/noring.c | |||
| @@ -0,0 +1,69 @@ | |||
| 1 | #define _GNU_SOURCE | ||
| 2 | #include "main.h" | ||
| 3 | #include <assert.h> | ||
| 4 | |||
| 5 | /* stub implementation: useful for measuring overhead */ | ||
| 6 | void alloc_ring(void) | ||
| 7 | { | ||
| 8 | } | ||
| 9 | |||
| 10 | /* guest side */ | ||
| 11 | int add_inbuf(unsigned len, void *buf, void *datap) | ||
| 12 | { | ||
| 13 | return 0; | ||
| 14 | } | ||
| 15 | |||
| 16 | /* | ||
| 17 | * skb_array API provides no way for producer to find out whether a given | ||
| 18 | * buffer was consumed. Our tests merely require that a successful get_buf | ||
| 19 | * implies that add_inbuf succeed in the past, and that add_inbuf will succeed, | ||
| 20 | * fake it accordingly. | ||
| 21 | */ | ||
| 22 | void *get_buf(unsigned *lenp, void **bufp) | ||
| 23 | { | ||
| 24 | return "Buffer"; | ||
| 25 | } | ||
| 26 | |||
| 27 | void poll_used(void) | ||
| 28 | { | ||
| 29 | } | ||
| 30 | |||
| 31 | void disable_call() | ||
| 32 | { | ||
| 33 | assert(0); | ||
| 34 | } | ||
| 35 | |||
| 36 | bool enable_call() | ||
| 37 | { | ||
| 38 | assert(0); | ||
| 39 | } | ||
| 40 | |||
| 41 | void kick_available(void) | ||
| 42 | { | ||
| 43 | assert(0); | ||
| 44 | } | ||
| 45 | |||
| 46 | /* host side */ | ||
| 47 | void disable_kick() | ||
| 48 | { | ||
| 49 | assert(0); | ||
| 50 | } | ||
| 51 | |||
| 52 | bool enable_kick() | ||
| 53 | { | ||
| 54 | assert(0); | ||
| 55 | } | ||
| 56 | |||
| 57 | void poll_avail(void) | ||
| 58 | { | ||
| 59 | } | ||
| 60 | |||
| 61 | bool use_buf(unsigned *lenp, void **bufp) | ||
| 62 | { | ||
| 63 | return true; | ||
| 64 | } | ||
| 65 | |||
| 66 | void call_used(void) | ||
| 67 | { | ||
| 68 | assert(0); | ||
| 69 | } | ||
diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh index 52b0f71ffa8d..2e69ca812b4c 100755 --- a/tools/virtio/ringtest/run-on-all.sh +++ b/tools/virtio/ringtest/run-on-all.sh | |||
| @@ -3,10 +3,10 @@ | |||
| 3 | #use last CPU for host. Why not the first? | 3 | #use last CPU for host. Why not the first? |
| 4 | #many devices tend to use cpu0 by default so | 4 | #many devices tend to use cpu0 by default so |
| 5 | #it tends to be busier | 5 | #it tends to be busier |
| 6 | HOST_AFFINITY=$(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n|tail -1) | 6 | HOST_AFFINITY=$(lscpu -p=cpu | tail -1) |
| 7 | 7 | ||
| 8 | #run command on all cpus | 8 | #run command on all cpus |
| 9 | for cpu in $(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n); | 9 | for cpu in $(seq 0 $HOST_AFFINITY) |
| 10 | do | 10 | do |
| 11 | #Don't run guest and host on same CPU | 11 | #Don't run guest and host on same CPU |
| 12 | #It actually works ok if using signalling | 12 | #It actually works ok if using signalling |
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c index 1889163f2f05..7cf6e1769903 100644 --- a/tools/vm/slabinfo.c +++ b/tools/vm/slabinfo.c | |||
| @@ -492,7 +492,7 @@ static void slab_stats(struct slabinfo *s) | |||
| 492 | s->deactivate_to_head + s->deactivate_to_tail + s->deactivate_bypass; | 492 | s->deactivate_to_head + s->deactivate_to_tail + s->deactivate_bypass; |
| 493 | 493 | ||
| 494 | if (total) { | 494 | if (total) { |
| 495 | printf("\nSlab Deactivation Ocurrences %%\n"); | 495 | printf("\nSlab Deactivation Occurrences %%\n"); |
| 496 | printf("-------------------------------------------------\n"); | 496 | printf("-------------------------------------------------\n"); |
| 497 | printf("Slab full %7lu %3lu%%\n", | 497 | printf("Slab full %7lu %3lu%%\n", |
| 498 | s->deactivate_full, (s->deactivate_full * 100) / total); | 498 | s->deactivate_full, (s->deactivate_full * 100) / total); |
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c index a3f12b3b277b..3a3a699b7489 100644 --- a/virt/kvm/arm/hyp/vgic-v2-sr.c +++ b/virt/kvm/arm/hyp/vgic-v2-sr.c | |||
| @@ -100,12 +100,11 @@ static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base) | |||
| 100 | if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) | 100 | if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i))) |
| 101 | continue; | 101 | continue; |
| 102 | 102 | ||
| 103 | if (cpu_if->vgic_elrsr & (1UL << i)) { | 103 | if (cpu_if->vgic_elrsr & (1UL << i)) |
| 104 | cpu_if->vgic_lr[i] &= ~GICH_LR_STATE; | 104 | cpu_if->vgic_lr[i] &= ~GICH_LR_STATE; |
| 105 | continue; | 105 | else |
| 106 | } | 106 | cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); |
| 107 | 107 | ||
| 108 | cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); | ||
| 109 | writel_relaxed(0, base + GICH_LR0 + (i * 4)); | 108 | writel_relaxed(0, base + GICH_LR0 + (i * 4)); |
| 110 | } | 109 | } |
| 111 | } | 110 | } |
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index 059595ec3da0..9f6fab74dce7 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c | |||
| @@ -191,10 +191,8 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, | |||
| 191 | * other thread sync back the IRQ. | 191 | * other thread sync back the IRQ. |
| 192 | */ | 192 | */ |
| 193 | while (irq->vcpu && /* IRQ may have state in an LR somewhere */ | 193 | while (irq->vcpu && /* IRQ may have state in an LR somewhere */ |
| 194 | irq->vcpu->cpu != -1) { /* VCPU thread is running */ | 194 | irq->vcpu->cpu != -1) /* VCPU thread is running */ |
| 195 | BUG_ON(irq->intid < VGIC_NR_PRIVATE_IRQS); | ||
| 196 | cond_resched_lock(&irq->irq_lock); | 195 | cond_resched_lock(&irq->irq_lock); |
| 197 | } | ||
| 198 | 196 | ||
| 199 | irq->active = new_active_state; | 197 | irq->active = new_active_state; |
| 200 | if (new_active_state) | 198 | if (new_active_state) |
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 8ad42c217770..e31405ee5515 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c | |||
| @@ -112,11 +112,15 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) | |||
| 112 | } | 112 | } |
| 113 | } | 113 | } |
| 114 | 114 | ||
| 115 | /* Clear soft pending state when level IRQs have been acked */ | 115 | /* |
| 116 | if (irq->config == VGIC_CONFIG_LEVEL && | 116 | * Clear soft pending state when level irqs have been acked. |
| 117 | !(val & GICH_LR_PENDING_BIT)) { | 117 | * Always regenerate the pending state. |
| 118 | irq->soft_pending = false; | 118 | */ |
| 119 | irq->pending = irq->line_level; | 119 | if (irq->config == VGIC_CONFIG_LEVEL) { |
| 120 | if (!(val & GICH_LR_PENDING_BIT)) | ||
| 121 | irq->soft_pending = false; | ||
| 122 | |||
| 123 | irq->pending = irq->line_level || irq->soft_pending; | ||
| 120 | } | 124 | } |
| 121 | 125 | ||
| 122 | spin_unlock(&irq->irq_lock); | 126 | spin_unlock(&irq->irq_lock); |
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 336a46115937..346b4ad12b49 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c | |||
| @@ -101,11 +101,15 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) | |||
| 101 | } | 101 | } |
| 102 | } | 102 | } |
| 103 | 103 | ||
| 104 | /* Clear soft pending state when level irqs have been acked */ | 104 | /* |
| 105 | if (irq->config == VGIC_CONFIG_LEVEL && | 105 | * Clear soft pending state when level irqs have been acked. |
| 106 | !(val & ICH_LR_PENDING_BIT)) { | 106 | * Always regenerate the pending state. |
| 107 | irq->soft_pending = false; | 107 | */ |
| 108 | irq->pending = irq->line_level; | 108 | if (irq->config == VGIC_CONFIG_LEVEL) { |
| 109 | if (!(val & ICH_LR_PENDING_BIT)) | ||
| 110 | irq->soft_pending = false; | ||
| 111 | |||
| 112 | irq->pending = irq->line_level || irq->soft_pending; | ||
| 109 | } | 113 | } |
| 110 | 114 | ||
| 111 | spin_unlock(&irq->irq_lock); | 115 | spin_unlock(&irq->irq_lock); |
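The matching vgic-v2.c and vgic-v3.c hunks above make the pending state of a level-triggered interrupt a pure function of the device line and the software pending latch, recomputed on every fold rather than only when the LR's pending bit had cleared. A small self-contained sketch of that recomputation: the names config, line_level, soft_pending and pending are taken from the hunks, while the struct layout, the helper name and the lr_pending_bit flag are assumptions for illustration.

    /*
     * Illustrative sketch of the level-IRQ pending recomputation from the
     * vgic-v2/v3 hunks above; only the assignment logic is taken from the
     * patch, the surrounding types are made up for this example.
     */
    #include <stdbool.h>
    #include <stdio.h>

    enum vgic_config { VGIC_CONFIG_EDGE, VGIC_CONFIG_LEVEL };

    struct irq_state {
        enum vgic_config config;
        bool line_level;    /* current level of the device line          */
        bool soft_pending;  /* pending state latched by a software write */
        bool pending;       /* what will be offered to the guest         */
    };

    /* lr_pending_bit models GICH_LR_PENDING_BIT / ICH_LR_PENDING_BIT in the saved LR. */
    static void fold_level_irq(struct irq_state *irq, bool lr_pending_bit)
    {
        if (irq->config != VGIC_CONFIG_LEVEL)
            return;

        if (!lr_pending_bit)
            irq->soft_pending = false;      /* the guest has acked the soft-pend */

        /* Always regenerate: pending follows the line OR the soft latch. */
        irq->pending = irq->line_level || irq->soft_pending;
    }

    int main(void)
    {
        struct irq_state irq = { VGIC_CONFIG_LEVEL, true, false, false };
        fold_level_irq(&irq, false);
        printf("pending = %d\n", irq.pending); /* stays pending while the line is high */
        return 0;
    }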
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c index fe84e1a95dd5..8db197bb6c7a 100644 --- a/virt/kvm/irqchip.c +++ b/virt/kvm/irqchip.c | |||
| @@ -40,7 +40,7 @@ int kvm_irq_map_gsi(struct kvm *kvm, | |||
| 40 | 40 | ||
| 41 | irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, | 41 | irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, |
| 42 | lockdep_is_held(&kvm->irq_lock)); | 42 | lockdep_is_held(&kvm->irq_lock)); |
| 43 | if (gsi < irq_rt->nr_rt_entries) { | 43 | if (irq_rt && gsi < irq_rt->nr_rt_entries) { |
| 44 | hlist_for_each_entry(e, &irq_rt->map[gsi], link) { | 44 | hlist_for_each_entry(e, &irq_rt->map[gsi], link) { |
| 45 | entries[n] = *e; | 45 | entries[n] = *e; |
| 46 | ++n; | 46 | ++n; |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 37af23052470..48bd520fc702 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -2935,25 +2935,27 @@ static long kvm_vm_ioctl(struct file *filp, | |||
| 2935 | case KVM_SET_GSI_ROUTING: { | 2935 | case KVM_SET_GSI_ROUTING: { |
| 2936 | struct kvm_irq_routing routing; | 2936 | struct kvm_irq_routing routing; |
| 2937 | struct kvm_irq_routing __user *urouting; | 2937 | struct kvm_irq_routing __user *urouting; |
| 2938 | struct kvm_irq_routing_entry *entries; | 2938 | struct kvm_irq_routing_entry *entries = NULL; |
| 2939 | 2939 | ||
| 2940 | r = -EFAULT; | 2940 | r = -EFAULT; |
| 2941 | if (copy_from_user(&routing, argp, sizeof(routing))) | 2941 | if (copy_from_user(&routing, argp, sizeof(routing))) |
| 2942 | goto out; | 2942 | goto out; |
| 2943 | r = -EINVAL; | 2943 | r = -EINVAL; |
| 2944 | if (routing.nr >= KVM_MAX_IRQ_ROUTES) | 2944 | if (routing.nr > KVM_MAX_IRQ_ROUTES) |
| 2945 | goto out; | 2945 | goto out; |
| 2946 | if (routing.flags) | 2946 | if (routing.flags) |
| 2947 | goto out; | 2947 | goto out; |
| 2948 | r = -ENOMEM; | 2948 | if (routing.nr) { |
| 2949 | entries = vmalloc(routing.nr * sizeof(*entries)); | 2949 | r = -ENOMEM; |
| 2950 | if (!entries) | 2950 | entries = vmalloc(routing.nr * sizeof(*entries)); |
| 2951 | goto out; | 2951 | if (!entries) |
| 2952 | r = -EFAULT; | 2952 | goto out; |
| 2953 | urouting = argp; | 2953 | r = -EFAULT; |
| 2954 | if (copy_from_user(entries, urouting->entries, | 2954 | urouting = argp; |
| 2955 | routing.nr * sizeof(*entries))) | 2955 | if (copy_from_user(entries, urouting->entries, |
| 2956 | goto out_free_irq_routing; | 2956 | routing.nr * sizeof(*entries))) |
| 2957 | goto out_free_irq_routing; | ||
| 2958 | } | ||
| 2957 | r = kvm_set_irq_routing(kvm, entries, routing.nr, | 2959 | r = kvm_set_irq_routing(kvm, entries, routing.nr, |
| 2958 | routing.flags); | 2960 | routing.flags); |
| 2959 | out_free_irq_routing: | 2961 | out_free_irq_routing: |
