diff options
311 files changed, 4047 insertions, 1885 deletions
@@ -88,6 +88,7 @@ Kay Sievers <kay.sievers@vrfy.org> | |||
88 | Kenneth W Chen <kenneth.w.chen@intel.com> | 88 | Kenneth W Chen <kenneth.w.chen@intel.com> |
89 | Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com> | 89 | Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com> |
90 | Koushik <raghavendra.koushik@neterion.com> | 90 | Koushik <raghavendra.koushik@neterion.com> |
91 | Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com> | ||
91 | Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com> | 92 | Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com> |
92 | Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> | 93 | Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> |
93 | Leonid I Ananiev <leonid.i.ananiev@intel.com> | 94 | Leonid I Ananiev <leonid.i.ananiev@intel.com> |
diff --git a/Documentation/conf.py b/Documentation/conf.py index 96b7aa66c89c..106ae9c740b9 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py | |||
@@ -131,7 +131,7 @@ pygments_style = 'sphinx' | |||
131 | todo_include_todos = False | 131 | todo_include_todos = False |
132 | 132 | ||
133 | primary_domain = 'C' | 133 | primary_domain = 'C' |
134 | highlight_language = 'C' | 134 | highlight_language = 'guess' |
135 | 135 | ||
136 | # -- Options for HTML output ---------------------------------------------- | 136 | # -- Options for HTML output ---------------------------------------------- |
137 | 137 | ||
diff --git a/Documentation/hwmon/ftsteutates b/Documentation/hwmon/ftsteutates index 2a1bf69c6a26..8c10a916de20 100644 --- a/Documentation/hwmon/ftsteutates +++ b/Documentation/hwmon/ftsteutates | |||
@@ -19,5 +19,5 @@ enhancements. It can monitor up to 4 voltages, 16 temperatures and | |||
19 | implemented in this driver. | 19 | implemented in this driver. |
20 | 20 | ||
21 | Specification of the chip can be found here: | 21 | Specification of the chip can be found here: |
22 | ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf | 22 | ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf |
23 | ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf | 23 | ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf |
diff --git a/Documentation/kernel-documentation.rst b/Documentation/kernel-documentation.rst index c4eb5049da39..391decc66a18 100644 --- a/Documentation/kernel-documentation.rst +++ b/Documentation/kernel-documentation.rst | |||
@@ -366,8 +366,6 @@ Domain`_ references. | |||
366 | Cross-referencing from reStructuredText | 366 | Cross-referencing from reStructuredText |
367 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 367 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
368 | 368 | ||
369 | .. highlight:: none | ||
370 | |||
371 | To cross-reference the functions and types defined in the kernel-doc comments | 369 | To cross-reference the functions and types defined in the kernel-doc comments |
372 | from reStructuredText documents, please use the `Sphinx C Domain`_ | 370 | from reStructuredText documents, please use the `Sphinx C Domain`_ |
373 | references. For example:: | 371 | references. For example:: |
@@ -390,8 +388,6 @@ For further details, please refer to the `Sphinx C Domain`_ documentation. | |||
390 | Function documentation | 388 | Function documentation |
391 | ---------------------- | 389 | ---------------------- |
392 | 390 | ||
393 | .. highlight:: c | ||
394 | |||
395 | The general format of a function and function-like macro kernel-doc comment is:: | 391 | The general format of a function and function-like macro kernel-doc comment is:: |
396 | 392 | ||
397 | /** | 393 | /** |
@@ -572,8 +568,6 @@ DocBook XML [DEPRECATED] | |||
572 | Converting DocBook to Sphinx | 568 | Converting DocBook to Sphinx |
573 | ---------------------------- | 569 | ---------------------------- |
574 | 570 | ||
575 | .. highlight:: none | ||
576 | |||
577 | Over time, we expect all of the documents under ``Documentation/DocBook`` to be | 571 | Over time, we expect all of the documents under ``Documentation/DocBook`` to be |
578 | converted to Sphinx and reStructuredText. For most DocBook XML documents, a good | 572 | converted to Sphinx and reStructuredText. For most DocBook XML documents, a good |
579 | enough solution is to use the simple ``Documentation/sphinx/tmplcvt`` script, | 573 | enough solution is to use the simple ``Documentation/sphinx/tmplcvt`` script, |
diff --git a/Documentation/networking/rxrpc.txt b/Documentation/networking/rxrpc.txt index 16a924c486bf..70c926ae212d 100644 --- a/Documentation/networking/rxrpc.txt +++ b/Documentation/networking/rxrpc.txt | |||
@@ -790,13 +790,12 @@ The kernel interface functions are as follows: | |||
790 | Data messages can have their contents extracted with the usual bunch of | 790 | Data messages can have their contents extracted with the usual bunch of |
791 | socket buffer manipulation functions. A data message can be determined to | 791 | socket buffer manipulation functions. A data message can be determined to |
792 | be the last one in a sequence with rxrpc_kernel_is_data_last(). When a | 792 | be the last one in a sequence with rxrpc_kernel_is_data_last(). When a |
793 | data message has been used up, rxrpc_kernel_data_delivered() should be | 793 | data message has been used up, rxrpc_kernel_data_consumed() should be |
794 | called on it.. | 794 | called on it. |
795 | 795 | ||
796 | Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose | 796 | Messages should be handled to rxrpc_kernel_free_skb() to dispose of. It |
797 | of. It is possible to get extra refs on all types of message for later | 797 | is possible to get extra refs on all types of message for later freeing, |
798 | freeing, but this may pin the state of a call until the message is finally | 798 | but this may pin the state of a call until the message is finally freed. |
799 | freed. | ||
800 | 799 | ||
801 | (*) Accept an incoming call. | 800 | (*) Accept an incoming call. |
802 | 801 | ||
@@ -821,12 +820,14 @@ The kernel interface functions are as follows: | |||
821 | Other errors may be returned if the call had been aborted (-ECONNABORTED) | 820 | Other errors may be returned if the call had been aborted (-ECONNABORTED) |
822 | or had timed out (-ETIME). | 821 | or had timed out (-ETIME). |
823 | 822 | ||
824 | (*) Record the delivery of a data message and free it. | 823 | (*) Record the delivery of a data message. |
825 | 824 | ||
826 | void rxrpc_kernel_data_delivered(struct sk_buff *skb); | 825 | void rxrpc_kernel_data_consumed(struct rxrpc_call *call, |
826 | struct sk_buff *skb); | ||
827 | 827 | ||
828 | This is used to record a data message as having been delivered and to | 828 | This is used to record a data message as having been consumed and to |
829 | update the ACK state for the call. The socket buffer will be freed. | 829 | update the ACK state for the call. The message must still be passed to |
830 | rxrpc_kernel_free_skb() for disposal by the caller. | ||
830 | 831 | ||
831 | (*) Free a message. | 832 | (*) Free a message. |
832 | 833 | ||
diff --git a/Documentation/power/basic-pm-debugging.txt b/Documentation/power/basic-pm-debugging.txt index b96098ccfe69..708f87f78a75 100644 --- a/Documentation/power/basic-pm-debugging.txt +++ b/Documentation/power/basic-pm-debugging.txt | |||
@@ -164,7 +164,32 @@ load n/2 modules more and try again. | |||
164 | Again, if you find the offending module(s), it(they) must be unloaded every time | 164 | Again, if you find the offending module(s), it(they) must be unloaded every time |
165 | before hibernation, and please report the problem with it(them). | 165 | before hibernation, and please report the problem with it(them). |
166 | 166 | ||
167 | c) Advanced debugging | 167 | c) Using the "test_resume" hibernation option |
168 | |||
169 | /sys/power/disk generally tells the kernel what to do after creating a | ||
170 | hibernation image. One of the available options is "test_resume" which | ||
171 | causes the just created image to be used for immediate restoration. Namely, | ||
172 | after doing: | ||
173 | |||
174 | # echo test_resume > /sys/power/disk | ||
175 | # echo disk > /sys/power/state | ||
176 | |||
177 | a hibernation image will be created and a resume from it will be triggered | ||
178 | immediately without involving the platform firmware in any way. | ||
179 | |||
180 | That test can be used to check if failures to resume from hibernation are | ||
181 | related to bad interactions with the platform firmware. That is, if the above | ||
182 | works every time, but resume from actual hibernation does not work or is | ||
183 | unreliable, the platform firmware may be responsible for the failures. | ||
184 | |||
185 | On architectures and platforms that support using different kernels to restore | ||
186 | hibernation images (that is, the kernel used to read the image from storage and | ||
187 | load it into memory is different from the one included in the image) or support | ||
188 | kernel address space randomization, it also can be used to check if failures | ||
189 | to resume may be related to the differences between the restore and image | ||
190 | kernels. | ||
191 | |||
192 | d) Advanced debugging | ||
168 | 193 | ||
169 | In case that hibernation does not work on your system even in the minimal | 194 | In case that hibernation does not work on your system even in the minimal |
170 | configuration and compiling more drivers as modules is not practical or some | 195 | configuration and compiling more drivers as modules is not practical or some |
diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt index f1f0f59a7c47..974916ff6608 100644 --- a/Documentation/power/interface.txt +++ b/Documentation/power/interface.txt | |||
@@ -1,75 +1,76 @@ | |||
1 | Power Management Interface | 1 | Power Management Interface for System Sleep |
2 | 2 | ||
3 | 3 | Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com> | |
4 | The power management subsystem provides a unified sysfs interface to | 4 | |
5 | userspace, regardless of what architecture or platform one is | 5 | The power management subsystem provides userspace with a unified sysfs interface |
6 | running. The interface exists in /sys/power/ directory (assuming sysfs | 6 | for system sleep regardless of the underlying system architecture or platform. |
7 | is mounted at /sys). | 7 | The interface is located in the /sys/power/ directory (assuming that sysfs is |
8 | 8 | mounted at /sys). | |
9 | /sys/power/state controls system power state. Reading from this file | 9 | |
10 | returns what states are supported, which is hard-coded to 'freeze', | 10 | /sys/power/state is the system sleep state control file. |
11 | 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk' | 11 | |
12 | (Suspend-to-Disk). | 12 | Reading from it returns a list of supported sleep states, encoded as: |
13 | 13 | ||
14 | Writing to this file one of those strings causes the system to | 14 | 'freeze' (Suspend-to-Idle) |
15 | transition into that state. Please see the file | 15 | 'standby' (Power-On Suspend) |
16 | Documentation/power/states.txt for a description of each of those | 16 | 'mem' (Suspend-to-RAM) |
17 | states. | 17 | 'disk' (Suspend-to-Disk) |
18 | 18 | ||
19 | 19 | Suspend-to-Idle is always supported. Suspend-to-Disk is always supported | |
20 | /sys/power/disk controls the operating mode of the suspend-to-disk | 20 | too as long the kernel has been configured to support hibernation at all |
21 | mechanism. Suspend-to-disk can be handled in several ways. We have a | 21 | (ie. CONFIG_HIBERNATION is set in the kernel configuration file). Support |
22 | few options for putting the system to sleep - using the platform driver | 22 | for Suspend-to-RAM and Power-On Suspend depends on the capabilities of the |
23 | (e.g. ACPI or other suspend_ops), powering off the system or rebooting the | 23 | platform. |
24 | system (for testing). | 24 | |
25 | 25 | If one of the strings listed in /sys/power/state is written to it, the system | |
26 | Additionally, /sys/power/disk can be used to turn on one of the two testing | 26 | will attempt to transition into the corresponding sleep state. Refer to |
27 | modes of the suspend-to-disk mechanism: 'testproc' or 'test'. If the | 27 | Documentation/power/states.txt for a description of each of those states. |
28 | suspend-to-disk mechanism is in the 'testproc' mode, writing 'disk' to | 28 | |
29 | /sys/power/state will cause the kernel to disable nonboot CPUs and freeze | 29 | /sys/power/disk controls the operating mode of hibernation (Suspend-to-Disk). |
30 | tasks, wait for 5 seconds, unfreeze tasks and enable nonboot CPUs. If it is | 30 | Specifically, it tells the kernel what to do after creating a hibernation image. |
31 | in the 'test' mode, writing 'disk' to /sys/power/state will cause the kernel | 31 | |
32 | to disable nonboot CPUs and freeze tasks, shrink memory, suspend devices, wait | 32 | Reading from it returns a list of supported options encoded as: |
33 | for 5 seconds, resume devices, unfreeze tasks and enable nonboot CPUs. Then, | 33 | |
34 | we are able to look in the log messages and work out, for example, which code | 34 | 'platform' (put the system into sleep using a platform-provided method) |
35 | is being slow and which device drivers are misbehaving. | 35 | 'shutdown' (shut the system down) |
36 | 36 | 'reboot' (reboot the system) | |
37 | Reading from this file will display all supported modes and the currently | 37 | 'suspend' (trigger a Suspend-to-RAM transition) |
38 | selected one in brackets, for example | 38 | 'test_resume' (resume-after-hibernation test mode) |
39 | 39 | ||
40 | [shutdown] reboot test testproc | 40 | The currently selected option is printed in square brackets. |
41 | 41 | ||
42 | Writing to this file will accept one of | 42 | The 'platform' option is only available if the platform provides a special |
43 | 43 | mechanism to put the system to sleep after creating a hibernation image (ACPI | |
44 | 'platform' (only if the platform supports it) | 44 | does that, for example). The 'suspend' option is available if Suspend-to-RAM |
45 | 'shutdown' | 45 | is supported. Refer to Documentation/power/basic_pm_debugging.txt for the |
46 | 'reboot' | 46 | description of the 'test_resume' option. |
47 | 'testproc' | 47 | |
48 | 'test' | 48 | To select an option, write the string representing it to /sys/power/disk. |
49 | 49 | ||
50 | /sys/power/image_size controls the size of the image created by | 50 | /sys/power/image_size controls the size of hibernation images. |
51 | the suspend-to-disk mechanism. It can be written a string | 51 | |
52 | representing a non-negative integer that will be used as an upper | 52 | It can be written a string representing a non-negative integer that will be |
53 | limit of the image size, in bytes. The suspend-to-disk mechanism will | 53 | used as a best-effort upper limit of the image size, in bytes. The hibernation |
54 | do its best to ensure the image size will not exceed that number. However, | 54 | core will do its best to ensure that the image size will not exceed that number. |
55 | if this turns out to be impossible, it will try to suspend anyway using the | 55 | However, if that turns out to be impossible to achieve, a hibernation image will |
56 | smallest image possible. In particular, if "0" is written to this file, the | 56 | still be created and its size will be as small as possible. In particular, |
57 | suspend image will be as small as possible. | 57 | writing '0' to this file will enforce hibernation images to be as small as |
58 | 58 | possible. | |
59 | Reading from this file will display the current image size limit, which | 59 | |
60 | is set to 2/5 of available RAM by default. | 60 | Reading from this file returns the current image size limit, which is set to |
61 | 61 | around 2/5 of available RAM by default. | |
62 | /sys/power/pm_trace controls the code which saves the last PM event point in | 62 | |
63 | the RTC across reboots, so that you can debug a machine that just hangs | 63 | /sys/power/pm_trace controls the PM trace mechanism saving the last suspend |
64 | during suspend (or more commonly, during resume). Namely, the RTC is only | 64 | or resume event point in the RTC across reboots. |
65 | used to save the last PM event point if this file contains '1'. Initially it | 65 | |
66 | contains '0' which may be changed to '1' by writing a string representing a | 66 | It helps to debug hard lockups or reboots due to device driver failures that |
67 | nonzero integer into it. | 67 | occur during system suspend or resume (which is more common) more effectively. |
68 | 68 | ||
69 | To use this debugging feature you should attempt to suspend the machine, then | 69 | If /sys/power/pm_trace contains '1', the fingerprint of each suspend/resume |
70 | reboot it and run | 70 | event point in turn will be stored in the RTC memory (overwriting the actual |
71 | 71 | RTC information), so it will survive a system crash if one occurs right after | |
72 | dmesg -s 1000000 | grep 'hash matches' | 72 | storing it and it can be used later to identify the driver that caused the crash |
73 | 73 | to happen (see Documentation/power/s2ram.txt for more information). | |
74 | CAUTION: Using it will cause your machine's real-time (CMOS) clock to be | 74 | |
75 | set to a random invalid time after a resume. | 75 | Initially it contains '0' which may be changed to '1' by writing a string |
76 | representing a nonzero integer into it. | ||
diff --git a/Documentation/sphinx-static/theme_overrides.css b/Documentation/sphinx-static/theme_overrides.css index 3a2ac4bcfd78..e88461c4c1e6 100644 --- a/Documentation/sphinx-static/theme_overrides.css +++ b/Documentation/sphinx-static/theme_overrides.css | |||
@@ -42,11 +42,12 @@ | |||
42 | caption a.headerlink { opacity: 0; } | 42 | caption a.headerlink { opacity: 0; } |
43 | caption a.headerlink:hover { opacity: 1; } | 43 | caption a.headerlink:hover { opacity: 1; } |
44 | 44 | ||
45 | /* inline literal: drop the borderbox and red color */ | 45 | /* inline literal: drop the borderbox, padding and red color */ |
46 | 46 | ||
47 | code, .rst-content tt, .rst-content code { | 47 | code, .rst-content tt, .rst-content code { |
48 | color: inherit; | 48 | color: inherit; |
49 | border: none; | 49 | border: none; |
50 | padding: unset; | ||
50 | background: inherit; | 51 | background: inherit; |
51 | font-size: 85%; | 52 | font-size: 85%; |
52 | } | 53 | } |
diff --git a/MAINTAINERS b/MAINTAINERS index a306795a7450..ecf066a399e3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1614,7 +1614,7 @@ N: rockchip | |||
1614 | 1614 | ||
1615 | ARM/SAMSUNG EXYNOS ARM ARCHITECTURES | 1615 | ARM/SAMSUNG EXYNOS ARM ARCHITECTURES |
1616 | M: Kukjin Kim <kgene@kernel.org> | 1616 | M: Kukjin Kim <kgene@kernel.org> |
1617 | M: Krzysztof Kozlowski <k.kozlowski@samsung.com> | 1617 | M: Krzysztof Kozlowski <krzk@kernel.org> |
1618 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1618 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
1619 | L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) | 1619 | L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) |
1620 | S: Maintained | 1620 | S: Maintained |
@@ -1822,6 +1822,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git | |||
1822 | ARM/UNIPHIER ARCHITECTURE | 1822 | ARM/UNIPHIER ARCHITECTURE |
1823 | M: Masahiro Yamada <yamada.masahiro@socionext.com> | 1823 | M: Masahiro Yamada <yamada.masahiro@socionext.com> |
1824 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1824 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
1825 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-uniphier.git | ||
1825 | S: Maintained | 1826 | S: Maintained |
1826 | F: arch/arm/boot/dts/uniphier* | 1827 | F: arch/arm/boot/dts/uniphier* |
1827 | F: arch/arm/include/asm/hardware/cache-uniphier.h | 1828 | F: arch/arm/include/asm/hardware/cache-uniphier.h |
@@ -4525,6 +4526,12 @@ L: linux-edac@vger.kernel.org | |||
4525 | S: Maintained | 4526 | S: Maintained |
4526 | F: drivers/edac/sb_edac.c | 4527 | F: drivers/edac/sb_edac.c |
4527 | 4528 | ||
4529 | EDAC-SKYLAKE | ||
4530 | M: Tony Luck <tony.luck@intel.com> | ||
4531 | L: linux-edac@vger.kernel.org | ||
4532 | S: Maintained | ||
4533 | F: drivers/edac/skx_edac.c | ||
4534 | |||
4528 | EDAC-XGENE | 4535 | EDAC-XGENE |
4529 | APPLIED MICRO (APM) X-GENE SOC EDAC | 4536 | APPLIED MICRO (APM) X-GENE SOC EDAC |
4530 | M: Loc Ho <lho@apm.com> | 4537 | M: Loc Ho <lho@apm.com> |
@@ -7449,7 +7456,8 @@ F: Documentation/devicetree/bindings/sound/max9860.txt | |||
7449 | F: sound/soc/codecs/max9860.* | 7456 | F: sound/soc/codecs/max9860.* |
7450 | 7457 | ||
7451 | MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS | 7458 | MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS |
7452 | M: Krzysztof Kozlowski <k.kozlowski@samsung.com> | 7459 | M: Krzysztof Kozlowski <krzk@kernel.org> |
7460 | M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> | ||
7453 | L: linux-pm@vger.kernel.org | 7461 | L: linux-pm@vger.kernel.org |
7454 | S: Supported | 7462 | S: Supported |
7455 | F: drivers/power/max14577_charger.c | 7463 | F: drivers/power/max14577_charger.c |
@@ -7465,7 +7473,8 @@ F: include/dt-bindings/*/*max77802.h | |||
7465 | 7473 | ||
7466 | MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS | 7474 | MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS |
7467 | M: Chanwoo Choi <cw00.choi@samsung.com> | 7475 | M: Chanwoo Choi <cw00.choi@samsung.com> |
7468 | M: Krzysztof Kozlowski <k.kozlowski@samsung.com> | 7476 | M: Krzysztof Kozlowski <krzk@kernel.org> |
7477 | M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> | ||
7469 | L: linux-kernel@vger.kernel.org | 7478 | L: linux-kernel@vger.kernel.org |
7470 | S: Supported | 7479 | S: Supported |
7471 | F: drivers/*/max14577*.c | 7480 | F: drivers/*/max14577*.c |
@@ -9231,7 +9240,7 @@ F: drivers/pinctrl/sh-pfc/ | |||
9231 | 9240 | ||
9232 | PIN CONTROLLER - SAMSUNG | 9241 | PIN CONTROLLER - SAMSUNG |
9233 | M: Tomasz Figa <tomasz.figa@gmail.com> | 9242 | M: Tomasz Figa <tomasz.figa@gmail.com> |
9234 | M: Krzysztof Kozlowski <k.kozlowski@samsung.com> | 9243 | M: Krzysztof Kozlowski <krzk@kernel.org> |
9235 | M: Sylwester Nawrocki <s.nawrocki@samsung.com> | 9244 | M: Sylwester Nawrocki <s.nawrocki@samsung.com> |
9236 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 9245 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
9237 | L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) | 9246 | L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) |
@@ -10164,7 +10173,7 @@ S: Maintained | |||
10164 | F: drivers/platform/x86/samsung-laptop.c | 10173 | F: drivers/platform/x86/samsung-laptop.c |
10165 | 10174 | ||
10166 | SAMSUNG AUDIO (ASoC) DRIVERS | 10175 | SAMSUNG AUDIO (ASoC) DRIVERS |
10167 | M: Krzysztof Kozlowski <k.kozlowski@samsung.com> | 10176 | M: Krzysztof Kozlowski <krzk@kernel.org> |
10168 | M: Sangbeom Kim <sbkim73@samsung.com> | 10177 | M: Sangbeom Kim <sbkim73@samsung.com> |
10169 | M: Sylwester Nawrocki <s.nawrocki@samsung.com> | 10178 | M: Sylwester Nawrocki <s.nawrocki@samsung.com> |
10170 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 10179 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
@@ -10179,7 +10188,8 @@ F: drivers/video/fbdev/s3c-fb.c | |||
10179 | 10188 | ||
10180 | SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS | 10189 | SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS |
10181 | M: Sangbeom Kim <sbkim73@samsung.com> | 10190 | M: Sangbeom Kim <sbkim73@samsung.com> |
10182 | M: Krzysztof Kozlowski <k.kozlowski@samsung.com> | 10191 | M: Krzysztof Kozlowski <krzk@kernel.org> |
10192 | M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com> | ||
10183 | L: linux-kernel@vger.kernel.org | 10193 | L: linux-kernel@vger.kernel.org |
10184 | L: linux-samsung-soc@vger.kernel.org | 10194 | L: linux-samsung-soc@vger.kernel.org |
10185 | S: Supported | 10195 | S: Supported |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 8 | 2 | PATCHLEVEL = 8 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc2 | 4 | EXTRAVERSION = -rc3 |
5 | NAME = Psychotic Stoned Sheep | 5 | NAME = Psychotic Stoned Sheep |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi index c8609d8d2c55..b689172632ef 100644 --- a/arch/arm/boot/dts/am335x-baltos.dtsi +++ b/arch/arm/boot/dts/am335x-baltos.dtsi | |||
@@ -226,7 +226,7 @@ | |||
226 | 226 | ||
227 | #address-cells = <1>; | 227 | #address-cells = <1>; |
228 | #size-cells = <1>; | 228 | #size-cells = <1>; |
229 | elm_id = <&elm>; | 229 | ti,elm-id = <&elm>; |
230 | }; | 230 | }; |
231 | }; | 231 | }; |
232 | 232 | ||
diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi index df63484ef9b3..e7d9ca1305fa 100644 --- a/arch/arm/boot/dts/am335x-igep0033.dtsi +++ b/arch/arm/boot/dts/am335x-igep0033.dtsi | |||
@@ -161,7 +161,7 @@ | |||
161 | 161 | ||
162 | #address-cells = <1>; | 162 | #address-cells = <1>; |
163 | #size-cells = <1>; | 163 | #size-cells = <1>; |
164 | elm_id = <&elm>; | 164 | ti,elm-id = <&elm>; |
165 | 165 | ||
166 | /* MTD partition table */ | 166 | /* MTD partition table */ |
167 | partition@0 { | 167 | partition@0 { |
diff --git a/arch/arm/boot/dts/am335x-phycore-som.dtsi b/arch/arm/boot/dts/am335x-phycore-som.dtsi index 86f773165d5c..1263c9d4cba3 100644 --- a/arch/arm/boot/dts/am335x-phycore-som.dtsi +++ b/arch/arm/boot/dts/am335x-phycore-som.dtsi | |||
@@ -197,7 +197,7 @@ | |||
197 | gpmc,wr-access-ns = <30>; | 197 | gpmc,wr-access-ns = <30>; |
198 | gpmc,wr-data-mux-bus-ns = <0>; | 198 | gpmc,wr-data-mux-bus-ns = <0>; |
199 | 199 | ||
200 | elm_id = <&elm>; | 200 | ti,elm-id = <&elm>; |
201 | 201 | ||
202 | #address-cells = <1>; | 202 | #address-cells = <1>; |
203 | #size-cells = <1>; | 203 | #size-cells = <1>; |
diff --git a/arch/arm/boot/dts/armada-388-clearfog.dts b/arch/arm/boot/dts/armada-388-clearfog.dts index 2e0556af6e5e..d3e6bd805006 100644 --- a/arch/arm/boot/dts/armada-388-clearfog.dts +++ b/arch/arm/boot/dts/armada-388-clearfog.dts | |||
@@ -390,12 +390,12 @@ | |||
390 | 390 | ||
391 | port@0 { | 391 | port@0 { |
392 | reg = <0>; | 392 | reg = <0>; |
393 | label = "lan1"; | 393 | label = "lan5"; |
394 | }; | 394 | }; |
395 | 395 | ||
396 | port@1 { | 396 | port@1 { |
397 | reg = <1>; | 397 | reg = <1>; |
398 | label = "lan2"; | 398 | label = "lan4"; |
399 | }; | 399 | }; |
400 | 400 | ||
401 | port@2 { | 401 | port@2 { |
@@ -405,12 +405,12 @@ | |||
405 | 405 | ||
406 | port@3 { | 406 | port@3 { |
407 | reg = <3>; | 407 | reg = <3>; |
408 | label = "lan4"; | 408 | label = "lan2"; |
409 | }; | 409 | }; |
410 | 410 | ||
411 | port@4 { | 411 | port@4 { |
412 | reg = <4>; | 412 | reg = <4>; |
413 | label = "lan5"; | 413 | label = "lan1"; |
414 | }; | 414 | }; |
415 | 415 | ||
416 | port@5 { | 416 | port@5 { |
diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts index d9499310a301..f6d135245a4b 100644 --- a/arch/arm/boot/dts/exynos5410-odroidxu.dts +++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts | |||
@@ -447,14 +447,11 @@ | |||
447 | samsung,dw-mshc-ciu-div = <3>; | 447 | samsung,dw-mshc-ciu-div = <3>; |
448 | samsung,dw-mshc-sdr-timing = <0 4>; | 448 | samsung,dw-mshc-sdr-timing = <0 4>; |
449 | samsung,dw-mshc-ddr-timing = <0 2>; | 449 | samsung,dw-mshc-ddr-timing = <0 2>; |
450 | samsung,dw-mshc-hs400-timing = <0 2>; | ||
451 | samsung,read-strobe-delay = <90>; | ||
452 | pinctrl-names = "default"; | 450 | pinctrl-names = "default"; |
453 | pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8 &sd0_cd>; | 451 | pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_bus1 &sd0_bus4 &sd0_bus8 &sd0_cd>; |
454 | bus-width = <8>; | 452 | bus-width = <8>; |
455 | cap-mmc-highspeed; | 453 | cap-mmc-highspeed; |
456 | mmc-hs200-1_8v; | 454 | mmc-hs200-1_8v; |
457 | mmc-hs400-1_8v; | ||
458 | vmmc-supply = <&ldo20_reg>; | 455 | vmmc-supply = <&ldo20_reg>; |
459 | vqmmc-supply = <&ldo11_reg>; | 456 | vqmmc-supply = <&ldo11_reg>; |
460 | }; | 457 | }; |
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts index 96ea936eeeb0..240a2864d044 100644 --- a/arch/arm/boot/dts/imx6sx-sabreauto.dts +++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts | |||
@@ -64,7 +64,7 @@ | |||
64 | cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>; | 64 | cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>; |
65 | no-1-8-v; | 65 | no-1-8-v; |
66 | keep-power-in-suspend; | 66 | keep-power-in-suspend; |
67 | enable-sdio-wakup; | 67 | wakeup-source; |
68 | status = "okay"; | 68 | status = "okay"; |
69 | }; | 69 | }; |
70 | 70 | ||
diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts index ef84d8699a76..5bf62897014c 100644 --- a/arch/arm/boot/dts/kirkwood-ib62x0.dts +++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts | |||
@@ -113,7 +113,7 @@ | |||
113 | 113 | ||
114 | partition@e0000 { | 114 | partition@e0000 { |
115 | label = "u-boot environment"; | 115 | label = "u-boot environment"; |
116 | reg = <0xe0000 0x100000>; | 116 | reg = <0xe0000 0x20000>; |
117 | }; | 117 | }; |
118 | 118 | ||
119 | partition@100000 { | 119 | partition@100000 { |
diff --git a/arch/arm/boot/dts/kirkwood-openrd.dtsi b/arch/arm/boot/dts/kirkwood-openrd.dtsi index e4ecab112601..7175511a92da 100644 --- a/arch/arm/boot/dts/kirkwood-openrd.dtsi +++ b/arch/arm/boot/dts/kirkwood-openrd.dtsi | |||
@@ -116,6 +116,10 @@ | |||
116 | }; | 116 | }; |
117 | }; | 117 | }; |
118 | 118 | ||
119 | &pciec { | ||
120 | status = "okay"; | ||
121 | }; | ||
122 | |||
119 | &pcie0 { | 123 | &pcie0 { |
120 | status = "okay"; | 124 | status = "okay"; |
121 | }; | 125 | }; |
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi index 365f39ff58bb..0ff1c2de95bf 100644 --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi | |||
@@ -35,10 +35,15 @@ | |||
35 | ranges = <0 0 0x00000000 0x1000000>; /* CS0: 16MB for NAND */ | 35 | ranges = <0 0 0x00000000 0x1000000>; /* CS0: 16MB for NAND */ |
36 | 36 | ||
37 | nand@0,0 { | 37 | nand@0,0 { |
38 | linux,mtd-name = "micron,mt29f4g16abbda3w"; | 38 | compatible = "ti,omap2-nand"; |
39 | reg = <0 0 4>; /* CS0, offset 0, IO size 4 */ | 39 | reg = <0 0 4>; /* CS0, offset 0, IO size 4 */ |
40 | interrupt-parent = <&gpmc>; | ||
41 | interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */ | ||
42 | <1 IRQ_TYPE_NONE>; /* termcount */ | ||
43 | linux,mtd-name = "micron,mt29f4g16abbda3w"; | ||
40 | nand-bus-width = <16>; | 44 | nand-bus-width = <16>; |
41 | ti,nand-ecc-opt = "bch8"; | 45 | ti,nand-ecc-opt = "bch8"; |
46 | rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */ | ||
42 | gpmc,sync-clk-ps = <0>; | 47 | gpmc,sync-clk-ps = <0>; |
43 | gpmc,cs-on-ns = <0>; | 48 | gpmc,cs-on-ns = <0>; |
44 | gpmc,cs-rd-off-ns = <44>; | 49 | gpmc,cs-rd-off-ns = <44>; |
@@ -54,10 +59,6 @@ | |||
54 | gpmc,wr-access-ns = <40>; | 59 | gpmc,wr-access-ns = <40>; |
55 | gpmc,wr-data-mux-bus-ns = <0>; | 60 | gpmc,wr-data-mux-bus-ns = <0>; |
56 | gpmc,device-width = <2>; | 61 | gpmc,device-width = <2>; |
57 | |||
58 | gpmc,page-burst-access-ns = <5>; | ||
59 | gpmc,cycle2cycle-delay-ns = <50>; | ||
60 | |||
61 | #address-cells = <1>; | 62 | #address-cells = <1>; |
62 | #size-cells = <1>; | 63 | #size-cells = <1>; |
63 | 64 | ||
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi index 5e9a13c0eaf7..1c2c74655416 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi | |||
@@ -46,6 +46,7 @@ | |||
46 | linux,mtd-name = "micron,mt29f4g16abbda3w"; | 46 | linux,mtd-name = "micron,mt29f4g16abbda3w"; |
47 | nand-bus-width = <16>; | 47 | nand-bus-width = <16>; |
48 | ti,nand-ecc-opt = "bch8"; | 48 | ti,nand-ecc-opt = "bch8"; |
49 | rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>; /* gpmc_wait0 */ | ||
49 | gpmc,sync-clk-ps = <0>; | 50 | gpmc,sync-clk-ps = <0>; |
50 | gpmc,cs-on-ns = <0>; | 51 | gpmc,cs-on-ns = <0>; |
51 | gpmc,cs-rd-off-ns = <44>; | 52 | gpmc,cs-rd-off-ns = <44>; |
diff --git a/arch/arm/boot/dts/omap3-overo-base.dtsi b/arch/arm/boot/dts/omap3-overo-base.dtsi index de256fa8da48..3e946cac55f3 100644 --- a/arch/arm/boot/dts/omap3-overo-base.dtsi +++ b/arch/arm/boot/dts/omap3-overo-base.dtsi | |||
@@ -223,7 +223,9 @@ | |||
223 | }; | 223 | }; |
224 | 224 | ||
225 | &gpmc { | 225 | &gpmc { |
226 | ranges = <0 0 0x00000000 0x20000000>; | 226 | ranges = <0 0 0x30000000 0x1000000>, /* CS0 */ |
227 | <4 0 0x2b000000 0x1000000>, /* CS4 */ | ||
228 | <5 0 0x2c000000 0x1000000>; /* CS5 */ | ||
227 | 229 | ||
228 | nand@0,0 { | 230 | nand@0,0 { |
229 | compatible = "ti,omap2-nand"; | 231 | compatible = "ti,omap2-nand"; |
diff --git a/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi b/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi index 7df27926ead2..4f4c6efbd518 100644 --- a/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi +++ b/arch/arm/boot/dts/omap3-overo-chestnut43-common.dtsi | |||
@@ -55,8 +55,6 @@ | |||
55 | #include "omap-gpmc-smsc9221.dtsi" | 55 | #include "omap-gpmc-smsc9221.dtsi" |
56 | 56 | ||
57 | &gpmc { | 57 | &gpmc { |
58 | ranges = <5 0 0x2c000000 0x1000000>; /* CS5 */ | ||
59 | |||
60 | ethernet@gpmc { | 58 | ethernet@gpmc { |
61 | reg = <5 0 0xff>; | 59 | reg = <5 0 0xff>; |
62 | interrupt-parent = <&gpio6>; | 60 | interrupt-parent = <&gpio6>; |
diff --git a/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi index 9e24b6a1d07b..1b304e2f1bd2 100644 --- a/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi +++ b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi | |||
@@ -27,8 +27,6 @@ | |||
27 | #include "omap-gpmc-smsc9221.dtsi" | 27 | #include "omap-gpmc-smsc9221.dtsi" |
28 | 28 | ||
29 | &gpmc { | 29 | &gpmc { |
30 | ranges = <5 0 0x2c000000 0x1000000>; /* CS5 */ | ||
31 | |||
32 | ethernet@gpmc { | 30 | ethernet@gpmc { |
33 | reg = <5 0 0xff>; | 31 | reg = <5 0 0xff>; |
34 | interrupt-parent = <&gpio6>; | 32 | interrupt-parent = <&gpio6>; |
diff --git a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi index 334109e14613..82e98ee3023a 100644 --- a/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi +++ b/arch/arm/boot/dts/omap3-overo-tobiduo-common.dtsi | |||
@@ -15,9 +15,6 @@ | |||
15 | #include "omap-gpmc-smsc9221.dtsi" | 15 | #include "omap-gpmc-smsc9221.dtsi" |
16 | 16 | ||
17 | &gpmc { | 17 | &gpmc { |
18 | ranges = <4 0 0x2b000000 0x1000000>, /* CS4 */ | ||
19 | <5 0 0x2c000000 0x1000000>; /* CS5 */ | ||
20 | |||
21 | smsc1: ethernet@gpmc { | 18 | smsc1: ethernet@gpmc { |
22 | reg = <5 0 0xff>; | 19 | reg = <5 0 0xff>; |
23 | interrupt-parent = <&gpio6>; | 20 | interrupt-parent = <&gpio6>; |
diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts index 1dfc492cc004..1444fbd543e7 100644 --- a/arch/arm/boot/dts/tegra114-dalmore.dts +++ b/arch/arm/boot/dts/tegra114-dalmore.dts | |||
@@ -897,7 +897,7 @@ | |||
897 | palmas: tps65913@58 { | 897 | palmas: tps65913@58 { |
898 | compatible = "ti,palmas"; | 898 | compatible = "ti,palmas"; |
899 | reg = <0x58>; | 899 | reg = <0x58>; |
900 | interrupts = <0 86 IRQ_TYPE_LEVEL_LOW>; | 900 | interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>; |
901 | 901 | ||
902 | #interrupt-cells = <2>; | 902 | #interrupt-cells = <2>; |
903 | interrupt-controller; | 903 | interrupt-controller; |
diff --git a/arch/arm/boot/dts/tegra114-roth.dts b/arch/arm/boot/dts/tegra114-roth.dts index 70cf40996c3f..966a7fc044af 100644 --- a/arch/arm/boot/dts/tegra114-roth.dts +++ b/arch/arm/boot/dts/tegra114-roth.dts | |||
@@ -802,7 +802,7 @@ | |||
802 | palmas: pmic@58 { | 802 | palmas: pmic@58 { |
803 | compatible = "ti,palmas"; | 803 | compatible = "ti,palmas"; |
804 | reg = <0x58>; | 804 | reg = <0x58>; |
805 | interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>; | 805 | interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; |
806 | 806 | ||
807 | #interrupt-cells = <2>; | 807 | #interrupt-cells = <2>; |
808 | interrupt-controller; | 808 | interrupt-controller; |
diff --git a/arch/arm/boot/dts/tegra114-tn7.dts b/arch/arm/boot/dts/tegra114-tn7.dts index 17dd14545862..a161fa1dfb61 100644 --- a/arch/arm/boot/dts/tegra114-tn7.dts +++ b/arch/arm/boot/dts/tegra114-tn7.dts | |||
@@ -63,7 +63,7 @@ | |||
63 | palmas: pmic@58 { | 63 | palmas: pmic@58 { |
64 | compatible = "ti,palmas"; | 64 | compatible = "ti,palmas"; |
65 | reg = <0x58>; | 65 | reg = <0x58>; |
66 | interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>; | 66 | interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; |
67 | 67 | ||
68 | #interrupt-cells = <2>; | 68 | #interrupt-cells = <2>; |
69 | interrupt-controller; | 69 | interrupt-controller; |
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index bc5f50799d75..9f157e7c51e7 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S | |||
@@ -295,6 +295,7 @@ __und_svc_fault: | |||
295 | bl __und_fault | 295 | bl __und_fault |
296 | 296 | ||
297 | __und_svc_finish: | 297 | __und_svc_finish: |
298 | get_thread_info tsk | ||
298 | ldr r5, [sp, #S_PSR] @ Get SVC cpsr | 299 | ldr r5, [sp, #S_PSR] @ Get SVC cpsr |
299 | svc_exit r5 @ return from exception | 300 | svc_exit r5 @ return from exception |
300 | UNWIND(.fnend ) | 301 | UNWIND(.fnend ) |
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c index fd8720532471..0df062d8b2c9 100644 --- a/arch/arm/mach-imx/gpc.c +++ b/arch/arm/mach-imx/gpc.c | |||
@@ -271,6 +271,12 @@ static int __init imx_gpc_init(struct device_node *node, | |||
271 | for (i = 0; i < IMR_NUM; i++) | 271 | for (i = 0; i < IMR_NUM; i++) |
272 | writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4); | 272 | writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4); |
273 | 273 | ||
274 | /* | ||
275 | * Clear the OF_POPULATED flag set in of_irq_init so that | ||
276 | * later the GPC power domain driver will not be skipped. | ||
277 | */ | ||
278 | of_node_clear_flag(node, OF_POPULATED); | ||
279 | |||
274 | return 0; | 280 | return 0; |
275 | } | 281 | } |
276 | IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init); | 282 | IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init); |
diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c index 5d9bfab279dd..6bb7d9cf1e38 100644 --- a/arch/arm/mach-imx/mach-imx6ul.c +++ b/arch/arm/mach-imx/mach-imx6ul.c | |||
@@ -64,6 +64,7 @@ static void __init imx6ul_init_machine(void) | |||
64 | if (parent == NULL) | 64 | if (parent == NULL) |
65 | pr_warn("failed to initialize soc device\n"); | 65 | pr_warn("failed to initialize soc device\n"); |
66 | 66 | ||
67 | of_platform_default_populate(NULL, NULL, parent); | ||
67 | imx6ul_enet_init(); | 68 | imx6ul_enet_init(); |
68 | imx_anatop_init(); | 69 | imx_anatop_init(); |
69 | imx6ul_pm_init(); | 70 | imx6ul_pm_init(); |
diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c index 58924b3844df..67bab74fcbc5 100644 --- a/arch/arm/mach-imx/pm-imx6.c +++ b/arch/arm/mach-imx/pm-imx6.c | |||
@@ -295,7 +295,7 @@ int imx6_set_lpm(enum mxc_cpu_pwr_mode mode) | |||
295 | val &= ~BM_CLPCR_SBYOS; | 295 | val &= ~BM_CLPCR_SBYOS; |
296 | if (cpu_is_imx6sl()) | 296 | if (cpu_is_imx6sl()) |
297 | val |= BM_CLPCR_BYPASS_PMIC_READY; | 297 | val |= BM_CLPCR_BYPASS_PMIC_READY; |
298 | if (cpu_is_imx6sl() || cpu_is_imx6sx()) | 298 | if (cpu_is_imx6sl() || cpu_is_imx6sx() || cpu_is_imx6ul()) |
299 | val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS; | 299 | val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS; |
300 | else | 300 | else |
301 | val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS; | 301 | val |= BM_CLPCR_BYP_MMDC_CH1_LPM_HS; |
diff --git a/arch/arm/mach-omap2/cm33xx.c b/arch/arm/mach-omap2/cm33xx.c index c073fb57dd13..6f2d0aec0513 100644 --- a/arch/arm/mach-omap2/cm33xx.c +++ b/arch/arm/mach-omap2/cm33xx.c | |||
@@ -220,9 +220,6 @@ static int am33xx_cm_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs, | |||
220 | { | 220 | { |
221 | int i = 0; | 221 | int i = 0; |
222 | 222 | ||
223 | if (!clkctrl_offs) | ||
224 | return 0; | ||
225 | |||
226 | omap_test_timeout(_is_module_ready(inst, clkctrl_offs), | 223 | omap_test_timeout(_is_module_ready(inst, clkctrl_offs), |
227 | MAX_MODULE_READY_TIME, i); | 224 | MAX_MODULE_READY_TIME, i); |
228 | 225 | ||
@@ -246,9 +243,6 @@ static int am33xx_cm_wait_module_idle(u8 part, s16 inst, u16 clkctrl_offs, | |||
246 | { | 243 | { |
247 | int i = 0; | 244 | int i = 0; |
248 | 245 | ||
249 | if (!clkctrl_offs) | ||
250 | return 0; | ||
251 | |||
252 | omap_test_timeout((_clkctrl_idlest(inst, clkctrl_offs) == | 246 | omap_test_timeout((_clkctrl_idlest(inst, clkctrl_offs) == |
253 | CLKCTRL_IDLEST_DISABLED), | 247 | CLKCTRL_IDLEST_DISABLED), |
254 | MAX_MODULE_READY_TIME, i); | 248 | MAX_MODULE_READY_TIME, i); |
diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c index 2c0e07ed6b99..2ab27ade136a 100644 --- a/arch/arm/mach-omap2/cminst44xx.c +++ b/arch/arm/mach-omap2/cminst44xx.c | |||
@@ -278,9 +278,6 @@ static int omap4_cminst_wait_module_ready(u8 part, s16 inst, u16 clkctrl_offs, | |||
278 | { | 278 | { |
279 | int i = 0; | 279 | int i = 0; |
280 | 280 | ||
281 | if (!clkctrl_offs) | ||
282 | return 0; | ||
283 | |||
284 | omap_test_timeout(_is_module_ready(part, inst, clkctrl_offs), | 281 | omap_test_timeout(_is_module_ready(part, inst, clkctrl_offs), |
285 | MAX_MODULE_READY_TIME, i); | 282 | MAX_MODULE_READY_TIME, i); |
286 | 283 | ||
@@ -304,9 +301,6 @@ static int omap4_cminst_wait_module_idle(u8 part, s16 inst, u16 clkctrl_offs, | |||
304 | { | 301 | { |
305 | int i = 0; | 302 | int i = 0; |
306 | 303 | ||
307 | if (!clkctrl_offs) | ||
308 | return 0; | ||
309 | |||
310 | omap_test_timeout((_clkctrl_idlest(part, inst, clkctrl_offs) == | 304 | omap_test_timeout((_clkctrl_idlest(part, inst, clkctrl_offs) == |
311 | CLKCTRL_IDLEST_DISABLED), | 305 | CLKCTRL_IDLEST_DISABLED), |
312 | MAX_MODULE_DISABLE_TIME, i); | 306 | MAX_MODULE_DISABLE_TIME, i); |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 5b709383381c..1052b29697b8 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -1053,6 +1053,10 @@ static int _omap4_wait_target_disable(struct omap_hwmod *oh) | |||
1053 | if (oh->flags & HWMOD_NO_IDLEST) | 1053 | if (oh->flags & HWMOD_NO_IDLEST) |
1054 | return 0; | 1054 | return 0; |
1055 | 1055 | ||
1056 | if (!oh->prcm.omap4.clkctrl_offs && | ||
1057 | !(oh->prcm.omap4.flags & HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET)) | ||
1058 | return 0; | ||
1059 | |||
1056 | return omap_cm_wait_module_idle(oh->clkdm->prcm_partition, | 1060 | return omap_cm_wait_module_idle(oh->clkdm->prcm_partition, |
1057 | oh->clkdm->cm_inst, | 1061 | oh->clkdm->cm_inst, |
1058 | oh->prcm.omap4.clkctrl_offs, 0); | 1062 | oh->prcm.omap4.clkctrl_offs, 0); |
@@ -2971,6 +2975,10 @@ static int _omap4_wait_target_ready(struct omap_hwmod *oh) | |||
2971 | if (!_find_mpu_rt_port(oh)) | 2975 | if (!_find_mpu_rt_port(oh)) |
2972 | return 0; | 2976 | return 0; |
2973 | 2977 | ||
2978 | if (!oh->prcm.omap4.clkctrl_offs && | ||
2979 | !(oh->prcm.omap4.flags & HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET)) | ||
2980 | return 0; | ||
2981 | |||
2974 | /* XXX check module SIDLEMODE, hardreset status */ | 2982 | /* XXX check module SIDLEMODE, hardreset status */ |
2975 | 2983 | ||
2976 | return omap_cm_wait_module_ready(oh->clkdm->prcm_partition, | 2984 | return omap_cm_wait_module_ready(oh->clkdm->prcm_partition, |
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index 4041bad79a9a..78904017f18c 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h | |||
@@ -443,8 +443,12 @@ struct omap_hwmod_omap2_prcm { | |||
443 | * HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT: Some IP blocks don't have a PRCM | 443 | * HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT: Some IP blocks don't have a PRCM |
444 | * module-level context loss register associated with them; this | 444 | * module-level context loss register associated with them; this |
445 | * flag bit should be set in those cases | 445 | * flag bit should be set in those cases |
446 | * HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET: Some IP blocks have a valid CLKCTRL | ||
447 | * offset of zero; this flag bit should be set in those cases to | ||
448 | * distinguish from hwmods that have no clkctrl offset. | ||
446 | */ | 449 | */ |
447 | #define HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT (1 << 0) | 450 | #define HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT (1 << 0) |
451 | #define HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET (1 << 1) | ||
448 | 452 | ||
449 | /** | 453 | /** |
450 | * struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data | 454 | * struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data |
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c index 55c5878577f4..e2d84aa7f595 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #define CLKCTRL(oh, clkctrl) ((oh).prcm.omap4.clkctrl_offs = (clkctrl)) | 29 | #define CLKCTRL(oh, clkctrl) ((oh).prcm.omap4.clkctrl_offs = (clkctrl)) |
30 | #define RSTCTRL(oh, rstctrl) ((oh).prcm.omap4.rstctrl_offs = (rstctrl)) | 30 | #define RSTCTRL(oh, rstctrl) ((oh).prcm.omap4.rstctrl_offs = (rstctrl)) |
31 | #define RSTST(oh, rstst) ((oh).prcm.omap4.rstst_offs = (rstst)) | 31 | #define RSTST(oh, rstst) ((oh).prcm.omap4.rstst_offs = (rstst)) |
32 | #define PRCM_FLAGS(oh, flag) ((oh).prcm.omap4.flags = (flag)) | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * 'l3' class | 35 | * 'l3' class |
@@ -1296,6 +1297,7 @@ static void omap_hwmod_am33xx_clkctrl(void) | |||
1296 | CLKCTRL(am33xx_i2c1_hwmod, AM33XX_CM_WKUP_I2C0_CLKCTRL_OFFSET); | 1297 | CLKCTRL(am33xx_i2c1_hwmod, AM33XX_CM_WKUP_I2C0_CLKCTRL_OFFSET); |
1297 | CLKCTRL(am33xx_wd_timer1_hwmod, AM33XX_CM_WKUP_WDT1_CLKCTRL_OFFSET); | 1298 | CLKCTRL(am33xx_wd_timer1_hwmod, AM33XX_CM_WKUP_WDT1_CLKCTRL_OFFSET); |
1298 | CLKCTRL(am33xx_rtc_hwmod, AM33XX_CM_RTC_RTC_CLKCTRL_OFFSET); | 1299 | CLKCTRL(am33xx_rtc_hwmod, AM33XX_CM_RTC_RTC_CLKCTRL_OFFSET); |
1300 | PRCM_FLAGS(am33xx_rtc_hwmod, HWMOD_OMAP4_ZERO_CLKCTRL_OFFSET); | ||
1299 | CLKCTRL(am33xx_mmc2_hwmod, AM33XX_CM_PER_MMC2_CLKCTRL_OFFSET); | 1301 | CLKCTRL(am33xx_mmc2_hwmod, AM33XX_CM_PER_MMC2_CLKCTRL_OFFSET); |
1300 | CLKCTRL(am33xx_gpmc_hwmod, AM33XX_CM_PER_GPMC_CLKCTRL_OFFSET); | 1302 | CLKCTRL(am33xx_gpmc_hwmod, AM33XX_CM_PER_GPMC_CLKCTRL_OFFSET); |
1301 | CLKCTRL(am33xx_l4_ls_hwmod, AM33XX_CM_PER_L4LS_CLKCTRL_OFFSET); | 1303 | CLKCTRL(am33xx_l4_ls_hwmod, AM33XX_CM_PER_L4LS_CLKCTRL_OFFSET); |
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index d72ee6185d5e..1cc4a6f3954e 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | |||
@@ -722,8 +722,20 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = { | |||
722 | * display serial interface controller | 722 | * display serial interface controller |
723 | */ | 723 | */ |
724 | 724 | ||
725 | static struct omap_hwmod_class_sysconfig omap3xxx_dsi_sysc = { | ||
726 | .rev_offs = 0x0000, | ||
727 | .sysc_offs = 0x0010, | ||
728 | .syss_offs = 0x0014, | ||
729 | .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | | ||
730 | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | | ||
731 | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), | ||
732 | .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), | ||
733 | .sysc_fields = &omap_hwmod_sysc_type1, | ||
734 | }; | ||
735 | |||
725 | static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = { | 736 | static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = { |
726 | .name = "dsi", | 737 | .name = "dsi", |
738 | .sysc = &omap3xxx_dsi_sysc, | ||
727 | }; | 739 | }; |
728 | 740 | ||
729 | static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = { | 741 | static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = { |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 62f4d01941f7..6344913f0804 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -728,7 +728,8 @@ static void *__init late_alloc(unsigned long sz) | |||
728 | { | 728 | { |
729 | void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz)); | 729 | void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz)); |
730 | 730 | ||
731 | BUG_ON(!ptr); | 731 | if (!ptr || !pgtable_page_ctor(virt_to_page(ptr))) |
732 | BUG(); | ||
732 | return ptr; | 733 | return ptr; |
733 | } | 734 | } |
734 | 735 | ||
@@ -1155,10 +1156,19 @@ void __init sanity_check_meminfo(void) | |||
1155 | { | 1156 | { |
1156 | phys_addr_t memblock_limit = 0; | 1157 | phys_addr_t memblock_limit = 0; |
1157 | int highmem = 0; | 1158 | int highmem = 0; |
1158 | phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1; | 1159 | u64 vmalloc_limit; |
1159 | struct memblock_region *reg; | 1160 | struct memblock_region *reg; |
1160 | bool should_use_highmem = false; | 1161 | bool should_use_highmem = false; |
1161 | 1162 | ||
1163 | /* | ||
1164 | * Let's use our own (unoptimized) equivalent of __pa() that is | ||
1165 | * not affected by wrap-arounds when sizeof(phys_addr_t) == 4. | ||
1166 | * The result is used as the upper bound on physical memory address | ||
1167 | * and may itself be outside the valid range for which phys_addr_t | ||
1168 | * and therefore __pa() is defined. | ||
1169 | */ | ||
1170 | vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET; | ||
1171 | |||
1162 | for_each_memblock(memory, reg) { | 1172 | for_each_memblock(memory, reg) { |
1163 | phys_addr_t block_start = reg->base; | 1173 | phys_addr_t block_start = reg->base; |
1164 | phys_addr_t block_end = reg->base + reg->size; | 1174 | phys_addr_t block_end = reg->base + reg->size; |
@@ -1183,10 +1193,11 @@ void __init sanity_check_meminfo(void) | |||
1183 | if (reg->size > size_limit) { | 1193 | if (reg->size > size_limit) { |
1184 | phys_addr_t overlap_size = reg->size - size_limit; | 1194 | phys_addr_t overlap_size = reg->size - size_limit; |
1185 | 1195 | ||
1186 | pr_notice("Truncating RAM at %pa-%pa to -%pa", | 1196 | pr_notice("Truncating RAM at %pa-%pa", |
1187 | &block_start, &block_end, &vmalloc_limit); | 1197 | &block_start, &block_end); |
1188 | memblock_remove(vmalloc_limit, overlap_size); | ||
1189 | block_end = vmalloc_limit; | 1198 | block_end = vmalloc_limit; |
1199 | pr_cont(" to -%pa", &block_end); | ||
1200 | memblock_remove(vmalloc_limit, overlap_size); | ||
1190 | should_use_highmem = true; | 1201 | should_use_highmem = true; |
1191 | } | 1202 | } |
1192 | } | 1203 | } |
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index 9a3aec97ac09..ccf79d849e0a 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S | |||
@@ -101,12 +101,20 @@ ENTRY(cpu_resume) | |||
101 | bl el2_setup // if in EL2 drop to EL1 cleanly | 101 | bl el2_setup // if in EL2 drop to EL1 cleanly |
102 | /* enable the MMU early - so we can access sleep_save_stash by va */ | 102 | /* enable the MMU early - so we can access sleep_save_stash by va */ |
103 | adr_l lr, __enable_mmu /* __cpu_setup will return here */ | 103 | adr_l lr, __enable_mmu /* __cpu_setup will return here */ |
104 | ldr x27, =_cpu_resume /* __enable_mmu will branch here */ | 104 | adr_l x27, _resume_switched /* __enable_mmu will branch here */ |
105 | adrp x25, idmap_pg_dir | 105 | adrp x25, idmap_pg_dir |
106 | adrp x26, swapper_pg_dir | 106 | adrp x26, swapper_pg_dir |
107 | b __cpu_setup | 107 | b __cpu_setup |
108 | ENDPROC(cpu_resume) | 108 | ENDPROC(cpu_resume) |
109 | 109 | ||
110 | .pushsection ".idmap.text", "ax" | ||
111 | _resume_switched: | ||
112 | ldr x8, =_cpu_resume | ||
113 | br x8 | ||
114 | ENDPROC(_resume_switched) | ||
115 | .ltorg | ||
116 | .popsection | ||
117 | |||
110 | ENTRY(_cpu_resume) | 118 | ENTRY(_cpu_resume) |
111 | mrs x1, mpidr_el1 | 119 | mrs x1, mpidr_el1 |
112 | adrp x8, mpidr_hash | 120 | adrp x8, mpidr_hash |
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index f94b80eb295d..9c3e75df2180 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c | |||
@@ -242,7 +242,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, | |||
242 | 242 | ||
243 | static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) | 243 | static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) |
244 | { | 244 | { |
245 | pte_t *pte = pte_offset_kernel(pmd, 0); | 245 | pte_t *pte = pte_offset_kernel(pmd, 0UL); |
246 | unsigned long addr; | 246 | unsigned long addr; |
247 | unsigned i; | 247 | unsigned i; |
248 | 248 | ||
@@ -254,7 +254,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) | |||
254 | 254 | ||
255 | static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) | 255 | static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) |
256 | { | 256 | { |
257 | pmd_t *pmd = pmd_offset(pud, 0); | 257 | pmd_t *pmd = pmd_offset(pud, 0UL); |
258 | unsigned long addr; | 258 | unsigned long addr; |
259 | unsigned i; | 259 | unsigned i; |
260 | 260 | ||
@@ -271,7 +271,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) | |||
271 | 271 | ||
272 | static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) | 272 | static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) |
273 | { | 273 | { |
274 | pud_t *pud = pud_offset(pgd, 0); | 274 | pud_t *pud = pud_offset(pgd, 0UL); |
275 | unsigned long addr; | 275 | unsigned long addr; |
276 | unsigned i; | 276 | unsigned i; |
277 | 277 | ||
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index c7fe3ec70774..5bb15eab6f00 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c | |||
@@ -23,6 +23,8 @@ | |||
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/of.h> | 24 | #include <linux/of.h> |
25 | 25 | ||
26 | #include <asm/acpi.h> | ||
27 | |||
26 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; | 28 | struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; |
27 | EXPORT_SYMBOL(node_data); | 29 | EXPORT_SYMBOL(node_data); |
28 | nodemask_t numa_nodes_parsed __initdata; | 30 | nodemask_t numa_nodes_parsed __initdata; |
diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h index c0ae62520d15..274d5bc6ecce 100644 --- a/arch/parisc/include/uapi/asm/errno.h +++ b/arch/parisc/include/uapi/asm/errno.h | |||
@@ -97,10 +97,10 @@ | |||
97 | #define ENOTCONN 235 /* Transport endpoint is not connected */ | 97 | #define ENOTCONN 235 /* Transport endpoint is not connected */ |
98 | #define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */ | 98 | #define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */ |
99 | #define ETOOMANYREFS 237 /* Too many references: cannot splice */ | 99 | #define ETOOMANYREFS 237 /* Too many references: cannot splice */ |
100 | #define EREFUSED ECONNREFUSED /* for HP's NFS apparently */ | ||
101 | #define ETIMEDOUT 238 /* Connection timed out */ | 100 | #define ETIMEDOUT 238 /* Connection timed out */ |
102 | #define ECONNREFUSED 239 /* Connection refused */ | 101 | #define ECONNREFUSED 239 /* Connection refused */ |
103 | #define EREMOTERELEASE 240 /* Remote peer released connection */ | 102 | #define EREFUSED ECONNREFUSED /* for HP's NFS apparently */ |
103 | #define EREMOTERELEASE 240 /* Remote peer released connection */ | ||
104 | #define EHOSTDOWN 241 /* Host is down */ | 104 | #define EHOSTDOWN 241 /* Host is down */ |
105 | #define EHOSTUNREACH 242 /* No route to host */ | 105 | #define EHOSTUNREACH 242 /* No route to host */ |
106 | 106 | ||
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c index 5adc339eb7c8..0c2a94a0f751 100644 --- a/arch/parisc/kernel/processor.c +++ b/arch/parisc/kernel/processor.c | |||
@@ -51,8 +51,6 @@ EXPORT_SYMBOL(_parisc_requires_coherency); | |||
51 | 51 | ||
52 | DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data); | 52 | DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data); |
53 | 53 | ||
54 | extern int update_cr16_clocksource(void); /* from time.c */ | ||
55 | |||
56 | /* | 54 | /* |
57 | ** PARISC CPU driver - claim "device" and initialize CPU data structures. | 55 | ** PARISC CPU driver - claim "device" and initialize CPU data structures. |
58 | ** | 56 | ** |
@@ -228,12 +226,6 @@ static int processor_probe(struct parisc_device *dev) | |||
228 | } | 226 | } |
229 | #endif | 227 | #endif |
230 | 228 | ||
231 | /* If we've registered more than one cpu, | ||
232 | * we'll use the jiffies clocksource since cr16 | ||
233 | * is not synchronized between CPUs. | ||
234 | */ | ||
235 | update_cr16_clocksource(); | ||
236 | |||
237 | return 0; | 229 | return 0; |
238 | } | 230 | } |
239 | 231 | ||
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 505cf1ac5af2..4b0b963d52a7 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c | |||
@@ -221,18 +221,6 @@ static struct clocksource clocksource_cr16 = { | |||
221 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 221 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
222 | }; | 222 | }; |
223 | 223 | ||
224 | int update_cr16_clocksource(void) | ||
225 | { | ||
226 | /* since the cr16 cycle counters are not synchronized across CPUs, | ||
227 | we'll check if we should switch to a safe clocksource: */ | ||
228 | if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) { | ||
229 | clocksource_change_rating(&clocksource_cr16, 0); | ||
230 | return 1; | ||
231 | } | ||
232 | |||
233 | return 0; | ||
234 | } | ||
235 | |||
236 | void __init start_cpu_itimer(void) | 224 | void __init start_cpu_itimer(void) |
237 | { | 225 | { |
238 | unsigned int cpu = smp_processor_id(); | 226 | unsigned int cpu = smp_processor_id(); |
diff --git a/arch/s390/boot/compressed/head.S b/arch/s390/boot/compressed/head.S index f86a4eef28a9..28c4f96a2d9c 100644 --- a/arch/s390/boot/compressed/head.S +++ b/arch/s390/boot/compressed/head.S | |||
@@ -21,16 +21,21 @@ ENTRY(startup_continue) | |||
21 | lg %r15,.Lstack-.LPG1(%r13) | 21 | lg %r15,.Lstack-.LPG1(%r13) |
22 | aghi %r15,-160 | 22 | aghi %r15,-160 |
23 | brasl %r14,decompress_kernel | 23 | brasl %r14,decompress_kernel |
24 | # setup registers for memory mover & branch to target | 24 | # Set up registers for memory mover. We move the decompressed image to |
25 | # 0x11000, starting at offset 0x11000 in the decompressed image so | ||
26 | # that code living at 0x11000 in the image will end up at 0x11000 in | ||
27 | # memory. | ||
25 | lgr %r4,%r2 | 28 | lgr %r4,%r2 |
26 | lg %r2,.Loffset-.LPG1(%r13) | 29 | lg %r2,.Loffset-.LPG1(%r13) |
27 | la %r4,0(%r2,%r4) | 30 | la %r4,0(%r2,%r4) |
28 | lg %r3,.Lmvsize-.LPG1(%r13) | 31 | lg %r3,.Lmvsize-.LPG1(%r13) |
29 | lgr %r5,%r3 | 32 | lgr %r5,%r3 |
30 | # move the memory mover someplace safe | 33 | # Move the memory mover someplace safe so it doesn't overwrite itself. |
31 | la %r1,0x200 | 34 | la %r1,0x200 |
32 | mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13) | 35 | mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13) |
33 | # decompress image is started at 0x11000 | 36 | # When the memory mover is done we pass control to |
37 | # arch/s390/kernel/head64.S:startup_continue which lives at 0x11000 in | ||
38 | # the decompressed image. | ||
34 | lgr %r6,%r2 | 39 | lgr %r6,%r2 |
35 | br %r1 | 40 | br %r1 |
36 | mover: | 41 | mover: |
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 889ea3450210..26e0c7f08814 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig | |||
@@ -678,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m | |||
678 | CONFIG_CRYPTO_DES_S390=m | 678 | CONFIG_CRYPTO_DES_S390=m |
679 | CONFIG_CRYPTO_AES_S390=m | 679 | CONFIG_CRYPTO_AES_S390=m |
680 | CONFIG_CRYPTO_GHASH_S390=m | 680 | CONFIG_CRYPTO_GHASH_S390=m |
681 | CONFIG_CRYPTO_CRC32_S390=m | 681 | CONFIG_CRYPTO_CRC32_S390=y |
682 | CONFIG_ASYMMETRIC_KEY_TYPE=y | 682 | CONFIG_ASYMMETRIC_KEY_TYPE=y |
683 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m | 683 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m |
684 | CONFIG_X509_CERTIFICATE_PARSER=m | 684 | CONFIG_X509_CERTIFICATE_PARSER=m |
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index 1bcfd764910a..24879dab47bc 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig | |||
@@ -616,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m | |||
616 | CONFIG_CRYPTO_DES_S390=m | 616 | CONFIG_CRYPTO_DES_S390=m |
617 | CONFIG_CRYPTO_AES_S390=m | 617 | CONFIG_CRYPTO_AES_S390=m |
618 | CONFIG_CRYPTO_GHASH_S390=m | 618 | CONFIG_CRYPTO_GHASH_S390=m |
619 | CONFIG_CRYPTO_CRC32_S390=m | 619 | CONFIG_CRYPTO_CRC32_S390=y |
620 | CONFIG_ASYMMETRIC_KEY_TYPE=y | 620 | CONFIG_ASYMMETRIC_KEY_TYPE=y |
621 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m | 621 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m |
622 | CONFIG_X509_CERTIFICATE_PARSER=m | 622 | CONFIG_X509_CERTIFICATE_PARSER=m |
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 13ff090139c8..a5c1e5f2a0ca 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig | |||
@@ -615,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m | |||
615 | CONFIG_CRYPTO_DES_S390=m | 615 | CONFIG_CRYPTO_DES_S390=m |
616 | CONFIG_CRYPTO_AES_S390=m | 616 | CONFIG_CRYPTO_AES_S390=m |
617 | CONFIG_CRYPTO_GHASH_S390=m | 617 | CONFIG_CRYPTO_GHASH_S390=m |
618 | CONFIG_CRYPTO_CRC32_S390=m | 618 | CONFIG_CRYPTO_CRC32_S390=y |
619 | CONFIG_ASYMMETRIC_KEY_TYPE=y | 619 | CONFIG_ASYMMETRIC_KEY_TYPE=y |
620 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m | 620 | CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m |
621 | CONFIG_X509_CERTIFICATE_PARSER=m | 621 | CONFIG_X509_CERTIFICATE_PARSER=m |
diff --git a/arch/s390/crypto/crc32-vx.c b/arch/s390/crypto/crc32-vx.c index 577ae1d4ae89..2bad9d837029 100644 --- a/arch/s390/crypto/crc32-vx.c +++ b/arch/s390/crypto/crc32-vx.c | |||
@@ -51,6 +51,9 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size); | |||
51 | struct kernel_fpu vxstate; \ | 51 | struct kernel_fpu vxstate; \ |
52 | unsigned long prealign, aligned, remaining; \ | 52 | unsigned long prealign, aligned, remaining; \ |
53 | \ | 53 | \ |
54 | if (datalen < VX_MIN_LEN + VX_ALIGN_MASK) \ | ||
55 | return ___crc32_sw(crc, data, datalen); \ | ||
56 | \ | ||
54 | if ((unsigned long)data & VX_ALIGN_MASK) { \ | 57 | if ((unsigned long)data & VX_ALIGN_MASK) { \ |
55 | prealign = VX_ALIGNMENT - \ | 58 | prealign = VX_ALIGNMENT - \ |
56 | ((unsigned long)data & VX_ALIGN_MASK); \ | 59 | ((unsigned long)data & VX_ALIGN_MASK); \ |
@@ -59,9 +62,6 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size); | |||
59 | data = (void *)((unsigned long)data + prealign); \ | 62 | data = (void *)((unsigned long)data + prealign); \ |
60 | } \ | 63 | } \ |
61 | \ | 64 | \ |
62 | if (datalen < VX_MIN_LEN) \ | ||
63 | return ___crc32_sw(crc, data, datalen); \ | ||
64 | \ | ||
65 | aligned = datalen & ~VX_ALIGN_MASK; \ | 65 | aligned = datalen & ~VX_ALIGN_MASK; \ |
66 | remaining = datalen & VX_ALIGN_MASK; \ | 66 | remaining = datalen & VX_ALIGN_MASK; \ |
67 | \ | 67 | \ |
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index ccccebeeaaf6..73610f2e3b4f 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
@@ -234,7 +234,7 @@ CONFIG_CRYPTO_SHA256_S390=m | |||
234 | CONFIG_CRYPTO_SHA512_S390=m | 234 | CONFIG_CRYPTO_SHA512_S390=m |
235 | CONFIG_CRYPTO_DES_S390=m | 235 | CONFIG_CRYPTO_DES_S390=m |
236 | CONFIG_CRYPTO_AES_S390=m | 236 | CONFIG_CRYPTO_AES_S390=m |
237 | CONFIG_CRYPTO_CRC32_S390=m | 237 | CONFIG_CRYPTO_CRC32_S390=y |
238 | CONFIG_CRC7=m | 238 | CONFIG_CRC7=m |
239 | # CONFIG_XZ_DEC_X86 is not set | 239 | # CONFIG_XZ_DEC_X86 is not set |
240 | # CONFIG_XZ_DEC_POWERPC is not set | 240 | # CONFIG_XZ_DEC_POWERPC is not set |
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 56e4d8234ef2..4431905f8cfa 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -309,7 +309,9 @@ ENTRY(startup_kdump) | |||
309 | l %r15,.Lstack-.LPG0(%r13) | 309 | l %r15,.Lstack-.LPG0(%r13) |
310 | ahi %r15,-STACK_FRAME_OVERHEAD | 310 | ahi %r15,-STACK_FRAME_OVERHEAD |
311 | brasl %r14,verify_facilities | 311 | brasl %r14,verify_facilities |
312 | /* Continue with startup code in head64.S */ | 312 | # For uncompressed images, continue in |
313 | # arch/s390/kernel/head64.S. For compressed images, continue in | ||
314 | # arch/s390/boot/compressed/head.S. | ||
313 | jg startup_continue | 315 | jg startup_continue |
314 | 316 | ||
315 | .Lstack: | 317 | .Lstack: |
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c index e390bbb16443..48352bffbc92 100644 --- a/arch/s390/lib/string.c +++ b/arch/s390/lib/string.c | |||
@@ -237,11 +237,10 @@ char * strrchr(const char * s, int c) | |||
237 | EXPORT_SYMBOL(strrchr); | 237 | EXPORT_SYMBOL(strrchr); |
238 | 238 | ||
239 | static inline int clcle(const char *s1, unsigned long l1, | 239 | static inline int clcle(const char *s1, unsigned long l1, |
240 | const char *s2, unsigned long l2, | 240 | const char *s2, unsigned long l2) |
241 | int *diff) | ||
242 | { | 241 | { |
243 | register unsigned long r2 asm("2") = (unsigned long) s1; | 242 | register unsigned long r2 asm("2") = (unsigned long) s1; |
244 | register unsigned long r3 asm("3") = (unsigned long) l2; | 243 | register unsigned long r3 asm("3") = (unsigned long) l1; |
245 | register unsigned long r4 asm("4") = (unsigned long) s2; | 244 | register unsigned long r4 asm("4") = (unsigned long) s2; |
246 | register unsigned long r5 asm("5") = (unsigned long) l2; | 245 | register unsigned long r5 asm("5") = (unsigned long) l2; |
247 | int cc; | 246 | int cc; |
@@ -252,7 +251,6 @@ static inline int clcle(const char *s1, unsigned long l1, | |||
252 | " srl %0,28" | 251 | " srl %0,28" |
253 | : "=&d" (cc), "+a" (r2), "+a" (r3), | 252 | : "=&d" (cc), "+a" (r2), "+a" (r3), |
254 | "+a" (r4), "+a" (r5) : : "cc"); | 253 | "+a" (r4), "+a" (r5) : : "cc"); |
255 | *diff = *(char *)r2 - *(char *)r4; | ||
256 | return cc; | 254 | return cc; |
257 | } | 255 | } |
258 | 256 | ||
@@ -270,9 +268,9 @@ char * strstr(const char * s1,const char * s2) | |||
270 | return (char *) s1; | 268 | return (char *) s1; |
271 | l1 = __strend(s1) - s1; | 269 | l1 = __strend(s1) - s1; |
272 | while (l1-- >= l2) { | 270 | while (l1-- >= l2) { |
273 | int cc, dummy; | 271 | int cc; |
274 | 272 | ||
275 | cc = clcle(s1, l1, s2, l2, &dummy); | 273 | cc = clcle(s1, l2, s2, l2); |
276 | if (!cc) | 274 | if (!cc) |
277 | return (char *) s1; | 275 | return (char *) s1; |
278 | s1++; | 276 | s1++; |
@@ -313,11 +311,11 @@ EXPORT_SYMBOL(memchr); | |||
313 | */ | 311 | */ |
314 | int memcmp(const void *cs, const void *ct, size_t n) | 312 | int memcmp(const void *cs, const void *ct, size_t n) |
315 | { | 313 | { |
316 | int ret, diff; | 314 | int ret; |
317 | 315 | ||
318 | ret = clcle(cs, n, ct, n, &diff); | 316 | ret = clcle(cs, n, ct, n); |
319 | if (ret) | 317 | if (ret) |
320 | ret = diff; | 318 | ret = ret == 1 ? -1 : 1; |
321 | return ret; | 319 | return ret; |
322 | } | 320 | } |
323 | EXPORT_SYMBOL(memcmp); | 321 | EXPORT_SYMBOL(memcmp); |
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index 7104ffb5a67f..af7cf28cf97e 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c | |||
@@ -252,6 +252,8 @@ static int change_page_attr(unsigned long addr, unsigned long end, | |||
252 | int rc = -EINVAL; | 252 | int rc = -EINVAL; |
253 | pgd_t *pgdp; | 253 | pgd_t *pgdp; |
254 | 254 | ||
255 | if (addr == end) | ||
256 | return 0; | ||
255 | if (end >= MODULES_END) | 257 | if (end >= MODULES_END) |
256 | return -EINVAL; | 258 | return -EINVAL; |
257 | mutex_lock(&cpa_mutex); | 259 | mutex_lock(&cpa_mutex); |
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 27a0228c9cae..b816971f5da4 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c | |||
@@ -355,6 +355,7 @@ void load_ucode_amd_ap(void) | |||
355 | unsigned int cpu = smp_processor_id(); | 355 | unsigned int cpu = smp_processor_id(); |
356 | struct equiv_cpu_entry *eq; | 356 | struct equiv_cpu_entry *eq; |
357 | struct microcode_amd *mc; | 357 | struct microcode_amd *mc; |
358 | u8 *cont = container; | ||
358 | u32 rev, eax; | 359 | u32 rev, eax; |
359 | u16 eq_id; | 360 | u16 eq_id; |
360 | 361 | ||
@@ -371,8 +372,11 @@ void load_ucode_amd_ap(void) | |||
371 | if (check_current_patch_level(&rev, false)) | 372 | if (check_current_patch_level(&rev, false)) |
372 | return; | 373 | return; |
373 | 374 | ||
375 | /* Add CONFIG_RANDOMIZE_MEMORY offset. */ | ||
376 | cont += PAGE_OFFSET - __PAGE_OFFSET_BASE; | ||
377 | |||
374 | eax = cpuid_eax(0x00000001); | 378 | eax = cpuid_eax(0x00000001); |
375 | eq = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ); | 379 | eq = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ); |
376 | 380 | ||
377 | eq_id = find_equiv_id(eq, eax); | 381 | eq_id = find_equiv_id(eq, eax); |
378 | if (!eq_id) | 382 | if (!eq_id) |
@@ -434,6 +438,9 @@ int __init save_microcode_in_initrd_amd(void) | |||
434 | else | 438 | else |
435 | container = cont_va; | 439 | container = cont_va; |
436 | 440 | ||
441 | /* Add CONFIG_RANDOMIZE_MEMORY offset. */ | ||
442 | container += PAGE_OFFSET - __PAGE_OFFSET_BASE; | ||
443 | |||
437 | eax = cpuid_eax(0x00000001); | 444 | eax = cpuid_eax(0x00000001); |
438 | eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); | 445 | eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); |
439 | 446 | ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2a6e84a30a54..4296beb8fdd3 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -100,10 +100,11 @@ EXPORT_PER_CPU_SYMBOL(cpu_info); | |||
100 | /* Logical package management. We might want to allocate that dynamically */ | 100 | /* Logical package management. We might want to allocate that dynamically */ |
101 | static int *physical_to_logical_pkg __read_mostly; | 101 | static int *physical_to_logical_pkg __read_mostly; |
102 | static unsigned long *physical_package_map __read_mostly;; | 102 | static unsigned long *physical_package_map __read_mostly;; |
103 | static unsigned long *logical_package_map __read_mostly; | ||
104 | static unsigned int max_physical_pkg_id __read_mostly; | 103 | static unsigned int max_physical_pkg_id __read_mostly; |
105 | unsigned int __max_logical_packages __read_mostly; | 104 | unsigned int __max_logical_packages __read_mostly; |
106 | EXPORT_SYMBOL(__max_logical_packages); | 105 | EXPORT_SYMBOL(__max_logical_packages); |
106 | static unsigned int logical_packages __read_mostly; | ||
107 | static bool logical_packages_frozen __read_mostly; | ||
107 | 108 | ||
108 | /* Maximum number of SMT threads on any online core */ | 109 | /* Maximum number of SMT threads on any online core */ |
109 | int __max_smt_threads __read_mostly; | 110 | int __max_smt_threads __read_mostly; |
@@ -277,14 +278,14 @@ int topology_update_package_map(unsigned int apicid, unsigned int cpu) | |||
277 | if (test_and_set_bit(pkg, physical_package_map)) | 278 | if (test_and_set_bit(pkg, physical_package_map)) |
278 | goto found; | 279 | goto found; |
279 | 280 | ||
280 | new = find_first_zero_bit(logical_package_map, __max_logical_packages); | 281 | if (logical_packages_frozen) { |
281 | if (new >= __max_logical_packages) { | ||
282 | physical_to_logical_pkg[pkg] = -1; | 282 | physical_to_logical_pkg[pkg] = -1; |
283 | pr_warn("APIC(%x) Package %u exceeds logical package map\n", | 283 | pr_warn("APIC(%x) Package %u exceeds logical package max\n", |
284 | apicid, pkg); | 284 | apicid, pkg); |
285 | return -ENOSPC; | 285 | return -ENOSPC; |
286 | } | 286 | } |
287 | set_bit(new, logical_package_map); | 287 | |
288 | new = logical_packages++; | ||
288 | pr_info("APIC(%x) Converting physical %u to logical package %u\n", | 289 | pr_info("APIC(%x) Converting physical %u to logical package %u\n", |
289 | apicid, pkg, new); | 290 | apicid, pkg, new); |
290 | physical_to_logical_pkg[pkg] = new; | 291 | physical_to_logical_pkg[pkg] = new; |
@@ -341,6 +342,7 @@ static void __init smp_init_package_map(void) | |||
341 | } | 342 | } |
342 | 343 | ||
343 | __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus); | 344 | __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus); |
345 | logical_packages = 0; | ||
344 | 346 | ||
345 | /* | 347 | /* |
346 | * Possibly larger than what we need as the number of apic ids per | 348 | * Possibly larger than what we need as the number of apic ids per |
@@ -352,10 +354,6 @@ static void __init smp_init_package_map(void) | |||
352 | memset(physical_to_logical_pkg, 0xff, size); | 354 | memset(physical_to_logical_pkg, 0xff, size); |
353 | size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long); | 355 | size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long); |
354 | physical_package_map = kzalloc(size, GFP_KERNEL); | 356 | physical_package_map = kzalloc(size, GFP_KERNEL); |
355 | size = BITS_TO_LONGS(__max_logical_packages) * sizeof(unsigned long); | ||
356 | logical_package_map = kzalloc(size, GFP_KERNEL); | ||
357 | |||
358 | pr_info("Max logical packages: %u\n", __max_logical_packages); | ||
359 | 357 | ||
360 | for_each_present_cpu(cpu) { | 358 | for_each_present_cpu(cpu) { |
361 | unsigned int apicid = apic->cpu_present_to_apicid(cpu); | 359 | unsigned int apicid = apic->cpu_present_to_apicid(cpu); |
@@ -369,6 +367,15 @@ static void __init smp_init_package_map(void) | |||
369 | set_cpu_possible(cpu, false); | 367 | set_cpu_possible(cpu, false); |
370 | set_cpu_present(cpu, false); | 368 | set_cpu_present(cpu, false); |
371 | } | 369 | } |
370 | |||
371 | if (logical_packages > __max_logical_packages) { | ||
372 | pr_warn("Detected more packages (%u), then computed by BIOS data (%u).\n", | ||
373 | logical_packages, __max_logical_packages); | ||
374 | logical_packages_frozen = true; | ||
375 | __max_logical_packages = logical_packages; | ||
376 | } | ||
377 | |||
378 | pr_info("Max logical packages: %u\n", __max_logical_packages); | ||
372 | } | 379 | } |
373 | 380 | ||
374 | void __init smp_store_boot_cpu_info(void) | 381 | void __init smp_store_boot_cpu_info(void) |
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index a3e3ccc87138..9634557a5444 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c | |||
@@ -113,7 +113,7 @@ static int set_up_temporary_mappings(void) | |||
113 | return result; | 113 | return result; |
114 | } | 114 | } |
115 | 115 | ||
116 | temp_level4_pgt = (unsigned long)pgd - __PAGE_OFFSET; | 116 | temp_level4_pgt = __pa(pgd); |
117 | return 0; | 117 | return 0; |
118 | } | 118 | } |
119 | 119 | ||
diff --git a/crypto/Kconfig b/crypto/Kconfig index a9377bef25e3..84d71482bf08 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
@@ -439,7 +439,7 @@ config CRYPTO_CRC32C_INTEL | |||
439 | 439 | ||
440 | config CRYPT_CRC32C_VPMSUM | 440 | config CRYPT_CRC32C_VPMSUM |
441 | tristate "CRC32c CRC algorithm (powerpc64)" | 441 | tristate "CRC32c CRC algorithm (powerpc64)" |
442 | depends on PPC64 | 442 | depends on PPC64 && ALTIVEC |
443 | select CRYPTO_HASH | 443 | select CRYPTO_HASH |
444 | select CRC32 | 444 | select CRC32 |
445 | help | 445 | help |
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c index 62264397a2d2..7e8ed96236ce 100644 --- a/crypto/sha3_generic.c +++ b/crypto/sha3_generic.c | |||
@@ -24,14 +24,14 @@ | |||
24 | #define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y)))) | 24 | #define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y)))) |
25 | 25 | ||
26 | static const u64 keccakf_rndc[24] = { | 26 | static const u64 keccakf_rndc[24] = { |
27 | 0x0000000000000001, 0x0000000000008082, 0x800000000000808a, | 27 | 0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL, |
28 | 0x8000000080008000, 0x000000000000808b, 0x0000000080000001, | 28 | 0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL, |
29 | 0x8000000080008081, 0x8000000000008009, 0x000000000000008a, | 29 | 0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008aULL, |
30 | 0x0000000000000088, 0x0000000080008009, 0x000000008000000a, | 30 | 0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL, |
31 | 0x000000008000808b, 0x800000000000008b, 0x8000000000008089, | 31 | 0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL, |
32 | 0x8000000000008003, 0x8000000000008002, 0x8000000000000080, | 32 | 0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL, |
33 | 0x000000000000800a, 0x800000008000000a, 0x8000000080008081, | 33 | 0x000000000000800aULL, 0x800000008000000aULL, 0x8000000080008081ULL, |
34 | 0x8000000000008080, 0x0000000080000001, 0x8000000080008008 | 34 | 0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL |
35 | }; | 35 | }; |
36 | 36 | ||
37 | static const int keccakf_rotc[24] = { | 37 | static const int keccakf_rotc[24] = { |
diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c index c3cb76b363c6..9efdf1de4035 100644 --- a/drivers/bus/vexpress-config.c +++ b/drivers/bus/vexpress-config.c | |||
@@ -178,6 +178,7 @@ static int vexpress_config_populate(struct device_node *node) | |||
178 | 178 | ||
179 | parent = class_find_device(vexpress_config_class, NULL, bridge, | 179 | parent = class_find_device(vexpress_config_class, NULL, bridge, |
180 | vexpress_config_node_match); | 180 | vexpress_config_node_match); |
181 | of_node_put(bridge); | ||
181 | if (WARN_ON(!parent)) | 182 | if (WARN_ON(!parent)) |
182 | return -ENODEV; | 183 | return -ENODEV; |
183 | 184 | ||
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c index 7e3fd375a627..92f6e4deee74 100644 --- a/drivers/clocksource/bcm_kona_timer.c +++ b/drivers/clocksource/bcm_kona_timer.c | |||
@@ -66,10 +66,10 @@ static void kona_timer_disable_and_clear(void __iomem *base) | |||
66 | 66 | ||
67 | } | 67 | } |
68 | 68 | ||
69 | static void | 69 | static int |
70 | kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw) | 70 | kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw) |
71 | { | 71 | { |
72 | int loop_limit = 4; | 72 | int loop_limit = 3; |
73 | 73 | ||
74 | /* | 74 | /* |
75 | * Read 64-bit free running counter | 75 | * Read 64-bit free running counter |
@@ -83,18 +83,19 @@ kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw) | |||
83 | * if new hi-word is equal to previously read hi-word then stop. | 83 | * if new hi-word is equal to previously read hi-word then stop. |
84 | */ | 84 | */ |
85 | 85 | ||
86 | while (--loop_limit) { | 86 | do { |
87 | *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET); | 87 | *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET); |
88 | *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET); | 88 | *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET); |
89 | if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET)) | 89 | if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET)) |
90 | break; | 90 | break; |
91 | } | 91 | } while (--loop_limit); |
92 | if (!loop_limit) { | 92 | if (!loop_limit) { |
93 | pr_err("bcm_kona_timer: getting counter failed.\n"); | 93 | pr_err("bcm_kona_timer: getting counter failed.\n"); |
94 | pr_err(" Timer will be impacted\n"); | 94 | pr_err(" Timer will be impacted\n"); |
95 | return -ETIMEDOUT; | ||
95 | } | 96 | } |
96 | 97 | ||
97 | return; | 98 | return 0; |
98 | } | 99 | } |
99 | 100 | ||
100 | static int kona_timer_set_next_event(unsigned long clc, | 101 | static int kona_timer_set_next_event(unsigned long clc, |
@@ -112,8 +113,11 @@ static int kona_timer_set_next_event(unsigned long clc, | |||
112 | 113 | ||
113 | uint32_t lsw, msw; | 114 | uint32_t lsw, msw; |
114 | uint32_t reg; | 115 | uint32_t reg; |
116 | int ret; | ||
115 | 117 | ||
116 | kona_timer_get_counter(timers.tmr_regs, &msw, &lsw); | 118 | ret = kona_timer_get_counter(timers.tmr_regs, &msw, &lsw); |
119 | if (ret) | ||
120 | return ret; | ||
117 | 121 | ||
118 | /* Load the "next" event tick value */ | 122 | /* Load the "next" event tick value */ |
119 | writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET); | 123 | writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET); |
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index d91e8725917c..b4b3ab5a11ad 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c | |||
@@ -164,7 +164,7 @@ void __init gic_clocksource_init(unsigned int frequency) | |||
164 | gic_start_count(); | 164 | gic_start_count(); |
165 | } | 165 | } |
166 | 166 | ||
167 | static void __init gic_clocksource_of_init(struct device_node *node) | 167 | static int __init gic_clocksource_of_init(struct device_node *node) |
168 | { | 168 | { |
169 | struct clk *clk; | 169 | struct clk *clk; |
170 | int ret; | 170 | int ret; |
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index 719b478d136e..3c39e6f45971 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c | |||
@@ -338,7 +338,6 @@ static int __init armada_xp_timer_init(struct device_node *np) | |||
338 | struct clk *clk = of_clk_get_by_name(np, "fixed"); | 338 | struct clk *clk = of_clk_get_by_name(np, "fixed"); |
339 | int ret; | 339 | int ret; |
340 | 340 | ||
341 | clk = of_clk_get(np, 0); | ||
342 | if (IS_ERR(clk)) { | 341 | if (IS_ERR(clk)) { |
343 | pr_err("Failed to get clock"); | 342 | pr_err("Failed to get clock"); |
344 | return PTR_ERR(clk); | 343 | return PTR_ERR(clk); |
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index ea8189f4b021..6dc597126b79 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -441,6 +441,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) | |||
441 | OP_ALG_AAI_CTR_MOD128); | 441 | OP_ALG_AAI_CTR_MOD128); |
442 | const bool is_rfc3686 = alg->caam.rfc3686; | 442 | const bool is_rfc3686 = alg->caam.rfc3686; |
443 | 443 | ||
444 | if (!ctx->authsize) | ||
445 | return 0; | ||
446 | |||
444 | /* NULL encryption / decryption */ | 447 | /* NULL encryption / decryption */ |
445 | if (!ctx->enckeylen) | 448 | if (!ctx->enckeylen) |
446 | return aead_null_set_sh_desc(aead); | 449 | return aead_null_set_sh_desc(aead); |
@@ -614,7 +617,7 @@ skip_enc: | |||
614 | keys_fit_inline = true; | 617 | keys_fit_inline = true; |
615 | 618 | ||
616 | /* aead_givencrypt shared descriptor */ | 619 | /* aead_givencrypt shared descriptor */ |
617 | desc = ctx->sh_desc_givenc; | 620 | desc = ctx->sh_desc_enc; |
618 | 621 | ||
619 | /* Note: Context registers are saved. */ | 622 | /* Note: Context registers are saved. */ |
620 | init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); | 623 | init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); |
@@ -645,13 +648,13 @@ copy_iv: | |||
645 | append_operation(desc, ctx->class2_alg_type | | 648 | append_operation(desc, ctx->class2_alg_type | |
646 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); | 649 | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); |
647 | 650 | ||
648 | /* ivsize + cryptlen = seqoutlen - authsize */ | ||
649 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); | ||
650 | |||
651 | /* Read and write assoclen bytes */ | 651 | /* Read and write assoclen bytes */ |
652 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); | 652 | append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); |
653 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); | 653 | append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); |
654 | 654 | ||
655 | /* ivsize + cryptlen = seqoutlen - authsize */ | ||
656 | append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); | ||
657 | |||
655 | /* Skip assoc data */ | 658 | /* Skip assoc data */ |
656 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); | 659 | append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); |
657 | 660 | ||
@@ -697,7 +700,7 @@ copy_iv: | |||
697 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, | 700 | ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, |
698 | desc_bytes(desc), | 701 | desc_bytes(desc), |
699 | DMA_TO_DEVICE); | 702 | DMA_TO_DEVICE); |
700 | if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) { | 703 | if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) { |
701 | dev_err(jrdev, "unable to map shared descriptor\n"); | 704 | dev_err(jrdev, "unable to map shared descriptor\n"); |
702 | return -ENOMEM; | 705 | return -ENOMEM; |
703 | } | 706 | } |
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index f1ecc8df8d41..36365b3efdfd 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c | |||
@@ -1898,6 +1898,7 @@ caam_hash_alloc(struct caam_hash_template *template, | |||
1898 | template->name); | 1898 | template->name); |
1899 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", | 1899 | snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", |
1900 | template->driver_name); | 1900 | template->driver_name); |
1901 | t_alg->ahash_alg.setkey = NULL; | ||
1901 | } | 1902 | } |
1902 | alg->cra_module = THIS_MODULE; | 1903 | alg->cra_module = THIS_MODULE; |
1903 | alg->cra_init = caam_hash_cra_init; | 1904 | alg->cra_init = caam_hash_cra_init; |
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index d0c1dab9b435..dff1a4a6dc1b 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig | |||
@@ -251,6 +251,14 @@ config EDAC_SBRIDGE | |||
251 | Support for error detection and correction the Intel | 251 | Support for error detection and correction the Intel |
252 | Sandy Bridge, Ivy Bridge and Haswell Integrated Memory Controllers. | 252 | Sandy Bridge, Ivy Bridge and Haswell Integrated Memory Controllers. |
253 | 253 | ||
254 | config EDAC_SKX | ||
255 | tristate "Intel Skylake server Integrated MC" | ||
256 | depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL | ||
257 | depends on PCI_MMCONFIG | ||
258 | help | ||
259 | Support for error detection and correction the Intel | ||
260 | Skylake server Integrated Memory Controllers. | ||
261 | |||
254 | config EDAC_MPC85XX | 262 | config EDAC_MPC85XX |
255 | tristate "Freescale MPC83xx / MPC85xx" | 263 | tristate "Freescale MPC83xx / MPC85xx" |
256 | depends on EDAC_MM_EDAC && FSL_SOC | 264 | depends on EDAC_MM_EDAC && FSL_SOC |
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index f9e4a3e0e6e9..986049925b08 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile | |||
@@ -31,6 +31,7 @@ obj-$(CONFIG_EDAC_I5400) += i5400_edac.o | |||
31 | obj-$(CONFIG_EDAC_I7300) += i7300_edac.o | 31 | obj-$(CONFIG_EDAC_I7300) += i7300_edac.o |
32 | obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o | 32 | obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o |
33 | obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o | 33 | obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o |
34 | obj-$(CONFIG_EDAC_SKX) += skx_edac.o | ||
34 | obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o | 35 | obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o |
35 | obj-$(CONFIG_EDAC_E752X) += e752x_edac.o | 36 | obj-$(CONFIG_EDAC_E752X) += e752x_edac.o |
36 | obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o | 37 | obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o |
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 4fb2eb7c800d..ce0067b7a2f6 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c | |||
@@ -552,9 +552,9 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = { | |||
552 | /* Knight's Landing Support */ | 552 | /* Knight's Landing Support */ |
553 | /* | 553 | /* |
554 | * KNL's memory channels are swizzled between memory controllers. | 554 | * KNL's memory channels are swizzled between memory controllers. |
555 | * MC0 is mapped to CH3,5,6 and MC1 is mapped to CH0,1,2 | 555 | * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2 |
556 | */ | 556 | */ |
557 | #define knl_channel_remap(channel) ((channel + 3) % 6) | 557 | #define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3) |
558 | 558 | ||
559 | /* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */ | 559 | /* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */ |
560 | #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840 | 560 | #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840 |
@@ -1286,7 +1286,7 @@ static u32 knl_get_mc_route(int entry, u32 reg) | |||
1286 | mc = GET_BITFIELD(reg, entry*3, (entry*3)+2); | 1286 | mc = GET_BITFIELD(reg, entry*3, (entry*3)+2); |
1287 | chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1); | 1287 | chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1); |
1288 | 1288 | ||
1289 | return knl_channel_remap(mc*3 + chan); | 1289 | return knl_channel_remap(mc, chan); |
1290 | } | 1290 | } |
1291 | 1291 | ||
1292 | /* | 1292 | /* |
@@ -2997,8 +2997,15 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci, | |||
2997 | } else { | 2997 | } else { |
2998 | char A = *("A"); | 2998 | char A = *("A"); |
2999 | 2999 | ||
3000 | channel = knl_channel_remap(channel); | 3000 | /* |
3001 | * Reported channel is in range 0-2, so we can't map it | ||
3002 | * back to mc. To figure out mc we check machine check | ||
3003 | * bank register that reported this error. | ||
3004 | * bank15 means mc0 and bank16 means mc1. | ||
3005 | */ | ||
3006 | channel = knl_channel_remap(m->bank == 16, channel); | ||
3001 | channel_mask = 1 << channel; | 3007 | channel_mask = 1 << channel; |
3008 | |||
3002 | snprintf(msg, sizeof(msg), | 3009 | snprintf(msg, sizeof(msg), |
3003 | "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)", | 3010 | "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)", |
3004 | overflow ? " OVERFLOW" : "", | 3011 | overflow ? " OVERFLOW" : "", |
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c new file mode 100644 index 000000000000..0ff4878c2aa1 --- /dev/null +++ b/drivers/edac/skx_edac.c | |||
@@ -0,0 +1,1121 @@ | |||
1 | /* | ||
2 | * EDAC driver for Intel(R) Xeon(R) Skylake processors | ||
3 | * Copyright (c) 2016, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | */ | ||
14 | |||
15 | #include <linux/module.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/pci.h> | ||
18 | #include <linux/pci_ids.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/edac.h> | ||
22 | #include <linux/mmzone.h> | ||
23 | #include <linux/smp.h> | ||
24 | #include <linux/bitmap.h> | ||
25 | #include <linux/math64.h> | ||
26 | #include <linux/mod_devicetable.h> | ||
27 | #include <asm/cpu_device_id.h> | ||
28 | #include <asm/processor.h> | ||
29 | #include <asm/mce.h> | ||
30 | |||
31 | #include "edac_core.h" | ||
32 | |||
33 | #define SKX_REVISION " Ver: 1.0 " | ||
34 | |||
35 | /* | ||
36 | * Debug macros | ||
37 | */ | ||
38 | #define skx_printk(level, fmt, arg...) \ | ||
39 | edac_printk(level, "skx", fmt, ##arg) | ||
40 | |||
41 | #define skx_mc_printk(mci, level, fmt, arg...) \ | ||
42 | edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg) | ||
43 | |||
44 | /* | ||
45 | * Get a bit field at register value <v>, from bit <lo> to bit <hi> | ||
46 | */ | ||
47 | #define GET_BITFIELD(v, lo, hi) \ | ||
48 | (((v) & GENMASK_ULL((hi), (lo))) >> (lo)) | ||
49 | |||
50 | static LIST_HEAD(skx_edac_list); | ||
51 | |||
52 | static u64 skx_tolm, skx_tohm; | ||
53 | |||
54 | #define NUM_IMC 2 /* memory controllers per socket */ | ||
55 | #define NUM_CHANNELS 3 /* channels per memory controller */ | ||
56 | #define NUM_DIMMS 2 /* Max DIMMS per channel */ | ||
57 | |||
58 | #define MASK26 0x3FFFFFF /* Mask for 2^26 */ | ||
59 | #define MASK29 0x1FFFFFFF /* Mask for 2^29 */ | ||
60 | |||
61 | /* | ||
62 | * Each cpu socket contains some pci devices that provide global | ||
63 | * information, and also some that are local to each of the two | ||
64 | * memory controllers on the die. | ||
65 | */ | ||
66 | struct skx_dev { | ||
67 | struct list_head list; | ||
68 | u8 bus[4]; | ||
69 | struct pci_dev *sad_all; | ||
70 | struct pci_dev *util_all; | ||
71 | u32 mcroute; | ||
72 | struct skx_imc { | ||
73 | struct mem_ctl_info *mci; | ||
74 | u8 mc; /* system wide mc# */ | ||
75 | u8 lmc; /* socket relative mc# */ | ||
76 | u8 src_id, node_id; | ||
77 | struct skx_channel { | ||
78 | struct pci_dev *cdev; | ||
79 | struct skx_dimm { | ||
80 | u8 close_pg; | ||
81 | u8 bank_xor_enable; | ||
82 | u8 fine_grain_bank; | ||
83 | u8 rowbits; | ||
84 | u8 colbits; | ||
85 | } dimms[NUM_DIMMS]; | ||
86 | } chan[NUM_CHANNELS]; | ||
87 | } imc[NUM_IMC]; | ||
88 | }; | ||
89 | static int skx_num_sockets; | ||
90 | |||
91 | struct skx_pvt { | ||
92 | struct skx_imc *imc; | ||
93 | }; | ||
94 | |||
95 | struct decoded_addr { | ||
96 | struct skx_dev *dev; | ||
97 | u64 addr; | ||
98 | int socket; | ||
99 | int imc; | ||
100 | int channel; | ||
101 | u64 chan_addr; | ||
102 | int sktways; | ||
103 | int chanways; | ||
104 | int dimm; | ||
105 | int rank; | ||
106 | int channel_rank; | ||
107 | u64 rank_address; | ||
108 | int row; | ||
109 | int column; | ||
110 | int bank_address; | ||
111 | int bank_group; | ||
112 | }; | ||
113 | |||
114 | static struct skx_dev *get_skx_dev(u8 bus, u8 idx) | ||
115 | { | ||
116 | struct skx_dev *d; | ||
117 | |||
118 | list_for_each_entry(d, &skx_edac_list, list) { | ||
119 | if (d->bus[idx] == bus) | ||
120 | return d; | ||
121 | } | ||
122 | |||
123 | return NULL; | ||
124 | } | ||
125 | |||
126 | enum munittype { | ||
127 | CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD | ||
128 | }; | ||
129 | |||
130 | struct munit { | ||
131 | u16 did; | ||
132 | u16 devfn[NUM_IMC]; | ||
133 | u8 busidx; | ||
134 | u8 per_socket; | ||
135 | enum munittype mtype; | ||
136 | }; | ||
137 | |||
/*
 * List of PCI device ids that we need together with some device
 * number and function numbers to tell which memory controller the
 * device belongs to.
 */
static const struct munit skx_all_munits[] = {
	{ 0x2054, { }, 1, 1, SAD_ALL },		/* SAD decode registers */
	{ 0x2055, { }, 1, 1, UTIL_ALL },	/* source/node id registers */
	{ 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 },
	{ 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 },
	{ 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 },
	{ 0x208e, { }, 1, 0, SAD },		/* per-core, per_socket == 0 skips the count check */
	{ }					/* terminator */
};
152 | |||
153 | /* | ||
154 | * We use the per-socket device 0x2016 to count how many sockets are present, | ||
155 | * and to detemine which PCI buses are associated with each socket. Allocate | ||
156 | * and build the full list of all the skx_dev structures that we need here. | ||
157 | */ | ||
158 | static int get_all_bus_mappings(void) | ||
159 | { | ||
160 | struct pci_dev *pdev, *prev; | ||
161 | struct skx_dev *d; | ||
162 | u32 reg; | ||
163 | int ndev = 0; | ||
164 | |||
165 | prev = NULL; | ||
166 | for (;;) { | ||
167 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2016, prev); | ||
168 | if (!pdev) | ||
169 | break; | ||
170 | ndev++; | ||
171 | d = kzalloc(sizeof(*d), GFP_KERNEL); | ||
172 | if (!d) { | ||
173 | pci_dev_put(pdev); | ||
174 | return -ENOMEM; | ||
175 | } | ||
176 | pci_read_config_dword(pdev, 0xCC, ®); | ||
177 | d->bus[0] = GET_BITFIELD(reg, 0, 7); | ||
178 | d->bus[1] = GET_BITFIELD(reg, 8, 15); | ||
179 | d->bus[2] = GET_BITFIELD(reg, 16, 23); | ||
180 | d->bus[3] = GET_BITFIELD(reg, 24, 31); | ||
181 | edac_dbg(2, "busses: %x, %x, %x, %x\n", | ||
182 | d->bus[0], d->bus[1], d->bus[2], d->bus[3]); | ||
183 | list_add_tail(&d->list, &skx_edac_list); | ||
184 | skx_num_sockets++; | ||
185 | prev = pdev; | ||
186 | } | ||
187 | |||
188 | return ndev; | ||
189 | } | ||
190 | |||
/*
 * Find every PCI device matching the munit description @m, enable it,
 * and attach it to the right skx_dev / imc slot.  For SAD devices we
 * instead record the mcroute register (and don't count the device).
 * Returns the number of devices found, or -ENODEV on any
 * inconsistency.
 */
static int get_all_munits(const struct munit *m)
{
	struct pci_dev *pdev, *prev;
	struct skx_dev *d;
	u32 reg;
	int i = 0, ndev = 0;

	prev = NULL;
	for (;;) {
		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev);
		if (!pdev)
			break;
		ndev++;
		if (m->per_socket == NUM_IMC) {
			/* channel devices: map devfn to the owning IMC index */
			for (i = 0; i < NUM_IMC; i++)
				if (m->devfn[i] == pdev->devfn)
					break;
			if (i == NUM_IMC)
				goto fail;
		}
		d = get_skx_dev(pdev->bus->number, m->busidx);
		if (!d)
			goto fail;

		/* Be sure that the device is enabled */
		if (unlikely(pci_enable_device(pdev) < 0)) {
			skx_printk(KERN_ERR,
				"Couldn't enable %04x:%04x\n", PCI_VENDOR_ID_INTEL, m->did);
			goto fail;
		}

		switch (m->mtype) {
		case CHAN0: case CHAN1: case CHAN2:
			/* mtype doubles as the channel index; i is the IMC index */
			pci_dev_get(pdev);
			d->imc[i].chan[m->mtype].cdev = pdev;
			break;
		case SAD_ALL:
			pci_dev_get(pdev);
			d->sad_all = pdev;
			break;
		case UTIL_ALL:
			pci_dev_get(pdev);
			d->util_all = pdev;
			break;
		case SAD:
			/*
			 * one of these devices per core, including cores
			 * that don't exist on this SKU. Ignore any that
			 * read a route table of zero, make sure all the
			 * non-zero values match.
			 */
			pci_read_config_dword(pdev, 0xB4, &reg);
			if (reg != 0) {
				if (d->mcroute == 0)
					d->mcroute = reg;
				else if (d->mcroute != reg) {
					skx_printk(KERN_ERR,
						"mcroute mismatch\n");
					goto fail;
				}
			}
			/* SAD devices are not kept, so don't count them */
			ndev--;
			break;
		}

		/* pci_get_device() drops the reference on @prev next time around */
		prev = pdev;
	}

	return ndev;
fail:
	pci_dev_put(pdev);
	return -ENODEV;
}
264 | |||
265 | const struct x86_cpu_id skx_cpuids[] = { | ||
266 | { X86_VENDOR_INTEL, 6, 0x55, 0, 0 }, /* Skylake */ | ||
267 | { } | ||
268 | }; | ||
269 | MODULE_DEVICE_TABLE(x86cpu, skx_cpuids); | ||
270 | |||
/* Read this socket's source id from the util_all device. */
static u8 get_src_id(struct skx_dev *d)
{
	u32 reg;

	/* bits 12:14 of config register 0xF0 hold the source id */
	pci_read_config_dword(d->util_all, 0xF0, &reg);

	return GET_BITFIELD(reg, 12, 14);
}
279 | |||
/* Read this socket's node id from the util_all device. */
static u8 skx_get_node_id(struct skx_dev *d)
{
	u32 reg;

	/* bits 0:2 of config register 0xF4 hold the node id */
	pci_read_config_dword(d->util_all, 0xF4, &reg);

	return GET_BITFIELD(reg, 0, 2);
}
288 | |||
289 | static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval, | ||
290 | int maxval, char *name) | ||
291 | { | ||
292 | u32 val = GET_BITFIELD(reg, lobit, hibit); | ||
293 | |||
294 | if (val < minval || val > maxval) { | ||
295 | edac_dbg(2, "bad %s = %d (raw=%x)\n", name, val, reg); | ||
296 | return -EINVAL; | ||
297 | } | ||
298 | return val + add; | ||
299 | } | ||
300 | |||
/* Bit 15 of the MTR register: DIMM populated in this slot */
#define IS_DIMM_PRESENT(mtr)		GET_BITFIELD((mtr), 15, 15)

/*
 * The rank field (bits 12:13) encodes log2(number of ranks), so 0
 * means a single-rank DIMM.  The previous minimum of 1 made
 * get_dimm_attr() reject single-rank DIMMs with -EINVAL; allow 0.
 */
#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 0, 2, "ranks")
#define numrow(reg) get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows")
#define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols")
306 | |||
307 | static int get_width(u32 mtr) | ||
308 | { | ||
309 | switch (GET_BITFIELD(mtr, 8, 9)) { | ||
310 | case 0: | ||
311 | return DEV_X4; | ||
312 | case 1: | ||
313 | return DEV_X8; | ||
314 | case 2: | ||
315 | return DEV_X16; | ||
316 | } | ||
317 | return DEV_UNKNOWN; | ||
318 | } | ||
319 | |||
/*
 * Read TOLM (top of low memory) and TOHM (top of high memory) from
 * device 0x2034 into the module-wide skx_tolm/skx_tohm used for the
 * sanity check in skx_sad_decode().  Returns 0 on success, -ENODEV if
 * the device is missing.
 */
static int skx_get_hi_lo(void)
{
	struct pci_dev *pdev;
	u32 reg;

	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2034, NULL);
	if (!pdev) {
		edac_dbg(0, "Can't get tolm/tohm\n");
		return -ENODEV;
	}

	/* TOLM is one dword at 0xD0; TOHM spans 0xD4 (low) and 0xD8 (high) */
	pci_read_config_dword(pdev, 0xD0, &reg);
	skx_tolm = reg;
	pci_read_config_dword(pdev, 0xD4, &reg);
	skx_tohm = reg;
	pci_read_config_dword(pdev, 0xD8, &reg);
	skx_tohm |= (u64)reg << 32;

	pci_dev_put(pdev);
	edac_dbg(2, "tolm=%llx tohm=%llx\n", skx_tolm, skx_tohm);

	return 0;
}
343 | |||
/*
 * Parse the MTR register @mtr (plus the AMAP register @amap) for one
 * DIMM slot.  Records the geometry needed later by skx_rir_decode()/
 * skx_mad_decode() and fills in the EDAC dimm_info.  Returns 1 if a
 * DIMM is present, 0 if the slot is empty.
 *
 * NOTE(review): numrank/numrow/numcol can return -EINVAL, which is
 * not checked here before being used in the size shift below — TODO
 * confirm whether invalid geometry can reach this point.
 */
static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
			 struct skx_imc *imc, int chan, int dimmno)
{
	int banks = 16, ranks, rows, cols, npages;
	u64 size;

	if (!IS_DIMM_PRESENT(mtr))
		return 0;
	ranks = numrank(mtr);
	rows = numrow(mtr);
	cols = numcol(mtr);

	/*
	 * Compute size in 8-byte (2^3) words, then shift to MiB (2^20)
	 */
	size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
	npages = MiB_TO_PAGES(size);

	edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
		 imc->mc, chan, dimmno, size, npages,
		 banks, ranks, rows, cols);

	/* Stash the decode parameters for the RIR/MAD stages */
	imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
	imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
	imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
	imc->chan[chan].dimms[dimmno].rowbits = rows;
	imc->chan[chan].dimms[dimmno].colbits = cols;

	/* Describe the DIMM to the EDAC core */
	dimm->nr_pages = npages;
	dimm->grain = 32;
	dimm->dtype = get_width(mtr);
	dimm->mtype = MEM_DDR4;
	dimm->edac_mode = EDAC_SECDED; /* likely better than this */
	snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
		 imc->src_id, imc->lmc, chan, dimmno);

	return 1;
}
382 | |||
383 | #define SKX_GET_MTMTR(dev, reg) \ | ||
384 | pci_read_config_dword((dev), 0x87c, ®) | ||
385 | |||
386 | static bool skx_check_ecc(struct pci_dev *pdev) | ||
387 | { | ||
388 | u32 mtmtr; | ||
389 | |||
390 | SKX_GET_MTMTR(pdev, mtmtr); | ||
391 | |||
392 | return !!GET_BITFIELD(mtmtr, 2, 2); | ||
393 | } | ||
394 | |||
/*
 * Enumerate the DIMMs behind each channel of this memory controller
 * and populate the EDAC dimm table.  Fails with -ENODEV if DIMMs are
 * present but ECC is disabled.
 */
static int skx_get_dimm_config(struct mem_ctl_info *mci)
{
	struct skx_pvt *pvt = mci->pvt_info;
	struct skx_imc *imc = pvt->imc;
	struct dimm_info *dimm;
	int i, j;
	u32 mtr, amap;
	int ndimms;

	for (i = 0; i < NUM_CHANNELS; i++) {
		ndimms = 0;
		/* per-channel AMAP register at offset 0x8C */
		pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
		for (j = 0; j < NUM_DIMMS; j++) {
			dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
					     mci->n_layers, i, j, 0);
			/* MTR registers start at 0x80, one dword per DIMM */
			pci_read_config_dword(imc->chan[i].cdev,
					      0x80 + 4*j, &mtr);
			ndimms += get_dimm_info(mtr, amap, dimm, imc, i, j);
		}
		/*
		 * NOTE(review): this always reads channel 0's device even
		 * though we're inside the per-channel loop — presumably the
		 * ECC bit is IMC-wide; confirm, otherwise chan[i] was meant.
		 */
		if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) {
			skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
			return -ENODEV;
		}
	}

	return 0;
}
422 | |||
/*
 * Undo skx_register_mci() for one memory controller: remove it from
 * the EDAC core and free the control structure.  Safe to call when the
 * controller was never registered (imc->mci == NULL).
 */
static void skx_unregister_mci(struct skx_imc *imc)
{
	struct mem_ctl_info *mci = imc->mci;

	if (!mci)
		return;

	edac_dbg(0, "MC%d: mci = %p\n", imc->mc, mci);

	/* Remove MC sysfs nodes */
	edac_mc_del_mc(mci->pdev);

	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
	kfree(mci->ctl_name);	/* allocated by kasprintf() in skx_register_mci() */
	edac_mc_free(mci);
}
439 | |||
440 | static int skx_register_mci(struct skx_imc *imc) | ||
441 | { | ||
442 | struct mem_ctl_info *mci; | ||
443 | struct edac_mc_layer layers[2]; | ||
444 | struct pci_dev *pdev = imc->chan[0].cdev; | ||
445 | struct skx_pvt *pvt; | ||
446 | int rc; | ||
447 | |||
448 | /* allocate a new MC control structure */ | ||
449 | layers[0].type = EDAC_MC_LAYER_CHANNEL; | ||
450 | layers[0].size = NUM_CHANNELS; | ||
451 | layers[0].is_virt_csrow = false; | ||
452 | layers[1].type = EDAC_MC_LAYER_SLOT; | ||
453 | layers[1].size = NUM_DIMMS; | ||
454 | layers[1].is_virt_csrow = true; | ||
455 | mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers, | ||
456 | sizeof(struct skx_pvt)); | ||
457 | |||
458 | if (unlikely(!mci)) | ||
459 | return -ENOMEM; | ||
460 | |||
461 | edac_dbg(0, "MC#%d: mci = %p\n", imc->mc, mci); | ||
462 | |||
463 | /* Associate skx_dev and mci for future usage */ | ||
464 | imc->mci = mci; | ||
465 | pvt = mci->pvt_info; | ||
466 | pvt->imc = imc; | ||
467 | |||
468 | mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d", | ||
469 | imc->node_id, imc->lmc); | ||
470 | mci->mtype_cap = MEM_FLAG_DDR4; | ||
471 | mci->edac_ctl_cap = EDAC_FLAG_NONE; | ||
472 | mci->edac_cap = EDAC_FLAG_NONE; | ||
473 | mci->mod_name = "skx_edac.c"; | ||
474 | mci->dev_name = pci_name(imc->chan[0].cdev); | ||
475 | mci->mod_ver = SKX_REVISION; | ||
476 | mci->ctl_page_to_phys = NULL; | ||
477 | |||
478 | rc = skx_get_dimm_config(mci); | ||
479 | if (rc < 0) | ||
480 | goto fail; | ||
481 | |||
482 | /* record ptr to the generic device */ | ||
483 | mci->pdev = &pdev->dev; | ||
484 | |||
485 | /* add this new MC control structure to EDAC's list of MCs */ | ||
486 | if (unlikely(edac_mc_add_mc(mci))) { | ||
487 | edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); | ||
488 | rc = -EINVAL; | ||
489 | goto fail; | ||
490 | } | ||
491 | |||
492 | return 0; | ||
493 | |||
494 | fail: | ||
495 | kfree(mci->ctl_name); | ||
496 | edac_mc_free(mci); | ||
497 | imc->mci = NULL; | ||
498 | return rc; | ||
499 | } | ||
500 | |||
/* Number of SAD (system address decode) register pairs */
#define SKX_MAX_SAD	24

/* SAD entry i: decode dword at 0x60 + 8*i, interleave list at 0x64 + 8*i */
#define SKX_GET_SAD(d, i, reg)	\
	pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), &reg)
#define SKX_GET_ILV(d, i, reg)	\
	pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), &reg)

/* Field accessors for a SAD decode register */
#define	SKX_SAD_MOD3MODE(sad)	GET_BITFIELD((sad), 30, 31)	/* which address bits feed the mod-3 */
#define	SKX_SAD_MOD3(sad)	GET_BITFIELD((sad), 27, 27)	/* mod-3 interleave enabled */
#define SKX_SAD_LIMIT(sad)	(((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26)	/* inclusive top address */
#define	SKX_SAD_MOD3ASMOD2(sad)	GET_BITFIELD((sad), 5, 6)	/* mod-3 treated as mod-2 variant */
#define	SKX_SAD_ATTR(sad)	GET_BITFIELD((sad), 3, 4)
#define	SKX_SAD_INTERLEAVE(sad)	GET_BITFIELD((sad), 1, 2)	/* granularity of interleave index */
#define SKX_SAD_ENABLE(sad)	GET_BITFIELD((sad), 0, 0)

/* Interleave list target: bit 3 clear means a remote socket */
#define SKX_ILV_REMOTE(tgt)	(((tgt) & 8) == 0)
#define SKX_ILV_TARGET(tgt)	((tgt) & 7)
518 | |||
519 | static bool skx_sad_decode(struct decoded_addr *res) | ||
520 | { | ||
521 | struct skx_dev *d = list_first_entry(&skx_edac_list, typeof(*d), list); | ||
522 | u64 addr = res->addr; | ||
523 | int i, idx, tgt, lchan, shift; | ||
524 | u32 sad, ilv; | ||
525 | u64 limit, prev_limit; | ||
526 | int remote = 0; | ||
527 | |||
528 | /* Simple sanity check for I/O space or out of range */ | ||
529 | if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) { | ||
530 | edac_dbg(0, "Address %llx out of range\n", addr); | ||
531 | return false; | ||
532 | } | ||
533 | |||
534 | restart: | ||
535 | prev_limit = 0; | ||
536 | for (i = 0; i < SKX_MAX_SAD; i++) { | ||
537 | SKX_GET_SAD(d, i, sad); | ||
538 | limit = SKX_SAD_LIMIT(sad); | ||
539 | if (SKX_SAD_ENABLE(sad)) { | ||
540 | if (addr >= prev_limit && addr <= limit) | ||
541 | goto sad_found; | ||
542 | } | ||
543 | prev_limit = limit + 1; | ||
544 | } | ||
545 | edac_dbg(0, "No SAD entry for %llx\n", addr); | ||
546 | return false; | ||
547 | |||
548 | sad_found: | ||
549 | SKX_GET_ILV(d, i, ilv); | ||
550 | |||
551 | switch (SKX_SAD_INTERLEAVE(sad)) { | ||
552 | case 0: | ||
553 | idx = GET_BITFIELD(addr, 6, 8); | ||
554 | break; | ||
555 | case 1: | ||
556 | idx = GET_BITFIELD(addr, 8, 10); | ||
557 | break; | ||
558 | case 2: | ||
559 | idx = GET_BITFIELD(addr, 12, 14); | ||
560 | break; | ||
561 | case 3: | ||
562 | idx = GET_BITFIELD(addr, 30, 32); | ||
563 | break; | ||
564 | } | ||
565 | |||
566 | tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3); | ||
567 | |||
568 | /* If point to another node, find it and start over */ | ||
569 | if (SKX_ILV_REMOTE(tgt)) { | ||
570 | if (remote) { | ||
571 | edac_dbg(0, "Double remote!\n"); | ||
572 | return false; | ||
573 | } | ||
574 | remote = 1; | ||
575 | list_for_each_entry(d, &skx_edac_list, list) { | ||
576 | if (d->imc[0].src_id == SKX_ILV_TARGET(tgt)) | ||
577 | goto restart; | ||
578 | } | ||
579 | edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt)); | ||
580 | return false; | ||
581 | } | ||
582 | |||
583 | if (SKX_SAD_MOD3(sad) == 0) | ||
584 | lchan = SKX_ILV_TARGET(tgt); | ||
585 | else { | ||
586 | switch (SKX_SAD_MOD3MODE(sad)) { | ||
587 | case 0: | ||
588 | shift = 6; | ||
589 | break; | ||
590 | case 1: | ||
591 | shift = 8; | ||
592 | break; | ||
593 | case 2: | ||
594 | shift = 12; | ||
595 | break; | ||
596 | default: | ||
597 | edac_dbg(0, "illegal mod3mode\n"); | ||
598 | return false; | ||
599 | } | ||
600 | switch (SKX_SAD_MOD3ASMOD2(sad)) { | ||
601 | case 0: | ||
602 | lchan = (addr >> shift) % 3; | ||
603 | break; | ||
604 | case 1: | ||
605 | lchan = (addr >> shift) % 2; | ||
606 | break; | ||
607 | case 2: | ||
608 | lchan = (addr >> shift) % 2; | ||
609 | lchan = (lchan << 1) | ~lchan; | ||
610 | break; | ||
611 | case 3: | ||
612 | lchan = ((addr >> shift) % 2) << 1; | ||
613 | break; | ||
614 | } | ||
615 | lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1); | ||
616 | } | ||
617 | |||
618 | res->dev = d; | ||
619 | res->socket = d->imc[0].src_id; | ||
620 | res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2); | ||
621 | res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19); | ||
622 | |||
623 | edac_dbg(2, "%llx: socket=%d imc=%d channel=%d\n", | ||
624 | res->addr, res->socket, res->imc, res->channel); | ||
625 | return true; | ||
626 | } | ||
627 | |||
/* Number of TAD (target address decode) entries per memory controller */
#define	SKX_MAX_TAD	8

/* TAD registers live in the channel device config space */
#define SKX_GET_TADBASE(d, mc, i, reg)			\
	pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), &reg)
#define SKX_GET_TADWAYNESS(d, mc, i, reg)		\
	pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), &reg)
#define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg)	\
	pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), &reg)

/* Field accessors: base/limit are in units of 64MiB (<< 26) */
#define	SKX_TAD_BASE(b)		((u64)GET_BITFIELD((b), 12, 31) << 26)
#define SKX_TAD_SKT_GRAN(b)	GET_BITFIELD((b), 4, 5)		/* index into skx_granularity[] */
#define SKX_TAD_CHN_GRAN(b)	GET_BITFIELD((b), 6, 7)		/* index into skx_granularity[] */
#define	SKX_TAD_LIMIT(b)	(((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26)
#define	SKX_TAD_OFFSET(b)	((u64)GET_BITFIELD((b), 4, 23) << 26)
#define	SKX_TAD_SKTWAYS(b)	(1 << GET_BITFIELD((b), 10, 11))
#define	SKX_TAD_CHNWAYS(b)	(GET_BITFIELD((b), 8, 9) + 1)

/* which bit used for both socket and channel interleave */
static int skx_granularity[] = { 6, 8, 12, 30 };
647 | |||
648 | static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits) | ||
649 | { | ||
650 | addr >>= shift; | ||
651 | addr /= ways; | ||
652 | addr <<= shift; | ||
653 | |||
654 | return addr | (lowbits & ((1ull << shift) - 1)); | ||
655 | } | ||
656 | |||
/*
 * Stage 2 of address translation: find the TAD entry covering
 * res->addr and strip the socket/channel interleaving to produce the
 * channel address in res->chan_addr.  Returns false if no TAD entry
 * matches.
 */
static bool skx_tad_decode(struct decoded_addr *res)
{
	int i;
	u32 base, wayness, chnilvoffset;
	int skt_interleave_bit, chn_interleave_bit;
	u64 channel_addr;

	for (i = 0; i < SKX_MAX_TAD; i++) {
		SKX_GET_TADBASE(res->dev, res->imc, i, base);
		SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness);
		if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness))
			goto tad_found;
	}
	edac_dbg(0, "No TAD entry for %llx\n", res->addr);
	return false;

tad_found:
	res->sktways = SKX_TAD_SKTWAYS(wayness);
	res->chanways = SKX_TAD_CHNWAYS(wayness);
	skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)];
	chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)];

	/* apply this channel's offset before undoing interleave */
	SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset);
	channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset);

	if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) {
		/* Must handle channel first, then socket */
		channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
						 res->chanways, channel_addr);
		channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
						 res->sktways, channel_addr);
	} else {
		/* Handle socket then channel. Preserve low bits from original address */
		channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
						 res->sktways, res->addr);
		channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
						 res->chanways, res->addr);
	}

	res->chan_addr = channel_addr;

	edac_dbg(2, "%llx: chan_addr=%llx sktways=%d chanways=%d\n",
		 res->addr, res->chan_addr, res->sktways, res->chanways);
	return true;
}
702 | |||
/* Number of RIR (rank interleave register) entries per channel */
#define SKX_MAX_RIR	4

/* RIR wayness at 0x108 + 4*i; interleave lists at 0x120, 16 bytes per RIR */
#define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg)		\
	pci_read_config_dword((d)->imc[mc].chan[ch].cdev,	\
			      0x108 + 4 * (i), &reg)
#define SKX_GET_RIRILV(d, mc, ch, idx, i, reg)		\
	pci_read_config_dword((d)->imc[mc].chan[ch].cdev,	\
			      0x120 + 16 * idx + 4 * (i), &reg)

#define	SKX_RIR_VALID(b) GET_BITFIELD((b), 31, 31)
#define	SKX_RIR_LIMIT(b) (((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29)
#define	SKX_RIR_WAYS(b) (1 << GET_BITFIELD((b), 28, 29))
#define	SKX_RIR_CHAN_RANK(b) GET_BITFIELD((b), 16, 19)
/*
 * NOTE(review): the << 26 here happens before the u64 cast; this only
 * avoids truncation if GET_BITFIELD already evaluates to a 64-bit type
 * (e.g. via GENMASK_ULL) — confirm GET_BITFIELD's definition.
 */
#define	SKX_RIR_OFFSET(b) ((u64)(GET_BITFIELD((b), 2, 15) << 26))
717 | |||
/*
 * Stage 3 of address translation: use the RIR registers to turn the
 * channel address into a rank address, and identify the DIMM and rank.
 * Returns false if no valid RIR entry covers the channel address.
 */
static bool skx_rir_decode(struct decoded_addr *res)
{
	int i, idx, chan_rank;
	int shift;
	u32 rirway, rirlv;
	u64 rank_addr, prev_limit = 0, limit;

	/* interleave granularity depends on the page mode of DIMM 0 */
	if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg)
		shift = 6;
	else
		shift = 13;

	/*
	 * NOTE(review): unlike the SAD loop, prev_limit is set to limit
	 * rather than limit + 1, so adjacent regions overlap at one
	 * address — confirm intended.
	 */
	for (i = 0; i < SKX_MAX_RIR; i++) {
		SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway);
		limit = SKX_RIR_LIMIT(rirway);
		if (SKX_RIR_VALID(rirway)) {
			if (prev_limit <= res->chan_addr &&
			    res->chan_addr <= limit)
				goto rir_found;
		}
		prev_limit = limit;
	}
	edac_dbg(0, "No RIR entry for %llx\n", res->addr);
	return false;

rir_found:
	/* undo the rank interleave, keeping the low "shift" bits */
	rank_addr = res->chan_addr >> shift;
	rank_addr /= SKX_RIR_WAYS(rirway);
	rank_addr <<= shift;
	rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0);

	res->rank_address = rank_addr;
	idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway);

	SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, rirlv);
	res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv);
	chan_rank = SKX_RIR_CHAN_RANK(rirlv);
	res->channel_rank = chan_rank;
	res->dimm = chan_rank / 4;	/* 4 ranks per DIMM slot */
	res->rank = chan_rank % 4;

	edac_dbg(2, "%llx: dimm=%d rank=%d chan_rank=%d rank_addr=%llx\n",
		 res->addr, res->dimm, res->rank,
		 res->channel_rank, res->rank_address);
	return true;
}
764 | |||
/*
 * Tables mapping rank-address bit positions to DRAM row/column bits.
 * Entry i gives the rank-address bit that supplies row/column bit i;
 * which table applies depends on the DIMM's page mode and fine-grain
 * bank setting (see skx_mad_decode()).
 */
static u8 skx_close_row[] = {
	15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
};
static u8 skx_close_column[] = {
	3, 4, 5, 14, 19, 23, 24, 25, 26, 27
};
static u8 skx_open_row[] = {
	14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
};
static u8 skx_open_column[] = {
	3, 4, 5, 6, 7, 8, 9, 10, 11, 12
};
static u8 skx_open_fine_column[] = {
	3, 4, 5, 7, 8, 9, 10, 11, 12, 13
};
780 | |||
781 | static int skx_bits(u64 addr, int nbits, u8 *bits) | ||
782 | { | ||
783 | int i, res = 0; | ||
784 | |||
785 | for (i = 0; i < nbits; i++) | ||
786 | res |= ((addr >> bits[i]) & 1) << i; | ||
787 | return res; | ||
788 | } | ||
789 | |||
790 | static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1) | ||
791 | { | ||
792 | int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1); | ||
793 | |||
794 | if (do_xor) | ||
795 | ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1); | ||
796 | |||
797 | return ret; | ||
798 | } | ||
799 | |||
/*
 * Stage 4 of address translation: map the rank address onto DRAM
 * row/column/bank/bank-group using the per-DIMM geometry captured in
 * get_dimm_info().  Always succeeds (returns true).
 */
static bool skx_mad_decode(struct decoded_addr *r)
{
	struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm];
	/* bank group bit 0 moves with the fine-grain-bank setting */
	int bg0 = dimm->fine_grain_bank ? 6 : 13;

	if (dimm->close_pg) {
		r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row);
		r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column);
		r->column |= 0x400; /* C10 is autoprecharge, always set */
		r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28);
		r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21);
	} else {
		r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row);
		if (dimm->fine_grain_bank)
			r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column);
		else
			r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column);
		r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23);
		r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21);
	}
	/* mask the row to the number of row bits this DIMM actually has */
	r->row &= (1u << dimm->rowbits) - 1;

	edac_dbg(2, "%llx: row=%x col=%x bank_addr=%d bank_group=%d\n",
		 r->addr, r->row, r->column, r->bank_address,
		 r->bank_group);
	return true;
}
827 | |||
828 | static bool skx_decode(struct decoded_addr *res) | ||
829 | { | ||
830 | |||
831 | return skx_sad_decode(res) && skx_tad_decode(res) && | ||
832 | skx_rir_decode(res) && skx_mad_decode(res); | ||
833 | } | ||
834 | |||
#ifdef CONFIG_EDAC_DEBUG
/*
 * Debug feature. Make /sys/kernel/debug/skx_edac_test/addr.
 * Write an address to this file to exercise the address decode
 * logic in this driver.
 */
static struct dentry *skx_test;
static u64 skx_fake_addr;

/* debugfs write handler: run the full decode chain on the given address */
static int debugfs_u64_set(void *data, u64 val)
{
	struct decoded_addr res;

	res.addr = val;
	skx_decode(&res);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

static struct dentry *mydebugfs_create(const char *name, umode_t mode,
				       struct dentry *parent, u64 *value)
{
	return debugfs_create_file(name, mode, parent, value, &fops_u64_wo);
}

/*
 * Fix: check the results of debugfs creation.  Previously a failed
 * debugfs_create_dir() was passed on as a parent and a failed file
 * creation left an empty directory behind.
 */
static void setup_skx_debug(void)
{
	skx_test = debugfs_create_dir("skx_edac_test", NULL);
	if (!skx_test)
		return;

	if (!mydebugfs_create("addr", S_IWUSR, skx_test, &skx_fake_addr)) {
		debugfs_remove_recursive(skx_test);
		skx_test = NULL;
	}
}

static void teardown_skx_debug(void)
{
	/* debugfs_remove_recursive(NULL) is a no-op, so this is always safe */
	debugfs_remove_recursive(skx_test);
}
#else
static void setup_skx_debug(void)
{
}

static void teardown_skx_debug(void)
{
}
#endif /*CONFIG_EDAC_DEBUG*/
881 | |||
/*
 * Format and report one decoded memory error to the EDAC core.
 * Classifies the MCE as corrected / uncorrected / fatal from the
 * machine-check status bits, builds a human-readable message from the
 * decoded address in @res, and calls edac_mc_handle_error().
 */
static void skx_mce_output_error(struct mem_ctl_info *mci,
				 const struct mce *m,
				 struct decoded_addr *res)
{
	enum hw_event_mc_err_type tp_event;
	char *type, *optype, msg[256];
	bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);	/* RIPV: restart IP valid */
	bool overflow = GET_BITFIELD(m->status, 62, 62);
	bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
	bool recoverable;
	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
	u32 mscod = GET_BITFIELD(m->status, 16, 31);	/* model specific error code */
	u32 errcode = GET_BITFIELD(m->status, 0, 15);	/* MCA error code */
	u32 optypenum = GET_BITFIELD(m->status, 4, 6);

	recoverable = GET_BITFIELD(m->status, 56, 56);

	if (uncorrected_error) {
		if (ripv) {
			type = "FATAL";
			tp_event = HW_EVENT_ERR_FATAL;
		} else {
			type = "NON_FATAL";
			tp_event = HW_EVENT_ERR_UNCORRECTED;
		}
	} else {
		type = "CORRECTED";
		tp_event = HW_EVENT_ERR_CORRECTED;
	}

	/*
	 * According with Table 15-9 of the Intel Architecture spec vol 3A,
	 * memory errors should fit in this mask:
	 *	000f 0000 1mmm cccc (binary)
	 * where:
	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
	 *	    won't be shown
	 *	mmm = error type
	 *	cccc = channel
	 * If the mask doesn't match, report an error to the parsing logic
	 */
	if (!((errcode & 0xef80) == 0x80)) {
		optype = "Can't parse: it is not a mem";
	} else {
		switch (optypenum) {
		case 0:
			optype = "generic undef request error";
			break;
		case 1:
			optype = "memory read error";
			break;
		case 2:
			optype = "memory write error";
			break;
		case 3:
			optype = "addr/cmd error";
			break;
		case 4:
			optype = "memory scrubbing error";
			break;
		default:
			optype = "reserved";
			break;
		}
	}

	snprintf(msg, sizeof(msg),
		 "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x",
		 overflow ? " OVERFLOW" : "",
		 (uncorrected_error && recoverable) ? " recoverable" : "",
		 mscod, errcode,
		 res->socket, res->imc, res->rank,
		 res->bank_group, res->bank_address, res->row, res->column);

	edac_dbg(0, "%s\n", msg);

	/* Call the helper to output message */
	edac_mc_handle_error(tp_event, mci, core_err_cnt,
			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
			     res->channel, res->dimm, -1,
			     optype, msg);
}
964 | |||
/*
 * MCE decode-chain callback.  Filters for memory-related machine
 * checks with a valid address, decodes the address and reports the
 * error.  Always returns NOTIFY_DONE so other notifiers still run.
 */
static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
			       void *data)
{
	struct mce *mce = (struct mce *)data;
	struct decoded_addr res;
	struct mem_ctl_info *mci;
	char *type;

	if (get_edac_report_status() == EDAC_REPORTING_DISABLED)
		return NOTIFY_DONE;

	/* ignore unless this is memory related with an address */
	if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
		return NOTIFY_DONE;

	res.addr = mce->addr;
	if (!skx_decode(&res))
		return NOTIFY_DONE;
	mci = res.dev->imc[res.imc].mci;

	/* MCIP set means this arrived via a machine-check exception */
	if (mce->mcgstatus & MCG_STATUS_MCIP)
		type = "Exception";
	else
		type = "Event";

	skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");

	skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
			  "Bank %d: %016Lx\n", mce->extcpu, type,
			  mce->mcgstatus, mce->bank, mce->status);
	skx_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
	skx_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
	skx_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);

	skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
			  "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
			  mce->time, mce->socketid, mce->apicid);

	skx_mce_output_error(mci, mce, &res);

	return NOTIFY_DONE;
}

/* Registered on the MCE decode chain in skx_init() */
static struct notifier_block skx_mce_dec = {
	.notifier_call = skx_mce_check_error,
};
1011 | |||
/*
 * Tear down everything built by skx_init(): unregister each memory
 * controller, drop all held PCI device references and free the
 * skx_dev list.  Safe to call on a partially-initialized list
 * (pci_dev_put(NULL) and skx_unregister_mci() on an unregistered imc
 * are no-ops).
 */
static void skx_remove(void)
{
	int i, j;
	struct skx_dev *d, *tmp;

	edac_dbg(0, "\n");

	list_for_each_entry_safe(d, tmp, &skx_edac_list, list) {
		list_del(&d->list);
		for (i = 0; i < NUM_IMC; i++) {
			skx_unregister_mci(&d->imc[i]);
			for (j = 0; j < NUM_CHANNELS; j++)
				pci_dev_put(d->imc[i].chan[j].cdev);
		}
		pci_dev_put(d->util_all);
		pci_dev_put(d->sad_all);

		kfree(d);
	}
}
1032 | |||
1033 | /* | ||
1034 | * skx_init: | ||
1035 | * make sure we are running on the correct cpu model | ||
1036 | * search for all the devices we need | ||
1037 | * check which DIMMs are present. | ||
1038 | */ | ||
1039 | int __init skx_init(void) | ||
1040 | { | ||
1041 | const struct x86_cpu_id *id; | ||
1042 | const struct munit *m; | ||
1043 | int rc = 0, i; | ||
1044 | u8 mc = 0, src_id, node_id; | ||
1045 | struct skx_dev *d; | ||
1046 | |||
1047 | edac_dbg(2, "\n"); | ||
1048 | |||
1049 | id = x86_match_cpu(skx_cpuids); | ||
1050 | if (!id) | ||
1051 | return -ENODEV; | ||
1052 | |||
1053 | rc = skx_get_hi_lo(); | ||
1054 | if (rc) | ||
1055 | return rc; | ||
1056 | |||
1057 | rc = get_all_bus_mappings(); | ||
1058 | if (rc < 0) | ||
1059 | goto fail; | ||
1060 | if (rc == 0) { | ||
1061 | edac_dbg(2, "No memory controllers found\n"); | ||
1062 | return -ENODEV; | ||
1063 | } | ||
1064 | |||
1065 | for (m = skx_all_munits; m->did; m++) { | ||
1066 | rc = get_all_munits(m); | ||
1067 | if (rc < 0) | ||
1068 | goto fail; | ||
1069 | if (rc != m->per_socket * skx_num_sockets) { | ||
1070 | edac_dbg(2, "Expected %d, got %d of %x\n", | ||
1071 | m->per_socket * skx_num_sockets, rc, m->did); | ||
1072 | rc = -ENODEV; | ||
1073 | goto fail; | ||
1074 | } | ||
1075 | } | ||
1076 | |||
1077 | list_for_each_entry(d, &skx_edac_list, list) { | ||
1078 | src_id = get_src_id(d); | ||
1079 | node_id = skx_get_node_id(d); | ||
1080 | edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id); | ||
1081 | for (i = 0; i < NUM_IMC; i++) { | ||
1082 | d->imc[i].mc = mc++; | ||
1083 | d->imc[i].lmc = i; | ||
1084 | d->imc[i].src_id = src_id; | ||
1085 | d->imc[i].node_id = node_id; | ||
1086 | rc = skx_register_mci(&d->imc[i]); | ||
1087 | if (rc < 0) | ||
1088 | goto fail; | ||
1089 | } | ||
1090 | } | ||
1091 | |||
1092 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
1093 | opstate_init(); | ||
1094 | |||
1095 | setup_skx_debug(); | ||
1096 | |||
1097 | mce_register_decode_chain(&skx_mce_dec); | ||
1098 | |||
1099 | return 0; | ||
1100 | fail: | ||
1101 | skx_remove(); | ||
1102 | return rc; | ||
1103 | } | ||
1104 | |||
1105 | static void __exit skx_exit(void) | ||
1106 | { | ||
1107 | edac_dbg(2, "\n"); | ||
1108 | mce_unregister_decode_chain(&skx_mce_dec); | ||
1109 | skx_remove(); | ||
1110 | teardown_skx_debug(); | ||
1111 | } | ||
1112 | |||
module_init(skx_init);
module_exit(skx_exit);

/* 0444: edac_op_state is read-only through sysfs */
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Tony Luck");
MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors");
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 438893762076..ce2bc2a38101 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c | |||
@@ -709,9 +709,10 @@ static int scpi_probe(struct platform_device *pdev) | |||
709 | struct mbox_client *cl = &pchan->cl; | 709 | struct mbox_client *cl = &pchan->cl; |
710 | struct device_node *shmem = of_parse_phandle(np, "shmem", idx); | 710 | struct device_node *shmem = of_parse_phandle(np, "shmem", idx); |
711 | 711 | ||
712 | if (of_address_to_resource(shmem, 0, &res)) { | 712 | ret = of_address_to_resource(shmem, 0, &res); |
713 | of_node_put(shmem); | ||
714 | if (ret) { | ||
713 | dev_err(dev, "failed to get SCPI payload mem resource\n"); | 715 | dev_err(dev, "failed to get SCPI payload mem resource\n"); |
714 | ret = -EINVAL; | ||
715 | goto err; | 716 | goto err; |
716 | } | 717 | } |
717 | 718 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 8ebc5f1eb4c0..8c704c86597b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -646,9 +646,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); | |||
646 | void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); | 646 | void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); |
647 | int amdgpu_gart_init(struct amdgpu_device *adev); | 647 | int amdgpu_gart_init(struct amdgpu_device *adev); |
648 | void amdgpu_gart_fini(struct amdgpu_device *adev); | 648 | void amdgpu_gart_fini(struct amdgpu_device *adev); |
649 | void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, | 649 | void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, |
650 | int pages); | 650 | int pages); |
651 | int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset, | 651 | int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, |
652 | int pages, struct page **pagelist, | 652 | int pages, struct page **pagelist, |
653 | dma_addr_t *dma_addr, uint32_t flags); | 653 | dma_addr_t *dma_addr, uint32_t flags); |
654 | 654 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 49de92600074..10b5ddf2c588 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | |||
@@ -200,16 +200,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) | |||
200 | atpx->is_hybrid = false; | 200 | atpx->is_hybrid = false; |
201 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { | 201 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { |
202 | printk("ATPX Hybrid Graphics\n"); | 202 | printk("ATPX Hybrid Graphics\n"); |
203 | #if 1 | ||
204 | /* This is a temporary hack until the D3 cold support | ||
205 | * makes it upstream. The ATPX power_control method seems | ||
206 | * to still work on even if the system should be using | ||
207 | * the new standardized hybrid D3 cold ACPI interface. | ||
208 | */ | ||
209 | atpx->functions.power_cntl = true; | ||
210 | #else | ||
211 | atpx->functions.power_cntl = false; | 203 | atpx->functions.power_cntl = false; |
212 | #endif | ||
213 | atpx->is_hybrid = true; | 204 | atpx->is_hybrid = true; |
214 | } | 205 | } |
215 | 206 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 921bce2df0b0..0feea347f680 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | |||
@@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev) | |||
221 | * Unbinds the requested pages from the gart page table and | 221 | * Unbinds the requested pages from the gart page table and |
222 | * replaces them with the dummy page (all asics). | 222 | * replaces them with the dummy page (all asics). |
223 | */ | 223 | */ |
224 | void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, | 224 | void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, |
225 | int pages) | 225 | int pages) |
226 | { | 226 | { |
227 | unsigned t; | 227 | unsigned t; |
@@ -268,7 +268,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset, | |||
268 | * (all asics). | 268 | * (all asics). |
269 | * Returns 0 for success, -EINVAL for failure. | 269 | * Returns 0 for success, -EINVAL for failure. |
270 | */ | 270 | */ |
271 | int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset, | 271 | int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, |
272 | int pages, struct page **pagelist, dma_addr_t *dma_addr, | 272 | int pages, struct page **pagelist, dma_addr_t *dma_addr, |
273 | uint32_t flags) | 273 | uint32_t flags) |
274 | { | 274 | { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index b11f4e8868d7..4aa993d19018 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
@@ -1187,7 +1187,8 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) | |||
1187 | r = 0; | 1187 | r = 0; |
1188 | } | 1188 | } |
1189 | 1189 | ||
1190 | error: | ||
1191 | fence_put(fence); | 1190 | fence_put(fence); |
1191 | |||
1192 | error: | ||
1192 | return r; | 1193 | return r; |
1193 | } | 1194 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8e642fc48df4..80120fa4092c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -1535,7 +1535,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
1535 | r = amd_sched_entity_init(&ring->sched, &vm->entity, | 1535 | r = amd_sched_entity_init(&ring->sched, &vm->entity, |
1536 | rq, amdgpu_sched_jobs); | 1536 | rq, amdgpu_sched_jobs); |
1537 | if (r) | 1537 | if (r) |
1538 | return r; | 1538 | goto err; |
1539 | 1539 | ||
1540 | vm->page_directory_fence = NULL; | 1540 | vm->page_directory_fence = NULL; |
1541 | 1541 | ||
@@ -1565,6 +1565,9 @@ error_free_page_directory: | |||
1565 | error_free_sched_entity: | 1565 | error_free_sched_entity: |
1566 | amd_sched_entity_fini(&ring->sched, &vm->entity); | 1566 | amd_sched_entity_fini(&ring->sched, &vm->entity); |
1567 | 1567 | ||
1568 | err: | ||
1569 | drm_free_large(vm->page_tables); | ||
1570 | |||
1568 | return r; | 1571 | return r; |
1569 | } | 1572 | } |
1570 | 1573 | ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index e621eba63126..a7d3cb3fead0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | |||
@@ -184,7 +184,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, | |||
184 | sizeof(u32)) + inx; | 184 | sizeof(u32)) + inx; |
185 | 185 | ||
186 | pr_debug("kfd: get kernel queue doorbell\n" | 186 | pr_debug("kfd: get kernel queue doorbell\n" |
187 | " doorbell offset == 0x%08d\n" | 187 | " doorbell offset == 0x%08X\n" |
188 | " kernel address == 0x%08lX\n", | 188 | " kernel address == 0x%08lX\n", |
189 | *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx)); | 189 | *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx)); |
190 | 190 | ||
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index ce54e985d91b..0a06f9120b5a 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -464,7 +464,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) | |||
464 | 464 | ||
465 | /* Sometimes user space wants everything disabled, so don't steal the | 465 | /* Sometimes user space wants everything disabled, so don't steal the |
466 | * display if there's a master. */ | 466 | * display if there's a master. */ |
467 | if (lockless_dereference(dev->master)) | 467 | if (READ_ONCE(dev->master)) |
468 | return false; | 468 | return false; |
469 | 469 | ||
470 | drm_for_each_crtc(crtc, dev) { | 470 | drm_for_each_crtc(crtc, dev) { |
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index 87ef34150d46..b382cf505262 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c | |||
@@ -1333,8 +1333,6 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, | |||
1333 | if (ret < 0) | 1333 | if (ret < 0) |
1334 | return ret; | 1334 | return ret; |
1335 | 1335 | ||
1336 | mutex_lock(&gpu->lock); | ||
1337 | |||
1338 | /* | 1336 | /* |
1339 | * TODO | 1337 | * TODO |
1340 | * | 1338 | * |
@@ -1348,16 +1346,18 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, | |||
1348 | if (unlikely(event == ~0U)) { | 1346 | if (unlikely(event == ~0U)) { |
1349 | DRM_ERROR("no free event\n"); | 1347 | DRM_ERROR("no free event\n"); |
1350 | ret = -EBUSY; | 1348 | ret = -EBUSY; |
1351 | goto out_unlock; | 1349 | goto out_pm_put; |
1352 | } | 1350 | } |
1353 | 1351 | ||
1354 | fence = etnaviv_gpu_fence_alloc(gpu); | 1352 | fence = etnaviv_gpu_fence_alloc(gpu); |
1355 | if (!fence) { | 1353 | if (!fence) { |
1356 | event_free(gpu, event); | 1354 | event_free(gpu, event); |
1357 | ret = -ENOMEM; | 1355 | ret = -ENOMEM; |
1358 | goto out_unlock; | 1356 | goto out_pm_put; |
1359 | } | 1357 | } |
1360 | 1358 | ||
1359 | mutex_lock(&gpu->lock); | ||
1360 | |||
1361 | gpu->event[event].fence = fence; | 1361 | gpu->event[event].fence = fence; |
1362 | submit->fence = fence->seqno; | 1362 | submit->fence = fence->seqno; |
1363 | gpu->active_fence = submit->fence; | 1363 | gpu->active_fence = submit->fence; |
@@ -1395,9 +1395,9 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, | |||
1395 | hangcheck_timer_reset(gpu); | 1395 | hangcheck_timer_reset(gpu); |
1396 | ret = 0; | 1396 | ret = 0; |
1397 | 1397 | ||
1398 | out_unlock: | ||
1399 | mutex_unlock(&gpu->lock); | 1398 | mutex_unlock(&gpu->lock); |
1400 | 1399 | ||
1400 | out_pm_put: | ||
1401 | etnaviv_gpu_pm_put(gpu); | 1401 | etnaviv_gpu_pm_put(gpu); |
1402 | 1402 | ||
1403 | return ret; | 1403 | return ret; |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 21f939074abc..20fe9d52e256 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -1854,6 +1854,7 @@ struct drm_i915_private { | |||
1854 | enum modeset_restore modeset_restore; | 1854 | enum modeset_restore modeset_restore; |
1855 | struct mutex modeset_restore_lock; | 1855 | struct mutex modeset_restore_lock; |
1856 | struct drm_atomic_state *modeset_restore_state; | 1856 | struct drm_atomic_state *modeset_restore_state; |
1857 | struct drm_modeset_acquire_ctx reset_ctx; | ||
1857 | 1858 | ||
1858 | struct list_head vm_list; /* Global list of all address spaces */ | 1859 | struct list_head vm_list; /* Global list of all address spaces */ |
1859 | struct i915_ggtt ggtt; /* VM representing the global address space */ | 1860 | struct i915_ggtt ggtt; /* VM representing the global address space */ |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 11681501d7b1..a77ce9983f69 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -879,9 +879,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, | |||
879 | ret = i915_gem_shmem_pread(dev, obj, args, file); | 879 | ret = i915_gem_shmem_pread(dev, obj, args, file); |
880 | 880 | ||
881 | /* pread for non shmem backed objects */ | 881 | /* pread for non shmem backed objects */ |
882 | if (ret == -EFAULT || ret == -ENODEV) | 882 | if (ret == -EFAULT || ret == -ENODEV) { |
883 | intel_runtime_pm_get(to_i915(dev)); | ||
883 | ret = i915_gem_gtt_pread(dev, obj, args->size, | 884 | ret = i915_gem_gtt_pread(dev, obj, args->size, |
884 | args->offset, args->data_ptr); | 885 | args->offset, args->data_ptr); |
886 | intel_runtime_pm_put(to_i915(dev)); | ||
887 | } | ||
885 | 888 | ||
886 | out: | 889 | out: |
887 | drm_gem_object_unreference(&obj->base); | 890 | drm_gem_object_unreference(&obj->base); |
@@ -1306,7 +1309,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
1306 | * textures). Fallback to the shmem path in that case. */ | 1309 | * textures). Fallback to the shmem path in that case. */ |
1307 | } | 1310 | } |
1308 | 1311 | ||
1309 | if (ret == -EFAULT) { | 1312 | if (ret == -EFAULT || ret == -ENOSPC) { |
1310 | if (obj->phys_handle) | 1313 | if (obj->phys_handle) |
1311 | ret = i915_gem_phys_pwrite(obj, args, file); | 1314 | ret = i915_gem_phys_pwrite(obj, args, file); |
1312 | else if (i915_gem_object_has_struct_page(obj)) | 1315 | else if (i915_gem_object_has_struct_page(obj)) |
@@ -3169,6 +3172,8 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine) | |||
3169 | } | 3172 | } |
3170 | 3173 | ||
3171 | intel_ring_init_seqno(engine, engine->last_submitted_seqno); | 3174 | intel_ring_init_seqno(engine, engine->last_submitted_seqno); |
3175 | |||
3176 | engine->i915->gt.active_engines &= ~intel_engine_flag(engine); | ||
3172 | } | 3177 | } |
3173 | 3178 | ||
3174 | void i915_gem_reset(struct drm_device *dev) | 3179 | void i915_gem_reset(struct drm_device *dev) |
@@ -3186,6 +3191,7 @@ void i915_gem_reset(struct drm_device *dev) | |||
3186 | 3191 | ||
3187 | for_each_engine(engine, dev_priv) | 3192 | for_each_engine(engine, dev_priv) |
3188 | i915_gem_reset_engine_cleanup(engine); | 3193 | i915_gem_reset_engine_cleanup(engine); |
3194 | mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0); | ||
3189 | 3195 | ||
3190 | i915_gem_context_reset(dev); | 3196 | i915_gem_context_reset(dev); |
3191 | 3197 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 10f1e32767e6..7a30af79d799 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -2873,6 +2873,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev) | |||
2873 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; | 2873 | struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; |
2874 | 2874 | ||
2875 | ppgtt->base.cleanup(&ppgtt->base); | 2875 | ppgtt->base.cleanup(&ppgtt->base); |
2876 | kfree(ppgtt); | ||
2876 | } | 2877 | } |
2877 | 2878 | ||
2878 | i915_gem_cleanup_stolen(dev); | 2879 | i915_gem_cleanup_stolen(dev); |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ce14fe09d962..5c06413ae0e6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -1536,6 +1536,7 @@ enum skl_disp_power_wells { | |||
1536 | #define BALANCE_LEG_MASK(port) (7<<(8+3*(port))) | 1536 | #define BALANCE_LEG_MASK(port) (7<<(8+3*(port))) |
1537 | /* Balance leg disable bits */ | 1537 | /* Balance leg disable bits */ |
1538 | #define BALANCE_LEG_DISABLE_SHIFT 23 | 1538 | #define BALANCE_LEG_DISABLE_SHIFT 23 |
1539 | #define BALANCE_LEG_DISABLE(port) (1 << (23 + (port))) | ||
1539 | 1540 | ||
1540 | /* | 1541 | /* |
1541 | * Fence registers | 1542 | * Fence registers |
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 6700a7be7f78..d32f586f9c05 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
@@ -600,6 +600,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev, | |||
600 | if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)) | 600 | if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)) |
601 | return; | 601 | return; |
602 | 602 | ||
603 | i915_audio_component_get_power(dev); | ||
604 | |||
603 | /* | 605 | /* |
604 | * Enable/disable generating the codec wake signal, overriding the | 606 | * Enable/disable generating the codec wake signal, overriding the |
605 | * internal logic to generate the codec wake to controller. | 607 | * internal logic to generate the codec wake to controller. |
@@ -615,6 +617,8 @@ static void i915_audio_component_codec_wake_override(struct device *dev, | |||
615 | I915_WRITE(HSW_AUD_CHICKENBIT, tmp); | 617 | I915_WRITE(HSW_AUD_CHICKENBIT, tmp); |
616 | usleep_range(1000, 1500); | 618 | usleep_range(1000, 1500); |
617 | } | 619 | } |
620 | |||
621 | i915_audio_component_put_power(dev); | ||
618 | } | 622 | } |
619 | 623 | ||
620 | /* Get CDCLK in kHz */ | 624 | /* Get CDCLK in kHz */ |
@@ -648,6 +652,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, | |||
648 | !IS_HASWELL(dev_priv)) | 652 | !IS_HASWELL(dev_priv)) |
649 | return 0; | 653 | return 0; |
650 | 654 | ||
655 | i915_audio_component_get_power(dev); | ||
651 | mutex_lock(&dev_priv->av_mutex); | 656 | mutex_lock(&dev_priv->av_mutex); |
652 | /* 1. get the pipe */ | 657 | /* 1. get the pipe */ |
653 | intel_encoder = dev_priv->dig_port_map[port]; | 658 | intel_encoder = dev_priv->dig_port_map[port]; |
@@ -698,6 +703,7 @@ static int i915_audio_component_sync_audio_rate(struct device *dev, | |||
698 | 703 | ||
699 | unlock: | 704 | unlock: |
700 | mutex_unlock(&dev_priv->av_mutex); | 705 | mutex_unlock(&dev_priv->av_mutex); |
706 | i915_audio_component_put_power(dev); | ||
701 | return err; | 707 | return err; |
702 | } | 708 | } |
703 | 709 | ||
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index dd1d6fe12297..1a7efac65fd5 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -145,7 +145,7 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = { | |||
145 | static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { | 145 | static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { |
146 | { 0x0000201B, 0x000000A2, 0x0 }, | 146 | { 0x0000201B, 0x000000A2, 0x0 }, |
147 | { 0x00005012, 0x00000088, 0x0 }, | 147 | { 0x00005012, 0x00000088, 0x0 }, |
148 | { 0x80007011, 0x000000CD, 0x0 }, | 148 | { 0x80007011, 0x000000CD, 0x1 }, |
149 | { 0x80009010, 0x000000C0, 0x1 }, | 149 | { 0x80009010, 0x000000C0, 0x1 }, |
150 | { 0x0000201B, 0x0000009D, 0x0 }, | 150 | { 0x0000201B, 0x0000009D, 0x0 }, |
151 | { 0x80005012, 0x000000C0, 0x1 }, | 151 | { 0x80005012, 0x000000C0, 0x1 }, |
@@ -158,7 +158,7 @@ static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = { | |||
158 | static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = { | 158 | static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = { |
159 | { 0x00000018, 0x000000A2, 0x0 }, | 159 | { 0x00000018, 0x000000A2, 0x0 }, |
160 | { 0x00005012, 0x00000088, 0x0 }, | 160 | { 0x00005012, 0x00000088, 0x0 }, |
161 | { 0x80007011, 0x000000CD, 0x0 }, | 161 | { 0x80007011, 0x000000CD, 0x3 }, |
162 | { 0x80009010, 0x000000C0, 0x3 }, | 162 | { 0x80009010, 0x000000C0, 0x3 }, |
163 | { 0x00000018, 0x0000009D, 0x0 }, | 163 | { 0x00000018, 0x0000009D, 0x0 }, |
164 | { 0x80005012, 0x000000C0, 0x3 }, | 164 | { 0x80005012, 0x000000C0, 0x3 }, |
@@ -388,6 +388,40 @@ skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries) | |||
388 | } | 388 | } |
389 | } | 389 | } |
390 | 390 | ||
391 | static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) | ||
392 | { | ||
393 | int n_hdmi_entries; | ||
394 | int hdmi_level; | ||
395 | int hdmi_default_entry; | ||
396 | |||
397 | hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; | ||
398 | |||
399 | if (IS_BROXTON(dev_priv)) | ||
400 | return hdmi_level; | ||
401 | |||
402 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { | ||
403 | skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries); | ||
404 | hdmi_default_entry = 8; | ||
405 | } else if (IS_BROADWELL(dev_priv)) { | ||
406 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); | ||
407 | hdmi_default_entry = 7; | ||
408 | } else if (IS_HASWELL(dev_priv)) { | ||
409 | n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); | ||
410 | hdmi_default_entry = 6; | ||
411 | } else { | ||
412 | WARN(1, "ddi translation table missing\n"); | ||
413 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); | ||
414 | hdmi_default_entry = 7; | ||
415 | } | ||
416 | |||
417 | /* Choose a good default if VBT is badly populated */ | ||
418 | if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN || | ||
419 | hdmi_level >= n_hdmi_entries) | ||
420 | hdmi_level = hdmi_default_entry; | ||
421 | |||
422 | return hdmi_level; | ||
423 | } | ||
424 | |||
391 | /* | 425 | /* |
392 | * Starting with Haswell, DDI port buffers must be programmed with correct | 426 | * Starting with Haswell, DDI port buffers must be programmed with correct |
393 | * values in advance. The buffer values are different for FDI and DP modes, | 427 | * values in advance. The buffer values are different for FDI and DP modes, |
@@ -399,7 +433,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
399 | { | 433 | { |
400 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 434 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
401 | u32 iboost_bit = 0; | 435 | u32 iboost_bit = 0; |
402 | int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry, | 436 | int i, n_hdmi_entries, n_dp_entries, n_edp_entries, |
403 | size; | 437 | size; |
404 | int hdmi_level; | 438 | int hdmi_level; |
405 | enum port port; | 439 | enum port port; |
@@ -410,7 +444,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
410 | const struct ddi_buf_trans *ddi_translations; | 444 | const struct ddi_buf_trans *ddi_translations; |
411 | 445 | ||
412 | port = intel_ddi_get_encoder_port(encoder); | 446 | port = intel_ddi_get_encoder_port(encoder); |
413 | hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; | 447 | hdmi_level = intel_ddi_hdmi_level(dev_priv, port); |
414 | 448 | ||
415 | if (IS_BROXTON(dev_priv)) { | 449 | if (IS_BROXTON(dev_priv)) { |
416 | if (encoder->type != INTEL_OUTPUT_HDMI) | 450 | if (encoder->type != INTEL_OUTPUT_HDMI) |
@@ -430,7 +464,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
430 | skl_get_buf_trans_edp(dev_priv, &n_edp_entries); | 464 | skl_get_buf_trans_edp(dev_priv, &n_edp_entries); |
431 | ddi_translations_hdmi = | 465 | ddi_translations_hdmi = |
432 | skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries); | 466 | skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries); |
433 | hdmi_default_entry = 8; | ||
434 | /* If we're boosting the current, set bit 31 of trans1 */ | 467 | /* If we're boosting the current, set bit 31 of trans1 */ |
435 | if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level || | 468 | if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level || |
436 | dev_priv->vbt.ddi_port_info[port].dp_boost_level) | 469 | dev_priv->vbt.ddi_port_info[port].dp_boost_level) |
@@ -456,7 +489,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
456 | 489 | ||
457 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); | 490 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); |
458 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); | 491 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); |
459 | hdmi_default_entry = 7; | ||
460 | } else if (IS_HASWELL(dev_priv)) { | 492 | } else if (IS_HASWELL(dev_priv)) { |
461 | ddi_translations_fdi = hsw_ddi_translations_fdi; | 493 | ddi_translations_fdi = hsw_ddi_translations_fdi; |
462 | ddi_translations_dp = hsw_ddi_translations_dp; | 494 | ddi_translations_dp = hsw_ddi_translations_dp; |
@@ -464,7 +496,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
464 | ddi_translations_hdmi = hsw_ddi_translations_hdmi; | 496 | ddi_translations_hdmi = hsw_ddi_translations_hdmi; |
465 | n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp); | 497 | n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp); |
466 | n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); | 498 | n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); |
467 | hdmi_default_entry = 6; | ||
468 | } else { | 499 | } else { |
469 | WARN(1, "ddi translation table missing\n"); | 500 | WARN(1, "ddi translation table missing\n"); |
470 | ddi_translations_edp = bdw_ddi_translations_dp; | 501 | ddi_translations_edp = bdw_ddi_translations_dp; |
@@ -474,7 +505,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
474 | n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); | 505 | n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); |
475 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); | 506 | n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); |
476 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); | 507 | n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); |
477 | hdmi_default_entry = 7; | ||
478 | } | 508 | } |
479 | 509 | ||
480 | switch (encoder->type) { | 510 | switch (encoder->type) { |
@@ -505,11 +535,6 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder) | |||
505 | if (encoder->type != INTEL_OUTPUT_HDMI) | 535 | if (encoder->type != INTEL_OUTPUT_HDMI) |
506 | return; | 536 | return; |
507 | 537 | ||
508 | /* Choose a good default if VBT is badly populated */ | ||
509 | if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN || | ||
510 | hdmi_level >= n_hdmi_entries) | ||
511 | hdmi_level = hdmi_default_entry; | ||
512 | |||
513 | /* Entry 9 is for HDMI: */ | 538 | /* Entry 9 is for HDMI: */ |
514 | I915_WRITE(DDI_BUF_TRANS_LO(port, i), | 539 | I915_WRITE(DDI_BUF_TRANS_LO(port, i), |
515 | ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit); | 540 | ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit); |
@@ -1379,14 +1404,30 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) | |||
1379 | TRANS_CLK_SEL_DISABLED); | 1404 | TRANS_CLK_SEL_DISABLED); |
1380 | } | 1405 | } |
1381 | 1406 | ||
1382 | static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv, | 1407 | static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv, |
1383 | u32 level, enum port port, int type) | 1408 | enum port port, uint8_t iboost) |
1384 | { | 1409 | { |
1410 | u32 tmp; | ||
1411 | |||
1412 | tmp = I915_READ(DISPIO_CR_TX_BMU_CR0); | ||
1413 | tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port)); | ||
1414 | if (iboost) | ||
1415 | tmp |= iboost << BALANCE_LEG_SHIFT(port); | ||
1416 | else | ||
1417 | tmp |= BALANCE_LEG_DISABLE(port); | ||
1418 | I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp); | ||
1419 | } | ||
1420 | |||
1421 | static void skl_ddi_set_iboost(struct intel_encoder *encoder, u32 level) | ||
1422 | { | ||
1423 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base); | ||
1424 | struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); | ||
1425 | enum port port = intel_dig_port->port; | ||
1426 | int type = encoder->type; | ||
1385 | const struct ddi_buf_trans *ddi_translations; | 1427 | const struct ddi_buf_trans *ddi_translations; |
1386 | uint8_t iboost; | 1428 | uint8_t iboost; |
1387 | uint8_t dp_iboost, hdmi_iboost; | 1429 | uint8_t dp_iboost, hdmi_iboost; |
1388 | int n_entries; | 1430 | int n_entries; |
1389 | u32 reg; | ||
1390 | 1431 | ||
1391 | /* VBT may override standard boost values */ | 1432 | /* VBT may override standard boost values */ |
1392 | dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level; | 1433 | dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level; |
@@ -1428,16 +1469,10 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv, | |||
1428 | return; | 1469 | return; |
1429 | } | 1470 | } |
1430 | 1471 | ||
1431 | reg = I915_READ(DISPIO_CR_TX_BMU_CR0); | 1472 | _skl_ddi_set_iboost(dev_priv, port, iboost); |
1432 | reg &= ~BALANCE_LEG_MASK(port); | ||
1433 | reg &= ~(1 << (BALANCE_LEG_DISABLE_SHIFT + port)); | ||
1434 | |||
1435 | if (iboost) | ||
1436 | reg |= iboost << BALANCE_LEG_SHIFT(port); | ||
1437 | else | ||
1438 | reg |= 1 << (BALANCE_LEG_DISABLE_SHIFT + port); | ||
1439 | 1473 | ||
1440 | I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg); | 1474 | if (port == PORT_A && intel_dig_port->max_lanes == 4) |
1475 | _skl_ddi_set_iboost(dev_priv, PORT_E, iboost); | ||
1441 | } | 1476 | } |
1442 | 1477 | ||
1443 | static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv, | 1478 | static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv, |
@@ -1568,7 +1603,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp) | |||
1568 | level = translate_signal_level(signal_levels); | 1603 | level = translate_signal_level(signal_levels); |
1569 | 1604 | ||
1570 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | 1605 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) |
1571 | skl_ddi_set_iboost(dev_priv, level, port, encoder->type); | 1606 | skl_ddi_set_iboost(encoder, level); |
1572 | else if (IS_BROXTON(dev_priv)) | 1607 | else if (IS_BROXTON(dev_priv)) |
1573 | bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type); | 1608 | bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type); |
1574 | 1609 | ||
@@ -1637,6 +1672,10 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) | |||
1637 | intel_dp_stop_link_train(intel_dp); | 1672 | intel_dp_stop_link_train(intel_dp); |
1638 | } else if (type == INTEL_OUTPUT_HDMI) { | 1673 | } else if (type == INTEL_OUTPUT_HDMI) { |
1639 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); | 1674 | struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
1675 | int level = intel_ddi_hdmi_level(dev_priv, port); | ||
1676 | |||
1677 | if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) | ||
1678 | skl_ddi_set_iboost(intel_encoder, level); | ||
1640 | 1679 | ||
1641 | intel_hdmi->set_infoframes(encoder, | 1680 | intel_hdmi->set_infoframes(encoder, |
1642 | crtc->config->has_hdmi_sink, | 1681 | crtc->config->has_hdmi_sink, |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index dcf93b3d4fb6..2a751b6e0253 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -3093,40 +3093,110 @@ static void intel_update_primary_planes(struct drm_device *dev) | |||
3093 | 3093 | ||
3094 | for_each_crtc(dev, crtc) { | 3094 | for_each_crtc(dev, crtc) { |
3095 | struct intel_plane *plane = to_intel_plane(crtc->primary); | 3095 | struct intel_plane *plane = to_intel_plane(crtc->primary); |
3096 | struct intel_plane_state *plane_state; | 3096 | struct intel_plane_state *plane_state = |
3097 | 3097 | to_intel_plane_state(plane->base.state); | |
3098 | drm_modeset_lock_crtc(crtc, &plane->base); | ||
3099 | plane_state = to_intel_plane_state(plane->base.state); | ||
3100 | 3098 | ||
3101 | if (plane_state->visible) | 3099 | if (plane_state->visible) |
3102 | plane->update_plane(&plane->base, | 3100 | plane->update_plane(&plane->base, |
3103 | to_intel_crtc_state(crtc->state), | 3101 | to_intel_crtc_state(crtc->state), |
3104 | plane_state); | 3102 | plane_state); |
3103 | } | ||
3104 | } | ||
3105 | |||
3106 | static int | ||
3107 | __intel_display_resume(struct drm_device *dev, | ||
3108 | struct drm_atomic_state *state) | ||
3109 | { | ||
3110 | struct drm_crtc_state *crtc_state; | ||
3111 | struct drm_crtc *crtc; | ||
3112 | int i, ret; | ||
3113 | |||
3114 | intel_modeset_setup_hw_state(dev); | ||
3115 | i915_redisable_vga(dev); | ||
3105 | 3116 | ||
3106 | drm_modeset_unlock_crtc(crtc); | 3117 | if (!state) |
3118 | return 0; | ||
3119 | |||
3120 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | ||
3121 | /* | ||
3122 | * Force recalculation even if we restore | ||
3123 | * current state. With fast modeset this may not result | ||
3124 | * in a modeset when the state is compatible. | ||
3125 | */ | ||
3126 | crtc_state->mode_changed = true; | ||
3107 | } | 3127 | } |
3128 | |||
3129 | /* ignore any reset values/BIOS leftovers in the WM registers */ | ||
3130 | to_intel_atomic_state(state)->skip_intermediate_wm = true; | ||
3131 | |||
3132 | ret = drm_atomic_commit(state); | ||
3133 | |||
3134 | WARN_ON(ret == -EDEADLK); | ||
3135 | return ret; | ||
3108 | } | 3136 | } |
3109 | 3137 | ||
3110 | void intel_prepare_reset(struct drm_i915_private *dev_priv) | 3138 | void intel_prepare_reset(struct drm_i915_private *dev_priv) |
3111 | { | 3139 | { |
3140 | struct drm_device *dev = &dev_priv->drm; | ||
3141 | struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; | ||
3142 | struct drm_atomic_state *state; | ||
3143 | int ret; | ||
3144 | |||
3112 | /* no reset support for gen2 */ | 3145 | /* no reset support for gen2 */ |
3113 | if (IS_GEN2(dev_priv)) | 3146 | if (IS_GEN2(dev_priv)) |
3114 | return; | 3147 | return; |
3115 | 3148 | ||
3116 | /* reset doesn't touch the display */ | 3149 | /* |
3150 | * Need mode_config.mutex so that we don't | ||
3151 | * trample ongoing ->detect() and whatnot. | ||
3152 | */ | ||
3153 | mutex_lock(&dev->mode_config.mutex); | ||
3154 | drm_modeset_acquire_init(ctx, 0); | ||
3155 | while (1) { | ||
3156 | ret = drm_modeset_lock_all_ctx(dev, ctx); | ||
3157 | if (ret != -EDEADLK) | ||
3158 | break; | ||
3159 | |||
3160 | drm_modeset_backoff(ctx); | ||
3161 | } | ||
3162 | |||
3163 | /* reset doesn't touch the display, but flips might get nuked anyway, */ | ||
3117 | if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) | 3164 | if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) |
3118 | return; | 3165 | return; |
3119 | 3166 | ||
3120 | drm_modeset_lock_all(&dev_priv->drm); | ||
3121 | /* | 3167 | /* |
3122 | * Disabling the crtcs gracefully seems nicer. Also the | 3168 | * Disabling the crtcs gracefully seems nicer. Also the |
3123 | * g33 docs say we should at least disable all the planes. | 3169 | * g33 docs say we should at least disable all the planes. |
3124 | */ | 3170 | */ |
3125 | intel_display_suspend(&dev_priv->drm); | 3171 | state = drm_atomic_helper_duplicate_state(dev, ctx); |
3172 | if (IS_ERR(state)) { | ||
3173 | ret = PTR_ERR(state); | ||
3174 | state = NULL; | ||
3175 | DRM_ERROR("Duplicating state failed with %i\n", ret); | ||
3176 | goto err; | ||
3177 | } | ||
3178 | |||
3179 | ret = drm_atomic_helper_disable_all(dev, ctx); | ||
3180 | if (ret) { | ||
3181 | DRM_ERROR("Suspending crtc's failed with %i\n", ret); | ||
3182 | goto err; | ||
3183 | } | ||
3184 | |||
3185 | dev_priv->modeset_restore_state = state; | ||
3186 | state->acquire_ctx = ctx; | ||
3187 | return; | ||
3188 | |||
3189 | err: | ||
3190 | drm_atomic_state_free(state); | ||
3126 | } | 3191 | } |
3127 | 3192 | ||
3128 | void intel_finish_reset(struct drm_i915_private *dev_priv) | 3193 | void intel_finish_reset(struct drm_i915_private *dev_priv) |
3129 | { | 3194 | { |
3195 | struct drm_device *dev = &dev_priv->drm; | ||
3196 | struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; | ||
3197 | struct drm_atomic_state *state = dev_priv->modeset_restore_state; | ||
3198 | int ret; | ||
3199 | |||
3130 | /* | 3200 | /* |
3131 | * Flips in the rings will be nuked by the reset, | 3201 | * Flips in the rings will be nuked by the reset, |
3132 | * so complete all pending flips so that user space | 3202 | * so complete all pending flips so that user space |
@@ -3138,6 +3208,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) | |||
3138 | if (IS_GEN2(dev_priv)) | 3208 | if (IS_GEN2(dev_priv)) |
3139 | return; | 3209 | return; |
3140 | 3210 | ||
3211 | dev_priv->modeset_restore_state = NULL; | ||
3212 | |||
3141 | /* reset doesn't touch the display */ | 3213 | /* reset doesn't touch the display */ |
3142 | if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { | 3214 | if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { |
3143 | /* | 3215 | /* |
@@ -3149,29 +3221,32 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) | |||
3149 | * FIXME: Atomic will make this obsolete since we won't schedule | 3221 | * FIXME: Atomic will make this obsolete since we won't schedule |
3150 | * CS-based flips (which might get lost in gpu resets) any more. | 3222 | * CS-based flips (which might get lost in gpu resets) any more. |
3151 | */ | 3223 | */ |
3152 | intel_update_primary_planes(&dev_priv->drm); | 3224 | intel_update_primary_planes(dev); |
3153 | return; | 3225 | } else { |
3154 | } | 3226 | /* |
3155 | 3227 | * The display has been reset as well, | |
3156 | /* | 3228 | * so need a full re-initialization. |
3157 | * The display has been reset as well, | 3229 | */ |
3158 | * so need a full re-initialization. | 3230 | intel_runtime_pm_disable_interrupts(dev_priv); |
3159 | */ | 3231 | intel_runtime_pm_enable_interrupts(dev_priv); |
3160 | intel_runtime_pm_disable_interrupts(dev_priv); | ||
3161 | intel_runtime_pm_enable_interrupts(dev_priv); | ||
3162 | 3232 | ||
3163 | intel_modeset_init_hw(&dev_priv->drm); | 3233 | intel_modeset_init_hw(dev); |
3164 | 3234 | ||
3165 | spin_lock_irq(&dev_priv->irq_lock); | 3235 | spin_lock_irq(&dev_priv->irq_lock); |
3166 | if (dev_priv->display.hpd_irq_setup) | 3236 | if (dev_priv->display.hpd_irq_setup) |
3167 | dev_priv->display.hpd_irq_setup(dev_priv); | 3237 | dev_priv->display.hpd_irq_setup(dev_priv); |
3168 | spin_unlock_irq(&dev_priv->irq_lock); | 3238 | spin_unlock_irq(&dev_priv->irq_lock); |
3169 | 3239 | ||
3170 | intel_display_resume(&dev_priv->drm); | 3240 | ret = __intel_display_resume(dev, state); |
3241 | if (ret) | ||
3242 | DRM_ERROR("Restoring old state failed with %i\n", ret); | ||
3171 | 3243 | ||
3172 | intel_hpd_init(dev_priv); | 3244 | intel_hpd_init(dev_priv); |
3245 | } | ||
3173 | 3246 | ||
3174 | drm_modeset_unlock_all(&dev_priv->drm); | 3247 | drm_modeset_drop_locks(ctx); |
3248 | drm_modeset_acquire_fini(ctx); | ||
3249 | mutex_unlock(&dev->mode_config.mutex); | ||
3175 | } | 3250 | } |
3176 | 3251 | ||
3177 | static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) | 3252 | static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) |
@@ -16156,9 +16231,10 @@ void intel_display_resume(struct drm_device *dev) | |||
16156 | struct drm_atomic_state *state = dev_priv->modeset_restore_state; | 16231 | struct drm_atomic_state *state = dev_priv->modeset_restore_state; |
16157 | struct drm_modeset_acquire_ctx ctx; | 16232 | struct drm_modeset_acquire_ctx ctx; |
16158 | int ret; | 16233 | int ret; |
16159 | bool setup = false; | ||
16160 | 16234 | ||
16161 | dev_priv->modeset_restore_state = NULL; | 16235 | dev_priv->modeset_restore_state = NULL; |
16236 | if (state) | ||
16237 | state->acquire_ctx = &ctx; | ||
16162 | 16238 | ||
16163 | /* | 16239 | /* |
16164 | * This is a cludge because with real atomic modeset mode_config.mutex | 16240 | * This is a cludge because with real atomic modeset mode_config.mutex |
@@ -16169,43 +16245,17 @@ void intel_display_resume(struct drm_device *dev) | |||
16169 | mutex_lock(&dev->mode_config.mutex); | 16245 | mutex_lock(&dev->mode_config.mutex); |
16170 | drm_modeset_acquire_init(&ctx, 0); | 16246 | drm_modeset_acquire_init(&ctx, 0); |
16171 | 16247 | ||
16172 | retry: | 16248 | while (1) { |
16173 | ret = drm_modeset_lock_all_ctx(dev, &ctx); | 16249 | ret = drm_modeset_lock_all_ctx(dev, &ctx); |
16174 | 16250 | if (ret != -EDEADLK) | |
16175 | if (ret == 0 && !setup) { | 16251 | break; |
16176 | setup = true; | ||
16177 | |||
16178 | intel_modeset_setup_hw_state(dev); | ||
16179 | i915_redisable_vga(dev); | ||
16180 | } | ||
16181 | |||
16182 | if (ret == 0 && state) { | ||
16183 | struct drm_crtc_state *crtc_state; | ||
16184 | struct drm_crtc *crtc; | ||
16185 | int i; | ||
16186 | |||
16187 | state->acquire_ctx = &ctx; | ||
16188 | |||
16189 | /* ignore any reset values/BIOS leftovers in the WM registers */ | ||
16190 | to_intel_atomic_state(state)->skip_intermediate_wm = true; | ||
16191 | |||
16192 | for_each_crtc_in_state(state, crtc, crtc_state, i) { | ||
16193 | /* | ||
16194 | * Force recalculation even if we restore | ||
16195 | * current state. With fast modeset this may not result | ||
16196 | * in a modeset when the state is compatible. | ||
16197 | */ | ||
16198 | crtc_state->mode_changed = true; | ||
16199 | } | ||
16200 | |||
16201 | ret = drm_atomic_commit(state); | ||
16202 | } | ||
16203 | 16252 | ||
16204 | if (ret == -EDEADLK) { | ||
16205 | drm_modeset_backoff(&ctx); | 16253 | drm_modeset_backoff(&ctx); |
16206 | goto retry; | ||
16207 | } | 16254 | } |
16208 | 16255 | ||
16256 | if (!ret) | ||
16257 | ret = __intel_display_resume(dev, state); | ||
16258 | |||
16209 | drm_modeset_drop_locks(&ctx); | 16259 | drm_modeset_drop_locks(&ctx); |
16210 | drm_modeset_acquire_fini(&ctx); | 16260 | drm_modeset_acquire_fini(&ctx); |
16211 | mutex_unlock(&dev->mode_config.mutex); | 16261 | mutex_unlock(&dev->mode_config.mutex); |
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 6a7ad3ed1463..3836a1c79714 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c | |||
@@ -1230,12 +1230,29 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) | |||
1230 | if (i915.enable_fbc >= 0) | 1230 | if (i915.enable_fbc >= 0) |
1231 | return !!i915.enable_fbc; | 1231 | return !!i915.enable_fbc; |
1232 | 1232 | ||
1233 | if (!HAS_FBC(dev_priv)) | ||
1234 | return 0; | ||
1235 | |||
1233 | if (IS_BROADWELL(dev_priv)) | 1236 | if (IS_BROADWELL(dev_priv)) |
1234 | return 1; | 1237 | return 1; |
1235 | 1238 | ||
1236 | return 0; | 1239 | return 0; |
1237 | } | 1240 | } |
1238 | 1241 | ||
1242 | static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) | ||
1243 | { | ||
1244 | #ifdef CONFIG_INTEL_IOMMU | ||
1245 | /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ | ||
1246 | if (intel_iommu_gfx_mapped && | ||
1247 | (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) { | ||
1248 | DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); | ||
1249 | return true; | ||
1250 | } | ||
1251 | #endif | ||
1252 | |||
1253 | return false; | ||
1254 | } | ||
1255 | |||
1239 | /** | 1256 | /** |
1240 | * intel_fbc_init - Initialize FBC | 1257 | * intel_fbc_init - Initialize FBC |
1241 | * @dev_priv: the i915 device | 1258 | * @dev_priv: the i915 device |
@@ -1253,6 +1270,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) | |||
1253 | fbc->active = false; | 1270 | fbc->active = false; |
1254 | fbc->work.scheduled = false; | 1271 | fbc->work.scheduled = false; |
1255 | 1272 | ||
1273 | if (need_fbc_vtd_wa(dev_priv)) | ||
1274 | mkwrite_device_info(dev_priv)->has_fbc = false; | ||
1275 | |||
1256 | i915.enable_fbc = intel_sanitize_fbc_option(dev_priv); | 1276 | i915.enable_fbc = intel_sanitize_fbc_option(dev_priv); |
1257 | DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc); | 1277 | DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc); |
1258 | 1278 | ||
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 97ba6c8cf907..d5deb58a2128 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -3344,6 +3344,8 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal, | |||
3344 | plane_bytes_per_line *= 4; | 3344 | plane_bytes_per_line *= 4; |
3345 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); | 3345 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); |
3346 | plane_blocks_per_line /= 4; | 3346 | plane_blocks_per_line /= 4; |
3347 | } else if (tiling == DRM_FORMAT_MOD_NONE) { | ||
3348 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1; | ||
3347 | } else { | 3349 | } else { |
3348 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); | 3350 | plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); |
3349 | } | 3351 | } |
@@ -6574,9 +6576,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) | |||
6574 | 6576 | ||
6575 | void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) | 6577 | void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) |
6576 | { | 6578 | { |
6577 | if (IS_CHERRYVIEW(dev_priv)) | 6579 | if (IS_VALLEYVIEW(dev_priv)) |
6578 | return; | ||
6579 | else if (IS_VALLEYVIEW(dev_priv)) | ||
6580 | valleyview_cleanup_gt_powersave(dev_priv); | 6580 | valleyview_cleanup_gt_powersave(dev_priv); |
6581 | 6581 | ||
6582 | if (!i915.enable_rc6) | 6582 | if (!i915.enable_rc6) |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index cca7792f26d5..1d3161bbea24 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -1178,8 +1178,8 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) | |||
1178 | I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | | 1178 | I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | |
1179 | L3_HIGH_PRIO_CREDITS(2)); | 1179 | L3_HIGH_PRIO_CREDITS(2)); |
1180 | 1180 | ||
1181 | /* WaInsertDummyPushConstPs:bxt */ | 1181 | /* WaToEnableHwFixForPushConstHWBug:bxt */ |
1182 | if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) | 1182 | if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) |
1183 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, | 1183 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, |
1184 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); | 1184 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); |
1185 | 1185 | ||
@@ -1222,8 +1222,8 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) | |||
1222 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | | 1222 | I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | |
1223 | GEN8_LQSC_RO_PERF_DIS); | 1223 | GEN8_LQSC_RO_PERF_DIS); |
1224 | 1224 | ||
1225 | /* WaInsertDummyPushConstPs:kbl */ | 1225 | /* WaToEnableHwFixForPushConstHWBug:kbl */ |
1226 | if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0)) | 1226 | if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER)) |
1227 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, | 1227 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, |
1228 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); | 1228 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); |
1229 | 1229 | ||
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index 23ac8041c562..294de4549922 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig | |||
@@ -2,6 +2,9 @@ config DRM_MEDIATEK | |||
2 | tristate "DRM Support for Mediatek SoCs" | 2 | tristate "DRM Support for Mediatek SoCs" |
3 | depends on DRM | 3 | depends on DRM |
4 | depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST) | 4 | depends on ARCH_MEDIATEK || (ARM && COMPILE_TEST) |
5 | depends on COMMON_CLK | ||
6 | depends on HAVE_ARM_SMCCC | ||
7 | depends on OF | ||
5 | select DRM_GEM_CMA_HELPER | 8 | select DRM_GEM_CMA_HELPER |
6 | select DRM_KMS_HELPER | 9 | select DRM_KMS_HELPER |
7 | select DRM_MIPI_DSI | 10 | select DRM_MIPI_DSI |
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 6de342861202..ddef0d494084 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c | |||
@@ -198,16 +198,7 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx) | |||
198 | atpx->is_hybrid = false; | 198 | atpx->is_hybrid = false; |
199 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { | 199 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { |
200 | printk("ATPX Hybrid Graphics\n"); | 200 | printk("ATPX Hybrid Graphics\n"); |
201 | #if 1 | ||
202 | /* This is a temporary hack until the D3 cold support | ||
203 | * makes it upstream. The ATPX power_control method seems | ||
204 | * to still work on even if the system should be using | ||
205 | * the new standardized hybrid D3 cold ACPI interface. | ||
206 | */ | ||
207 | atpx->functions.power_cntl = true; | ||
208 | #else | ||
209 | atpx->functions.power_cntl = false; | 201 | atpx->functions.power_cntl = false; |
210 | #endif | ||
211 | atpx->is_hybrid = true; | 202 | atpx->is_hybrid = true; |
212 | } | 203 | } |
213 | 204 | ||
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 730d84028260..d0203a115eff 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c | |||
@@ -491,7 +491,7 @@ struct it87_sio_data { | |||
491 | struct it87_data { | 491 | struct it87_data { |
492 | const struct attribute_group *groups[7]; | 492 | const struct attribute_group *groups[7]; |
493 | enum chips type; | 493 | enum chips type; |
494 | u16 features; | 494 | u32 features; |
495 | u8 peci_mask; | 495 | u8 peci_mask; |
496 | u8 old_peci_mask; | 496 | u8 old_peci_mask; |
497 | 497 | ||
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index f23372669f77..1bb97f658b47 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */ | 38 | #define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */ |
39 | #define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */ | 39 | #define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */ |
40 | #define AUTOSUSPEND_TIMEOUT 2000 | 40 | #define AUTOSUSPEND_TIMEOUT 2000 |
41 | #define AT91_I2C_MAX_ALT_CMD_DATA_SIZE 256 | ||
41 | 42 | ||
42 | /* AT91 TWI register definitions */ | 43 | /* AT91 TWI register definitions */ |
43 | #define AT91_TWI_CR 0x0000 /* Control Register */ | 44 | #define AT91_TWI_CR 0x0000 /* Control Register */ |
@@ -141,6 +142,7 @@ struct at91_twi_dev { | |||
141 | unsigned twi_cwgr_reg; | 142 | unsigned twi_cwgr_reg; |
142 | struct at91_twi_pdata *pdata; | 143 | struct at91_twi_pdata *pdata; |
143 | bool use_dma; | 144 | bool use_dma; |
145 | bool use_alt_cmd; | ||
144 | bool recv_len_abort; | 146 | bool recv_len_abort; |
145 | u32 fifo_size; | 147 | u32 fifo_size; |
146 | struct at91_twi_dma dma; | 148 | struct at91_twi_dma dma; |
@@ -269,7 +271,7 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev) | |||
269 | 271 | ||
270 | /* send stop when last byte has been written */ | 272 | /* send stop when last byte has been written */ |
271 | if (--dev->buf_len == 0) | 273 | if (--dev->buf_len == 0) |
272 | if (!dev->pdata->has_alt_cmd) | 274 | if (!dev->use_alt_cmd) |
273 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); | 275 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); |
274 | 276 | ||
275 | dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len); | 277 | dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len); |
@@ -292,7 +294,7 @@ static void at91_twi_write_data_dma_callback(void *data) | |||
292 | * we just have to enable TXCOMP one. | 294 | * we just have to enable TXCOMP one. |
293 | */ | 295 | */ |
294 | at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); | 296 | at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP); |
295 | if (!dev->pdata->has_alt_cmd) | 297 | if (!dev->use_alt_cmd) |
296 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); | 298 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); |
297 | } | 299 | } |
298 | 300 | ||
@@ -410,7 +412,7 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev) | |||
410 | } | 412 | } |
411 | 413 | ||
412 | /* send stop if second but last byte has been read */ | 414 | /* send stop if second but last byte has been read */ |
413 | if (!dev->pdata->has_alt_cmd && dev->buf_len == 1) | 415 | if (!dev->use_alt_cmd && dev->buf_len == 1) |
414 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); | 416 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); |
415 | 417 | ||
416 | dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len); | 418 | dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len); |
@@ -426,7 +428,7 @@ static void at91_twi_read_data_dma_callback(void *data) | |||
426 | dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]), | 428 | dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]), |
427 | dev->buf_len, DMA_FROM_DEVICE); | 429 | dev->buf_len, DMA_FROM_DEVICE); |
428 | 430 | ||
429 | if (!dev->pdata->has_alt_cmd) { | 431 | if (!dev->use_alt_cmd) { |
430 | /* The last two bytes have to be read without using dma */ | 432 | /* The last two bytes have to be read without using dma */ |
431 | dev->buf += dev->buf_len - 2; | 433 | dev->buf += dev->buf_len - 2; |
432 | dev->buf_len = 2; | 434 | dev->buf_len = 2; |
@@ -443,7 +445,7 @@ static void at91_twi_read_data_dma(struct at91_twi_dev *dev) | |||
443 | struct dma_chan *chan_rx = dma->chan_rx; | 445 | struct dma_chan *chan_rx = dma->chan_rx; |
444 | size_t buf_len; | 446 | size_t buf_len; |
445 | 447 | ||
446 | buf_len = (dev->pdata->has_alt_cmd) ? dev->buf_len : dev->buf_len - 2; | 448 | buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2; |
447 | dma->direction = DMA_FROM_DEVICE; | 449 | dma->direction = DMA_FROM_DEVICE; |
448 | 450 | ||
449 | /* Keep in mind that we won't use dma to read the last two bytes */ | 451 | /* Keep in mind that we won't use dma to read the last two bytes */ |
@@ -651,7 +653,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) | |||
651 | unsigned start_flags = AT91_TWI_START; | 653 | unsigned start_flags = AT91_TWI_START; |
652 | 654 | ||
653 | /* if only one byte is to be read, immediately stop transfer */ | 655 | /* if only one byte is to be read, immediately stop transfer */ |
654 | if (!has_alt_cmd && dev->buf_len <= 1 && | 656 | if (!dev->use_alt_cmd && dev->buf_len <= 1 && |
655 | !(dev->msg->flags & I2C_M_RECV_LEN)) | 657 | !(dev->msg->flags & I2C_M_RECV_LEN)) |
656 | start_flags |= AT91_TWI_STOP; | 658 | start_flags |= AT91_TWI_STOP; |
657 | at91_twi_write(dev, AT91_TWI_CR, start_flags); | 659 | at91_twi_write(dev, AT91_TWI_CR, start_flags); |
@@ -745,7 +747,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) | |||
745 | int ret; | 747 | int ret; |
746 | unsigned int_addr_flag = 0; | 748 | unsigned int_addr_flag = 0; |
747 | struct i2c_msg *m_start = msg; | 749 | struct i2c_msg *m_start = msg; |
748 | bool is_read, use_alt_cmd = false; | 750 | bool is_read; |
749 | 751 | ||
750 | dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num); | 752 | dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num); |
751 | 753 | ||
@@ -768,14 +770,16 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) | |||
768 | at91_twi_write(dev, AT91_TWI_IADR, internal_address); | 770 | at91_twi_write(dev, AT91_TWI_IADR, internal_address); |
769 | } | 771 | } |
770 | 772 | ||
773 | dev->use_alt_cmd = false; | ||
771 | is_read = (m_start->flags & I2C_M_RD); | 774 | is_read = (m_start->flags & I2C_M_RD); |
772 | if (dev->pdata->has_alt_cmd) { | 775 | if (dev->pdata->has_alt_cmd) { |
773 | if (m_start->len > 0) { | 776 | if (m_start->len > 0 && |
777 | m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) { | ||
774 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN); | 778 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN); |
775 | at91_twi_write(dev, AT91_TWI_ACR, | 779 | at91_twi_write(dev, AT91_TWI_ACR, |
776 | AT91_TWI_ACR_DATAL(m_start->len) | | 780 | AT91_TWI_ACR_DATAL(m_start->len) | |
777 | ((is_read) ? AT91_TWI_ACR_DIR : 0)); | 781 | ((is_read) ? AT91_TWI_ACR_DIR : 0)); |
778 | use_alt_cmd = true; | 782 | dev->use_alt_cmd = true; |
779 | } else { | 783 | } else { |
780 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS); | 784 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS); |
781 | } | 785 | } |
@@ -784,7 +788,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) | |||
784 | at91_twi_write(dev, AT91_TWI_MMR, | 788 | at91_twi_write(dev, AT91_TWI_MMR, |
785 | (m_start->addr << 16) | | 789 | (m_start->addr << 16) | |
786 | int_addr_flag | | 790 | int_addr_flag | |
787 | ((!use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0)); | 791 | ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0)); |
788 | 792 | ||
789 | dev->buf_len = m_start->len; | 793 | dev->buf_len = m_start->len; |
790 | dev->buf = m_start->buf; | 794 | dev->buf = m_start->buf; |
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 19c843828fe2..95f7cac76f89 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c | |||
@@ -158,7 +158,7 @@ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data) | |||
158 | 158 | ||
159 | if (status & BIT(IS_M_START_BUSY_SHIFT)) { | 159 | if (status & BIT(IS_M_START_BUSY_SHIFT)) { |
160 | iproc_i2c->xfer_is_done = 1; | 160 | iproc_i2c->xfer_is_done = 1; |
161 | complete_all(&iproc_i2c->done); | 161 | complete(&iproc_i2c->done); |
162 | } | 162 | } |
163 | 163 | ||
164 | writel(status, iproc_i2c->base + IS_OFFSET); | 164 | writel(status, iproc_i2c->base + IS_OFFSET); |
diff --git a/drivers/i2c/busses/i2c-bcm-kona.c b/drivers/i2c/busses/i2c-bcm-kona.c index ac9f47679c3a..f98743277e3c 100644 --- a/drivers/i2c/busses/i2c-bcm-kona.c +++ b/drivers/i2c/busses/i2c-bcm-kona.c | |||
@@ -229,7 +229,7 @@ static irqreturn_t bcm_kona_i2c_isr(int irq, void *devid) | |||
229 | dev->base + TXFCR_OFFSET); | 229 | dev->base + TXFCR_OFFSET); |
230 | 230 | ||
231 | writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET); | 231 | writel(status & ~ISR_RESERVED_MASK, dev->base + ISR_OFFSET); |
232 | complete_all(&dev->done); | 232 | complete(&dev->done); |
233 | 233 | ||
234 | return IRQ_HANDLED; | 234 | return IRQ_HANDLED; |
235 | } | 235 | } |
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c index 3f5a4d71d3bf..385b57bfcb38 100644 --- a/drivers/i2c/busses/i2c-brcmstb.c +++ b/drivers/i2c/busses/i2c-brcmstb.c | |||
@@ -228,7 +228,7 @@ static irqreturn_t brcmstb_i2c_isr(int irq, void *devid) | |||
228 | return IRQ_NONE; | 228 | return IRQ_NONE; |
229 | 229 | ||
230 | brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE); | 230 | brcmstb_i2c_enable_disable_irq(dev, INT_DISABLE); |
231 | complete_all(&dev->done); | 231 | complete(&dev->done); |
232 | 232 | ||
233 | dev_dbg(dev->device, "isr handled"); | 233 | dev_dbg(dev->device, "isr handled"); |
234 | return IRQ_HANDLED; | 234 | return IRQ_HANDLED; |
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c index a0d95ff682ae..2d5ff86398d0 100644 --- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c +++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c | |||
@@ -215,7 +215,7 @@ static int ec_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg i2c_msgs[], | |||
215 | msg->outsize = request_len; | 215 | msg->outsize = request_len; |
216 | msg->insize = response_len; | 216 | msg->insize = response_len; |
217 | 217 | ||
218 | result = cros_ec_cmd_xfer(bus->ec, msg); | 218 | result = cros_ec_cmd_xfer_status(bus->ec, msg); |
219 | if (result < 0) { | 219 | if (result < 0) { |
220 | dev_err(dev, "Error transferring EC i2c message %d\n", result); | 220 | dev_err(dev, "Error transferring EC i2c message %d\n", result); |
221 | goto exit; | 221 | goto exit; |
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c index 71d3929adf54..76e28980904f 100644 --- a/drivers/i2c/busses/i2c-meson.c +++ b/drivers/i2c/busses/i2c-meson.c | |||
@@ -211,7 +211,7 @@ static void meson_i2c_stop(struct meson_i2c *i2c) | |||
211 | meson_i2c_add_token(i2c, TOKEN_STOP); | 211 | meson_i2c_add_token(i2c, TOKEN_STOP); |
212 | } else { | 212 | } else { |
213 | i2c->state = STATE_IDLE; | 213 | i2c->state = STATE_IDLE; |
214 | complete_all(&i2c->done); | 214 | complete(&i2c->done); |
215 | } | 215 | } |
216 | } | 216 | } |
217 | 217 | ||
@@ -238,7 +238,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id) | |||
238 | dev_dbg(i2c->dev, "error bit set\n"); | 238 | dev_dbg(i2c->dev, "error bit set\n"); |
239 | i2c->error = -ENXIO; | 239 | i2c->error = -ENXIO; |
240 | i2c->state = STATE_IDLE; | 240 | i2c->state = STATE_IDLE; |
241 | complete_all(&i2c->done); | 241 | complete(&i2c->done); |
242 | goto out; | 242 | goto out; |
243 | } | 243 | } |
244 | 244 | ||
@@ -269,7 +269,7 @@ static irqreturn_t meson_i2c_irq(int irqno, void *dev_id) | |||
269 | break; | 269 | break; |
270 | case STATE_STOP: | 270 | case STATE_STOP: |
271 | i2c->state = STATE_IDLE; | 271 | i2c->state = STATE_IDLE; |
272 | complete_all(&i2c->done); | 272 | complete(&i2c->done); |
273 | break; | 273 | break; |
274 | case STATE_IDLE: | 274 | case STATE_IDLE: |
275 | break; | 275 | break; |
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index dfa7a4b4a91d..ac88a524143e 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c | |||
@@ -379,6 +379,7 @@ static int ocores_i2c_of_probe(struct platform_device *pdev, | |||
379 | if (!clock_frequency_present) { | 379 | if (!clock_frequency_present) { |
380 | dev_err(&pdev->dev, | 380 | dev_err(&pdev->dev, |
381 | "Missing required parameter 'opencores,ip-clock-frequency'\n"); | 381 | "Missing required parameter 'opencores,ip-clock-frequency'\n"); |
382 | clk_disable_unprepare(i2c->clk); | ||
382 | return -ENODEV; | 383 | return -ENODEV; |
383 | } | 384 | } |
384 | i2c->ip_clock_khz = clock_frequency / 1000; | 385 | i2c->ip_clock_khz = clock_frequency / 1000; |
@@ -467,20 +468,21 @@ static int ocores_i2c_probe(struct platform_device *pdev) | |||
467 | default: | 468 | default: |
468 | dev_err(&pdev->dev, "Unsupported I/O width (%d)\n", | 469 | dev_err(&pdev->dev, "Unsupported I/O width (%d)\n", |
469 | i2c->reg_io_width); | 470 | i2c->reg_io_width); |
470 | return -EINVAL; | 471 | ret = -EINVAL; |
472 | goto err_clk; | ||
471 | } | 473 | } |
472 | } | 474 | } |
473 | 475 | ||
474 | ret = ocores_init(&pdev->dev, i2c); | 476 | ret = ocores_init(&pdev->dev, i2c); |
475 | if (ret) | 477 | if (ret) |
476 | return ret; | 478 | goto err_clk; |
477 | 479 | ||
478 | init_waitqueue_head(&i2c->wait); | 480 | init_waitqueue_head(&i2c->wait); |
479 | ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0, | 481 | ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0, |
480 | pdev->name, i2c); | 482 | pdev->name, i2c); |
481 | if (ret) { | 483 | if (ret) { |
482 | dev_err(&pdev->dev, "Cannot claim IRQ\n"); | 484 | dev_err(&pdev->dev, "Cannot claim IRQ\n"); |
483 | return ret; | 485 | goto err_clk; |
484 | } | 486 | } |
485 | 487 | ||
486 | /* hook up driver to tree */ | 488 | /* hook up driver to tree */ |
@@ -494,7 +496,7 @@ static int ocores_i2c_probe(struct platform_device *pdev) | |||
494 | ret = i2c_add_adapter(&i2c->adap); | 496 | ret = i2c_add_adapter(&i2c->adap); |
495 | if (ret) { | 497 | if (ret) { |
496 | dev_err(&pdev->dev, "Failed to add adapter\n"); | 498 | dev_err(&pdev->dev, "Failed to add adapter\n"); |
497 | return ret; | 499 | goto err_clk; |
498 | } | 500 | } |
499 | 501 | ||
500 | /* add in known devices to the bus */ | 502 | /* add in known devices to the bus */ |
@@ -504,6 +506,10 @@ static int ocores_i2c_probe(struct platform_device *pdev) | |||
504 | } | 506 | } |
505 | 507 | ||
506 | return 0; | 508 | return 0; |
509 | |||
510 | err_clk: | ||
511 | clk_disable_unprepare(i2c->clk); | ||
512 | return ret; | ||
507 | } | 513 | } |
508 | 514 | ||
509 | static int ocores_i2c_remove(struct platform_device *pdev) | 515 | static int ocores_i2c_remove(struct platform_device *pdev) |
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 8de073aed001..215ac87f606d 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c | |||
@@ -68,7 +68,7 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne | |||
68 | adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np); | 68 | adap = of_find_i2c_adapter_by_node(priv->chan[new_chan].parent_np); |
69 | if (!adap) { | 69 | if (!adap) { |
70 | ret = -ENODEV; | 70 | ret = -ENODEV; |
71 | goto err; | 71 | goto err_with_revert; |
72 | } | 72 | } |
73 | 73 | ||
74 | p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name); | 74 | p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name); |
@@ -103,6 +103,8 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne | |||
103 | 103 | ||
104 | err_with_put: | 104 | err_with_put: |
105 | i2c_put_adapter(adap); | 105 | i2c_put_adapter(adap); |
106 | err_with_revert: | ||
107 | of_changeset_revert(&priv->chan[new_chan].chgset); | ||
106 | err: | 108 | err: |
107 | dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret); | 109 | dev_err(priv->dev, "failed to setup demux-adapter %d (%d)\n", new_chan, ret); |
108 | return ret; | 110 | return ret; |
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 08a1e2f3690f..00c8a08d56e7 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c | |||
@@ -68,7 +68,8 @@ void iommu_put_dma_cookie(struct iommu_domain *domain) | |||
68 | if (!iovad) | 68 | if (!iovad) |
69 | return; | 69 | return; |
70 | 70 | ||
71 | put_iova_domain(iovad); | 71 | if (iovad->granule) |
72 | put_iova_domain(iovad); | ||
72 | kfree(iovad); | 73 | kfree(iovad); |
73 | domain->iova_cookie = NULL; | 74 | domain->iova_cookie = NULL; |
74 | } | 75 | } |
@@ -151,12 +152,15 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent) | |||
151 | } | 152 | } |
152 | } | 153 | } |
153 | 154 | ||
154 | static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size, | 155 | static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, |
155 | dma_addr_t dma_limit) | 156 | dma_addr_t dma_limit) |
156 | { | 157 | { |
158 | struct iova_domain *iovad = domain->iova_cookie; | ||
157 | unsigned long shift = iova_shift(iovad); | 159 | unsigned long shift = iova_shift(iovad); |
158 | unsigned long length = iova_align(iovad, size) >> shift; | 160 | unsigned long length = iova_align(iovad, size) >> shift; |
159 | 161 | ||
162 | if (domain->geometry.force_aperture) | ||
163 | dma_limit = min(dma_limit, domain->geometry.aperture_end); | ||
160 | /* | 164 | /* |
161 | * Enforce size-alignment to be safe - there could perhaps be an | 165 | * Enforce size-alignment to be safe - there could perhaps be an |
162 | * attribute to control this per-device, or at least per-domain... | 166 | * attribute to control this per-device, or at least per-domain... |
@@ -314,7 +318,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, | |||
314 | if (!pages) | 318 | if (!pages) |
315 | return NULL; | 319 | return NULL; |
316 | 320 | ||
317 | iova = __alloc_iova(iovad, size, dev->coherent_dma_mask); | 321 | iova = __alloc_iova(domain, size, dev->coherent_dma_mask); |
318 | if (!iova) | 322 | if (!iova) |
319 | goto out_free_pages; | 323 | goto out_free_pages; |
320 | 324 | ||
@@ -386,7 +390,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, | |||
386 | phys_addr_t phys = page_to_phys(page) + offset; | 390 | phys_addr_t phys = page_to_phys(page) + offset; |
387 | size_t iova_off = iova_offset(iovad, phys); | 391 | size_t iova_off = iova_offset(iovad, phys); |
388 | size_t len = iova_align(iovad, size + iova_off); | 392 | size_t len = iova_align(iovad, size + iova_off); |
389 | struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev)); | 393 | struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev)); |
390 | 394 | ||
391 | if (!iova) | 395 | if (!iova) |
392 | return DMA_ERROR_CODE; | 396 | return DMA_ERROR_CODE; |
@@ -538,7 +542,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
538 | prev = s; | 542 | prev = s; |
539 | } | 543 | } |
540 | 544 | ||
541 | iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev)); | 545 | iova = __alloc_iova(domain, iova_len, dma_get_mask(dev)); |
542 | if (!iova) | 546 | if (!iova) |
543 | goto out_restore_sg; | 547 | goto out_restore_sg; |
544 | 548 | ||
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 9ed0a8462ccf..3dab13b4a211 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h | |||
@@ -55,19 +55,19 @@ struct mtk_iommu_data { | |||
55 | bool enable_4GB; | 55 | bool enable_4GB; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | static int compare_of(struct device *dev, void *data) | 58 | static inline int compare_of(struct device *dev, void *data) |
59 | { | 59 | { |
60 | return dev->of_node == data; | 60 | return dev->of_node == data; |
61 | } | 61 | } |
62 | 62 | ||
63 | static int mtk_iommu_bind(struct device *dev) | 63 | static inline int mtk_iommu_bind(struct device *dev) |
64 | { | 64 | { |
65 | struct mtk_iommu_data *data = dev_get_drvdata(dev); | 65 | struct mtk_iommu_data *data = dev_get_drvdata(dev); |
66 | 66 | ||
67 | return component_bind_all(dev, &data->smi_imu); | 67 | return component_bind_all(dev, &data->smi_imu); |
68 | } | 68 | } |
69 | 69 | ||
70 | static void mtk_iommu_unbind(struct device *dev) | 70 | static inline void mtk_iommu_unbind(struct device *dev) |
71 | { | 71 | { |
72 | struct mtk_iommu_data *data = dev_get_drvdata(dev); | 72 | struct mtk_iommu_data *data = dev_get_drvdata(dev); |
73 | 73 | ||
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 4e9784b4e0ac..eedba67b0e3e 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -181,7 +181,7 @@ struct crypt_config { | |||
181 | u8 key[0]; | 181 | u8 key[0]; |
182 | }; | 182 | }; |
183 | 183 | ||
184 | #define MIN_IOS 16 | 184 | #define MIN_IOS 64 |
185 | 185 | ||
186 | static void clone_init(struct dm_crypt_io *, struct bio *); | 186 | static void clone_init(struct dm_crypt_io *, struct bio *); |
187 | static void kcryptd_queue_crypt(struct dm_crypt_io *io); | 187 | static void kcryptd_queue_crypt(struct dm_crypt_io *io); |
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 1b9795d75ef8..8abde6b8cedc 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
@@ -191,7 +191,6 @@ struct raid_dev { | |||
191 | #define RT_FLAG_RS_BITMAP_LOADED 2 | 191 | #define RT_FLAG_RS_BITMAP_LOADED 2 |
192 | #define RT_FLAG_UPDATE_SBS 3 | 192 | #define RT_FLAG_UPDATE_SBS 3 |
193 | #define RT_FLAG_RESHAPE_RS 4 | 193 | #define RT_FLAG_RESHAPE_RS 4 |
194 | #define RT_FLAG_KEEP_RS_FROZEN 5 | ||
195 | 194 | ||
196 | /* Array elements of 64 bit needed for rebuild/failed disk bits */ | 195 | /* Array elements of 64 bit needed for rebuild/failed disk bits */ |
197 | #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) | 196 | #define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8) |
@@ -861,6 +860,9 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) | |||
861 | { | 860 | { |
862 | unsigned long min_region_size = rs->ti->len / (1 << 21); | 861 | unsigned long min_region_size = rs->ti->len / (1 << 21); |
863 | 862 | ||
863 | if (rs_is_raid0(rs)) | ||
864 | return 0; | ||
865 | |||
864 | if (!region_size) { | 866 | if (!region_size) { |
865 | /* | 867 | /* |
866 | * Choose a reasonable default. All figures in sectors. | 868 | * Choose a reasonable default. All figures in sectors. |
@@ -930,6 +932,8 @@ static int validate_raid_redundancy(struct raid_set *rs) | |||
930 | rebuild_cnt++; | 932 | rebuild_cnt++; |
931 | 933 | ||
932 | switch (rs->raid_type->level) { | 934 | switch (rs->raid_type->level) { |
935 | case 0: | ||
936 | break; | ||
933 | case 1: | 937 | case 1: |
934 | if (rebuild_cnt >= rs->md.raid_disks) | 938 | if (rebuild_cnt >= rs->md.raid_disks) |
935 | goto too_many; | 939 | goto too_many; |
@@ -2335,6 +2339,13 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) | |||
2335 | case 0: | 2339 | case 0: |
2336 | break; | 2340 | break; |
2337 | default: | 2341 | default: |
2342 | /* | ||
2343 | * We have to keep any raid0 data/metadata device pairs or | ||
2344 | * the MD raid0 personality will fail to start the array. | ||
2345 | */ | ||
2346 | if (rs_is_raid0(rs)) | ||
2347 | continue; | ||
2348 | |||
2338 | dev = container_of(rdev, struct raid_dev, rdev); | 2349 | dev = container_of(rdev, struct raid_dev, rdev); |
2339 | if (dev->meta_dev) | 2350 | if (dev->meta_dev) |
2340 | dm_put_device(ti, dev->meta_dev); | 2351 | dm_put_device(ti, dev->meta_dev); |
@@ -2579,7 +2590,6 @@ static int rs_prepare_reshape(struct raid_set *rs) | |||
2579 | } else { | 2590 | } else { |
2580 | /* Process raid1 without delta_disks */ | 2591 | /* Process raid1 without delta_disks */ |
2581 | mddev->raid_disks = rs->raid_disks; | 2592 | mddev->raid_disks = rs->raid_disks; |
2582 | set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags); | ||
2583 | reshape = false; | 2593 | reshape = false; |
2584 | } | 2594 | } |
2585 | } else { | 2595 | } else { |
@@ -2590,7 +2600,6 @@ static int rs_prepare_reshape(struct raid_set *rs) | |||
2590 | if (reshape) { | 2600 | if (reshape) { |
2591 | set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags); | 2601 | set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags); |
2592 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); | 2602 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
2593 | set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags); | ||
2594 | } else if (mddev->raid_disks < rs->raid_disks) | 2603 | } else if (mddev->raid_disks < rs->raid_disks) |
2595 | /* Create new superblocks and bitmaps, if any new disks */ | 2604 | /* Create new superblocks and bitmaps, if any new disks */ |
2596 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); | 2605 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
@@ -2902,7 +2911,6 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
2902 | goto bad; | 2911 | goto bad; |
2903 | 2912 | ||
2904 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); | 2913 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
2905 | set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags); | ||
2906 | /* Takeover ain't recovery, so disable recovery */ | 2914 | /* Takeover ain't recovery, so disable recovery */ |
2907 | rs_setup_recovery(rs, MaxSector); | 2915 | rs_setup_recovery(rs, MaxSector); |
2908 | rs_set_new(rs); | 2916 | rs_set_new(rs); |
@@ -3386,21 +3394,28 @@ static void raid_postsuspend(struct dm_target *ti) | |||
3386 | { | 3394 | { |
3387 | struct raid_set *rs = ti->private; | 3395 | struct raid_set *rs = ti->private; |
3388 | 3396 | ||
3389 | if (test_and_clear_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) { | 3397 | if (!rs->md.suspended) |
3390 | if (!rs->md.suspended) | 3398 | mddev_suspend(&rs->md); |
3391 | mddev_suspend(&rs->md); | 3399 | |
3392 | rs->md.ro = 1; | 3400 | rs->md.ro = 1; |
3393 | } | ||
3394 | } | 3401 | } |
3395 | 3402 | ||
3396 | static void attempt_restore_of_faulty_devices(struct raid_set *rs) | 3403 | static void attempt_restore_of_faulty_devices(struct raid_set *rs) |
3397 | { | 3404 | { |
3398 | int i; | 3405 | int i; |
3399 | uint64_t failed_devices, cleared_failed_devices = 0; | 3406 | uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS]; |
3400 | unsigned long flags; | 3407 | unsigned long flags; |
3408 | bool cleared = false; | ||
3401 | struct dm_raid_superblock *sb; | 3409 | struct dm_raid_superblock *sb; |
3410 | struct mddev *mddev = &rs->md; | ||
3402 | struct md_rdev *r; | 3411 | struct md_rdev *r; |
3403 | 3412 | ||
3413 | /* RAID personalities have to provide hot add/remove methods or we need to bail out. */ | ||
3414 | if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk) | ||
3415 | return; | ||
3416 | |||
3417 | memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices)); | ||
3418 | |||
3404 | for (i = 0; i < rs->md.raid_disks; i++) { | 3419 | for (i = 0; i < rs->md.raid_disks; i++) { |
3405 | r = &rs->dev[i].rdev; | 3420 | r = &rs->dev[i].rdev; |
3406 | if (test_bit(Faulty, &r->flags) && r->sb_page && | 3421 | if (test_bit(Faulty, &r->flags) && r->sb_page && |
@@ -3420,7 +3435,7 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs) | |||
3420 | * ourselves. | 3435 | * ourselves. |
3421 | */ | 3436 | */ |
3422 | if ((r->raid_disk >= 0) && | 3437 | if ((r->raid_disk >= 0) && |
3423 | (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0)) | 3438 | (mddev->pers->hot_remove_disk(mddev, r) != 0)) |
3424 | /* Failed to revive this device, try next */ | 3439 | /* Failed to revive this device, try next */ |
3425 | continue; | 3440 | continue; |
3426 | 3441 | ||
@@ -3430,22 +3445,30 @@ static void attempt_restore_of_faulty_devices(struct raid_set *rs) | |||
3430 | clear_bit(Faulty, &r->flags); | 3445 | clear_bit(Faulty, &r->flags); |
3431 | clear_bit(WriteErrorSeen, &r->flags); | 3446 | clear_bit(WriteErrorSeen, &r->flags); |
3432 | clear_bit(In_sync, &r->flags); | 3447 | clear_bit(In_sync, &r->flags); |
3433 | if (r->mddev->pers->hot_add_disk(r->mddev, r)) { | 3448 | if (mddev->pers->hot_add_disk(mddev, r)) { |
3434 | r->raid_disk = -1; | 3449 | r->raid_disk = -1; |
3435 | r->saved_raid_disk = -1; | 3450 | r->saved_raid_disk = -1; |
3436 | r->flags = flags; | 3451 | r->flags = flags; |
3437 | } else { | 3452 | } else { |
3438 | r->recovery_offset = 0; | 3453 | r->recovery_offset = 0; |
3439 | cleared_failed_devices |= 1 << i; | 3454 | set_bit(i, (void *) cleared_failed_devices); |
3455 | cleared = true; | ||
3440 | } | 3456 | } |
3441 | } | 3457 | } |
3442 | } | 3458 | } |
3443 | if (cleared_failed_devices) { | 3459 | |
3460 | /* If any failed devices could be cleared, update all sbs failed_devices bits */ | ||
3461 | if (cleared) { | ||
3462 | uint64_t failed_devices[DISKS_ARRAY_ELEMS]; | ||
3463 | |||
3444 | rdev_for_each(r, &rs->md) { | 3464 | rdev_for_each(r, &rs->md) { |
3445 | sb = page_address(r->sb_page); | 3465 | sb = page_address(r->sb_page); |
3446 | failed_devices = le64_to_cpu(sb->failed_devices); | 3466 | sb_retrieve_failed_devices(sb, failed_devices); |
3447 | failed_devices &= ~cleared_failed_devices; | 3467 | |
3448 | sb->failed_devices = cpu_to_le64(failed_devices); | 3468 | for (i = 0; i < DISKS_ARRAY_ELEMS; i++) |
3469 | failed_devices[i] &= ~cleared_failed_devices[i]; | ||
3470 | |||
3471 | sb_update_failed_devices(sb, failed_devices); | ||
3449 | } | 3472 | } |
3450 | } | 3473 | } |
3451 | } | 3474 | } |
@@ -3610,26 +3633,15 @@ static void raid_resume(struct dm_target *ti) | |||
3610 | * devices are reachable again. | 3633 | * devices are reachable again. |
3611 | */ | 3634 | */ |
3612 | attempt_restore_of_faulty_devices(rs); | 3635 | attempt_restore_of_faulty_devices(rs); |
3613 | } else { | 3636 | } |
3614 | mddev->ro = 0; | ||
3615 | mddev->in_sync = 0; | ||
3616 | 3637 | ||
3617 | /* | 3638 | mddev->ro = 0; |
3618 | * When passing in flags to the ctr, we expect userspace | 3639 | mddev->in_sync = 0; |
3619 | * to reset them because they made it to the superblocks | ||
3620 | * and reload the mapping anyway. | ||
3621 | * | ||
3622 | * -> only unfreeze recovery in case of a table reload or | ||
3623 | * we'll have a bogus recovery/reshape position | ||
3624 | * retrieved from the superblock by the ctr because | ||
3625 | * the ongoing recovery/reshape will change it after read. | ||
3626 | */ | ||
3627 | if (!test_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags)) | ||
3628 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); | ||
3629 | 3640 | ||
3630 | if (mddev->suspended) | 3641 | clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
3631 | mddev_resume(mddev); | 3642 | |
3632 | } | 3643 | if (mddev->suspended) |
3644 | mddev_resume(mddev); | ||
3633 | } | 3645 | } |
3634 | 3646 | ||
3635 | static struct target_type raid_target = { | 3647 | static struct target_type raid_target = { |
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c index 4ace1da17db8..6c25213ab38c 100644 --- a/drivers/md/dm-round-robin.c +++ b/drivers/md/dm-round-robin.c | |||
@@ -210,14 +210,17 @@ static struct dm_path *rr_select_path(struct path_selector *ps, size_t nr_bytes) | |||
210 | struct path_info *pi = NULL; | 210 | struct path_info *pi = NULL; |
211 | struct dm_path *current_path = NULL; | 211 | struct dm_path *current_path = NULL; |
212 | 212 | ||
213 | local_irq_save(flags); | ||
213 | current_path = *this_cpu_ptr(s->current_path); | 214 | current_path = *this_cpu_ptr(s->current_path); |
214 | if (current_path) { | 215 | if (current_path) { |
215 | percpu_counter_dec(&s->repeat_count); | 216 | percpu_counter_dec(&s->repeat_count); |
216 | if (percpu_counter_read_positive(&s->repeat_count) > 0) | 217 | if (percpu_counter_read_positive(&s->repeat_count) > 0) { |
218 | local_irq_restore(flags); | ||
217 | return current_path; | 219 | return current_path; |
220 | } | ||
218 | } | 221 | } |
219 | 222 | ||
220 | spin_lock_irqsave(&s->lock, flags); | 223 | spin_lock(&s->lock); |
221 | if (!list_empty(&s->valid_paths)) { | 224 | if (!list_empty(&s->valid_paths)) { |
222 | pi = list_entry(s->valid_paths.next, struct path_info, list); | 225 | pi = list_entry(s->valid_paths.next, struct path_info, list); |
223 | list_move_tail(&pi->list, &s->valid_paths); | 226 | list_move_tail(&pi->list, &s->valid_paths); |
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index 869c83fb3c5d..f00f3e742265 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c | |||
@@ -2185,7 +2185,7 @@ static int gpmc_probe_dt(struct platform_device *pdev) | |||
2185 | return 0; | 2185 | return 0; |
2186 | } | 2186 | } |
2187 | 2187 | ||
2188 | static int gpmc_probe_dt_children(struct platform_device *pdev) | 2188 | static void gpmc_probe_dt_children(struct platform_device *pdev) |
2189 | { | 2189 | { |
2190 | int ret; | 2190 | int ret; |
2191 | struct device_node *child; | 2191 | struct device_node *child; |
@@ -2200,11 +2200,11 @@ static int gpmc_probe_dt_children(struct platform_device *pdev) | |||
2200 | else | 2200 | else |
2201 | ret = gpmc_probe_generic_child(pdev, child); | 2201 | ret = gpmc_probe_generic_child(pdev, child); |
2202 | 2202 | ||
2203 | if (ret) | 2203 | if (ret) { |
2204 | return ret; | 2204 | dev_err(&pdev->dev, "failed to probe DT child '%s': %d\n", |
2205 | child->name, ret); | ||
2206 | } | ||
2205 | } | 2207 | } |
2206 | |||
2207 | return 0; | ||
2208 | } | 2208 | } |
2209 | #else | 2209 | #else |
2210 | static int gpmc_probe_dt(struct platform_device *pdev) | 2210 | static int gpmc_probe_dt(struct platform_device *pdev) |
@@ -2212,9 +2212,8 @@ static int gpmc_probe_dt(struct platform_device *pdev) | |||
2212 | return 0; | 2212 | return 0; |
2213 | } | 2213 | } |
2214 | 2214 | ||
2215 | static int gpmc_probe_dt_children(struct platform_device *pdev) | 2215 | static void gpmc_probe_dt_children(struct platform_device *pdev) |
2216 | { | 2216 | { |
2217 | return 0; | ||
2218 | } | 2217 | } |
2219 | #endif /* CONFIG_OF */ | 2218 | #endif /* CONFIG_OF */ |
2220 | 2219 | ||
@@ -2369,16 +2368,10 @@ static int gpmc_probe(struct platform_device *pdev) | |||
2369 | goto setup_irq_failed; | 2368 | goto setup_irq_failed; |
2370 | } | 2369 | } |
2371 | 2370 | ||
2372 | rc = gpmc_probe_dt_children(pdev); | 2371 | gpmc_probe_dt_children(pdev); |
2373 | if (rc < 0) { | ||
2374 | dev_err(gpmc->dev, "failed to probe DT children\n"); | ||
2375 | goto dt_children_failed; | ||
2376 | } | ||
2377 | 2372 | ||
2378 | return 0; | 2373 | return 0; |
2379 | 2374 | ||
2380 | dt_children_failed: | ||
2381 | gpmc_free_irq(gpmc); | ||
2382 | setup_irq_failed: | 2375 | setup_irq_failed: |
2383 | gpmc_gpio_exit(gpmc); | 2376 | gpmc_gpio_exit(gpmc); |
2384 | gpio_init_failed: | 2377 | gpio_init_failed: |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 1f276fa30ba6..217e8da0628c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -152,7 +152,7 @@ module_param(lacp_rate, charp, 0); | |||
152 | MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; " | 152 | MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; " |
153 | "0 for slow, 1 for fast"); | 153 | "0 for slow, 1 for fast"); |
154 | module_param(ad_select, charp, 0); | 154 | module_param(ad_select, charp, 0); |
155 | MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; " | 155 | MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; " |
156 | "0 for stable (default), 1 for bandwidth, " | 156 | "0 for stable (default), 1 for bandwidth, " |
157 | "2 for count"); | 157 | "2 for count"); |
158 | module_param(min_links, int, 0); | 158 | module_param(min_links, int, 0); |
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h index 8f12bddd5dc9..a0b453ea34c9 100644 --- a/drivers/net/dsa/b53/b53_regs.h +++ b/drivers/net/dsa/b53/b53_regs.h | |||
@@ -258,7 +258,7 @@ | |||
258 | * BCM5325 and BCM5365 share most definitions below | 258 | * BCM5325 and BCM5365 share most definitions below |
259 | */ | 259 | */ |
260 | #define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n)) | 260 | #define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n)) |
261 | #define ARLTBL_MAC_MASK 0xffffffffffff | 261 | #define ARLTBL_MAC_MASK 0xffffffffffffULL |
262 | #define ARLTBL_VID_S 48 | 262 | #define ARLTBL_VID_S 48 |
263 | #define ARLTBL_VID_MASK_25 0xff | 263 | #define ARLTBL_VID_MASK_25 0xff |
264 | #define ARLTBL_VID_MASK 0xfff | 264 | #define ARLTBL_VID_MASK 0xfff |
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d36aedde8cb9..d1d9d3cf9139 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c | |||
@@ -3187,6 +3187,7 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr) | |||
3187 | return err; | 3187 | return err; |
3188 | } | 3188 | } |
3189 | 3189 | ||
3190 | #ifdef CONFIG_NET_DSA_HWMON | ||
3190 | static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, | 3191 | static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, |
3191 | int reg) | 3192 | int reg) |
3192 | { | 3193 | { |
@@ -3212,6 +3213,7 @@ static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page, | |||
3212 | 3213 | ||
3213 | return ret; | 3214 | return ret; |
3214 | } | 3215 | } |
3216 | #endif | ||
3215 | 3217 | ||
3216 | static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port) | 3218 | static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port) |
3217 | { | 3219 | { |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 37a0f463b8de..18bb9556dd00 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | |||
@@ -793,6 +793,8 @@ int xgene_enet_phy_connect(struct net_device *ndev) | |||
793 | netdev_err(ndev, "Could not connect to PHY\n"); | 793 | netdev_err(ndev, "Could not connect to PHY\n"); |
794 | return -ENODEV; | 794 | return -ENODEV; |
795 | } | 795 | } |
796 | #else | ||
797 | return -ENODEV; | ||
796 | #endif | 798 | #endif |
797 | } | 799 | } |
798 | 800 | ||
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 4bff0f3040df..b0da9693f28a 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c | |||
@@ -771,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface) | |||
771 | priv->dev = dev; | 771 | priv->dev = dev; |
772 | 772 | ||
773 | priv->regs = devm_ioremap_resource(dev, &res_regs); | 773 | priv->regs = devm_ioremap_resource(dev, &res_regs); |
774 | if (IS_ERR(priv->regs)) | 774 | if (IS_ERR(priv->regs)) { |
775 | return PTR_ERR(priv->regs); | 775 | err = PTR_ERR(priv->regs); |
776 | goto out_put_node; | ||
777 | } | ||
776 | 778 | ||
777 | dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); | 779 | dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); |
778 | 780 | ||
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ff300f7cf529..659261218d9f 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -12552,10 +12552,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, | |||
12552 | info->data = TG3_RSS_MAX_NUM_QS; | 12552 | info->data = TG3_RSS_MAX_NUM_QS; |
12553 | } | 12553 | } |
12554 | 12554 | ||
12555 | /* The first interrupt vector only | ||
12556 | * handles link interrupts. | ||
12557 | */ | ||
12558 | info->data -= 1; | ||
12559 | return 0; | 12555 | return 0; |
12560 | 12556 | ||
12561 | default: | 12557 | default: |
@@ -14014,6 +14010,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) | |||
14014 | } | 14010 | } |
14015 | 14011 | ||
14016 | if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || | 14012 | if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || |
14013 | (!ec->rx_coalesce_usecs) || | ||
14017 | (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || | 14014 | (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || |
14018 | (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || | 14015 | (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || |
14019 | (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || | 14016 | (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || |
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 36893d8958d4..b6fcf10621b6 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
@@ -403,11 +403,11 @@ | |||
403 | #define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 | 403 | #define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 |
404 | #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 | 404 | #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 |
405 | #define MACB_CAPS_USRIO_DISABLED 0x00000010 | 405 | #define MACB_CAPS_USRIO_DISABLED 0x00000010 |
406 | #define MACB_CAPS_JUMBO 0x00000020 | ||
406 | #define MACB_CAPS_FIFO_MODE 0x10000000 | 407 | #define MACB_CAPS_FIFO_MODE 0x10000000 |
407 | #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 | 408 | #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 |
408 | #define MACB_CAPS_SG_DISABLED 0x40000000 | 409 | #define MACB_CAPS_SG_DISABLED 0x40000000 |
409 | #define MACB_CAPS_MACB_IS_GEM 0x80000000 | 410 | #define MACB_CAPS_MACB_IS_GEM 0x80000000 |
410 | #define MACB_CAPS_JUMBO 0x00000010 | ||
411 | 411 | ||
412 | /* Bit manipulation macros */ | 412 | /* Bit manipulation macros */ |
413 | #define MACB_BIT(name) \ | 413 | #define MACB_BIT(name) \ |
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 1471e16ba719..f45385f5c6e5 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c | |||
@@ -1299,6 +1299,7 @@ static int | |||
1299 | dm9000_open(struct net_device *dev) | 1299 | dm9000_open(struct net_device *dev) |
1300 | { | 1300 | { |
1301 | struct board_info *db = netdev_priv(dev); | 1301 | struct board_info *db = netdev_priv(dev); |
1302 | unsigned int irq_flags = irq_get_trigger_type(dev->irq); | ||
1302 | 1303 | ||
1303 | if (netif_msg_ifup(db)) | 1304 | if (netif_msg_ifup(db)) |
1304 | dev_dbg(db->dev, "enabling %s\n", dev->name); | 1305 | dev_dbg(db->dev, "enabling %s\n", dev->name); |
@@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev) | |||
1306 | /* If there is no IRQ type specified, tell the user that this is a | 1307 | /* If there is no IRQ type specified, tell the user that this is a |
1307 | * problem | 1308 | * problem |
1308 | */ | 1309 | */ |
1309 | if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE) | 1310 | if (irq_flags == IRQF_TRIGGER_NONE) |
1310 | dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); | 1311 | dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); |
1311 | 1312 | ||
1313 | irq_flags |= IRQF_SHARED; | ||
1314 | |||
1312 | /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ | 1315 | /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ |
1313 | iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ | 1316 | iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ |
1314 | mdelay(1); /* delay needs by DM9000B */ | 1317 | mdelay(1); /* delay needs by DM9000B */ |
@@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev) | |||
1316 | /* Initialize DM9000 board */ | 1319 | /* Initialize DM9000 board */ |
1317 | dm9000_init_dm9000(dev); | 1320 | dm9000_init_dm9000(dev); |
1318 | 1321 | ||
1319 | if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED, | 1322 | if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev)) |
1320 | dev->name, dev)) | ||
1321 | return -EAGAIN; | 1323 | return -EAGAIN; |
1322 | /* Now that we have an interrupt handler hooked up we can unmask | 1324 | /* Now that we have an interrupt handler hooked up we can unmask |
1323 | * our interrupts | 1325 | * our interrupts |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 1235c7f2564b..1e1eb92998fb 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | |||
@@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = { | |||
17 | {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)}, | 17 | {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)}, |
18 | {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)}, | 18 | {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)}, |
19 | {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, | 19 | {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, |
20 | {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, | 20 | {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, |
21 | {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, | 21 | {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, |
22 | {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)}, | 22 | {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)}, |
23 | {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, | 23 | {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, |
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 7fd4d54599e4..6b03c8553e59 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c | |||
@@ -2032,7 +2032,8 @@ const struct e1000_info e1000_82574_info = { | |||
2032 | | FLAG2_DISABLE_ASPM_L0S | 2032 | | FLAG2_DISABLE_ASPM_L0S |
2033 | | FLAG2_DISABLE_ASPM_L1 | 2033 | | FLAG2_DISABLE_ASPM_L1 |
2034 | | FLAG2_NO_DISABLE_RX | 2034 | | FLAG2_NO_DISABLE_RX |
2035 | | FLAG2_DMA_BURST, | 2035 | | FLAG2_DMA_BURST |
2036 | | FLAG2_CHECK_SYSTIM_OVERFLOW, | ||
2036 | .pba = 32, | 2037 | .pba = 32, |
2037 | .max_hw_frame_size = DEFAULT_JUMBO, | 2038 | .max_hw_frame_size = DEFAULT_JUMBO, |
2038 | .get_variants = e1000_get_variants_82571, | 2039 | .get_variants = e1000_get_variants_82571, |
@@ -2053,7 +2054,8 @@ const struct e1000_info e1000_82583_info = { | |||
2053 | | FLAG_HAS_CTRLEXT_ON_LOAD, | 2054 | | FLAG_HAS_CTRLEXT_ON_LOAD, |
2054 | .flags2 = FLAG2_DISABLE_ASPM_L0S | 2055 | .flags2 = FLAG2_DISABLE_ASPM_L0S |
2055 | | FLAG2_DISABLE_ASPM_L1 | 2056 | | FLAG2_DISABLE_ASPM_L1 |
2056 | | FLAG2_NO_DISABLE_RX, | 2057 | | FLAG2_NO_DISABLE_RX |
2058 | | FLAG2_CHECK_SYSTIM_OVERFLOW, | ||
2057 | .pba = 32, | 2059 | .pba = 32, |
2058 | .max_hw_frame_size = DEFAULT_JUMBO, | 2060 | .max_hw_frame_size = DEFAULT_JUMBO, |
2059 | .get_variants = e1000_get_variants_82571, | 2061 | .get_variants = e1000_get_variants_82571, |
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index ef96cd11d6d2..879cca47b021 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h | |||
@@ -452,6 +452,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); | |||
452 | #define FLAG2_PCIM2PCI_ARBITER_WA BIT(11) | 452 | #define FLAG2_PCIM2PCI_ARBITER_WA BIT(11) |
453 | #define FLAG2_DFLT_CRC_STRIPPING BIT(12) | 453 | #define FLAG2_DFLT_CRC_STRIPPING BIT(12) |
454 | #define FLAG2_CHECK_RX_HWTSTAMP BIT(13) | 454 | #define FLAG2_CHECK_RX_HWTSTAMP BIT(13) |
455 | #define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14) | ||
455 | 456 | ||
456 | #define E1000_RX_DESC_PS(R, i) \ | 457 | #define E1000_RX_DESC_PS(R, i) \ |
457 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) | 458 | (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) |
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 3e11322d8d58..f3aaca743ea3 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c | |||
@@ -5885,7 +5885,8 @@ const struct e1000_info e1000_pch_lpt_info = { | |||
5885 | | FLAG_HAS_JUMBO_FRAMES | 5885 | | FLAG_HAS_JUMBO_FRAMES |
5886 | | FLAG_APME_IN_WUC, | 5886 | | FLAG_APME_IN_WUC, |
5887 | .flags2 = FLAG2_HAS_PHY_STATS | 5887 | .flags2 = FLAG2_HAS_PHY_STATS |
5888 | | FLAG2_HAS_EEE, | 5888 | | FLAG2_HAS_EEE |
5889 | | FLAG2_CHECK_SYSTIM_OVERFLOW, | ||
5889 | .pba = 26, | 5890 | .pba = 26, |
5890 | .max_hw_frame_size = 9022, | 5891 | .max_hw_frame_size = 9022, |
5891 | .get_variants = e1000_get_variants_ich8lan, | 5892 | .get_variants = e1000_get_variants_ich8lan, |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 02f443958f31..7017281ba2dc 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -4303,6 +4303,42 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter) | |||
4303 | } | 4303 | } |
4304 | 4304 | ||
4305 | /** | 4305 | /** |
4306 | * e1000e_sanitize_systim - sanitize raw cycle counter reads | ||
4307 | * @hw: pointer to the HW structure | ||
4308 | * @systim: cycle_t value read, sanitized and returned | ||
4309 | * | ||
4310 | * Errata for 82574/82583 possible bad bits read from SYSTIMH/L: | ||
4311 | * check to see that the time is incrementing at a reasonable | ||
4312 | * rate and is a multiple of incvalue. | ||
4313 | **/ | ||
4314 | static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim) | ||
4315 | { | ||
4316 | u64 time_delta, rem, temp; | ||
4317 | cycle_t systim_next; | ||
4318 | u32 incvalue; | ||
4319 | int i; | ||
4320 | |||
4321 | incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; | ||
4322 | for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { | ||
4323 | /* latch SYSTIMH on read of SYSTIML */ | ||
4324 | systim_next = (cycle_t)er32(SYSTIML); | ||
4325 | systim_next |= (cycle_t)er32(SYSTIMH) << 32; | ||
4326 | |||
4327 | time_delta = systim_next - systim; | ||
4328 | temp = time_delta; | ||
4329 | /* VMWare users have seen incvalue of zero, don't div / 0 */ | ||
4330 | rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0); | ||
4331 | |||
4332 | systim = systim_next; | ||
4333 | |||
4334 | if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0)) | ||
4335 | break; | ||
4336 | } | ||
4337 | |||
4338 | return systim; | ||
4339 | } | ||
4340 | |||
4341 | /** | ||
4306 | * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) | 4342 | * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) |
4307 | * @cc: cyclecounter structure | 4343 | * @cc: cyclecounter structure |
4308 | **/ | 4344 | **/ |
@@ -4312,7 +4348,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) | |||
4312 | cc); | 4348 | cc); |
4313 | struct e1000_hw *hw = &adapter->hw; | 4349 | struct e1000_hw *hw = &adapter->hw; |
4314 | u32 systimel, systimeh; | 4350 | u32 systimel, systimeh; |
4315 | cycle_t systim, systim_next; | 4351 | cycle_t systim; |
4316 | /* SYSTIMH latching upon SYSTIML read does not work well. | 4352 | /* SYSTIMH latching upon SYSTIML read does not work well. |
4317 | * This means that if SYSTIML overflows after we read it but before | 4353 | * This means that if SYSTIML overflows after we read it but before |
4318 | * we read SYSTIMH, the value of SYSTIMH has been incremented and we | 4354 | * we read SYSTIMH, the value of SYSTIMH has been incremented and we |
@@ -4335,33 +4371,9 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) | |||
4335 | systim = (cycle_t)systimel; | 4371 | systim = (cycle_t)systimel; |
4336 | systim |= (cycle_t)systimeh << 32; | 4372 | systim |= (cycle_t)systimeh << 32; |
4337 | 4373 | ||
4338 | if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { | 4374 | if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) |
4339 | u64 time_delta, rem, temp; | 4375 | systim = e1000e_sanitize_systim(hw, systim); |
4340 | u32 incvalue; | ||
4341 | int i; | ||
4342 | |||
4343 | /* errata for 82574/82583 possible bad bits read from SYSTIMH/L | ||
4344 | * check to see that the time is incrementing at a reasonable | ||
4345 | * rate and is a multiple of incvalue | ||
4346 | */ | ||
4347 | incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; | ||
4348 | for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { | ||
4349 | /* latch SYSTIMH on read of SYSTIML */ | ||
4350 | systim_next = (cycle_t)er32(SYSTIML); | ||
4351 | systim_next |= (cycle_t)er32(SYSTIMH) << 32; | ||
4352 | |||
4353 | time_delta = systim_next - systim; | ||
4354 | temp = time_delta; | ||
4355 | /* VMWare users have seen incvalue of zero, don't div / 0 */ | ||
4356 | rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0); | ||
4357 | |||
4358 | systim = systim_next; | ||
4359 | 4376 | ||
4360 | if ((time_delta < E1000_82574_SYSTIM_EPSILON) && | ||
4361 | (rem == 0)) | ||
4362 | break; | ||
4363 | } | ||
4364 | } | ||
4365 | return systim; | 4377 | return systim; |
4366 | } | 4378 | } |
4367 | 4379 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 81c99e1be708..c6ac7a61812f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -4554,23 +4554,38 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) | |||
4554 | **/ | 4554 | **/ |
4555 | static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) | 4555 | static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) |
4556 | { | 4556 | { |
4557 | int i, tc_unused = 0; | ||
4557 | u8 num_tc = 0; | 4558 | u8 num_tc = 0; |
4558 | int i; | 4559 | u8 ret = 0; |
4559 | 4560 | ||
4560 | /* Scan the ETS Config Priority Table to find | 4561 | /* Scan the ETS Config Priority Table to find |
4561 | * traffic class enabled for a given priority | 4562 | * traffic class enabled for a given priority |
4562 | * and use the traffic class index to get the | 4563 | * and create a bitmask of enabled TCs |
4563 | * number of traffic classes enabled | ||
4564 | */ | 4564 | */ |
4565 | for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { | 4565 | for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) |
4566 | if (dcbcfg->etscfg.prioritytable[i] > num_tc) | 4566 | num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]); |
4567 | num_tc = dcbcfg->etscfg.prioritytable[i]; | ||
4568 | } | ||
4569 | 4567 | ||
4570 | /* Traffic class index starts from zero so | 4568 | /* Now scan the bitmask to check for |
4571 | * increment to return the actual count | 4569 | * contiguous TCs starting with TC0 |
4572 | */ | 4570 | */ |
4573 | return num_tc + 1; | 4571 | for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { |
4572 | if (num_tc & BIT(i)) { | ||
4573 | if (!tc_unused) { | ||
4574 | ret++; | ||
4575 | } else { | ||
4576 | pr_err("Non-contiguous TC - Disabling DCB\n"); | ||
4577 | return 1; | ||
4578 | } | ||
4579 | } else { | ||
4580 | tc_unused = 1; | ||
4581 | } | ||
4582 | } | ||
4583 | |||
4584 | /* There is always at least TC0 */ | ||
4585 | if (!ret) | ||
4586 | ret = 1; | ||
4587 | |||
4588 | return ret; | ||
4574 | } | 4589 | } |
4575 | 4590 | ||
4576 | /** | 4591 | /** |
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index e61b647f5f2a..336c103ae374 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c | |||
@@ -744,7 +744,8 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) | |||
744 | } | 744 | } |
745 | } | 745 | } |
746 | 746 | ||
747 | shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust); | 747 | shhwtstamps.hwtstamp = |
748 | ktime_add_ns(shhwtstamps.hwtstamp, adjust); | ||
748 | 749 | ||
749 | skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); | 750 | skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); |
750 | dev_kfree_skb_any(adapter->ptp_tx_skb); | 751 | dev_kfree_skb_any(adapter->ptp_tx_skb); |
@@ -767,13 +768,32 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, | |||
767 | struct sk_buff *skb) | 768 | struct sk_buff *skb) |
768 | { | 769 | { |
769 | __le64 *regval = (__le64 *)va; | 770 | __le64 *regval = (__le64 *)va; |
771 | struct igb_adapter *adapter = q_vector->adapter; | ||
772 | int adjust = 0; | ||
770 | 773 | ||
771 | /* The timestamp is recorded in little endian format. | 774 | /* The timestamp is recorded in little endian format. |
772 | * DWORD: 0 1 2 3 | 775 | * DWORD: 0 1 2 3 |
773 | * Field: Reserved Reserved SYSTIML SYSTIMH | 776 | * Field: Reserved Reserved SYSTIML SYSTIMH |
774 | */ | 777 | */ |
775 | igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb), | 778 | igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), |
776 | le64_to_cpu(regval[1])); | 779 | le64_to_cpu(regval[1])); |
780 | |||
781 | /* adjust timestamp for the RX latency based on link speed */ | ||
782 | if (adapter->hw.mac.type == e1000_i210) { | ||
783 | switch (adapter->link_speed) { | ||
784 | case SPEED_10: | ||
785 | adjust = IGB_I210_RX_LATENCY_10; | ||
786 | break; | ||
787 | case SPEED_100: | ||
788 | adjust = IGB_I210_RX_LATENCY_100; | ||
789 | break; | ||
790 | case SPEED_1000: | ||
791 | adjust = IGB_I210_RX_LATENCY_1000; | ||
792 | break; | ||
793 | } | ||
794 | } | ||
795 | skb_hwtstamps(skb)->hwtstamp = | ||
796 | ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); | ||
777 | } | 797 | } |
778 | 798 | ||
779 | /** | 799 | /** |
@@ -825,7 +845,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, | |||
825 | } | 845 | } |
826 | } | 846 | } |
827 | skb_hwtstamps(skb)->hwtstamp = | 847 | skb_hwtstamps(skb)->hwtstamp = |
828 | ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust); | 848 | ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); |
829 | 849 | ||
830 | /* Update the last_rx_timestamp timer in order to enable watchdog check | 850 | /* Update the last_rx_timestamp timer in order to enable watchdog check |
831 | * for error case of latched timestamp on a dropped packet. | 851 | * for error case of latched timestamp on a dropped packet. |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 5418c69a7463..b4f03748adc0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -4100,6 +4100,8 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) | |||
4100 | struct ixgbe_hw *hw = &adapter->hw; | 4100 | struct ixgbe_hw *hw = &adapter->hw; |
4101 | u32 vlnctrl, i; | 4101 | u32 vlnctrl, i; |
4102 | 4102 | ||
4103 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | ||
4104 | |||
4103 | switch (hw->mac.type) { | 4105 | switch (hw->mac.type) { |
4104 | case ixgbe_mac_82599EB: | 4106 | case ixgbe_mac_82599EB: |
4105 | case ixgbe_mac_X540: | 4107 | case ixgbe_mac_X540: |
@@ -4112,8 +4114,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) | |||
4112 | /* fall through */ | 4114 | /* fall through */ |
4113 | case ixgbe_mac_82598EB: | 4115 | case ixgbe_mac_82598EB: |
4114 | /* legacy case, we can just disable VLAN filtering */ | 4116 | /* legacy case, we can just disable VLAN filtering */ |
4115 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | 4117 | vlnctrl &= ~IXGBE_VLNCTRL_VFE; |
4116 | vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); | ||
4117 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | 4118 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); |
4118 | return; | 4119 | return; |
4119 | } | 4120 | } |
@@ -4125,6 +4126,10 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) | |||
4125 | /* Set flag so we don't redo unnecessary work */ | 4126 | /* Set flag so we don't redo unnecessary work */ |
4126 | adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; | 4127 | adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; |
4127 | 4128 | ||
4129 | /* For VMDq and SR-IOV we must leave VLAN filtering enabled */ | ||
4130 | vlnctrl |= IXGBE_VLNCTRL_VFE; | ||
4131 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | ||
4132 | |||
4128 | /* Add PF to all active pools */ | 4133 | /* Add PF to all active pools */ |
4129 | for (i = IXGBE_VLVF_ENTRIES; --i;) { | 4134 | for (i = IXGBE_VLVF_ENTRIES; --i;) { |
4130 | u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); | 4135 | u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); |
@@ -4191,6 +4196,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) | |||
4191 | struct ixgbe_hw *hw = &adapter->hw; | 4196 | struct ixgbe_hw *hw = &adapter->hw; |
4192 | u32 vlnctrl, i; | 4197 | u32 vlnctrl, i; |
4193 | 4198 | ||
4199 | /* Set VLAN filtering to enabled */ | ||
4200 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | ||
4201 | vlnctrl |= IXGBE_VLNCTRL_VFE; | ||
4202 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | ||
4203 | |||
4194 | switch (hw->mac.type) { | 4204 | switch (hw->mac.type) { |
4195 | case ixgbe_mac_82599EB: | 4205 | case ixgbe_mac_82599EB: |
4196 | case ixgbe_mac_X540: | 4206 | case ixgbe_mac_X540: |
@@ -4202,10 +4212,6 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) | |||
4202 | break; | 4212 | break; |
4203 | /* fall through */ | 4213 | /* fall through */ |
4204 | case ixgbe_mac_82598EB: | 4214 | case ixgbe_mac_82598EB: |
4205 | vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); | ||
4206 | vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; | ||
4207 | vlnctrl |= IXGBE_VLNCTRL_VFE; | ||
4208 | IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); | ||
4209 | return; | 4215 | return; |
4210 | } | 4216 | } |
4211 | 4217 | ||
@@ -8390,12 +8396,14 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, | |||
8390 | struct tcf_exts *exts, u64 *action, u8 *queue) | 8396 | struct tcf_exts *exts, u64 *action, u8 *queue) |
8391 | { | 8397 | { |
8392 | const struct tc_action *a; | 8398 | const struct tc_action *a; |
8399 | LIST_HEAD(actions); | ||
8393 | int err; | 8400 | int err; |
8394 | 8401 | ||
8395 | if (tc_no_actions(exts)) | 8402 | if (tc_no_actions(exts)) |
8396 | return -EINVAL; | 8403 | return -EINVAL; |
8397 | 8404 | ||
8398 | tc_for_each_action(a, exts) { | 8405 | tcf_exts_to_list(exts, &actions); |
8406 | list_for_each_entry(a, &actions, list) { | ||
8399 | 8407 | ||
8400 | /* Drop action */ | 8408 | /* Drop action */ |
8401 | if (is_tcf_gact_shot(a)) { | 8409 | if (is_tcf_gact_shot(a)) { |
@@ -9517,6 +9525,7 @@ skip_sriov: | |||
9517 | 9525 | ||
9518 | /* copy netdev features into list of user selectable features */ | 9526 | /* copy netdev features into list of user selectable features */ |
9519 | netdev->hw_features |= netdev->features | | 9527 | netdev->hw_features |= netdev->features | |
9528 | NETIF_F_HW_VLAN_CTAG_FILTER | | ||
9520 | NETIF_F_HW_VLAN_CTAG_RX | | 9529 | NETIF_F_HW_VLAN_CTAG_RX | |
9521 | NETIF_F_HW_VLAN_CTAG_TX | | 9530 | NETIF_F_HW_VLAN_CTAG_TX | |
9522 | NETIF_F_RXALL | | 9531 | NETIF_F_RXALL | |
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index b57ae3afb994..f1609542adf1 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | |||
@@ -245,12 +245,16 @@ static int mtk_phy_connect(struct mtk_mac *mac) | |||
245 | case PHY_INTERFACE_MODE_MII: | 245 | case PHY_INTERFACE_MODE_MII: |
246 | ge_mode = 1; | 246 | ge_mode = 1; |
247 | break; | 247 | break; |
248 | case PHY_INTERFACE_MODE_RMII: | 248 | case PHY_INTERFACE_MODE_REVMII: |
249 | ge_mode = 2; | 249 | ge_mode = 2; |
250 | break; | 250 | break; |
251 | case PHY_INTERFACE_MODE_RMII: | ||
252 | if (!mac->id) | ||
253 | goto err_phy; | ||
254 | ge_mode = 3; | ||
255 | break; | ||
251 | default: | 256 | default: |
252 | dev_err(eth->dev, "invalid phy_mode\n"); | 257 | goto err_phy; |
253 | return -1; | ||
254 | } | 258 | } |
255 | 259 | ||
256 | /* put the gmac into the right mode */ | 260 | /* put the gmac into the right mode */ |
@@ -263,13 +267,25 @@ static int mtk_phy_connect(struct mtk_mac *mac) | |||
263 | mac->phy_dev->autoneg = AUTONEG_ENABLE; | 267 | mac->phy_dev->autoneg = AUTONEG_ENABLE; |
264 | mac->phy_dev->speed = 0; | 268 | mac->phy_dev->speed = 0; |
265 | mac->phy_dev->duplex = 0; | 269 | mac->phy_dev->duplex = 0; |
270 | |||
271 | if (of_phy_is_fixed_link(mac->of_node)) | ||
272 | mac->phy_dev->supported |= | ||
273 | SUPPORTED_Pause | SUPPORTED_Asym_Pause; | ||
274 | |||
266 | mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | | 275 | mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | |
267 | SUPPORTED_Asym_Pause; | 276 | SUPPORTED_Asym_Pause; |
268 | mac->phy_dev->advertising = mac->phy_dev->supported | | 277 | mac->phy_dev->advertising = mac->phy_dev->supported | |
269 | ADVERTISED_Autoneg; | 278 | ADVERTISED_Autoneg; |
270 | phy_start_aneg(mac->phy_dev); | 279 | phy_start_aneg(mac->phy_dev); |
271 | 280 | ||
281 | of_node_put(np); | ||
282 | |||
272 | return 0; | 283 | return 0; |
284 | |||
285 | err_phy: | ||
286 | of_node_put(np); | ||
287 | dev_err(eth->dev, "invalid phy_mode\n"); | ||
288 | return -EINVAL; | ||
273 | } | 289 | } |
274 | 290 | ||
275 | static int mtk_mdio_init(struct mtk_eth *eth) | 291 | static int mtk_mdio_init(struct mtk_eth *eth) |
@@ -542,15 +558,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, | |||
542 | return &ring->buf[idx]; | 558 | return &ring->buf[idx]; |
543 | } | 559 | } |
544 | 560 | ||
545 | static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf) | 561 | static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) |
546 | { | 562 | { |
547 | if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { | 563 | if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { |
548 | dma_unmap_single(dev, | 564 | dma_unmap_single(eth->dev, |
549 | dma_unmap_addr(tx_buf, dma_addr0), | 565 | dma_unmap_addr(tx_buf, dma_addr0), |
550 | dma_unmap_len(tx_buf, dma_len0), | 566 | dma_unmap_len(tx_buf, dma_len0), |
551 | DMA_TO_DEVICE); | 567 | DMA_TO_DEVICE); |
552 | } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { | 568 | } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { |
553 | dma_unmap_page(dev, | 569 | dma_unmap_page(eth->dev, |
554 | dma_unmap_addr(tx_buf, dma_addr0), | 570 | dma_unmap_addr(tx_buf, dma_addr0), |
555 | dma_unmap_len(tx_buf, dma_len0), | 571 | dma_unmap_len(tx_buf, dma_len0), |
556 | DMA_TO_DEVICE); | 572 | DMA_TO_DEVICE); |
@@ -595,9 +611,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, | |||
595 | if (skb_vlan_tag_present(skb)) | 611 | if (skb_vlan_tag_present(skb)) |
596 | txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); | 612 | txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); |
597 | 613 | ||
598 | mapped_addr = dma_map_single(&dev->dev, skb->data, | 614 | mapped_addr = dma_map_single(eth->dev, skb->data, |
599 | skb_headlen(skb), DMA_TO_DEVICE); | 615 | skb_headlen(skb), DMA_TO_DEVICE); |
600 | if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) | 616 | if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) |
601 | return -ENOMEM; | 617 | return -ENOMEM; |
602 | 618 | ||
603 | WRITE_ONCE(itxd->txd1, mapped_addr); | 619 | WRITE_ONCE(itxd->txd1, mapped_addr); |
@@ -623,10 +639,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, | |||
623 | 639 | ||
624 | n_desc++; | 640 | n_desc++; |
625 | frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); | 641 | frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); |
626 | mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, | 642 | mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, |
627 | frag_map_size, | 643 | frag_map_size, |
628 | DMA_TO_DEVICE); | 644 | DMA_TO_DEVICE); |
629 | if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) | 645 | if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) |
630 | goto err_dma; | 646 | goto err_dma; |
631 | 647 | ||
632 | if (i == nr_frags - 1 && | 648 | if (i == nr_frags - 1 && |
@@ -679,7 +695,7 @@ err_dma: | |||
679 | tx_buf = mtk_desc_to_tx_buf(ring, itxd); | 695 | tx_buf = mtk_desc_to_tx_buf(ring, itxd); |
680 | 696 | ||
681 | /* unmap dma */ | 697 | /* unmap dma */ |
682 | mtk_tx_unmap(&dev->dev, tx_buf); | 698 | mtk_tx_unmap(eth, tx_buf); |
683 | 699 | ||
684 | itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; | 700 | itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; |
685 | itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); | 701 | itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); |
@@ -836,11 +852,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, | |||
836 | netdev->stats.rx_dropped++; | 852 | netdev->stats.rx_dropped++; |
837 | goto release_desc; | 853 | goto release_desc; |
838 | } | 854 | } |
839 | dma_addr = dma_map_single(ð->netdev[mac]->dev, | 855 | dma_addr = dma_map_single(eth->dev, |
840 | new_data + NET_SKB_PAD, | 856 | new_data + NET_SKB_PAD, |
841 | ring->buf_size, | 857 | ring->buf_size, |
842 | DMA_FROM_DEVICE); | 858 | DMA_FROM_DEVICE); |
843 | if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { | 859 | if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { |
844 | skb_free_frag(new_data); | 860 | skb_free_frag(new_data); |
845 | netdev->stats.rx_dropped++; | 861 | netdev->stats.rx_dropped++; |
846 | goto release_desc; | 862 | goto release_desc; |
@@ -855,7 +871,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, | |||
855 | } | 871 | } |
856 | skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); | 872 | skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); |
857 | 873 | ||
858 | dma_unmap_single(&netdev->dev, trxd.rxd1, | 874 | dma_unmap_single(eth->dev, trxd.rxd1, |
859 | ring->buf_size, DMA_FROM_DEVICE); | 875 | ring->buf_size, DMA_FROM_DEVICE); |
860 | pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); | 876 | pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); |
861 | skb->dev = netdev; | 877 | skb->dev = netdev; |
@@ -937,7 +953,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) | |||
937 | done[mac]++; | 953 | done[mac]++; |
938 | budget--; | 954 | budget--; |
939 | } | 955 | } |
940 | mtk_tx_unmap(eth->dev, tx_buf); | 956 | mtk_tx_unmap(eth, tx_buf); |
941 | 957 | ||
942 | ring->last_free = desc; | 958 | ring->last_free = desc; |
943 | atomic_inc(&ring->free_count); | 959 | atomic_inc(&ring->free_count); |
@@ -1092,7 +1108,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) | |||
1092 | 1108 | ||
1093 | if (ring->buf) { | 1109 | if (ring->buf) { |
1094 | for (i = 0; i < MTK_DMA_SIZE; i++) | 1110 | for (i = 0; i < MTK_DMA_SIZE; i++) |
1095 | mtk_tx_unmap(eth->dev, &ring->buf[i]); | 1111 | mtk_tx_unmap(eth, &ring->buf[i]); |
1096 | kfree(ring->buf); | 1112 | kfree(ring->buf); |
1097 | ring->buf = NULL; | 1113 | ring->buf = NULL; |
1098 | } | 1114 | } |
@@ -1751,6 +1767,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) | |||
1751 | goto free_netdev; | 1767 | goto free_netdev; |
1752 | } | 1768 | } |
1753 | spin_lock_init(&mac->hw_stats->stats_lock); | 1769 | spin_lock_init(&mac->hw_stats->stats_lock); |
1770 | u64_stats_init(&mac->hw_stats->syncp); | ||
1754 | mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; | 1771 | mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; |
1755 | 1772 | ||
1756 | SET_NETDEV_DEV(eth->netdev[id], eth->dev); | 1773 | SET_NETDEV_DEV(eth->netdev[id], eth->dev); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 0f19b01e3fff..dc8b1cb0fdc8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -318,6 +318,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
318 | u32 *action, u32 *flow_tag) | 318 | u32 *action, u32 *flow_tag) |
319 | { | 319 | { |
320 | const struct tc_action *a; | 320 | const struct tc_action *a; |
321 | LIST_HEAD(actions); | ||
321 | 322 | ||
322 | if (tc_no_actions(exts)) | 323 | if (tc_no_actions(exts)) |
323 | return -EINVAL; | 324 | return -EINVAL; |
@@ -325,7 +326,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
325 | *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; | 326 | *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; |
326 | *action = 0; | 327 | *action = 0; |
327 | 328 | ||
328 | tc_for_each_action(a, exts) { | 329 | tcf_exts_to_list(exts, &actions); |
330 | list_for_each_entry(a, &actions, list) { | ||
329 | /* Only support a single action per rule */ | 331 | /* Only support a single action per rule */ |
330 | if (*action) | 332 | if (*action) |
331 | return -EINVAL; | 333 | return -EINVAL; |
@@ -362,13 +364,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
362 | u32 *action, u32 *dest_vport) | 364 | u32 *action, u32 *dest_vport) |
363 | { | 365 | { |
364 | const struct tc_action *a; | 366 | const struct tc_action *a; |
367 | LIST_HEAD(actions); | ||
365 | 368 | ||
366 | if (tc_no_actions(exts)) | 369 | if (tc_no_actions(exts)) |
367 | return -EINVAL; | 370 | return -EINVAL; |
368 | 371 | ||
369 | *action = 0; | 372 | *action = 0; |
370 | 373 | ||
371 | tc_for_each_action(a, exts) { | 374 | tcf_exts_to_list(exts, &actions); |
375 | list_for_each_entry(a, &actions, list) { | ||
372 | /* Only support a single action per rule */ | 376 | /* Only support a single action per rule */ |
373 | if (*action) | 377 | if (*action) |
374 | return -EINVAL; | 378 | return -EINVAL; |
@@ -503,6 +507,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, | |||
503 | struct mlx5e_tc_flow *flow; | 507 | struct mlx5e_tc_flow *flow; |
504 | struct tc_action *a; | 508 | struct tc_action *a; |
505 | struct mlx5_fc *counter; | 509 | struct mlx5_fc *counter; |
510 | LIST_HEAD(actions); | ||
506 | u64 bytes; | 511 | u64 bytes; |
507 | u64 packets; | 512 | u64 packets; |
508 | u64 lastuse; | 513 | u64 lastuse; |
@@ -518,7 +523,8 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, | |||
518 | 523 | ||
519 | mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); | 524 | mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); |
520 | 525 | ||
521 | tc_for_each_action(a, f->exts) | 526 | tcf_exts_to_list(f->exts, &actions); |
527 | list_for_each_entry(a, &actions, list) | ||
522 | tcf_action_stats_update(a, bytes, packets, lastuse); | 528 | tcf_action_stats_update(a, bytes, packets, lastuse); |
523 | 529 | ||
524 | return 0; | 530 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 7ca9201f7dcb..1721098eef13 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h | |||
@@ -3383,6 +3383,15 @@ MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1); | |||
3383 | */ | 3383 | */ |
3384 | MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1); | 3384 | MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1); |
3385 | 3385 | ||
3386 | /* reg_ritr_lb_en | ||
3387 | * Loop-back filter enable for unicast packets. | ||
3388 | * If the flag is set then loop-back filter for unicast packets is | ||
3389 | * implemented on the RIF. Multicast packets are always subject to | ||
3390 | * loop-back filtering. | ||
3391 | * Access: RW | ||
3392 | */ | ||
3393 | MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1); | ||
3394 | |||
3386 | /* reg_ritr_virtual_router | 3395 | /* reg_ritr_virtual_router |
3387 | * Virtual router ID associated with the router interface. | 3396 | * Virtual router ID associated with the router interface. |
3388 | * Access: RW | 3397 | * Access: RW |
@@ -3484,6 +3493,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, | |||
3484 | mlxsw_reg_ritr_op_set(payload, op); | 3493 | mlxsw_reg_ritr_op_set(payload, op); |
3485 | mlxsw_reg_ritr_rif_set(payload, rif); | 3494 | mlxsw_reg_ritr_rif_set(payload, rif); |
3486 | mlxsw_reg_ritr_ipv4_fe_set(payload, 1); | 3495 | mlxsw_reg_ritr_ipv4_fe_set(payload, 1); |
3496 | mlxsw_reg_ritr_lb_en_set(payload, 1); | ||
3487 | mlxsw_reg_ritr_mtu_set(payload, mtu); | 3497 | mlxsw_reg_ritr_mtu_set(payload, mtu); |
3488 | mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac); | 3498 | mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac); |
3489 | } | 3499 | } |
@@ -4000,6 +4010,7 @@ static inline void mlxsw_reg_ralue_pack(char *payload, | |||
4000 | { | 4010 | { |
4001 | MLXSW_REG_ZERO(ralue, payload); | 4011 | MLXSW_REG_ZERO(ralue, payload); |
4002 | mlxsw_reg_ralue_protocol_set(payload, protocol); | 4012 | mlxsw_reg_ralue_protocol_set(payload, protocol); |
4013 | mlxsw_reg_ralue_op_set(payload, op); | ||
4003 | mlxsw_reg_ralue_virtual_router_set(payload, virtual_router); | 4014 | mlxsw_reg_ralue_virtual_router_set(payload, virtual_router); |
4004 | mlxsw_reg_ralue_prefix_len_set(payload, prefix_len); | 4015 | mlxsw_reg_ralue_prefix_len_set(payload, prefix_len); |
4005 | mlxsw_reg_ralue_entry_type_set(payload, | 4016 | mlxsw_reg_ralue_entry_type_set(payload, |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c3e61500819d..1f8168906811 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
@@ -942,8 +942,8 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport) | |||
942 | kfree(mlxsw_sp_vport); | 942 | kfree(mlxsw_sp_vport); |
943 | } | 943 | } |
944 | 944 | ||
945 | int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, | 945 | static int mlxsw_sp_port_add_vid(struct net_device *dev, |
946 | u16 vid) | 946 | __be16 __always_unused proto, u16 vid) |
947 | { | 947 | { |
948 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 948 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
949 | struct mlxsw_sp_port *mlxsw_sp_vport; | 949 | struct mlxsw_sp_port *mlxsw_sp_vport; |
@@ -956,16 +956,12 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, | |||
956 | if (!vid) | 956 | if (!vid) |
957 | return 0; | 957 | return 0; |
958 | 958 | ||
959 | if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) { | 959 | if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) |
960 | netdev_warn(dev, "VID=%d already configured\n", vid); | ||
961 | return 0; | 960 | return 0; |
962 | } | ||
963 | 961 | ||
964 | mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid); | 962 | mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid); |
965 | if (!mlxsw_sp_vport) { | 963 | if (!mlxsw_sp_vport) |
966 | netdev_err(dev, "Failed to create vPort for VID=%d\n", vid); | ||
967 | return -ENOMEM; | 964 | return -ENOMEM; |
968 | } | ||
969 | 965 | ||
970 | /* When adding the first VLAN interface on a bridged port we need to | 966 | /* When adding the first VLAN interface on a bridged port we need to |
971 | * transition all the active 802.1Q bridge VLANs to use explicit | 967 | * transition all the active 802.1Q bridge VLANs to use explicit |
@@ -973,24 +969,17 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, | |||
973 | */ | 969 | */ |
974 | if (list_is_singular(&mlxsw_sp_port->vports_list)) { | 970 | if (list_is_singular(&mlxsw_sp_port->vports_list)) { |
975 | err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); | 971 | err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); |
976 | if (err) { | 972 | if (err) |
977 | netdev_err(dev, "Failed to set to Virtual mode\n"); | ||
978 | goto err_port_vp_mode_trans; | 973 | goto err_port_vp_mode_trans; |
979 | } | ||
980 | } | 974 | } |
981 | 975 | ||
982 | err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); | 976 | err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); |
983 | if (err) { | 977 | if (err) |
984 | netdev_err(dev, "Failed to disable learning for VID=%d\n", vid); | ||
985 | goto err_port_vid_learning_set; | 978 | goto err_port_vid_learning_set; |
986 | } | ||
987 | 979 | ||
988 | err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged); | 980 | err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged); |
989 | if (err) { | 981 | if (err) |
990 | netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", | ||
991 | vid); | ||
992 | goto err_port_add_vid; | 982 | goto err_port_add_vid; |
993 | } | ||
994 | 983 | ||
995 | return 0; | 984 | return 0; |
996 | 985 | ||
@@ -1010,7 +999,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, | |||
1010 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 999 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
1011 | struct mlxsw_sp_port *mlxsw_sp_vport; | 1000 | struct mlxsw_sp_port *mlxsw_sp_vport; |
1012 | struct mlxsw_sp_fid *f; | 1001 | struct mlxsw_sp_fid *f; |
1013 | int err; | ||
1014 | 1002 | ||
1015 | /* VLAN 0 is removed from HW filter when device goes down, but | 1003 | /* VLAN 0 is removed from HW filter when device goes down, but |
1016 | * it is reserved in our case, so simply return. | 1004 | * it is reserved in our case, so simply return. |
@@ -1019,23 +1007,12 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, | |||
1019 | return 0; | 1007 | return 0; |
1020 | 1008 | ||
1021 | mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); | 1009 | mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); |
1022 | if (!mlxsw_sp_vport) { | 1010 | if (WARN_ON(!mlxsw_sp_vport)) |
1023 | netdev_warn(dev, "VID=%d does not exist\n", vid); | ||
1024 | return 0; | 1011 | return 0; |
1025 | } | ||
1026 | 1012 | ||
1027 | err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); | 1013 | mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); |
1028 | if (err) { | ||
1029 | netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", | ||
1030 | vid); | ||
1031 | return err; | ||
1032 | } | ||
1033 | 1014 | ||
1034 | err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); | 1015 | mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); |
1035 | if (err) { | ||
1036 | netdev_err(dev, "Failed to enable learning for VID=%d\n", vid); | ||
1037 | return err; | ||
1038 | } | ||
1039 | 1016 | ||
1040 | /* Drop FID reference. If this was the last reference the | 1017 | /* Drop FID reference. If this was the last reference the |
1041 | * resources will be freed. | 1018 | * resources will be freed. |
@@ -1048,13 +1025,8 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, | |||
1048 | * transition all active 802.1Q bridge VLANs to use VID to FID | 1025 | * transition all active 802.1Q bridge VLANs to use VID to FID |
1049 | * mappings and set port's mode to VLAN mode. | 1026 | * mappings and set port's mode to VLAN mode. |
1050 | */ | 1027 | */ |
1051 | if (list_is_singular(&mlxsw_sp_port->vports_list)) { | 1028 | if (list_is_singular(&mlxsw_sp_port->vports_list)) |
1052 | err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); | 1029 | mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); |
1053 | if (err) { | ||
1054 | netdev_err(dev, "Failed to set to VLAN mode\n"); | ||
1055 | return err; | ||
1056 | } | ||
1057 | } | ||
1058 | 1030 | ||
1059 | mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); | 1031 | mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); |
1060 | 1032 | ||
@@ -1149,6 +1121,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1149 | bool ingress) | 1121 | bool ingress) |
1150 | { | 1122 | { |
1151 | const struct tc_action *a; | 1123 | const struct tc_action *a; |
1124 | LIST_HEAD(actions); | ||
1152 | int err; | 1125 | int err; |
1153 | 1126 | ||
1154 | if (!tc_single_action(cls->exts)) { | 1127 | if (!tc_single_action(cls->exts)) { |
@@ -1156,7 +1129,8 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1156 | return -ENOTSUPP; | 1129 | return -ENOTSUPP; |
1157 | } | 1130 | } |
1158 | 1131 | ||
1159 | tc_for_each_action(a, cls->exts) { | 1132 | tcf_exts_to_list(cls->exts, &actions); |
1133 | list_for_each_entry(a, &actions, list) { | ||
1160 | if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL)) | 1134 | if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL)) |
1161 | return -ENOTSUPP; | 1135 | return -ENOTSUPP; |
1162 | 1136 | ||
@@ -2076,6 +2050,18 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) | |||
2076 | return 0; | 2050 | return 0; |
2077 | } | 2051 | } |
2078 | 2052 | ||
2053 | static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port) | ||
2054 | { | ||
2055 | mlxsw_sp_port->pvid = 1; | ||
2056 | |||
2057 | return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1); | ||
2058 | } | ||
2059 | |||
2060 | static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port) | ||
2061 | { | ||
2062 | return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); | ||
2063 | } | ||
2064 | |||
2079 | static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | 2065 | static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
2080 | bool split, u8 module, u8 width, u8 lane) | 2066 | bool split, u8 module, u8 width, u8 lane) |
2081 | { | 2067 | { |
@@ -2191,7 +2177,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
2191 | goto err_port_dcb_init; | 2177 | goto err_port_dcb_init; |
2192 | } | 2178 | } |
2193 | 2179 | ||
2180 | err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port); | ||
2181 | if (err) { | ||
2182 | dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n", | ||
2183 | mlxsw_sp_port->local_port); | ||
2184 | goto err_port_pvid_vport_create; | ||
2185 | } | ||
2186 | |||
2194 | mlxsw_sp_port_switchdev_init(mlxsw_sp_port); | 2187 | mlxsw_sp_port_switchdev_init(mlxsw_sp_port); |
2188 | mlxsw_sp->ports[local_port] = mlxsw_sp_port; | ||
2195 | err = register_netdev(dev); | 2189 | err = register_netdev(dev); |
2196 | if (err) { | 2190 | if (err) { |
2197 | dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", | 2191 | dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", |
@@ -2208,24 +2202,23 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
2208 | goto err_core_port_init; | 2202 | goto err_core_port_init; |
2209 | } | 2203 | } |
2210 | 2204 | ||
2211 | err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); | ||
2212 | if (err) | ||
2213 | goto err_port_vlan_init; | ||
2214 | |||
2215 | mlxsw_sp->ports[local_port] = mlxsw_sp_port; | ||
2216 | return 0; | 2205 | return 0; |
2217 | 2206 | ||
2218 | err_port_vlan_init: | ||
2219 | mlxsw_core_port_fini(&mlxsw_sp_port->core_port); | ||
2220 | err_core_port_init: | 2207 | err_core_port_init: |
2221 | unregister_netdev(dev); | 2208 | unregister_netdev(dev); |
2222 | err_register_netdev: | 2209 | err_register_netdev: |
2210 | mlxsw_sp->ports[local_port] = NULL; | ||
2211 | mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); | ||
2212 | mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); | ||
2213 | err_port_pvid_vport_create: | ||
2214 | mlxsw_sp_port_dcb_fini(mlxsw_sp_port); | ||
2223 | err_port_dcb_init: | 2215 | err_port_dcb_init: |
2224 | err_port_ets_init: | 2216 | err_port_ets_init: |
2225 | err_port_buffers_init: | 2217 | err_port_buffers_init: |
2226 | err_port_admin_status_set: | 2218 | err_port_admin_status_set: |
2227 | err_port_mtu_set: | 2219 | err_port_mtu_set: |
2228 | err_port_speed_by_width_set: | 2220 | err_port_speed_by_width_set: |
2221 | mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); | ||
2229 | err_port_swid_set: | 2222 | err_port_swid_set: |
2230 | err_port_system_port_mapping_set: | 2223 | err_port_system_port_mapping_set: |
2231 | err_dev_addr_init: | 2224 | err_dev_addr_init: |
@@ -2245,12 +2238,12 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) | |||
2245 | 2238 | ||
2246 | if (!mlxsw_sp_port) | 2239 | if (!mlxsw_sp_port) |
2247 | return; | 2240 | return; |
2248 | mlxsw_sp->ports[local_port] = NULL; | ||
2249 | mlxsw_core_port_fini(&mlxsw_sp_port->core_port); | 2241 | mlxsw_core_port_fini(&mlxsw_sp_port->core_port); |
2250 | unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ | 2242 | unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ |
2251 | mlxsw_sp_port_dcb_fini(mlxsw_sp_port); | 2243 | mlxsw_sp->ports[local_port] = NULL; |
2252 | mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); | ||
2253 | mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); | 2244 | mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); |
2245 | mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); | ||
2246 | mlxsw_sp_port_dcb_fini(mlxsw_sp_port); | ||
2254 | mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); | 2247 | mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); |
2255 | mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); | 2248 | mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); |
2256 | free_percpu(mlxsw_sp_port->pcpu_stats); | 2249 | free_percpu(mlxsw_sp_port->pcpu_stats); |
@@ -2662,6 +2655,26 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = { | |||
2662 | { | 2655 | { |
2663 | .func = mlxsw_sp_rx_listener_func, | 2656 | .func = mlxsw_sp_rx_listener_func, |
2664 | .local_port = MLXSW_PORT_DONT_CARE, | 2657 | .local_port = MLXSW_PORT_DONT_CARE, |
2658 | .trap_id = MLXSW_TRAP_ID_MTUERROR, | ||
2659 | }, | ||
2660 | { | ||
2661 | .func = mlxsw_sp_rx_listener_func, | ||
2662 | .local_port = MLXSW_PORT_DONT_CARE, | ||
2663 | .trap_id = MLXSW_TRAP_ID_TTLERROR, | ||
2664 | }, | ||
2665 | { | ||
2666 | .func = mlxsw_sp_rx_listener_func, | ||
2667 | .local_port = MLXSW_PORT_DONT_CARE, | ||
2668 | .trap_id = MLXSW_TRAP_ID_LBERROR, | ||
2669 | }, | ||
2670 | { | ||
2671 | .func = mlxsw_sp_rx_listener_func, | ||
2672 | .local_port = MLXSW_PORT_DONT_CARE, | ||
2673 | .trap_id = MLXSW_TRAP_ID_OSPF, | ||
2674 | }, | ||
2675 | { | ||
2676 | .func = mlxsw_sp_rx_listener_func, | ||
2677 | .local_port = MLXSW_PORT_DONT_CARE, | ||
2665 | .trap_id = MLXSW_TRAP_ID_IP2ME, | 2678 | .trap_id = MLXSW_TRAP_ID_IP2ME, |
2666 | }, | 2679 | }, |
2667 | { | 2680 | { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index f69aa37d1521..ab3feb81bd43 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
@@ -536,8 +536,6 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port, | |||
536 | u16 vid); | 536 | u16 vid); |
537 | int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, | 537 | int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, |
538 | u16 vid_end, bool is_member, bool untagged); | 538 | u16 vid_end, bool is_member, bool untagged); |
539 | int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, | ||
540 | u16 vid); | ||
541 | int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, | 539 | int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, |
542 | bool set); | 540 | bool set); |
543 | void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); | 541 | void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 074cdda7b6f3..237418a0e6e0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | |||
@@ -330,7 +330,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { | |||
330 | MLXSW_SP_CPU_PORT_SB_CM, | 330 | MLXSW_SP_CPU_PORT_SB_CM, |
331 | MLXSW_SP_CPU_PORT_SB_CM, | 331 | MLXSW_SP_CPU_PORT_SB_CM, |
332 | MLXSW_SP_CPU_PORT_SB_CM, | 332 | MLXSW_SP_CPU_PORT_SB_CM, |
333 | MLXSW_SP_CPU_PORT_SB_CM, | 333 | MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0), |
334 | MLXSW_SP_CPU_PORT_SB_CM, | 334 | MLXSW_SP_CPU_PORT_SB_CM, |
335 | MLXSW_SP_CPU_PORT_SB_CM, | 335 | MLXSW_SP_CPU_PORT_SB_CM, |
336 | MLXSW_SP_CPU_PORT_SB_CM, | 336 | MLXSW_SP_CPU_PORT_SB_CM, |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index 01cfb7512827..b6ed7f7c531e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | |||
@@ -341,6 +341,8 @@ static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port, | |||
341 | char pfcc_pl[MLXSW_REG_PFCC_LEN]; | 341 | char pfcc_pl[MLXSW_REG_PFCC_LEN]; |
342 | 342 | ||
343 | mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); | 343 | mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); |
344 | mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause); | ||
345 | mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause); | ||
344 | mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en); | 346 | mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en); |
345 | 347 | ||
346 | return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), | 348 | return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), |
@@ -351,17 +353,17 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, | |||
351 | struct ieee_pfc *pfc) | 353 | struct ieee_pfc *pfc) |
352 | { | 354 | { |
353 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 355 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
356 | bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); | ||
354 | int err; | 357 | int err; |
355 | 358 | ||
356 | if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) && | 359 | if (pause_en && pfc->pfc_en) { |
357 | pfc->pfc_en) { | ||
358 | netdev_err(dev, "PAUSE frames already enabled on port\n"); | 360 | netdev_err(dev, "PAUSE frames already enabled on port\n"); |
359 | return -EINVAL; | 361 | return -EINVAL; |
360 | } | 362 | } |
361 | 363 | ||
362 | err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, | 364 | err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, |
363 | mlxsw_sp_port->dcb.ets->prio_tc, | 365 | mlxsw_sp_port->dcb.ets->prio_tc, |
364 | false, pfc); | 366 | pause_en, pfc); |
365 | if (err) { | 367 | if (err) { |
366 | netdev_err(dev, "Failed to configure port's headroom for PFC\n"); | 368 | netdev_err(dev, "Failed to configure port's headroom for PFC\n"); |
367 | return err; | 369 | return err; |
@@ -380,7 +382,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, | |||
380 | 382 | ||
381 | err_port_pfc_set: | 383 | err_port_pfc_set: |
382 | __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, | 384 | __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, |
383 | mlxsw_sp_port->dcb.ets->prio_tc, false, | 385 | mlxsw_sp_port->dcb.ets->prio_tc, pause_en, |
384 | mlxsw_sp_port->dcb.pfc); | 386 | mlxsw_sp_port->dcb.pfc); |
385 | return err; | 387 | return err; |
386 | } | 388 | } |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 81418d629231..90bb93b037ec 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
@@ -1651,9 +1651,10 @@ static void mlxsw_sp_router_fib4_add_info_destroy(void const *data) | |||
1651 | const struct mlxsw_sp_router_fib4_add_info *info = data; | 1651 | const struct mlxsw_sp_router_fib4_add_info *info = data; |
1652 | struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry; | 1652 | struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry; |
1653 | struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp; | 1653 | struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp; |
1654 | struct mlxsw_sp_vr *vr = fib_entry->vr; | ||
1654 | 1655 | ||
1655 | mlxsw_sp_fib_entry_destroy(fib_entry); | 1656 | mlxsw_sp_fib_entry_destroy(fib_entry); |
1656 | mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr); | 1657 | mlxsw_sp_vr_put(mlxsw_sp, vr); |
1657 | kfree(info); | 1658 | kfree(info); |
1658 | } | 1659 | } |
1659 | 1660 | ||
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index a1ad5e6bdfa8..d1b59cdfacc1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
@@ -450,6 +450,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f) | |||
450 | 450 | ||
451 | kfree(f); | 451 | kfree(f); |
452 | 452 | ||
453 | mlxsw_sp_fid_map(mlxsw_sp, fid, false); | ||
454 | |||
453 | mlxsw_sp_fid_op(mlxsw_sp, fid, false); | 455 | mlxsw_sp_fid_op(mlxsw_sp, fid, false); |
454 | } | 456 | } |
455 | 457 | ||
@@ -997,13 +999,13 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, | |||
997 | } | 999 | } |
998 | 1000 | ||
999 | static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | 1001 | static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, |
1000 | u16 vid_begin, u16 vid_end, bool init) | 1002 | u16 vid_begin, u16 vid_end) |
1001 | { | 1003 | { |
1002 | struct net_device *dev = mlxsw_sp_port->dev; | 1004 | struct net_device *dev = mlxsw_sp_port->dev; |
1003 | u16 vid, pvid; | 1005 | u16 vid, pvid; |
1004 | int err; | 1006 | int err; |
1005 | 1007 | ||
1006 | if (!init && !mlxsw_sp_port->bridged) | 1008 | if (!mlxsw_sp_port->bridged) |
1007 | return -EINVAL; | 1009 | return -EINVAL; |
1008 | 1010 | ||
1009 | err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, | 1011 | err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, |
@@ -1014,9 +1016,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1014 | return err; | 1016 | return err; |
1015 | } | 1017 | } |
1016 | 1018 | ||
1017 | if (init) | ||
1018 | goto out; | ||
1019 | |||
1020 | pvid = mlxsw_sp_port->pvid; | 1019 | pvid = mlxsw_sp_port->pvid; |
1021 | if (pvid >= vid_begin && pvid <= vid_end) { | 1020 | if (pvid >= vid_begin && pvid <= vid_end) { |
1022 | err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); | 1021 | err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); |
@@ -1028,7 +1027,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1028 | 1027 | ||
1029 | mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); | 1028 | mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); |
1030 | 1029 | ||
1031 | out: | ||
1032 | /* Changing activity bits only if HW operation succeded */ | 1030 | /* Changing activity bits only if HW operation succeded */ |
1033 | for (vid = vid_begin; vid <= vid_end; vid++) | 1031 | for (vid = vid_begin; vid <= vid_end; vid++) |
1034 | clear_bit(vid, mlxsw_sp_port->active_vlans); | 1032 | clear_bit(vid, mlxsw_sp_port->active_vlans); |
@@ -1039,8 +1037,8 @@ out: | |||
1039 | static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, | 1037 | static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, |
1040 | const struct switchdev_obj_port_vlan *vlan) | 1038 | const struct switchdev_obj_port_vlan *vlan) |
1041 | { | 1039 | { |
1042 | return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, | 1040 | return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin, |
1043 | vlan->vid_begin, vlan->vid_end, false); | 1041 | vlan->vid_end); |
1044 | } | 1042 | } |
1045 | 1043 | ||
1046 | void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) | 1044 | void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) |
@@ -1048,7 +1046,7 @@ void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) | |||
1048 | u16 vid; | 1046 | u16 vid; |
1049 | 1047 | ||
1050 | for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) | 1048 | for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) |
1051 | __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false); | 1049 | __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid); |
1052 | } | 1050 | } |
1053 | 1051 | ||
1054 | static int | 1052 | static int |
@@ -1546,32 +1544,6 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) | |||
1546 | mlxsw_sp_fdb_fini(mlxsw_sp); | 1544 | mlxsw_sp_fdb_fini(mlxsw_sp); |
1547 | } | 1545 | } |
1548 | 1546 | ||
1549 | int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port) | ||
1550 | { | ||
1551 | struct net_device *dev = mlxsw_sp_port->dev; | ||
1552 | int err; | ||
1553 | |||
1554 | /* Allow only untagged packets to ingress and tag them internally | ||
1555 | * with VID 1. | ||
1556 | */ | ||
1557 | mlxsw_sp_port->pvid = 1; | ||
1558 | err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1, | ||
1559 | true); | ||
1560 | if (err) { | ||
1561 | netdev_err(dev, "Unable to init VLANs\n"); | ||
1562 | return err; | ||
1563 | } | ||
1564 | |||
1565 | /* Add implicit VLAN interface in the device, so that untagged | ||
1566 | * packets will be classified to the default vFID. | ||
1567 | */ | ||
1568 | err = mlxsw_sp_port_add_vid(dev, 0, 1); | ||
1569 | if (err) | ||
1570 | netdev_err(dev, "Failed to configure default vFID\n"); | ||
1571 | |||
1572 | return err; | ||
1573 | } | ||
1574 | |||
1575 | void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) | 1547 | void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) |
1576 | { | 1548 | { |
1577 | mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; | 1549 | mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 470d7696e9fe..ed8e30186400 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h | |||
@@ -56,6 +56,10 @@ enum { | |||
56 | MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, | 56 | MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, |
57 | MLXSW_TRAP_ID_ARPBC = 0x50, | 57 | MLXSW_TRAP_ID_ARPBC = 0x50, |
58 | MLXSW_TRAP_ID_ARPUC = 0x51, | 58 | MLXSW_TRAP_ID_ARPUC = 0x51, |
59 | MLXSW_TRAP_ID_MTUERROR = 0x52, | ||
60 | MLXSW_TRAP_ID_TTLERROR = 0x53, | ||
61 | MLXSW_TRAP_ID_LBERROR = 0x54, | ||
62 | MLXSW_TRAP_ID_OSPF = 0x55, | ||
59 | MLXSW_TRAP_ID_IP2ME = 0x5F, | 63 | MLXSW_TRAP_ID_IP2ME = 0x5F, |
60 | MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, | 64 | MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, |
61 | MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, | 65 | MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index d0dc28f93c0e..226cb08cc055 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c | |||
@@ -52,40 +52,94 @@ static bool qed_dcbx_app_ethtype(u32 app_info_bitmap) | |||
52 | DCBX_APP_SF_ETHTYPE); | 52 | DCBX_APP_SF_ETHTYPE); |
53 | } | 53 | } |
54 | 54 | ||
55 | static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap) | ||
56 | { | ||
57 | u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); | ||
58 | |||
59 | /* Old MFW */ | ||
60 | if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) | ||
61 | return qed_dcbx_app_ethtype(app_info_bitmap); | ||
62 | |||
63 | return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE); | ||
64 | } | ||
65 | |||
55 | static bool qed_dcbx_app_port(u32 app_info_bitmap) | 66 | static bool qed_dcbx_app_port(u32 app_info_bitmap) |
56 | { | 67 | { |
57 | return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == | 68 | return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == |
58 | DCBX_APP_SF_PORT); | 69 | DCBX_APP_SF_PORT); |
59 | } | 70 | } |
60 | 71 | ||
61 | static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id) | 72 | static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type) |
62 | { | 73 | { |
63 | return !!(qed_dcbx_app_ethtype(app_info_bitmap) && | 74 | u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); |
64 | proto_id == QED_ETH_TYPE_DEFAULT); | 75 | |
76 | /* Old MFW */ | ||
77 | if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) | ||
78 | return qed_dcbx_app_port(app_info_bitmap); | ||
79 | |||
80 | return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT); | ||
65 | } | 81 | } |
66 | 82 | ||
67 | static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id) | 83 | static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
68 | { | 84 | { |
69 | return !!(qed_dcbx_app_port(app_info_bitmap) && | 85 | bool ethtype; |
70 | proto_id == QED_TCP_PORT_ISCSI); | 86 | |
87 | if (ieee) | ||
88 | ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); | ||
89 | else | ||
90 | ethtype = qed_dcbx_app_ethtype(app_info_bitmap); | ||
91 | |||
92 | return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT)); | ||
71 | } | 93 | } |
72 | 94 | ||
73 | static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id) | 95 | static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
74 | { | 96 | { |
75 | return !!(qed_dcbx_app_ethtype(app_info_bitmap) && | 97 | bool port; |
76 | proto_id == QED_ETH_TYPE_FCOE); | 98 | |
99 | if (ieee) | ||
100 | port = qed_dcbx_ieee_app_port(app_info_bitmap, | ||
101 | DCBX_APP_SF_IEEE_TCP_PORT); | ||
102 | else | ||
103 | port = qed_dcbx_app_port(app_info_bitmap); | ||
104 | |||
105 | return !!(port && (proto_id == QED_TCP_PORT_ISCSI)); | ||
77 | } | 106 | } |
78 | 107 | ||
79 | static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id) | 108 | static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
80 | { | 109 | { |
81 | return !!(qed_dcbx_app_ethtype(app_info_bitmap) && | 110 | bool ethtype; |
82 | proto_id == QED_ETH_TYPE_ROCE); | 111 | |
112 | if (ieee) | ||
113 | ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); | ||
114 | else | ||
115 | ethtype = qed_dcbx_app_ethtype(app_info_bitmap); | ||
116 | |||
117 | return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE)); | ||
83 | } | 118 | } |
84 | 119 | ||
85 | static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id) | 120 | static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) |
86 | { | 121 | { |
87 | return !!(qed_dcbx_app_port(app_info_bitmap) && | 122 | bool ethtype; |
88 | proto_id == QED_UDP_PORT_TYPE_ROCE_V2); | 123 | |
124 | if (ieee) | ||
125 | ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); | ||
126 | else | ||
127 | ethtype = qed_dcbx_app_ethtype(app_info_bitmap); | ||
128 | |||
129 | return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE)); | ||
130 | } | ||
131 | |||
132 | static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) | ||
133 | { | ||
134 | bool port; | ||
135 | |||
136 | if (ieee) | ||
137 | port = qed_dcbx_ieee_app_port(app_info_bitmap, | ||
138 | DCBX_APP_SF_IEEE_UDP_PORT); | ||
139 | else | ||
140 | port = qed_dcbx_app_port(app_info_bitmap); | ||
141 | |||
142 | return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2)); | ||
89 | } | 143 | } |
90 | 144 | ||
91 | static void | 145 | static void |
@@ -164,17 +218,17 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, | |||
164 | static bool | 218 | static bool |
165 | qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, | 219 | qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, |
166 | u32 app_prio_bitmap, | 220 | u32 app_prio_bitmap, |
167 | u16 id, enum dcbx_protocol_type *type) | 221 | u16 id, enum dcbx_protocol_type *type, bool ieee) |
168 | { | 222 | { |
169 | if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) { | 223 | if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) { |
170 | *type = DCBX_PROTOCOL_FCOE; | 224 | *type = DCBX_PROTOCOL_FCOE; |
171 | } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) { | 225 | } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) { |
172 | *type = DCBX_PROTOCOL_ROCE; | 226 | *type = DCBX_PROTOCOL_ROCE; |
173 | } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) { | 227 | } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) { |
174 | *type = DCBX_PROTOCOL_ISCSI; | 228 | *type = DCBX_PROTOCOL_ISCSI; |
175 | } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) { | 229 | } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) { |
176 | *type = DCBX_PROTOCOL_ETH; | 230 | *type = DCBX_PROTOCOL_ETH; |
177 | } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) { | 231 | } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) { |
178 | *type = DCBX_PROTOCOL_ROCE_V2; | 232 | *type = DCBX_PROTOCOL_ROCE_V2; |
179 | } else { | 233 | } else { |
180 | *type = DCBX_MAX_PROTOCOL_TYPE; | 234 | *type = DCBX_MAX_PROTOCOL_TYPE; |
@@ -194,17 +248,18 @@ static int | |||
194 | qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | 248 | qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, |
195 | struct qed_dcbx_results *p_data, | 249 | struct qed_dcbx_results *p_data, |
196 | struct dcbx_app_priority_entry *p_tbl, | 250 | struct dcbx_app_priority_entry *p_tbl, |
197 | u32 pri_tc_tbl, int count, bool dcbx_enabled) | 251 | u32 pri_tc_tbl, int count, u8 dcbx_version) |
198 | { | 252 | { |
199 | u8 tc, priority_map; | 253 | u8 tc, priority_map; |
200 | enum dcbx_protocol_type type; | 254 | enum dcbx_protocol_type type; |
255 | bool enable, ieee; | ||
201 | u16 protocol_id; | 256 | u16 protocol_id; |
202 | int priority; | 257 | int priority; |
203 | bool enable; | ||
204 | int i; | 258 | int i; |
205 | 259 | ||
206 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); | 260 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); |
207 | 261 | ||
262 | ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE); | ||
208 | /* Parse APP TLV */ | 263 | /* Parse APP TLV */ |
209 | for (i = 0; i < count; i++) { | 264 | for (i = 0; i < count; i++) { |
210 | protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, | 265 | protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, |
@@ -219,7 +274,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, | |||
219 | 274 | ||
220 | tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); | 275 | tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); |
221 | if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, | 276 | if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, |
222 | protocol_id, &type)) { | 277 | protocol_id, &type, ieee)) { |
223 | /* ETH always have the enable bit reset, as it gets | 278 | /* ETH always have the enable bit reset, as it gets |
224 | * vlan information per packet. For other protocols, | 279 | * vlan information per packet. For other protocols, |
225 | * should be set according to the dcbx_enabled | 280 | * should be set according to the dcbx_enabled |
@@ -275,15 +330,12 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) | |||
275 | struct dcbx_ets_feature *p_ets; | 330 | struct dcbx_ets_feature *p_ets; |
276 | struct qed_hw_info *p_info; | 331 | struct qed_hw_info *p_info; |
277 | u32 pri_tc_tbl, flags; | 332 | u32 pri_tc_tbl, flags; |
278 | bool dcbx_enabled; | 333 | u8 dcbx_version; |
279 | int num_entries; | 334 | int num_entries; |
280 | int rc = 0; | 335 | int rc = 0; |
281 | 336 | ||
282 | /* If DCBx version is non zero, then negotiation was | ||
283 | * successfuly performed | ||
284 | */ | ||
285 | flags = p_hwfn->p_dcbx_info->operational.flags; | 337 | flags = p_hwfn->p_dcbx_info->operational.flags; |
286 | dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); | 338 | dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); |
287 | 339 | ||
288 | p_app = &p_hwfn->p_dcbx_info->operational.features.app; | 340 | p_app = &p_hwfn->p_dcbx_info->operational.features.app; |
289 | p_tbl = p_app->app_pri_tbl; | 341 | p_tbl = p_app->app_pri_tbl; |
@@ -295,13 +347,13 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) | |||
295 | num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); | 347 | num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); |
296 | 348 | ||
297 | rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, | 349 | rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, |
298 | num_entries, dcbx_enabled); | 350 | num_entries, dcbx_version); |
299 | if (rc) | 351 | if (rc) |
300 | return rc; | 352 | return rc; |
301 | 353 | ||
302 | p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); | 354 | p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); |
303 | data.pf_id = p_hwfn->rel_pf_id; | 355 | data.pf_id = p_hwfn->rel_pf_id; |
304 | data.dcbx_enabled = dcbx_enabled; | 356 | data.dcbx_enabled = !!dcbx_version; |
305 | 357 | ||
306 | qed_dcbx_dp_protocol(p_hwfn, &data); | 358 | qed_dcbx_dp_protocol(p_hwfn, &data); |
307 | 359 | ||
@@ -400,7 +452,7 @@ static void | |||
400 | qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, | 452 | qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, |
401 | struct dcbx_app_priority_feature *p_app, | 453 | struct dcbx_app_priority_feature *p_app, |
402 | struct dcbx_app_priority_entry *p_tbl, | 454 | struct dcbx_app_priority_entry *p_tbl, |
403 | struct qed_dcbx_params *p_params) | 455 | struct qed_dcbx_params *p_params, bool ieee) |
404 | { | 456 | { |
405 | struct qed_app_entry *entry; | 457 | struct qed_app_entry *entry; |
406 | u8 pri_map; | 458 | u8 pri_map; |
@@ -414,15 +466,46 @@ qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, | |||
414 | DCBX_APP_NUM_ENTRIES); | 466 | DCBX_APP_NUM_ENTRIES); |
415 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { | 467 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { |
416 | entry = &p_params->app_entry[i]; | 468 | entry = &p_params->app_entry[i]; |
417 | entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, | 469 | if (ieee) { |
418 | DCBX_APP_SF)); | 470 | u8 sf_ieee; |
471 | u32 val; | ||
472 | |||
473 | sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry, | ||
474 | DCBX_APP_SF_IEEE); | ||
475 | switch (sf_ieee) { | ||
476 | case DCBX_APP_SF_IEEE_RESERVED: | ||
477 | /* Old MFW */ | ||
478 | val = QED_MFW_GET_FIELD(p_tbl[i].entry, | ||
479 | DCBX_APP_SF); | ||
480 | entry->sf_ieee = val ? | ||
481 | QED_DCBX_SF_IEEE_TCP_UDP_PORT : | ||
482 | QED_DCBX_SF_IEEE_ETHTYPE; | ||
483 | break; | ||
484 | case DCBX_APP_SF_IEEE_ETHTYPE: | ||
485 | entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE; | ||
486 | break; | ||
487 | case DCBX_APP_SF_IEEE_TCP_PORT: | ||
488 | entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT; | ||
489 | break; | ||
490 | case DCBX_APP_SF_IEEE_UDP_PORT: | ||
491 | entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT; | ||
492 | break; | ||
493 | case DCBX_APP_SF_IEEE_TCP_UDP_PORT: | ||
494 | entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT; | ||
495 | break; | ||
496 | } | ||
497 | } else { | ||
498 | entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, | ||
499 | DCBX_APP_SF)); | ||
500 | } | ||
501 | |||
419 | pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); | 502 | pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); |
420 | entry->prio = ffs(pri_map) - 1; | 503 | entry->prio = ffs(pri_map) - 1; |
421 | entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, | 504 | entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, |
422 | DCBX_APP_PROTOCOL_ID); | 505 | DCBX_APP_PROTOCOL_ID); |
423 | qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, | 506 | qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, |
424 | entry->proto_id, | 507 | entry->proto_id, |
425 | &entry->proto_type); | 508 | &entry->proto_type, ieee); |
426 | } | 509 | } |
427 | 510 | ||
428 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, | 511 | DP_VERBOSE(p_hwfn, QED_MSG_DCB, |
@@ -483,7 +566,7 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn, | |||
483 | bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]); | 566 | bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]); |
484 | tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]); | 567 | tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]); |
485 | tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]); | 568 | tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]); |
486 | pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]); | 569 | pri_map = p_ets->pri_tc_tbl[0]; |
487 | for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { | 570 | for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { |
488 | p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; | 571 | p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i]; |
489 | p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; | 572 | p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i]; |
@@ -500,9 +583,9 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn, | |||
500 | struct dcbx_app_priority_feature *p_app, | 583 | struct dcbx_app_priority_feature *p_app, |
501 | struct dcbx_app_priority_entry *p_tbl, | 584 | struct dcbx_app_priority_entry *p_tbl, |
502 | struct dcbx_ets_feature *p_ets, | 585 | struct dcbx_ets_feature *p_ets, |
503 | u32 pfc, struct qed_dcbx_params *p_params) | 586 | u32 pfc, struct qed_dcbx_params *p_params, bool ieee) |
504 | { | 587 | { |
505 | qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params); | 588 | qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee); |
506 | qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params); | 589 | qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params); |
507 | qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params); | 590 | qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params); |
508 | } | 591 | } |
@@ -516,7 +599,7 @@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, | |||
516 | p_feat = &p_hwfn->p_dcbx_info->local_admin.features; | 599 | p_feat = &p_hwfn->p_dcbx_info->local_admin.features; |
517 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, | 600 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, |
518 | p_feat->app.app_pri_tbl, &p_feat->ets, | 601 | p_feat->app.app_pri_tbl, &p_feat->ets, |
519 | p_feat->pfc, ¶ms->local.params); | 602 | p_feat->pfc, ¶ms->local.params, false); |
520 | params->local.valid = true; | 603 | params->local.valid = true; |
521 | } | 604 | } |
522 | 605 | ||
@@ -529,7 +612,7 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, | |||
529 | p_feat = &p_hwfn->p_dcbx_info->remote.features; | 612 | p_feat = &p_hwfn->p_dcbx_info->remote.features; |
530 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, | 613 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, |
531 | p_feat->app.app_pri_tbl, &p_feat->ets, | 614 | p_feat->app.app_pri_tbl, &p_feat->ets, |
532 | p_feat->pfc, ¶ms->remote.params); | 615 | p_feat->pfc, ¶ms->remote.params, false); |
533 | params->remote.valid = true; | 616 | params->remote.valid = true; |
534 | } | 617 | } |
535 | 618 | ||
@@ -574,7 +657,8 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn, | |||
574 | 657 | ||
575 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, | 658 | qed_dcbx_get_common_params(p_hwfn, &p_feat->app, |
576 | p_feat->app.app_pri_tbl, &p_feat->ets, | 659 | p_feat->app.app_pri_tbl, &p_feat->ets, |
577 | p_feat->pfc, ¶ms->operational.params); | 660 | p_feat->pfc, ¶ms->operational.params, |
661 | p_operational->ieee); | ||
578 | qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results); | 662 | qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results); |
579 | err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR); | 663 | err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR); |
580 | p_operational->err = err; | 664 | p_operational->err = err; |
@@ -944,7 +1028,6 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, | |||
944 | val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); | 1028 | val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); |
945 | p_ets->pri_tc_tbl[0] |= val; | 1029 | p_ets->pri_tc_tbl[0] |= val; |
946 | } | 1030 | } |
947 | p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]); | ||
948 | for (i = 0; i < 2; i++) { | 1031 | for (i = 0; i < 2; i++) { |
949 | p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]); | 1032 | p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]); |
950 | p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]); | 1033 | p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]); |
@@ -954,7 +1037,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, | |||
954 | static void | 1037 | static void |
955 | qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, | 1038 | qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, |
956 | struct dcbx_app_priority_feature *p_app, | 1039 | struct dcbx_app_priority_feature *p_app, |
957 | struct qed_dcbx_params *p_params) | 1040 | struct qed_dcbx_params *p_params, bool ieee) |
958 | { | 1041 | { |
959 | u32 *entry; | 1042 | u32 *entry; |
960 | int i; | 1043 | int i; |
@@ -975,12 +1058,36 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, | |||
975 | 1058 | ||
976 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { | 1059 | for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { |
977 | entry = &p_app->app_pri_tbl[i].entry; | 1060 | entry = &p_app->app_pri_tbl[i].entry; |
978 | *entry &= ~DCBX_APP_SF_MASK; | 1061 | if (ieee) { |
979 | if (p_params->app_entry[i].ethtype) | 1062 | *entry &= ~DCBX_APP_SF_IEEE_MASK; |
980 | *entry |= ((u32)DCBX_APP_SF_ETHTYPE << | 1063 | switch (p_params->app_entry[i].sf_ieee) { |
981 | DCBX_APP_SF_SHIFT); | 1064 | case QED_DCBX_SF_IEEE_ETHTYPE: |
982 | else | 1065 | *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE << |
983 | *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT); | 1066 | DCBX_APP_SF_IEEE_SHIFT); |
1067 | break; | ||
1068 | case QED_DCBX_SF_IEEE_TCP_PORT: | ||
1069 | *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT << | ||
1070 | DCBX_APP_SF_IEEE_SHIFT); | ||
1071 | break; | ||
1072 | case QED_DCBX_SF_IEEE_UDP_PORT: | ||
1073 | *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT << | ||
1074 | DCBX_APP_SF_IEEE_SHIFT); | ||
1075 | break; | ||
1076 | case QED_DCBX_SF_IEEE_TCP_UDP_PORT: | ||
1077 | *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT << | ||
1078 | DCBX_APP_SF_IEEE_SHIFT); | ||
1079 | break; | ||
1080 | } | ||
1081 | } else { | ||
1082 | *entry &= ~DCBX_APP_SF_MASK; | ||
1083 | if (p_params->app_entry[i].ethtype) | ||
1084 | *entry |= ((u32)DCBX_APP_SF_ETHTYPE << | ||
1085 | DCBX_APP_SF_SHIFT); | ||
1086 | else | ||
1087 | *entry |= ((u32)DCBX_APP_SF_PORT << | ||
1088 | DCBX_APP_SF_SHIFT); | ||
1089 | } | ||
1090 | |||
984 | *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; | 1091 | *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; |
985 | *entry |= ((u32)p_params->app_entry[i].proto_id << | 1092 | *entry |= ((u32)p_params->app_entry[i].proto_id << |
986 | DCBX_APP_PROTOCOL_ID_SHIFT); | 1093 | DCBX_APP_PROTOCOL_ID_SHIFT); |
@@ -995,15 +1102,19 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn, | |||
995 | struct dcbx_local_params *local_admin, | 1102 | struct dcbx_local_params *local_admin, |
996 | struct qed_dcbx_set *params) | 1103 | struct qed_dcbx_set *params) |
997 | { | 1104 | { |
1105 | bool ieee = false; | ||
1106 | |||
998 | local_admin->flags = 0; | 1107 | local_admin->flags = 0; |
999 | memcpy(&local_admin->features, | 1108 | memcpy(&local_admin->features, |
1000 | &p_hwfn->p_dcbx_info->operational.features, | 1109 | &p_hwfn->p_dcbx_info->operational.features, |
1001 | sizeof(local_admin->features)); | 1110 | sizeof(local_admin->features)); |
1002 | 1111 | ||
1003 | if (params->enabled) | 1112 | if (params->enabled) { |
1004 | local_admin->config = params->ver_num; | 1113 | local_admin->config = params->ver_num; |
1005 | else | 1114 | ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE); |
1115 | } else { | ||
1006 | local_admin->config = DCBX_CONFIG_VERSION_DISABLED; | 1116 | local_admin->config = DCBX_CONFIG_VERSION_DISABLED; |
1117 | } | ||
1007 | 1118 | ||
1008 | if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG) | 1119 | if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG) |
1009 | qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, | 1120 | qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc, |
@@ -1015,7 +1126,7 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn, | |||
1015 | 1126 | ||
1016 | if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG) | 1127 | if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG) |
1017 | qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app, | 1128 | qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app, |
1018 | ¶ms->config.params); | 1129 | ¶ms->config.params, ieee); |
1019 | } | 1130 | } |
1020 | 1131 | ||
1021 | int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, | 1132 | int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
@@ -1596,8 +1707,10 @@ static int qed_dcbnl_setapp(struct qed_dev *cdev, | |||
1596 | if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) | 1707 | if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) |
1597 | break; | 1708 | break; |
1598 | /* First empty slot */ | 1709 | /* First empty slot */ |
1599 | if (!entry->proto_id) | 1710 | if (!entry->proto_id) { |
1711 | dcbx_set.config.params.num_app_entries++; | ||
1600 | break; | 1712 | break; |
1713 | } | ||
1601 | } | 1714 | } |
1602 | 1715 | ||
1603 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { | 1716 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { |
@@ -2117,8 +2230,10 @@ int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app) | |||
2117 | (entry->proto_id == app->protocol)) | 2230 | (entry->proto_id == app->protocol)) |
2118 | break; | 2231 | break; |
2119 | /* First empty slot */ | 2232 | /* First empty slot */ |
2120 | if (!entry->proto_id) | 2233 | if (!entry->proto_id) { |
2234 | dcbx_set.config.params.num_app_entries++; | ||
2121 | break; | 2235 | break; |
2236 | } | ||
2122 | } | 2237 | } |
2123 | 2238 | ||
2124 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { | 2239 | if (i == QED_DCBX_MAX_APP_PROTOCOL) { |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 592784019994..6f9d3b831a2a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h | |||
@@ -6850,6 +6850,14 @@ struct dcbx_app_priority_entry { | |||
6850 | #define DCBX_APP_SF_SHIFT 8 | 6850 | #define DCBX_APP_SF_SHIFT 8 |
6851 | #define DCBX_APP_SF_ETHTYPE 0 | 6851 | #define DCBX_APP_SF_ETHTYPE 0 |
6852 | #define DCBX_APP_SF_PORT 1 | 6852 | #define DCBX_APP_SF_PORT 1 |
6853 | #define DCBX_APP_SF_IEEE_MASK 0x0000f000 | ||
6854 | #define DCBX_APP_SF_IEEE_SHIFT 12 | ||
6855 | #define DCBX_APP_SF_IEEE_RESERVED 0 | ||
6856 | #define DCBX_APP_SF_IEEE_ETHTYPE 1 | ||
6857 | #define DCBX_APP_SF_IEEE_TCP_PORT 2 | ||
6858 | #define DCBX_APP_SF_IEEE_UDP_PORT 3 | ||
6859 | #define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4 | ||
6860 | |||
6853 | #define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 | 6861 | #define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 |
6854 | #define DCBX_APP_PROTOCOL_ID_SHIFT 16 | 6862 | #define DCBX_APP_PROTOCOL_ID_SHIFT 16 |
6855 | }; | 6863 | }; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index fd973f4f16c7..49bad00a0f8f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
@@ -37,8 +37,8 @@ | |||
37 | 37 | ||
38 | #define _QLCNIC_LINUX_MAJOR 5 | 38 | #define _QLCNIC_LINUX_MAJOR 5 |
39 | #define _QLCNIC_LINUX_MINOR 3 | 39 | #define _QLCNIC_LINUX_MINOR 3 |
40 | #define _QLCNIC_LINUX_SUBVERSION 64 | 40 | #define _QLCNIC_LINUX_SUBVERSION 65 |
41 | #define QLCNIC_LINUX_VERSIONID "5.3.64" | 41 | #define QLCNIC_LINUX_VERSIONID "5.3.65" |
42 | #define QLCNIC_DRV_IDC_VER 0x01 | 42 | #define QLCNIC_DRV_IDC_VER 0x01 |
43 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ | 43 | #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ |
44 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) | 44 | (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 87c642d3b075..fedd7366713c 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | |||
@@ -102,7 +102,6 @@ | |||
102 | #define QLCNIC_RESPONSE_DESC 0x05 | 102 | #define QLCNIC_RESPONSE_DESC 0x05 |
103 | #define QLCNIC_LRO_DESC 0x12 | 103 | #define QLCNIC_LRO_DESC 0x12 |
104 | 104 | ||
105 | #define QLCNIC_TX_POLL_BUDGET 128 | ||
106 | #define QLCNIC_TCP_HDR_SIZE 20 | 105 | #define QLCNIC_TCP_HDR_SIZE 20 |
107 | #define QLCNIC_TCP_TS_OPTION_SIZE 12 | 106 | #define QLCNIC_TCP_TS_OPTION_SIZE 12 |
108 | #define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63) | 107 | #define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63) |
@@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget) | |||
2008 | struct qlcnic_host_tx_ring *tx_ring; | 2007 | struct qlcnic_host_tx_ring *tx_ring; |
2009 | struct qlcnic_adapter *adapter; | 2008 | struct qlcnic_adapter *adapter; |
2010 | 2009 | ||
2011 | budget = QLCNIC_TX_POLL_BUDGET; | ||
2012 | tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); | 2010 | tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); |
2013 | adapter = tx_ring->adapter; | 2011 | adapter = tx_ring->adapter; |
2014 | work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); | 2012 | work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h index 017d8c2c8285..24061b9b92e8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | |||
@@ -156,10 +156,8 @@ struct qlcnic_vf_info { | |||
156 | spinlock_t vlan_list_lock; /* Lock for VLAN list */ | 156 | spinlock_t vlan_list_lock; /* Lock for VLAN list */ |
157 | }; | 157 | }; |
158 | 158 | ||
159 | struct qlcnic_async_work_list { | 159 | struct qlcnic_async_cmd { |
160 | struct list_head list; | 160 | struct list_head list; |
161 | struct work_struct work; | ||
162 | void *ptr; | ||
163 | struct qlcnic_cmd_args *cmd; | 161 | struct qlcnic_cmd_args *cmd; |
164 | }; | 162 | }; |
165 | 163 | ||
@@ -168,7 +166,10 @@ struct qlcnic_back_channel { | |||
168 | struct workqueue_struct *bc_trans_wq; | 166 | struct workqueue_struct *bc_trans_wq; |
169 | struct workqueue_struct *bc_async_wq; | 167 | struct workqueue_struct *bc_async_wq; |
170 | struct workqueue_struct *bc_flr_wq; | 168 | struct workqueue_struct *bc_flr_wq; |
171 | struct list_head async_list; | 169 | struct qlcnic_adapter *adapter; |
170 | struct list_head async_cmd_list; | ||
171 | struct work_struct vf_async_work; | ||
172 | spinlock_t queue_lock; /* async_cmd_list queue lock */ | ||
172 | }; | 173 | }; |
173 | 174 | ||
174 | struct qlcnic_sriov { | 175 | struct qlcnic_sriov { |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 7327b729ba2e..d7107055ec60 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #define QLC_83XX_VF_RESET_FAIL_THRESH 8 | 29 | #define QLC_83XX_VF_RESET_FAIL_THRESH 8 |
30 | #define QLC_BC_CMD_MAX_RETRY_CNT 5 | 30 | #define QLC_BC_CMD_MAX_RETRY_CNT 5 |
31 | 31 | ||
32 | static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work); | ||
32 | static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); | 33 | static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); |
33 | static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); | 34 | static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); |
34 | static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); | 35 | static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); |
@@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) | |||
177 | } | 178 | } |
178 | 179 | ||
179 | bc->bc_async_wq = wq; | 180 | bc->bc_async_wq = wq; |
180 | INIT_LIST_HEAD(&bc->async_list); | 181 | INIT_LIST_HEAD(&bc->async_cmd_list); |
182 | INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd); | ||
183 | spin_lock_init(&bc->queue_lock); | ||
184 | bc->adapter = adapter; | ||
181 | 185 | ||
182 | for (i = 0; i < num_vfs; i++) { | 186 | for (i = 0; i < num_vfs; i++) { |
183 | vf = &sriov->vf_info[i]; | 187 | vf = &sriov->vf_info[i]; |
@@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac, | |||
1517 | 1521 | ||
1518 | void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) | 1522 | void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) |
1519 | { | 1523 | { |
1520 | struct list_head *head = &bc->async_list; | 1524 | struct list_head *head = &bc->async_cmd_list; |
1521 | struct qlcnic_async_work_list *entry; | 1525 | struct qlcnic_async_cmd *entry; |
1522 | 1526 | ||
1523 | flush_workqueue(bc->bc_async_wq); | 1527 | flush_workqueue(bc->bc_async_wq); |
1528 | cancel_work_sync(&bc->vf_async_work); | ||
1529 | |||
1530 | spin_lock(&bc->queue_lock); | ||
1524 | while (!list_empty(head)) { | 1531 | while (!list_empty(head)) { |
1525 | entry = list_entry(head->next, struct qlcnic_async_work_list, | 1532 | entry = list_entry(head->next, struct qlcnic_async_cmd, |
1526 | list); | 1533 | list); |
1527 | cancel_work_sync(&entry->work); | ||
1528 | list_del(&entry->list); | 1534 | list_del(&entry->list); |
1535 | kfree(entry->cmd); | ||
1529 | kfree(entry); | 1536 | kfree(entry); |
1530 | } | 1537 | } |
1538 | spin_unlock(&bc->queue_lock); | ||
1531 | } | 1539 | } |
1532 | 1540 | ||
1533 | void qlcnic_sriov_vf_set_multi(struct net_device *netdev) | 1541 | void qlcnic_sriov_vf_set_multi(struct net_device *netdev) |
@@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev) | |||
1587 | 1595 | ||
1588 | static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) | 1596 | static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) |
1589 | { | 1597 | { |
1590 | struct qlcnic_async_work_list *entry; | 1598 | struct qlcnic_async_cmd *entry, *tmp; |
1591 | struct qlcnic_adapter *adapter; | 1599 | struct qlcnic_back_channel *bc; |
1592 | struct qlcnic_cmd_args *cmd; | 1600 | struct qlcnic_cmd_args *cmd; |
1601 | struct list_head *head; | ||
1602 | LIST_HEAD(del_list); | ||
1603 | |||
1604 | bc = container_of(work, struct qlcnic_back_channel, vf_async_work); | ||
1605 | head = &bc->async_cmd_list; | ||
1606 | |||
1607 | spin_lock(&bc->queue_lock); | ||
1608 | list_splice_init(head, &del_list); | ||
1609 | spin_unlock(&bc->queue_lock); | ||
1610 | |||
1611 | list_for_each_entry_safe(entry, tmp, &del_list, list) { | ||
1612 | list_del(&entry->list); | ||
1613 | cmd = entry->cmd; | ||
1614 | __qlcnic_sriov_issue_cmd(bc->adapter, cmd); | ||
1615 | kfree(entry); | ||
1616 | } | ||
1617 | |||
1618 | if (!list_empty(head)) | ||
1619 | queue_work(bc->bc_async_wq, &bc->vf_async_work); | ||
1593 | 1620 | ||
1594 | entry = container_of(work, struct qlcnic_async_work_list, work); | ||
1595 | adapter = entry->ptr; | ||
1596 | cmd = entry->cmd; | ||
1597 | __qlcnic_sriov_issue_cmd(adapter, cmd); | ||
1598 | return; | 1621 | return; |
1599 | } | 1622 | } |
1600 | 1623 | ||
1601 | static struct qlcnic_async_work_list * | 1624 | static struct qlcnic_async_cmd * |
1602 | qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc) | 1625 | qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc, |
1626 | struct qlcnic_cmd_args *cmd) | ||
1603 | { | 1627 | { |
1604 | struct list_head *node; | 1628 | struct qlcnic_async_cmd *entry = NULL; |
1605 | struct qlcnic_async_work_list *entry = NULL; | ||
1606 | u8 empty = 0; | ||
1607 | 1629 | ||
1608 | list_for_each(node, &bc->async_list) { | 1630 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); |
1609 | entry = list_entry(node, struct qlcnic_async_work_list, list); | 1631 | if (!entry) |
1610 | if (!work_pending(&entry->work)) { | 1632 | return NULL; |
1611 | empty = 1; | ||
1612 | break; | ||
1613 | } | ||
1614 | } | ||
1615 | 1633 | ||
1616 | if (!empty) { | 1634 | entry->cmd = cmd; |
1617 | entry = kzalloc(sizeof(struct qlcnic_async_work_list), | 1635 | |
1618 | GFP_ATOMIC); | 1636 | spin_lock(&bc->queue_lock); |
1619 | if (entry == NULL) | 1637 | list_add_tail(&entry->list, &bc->async_cmd_list); |
1620 | return NULL; | 1638 | spin_unlock(&bc->queue_lock); |
1621 | list_add_tail(&entry->list, &bc->async_list); | ||
1622 | } | ||
1623 | 1639 | ||
1624 | return entry; | 1640 | return entry; |
1625 | } | 1641 | } |
1626 | 1642 | ||
1627 | static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, | 1643 | static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, |
1628 | work_func_t func, void *data, | ||
1629 | struct qlcnic_cmd_args *cmd) | 1644 | struct qlcnic_cmd_args *cmd) |
1630 | { | 1645 | { |
1631 | struct qlcnic_async_work_list *entry = NULL; | 1646 | struct qlcnic_async_cmd *entry = NULL; |
1632 | 1647 | ||
1633 | entry = qlcnic_sriov_get_free_node_async_work(bc); | 1648 | entry = qlcnic_sriov_alloc_async_cmd(bc, cmd); |
1634 | if (!entry) | 1649 | if (!entry) { |
1650 | qlcnic_free_mbx_args(cmd); | ||
1651 | kfree(cmd); | ||
1635 | return; | 1652 | return; |
1653 | } | ||
1636 | 1654 | ||
1637 | entry->ptr = data; | 1655 | queue_work(bc->bc_async_wq, &bc->vf_async_work); |
1638 | entry->cmd = cmd; | ||
1639 | INIT_WORK(&entry->work, func); | ||
1640 | queue_work(bc->bc_async_wq, &entry->work); | ||
1641 | } | 1656 | } |
1642 | 1657 | ||
1643 | static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, | 1658 | static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, |
@@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, | |||
1649 | if (adapter->need_fw_reset) | 1664 | if (adapter->need_fw_reset) |
1650 | return -EIO; | 1665 | return -EIO; |
1651 | 1666 | ||
1652 | qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd, | 1667 | qlcnic_sriov_schedule_async_cmd(bc, cmd); |
1653 | adapter, cmd); | 1668 | |
1654 | return 0; | 1669 | return 0; |
1655 | } | 1670 | } |
1656 | 1671 | ||
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index c51f34693eae..f85d605e4560 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -734,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status) | |||
734 | netif_receive_skb(skb); | 734 | netif_receive_skb(skb); |
735 | ndev->stats.rx_bytes += len; | 735 | ndev->stats.rx_bytes += len; |
736 | ndev->stats.rx_packets++; | 736 | ndev->stats.rx_packets++; |
737 | kmemleak_not_leak(new_skb); | ||
737 | } else { | 738 | } else { |
738 | ndev->stats.rx_dropped++; | 739 | ndev->stats.rx_dropped++; |
739 | new_skb = skb; | 740 | new_skb = skb; |
@@ -1325,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev) | |||
1325 | kfree_skb(skb); | 1326 | kfree_skb(skb); |
1326 | goto err_cleanup; | 1327 | goto err_cleanup; |
1327 | } | 1328 | } |
1329 | kmemleak_not_leak(skb); | ||
1328 | } | 1330 | } |
1329 | /* continue even if we didn't manage to submit all | 1331 | /* continue even if we didn't manage to submit all |
1330 | * receive descs | 1332 | * receive descs |
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 01a77145a0fa..8fd131207ee1 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c | |||
@@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = { | |||
166 | 166 | ||
167 | static void tsi108_timed_checker(unsigned long dev_ptr); | 167 | static void tsi108_timed_checker(unsigned long dev_ptr); |
168 | 168 | ||
169 | #ifdef DEBUG | ||
169 | static void dump_eth_one(struct net_device *dev) | 170 | static void dump_eth_one(struct net_device *dev) |
170 | { | 171 | { |
171 | struct tsi108_prv_data *data = netdev_priv(dev); | 172 | struct tsi108_prv_data *data = netdev_priv(dev); |
@@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev) | |||
190 | TSI_READ(TSI108_EC_RXESTAT), | 191 | TSI_READ(TSI108_EC_RXESTAT), |
191 | TSI_READ(TSI108_EC_RXERR), data->rxpending); | 192 | TSI_READ(TSI108_EC_RXERR), data->rxpending); |
192 | } | 193 | } |
194 | #endif | ||
193 | 195 | ||
194 | /* Synchronization is needed between the thread and up/down events. | 196 | /* Synchronization is needed between the thread and up/down events. |
195 | * Note that the PHY is accessed through the same registers for both | 197 | * Note that the PHY is accessed through the same registers for both |
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 467fb8b4d083..591af71eae56 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
@@ -644,12 +644,6 @@ struct netvsc_reconfig { | |||
644 | u32 event; | 644 | u32 event; |
645 | }; | 645 | }; |
646 | 646 | ||
647 | struct garp_wrk { | ||
648 | struct work_struct dwrk; | ||
649 | struct net_device *netdev; | ||
650 | struct netvsc_device *netvsc_dev; | ||
651 | }; | ||
652 | |||
653 | /* The context of the netvsc device */ | 647 | /* The context of the netvsc device */ |
654 | struct net_device_context { | 648 | struct net_device_context { |
655 | /* point back to our device context */ | 649 | /* point back to our device context */ |
@@ -667,7 +661,6 @@ struct net_device_context { | |||
667 | 661 | ||
668 | struct work_struct work; | 662 | struct work_struct work; |
669 | u32 msg_enable; /* debug level */ | 663 | u32 msg_enable; /* debug level */ |
670 | struct garp_wrk gwrk; | ||
671 | 664 | ||
672 | struct netvsc_stats __percpu *tx_stats; | 665 | struct netvsc_stats __percpu *tx_stats; |
673 | struct netvsc_stats __percpu *rx_stats; | 666 | struct netvsc_stats __percpu *rx_stats; |
@@ -678,6 +671,15 @@ struct net_device_context { | |||
678 | 671 | ||
679 | /* the device is going away */ | 672 | /* the device is going away */ |
680 | bool start_remove; | 673 | bool start_remove; |
674 | |||
675 | /* State to manage the associated VF interface. */ | ||
676 | struct net_device *vf_netdev; | ||
677 | bool vf_inject; | ||
678 | atomic_t vf_use_cnt; | ||
679 | /* 1: allocated, serial number is valid. 0: not allocated */ | ||
680 | u32 vf_alloc; | ||
681 | /* Serial number of the VF to team with */ | ||
682 | u32 vf_serial; | ||
681 | }; | 683 | }; |
682 | 684 | ||
683 | /* Per netvsc device */ | 685 | /* Per netvsc device */ |
@@ -733,15 +735,7 @@ struct netvsc_device { | |||
733 | u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ | 735 | u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ |
734 | u32 pkt_align; /* alignment bytes, e.g. 8 */ | 736 | u32 pkt_align; /* alignment bytes, e.g. 8 */ |
735 | 737 | ||
736 | /* 1: allocated, serial number is valid. 0: not allocated */ | ||
737 | u32 vf_alloc; | ||
738 | /* Serial number of the VF to team with */ | ||
739 | u32 vf_serial; | ||
740 | atomic_t open_cnt; | 738 | atomic_t open_cnt; |
741 | /* State to manage the associated VF interface. */ | ||
742 | bool vf_inject; | ||
743 | struct net_device *vf_netdev; | ||
744 | atomic_t vf_use_cnt; | ||
745 | }; | 739 | }; |
746 | 740 | ||
747 | static inline struct netvsc_device * | 741 | static inline struct netvsc_device * |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 20e09174ff62..410fb8e81376 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -77,13 +77,9 @@ static struct netvsc_device *alloc_net_device(void) | |||
77 | init_waitqueue_head(&net_device->wait_drain); | 77 | init_waitqueue_head(&net_device->wait_drain); |
78 | net_device->destroy = false; | 78 | net_device->destroy = false; |
79 | atomic_set(&net_device->open_cnt, 0); | 79 | atomic_set(&net_device->open_cnt, 0); |
80 | atomic_set(&net_device->vf_use_cnt, 0); | ||
81 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; | 80 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; |
82 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; | 81 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; |
83 | 82 | ||
84 | net_device->vf_netdev = NULL; | ||
85 | net_device->vf_inject = false; | ||
86 | |||
87 | return net_device; | 83 | return net_device; |
88 | } | 84 | } |
89 | 85 | ||
@@ -1106,16 +1102,16 @@ static void netvsc_send_table(struct hv_device *hdev, | |||
1106 | nvscdev->send_table[i] = tab[i]; | 1102 | nvscdev->send_table[i] = tab[i]; |
1107 | } | 1103 | } |
1108 | 1104 | ||
1109 | static void netvsc_send_vf(struct netvsc_device *nvdev, | 1105 | static void netvsc_send_vf(struct net_device_context *net_device_ctx, |
1110 | struct nvsp_message *nvmsg) | 1106 | struct nvsp_message *nvmsg) |
1111 | { | 1107 | { |
1112 | nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; | 1108 | net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; |
1113 | nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; | 1109 | net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; |
1114 | } | 1110 | } |
1115 | 1111 | ||
1116 | static inline void netvsc_receive_inband(struct hv_device *hdev, | 1112 | static inline void netvsc_receive_inband(struct hv_device *hdev, |
1117 | struct netvsc_device *nvdev, | 1113 | struct net_device_context *net_device_ctx, |
1118 | struct nvsp_message *nvmsg) | 1114 | struct nvsp_message *nvmsg) |
1119 | { | 1115 | { |
1120 | switch (nvmsg->hdr.msg_type) { | 1116 | switch (nvmsg->hdr.msg_type) { |
1121 | case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: | 1117 | case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: |
@@ -1123,7 +1119,7 @@ static inline void netvsc_receive_inband(struct hv_device *hdev, | |||
1123 | break; | 1119 | break; |
1124 | 1120 | ||
1125 | case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: | 1121 | case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: |
1126 | netvsc_send_vf(nvdev, nvmsg); | 1122 | netvsc_send_vf(net_device_ctx, nvmsg); |
1127 | break; | 1123 | break; |
1128 | } | 1124 | } |
1129 | } | 1125 | } |
@@ -1136,6 +1132,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device, | |||
1136 | struct vmpacket_descriptor *desc) | 1132 | struct vmpacket_descriptor *desc) |
1137 | { | 1133 | { |
1138 | struct nvsp_message *nvmsg; | 1134 | struct nvsp_message *nvmsg; |
1135 | struct net_device_context *net_device_ctx = netdev_priv(ndev); | ||
1139 | 1136 | ||
1140 | nvmsg = (struct nvsp_message *)((unsigned long) | 1137 | nvmsg = (struct nvsp_message *)((unsigned long) |
1141 | desc + (desc->offset8 << 3)); | 1138 | desc + (desc->offset8 << 3)); |
@@ -1150,7 +1147,7 @@ static void netvsc_process_raw_pkt(struct hv_device *device, | |||
1150 | break; | 1147 | break; |
1151 | 1148 | ||
1152 | case VM_PKT_DATA_INBAND: | 1149 | case VM_PKT_DATA_INBAND: |
1153 | netvsc_receive_inband(device, net_device, nvmsg); | 1150 | netvsc_receive_inband(device, net_device_ctx, nvmsg); |
1154 | break; | 1151 | break; |
1155 | 1152 | ||
1156 | default: | 1153 | default: |
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 41bd952cc28d..3ba29fc80d05 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -658,20 +658,19 @@ int netvsc_recv_callback(struct hv_device *device_obj, | |||
658 | struct sk_buff *skb; | 658 | struct sk_buff *skb; |
659 | struct sk_buff *vf_skb; | 659 | struct sk_buff *vf_skb; |
660 | struct netvsc_stats *rx_stats; | 660 | struct netvsc_stats *rx_stats; |
661 | struct netvsc_device *netvsc_dev = net_device_ctx->nvdev; | ||
662 | u32 bytes_recvd = packet->total_data_buflen; | 661 | u32 bytes_recvd = packet->total_data_buflen; |
663 | int ret = 0; | 662 | int ret = 0; |
664 | 663 | ||
665 | if (!net || net->reg_state != NETREG_REGISTERED) | 664 | if (!net || net->reg_state != NETREG_REGISTERED) |
666 | return NVSP_STAT_FAIL; | 665 | return NVSP_STAT_FAIL; |
667 | 666 | ||
668 | if (READ_ONCE(netvsc_dev->vf_inject)) { | 667 | if (READ_ONCE(net_device_ctx->vf_inject)) { |
669 | atomic_inc(&netvsc_dev->vf_use_cnt); | 668 | atomic_inc(&net_device_ctx->vf_use_cnt); |
670 | if (!READ_ONCE(netvsc_dev->vf_inject)) { | 669 | if (!READ_ONCE(net_device_ctx->vf_inject)) { |
671 | /* | 670 | /* |
672 | * We raced; just move on. | 671 | * We raced; just move on. |
673 | */ | 672 | */ |
674 | atomic_dec(&netvsc_dev->vf_use_cnt); | 673 | atomic_dec(&net_device_ctx->vf_use_cnt); |
675 | goto vf_injection_done; | 674 | goto vf_injection_done; |
676 | } | 675 | } |
677 | 676 | ||
@@ -683,17 +682,19 @@ int netvsc_recv_callback(struct hv_device *device_obj, | |||
683 | * the host). Deliver these via the VF interface | 682 | * the host). Deliver these via the VF interface |
684 | * in the guest. | 683 | * in the guest. |
685 | */ | 684 | */ |
686 | vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet, | 685 | vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev, |
687 | csum_info, *data, vlan_tci); | 686 | packet, csum_info, *data, |
687 | vlan_tci); | ||
688 | if (vf_skb != NULL) { | 688 | if (vf_skb != NULL) { |
689 | ++netvsc_dev->vf_netdev->stats.rx_packets; | 689 | ++net_device_ctx->vf_netdev->stats.rx_packets; |
690 | netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd; | 690 | net_device_ctx->vf_netdev->stats.rx_bytes += |
691 | bytes_recvd; | ||
691 | netif_receive_skb(vf_skb); | 692 | netif_receive_skb(vf_skb); |
692 | } else { | 693 | } else { |
693 | ++net->stats.rx_dropped; | 694 | ++net->stats.rx_dropped; |
694 | ret = NVSP_STAT_FAIL; | 695 | ret = NVSP_STAT_FAIL; |
695 | } | 696 | } |
696 | atomic_dec(&netvsc_dev->vf_use_cnt); | 697 | atomic_dec(&net_device_ctx->vf_use_cnt); |
697 | return ret; | 698 | return ret; |
698 | } | 699 | } |
699 | 700 | ||
@@ -1150,17 +1151,6 @@ static void netvsc_free_netdev(struct net_device *netdev) | |||
1150 | free_netdev(netdev); | 1151 | free_netdev(netdev); |
1151 | } | 1152 | } |
1152 | 1153 | ||
1153 | static void netvsc_notify_peers(struct work_struct *wrk) | ||
1154 | { | ||
1155 | struct garp_wrk *gwrk; | ||
1156 | |||
1157 | gwrk = container_of(wrk, struct garp_wrk, dwrk); | ||
1158 | |||
1159 | netdev_notify_peers(gwrk->netdev); | ||
1160 | |||
1161 | atomic_dec(&gwrk->netvsc_dev->vf_use_cnt); | ||
1162 | } | ||
1163 | |||
1164 | static struct net_device *get_netvsc_net_device(char *mac) | 1154 | static struct net_device *get_netvsc_net_device(char *mac) |
1165 | { | 1155 | { |
1166 | struct net_device *dev, *found = NULL; | 1156 | struct net_device *dev, *found = NULL; |
@@ -1203,7 +1193,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev) | |||
1203 | 1193 | ||
1204 | net_device_ctx = netdev_priv(ndev); | 1194 | net_device_ctx = netdev_priv(ndev); |
1205 | netvsc_dev = net_device_ctx->nvdev; | 1195 | netvsc_dev = net_device_ctx->nvdev; |
1206 | if (netvsc_dev == NULL) | 1196 | if (!netvsc_dev || net_device_ctx->vf_netdev) |
1207 | return NOTIFY_DONE; | 1197 | return NOTIFY_DONE; |
1208 | 1198 | ||
1209 | netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); | 1199 | netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); |
@@ -1211,10 +1201,23 @@ static int netvsc_register_vf(struct net_device *vf_netdev) | |||
1211 | * Take a reference on the module. | 1201 | * Take a reference on the module. |
1212 | */ | 1202 | */ |
1213 | try_module_get(THIS_MODULE); | 1203 | try_module_get(THIS_MODULE); |
1214 | netvsc_dev->vf_netdev = vf_netdev; | 1204 | net_device_ctx->vf_netdev = vf_netdev; |
1215 | return NOTIFY_OK; | 1205 | return NOTIFY_OK; |
1216 | } | 1206 | } |
1217 | 1207 | ||
1208 | static void netvsc_inject_enable(struct net_device_context *net_device_ctx) | ||
1209 | { | ||
1210 | net_device_ctx->vf_inject = true; | ||
1211 | } | ||
1212 | |||
1213 | static void netvsc_inject_disable(struct net_device_context *net_device_ctx) | ||
1214 | { | ||
1215 | net_device_ctx->vf_inject = false; | ||
1216 | |||
1217 | /* Wait for currently active users to drain out. */ | ||
1218 | while (atomic_read(&net_device_ctx->vf_use_cnt) != 0) | ||
1219 | udelay(50); | ||
1220 | } | ||
1218 | 1221 | ||
1219 | static int netvsc_vf_up(struct net_device *vf_netdev) | 1222 | static int netvsc_vf_up(struct net_device *vf_netdev) |
1220 | { | 1223 | { |
@@ -1233,11 +1236,11 @@ static int netvsc_vf_up(struct net_device *vf_netdev) | |||
1233 | net_device_ctx = netdev_priv(ndev); | 1236 | net_device_ctx = netdev_priv(ndev); |
1234 | netvsc_dev = net_device_ctx->nvdev; | 1237 | netvsc_dev = net_device_ctx->nvdev; |
1235 | 1238 | ||
1236 | if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) | 1239 | if (!netvsc_dev || !net_device_ctx->vf_netdev) |
1237 | return NOTIFY_DONE; | 1240 | return NOTIFY_DONE; |
1238 | 1241 | ||
1239 | netdev_info(ndev, "VF up: %s\n", vf_netdev->name); | 1242 | netdev_info(ndev, "VF up: %s\n", vf_netdev->name); |
1240 | netvsc_dev->vf_inject = true; | 1243 | netvsc_inject_enable(net_device_ctx); |
1241 | 1244 | ||
1242 | /* | 1245 | /* |
1243 | * Open the device before switching data path. | 1246 | * Open the device before switching data path. |
@@ -1252,15 +1255,8 @@ static int netvsc_vf_up(struct net_device *vf_netdev) | |||
1252 | 1255 | ||
1253 | netif_carrier_off(ndev); | 1256 | netif_carrier_off(ndev); |
1254 | 1257 | ||
1255 | /* | 1258 | /* Now notify peers through VF device. */ |
1256 | * Now notify peers. We are scheduling work to | 1259 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev); |
1257 | * notify peers; take a reference to prevent | ||
1258 | * the VF interface from vanishing. | ||
1259 | */ | ||
1260 | atomic_inc(&netvsc_dev->vf_use_cnt); | ||
1261 | net_device_ctx->gwrk.netdev = vf_netdev; | ||
1262 | net_device_ctx->gwrk.netvsc_dev = netvsc_dev; | ||
1263 | schedule_work(&net_device_ctx->gwrk.dwrk); | ||
1264 | 1260 | ||
1265 | return NOTIFY_OK; | 1261 | return NOTIFY_OK; |
1266 | } | 1262 | } |
@@ -1283,29 +1279,18 @@ static int netvsc_vf_down(struct net_device *vf_netdev) | |||
1283 | net_device_ctx = netdev_priv(ndev); | 1279 | net_device_ctx = netdev_priv(ndev); |
1284 | netvsc_dev = net_device_ctx->nvdev; | 1280 | netvsc_dev = net_device_ctx->nvdev; |
1285 | 1281 | ||
1286 | if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) | 1282 | if (!netvsc_dev || !net_device_ctx->vf_netdev) |
1287 | return NOTIFY_DONE; | 1283 | return NOTIFY_DONE; |
1288 | 1284 | ||
1289 | netdev_info(ndev, "VF down: %s\n", vf_netdev->name); | 1285 | netdev_info(ndev, "VF down: %s\n", vf_netdev->name); |
1290 | netvsc_dev->vf_inject = false; | 1286 | netvsc_inject_disable(net_device_ctx); |
1291 | /* | ||
1292 | * Wait for currently active users to | ||
1293 | * drain out. | ||
1294 | */ | ||
1295 | |||
1296 | while (atomic_read(&netvsc_dev->vf_use_cnt) != 0) | ||
1297 | udelay(50); | ||
1298 | netvsc_switch_datapath(ndev, false); | 1287 | netvsc_switch_datapath(ndev, false); |
1299 | netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); | 1288 | netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); |
1300 | rndis_filter_close(netvsc_dev); | 1289 | rndis_filter_close(netvsc_dev); |
1301 | netif_carrier_on(ndev); | 1290 | netif_carrier_on(ndev); |
1302 | /* | 1291 | |
1303 | * Notify peers. | 1292 | /* Now notify peers through netvsc device. */ |
1304 | */ | 1293 | call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev); |
1305 | atomic_inc(&netvsc_dev->vf_use_cnt); | ||
1306 | net_device_ctx->gwrk.netdev = ndev; | ||
1307 | net_device_ctx->gwrk.netvsc_dev = netvsc_dev; | ||
1308 | schedule_work(&net_device_ctx->gwrk.dwrk); | ||
1309 | 1294 | ||
1310 | return NOTIFY_OK; | 1295 | return NOTIFY_OK; |
1311 | } | 1296 | } |
@@ -1327,11 +1312,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) | |||
1327 | 1312 | ||
1328 | net_device_ctx = netdev_priv(ndev); | 1313 | net_device_ctx = netdev_priv(ndev); |
1329 | netvsc_dev = net_device_ctx->nvdev; | 1314 | netvsc_dev = net_device_ctx->nvdev; |
1330 | if (netvsc_dev == NULL) | 1315 | if (!netvsc_dev || !net_device_ctx->vf_netdev) |
1331 | return NOTIFY_DONE; | 1316 | return NOTIFY_DONE; |
1332 | netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); | 1317 | netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); |
1333 | 1318 | netvsc_inject_disable(net_device_ctx); | |
1334 | netvsc_dev->vf_netdev = NULL; | 1319 | net_device_ctx->vf_netdev = NULL; |
1335 | module_put(THIS_MODULE); | 1320 | module_put(THIS_MODULE); |
1336 | return NOTIFY_OK; | 1321 | return NOTIFY_OK; |
1337 | } | 1322 | } |
@@ -1377,11 +1362,14 @@ static int netvsc_probe(struct hv_device *dev, | |||
1377 | 1362 | ||
1378 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); | 1363 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); |
1379 | INIT_WORK(&net_device_ctx->work, do_set_multicast); | 1364 | INIT_WORK(&net_device_ctx->work, do_set_multicast); |
1380 | INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers); | ||
1381 | 1365 | ||
1382 | spin_lock_init(&net_device_ctx->lock); | 1366 | spin_lock_init(&net_device_ctx->lock); |
1383 | INIT_LIST_HEAD(&net_device_ctx->reconfig_events); | 1367 | INIT_LIST_HEAD(&net_device_ctx->reconfig_events); |
1384 | 1368 | ||
1369 | atomic_set(&net_device_ctx->vf_use_cnt, 0); | ||
1370 | net_device_ctx->vf_netdev = NULL; | ||
1371 | net_device_ctx->vf_inject = false; | ||
1372 | |||
1385 | net->netdev_ops = &device_ops; | 1373 | net->netdev_ops = &device_ops; |
1386 | 1374 | ||
1387 | net->hw_features = NETVSC_HW_FEATURES; | 1375 | net->hw_features = NETVSC_HW_FEATURES; |
@@ -1494,8 +1482,13 @@ static int netvsc_netdev_event(struct notifier_block *this, | |||
1494 | { | 1482 | { |
1495 | struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); | 1483 | struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); |
1496 | 1484 | ||
1497 | /* Avoid Vlan, Bonding dev with same MAC registering as VF */ | 1485 | /* Avoid Vlan dev with same MAC registering as VF */ |
1498 | if (event_dev->priv_flags & (IFF_802_1Q_VLAN | IFF_BONDING)) | 1486 | if (event_dev->priv_flags & IFF_802_1Q_VLAN) |
1487 | return NOTIFY_DONE; | ||
1488 | |||
1489 | /* Avoid Bonding master dev with same MAC registering as VF */ | ||
1490 | if (event_dev->priv_flags & IFF_BONDING && | ||
1491 | event_dev->flags & IFF_MASTER) | ||
1499 | return NOTIFY_DONE; | 1492 | return NOTIFY_DONE; |
1500 | 1493 | ||
1501 | switch (event) { | 1494 | switch (event) { |
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index d13e6e15d7b5..351e701eb043 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c | |||
@@ -270,6 +270,7 @@ struct macsec_dev { | |||
270 | struct pcpu_secy_stats __percpu *stats; | 270 | struct pcpu_secy_stats __percpu *stats; |
271 | struct list_head secys; | 271 | struct list_head secys; |
272 | struct gro_cells gro_cells; | 272 | struct gro_cells gro_cells; |
273 | unsigned int nest_level; | ||
273 | }; | 274 | }; |
274 | 275 | ||
275 | /** | 276 | /** |
@@ -2699,6 +2700,8 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, | |||
2699 | 2700 | ||
2700 | #define MACSEC_FEATURES \ | 2701 | #define MACSEC_FEATURES \ |
2701 | (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) | 2702 | (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) |
2703 | static struct lock_class_key macsec_netdev_addr_lock_key; | ||
2704 | |||
2702 | static int macsec_dev_init(struct net_device *dev) | 2705 | static int macsec_dev_init(struct net_device *dev) |
2703 | { | 2706 | { |
2704 | struct macsec_dev *macsec = macsec_priv(dev); | 2707 | struct macsec_dev *macsec = macsec_priv(dev); |
@@ -2910,6 +2913,13 @@ static int macsec_get_iflink(const struct net_device *dev) | |||
2910 | return macsec_priv(dev)->real_dev->ifindex; | 2913 | return macsec_priv(dev)->real_dev->ifindex; |
2911 | } | 2914 | } |
2912 | 2915 | ||
2916 | |||
2917 | static int macsec_get_nest_level(struct net_device *dev) | ||
2918 | { | ||
2919 | return macsec_priv(dev)->nest_level; | ||
2920 | } | ||
2921 | |||
2922 | |||
2913 | static const struct net_device_ops macsec_netdev_ops = { | 2923 | static const struct net_device_ops macsec_netdev_ops = { |
2914 | .ndo_init = macsec_dev_init, | 2924 | .ndo_init = macsec_dev_init, |
2915 | .ndo_uninit = macsec_dev_uninit, | 2925 | .ndo_uninit = macsec_dev_uninit, |
@@ -2923,6 +2933,7 @@ static const struct net_device_ops macsec_netdev_ops = { | |||
2923 | .ndo_start_xmit = macsec_start_xmit, | 2933 | .ndo_start_xmit = macsec_start_xmit, |
2924 | .ndo_get_stats64 = macsec_get_stats64, | 2934 | .ndo_get_stats64 = macsec_get_stats64, |
2925 | .ndo_get_iflink = macsec_get_iflink, | 2935 | .ndo_get_iflink = macsec_get_iflink, |
2936 | .ndo_get_lock_subclass = macsec_get_nest_level, | ||
2926 | }; | 2937 | }; |
2927 | 2938 | ||
2928 | static const struct device_type macsec_type = { | 2939 | static const struct device_type macsec_type = { |
@@ -3047,22 +3058,31 @@ static void macsec_del_dev(struct macsec_dev *macsec) | |||
3047 | } | 3058 | } |
3048 | } | 3059 | } |
3049 | 3060 | ||
3061 | static void macsec_common_dellink(struct net_device *dev, struct list_head *head) | ||
3062 | { | ||
3063 | struct macsec_dev *macsec = macsec_priv(dev); | ||
3064 | struct net_device *real_dev = macsec->real_dev; | ||
3065 | |||
3066 | unregister_netdevice_queue(dev, head); | ||
3067 | list_del_rcu(&macsec->secys); | ||
3068 | macsec_del_dev(macsec); | ||
3069 | netdev_upper_dev_unlink(real_dev, dev); | ||
3070 | |||
3071 | macsec_generation++; | ||
3072 | } | ||
3073 | |||
3050 | static void macsec_dellink(struct net_device *dev, struct list_head *head) | 3074 | static void macsec_dellink(struct net_device *dev, struct list_head *head) |
3051 | { | 3075 | { |
3052 | struct macsec_dev *macsec = macsec_priv(dev); | 3076 | struct macsec_dev *macsec = macsec_priv(dev); |
3053 | struct net_device *real_dev = macsec->real_dev; | 3077 | struct net_device *real_dev = macsec->real_dev; |
3054 | struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); | 3078 | struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); |
3055 | 3079 | ||
3056 | macsec_generation++; | 3080 | macsec_common_dellink(dev, head); |
3057 | 3081 | ||
3058 | unregister_netdevice_queue(dev, head); | ||
3059 | list_del_rcu(&macsec->secys); | ||
3060 | if (list_empty(&rxd->secys)) { | 3082 | if (list_empty(&rxd->secys)) { |
3061 | netdev_rx_handler_unregister(real_dev); | 3083 | netdev_rx_handler_unregister(real_dev); |
3062 | kfree(rxd); | 3084 | kfree(rxd); |
3063 | } | 3085 | } |
3064 | |||
3065 | macsec_del_dev(macsec); | ||
3066 | } | 3086 | } |
3067 | 3087 | ||
3068 | static int register_macsec_dev(struct net_device *real_dev, | 3088 | static int register_macsec_dev(struct net_device *real_dev, |
@@ -3181,6 +3201,16 @@ static int macsec_newlink(struct net *net, struct net_device *dev, | |||
3181 | 3201 | ||
3182 | dev_hold(real_dev); | 3202 | dev_hold(real_dev); |
3183 | 3203 | ||
3204 | macsec->nest_level = dev_get_nest_level(real_dev) + 1; | ||
3205 | netdev_lockdep_set_classes(dev); | ||
3206 | lockdep_set_class_and_subclass(&dev->addr_list_lock, | ||
3207 | &macsec_netdev_addr_lock_key, | ||
3208 | macsec_get_nest_level(dev)); | ||
3209 | |||
3210 | err = netdev_upper_dev_link(real_dev, dev); | ||
3211 | if (err < 0) | ||
3212 | goto unregister; | ||
3213 | |||
3184 | /* need to be already registered so that ->init has run and | 3214 | /* need to be already registered so that ->init has run and |
3185 | * the MAC addr is set | 3215 | * the MAC addr is set |
3186 | */ | 3216 | */ |
@@ -3193,12 +3223,12 @@ static int macsec_newlink(struct net *net, struct net_device *dev, | |||
3193 | 3223 | ||
3194 | if (rx_handler && sci_exists(real_dev, sci)) { | 3224 | if (rx_handler && sci_exists(real_dev, sci)) { |
3195 | err = -EBUSY; | 3225 | err = -EBUSY; |
3196 | goto unregister; | 3226 | goto unlink; |
3197 | } | 3227 | } |
3198 | 3228 | ||
3199 | err = macsec_add_dev(dev, sci, icv_len); | 3229 | err = macsec_add_dev(dev, sci, icv_len); |
3200 | if (err) | 3230 | if (err) |
3201 | goto unregister; | 3231 | goto unlink; |
3202 | 3232 | ||
3203 | if (data) | 3233 | if (data) |
3204 | macsec_changelink_common(dev, data); | 3234 | macsec_changelink_common(dev, data); |
@@ -3213,6 +3243,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev, | |||
3213 | 3243 | ||
3214 | del_dev: | 3244 | del_dev: |
3215 | macsec_del_dev(macsec); | 3245 | macsec_del_dev(macsec); |
3246 | unlink: | ||
3247 | netdev_upper_dev_unlink(real_dev, dev); | ||
3216 | unregister: | 3248 | unregister: |
3217 | unregister_netdevice(dev); | 3249 | unregister_netdevice(dev); |
3218 | return err; | 3250 | return err; |
@@ -3382,8 +3414,12 @@ static int macsec_notify(struct notifier_block *this, unsigned long event, | |||
3382 | 3414 | ||
3383 | rxd = macsec_data_rtnl(real_dev); | 3415 | rxd = macsec_data_rtnl(real_dev); |
3384 | list_for_each_entry_safe(m, n, &rxd->secys, secys) { | 3416 | list_for_each_entry_safe(m, n, &rxd->secys, secys) { |
3385 | macsec_dellink(m->secy.netdev, &head); | 3417 | macsec_common_dellink(m->secy.netdev, &head); |
3386 | } | 3418 | } |
3419 | |||
3420 | netdev_rx_handler_unregister(real_dev); | ||
3421 | kfree(rxd); | ||
3422 | |||
3387 | unregister_netdevice_many(&head); | 3423 | unregister_netdevice_many(&head); |
3388 | break; | 3424 | break; |
3389 | } | 3425 | } |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index cd9b53834bf6..3234fcdea317 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -1315,7 +1315,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
1315 | vlan->dev = dev; | 1315 | vlan->dev = dev; |
1316 | vlan->port = port; | 1316 | vlan->port = port; |
1317 | vlan->set_features = MACVLAN_FEATURES; | 1317 | vlan->set_features = MACVLAN_FEATURES; |
1318 | vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1; | 1318 | vlan->nest_level = dev_get_nest_level(lowerdev) + 1; |
1319 | 1319 | ||
1320 | vlan->mode = MACVLAN_MODE_VEPA; | 1320 | vlan->mode = MACVLAN_MODE_VEPA; |
1321 | if (data && data[IFLA_MACVLAN_MODE]) | 1321 | if (data && data[IFLA_MACVLAN_MODE]) |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a38c0dac514b..070e3290aa6e 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -275,7 +275,6 @@ static void macvtap_put_queue(struct macvtap_queue *q) | |||
275 | rtnl_unlock(); | 275 | rtnl_unlock(); |
276 | 276 | ||
277 | synchronize_rcu(); | 277 | synchronize_rcu(); |
278 | skb_array_cleanup(&q->skb_array); | ||
279 | sock_put(&q->sk); | 278 | sock_put(&q->sk); |
280 | } | 279 | } |
281 | 280 | ||
@@ -533,10 +532,8 @@ static void macvtap_sock_write_space(struct sock *sk) | |||
533 | static void macvtap_sock_destruct(struct sock *sk) | 532 | static void macvtap_sock_destruct(struct sock *sk) |
534 | { | 533 | { |
535 | struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk); | 534 | struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk); |
536 | struct sk_buff *skb; | ||
537 | 535 | ||
538 | while ((skb = skb_array_consume(&q->skb_array)) != NULL) | 536 | skb_array_cleanup(&q->skb_array); |
539 | kfree_skb(skb); | ||
540 | } | 537 | } |
541 | 538 | ||
542 | static int macvtap_open(struct inode *inode, struct file *file) | 539 | static int macvtap_open(struct inode *inode, struct file *file) |
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 1882d9828c99..053e87905b94 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
@@ -677,17 +677,28 @@ static void kszphy_get_stats(struct phy_device *phydev, | |||
677 | data[i] = kszphy_get_stat(phydev, i); | 677 | data[i] = kszphy_get_stat(phydev, i); |
678 | } | 678 | } |
679 | 679 | ||
680 | static int kszphy_resume(struct phy_device *phydev) | 680 | static int kszphy_suspend(struct phy_device *phydev) |
681 | { | 681 | { |
682 | int value; | 682 | /* Disable PHY Interrupts */ |
683 | if (phy_interrupt_is_valid(phydev)) { | ||
684 | phydev->interrupts = PHY_INTERRUPT_DISABLED; | ||
685 | if (phydev->drv->config_intr) | ||
686 | phydev->drv->config_intr(phydev); | ||
687 | } | ||
683 | 688 | ||
684 | mutex_lock(&phydev->lock); | 689 | return genphy_suspend(phydev); |
690 | } | ||
685 | 691 | ||
686 | value = phy_read(phydev, MII_BMCR); | 692 | static int kszphy_resume(struct phy_device *phydev) |
687 | phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); | 693 | { |
694 | genphy_resume(phydev); | ||
688 | 695 | ||
689 | kszphy_config_intr(phydev); | 696 | /* Enable PHY Interrupts */ |
690 | mutex_unlock(&phydev->lock); | 697 | if (phy_interrupt_is_valid(phydev)) { |
698 | phydev->interrupts = PHY_INTERRUPT_ENABLED; | ||
699 | if (phydev->drv->config_intr) | ||
700 | phydev->drv->config_intr(phydev); | ||
701 | } | ||
691 | 702 | ||
692 | return 0; | 703 | return 0; |
693 | } | 704 | } |
@@ -900,7 +911,7 @@ static struct phy_driver ksphy_driver[] = { | |||
900 | .get_sset_count = kszphy_get_sset_count, | 911 | .get_sset_count = kszphy_get_sset_count, |
901 | .get_strings = kszphy_get_strings, | 912 | .get_strings = kszphy_get_strings, |
902 | .get_stats = kszphy_get_stats, | 913 | .get_stats = kszphy_get_stats, |
903 | .suspend = genphy_suspend, | 914 | .suspend = kszphy_suspend, |
904 | .resume = kszphy_resume, | 915 | .resume = kszphy_resume, |
905 | }, { | 916 | }, { |
906 | .phy_id = PHY_ID_KSZ8061, | 917 | .phy_id = PHY_ID_KSZ8061, |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index da4e3d6632f6..c0dda6fc0921 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -1811,7 +1811,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, | |||
1811 | fl4.flowi4_mark = skb->mark; | 1811 | fl4.flowi4_mark = skb->mark; |
1812 | fl4.flowi4_proto = IPPROTO_UDP; | 1812 | fl4.flowi4_proto = IPPROTO_UDP; |
1813 | fl4.daddr = daddr; | 1813 | fl4.daddr = daddr; |
1814 | fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; | 1814 | fl4.saddr = *saddr; |
1815 | 1815 | ||
1816 | rt = ip_route_output_key(vxlan->net, &fl4); | 1816 | rt = ip_route_output_key(vxlan->net, &fl4); |
1817 | if (!IS_ERR(rt)) { | 1817 | if (!IS_ERR(rt)) { |
@@ -1847,7 +1847,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, | |||
1847 | memset(&fl6, 0, sizeof(fl6)); | 1847 | memset(&fl6, 0, sizeof(fl6)); |
1848 | fl6.flowi6_oif = oif; | 1848 | fl6.flowi6_oif = oif; |
1849 | fl6.daddr = *daddr; | 1849 | fl6.daddr = *daddr; |
1850 | fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; | 1850 | fl6.saddr = *saddr; |
1851 | fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); | 1851 | fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); |
1852 | fl6.flowi6_mark = skb->mark; | 1852 | fl6.flowi6_mark = skb->mark; |
1853 | fl6.flowi6_proto = IPPROTO_UDP; | 1853 | fl6.flowi6_proto = IPPROTO_UDP; |
@@ -1920,7 +1920,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
1920 | struct rtable *rt = NULL; | 1920 | struct rtable *rt = NULL; |
1921 | const struct iphdr *old_iph; | 1921 | const struct iphdr *old_iph; |
1922 | union vxlan_addr *dst; | 1922 | union vxlan_addr *dst; |
1923 | union vxlan_addr remote_ip; | 1923 | union vxlan_addr remote_ip, local_ip; |
1924 | union vxlan_addr *src; | ||
1924 | struct vxlan_metadata _md; | 1925 | struct vxlan_metadata _md; |
1925 | struct vxlan_metadata *md = &_md; | 1926 | struct vxlan_metadata *md = &_md; |
1926 | __be16 src_port = 0, dst_port; | 1927 | __be16 src_port = 0, dst_port; |
@@ -1938,6 +1939,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
1938 | dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; | 1939 | dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; |
1939 | vni = rdst->remote_vni; | 1940 | vni = rdst->remote_vni; |
1940 | dst = &rdst->remote_ip; | 1941 | dst = &rdst->remote_ip; |
1942 | src = &vxlan->cfg.saddr; | ||
1941 | dst_cache = &rdst->dst_cache; | 1943 | dst_cache = &rdst->dst_cache; |
1942 | } else { | 1944 | } else { |
1943 | if (!info) { | 1945 | if (!info) { |
@@ -1948,11 +1950,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
1948 | dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; | 1950 | dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; |
1949 | vni = vxlan_tun_id_to_vni(info->key.tun_id); | 1951 | vni = vxlan_tun_id_to_vni(info->key.tun_id); |
1950 | remote_ip.sa.sa_family = ip_tunnel_info_af(info); | 1952 | remote_ip.sa.sa_family = ip_tunnel_info_af(info); |
1951 | if (remote_ip.sa.sa_family == AF_INET) | 1953 | if (remote_ip.sa.sa_family == AF_INET) { |
1952 | remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; | 1954 | remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; |
1953 | else | 1955 | local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src; |
1956 | } else { | ||
1954 | remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; | 1957 | remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; |
1958 | local_ip.sin6.sin6_addr = info->key.u.ipv6.src; | ||
1959 | } | ||
1955 | dst = &remote_ip; | 1960 | dst = &remote_ip; |
1961 | src = &local_ip; | ||
1956 | dst_cache = &info->dst_cache; | 1962 | dst_cache = &info->dst_cache; |
1957 | } | 1963 | } |
1958 | 1964 | ||
@@ -1992,15 +1998,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
1992 | } | 1998 | } |
1993 | 1999 | ||
1994 | if (dst->sa.sa_family == AF_INET) { | 2000 | if (dst->sa.sa_family == AF_INET) { |
1995 | __be32 saddr; | ||
1996 | |||
1997 | if (!vxlan->vn4_sock) | 2001 | if (!vxlan->vn4_sock) |
1998 | goto drop; | 2002 | goto drop; |
1999 | sk = vxlan->vn4_sock->sock->sk; | 2003 | sk = vxlan->vn4_sock->sock->sk; |
2000 | 2004 | ||
2001 | rt = vxlan_get_route(vxlan, skb, | 2005 | rt = vxlan_get_route(vxlan, skb, |
2002 | rdst ? rdst->remote_ifindex : 0, tos, | 2006 | rdst ? rdst->remote_ifindex : 0, tos, |
2003 | dst->sin.sin_addr.s_addr, &saddr, | 2007 | dst->sin.sin_addr.s_addr, |
2008 | &src->sin.sin_addr.s_addr, | ||
2004 | dst_cache, info); | 2009 | dst_cache, info); |
2005 | if (IS_ERR(rt)) { | 2010 | if (IS_ERR(rt)) { |
2006 | netdev_dbg(dev, "no route to %pI4\n", | 2011 | netdev_dbg(dev, "no route to %pI4\n", |
@@ -2017,7 +2022,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2017 | } | 2022 | } |
2018 | 2023 | ||
2019 | /* Bypass encapsulation if the destination is local */ | 2024 | /* Bypass encapsulation if the destination is local */ |
2020 | if (rt->rt_flags & RTCF_LOCAL && | 2025 | if (!info && rt->rt_flags & RTCF_LOCAL && |
2021 | !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { | 2026 | !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { |
2022 | struct vxlan_dev *dst_vxlan; | 2027 | struct vxlan_dev *dst_vxlan; |
2023 | 2028 | ||
@@ -2043,13 +2048,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2043 | if (err < 0) | 2048 | if (err < 0) |
2044 | goto xmit_tx_error; | 2049 | goto xmit_tx_error; |
2045 | 2050 | ||
2046 | udp_tunnel_xmit_skb(rt, sk, skb, saddr, | 2051 | udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr, |
2047 | dst->sin.sin_addr.s_addr, tos, ttl, df, | 2052 | dst->sin.sin_addr.s_addr, tos, ttl, df, |
2048 | src_port, dst_port, xnet, !udp_sum); | 2053 | src_port, dst_port, xnet, !udp_sum); |
2049 | #if IS_ENABLED(CONFIG_IPV6) | 2054 | #if IS_ENABLED(CONFIG_IPV6) |
2050 | } else { | 2055 | } else { |
2051 | struct dst_entry *ndst; | 2056 | struct dst_entry *ndst; |
2052 | struct in6_addr saddr; | ||
2053 | u32 rt6i_flags; | 2057 | u32 rt6i_flags; |
2054 | 2058 | ||
2055 | if (!vxlan->vn6_sock) | 2059 | if (!vxlan->vn6_sock) |
@@ -2058,7 +2062,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2058 | 2062 | ||
2059 | ndst = vxlan6_get_route(vxlan, skb, | 2063 | ndst = vxlan6_get_route(vxlan, skb, |
2060 | rdst ? rdst->remote_ifindex : 0, tos, | 2064 | rdst ? rdst->remote_ifindex : 0, tos, |
2061 | label, &dst->sin6.sin6_addr, &saddr, | 2065 | label, &dst->sin6.sin6_addr, |
2066 | &src->sin6.sin6_addr, | ||
2062 | dst_cache, info); | 2067 | dst_cache, info); |
2063 | if (IS_ERR(ndst)) { | 2068 | if (IS_ERR(ndst)) { |
2064 | netdev_dbg(dev, "no route to %pI6\n", | 2069 | netdev_dbg(dev, "no route to %pI6\n", |
@@ -2077,7 +2082,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2077 | 2082 | ||
2078 | /* Bypass encapsulation if the destination is local */ | 2083 | /* Bypass encapsulation if the destination is local */ |
2079 | rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; | 2084 | rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; |
2080 | if (rt6i_flags & RTF_LOCAL && | 2085 | if (!info && rt6i_flags & RTF_LOCAL && |
2081 | !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { | 2086 | !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { |
2082 | struct vxlan_dev *dst_vxlan; | 2087 | struct vxlan_dev *dst_vxlan; |
2083 | 2088 | ||
@@ -2104,7 +2109,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2104 | return; | 2109 | return; |
2105 | } | 2110 | } |
2106 | udp_tunnel6_xmit_skb(ndst, sk, skb, dev, | 2111 | udp_tunnel6_xmit_skb(ndst, sk, skb, dev, |
2107 | &saddr, &dst->sin6.sin6_addr, tos, ttl, | 2112 | &src->sin6.sin6_addr, |
2113 | &dst->sin6.sin6_addr, tos, ttl, | ||
2108 | label, src_port, dst_port, !udp_sum); | 2114 | label, src_port, dst_port, !udp_sum); |
2109 | #endif | 2115 | #endif |
2110 | } | 2116 | } |
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 1d689169da76..9e1f2d9c9865 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c | |||
@@ -5700,10 +5700,11 @@ out: | |||
5700 | mutex_unlock(&wl->mutex); | 5700 | mutex_unlock(&wl->mutex); |
5701 | } | 5701 | } |
5702 | 5702 | ||
5703 | static u32 wlcore_op_get_expected_throughput(struct ieee80211_sta *sta) | 5703 | static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw, |
5704 | struct ieee80211_sta *sta) | ||
5704 | { | 5705 | { |
5705 | struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv; | 5706 | struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv; |
5706 | struct wl1271 *wl = wl_sta->wl; | 5707 | struct wl1271 *wl = hw->priv; |
5707 | u8 hlid = wl_sta->hlid; | 5708 | u8 hlid = wl_sta->hlid; |
5708 | 5709 | ||
5709 | /* return in units of Kbps */ | 5710 | /* return in units of Kbps */ |
diff --git a/drivers/of/base.c b/drivers/of/base.c index 7792266db259..3ce69536a7b3 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -1631,8 +1631,7 @@ static int __of_parse_phandle_with_args(const struct device_node *np, | |||
1631 | */ | 1631 | */ |
1632 | 1632 | ||
1633 | err: | 1633 | err: |
1634 | if (it.node) | 1634 | of_node_put(it.node); |
1635 | of_node_put(it.node); | ||
1636 | return rc; | 1635 | return rc; |
1637 | } | 1636 | } |
1638 | 1637 | ||
@@ -2343,20 +2342,13 @@ struct device_node *of_graph_get_endpoint_by_regs( | |||
2343 | const struct device_node *parent, int port_reg, int reg) | 2342 | const struct device_node *parent, int port_reg, int reg) |
2344 | { | 2343 | { |
2345 | struct of_endpoint endpoint; | 2344 | struct of_endpoint endpoint; |
2346 | struct device_node *node, *prev_node = NULL; | 2345 | struct device_node *node = NULL; |
2347 | |||
2348 | while (1) { | ||
2349 | node = of_graph_get_next_endpoint(parent, prev_node); | ||
2350 | of_node_put(prev_node); | ||
2351 | if (!node) | ||
2352 | break; | ||
2353 | 2346 | ||
2347 | for_each_endpoint_of_node(parent, node) { | ||
2354 | of_graph_parse_endpoint(node, &endpoint); | 2348 | of_graph_parse_endpoint(node, &endpoint); |
2355 | if (((port_reg == -1) || (endpoint.port == port_reg)) && | 2349 | if (((port_reg == -1) || (endpoint.port == port_reg)) && |
2356 | ((reg == -1) || (endpoint.id == reg))) | 2350 | ((reg == -1) || (endpoint.id == reg))) |
2357 | return node; | 2351 | return node; |
2358 | |||
2359 | prev_node = node; | ||
2360 | } | 2352 | } |
2361 | 2353 | ||
2362 | return NULL; | 2354 | return NULL; |
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 55f1b8391149..085c6389afd1 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
@@ -517,7 +517,7 @@ static void *__unflatten_device_tree(const void *blob, | |||
517 | pr_warning("End of tree marker overwritten: %08x\n", | 517 | pr_warning("End of tree marker overwritten: %08x\n", |
518 | be32_to_cpup(mem + size)); | 518 | be32_to_cpup(mem + size)); |
519 | 519 | ||
520 | if (detached) { | 520 | if (detached && mynodes) { |
521 | of_node_set_flag(*mynodes, OF_DETACHED); | 521 | of_node_set_flag(*mynodes, OF_DETACHED); |
522 | pr_debug("unflattened tree is detached\n"); | 522 | pr_debug("unflattened tree is detached\n"); |
523 | } | 523 | } |
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 89a71c6074fc..a2e68f740eda 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
@@ -544,12 +544,15 @@ void __init of_irq_init(const struct of_device_id *matches) | |||
544 | 544 | ||
545 | list_del(&desc->list); | 545 | list_del(&desc->list); |
546 | 546 | ||
547 | of_node_set_flag(desc->dev, OF_POPULATED); | ||
548 | |||
547 | pr_debug("of_irq_init: init %s (%p), parent %p\n", | 549 | pr_debug("of_irq_init: init %s (%p), parent %p\n", |
548 | desc->dev->full_name, | 550 | desc->dev->full_name, |
549 | desc->dev, desc->interrupt_parent); | 551 | desc->dev, desc->interrupt_parent); |
550 | ret = desc->irq_init_cb(desc->dev, | 552 | ret = desc->irq_init_cb(desc->dev, |
551 | desc->interrupt_parent); | 553 | desc->interrupt_parent); |
552 | if (ret) { | 554 | if (ret) { |
555 | of_node_clear_flag(desc->dev, OF_POPULATED); | ||
553 | kfree(desc); | 556 | kfree(desc); |
554 | continue; | 557 | continue; |
555 | } | 558 | } |
@@ -559,8 +562,6 @@ void __init of_irq_init(const struct of_device_id *matches) | |||
559 | * its children can get processed in a subsequent pass. | 562 | * its children can get processed in a subsequent pass. |
560 | */ | 563 | */ |
561 | list_add_tail(&desc->list, &intc_parent_list); | 564 | list_add_tail(&desc->list, &intc_parent_list); |
562 | |||
563 | of_node_set_flag(desc->dev, OF_POPULATED); | ||
564 | } | 565 | } |
565 | 566 | ||
566 | /* Get the next pending parent that might have children */ | 567 | /* Get the next pending parent that might have children */ |
diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 8aa197691074..f39ccd5aa701 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c | |||
@@ -497,6 +497,7 @@ int of_platform_default_populate(struct device_node *root, | |||
497 | } | 497 | } |
498 | EXPORT_SYMBOL_GPL(of_platform_default_populate); | 498 | EXPORT_SYMBOL_GPL(of_platform_default_populate); |
499 | 499 | ||
500 | #ifndef CONFIG_PPC | ||
500 | static int __init of_platform_default_populate_init(void) | 501 | static int __init of_platform_default_populate_init(void) |
501 | { | 502 | { |
502 | struct device_node *node; | 503 | struct device_node *node; |
@@ -521,6 +522,7 @@ static int __init of_platform_default_populate_init(void) | |||
521 | return 0; | 522 | return 0; |
522 | } | 523 | } |
523 | arch_initcall_sync(of_platform_default_populate_init); | 524 | arch_initcall_sync(of_platform_default_populate_init); |
525 | #endif | ||
524 | 526 | ||
525 | static int of_platform_device_destroy(struct device *dev, void *data) | 527 | static int of_platform_device_destroy(struct device *dev, void *data) |
526 | { | 528 | { |
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c index eb4990ff26ca..7fb765642ee7 100644 --- a/drivers/pinctrl/intel/pinctrl-merrifield.c +++ b/drivers/pinctrl/intel/pinctrl-merrifield.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/bitops.h> | 12 | #include <linux/bitops.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/io.h> | ||
14 | #include <linux/module.h> | 15 | #include <linux/module.h> |
15 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
16 | #include <linux/pinctrl/pinconf.h> | 17 | #include <linux/pinctrl/pinconf.h> |
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 11623c6b0cb3..44e69c963f5d 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c | |||
@@ -727,13 +727,7 @@ static int meson_pinctrl_probe(struct platform_device *pdev) | |||
727 | return PTR_ERR(pc->pcdev); | 727 | return PTR_ERR(pc->pcdev); |
728 | } | 728 | } |
729 | 729 | ||
730 | ret = meson_gpiolib_register(pc); | 730 | return meson_gpiolib_register(pc); |
731 | if (ret) { | ||
732 | pinctrl_unregister(pc->pcdev); | ||
733 | return ret; | ||
734 | } | ||
735 | |||
736 | return 0; | ||
737 | } | 731 | } |
738 | 732 | ||
739 | static struct platform_driver meson_pinctrl_driver = { | 733 | static struct platform_driver meson_pinctrl_driver = { |
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 634b4d30eefb..b3e772390ab6 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c | |||
@@ -43,17 +43,6 @@ static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset) | |||
43 | 43 | ||
44 | spin_lock_irqsave(&gpio_dev->lock, flags); | 44 | spin_lock_irqsave(&gpio_dev->lock, flags); |
45 | pin_reg = readl(gpio_dev->base + offset * 4); | 45 | pin_reg = readl(gpio_dev->base + offset * 4); |
46 | /* | ||
47 | * Suppose BIOS or Bootloader sets specific debounce for the | ||
48 | * GPIO. if not, set debounce to be 2.75ms and remove glitch. | ||
49 | */ | ||
50 | if ((pin_reg & DB_TMR_OUT_MASK) == 0) { | ||
51 | pin_reg |= 0xf; | ||
52 | pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); | ||
53 | pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF; | ||
54 | pin_reg &= ~BIT(DB_TMR_LARGE_OFF); | ||
55 | } | ||
56 | |||
57 | pin_reg &= ~BIT(OUTPUT_ENABLE_OFF); | 46 | pin_reg &= ~BIT(OUTPUT_ENABLE_OFF); |
58 | writel(pin_reg, gpio_dev->base + offset * 4); | 47 | writel(pin_reg, gpio_dev->base + offset * 4); |
59 | spin_unlock_irqrestore(&gpio_dev->lock, flags); | 48 | spin_unlock_irqrestore(&gpio_dev->lock, flags); |
@@ -326,15 +315,6 @@ static void amd_gpio_irq_enable(struct irq_data *d) | |||
326 | 315 | ||
327 | spin_lock_irqsave(&gpio_dev->lock, flags); | 316 | spin_lock_irqsave(&gpio_dev->lock, flags); |
328 | pin_reg = readl(gpio_dev->base + (d->hwirq)*4); | 317 | pin_reg = readl(gpio_dev->base + (d->hwirq)*4); |
329 | /* | ||
330 | Suppose BIOS or Bootloader sets specific debounce for the | ||
331 | GPIO. if not, set debounce to be 2.75ms. | ||
332 | */ | ||
333 | if ((pin_reg & DB_TMR_OUT_MASK) == 0) { | ||
334 | pin_reg |= 0xf; | ||
335 | pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF); | ||
336 | pin_reg &= ~BIT(DB_TMR_LARGE_OFF); | ||
337 | } | ||
338 | pin_reg |= BIT(INTERRUPT_ENABLE_OFF); | 318 | pin_reg |= BIT(INTERRUPT_ENABLE_OFF); |
339 | pin_reg |= BIT(INTERRUPT_MASK_OFF); | 319 | pin_reg |= BIT(INTERRUPT_MASK_OFF); |
340 | writel(pin_reg, gpio_dev->base + (d->hwirq)*4); | 320 | writel(pin_reg, gpio_dev->base + (d->hwirq)*4); |
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c index c6d410ef8de0..7bad200bd67c 100644 --- a/drivers/pinctrl/pinctrl-pistachio.c +++ b/drivers/pinctrl/pinctrl-pistachio.c | |||
@@ -1432,7 +1432,6 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev) | |||
1432 | { | 1432 | { |
1433 | struct pistachio_pinctrl *pctl; | 1433 | struct pistachio_pinctrl *pctl; |
1434 | struct resource *res; | 1434 | struct resource *res; |
1435 | int ret; | ||
1436 | 1435 | ||
1437 | pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL); | 1436 | pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL); |
1438 | if (!pctl) | 1437 | if (!pctl) |
@@ -1464,13 +1463,7 @@ static int pistachio_pinctrl_probe(struct platform_device *pdev) | |||
1464 | return PTR_ERR(pctl->pctldev); | 1463 | return PTR_ERR(pctl->pctldev); |
1465 | } | 1464 | } |
1466 | 1465 | ||
1467 | ret = pistachio_gpio_register(pctl); | 1466 | return pistachio_gpio_register(pctl); |
1468 | if (ret < 0) { | ||
1469 | pinctrl_unregister(pctl->pctldev); | ||
1470 | return ret; | ||
1471 | } | ||
1472 | |||
1473 | return 0; | ||
1474 | } | 1467 | } |
1475 | 1468 | ||
1476 | static struct platform_driver pistachio_pinctrl_driver = { | 1469 | static struct platform_driver pistachio_pinctrl_driver = { |
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c index 9c65f134d447..da7a75f82489 100644 --- a/drivers/power/max17042_battery.c +++ b/drivers/power/max17042_battery.c | |||
@@ -457,13 +457,16 @@ static inline void max17042_write_model_data(struct max17042_chip *chip, | |||
457 | } | 457 | } |
458 | 458 | ||
459 | static inline void max17042_read_model_data(struct max17042_chip *chip, | 459 | static inline void max17042_read_model_data(struct max17042_chip *chip, |
460 | u8 addr, u32 *data, int size) | 460 | u8 addr, u16 *data, int size) |
461 | { | 461 | { |
462 | struct regmap *map = chip->regmap; | 462 | struct regmap *map = chip->regmap; |
463 | int i; | 463 | int i; |
464 | u32 tmp; | ||
464 | 465 | ||
465 | for (i = 0; i < size; i++) | 466 | for (i = 0; i < size; i++) { |
466 | regmap_read(map, addr + i, &data[i]); | 467 | regmap_read(map, addr + i, &tmp); |
468 | data[i] = (u16)tmp; | ||
469 | } | ||
467 | } | 470 | } |
468 | 471 | ||
469 | static inline int max17042_model_data_compare(struct max17042_chip *chip, | 472 | static inline int max17042_model_data_compare(struct max17042_chip *chip, |
@@ -486,7 +489,7 @@ static int max17042_init_model(struct max17042_chip *chip) | |||
486 | { | 489 | { |
487 | int ret; | 490 | int ret; |
488 | int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); | 491 | int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); |
489 | u32 *temp_data; | 492 | u16 *temp_data; |
490 | 493 | ||
491 | temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); | 494 | temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); |
492 | if (!temp_data) | 495 | if (!temp_data) |
@@ -501,7 +504,7 @@ static int max17042_init_model(struct max17042_chip *chip) | |||
501 | ret = max17042_model_data_compare( | 504 | ret = max17042_model_data_compare( |
502 | chip, | 505 | chip, |
503 | chip->pdata->config_data->cell_char_tbl, | 506 | chip->pdata->config_data->cell_char_tbl, |
504 | (u16 *)temp_data, | 507 | temp_data, |
505 | table_size); | 508 | table_size); |
506 | 509 | ||
507 | max10742_lock_model(chip); | 510 | max10742_lock_model(chip); |
@@ -514,7 +517,7 @@ static int max17042_verify_model_lock(struct max17042_chip *chip) | |||
514 | { | 517 | { |
515 | int i; | 518 | int i; |
516 | int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); | 519 | int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl); |
517 | u32 *temp_data; | 520 | u16 *temp_data; |
518 | int ret = 0; | 521 | int ret = 0; |
519 | 522 | ||
520 | temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); | 523 | temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL); |
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 3bfac539334b..c74c3f67b8da 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig | |||
@@ -200,8 +200,8 @@ config REBOOT_MODE | |||
200 | config SYSCON_REBOOT_MODE | 200 | config SYSCON_REBOOT_MODE |
201 | tristate "Generic SYSCON regmap reboot mode driver" | 201 | tristate "Generic SYSCON regmap reboot mode driver" |
202 | depends on OF | 202 | depends on OF |
203 | depends on MFD_SYSCON | ||
203 | select REBOOT_MODE | 204 | select REBOOT_MODE |
204 | select MFD_SYSCON | ||
205 | help | 205 | help |
206 | Say y here will enable reboot mode driver. This will | 206 | Say y here will enable reboot mode driver. This will |
207 | get reboot mode arguments and store it in SYSCON mapped | 207 | get reboot mode arguments and store it in SYSCON mapped |
diff --git a/drivers/power/reset/hisi-reboot.c b/drivers/power/reset/hisi-reboot.c index 9ab7f562a83b..f69387e12c1e 100644 --- a/drivers/power/reset/hisi-reboot.c +++ b/drivers/power/reset/hisi-reboot.c | |||
@@ -53,13 +53,16 @@ static int hisi_reboot_probe(struct platform_device *pdev) | |||
53 | 53 | ||
54 | if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) { | 54 | if (of_property_read_u32(np, "reboot-offset", &reboot_offset) < 0) { |
55 | pr_err("failed to find reboot-offset property\n"); | 55 | pr_err("failed to find reboot-offset property\n"); |
56 | iounmap(base); | ||
56 | return -EINVAL; | 57 | return -EINVAL; |
57 | } | 58 | } |
58 | 59 | ||
59 | err = register_restart_handler(&hisi_restart_nb); | 60 | err = register_restart_handler(&hisi_restart_nb); |
60 | if (err) | 61 | if (err) { |
61 | dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n", | 62 | dev_err(&pdev->dev, "cannot register restart handler (err=%d)\n", |
62 | err); | 63 | err); |
64 | iounmap(base); | ||
65 | } | ||
63 | 66 | ||
64 | return err; | 67 | return err; |
65 | } | 68 | } |
diff --git a/drivers/power/tps65217_charger.c b/drivers/power/tps65217_charger.c index 73dfae41def8..4c56e54af6ac 100644 --- a/drivers/power/tps65217_charger.c +++ b/drivers/power/tps65217_charger.c | |||
@@ -206,6 +206,7 @@ static int tps65217_charger_probe(struct platform_device *pdev) | |||
206 | if (!charger) | 206 | if (!charger) |
207 | return -ENOMEM; | 207 | return -ENOMEM; |
208 | 208 | ||
209 | platform_set_drvdata(pdev, charger); | ||
209 | charger->tps = tps; | 210 | charger->tps = tps; |
210 | charger->dev = &pdev->dev; | 211 | charger->dev = &pdev->dev; |
211 | 212 | ||
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 8973d34ce5ba..fb1b56a71475 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -1643,9 +1643,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1643 | u8 *sense = NULL; | 1643 | u8 *sense = NULL; |
1644 | int expires; | 1644 | int expires; |
1645 | 1645 | ||
1646 | cqr = (struct dasd_ccw_req *) intparm; | ||
1646 | if (IS_ERR(irb)) { | 1647 | if (IS_ERR(irb)) { |
1647 | switch (PTR_ERR(irb)) { | 1648 | switch (PTR_ERR(irb)) { |
1648 | case -EIO: | 1649 | case -EIO: |
1650 | if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { | ||
1651 | device = (struct dasd_device *) cqr->startdev; | ||
1652 | cqr->status = DASD_CQR_CLEARED; | ||
1653 | dasd_device_clear_timer(device); | ||
1654 | wake_up(&dasd_flush_wq); | ||
1655 | dasd_schedule_device_bh(device); | ||
1656 | return; | ||
1657 | } | ||
1649 | break; | 1658 | break; |
1650 | case -ETIMEDOUT: | 1659 | case -ETIMEDOUT: |
1651 | DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " | 1660 | DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " |
@@ -1661,7 +1670,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1661 | } | 1670 | } |
1662 | 1671 | ||
1663 | now = get_tod_clock(); | 1672 | now = get_tod_clock(); |
1664 | cqr = (struct dasd_ccw_req *) intparm; | ||
1665 | /* check for conditions that should be handled immediately */ | 1673 | /* check for conditions that should be handled immediately */ |
1666 | if (!cqr || | 1674 | if (!cqr || |
1667 | !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && | 1675 | !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index fd2eff440098..98bbec44bcd0 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -5078,6 +5078,8 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device, | |||
5078 | return PTR_ERR(cqr); | 5078 | return PTR_ERR(cqr); |
5079 | } | 5079 | } |
5080 | 5080 | ||
5081 | cqr->lpm = lpum; | ||
5082 | retry: | ||
5081 | cqr->startdev = device; | 5083 | cqr->startdev = device; |
5082 | cqr->memdev = device; | 5084 | cqr->memdev = device; |
5083 | cqr->block = NULL; | 5085 | cqr->block = NULL; |
@@ -5122,6 +5124,14 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device, | |||
5122 | (prssdp + 1); | 5124 | (prssdp + 1); |
5123 | memcpy(messages, message_buf, | 5125 | memcpy(messages, message_buf, |
5124 | sizeof(struct dasd_rssd_messages)); | 5126 | sizeof(struct dasd_rssd_messages)); |
5127 | } else if (cqr->lpm) { | ||
5128 | /* | ||
5129 | * on z/VM we might not be able to do I/O on the requested path | ||
5130 | * but instead we get the required information on any path | ||
5131 | * so retry with open path mask | ||
5132 | */ | ||
5133 | cqr->lpm = 0; | ||
5134 | goto retry; | ||
5125 | } else | 5135 | } else |
5126 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, | 5136 | DBF_EVENT_DEVID(DBF_WARNING, device->cdev, |
5127 | "Reading messages failed with rc=%d\n" | 5137 | "Reading messages failed with rc=%d\n" |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 7ada078ffdd0..6a58bc8f46e2 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -762,7 +762,6 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, | |||
762 | priv->state = DEV_STATE_NOT_OPER; | 762 | priv->state = DEV_STATE_NOT_OPER; |
763 | priv->dev_id.devno = sch->schib.pmcw.dev; | 763 | priv->dev_id.devno = sch->schib.pmcw.dev; |
764 | priv->dev_id.ssid = sch->schid.ssid; | 764 | priv->dev_id.ssid = sch->schid.ssid; |
765 | priv->schid = sch->schid; | ||
766 | 765 | ||
767 | INIT_WORK(&priv->todo_work, ccw_device_todo); | 766 | INIT_WORK(&priv->todo_work, ccw_device_todo); |
768 | INIT_LIST_HEAD(&priv->cmb_list); | 767 | INIT_LIST_HEAD(&priv->cmb_list); |
@@ -1000,7 +999,6 @@ static int ccw_device_move_to_sch(struct ccw_device *cdev, | |||
1000 | put_device(&old_sch->dev); | 999 | put_device(&old_sch->dev); |
1001 | /* Initialize new subchannel. */ | 1000 | /* Initialize new subchannel. */ |
1002 | spin_lock_irq(sch->lock); | 1001 | spin_lock_irq(sch->lock); |
1003 | cdev->private->schid = sch->schid; | ||
1004 | cdev->ccwlock = sch->lock; | 1002 | cdev->ccwlock = sch->lock; |
1005 | if (!sch_is_pseudo_sch(sch)) | 1003 | if (!sch_is_pseudo_sch(sch)) |
1006 | sch_set_cdev(sch, cdev); | 1004 | sch_set_cdev(sch, cdev); |
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 15b56a15db15..9bc3512374c9 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c | |||
@@ -26,6 +26,7 @@ | |||
26 | static void | 26 | static void |
27 | ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) | 27 | ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) |
28 | { | 28 | { |
29 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
29 | char dbf_text[15]; | 30 | char dbf_text[15]; |
30 | 31 | ||
31 | if (!scsw_is_valid_cstat(&irb->scsw) || | 32 | if (!scsw_is_valid_cstat(&irb->scsw) || |
@@ -36,10 +37,10 @@ ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) | |||
36 | "received" | 37 | "received" |
37 | " ... device %04x on subchannel 0.%x.%04x, dev_stat " | 38 | " ... device %04x on subchannel 0.%x.%04x, dev_stat " |
38 | ": %02X sch_stat : %02X\n", | 39 | ": %02X sch_stat : %02X\n", |
39 | cdev->private->dev_id.devno, cdev->private->schid.ssid, | 40 | cdev->private->dev_id.devno, sch->schid.ssid, |
40 | cdev->private->schid.sch_no, | 41 | sch->schid.sch_no, |
41 | scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); | 42 | scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw)); |
42 | sprintf(dbf_text, "chk%x", cdev->private->schid.sch_no); | 43 | sprintf(dbf_text, "chk%x", sch->schid.sch_no); |
43 | CIO_TRACE_EVENT(0, dbf_text); | 44 | CIO_TRACE_EVENT(0, dbf_text); |
44 | CIO_HEX_EVENT(0, irb, sizeof(struct irb)); | 45 | CIO_HEX_EVENT(0, irb, sizeof(struct irb)); |
45 | } | 46 | } |
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index 8975060af96c..220f49145b2f 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h | |||
@@ -120,7 +120,6 @@ struct ccw_device_private { | |||
120 | int state; /* device state */ | 120 | int state; /* device state */ |
121 | atomic_t onoff; | 121 | atomic_t onoff; |
122 | struct ccw_dev_id dev_id; /* device id */ | 122 | struct ccw_dev_id dev_id; /* device id */ |
123 | struct subchannel_id schid; /* subchannel number */ | ||
124 | struct ccw_request req; /* internal I/O request */ | 123 | struct ccw_request req; /* internal I/O request */ |
125 | int iretry; | 124 | int iretry; |
126 | u8 pgid_valid_mask; /* mask of valid PGIDs */ | 125 | u8 pgid_valid_mask; /* mask of valid PGIDs */ |
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 4bb5262f7aee..71bf9bded485 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
@@ -686,6 +686,15 @@ static void qdio_kick_handler(struct qdio_q *q) | |||
686 | q->qdio_error = 0; | 686 | q->qdio_error = 0; |
687 | } | 687 | } |
688 | 688 | ||
689 | static inline int qdio_tasklet_schedule(struct qdio_q *q) | ||
690 | { | ||
691 | if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) { | ||
692 | tasklet_schedule(&q->tasklet); | ||
693 | return 0; | ||
694 | } | ||
695 | return -EPERM; | ||
696 | } | ||
697 | |||
689 | static void __qdio_inbound_processing(struct qdio_q *q) | 698 | static void __qdio_inbound_processing(struct qdio_q *q) |
690 | { | 699 | { |
691 | qperf_inc(q, tasklet_inbound); | 700 | qperf_inc(q, tasklet_inbound); |
@@ -698,10 +707,8 @@ static void __qdio_inbound_processing(struct qdio_q *q) | |||
698 | if (!qdio_inbound_q_done(q)) { | 707 | if (!qdio_inbound_q_done(q)) { |
699 | /* means poll time is not yet over */ | 708 | /* means poll time is not yet over */ |
700 | qperf_inc(q, tasklet_inbound_resched); | 709 | qperf_inc(q, tasklet_inbound_resched); |
701 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { | 710 | if (!qdio_tasklet_schedule(q)) |
702 | tasklet_schedule(&q->tasklet); | ||
703 | return; | 711 | return; |
704 | } | ||
705 | } | 712 | } |
706 | 713 | ||
707 | qdio_stop_polling(q); | 714 | qdio_stop_polling(q); |
@@ -711,8 +718,7 @@ static void __qdio_inbound_processing(struct qdio_q *q) | |||
711 | */ | 718 | */ |
712 | if (!qdio_inbound_q_done(q)) { | 719 | if (!qdio_inbound_q_done(q)) { |
713 | qperf_inc(q, tasklet_inbound_resched2); | 720 | qperf_inc(q, tasklet_inbound_resched2); |
714 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) | 721 | qdio_tasklet_schedule(q); |
715 | tasklet_schedule(&q->tasklet); | ||
716 | } | 722 | } |
717 | } | 723 | } |
718 | 724 | ||
@@ -869,16 +875,15 @@ static void __qdio_outbound_processing(struct qdio_q *q) | |||
869 | * is noticed and outbound_handler is called after some time. | 875 | * is noticed and outbound_handler is called after some time. |
870 | */ | 876 | */ |
871 | if (qdio_outbound_q_done(q)) | 877 | if (qdio_outbound_q_done(q)) |
872 | del_timer(&q->u.out.timer); | 878 | del_timer_sync(&q->u.out.timer); |
873 | else | 879 | else |
874 | if (!timer_pending(&q->u.out.timer)) | 880 | if (!timer_pending(&q->u.out.timer) && |
881 | likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) | ||
875 | mod_timer(&q->u.out.timer, jiffies + 10 * HZ); | 882 | mod_timer(&q->u.out.timer, jiffies + 10 * HZ); |
876 | return; | 883 | return; |
877 | 884 | ||
878 | sched: | 885 | sched: |
879 | if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) | 886 | qdio_tasklet_schedule(q); |
880 | return; | ||
881 | tasklet_schedule(&q->tasklet); | ||
882 | } | 887 | } |
883 | 888 | ||
884 | /* outbound tasklet */ | 889 | /* outbound tasklet */ |
@@ -892,9 +897,7 @@ void qdio_outbound_timer(unsigned long data) | |||
892 | { | 897 | { |
893 | struct qdio_q *q = (struct qdio_q *)data; | 898 | struct qdio_q *q = (struct qdio_q *)data; |
894 | 899 | ||
895 | if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) | 900 | qdio_tasklet_schedule(q); |
896 | return; | ||
897 | tasklet_schedule(&q->tasklet); | ||
898 | } | 901 | } |
899 | 902 | ||
900 | static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) | 903 | static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) |
@@ -907,7 +910,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) | |||
907 | 910 | ||
908 | for_each_output_queue(q->irq_ptr, out, i) | 911 | for_each_output_queue(q->irq_ptr, out, i) |
909 | if (!qdio_outbound_q_done(out)) | 912 | if (!qdio_outbound_q_done(out)) |
910 | tasklet_schedule(&out->tasklet); | 913 | qdio_tasklet_schedule(out); |
911 | } | 914 | } |
912 | 915 | ||
913 | static void __tiqdio_inbound_processing(struct qdio_q *q) | 916 | static void __tiqdio_inbound_processing(struct qdio_q *q) |
@@ -929,10 +932,8 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) | |||
929 | 932 | ||
930 | if (!qdio_inbound_q_done(q)) { | 933 | if (!qdio_inbound_q_done(q)) { |
931 | qperf_inc(q, tasklet_inbound_resched); | 934 | qperf_inc(q, tasklet_inbound_resched); |
932 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) { | 935 | if (!qdio_tasklet_schedule(q)) |
933 | tasklet_schedule(&q->tasklet); | ||
934 | return; | 936 | return; |
935 | } | ||
936 | } | 937 | } |
937 | 938 | ||
938 | qdio_stop_polling(q); | 939 | qdio_stop_polling(q); |
@@ -942,8 +943,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q) | |||
942 | */ | 943 | */ |
943 | if (!qdio_inbound_q_done(q)) { | 944 | if (!qdio_inbound_q_done(q)) { |
944 | qperf_inc(q, tasklet_inbound_resched2); | 945 | qperf_inc(q, tasklet_inbound_resched2); |
945 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) | 946 | qdio_tasklet_schedule(q); |
946 | tasklet_schedule(&q->tasklet); | ||
947 | } | 947 | } |
948 | } | 948 | } |
949 | 949 | ||
@@ -977,7 +977,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) | |||
977 | int i; | 977 | int i; |
978 | struct qdio_q *q; | 978 | struct qdio_q *q; |
979 | 979 | ||
980 | if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) | 980 | if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) |
981 | return; | 981 | return; |
982 | 982 | ||
983 | for_each_input_queue(irq_ptr, q, i) { | 983 | for_each_input_queue(irq_ptr, q, i) { |
@@ -1003,7 +1003,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) | |||
1003 | continue; | 1003 | continue; |
1004 | if (need_siga_sync(q) && need_siga_sync_out_after_pci(q)) | 1004 | if (need_siga_sync(q) && need_siga_sync_out_after_pci(q)) |
1005 | qdio_siga_sync_q(q); | 1005 | qdio_siga_sync_q(q); |
1006 | tasklet_schedule(&q->tasklet); | 1006 | qdio_tasklet_schedule(q); |
1007 | } | 1007 | } |
1008 | } | 1008 | } |
1009 | 1009 | ||
@@ -1066,10 +1066,12 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1066 | struct irb *irb) | 1066 | struct irb *irb) |
1067 | { | 1067 | { |
1068 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 1068 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1069 | struct subchannel_id schid; | ||
1069 | int cstat, dstat; | 1070 | int cstat, dstat; |
1070 | 1071 | ||
1071 | if (!intparm || !irq_ptr) { | 1072 | if (!intparm || !irq_ptr) { |
1072 | DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); | 1073 | ccw_device_get_schid(cdev, &schid); |
1074 | DBF_ERROR("qint:%4x", schid.sch_no); | ||
1073 | return; | 1075 | return; |
1074 | } | 1076 | } |
1075 | 1077 | ||
@@ -1122,12 +1124,14 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1122 | int qdio_get_ssqd_desc(struct ccw_device *cdev, | 1124 | int qdio_get_ssqd_desc(struct ccw_device *cdev, |
1123 | struct qdio_ssqd_desc *data) | 1125 | struct qdio_ssqd_desc *data) |
1124 | { | 1126 | { |
1127 | struct subchannel_id schid; | ||
1125 | 1128 | ||
1126 | if (!cdev || !cdev->private) | 1129 | if (!cdev || !cdev->private) |
1127 | return -EINVAL; | 1130 | return -EINVAL; |
1128 | 1131 | ||
1129 | DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no); | 1132 | ccw_device_get_schid(cdev, &schid); |
1130 | return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data); | 1133 | DBF_EVENT("get ssqd:%4x", schid.sch_no); |
1134 | return qdio_setup_get_ssqd(NULL, &schid, data); | ||
1131 | } | 1135 | } |
1132 | EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); | 1136 | EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); |
1133 | 1137 | ||
@@ -1141,7 +1145,7 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) | |||
1141 | tasklet_kill(&q->tasklet); | 1145 | tasklet_kill(&q->tasklet); |
1142 | 1146 | ||
1143 | for_each_output_queue(irq_ptr, q, i) { | 1147 | for_each_output_queue(irq_ptr, q, i) { |
1144 | del_timer(&q->u.out.timer); | 1148 | del_timer_sync(&q->u.out.timer); |
1145 | tasklet_kill(&q->tasklet); | 1149 | tasklet_kill(&q->tasklet); |
1146 | } | 1150 | } |
1147 | } | 1151 | } |
@@ -1154,14 +1158,15 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) | |||
1154 | int qdio_shutdown(struct ccw_device *cdev, int how) | 1158 | int qdio_shutdown(struct ccw_device *cdev, int how) |
1155 | { | 1159 | { |
1156 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 1160 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1161 | struct subchannel_id schid; | ||
1157 | int rc; | 1162 | int rc; |
1158 | unsigned long flags; | ||
1159 | 1163 | ||
1160 | if (!irq_ptr) | 1164 | if (!irq_ptr) |
1161 | return -ENODEV; | 1165 | return -ENODEV; |
1162 | 1166 | ||
1163 | WARN_ON_ONCE(irqs_disabled()); | 1167 | WARN_ON_ONCE(irqs_disabled()); |
1164 | DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); | 1168 | ccw_device_get_schid(cdev, &schid); |
1169 | DBF_EVENT("qshutdown:%4x", schid.sch_no); | ||
1165 | 1170 | ||
1166 | mutex_lock(&irq_ptr->setup_mutex); | 1171 | mutex_lock(&irq_ptr->setup_mutex); |
1167 | /* | 1172 | /* |
@@ -1184,7 +1189,7 @@ int qdio_shutdown(struct ccw_device *cdev, int how) | |||
1184 | qdio_shutdown_debug_entries(irq_ptr); | 1189 | qdio_shutdown_debug_entries(irq_ptr); |
1185 | 1190 | ||
1186 | /* cleanup subchannel */ | 1191 | /* cleanup subchannel */ |
1187 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); | 1192 | spin_lock_irq(get_ccwdev_lock(cdev)); |
1188 | 1193 | ||
1189 | if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) | 1194 | if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) |
1190 | rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); | 1195 | rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); |
@@ -1198,12 +1203,12 @@ int qdio_shutdown(struct ccw_device *cdev, int how) | |||
1198 | } | 1203 | } |
1199 | 1204 | ||
1200 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); | 1205 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); |
1201 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | 1206 | spin_unlock_irq(get_ccwdev_lock(cdev)); |
1202 | wait_event_interruptible_timeout(cdev->private->wait_q, | 1207 | wait_event_interruptible_timeout(cdev->private->wait_q, |
1203 | irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || | 1208 | irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || |
1204 | irq_ptr->state == QDIO_IRQ_STATE_ERR, | 1209 | irq_ptr->state == QDIO_IRQ_STATE_ERR, |
1205 | 10 * HZ); | 1210 | 10 * HZ); |
1206 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); | 1211 | spin_lock_irq(get_ccwdev_lock(cdev)); |
1207 | 1212 | ||
1208 | no_cleanup: | 1213 | no_cleanup: |
1209 | qdio_shutdown_thinint(irq_ptr); | 1214 | qdio_shutdown_thinint(irq_ptr); |
@@ -1211,7 +1216,7 @@ no_cleanup: | |||
1211 | /* restore interrupt handler */ | 1216 | /* restore interrupt handler */ |
1212 | if ((void *)cdev->handler == (void *)qdio_int_handler) | 1217 | if ((void *)cdev->handler == (void *)qdio_int_handler) |
1213 | cdev->handler = irq_ptr->orig_handler; | 1218 | cdev->handler = irq_ptr->orig_handler; |
1214 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | 1219 | spin_unlock_irq(get_ccwdev_lock(cdev)); |
1215 | 1220 | ||
1216 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); | 1221 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); |
1217 | mutex_unlock(&irq_ptr->setup_mutex); | 1222 | mutex_unlock(&irq_ptr->setup_mutex); |
@@ -1228,11 +1233,13 @@ EXPORT_SYMBOL_GPL(qdio_shutdown); | |||
1228 | int qdio_free(struct ccw_device *cdev) | 1233 | int qdio_free(struct ccw_device *cdev) |
1229 | { | 1234 | { |
1230 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 1235 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1236 | struct subchannel_id schid; | ||
1231 | 1237 | ||
1232 | if (!irq_ptr) | 1238 | if (!irq_ptr) |
1233 | return -ENODEV; | 1239 | return -ENODEV; |
1234 | 1240 | ||
1235 | DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); | 1241 | ccw_device_get_schid(cdev, &schid); |
1242 | DBF_EVENT("qfree:%4x", schid.sch_no); | ||
1236 | DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned"); | 1243 | DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned"); |
1237 | mutex_lock(&irq_ptr->setup_mutex); | 1244 | mutex_lock(&irq_ptr->setup_mutex); |
1238 | 1245 | ||
@@ -1251,9 +1258,11 @@ EXPORT_SYMBOL_GPL(qdio_free); | |||
1251 | */ | 1258 | */ |
1252 | int qdio_allocate(struct qdio_initialize *init_data) | 1259 | int qdio_allocate(struct qdio_initialize *init_data) |
1253 | { | 1260 | { |
1261 | struct subchannel_id schid; | ||
1254 | struct qdio_irq *irq_ptr; | 1262 | struct qdio_irq *irq_ptr; |
1255 | 1263 | ||
1256 | DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no); | 1264 | ccw_device_get_schid(init_data->cdev, &schid); |
1265 | DBF_EVENT("qallocate:%4x", schid.sch_no); | ||
1257 | 1266 | ||
1258 | if ((init_data->no_input_qs && !init_data->input_handler) || | 1267 | if ((init_data->no_input_qs && !init_data->input_handler) || |
1259 | (init_data->no_output_qs && !init_data->output_handler)) | 1268 | (init_data->no_output_qs && !init_data->output_handler)) |
@@ -1331,20 +1340,18 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr) | |||
1331 | */ | 1340 | */ |
1332 | int qdio_establish(struct qdio_initialize *init_data) | 1341 | int qdio_establish(struct qdio_initialize *init_data) |
1333 | { | 1342 | { |
1334 | struct qdio_irq *irq_ptr; | ||
1335 | struct ccw_device *cdev = init_data->cdev; | 1343 | struct ccw_device *cdev = init_data->cdev; |
1336 | unsigned long saveflags; | 1344 | struct subchannel_id schid; |
1345 | struct qdio_irq *irq_ptr; | ||
1337 | int rc; | 1346 | int rc; |
1338 | 1347 | ||
1339 | DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no); | 1348 | ccw_device_get_schid(cdev, &schid); |
1349 | DBF_EVENT("qestablish:%4x", schid.sch_no); | ||
1340 | 1350 | ||
1341 | irq_ptr = cdev->private->qdio_data; | 1351 | irq_ptr = cdev->private->qdio_data; |
1342 | if (!irq_ptr) | 1352 | if (!irq_ptr) |
1343 | return -ENODEV; | 1353 | return -ENODEV; |
1344 | 1354 | ||
1345 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
1346 | return -EINVAL; | ||
1347 | |||
1348 | mutex_lock(&irq_ptr->setup_mutex); | 1355 | mutex_lock(&irq_ptr->setup_mutex); |
1349 | qdio_setup_irq(init_data); | 1356 | qdio_setup_irq(init_data); |
1350 | 1357 | ||
@@ -1361,17 +1368,14 @@ int qdio_establish(struct qdio_initialize *init_data) | |||
1361 | irq_ptr->ccw.count = irq_ptr->equeue.count; | 1368 | irq_ptr->ccw.count = irq_ptr->equeue.count; |
1362 | irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); | 1369 | irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); |
1363 | 1370 | ||
1364 | spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); | 1371 | spin_lock_irq(get_ccwdev_lock(cdev)); |
1365 | ccw_device_set_options_mask(cdev, 0); | 1372 | ccw_device_set_options_mask(cdev, 0); |
1366 | 1373 | ||
1367 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); | 1374 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); |
1375 | spin_unlock_irq(get_ccwdev_lock(cdev)); | ||
1368 | if (rc) { | 1376 | if (rc) { |
1369 | DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); | 1377 | DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); |
1370 | DBF_ERROR("rc:%4x", rc); | 1378 | DBF_ERROR("rc:%4x", rc); |
1371 | } | ||
1372 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); | ||
1373 | |||
1374 | if (rc) { | ||
1375 | mutex_unlock(&irq_ptr->setup_mutex); | 1379 | mutex_unlock(&irq_ptr->setup_mutex); |
1376 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); | 1380 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); |
1377 | return rc; | 1381 | return rc; |
@@ -1407,19 +1411,17 @@ EXPORT_SYMBOL_GPL(qdio_establish); | |||
1407 | */ | 1411 | */ |
1408 | int qdio_activate(struct ccw_device *cdev) | 1412 | int qdio_activate(struct ccw_device *cdev) |
1409 | { | 1413 | { |
1414 | struct subchannel_id schid; | ||
1410 | struct qdio_irq *irq_ptr; | 1415 | struct qdio_irq *irq_ptr; |
1411 | int rc; | 1416 | int rc; |
1412 | unsigned long saveflags; | ||
1413 | 1417 | ||
1414 | DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no); | 1418 | ccw_device_get_schid(cdev, &schid); |
1419 | DBF_EVENT("qactivate:%4x", schid.sch_no); | ||
1415 | 1420 | ||
1416 | irq_ptr = cdev->private->qdio_data; | 1421 | irq_ptr = cdev->private->qdio_data; |
1417 | if (!irq_ptr) | 1422 | if (!irq_ptr) |
1418 | return -ENODEV; | 1423 | return -ENODEV; |
1419 | 1424 | ||
1420 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
1421 | return -EINVAL; | ||
1422 | |||
1423 | mutex_lock(&irq_ptr->setup_mutex); | 1425 | mutex_lock(&irq_ptr->setup_mutex); |
1424 | if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { | 1426 | if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { |
1425 | rc = -EBUSY; | 1427 | rc = -EBUSY; |
@@ -1431,19 +1433,17 @@ int qdio_activate(struct ccw_device *cdev) | |||
1431 | irq_ptr->ccw.count = irq_ptr->aqueue.count; | 1433 | irq_ptr->ccw.count = irq_ptr->aqueue.count; |
1432 | irq_ptr->ccw.cda = 0; | 1434 | irq_ptr->ccw.cda = 0; |
1433 | 1435 | ||
1434 | spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags); | 1436 | spin_lock_irq(get_ccwdev_lock(cdev)); |
1435 | ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); | 1437 | ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); |
1436 | 1438 | ||
1437 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, | 1439 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, |
1438 | 0, DOIO_DENY_PREFETCH); | 1440 | 0, DOIO_DENY_PREFETCH); |
1441 | spin_unlock_irq(get_ccwdev_lock(cdev)); | ||
1439 | if (rc) { | 1442 | if (rc) { |
1440 | DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); | 1443 | DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); |
1441 | DBF_ERROR("rc:%4x", rc); | 1444 | DBF_ERROR("rc:%4x", rc); |
1442 | } | ||
1443 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); | ||
1444 | |||
1445 | if (rc) | ||
1446 | goto out; | 1445 | goto out; |
1446 | } | ||
1447 | 1447 | ||
1448 | if (is_thinint_irq(irq_ptr)) | 1448 | if (is_thinint_irq(irq_ptr)) |
1449 | tiqdio_add_input_queues(irq_ptr); | 1449 | tiqdio_add_input_queues(irq_ptr); |
@@ -1585,10 +1585,11 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, | |||
1585 | 1585 | ||
1586 | /* in case of SIGA errors we must process the error immediately */ | 1586 | /* in case of SIGA errors we must process the error immediately */ |
1587 | if (used >= q->u.out.scan_threshold || rc) | 1587 | if (used >= q->u.out.scan_threshold || rc) |
1588 | tasklet_schedule(&q->tasklet); | 1588 | qdio_tasklet_schedule(q); |
1589 | else | 1589 | else |
1590 | /* free the SBALs in case of no further traffic */ | 1590 | /* free the SBALs in case of no further traffic */ |
1591 | if (!timer_pending(&q->u.out.timer)) | 1591 | if (!timer_pending(&q->u.out.timer) && |
1592 | likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) | ||
1592 | mod_timer(&q->u.out.timer, jiffies + HZ); | 1593 | mod_timer(&q->u.out.timer, jiffies + HZ); |
1593 | return rc; | 1594 | return rc; |
1594 | } | 1595 | } |
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index b381b3718a98..5648b715fed9 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c | |||
@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) | |||
63 | struct fib *fibptr; | 63 | struct fib *fibptr; |
64 | struct hw_fib * hw_fib = (struct hw_fib *)0; | 64 | struct hw_fib * hw_fib = (struct hw_fib *)0; |
65 | dma_addr_t hw_fib_pa = (dma_addr_t)0LL; | 65 | dma_addr_t hw_fib_pa = (dma_addr_t)0LL; |
66 | unsigned size; | 66 | unsigned int size, osize; |
67 | int retval; | 67 | int retval; |
68 | 68 | ||
69 | if (dev->in_reset) { | 69 | if (dev->in_reset) { |
@@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) | |||
87 | * will not overrun the buffer when we copy the memory. Return | 87 | * will not overrun the buffer when we copy the memory. Return |
88 | * an error if we would. | 88 | * an error if we would. |
89 | */ | 89 | */ |
90 | size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr); | 90 | osize = size = le16_to_cpu(kfib->header.Size) + |
91 | sizeof(struct aac_fibhdr); | ||
91 | if (size < le16_to_cpu(kfib->header.SenderSize)) | 92 | if (size < le16_to_cpu(kfib->header.SenderSize)) |
92 | size = le16_to_cpu(kfib->header.SenderSize); | 93 | size = le16_to_cpu(kfib->header.SenderSize); |
93 | if (size > dev->max_fib_size) { | 94 | if (size > dev->max_fib_size) { |
@@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg) | |||
118 | goto cleanup; | 119 | goto cleanup; |
119 | } | 120 | } |
120 | 121 | ||
122 | /* Sanity check the second copy */ | ||
123 | if ((osize != le16_to_cpu(kfib->header.Size) + | ||
124 | sizeof(struct aac_fibhdr)) | ||
125 | || (size < le16_to_cpu(kfib->header.SenderSize))) { | ||
126 | retval = -EINVAL; | ||
127 | goto cleanup; | ||
128 | } | ||
129 | |||
121 | if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) { | 130 | if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) { |
122 | aac_adapter_interrupt(dev); | 131 | aac_adapter_interrupt(dev); |
123 | /* | 132 | /* |
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index a569c65f22b1..dcf36537a767 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c | |||
@@ -2923,7 +2923,7 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) | |||
2923 | mutex_unlock(&fip->ctlr_mutex); | 2923 | mutex_unlock(&fip->ctlr_mutex); |
2924 | 2924 | ||
2925 | drop: | 2925 | drop: |
2926 | kfree(skb); | 2926 | kfree_skb(skb); |
2927 | return rc; | 2927 | return rc; |
2928 | } | 2928 | } |
2929 | 2929 | ||
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 2dab3dc2aa69..c1ed25adb17e 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
@@ -5037,7 +5037,7 @@ static int megasas_init_fw(struct megasas_instance *instance) | |||
5037 | /* Find first memory bar */ | 5037 | /* Find first memory bar */ |
5038 | bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); | 5038 | bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); |
5039 | instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); | 5039 | instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); |
5040 | if (pci_request_selected_regions(instance->pdev, instance->bar, | 5040 | if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, |
5041 | "megasas: LSI")) { | 5041 | "megasas: LSI")) { |
5042 | dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); | 5042 | dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); |
5043 | return -EBUSY; | 5043 | return -EBUSY; |
@@ -5339,7 +5339,7 @@ fail_ready_state: | |||
5339 | iounmap(instance->reg_set); | 5339 | iounmap(instance->reg_set); |
5340 | 5340 | ||
5341 | fail_ioremap: | 5341 | fail_ioremap: |
5342 | pci_release_selected_regions(instance->pdev, instance->bar); | 5342 | pci_release_selected_regions(instance->pdev, 1<<instance->bar); |
5343 | 5343 | ||
5344 | return -EINVAL; | 5344 | return -EINVAL; |
5345 | } | 5345 | } |
@@ -5360,7 +5360,7 @@ static void megasas_release_mfi(struct megasas_instance *instance) | |||
5360 | 5360 | ||
5361 | iounmap(instance->reg_set); | 5361 | iounmap(instance->reg_set); |
5362 | 5362 | ||
5363 | pci_release_selected_regions(instance->pdev, instance->bar); | 5363 | pci_release_selected_regions(instance->pdev, 1<<instance->bar); |
5364 | } | 5364 | } |
5365 | 5365 | ||
5366 | /** | 5366 | /** |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index ec837544f784..52d8bbf7feb5 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
@@ -2603,7 +2603,7 @@ megasas_release_fusion(struct megasas_instance *instance) | |||
2603 | 2603 | ||
2604 | iounmap(instance->reg_set); | 2604 | iounmap(instance->reg_set); |
2605 | 2605 | ||
2606 | pci_release_selected_regions(instance->pdev, instance->bar); | 2606 | pci_release_selected_regions(instance->pdev, 1<<instance->bar); |
2607 | } | 2607 | } |
2608 | 2608 | ||
2609 | /** | 2609 | /** |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 751f13edece0..750f82c339d4 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c | |||
@@ -2188,6 +2188,17 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) | |||
2188 | } else | 2188 | } else |
2189 | ioc->msix96_vector = 0; | 2189 | ioc->msix96_vector = 0; |
2190 | 2190 | ||
2191 | if (ioc->is_warpdrive) { | ||
2192 | ioc->reply_post_host_index[0] = (resource_size_t __iomem *) | ||
2193 | &ioc->chip->ReplyPostHostIndex; | ||
2194 | |||
2195 | for (i = 1; i < ioc->cpu_msix_table_sz; i++) | ||
2196 | ioc->reply_post_host_index[i] = | ||
2197 | (resource_size_t __iomem *) | ||
2198 | ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) | ||
2199 | * 4))); | ||
2200 | } | ||
2201 | |||
2191 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) | 2202 | list_for_each_entry(reply_q, &ioc->reply_queue_list, list) |
2192 | pr_info(MPT3SAS_FMT "%s: IRQ %d\n", | 2203 | pr_info(MPT3SAS_FMT "%s: IRQ %d\n", |
2193 | reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : | 2204 | reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : |
@@ -5280,17 +5291,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) | |||
5280 | if (r) | 5291 | if (r) |
5281 | goto out_free_resources; | 5292 | goto out_free_resources; |
5282 | 5293 | ||
5283 | if (ioc->is_warpdrive) { | ||
5284 | ioc->reply_post_host_index[0] = (resource_size_t __iomem *) | ||
5285 | &ioc->chip->ReplyPostHostIndex; | ||
5286 | |||
5287 | for (i = 1; i < ioc->cpu_msix_table_sz; i++) | ||
5288 | ioc->reply_post_host_index[i] = | ||
5289 | (resource_size_t __iomem *) | ||
5290 | ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) | ||
5291 | * 4))); | ||
5292 | } | ||
5293 | |||
5294 | pci_set_drvdata(ioc->pdev, ioc->shost); | 5294 | pci_set_drvdata(ioc->pdev, ioc->shost); |
5295 | r = _base_get_ioc_facts(ioc, CAN_SLEEP); | 5295 | r = _base_get_ioc_facts(ioc, CAN_SLEEP); |
5296 | if (r) | 5296 | if (r) |
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 53ef1cb6418e..0e8601aa877a 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c | |||
@@ -778,6 +778,8 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) | |||
778 | if (!edev) | 778 | if (!edev) |
779 | return; | 779 | return; |
780 | 780 | ||
781 | enclosure_unregister(edev); | ||
782 | |||
781 | ses_dev = edev->scratch; | 783 | ses_dev = edev->scratch; |
782 | edev->scratch = NULL; | 784 | edev->scratch = NULL; |
783 | 785 | ||
@@ -789,7 +791,6 @@ static void ses_intf_remove_enclosure(struct scsi_device *sdev) | |||
789 | kfree(edev->component[0].scratch); | 791 | kfree(edev->component[0].scratch); |
790 | 792 | ||
791 | put_device(&edev->edev); | 793 | put_device(&edev->edev); |
792 | enclosure_unregister(edev); | ||
793 | } | 794 | } |
794 | 795 | ||
795 | static void ses_intf_remove(struct device *cdev, | 796 | static void ses_intf_remove(struct device *cdev, |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 71912301ef7f..0f3f62e81e5b 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -1354,7 +1354,6 @@ made_compressed_probe: | |||
1354 | spin_lock_init(&acm->write_lock); | 1354 | spin_lock_init(&acm->write_lock); |
1355 | spin_lock_init(&acm->read_lock); | 1355 | spin_lock_init(&acm->read_lock); |
1356 | mutex_init(&acm->mutex); | 1356 | mutex_init(&acm->mutex); |
1357 | acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress); | ||
1358 | acm->is_int_ep = usb_endpoint_xfer_int(epread); | 1357 | acm->is_int_ep = usb_endpoint_xfer_int(epread); |
1359 | if (acm->is_int_ep) | 1358 | if (acm->is_int_ep) |
1360 | acm->bInterval = epread->bInterval; | 1359 | acm->bInterval = epread->bInterval; |
@@ -1394,14 +1393,14 @@ made_compressed_probe: | |||
1394 | urb->transfer_dma = rb->dma; | 1393 | urb->transfer_dma = rb->dma; |
1395 | if (acm->is_int_ep) { | 1394 | if (acm->is_int_ep) { |
1396 | usb_fill_int_urb(urb, acm->dev, | 1395 | usb_fill_int_urb(urb, acm->dev, |
1397 | acm->rx_endpoint, | 1396 | usb_rcvintpipe(usb_dev, epread->bEndpointAddress), |
1398 | rb->base, | 1397 | rb->base, |
1399 | acm->readsize, | 1398 | acm->readsize, |
1400 | acm_read_bulk_callback, rb, | 1399 | acm_read_bulk_callback, rb, |
1401 | acm->bInterval); | 1400 | acm->bInterval); |
1402 | } else { | 1401 | } else { |
1403 | usb_fill_bulk_urb(urb, acm->dev, | 1402 | usb_fill_bulk_urb(urb, acm->dev, |
1404 | acm->rx_endpoint, | 1403 | usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress), |
1405 | rb->base, | 1404 | rb->base, |
1406 | acm->readsize, | 1405 | acm->readsize, |
1407 | acm_read_bulk_callback, rb); | 1406 | acm_read_bulk_callback, rb); |
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index 05ce308d5d2a..1f1eabfd8462 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h | |||
@@ -96,7 +96,6 @@ struct acm { | |||
96 | struct acm_rb read_buffers[ACM_NR]; | 96 | struct acm_rb read_buffers[ACM_NR]; |
97 | struct acm_wb *putbuffer; /* for acm_tty_put_char() */ | 97 | struct acm_wb *putbuffer; /* for acm_tty_put_char() */ |
98 | int rx_buflimit; | 98 | int rx_buflimit; |
99 | int rx_endpoint; | ||
100 | spinlock_t read_lock; | 99 | spinlock_t read_lock; |
101 | int write_used; /* number of non-empty write buffers */ | 100 | int write_used; /* number of non-empty write buffers */ |
102 | int transmitting; | 101 | int transmitting; |
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 31ccdccd7a04..051163189810 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c | |||
@@ -171,6 +171,31 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, | |||
171 | ep, buffer, size); | 171 | ep, buffer, size); |
172 | } | 172 | } |
173 | 173 | ||
174 | static const unsigned short low_speed_maxpacket_maxes[4] = { | ||
175 | [USB_ENDPOINT_XFER_CONTROL] = 8, | ||
176 | [USB_ENDPOINT_XFER_ISOC] = 0, | ||
177 | [USB_ENDPOINT_XFER_BULK] = 0, | ||
178 | [USB_ENDPOINT_XFER_INT] = 8, | ||
179 | }; | ||
180 | static const unsigned short full_speed_maxpacket_maxes[4] = { | ||
181 | [USB_ENDPOINT_XFER_CONTROL] = 64, | ||
182 | [USB_ENDPOINT_XFER_ISOC] = 1023, | ||
183 | [USB_ENDPOINT_XFER_BULK] = 64, | ||
184 | [USB_ENDPOINT_XFER_INT] = 64, | ||
185 | }; | ||
186 | static const unsigned short high_speed_maxpacket_maxes[4] = { | ||
187 | [USB_ENDPOINT_XFER_CONTROL] = 64, | ||
188 | [USB_ENDPOINT_XFER_ISOC] = 1024, | ||
189 | [USB_ENDPOINT_XFER_BULK] = 512, | ||
190 | [USB_ENDPOINT_XFER_INT] = 1023, | ||
191 | }; | ||
192 | static const unsigned short super_speed_maxpacket_maxes[4] = { | ||
193 | [USB_ENDPOINT_XFER_CONTROL] = 512, | ||
194 | [USB_ENDPOINT_XFER_ISOC] = 1024, | ||
195 | [USB_ENDPOINT_XFER_BULK] = 1024, | ||
196 | [USB_ENDPOINT_XFER_INT] = 1024, | ||
197 | }; | ||
198 | |||
174 | static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | 199 | static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, |
175 | int asnum, struct usb_host_interface *ifp, int num_ep, | 200 | int asnum, struct usb_host_interface *ifp, int num_ep, |
176 | unsigned char *buffer, int size) | 201 | unsigned char *buffer, int size) |
@@ -179,6 +204,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
179 | struct usb_endpoint_descriptor *d; | 204 | struct usb_endpoint_descriptor *d; |
180 | struct usb_host_endpoint *endpoint; | 205 | struct usb_host_endpoint *endpoint; |
181 | int n, i, j, retval; | 206 | int n, i, j, retval; |
207 | unsigned int maxp; | ||
208 | const unsigned short *maxpacket_maxes; | ||
182 | 209 | ||
183 | d = (struct usb_endpoint_descriptor *) buffer; | 210 | d = (struct usb_endpoint_descriptor *) buffer; |
184 | buffer += d->bLength; | 211 | buffer += d->bLength; |
@@ -286,6 +313,42 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
286 | endpoint->desc.wMaxPacketSize = cpu_to_le16(8); | 313 | endpoint->desc.wMaxPacketSize = cpu_to_le16(8); |
287 | } | 314 | } |
288 | 315 | ||
316 | /* Validate the wMaxPacketSize field */ | ||
317 | maxp = usb_endpoint_maxp(&endpoint->desc); | ||
318 | |||
319 | /* Find the highest legal maxpacket size for this endpoint */ | ||
320 | i = 0; /* additional transactions per microframe */ | ||
321 | switch (to_usb_device(ddev)->speed) { | ||
322 | case USB_SPEED_LOW: | ||
323 | maxpacket_maxes = low_speed_maxpacket_maxes; | ||
324 | break; | ||
325 | case USB_SPEED_FULL: | ||
326 | maxpacket_maxes = full_speed_maxpacket_maxes; | ||
327 | break; | ||
328 | case USB_SPEED_HIGH: | ||
329 | /* Bits 12..11 are allowed only for HS periodic endpoints */ | ||
330 | if (usb_endpoint_xfer_int(d) || usb_endpoint_xfer_isoc(d)) { | ||
331 | i = maxp & (BIT(12) | BIT(11)); | ||
332 | maxp &= ~i; | ||
333 | } | ||
334 | /* fallthrough */ | ||
335 | default: | ||
336 | maxpacket_maxes = high_speed_maxpacket_maxes; | ||
337 | break; | ||
338 | case USB_SPEED_SUPER: | ||
339 | case USB_SPEED_SUPER_PLUS: | ||
340 | maxpacket_maxes = super_speed_maxpacket_maxes; | ||
341 | break; | ||
342 | } | ||
343 | j = maxpacket_maxes[usb_endpoint_type(&endpoint->desc)]; | ||
344 | |||
345 | if (maxp > j) { | ||
346 | dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has invalid maxpacket %d, setting to %d\n", | ||
347 | cfgno, inum, asnum, d->bEndpointAddress, maxp, j); | ||
348 | maxp = j; | ||
349 | endpoint->desc.wMaxPacketSize = cpu_to_le16(i | maxp); | ||
350 | } | ||
351 | |||
289 | /* | 352 | /* |
290 | * Some buggy high speed devices have bulk endpoints using | 353 | * Some buggy high speed devices have bulk endpoints using |
291 | * maxpacket sizes other than 512. High speed HCDs may not | 354 | * maxpacket sizes other than 512. High speed HCDs may not |
@@ -293,9 +356,6 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
293 | */ | 356 | */ |
294 | if (to_usb_device(ddev)->speed == USB_SPEED_HIGH | 357 | if (to_usb_device(ddev)->speed == USB_SPEED_HIGH |
295 | && usb_endpoint_xfer_bulk(d)) { | 358 | && usb_endpoint_xfer_bulk(d)) { |
296 | unsigned maxp; | ||
297 | |||
298 | maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff; | ||
299 | if (maxp != 512) | 359 | if (maxp != 512) |
300 | dev_warn(ddev, "config %d interface %d altsetting %d " | 360 | dev_warn(ddev, "config %d interface %d altsetting %d " |
301 | "bulk endpoint 0x%X has invalid maxpacket %d\n", | 361 | "bulk endpoint 0x%X has invalid maxpacket %d\n", |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index e9f5043a2167..e6a6d67c8705 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -241,7 +241,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) | |||
241 | goto error_decrease_mem; | 241 | goto error_decrease_mem; |
242 | } | 242 | } |
243 | 243 | ||
244 | mem = usb_alloc_coherent(ps->dev, size, GFP_USER, &dma_handle); | 244 | mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN, |
245 | &dma_handle); | ||
245 | if (!mem) { | 246 | if (!mem) { |
246 | ret = -ENOMEM; | 247 | ret = -ENOMEM; |
247 | goto error_free_usbm; | 248 | goto error_free_usbm; |
@@ -2582,7 +2583,9 @@ static unsigned int usbdev_poll(struct file *file, | |||
2582 | if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) | 2583 | if (file->f_mode & FMODE_WRITE && !list_empty(&ps->async_completed)) |
2583 | mask |= POLLOUT | POLLWRNORM; | 2584 | mask |= POLLOUT | POLLWRNORM; |
2584 | if (!connected(ps)) | 2585 | if (!connected(ps)) |
2585 | mask |= POLLERR | POLLHUP; | 2586 | mask |= POLLHUP; |
2587 | if (list_empty(&ps->list)) | ||
2588 | mask |= POLLERR; | ||
2586 | return mask; | 2589 | return mask; |
2587 | } | 2590 | } |
2588 | 2591 | ||
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index bee13517676f..1d5fc32d06d0 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -1052,14 +1052,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) | |||
1052 | 1052 | ||
1053 | /* Continue a partial initialization */ | 1053 | /* Continue a partial initialization */ |
1054 | if (type == HUB_INIT2 || type == HUB_INIT3) { | 1054 | if (type == HUB_INIT2 || type == HUB_INIT3) { |
1055 | device_lock(hub->intfdev); | 1055 | device_lock(&hdev->dev); |
1056 | 1056 | ||
1057 | /* Was the hub disconnected while we were waiting? */ | 1057 | /* Was the hub disconnected while we were waiting? */ |
1058 | if (hub->disconnected) { | 1058 | if (hub->disconnected) |
1059 | device_unlock(hub->intfdev); | 1059 | goto disconnected; |
1060 | kref_put(&hub->kref, hub_release); | ||
1061 | return; | ||
1062 | } | ||
1063 | if (type == HUB_INIT2) | 1060 | if (type == HUB_INIT2) |
1064 | goto init2; | 1061 | goto init2; |
1065 | goto init3; | 1062 | goto init3; |
@@ -1262,7 +1259,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) | |||
1262 | queue_delayed_work(system_power_efficient_wq, | 1259 | queue_delayed_work(system_power_efficient_wq, |
1263 | &hub->init_work, | 1260 | &hub->init_work, |
1264 | msecs_to_jiffies(delay)); | 1261 | msecs_to_jiffies(delay)); |
1265 | device_unlock(hub->intfdev); | 1262 | device_unlock(&hdev->dev); |
1266 | return; /* Continues at init3: below */ | 1263 | return; /* Continues at init3: below */ |
1267 | } else { | 1264 | } else { |
1268 | msleep(delay); | 1265 | msleep(delay); |
@@ -1281,12 +1278,12 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) | |||
1281 | /* Scan all ports that need attention */ | 1278 | /* Scan all ports that need attention */ |
1282 | kick_hub_wq(hub); | 1279 | kick_hub_wq(hub); |
1283 | 1280 | ||
1284 | /* Allow autosuspend if it was suppressed */ | 1281 | if (type == HUB_INIT2 || type == HUB_INIT3) { |
1285 | if (type <= HUB_INIT3) | 1282 | /* Allow autosuspend if it was suppressed */ |
1283 | disconnected: | ||
1286 | usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); | 1284 | usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); |
1287 | 1285 | device_unlock(&hdev->dev); | |
1288 | if (type == HUB_INIT2 || type == HUB_INIT3) | 1286 | } |
1289 | device_unlock(hub->intfdev); | ||
1290 | 1287 | ||
1291 | kref_put(&hub->kref, hub_release); | 1288 | kref_put(&hub->kref, hub_release); |
1292 | } | 1289 | } |
@@ -1315,8 +1312,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) | |||
1315 | struct usb_device *hdev = hub->hdev; | 1312 | struct usb_device *hdev = hub->hdev; |
1316 | int i; | 1313 | int i; |
1317 | 1314 | ||
1318 | cancel_delayed_work_sync(&hub->init_work); | ||
1319 | |||
1320 | /* hub_wq and related activity won't re-trigger */ | 1315 | /* hub_wq and related activity won't re-trigger */ |
1321 | hub->quiescing = 1; | 1316 | hub->quiescing = 1; |
1322 | 1317 | ||
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 974335377d9f..e56d59b19a0e 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c | |||
@@ -61,6 +61,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev) | |||
61 | if (!simple->clks) | 61 | if (!simple->clks) |
62 | return -ENOMEM; | 62 | return -ENOMEM; |
63 | 63 | ||
64 | platform_set_drvdata(pdev, simple); | ||
64 | simple->dev = dev; | 65 | simple->dev = dev; |
65 | 66 | ||
66 | for (i = 0; i < simple->num_clocks; i++) { | 67 | for (i = 0; i < simple->num_clocks; i++) { |
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 45f5a232d9fb..2eb84d6c24a6 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #define PCI_DEVICE_ID_INTEL_BXT 0x0aaa | 37 | #define PCI_DEVICE_ID_INTEL_BXT 0x0aaa |
38 | #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa | 38 | #define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa |
39 | #define PCI_DEVICE_ID_INTEL_APL 0x5aaa | 39 | #define PCI_DEVICE_ID_INTEL_APL 0x5aaa |
40 | #define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 | ||
40 | 41 | ||
41 | static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; | 42 | static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; |
42 | static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; | 43 | static const struct acpi_gpio_params cs_gpios = { 1, 0, false }; |
@@ -227,6 +228,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { | |||
227 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, | 228 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, |
228 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), }, | 229 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), }, |
229 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, | 230 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, |
231 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), }, | ||
230 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, | 232 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, |
231 | { } /* Terminating Entry */ | 233 | { } /* Terminating Entry */ |
232 | }; | 234 | }; |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 8f8c2157910e..1f5597ef945d 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
@@ -829,7 +829,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, | |||
829 | if (!req->request.no_interrupt && !chain) | 829 | if (!req->request.no_interrupt && !chain) |
830 | trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI; | 830 | trb->ctrl |= DWC3_TRB_CTRL_IOC | DWC3_TRB_CTRL_ISP_IMI; |
831 | 831 | ||
832 | if (last) | 832 | if (last && !usb_endpoint_xfer_isoc(dep->endpoint.desc)) |
833 | trb->ctrl |= DWC3_TRB_CTRL_LST; | 833 | trb->ctrl |= DWC3_TRB_CTRL_LST; |
834 | 834 | ||
835 | if (chain) | 835 | if (chain) |
@@ -1955,7 +1955,8 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc) | |||
1955 | 1955 | ||
1956 | static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, | 1956 | static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, |
1957 | struct dwc3_request *req, struct dwc3_trb *trb, | 1957 | struct dwc3_request *req, struct dwc3_trb *trb, |
1958 | const struct dwc3_event_depevt *event, int status) | 1958 | const struct dwc3_event_depevt *event, int status, |
1959 | int chain) | ||
1959 | { | 1960 | { |
1960 | unsigned int count; | 1961 | unsigned int count; |
1961 | unsigned int s_pkt = 0; | 1962 | unsigned int s_pkt = 0; |
@@ -1964,17 +1965,22 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, | |||
1964 | dep->queued_requests--; | 1965 | dep->queued_requests--; |
1965 | trace_dwc3_complete_trb(dep, trb); | 1966 | trace_dwc3_complete_trb(dep, trb); |
1966 | 1967 | ||
1968 | /* | ||
1969 | * If we're in the middle of series of chained TRBs and we | ||
1970 | * receive a short transfer along the way, DWC3 will skip | ||
1971 | * through all TRBs including the last TRB in the chain (the | ||
1972 | * where CHN bit is zero. DWC3 will also avoid clearing HWO | ||
1973 | * bit and SW has to do it manually. | ||
1974 | * | ||
1975 | * We're going to do that here to avoid problems of HW trying | ||
1976 | * to use bogus TRBs for transfers. | ||
1977 | */ | ||
1978 | if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO)) | ||
1979 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; | ||
1980 | |||
1967 | if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) | 1981 | if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) |
1968 | /* | 1982 | return 1; |
1969 | * We continue despite the error. There is not much we | 1983 | |
1970 | * can do. If we don't clean it up we loop forever. If | ||
1971 | * we skip the TRB then it gets overwritten after a | ||
1972 | * while since we use them in a ring buffer. A BUG() | ||
1973 | * would help. Lets hope that if this occurs, someone | ||
1974 | * fixes the root cause instead of looking away :) | ||
1975 | */ | ||
1976 | dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n", | ||
1977 | dep->name, trb); | ||
1978 | count = trb->size & DWC3_TRB_SIZE_MASK; | 1984 | count = trb->size & DWC3_TRB_SIZE_MASK; |
1979 | 1985 | ||
1980 | if (dep->direction) { | 1986 | if (dep->direction) { |
@@ -2013,15 +2019,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep, | |||
2013 | s_pkt = 1; | 2019 | s_pkt = 1; |
2014 | } | 2020 | } |
2015 | 2021 | ||
2016 | /* | 2022 | if (s_pkt && !chain) |
2017 | * We assume here we will always receive the entire data block | ||
2018 | * which we should receive. Meaning, if we program RX to | ||
2019 | * receive 4K but we receive only 2K, we assume that's all we | ||
2020 | * should receive and we simply bounce the request back to the | ||
2021 | * gadget driver for further processing. | ||
2022 | */ | ||
2023 | req->request.actual += req->request.length - count; | ||
2024 | if (s_pkt) | ||
2025 | return 1; | 2023 | return 1; |
2026 | if ((event->status & DEPEVT_STATUS_LST) && | 2024 | if ((event->status & DEPEVT_STATUS_LST) && |
2027 | (trb->ctrl & (DWC3_TRB_CTRL_LST | | 2025 | (trb->ctrl & (DWC3_TRB_CTRL_LST | |
@@ -2040,13 +2038,17 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, | |||
2040 | struct dwc3_trb *trb; | 2038 | struct dwc3_trb *trb; |
2041 | unsigned int slot; | 2039 | unsigned int slot; |
2042 | unsigned int i; | 2040 | unsigned int i; |
2041 | int count = 0; | ||
2043 | int ret; | 2042 | int ret; |
2044 | 2043 | ||
2045 | do { | 2044 | do { |
2045 | int chain; | ||
2046 | |||
2046 | req = next_request(&dep->started_list); | 2047 | req = next_request(&dep->started_list); |
2047 | if (WARN_ON_ONCE(!req)) | 2048 | if (WARN_ON_ONCE(!req)) |
2048 | return 1; | 2049 | return 1; |
2049 | 2050 | ||
2051 | chain = req->request.num_mapped_sgs > 0; | ||
2050 | i = 0; | 2052 | i = 0; |
2051 | do { | 2053 | do { |
2052 | slot = req->first_trb_index + i; | 2054 | slot = req->first_trb_index + i; |
@@ -2054,13 +2056,22 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, | |||
2054 | slot++; | 2056 | slot++; |
2055 | slot %= DWC3_TRB_NUM; | 2057 | slot %= DWC3_TRB_NUM; |
2056 | trb = &dep->trb_pool[slot]; | 2058 | trb = &dep->trb_pool[slot]; |
2059 | count += trb->size & DWC3_TRB_SIZE_MASK; | ||
2057 | 2060 | ||
2058 | ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, | 2061 | ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, |
2059 | event, status); | 2062 | event, status, chain); |
2060 | if (ret) | 2063 | if (ret) |
2061 | break; | 2064 | break; |
2062 | } while (++i < req->request.num_mapped_sgs); | 2065 | } while (++i < req->request.num_mapped_sgs); |
2063 | 2066 | ||
2067 | /* | ||
2068 | * We assume here we will always receive the entire data block | ||
2069 | * which we should receive. Meaning, if we program RX to | ||
2070 | * receive 4K but we receive only 2K, we assume that's all we | ||
2071 | * should receive and we simply bounce the request back to the | ||
2072 | * gadget driver for further processing. | ||
2073 | */ | ||
2074 | req->request.actual += req->request.length - count; | ||
2064 | dwc3_gadget_giveback(dep, req, status); | 2075 | dwc3_gadget_giveback(dep, req, status); |
2065 | 2076 | ||
2066 | if (ret) | 2077 | if (ret) |
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index eb648485a58c..5ebe6af7976e 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c | |||
@@ -1913,6 +1913,8 @@ unknown: | |||
1913 | break; | 1913 | break; |
1914 | 1914 | ||
1915 | case USB_RECIP_ENDPOINT: | 1915 | case USB_RECIP_ENDPOINT: |
1916 | if (!cdev->config) | ||
1917 | break; | ||
1916 | endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f); | 1918 | endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f); |
1917 | list_for_each_entry(f, &cdev->config->functions, list) { | 1919 | list_for_each_entry(f, &cdev->config->functions, list) { |
1918 | if (test_bit(endp, f->endpoints)) | 1920 | if (test_bit(endp, f->endpoints)) |
@@ -2124,14 +2126,14 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev, | |||
2124 | 2126 | ||
2125 | cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL); | 2127 | cdev->os_desc_req = usb_ep_alloc_request(ep0, GFP_KERNEL); |
2126 | if (!cdev->os_desc_req) { | 2128 | if (!cdev->os_desc_req) { |
2127 | ret = PTR_ERR(cdev->os_desc_req); | 2129 | ret = -ENOMEM; |
2128 | goto end; | 2130 | goto end; |
2129 | } | 2131 | } |
2130 | 2132 | ||
2131 | /* OS feature descriptor length <= 4kB */ | 2133 | /* OS feature descriptor length <= 4kB */ |
2132 | cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); | 2134 | cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL); |
2133 | if (!cdev->os_desc_req->buf) { | 2135 | if (!cdev->os_desc_req->buf) { |
2134 | ret = PTR_ERR(cdev->os_desc_req->buf); | 2136 | ret = -ENOMEM; |
2135 | kfree(cdev->os_desc_req); | 2137 | kfree(cdev->os_desc_req); |
2136 | goto end; | 2138 | goto end; |
2137 | } | 2139 | } |
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index 70cf3477f951..f9237fe2be05 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c | |||
@@ -1490,7 +1490,9 @@ void unregister_gadget_item(struct config_item *item) | |||
1490 | { | 1490 | { |
1491 | struct gadget_info *gi = to_gadget_info(item); | 1491 | struct gadget_info *gi = to_gadget_info(item); |
1492 | 1492 | ||
1493 | mutex_lock(&gi->lock); | ||
1493 | unregister_gadget(gi); | 1494 | unregister_gadget(gi); |
1495 | mutex_unlock(&gi->lock); | ||
1494 | } | 1496 | } |
1495 | EXPORT_SYMBOL_GPL(unregister_gadget_item); | 1497 | EXPORT_SYMBOL_GPL(unregister_gadget_item); |
1496 | 1498 | ||
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c index 943c21aafd3b..ab6ac1b74ac0 100644 --- a/drivers/usb/gadget/function/rndis.c +++ b/drivers/usb/gadget/function/rndis.c | |||
@@ -680,6 +680,12 @@ static int rndis_reset_response(struct rndis_params *params, | |||
680 | { | 680 | { |
681 | rndis_reset_cmplt_type *resp; | 681 | rndis_reset_cmplt_type *resp; |
682 | rndis_resp_t *r; | 682 | rndis_resp_t *r; |
683 | u8 *xbuf; | ||
684 | u32 length; | ||
685 | |||
686 | /* drain the response queue */ | ||
687 | while ((xbuf = rndis_get_next_response(params, &length))) | ||
688 | rndis_free_response(params, xbuf); | ||
683 | 689 | ||
684 | r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type)); | 690 | r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type)); |
685 | if (!r) | 691 | if (!r) |
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index a3f7e7c55ebb..5f562c1ec795 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c | |||
@@ -556,7 +556,8 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb, | |||
556 | /* Multi frame CDC protocols may store the frame for | 556 | /* Multi frame CDC protocols may store the frame for |
557 | * later which is not a dropped frame. | 557 | * later which is not a dropped frame. |
558 | */ | 558 | */ |
559 | if (dev->port_usb->supports_multi_frame) | 559 | if (dev->port_usb && |
560 | dev->port_usb->supports_multi_frame) | ||
560 | goto multiframe; | 561 | goto multiframe; |
561 | goto drop; | 562 | goto drop; |
562 | } | 563 | } |
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c index 66753ba7a42e..31125a4a2658 100644 --- a/drivers/usb/gadget/function/uvc_configfs.c +++ b/drivers/usb/gadget/function/uvc_configfs.c | |||
@@ -2023,7 +2023,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src, | |||
2023 | if (!data) { | 2023 | if (!data) { |
2024 | kfree(*class_array); | 2024 | kfree(*class_array); |
2025 | *class_array = NULL; | 2025 | *class_array = NULL; |
2026 | ret = PTR_ERR(data); | 2026 | ret = -ENOMEM; |
2027 | goto unlock; | 2027 | goto unlock; |
2028 | } | 2028 | } |
2029 | cl_arr = *class_array; | 2029 | cl_arr = *class_array; |
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index aa3707bdebb4..16104b5ebdcb 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c | |||
@@ -542,7 +542,7 @@ static ssize_t ep_aio(struct kiocb *iocb, | |||
542 | */ | 542 | */ |
543 | spin_lock_irq(&epdata->dev->lock); | 543 | spin_lock_irq(&epdata->dev->lock); |
544 | value = -ENODEV; | 544 | value = -ENODEV; |
545 | if (unlikely(epdata->ep)) | 545 | if (unlikely(epdata->ep == NULL)) |
546 | goto fail; | 546 | goto fail; |
547 | 547 | ||
548 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); | 548 | req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); |
@@ -606,7 +606,7 @@ ep_read_iter(struct kiocb *iocb, struct iov_iter *to) | |||
606 | } | 606 | } |
607 | if (is_sync_kiocb(iocb)) { | 607 | if (is_sync_kiocb(iocb)) { |
608 | value = ep_io(epdata, buf, len); | 608 | value = ep_io(epdata, buf, len); |
609 | if (value >= 0 && copy_to_iter(buf, value, to)) | 609 | if (value >= 0 && (copy_to_iter(buf, value, to) != value)) |
610 | value = -EFAULT; | 610 | value = -EFAULT; |
611 | } else { | 611 | } else { |
612 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); | 612 | struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); |
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index ff8685ea7219..934f83881c30 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c | |||
@@ -1145,7 +1145,7 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, | |||
1145 | if (ret != -EPROBE_DEFER) | 1145 | if (ret != -EPROBE_DEFER) |
1146 | list_del(&driver->pending); | 1146 | list_del(&driver->pending); |
1147 | if (ret) | 1147 | if (ret) |
1148 | goto err4; | 1148 | goto err5; |
1149 | break; | 1149 | break; |
1150 | } | 1150 | } |
1151 | } | 1151 | } |
@@ -1154,6 +1154,9 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, | |||
1154 | 1154 | ||
1155 | return 0; | 1155 | return 0; |
1156 | 1156 | ||
1157 | err5: | ||
1158 | device_del(&udc->dev); | ||
1159 | |||
1157 | err4: | 1160 | err4: |
1158 | list_del(&udc->list); | 1161 | list_del(&udc->list); |
1159 | mutex_unlock(&udc_lock); | 1162 | mutex_unlock(&udc_lock); |
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c index 93d28cb00b76..cf8819a5c5b2 100644 --- a/drivers/usb/gadget/udc/fsl_qe_udc.c +++ b/drivers/usb/gadget/udc/fsl_qe_udc.c | |||
@@ -2053,7 +2053,7 @@ static void setup_received_handle(struct qe_udc *udc, | |||
2053 | struct qe_ep *ep; | 2053 | struct qe_ep *ep; |
2054 | 2054 | ||
2055 | if (wValue != 0 || wLength != 0 | 2055 | if (wValue != 0 || wLength != 0 |
2056 | || pipe > USB_MAX_ENDPOINTS) | 2056 | || pipe >= USB_MAX_ENDPOINTS) |
2057 | break; | 2057 | break; |
2058 | ep = &udc->eps[pipe]; | 2058 | ep = &udc->eps[pipe]; |
2059 | 2059 | ||
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index a962b89b65a6..1e5f529d51a2 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
@@ -332,11 +332,11 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci) | |||
332 | int port = HCS_N_PORTS(ehci->hcs_params); | 332 | int port = HCS_N_PORTS(ehci->hcs_params); |
333 | 333 | ||
334 | while (port--) { | 334 | while (port--) { |
335 | ehci_writel(ehci, PORT_RWC_BITS, | ||
336 | &ehci->regs->port_status[port]); | ||
337 | spin_unlock_irq(&ehci->lock); | 335 | spin_unlock_irq(&ehci->lock); |
338 | ehci_port_power(ehci, port, false); | 336 | ehci_port_power(ehci, port, false); |
339 | spin_lock_irq(&ehci->lock); | 337 | spin_lock_irq(&ehci->lock); |
338 | ehci_writel(ehci, PORT_RWC_BITS, | ||
339 | &ehci->regs->port_status[port]); | ||
340 | } | 340 | } |
341 | } | 341 | } |
342 | 342 | ||
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index c369c29e496d..2f7690092a7f 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c | |||
@@ -1675,7 +1675,7 @@ max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value) | |||
1675 | if (pin_number > 7) | 1675 | if (pin_number > 7) |
1676 | return; | 1676 | return; |
1677 | 1677 | ||
1678 | mask = 1u << pin_number; | 1678 | mask = 1u << (pin_number % 4); |
1679 | idx = pin_number / 4; | 1679 | idx = pin_number / 4; |
1680 | 1680 | ||
1681 | if (value) | 1681 | if (value) |
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index d61fcc48099e..730b9fd26685 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
@@ -386,6 +386,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) | |||
386 | 386 | ||
387 | ret = 0; | 387 | ret = 0; |
388 | virt_dev = xhci->devs[slot_id]; | 388 | virt_dev = xhci->devs[slot_id]; |
389 | if (!virt_dev) | ||
390 | return -ENODEV; | ||
391 | |||
389 | cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); | 392 | cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); |
390 | if (!cmd) { | 393 | if (!cmd) { |
391 | xhci_dbg(xhci, "Couldn't allocate command structure.\n"); | 394 | xhci_dbg(xhci, "Couldn't allocate command structure.\n"); |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 4fd041bec332..d7b0f97abbad 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -314,11 +314,12 @@ static void xhci_pci_remove(struct pci_dev *dev) | |||
314 | usb_remove_hcd(xhci->shared_hcd); | 314 | usb_remove_hcd(xhci->shared_hcd); |
315 | usb_put_hcd(xhci->shared_hcd); | 315 | usb_put_hcd(xhci->shared_hcd); |
316 | } | 316 | } |
317 | usb_hcd_pci_remove(dev); | ||
318 | 317 | ||
319 | /* Workaround for spurious wakeups at shutdown with HSW */ | 318 | /* Workaround for spurious wakeups at shutdown with HSW */ |
320 | if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) | 319 | if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) |
321 | pci_set_power_state(dev, PCI_D3hot); | 320 | pci_set_power_state(dev, PCI_D3hot); |
321 | |||
322 | usb_hcd_pci_remove(dev); | ||
322 | } | 323 | } |
323 | 324 | ||
324 | #ifdef CONFIG_PM | 325 | #ifdef CONFIG_PM |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 918e0c739b79..fd9fd12e4861 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -1334,12 +1334,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1334 | 1334 | ||
1335 | cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); | 1335 | cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list); |
1336 | 1336 | ||
1337 | if (cmd->command_trb != xhci->cmd_ring->dequeue) { | ||
1338 | xhci_err(xhci, | ||
1339 | "Command completion event does not match command\n"); | ||
1340 | return; | ||
1341 | } | ||
1342 | |||
1343 | del_timer(&xhci->cmd_timer); | 1337 | del_timer(&xhci->cmd_timer); |
1344 | 1338 | ||
1345 | trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); | 1339 | trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event); |
@@ -1351,6 +1345,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, | |||
1351 | xhci_handle_stopped_cmd_ring(xhci, cmd); | 1345 | xhci_handle_stopped_cmd_ring(xhci, cmd); |
1352 | return; | 1346 | return; |
1353 | } | 1347 | } |
1348 | |||
1349 | if (cmd->command_trb != xhci->cmd_ring->dequeue) { | ||
1350 | xhci_err(xhci, | ||
1351 | "Command completion event does not match command\n"); | ||
1352 | return; | ||
1353 | } | ||
1354 | |||
1354 | /* | 1355 | /* |
1355 | * Host aborted the command ring, check if the current command was | 1356 | * Host aborted the command ring, check if the current command was |
1356 | * supposed to be aborted, otherwise continue normally. | 1357 | * supposed to be aborted, otherwise continue normally. |
@@ -3243,7 +3244,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
3243 | send_addr = addr; | 3244 | send_addr = addr; |
3244 | 3245 | ||
3245 | /* Queue the TRBs, even if they are zero-length */ | 3246 | /* Queue the TRBs, even if they are zero-length */ |
3246 | for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) { | 3247 | for (enqd_len = 0; first_trb || enqd_len < full_len; |
3248 | enqd_len += trb_buff_len) { | ||
3247 | field = TRB_TYPE(TRB_NORMAL); | 3249 | field = TRB_TYPE(TRB_NORMAL); |
3248 | 3250 | ||
3249 | /* TRB buffer should not cross 64KB boundaries */ | 3251 | /* TRB buffer should not cross 64KB boundaries */ |
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index 52c27cab78c3..9b5b3b2281ca 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c | |||
@@ -665,7 +665,7 @@ static ssize_t ftdi_elan_read(struct file *file, char __user *buffer, | |||
665 | { | 665 | { |
666 | char data[30 *3 + 4]; | 666 | char data[30 *3 + 4]; |
667 | char *d = data; | 667 | char *d = data; |
668 | int m = (sizeof(data) - 1) / 3; | 668 | int m = (sizeof(data) - 1) / 3 - 1; |
669 | int bytes_read = 0; | 669 | int bytes_read = 0; |
670 | int retry_on_empty = 10; | 670 | int retry_on_empty = 10; |
671 | int retry_on_timeout = 5; | 671 | int retry_on_timeout = 5; |
@@ -1684,7 +1684,7 @@ wait:if (ftdi->disconnected > 0) { | |||
1684 | int i = 0; | 1684 | int i = 0; |
1685 | char data[30 *3 + 4]; | 1685 | char data[30 *3 + 4]; |
1686 | char *d = data; | 1686 | char *d = data; |
1687 | int m = (sizeof(data) - 1) / 3; | 1687 | int m = (sizeof(data) - 1) / 3 - 1; |
1688 | int l = 0; | 1688 | int l = 0; |
1689 | struct u132_target *target = &ftdi->target[ed]; | 1689 | struct u132_target *target = &ftdi->target[ed]; |
1690 | struct u132_command *command = &ftdi->command[ | 1690 | struct u132_command *command = &ftdi->command[ |
@@ -1876,7 +1876,7 @@ more:{ | |||
1876 | if (packet_bytes > 2) { | 1876 | if (packet_bytes > 2) { |
1877 | char diag[30 *3 + 4]; | 1877 | char diag[30 *3 + 4]; |
1878 | char *d = diag; | 1878 | char *d = diag; |
1879 | int m = (sizeof(diag) - 1) / 3; | 1879 | int m = (sizeof(diag) - 1) / 3 - 1; |
1880 | char *b = ftdi->bulk_in_buffer; | 1880 | char *b = ftdi->bulk_in_buffer; |
1881 | int bytes_read = 0; | 1881 | int bytes_read = 0; |
1882 | diag[0] = 0; | 1882 | diag[0] = 0; |
@@ -2053,7 +2053,7 @@ static int ftdi_elan_synchronize(struct usb_ftdi *ftdi) | |||
2053 | if (packet_bytes > 2) { | 2053 | if (packet_bytes > 2) { |
2054 | char diag[30 *3 + 4]; | 2054 | char diag[30 *3 + 4]; |
2055 | char *d = diag; | 2055 | char *d = diag; |
2056 | int m = (sizeof(diag) - 1) / 3; | 2056 | int m = (sizeof(diag) - 1) / 3 - 1; |
2057 | char *b = ftdi->bulk_in_buffer; | 2057 | char *b = ftdi->bulk_in_buffer; |
2058 | int bytes_read = 0; | 2058 | int bytes_read = 0; |
2059 | unsigned char c = 0; | 2059 | unsigned char c = 0; |
@@ -2155,7 +2155,7 @@ more:{ | |||
2155 | if (packet_bytes > 2) { | 2155 | if (packet_bytes > 2) { |
2156 | char diag[30 *3 + 4]; | 2156 | char diag[30 *3 + 4]; |
2157 | char *d = diag; | 2157 | char *d = diag; |
2158 | int m = (sizeof(diag) - 1) / 3; | 2158 | int m = (sizeof(diag) - 1) / 3 - 1; |
2159 | char *b = ftdi->bulk_in_buffer; | 2159 | char *b = ftdi->bulk_in_buffer; |
2160 | int bytes_read = 0; | 2160 | int bytes_read = 0; |
2161 | diag[0] = 0; | 2161 | diag[0] = 0; |
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 6b978f04b8d7..5c8210dc6fd9 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c | |||
@@ -585,7 +585,6 @@ static void sg_timeout(unsigned long _req) | |||
585 | { | 585 | { |
586 | struct usb_sg_request *req = (struct usb_sg_request *) _req; | 586 | struct usb_sg_request *req = (struct usb_sg_request *) _req; |
587 | 587 | ||
588 | req->status = -ETIMEDOUT; | ||
589 | usb_sg_cancel(req); | 588 | usb_sg_cancel(req); |
590 | } | 589 | } |
591 | 590 | ||
@@ -616,8 +615,10 @@ static int perform_sglist( | |||
616 | mod_timer(&sg_timer, jiffies + | 615 | mod_timer(&sg_timer, jiffies + |
617 | msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); | 616 | msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); |
618 | usb_sg_wait(req); | 617 | usb_sg_wait(req); |
619 | del_timer_sync(&sg_timer); | 618 | if (!del_timer_sync(&sg_timer)) |
620 | retval = req->status; | 619 | retval = -ETIMEDOUT; |
620 | else | ||
621 | retval = req->status; | ||
621 | 622 | ||
622 | /* FIXME check resulting data pattern */ | 623 | /* FIXME check resulting data pattern */ |
623 | 624 | ||
@@ -2602,7 +2603,7 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf) | |||
2602 | ktime_get_ts64(&start); | 2603 | ktime_get_ts64(&start); |
2603 | 2604 | ||
2604 | retval = usbtest_do_ioctl(intf, param_32); | 2605 | retval = usbtest_do_ioctl(intf, param_32); |
2605 | if (retval) | 2606 | if (retval < 0) |
2606 | goto free_mutex; | 2607 | goto free_mutex; |
2607 | 2608 | ||
2608 | ktime_get_ts64(&end); | 2609 | ktime_get_ts64(&end); |
diff --git a/drivers/usb/phy/phy-omap-otg.c b/drivers/usb/phy/phy-omap-otg.c index 6f6d2a7fd5a0..6523af4f8f93 100644 --- a/drivers/usb/phy/phy-omap-otg.c +++ b/drivers/usb/phy/phy-omap-otg.c | |||
@@ -140,6 +140,8 @@ static int omap_otg_probe(struct platform_device *pdev) | |||
140 | (rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id, | 140 | (rev >> 4) & 0xf, rev & 0xf, config->extcon, otg_dev->id, |
141 | otg_dev->vbus); | 141 | otg_dev->vbus); |
142 | 142 | ||
143 | platform_set_drvdata(pdev, otg_dev); | ||
144 | |||
143 | return 0; | 145 | return 0; |
144 | } | 146 | } |
145 | 147 | ||
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 8fbbc2d32371..ac67bab9124c 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c | |||
@@ -514,7 +514,8 @@ static struct renesas_usbhs_platform_info *usbhs_parse_dt(struct device *dev) | |||
514 | if (gpio > 0) | 514 | if (gpio > 0) |
515 | dparam->enable_gpio = gpio; | 515 | dparam->enable_gpio = gpio; |
516 | 516 | ||
517 | if (dparam->type == USBHS_TYPE_RCAR_GEN2) | 517 | if (dparam->type == USBHS_TYPE_RCAR_GEN2 || |
518 | dparam->type == USBHS_TYPE_RCAR_GEN3) | ||
518 | dparam->has_usb_dmac = 1; | 519 | dparam->has_usb_dmac = 1; |
519 | 520 | ||
520 | return info; | 521 | return info; |
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 280ed5ff021b..857e78337324 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c | |||
@@ -871,7 +871,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done) | |||
871 | 871 | ||
872 | /* use PIO if packet is less than pio_dma_border or pipe is DCP */ | 872 | /* use PIO if packet is less than pio_dma_border or pipe is DCP */ |
873 | if ((len < usbhs_get_dparam(priv, pio_dma_border)) || | 873 | if ((len < usbhs_get_dparam(priv, pio_dma_border)) || |
874 | usbhs_pipe_is_dcp(pipe)) | 874 | usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC)) |
875 | goto usbhsf_pio_prepare_push; | 875 | goto usbhsf_pio_prepare_push; |
876 | 876 | ||
877 | /* check data length if this driver don't use USB-DMAC */ | 877 | /* check data length if this driver don't use USB-DMAC */ |
@@ -976,7 +976,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt, | |||
976 | 976 | ||
977 | /* use PIO if packet is less than pio_dma_border or pipe is DCP */ | 977 | /* use PIO if packet is less than pio_dma_border or pipe is DCP */ |
978 | if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) || | 978 | if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) || |
979 | usbhs_pipe_is_dcp(pipe)) | 979 | usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC)) |
980 | goto usbhsf_pio_prepare_pop; | 980 | goto usbhsf_pio_prepare_pop; |
981 | 981 | ||
982 | fifo = usbhsf_get_dma_fifo(priv, pkt); | 982 | fifo = usbhsf_get_dma_fifo(priv, pkt); |
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 50f3363cc382..92bc83b92d10 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c | |||
@@ -617,10 +617,13 @@ static int usbhsg_ep_enable(struct usb_ep *ep, | |||
617 | * use dmaengine if possible. | 617 | * use dmaengine if possible. |
618 | * It will use pio handler if impossible. | 618 | * It will use pio handler if impossible. |
619 | */ | 619 | */ |
620 | if (usb_endpoint_dir_in(desc)) | 620 | if (usb_endpoint_dir_in(desc)) { |
621 | pipe->handler = &usbhs_fifo_dma_push_handler; | 621 | pipe->handler = &usbhs_fifo_dma_push_handler; |
622 | else | 622 | } else { |
623 | pipe->handler = &usbhs_fifo_dma_pop_handler; | 623 | pipe->handler = &usbhs_fifo_dma_pop_handler; |
624 | usbhs_xxxsts_clear(priv, BRDYSTS, | ||
625 | usbhs_pipe_number(pipe)); | ||
626 | } | ||
624 | 627 | ||
625 | ret = 0; | 628 | ret = 0; |
626 | } | 629 | } |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 00820809139a..b2d767e743fc 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -648,6 +648,8 @@ static const struct usb_device_id id_table_combined[] = { | |||
648 | { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, | 648 | { USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) }, |
649 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, | 649 | { USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) }, |
650 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, | 650 | { USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) }, |
651 | { USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) }, | ||
652 | { USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) }, | ||
651 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, | 653 | { USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) }, |
652 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, | 654 | { USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) }, |
653 | { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, | 655 | { USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) }, |
@@ -1008,6 +1010,7 @@ static const struct usb_device_id id_table_combined[] = { | |||
1008 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, | 1010 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, |
1009 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, | 1011 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, |
1010 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, | 1012 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, |
1013 | { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, | ||
1011 | { } /* Terminating entry */ | 1014 | { } /* Terminating entry */ |
1012 | }; | 1015 | }; |
1013 | 1016 | ||
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index c5d6c1e73e8e..f87a938cf005 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -406,6 +406,12 @@ | |||
406 | #define FTDI_4N_GALAXY_DE_3_PID 0xF3C2 | 406 | #define FTDI_4N_GALAXY_DE_3_PID 0xF3C2 |
407 | 407 | ||
408 | /* | 408 | /* |
409 | * Ivium Technologies product IDs | ||
410 | */ | ||
411 | #define FTDI_PALMSENS_PID 0xf440 | ||
412 | #define FTDI_IVIUM_XSTAT_PID 0xf441 | ||
413 | |||
414 | /* | ||
409 | * Linx Technologies product ids | 415 | * Linx Technologies product ids |
410 | */ | 416 | */ |
411 | #define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */ | 417 | #define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */ |
@@ -673,6 +679,12 @@ | |||
673 | #define INTREPID_NEOVI_PID 0x0701 | 679 | #define INTREPID_NEOVI_PID 0x0701 |
674 | 680 | ||
675 | /* | 681 | /* |
682 | * WICED USB UART | ||
683 | */ | ||
684 | #define WICED_VID 0x0A5C | ||
685 | #define WICED_USB20706V2_PID 0x6422 | ||
686 | |||
687 | /* | ||
676 | * Definitions for ID TECH (www.idt-net.com) devices | 688 | * Definitions for ID TECH (www.idt-net.com) devices |
677 | */ | 689 | */ |
678 | #define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */ | 690 | #define IDTECH_VID 0x0ACD /* ID TECH Vendor ID */ |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 8e07536c233a..bc472584a229 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -274,6 +274,12 @@ static void option_instat_callback(struct urb *urb); | |||
274 | #define TELIT_PRODUCT_LE920 0x1200 | 274 | #define TELIT_PRODUCT_LE920 0x1200 |
275 | #define TELIT_PRODUCT_LE910 0x1201 | 275 | #define TELIT_PRODUCT_LE910 0x1201 |
276 | #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 | 276 | #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 |
277 | #define TELIT_PRODUCT_LE920A4_1207 0x1207 | ||
278 | #define TELIT_PRODUCT_LE920A4_1208 0x1208 | ||
279 | #define TELIT_PRODUCT_LE920A4_1211 0x1211 | ||
280 | #define TELIT_PRODUCT_LE920A4_1212 0x1212 | ||
281 | #define TELIT_PRODUCT_LE920A4_1213 0x1213 | ||
282 | #define TELIT_PRODUCT_LE920A4_1214 0x1214 | ||
277 | 283 | ||
278 | /* ZTE PRODUCTS */ | 284 | /* ZTE PRODUCTS */ |
279 | #define ZTE_VENDOR_ID 0x19d2 | 285 | #define ZTE_VENDOR_ID 0x19d2 |
@@ -628,6 +634,11 @@ static const struct option_blacklist_info telit_le920_blacklist = { | |||
628 | .reserved = BIT(1) | BIT(5), | 634 | .reserved = BIT(1) | BIT(5), |
629 | }; | 635 | }; |
630 | 636 | ||
637 | static const struct option_blacklist_info telit_le920a4_blacklist_1 = { | ||
638 | .sendsetup = BIT(0), | ||
639 | .reserved = BIT(1), | ||
640 | }; | ||
641 | |||
631 | static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = { | 642 | static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = { |
632 | .sendsetup = BIT(2), | 643 | .sendsetup = BIT(2), |
633 | .reserved = BIT(0) | BIT(1) | BIT(3), | 644 | .reserved = BIT(0) | BIT(1) | BIT(3), |
@@ -1203,6 +1214,16 @@ static const struct usb_device_id option_ids[] = { | |||
1203 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, | 1214 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, |
1204 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), | 1215 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), |
1205 | .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, | 1216 | .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, |
1217 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) }, | ||
1218 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208), | ||
1219 | .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, | ||
1220 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211), | ||
1221 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, | ||
1222 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212), | ||
1223 | .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 }, | ||
1224 | { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, | ||
1225 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), | ||
1226 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, | ||
1206 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ | 1227 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ |
1207 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), | 1228 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), |
1208 | .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, | 1229 | .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, |
@@ -1966,6 +1987,7 @@ static const struct usb_device_id option_ids[] = { | |||
1966 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 1987 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
1967 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ | 1988 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ |
1968 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ | 1989 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ |
1990 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ | ||
1969 | { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ | 1991 | { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ |
1970 | { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, | 1992 | { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, |
1971 | { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, | 1993 | { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, |
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index b1b9bac44016..d213cf44a7e4 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -1433,7 +1433,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] | |||
1433 | 1433 | ||
1434 | rc = usb_register(udriver); | 1434 | rc = usb_register(udriver); |
1435 | if (rc) | 1435 | if (rc) |
1436 | return rc; | 1436 | goto failed_usb_register; |
1437 | 1437 | ||
1438 | for (sd = serial_drivers; *sd; ++sd) { | 1438 | for (sd = serial_drivers; *sd; ++sd) { |
1439 | (*sd)->usb_driver = udriver; | 1439 | (*sd)->usb_driver = udriver; |
@@ -1451,6 +1451,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[] | |||
1451 | while (sd-- > serial_drivers) | 1451 | while (sd-- > serial_drivers) |
1452 | usb_serial_deregister(*sd); | 1452 | usb_serial_deregister(*sd); |
1453 | usb_deregister(udriver); | 1453 | usb_deregister(udriver); |
1454 | failed_usb_register: | ||
1455 | kfree(udriver); | ||
1454 | return rc; | 1456 | return rc; |
1455 | } | 1457 | } |
1456 | EXPORT_SYMBOL_GPL(usb_serial_register_drivers); | 1458 | EXPORT_SYMBOL_GPL(usb_serial_register_drivers); |
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c index 388eec4e1a90..97fb2f8fa930 100644 --- a/drivers/vhost/test.c +++ b/drivers/vhost/test.c | |||
@@ -220,20 +220,20 @@ static long vhost_test_reset_owner(struct vhost_test *n) | |||
220 | { | 220 | { |
221 | void *priv = NULL; | 221 | void *priv = NULL; |
222 | long err; | 222 | long err; |
223 | struct vhost_memory *memory; | 223 | struct vhost_umem *umem; |
224 | 224 | ||
225 | mutex_lock(&n->dev.mutex); | 225 | mutex_lock(&n->dev.mutex); |
226 | err = vhost_dev_check_owner(&n->dev); | 226 | err = vhost_dev_check_owner(&n->dev); |
227 | if (err) | 227 | if (err) |
228 | goto done; | 228 | goto done; |
229 | memory = vhost_dev_reset_owner_prepare(); | 229 | umem = vhost_dev_reset_owner_prepare(); |
230 | if (!memory) { | 230 | if (!umem) { |
231 | err = -ENOMEM; | 231 | err = -ENOMEM; |
232 | goto done; | 232 | goto done; |
233 | } | 233 | } |
234 | vhost_test_stop(n, &priv); | 234 | vhost_test_stop(n, &priv); |
235 | vhost_test_flush(n); | 235 | vhost_test_flush(n); |
236 | vhost_dev_reset_owner(&n->dev, memory); | 236 | vhost_dev_reset_owner(&n->dev, umem); |
237 | done: | 237 | done: |
238 | mutex_unlock(&n->dev.mutex); | 238 | mutex_unlock(&n->dev.mutex); |
239 | return err; | 239 | return err; |
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index 4b0eff6da674..85737e96ab8b 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c | |||
@@ -189,11 +189,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, | |||
189 | case 1: | 189 | case 1: |
190 | _debug("extract FID count"); | 190 | _debug("extract FID count"); |
191 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); | 191 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); |
192 | switch (ret) { | 192 | if (ret < 0) |
193 | case 0: break; | 193 | return ret; |
194 | case -EAGAIN: return 0; | ||
195 | default: return ret; | ||
196 | } | ||
197 | 194 | ||
198 | call->count = ntohl(call->tmp); | 195 | call->count = ntohl(call->tmp); |
199 | _debug("FID count: %u", call->count); | 196 | _debug("FID count: %u", call->count); |
@@ -210,11 +207,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, | |||
210 | _debug("extract FID array"); | 207 | _debug("extract FID array"); |
211 | ret = afs_extract_data(call, skb, last, call->buffer, | 208 | ret = afs_extract_data(call, skb, last, call->buffer, |
212 | call->count * 3 * 4); | 209 | call->count * 3 * 4); |
213 | switch (ret) { | 210 | if (ret < 0) |
214 | case 0: break; | 211 | return ret; |
215 | case -EAGAIN: return 0; | ||
216 | default: return ret; | ||
217 | } | ||
218 | 212 | ||
219 | _debug("unmarshall FID array"); | 213 | _debug("unmarshall FID array"); |
220 | call->request = kcalloc(call->count, | 214 | call->request = kcalloc(call->count, |
@@ -239,11 +233,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, | |||
239 | case 3: | 233 | case 3: |
240 | _debug("extract CB count"); | 234 | _debug("extract CB count"); |
241 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); | 235 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); |
242 | switch (ret) { | 236 | if (ret < 0) |
243 | case 0: break; | 237 | return ret; |
244 | case -EAGAIN: return 0; | ||
245 | default: return ret; | ||
246 | } | ||
247 | 238 | ||
248 | tmp = ntohl(call->tmp); | 239 | tmp = ntohl(call->tmp); |
249 | _debug("CB count: %u", tmp); | 240 | _debug("CB count: %u", tmp); |
@@ -258,11 +249,8 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, | |||
258 | _debug("extract CB array"); | 249 | _debug("extract CB array"); |
259 | ret = afs_extract_data(call, skb, last, call->request, | 250 | ret = afs_extract_data(call, skb, last, call->request, |
260 | call->count * 3 * 4); | 251 | call->count * 3 * 4); |
261 | switch (ret) { | 252 | if (ret < 0) |
262 | case 0: break; | 253 | return ret; |
263 | case -EAGAIN: return 0; | ||
264 | default: return ret; | ||
265 | } | ||
266 | 254 | ||
267 | _debug("unmarshall CB array"); | 255 | _debug("unmarshall CB array"); |
268 | cb = call->request; | 256 | cb = call->request; |
@@ -278,9 +266,9 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, | |||
278 | call->unmarshall++; | 266 | call->unmarshall++; |
279 | 267 | ||
280 | case 5: | 268 | case 5: |
281 | _debug("trailer"); | 269 | ret = afs_data_complete(call, skb, last); |
282 | if (skb->len != 0) | 270 | if (ret < 0) |
283 | return -EBADMSG; | 271 | return ret; |
284 | 272 | ||
285 | /* Record that the message was unmarshalled successfully so | 273 | /* Record that the message was unmarshalled successfully so |
286 | * that the call destructor can know do the callback breaking | 274 | * that the call destructor can know do the callback breaking |
@@ -294,8 +282,6 @@ static int afs_deliver_cb_callback(struct afs_call *call, struct sk_buff *skb, | |||
294 | break; | 282 | break; |
295 | } | 283 | } |
296 | 284 | ||
297 | if (!last) | ||
298 | return 0; | ||
299 | 285 | ||
300 | call->state = AFS_CALL_REPLYING; | 286 | call->state = AFS_CALL_REPLYING; |
301 | 287 | ||
@@ -335,13 +321,13 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call, | |||
335 | { | 321 | { |
336 | struct afs_server *server; | 322 | struct afs_server *server; |
337 | struct in_addr addr; | 323 | struct in_addr addr; |
324 | int ret; | ||
338 | 325 | ||
339 | _enter(",{%u},%d", skb->len, last); | 326 | _enter(",{%u},%d", skb->len, last); |
340 | 327 | ||
341 | if (skb->len > 0) | 328 | ret = afs_data_complete(call, skb, last); |
342 | return -EBADMSG; | 329 | if (ret < 0) |
343 | if (!last) | 330 | return ret; |
344 | return 0; | ||
345 | 331 | ||
346 | /* no unmarshalling required */ | 332 | /* no unmarshalling required */ |
347 | call->state = AFS_CALL_REPLYING; | 333 | call->state = AFS_CALL_REPLYING; |
@@ -371,8 +357,10 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call, | |||
371 | 357 | ||
372 | _enter(",{%u},%d", skb->len, last); | 358 | _enter(",{%u},%d", skb->len, last); |
373 | 359 | ||
360 | /* There are some arguments that we ignore */ | ||
361 | afs_data_consumed(call, skb); | ||
374 | if (!last) | 362 | if (!last) |
375 | return 0; | 363 | return -EAGAIN; |
376 | 364 | ||
377 | /* no unmarshalling required */ | 365 | /* no unmarshalling required */ |
378 | call->state = AFS_CALL_REPLYING; | 366 | call->state = AFS_CALL_REPLYING; |
@@ -408,12 +396,13 @@ static void SRXAFSCB_Probe(struct work_struct *work) | |||
408 | static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb, | 396 | static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb, |
409 | bool last) | 397 | bool last) |
410 | { | 398 | { |
399 | int ret; | ||
400 | |||
411 | _enter(",{%u},%d", skb->len, last); | 401 | _enter(",{%u},%d", skb->len, last); |
412 | 402 | ||
413 | if (skb->len > 0) | 403 | ret = afs_data_complete(call, skb, last); |
414 | return -EBADMSG; | 404 | if (ret < 0) |
415 | if (!last) | 405 | return ret; |
416 | return 0; | ||
417 | 406 | ||
418 | /* no unmarshalling required */ | 407 | /* no unmarshalling required */ |
419 | call->state = AFS_CALL_REPLYING; | 408 | call->state = AFS_CALL_REPLYING; |
@@ -460,10 +449,9 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb, | |||
460 | 449 | ||
461 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); | 450 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); |
462 | 451 | ||
463 | if (skb->len > 0) | 452 | ret = afs_data_complete(call, skb, last); |
464 | return -EBADMSG; | 453 | if (ret < 0) |
465 | if (!last) | 454 | return ret; |
466 | return 0; | ||
467 | 455 | ||
468 | switch (call->unmarshall) { | 456 | switch (call->unmarshall) { |
469 | case 0: | 457 | case 0: |
@@ -509,8 +497,9 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb, | |||
509 | break; | 497 | break; |
510 | } | 498 | } |
511 | 499 | ||
512 | if (!last) | 500 | ret = afs_data_complete(call, skb, last); |
513 | return 0; | 501 | if (ret < 0) |
502 | return ret; | ||
514 | 503 | ||
515 | call->state = AFS_CALL_REPLYING; | 504 | call->state = AFS_CALL_REPLYING; |
516 | 505 | ||
@@ -588,12 +577,13 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work) | |||
588 | static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call, | 577 | static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call, |
589 | struct sk_buff *skb, bool last) | 578 | struct sk_buff *skb, bool last) |
590 | { | 579 | { |
580 | int ret; | ||
581 | |||
591 | _enter(",{%u},%d", skb->len, last); | 582 | _enter(",{%u},%d", skb->len, last); |
592 | 583 | ||
593 | if (skb->len > 0) | 584 | ret = afs_data_complete(call, skb, last); |
594 | return -EBADMSG; | 585 | if (ret < 0) |
595 | if (!last) | 586 | return ret; |
596 | return 0; | ||
597 | 587 | ||
598 | /* no unmarshalling required */ | 588 | /* no unmarshalling required */ |
599 | call->state = AFS_CALL_REPLYING; | 589 | call->state = AFS_CALL_REPLYING; |
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index c2e930ec2888..9312b92e54be 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c | |||
@@ -240,15 +240,13 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call, | |||
240 | { | 240 | { |
241 | struct afs_vnode *vnode = call->reply; | 241 | struct afs_vnode *vnode = call->reply; |
242 | const __be32 *bp; | 242 | const __be32 *bp; |
243 | int ret; | ||
243 | 244 | ||
244 | _enter(",,%u", last); | 245 | _enter(",,%u", last); |
245 | 246 | ||
246 | afs_transfer_reply(call, skb); | 247 | ret = afs_transfer_reply(call, skb, last); |
247 | if (!last) | 248 | if (ret < 0) |
248 | return 0; | 249 | return ret; |
249 | |||
250 | if (call->reply_size != call->reply_max) | ||
251 | return -EBADMSG; | ||
252 | 250 | ||
253 | /* unmarshall the reply once we've received all of it */ | 251 | /* unmarshall the reply once we've received all of it */ |
254 | bp = call->buffer; | 252 | bp = call->buffer; |
@@ -335,11 +333,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call, | |||
335 | case 1: | 333 | case 1: |
336 | _debug("extract data length (MSW)"); | 334 | _debug("extract data length (MSW)"); |
337 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); | 335 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); |
338 | switch (ret) { | 336 | if (ret < 0) |
339 | case 0: break; | 337 | return ret; |
340 | case -EAGAIN: return 0; | ||
341 | default: return ret; | ||
342 | } | ||
343 | 338 | ||
344 | call->count = ntohl(call->tmp); | 339 | call->count = ntohl(call->tmp); |
345 | _debug("DATA length MSW: %u", call->count); | 340 | _debug("DATA length MSW: %u", call->count); |
@@ -353,11 +348,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call, | |||
353 | case 2: | 348 | case 2: |
354 | _debug("extract data length"); | 349 | _debug("extract data length"); |
355 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); | 350 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); |
356 | switch (ret) { | 351 | if (ret < 0) |
357 | case 0: break; | 352 | return ret; |
358 | case -EAGAIN: return 0; | ||
359 | default: return ret; | ||
360 | } | ||
361 | 353 | ||
362 | call->count = ntohl(call->tmp); | 354 | call->count = ntohl(call->tmp); |
363 | _debug("DATA length: %u", call->count); | 355 | _debug("DATA length: %u", call->count); |
@@ -375,11 +367,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call, | |||
375 | ret = afs_extract_data(call, skb, last, buffer, | 367 | ret = afs_extract_data(call, skb, last, buffer, |
376 | call->count); | 368 | call->count); |
377 | kunmap_atomic(buffer); | 369 | kunmap_atomic(buffer); |
378 | switch (ret) { | 370 | if (ret < 0) |
379 | case 0: break; | 371 | return ret; |
380 | case -EAGAIN: return 0; | ||
381 | default: return ret; | ||
382 | } | ||
383 | } | 372 | } |
384 | 373 | ||
385 | call->offset = 0; | 374 | call->offset = 0; |
@@ -389,11 +378,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call, | |||
389 | case 4: | 378 | case 4: |
390 | ret = afs_extract_data(call, skb, last, call->buffer, | 379 | ret = afs_extract_data(call, skb, last, call->buffer, |
391 | (21 + 3 + 6) * 4); | 380 | (21 + 3 + 6) * 4); |
392 | switch (ret) { | 381 | if (ret < 0) |
393 | case 0: break; | 382 | return ret; |
394 | case -EAGAIN: return 0; | ||
395 | default: return ret; | ||
396 | } | ||
397 | 383 | ||
398 | bp = call->buffer; | 384 | bp = call->buffer; |
399 | xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); | 385 | xdr_decode_AFSFetchStatus(&bp, &vnode->status, vnode, NULL); |
@@ -405,15 +391,12 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call, | |||
405 | call->unmarshall++; | 391 | call->unmarshall++; |
406 | 392 | ||
407 | case 5: | 393 | case 5: |
408 | _debug("trailer"); | 394 | ret = afs_data_complete(call, skb, last); |
409 | if (skb->len != 0) | 395 | if (ret < 0) |
410 | return -EBADMSG; | 396 | return ret; |
411 | break; | 397 | break; |
412 | } | 398 | } |
413 | 399 | ||
414 | if (!last) | ||
415 | return 0; | ||
416 | |||
417 | if (call->count < PAGE_SIZE) { | 400 | if (call->count < PAGE_SIZE) { |
418 | _debug("clear"); | 401 | _debug("clear"); |
419 | page = call->reply3; | 402 | page = call->reply3; |
@@ -537,9 +520,8 @@ static int afs_deliver_fs_give_up_callbacks(struct afs_call *call, | |||
537 | { | 520 | { |
538 | _enter(",{%u},%d", skb->len, last); | 521 | _enter(",{%u},%d", skb->len, last); |
539 | 522 | ||
540 | if (skb->len > 0) | 523 | /* shouldn't be any reply data */ |
541 | return -EBADMSG; /* shouldn't be any reply data */ | 524 | return afs_data_complete(call, skb, last); |
542 | return 0; | ||
543 | } | 525 | } |
544 | 526 | ||
545 | /* | 527 | /* |
@@ -622,15 +604,13 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call, | |||
622 | { | 604 | { |
623 | struct afs_vnode *vnode = call->reply; | 605 | struct afs_vnode *vnode = call->reply; |
624 | const __be32 *bp; | 606 | const __be32 *bp; |
607 | int ret; | ||
625 | 608 | ||
626 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); | 609 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); |
627 | 610 | ||
628 | afs_transfer_reply(call, skb); | 611 | ret = afs_transfer_reply(call, skb, last); |
629 | if (!last) | 612 | if (ret < 0) |
630 | return 0; | 613 | return ret; |
631 | |||
632 | if (call->reply_size != call->reply_max) | ||
633 | return -EBADMSG; | ||
634 | 614 | ||
635 | /* unmarshall the reply once we've received all of it */ | 615 | /* unmarshall the reply once we've received all of it */ |
636 | bp = call->buffer; | 616 | bp = call->buffer; |
@@ -721,15 +701,13 @@ static int afs_deliver_fs_remove(struct afs_call *call, | |||
721 | { | 701 | { |
722 | struct afs_vnode *vnode = call->reply; | 702 | struct afs_vnode *vnode = call->reply; |
723 | const __be32 *bp; | 703 | const __be32 *bp; |
704 | int ret; | ||
724 | 705 | ||
725 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); | 706 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); |
726 | 707 | ||
727 | afs_transfer_reply(call, skb); | 708 | ret = afs_transfer_reply(call, skb, last); |
728 | if (!last) | 709 | if (ret < 0) |
729 | return 0; | 710 | return ret; |
730 | |||
731 | if (call->reply_size != call->reply_max) | ||
732 | return -EBADMSG; | ||
733 | 711 | ||
734 | /* unmarshall the reply once we've received all of it */ | 712 | /* unmarshall the reply once we've received all of it */ |
735 | bp = call->buffer; | 713 | bp = call->buffer; |
@@ -804,15 +782,13 @@ static int afs_deliver_fs_link(struct afs_call *call, | |||
804 | { | 782 | { |
805 | struct afs_vnode *dvnode = call->reply, *vnode = call->reply2; | 783 | struct afs_vnode *dvnode = call->reply, *vnode = call->reply2; |
806 | const __be32 *bp; | 784 | const __be32 *bp; |
785 | int ret; | ||
807 | 786 | ||
808 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); | 787 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); |
809 | 788 | ||
810 | afs_transfer_reply(call, skb); | 789 | ret = afs_transfer_reply(call, skb, last); |
811 | if (!last) | 790 | if (ret < 0) |
812 | return 0; | 791 | return ret; |
813 | |||
814 | if (call->reply_size != call->reply_max) | ||
815 | return -EBADMSG; | ||
816 | 792 | ||
817 | /* unmarshall the reply once we've received all of it */ | 793 | /* unmarshall the reply once we've received all of it */ |
818 | bp = call->buffer; | 794 | bp = call->buffer; |
@@ -892,15 +868,13 @@ static int afs_deliver_fs_symlink(struct afs_call *call, | |||
892 | { | 868 | { |
893 | struct afs_vnode *vnode = call->reply; | 869 | struct afs_vnode *vnode = call->reply; |
894 | const __be32 *bp; | 870 | const __be32 *bp; |
871 | int ret; | ||
895 | 872 | ||
896 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); | 873 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); |
897 | 874 | ||
898 | afs_transfer_reply(call, skb); | 875 | ret = afs_transfer_reply(call, skb, last); |
899 | if (!last) | 876 | if (ret < 0) |
900 | return 0; | 877 | return ret; |
901 | |||
902 | if (call->reply_size != call->reply_max) | ||
903 | return -EBADMSG; | ||
904 | 878 | ||
905 | /* unmarshall the reply once we've received all of it */ | 879 | /* unmarshall the reply once we've received all of it */ |
906 | bp = call->buffer; | 880 | bp = call->buffer; |
@@ -999,15 +973,13 @@ static int afs_deliver_fs_rename(struct afs_call *call, | |||
999 | { | 973 | { |
1000 | struct afs_vnode *orig_dvnode = call->reply, *new_dvnode = call->reply2; | 974 | struct afs_vnode *orig_dvnode = call->reply, *new_dvnode = call->reply2; |
1001 | const __be32 *bp; | 975 | const __be32 *bp; |
976 | int ret; | ||
1002 | 977 | ||
1003 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); | 978 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); |
1004 | 979 | ||
1005 | afs_transfer_reply(call, skb); | 980 | ret = afs_transfer_reply(call, skb, last); |
1006 | if (!last) | 981 | if (ret < 0) |
1007 | return 0; | 982 | return ret; |
1008 | |||
1009 | if (call->reply_size != call->reply_max) | ||
1010 | return -EBADMSG; | ||
1011 | 983 | ||
1012 | /* unmarshall the reply once we've received all of it */ | 984 | /* unmarshall the reply once we've received all of it */ |
1013 | bp = call->buffer; | 985 | bp = call->buffer; |
@@ -1105,20 +1077,13 @@ static int afs_deliver_fs_store_data(struct afs_call *call, | |||
1105 | { | 1077 | { |
1106 | struct afs_vnode *vnode = call->reply; | 1078 | struct afs_vnode *vnode = call->reply; |
1107 | const __be32 *bp; | 1079 | const __be32 *bp; |
1080 | int ret; | ||
1108 | 1081 | ||
1109 | _enter(",,%u", last); | 1082 | _enter(",,%u", last); |
1110 | 1083 | ||
1111 | afs_transfer_reply(call, skb); | 1084 | ret = afs_transfer_reply(call, skb, last); |
1112 | if (!last) { | 1085 | if (ret < 0) |
1113 | _leave(" = 0 [more]"); | 1086 | return ret; |
1114 | return 0; | ||
1115 | } | ||
1116 | |||
1117 | if (call->reply_size != call->reply_max) { | ||
1118 | _leave(" = -EBADMSG [%u != %u]", | ||
1119 | call->reply_size, call->reply_max); | ||
1120 | return -EBADMSG; | ||
1121 | } | ||
1122 | 1087 | ||
1123 | /* unmarshall the reply once we've received all of it */ | 1088 | /* unmarshall the reply once we've received all of it */ |
1124 | bp = call->buffer; | 1089 | bp = call->buffer; |
@@ -1292,20 +1257,13 @@ static int afs_deliver_fs_store_status(struct afs_call *call, | |||
1292 | afs_dataversion_t *store_version; | 1257 | afs_dataversion_t *store_version; |
1293 | struct afs_vnode *vnode = call->reply; | 1258 | struct afs_vnode *vnode = call->reply; |
1294 | const __be32 *bp; | 1259 | const __be32 *bp; |
1260 | int ret; | ||
1295 | 1261 | ||
1296 | _enter(",,%u", last); | 1262 | _enter(",,%u", last); |
1297 | 1263 | ||
1298 | afs_transfer_reply(call, skb); | 1264 | ret = afs_transfer_reply(call, skb, last); |
1299 | if (!last) { | 1265 | if (ret < 0) |
1300 | _leave(" = 0 [more]"); | 1266 | return ret; |
1301 | return 0; | ||
1302 | } | ||
1303 | |||
1304 | if (call->reply_size != call->reply_max) { | ||
1305 | _leave(" = -EBADMSG [%u != %u]", | ||
1306 | call->reply_size, call->reply_max); | ||
1307 | return -EBADMSG; | ||
1308 | } | ||
1309 | 1267 | ||
1310 | /* unmarshall the reply once we've received all of it */ | 1268 | /* unmarshall the reply once we've received all of it */ |
1311 | store_version = NULL; | 1269 | store_version = NULL; |
@@ -1504,11 +1462,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, | |||
1504 | _debug("extract status"); | 1462 | _debug("extract status"); |
1505 | ret = afs_extract_data(call, skb, last, call->buffer, | 1463 | ret = afs_extract_data(call, skb, last, call->buffer, |
1506 | 12 * 4); | 1464 | 12 * 4); |
1507 | switch (ret) { | 1465 | if (ret < 0) |
1508 | case 0: break; | 1466 | return ret; |
1509 | case -EAGAIN: return 0; | ||
1510 | default: return ret; | ||
1511 | } | ||
1512 | 1467 | ||
1513 | bp = call->buffer; | 1468 | bp = call->buffer; |
1514 | xdr_decode_AFSFetchVolumeStatus(&bp, call->reply2); | 1469 | xdr_decode_AFSFetchVolumeStatus(&bp, call->reply2); |
@@ -1518,11 +1473,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, | |||
1518 | /* extract the volume name length */ | 1473 | /* extract the volume name length */ |
1519 | case 2: | 1474 | case 2: |
1520 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); | 1475 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); |
1521 | switch (ret) { | 1476 | if (ret < 0) |
1522 | case 0: break; | 1477 | return ret; |
1523 | case -EAGAIN: return 0; | ||
1524 | default: return ret; | ||
1525 | } | ||
1526 | 1478 | ||
1527 | call->count = ntohl(call->tmp); | 1479 | call->count = ntohl(call->tmp); |
1528 | _debug("volname length: %u", call->count); | 1480 | _debug("volname length: %u", call->count); |
@@ -1537,11 +1489,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, | |||
1537 | if (call->count > 0) { | 1489 | if (call->count > 0) { |
1538 | ret = afs_extract_data(call, skb, last, call->reply3, | 1490 | ret = afs_extract_data(call, skb, last, call->reply3, |
1539 | call->count); | 1491 | call->count); |
1540 | switch (ret) { | 1492 | if (ret < 0) |
1541 | case 0: break; | 1493 | return ret; |
1542 | case -EAGAIN: return 0; | ||
1543 | default: return ret; | ||
1544 | } | ||
1545 | } | 1494 | } |
1546 | 1495 | ||
1547 | p = call->reply3; | 1496 | p = call->reply3; |
@@ -1561,11 +1510,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, | |||
1561 | case 4: | 1510 | case 4: |
1562 | ret = afs_extract_data(call, skb, last, call->buffer, | 1511 | ret = afs_extract_data(call, skb, last, call->buffer, |
1563 | call->count); | 1512 | call->count); |
1564 | switch (ret) { | 1513 | if (ret < 0) |
1565 | case 0: break; | 1514 | return ret; |
1566 | case -EAGAIN: return 0; | ||
1567 | default: return ret; | ||
1568 | } | ||
1569 | 1515 | ||
1570 | call->offset = 0; | 1516 | call->offset = 0; |
1571 | call->unmarshall++; | 1517 | call->unmarshall++; |
@@ -1574,11 +1520,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, | |||
1574 | /* extract the offline message length */ | 1520 | /* extract the offline message length */ |
1575 | case 5: | 1521 | case 5: |
1576 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); | 1522 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); |
1577 | switch (ret) { | 1523 | if (ret < 0) |
1578 | case 0: break; | 1524 | return ret; |
1579 | case -EAGAIN: return 0; | ||
1580 | default: return ret; | ||
1581 | } | ||
1582 | 1525 | ||
1583 | call->count = ntohl(call->tmp); | 1526 | call->count = ntohl(call->tmp); |
1584 | _debug("offline msg length: %u", call->count); | 1527 | _debug("offline msg length: %u", call->count); |
@@ -1593,11 +1536,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, | |||
1593 | if (call->count > 0) { | 1536 | if (call->count > 0) { |
1594 | ret = afs_extract_data(call, skb, last, call->reply3, | 1537 | ret = afs_extract_data(call, skb, last, call->reply3, |
1595 | call->count); | 1538 | call->count); |
1596 | switch (ret) { | 1539 | if (ret < 0) |
1597 | case 0: break; | 1540 | return ret; |
1598 | case -EAGAIN: return 0; | ||
1599 | default: return ret; | ||
1600 | } | ||
1601 | } | 1541 | } |
1602 | 1542 | ||
1603 | p = call->reply3; | 1543 | p = call->reply3; |
@@ -1617,11 +1557,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, | |||
1617 | case 7: | 1557 | case 7: |
1618 | ret = afs_extract_data(call, skb, last, call->buffer, | 1558 | ret = afs_extract_data(call, skb, last, call->buffer, |
1619 | call->count); | 1559 | call->count); |
1620 | switch (ret) { | 1560 | if (ret < 0) |
1621 | case 0: break; | 1561 | return ret; |
1622 | case -EAGAIN: return 0; | ||
1623 | default: return ret; | ||
1624 | } | ||
1625 | 1562 | ||
1626 | call->offset = 0; | 1563 | call->offset = 0; |
1627 | call->unmarshall++; | 1564 | call->unmarshall++; |
@@ -1630,11 +1567,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, | |||
1630 | /* extract the message of the day length */ | 1567 | /* extract the message of the day length */ |
1631 | case 8: | 1568 | case 8: |
1632 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); | 1569 | ret = afs_extract_data(call, skb, last, &call->tmp, 4); |
1633 | switch (ret) { | 1570 | if (ret < 0) |
1634 | case 0: break; | 1571 | return ret; |
1635 | case -EAGAIN: return 0; | ||
1636 | default: return ret; | ||
1637 | } | ||
1638 | 1572 | ||
1639 | call->count = ntohl(call->tmp); | 1573 | call->count = ntohl(call->tmp); |
1640 | _debug("motd length: %u", call->count); | 1574 | _debug("motd length: %u", call->count); |
@@ -1649,11 +1583,8 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, | |||
1649 | if (call->count > 0) { | 1583 | if (call->count > 0) { |
1650 | ret = afs_extract_data(call, skb, last, call->reply3, | 1584 | ret = afs_extract_data(call, skb, last, call->reply3, |
1651 | call->count); | 1585 | call->count); |
1652 | switch (ret) { | 1586 | if (ret < 0) |
1653 | case 0: break; | 1587 | return ret; |
1654 | case -EAGAIN: return 0; | ||
1655 | default: return ret; | ||
1656 | } | ||
1657 | } | 1588 | } |
1658 | 1589 | ||
1659 | p = call->reply3; | 1590 | p = call->reply3; |
@@ -1673,26 +1604,20 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call, | |||
1673 | case 10: | 1604 | case 10: |
1674 | ret = afs_extract_data(call, skb, last, call->buffer, | 1605 | ret = afs_extract_data(call, skb, last, call->buffer, |
1675 | call->count); | 1606 | call->count); |
1676 | switch (ret) { | 1607 | if (ret < 0) |
1677 | case 0: break; | 1608 | return ret; |
1678 | case -EAGAIN: return 0; | ||
1679 | default: return ret; | ||
1680 | } | ||
1681 | 1609 | ||
1682 | call->offset = 0; | 1610 | call->offset = 0; |
1683 | call->unmarshall++; | 1611 | call->unmarshall++; |
1684 | no_motd_padding: | 1612 | no_motd_padding: |
1685 | 1613 | ||
1686 | case 11: | 1614 | case 11: |
1687 | _debug("trailer %d", skb->len); | 1615 | ret = afs_data_complete(call, skb, last); |
1688 | if (skb->len != 0) | 1616 | if (ret < 0) |
1689 | return -EBADMSG; | 1617 | return ret; |
1690 | break; | 1618 | break; |
1691 | } | 1619 | } |
1692 | 1620 | ||
1693 | if (!last) | ||
1694 | return 0; | ||
1695 | |||
1696 | _leave(" = 0 [done]"); | 1621 | _leave(" = 0 [done]"); |
1697 | return 0; | 1622 | return 0; |
1698 | } | 1623 | } |
@@ -1764,15 +1689,13 @@ static int afs_deliver_fs_xxxx_lock(struct afs_call *call, | |||
1764 | struct sk_buff *skb, bool last) | 1689 | struct sk_buff *skb, bool last) |
1765 | { | 1690 | { |
1766 | const __be32 *bp; | 1691 | const __be32 *bp; |
1692 | int ret; | ||
1767 | 1693 | ||
1768 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); | 1694 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); |
1769 | 1695 | ||
1770 | afs_transfer_reply(call, skb); | 1696 | ret = afs_transfer_reply(call, skb, last); |
1771 | if (!last) | 1697 | if (ret < 0) |
1772 | return 0; | 1698 | return ret; |
1773 | |||
1774 | if (call->reply_size != call->reply_max) | ||
1775 | return -EBADMSG; | ||
1776 | 1699 | ||
1777 | /* unmarshall the reply once we've received all of it */ | 1700 | /* unmarshall the reply once we've received all of it */ |
1778 | bp = call->buffer; | 1701 | bp = call->buffer; |
diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 71d5982312f3..df976b2a7f40 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h | |||
@@ -609,17 +609,29 @@ extern void afs_proc_cell_remove(struct afs_cell *); | |||
609 | */ | 609 | */ |
610 | extern int afs_open_socket(void); | 610 | extern int afs_open_socket(void); |
611 | extern void afs_close_socket(void); | 611 | extern void afs_close_socket(void); |
612 | extern void afs_data_consumed(struct afs_call *, struct sk_buff *); | ||
612 | extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t, | 613 | extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t, |
613 | const struct afs_wait_mode *); | 614 | const struct afs_wait_mode *); |
614 | extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *, | 615 | extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *, |
615 | size_t, size_t); | 616 | size_t, size_t); |
616 | extern void afs_flat_call_destructor(struct afs_call *); | 617 | extern void afs_flat_call_destructor(struct afs_call *); |
617 | extern void afs_transfer_reply(struct afs_call *, struct sk_buff *); | 618 | extern int afs_transfer_reply(struct afs_call *, struct sk_buff *, bool); |
618 | extern void afs_send_empty_reply(struct afs_call *); | 619 | extern void afs_send_empty_reply(struct afs_call *); |
619 | extern void afs_send_simple_reply(struct afs_call *, const void *, size_t); | 620 | extern void afs_send_simple_reply(struct afs_call *, const void *, size_t); |
620 | extern int afs_extract_data(struct afs_call *, struct sk_buff *, bool, void *, | 621 | extern int afs_extract_data(struct afs_call *, struct sk_buff *, bool, void *, |
621 | size_t); | 622 | size_t); |
622 | 623 | ||
624 | static inline int afs_data_complete(struct afs_call *call, struct sk_buff *skb, | ||
625 | bool last) | ||
626 | { | ||
627 | if (skb->len > 0) | ||
628 | return -EBADMSG; | ||
629 | afs_data_consumed(call, skb); | ||
630 | if (!last) | ||
631 | return -EAGAIN; | ||
632 | return 0; | ||
633 | } | ||
634 | |||
623 | /* | 635 | /* |
624 | * security.c | 636 | * security.c |
625 | */ | 637 | */ |
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 4832de84d52c..14d04c848465 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c | |||
@@ -150,10 +150,9 @@ void afs_close_socket(void) | |||
150 | } | 150 | } |
151 | 151 | ||
152 | /* | 152 | /* |
153 | * note that the data in a socket buffer is now delivered and that the buffer | 153 | * Note that the data in a socket buffer is now consumed. |
154 | * should be freed | ||
155 | */ | 154 | */ |
156 | static void afs_data_delivered(struct sk_buff *skb) | 155 | void afs_data_consumed(struct afs_call *call, struct sk_buff *skb) |
157 | { | 156 | { |
158 | if (!skb) { | 157 | if (!skb) { |
159 | _debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs)); | 158 | _debug("DLVR NULL [%d]", atomic_read(&afs_outstanding_skbs)); |
@@ -161,9 +160,7 @@ static void afs_data_delivered(struct sk_buff *skb) | |||
161 | } else { | 160 | } else { |
162 | _debug("DLVR %p{%u} [%d]", | 161 | _debug("DLVR %p{%u} [%d]", |
163 | skb, skb->mark, atomic_read(&afs_outstanding_skbs)); | 162 | skb, skb->mark, atomic_read(&afs_outstanding_skbs)); |
164 | if (atomic_dec_return(&afs_outstanding_skbs) == -1) | 163 | rxrpc_kernel_data_consumed(call->rxcall, skb); |
165 | BUG(); | ||
166 | rxrpc_kernel_data_delivered(skb); | ||
167 | } | 164 | } |
168 | } | 165 | } |
169 | 166 | ||
@@ -489,9 +486,15 @@ static void afs_deliver_to_call(struct afs_call *call) | |||
489 | last = rxrpc_kernel_is_data_last(skb); | 486 | last = rxrpc_kernel_is_data_last(skb); |
490 | ret = call->type->deliver(call, skb, last); | 487 | ret = call->type->deliver(call, skb, last); |
491 | switch (ret) { | 488 | switch (ret) { |
489 | case -EAGAIN: | ||
490 | if (last) { | ||
491 | _debug("short data"); | ||
492 | goto unmarshal_error; | ||
493 | } | ||
494 | break; | ||
492 | case 0: | 495 | case 0: |
493 | if (last && | 496 | ASSERT(last); |
494 | call->state == AFS_CALL_AWAIT_REPLY) | 497 | if (call->state == AFS_CALL_AWAIT_REPLY) |
495 | call->state = AFS_CALL_COMPLETE; | 498 | call->state = AFS_CALL_COMPLETE; |
496 | break; | 499 | break; |
497 | case -ENOTCONN: | 500 | case -ENOTCONN: |
@@ -501,6 +504,7 @@ static void afs_deliver_to_call(struct afs_call *call) | |||
501 | abort_code = RX_INVALID_OPERATION; | 504 | abort_code = RX_INVALID_OPERATION; |
502 | goto do_abort; | 505 | goto do_abort; |
503 | default: | 506 | default: |
507 | unmarshal_error: | ||
504 | abort_code = RXGEN_CC_UNMARSHAL; | 508 | abort_code = RXGEN_CC_UNMARSHAL; |
505 | if (call->state != AFS_CALL_AWAIT_REPLY) | 509 | if (call->state != AFS_CALL_AWAIT_REPLY) |
506 | abort_code = RXGEN_SS_UNMARSHAL; | 510 | abort_code = RXGEN_SS_UNMARSHAL; |
@@ -511,9 +515,7 @@ static void afs_deliver_to_call(struct afs_call *call) | |||
511 | call->state = AFS_CALL_ERROR; | 515 | call->state = AFS_CALL_ERROR; |
512 | break; | 516 | break; |
513 | } | 517 | } |
514 | afs_data_delivered(skb); | 518 | break; |
515 | skb = NULL; | ||
516 | continue; | ||
517 | case RXRPC_SKB_MARK_FINAL_ACK: | 519 | case RXRPC_SKB_MARK_FINAL_ACK: |
518 | _debug("Rcv ACK"); | 520 | _debug("Rcv ACK"); |
519 | call->state = AFS_CALL_COMPLETE; | 521 | call->state = AFS_CALL_COMPLETE; |
@@ -685,15 +687,35 @@ static void afs_process_async_call(struct afs_call *call) | |||
685 | } | 687 | } |
686 | 688 | ||
687 | /* | 689 | /* |
688 | * empty a socket buffer into a flat reply buffer | 690 | * Empty a socket buffer into a flat reply buffer. |
689 | */ | 691 | */ |
690 | void afs_transfer_reply(struct afs_call *call, struct sk_buff *skb) | 692 | int afs_transfer_reply(struct afs_call *call, struct sk_buff *skb, bool last) |
691 | { | 693 | { |
692 | size_t len = skb->len; | 694 | size_t len = skb->len; |
693 | 695 | ||
694 | if (skb_copy_bits(skb, 0, call->buffer + call->reply_size, len) < 0) | 696 | if (len > call->reply_max - call->reply_size) { |
695 | BUG(); | 697 | _leave(" = -EBADMSG [%zu > %u]", |
696 | call->reply_size += len; | 698 | len, call->reply_max - call->reply_size); |
699 | return -EBADMSG; | ||
700 | } | ||
701 | |||
702 | if (len > 0) { | ||
703 | if (skb_copy_bits(skb, 0, call->buffer + call->reply_size, | ||
704 | len) < 0) | ||
705 | BUG(); | ||
706 | call->reply_size += len; | ||
707 | } | ||
708 | |||
709 | afs_data_consumed(call, skb); | ||
710 | if (!last) | ||
711 | return -EAGAIN; | ||
712 | |||
713 | if (call->reply_size != call->reply_max) { | ||
714 | _leave(" = -EBADMSG [%u != %u]", | ||
715 | call->reply_size, call->reply_max); | ||
716 | return -EBADMSG; | ||
717 | } | ||
718 | return 0; | ||
697 | } | 719 | } |
698 | 720 | ||
699 | /* | 721 | /* |
@@ -745,7 +767,8 @@ static void afs_collect_incoming_call(struct work_struct *work) | |||
745 | } | 767 | } |
746 | 768 | ||
747 | /* | 769 | /* |
748 | * grab the operation ID from an incoming cache manager call | 770 | * Grab the operation ID from an incoming cache manager call. The socket |
771 | * buffer is discarded on error or if we don't yet have sufficient data. | ||
749 | */ | 772 | */ |
750 | static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb, | 773 | static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb, |
751 | bool last) | 774 | bool last) |
@@ -766,12 +789,9 @@ static int afs_deliver_cm_op_id(struct afs_call *call, struct sk_buff *skb, | |||
766 | call->offset += len; | 789 | call->offset += len; |
767 | 790 | ||
768 | if (call->offset < 4) { | 791 | if (call->offset < 4) { |
769 | if (last) { | 792 | afs_data_consumed(call, skb); |
770 | _leave(" = -EBADMSG [op ID short]"); | 793 | _leave(" = -EAGAIN"); |
771 | return -EBADMSG; | 794 | return -EAGAIN; |
772 | } | ||
773 | _leave(" = 0 [incomplete]"); | ||
774 | return 0; | ||
775 | } | 795 | } |
776 | 796 | ||
777 | call->state = AFS_CALL_AWAIT_REQUEST; | 797 | call->state = AFS_CALL_AWAIT_REQUEST; |
@@ -855,7 +875,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len) | |||
855 | } | 875 | } |
856 | 876 | ||
857 | /* | 877 | /* |
858 | * extract a piece of data from the received data socket buffers | 878 | * Extract a piece of data from the received data socket buffers. |
859 | */ | 879 | */ |
860 | int afs_extract_data(struct afs_call *call, struct sk_buff *skb, | 880 | int afs_extract_data(struct afs_call *call, struct sk_buff *skb, |
861 | bool last, void *buf, size_t count) | 881 | bool last, void *buf, size_t count) |
@@ -873,10 +893,7 @@ int afs_extract_data(struct afs_call *call, struct sk_buff *skb, | |||
873 | call->offset += len; | 893 | call->offset += len; |
874 | 894 | ||
875 | if (call->offset < count) { | 895 | if (call->offset < count) { |
876 | if (last) { | 896 | afs_data_consumed(call, skb); |
877 | _leave(" = -EBADMSG [%d < %zu]", call->offset, count); | ||
878 | return -EBADMSG; | ||
879 | } | ||
880 | _leave(" = -EAGAIN"); | 897 | _leave(" = -EAGAIN"); |
881 | return -EAGAIN; | 898 | return -EAGAIN; |
882 | } | 899 | } |
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c index 340afd0cd182..f94d1abdc3eb 100644 --- a/fs/afs/vlclient.c +++ b/fs/afs/vlclient.c | |||
@@ -64,16 +64,13 @@ static int afs_deliver_vl_get_entry_by_xxx(struct afs_call *call, | |||
64 | struct afs_cache_vlocation *entry; | 64 | struct afs_cache_vlocation *entry; |
65 | __be32 *bp; | 65 | __be32 *bp; |
66 | u32 tmp; | 66 | u32 tmp; |
67 | int loop; | 67 | int loop, ret; |
68 | 68 | ||
69 | _enter(",,%u", last); | 69 | _enter(",,%u", last); |
70 | 70 | ||
71 | afs_transfer_reply(call, skb); | 71 | ret = afs_transfer_reply(call, skb, last); |
72 | if (!last) | 72 | if (ret < 0) |
73 | return 0; | 73 | return ret; |
74 | |||
75 | if (call->reply_size != call->reply_max) | ||
76 | return -EBADMSG; | ||
77 | 74 | ||
78 | /* unmarshall the reply once we've received all of it */ | 75 | /* unmarshall the reply once we've received all of it */ |
79 | entry = call->reply; | 76 | entry = call->reply; |
diff --git a/fs/iomap.c b/fs/iomap.c index 48141b8eff5f..0342254646e3 100644 --- a/fs/iomap.c +++ b/fs/iomap.c | |||
@@ -84,8 +84,11 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags, | |||
84 | * Now the data has been copied, commit the range we've copied. This | 84 | * Now the data has been copied, commit the range we've copied. This |
85 | * should not fail unless the filesystem has had a fatal error. | 85 | * should not fail unless the filesystem has had a fatal error. |
86 | */ | 86 | */ |
87 | ret = ops->iomap_end(inode, pos, length, written > 0 ? written : 0, | 87 | if (ops->iomap_end) { |
88 | flags, &iomap); | 88 | ret = ops->iomap_end(inode, pos, length, |
89 | written > 0 ? written : 0, | ||
90 | flags, &iomap); | ||
91 | } | ||
89 | 92 | ||
90 | return written ? written : ret; | 93 | return written ? written : ret; |
91 | } | 94 | } |
@@ -194,12 +197,9 @@ again: | |||
194 | if (mapping_writably_mapped(inode->i_mapping)) | 197 | if (mapping_writably_mapped(inode->i_mapping)) |
195 | flush_dcache_page(page); | 198 | flush_dcache_page(page); |
196 | 199 | ||
197 | pagefault_disable(); | ||
198 | copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); | 200 | copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); |
199 | pagefault_enable(); | ||
200 | 201 | ||
201 | flush_dcache_page(page); | 202 | flush_dcache_page(page); |
202 | mark_page_accessed(page); | ||
203 | 203 | ||
204 | status = iomap_write_end(inode, pos, bytes, copied, page); | 204 | status = iomap_write_end(inode, pos, bytes, copied, page); |
205 | if (unlikely(status < 0)) | 205 | if (unlikely(status < 0)) |
@@ -470,13 +470,18 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi, | |||
470 | if (ret) | 470 | if (ret) |
471 | return ret; | 471 | return ret; |
472 | 472 | ||
473 | ret = filemap_write_and_wait(inode->i_mapping); | 473 | if (fi->fi_flags & FIEMAP_FLAG_SYNC) { |
474 | if (ret) | 474 | ret = filemap_write_and_wait(inode->i_mapping); |
475 | return ret; | 475 | if (ret) |
476 | return ret; | ||
477 | } | ||
476 | 478 | ||
477 | while (len > 0) { | 479 | while (len > 0) { |
478 | ret = iomap_apply(inode, start, len, 0, ops, &ctx, | 480 | ret = iomap_apply(inode, start, len, 0, ops, &ctx, |
479 | iomap_fiemap_actor); | 481 | iomap_fiemap_actor); |
482 | /* inode with no (attribute) mapping will give ENOENT */ | ||
483 | if (ret == -ENOENT) | ||
484 | break; | ||
480 | if (ret < 0) | 485 | if (ret < 0) |
481 | return ret; | 486 | return ret; |
482 | if (ret == 0) | 487 | if (ret == 0) |
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 776ae2f325d1..3dd8f1d54498 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c | |||
@@ -1582,6 +1582,7 @@ xfs_alloc_ag_vextent_small( | |||
1582 | xfs_extlen_t *flenp, /* result length */ | 1582 | xfs_extlen_t *flenp, /* result length */ |
1583 | int *stat) /* status: 0-freelist, 1-normal/none */ | 1583 | int *stat) /* status: 0-freelist, 1-normal/none */ |
1584 | { | 1584 | { |
1585 | struct xfs_owner_info oinfo; | ||
1585 | int error; | 1586 | int error; |
1586 | xfs_agblock_t fbno; | 1587 | xfs_agblock_t fbno; |
1587 | xfs_extlen_t flen; | 1588 | xfs_extlen_t flen; |
@@ -1624,6 +1625,18 @@ xfs_alloc_ag_vextent_small( | |||
1624 | error0); | 1625 | error0); |
1625 | args->wasfromfl = 1; | 1626 | args->wasfromfl = 1; |
1626 | trace_xfs_alloc_small_freelist(args); | 1627 | trace_xfs_alloc_small_freelist(args); |
1628 | |||
1629 | /* | ||
1630 | * If we're feeding an AGFL block to something that | ||
1631 | * doesn't live in the free space, we need to clear | ||
1632 | * out the OWN_AG rmap. | ||
1633 | */ | ||
1634 | xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG); | ||
1635 | error = xfs_rmap_free(args->tp, args->agbp, args->agno, | ||
1636 | fbno, 1, &oinfo); | ||
1637 | if (error) | ||
1638 | goto error0; | ||
1639 | |||
1627 | *stat = 0; | 1640 | *stat = 0; |
1628 | return 0; | 1641 | return 0; |
1629 | } | 1642 | } |
@@ -2264,6 +2277,7 @@ xfs_alloc_log_agf( | |||
2264 | offsetof(xfs_agf_t, agf_longest), | 2277 | offsetof(xfs_agf_t, agf_longest), |
2265 | offsetof(xfs_agf_t, agf_btreeblks), | 2278 | offsetof(xfs_agf_t, agf_btreeblks), |
2266 | offsetof(xfs_agf_t, agf_uuid), | 2279 | offsetof(xfs_agf_t, agf_uuid), |
2280 | offsetof(xfs_agf_t, agf_rmap_blocks), | ||
2267 | sizeof(xfs_agf_t) | 2281 | sizeof(xfs_agf_t) |
2268 | }; | 2282 | }; |
2269 | 2283 | ||
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h index f814d42c73b2..e6a8bea0f7ba 100644 --- a/fs/xfs/libxfs/xfs_format.h +++ b/fs/xfs/libxfs/xfs_format.h | |||
@@ -640,12 +640,15 @@ typedef struct xfs_agf { | |||
640 | __be32 agf_btreeblks; /* # of blocks held in AGF btrees */ | 640 | __be32 agf_btreeblks; /* # of blocks held in AGF btrees */ |
641 | uuid_t agf_uuid; /* uuid of filesystem */ | 641 | uuid_t agf_uuid; /* uuid of filesystem */ |
642 | 642 | ||
643 | __be32 agf_rmap_blocks; /* rmapbt blocks used */ | ||
644 | __be32 agf_padding; /* padding */ | ||
645 | |||
643 | /* | 646 | /* |
644 | * reserve some contiguous space for future logged fields before we add | 647 | * reserve some contiguous space for future logged fields before we add |
645 | * the unlogged fields. This makes the range logging via flags and | 648 | * the unlogged fields. This makes the range logging via flags and |
646 | * structure offsets much simpler. | 649 | * structure offsets much simpler. |
647 | */ | 650 | */ |
648 | __be64 agf_spare64[16]; | 651 | __be64 agf_spare64[15]; |
649 | 652 | ||
650 | /* unlogged fields, written during buffer writeback. */ | 653 | /* unlogged fields, written during buffer writeback. */ |
651 | __be64 agf_lsn; /* last write sequence */ | 654 | __be64 agf_lsn; /* last write sequence */ |
@@ -670,7 +673,8 @@ typedef struct xfs_agf { | |||
670 | #define XFS_AGF_LONGEST 0x00000400 | 673 | #define XFS_AGF_LONGEST 0x00000400 |
671 | #define XFS_AGF_BTREEBLKS 0x00000800 | 674 | #define XFS_AGF_BTREEBLKS 0x00000800 |
672 | #define XFS_AGF_UUID 0x00001000 | 675 | #define XFS_AGF_UUID 0x00001000 |
673 | #define XFS_AGF_NUM_BITS 13 | 676 | #define XFS_AGF_RMAP_BLOCKS 0x00002000 |
677 | #define XFS_AGF_NUM_BITS 14 | ||
674 | #define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1) | 678 | #define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1) |
675 | 679 | ||
676 | #define XFS_AGF_FLAGS \ | 680 | #define XFS_AGF_FLAGS \ |
@@ -686,7 +690,8 @@ typedef struct xfs_agf { | |||
686 | { XFS_AGF_FREEBLKS, "FREEBLKS" }, \ | 690 | { XFS_AGF_FREEBLKS, "FREEBLKS" }, \ |
687 | { XFS_AGF_LONGEST, "LONGEST" }, \ | 691 | { XFS_AGF_LONGEST, "LONGEST" }, \ |
688 | { XFS_AGF_BTREEBLKS, "BTREEBLKS" }, \ | 692 | { XFS_AGF_BTREEBLKS, "BTREEBLKS" }, \ |
689 | { XFS_AGF_UUID, "UUID" } | 693 | { XFS_AGF_UUID, "UUID" }, \ |
694 | { XFS_AGF_RMAP_BLOCKS, "RMAP_BLOCKS" } | ||
690 | 695 | ||
691 | /* disk block (xfs_daddr_t) in the AG */ | 696 | /* disk block (xfs_daddr_t) in the AG */ |
692 | #define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log)) | 697 | #define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log)) |
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c index bc1faebc84ec..17b8eeb34ac8 100644 --- a/fs/xfs/libxfs/xfs_rmap_btree.c +++ b/fs/xfs/libxfs/xfs_rmap_btree.c | |||
@@ -98,6 +98,8 @@ xfs_rmapbt_alloc_block( | |||
98 | union xfs_btree_ptr *new, | 98 | union xfs_btree_ptr *new, |
99 | int *stat) | 99 | int *stat) |
100 | { | 100 | { |
101 | struct xfs_buf *agbp = cur->bc_private.a.agbp; | ||
102 | struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); | ||
101 | int error; | 103 | int error; |
102 | xfs_agblock_t bno; | 104 | xfs_agblock_t bno; |
103 | 105 | ||
@@ -124,6 +126,8 @@ xfs_rmapbt_alloc_block( | |||
124 | 126 | ||
125 | xfs_trans_agbtree_delta(cur->bc_tp, 1); | 127 | xfs_trans_agbtree_delta(cur->bc_tp, 1); |
126 | new->s = cpu_to_be32(bno); | 128 | new->s = cpu_to_be32(bno); |
129 | be32_add_cpu(&agf->agf_rmap_blocks, 1); | ||
130 | xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS); | ||
127 | 131 | ||
128 | XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); | 132 | XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT); |
129 | *stat = 1; | 133 | *stat = 1; |
@@ -143,6 +147,8 @@ xfs_rmapbt_free_block( | |||
143 | bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp)); | 147 | bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp)); |
144 | trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno, | 148 | trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno, |
145 | bno, 1); | 149 | bno, 1); |
150 | be32_add_cpu(&agf->agf_rmap_blocks, -1); | ||
151 | xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_RMAP_BLOCKS); | ||
146 | error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1); | 152 | error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1); |
147 | if (error) | 153 | if (error) |
148 | return error; | 154 | return error; |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 47a318ce82e0..607cc29bba21 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -115,7 +115,6 @@ xfs_buf_ioacct_dec( | |||
115 | if (!(bp->b_flags & _XBF_IN_FLIGHT)) | 115 | if (!(bp->b_flags & _XBF_IN_FLIGHT)) |
116 | return; | 116 | return; |
117 | 117 | ||
118 | ASSERT(bp->b_flags & XBF_ASYNC); | ||
119 | bp->b_flags &= ~_XBF_IN_FLIGHT; | 118 | bp->b_flags &= ~_XBF_IN_FLIGHT; |
120 | percpu_counter_dec(&bp->b_target->bt_io_count); | 119 | percpu_counter_dec(&bp->b_target->bt_io_count); |
121 | } | 120 | } |
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index ed95e5bb04e6..e612a0233710 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
@@ -741,9 +741,20 @@ xfs_file_dax_write( | |||
741 | * page is inserted into the pagecache when we have to serve a write | 741 | * page is inserted into the pagecache when we have to serve a write |
742 | * fault on a hole. It should never be dirtied and can simply be | 742 | * fault on a hole. It should never be dirtied and can simply be |
743 | * dropped from the pagecache once we get real data for the page. | 743 | * dropped from the pagecache once we get real data for the page. |
744 | * | ||
745 | * XXX: This is racy against mmap, and there's nothing we can do about | ||
746 | * it. dax_do_io() should really do this invalidation internally as | ||
747 | * it will know if we've allocated over a holei for this specific IO and | ||
748 | * if so it needs to update the mapping tree and invalidate existing | ||
749 | * PTEs over the newly allocated range. Remove this invalidation when | ||
750 | * dax_do_io() is fixed up. | ||
744 | */ | 751 | */ |
745 | if (mapping->nrpages) { | 752 | if (mapping->nrpages) { |
746 | ret = invalidate_inode_pages2(mapping); | 753 | loff_t end = iocb->ki_pos + iov_iter_count(from) - 1; |
754 | |||
755 | ret = invalidate_inode_pages2_range(mapping, | ||
756 | iocb->ki_pos >> PAGE_SHIFT, | ||
757 | end >> PAGE_SHIFT); | ||
747 | WARN_ON_ONCE(ret); | 758 | WARN_ON_ONCE(ret); |
748 | } | 759 | } |
749 | 760 | ||
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 0f96847b90e1..0b7f986745c1 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c | |||
@@ -248,6 +248,7 @@ xfs_growfs_data_private( | |||
248 | agf->agf_roots[XFS_BTNUM_RMAPi] = | 248 | agf->agf_roots[XFS_BTNUM_RMAPi] = |
249 | cpu_to_be32(XFS_RMAP_BLOCK(mp)); | 249 | cpu_to_be32(XFS_RMAP_BLOCK(mp)); |
250 | agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1); | 250 | agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1); |
251 | agf->agf_rmap_blocks = cpu_to_be32(1); | ||
251 | } | 252 | } |
252 | 253 | ||
253 | agf->agf_flfirst = cpu_to_be32(1); | 254 | agf->agf_flfirst = cpu_to_be32(1); |
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 2114d53df433..2af0dda1c978 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
@@ -715,12 +715,16 @@ xfs_iomap_write_allocate( | |||
715 | * is in the delayed allocation extent on which we sit | 715 | * is in the delayed allocation extent on which we sit |
716 | * but before our buffer starts. | 716 | * but before our buffer starts. |
717 | */ | 717 | */ |
718 | |||
719 | nimaps = 0; | 718 | nimaps = 0; |
720 | while (nimaps == 0) { | 719 | while (nimaps == 0) { |
721 | nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); | 720 | nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); |
722 | 721 | /* | |
723 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, nres, | 722 | * We have already reserved space for the extent and any |
723 | * indirect blocks when creating the delalloc extent, | ||
724 | * there is no need to reserve space in this transaction | ||
725 | * again. | ||
726 | */ | ||
727 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, | ||
724 | 0, XFS_TRANS_RESERVE, &tp); | 728 | 0, XFS_TRANS_RESERVE, &tp); |
725 | if (error) | 729 | if (error) |
726 | return error; | 730 | return error; |
@@ -1037,20 +1041,14 @@ xfs_file_iomap_begin( | |||
1037 | return error; | 1041 | return error; |
1038 | 1042 | ||
1039 | trace_xfs_iomap_alloc(ip, offset, length, 0, &imap); | 1043 | trace_xfs_iomap_alloc(ip, offset, length, 0, &imap); |
1040 | xfs_bmbt_to_iomap(ip, iomap, &imap); | ||
1041 | } else if (nimaps) { | ||
1042 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | ||
1043 | trace_xfs_iomap_found(ip, offset, length, 0, &imap); | ||
1044 | xfs_bmbt_to_iomap(ip, iomap, &imap); | ||
1045 | } else { | 1044 | } else { |
1045 | ASSERT(nimaps); | ||
1046 | |||
1046 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 1047 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
1047 | trace_xfs_iomap_not_found(ip, offset, length, 0, &imap); | 1048 | trace_xfs_iomap_found(ip, offset, length, 0, &imap); |
1048 | iomap->blkno = IOMAP_NULL_BLOCK; | ||
1049 | iomap->type = IOMAP_HOLE; | ||
1050 | iomap->offset = offset; | ||
1051 | iomap->length = length; | ||
1052 | } | 1049 | } |
1053 | 1050 | ||
1051 | xfs_bmbt_to_iomap(ip, iomap, &imap); | ||
1054 | return 0; | 1052 | return 0; |
1055 | } | 1053 | } |
1056 | 1054 | ||
@@ -1112,3 +1110,48 @@ struct iomap_ops xfs_iomap_ops = { | |||
1112 | .iomap_begin = xfs_file_iomap_begin, | 1110 | .iomap_begin = xfs_file_iomap_begin, |
1113 | .iomap_end = xfs_file_iomap_end, | 1111 | .iomap_end = xfs_file_iomap_end, |
1114 | }; | 1112 | }; |
1113 | |||
1114 | static int | ||
1115 | xfs_xattr_iomap_begin( | ||
1116 | struct inode *inode, | ||
1117 | loff_t offset, | ||
1118 | loff_t length, | ||
1119 | unsigned flags, | ||
1120 | struct iomap *iomap) | ||
1121 | { | ||
1122 | struct xfs_inode *ip = XFS_I(inode); | ||
1123 | struct xfs_mount *mp = ip->i_mount; | ||
1124 | xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); | ||
1125 | xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + length); | ||
1126 | struct xfs_bmbt_irec imap; | ||
1127 | int nimaps = 1, error = 0; | ||
1128 | unsigned lockmode; | ||
1129 | |||
1130 | if (XFS_FORCED_SHUTDOWN(mp)) | ||
1131 | return -EIO; | ||
1132 | |||
1133 | lockmode = xfs_ilock_data_map_shared(ip); | ||
1134 | |||
1135 | /* if there are no attribute fork or extents, return ENOENT */ | ||
1136 | if (XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) { | ||
1137 | error = -ENOENT; | ||
1138 | goto out_unlock; | ||
1139 | } | ||
1140 | |||
1141 | ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL); | ||
1142 | error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, | ||
1143 | &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK); | ||
1144 | out_unlock: | ||
1145 | xfs_iunlock(ip, lockmode); | ||
1146 | |||
1147 | if (!error) { | ||
1148 | ASSERT(nimaps); | ||
1149 | xfs_bmbt_to_iomap(ip, iomap, &imap); | ||
1150 | } | ||
1151 | |||
1152 | return error; | ||
1153 | } | ||
1154 | |||
1155 | struct iomap_ops xfs_xattr_iomap_ops = { | ||
1156 | .iomap_begin = xfs_xattr_iomap_begin, | ||
1157 | }; | ||
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h index e066d045e2ff..fb8aca3d69ab 100644 --- a/fs/xfs/xfs_iomap.h +++ b/fs/xfs/xfs_iomap.h | |||
@@ -35,5 +35,6 @@ void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *, | |||
35 | struct xfs_bmbt_irec *); | 35 | struct xfs_bmbt_irec *); |
36 | 36 | ||
37 | extern struct iomap_ops xfs_iomap_ops; | 37 | extern struct iomap_ops xfs_iomap_ops; |
38 | extern struct iomap_ops xfs_xattr_iomap_ops; | ||
38 | 39 | ||
39 | #endif /* __XFS_IOMAP_H__*/ | 40 | #endif /* __XFS_IOMAP_H__*/ |
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index ab820f84ed50..b24c3102fa93 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c | |||
@@ -1009,7 +1009,14 @@ xfs_vn_fiemap( | |||
1009 | int error; | 1009 | int error; |
1010 | 1010 | ||
1011 | xfs_ilock(XFS_I(inode), XFS_IOLOCK_SHARED); | 1011 | xfs_ilock(XFS_I(inode), XFS_IOLOCK_SHARED); |
1012 | error = iomap_fiemap(inode, fieinfo, start, length, &xfs_iomap_ops); | 1012 | if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) { |
1013 | fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR; | ||
1014 | error = iomap_fiemap(inode, fieinfo, start, length, | ||
1015 | &xfs_xattr_iomap_ops); | ||
1016 | } else { | ||
1017 | error = iomap_fiemap(inode, fieinfo, start, length, | ||
1018 | &xfs_iomap_ops); | ||
1019 | } | ||
1013 | xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED); | 1020 | xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED); |
1014 | 1021 | ||
1015 | return error; | 1022 | return error; |
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 551b7e26980c..7e88bec3f359 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h | |||
@@ -1298,7 +1298,6 @@ DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc); | |||
1298 | DEFINE_IOMAP_EVENT(xfs_get_blocks_map_direct); | 1298 | DEFINE_IOMAP_EVENT(xfs_get_blocks_map_direct); |
1299 | DEFINE_IOMAP_EVENT(xfs_iomap_alloc); | 1299 | DEFINE_IOMAP_EVENT(xfs_iomap_alloc); |
1300 | DEFINE_IOMAP_EVENT(xfs_iomap_found); | 1300 | DEFINE_IOMAP_EVENT(xfs_iomap_found); |
1301 | DEFINE_IOMAP_EVENT(xfs_iomap_not_found); | ||
1302 | 1301 | ||
1303 | DECLARE_EVENT_CLASS(xfs_simple_io_class, | 1302 | DECLARE_EVENT_CLASS(xfs_simple_io_class, |
1304 | TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), | 1303 | TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 1bb954842725..436aa4e42221 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -527,13 +527,13 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
527 | * object's lifetime is managed by something other than RCU. That | 527 | * object's lifetime is managed by something other than RCU. That |
528 | * "something other" might be reference counting or simple immortality. | 528 | * "something other" might be reference counting or simple immortality. |
529 | * | 529 | * |
530 | * The seemingly unused void * variable is to validate @p is indeed a pointer | 530 | * The seemingly unused size_t variable is to validate @p is indeed a pointer |
531 | * type. All pointer types silently cast to void *. | 531 | * type by making sure it can be dereferenced. |
532 | */ | 532 | */ |
533 | #define lockless_dereference(p) \ | 533 | #define lockless_dereference(p) \ |
534 | ({ \ | 534 | ({ \ |
535 | typeof(p) _________p1 = READ_ONCE(p); \ | 535 | typeof(p) _________p1 = READ_ONCE(p); \ |
536 | __maybe_unused const void * const _________p2 = _________p1; \ | 536 | size_t __maybe_unused __size_of_ptr = sizeof(*(p)); \ |
537 | smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ | 537 | smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ |
538 | (_________p1); \ | 538 | (_________p1); \ |
539 | }) | 539 | }) |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 076df5360ba5..3a788bf0affd 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -3891,8 +3891,7 @@ void netdev_default_l2upper_neigh_destroy(struct net_device *dev, | |||
3891 | extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; | 3891 | extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly; |
3892 | void netdev_rss_key_fill(void *buffer, size_t len); | 3892 | void netdev_rss_key_fill(void *buffer, size_t len); |
3893 | 3893 | ||
3894 | int dev_get_nest_level(struct net_device *dev, | 3894 | int dev_get_nest_level(struct net_device *dev); |
3895 | bool (*type_check)(const struct net_device *dev)); | ||
3896 | int skb_checksum_help(struct sk_buff *skb); | 3895 | int skb_checksum_help(struct sk_buff *skb); |
3897 | struct sk_buff *__skb_gso_segment(struct sk_buff *skb, | 3896 | struct sk_buff *__skb_gso_segment(struct sk_buff *skb, |
3898 | netdev_features_t features, bool tx_path); | 3897 | netdev_features_t features, bool tx_path); |
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index b1e3c57c7117..d6c4177df7cb 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h | |||
@@ -70,8 +70,16 @@ struct qed_dbcx_pfc_params { | |||
70 | u8 max_tc; | 70 | u8 max_tc; |
71 | }; | 71 | }; |
72 | 72 | ||
73 | enum qed_dcbx_sf_ieee_type { | ||
74 | QED_DCBX_SF_IEEE_ETHTYPE, | ||
75 | QED_DCBX_SF_IEEE_TCP_PORT, | ||
76 | QED_DCBX_SF_IEEE_UDP_PORT, | ||
77 | QED_DCBX_SF_IEEE_TCP_UDP_PORT | ||
78 | }; | ||
79 | |||
73 | struct qed_app_entry { | 80 | struct qed_app_entry { |
74 | bool ethtype; | 81 | bool ethtype; |
82 | enum qed_dcbx_sf_ieee_type sf_ieee; | ||
75 | bool enabled; | 83 | bool enabled; |
76 | u8 prio; | 84 | u8 prio; |
77 | u16 proto_id; | 85 | u16 proto_id; |
diff --git a/include/linux/sctp.h b/include/linux/sctp.h index de1f64318fc4..fcb4c3646173 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h | |||
@@ -705,70 +705,6 @@ typedef struct sctp_auth_chunk { | |||
705 | sctp_authhdr_t auth_hdr; | 705 | sctp_authhdr_t auth_hdr; |
706 | } __packed sctp_auth_chunk_t; | 706 | } __packed sctp_auth_chunk_t; |
707 | 707 | ||
708 | struct sctp_info { | ||
709 | __u32 sctpi_tag; | ||
710 | __u32 sctpi_state; | ||
711 | __u32 sctpi_rwnd; | ||
712 | __u16 sctpi_unackdata; | ||
713 | __u16 sctpi_penddata; | ||
714 | __u16 sctpi_instrms; | ||
715 | __u16 sctpi_outstrms; | ||
716 | __u32 sctpi_fragmentation_point; | ||
717 | __u32 sctpi_inqueue; | ||
718 | __u32 sctpi_outqueue; | ||
719 | __u32 sctpi_overall_error; | ||
720 | __u32 sctpi_max_burst; | ||
721 | __u32 sctpi_maxseg; | ||
722 | __u32 sctpi_peer_rwnd; | ||
723 | __u32 sctpi_peer_tag; | ||
724 | __u8 sctpi_peer_capable; | ||
725 | __u8 sctpi_peer_sack; | ||
726 | __u16 __reserved1; | ||
727 | |||
728 | /* assoc status info */ | ||
729 | __u64 sctpi_isacks; | ||
730 | __u64 sctpi_osacks; | ||
731 | __u64 sctpi_opackets; | ||
732 | __u64 sctpi_ipackets; | ||
733 | __u64 sctpi_rtxchunks; | ||
734 | __u64 sctpi_outofseqtsns; | ||
735 | __u64 sctpi_idupchunks; | ||
736 | __u64 sctpi_gapcnt; | ||
737 | __u64 sctpi_ouodchunks; | ||
738 | __u64 sctpi_iuodchunks; | ||
739 | __u64 sctpi_oodchunks; | ||
740 | __u64 sctpi_iodchunks; | ||
741 | __u64 sctpi_octrlchunks; | ||
742 | __u64 sctpi_ictrlchunks; | ||
743 | |||
744 | /* primary transport info */ | ||
745 | struct sockaddr_storage sctpi_p_address; | ||
746 | __s32 sctpi_p_state; | ||
747 | __u32 sctpi_p_cwnd; | ||
748 | __u32 sctpi_p_srtt; | ||
749 | __u32 sctpi_p_rto; | ||
750 | __u32 sctpi_p_hbinterval; | ||
751 | __u32 sctpi_p_pathmaxrxt; | ||
752 | __u32 sctpi_p_sackdelay; | ||
753 | __u32 sctpi_p_sackfreq; | ||
754 | __u32 sctpi_p_ssthresh; | ||
755 | __u32 sctpi_p_partial_bytes_acked; | ||
756 | __u32 sctpi_p_flight_size; | ||
757 | __u16 sctpi_p_error; | ||
758 | __u16 __reserved2; | ||
759 | |||
760 | /* sctp sock info */ | ||
761 | __u32 sctpi_s_autoclose; | ||
762 | __u32 sctpi_s_adaptation_ind; | ||
763 | __u32 sctpi_s_pd_point; | ||
764 | __u8 sctpi_s_nodelay; | ||
765 | __u8 sctpi_s_disable_fragments; | ||
766 | __u8 sctpi_s_v4mapped; | ||
767 | __u8 sctpi_s_frag_interleave; | ||
768 | __u32 sctpi_s_type; | ||
769 | __u32 __reserved3; | ||
770 | }; | ||
771 | |||
772 | struct sctp_infox { | 708 | struct sctp_infox { |
773 | struct sctp_info *sctpinfo; | 709 | struct sctp_info *sctpinfo; |
774 | struct sctp_association *asoc; | 710 | struct sctp_association *asoc; |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6f0b3e0adc73..0f665cb26b50 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -2847,6 +2847,18 @@ static inline int skb_linearize_cow(struct sk_buff *skb) | |||
2847 | __skb_linearize(skb) : 0; | 2847 | __skb_linearize(skb) : 0; |
2848 | } | 2848 | } |
2849 | 2849 | ||
2850 | static __always_inline void | ||
2851 | __skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len, | ||
2852 | unsigned int off) | ||
2853 | { | ||
2854 | if (skb->ip_summed == CHECKSUM_COMPLETE) | ||
2855 | skb->csum = csum_block_sub(skb->csum, | ||
2856 | csum_partial(start, len, 0), off); | ||
2857 | else if (skb->ip_summed == CHECKSUM_PARTIAL && | ||
2858 | skb_checksum_start_offset(skb) < 0) | ||
2859 | skb->ip_summed = CHECKSUM_NONE; | ||
2860 | } | ||
2861 | |||
2850 | /** | 2862 | /** |
2851 | * skb_postpull_rcsum - update checksum for received skb after pull | 2863 | * skb_postpull_rcsum - update checksum for received skb after pull |
2852 | * @skb: buffer to update | 2864 | * @skb: buffer to update |
@@ -2857,36 +2869,38 @@ static inline int skb_linearize_cow(struct sk_buff *skb) | |||
2857 | * update the CHECKSUM_COMPLETE checksum, or set ip_summed to | 2869 | * update the CHECKSUM_COMPLETE checksum, or set ip_summed to |
2858 | * CHECKSUM_NONE so that it can be recomputed from scratch. | 2870 | * CHECKSUM_NONE so that it can be recomputed from scratch. |
2859 | */ | 2871 | */ |
2860 | |||
2861 | static inline void skb_postpull_rcsum(struct sk_buff *skb, | 2872 | static inline void skb_postpull_rcsum(struct sk_buff *skb, |
2862 | const void *start, unsigned int len) | 2873 | const void *start, unsigned int len) |
2863 | { | 2874 | { |
2864 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 2875 | __skb_postpull_rcsum(skb, start, len, 0); |
2865 | skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); | ||
2866 | else if (skb->ip_summed == CHECKSUM_PARTIAL && | ||
2867 | skb_checksum_start_offset(skb) < 0) | ||
2868 | skb->ip_summed = CHECKSUM_NONE; | ||
2869 | } | 2876 | } |
2870 | 2877 | ||
2871 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); | 2878 | static __always_inline void |
2879 | __skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len, | ||
2880 | unsigned int off) | ||
2881 | { | ||
2882 | if (skb->ip_summed == CHECKSUM_COMPLETE) | ||
2883 | skb->csum = csum_block_add(skb->csum, | ||
2884 | csum_partial(start, len, 0), off); | ||
2885 | } | ||
2872 | 2886 | ||
2887 | /** | ||
2888 | * skb_postpush_rcsum - update checksum for received skb after push | ||
2889 | * @skb: buffer to update | ||
2890 | * @start: start of data after push | ||
2891 | * @len: length of data pushed | ||
2892 | * | ||
2893 | * After doing a push on a received packet, you need to call this to | ||
2894 | * update the CHECKSUM_COMPLETE checksum. | ||
2895 | */ | ||
2873 | static inline void skb_postpush_rcsum(struct sk_buff *skb, | 2896 | static inline void skb_postpush_rcsum(struct sk_buff *skb, |
2874 | const void *start, unsigned int len) | 2897 | const void *start, unsigned int len) |
2875 | { | 2898 | { |
2876 | /* For performing the reverse operation to skb_postpull_rcsum(), | 2899 | __skb_postpush_rcsum(skb, start, len, 0); |
2877 | * we can instead of ... | ||
2878 | * | ||
2879 | * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0)); | ||
2880 | * | ||
2881 | * ... just use this equivalent version here to save a few | ||
2882 | * instructions. Feeding csum of 0 in csum_partial() and later | ||
2883 | * on adding skb->csum is equivalent to feed skb->csum in the | ||
2884 | * first place. | ||
2885 | */ | ||
2886 | if (skb->ip_summed == CHECKSUM_COMPLETE) | ||
2887 | skb->csum = csum_partial(start, len, skb->csum); | ||
2888 | } | 2900 | } |
2889 | 2901 | ||
2902 | unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); | ||
2903 | |||
2890 | /** | 2904 | /** |
2891 | * skb_push_rcsum - push skb and update receive checksum | 2905 | * skb_push_rcsum - push skb and update receive checksum |
2892 | * @skb: buffer to update | 2906 | * @skb: buffer to update |
diff --git a/include/net/act_api.h b/include/net/act_api.h index 41e6a24a44b9..82f3c912a5b1 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h | |||
@@ -176,8 +176,8 @@ int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops); | |||
176 | int tcf_unregister_action(struct tc_action_ops *a, | 176 | int tcf_unregister_action(struct tc_action_ops *a, |
177 | struct pernet_operations *ops); | 177 | struct pernet_operations *ops); |
178 | int tcf_action_destroy(struct list_head *actions, int bind); | 178 | int tcf_action_destroy(struct list_head *actions, int bind); |
179 | int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions, | 179 | int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, |
180 | struct tcf_result *res); | 180 | int nr_actions, struct tcf_result *res); |
181 | int tcf_action_init(struct net *net, struct nlattr *nla, | 181 | int tcf_action_init(struct net *net, struct nlattr *nla, |
182 | struct nlattr *est, char *n, int ovr, | 182 | struct nlattr *est, char *n, int ovr, |
183 | int bind, struct list_head *); | 183 | int bind, struct list_head *); |
@@ -189,30 +189,17 @@ int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int); | |||
189 | int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); | 189 | int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int); |
190 | int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); | 190 | int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int); |
191 | 191 | ||
192 | #define tc_no_actions(_exts) \ | 192 | #endif /* CONFIG_NET_CLS_ACT */ |
193 | (list_empty(&(_exts)->actions)) | ||
194 | |||
195 | #define tc_for_each_action(_a, _exts) \ | ||
196 | list_for_each_entry(a, &(_exts)->actions, list) | ||
197 | |||
198 | #define tc_single_action(_exts) \ | ||
199 | (list_is_singular(&(_exts)->actions)) | ||
200 | 193 | ||
201 | static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, | 194 | static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes, |
202 | u64 packets, u64 lastuse) | 195 | u64 packets, u64 lastuse) |
203 | { | 196 | { |
197 | #ifdef CONFIG_NET_CLS_ACT | ||
204 | if (!a->ops->stats_update) | 198 | if (!a->ops->stats_update) |
205 | return; | 199 | return; |
206 | 200 | ||
207 | a->ops->stats_update(a, bytes, packets, lastuse); | 201 | a->ops->stats_update(a, bytes, packets, lastuse); |
202 | #endif | ||
208 | } | 203 | } |
209 | 204 | ||
210 | #else /* CONFIG_NET_CLS_ACT */ | ||
211 | |||
212 | #define tc_no_actions(_exts) true | ||
213 | #define tc_for_each_action(_a, _exts) while ((void)(_a), 0) | ||
214 | #define tc_single_action(_exts) false | ||
215 | #define tcf_action_stats_update(a, bytes, packets, lastuse) | ||
216 | |||
217 | #endif /* CONFIG_NET_CLS_ACT */ | ||
218 | #endif | 205 | #endif |
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index ac1bc3c49fbd..7b0f88699b25 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h | |||
@@ -40,12 +40,12 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *, | |||
40 | unsigned long, | 40 | unsigned long, |
41 | gfp_t); | 41 | gfp_t); |
42 | int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t); | 42 | int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t); |
43 | void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *); | ||
43 | void rxrpc_kernel_abort_call(struct rxrpc_call *, u32); | 44 | void rxrpc_kernel_abort_call(struct rxrpc_call *, u32); |
44 | void rxrpc_kernel_end_call(struct rxrpc_call *); | 45 | void rxrpc_kernel_end_call(struct rxrpc_call *); |
45 | bool rxrpc_kernel_is_data_last(struct sk_buff *); | 46 | bool rxrpc_kernel_is_data_last(struct sk_buff *); |
46 | u32 rxrpc_kernel_get_abort_code(struct sk_buff *); | 47 | u32 rxrpc_kernel_get_abort_code(struct sk_buff *); |
47 | int rxrpc_kernel_get_error_number(struct sk_buff *); | 48 | int rxrpc_kernel_get_error_number(struct sk_buff *); |
48 | void rxrpc_kernel_data_delivered(struct sk_buff *); | ||
49 | void rxrpc_kernel_free_skb(struct sk_buff *); | 49 | void rxrpc_kernel_free_skb(struct sk_buff *); |
50 | struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long); | 50 | struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long); |
51 | int rxrpc_kernel_reject_call(struct socket *); | 51 | int rxrpc_kernel_reject_call(struct socket *); |
diff --git a/include/net/gre.h b/include/net/gre.h index 7a54a31d1d4c..73ea256eb7d7 100644 --- a/include/net/gre.h +++ b/include/net/gre.h | |||
@@ -104,6 +104,7 @@ static inline void gre_build_header(struct sk_buff *skb, int hdr_len, | |||
104 | 104 | ||
105 | skb_push(skb, hdr_len); | 105 | skb_push(skb, hdr_len); |
106 | 106 | ||
107 | skb_set_inner_protocol(skb, proto); | ||
107 | skb_reset_transport_header(skb); | 108 | skb_reset_transport_header(skb); |
108 | greh = (struct gre_base_hdr *)skb->data; | 109 | greh = (struct gre_base_hdr *)skb->data; |
109 | greh->flags = gre_tnl_flags_to_gre_flags(flags); | 110 | greh->flags = gre_tnl_flags_to_gre_flags(flags); |
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 0dc0a51da38f..dce2d586d9ce 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h | |||
@@ -128,7 +128,8 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph) | |||
128 | to = from | htonl(INET_ECN_CE << 20); | 128 | to = from | htonl(INET_ECN_CE << 20); |
129 | *(__be32 *)iph = to; | 129 | *(__be32 *)iph = to; |
130 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 130 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
131 | skb->csum = csum_add(csum_sub(skb->csum, from), to); | 131 | skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from), |
132 | (__force __wsum)to); | ||
132 | return 1; | 133 | return 1; |
133 | } | 134 | } |
134 | 135 | ||
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index b4faadbb4e01..cca510a585c3 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
@@ -3620,7 +3620,8 @@ struct ieee80211_ops { | |||
3620 | 3620 | ||
3621 | int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); | 3621 | int (*join_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); |
3622 | void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); | 3622 | void (*leave_ibss)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); |
3623 | u32 (*get_expected_throughput)(struct ieee80211_sta *sta); | 3623 | u32 (*get_expected_throughput)(struct ieee80211_hw *hw, |
3624 | struct ieee80211_sta *sta); | ||
3624 | int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | 3625 | int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
3625 | int *dbm); | 3626 | int *dbm); |
3626 | 3627 | ||
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 6f8d65342d3a..c99508d426cc 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h | |||
@@ -59,7 +59,8 @@ tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r) | |||
59 | struct tcf_exts { | 59 | struct tcf_exts { |
60 | #ifdef CONFIG_NET_CLS_ACT | 60 | #ifdef CONFIG_NET_CLS_ACT |
61 | __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ | 61 | __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ |
62 | struct list_head actions; | 62 | int nr_actions; |
63 | struct tc_action **actions; | ||
63 | #endif | 64 | #endif |
64 | /* Map to export classifier specific extension TLV types to the | 65 | /* Map to export classifier specific extension TLV types to the |
65 | * generic extensions API. Unsupported extensions must be set to 0. | 66 | * generic extensions API. Unsupported extensions must be set to 0. |
@@ -72,7 +73,10 @@ static inline void tcf_exts_init(struct tcf_exts *exts, int action, int police) | |||
72 | { | 73 | { |
73 | #ifdef CONFIG_NET_CLS_ACT | 74 | #ifdef CONFIG_NET_CLS_ACT |
74 | exts->type = 0; | 75 | exts->type = 0; |
75 | INIT_LIST_HEAD(&exts->actions); | 76 | exts->nr_actions = 0; |
77 | exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *), | ||
78 | GFP_KERNEL); | ||
79 | WARN_ON(!exts->actions); /* TODO: propagate the error to callers */ | ||
76 | #endif | 80 | #endif |
77 | exts->action = action; | 81 | exts->action = action; |
78 | exts->police = police; | 82 | exts->police = police; |
@@ -89,7 +93,7 @@ static inline int | |||
89 | tcf_exts_is_predicative(struct tcf_exts *exts) | 93 | tcf_exts_is_predicative(struct tcf_exts *exts) |
90 | { | 94 | { |
91 | #ifdef CONFIG_NET_CLS_ACT | 95 | #ifdef CONFIG_NET_CLS_ACT |
92 | return !list_empty(&exts->actions); | 96 | return exts->nr_actions; |
93 | #else | 97 | #else |
94 | return 0; | 98 | return 0; |
95 | #endif | 99 | #endif |
@@ -108,6 +112,20 @@ tcf_exts_is_available(struct tcf_exts *exts) | |||
108 | return tcf_exts_is_predicative(exts); | 112 | return tcf_exts_is_predicative(exts); |
109 | } | 113 | } |
110 | 114 | ||
115 | static inline void tcf_exts_to_list(const struct tcf_exts *exts, | ||
116 | struct list_head *actions) | ||
117 | { | ||
118 | #ifdef CONFIG_NET_CLS_ACT | ||
119 | int i; | ||
120 | |||
121 | for (i = 0; i < exts->nr_actions; i++) { | ||
122 | struct tc_action *a = exts->actions[i]; | ||
123 | |||
124 | list_add(&a->list, actions); | ||
125 | } | ||
126 | #endif | ||
127 | } | ||
128 | |||
111 | /** | 129 | /** |
112 | * tcf_exts_exec - execute tc filter extensions | 130 | * tcf_exts_exec - execute tc filter extensions |
113 | * @skb: socket buffer | 131 | * @skb: socket buffer |
@@ -124,12 +142,25 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts, | |||
124 | struct tcf_result *res) | 142 | struct tcf_result *res) |
125 | { | 143 | { |
126 | #ifdef CONFIG_NET_CLS_ACT | 144 | #ifdef CONFIG_NET_CLS_ACT |
127 | if (!list_empty(&exts->actions)) | 145 | if (exts->nr_actions) |
128 | return tcf_action_exec(skb, &exts->actions, res); | 146 | return tcf_action_exec(skb, exts->actions, exts->nr_actions, |
147 | res); | ||
129 | #endif | 148 | #endif |
130 | return 0; | 149 | return 0; |
131 | } | 150 | } |
132 | 151 | ||
152 | #ifdef CONFIG_NET_CLS_ACT | ||
153 | |||
154 | #define tc_no_actions(_exts) ((_exts)->nr_actions == 0) | ||
155 | #define tc_single_action(_exts) ((_exts)->nr_actions == 1) | ||
156 | |||
157 | #else /* CONFIG_NET_CLS_ACT */ | ||
158 | |||
159 | #define tc_no_actions(_exts) true | ||
160 | #define tc_single_action(_exts) false | ||
161 | |||
162 | #endif /* CONFIG_NET_CLS_ACT */ | ||
163 | |||
133 | int tcf_exts_validate(struct net *net, struct tcf_proto *tp, | 164 | int tcf_exts_validate(struct net *net, struct tcf_proto *tp, |
134 | struct nlattr **tb, struct nlattr *rate_tlv, | 165 | struct nlattr **tb, struct nlattr *rate_tlv, |
135 | struct tcf_exts *exts, bool ovr); | 166 | struct tcf_exts *exts, bool ovr); |
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index da218fec6056..9e5fc168c8a3 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h | |||
@@ -339,7 +339,7 @@ enum bpf_func_id { | |||
339 | BPF_FUNC_skb_change_type, | 339 | BPF_FUNC_skb_change_type, |
340 | 340 | ||
341 | /** | 341 | /** |
342 | * bpf_skb_in_cgroup(skb, map, index) - Check cgroup2 membership of skb | 342 | * bpf_skb_under_cgroup(skb, map, index) - Check cgroup2 membership of skb |
343 | * @skb: pointer to skb | 343 | * @skb: pointer to skb |
344 | * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type | 344 | * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type |
345 | * @index: index of the cgroup in the bpf_map | 345 | * @index: index of the cgroup in the bpf_map |
@@ -348,7 +348,7 @@ enum bpf_func_id { | |||
348 | * == 1 skb succeeded the cgroup2 descendant test | 348 | * == 1 skb succeeded the cgroup2 descendant test |
349 | * < 0 error | 349 | * < 0 error |
350 | */ | 350 | */ |
351 | BPF_FUNC_skb_in_cgroup, | 351 | BPF_FUNC_skb_under_cgroup, |
352 | 352 | ||
353 | /** | 353 | /** |
354 | * bpf_get_hash_recalc(skb) | 354 | * bpf_get_hash_recalc(skb) |
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 01751faccaf8..c674ba2563b7 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h | |||
@@ -24,7 +24,7 @@ enum nft_registers { | |||
24 | __NFT_REG_MAX, | 24 | __NFT_REG_MAX, |
25 | 25 | ||
26 | NFT_REG32_00 = 8, | 26 | NFT_REG32_00 = 8, |
27 | MFT_REG32_01, | 27 | NFT_REG32_01, |
28 | NFT_REG32_02, | 28 | NFT_REG32_02, |
29 | NFT_REG32_03, | 29 | NFT_REG32_03, |
30 | NFT_REG32_04, | 30 | NFT_REG32_04, |
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index d304f4c9792c..a406adcc0793 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h | |||
@@ -944,4 +944,68 @@ struct sctp_default_prinfo { | |||
944 | __u16 pr_policy; | 944 | __u16 pr_policy; |
945 | }; | 945 | }; |
946 | 946 | ||
947 | struct sctp_info { | ||
948 | __u32 sctpi_tag; | ||
949 | __u32 sctpi_state; | ||
950 | __u32 sctpi_rwnd; | ||
951 | __u16 sctpi_unackdata; | ||
952 | __u16 sctpi_penddata; | ||
953 | __u16 sctpi_instrms; | ||
954 | __u16 sctpi_outstrms; | ||
955 | __u32 sctpi_fragmentation_point; | ||
956 | __u32 sctpi_inqueue; | ||
957 | __u32 sctpi_outqueue; | ||
958 | __u32 sctpi_overall_error; | ||
959 | __u32 sctpi_max_burst; | ||
960 | __u32 sctpi_maxseg; | ||
961 | __u32 sctpi_peer_rwnd; | ||
962 | __u32 sctpi_peer_tag; | ||
963 | __u8 sctpi_peer_capable; | ||
964 | __u8 sctpi_peer_sack; | ||
965 | __u16 __reserved1; | ||
966 | |||
967 | /* assoc status info */ | ||
968 | __u64 sctpi_isacks; | ||
969 | __u64 sctpi_osacks; | ||
970 | __u64 sctpi_opackets; | ||
971 | __u64 sctpi_ipackets; | ||
972 | __u64 sctpi_rtxchunks; | ||
973 | __u64 sctpi_outofseqtsns; | ||
974 | __u64 sctpi_idupchunks; | ||
975 | __u64 sctpi_gapcnt; | ||
976 | __u64 sctpi_ouodchunks; | ||
977 | __u64 sctpi_iuodchunks; | ||
978 | __u64 sctpi_oodchunks; | ||
979 | __u64 sctpi_iodchunks; | ||
980 | __u64 sctpi_octrlchunks; | ||
981 | __u64 sctpi_ictrlchunks; | ||
982 | |||
983 | /* primary transport info */ | ||
984 | struct sockaddr_storage sctpi_p_address; | ||
985 | __s32 sctpi_p_state; | ||
986 | __u32 sctpi_p_cwnd; | ||
987 | __u32 sctpi_p_srtt; | ||
988 | __u32 sctpi_p_rto; | ||
989 | __u32 sctpi_p_hbinterval; | ||
990 | __u32 sctpi_p_pathmaxrxt; | ||
991 | __u32 sctpi_p_sackdelay; | ||
992 | __u32 sctpi_p_sackfreq; | ||
993 | __u32 sctpi_p_ssthresh; | ||
994 | __u32 sctpi_p_partial_bytes_acked; | ||
995 | __u32 sctpi_p_flight_size; | ||
996 | __u16 sctpi_p_error; | ||
997 | __u16 __reserved2; | ||
998 | |||
999 | /* sctp sock info */ | ||
1000 | __u32 sctpi_s_autoclose; | ||
1001 | __u32 sctpi_s_adaptation_ind; | ||
1002 | __u32 sctpi_s_pd_point; | ||
1003 | __u8 sctpi_s_nodelay; | ||
1004 | __u8 sctpi_s_disable_fragments; | ||
1005 | __u8 sctpi_s_v4mapped; | ||
1006 | __u8 sctpi_s_frag_interleave; | ||
1007 | __u32 sctpi_s_type; | ||
1008 | __u32 __reserved3; | ||
1009 | }; | ||
1010 | |||
947 | #endif /* _UAPI_SCTP_H */ | 1011 | #endif /* _UAPI_SCTP_H */ |
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index fff3650d52fc..570eeca7bdfa 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
@@ -26,11 +26,18 @@ struct bpf_htab { | |||
26 | struct bucket *buckets; | 26 | struct bucket *buckets; |
27 | void *elems; | 27 | void *elems; |
28 | struct pcpu_freelist freelist; | 28 | struct pcpu_freelist freelist; |
29 | void __percpu *extra_elems; | ||
29 | atomic_t count; /* number of elements in this hashtable */ | 30 | atomic_t count; /* number of elements in this hashtable */ |
30 | u32 n_buckets; /* number of hash buckets */ | 31 | u32 n_buckets; /* number of hash buckets */ |
31 | u32 elem_size; /* size of each element in bytes */ | 32 | u32 elem_size; /* size of each element in bytes */ |
32 | }; | 33 | }; |
33 | 34 | ||
35 | enum extra_elem_state { | ||
36 | HTAB_NOT_AN_EXTRA_ELEM = 0, | ||
37 | HTAB_EXTRA_ELEM_FREE, | ||
38 | HTAB_EXTRA_ELEM_USED | ||
39 | }; | ||
40 | |||
34 | /* each htab element is struct htab_elem + key + value */ | 41 | /* each htab element is struct htab_elem + key + value */ |
35 | struct htab_elem { | 42 | struct htab_elem { |
36 | union { | 43 | union { |
@@ -38,7 +45,10 @@ struct htab_elem { | |||
38 | struct bpf_htab *htab; | 45 | struct bpf_htab *htab; |
39 | struct pcpu_freelist_node fnode; | 46 | struct pcpu_freelist_node fnode; |
40 | }; | 47 | }; |
41 | struct rcu_head rcu; | 48 | union { |
49 | struct rcu_head rcu; | ||
50 | enum extra_elem_state state; | ||
51 | }; | ||
42 | u32 hash; | 52 | u32 hash; |
43 | char key[0] __aligned(8); | 53 | char key[0] __aligned(8); |
44 | }; | 54 | }; |
@@ -113,6 +123,23 @@ free_elems: | |||
113 | return err; | 123 | return err; |
114 | } | 124 | } |
115 | 125 | ||
126 | static int alloc_extra_elems(struct bpf_htab *htab) | ||
127 | { | ||
128 | void __percpu *pptr; | ||
129 | int cpu; | ||
130 | |||
131 | pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN); | ||
132 | if (!pptr) | ||
133 | return -ENOMEM; | ||
134 | |||
135 | for_each_possible_cpu(cpu) { | ||
136 | ((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state = | ||
137 | HTAB_EXTRA_ELEM_FREE; | ||
138 | } | ||
139 | htab->extra_elems = pptr; | ||
140 | return 0; | ||
141 | } | ||
142 | |||
116 | /* Called from syscall */ | 143 | /* Called from syscall */ |
117 | static struct bpf_map *htab_map_alloc(union bpf_attr *attr) | 144 | static struct bpf_map *htab_map_alloc(union bpf_attr *attr) |
118 | { | 145 | { |
@@ -185,6 +212,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) | |||
185 | if (percpu) | 212 | if (percpu) |
186 | cost += (u64) round_up(htab->map.value_size, 8) * | 213 | cost += (u64) round_up(htab->map.value_size, 8) * |
187 | num_possible_cpus() * htab->map.max_entries; | 214 | num_possible_cpus() * htab->map.max_entries; |
215 | else | ||
216 | cost += (u64) htab->elem_size * num_possible_cpus(); | ||
188 | 217 | ||
189 | if (cost >= U32_MAX - PAGE_SIZE) | 218 | if (cost >= U32_MAX - PAGE_SIZE) |
190 | /* make sure page count doesn't overflow */ | 219 | /* make sure page count doesn't overflow */ |
@@ -212,14 +241,22 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) | |||
212 | raw_spin_lock_init(&htab->buckets[i].lock); | 241 | raw_spin_lock_init(&htab->buckets[i].lock); |
213 | } | 242 | } |
214 | 243 | ||
244 | if (!percpu) { | ||
245 | err = alloc_extra_elems(htab); | ||
246 | if (err) | ||
247 | goto free_buckets; | ||
248 | } | ||
249 | |||
215 | if (!(attr->map_flags & BPF_F_NO_PREALLOC)) { | 250 | if (!(attr->map_flags & BPF_F_NO_PREALLOC)) { |
216 | err = prealloc_elems_and_freelist(htab); | 251 | err = prealloc_elems_and_freelist(htab); |
217 | if (err) | 252 | if (err) |
218 | goto free_buckets; | 253 | goto free_extra_elems; |
219 | } | 254 | } |
220 | 255 | ||
221 | return &htab->map; | 256 | return &htab->map; |
222 | 257 | ||
258 | free_extra_elems: | ||
259 | free_percpu(htab->extra_elems); | ||
223 | free_buckets: | 260 | free_buckets: |
224 | kvfree(htab->buckets); | 261 | kvfree(htab->buckets); |
225 | free_htab: | 262 | free_htab: |
@@ -349,7 +386,6 @@ static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l) | |||
349 | if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) | 386 | if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) |
350 | free_percpu(htab_elem_get_ptr(l, htab->map.key_size)); | 387 | free_percpu(htab_elem_get_ptr(l, htab->map.key_size)); |
351 | kfree(l); | 388 | kfree(l); |
352 | |||
353 | } | 389 | } |
354 | 390 | ||
355 | static void htab_elem_free_rcu(struct rcu_head *head) | 391 | static void htab_elem_free_rcu(struct rcu_head *head) |
@@ -370,6 +406,11 @@ static void htab_elem_free_rcu(struct rcu_head *head) | |||
370 | 406 | ||
371 | static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) | 407 | static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) |
372 | { | 408 | { |
409 | if (l->state == HTAB_EXTRA_ELEM_USED) { | ||
410 | l->state = HTAB_EXTRA_ELEM_FREE; | ||
411 | return; | ||
412 | } | ||
413 | |||
373 | if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) { | 414 | if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) { |
374 | pcpu_freelist_push(&htab->freelist, &l->fnode); | 415 | pcpu_freelist_push(&htab->freelist, &l->fnode); |
375 | } else { | 416 | } else { |
@@ -381,25 +422,44 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) | |||
381 | 422 | ||
382 | static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, | 423 | static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, |
383 | void *value, u32 key_size, u32 hash, | 424 | void *value, u32 key_size, u32 hash, |
384 | bool percpu, bool onallcpus) | 425 | bool percpu, bool onallcpus, |
426 | bool old_elem_exists) | ||
385 | { | 427 | { |
386 | u32 size = htab->map.value_size; | 428 | u32 size = htab->map.value_size; |
387 | bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC); | 429 | bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC); |
388 | struct htab_elem *l_new; | 430 | struct htab_elem *l_new; |
389 | void __percpu *pptr; | 431 | void __percpu *pptr; |
432 | int err = 0; | ||
390 | 433 | ||
391 | if (prealloc) { | 434 | if (prealloc) { |
392 | l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist); | 435 | l_new = (struct htab_elem *)pcpu_freelist_pop(&htab->freelist); |
393 | if (!l_new) | 436 | if (!l_new) |
394 | return ERR_PTR(-E2BIG); | 437 | err = -E2BIG; |
395 | } else { | 438 | } else { |
396 | if (atomic_inc_return(&htab->count) > htab->map.max_entries) { | 439 | if (atomic_inc_return(&htab->count) > htab->map.max_entries) { |
397 | atomic_dec(&htab->count); | 440 | atomic_dec(&htab->count); |
398 | return ERR_PTR(-E2BIG); | 441 | err = -E2BIG; |
442 | } else { | ||
443 | l_new = kmalloc(htab->elem_size, | ||
444 | GFP_ATOMIC | __GFP_NOWARN); | ||
445 | if (!l_new) | ||
446 | return ERR_PTR(-ENOMEM); | ||
399 | } | 447 | } |
400 | l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN); | 448 | } |
401 | if (!l_new) | 449 | |
402 | return ERR_PTR(-ENOMEM); | 450 | if (err) { |
451 | if (!old_elem_exists) | ||
452 | return ERR_PTR(err); | ||
453 | |||
454 | /* if we're updating the existing element and the hash table | ||
455 | * is full, use per-cpu extra elems | ||
456 | */ | ||
457 | l_new = this_cpu_ptr(htab->extra_elems); | ||
458 | if (l_new->state != HTAB_EXTRA_ELEM_FREE) | ||
459 | return ERR_PTR(-E2BIG); | ||
460 | l_new->state = HTAB_EXTRA_ELEM_USED; | ||
461 | } else { | ||
462 | l_new->state = HTAB_NOT_AN_EXTRA_ELEM; | ||
403 | } | 463 | } |
404 | 464 | ||
405 | memcpy(l_new->key, key, key_size); | 465 | memcpy(l_new->key, key, key_size); |
@@ -489,7 +549,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, | |||
489 | if (ret) | 549 | if (ret) |
490 | goto err; | 550 | goto err; |
491 | 551 | ||
492 | l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false); | 552 | l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, |
553 | !!l_old); | ||
493 | if (IS_ERR(l_new)) { | 554 | if (IS_ERR(l_new)) { |
494 | /* all pre-allocated elements are in use or memory exhausted */ | 555 | /* all pre-allocated elements are in use or memory exhausted */ |
495 | ret = PTR_ERR(l_new); | 556 | ret = PTR_ERR(l_new); |
@@ -563,7 +624,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key, | |||
563 | } | 624 | } |
564 | } else { | 625 | } else { |
565 | l_new = alloc_htab_elem(htab, key, value, key_size, | 626 | l_new = alloc_htab_elem(htab, key, value, key_size, |
566 | hash, true, onallcpus); | 627 | hash, true, onallcpus, false); |
567 | if (IS_ERR(l_new)) { | 628 | if (IS_ERR(l_new)) { |
568 | ret = PTR_ERR(l_new); | 629 | ret = PTR_ERR(l_new); |
569 | goto err; | 630 | goto err; |
@@ -652,6 +713,7 @@ static void htab_map_free(struct bpf_map *map) | |||
652 | htab_free_elems(htab); | 713 | htab_free_elems(htab); |
653 | pcpu_freelist_destroy(&htab->freelist); | 714 | pcpu_freelist_destroy(&htab->freelist); |
654 | } | 715 | } |
716 | free_percpu(htab->extra_elems); | ||
655 | kvfree(htab->buckets); | 717 | kvfree(htab->buckets); |
656 | kfree(htab); | 718 | kfree(htab); |
657 | } | 719 | } |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f72f23b8fdab..daea765d72e6 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -194,6 +194,7 @@ struct verifier_env { | |||
194 | struct verifier_state_list **explored_states; /* search pruning optimization */ | 194 | struct verifier_state_list **explored_states; /* search pruning optimization */ |
195 | struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ | 195 | struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ |
196 | u32 used_map_cnt; /* number of used maps */ | 196 | u32 used_map_cnt; /* number of used maps */ |
197 | u32 id_gen; /* used to generate unique reg IDs */ | ||
197 | bool allow_ptr_leaks; | 198 | bool allow_ptr_leaks; |
198 | }; | 199 | }; |
199 | 200 | ||
@@ -1052,7 +1053,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id) | |||
1052 | goto error; | 1053 | goto error; |
1053 | break; | 1054 | break; |
1054 | case BPF_MAP_TYPE_CGROUP_ARRAY: | 1055 | case BPF_MAP_TYPE_CGROUP_ARRAY: |
1055 | if (func_id != BPF_FUNC_skb_in_cgroup) | 1056 | if (func_id != BPF_FUNC_skb_under_cgroup) |
1056 | goto error; | 1057 | goto error; |
1057 | break; | 1058 | break; |
1058 | default: | 1059 | default: |
@@ -1074,7 +1075,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id) | |||
1074 | if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) | 1075 | if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) |
1075 | goto error; | 1076 | goto error; |
1076 | break; | 1077 | break; |
1077 | case BPF_FUNC_skb_in_cgroup: | 1078 | case BPF_FUNC_skb_under_cgroup: |
1078 | if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) | 1079 | if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) |
1079 | goto error; | 1080 | goto error; |
1080 | break; | 1081 | break; |
@@ -1301,7 +1302,7 @@ add_imm: | |||
1301 | /* dst_reg stays as pkt_ptr type and since some positive | 1302 | /* dst_reg stays as pkt_ptr type and since some positive |
1302 | * integer value was added to the pointer, increment its 'id' | 1303 | * integer value was added to the pointer, increment its 'id' |
1303 | */ | 1304 | */ |
1304 | dst_reg->id++; | 1305 | dst_reg->id = ++env->id_gen; |
1305 | 1306 | ||
1306 | /* something was added to pkt_ptr, set range and off to zero */ | 1307 | /* something was added to pkt_ptr, set range and off to zero */ |
1307 | dst_reg->off = 0; | 1308 | dst_reg->off = 0; |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 1903b8f3a705..5650f5317e0c 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -242,18 +242,6 @@ unlock: | |||
242 | return ret; | 242 | return ret; |
243 | } | 243 | } |
244 | 244 | ||
245 | static void event_function_local(struct perf_event *event, event_f func, void *data) | ||
246 | { | ||
247 | struct event_function_struct efs = { | ||
248 | .event = event, | ||
249 | .func = func, | ||
250 | .data = data, | ||
251 | }; | ||
252 | |||
253 | int ret = event_function(&efs); | ||
254 | WARN_ON_ONCE(ret); | ||
255 | } | ||
256 | |||
257 | static void event_function_call(struct perf_event *event, event_f func, void *data) | 245 | static void event_function_call(struct perf_event *event, event_f func, void *data) |
258 | { | 246 | { |
259 | struct perf_event_context *ctx = event->ctx; | 247 | struct perf_event_context *ctx = event->ctx; |
@@ -303,6 +291,54 @@ again: | |||
303 | raw_spin_unlock_irq(&ctx->lock); | 291 | raw_spin_unlock_irq(&ctx->lock); |
304 | } | 292 | } |
305 | 293 | ||
294 | /* | ||
295 | * Similar to event_function_call() + event_function(), but hard assumes IRQs | ||
296 | * are already disabled and we're on the right CPU. | ||
297 | */ | ||
298 | static void event_function_local(struct perf_event *event, event_f func, void *data) | ||
299 | { | ||
300 | struct perf_event_context *ctx = event->ctx; | ||
301 | struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); | ||
302 | struct task_struct *task = READ_ONCE(ctx->task); | ||
303 | struct perf_event_context *task_ctx = NULL; | ||
304 | |||
305 | WARN_ON_ONCE(!irqs_disabled()); | ||
306 | |||
307 | if (task) { | ||
308 | if (task == TASK_TOMBSTONE) | ||
309 | return; | ||
310 | |||
311 | task_ctx = ctx; | ||
312 | } | ||
313 | |||
314 | perf_ctx_lock(cpuctx, task_ctx); | ||
315 | |||
316 | task = ctx->task; | ||
317 | if (task == TASK_TOMBSTONE) | ||
318 | goto unlock; | ||
319 | |||
320 | if (task) { | ||
321 | /* | ||
322 | * We must be either inactive or active and the right task, | ||
323 | * otherwise we're screwed, since we cannot IPI to somewhere | ||
324 | * else. | ||
325 | */ | ||
326 | if (ctx->is_active) { | ||
327 | if (WARN_ON_ONCE(task != current)) | ||
328 | goto unlock; | ||
329 | |||
330 | if (WARN_ON_ONCE(cpuctx->task_ctx != ctx)) | ||
331 | goto unlock; | ||
332 | } | ||
333 | } else { | ||
334 | WARN_ON_ONCE(&cpuctx->ctx != ctx); | ||
335 | } | ||
336 | |||
337 | func(event, cpuctx, ctx, data); | ||
338 | unlock: | ||
339 | perf_ctx_unlock(cpuctx, task_ctx); | ||
340 | } | ||
341 | |||
306 | #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\ | 342 | #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\ |
307 | PERF_FLAG_FD_OUTPUT |\ | 343 | PERF_FLAG_FD_OUTPUT |\ |
308 | PERF_FLAG_PID_CGROUP |\ | 344 | PERF_FLAG_PID_CGROUP |\ |
@@ -3513,9 +3549,10 @@ static int perf_event_read(struct perf_event *event, bool group) | |||
3513 | .group = group, | 3549 | .group = group, |
3514 | .ret = 0, | 3550 | .ret = 0, |
3515 | }; | 3551 | }; |
3516 | smp_call_function_single(event->oncpu, | 3552 | ret = smp_call_function_single(event->oncpu, __perf_event_read, &data, 1); |
3517 | __perf_event_read, &data, 1); | 3553 | /* The event must have been read from an online CPU: */ |
3518 | ret = data.ret; | 3554 | WARN_ON_ONCE(ret); |
3555 | ret = ret ? : data.ret; | ||
3519 | } else if (event->state == PERF_EVENT_STATE_INACTIVE) { | 3556 | } else if (event->state == PERF_EVENT_STATE_INACTIVE) { |
3520 | struct perf_event_context *ctx = event->ctx; | 3557 | struct perf_event_context *ctx = event->ctx; |
3521 | unsigned long flags; | 3558 | unsigned long flags; |
@@ -6584,15 +6621,6 @@ got_name: | |||
6584 | } | 6621 | } |
6585 | 6622 | ||
6586 | /* | 6623 | /* |
6587 | * Whether this @filter depends on a dynamic object which is not loaded | ||
6588 | * yet or its load addresses are not known. | ||
6589 | */ | ||
6590 | static bool perf_addr_filter_needs_mmap(struct perf_addr_filter *filter) | ||
6591 | { | ||
6592 | return filter->filter && filter->inode; | ||
6593 | } | ||
6594 | |||
6595 | /* | ||
6596 | * Check whether inode and address range match filter criteria. | 6624 | * Check whether inode and address range match filter criteria. |
6597 | */ | 6625 | */ |
6598 | static bool perf_addr_filter_match(struct perf_addr_filter *filter, | 6626 | static bool perf_addr_filter_match(struct perf_addr_filter *filter, |
@@ -6653,6 +6681,13 @@ static void perf_addr_filters_adjust(struct vm_area_struct *vma) | |||
6653 | struct perf_event_context *ctx; | 6681 | struct perf_event_context *ctx; |
6654 | int ctxn; | 6682 | int ctxn; |
6655 | 6683 | ||
6684 | /* | ||
6685 | * Data tracing isn't supported yet and as such there is no need | ||
6686 | * to keep track of anything that isn't related to executable code: | ||
6687 | */ | ||
6688 | if (!(vma->vm_flags & VM_EXEC)) | ||
6689 | return; | ||
6690 | |||
6656 | rcu_read_lock(); | 6691 | rcu_read_lock(); |
6657 | for_each_task_context_nr(ctxn) { | 6692 | for_each_task_context_nr(ctxn) { |
6658 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | 6693 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); |
@@ -7805,7 +7840,11 @@ static void perf_event_addr_filters_apply(struct perf_event *event) | |||
7805 | list_for_each_entry(filter, &ifh->list, entry) { | 7840 | list_for_each_entry(filter, &ifh->list, entry) { |
7806 | event->addr_filters_offs[count] = 0; | 7841 | event->addr_filters_offs[count] = 0; |
7807 | 7842 | ||
7808 | if (perf_addr_filter_needs_mmap(filter)) | 7843 | /* |
7844 | * Adjust base offset if the filter is associated to a binary | ||
7845 | * that needs to be mapped: | ||
7846 | */ | ||
7847 | if (filter->inode) | ||
7809 | event->addr_filters_offs[count] = | 7848 | event->addr_filters_offs[count] = |
7810 | perf_addr_filter_apply(filter, mm); | 7849 | perf_addr_filter_apply(filter, mm); |
7811 | 7850 | ||
@@ -7936,8 +7975,10 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr, | |||
7936 | goto fail; | 7975 | goto fail; |
7937 | } | 7976 | } |
7938 | 7977 | ||
7939 | if (token == IF_SRC_FILE) { | 7978 | if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) { |
7940 | filename = match_strdup(&args[2]); | 7979 | int fpos = filter->range ? 2 : 1; |
7980 | |||
7981 | filename = match_strdup(&args[fpos]); | ||
7941 | if (!filename) { | 7982 | if (!filename) { |
7942 | ret = -ENOMEM; | 7983 | ret = -ENOMEM; |
7943 | goto fail; | 7984 | goto fail; |
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index b7a525ab2083..8c50276b60d1 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c | |||
@@ -172,8 +172,10 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, | |||
172 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); | 172 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
173 | err = -EAGAIN; | 173 | err = -EAGAIN; |
174 | ptep = page_check_address(page, mm, addr, &ptl, 0); | 174 | ptep = page_check_address(page, mm, addr, &ptl, 0); |
175 | if (!ptep) | 175 | if (!ptep) { |
176 | mem_cgroup_cancel_charge(kpage, memcg, false); | ||
176 | goto unlock; | 177 | goto unlock; |
178 | } | ||
177 | 179 | ||
178 | get_page(kpage); | 180 | get_page(kpage); |
179 | page_add_new_anon_rmap(kpage, vma, addr, false); | 181 | page_add_new_anon_rmap(kpage, vma, addr, false); |
@@ -200,7 +202,6 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, | |||
200 | 202 | ||
201 | err = 0; | 203 | err = 0; |
202 | unlock: | 204 | unlock: |
203 | mem_cgroup_cancel_charge(kpage, memcg, false); | ||
204 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); | 205 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
205 | unlock_page(page); | 206 | unlock_page(page); |
206 | return err; | 207 | return err; |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 9a0178c2ac1d..b02228411d57 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -835,9 +835,9 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn) | |||
835 | */ | 835 | */ |
836 | static bool rtree_next_node(struct memory_bitmap *bm) | 836 | static bool rtree_next_node(struct memory_bitmap *bm) |
837 | { | 837 | { |
838 | bm->cur.node = list_entry(bm->cur.node->list.next, | 838 | if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) { |
839 | struct rtree_node, list); | 839 | bm->cur.node = list_entry(bm->cur.node->list.next, |
840 | if (&bm->cur.node->list != &bm->cur.zone->leaves) { | 840 | struct rtree_node, list); |
841 | bm->cur.node_pfn += BM_BITS_PER_BLOCK; | 841 | bm->cur.node_pfn += BM_BITS_PER_BLOCK; |
842 | bm->cur.node_bit = 0; | 842 | bm->cur.node_bit = 0; |
843 | touch_softlockup_watchdog(); | 843 | touch_softlockup_watchdog(); |
@@ -845,9 +845,9 @@ static bool rtree_next_node(struct memory_bitmap *bm) | |||
845 | } | 845 | } |
846 | 846 | ||
847 | /* No more nodes, goto next zone */ | 847 | /* No more nodes, goto next zone */ |
848 | bm->cur.zone = list_entry(bm->cur.zone->list.next, | 848 | if (!list_is_last(&bm->cur.zone->list, &bm->zones)) { |
849 | bm->cur.zone = list_entry(bm->cur.zone->list.next, | ||
849 | struct mem_zone_bm_rtree, list); | 850 | struct mem_zone_bm_rtree, list); |
850 | if (&bm->cur.zone->list != &bm->zones) { | ||
851 | bm->cur.node = list_entry(bm->cur.zone->leaves.next, | 851 | bm->cur.node = list_entry(bm->cur.zone->leaves.next, |
852 | struct rtree_node, list); | 852 | struct rtree_node, list); |
853 | bm->cur.node_pfn = 0; | 853 | bm->cur.node_pfn = 0; |
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 9858266fb0b3..a846cf89eb96 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c | |||
@@ -263,6 +263,11 @@ void account_idle_time(cputime_t cputime) | |||
263 | cpustat[CPUTIME_IDLE] += (__force u64) cputime; | 263 | cpustat[CPUTIME_IDLE] += (__force u64) cputime; |
264 | } | 264 | } |
265 | 265 | ||
266 | /* | ||
267 | * When a guest is interrupted for a longer amount of time, missed clock | ||
268 | * ticks are not redelivered later. Due to that, this function may on | ||
269 | * occasion account more time than the calling functions think elapsed. | ||
270 | */ | ||
266 | static __always_inline cputime_t steal_account_process_time(cputime_t maxtime) | 271 | static __always_inline cputime_t steal_account_process_time(cputime_t maxtime) |
267 | { | 272 | { |
268 | #ifdef CONFIG_PARAVIRT | 273 | #ifdef CONFIG_PARAVIRT |
@@ -371,7 +376,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, | |||
371 | * idle, or potentially user or system time. Due to rounding, | 376 | * idle, or potentially user or system time. Due to rounding, |
372 | * other time can exceed ticks occasionally. | 377 | * other time can exceed ticks occasionally. |
373 | */ | 378 | */ |
374 | other = account_other_time(cputime); | 379 | other = account_other_time(ULONG_MAX); |
375 | if (other >= cputime) | 380 | if (other >= cputime) |
376 | return; | 381 | return; |
377 | cputime -= other; | 382 | cputime -= other; |
@@ -486,7 +491,7 @@ void account_process_tick(struct task_struct *p, int user_tick) | |||
486 | } | 491 | } |
487 | 492 | ||
488 | cputime = cputime_one_jiffy; | 493 | cputime = cputime_one_jiffy; |
489 | steal = steal_account_process_time(cputime); | 494 | steal = steal_account_process_time(ULONG_MAX); |
490 | 495 | ||
491 | if (steal >= cputime) | 496 | if (steal >= cputime) |
492 | return; | 497 | return; |
@@ -516,7 +521,7 @@ void account_idle_ticks(unsigned long ticks) | |||
516 | } | 521 | } |
517 | 522 | ||
518 | cputime = jiffies_to_cputime(ticks); | 523 | cputime = jiffies_to_cputime(ticks); |
519 | steal = steal_account_process_time(cputime); | 524 | steal = steal_account_process_time(ULONG_MAX); |
520 | 525 | ||
521 | if (steal >= cputime) | 526 | if (steal >= cputime) |
522 | return; | 527 | return; |
@@ -614,19 +619,25 @@ static void cputime_adjust(struct task_cputime *curr, | |||
614 | stime = curr->stime; | 619 | stime = curr->stime; |
615 | utime = curr->utime; | 620 | utime = curr->utime; |
616 | 621 | ||
617 | if (utime == 0) { | 622 | /* |
618 | stime = rtime; | 623 | * If either stime or both stime and utime are 0, assume all runtime is |
624 | * userspace. Once a task gets some ticks, the monotonicy code at | ||
625 | * 'update' will ensure things converge to the observed ratio. | ||
626 | */ | ||
627 | if (stime == 0) { | ||
628 | utime = rtime; | ||
619 | goto update; | 629 | goto update; |
620 | } | 630 | } |
621 | 631 | ||
622 | if (stime == 0) { | 632 | if (utime == 0) { |
623 | utime = rtime; | 633 | stime = rtime; |
624 | goto update; | 634 | goto update; |
625 | } | 635 | } |
626 | 636 | ||
627 | stime = scale_stime((__force u64)stime, (__force u64)rtime, | 637 | stime = scale_stime((__force u64)stime, (__force u64)rtime, |
628 | (__force u64)(stime + utime)); | 638 | (__force u64)(stime + utime)); |
629 | 639 | ||
640 | update: | ||
630 | /* | 641 | /* |
631 | * Make sure stime doesn't go backwards; this preserves monotonicity | 642 | * Make sure stime doesn't go backwards; this preserves monotonicity |
632 | * for utime because rtime is monotonic. | 643 | * for utime because rtime is monotonic. |
@@ -649,7 +660,6 @@ static void cputime_adjust(struct task_cputime *curr, | |||
649 | stime = rtime - utime; | 660 | stime = rtime - utime; |
650 | } | 661 | } |
651 | 662 | ||
652 | update: | ||
653 | prev->stime = stime; | 663 | prev->stime = stime; |
654 | prev->utime = utime; | 664 | prev->utime = utime; |
655 | out: | 665 | out: |
@@ -694,6 +704,13 @@ static cputime_t get_vtime_delta(struct task_struct *tsk) | |||
694 | unsigned long now = READ_ONCE(jiffies); | 704 | unsigned long now = READ_ONCE(jiffies); |
695 | cputime_t delta, other; | 705 | cputime_t delta, other; |
696 | 706 | ||
707 | /* | ||
708 | * Unlike tick based timing, vtime based timing never has lost | ||
709 | * ticks, and no need for steal time accounting to make up for | ||
710 | * lost ticks. Vtime accounts a rounded version of actual | ||
711 | * elapsed time. Limit account_other_time to prevent rounding | ||
712 | * errors from causing elapsed vtime to go negative. | ||
713 | */ | ||
697 | delta = jiffies_to_cputime(now - tsk->vtime_snap); | 714 | delta = jiffies_to_cputime(now - tsk->vtime_snap); |
698 | other = account_other_time(delta); | 715 | other = account_other_time(delta); |
699 | WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE); | 716 | WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE); |
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 5d845ffd7982..5ba520b544d7 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
@@ -30,7 +30,7 @@ | |||
30 | 30 | ||
31 | #define HASH_DEFAULT_SIZE 64UL | 31 | #define HASH_DEFAULT_SIZE 64UL |
32 | #define HASH_MIN_SIZE 4U | 32 | #define HASH_MIN_SIZE 4U |
33 | #define BUCKET_LOCKS_PER_CPU 128UL | 33 | #define BUCKET_LOCKS_PER_CPU 32UL |
34 | 34 | ||
35 | static u32 head_hashfn(struct rhashtable *ht, | 35 | static u32 head_hashfn(struct rhashtable *ht, |
36 | const struct bucket_table *tbl, | 36 | const struct bucket_table *tbl, |
@@ -70,7 +70,7 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, | |||
70 | unsigned int nr_pcpus = num_possible_cpus(); | 70 | unsigned int nr_pcpus = num_possible_cpus(); |
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL); | 73 | nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL); |
74 | size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul); | 74 | size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul); |
75 | 75 | ||
76 | /* Never allocate more than 0.5 locks per bucket */ | 76 | /* Never allocate more than 0.5 locks per bucket */ |
@@ -83,6 +83,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl, | |||
83 | tbl->locks = vmalloc(size * sizeof(spinlock_t)); | 83 | tbl->locks = vmalloc(size * sizeof(spinlock_t)); |
84 | else | 84 | else |
85 | #endif | 85 | #endif |
86 | if (gfp != GFP_KERNEL) | ||
87 | gfp |= __GFP_NOWARN | __GFP_NORETRY; | ||
88 | |||
86 | tbl->locks = kmalloc_array(size, sizeof(spinlock_t), | 89 | tbl->locks = kmalloc_array(size, sizeof(spinlock_t), |
87 | gfp); | 90 | gfp); |
88 | if (!tbl->locks) | 91 | if (!tbl->locks) |
@@ -321,12 +324,14 @@ static int rhashtable_expand(struct rhashtable *ht) | |||
321 | static int rhashtable_shrink(struct rhashtable *ht) | 324 | static int rhashtable_shrink(struct rhashtable *ht) |
322 | { | 325 | { |
323 | struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); | 326 | struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); |
324 | unsigned int size; | 327 | unsigned int nelems = atomic_read(&ht->nelems); |
328 | unsigned int size = 0; | ||
325 | int err; | 329 | int err; |
326 | 330 | ||
327 | ASSERT_RHT_MUTEX(ht); | 331 | ASSERT_RHT_MUTEX(ht); |
328 | 332 | ||
329 | size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2); | 333 | if (nelems) |
334 | size = roundup_pow_of_two(nelems * 3 / 2); | ||
330 | if (size < ht->p.min_size) | 335 | if (size < ht->p.min_size) |
331 | size = ht->p.min_size; | 336 | size = ht->p.min_size; |
332 | 337 | ||
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 297fdb5e74bd..64e899b63337 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c | |||
@@ -38,7 +38,7 @@ MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)"); | |||
38 | 38 | ||
39 | static int max_size = 0; | 39 | static int max_size = 0; |
40 | module_param(max_size, int, 0); | 40 | module_param(max_size, int, 0); |
41 | MODULE_PARM_DESC(runs, "Maximum table size (default: calculated)"); | 41 | MODULE_PARM_DESC(max_size, "Maximum table size (default: calculated)"); |
42 | 42 | ||
43 | static bool shrinking = false; | 43 | static bool shrinking = false; |
44 | module_param(shrinking, bool, 0); | 44 | module_param(shrinking, bool, 0); |
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 82a116ba590e..8de138d3306b 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -169,7 +169,7 @@ int register_vlan_dev(struct net_device *dev) | |||
169 | if (err < 0) | 169 | if (err < 0) |
170 | goto out_uninit_mvrp; | 170 | goto out_uninit_mvrp; |
171 | 171 | ||
172 | vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1; | 172 | vlan->nest_level = dev_get_nest_level(real_dev) + 1; |
173 | err = register_netdevice(dev); | 173 | err = register_netdevice(dev); |
174 | if (err < 0) | 174 | if (err < 0) |
175 | goto out_uninit_mvrp; | 175 | goto out_uninit_mvrp; |
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index c18080ad4085..cd620fab41b0 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c | |||
@@ -267,7 +267,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) | |||
267 | 267 | ||
268 | /* If old entry was unassociated with any port, then delete it. */ | 268 | /* If old entry was unassociated with any port, then delete it. */ |
269 | f = __br_fdb_get(br, br->dev->dev_addr, 0); | 269 | f = __br_fdb_get(br, br->dev->dev_addr, 0); |
270 | if (f && f->is_local && !f->dst) | 270 | if (f && f->is_local && !f->dst && !f->added_by_user) |
271 | fdb_delete_local(br, NULL, f); | 271 | fdb_delete_local(br, NULL, f); |
272 | 272 | ||
273 | fdb_insert(br, NULL, newaddr, 0); | 273 | fdb_insert(br, NULL, newaddr, 0); |
@@ -282,7 +282,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) | |||
282 | if (!br_vlan_should_use(v)) | 282 | if (!br_vlan_should_use(v)) |
283 | continue; | 283 | continue; |
284 | f = __br_fdb_get(br, br->dev->dev_addr, v->vid); | 284 | f = __br_fdb_get(br, br->dev->dev_addr, v->vid); |
285 | if (f && f->is_local && !f->dst) | 285 | if (f && f->is_local && !f->dst && !f->added_by_user) |
286 | fdb_delete_local(br, NULL, f); | 286 | fdb_delete_local(br, NULL, f); |
287 | fdb_insert(br, NULL, newaddr, v->vid); | 287 | fdb_insert(br, NULL, newaddr, v->vid); |
288 | } | 288 | } |
@@ -764,20 +764,25 @@ out: | |||
764 | } | 764 | } |
765 | 765 | ||
766 | /* Update (create or replace) forwarding database entry */ | 766 | /* Update (create or replace) forwarding database entry */ |
767 | static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, | 767 | static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, |
768 | __u16 state, __u16 flags, __u16 vid) | 768 | const __u8 *addr, __u16 state, __u16 flags, __u16 vid) |
769 | { | 769 | { |
770 | struct net_bridge *br = source->br; | ||
771 | struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; | 770 | struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; |
772 | struct net_bridge_fdb_entry *fdb; | 771 | struct net_bridge_fdb_entry *fdb; |
773 | bool modified = false; | 772 | bool modified = false; |
774 | 773 | ||
775 | /* If the port cannot learn allow only local and static entries */ | 774 | /* If the port cannot learn allow only local and static entries */ |
776 | if (!(state & NUD_PERMANENT) && !(state & NUD_NOARP) && | 775 | if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) && |
777 | !(source->state == BR_STATE_LEARNING || | 776 | !(source->state == BR_STATE_LEARNING || |
778 | source->state == BR_STATE_FORWARDING)) | 777 | source->state == BR_STATE_FORWARDING)) |
779 | return -EPERM; | 778 | return -EPERM; |
780 | 779 | ||
780 | if (!source && !(state & NUD_PERMANENT)) { | ||
781 | pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n", | ||
782 | br->dev->name); | ||
783 | return -EINVAL; | ||
784 | } | ||
785 | |||
781 | fdb = fdb_find(head, addr, vid); | 786 | fdb = fdb_find(head, addr, vid); |
782 | if (fdb == NULL) { | 787 | if (fdb == NULL) { |
783 | if (!(flags & NLM_F_CREATE)) | 788 | if (!(flags & NLM_F_CREATE)) |
@@ -832,22 +837,28 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr, | |||
832 | return 0; | 837 | return 0; |
833 | } | 838 | } |
834 | 839 | ||
835 | static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p, | 840 | static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, |
836 | const unsigned char *addr, u16 nlh_flags, u16 vid) | 841 | struct net_bridge_port *p, const unsigned char *addr, |
842 | u16 nlh_flags, u16 vid) | ||
837 | { | 843 | { |
838 | int err = 0; | 844 | int err = 0; |
839 | 845 | ||
840 | if (ndm->ndm_flags & NTF_USE) { | 846 | if (ndm->ndm_flags & NTF_USE) { |
847 | if (!p) { | ||
848 | pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n", | ||
849 | br->dev->name); | ||
850 | return -EINVAL; | ||
851 | } | ||
841 | local_bh_disable(); | 852 | local_bh_disable(); |
842 | rcu_read_lock(); | 853 | rcu_read_lock(); |
843 | br_fdb_update(p->br, p, addr, vid, true); | 854 | br_fdb_update(br, p, addr, vid, true); |
844 | rcu_read_unlock(); | 855 | rcu_read_unlock(); |
845 | local_bh_enable(); | 856 | local_bh_enable(); |
846 | } else { | 857 | } else { |
847 | spin_lock_bh(&p->br->hash_lock); | 858 | spin_lock_bh(&br->hash_lock); |
848 | err = fdb_add_entry(p, addr, ndm->ndm_state, | 859 | err = fdb_add_entry(br, p, addr, ndm->ndm_state, |
849 | nlh_flags, vid); | 860 | nlh_flags, vid); |
850 | spin_unlock_bh(&p->br->hash_lock); | 861 | spin_unlock_bh(&br->hash_lock); |
851 | } | 862 | } |
852 | 863 | ||
853 | return err; | 864 | return err; |
@@ -884,6 +895,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], | |||
884 | dev->name); | 895 | dev->name); |
885 | return -EINVAL; | 896 | return -EINVAL; |
886 | } | 897 | } |
898 | br = p->br; | ||
887 | vg = nbp_vlan_group(p); | 899 | vg = nbp_vlan_group(p); |
888 | } | 900 | } |
889 | 901 | ||
@@ -895,15 +907,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], | |||
895 | } | 907 | } |
896 | 908 | ||
897 | /* VID was specified, so use it. */ | 909 | /* VID was specified, so use it. */ |
898 | if (dev->priv_flags & IFF_EBRIDGE) | 910 | err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid); |
899 | err = br_fdb_insert(br, NULL, addr, vid); | ||
900 | else | ||
901 | err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); | ||
902 | } else { | 911 | } else { |
903 | if (dev->priv_flags & IFF_EBRIDGE) | 912 | err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0); |
904 | err = br_fdb_insert(br, NULL, addr, 0); | ||
905 | else | ||
906 | err = __br_fdb_add(ndm, p, addr, nlh_flags, 0); | ||
907 | if (err || !vg || !vg->num_vlans) | 913 | if (err || !vg || !vg->num_vlans) |
908 | goto out; | 914 | goto out; |
909 | 915 | ||
@@ -914,11 +920,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], | |||
914 | list_for_each_entry(v, &vg->vlan_list, vlist) { | 920 | list_for_each_entry(v, &vg->vlan_list, vlist) { |
915 | if (!br_vlan_should_use(v)) | 921 | if (!br_vlan_should_use(v)) |
916 | continue; | 922 | continue; |
917 | if (dev->priv_flags & IFF_EBRIDGE) | 923 | err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid); |
918 | err = br_fdb_insert(br, NULL, addr, v->vid); | ||
919 | else | ||
920 | err = __br_fdb_add(ndm, p, addr, nlh_flags, | ||
921 | v->vid); | ||
922 | if (err) | 924 | if (err) |
923 | goto out; | 925 | goto out; |
924 | } | 926 | } |
diff --git a/net/core/dev.c b/net/core/dev.c index 4ce07dc25573..dd6ce598de89 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -6045,8 +6045,7 @@ void *netdev_lower_dev_get_private(struct net_device *dev, | |||
6045 | EXPORT_SYMBOL(netdev_lower_dev_get_private); | 6045 | EXPORT_SYMBOL(netdev_lower_dev_get_private); |
6046 | 6046 | ||
6047 | 6047 | ||
6048 | int dev_get_nest_level(struct net_device *dev, | 6048 | int dev_get_nest_level(struct net_device *dev) |
6049 | bool (*type_check)(const struct net_device *dev)) | ||
6050 | { | 6049 | { |
6051 | struct net_device *lower = NULL; | 6050 | struct net_device *lower = NULL; |
6052 | struct list_head *iter; | 6051 | struct list_head *iter; |
@@ -6056,15 +6055,12 @@ int dev_get_nest_level(struct net_device *dev, | |||
6056 | ASSERT_RTNL(); | 6055 | ASSERT_RTNL(); |
6057 | 6056 | ||
6058 | netdev_for_each_lower_dev(dev, lower, iter) { | 6057 | netdev_for_each_lower_dev(dev, lower, iter) { |
6059 | nest = dev_get_nest_level(lower, type_check); | 6058 | nest = dev_get_nest_level(lower); |
6060 | if (max_nest < nest) | 6059 | if (max_nest < nest) |
6061 | max_nest = nest; | 6060 | max_nest = nest; |
6062 | } | 6061 | } |
6063 | 6062 | ||
6064 | if (type_check(dev)) | 6063 | return max_nest + 1; |
6065 | max_nest++; | ||
6066 | |||
6067 | return max_nest; | ||
6068 | } | 6064 | } |
6069 | EXPORT_SYMBOL(dev_get_nest_level); | 6065 | EXPORT_SYMBOL(dev_get_nest_level); |
6070 | 6066 | ||
diff --git a/net/core/filter.c b/net/core/filter.c index 5708999f8a79..cb06aceb512a 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -1355,56 +1355,47 @@ static inline int bpf_try_make_writable(struct sk_buff *skb, | |||
1355 | { | 1355 | { |
1356 | int err; | 1356 | int err; |
1357 | 1357 | ||
1358 | if (!skb_cloned(skb)) | 1358 | err = skb_ensure_writable(skb, write_len); |
1359 | return 0; | 1359 | bpf_compute_data_end(skb); |
1360 | if (skb_clone_writable(skb, write_len)) | 1360 | |
1361 | return 0; | ||
1362 | err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); | ||
1363 | if (!err) | ||
1364 | bpf_compute_data_end(skb); | ||
1365 | return err; | 1361 | return err; |
1366 | } | 1362 | } |
1367 | 1363 | ||
1364 | static inline void bpf_push_mac_rcsum(struct sk_buff *skb) | ||
1365 | { | ||
1366 | if (skb_at_tc_ingress(skb)) | ||
1367 | skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); | ||
1368 | } | ||
1369 | |||
1370 | static inline void bpf_pull_mac_rcsum(struct sk_buff *skb) | ||
1371 | { | ||
1372 | if (skb_at_tc_ingress(skb)) | ||
1373 | skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len); | ||
1374 | } | ||
1375 | |||
1368 | static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) | 1376 | static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) |
1369 | { | 1377 | { |
1370 | struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp); | ||
1371 | struct sk_buff *skb = (struct sk_buff *) (long) r1; | 1378 | struct sk_buff *skb = (struct sk_buff *) (long) r1; |
1372 | int offset = (int) r2; | 1379 | unsigned int offset = (unsigned int) r2; |
1373 | void *from = (void *) (long) r3; | 1380 | void *from = (void *) (long) r3; |
1374 | unsigned int len = (unsigned int) r4; | 1381 | unsigned int len = (unsigned int) r4; |
1375 | void *ptr; | 1382 | void *ptr; |
1376 | 1383 | ||
1377 | if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH))) | 1384 | if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH))) |
1378 | return -EINVAL; | 1385 | return -EINVAL; |
1379 | 1386 | if (unlikely(offset > 0xffff)) | |
1380 | /* bpf verifier guarantees that: | ||
1381 | * 'from' pointer points to bpf program stack | ||
1382 | * 'len' bytes of it were initialized | ||
1383 | * 'len' > 0 | ||
1384 | * 'skb' is a valid pointer to 'struct sk_buff' | ||
1385 | * | ||
1386 | * so check for invalid 'offset' and too large 'len' | ||
1387 | */ | ||
1388 | if (unlikely((u32) offset > 0xffff || len > sizeof(sp->buff))) | ||
1389 | return -EFAULT; | 1387 | return -EFAULT; |
1390 | if (unlikely(bpf_try_make_writable(skb, offset + len))) | 1388 | if (unlikely(bpf_try_make_writable(skb, offset + len))) |
1391 | return -EFAULT; | 1389 | return -EFAULT; |
1392 | 1390 | ||
1393 | ptr = skb_header_pointer(skb, offset, len, sp->buff); | 1391 | ptr = skb->data + offset; |
1394 | if (unlikely(!ptr)) | ||
1395 | return -EFAULT; | ||
1396 | |||
1397 | if (flags & BPF_F_RECOMPUTE_CSUM) | 1392 | if (flags & BPF_F_RECOMPUTE_CSUM) |
1398 | skb_postpull_rcsum(skb, ptr, len); | 1393 | __skb_postpull_rcsum(skb, ptr, len, offset); |
1399 | 1394 | ||
1400 | memcpy(ptr, from, len); | 1395 | memcpy(ptr, from, len); |
1401 | 1396 | ||
1402 | if (ptr == sp->buff) | ||
1403 | /* skb_store_bits cannot return -EFAULT here */ | ||
1404 | skb_store_bits(skb, offset, ptr, len); | ||
1405 | |||
1406 | if (flags & BPF_F_RECOMPUTE_CSUM) | 1397 | if (flags & BPF_F_RECOMPUTE_CSUM) |
1407 | skb_postpush_rcsum(skb, ptr, len); | 1398 | __skb_postpush_rcsum(skb, ptr, len, offset); |
1408 | if (flags & BPF_F_INVALIDATE_HASH) | 1399 | if (flags & BPF_F_INVALIDATE_HASH) |
1409 | skb_clear_hash(skb); | 1400 | skb_clear_hash(skb); |
1410 | 1401 | ||
@@ -1425,12 +1416,12 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = { | |||
1425 | static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) | 1416 | static u64 bpf_skb_load_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) |
1426 | { | 1417 | { |
1427 | const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1; | 1418 | const struct sk_buff *skb = (const struct sk_buff *)(unsigned long) r1; |
1428 | int offset = (int) r2; | 1419 | unsigned int offset = (unsigned int) r2; |
1429 | void *to = (void *)(unsigned long) r3; | 1420 | void *to = (void *)(unsigned long) r3; |
1430 | unsigned int len = (unsigned int) r4; | 1421 | unsigned int len = (unsigned int) r4; |
1431 | void *ptr; | 1422 | void *ptr; |
1432 | 1423 | ||
1433 | if (unlikely((u32) offset > 0xffff)) | 1424 | if (unlikely(offset > 0xffff)) |
1434 | goto err_clear; | 1425 | goto err_clear; |
1435 | 1426 | ||
1436 | ptr = skb_header_pointer(skb, offset, len, to); | 1427 | ptr = skb_header_pointer(skb, offset, len, to); |
@@ -1458,20 +1449,17 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = { | |||
1458 | static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) | 1449 | static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) |
1459 | { | 1450 | { |
1460 | struct sk_buff *skb = (struct sk_buff *) (long) r1; | 1451 | struct sk_buff *skb = (struct sk_buff *) (long) r1; |
1461 | int offset = (int) r2; | 1452 | unsigned int offset = (unsigned int) r2; |
1462 | __sum16 sum, *ptr; | 1453 | __sum16 *ptr; |
1463 | 1454 | ||
1464 | if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK))) | 1455 | if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK))) |
1465 | return -EINVAL; | 1456 | return -EINVAL; |
1466 | if (unlikely((u32) offset > 0xffff)) | 1457 | if (unlikely(offset > 0xffff || offset & 1)) |
1467 | return -EFAULT; | 1458 | return -EFAULT; |
1468 | if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum)))) | 1459 | if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) |
1469 | return -EFAULT; | ||
1470 | |||
1471 | ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); | ||
1472 | if (unlikely(!ptr)) | ||
1473 | return -EFAULT; | 1460 | return -EFAULT; |
1474 | 1461 | ||
1462 | ptr = (__sum16 *)(skb->data + offset); | ||
1475 | switch (flags & BPF_F_HDR_FIELD_MASK) { | 1463 | switch (flags & BPF_F_HDR_FIELD_MASK) { |
1476 | case 0: | 1464 | case 0: |
1477 | if (unlikely(from != 0)) | 1465 | if (unlikely(from != 0)) |
@@ -1489,10 +1477,6 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) | |||
1489 | return -EINVAL; | 1477 | return -EINVAL; |
1490 | } | 1478 | } |
1491 | 1479 | ||
1492 | if (ptr == &sum) | ||
1493 | /* skb_store_bits guaranteed to not return -EFAULT here */ | ||
1494 | skb_store_bits(skb, offset, ptr, sizeof(sum)); | ||
1495 | |||
1496 | return 0; | 1480 | return 0; |
1497 | } | 1481 | } |
1498 | 1482 | ||
@@ -1512,20 +1496,18 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) | |||
1512 | struct sk_buff *skb = (struct sk_buff *) (long) r1; | 1496 | struct sk_buff *skb = (struct sk_buff *) (long) r1; |
1513 | bool is_pseudo = flags & BPF_F_PSEUDO_HDR; | 1497 | bool is_pseudo = flags & BPF_F_PSEUDO_HDR; |
1514 | bool is_mmzero = flags & BPF_F_MARK_MANGLED_0; | 1498 | bool is_mmzero = flags & BPF_F_MARK_MANGLED_0; |
1515 | int offset = (int) r2; | 1499 | unsigned int offset = (unsigned int) r2; |
1516 | __sum16 sum, *ptr; | 1500 | __sum16 *ptr; |
1517 | 1501 | ||
1518 | if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR | | 1502 | if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR | |
1519 | BPF_F_HDR_FIELD_MASK))) | 1503 | BPF_F_HDR_FIELD_MASK))) |
1520 | return -EINVAL; | 1504 | return -EINVAL; |
1521 | if (unlikely((u32) offset > 0xffff)) | 1505 | if (unlikely(offset > 0xffff || offset & 1)) |
1522 | return -EFAULT; | 1506 | return -EFAULT; |
1523 | if (unlikely(bpf_try_make_writable(skb, offset + sizeof(sum)))) | 1507 | if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) |
1524 | return -EFAULT; | 1508 | return -EFAULT; |
1525 | 1509 | ||
1526 | ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); | 1510 | ptr = (__sum16 *)(skb->data + offset); |
1527 | if (unlikely(!ptr)) | ||
1528 | return -EFAULT; | ||
1529 | if (is_mmzero && !*ptr) | 1511 | if (is_mmzero && !*ptr) |
1530 | return 0; | 1512 | return 0; |
1531 | 1513 | ||
@@ -1548,10 +1530,6 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) | |||
1548 | 1530 | ||
1549 | if (is_mmzero && !*ptr) | 1531 | if (is_mmzero && !*ptr) |
1550 | *ptr = CSUM_MANGLED_0; | 1532 | *ptr = CSUM_MANGLED_0; |
1551 | if (ptr == &sum) | ||
1552 | /* skb_store_bits guaranteed to not return -EFAULT here */ | ||
1553 | skb_store_bits(skb, offset, ptr, sizeof(sum)); | ||
1554 | |||
1555 | return 0; | 1533 | return 0; |
1556 | } | 1534 | } |
1557 | 1535 | ||
@@ -1607,9 +1585,6 @@ static const struct bpf_func_proto bpf_csum_diff_proto = { | |||
1607 | 1585 | ||
1608 | static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) | 1586 | static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) |
1609 | { | 1587 | { |
1610 | if (skb_at_tc_ingress(skb)) | ||
1611 | skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); | ||
1612 | |||
1613 | return dev_forward_skb(dev, skb); | 1588 | return dev_forward_skb(dev, skb); |
1614 | } | 1589 | } |
1615 | 1590 | ||
@@ -1648,6 +1623,8 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5) | |||
1648 | if (unlikely(!skb)) | 1623 | if (unlikely(!skb)) |
1649 | return -ENOMEM; | 1624 | return -ENOMEM; |
1650 | 1625 | ||
1626 | bpf_push_mac_rcsum(skb); | ||
1627 | |||
1651 | return flags & BPF_F_INGRESS ? | 1628 | return flags & BPF_F_INGRESS ? |
1652 | __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); | 1629 | __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); |
1653 | } | 1630 | } |
@@ -1693,6 +1670,8 @@ int skb_do_redirect(struct sk_buff *skb) | |||
1693 | return -EINVAL; | 1670 | return -EINVAL; |
1694 | } | 1671 | } |
1695 | 1672 | ||
1673 | bpf_push_mac_rcsum(skb); | ||
1674 | |||
1696 | return ri->flags & BPF_F_INGRESS ? | 1675 | return ri->flags & BPF_F_INGRESS ? |
1697 | __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); | 1676 | __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); |
1698 | } | 1677 | } |
@@ -1756,7 +1735,10 @@ static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5) | |||
1756 | vlan_proto != htons(ETH_P_8021AD))) | 1735 | vlan_proto != htons(ETH_P_8021AD))) |
1757 | vlan_proto = htons(ETH_P_8021Q); | 1736 | vlan_proto = htons(ETH_P_8021Q); |
1758 | 1737 | ||
1738 | bpf_push_mac_rcsum(skb); | ||
1759 | ret = skb_vlan_push(skb, vlan_proto, vlan_tci); | 1739 | ret = skb_vlan_push(skb, vlan_proto, vlan_tci); |
1740 | bpf_pull_mac_rcsum(skb); | ||
1741 | |||
1760 | bpf_compute_data_end(skb); | 1742 | bpf_compute_data_end(skb); |
1761 | return ret; | 1743 | return ret; |
1762 | } | 1744 | } |
@@ -1776,7 +1758,10 @@ static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) | |||
1776 | struct sk_buff *skb = (struct sk_buff *) (long) r1; | 1758 | struct sk_buff *skb = (struct sk_buff *) (long) r1; |
1777 | int ret; | 1759 | int ret; |
1778 | 1760 | ||
1761 | bpf_push_mac_rcsum(skb); | ||
1779 | ret = skb_vlan_pop(skb); | 1762 | ret = skb_vlan_pop(skb); |
1763 | bpf_pull_mac_rcsum(skb); | ||
1764 | |||
1780 | bpf_compute_data_end(skb); | 1765 | bpf_compute_data_end(skb); |
1781 | return ret; | 1766 | return ret; |
1782 | } | 1767 | } |
@@ -2298,7 +2283,7 @@ bpf_get_skb_set_tunnel_proto(enum bpf_func_id which) | |||
2298 | } | 2283 | } |
2299 | 2284 | ||
2300 | #ifdef CONFIG_SOCK_CGROUP_DATA | 2285 | #ifdef CONFIG_SOCK_CGROUP_DATA |
2301 | static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) | 2286 | static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) |
2302 | { | 2287 | { |
2303 | struct sk_buff *skb = (struct sk_buff *)(long)r1; | 2288 | struct sk_buff *skb = (struct sk_buff *)(long)r1; |
2304 | struct bpf_map *map = (struct bpf_map *)(long)r2; | 2289 | struct bpf_map *map = (struct bpf_map *)(long)r2; |
@@ -2321,8 +2306,8 @@ static u64 bpf_skb_in_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) | |||
2321 | return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp); | 2306 | return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp); |
2322 | } | 2307 | } |
2323 | 2308 | ||
2324 | static const struct bpf_func_proto bpf_skb_in_cgroup_proto = { | 2309 | static const struct bpf_func_proto bpf_skb_under_cgroup_proto = { |
2325 | .func = bpf_skb_in_cgroup, | 2310 | .func = bpf_skb_under_cgroup, |
2326 | .gpl_only = false, | 2311 | .gpl_only = false, |
2327 | .ret_type = RET_INTEGER, | 2312 | .ret_type = RET_INTEGER, |
2328 | .arg1_type = ARG_PTR_TO_CTX, | 2313 | .arg1_type = ARG_PTR_TO_CTX, |
@@ -2402,8 +2387,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id) | |||
2402 | case BPF_FUNC_get_smp_processor_id: | 2387 | case BPF_FUNC_get_smp_processor_id: |
2403 | return &bpf_get_smp_processor_id_proto; | 2388 | return &bpf_get_smp_processor_id_proto; |
2404 | #ifdef CONFIG_SOCK_CGROUP_DATA | 2389 | #ifdef CONFIG_SOCK_CGROUP_DATA |
2405 | case BPF_FUNC_skb_in_cgroup: | 2390 | case BPF_FUNC_skb_under_cgroup: |
2406 | return &bpf_skb_in_cgroup_proto; | 2391 | return &bpf_skb_under_cgroup_proto; |
2407 | #endif | 2392 | #endif |
2408 | default: | 2393 | default: |
2409 | return sk_filter_func_proto(func_id); | 2394 | return sk_filter_func_proto(func_id); |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index d07fc076bea0..febca0f1008c 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -2452,9 +2452,7 @@ struct fib_route_iter { | |||
2452 | static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, | 2452 | static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, |
2453 | loff_t pos) | 2453 | loff_t pos) |
2454 | { | 2454 | { |
2455 | struct fib_table *tb = iter->main_tb; | ||
2456 | struct key_vector *l, **tp = &iter->tnode; | 2455 | struct key_vector *l, **tp = &iter->tnode; |
2457 | struct trie *t; | ||
2458 | t_key key; | 2456 | t_key key; |
2459 | 2457 | ||
2460 | /* use cache location of next-to-find key */ | 2458 | /* use cache location of next-to-find key */ |
@@ -2462,8 +2460,6 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, | |||
2462 | pos -= iter->pos; | 2460 | pos -= iter->pos; |
2463 | key = iter->key; | 2461 | key = iter->key; |
2464 | } else { | 2462 | } else { |
2465 | t = (struct trie *)tb->tb_data; | ||
2466 | iter->tnode = t->kv; | ||
2467 | iter->pos = 0; | 2463 | iter->pos = 0; |
2468 | key = 0; | 2464 | key = 0; |
2469 | } | 2465 | } |
@@ -2504,12 +2500,12 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos) | |||
2504 | return NULL; | 2500 | return NULL; |
2505 | 2501 | ||
2506 | iter->main_tb = tb; | 2502 | iter->main_tb = tb; |
2503 | t = (struct trie *)tb->tb_data; | ||
2504 | iter->tnode = t->kv; | ||
2507 | 2505 | ||
2508 | if (*pos != 0) | 2506 | if (*pos != 0) |
2509 | return fib_route_get_idx(iter, *pos); | 2507 | return fib_route_get_idx(iter, *pos); |
2510 | 2508 | ||
2511 | t = (struct trie *)tb->tb_data; | ||
2512 | iter->tnode = t->kv; | ||
2513 | iter->pos = 0; | 2509 | iter->pos = 0; |
2514 | iter->key = 0; | 2510 | iter->key = 0; |
2515 | 2511 | ||
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 5b1481be0282..113cc43df789 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -370,7 +370,6 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, | |||
370 | tunnel->parms.o_flags, proto, tunnel->parms.o_key, | 370 | tunnel->parms.o_flags, proto, tunnel->parms.o_key, |
371 | htonl(tunnel->o_seqno)); | 371 | htonl(tunnel->o_seqno)); |
372 | 372 | ||
373 | skb_set_inner_protocol(skb, proto); | ||
374 | ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); | 373 | ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol); |
375 | } | 374 | } |
376 | 375 | ||
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index a917903d5e97..cc701fa70b12 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c | |||
@@ -557,6 +557,33 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = { | |||
557 | .get_link_net = ip_tunnel_get_link_net, | 557 | .get_link_net = ip_tunnel_get_link_net, |
558 | }; | 558 | }; |
559 | 559 | ||
560 | static bool is_vti_tunnel(const struct net_device *dev) | ||
561 | { | ||
562 | return dev->netdev_ops == &vti_netdev_ops; | ||
563 | } | ||
564 | |||
565 | static int vti_device_event(struct notifier_block *unused, | ||
566 | unsigned long event, void *ptr) | ||
567 | { | ||
568 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); | ||
569 | struct ip_tunnel *tunnel = netdev_priv(dev); | ||
570 | |||
571 | if (!is_vti_tunnel(dev)) | ||
572 | return NOTIFY_DONE; | ||
573 | |||
574 | switch (event) { | ||
575 | case NETDEV_DOWN: | ||
576 | if (!net_eq(tunnel->net, dev_net(dev))) | ||
577 | xfrm_garbage_collect(tunnel->net); | ||
578 | break; | ||
579 | } | ||
580 | return NOTIFY_DONE; | ||
581 | } | ||
582 | |||
583 | static struct notifier_block vti_notifier_block __read_mostly = { | ||
584 | .notifier_call = vti_device_event, | ||
585 | }; | ||
586 | |||
560 | static int __init vti_init(void) | 587 | static int __init vti_init(void) |
561 | { | 588 | { |
562 | const char *msg; | 589 | const char *msg; |
@@ -564,6 +591,8 @@ static int __init vti_init(void) | |||
564 | 591 | ||
565 | pr_info("IPv4 over IPsec tunneling driver\n"); | 592 | pr_info("IPv4 over IPsec tunneling driver\n"); |
566 | 593 | ||
594 | register_netdevice_notifier(&vti_notifier_block); | ||
595 | |||
567 | msg = "tunnel device"; | 596 | msg = "tunnel device"; |
568 | err = register_pernet_device(&vti_net_ops); | 597 | err = register_pernet_device(&vti_net_ops); |
569 | if (err < 0) | 598 | if (err < 0) |
@@ -596,6 +625,7 @@ xfrm_proto_ah_failed: | |||
596 | xfrm_proto_esp_failed: | 625 | xfrm_proto_esp_failed: |
597 | unregister_pernet_device(&vti_net_ops); | 626 | unregister_pernet_device(&vti_net_ops); |
598 | pernet_dev_failed: | 627 | pernet_dev_failed: |
628 | unregister_netdevice_notifier(&vti_notifier_block); | ||
599 | pr_err("vti init: failed to register %s\n", msg); | 629 | pr_err("vti init: failed to register %s\n", msg); |
600 | return err; | 630 | return err; |
601 | } | 631 | } |
@@ -607,6 +637,7 @@ static void __exit vti_fini(void) | |||
607 | xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); | 637 | xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); |
608 | xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP); | 638 | xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP); |
609 | unregister_pernet_device(&vti_net_ops); | 639 | unregister_pernet_device(&vti_net_ops); |
640 | unregister_netdevice_notifier(&vti_notifier_block); | ||
610 | } | 641 | } |
611 | 642 | ||
612 | module_init(vti_init); | 643 | module_init(vti_init); |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index ab3e796596b1..df8425fcbc2c 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -3543,7 +3543,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
3543 | /* combine the user config with event to determine if permanent | 3543 | /* combine the user config with event to determine if permanent |
3544 | * addresses are to be removed from address hash table | 3544 | * addresses are to be removed from address hash table |
3545 | */ | 3545 | */ |
3546 | keep_addr = !(how || _keep_addr <= 0); | 3546 | keep_addr = !(how || _keep_addr <= 0 || idev->cnf.disable_ipv6); |
3547 | 3547 | ||
3548 | /* Step 2: clear hash table */ | 3548 | /* Step 2: clear hash table */ |
3549 | for (i = 0; i < IN6_ADDR_HSIZE; i++) { | 3549 | for (i = 0; i < IN6_ADDR_HSIZE; i++) { |
@@ -3599,7 +3599,7 @@ restart: | |||
3599 | /* re-combine the user config with event to determine if permanent | 3599 | /* re-combine the user config with event to determine if permanent |
3600 | * addresses are to be removed from the interface list | 3600 | * addresses are to be removed from the interface list |
3601 | */ | 3601 | */ |
3602 | keep_addr = (!how && _keep_addr > 0); | 3602 | keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6); |
3603 | 3603 | ||
3604 | INIT_LIST_HEAD(&del_list); | 3604 | INIT_LIST_HEAD(&del_list); |
3605 | list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { | 3605 | list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) { |
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c index c53b92c617c5..37ac9de713c6 100644 --- a/net/ipv6/calipso.c +++ b/net/ipv6/calipso.c | |||
@@ -952,8 +952,10 @@ calipso_opt_insert(struct ipv6_opt_hdr *hop, | |||
952 | memcpy(new, hop, start); | 952 | memcpy(new, hop, start); |
953 | ret_val = calipso_genopt((unsigned char *)new, start, buf_len, doi_def, | 953 | ret_val = calipso_genopt((unsigned char *)new, start, buf_len, doi_def, |
954 | secattr); | 954 | secattr); |
955 | if (ret_val < 0) | 955 | if (ret_val < 0) { |
956 | kfree(new); | ||
956 | return ERR_PTR(ret_val); | 957 | return ERR_PTR(ret_val); |
958 | } | ||
957 | 959 | ||
958 | buf_len = start + ret_val; | 960 | buf_len = start + ret_val; |
959 | /* At this point buf_len aligns to 4n, so (buf_len & 4) pads to 8n */ | 961 | /* At this point buf_len aligns to 4n, so (buf_len & 4) pads to 8n */ |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 776d145113e1..704274cbd495 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -519,8 +519,6 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb, | |||
519 | gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags, | 519 | gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags, |
520 | protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno)); | 520 | protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno)); |
521 | 521 | ||
522 | skb_set_inner_protocol(skb, protocol); | ||
523 | |||
524 | return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu, | 522 | return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu, |
525 | NEXTHDR_GRE); | 523 | NEXTHDR_GRE); |
526 | } | 524 | } |
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index fed40d1ec29b..0900352c924c 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c | |||
@@ -55,7 +55,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
55 | struct icmp6hdr user_icmph; | 55 | struct icmp6hdr user_icmph; |
56 | int addr_type; | 56 | int addr_type; |
57 | struct in6_addr *daddr; | 57 | struct in6_addr *daddr; |
58 | int iif = 0; | 58 | int oif = 0; |
59 | struct flowi6 fl6; | 59 | struct flowi6 fl6; |
60 | int err; | 60 | int err; |
61 | struct dst_entry *dst; | 61 | struct dst_entry *dst; |
@@ -78,25 +78,30 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
78 | if (u->sin6_family != AF_INET6) { | 78 | if (u->sin6_family != AF_INET6) { |
79 | return -EAFNOSUPPORT; | 79 | return -EAFNOSUPPORT; |
80 | } | 80 | } |
81 | if (sk->sk_bound_dev_if && | ||
82 | sk->sk_bound_dev_if != u->sin6_scope_id) { | ||
83 | return -EINVAL; | ||
84 | } | ||
85 | daddr = &(u->sin6_addr); | 81 | daddr = &(u->sin6_addr); |
86 | iif = u->sin6_scope_id; | 82 | if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr))) |
83 | oif = u->sin6_scope_id; | ||
87 | } else { | 84 | } else { |
88 | if (sk->sk_state != TCP_ESTABLISHED) | 85 | if (sk->sk_state != TCP_ESTABLISHED) |
89 | return -EDESTADDRREQ; | 86 | return -EDESTADDRREQ; |
90 | daddr = &sk->sk_v6_daddr; | 87 | daddr = &sk->sk_v6_daddr; |
91 | } | 88 | } |
92 | 89 | ||
93 | if (!iif) | 90 | if (!oif) |
94 | iif = sk->sk_bound_dev_if; | 91 | oif = sk->sk_bound_dev_if; |
92 | |||
93 | if (!oif) | ||
94 | oif = np->sticky_pktinfo.ipi6_ifindex; | ||
95 | |||
96 | if (!oif && ipv6_addr_is_multicast(daddr)) | ||
97 | oif = np->mcast_oif; | ||
98 | else if (!oif) | ||
99 | oif = np->ucast_oif; | ||
95 | 100 | ||
96 | addr_type = ipv6_addr_type(daddr); | 101 | addr_type = ipv6_addr_type(daddr); |
97 | if (__ipv6_addr_needs_scope_id(addr_type) && !iif) | 102 | if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) || |
98 | return -EINVAL; | 103 | (addr_type & IPV6_ADDR_MAPPED) || |
99 | if (addr_type & IPV6_ADDR_MAPPED) | 104 | (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if)) |
100 | return -EINVAL; | 105 | return -EINVAL; |
101 | 106 | ||
102 | /* TODO: use ip6_datagram_send_ctl to get options from cmsg */ | 107 | /* TODO: use ip6_datagram_send_ctl to get options from cmsg */ |
@@ -106,16 +111,12 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
106 | fl6.flowi6_proto = IPPROTO_ICMPV6; | 111 | fl6.flowi6_proto = IPPROTO_ICMPV6; |
107 | fl6.saddr = np->saddr; | 112 | fl6.saddr = np->saddr; |
108 | fl6.daddr = *daddr; | 113 | fl6.daddr = *daddr; |
114 | fl6.flowi6_oif = oif; | ||
109 | fl6.flowi6_mark = sk->sk_mark; | 115 | fl6.flowi6_mark = sk->sk_mark; |
110 | fl6.fl6_icmp_type = user_icmph.icmp6_type; | 116 | fl6.fl6_icmp_type = user_icmph.icmp6_type; |
111 | fl6.fl6_icmp_code = user_icmph.icmp6_code; | 117 | fl6.fl6_icmp_code = user_icmph.icmp6_code; |
112 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 118 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
113 | 119 | ||
114 | if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) | ||
115 | fl6.flowi6_oif = np->mcast_oif; | ||
116 | else if (!fl6.flowi6_oif) | ||
117 | fl6.flowi6_oif = np->ucast_oif; | ||
118 | |||
119 | ipc6.tclass = np->tclass; | 120 | ipc6.tclass = np->tclass; |
120 | fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); | 121 | fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); |
121 | 122 | ||
diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 4a7ae32afa09..1138eaf5c682 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c | |||
@@ -185,8 +185,12 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv, | |||
185 | 185 | ||
186 | self->magic = IAS_MAGIC; | 186 | self->magic = IAS_MAGIC; |
187 | self->mode = mode; | 187 | self->mode = mode; |
188 | if (mode == IAS_CLIENT) | 188 | if (mode == IAS_CLIENT) { |
189 | iriap_register_lsap(self, slsap_sel, mode); | 189 | if (iriap_register_lsap(self, slsap_sel, mode)) { |
190 | kfree(self); | ||
191 | return NULL; | ||
192 | } | ||
193 | } | ||
190 | 194 | ||
191 | self->confirm = callback; | 195 | self->confirm = callback; |
192 | self->priv = priv; | 196 | self->priv = priv; |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 47e99ab8d97a..543b1d4fc33d 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -869,7 +869,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) | |||
869 | 869 | ||
870 | /* free all potentially still buffered bcast frames */ | 870 | /* free all potentially still buffered bcast frames */ |
871 | local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf); | 871 | local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf); |
872 | skb_queue_purge(&sdata->u.ap.ps.bc_buf); | 872 | ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf); |
873 | 873 | ||
874 | mutex_lock(&local->mtx); | 874 | mutex_lock(&local->mtx); |
875 | ieee80211_vif_copy_chanctx_to_vlans(sdata, true); | 875 | ieee80211_vif_copy_chanctx_to_vlans(sdata, true); |
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 184473c257eb..ba5fc1f01e53 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
@@ -1094,7 +1094,7 @@ static inline u32 drv_get_expected_throughput(struct ieee80211_local *local, | |||
1094 | 1094 | ||
1095 | trace_drv_get_expected_throughput(sta); | 1095 | trace_drv_get_expected_throughput(sta); |
1096 | if (local->ops->get_expected_throughput) | 1096 | if (local->ops->get_expected_throughput) |
1097 | ret = local->ops->get_expected_throughput(sta); | 1097 | ret = local->ops->get_expected_throughput(&local->hw, sta); |
1098 | trace_drv_return_u32(local, ret); | 1098 | trace_drv_return_u32(local, ret); |
1099 | 1099 | ||
1100 | return ret; | 1100 | return ret; |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index c66411df9863..42120d965263 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -881,20 +881,22 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) | |||
881 | 881 | ||
882 | netif_carrier_off(sdata->dev); | 882 | netif_carrier_off(sdata->dev); |
883 | 883 | ||
884 | /* flush STAs and mpaths on this iface */ | ||
885 | sta_info_flush(sdata); | ||
886 | mesh_path_flush_by_iface(sdata); | ||
887 | |||
884 | /* stop the beacon */ | 888 | /* stop the beacon */ |
885 | ifmsh->mesh_id_len = 0; | 889 | ifmsh->mesh_id_len = 0; |
886 | sdata->vif.bss_conf.enable_beacon = false; | 890 | sdata->vif.bss_conf.enable_beacon = false; |
887 | clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); | 891 | clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); |
888 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); | 892 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); |
893 | |||
894 | /* remove beacon */ | ||
889 | bcn = rcu_dereference_protected(ifmsh->beacon, | 895 | bcn = rcu_dereference_protected(ifmsh->beacon, |
890 | lockdep_is_held(&sdata->wdev.mtx)); | 896 | lockdep_is_held(&sdata->wdev.mtx)); |
891 | RCU_INIT_POINTER(ifmsh->beacon, NULL); | 897 | RCU_INIT_POINTER(ifmsh->beacon, NULL); |
892 | kfree_rcu(bcn, rcu_head); | 898 | kfree_rcu(bcn, rcu_head); |
893 | 899 | ||
894 | /* flush STAs and mpaths on this iface */ | ||
895 | sta_info_flush(sdata); | ||
896 | mesh_path_flush_by_iface(sdata); | ||
897 | |||
898 | /* free all potentially still buffered group-addressed frames */ | 900 | /* free all potentially still buffered group-addressed frames */ |
899 | local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf); | 901 | local->total_ps_buffered -= skb_queue_len(&ifmsh->ps.bc_buf); |
900 | skb_queue_purge(&ifmsh->ps.bc_buf); | 902 | skb_queue_purge(&ifmsh->ps.bc_buf); |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 2e8a9024625a..9dce3b157908 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -1268,7 +1268,7 @@ static void sta_ps_start(struct sta_info *sta) | |||
1268 | for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { | 1268 | for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) { |
1269 | struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]); | 1269 | struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]); |
1270 | 1270 | ||
1271 | if (!txqi->tin.backlog_packets) | 1271 | if (txqi->tin.backlog_packets) |
1272 | set_bit(tid, &sta->txq_buffered_tids); | 1272 | set_bit(tid, &sta->txq_buffered_tids); |
1273 | else | 1273 | else |
1274 | clear_bit(tid, &sta->txq_buffered_tids); | 1274 | clear_bit(tid, &sta->txq_buffered_tids); |
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index c6d5c724e032..a2a68269675d 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
@@ -771,6 +771,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
771 | clear_sta_flag(sta, WLAN_STA_SP); | 771 | clear_sta_flag(sta, WLAN_STA_SP); |
772 | 772 | ||
773 | acked = !!(info->flags & IEEE80211_TX_STAT_ACK); | 773 | acked = !!(info->flags & IEEE80211_TX_STAT_ACK); |
774 | |||
775 | /* mesh Peer Service Period support */ | ||
776 | if (ieee80211_vif_is_mesh(&sta->sdata->vif) && | ||
777 | ieee80211_is_data_qos(fc)) | ||
778 | ieee80211_mpsp_trigger_process( | ||
779 | ieee80211_get_qos_ctl(hdr), sta, true, acked); | ||
780 | |||
774 | if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) { | 781 | if (!acked && test_sta_flag(sta, WLAN_STA_PS_STA)) { |
775 | /* | 782 | /* |
776 | * The STA is in power save mode, so assume | 783 | * The STA is in power save mode, so assume |
@@ -781,13 +788,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) | |||
781 | return; | 788 | return; |
782 | } | 789 | } |
783 | 790 | ||
784 | /* mesh Peer Service Period support */ | ||
785 | if (ieee80211_vif_is_mesh(&sta->sdata->vif) && | ||
786 | ieee80211_is_data_qos(fc)) | ||
787 | ieee80211_mpsp_trigger_process( | ||
788 | ieee80211_get_qos_ctl(hdr), | ||
789 | sta, true, acked); | ||
790 | |||
791 | if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) && | 791 | if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL) && |
792 | (ieee80211_is_data(hdr->frame_control)) && | 792 | (ieee80211_is_data(hdr->frame_control)) && |
793 | (rates_idx != -1)) | 793 | (rates_idx != -1)) |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 91461c415525..502396694f47 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -368,7 +368,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local) | |||
368 | skb = skb_dequeue(&ps->bc_buf); | 368 | skb = skb_dequeue(&ps->bc_buf); |
369 | if (skb) { | 369 | if (skb) { |
370 | purged++; | 370 | purged++; |
371 | dev_kfree_skb(skb); | 371 | ieee80211_free_txskb(&local->hw, skb); |
372 | } | 372 | } |
373 | total += skb_queue_len(&ps->bc_buf); | 373 | total += skb_queue_len(&ps->bc_buf); |
374 | } | 374 | } |
@@ -451,7 +451,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) | |||
451 | if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) { | 451 | if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) { |
452 | ps_dbg(tx->sdata, | 452 | ps_dbg(tx->sdata, |
453 | "BC TX buffer full - dropping the oldest frame\n"); | 453 | "BC TX buffer full - dropping the oldest frame\n"); |
454 | dev_kfree_skb(skb_dequeue(&ps->bc_buf)); | 454 | ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf)); |
455 | } else | 455 | } else |
456 | tx->local->total_ps_buffered++; | 456 | tx->local->total_ps_buffered++; |
457 | 457 | ||
@@ -4275,7 +4275,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw, | |||
4275 | sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); | 4275 | sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); |
4276 | if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb)) | 4276 | if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb)) |
4277 | break; | 4277 | break; |
4278 | dev_kfree_skb_any(skb); | 4278 | ieee80211_free_txskb(hw, skb); |
4279 | } | 4279 | } |
4280 | 4280 | ||
4281 | info = IEEE80211_SKB_CB(skb); | 4281 | info = IEEE80211_SKB_CB(skb); |
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 9e3693128313..f8dbacf66795 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c | |||
@@ -574,7 +574,7 @@ static int exp_seq_show(struct seq_file *s, void *v) | |||
574 | helper = rcu_dereference(nfct_help(expect->master)->helper); | 574 | helper = rcu_dereference(nfct_help(expect->master)->helper); |
575 | if (helper) { | 575 | if (helper) { |
576 | seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name); | 576 | seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name); |
577 | if (helper->expect_policy[expect->class].name) | 577 | if (helper->expect_policy[expect->class].name[0]) |
578 | seq_printf(s, "/%s", | 578 | seq_printf(s, "/%s", |
579 | helper->expect_policy[expect->class].name); | 579 | helper->expect_policy[expect->class].name); |
580 | } | 580 | } |
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index bb77a97961bf..5c0db5c64734 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c | |||
@@ -1473,7 +1473,8 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct, | |||
1473 | "timeout to %u seconds for", | 1473 | "timeout to %u seconds for", |
1474 | info->timeout); | 1474 | info->timeout); |
1475 | nf_ct_dump_tuple(&exp->tuple); | 1475 | nf_ct_dump_tuple(&exp->tuple); |
1476 | mod_timer(&exp->timeout, jiffies + info->timeout * HZ); | 1476 | mod_timer_pending(&exp->timeout, |
1477 | jiffies + info->timeout * HZ); | ||
1477 | } | 1478 | } |
1478 | spin_unlock_bh(&nf_conntrack_expect_lock); | 1479 | spin_unlock_bh(&nf_conntrack_expect_lock); |
1479 | } | 1480 | } |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 050bb3420a6b..fdfc71f416b7 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -1894,6 +1894,8 @@ static int ctnetlink_new_conntrack(struct net *net, struct sock *ctnl, | |||
1894 | 1894 | ||
1895 | if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY]) | 1895 | if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY]) |
1896 | return -EINVAL; | 1896 | return -EINVAL; |
1897 | if (otuple.dst.protonum != rtuple.dst.protonum) | ||
1898 | return -EINVAL; | ||
1897 | 1899 | ||
1898 | ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple, | 1900 | ct = ctnetlink_create_conntrack(net, &zone, cda, &otuple, |
1899 | &rtuple, u3); | 1901 | &rtuple, u3); |
@@ -2362,12 +2364,8 @@ ctnetlink_glue_attach_expect(const struct nlattr *attr, struct nf_conn *ct, | |||
2362 | return PTR_ERR(exp); | 2364 | return PTR_ERR(exp); |
2363 | 2365 | ||
2364 | err = nf_ct_expect_related_report(exp, portid, report); | 2366 | err = nf_ct_expect_related_report(exp, portid, report); |
2365 | if (err < 0) { | 2367 | nf_ct_expect_put(exp); |
2366 | nf_ct_expect_put(exp); | 2368 | return err; |
2367 | return err; | ||
2368 | } | ||
2369 | |||
2370 | return 0; | ||
2371 | } | 2369 | } |
2372 | 2370 | ||
2373 | static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct, | 2371 | static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct, |
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 8d9db9d4702b..7d77217de6a3 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
@@ -1383,7 +1383,7 @@ static int process_sip_response(struct sk_buff *skb, unsigned int protoff, | |||
1383 | return NF_DROP; | 1383 | return NF_DROP; |
1384 | } | 1384 | } |
1385 | cseq = simple_strtoul(*dptr + matchoff, NULL, 10); | 1385 | cseq = simple_strtoul(*dptr + matchoff, NULL, 10); |
1386 | if (!cseq) { | 1386 | if (!cseq && *(*dptr + matchoff) != '0') { |
1387 | nf_ct_helper_log(skb, ct, "cannot get cseq"); | 1387 | nf_ct_helper_log(skb, ct, "cannot get cseq"); |
1388 | return NF_DROP; | 1388 | return NF_DROP; |
1389 | } | 1389 | } |
@@ -1446,7 +1446,7 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff, | |||
1446 | return NF_DROP; | 1446 | return NF_DROP; |
1447 | } | 1447 | } |
1448 | cseq = simple_strtoul(*dptr + matchoff, NULL, 10); | 1448 | cseq = simple_strtoul(*dptr + matchoff, NULL, 10); |
1449 | if (!cseq) { | 1449 | if (!cseq && *(*dptr + matchoff) != '0') { |
1450 | nf_ct_helper_log(skb, ct, "cannot get cseq"); | 1450 | nf_ct_helper_log(skb, ct, "cannot get cseq"); |
1451 | return NF_DROP; | 1451 | return NF_DROP; |
1452 | } | 1452 | } |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 5d36a0926b4a..f49f45081acb 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -1145,10 +1145,8 @@ static int nfqnl_recv_verdict(struct net *net, struct sock *ctnl, | |||
1145 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); | 1145 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); |
1146 | int err; | 1146 | int err; |
1147 | 1147 | ||
1148 | queue = instance_lookup(q, queue_num); | 1148 | queue = verdict_instance_lookup(q, queue_num, |
1149 | if (!queue) | 1149 | NETLINK_CB(skb).portid); |
1150 | queue = verdict_instance_lookup(q, queue_num, | ||
1151 | NETLINK_CB(skb).portid); | ||
1152 | if (IS_ERR(queue)) | 1150 | if (IS_ERR(queue)) |
1153 | return PTR_ERR(queue); | 1151 | return PTR_ERR(queue); |
1154 | 1152 | ||
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index ba7aed13e174..82c264e40278 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c | |||
@@ -59,6 +59,7 @@ static int nft_exthdr_init(const struct nft_ctx *ctx, | |||
59 | const struct nlattr * const tb[]) | 59 | const struct nlattr * const tb[]) |
60 | { | 60 | { |
61 | struct nft_exthdr *priv = nft_expr_priv(expr); | 61 | struct nft_exthdr *priv = nft_expr_priv(expr); |
62 | u32 offset, len; | ||
62 | 63 | ||
63 | if (tb[NFTA_EXTHDR_DREG] == NULL || | 64 | if (tb[NFTA_EXTHDR_DREG] == NULL || |
64 | tb[NFTA_EXTHDR_TYPE] == NULL || | 65 | tb[NFTA_EXTHDR_TYPE] == NULL || |
@@ -66,9 +67,15 @@ static int nft_exthdr_init(const struct nft_ctx *ctx, | |||
66 | tb[NFTA_EXTHDR_LEN] == NULL) | 67 | tb[NFTA_EXTHDR_LEN] == NULL) |
67 | return -EINVAL; | 68 | return -EINVAL; |
68 | 69 | ||
70 | offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET])); | ||
71 | len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN])); | ||
72 | |||
73 | if (offset > U8_MAX || len > U8_MAX) | ||
74 | return -ERANGE; | ||
75 | |||
69 | priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]); | 76 | priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]); |
70 | priv->offset = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OFFSET])); | 77 | priv->offset = offset; |
71 | priv->len = ntohl(nla_get_be32(tb[NFTA_EXTHDR_LEN])); | 78 | priv->len = len; |
72 | priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]); | 79 | priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]); |
73 | 80 | ||
74 | return nft_validate_register_store(ctx, priv->dreg, NULL, | 81 | return nft_validate_register_store(ctx, priv->dreg, NULL, |
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c index 6473936d05c6..ffe9ae062d23 100644 --- a/net/netfilter/nft_rbtree.c +++ b/net/netfilter/nft_rbtree.c | |||
@@ -70,7 +70,6 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, | |||
70 | } else if (d > 0) | 70 | } else if (d > 0) |
71 | parent = parent->rb_right; | 71 | parent = parent->rb_right; |
72 | else { | 72 | else { |
73 | found: | ||
74 | if (!nft_set_elem_active(&rbe->ext, genmask)) { | 73 | if (!nft_set_elem_active(&rbe->ext, genmask)) { |
75 | parent = parent->rb_left; | 74 | parent = parent->rb_left; |
76 | continue; | 75 | continue; |
@@ -84,9 +83,12 @@ found: | |||
84 | } | 83 | } |
85 | } | 84 | } |
86 | 85 | ||
87 | if (set->flags & NFT_SET_INTERVAL && interval != NULL) { | 86 | if (set->flags & NFT_SET_INTERVAL && interval != NULL && |
88 | rbe = interval; | 87 | nft_set_elem_active(&interval->ext, genmask) && |
89 | goto found; | 88 | !nft_rbtree_interval_end(interval)) { |
89 | spin_unlock_bh(&nft_rbtree_lock); | ||
90 | *ext = &interval->ext; | ||
91 | return true; | ||
90 | } | 92 | } |
91 | out: | 93 | out: |
92 | spin_unlock_bh(&nft_rbtree_lock); | 94 | spin_unlock_bh(&nft_rbtree_lock); |
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index c644c78ed485..e054a748ff25 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c | |||
@@ -433,7 +433,6 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone, | |||
433 | struct nf_conntrack_l4proto *l4proto; | 433 | struct nf_conntrack_l4proto *l4proto; |
434 | struct nf_conntrack_tuple tuple; | 434 | struct nf_conntrack_tuple tuple; |
435 | struct nf_conntrack_tuple_hash *h; | 435 | struct nf_conntrack_tuple_hash *h; |
436 | enum ip_conntrack_info ctinfo; | ||
437 | struct nf_conn *ct; | 436 | struct nf_conn *ct; |
438 | unsigned int dataoff; | 437 | unsigned int dataoff; |
439 | u8 protonum; | 438 | u8 protonum; |
@@ -458,13 +457,8 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone, | |||
458 | 457 | ||
459 | ct = nf_ct_tuplehash_to_ctrack(h); | 458 | ct = nf_ct_tuplehash_to_ctrack(h); |
460 | 459 | ||
461 | ctinfo = ovs_ct_get_info(h); | ||
462 | if (ctinfo == IP_CT_NEW) { | ||
463 | /* This should not happen. */ | ||
464 | WARN_ONCE(1, "ovs_ct_find_existing: new packet for %p\n", ct); | ||
465 | } | ||
466 | skb->nfct = &ct->ct_general; | 460 | skb->nfct = &ct->ct_general; |
467 | skb->nfctinfo = ctinfo; | 461 | skb->nfctinfo = ovs_ct_get_info(h); |
468 | return ct; | 462 | return ct; |
469 | } | 463 | } |
470 | 464 | ||
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c index 1a1fcec88695..5aaf3babfc3f 100644 --- a/net/openvswitch/vport-geneve.c +++ b/net/openvswitch/vport-geneve.c | |||
@@ -93,7 +93,14 @@ static struct vport *geneve_tnl_create(const struct vport_parms *parms) | |||
93 | return ERR_CAST(dev); | 93 | return ERR_CAST(dev); |
94 | } | 94 | } |
95 | 95 | ||
96 | dev_change_flags(dev, dev->flags | IFF_UP); | 96 | err = dev_change_flags(dev, dev->flags | IFF_UP); |
97 | if (err < 0) { | ||
98 | rtnl_delete_link(dev); | ||
99 | rtnl_unlock(); | ||
100 | ovs_vport_free(vport); | ||
101 | goto error; | ||
102 | } | ||
103 | |||
97 | rtnl_unlock(); | 104 | rtnl_unlock(); |
98 | return vport; | 105 | return vport; |
99 | error: | 106 | error: |
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index 7f8897f33a67..0e72d95b0e8f 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c | |||
@@ -54,6 +54,7 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms) | |||
54 | struct net *net = ovs_dp_get_net(parms->dp); | 54 | struct net *net = ovs_dp_get_net(parms->dp); |
55 | struct net_device *dev; | 55 | struct net_device *dev; |
56 | struct vport *vport; | 56 | struct vport *vport; |
57 | int err; | ||
57 | 58 | ||
58 | vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms); | 59 | vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms); |
59 | if (IS_ERR(vport)) | 60 | if (IS_ERR(vport)) |
@@ -67,9 +68,15 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms) | |||
67 | return ERR_CAST(dev); | 68 | return ERR_CAST(dev); |
68 | } | 69 | } |
69 | 70 | ||
70 | dev_change_flags(dev, dev->flags | IFF_UP); | 71 | err = dev_change_flags(dev, dev->flags | IFF_UP); |
71 | rtnl_unlock(); | 72 | if (err < 0) { |
73 | rtnl_delete_link(dev); | ||
74 | rtnl_unlock(); | ||
75 | ovs_vport_free(vport); | ||
76 | return ERR_PTR(err); | ||
77 | } | ||
72 | 78 | ||
79 | rtnl_unlock(); | ||
73 | return vport; | 80 | return vport; |
74 | } | 81 | } |
75 | 82 | ||
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 434e04c3a189..95c36147a6e1 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c | |||
@@ -140,7 +140,7 @@ internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) | |||
140 | 140 | ||
141 | static void internal_set_rx_headroom(struct net_device *dev, int new_hr) | 141 | static void internal_set_rx_headroom(struct net_device *dev, int new_hr) |
142 | { | 142 | { |
143 | dev->needed_headroom = new_hr; | 143 | dev->needed_headroom = new_hr < 0 ? 0 : new_hr; |
144 | } | 144 | } |
145 | 145 | ||
146 | static const struct net_device_ops internal_dev_netdev_ops = { | 146 | static const struct net_device_ops internal_dev_netdev_ops = { |
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index 5eb7694348b5..7eb955e453e6 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c | |||
@@ -130,7 +130,14 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms) | |||
130 | return ERR_CAST(dev); | 130 | return ERR_CAST(dev); |
131 | } | 131 | } |
132 | 132 | ||
133 | dev_change_flags(dev, dev->flags | IFF_UP); | 133 | err = dev_change_flags(dev, dev->flags | IFF_UP); |
134 | if (err < 0) { | ||
135 | rtnl_delete_link(dev); | ||
136 | rtnl_unlock(); | ||
137 | ovs_vport_free(vport); | ||
138 | goto error; | ||
139 | } | ||
140 | |||
134 | rtnl_unlock(); | 141 | rtnl_unlock(); |
135 | return vport; | 142 | return vport; |
136 | error: | 143 | error: |
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 1bb9e7ac9e14..ff83fb1ddd47 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h | |||
@@ -425,6 +425,7 @@ struct rxrpc_call { | |||
425 | spinlock_t lock; | 425 | spinlock_t lock; |
426 | rwlock_t state_lock; /* lock for state transition */ | 426 | rwlock_t state_lock; /* lock for state transition */ |
427 | atomic_t usage; | 427 | atomic_t usage; |
428 | atomic_t skb_count; /* Outstanding packets on this call */ | ||
428 | atomic_t sequence; /* Tx data packet sequence counter */ | 429 | atomic_t sequence; /* Tx data packet sequence counter */ |
429 | u32 local_abort; /* local abort code */ | 430 | u32 local_abort; /* local abort code */ |
430 | u32 remote_abort; /* remote abort code */ | 431 | u32 remote_abort; /* remote abort code */ |
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index 0b2832141bd0..9bae21e66d65 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c | |||
@@ -130,6 +130,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local, | |||
130 | call->state = RXRPC_CALL_SERVER_ACCEPTING; | 130 | call->state = RXRPC_CALL_SERVER_ACCEPTING; |
131 | list_add_tail(&call->accept_link, &rx->acceptq); | 131 | list_add_tail(&call->accept_link, &rx->acceptq); |
132 | rxrpc_get_call(call); | 132 | rxrpc_get_call(call); |
133 | atomic_inc(&call->skb_count); | ||
133 | nsp = rxrpc_skb(notification); | 134 | nsp = rxrpc_skb(notification); |
134 | nsp->call = call; | 135 | nsp->call = call; |
135 | 136 | ||
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index fc32aa5764a2..e60cf65c2232 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c | |||
@@ -460,6 +460,7 @@ static void rxrpc_insert_oos_packet(struct rxrpc_call *call, | |||
460 | ASSERTCMP(sp->call, ==, NULL); | 460 | ASSERTCMP(sp->call, ==, NULL); |
461 | sp->call = call; | 461 | sp->call = call; |
462 | rxrpc_get_call(call); | 462 | rxrpc_get_call(call); |
463 | atomic_inc(&call->skb_count); | ||
463 | 464 | ||
464 | /* insert into the buffer in sequence order */ | 465 | /* insert into the buffer in sequence order */ |
465 | spin_lock_bh(&call->lock); | 466 | spin_lock_bh(&call->lock); |
@@ -734,6 +735,7 @@ all_acked: | |||
734 | skb->mark = RXRPC_SKB_MARK_FINAL_ACK; | 735 | skb->mark = RXRPC_SKB_MARK_FINAL_ACK; |
735 | sp->call = call; | 736 | sp->call = call; |
736 | rxrpc_get_call(call); | 737 | rxrpc_get_call(call); |
738 | atomic_inc(&call->skb_count); | ||
737 | spin_lock_bh(&call->lock); | 739 | spin_lock_bh(&call->lock); |
738 | if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0) | 740 | if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0) |
739 | BUG(); | 741 | BUG(); |
@@ -793,6 +795,7 @@ static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error, | |||
793 | sp->error = error; | 795 | sp->error = error; |
794 | sp->call = call; | 796 | sp->call = call; |
795 | rxrpc_get_call(call); | 797 | rxrpc_get_call(call); |
798 | atomic_inc(&call->skb_count); | ||
796 | 799 | ||
797 | spin_lock_bh(&call->lock); | 800 | spin_lock_bh(&call->lock); |
798 | ret = rxrpc_queue_rcv_skb(call, skb, true, fatal); | 801 | ret = rxrpc_queue_rcv_skb(call, skb, true, fatal); |
@@ -834,6 +837,9 @@ void rxrpc_process_call(struct work_struct *work) | |||
834 | return; | 837 | return; |
835 | } | 838 | } |
836 | 839 | ||
840 | if (!call->conn) | ||
841 | goto skip_msg_init; | ||
842 | |||
837 | /* there's a good chance we're going to have to send a message, so set | 843 | /* there's a good chance we're going to have to send a message, so set |
838 | * one up in advance */ | 844 | * one up in advance */ |
839 | msg.msg_name = &call->conn->params.peer->srx.transport; | 845 | msg.msg_name = &call->conn->params.peer->srx.transport; |
@@ -856,6 +862,7 @@ void rxrpc_process_call(struct work_struct *work) | |||
856 | memset(iov, 0, sizeof(iov)); | 862 | memset(iov, 0, sizeof(iov)); |
857 | iov[0].iov_base = &whdr; | 863 | iov[0].iov_base = &whdr; |
858 | iov[0].iov_len = sizeof(whdr); | 864 | iov[0].iov_len = sizeof(whdr); |
865 | skip_msg_init: | ||
859 | 866 | ||
860 | /* deal with events of a final nature */ | 867 | /* deal with events of a final nature */ |
861 | if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) { | 868 | if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) { |
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 91287c9d01bb..ae057e0740f3 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c | |||
@@ -275,6 +275,7 @@ error: | |||
275 | list_del_init(&call->link); | 275 | list_del_init(&call->link); |
276 | write_unlock_bh(&rxrpc_call_lock); | 276 | write_unlock_bh(&rxrpc_call_lock); |
277 | 277 | ||
278 | set_bit(RXRPC_CALL_RELEASED, &call->flags); | ||
278 | call->state = RXRPC_CALL_DEAD; | 279 | call->state = RXRPC_CALL_DEAD; |
279 | rxrpc_put_call(call); | 280 | rxrpc_put_call(call); |
280 | _leave(" = %d", ret); | 281 | _leave(" = %d", ret); |
@@ -287,6 +288,7 @@ error: | |||
287 | */ | 288 | */ |
288 | found_user_ID_now_present: | 289 | found_user_ID_now_present: |
289 | write_unlock(&rx->call_lock); | 290 | write_unlock(&rx->call_lock); |
291 | set_bit(RXRPC_CALL_RELEASED, &call->flags); | ||
290 | call->state = RXRPC_CALL_DEAD; | 292 | call->state = RXRPC_CALL_DEAD; |
291 | rxrpc_put_call(call); | 293 | rxrpc_put_call(call); |
292 | _leave(" = -EEXIST [%p]", call); | 294 | _leave(" = -EEXIST [%p]", call); |
@@ -491,15 +493,9 @@ void rxrpc_release_call(struct rxrpc_call *call) | |||
491 | spin_lock_bh(&call->lock); | 493 | spin_lock_bh(&call->lock); |
492 | while ((skb = skb_dequeue(&call->rx_queue)) || | 494 | while ((skb = skb_dequeue(&call->rx_queue)) || |
493 | (skb = skb_dequeue(&call->rx_oos_queue))) { | 495 | (skb = skb_dequeue(&call->rx_oos_queue))) { |
494 | sp = rxrpc_skb(skb); | ||
495 | if (sp->call) { | ||
496 | ASSERTCMP(sp->call, ==, call); | ||
497 | rxrpc_put_call(call); | ||
498 | sp->call = NULL; | ||
499 | } | ||
500 | skb->destructor = NULL; | ||
501 | spin_unlock_bh(&call->lock); | 496 | spin_unlock_bh(&call->lock); |
502 | 497 | ||
498 | sp = rxrpc_skb(skb); | ||
503 | _debug("- zap %s %%%u #%u", | 499 | _debug("- zap %s %%%u #%u", |
504 | rxrpc_pkts[sp->hdr.type], | 500 | rxrpc_pkts[sp->hdr.type], |
505 | sp->hdr.serial, sp->hdr.seq); | 501 | sp->hdr.serial, sp->hdr.seq); |
@@ -605,6 +601,7 @@ void __rxrpc_put_call(struct rxrpc_call *call) | |||
605 | 601 | ||
606 | if (atomic_dec_and_test(&call->usage)) { | 602 | if (atomic_dec_and_test(&call->usage)) { |
607 | _debug("call %d dead", call->debug_id); | 603 | _debug("call %d dead", call->debug_id); |
604 | WARN_ON(atomic_read(&call->skb_count) != 0); | ||
608 | ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); | 605 | ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD); |
609 | rxrpc_queue_work(&call->destroyer); | 606 | rxrpc_queue_work(&call->destroyer); |
610 | } | 607 | } |
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 991a20d25093..70bb77818dea 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c | |||
@@ -55,9 +55,6 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb, | |||
55 | if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) { | 55 | if (test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) { |
56 | _debug("already terminated"); | 56 | _debug("already terminated"); |
57 | ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE); | 57 | ASSERTCMP(call->state, >=, RXRPC_CALL_COMPLETE); |
58 | skb->destructor = NULL; | ||
59 | sp->call = NULL; | ||
60 | rxrpc_put_call(call); | ||
61 | rxrpc_free_skb(skb); | 58 | rxrpc_free_skb(skb); |
62 | return 0; | 59 | return 0; |
63 | } | 60 | } |
@@ -111,13 +108,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb, | |||
111 | ret = 0; | 108 | ret = 0; |
112 | 109 | ||
113 | out: | 110 | out: |
114 | /* release the socket buffer */ | 111 | rxrpc_free_skb(skb); |
115 | if (skb) { | ||
116 | skb->destructor = NULL; | ||
117 | sp->call = NULL; | ||
118 | rxrpc_put_call(call); | ||
119 | rxrpc_free_skb(skb); | ||
120 | } | ||
121 | 112 | ||
122 | _leave(" = %d", ret); | 113 | _leave(" = %d", ret); |
123 | return ret; | 114 | return ret; |
@@ -133,11 +124,15 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call, | |||
133 | struct rxrpc_skb_priv *sp; | 124 | struct rxrpc_skb_priv *sp; |
134 | bool terminal; | 125 | bool terminal; |
135 | int ret, ackbit, ack; | 126 | int ret, ackbit, ack; |
127 | u32 serial; | ||
128 | u8 flags; | ||
136 | 129 | ||
137 | _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq); | 130 | _enter("{%u,%u},,{%u}", call->rx_data_post, call->rx_first_oos, seq); |
138 | 131 | ||
139 | sp = rxrpc_skb(skb); | 132 | sp = rxrpc_skb(skb); |
140 | ASSERTCMP(sp->call, ==, NULL); | 133 | ASSERTCMP(sp->call, ==, NULL); |
134 | flags = sp->hdr.flags; | ||
135 | serial = sp->hdr.serial; | ||
141 | 136 | ||
142 | spin_lock(&call->lock); | 137 | spin_lock(&call->lock); |
143 | 138 | ||
@@ -200,8 +195,9 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call, | |||
200 | 195 | ||
201 | sp->call = call; | 196 | sp->call = call; |
202 | rxrpc_get_call(call); | 197 | rxrpc_get_call(call); |
203 | terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) && | 198 | atomic_inc(&call->skb_count); |
204 | !(sp->hdr.flags & RXRPC_CLIENT_INITIATED)); | 199 | terminal = ((flags & RXRPC_LAST_PACKET) && |
200 | !(flags & RXRPC_CLIENT_INITIATED)); | ||
205 | ret = rxrpc_queue_rcv_skb(call, skb, false, terminal); | 201 | ret = rxrpc_queue_rcv_skb(call, skb, false, terminal); |
206 | if (ret < 0) { | 202 | if (ret < 0) { |
207 | if (ret == -ENOMEM || ret == -ENOBUFS) { | 203 | if (ret == -ENOMEM || ret == -ENOBUFS) { |
@@ -213,12 +209,13 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call, | |||
213 | } | 209 | } |
214 | 210 | ||
215 | skb = NULL; | 211 | skb = NULL; |
212 | sp = NULL; | ||
216 | 213 | ||
217 | _debug("post #%u", seq); | 214 | _debug("post #%u", seq); |
218 | ASSERTCMP(call->rx_data_post, ==, seq); | 215 | ASSERTCMP(call->rx_data_post, ==, seq); |
219 | call->rx_data_post++; | 216 | call->rx_data_post++; |
220 | 217 | ||
221 | if (sp->hdr.flags & RXRPC_LAST_PACKET) | 218 | if (flags & RXRPC_LAST_PACKET) |
222 | set_bit(RXRPC_CALL_RCVD_LAST, &call->flags); | 219 | set_bit(RXRPC_CALL_RCVD_LAST, &call->flags); |
223 | 220 | ||
224 | /* if we've reached an out of sequence packet then we need to drain | 221 | /* if we've reached an out of sequence packet then we need to drain |
@@ -234,7 +231,7 @@ static int rxrpc_fast_process_data(struct rxrpc_call *call, | |||
234 | 231 | ||
235 | spin_unlock(&call->lock); | 232 | spin_unlock(&call->lock); |
236 | atomic_inc(&call->ackr_not_idle); | 233 | atomic_inc(&call->ackr_not_idle); |
237 | rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, sp->hdr.serial, false); | 234 | rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false); |
238 | _leave(" = 0 [posted]"); | 235 | _leave(" = 0 [posted]"); |
239 | return 0; | 236 | return 0; |
240 | 237 | ||
@@ -247,7 +244,7 @@ out: | |||
247 | 244 | ||
248 | discard_and_ack: | 245 | discard_and_ack: |
249 | _debug("discard and ACK packet %p", skb); | 246 | _debug("discard and ACK packet %p", skb); |
250 | __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true); | 247 | __rxrpc_propose_ACK(call, ack, serial, true); |
251 | discard: | 248 | discard: |
252 | spin_unlock(&call->lock); | 249 | spin_unlock(&call->lock); |
253 | rxrpc_free_skb(skb); | 250 | rxrpc_free_skb(skb); |
@@ -255,7 +252,7 @@ discard: | |||
255 | return 0; | 252 | return 0; |
256 | 253 | ||
257 | enqueue_and_ack: | 254 | enqueue_and_ack: |
258 | __rxrpc_propose_ACK(call, ack, sp->hdr.serial, true); | 255 | __rxrpc_propose_ACK(call, ack, serial, true); |
259 | enqueue_packet: | 256 | enqueue_packet: |
260 | _net("defer skb %p", skb); | 257 | _net("defer skb %p", skb); |
261 | spin_unlock(&call->lock); | 258 | spin_unlock(&call->lock); |
@@ -575,13 +572,13 @@ done: | |||
575 | * post connection-level events to the connection | 572 | * post connection-level events to the connection |
576 | * - this includes challenges, responses and some aborts | 573 | * - this includes challenges, responses and some aborts |
577 | */ | 574 | */ |
578 | static bool rxrpc_post_packet_to_conn(struct rxrpc_connection *conn, | 575 | static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn, |
579 | struct sk_buff *skb) | 576 | struct sk_buff *skb) |
580 | { | 577 | { |
581 | _enter("%p,%p", conn, skb); | 578 | _enter("%p,%p", conn, skb); |
582 | 579 | ||
583 | skb_queue_tail(&conn->rx_queue, skb); | 580 | skb_queue_tail(&conn->rx_queue, skb); |
584 | return rxrpc_queue_conn(conn); | 581 | rxrpc_queue_conn(conn); |
585 | } | 582 | } |
586 | 583 | ||
587 | /* | 584 | /* |
@@ -702,7 +699,6 @@ void rxrpc_data_ready(struct sock *sk) | |||
702 | 699 | ||
703 | rcu_read_lock(); | 700 | rcu_read_lock(); |
704 | 701 | ||
705 | retry_find_conn: | ||
706 | conn = rxrpc_find_connection_rcu(local, skb); | 702 | conn = rxrpc_find_connection_rcu(local, skb); |
707 | if (!conn) | 703 | if (!conn) |
708 | goto cant_route_call; | 704 | goto cant_route_call; |
@@ -710,8 +706,7 @@ retry_find_conn: | |||
710 | if (sp->hdr.callNumber == 0) { | 706 | if (sp->hdr.callNumber == 0) { |
711 | /* Connection-level packet */ | 707 | /* Connection-level packet */ |
712 | _debug("CONN %p {%d}", conn, conn->debug_id); | 708 | _debug("CONN %p {%d}", conn, conn->debug_id); |
713 | if (!rxrpc_post_packet_to_conn(conn, skb)) | 709 | rxrpc_post_packet_to_conn(conn, skb); |
714 | goto retry_find_conn; | ||
715 | } else { | 710 | } else { |
716 | /* Call-bound packets are routed by connection channel. */ | 711 | /* Call-bound packets are routed by connection channel. */ |
717 | unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK; | 712 | unsigned int channel = sp->hdr.cid & RXRPC_CHANNELMASK; |
@@ -749,6 +744,8 @@ cant_route_call: | |||
749 | if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) { | 744 | if (sp->hdr.type != RXRPC_PACKET_TYPE_ABORT) { |
750 | _debug("reject type %d",sp->hdr.type); | 745 | _debug("reject type %d",sp->hdr.type); |
751 | rxrpc_reject_packet(local, skb); | 746 | rxrpc_reject_packet(local, skb); |
747 | } else { | ||
748 | rxrpc_free_skb(skb); | ||
752 | } | 749 | } |
753 | _leave(" [no call]"); | 750 | _leave(" [no call]"); |
754 | return; | 751 | return; |
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index a3fa2ed85d63..9ed66d533002 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c | |||
@@ -203,6 +203,9 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, | |||
203 | } | 203 | } |
204 | 204 | ||
205 | /* we transferred the whole data packet */ | 205 | /* we transferred the whole data packet */ |
206 | if (!(flags & MSG_PEEK)) | ||
207 | rxrpc_kernel_data_consumed(call, skb); | ||
208 | |||
206 | if (sp->hdr.flags & RXRPC_LAST_PACKET) { | 209 | if (sp->hdr.flags & RXRPC_LAST_PACKET) { |
207 | _debug("last"); | 210 | _debug("last"); |
208 | if (rxrpc_conn_is_client(call->conn)) { | 211 | if (rxrpc_conn_is_client(call->conn)) { |
@@ -360,28 +363,6 @@ wait_error: | |||
360 | } | 363 | } |
361 | 364 | ||
362 | /** | 365 | /** |
363 | * rxrpc_kernel_data_delivered - Record delivery of data message | ||
364 | * @skb: Message holding data | ||
365 | * | ||
366 | * Record the delivery of a data message. This permits RxRPC to keep its | ||
367 | * tracking correct. The socket buffer will be deleted. | ||
368 | */ | ||
369 | void rxrpc_kernel_data_delivered(struct sk_buff *skb) | ||
370 | { | ||
371 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | ||
372 | struct rxrpc_call *call = sp->call; | ||
373 | |||
374 | ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv); | ||
375 | ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1); | ||
376 | call->rx_data_recv = sp->hdr.seq; | ||
377 | |||
378 | ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten); | ||
379 | rxrpc_free_skb(skb); | ||
380 | } | ||
381 | |||
382 | EXPORT_SYMBOL(rxrpc_kernel_data_delivered); | ||
383 | |||
384 | /** | ||
385 | * rxrpc_kernel_is_data_last - Determine if data message is last one | 366 | * rxrpc_kernel_is_data_last - Determine if data message is last one |
386 | * @skb: Message holding data | 367 | * @skb: Message holding data |
387 | * | 368 | * |
diff --git a/net/rxrpc/skbuff.c b/net/rxrpc/skbuff.c index eee0cfd9ac8c..06c51d4b622d 100644 --- a/net/rxrpc/skbuff.c +++ b/net/rxrpc/skbuff.c | |||
@@ -98,11 +98,39 @@ static void rxrpc_hard_ACK_data(struct rxrpc_call *call, | |||
98 | spin_unlock_bh(&call->lock); | 98 | spin_unlock_bh(&call->lock); |
99 | } | 99 | } |
100 | 100 | ||
101 | /** | ||
102 | * rxrpc_kernel_data_consumed - Record consumption of data message | ||
103 | * @call: The call to which the message pertains. | ||
104 | * @skb: Message holding data | ||
105 | * | ||
106 | * Record the consumption of a data message and generate an ACK if appropriate. | ||
107 | * The call state is shifted if this was the final packet. The caller must be | ||
108 | * in process context with no spinlocks held. | ||
109 | * | ||
110 | * TODO: Actually generate the ACK here rather than punting this to the | ||
111 | * workqueue. | ||
112 | */ | ||
113 | void rxrpc_kernel_data_consumed(struct rxrpc_call *call, struct sk_buff *skb) | ||
114 | { | ||
115 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | ||
116 | |||
117 | _enter("%d,%p{%u}", call->debug_id, skb, sp->hdr.seq); | ||
118 | |||
119 | ASSERTCMP(sp->call, ==, call); | ||
120 | ASSERTCMP(sp->hdr.type, ==, RXRPC_PACKET_TYPE_DATA); | ||
121 | |||
122 | /* TODO: Fix the sequence number tracking */ | ||
123 | ASSERTCMP(sp->hdr.seq, >=, call->rx_data_recv); | ||
124 | ASSERTCMP(sp->hdr.seq, <=, call->rx_data_recv + 1); | ||
125 | ASSERTCMP(sp->hdr.seq, >, call->rx_data_eaten); | ||
126 | |||
127 | call->rx_data_recv = sp->hdr.seq; | ||
128 | rxrpc_hard_ACK_data(call, sp); | ||
129 | } | ||
130 | EXPORT_SYMBOL(rxrpc_kernel_data_consumed); | ||
131 | |||
101 | /* | 132 | /* |
102 | * destroy a packet that has an RxRPC control buffer | 133 | * Destroy a packet that has an RxRPC control buffer |
103 | * - advance the hard-ACK state of the parent call (done here in case something | ||
104 | * in the kernel bypasses recvmsg() and steals the packet directly off of the | ||
105 | * socket receive queue) | ||
106 | */ | 134 | */ |
107 | void rxrpc_packet_destructor(struct sk_buff *skb) | 135 | void rxrpc_packet_destructor(struct sk_buff *skb) |
108 | { | 136 | { |
@@ -112,9 +140,8 @@ void rxrpc_packet_destructor(struct sk_buff *skb) | |||
112 | _enter("%p{%p}", skb, call); | 140 | _enter("%p{%p}", skb, call); |
113 | 141 | ||
114 | if (call) { | 142 | if (call) { |
115 | /* send the final ACK on a client call */ | 143 | if (atomic_dec_return(&call->skb_count) < 0) |
116 | if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA) | 144 | BUG(); |
117 | rxrpc_hard_ACK_data(call, sp); | ||
118 | rxrpc_put_call(call); | 145 | rxrpc_put_call(call); |
119 | sp->call = NULL; | 146 | sp->call = NULL; |
120 | } | 147 | } |
diff --git a/net/sched/act_api.c b/net/sched/act_api.c index e4a5f2607ffa..d09d0687594b 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c | |||
@@ -64,7 +64,6 @@ int __tcf_hash_release(struct tc_action *p, bool bind, bool strict) | |||
64 | if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) { | 64 | if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) { |
65 | if (p->ops->cleanup) | 65 | if (p->ops->cleanup) |
66 | p->ops->cleanup(p, bind); | 66 | p->ops->cleanup(p, bind); |
67 | list_del(&p->list); | ||
68 | tcf_hash_destroy(p->hinfo, p); | 67 | tcf_hash_destroy(p->hinfo, p); |
69 | ret = ACT_P_DELETED; | 68 | ret = ACT_P_DELETED; |
70 | } | 69 | } |
@@ -421,18 +420,19 @@ static struct tc_action_ops *tc_lookup_action(struct nlattr *kind) | |||
421 | return res; | 420 | return res; |
422 | } | 421 | } |
423 | 422 | ||
424 | int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions, | 423 | int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, |
425 | struct tcf_result *res) | 424 | int nr_actions, struct tcf_result *res) |
426 | { | 425 | { |
427 | const struct tc_action *a; | 426 | int ret = -1, i; |
428 | int ret = -1; | ||
429 | 427 | ||
430 | if (skb->tc_verd & TC_NCLS) { | 428 | if (skb->tc_verd & TC_NCLS) { |
431 | skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); | 429 | skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); |
432 | ret = TC_ACT_OK; | 430 | ret = TC_ACT_OK; |
433 | goto exec_done; | 431 | goto exec_done; |
434 | } | 432 | } |
435 | list_for_each_entry(a, actions, list) { | 433 | for (i = 0; i < nr_actions; i++) { |
434 | const struct tc_action *a = actions[i]; | ||
435 | |||
436 | repeat: | 436 | repeat: |
437 | ret = a->ops->act(skb, a, res); | 437 | ret = a->ops->act(skb, a, res); |
438 | if (ret == TC_ACT_REPEAT) | 438 | if (ret == TC_ACT_REPEAT) |
@@ -754,16 +754,6 @@ err_out: | |||
754 | return ERR_PTR(err); | 754 | return ERR_PTR(err); |
755 | } | 755 | } |
756 | 756 | ||
757 | static void cleanup_a(struct list_head *actions) | ||
758 | { | ||
759 | struct tc_action *a, *tmp; | ||
760 | |||
761 | list_for_each_entry_safe(a, tmp, actions, list) { | ||
762 | list_del(&a->list); | ||
763 | kfree(a); | ||
764 | } | ||
765 | } | ||
766 | |||
767 | static int tca_action_flush(struct net *net, struct nlattr *nla, | 757 | static int tca_action_flush(struct net *net, struct nlattr *nla, |
768 | struct nlmsghdr *n, u32 portid) | 758 | struct nlmsghdr *n, u32 portid) |
769 | { | 759 | { |
@@ -905,7 +895,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, | |||
905 | return ret; | 895 | return ret; |
906 | } | 896 | } |
907 | err: | 897 | err: |
908 | cleanup_a(&actions); | 898 | tcf_action_destroy(&actions, 0); |
909 | return ret; | 899 | return ret; |
910 | } | 900 | } |
911 | 901 | ||
@@ -942,15 +932,9 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n, | |||
942 | 932 | ||
943 | ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions); | 933 | ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions); |
944 | if (ret) | 934 | if (ret) |
945 | goto done; | 935 | return ret; |
946 | 936 | ||
947 | /* dump then free all the actions after update; inserted policy | 937 | return tcf_add_notify(net, n, &actions, portid); |
948 | * stays intact | ||
949 | */ | ||
950 | ret = tcf_add_notify(net, n, &actions, portid); | ||
951 | cleanup_a(&actions); | ||
952 | done: | ||
953 | return ret; | ||
954 | } | 938 | } |
955 | 939 | ||
956 | static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n) | 940 | static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n) |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index b3c7e975fc9e..8a3be1d99775 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
@@ -63,49 +63,8 @@ static int tcf_act_police_walker(struct net *net, struct sk_buff *skb, | |||
63 | const struct tc_action_ops *ops) | 63 | const struct tc_action_ops *ops) |
64 | { | 64 | { |
65 | struct tc_action_net *tn = net_generic(net, police_net_id); | 65 | struct tc_action_net *tn = net_generic(net, police_net_id); |
66 | struct tcf_hashinfo *hinfo = tn->hinfo; | ||
67 | int err = 0, index = -1, i = 0, s_i = 0, n_i = 0; | ||
68 | struct nlattr *nest; | ||
69 | |||
70 | spin_lock_bh(&hinfo->lock); | ||
71 | |||
72 | s_i = cb->args[0]; | ||
73 | |||
74 | for (i = 0; i < (POL_TAB_MASK + 1); i++) { | ||
75 | struct hlist_head *head; | ||
76 | struct tc_action *p; | ||
77 | |||
78 | head = &hinfo->htab[tcf_hash(i, POL_TAB_MASK)]; | ||
79 | |||
80 | hlist_for_each_entry_rcu(p, head, tcfa_head) { | ||
81 | index++; | ||
82 | if (index < s_i) | ||
83 | continue; | ||
84 | nest = nla_nest_start(skb, index); | ||
85 | if (nest == NULL) | ||
86 | goto nla_put_failure; | ||
87 | if (type == RTM_DELACTION) | ||
88 | err = tcf_action_dump_1(skb, p, 0, 1); | ||
89 | else | ||
90 | err = tcf_action_dump_1(skb, p, 0, 0); | ||
91 | if (err < 0) { | ||
92 | index--; | ||
93 | nla_nest_cancel(skb, nest); | ||
94 | goto done; | ||
95 | } | ||
96 | nla_nest_end(skb, nest); | ||
97 | n_i++; | ||
98 | } | ||
99 | } | ||
100 | done: | ||
101 | spin_unlock_bh(&hinfo->lock); | ||
102 | if (n_i) | ||
103 | cb->args[0] += n_i; | ||
104 | return n_i; | ||
105 | 66 | ||
106 | nla_put_failure: | 67 | return tcf_generic_walker(tn, skb, cb, type, ops); |
107 | nla_nest_cancel(skb, nest); | ||
108 | goto done; | ||
109 | } | 68 | } |
110 | 69 | ||
111 | static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = { | 70 | static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = { |
@@ -125,6 +84,7 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla, | |||
125 | struct tcf_police *police; | 84 | struct tcf_police *police; |
126 | struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; | 85 | struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; |
127 | struct tc_action_net *tn = net_generic(net, police_net_id); | 86 | struct tc_action_net *tn = net_generic(net, police_net_id); |
87 | bool exists = false; | ||
128 | int size; | 88 | int size; |
129 | 89 | ||
130 | if (nla == NULL) | 90 | if (nla == NULL) |
@@ -139,24 +99,24 @@ static int tcf_act_police_init(struct net *net, struct nlattr *nla, | |||
139 | size = nla_len(tb[TCA_POLICE_TBF]); | 99 | size = nla_len(tb[TCA_POLICE_TBF]); |
140 | if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat)) | 100 | if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat)) |
141 | return -EINVAL; | 101 | return -EINVAL; |
102 | |||
142 | parm = nla_data(tb[TCA_POLICE_TBF]); | 103 | parm = nla_data(tb[TCA_POLICE_TBF]); |
104 | exists = tcf_hash_check(tn, parm->index, a, bind); | ||
105 | if (exists && bind) | ||
106 | return 0; | ||
143 | 107 | ||
144 | if (parm->index) { | 108 | if (!exists) { |
145 | if (tcf_hash_check(tn, parm->index, a, bind)) { | ||
146 | if (ovr) | ||
147 | goto override; | ||
148 | /* not replacing */ | ||
149 | return -EEXIST; | ||
150 | } | ||
151 | } else { | ||
152 | ret = tcf_hash_create(tn, parm->index, NULL, a, | 109 | ret = tcf_hash_create(tn, parm->index, NULL, a, |
153 | &act_police_ops, bind, false); | 110 | &act_police_ops, bind, false); |
154 | if (ret) | 111 | if (ret) |
155 | return ret; | 112 | return ret; |
156 | ret = ACT_P_CREATED; | 113 | ret = ACT_P_CREATED; |
114 | } else { | ||
115 | tcf_hash_release(*a, bind); | ||
116 | if (!ovr) | ||
117 | return -EEXIST; | ||
157 | } | 118 | } |
158 | 119 | ||
159 | override: | ||
160 | police = to_police(*a); | 120 | police = to_police(*a); |
161 | if (parm->rate.rate) { | 121 | if (parm->rate.rate) { |
162 | err = -ENOMEM; | 122 | err = -ENOMEM; |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 843a716a4303..a7c5645373af 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -541,8 +541,12 @@ out: | |||
541 | void tcf_exts_destroy(struct tcf_exts *exts) | 541 | void tcf_exts_destroy(struct tcf_exts *exts) |
542 | { | 542 | { |
543 | #ifdef CONFIG_NET_CLS_ACT | 543 | #ifdef CONFIG_NET_CLS_ACT |
544 | tcf_action_destroy(&exts->actions, TCA_ACT_UNBIND); | 544 | LIST_HEAD(actions); |
545 | INIT_LIST_HEAD(&exts->actions); | 545 | |
546 | tcf_exts_to_list(exts, &actions); | ||
547 | tcf_action_destroy(&actions, TCA_ACT_UNBIND); | ||
548 | kfree(exts->actions); | ||
549 | exts->nr_actions = 0; | ||
546 | #endif | 550 | #endif |
547 | } | 551 | } |
548 | EXPORT_SYMBOL(tcf_exts_destroy); | 552 | EXPORT_SYMBOL(tcf_exts_destroy); |
@@ -554,7 +558,6 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, | |||
554 | { | 558 | { |
555 | struct tc_action *act; | 559 | struct tc_action *act; |
556 | 560 | ||
557 | INIT_LIST_HEAD(&exts->actions); | ||
558 | if (exts->police && tb[exts->police]) { | 561 | if (exts->police && tb[exts->police]) { |
559 | act = tcf_action_init_1(net, tb[exts->police], rate_tlv, | 562 | act = tcf_action_init_1(net, tb[exts->police], rate_tlv, |
560 | "police", ovr, | 563 | "police", ovr, |
@@ -563,14 +566,20 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, | |||
563 | return PTR_ERR(act); | 566 | return PTR_ERR(act); |
564 | 567 | ||
565 | act->type = exts->type = TCA_OLD_COMPAT; | 568 | act->type = exts->type = TCA_OLD_COMPAT; |
566 | list_add(&act->list, &exts->actions); | 569 | exts->actions[0] = act; |
570 | exts->nr_actions = 1; | ||
567 | } else if (exts->action && tb[exts->action]) { | 571 | } else if (exts->action && tb[exts->action]) { |
568 | int err; | 572 | LIST_HEAD(actions); |
573 | int err, i = 0; | ||
574 | |||
569 | err = tcf_action_init(net, tb[exts->action], rate_tlv, | 575 | err = tcf_action_init(net, tb[exts->action], rate_tlv, |
570 | NULL, ovr, | 576 | NULL, ovr, |
571 | TCA_ACT_BIND, &exts->actions); | 577 | TCA_ACT_BIND, &actions); |
572 | if (err) | 578 | if (err) |
573 | return err; | 579 | return err; |
580 | list_for_each_entry(act, &actions, list) | ||
581 | exts->actions[i++] = act; | ||
582 | exts->nr_actions = i; | ||
574 | } | 583 | } |
575 | } | 584 | } |
576 | #else | 585 | #else |
@@ -587,37 +596,49 @@ void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, | |||
587 | struct tcf_exts *src) | 596 | struct tcf_exts *src) |
588 | { | 597 | { |
589 | #ifdef CONFIG_NET_CLS_ACT | 598 | #ifdef CONFIG_NET_CLS_ACT |
590 | LIST_HEAD(tmp); | 599 | struct tcf_exts old = *dst; |
600 | |||
591 | tcf_tree_lock(tp); | 601 | tcf_tree_lock(tp); |
592 | list_splice_init(&dst->actions, &tmp); | 602 | dst->nr_actions = src->nr_actions; |
593 | list_splice(&src->actions, &dst->actions); | 603 | dst->actions = src->actions; |
594 | dst->type = src->type; | 604 | dst->type = src->type; |
595 | tcf_tree_unlock(tp); | 605 | tcf_tree_unlock(tp); |
596 | tcf_action_destroy(&tmp, TCA_ACT_UNBIND); | 606 | |
607 | tcf_exts_destroy(&old); | ||
597 | #endif | 608 | #endif |
598 | } | 609 | } |
599 | EXPORT_SYMBOL(tcf_exts_change); | 610 | EXPORT_SYMBOL(tcf_exts_change); |
600 | 611 | ||
601 | #define tcf_exts_first_act(ext) \ | 612 | #ifdef CONFIG_NET_CLS_ACT |
602 | list_first_entry_or_null(&(exts)->actions, \ | 613 | static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts) |
603 | struct tc_action, list) | 614 | { |
615 | if (exts->nr_actions == 0) | ||
616 | return NULL; | ||
617 | else | ||
618 | return exts->actions[0]; | ||
619 | } | ||
620 | #endif | ||
604 | 621 | ||
605 | int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) | 622 | int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) |
606 | { | 623 | { |
607 | #ifdef CONFIG_NET_CLS_ACT | 624 | #ifdef CONFIG_NET_CLS_ACT |
608 | struct nlattr *nest; | 625 | struct nlattr *nest; |
609 | 626 | ||
610 | if (exts->action && !list_empty(&exts->actions)) { | 627 | if (exts->action && exts->nr_actions) { |
611 | /* | 628 | /* |
612 | * again for backward compatible mode - we want | 629 | * again for backward compatible mode - we want |
613 | * to work with both old and new modes of entering | 630 | * to work with both old and new modes of entering |
614 | * tc data even if iproute2 was newer - jhs | 631 | * tc data even if iproute2 was newer - jhs |
615 | */ | 632 | */ |
616 | if (exts->type != TCA_OLD_COMPAT) { | 633 | if (exts->type != TCA_OLD_COMPAT) { |
634 | LIST_HEAD(actions); | ||
635 | |||
617 | nest = nla_nest_start(skb, exts->action); | 636 | nest = nla_nest_start(skb, exts->action); |
618 | if (nest == NULL) | 637 | if (nest == NULL) |
619 | goto nla_put_failure; | 638 | goto nla_put_failure; |
620 | if (tcf_action_dump(skb, &exts->actions, 0, 0) < 0) | 639 | |
640 | tcf_exts_to_list(exts, &actions); | ||
641 | if (tcf_action_dump(skb, &actions, 0, 0) < 0) | ||
621 | goto nla_put_failure; | 642 | goto nla_put_failure; |
622 | nla_nest_end(skb, nest); | 643 | nla_nest_end(skb, nest); |
623 | } else if (exts->police) { | 644 | } else if (exts->police) { |
diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 4cb5aedfe3ee..ef8ba77a5bea 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c | |||
@@ -293,6 +293,7 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos) | |||
293 | return ERR_PTR(err); | 293 | return ERR_PTR(err); |
294 | } | 294 | } |
295 | 295 | ||
296 | iter->start_fail = 0; | ||
296 | return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); | 297 | return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos); |
297 | } | 298 | } |
298 | 299 | ||
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index f69edcf219e5..bb691538adc8 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c | |||
@@ -13,6 +13,7 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r, | |||
13 | { | 13 | { |
14 | union sctp_addr laddr, paddr; | 14 | union sctp_addr laddr, paddr; |
15 | struct dst_entry *dst; | 15 | struct dst_entry *dst; |
16 | struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer; | ||
16 | 17 | ||
17 | laddr = list_entry(asoc->base.bind_addr.address_list.next, | 18 | laddr = list_entry(asoc->base.bind_addr.address_list.next, |
18 | struct sctp_sockaddr_entry, list)->a; | 19 | struct sctp_sockaddr_entry, list)->a; |
@@ -40,10 +41,15 @@ static void inet_diag_msg_sctpasoc_fill(struct inet_diag_msg *r, | |||
40 | } | 41 | } |
41 | 42 | ||
42 | r->idiag_state = asoc->state; | 43 | r->idiag_state = asoc->state; |
43 | r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX; | 44 | if (timer_pending(t3_rtx)) { |
44 | r->idiag_retrans = asoc->rtx_data_chunks; | 45 | r->idiag_timer = SCTP_EVENT_TIMEOUT_T3_RTX; |
45 | r->idiag_expires = jiffies_to_msecs( | 46 | r->idiag_retrans = asoc->rtx_data_chunks; |
46 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] - jiffies); | 47 | r->idiag_expires = jiffies_to_msecs(t3_rtx->expires - jiffies); |
48 | } else { | ||
49 | r->idiag_timer = 0; | ||
50 | r->idiag_retrans = 0; | ||
51 | r->idiag_expires = 0; | ||
52 | } | ||
47 | } | 53 | } |
48 | 54 | ||
49 | static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb, | 55 | static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb, |
@@ -350,7 +356,7 @@ static int sctp_ep_dump(struct sctp_endpoint *ep, void *p) | |||
350 | if (cb->args[4] < cb->args[1]) | 356 | if (cb->args[4] < cb->args[1]) |
351 | goto next; | 357 | goto next; |
352 | 358 | ||
353 | if ((r->idiag_states & ~TCPF_LISTEN) && !list_empty(&ep->asocs)) | 359 | if (!(r->idiag_states & TCPF_LISTEN) && !list_empty(&ep->asocs)) |
354 | goto next; | 360 | goto next; |
355 | 361 | ||
356 | if (r->sdiag_family != AF_UNSPEC && | 362 | if (r->sdiag_family != AF_UNSPEC && |
@@ -465,7 +471,7 @@ skip: | |||
465 | * 3 : to mark if we have dumped the ep info of the current asoc | 471 | * 3 : to mark if we have dumped the ep info of the current asoc |
466 | * 4 : to work as a temporary variable to traversal list | 472 | * 4 : to work as a temporary variable to traversal list |
467 | */ | 473 | */ |
468 | if (!(idiag_states & ~TCPF_LISTEN)) | 474 | if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE))) |
469 | goto done; | 475 | goto done; |
470 | sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp); | 476 | sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp); |
471 | done: | 477 | done: |
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 1bc4f71aaba8..d85b803da11d 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c | |||
@@ -702,14 +702,14 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, | |||
702 | */ | 702 | */ |
703 | sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff)); | 703 | sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff)); |
704 | 704 | ||
705 | sctp_ulpevent_receive_data(event, asoc); | ||
706 | |||
707 | /* And hold the chunk as we need it for getting the IP headers | 705 | /* And hold the chunk as we need it for getting the IP headers |
708 | * later in recvmsg | 706 | * later in recvmsg |
709 | */ | 707 | */ |
710 | sctp_chunk_hold(chunk); | 708 | sctp_chunk_hold(chunk); |
711 | event->chunk = chunk; | 709 | event->chunk = chunk; |
712 | 710 | ||
711 | sctp_ulpevent_receive_data(event, asoc); | ||
712 | |||
713 | event->stream = ntohs(chunk->subh.data_hdr->stream); | 713 | event->stream = ntohs(chunk->subh.data_hdr->stream); |
714 | event->ssn = ntohs(chunk->subh.data_hdr->ssn); | 714 | event->ssn = ntohs(chunk->subh.data_hdr->ssn); |
715 | event->ppid = chunk->subh.data_hdr->ppid; | 715 | event->ppid = chunk->subh.data_hdr->ppid; |
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index b62caa1c770c..ed97a5876ebe 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c | |||
@@ -728,12 +728,13 @@ int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg, | |||
728 | u32 bearer_id, u32 *prev_node) | 728 | u32 bearer_id, u32 *prev_node) |
729 | { | 729 | { |
730 | struct tipc_monitor *mon = tipc_monitor(net, bearer_id); | 730 | struct tipc_monitor *mon = tipc_monitor(net, bearer_id); |
731 | struct tipc_peer *peer = mon->self; | 731 | struct tipc_peer *peer; |
732 | 732 | ||
733 | if (!mon) | 733 | if (!mon) |
734 | return -EINVAL; | 734 | return -EINVAL; |
735 | 735 | ||
736 | read_lock_bh(&mon->lock); | 736 | read_lock_bh(&mon->lock); |
737 | peer = mon->self; | ||
737 | do { | 738 | do { |
738 | if (*prev_node) { | 739 | if (*prev_node) { |
739 | if (peer->addr == *prev_node) | 740 | if (peer->addr == *prev_node) |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index c49b8df438cb..f9f5f3c3dab5 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -2180,7 +2180,8 @@ restart: | |||
2180 | TIPC_CONN_MSG, SHORT_H_SIZE, | 2180 | TIPC_CONN_MSG, SHORT_H_SIZE, |
2181 | 0, dnode, onode, dport, oport, | 2181 | 0, dnode, onode, dport, oport, |
2182 | TIPC_CONN_SHUTDOWN); | 2182 | TIPC_CONN_SHUTDOWN); |
2183 | tipc_node_xmit_skb(net, skb, dnode, tsk->portid); | 2183 | if (skb) |
2184 | tipc_node_xmit_skb(net, skb, dnode, tsk->portid); | ||
2184 | } | 2185 | } |
2185 | tsk->connected = 0; | 2186 | tsk->connected = 0; |
2186 | sock->state = SS_DISCONNECTING; | 2187 | sock->state = SS_DISCONNECTING; |
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 699dfabdbccd..936d7eee62d0 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c | |||
@@ -87,9 +87,6 @@ virtio_transport_send_pkt_work(struct work_struct *work) | |||
87 | 87 | ||
88 | vq = vsock->vqs[VSOCK_VQ_TX]; | 88 | vq = vsock->vqs[VSOCK_VQ_TX]; |
89 | 89 | ||
90 | /* Avoid unnecessary interrupts while we're processing the ring */ | ||
91 | virtqueue_disable_cb(vq); | ||
92 | |||
93 | for (;;) { | 90 | for (;;) { |
94 | struct virtio_vsock_pkt *pkt; | 91 | struct virtio_vsock_pkt *pkt; |
95 | struct scatterlist hdr, buf, *sgs[2]; | 92 | struct scatterlist hdr, buf, *sgs[2]; |
@@ -99,7 +96,6 @@ virtio_transport_send_pkt_work(struct work_struct *work) | |||
99 | spin_lock_bh(&vsock->send_pkt_list_lock); | 96 | spin_lock_bh(&vsock->send_pkt_list_lock); |
100 | if (list_empty(&vsock->send_pkt_list)) { | 97 | if (list_empty(&vsock->send_pkt_list)) { |
101 | spin_unlock_bh(&vsock->send_pkt_list_lock); | 98 | spin_unlock_bh(&vsock->send_pkt_list_lock); |
102 | virtqueue_enable_cb(vq); | ||
103 | break; | 99 | break; |
104 | } | 100 | } |
105 | 101 | ||
@@ -118,13 +114,13 @@ virtio_transport_send_pkt_work(struct work_struct *work) | |||
118 | } | 114 | } |
119 | 115 | ||
120 | ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL); | 116 | ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, pkt, GFP_KERNEL); |
117 | /* Usually this means that there is no more space available in | ||
118 | * the vq | ||
119 | */ | ||
121 | if (ret < 0) { | 120 | if (ret < 0) { |
122 | spin_lock_bh(&vsock->send_pkt_list_lock); | 121 | spin_lock_bh(&vsock->send_pkt_list_lock); |
123 | list_add(&pkt->list, &vsock->send_pkt_list); | 122 | list_add(&pkt->list, &vsock->send_pkt_list); |
124 | spin_unlock_bh(&vsock->send_pkt_list_lock); | 123 | spin_unlock_bh(&vsock->send_pkt_list_lock); |
125 | |||
126 | if (!virtqueue_enable_cb(vq) && ret == -ENOSPC) | ||
127 | continue; /* retry now that we have more space */ | ||
128 | break; | 124 | break; |
129 | } | 125 | } |
130 | 126 | ||
diff --git a/net/wireless/chan.c b/net/wireless/chan.c index b0e11b6dc994..0f506220a3bd 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c | |||
@@ -513,6 +513,7 @@ static bool cfg80211_chandef_dfs_available(struct wiphy *wiphy, | |||
513 | r = cfg80211_get_chans_dfs_available(wiphy, | 513 | r = cfg80211_get_chans_dfs_available(wiphy, |
514 | chandef->center_freq2, | 514 | chandef->center_freq2, |
515 | width); | 515 | width); |
516 | break; | ||
516 | default: | 517 | default: |
517 | WARN_ON(chandef->center_freq2); | 518 | WARN_ON(chandef->center_freq2); |
518 | break; | 519 | break; |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 46417f9cce68..f02653a08993 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -5380,6 +5380,7 @@ static int nl80211_parse_mesh_config(struct genl_info *info, | |||
5380 | { | 5380 | { |
5381 | struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1]; | 5381 | struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1]; |
5382 | u32 mask = 0; | 5382 | u32 mask = 0; |
5383 | u16 ht_opmode; | ||
5383 | 5384 | ||
5384 | #define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \ | 5385 | #define FILL_IN_MESH_PARAM_IF_SET(tb, cfg, param, min, max, mask, attr, fn) \ |
5385 | do { \ | 5386 | do { \ |
@@ -5471,9 +5472,36 @@ do { \ | |||
5471 | FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0, | 5472 | FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0, |
5472 | mask, NL80211_MESHCONF_RSSI_THRESHOLD, | 5473 | mask, NL80211_MESHCONF_RSSI_THRESHOLD, |
5473 | nl80211_check_s32); | 5474 | nl80211_check_s32); |
5474 | FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16, | 5475 | /* |
5475 | mask, NL80211_MESHCONF_HT_OPMODE, | 5476 | * Check HT operation mode based on |
5476 | nl80211_check_u16); | 5477 | * IEEE 802.11 2012 8.4.2.59 HT Operation element. |
5478 | */ | ||
5479 | if (tb[NL80211_MESHCONF_HT_OPMODE]) { | ||
5480 | ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]); | ||
5481 | |||
5482 | if (ht_opmode & ~(IEEE80211_HT_OP_MODE_PROTECTION | | ||
5483 | IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT | | ||
5484 | IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) | ||
5485 | return -EINVAL; | ||
5486 | |||
5487 | if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) && | ||
5488 | (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) | ||
5489 | return -EINVAL; | ||
5490 | |||
5491 | switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) { | ||
5492 | case IEEE80211_HT_OP_MODE_PROTECTION_NONE: | ||
5493 | case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: | ||
5494 | if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT) | ||
5495 | return -EINVAL; | ||
5496 | break; | ||
5497 | case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: | ||
5498 | case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: | ||
5499 | if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)) | ||
5500 | return -EINVAL; | ||
5501 | break; | ||
5502 | } | ||
5503 | cfg->ht_opmode = ht_opmode; | ||
5504 | } | ||
5477 | FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout, | 5505 | FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout, |
5478 | 1, 65535, mask, | 5506 | 1, 65535, mask, |
5479 | NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, | 5507 | NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, |
diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h index 217c8d507f2e..7927a090fa0d 100644 --- a/samples/bpf/bpf_helpers.h +++ b/samples/bpf/bpf_helpers.h | |||
@@ -72,8 +72,8 @@ static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flag | |||
72 | (void *) BPF_FUNC_l3_csum_replace; | 72 | (void *) BPF_FUNC_l3_csum_replace; |
73 | static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) = | 73 | static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) = |
74 | (void *) BPF_FUNC_l4_csum_replace; | 74 | (void *) BPF_FUNC_l4_csum_replace; |
75 | static int (*bpf_skb_in_cgroup)(void *ctx, void *map, int index) = | 75 | static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) = |
76 | (void *) BPF_FUNC_skb_in_cgroup; | 76 | (void *) BPF_FUNC_skb_under_cgroup; |
77 | 77 | ||
78 | #if defined(__x86_64__) | 78 | #if defined(__x86_64__) |
79 | 79 | ||
diff --git a/samples/bpf/test_cgrp2_tc_kern.c b/samples/bpf/test_cgrp2_tc_kern.c index 2732c37c8d5b..10ff73404e3a 100644 --- a/samples/bpf/test_cgrp2_tc_kern.c +++ b/samples/bpf/test_cgrp2_tc_kern.c | |||
@@ -57,7 +57,7 @@ int handle_egress(struct __sk_buff *skb) | |||
57 | bpf_trace_printk(dont_care_msg, sizeof(dont_care_msg), | 57 | bpf_trace_printk(dont_care_msg, sizeof(dont_care_msg), |
58 | eth->h_proto, ip6h->nexthdr); | 58 | eth->h_proto, ip6h->nexthdr); |
59 | return TC_ACT_OK; | 59 | return TC_ACT_OK; |
60 | } else if (bpf_skb_in_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) { | 60 | } else if (bpf_skb_under_cgroup(skb, &test_cgrp2_array_pin, 0) != 1) { |
61 | bpf_trace_printk(pass_msg, sizeof(pass_msg)); | 61 | bpf_trace_printk(pass_msg, sizeof(pass_msg)); |
62 | return TC_ACT_OK; | 62 | return TC_ACT_OK; |
63 | } else { | 63 | } else { |
diff --git a/samples/bpf/test_maps.c b/samples/bpf/test_maps.c index 47bf0858f9e4..cce2b59751eb 100644 --- a/samples/bpf/test_maps.c +++ b/samples/bpf/test_maps.c | |||
@@ -68,7 +68,16 @@ static void test_hashmap_sanity(int i, void *data) | |||
68 | assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 && | 68 | assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == -1 && |
69 | errno == E2BIG); | 69 | errno == E2BIG); |
70 | 70 | ||
71 | /* update existing element, thought the map is full */ | ||
72 | key = 1; | ||
73 | assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == 0); | ||
74 | key = 2; | ||
75 | assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0); | ||
76 | key = 1; | ||
77 | assert(bpf_update_elem(map_fd, &key, &value, BPF_ANY) == 0); | ||
78 | |||
71 | /* check that key = 0 doesn't exist */ | 79 | /* check that key = 0 doesn't exist */ |
80 | key = 0; | ||
72 | assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT); | 81 | assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT); |
73 | 82 | ||
74 | /* iterate over two elements */ | 83 | /* iterate over two elements */ |
@@ -413,10 +422,12 @@ static void do_work(int fn, void *data) | |||
413 | 422 | ||
414 | for (i = fn; i < MAP_SIZE; i += TASKS) { | 423 | for (i = fn; i < MAP_SIZE; i += TASKS) { |
415 | key = value = i; | 424 | key = value = i; |
416 | if (do_update) | 425 | if (do_update) { |
417 | assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0); | 426 | assert(bpf_update_elem(map_fd, &key, &value, BPF_NOEXIST) == 0); |
418 | else | 427 | assert(bpf_update_elem(map_fd, &key, &value, BPF_EXIST) == 0); |
428 | } else { | ||
419 | assert(bpf_delete_elem(map_fd, &key) == 0); | 429 | assert(bpf_delete_elem(map_fd, &key) == 0); |
430 | } | ||
420 | } | 431 | } |
421 | } | 432 | } |
422 | 433 | ||
diff --git a/security/Kconfig b/security/Kconfig index df28f2b6f3e1..da10d9b573a4 100644 --- a/security/Kconfig +++ b/security/Kconfig | |||
@@ -136,6 +136,7 @@ config HAVE_ARCH_HARDENED_USERCOPY | |||
136 | config HARDENED_USERCOPY | 136 | config HARDENED_USERCOPY |
137 | bool "Harden memory copies between kernel and userspace" | 137 | bool "Harden memory copies between kernel and userspace" |
138 | depends on HAVE_ARCH_HARDENED_USERCOPY | 138 | depends on HAVE_ARCH_HARDENED_USERCOPY |
139 | depends on HAVE_HARDENED_USERCOPY_ALLOCATOR | ||
139 | select BUG | 140 | select BUG |
140 | help | 141 | help |
141 | This option checks for obviously wrong memory regions when | 142 | This option checks for obviously wrong memory regions when |
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index f209ea151dca..3051f86a9b5f 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h | |||
@@ -87,9 +87,11 @@ struct kvm_regs { | |||
87 | /* Supported VGICv3 address types */ | 87 | /* Supported VGICv3 address types */ |
88 | #define KVM_VGIC_V3_ADDR_TYPE_DIST 2 | 88 | #define KVM_VGIC_V3_ADDR_TYPE_DIST 2 |
89 | #define KVM_VGIC_V3_ADDR_TYPE_REDIST 3 | 89 | #define KVM_VGIC_V3_ADDR_TYPE_REDIST 3 |
90 | #define KVM_VGIC_ITS_ADDR_TYPE 4 | ||
90 | 91 | ||
91 | #define KVM_VGIC_V3_DIST_SIZE SZ_64K | 92 | #define KVM_VGIC_V3_DIST_SIZE SZ_64K |
92 | #define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K) | 93 | #define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K) |
94 | #define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K) | ||
93 | 95 | ||
94 | #define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ | 96 | #define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ |
95 | #define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */ | 97 | #define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */ |
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h index 3b8e99ef9d58..a2ffec4139ad 100644 --- a/tools/arch/s390/include/uapi/asm/kvm.h +++ b/tools/arch/s390/include/uapi/asm/kvm.h | |||
@@ -93,6 +93,47 @@ struct kvm_s390_vm_cpu_machine { | |||
93 | __u64 fac_list[256]; | 93 | __u64 fac_list[256]; |
94 | }; | 94 | }; |
95 | 95 | ||
96 | #define KVM_S390_VM_CPU_PROCESSOR_FEAT 2 | ||
97 | #define KVM_S390_VM_CPU_MACHINE_FEAT 3 | ||
98 | |||
99 | #define KVM_S390_VM_CPU_FEAT_NR_BITS 1024 | ||
100 | #define KVM_S390_VM_CPU_FEAT_ESOP 0 | ||
101 | #define KVM_S390_VM_CPU_FEAT_SIEF2 1 | ||
102 | #define KVM_S390_VM_CPU_FEAT_64BSCAO 2 | ||
103 | #define KVM_S390_VM_CPU_FEAT_SIIF 3 | ||
104 | #define KVM_S390_VM_CPU_FEAT_GPERE 4 | ||
105 | #define KVM_S390_VM_CPU_FEAT_GSLS 5 | ||
106 | #define KVM_S390_VM_CPU_FEAT_IB 6 | ||
107 | #define KVM_S390_VM_CPU_FEAT_CEI 7 | ||
108 | #define KVM_S390_VM_CPU_FEAT_IBS 8 | ||
109 | #define KVM_S390_VM_CPU_FEAT_SKEY 9 | ||
110 | #define KVM_S390_VM_CPU_FEAT_CMMA 10 | ||
111 | #define KVM_S390_VM_CPU_FEAT_PFMFI 11 | ||
112 | #define KVM_S390_VM_CPU_FEAT_SIGPIF 12 | ||
113 | struct kvm_s390_vm_cpu_feat { | ||
114 | __u64 feat[16]; | ||
115 | }; | ||
116 | |||
117 | #define KVM_S390_VM_CPU_PROCESSOR_SUBFUNC 4 | ||
118 | #define KVM_S390_VM_CPU_MACHINE_SUBFUNC 5 | ||
119 | /* for "test bit" instructions MSB 0 bit ordering, for "query" raw blocks */ | ||
120 | struct kvm_s390_vm_cpu_subfunc { | ||
121 | __u8 plo[32]; /* always */ | ||
122 | __u8 ptff[16]; /* with TOD-clock steering */ | ||
123 | __u8 kmac[16]; /* with MSA */ | ||
124 | __u8 kmc[16]; /* with MSA */ | ||
125 | __u8 km[16]; /* with MSA */ | ||
126 | __u8 kimd[16]; /* with MSA */ | ||
127 | __u8 klmd[16]; /* with MSA */ | ||
128 | __u8 pckmo[16]; /* with MSA3 */ | ||
129 | __u8 kmctr[16]; /* with MSA4 */ | ||
130 | __u8 kmf[16]; /* with MSA4 */ | ||
131 | __u8 kmo[16]; /* with MSA4 */ | ||
132 | __u8 pcc[16]; /* with MSA4 */ | ||
133 | __u8 ppno[16]; /* with MSA5 */ | ||
134 | __u8 reserved[1824]; | ||
135 | }; | ||
136 | |||
96 | /* kvm attributes for crypto */ | 137 | /* kvm attributes for crypto */ |
97 | #define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0 | 138 | #define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0 |
98 | #define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1 | 139 | #define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1 |
diff --git a/tools/arch/s390/include/uapi/asm/sie.h b/tools/arch/s390/include/uapi/asm/sie.h index 8fb5d4a6dd25..3ac634368939 100644 --- a/tools/arch/s390/include/uapi/asm/sie.h +++ b/tools/arch/s390/include/uapi/asm/sie.h | |||
@@ -140,6 +140,7 @@ | |||
140 | exit_code_ipa0(0xB2, 0x4c, "TAR"), \ | 140 | exit_code_ipa0(0xB2, 0x4c, "TAR"), \ |
141 | exit_code_ipa0(0xB2, 0x50, "CSP"), \ | 141 | exit_code_ipa0(0xB2, 0x50, "CSP"), \ |
142 | exit_code_ipa0(0xB2, 0x54, "MVPG"), \ | 142 | exit_code_ipa0(0xB2, 0x54, "MVPG"), \ |
143 | exit_code_ipa0(0xB2, 0x56, "STHYI"), \ | ||
143 | exit_code_ipa0(0xB2, 0x58, "BSG"), \ | 144 | exit_code_ipa0(0xB2, 0x58, "BSG"), \ |
144 | exit_code_ipa0(0xB2, 0x5a, "BSA"), \ | 145 | exit_code_ipa0(0xB2, 0x5a, "BSA"), \ |
145 | exit_code_ipa0(0xB2, 0x5f, "CHSC"), \ | 146 | exit_code_ipa0(0xB2, 0x5f, "CHSC"), \ |
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c index 8d4dc97d80ba..35745a733100 100644 --- a/tools/perf/arch/powerpc/util/sym-handling.c +++ b/tools/perf/arch/powerpc/util/sym-handling.c | |||
@@ -97,6 +97,7 @@ void arch__fix_tev_from_maps(struct perf_probe_event *pev, | |||
97 | } | 97 | } |
98 | } | 98 | } |
99 | 99 | ||
100 | #ifdef HAVE_LIBELF_SUPPORT | ||
100 | void arch__post_process_probe_trace_events(struct perf_probe_event *pev, | 101 | void arch__post_process_probe_trace_events(struct perf_probe_event *pev, |
101 | int ntevs) | 102 | int ntevs) |
102 | { | 103 | { |
@@ -118,5 +119,6 @@ void arch__post_process_probe_trace_events(struct perf_probe_event *pev, | |||
118 | } | 119 | } |
119 | } | 120 | } |
120 | } | 121 | } |
122 | #endif /* HAVE_LIBELF_SUPPORT */ | ||
121 | 123 | ||
122 | #endif | 124 | #endif |
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c index fb51457ba338..a2412e9d883b 100644 --- a/tools/perf/arch/x86/util/intel-pt.c +++ b/tools/perf/arch/x86/util/intel-pt.c | |||
@@ -501,7 +501,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, | |||
501 | struct intel_pt_recording *ptr = | 501 | struct intel_pt_recording *ptr = |
502 | container_of(itr, struct intel_pt_recording, itr); | 502 | container_of(itr, struct intel_pt_recording, itr); |
503 | struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu; | 503 | struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu; |
504 | bool have_timing_info; | 504 | bool have_timing_info, need_immediate = false; |
505 | struct perf_evsel *evsel, *intel_pt_evsel = NULL; | 505 | struct perf_evsel *evsel, *intel_pt_evsel = NULL; |
506 | const struct cpu_map *cpus = evlist->cpus; | 506 | const struct cpu_map *cpus = evlist->cpus; |
507 | bool privileged = geteuid() == 0 || perf_event_paranoid() < 0; | 507 | bool privileged = geteuid() == 0 || perf_event_paranoid() < 0; |
@@ -655,6 +655,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, | |||
655 | ptr->have_sched_switch = 3; | 655 | ptr->have_sched_switch = 3; |
656 | } else { | 656 | } else { |
657 | opts->record_switch_events = true; | 657 | opts->record_switch_events = true; |
658 | need_immediate = true; | ||
658 | if (cpu_wide) | 659 | if (cpu_wide) |
659 | ptr->have_sched_switch = 3; | 660 | ptr->have_sched_switch = 3; |
660 | else | 661 | else |
@@ -700,6 +701,9 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, | |||
700 | tracking_evsel->attr.freq = 0; | 701 | tracking_evsel->attr.freq = 0; |
701 | tracking_evsel->attr.sample_period = 1; | 702 | tracking_evsel->attr.sample_period = 1; |
702 | 703 | ||
704 | if (need_immediate) | ||
705 | tracking_evsel->immediate = true; | ||
706 | |||
703 | /* In per-cpu case, always need the time of mmap events etc */ | 707 | /* In per-cpu case, always need the time of mmap events etc */ |
704 | if (!cpu_map__empty(cpus)) { | 708 | if (!cpu_map__empty(cpus)) { |
705 | perf_evsel__set_sample_bit(tracking_evsel, TIME); | 709 | perf_evsel__set_sample_bit(tracking_evsel, TIME); |
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c index d608a2c9e48c..d1ce29be560e 100644 --- a/tools/perf/builtin-mem.c +++ b/tools/perf/builtin-mem.c | |||
@@ -88,6 +88,9 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem) | |||
88 | if (mem->operation & MEM_OPERATION_LOAD) | 88 | if (mem->operation & MEM_OPERATION_LOAD) |
89 | perf_mem_events[PERF_MEM_EVENTS__LOAD].record = true; | 89 | perf_mem_events[PERF_MEM_EVENTS__LOAD].record = true; |
90 | 90 | ||
91 | if (mem->operation & MEM_OPERATION_STORE) | ||
92 | perf_mem_events[PERF_MEM_EVENTS__STORE].record = true; | ||
93 | |||
91 | if (perf_mem_events[PERF_MEM_EVENTS__LOAD].record) | 94 | if (perf_mem_events[PERF_MEM_EVENTS__LOAD].record) |
92 | rec_argv[i++] = "-W"; | 95 | rec_argv[i++] = "-W"; |
93 | 96 | ||
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 9c640a8081c7..c859e59dfe3e 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c | |||
@@ -371,14 +371,16 @@ static int perf_session__check_output_opt(struct perf_session *session) | |||
371 | 371 | ||
372 | if (!no_callchain) { | 372 | if (!no_callchain) { |
373 | bool use_callchain = false; | 373 | bool use_callchain = false; |
374 | bool not_pipe = false; | ||
374 | 375 | ||
375 | evlist__for_each_entry(session->evlist, evsel) { | 376 | evlist__for_each_entry(session->evlist, evsel) { |
377 | not_pipe = true; | ||
376 | if (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { | 378 | if (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { |
377 | use_callchain = true; | 379 | use_callchain = true; |
378 | break; | 380 | break; |
379 | } | 381 | } |
380 | } | 382 | } |
381 | if (!use_callchain) | 383 | if (not_pipe && !use_callchain) |
382 | symbol_conf.use_callchain = false; | 384 | symbol_conf.use_callchain = false; |
383 | } | 385 | } |
384 | 386 | ||
@@ -1690,8 +1692,13 @@ static int list_available_scripts(const struct option *opt __maybe_unused, | |||
1690 | snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path()); | 1692 | snprintf(scripts_path, MAXPATHLEN, "%s/scripts", get_argv_exec_path()); |
1691 | 1693 | ||
1692 | scripts_dir = opendir(scripts_path); | 1694 | scripts_dir = opendir(scripts_path); |
1693 | if (!scripts_dir) | 1695 | if (!scripts_dir) { |
1694 | return -1; | 1696 | fprintf(stdout, |
1697 | "open(%s) failed.\n" | ||
1698 | "Check \"PERF_EXEC_PATH\" env to set scripts dir.\n", | ||
1699 | scripts_path); | ||
1700 | exit(-1); | ||
1701 | } | ||
1695 | 1702 | ||
1696 | for_each_lang(scripts_path, scripts_dir, lang_dirent) { | 1703 | for_each_lang(scripts_path, scripts_dir, lang_dirent) { |
1697 | snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, | 1704 | snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path, |
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index 9c8f15da86ce..8ff6c6a61291 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c | |||
@@ -123,8 +123,6 @@ struct intel_pt_decoder { | |||
123 | bool have_calc_cyc_to_tsc; | 123 | bool have_calc_cyc_to_tsc; |
124 | int exec_mode; | 124 | int exec_mode; |
125 | unsigned int insn_bytes; | 125 | unsigned int insn_bytes; |
126 | uint64_t sign_bit; | ||
127 | uint64_t sign_bits; | ||
128 | uint64_t period; | 126 | uint64_t period; |
129 | enum intel_pt_period_type period_type; | 127 | enum intel_pt_period_type period_type; |
130 | uint64_t tot_insn_cnt; | 128 | uint64_t tot_insn_cnt; |
@@ -191,9 +189,6 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params) | |||
191 | decoder->data = params->data; | 189 | decoder->data = params->data; |
192 | decoder->return_compression = params->return_compression; | 190 | decoder->return_compression = params->return_compression; |
193 | 191 | ||
194 | decoder->sign_bit = (uint64_t)1 << 47; | ||
195 | decoder->sign_bits = ~(((uint64_t)1 << 48) - 1); | ||
196 | |||
197 | decoder->period = params->period; | 192 | decoder->period = params->period; |
198 | decoder->period_type = params->period_type; | 193 | decoder->period_type = params->period_type; |
199 | 194 | ||
@@ -362,21 +357,30 @@ int intel_pt__strerror(int code, char *buf, size_t buflen) | |||
362 | return 0; | 357 | return 0; |
363 | } | 358 | } |
364 | 359 | ||
365 | static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder, | 360 | static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet, |
366 | const struct intel_pt_pkt *packet, | ||
367 | uint64_t last_ip) | 361 | uint64_t last_ip) |
368 | { | 362 | { |
369 | uint64_t ip; | 363 | uint64_t ip; |
370 | 364 | ||
371 | switch (packet->count) { | 365 | switch (packet->count) { |
372 | case 2: | 366 | case 1: |
373 | ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) | | 367 | ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) | |
374 | packet->payload; | 368 | packet->payload; |
375 | break; | 369 | break; |
376 | case 4: | 370 | case 2: |
377 | ip = (last_ip & (uint64_t)0xffffffff00000000ULL) | | 371 | ip = (last_ip & (uint64_t)0xffffffff00000000ULL) | |
378 | packet->payload; | 372 | packet->payload; |
379 | break; | 373 | break; |
374 | case 3: | ||
375 | ip = packet->payload; | ||
376 | /* Sign-extend 6-byte ip */ | ||
377 | if (ip & (uint64_t)0x800000000000ULL) | ||
378 | ip |= (uint64_t)0xffff000000000000ULL; | ||
379 | break; | ||
380 | case 4: | ||
381 | ip = (last_ip & (uint64_t)0xffff000000000000ULL) | | ||
382 | packet->payload; | ||
383 | break; | ||
380 | case 6: | 384 | case 6: |
381 | ip = packet->payload; | 385 | ip = packet->payload; |
382 | break; | 386 | break; |
@@ -384,16 +388,12 @@ static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder, | |||
384 | return 0; | 388 | return 0; |
385 | } | 389 | } |
386 | 390 | ||
387 | if (ip & decoder->sign_bit) | ||
388 | return ip | decoder->sign_bits; | ||
389 | |||
390 | return ip; | 391 | return ip; |
391 | } | 392 | } |
392 | 393 | ||
393 | static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder) | 394 | static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder) |
394 | { | 395 | { |
395 | decoder->last_ip = intel_pt_calc_ip(decoder, &decoder->packet, | 396 | decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip); |
396 | decoder->last_ip); | ||
397 | } | 397 | } |
398 | 398 | ||
399 | static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder) | 399 | static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder) |
@@ -1657,6 +1657,12 @@ next: | |||
1657 | } | 1657 | } |
1658 | } | 1658 | } |
1659 | 1659 | ||
1660 | static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder) | ||
1661 | { | ||
1662 | return decoder->last_ip || decoder->packet.count == 0 || | ||
1663 | decoder->packet.count == 3 || decoder->packet.count == 6; | ||
1664 | } | ||
1665 | |||
1660 | /* Walk PSB+ packets to get in sync. */ | 1666 | /* Walk PSB+ packets to get in sync. */ |
1661 | static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) | 1667 | static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) |
1662 | { | 1668 | { |
@@ -1677,8 +1683,7 @@ static int intel_pt_walk_psb(struct intel_pt_decoder *decoder) | |||
1677 | 1683 | ||
1678 | case INTEL_PT_FUP: | 1684 | case INTEL_PT_FUP: |
1679 | decoder->pge = true; | 1685 | decoder->pge = true; |
1680 | if (decoder->last_ip || decoder->packet.count == 6 || | 1686 | if (intel_pt_have_ip(decoder)) { |
1681 | decoder->packet.count == 0) { | ||
1682 | uint64_t current_ip = decoder->ip; | 1687 | uint64_t current_ip = decoder->ip; |
1683 | 1688 | ||
1684 | intel_pt_set_ip(decoder); | 1689 | intel_pt_set_ip(decoder); |
@@ -1767,8 +1772,7 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder) | |||
1767 | case INTEL_PT_TIP_PGE: | 1772 | case INTEL_PT_TIP_PGE: |
1768 | case INTEL_PT_TIP: | 1773 | case INTEL_PT_TIP: |
1769 | decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD; | 1774 | decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD; |
1770 | if (decoder->last_ip || decoder->packet.count == 6 || | 1775 | if (intel_pt_have_ip(decoder)) |
1771 | decoder->packet.count == 0) | ||
1772 | intel_pt_set_ip(decoder); | 1776 | intel_pt_set_ip(decoder); |
1773 | if (decoder->ip) | 1777 | if (decoder->ip) |
1774 | return 0; | 1778 | return 0; |
@@ -1776,9 +1780,7 @@ static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder) | |||
1776 | 1780 | ||
1777 | case INTEL_PT_FUP: | 1781 | case INTEL_PT_FUP: |
1778 | if (decoder->overflow) { | 1782 | if (decoder->overflow) { |
1779 | if (decoder->last_ip || | 1783 | if (intel_pt_have_ip(decoder)) |
1780 | decoder->packet.count == 6 || | ||
1781 | decoder->packet.count == 0) | ||
1782 | intel_pt_set_ip(decoder); | 1784 | intel_pt_set_ip(decoder); |
1783 | if (decoder->ip) | 1785 | if (decoder->ip) |
1784 | return 0; | 1786 | return 0; |
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c index b1257c816310..4f7b32020487 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c | |||
@@ -292,36 +292,46 @@ static int intel_pt_get_ip(enum intel_pt_pkt_type type, unsigned int byte, | |||
292 | const unsigned char *buf, size_t len, | 292 | const unsigned char *buf, size_t len, |
293 | struct intel_pt_pkt *packet) | 293 | struct intel_pt_pkt *packet) |
294 | { | 294 | { |
295 | switch (byte >> 5) { | 295 | int ip_len; |
296 | |||
297 | packet->count = byte >> 5; | ||
298 | |||
299 | switch (packet->count) { | ||
296 | case 0: | 300 | case 0: |
297 | packet->count = 0; | 301 | ip_len = 0; |
298 | break; | 302 | break; |
299 | case 1: | 303 | case 1: |
300 | if (len < 3) | 304 | if (len < 3) |
301 | return INTEL_PT_NEED_MORE_BYTES; | 305 | return INTEL_PT_NEED_MORE_BYTES; |
302 | packet->count = 2; | 306 | ip_len = 2; |
303 | packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1)); | 307 | packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1)); |
304 | break; | 308 | break; |
305 | case 2: | 309 | case 2: |
306 | if (len < 5) | 310 | if (len < 5) |
307 | return INTEL_PT_NEED_MORE_BYTES; | 311 | return INTEL_PT_NEED_MORE_BYTES; |
308 | packet->count = 4; | 312 | ip_len = 4; |
309 | packet->payload = le32_to_cpu(*(uint32_t *)(buf + 1)); | 313 | packet->payload = le32_to_cpu(*(uint32_t *)(buf + 1)); |
310 | break; | 314 | break; |
311 | case 3: | 315 | case 3: |
312 | case 6: | 316 | case 4: |
313 | if (len < 7) | 317 | if (len < 7) |
314 | return INTEL_PT_NEED_MORE_BYTES; | 318 | return INTEL_PT_NEED_MORE_BYTES; |
315 | packet->count = 6; | 319 | ip_len = 6; |
316 | memcpy_le64(&packet->payload, buf + 1, 6); | 320 | memcpy_le64(&packet->payload, buf + 1, 6); |
317 | break; | 321 | break; |
322 | case 6: | ||
323 | if (len < 9) | ||
324 | return INTEL_PT_NEED_MORE_BYTES; | ||
325 | ip_len = 8; | ||
326 | packet->payload = le64_to_cpu(*(uint64_t *)(buf + 1)); | ||
327 | break; | ||
318 | default: | 328 | default: |
319 | return INTEL_PT_BAD_PACKET; | 329 | return INTEL_PT_BAD_PACKET; |
320 | } | 330 | } |
321 | 331 | ||
322 | packet->type = type; | 332 | packet->type = type; |
323 | 333 | ||
324 | return packet->count + 1; | 334 | return ip_len + 1; |
325 | } | 335 | } |
326 | 336 | ||
327 | static int intel_pt_get_mode(const unsigned char *buf, size_t len, | 337 | static int intel_pt_get_mode(const unsigned char *buf, size_t len, |
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c index 9f3305f6b6d5..95f0884aae02 100644 --- a/tools/perf/util/jitdump.c +++ b/tools/perf/util/jitdump.c | |||
@@ -1,3 +1,4 @@ | |||
1 | #include <sys/sysmacros.h> | ||
1 | #include <sys/types.h> | 2 | #include <sys/types.h> |
2 | #include <stdio.h> | 3 | #include <stdio.h> |
3 | #include <stdlib.h> | 4 | #include <stdlib.h> |
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c index 9aed9c332da6..9c3b9ed5b3c3 100644 --- a/tools/perf/util/probe-file.c +++ b/tools/perf/util/probe-file.c | |||
@@ -133,7 +133,7 @@ int probe_file__open_both(int *kfd, int *ufd, int flag) | |||
133 | /* Get raw string list of current kprobe_events or uprobe_events */ | 133 | /* Get raw string list of current kprobe_events or uprobe_events */ |
134 | struct strlist *probe_file__get_rawlist(int fd) | 134 | struct strlist *probe_file__get_rawlist(int fd) |
135 | { | 135 | { |
136 | int ret, idx; | 136 | int ret, idx, fddup; |
137 | FILE *fp; | 137 | FILE *fp; |
138 | char buf[MAX_CMDLEN]; | 138 | char buf[MAX_CMDLEN]; |
139 | char *p; | 139 | char *p; |
@@ -143,8 +143,17 @@ struct strlist *probe_file__get_rawlist(int fd) | |||
143 | return NULL; | 143 | return NULL; |
144 | 144 | ||
145 | sl = strlist__new(NULL, NULL); | 145 | sl = strlist__new(NULL, NULL); |
146 | if (sl == NULL) | ||
147 | return NULL; | ||
148 | |||
149 | fddup = dup(fd); | ||
150 | if (fddup < 0) | ||
151 | goto out_free_sl; | ||
152 | |||
153 | fp = fdopen(fddup, "r"); | ||
154 | if (!fp) | ||
155 | goto out_close_fddup; | ||
146 | 156 | ||
147 | fp = fdopen(dup(fd), "r"); | ||
148 | while (!feof(fp)) { | 157 | while (!feof(fp)) { |
149 | p = fgets(buf, MAX_CMDLEN, fp); | 158 | p = fgets(buf, MAX_CMDLEN, fp); |
150 | if (!p) | 159 | if (!p) |
@@ -156,13 +165,21 @@ struct strlist *probe_file__get_rawlist(int fd) | |||
156 | ret = strlist__add(sl, buf); | 165 | ret = strlist__add(sl, buf); |
157 | if (ret < 0) { | 166 | if (ret < 0) { |
158 | pr_debug("strlist__add failed (%d)\n", ret); | 167 | pr_debug("strlist__add failed (%d)\n", ret); |
159 | strlist__delete(sl); | 168 | goto out_close_fp; |
160 | return NULL; | ||
161 | } | 169 | } |
162 | } | 170 | } |
163 | fclose(fp); | 171 | fclose(fp); |
164 | 172 | ||
165 | return sl; | 173 | return sl; |
174 | |||
175 | out_close_fp: | ||
176 | fclose(fp); | ||
177 | goto out_free_sl; | ||
178 | out_close_fddup: | ||
179 | close(fddup); | ||
180 | out_free_sl: | ||
181 | strlist__delete(sl); | ||
182 | return NULL; | ||
166 | } | 183 | } |
167 | 184 | ||
168 | static struct strlist *__probe_file__get_namelist(int fd, bool include_group) | 185 | static struct strlist *__probe_file__get_namelist(int fd, bool include_group) |
@@ -447,12 +464,17 @@ static int probe_cache__load(struct probe_cache *pcache) | |||
447 | { | 464 | { |
448 | struct probe_cache_entry *entry = NULL; | 465 | struct probe_cache_entry *entry = NULL; |
449 | char buf[MAX_CMDLEN], *p; | 466 | char buf[MAX_CMDLEN], *p; |
450 | int ret = 0; | 467 | int ret = 0, fddup; |
451 | FILE *fp; | 468 | FILE *fp; |
452 | 469 | ||
453 | fp = fdopen(dup(pcache->fd), "r"); | 470 | fddup = dup(pcache->fd); |
454 | if (!fp) | 471 | if (fddup < 0) |
472 | return -errno; | ||
473 | fp = fdopen(fddup, "r"); | ||
474 | if (!fp) { | ||
475 | close(fddup); | ||
455 | return -EINVAL; | 476 | return -EINVAL; |
477 | } | ||
456 | 478 | ||
457 | while (!feof(fp)) { | 479 | while (!feof(fp)) { |
458 | if (!fgets(buf, MAX_CMDLEN, fp)) | 480 | if (!fgets(buf, MAX_CMDLEN, fp)) |
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index a34321e9b44d..a811c13a74d6 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c | |||
@@ -837,7 +837,8 @@ int dso__load_sym(struct dso *dso, struct map *map, | |||
837 | sec = syms_ss->symtab; | 837 | sec = syms_ss->symtab; |
838 | shdr = syms_ss->symshdr; | 838 | shdr = syms_ss->symshdr; |
839 | 839 | ||
840 | if (elf_section_by_name(elf, &ehdr, &tshdr, ".text", NULL)) | 840 | if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr, |
841 | ".text", NULL)) | ||
841 | dso->text_offset = tshdr.sh_addr - tshdr.sh_offset; | 842 | dso->text_offset = tshdr.sh_addr - tshdr.sh_offset; |
842 | 843 | ||
843 | if (runtime_ss->opdsec) | 844 | if (runtime_ss->opdsec) |
diff --git a/tools/virtio/linux/dma-mapping.h b/tools/virtio/linux/dma-mapping.h index 4f93af89ae16..18601f6689b9 100644 --- a/tools/virtio/linux/dma-mapping.h +++ b/tools/virtio/linux/dma-mapping.h | |||
@@ -14,4 +14,20 @@ enum dma_data_direction { | |||
14 | DMA_NONE = 3, | 14 | DMA_NONE = 3, |
15 | }; | 15 | }; |
16 | 16 | ||
17 | #define dma_alloc_coherent(d, s, hp, f) ({ \ | ||
18 | void *__dma_alloc_coherent_p = kmalloc((s), (f)); \ | ||
19 | *(hp) = (unsigned long)__dma_alloc_coherent_p; \ | ||
20 | __dma_alloc_coherent_p; \ | ||
21 | }) | ||
22 | |||
23 | #define dma_free_coherent(d, s, p, h) kfree(p) | ||
24 | |||
25 | #define dma_map_page(d, p, o, s, dir) (page_to_phys(p) + (o)) | ||
26 | |||
27 | #define dma_map_single(d, p, s, dir) (virt_to_phys(p)) | ||
28 | #define dma_mapping_error(...) (0) | ||
29 | |||
30 | #define dma_unmap_single(...) do { } while (0) | ||
31 | #define dma_unmap_page(...) do { } while (0) | ||
32 | |||
17 | #endif | 33 | #endif |
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h index 033849948215..d9554fc3f340 100644 --- a/tools/virtio/linux/kernel.h +++ b/tools/virtio/linux/kernel.h | |||
@@ -20,7 +20,9 @@ | |||
20 | 20 | ||
21 | #define PAGE_SIZE getpagesize() | 21 | #define PAGE_SIZE getpagesize() |
22 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 22 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
23 | #define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK) | ||
23 | 24 | ||
25 | typedef unsigned long long phys_addr_t; | ||
24 | typedef unsigned long long dma_addr_t; | 26 | typedef unsigned long long dma_addr_t; |
25 | typedef size_t __kernel_size_t; | 27 | typedef size_t __kernel_size_t; |
26 | typedef unsigned int __wsum; | 28 | typedef unsigned int __wsum; |
@@ -57,6 +59,11 @@ static inline void *kzalloc(size_t s, gfp_t gfp) | |||
57 | return p; | 59 | return p; |
58 | } | 60 | } |
59 | 61 | ||
62 | static inline void *alloc_pages_exact(size_t s, gfp_t gfp) | ||
63 | { | ||
64 | return kmalloc(s, gfp); | ||
65 | } | ||
66 | |||
60 | static inline void kfree(void *p) | 67 | static inline void kfree(void *p) |
61 | { | 68 | { |
62 | if (p >= __kfree_ignore_start && p < __kfree_ignore_end) | 69 | if (p >= __kfree_ignore_start && p < __kfree_ignore_end) |
@@ -64,6 +71,11 @@ static inline void kfree(void *p) | |||
64 | free(p); | 71 | free(p); |
65 | } | 72 | } |
66 | 73 | ||
74 | static inline void free_pages_exact(void *p, size_t s) | ||
75 | { | ||
76 | kfree(p); | ||
77 | } | ||
78 | |||
67 | static inline void *krealloc(void *p, size_t s, gfp_t gfp) | 79 | static inline void *krealloc(void *p, size_t s, gfp_t gfp) |
68 | { | 80 | { |
69 | return realloc(p, s); | 81 | return realloc(p, s); |
@@ -105,6 +117,8 @@ static inline void free_page(unsigned long addr) | |||
105 | #define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__) | 117 | #define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__) |
106 | #define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__) | 118 | #define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__) |
107 | 119 | ||
120 | #define WARN_ON_ONCE(cond) ((cond) && fprintf (stderr, "WARNING\n")) | ||
121 | |||
108 | #define min(x, y) ({ \ | 122 | #define min(x, y) ({ \ |
109 | typeof(x) _min1 = (x); \ | 123 | typeof(x) _min1 = (x); \ |
110 | typeof(y) _min2 = (y); \ | 124 | typeof(y) _min2 = (y); \ |
diff --git a/tools/virtio/linux/slab.h b/tools/virtio/linux/slab.h index 81baeac8ae40..7e1c1197d439 100644 --- a/tools/virtio/linux/slab.h +++ b/tools/virtio/linux/slab.h | |||
@@ -1,2 +1,6 @@ | |||
1 | #ifndef LINUX_SLAB_H | 1 | #ifndef LINUX_SLAB_H |
2 | #define GFP_KERNEL 0 | ||
3 | #define GFP_ATOMIC 0 | ||
4 | #define __GFP_NOWARN 0 | ||
5 | #define __GFP_ZERO 0 | ||
2 | #endif | 6 | #endif |
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h index ee125e714053..9377c8b4ac16 100644 --- a/tools/virtio/linux/virtio.h +++ b/tools/virtio/linux/virtio.h | |||
@@ -3,8 +3,12 @@ | |||
3 | #include <linux/scatterlist.h> | 3 | #include <linux/scatterlist.h> |
4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
5 | 5 | ||
6 | struct device { | ||
7 | void *parent; | ||
8 | }; | ||
9 | |||
6 | struct virtio_device { | 10 | struct virtio_device { |
7 | void *dev; | 11 | struct device dev; |
8 | u64 features; | 12 | u64 features; |
9 | }; | 13 | }; |
10 | 14 | ||
diff --git a/tools/virtio/linux/virtio_config.h b/tools/virtio/linux/virtio_config.h index 57a6964a1e35..9ba11815e0a1 100644 --- a/tools/virtio/linux/virtio_config.h +++ b/tools/virtio/linux/virtio_config.h | |||
@@ -40,6 +40,19 @@ static inline void __virtio_clear_bit(struct virtio_device *vdev, | |||
40 | #define virtio_has_feature(dev, feature) \ | 40 | #define virtio_has_feature(dev, feature) \ |
41 | (__virtio_test_bit((dev), feature)) | 41 | (__virtio_test_bit((dev), feature)) |
42 | 42 | ||
43 | /** | ||
44 | * virtio_has_iommu_quirk - determine whether this device has the iommu quirk | ||
45 | * @vdev: the device | ||
46 | */ | ||
47 | static inline bool virtio_has_iommu_quirk(const struct virtio_device *vdev) | ||
48 | { | ||
49 | /* | ||
50 | * Note the reverse polarity of the quirk feature (compared to most | ||
51 | * other features), this is for compatibility with legacy systems. | ||
52 | */ | ||
53 | return !virtio_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); | ||
54 | } | ||
55 | |||
43 | static inline bool virtio_is_little_endian(struct virtio_device *vdev) | 56 | static inline bool virtio_is_little_endian(struct virtio_device *vdev) |
44 | { | 57 | { |
45 | return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) || | 58 | return virtio_has_feature(vdev, VIRTIO_F_VERSION_1) || |
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c index 68e4f9f0da3a..bd2ad1d3b7a9 100644 --- a/tools/virtio/ringtest/ptr_ring.c +++ b/tools/virtio/ringtest/ptr_ring.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #define cache_line_size() SMP_CACHE_BYTES | 13 | #define cache_line_size() SMP_CACHE_BYTES |
14 | #define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES))) | 14 | #define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES))) |
15 | #define unlikely(x) (__builtin_expect(!!(x), 0)) | 15 | #define unlikely(x) (__builtin_expect(!!(x), 0)) |
16 | #define likely(x) (__builtin_expect(!!(x), 1)) | ||
16 | #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a)) | 17 | #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a)) |
17 | typedef pthread_spinlock_t spinlock_t; | 18 | typedef pthread_spinlock_t spinlock_t; |
18 | 19 | ||