484 files changed, 5835 insertions, 5047 deletions
@@ -3709,6 +3709,13 @@ N: Dirk Verworner | |||
3709 | D: Co-author of German book ``Linux-Kernel-Programmierung'' | 3709 | D: Co-author of German book ``Linux-Kernel-Programmierung'' |
3710 | D: Co-founder of Berlin Linux User Group | 3710 | D: Co-founder of Berlin Linux User Group |
3711 | 3711 | ||
3712 | N: Andrew Victor | ||
3713 | E: linux@maxim.org.za | ||
3714 | W: http://maxim.org.za/at91_26.html | ||
3715 | D: First maintainer of Atmel ARM-based SoC, aka AT91 | ||
3716 | D: Introduced support for at91rm9200, the first chip of AT91 family | ||
3717 | S: South Africa | ||
3718 | |||
3712 | N: Riku Voipio | 3719 | N: Riku Voipio |
3713 | E: riku.voipio@iki.fi | 3720 | E: riku.voipio@iki.fi |
3714 | D: Author of PCA9532 LED and Fintek f75375s hwmon driver | 3721 | D: Author of PCA9532 LED and Fintek f75375s hwmon driver |
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 653d5d739d7f..31d1d658827f 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -505,7 +505,10 @@ at module load time (for a module) with: | |||
505 | 505 | ||
506 | The addresses are normal I2C addresses. The adapter is the string | 506 | The addresses are normal I2C addresses. The adapter is the string |
507 | name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name. | 507 | name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name. |
508 | It is *NOT* i2c-<n> itself. | 508 | It is *NOT* i2c-<n> itself. Also, the comparison is done ignoring |
509 | spaces, so if the name is "This is an I2C chip" you can say | ||
510 | adapter_name=ThisisanI2cchip. This is because it's hard to pass in | ||
511 | spaces in kernel parameters. | ||
509 | 512 | ||
510 | The debug flags are bit flags for each BMC found, they are: | 513 | The debug flags are bit flags for each BMC found, they are: |
511 | IPMI messages: 1, driver state: 2, timing: 4, I2C probe: 8 | 514 | IPMI messages: 1, driver state: 2, timing: 4, I2C probe: 8 |
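For a concrete sense of the space-ignoring match described above, a module load line could look like the following sketch. Only the adapter_name= spelling comes from the text; the ipmi_ssif module name, the addr parameter and the 0x42 I2C address are illustrative assumptions:

	modprobe ipmi_ssif addr=0x42 adapter_name=ThisisanI2cchip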
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index 750401f91341..15dfce708ebf 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -253,7 +253,7 @@ input driver: | |||
253 | GPIO support | 253 | GPIO support |
254 | ~~~~~~~~~~~~ | 254 | ~~~~~~~~~~~~ |
255 | ACPI 5 introduced two new resources to describe GPIO connections: GpioIo | 255 | ACPI 5 introduced two new resources to describe GPIO connections: GpioIo |
256 | and GpioInt. These resources are used be used to pass GPIO numbers used by | 256 | and GpioInt. These resources can be used to pass GPIO numbers used by |
257 | the device to the driver. ACPI 5.1 extended this with _DSD (Device | 257 | the device to the driver. ACPI 5.1 extended this with _DSD (Device |
258 | Specific Data) which made it possible to name the GPIOs among other things. | 258 | Specific Data) which made it possible to name the GPIOs among other things. |
259 | 259 | ||
diff --git a/Documentation/acpi/gpio-properties.txt b/Documentation/acpi/gpio-properties.txt
index ae36fcf86dc7..f35dad11f0de 100644
--- a/Documentation/acpi/gpio-properties.txt
+++ b/Documentation/acpi/gpio-properties.txt
@@ -1,9 +1,9 @@ | |||
1 | _DSD Device Properties Related to GPIO | 1 | _DSD Device Properties Related to GPIO |
2 | -------------------------------------- | 2 | -------------------------------------- |
3 | 3 | ||
4 | With the release of ACPI 5.1 and the _DSD configuration objecte names | 4 | With the release of ACPI 5.1, the _DSD configuration object finally |
5 | can finally be given to GPIOs (and other things as well) returned by | 5 | allows names to be given to GPIOs (and other things as well) returned |
6 | _CRS. Previously, we were only able to use an integer index to find | 6 | by _CRS. Previously, we were only able to use an integer index to find |
7 | the corresponding GPIO, which is pretty error prone (it depends on | 7 | the corresponding GPIO, which is pretty error prone (it depends on |
8 | the _CRS output ordering, for example). | 8 | the _CRS output ordering, for example). |
9 | 9 | ||
diff --git a/Documentation/devicetree/bindings/arm/omap/l3-noc.txt b/Documentation/devicetree/bindings/arm/omap/l3-noc.txt
index 974624ea68f6..161448da959d 100644
--- a/Documentation/devicetree/bindings/arm/omap/l3-noc.txt
+++ b/Documentation/devicetree/bindings/arm/omap/l3-noc.txt
@@ -6,6 +6,7 @@ provided by Arteris. | |||
6 | Required properties: | 6 | Required properties: |
7 | - compatible : Should be "ti,omap3-l3-smx" for OMAP3 family | 7 | - compatible : Should be "ti,omap3-l3-smx" for OMAP3 family |
8 | Should be "ti,omap4-l3-noc" for OMAP4 family | 8 | Should be "ti,omap4-l3-noc" for OMAP4 family |
9 | Should be "ti,omap5-l3-noc" for OMAP5 family | ||
9 | Should be "ti,dra7-l3-noc" for DRA7 family | 10 | Should be "ti,dra7-l3-noc" for DRA7 family |
10 | Should be "ti,am4372-l3-noc" for AM43 family | 11 | Should be "ti,am4372-l3-noc" for AM43 family |
11 | - reg: Contains L3 register address range for each noc domain. | 12 | - reg: Contains L3 register address range for each noc domain. |
diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
index a4873e5e3e36..e30e184f50c7 100644
--- a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
@@ -38,7 +38,7 @@ dma_apbx: dma-apbx@80024000 { | |||
38 | 80 81 68 69 | 38 | 80 81 68 69 |
39 | 70 71 72 73 | 39 | 70 71 72 73 |
40 | 74 75 76 77>; | 40 | 74 75 76 77>; |
41 | interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty", | 41 | interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty", |
42 | "saif0", "saif1", "i2c0", "i2c1", | 42 | "saif0", "saif1", "i2c0", "i2c1", |
43 | "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx", | 43 | "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx", |
44 | "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx"; | 44 | "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx"; |
diff --git a/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
new file mode 100644
index 000000000000..be789685a1c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
@@ -0,0 +1,30 @@ | |||
1 | Abracon ABX80X I2C ultra low power RTC/Alarm chip | ||
2 | |||
3 | The Abracon ABX80X family consists of the ab0801, ab0803, ab0804, ab0805, ab1801, | ||
4 | ab1803, ab1804 and ab1805. The ab0805 is the superset of ab080x and the ab1805 | ||
5 | is the superset of ab180x. | ||
6 | |||
7 | Required properties: | ||
8 | |||
9 | - "compatible": should be one of: | ||
10 | "abracon,abx80x" | ||
11 | "abracon,ab0801" | ||
12 | "abracon,ab0803" | ||
13 | "abracon,ab0804" | ||
14 | "abracon,ab0805" | ||
15 | "abracon,ab1801" | ||
16 | "abracon,ab1803" | ||
17 | "abracon,ab1804" | ||
18 | "abracon,ab1805" | ||
19 | Using "abracon,abx80x" will enable chip autodetection. | ||
20 | - "reg": I2C bus address of the device | ||
21 | |||
22 | Optional properties: | ||
23 | |||
24 | The abx804 and abx805 have a trickle charger that is able to charge the | ||
25 | connected battery or supercap. Both the following properties have to be defined | ||
26 | and valid to enable charging: | ||
27 | |||
28 | - "abracon,tc-diode": should be "standard" (0.6V) or "schottky" (0.3V) | ||
29 | - "abracon,tc-resistor": should be <0>, <3>, <6> or <11>. 0 disables the output | ||
30 | resistor, the other values are in ohm. | ||
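The new binding text carries no example node, so here is a minimal sketch of a board-level .dts entry. The compatible string and the two trickle-charger properties come from the binding above; the ab1805 part, the 0x69 bus address and the charger values are assumptions made for illustration:

	rtc@69 {
		compatible = "abracon,ab1805";
		reg = <0x69>;
		abracon,tc-diode = "schottky";
		abracon,tc-resistor = <3>;
	};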
diff --git a/Documentation/kasan.txt b/Documentation/kasan.txt
index 092fc10961fe..4692241789b1 100644
--- a/Documentation/kasan.txt
+++ b/Documentation/kasan.txt
@@ -9,7 +9,9 @@ a fast and comprehensive solution for finding use-after-free and out-of-bounds | |||
9 | bugs. | 9 | bugs. |
10 | 10 | ||
11 | KASan uses compile-time instrumentation for checking every memory access, | 11 | KASan uses compile-time instrumentation for checking every memory access, |
12 | therefore you will need a certain version of GCC > 4.9.2 | 12 | therefore you will need a gcc version of 4.9.2 or later. KASan could detect out |
13 | of bounds accesses to stack or global variables, but only if gcc 5.0 or later was | ||
14 | used to build the kernel. | ||
13 | 15 | ||
14 | Currently KASan is supported only for x86_64 architecture and requires that the | 16 | Currently KASan is supported only for x86_64 architecture and requires that the |
15 | kernel be built with the SLUB allocator. | 17 | kernel be built with the SLUB allocator. |
@@ -23,8 +25,8 @@ To enable KASAN configure kernel with: | |||
23 | 25 | ||
24 | and choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE. Outline/inline | 26 | and choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE. Outline/inline |
25 | is compiler instrumentation types. The former produces smaller binary the | 27 | is compiler instrumentation types. The former produces smaller binary the |
26 | latter is 1.1 - 2 times faster. Inline instrumentation requires GCC 5.0 or | 28 | latter is 1.1 - 2 times faster. Inline instrumentation requires a gcc version |
27 | latter. | 29 | of 5.0 or later. |
28 | 30 | ||
29 | Currently KASAN works only with the SLUB memory allocator. | 31 | Currently KASAN works only with the SLUB memory allocator. |
30 | For better bug detection and nicer report, enable CONFIG_STACKTRACE and put | 32 | For better bug detection and nicer report, enable CONFIG_STACKTRACE and put |
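Taken together, the two hunks above amount to a .config fragment along these lines; a sketch assuming gcc 5.0 or later is used, so the faster inline mode and the stack/global checks are available (with gcc 4.9.2 the outline variant, CONFIG_KASAN_OUTLINE=y, would be selected instead):

	CONFIG_SLUB=y
	CONFIG_KASAN=y
	CONFIG_KASAN_INLINE=y
	CONFIG_STACKTRACE=y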
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f6befa9855c1..61ab1628a057 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3787,6 +3787,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
3787 | READ_CAPACITY_16 command); | 3787 | READ_CAPACITY_16 command); |
3788 | f = NO_REPORT_OPCODES (don't use report opcodes | 3788 | f = NO_REPORT_OPCODES (don't use report opcodes |
3789 | command, uas only); | 3789 | command, uas only); |
3790 | g = MAX_SECTORS_240 (don't transfer more than | ||
3791 | 240 sectors at a time, uas only); | ||
3790 | h = CAPACITY_HEURISTICS (decrease the | 3792 | h = CAPACITY_HEURISTICS (decrease the |
3791 | reported device capacity by one | 3793 | reported device capacity by one |
3792 | sector if the number is odd); | 3794 | sector if the number is odd); |
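The new flag plugs into the usual usb-storage.quirks=VID:PID:flags syntax on the kernel command line; a hypothetical example, where 1234:5678 is a placeholder ID rather than a device known to need the quirk:

	usb-storage.quirks=1234:5678:g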
diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
index 09c2382ad055..c72702ec1ded 100644
--- a/Documentation/module-signing.txt
+++ b/Documentation/module-signing.txt
@@ -119,9 +119,9 @@ Most notably, in the x509.genkey file, the req_distinguished_name section | |||
119 | should be altered from the default: | 119 | should be altered from the default: |
120 | 120 | ||
121 | [ req_distinguished_name ] | 121 | [ req_distinguished_name ] |
122 | O = Magrathea | 122 | #O = Unspecified company |
123 | CN = Glacier signing key | 123 | CN = Build time autogenerated kernel key |
124 | emailAddress = slartibartfast@magrathea.h2g2 | 124 | #emailAddress = unspecified.user@unspecified.company |
125 | 125 | ||
126 | The generated RSA key size can also be set with: | 126 | The generated RSA key size can also be set with: |
127 | 127 | ||
diff --git a/Documentation/networking/mpls-sysctl.txt b/Documentation/networking/mpls-sysctl.txt
index 639ddf0ece9b..9ed15f86c17c 100644
--- a/Documentation/networking/mpls-sysctl.txt
+++ b/Documentation/networking/mpls-sysctl.txt
@@ -18,3 +18,12 @@ platform_labels - INTEGER | |||
18 | 18 | ||
19 | Possible values: 0 - 1048575 | 19 | Possible values: 0 - 1048575 |
20 | Default: 0 | 20 | Default: 0 |
21 | |||
22 | conf/<interface>/input - BOOL | ||
23 | Control whether packets can be input on this interface. | ||
24 | |||
25 | If disabled, packets will be discarded without further | ||
26 | processing. | ||
27 | |||
28 | 0 - disabled (default) | ||
29 | not 0 - enabled | ||
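As a usage sketch, the new per-interface knob sits next to platform_labels under net.mpls; here eth0 is a placeholder interface name and 1024 an arbitrary label-table size chosen for the example:

	sysctl -w net.mpls.platform_labels=1024
	sysctl -w net.mpls.conf.eth0.input=1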
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index cbfac0949635..59f4db2a0c85 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -282,7 +282,7 @@ following is true: | |||
282 | 282 | ||
283 | - The current CPU's queue head counter >= the recorded tail counter | 283 | - The current CPU's queue head counter >= the recorded tail counter |
284 | value in rps_dev_flow[i] | 284 | value in rps_dev_flow[i] |
285 | - The current CPU is unset (equal to RPS_NO_CPU) | 285 | - The current CPU is unset (>= nr_cpu_ids) |
286 | - The current CPU is offline | 286 | - The current CPU is offline |
287 | 287 | ||
288 | After this check, the packet is sent to the (possibly updated) current | 288 | After this check, the packet is sent to the (possibly updated) current |
diff --git a/Documentation/powerpc/transactional_memory.txt b/Documentation/powerpc/transactional_memory.txt
index ba0a2a4a54ba..ded69794a5c0 100644
--- a/Documentation/powerpc/transactional_memory.txt
+++ b/Documentation/powerpc/transactional_memory.txt
@@ -74,23 +74,22 @@ Causes of transaction aborts | |||
74 | Syscalls | 74 | Syscalls |
75 | ======== | 75 | ======== |
76 | 76 | ||
77 | Syscalls made from within an active transaction will not be performed and the | 77 | Performing syscalls from within a transaction is not recommended, and can lead |
78 | transaction will be doomed by the kernel with the failure code TM_CAUSE_SYSCALL | 78 | to unpredictable results. |
79 | | TM_CAUSE_PERSISTENT. | ||
80 | 79 | ||
81 | Syscalls made from within a suspended transaction are performed as normal and | 80 | Syscalls do not by design abort transactions, but beware: The kernel code will |
82 | the transaction is not explicitly doomed by the kernel. However, what the | 81 | not be running in transactional state. The effect of syscalls will always |
83 | kernel does to perform the syscall may result in the transaction being doomed | 82 | remain visible, but depending on the call they may abort your transaction as a |
84 | by the hardware. The syscall is performed in suspended mode so any side | 83 | side-effect, read soon-to-be-aborted transactional data that should not remain |
85 | effects will be persistent, independent of transaction success or failure. No | 84 | invisible, etc. If you constantly retry a transaction that constantly aborts |
86 | guarantees are provided by the kernel about which syscalls will affect | 85 | itself by calling a syscall, you'll have a livelock & make no progress. |
87 | transaction success. | ||
88 | 86 | ||
89 | Care must be taken when relying on syscalls to abort during active transactions | 87 | Simple syscalls (e.g. sigprocmask()) "could" be OK. Even things like write() |
90 | if the calls are made via a library. Libraries may cache values (which may | 88 | from, say, printf() should be OK as long as the kernel does not access any |
91 | give the appearance of success) or perform operations that cause transaction | 89 | memory that was accessed transactionally. |
92 | failure before entering the kernel (which may produce different failure codes). | 90 | |
93 | Examples are glibc's getpid() and lazy symbol resolution. | 91 | Consider any syscalls that happen to work as debug-only -- not recommended for |
92 | production use. Best to queue them up till after the transaction is over. | ||
94 | 93 | ||
95 | 94 | ||
96 | Signals | 95 | Signals |
@@ -177,7 +176,8 @@ kernel aborted a transaction: | |||
177 | TM_CAUSE_RESCHED Thread was rescheduled. | 176 | TM_CAUSE_RESCHED Thread was rescheduled. |
178 | TM_CAUSE_TLBI Software TLB invalid. | 177 | TM_CAUSE_TLBI Software TLB invalid. |
179 | TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap. | 178 | TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap. |
180 | TM_CAUSE_SYSCALL Syscall from active transaction. | 179 | TM_CAUSE_SYSCALL Currently unused; future syscalls that must abort |
180 | transactions for consistency will use this. | ||
181 | TM_CAUSE_SIGNAL Signal delivered. | 181 | TM_CAUSE_SIGNAL Signal delivered. |
182 | TM_CAUSE_MISC Currently unused. | 182 | TM_CAUSE_MISC Currently unused. |
183 | TM_CAUSE_ALIGNMENT Alignment fault. | 183 | TM_CAUSE_ALIGNMENT Alignment fault. |
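A short user-space sketch of the livelock the rewritten section warns about, written against GCC's PowerPC HTM builtins (built with -mhtm); the function and its retry structure are illustrative assumptions, not taken from the document:

	#include <unistd.h>

	static void log_in_transaction(const char *msg, int len)
	{
		for (;;) {
			if (__builtin_tbegin(0)) {
				/* Transactional state: the syscall below runs
				   non-transactionally and may abort us as a
				   side-effect. */
				write(1, msg, len);
				__builtin_tend(0);
				return;
			}
			/* Aborted: retrying unconditionally is exactly the
			   livelock described above; a real program should fall
			   back to a non-transactional path after a few tries. */
		}
	}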
diff --git a/MAINTAINERS b/MAINTAINERS
index e9919782444b..246ca6964158 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -892,11 +892,10 @@ S: Maintained | |||
892 | F: arch/arm/mach-alpine/ | 892 | F: arch/arm/mach-alpine/ |
893 | 893 | ||
894 | ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES | 894 | ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES |
895 | M: Andrew Victor <linux@maxim.org.za> | ||
896 | M: Nicolas Ferre <nicolas.ferre@atmel.com> | 895 | M: Nicolas Ferre <nicolas.ferre@atmel.com> |
896 | M: Alexandre Belloni <alexandre.belloni@free-electrons.com> | ||
897 | M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com> | 897 | M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com> |
898 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 898 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
899 | W: http://maxim.org.za/at91_26.html | ||
900 | W: http://www.linux4sam.org | 899 | W: http://www.linux4sam.org |
901 | S: Supported | 900 | S: Supported |
902 | F: arch/arm/mach-at91/ | 901 | F: arch/arm/mach-at91/ |
@@ -990,6 +989,12 @@ F: drivers/clocksource/timer-prima2.c | |||
990 | F: drivers/clocksource/timer-atlas7.c | 989 | F: drivers/clocksource/timer-atlas7.c |
991 | N: [^a-z]sirf | 990 | N: [^a-z]sirf |
992 | 991 | ||
992 | ARM/CONEXANT DIGICOLOR MACHINE SUPPORT | ||
993 | M: Baruch Siach <baruch@tkos.co.il> | ||
994 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
995 | S: Maintained | ||
996 | N: digicolor | ||
997 | |||
993 | ARM/EBSA110 MACHINE SUPPORT | 998 | ARM/EBSA110 MACHINE SUPPORT |
994 | M: Russell King <linux@arm.linux.org.uk> | 999 | M: Russell King <linux@arm.linux.org.uk> |
995 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 1000 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
@@ -1439,9 +1444,10 @@ ARM/SOCFPGA ARCHITECTURE | |||
1439 | M: Dinh Nguyen <dinguyen@opensource.altera.com> | 1444 | M: Dinh Nguyen <dinguyen@opensource.altera.com> |
1440 | S: Maintained | 1445 | S: Maintained |
1441 | F: arch/arm/mach-socfpga/ | 1446 | F: arch/arm/mach-socfpga/ |
1447 | F: arch/arm/boot/dts/socfpga* | ||
1448 | F: arch/arm/configs/socfpga_defconfig | ||
1442 | W: http://www.rocketboards.org | 1449 | W: http://www.rocketboards.org |
1443 | T: git://git.rocketboards.org/linux-socfpga.git | 1450 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git |
1444 | T: git://git.rocketboards.org/linux-socfpga-next.git | ||
1445 | 1451 | ||
1446 | ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT | 1452 | ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT |
1447 | M: Dinh Nguyen <dinguyen@opensource.altera.com> | 1453 | M: Dinh Nguyen <dinguyen@opensource.altera.com> |
@@ -2116,8 +2122,9 @@ S: Supported | |||
2116 | F: drivers/net/ethernet/broadcom/bnx2x/ | 2122 | F: drivers/net/ethernet/broadcom/bnx2x/ |
2117 | 2123 | ||
2118 | BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE | 2124 | BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE |
2119 | M: Christian Daudt <bcm@fixthebug.org> | ||
2120 | M: Florian Fainelli <f.fainelli@gmail.com> | 2125 | M: Florian Fainelli <f.fainelli@gmail.com> |
2126 | M: Ray Jui <rjui@broadcom.com> | ||
2127 | M: Scott Branden <sbranden@broadcom.com> | ||
2121 | L: bcm-kernel-feedback-list@broadcom.com | 2128 | L: bcm-kernel-feedback-list@broadcom.com |
2122 | T: git git://github.com/broadcom/mach-bcm | 2129 | T: git git://github.com/broadcom/mach-bcm |
2123 | S: Maintained | 2130 | S: Maintained |
@@ -2168,7 +2175,6 @@ S: Maintained | |||
2168 | F: drivers/usb/gadget/udc/bcm63xx_udc.* | 2175 | F: drivers/usb/gadget/udc/bcm63xx_udc.* |
2169 | 2176 | ||
2170 | BROADCOM BCM7XXX ARM ARCHITECTURE | 2177 | BROADCOM BCM7XXX ARM ARCHITECTURE |
2171 | M: Marc Carino <marc.ceeeee@gmail.com> | ||
2172 | M: Brian Norris <computersforpeace@gmail.com> | 2178 | M: Brian Norris <computersforpeace@gmail.com> |
2173 | M: Gregory Fong <gregory.0xf0@gmail.com> | 2179 | M: Gregory Fong <gregory.0xf0@gmail.com> |
2174 | M: Florian Fainelli <f.fainelli@gmail.com> | 2180 | M: Florian Fainelli <f.fainelli@gmail.com> |
@@ -3413,6 +3419,13 @@ F: drivers/gpu/drm/rcar-du/ | |||
3413 | F: drivers/gpu/drm/shmobile/ | 3419 | F: drivers/gpu/drm/shmobile/ |
3414 | F: include/linux/platform_data/shmob_drm.h | 3420 | F: include/linux/platform_data/shmob_drm.h |
3415 | 3421 | ||
3422 | DRM DRIVERS FOR ROCKCHIP | ||
3423 | M: Mark Yao <mark.yao@rock-chips.com> | ||
3424 | L: dri-devel@lists.freedesktop.org | ||
3425 | S: Maintained | ||
3426 | F: drivers/gpu/drm/rockchip/ | ||
3427 | F: Documentation/devicetree/bindings/video/rockchip* | ||
3428 | |||
3416 | DSBR100 USB FM RADIO DRIVER | 3429 | DSBR100 USB FM RADIO DRIVER |
3417 | M: Alexey Klimov <klimov.linux@gmail.com> | 3430 | M: Alexey Klimov <klimov.linux@gmail.com> |
3418 | L: linux-media@vger.kernel.org | 3431 | L: linux-media@vger.kernel.org |
@@ -5035,17 +5048,19 @@ S: Orphan | |||
5035 | F: drivers/video/fbdev/imsttfb.c | 5048 | F: drivers/video/fbdev/imsttfb.c |
5036 | 5049 | ||
5037 | INFINIBAND SUBSYSTEM | 5050 | INFINIBAND SUBSYSTEM |
5038 | M: Roland Dreier <roland@kernel.org> | 5051 | M: Doug Ledford <dledford@redhat.com> |
5039 | M: Sean Hefty <sean.hefty@intel.com> | 5052 | M: Sean Hefty <sean.hefty@intel.com> |
5040 | M: Hal Rosenstock <hal.rosenstock@gmail.com> | 5053 | M: Hal Rosenstock <hal.rosenstock@gmail.com> |
5041 | L: linux-rdma@vger.kernel.org | 5054 | L: linux-rdma@vger.kernel.org |
5042 | W: http://www.openfabrics.org/ | 5055 | W: http://www.openfabrics.org/ |
5043 | Q: http://patchwork.kernel.org/project/linux-rdma/list/ | 5056 | Q: http://patchwork.kernel.org/project/linux-rdma/list/ |
5044 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git | 5057 | T: git git://github.com/dledford/linux.git |
5045 | S: Supported | 5058 | S: Supported |
5046 | F: Documentation/infiniband/ | 5059 | F: Documentation/infiniband/ |
5047 | F: drivers/infiniband/ | 5060 | F: drivers/infiniband/ |
5048 | F: include/uapi/linux/if_infiniband.h | 5061 | F: include/uapi/linux/if_infiniband.h |
5062 | F: include/uapi/rdma/ | ||
5063 | F: include/rdma/ | ||
5049 | 5064 | ||
5050 | INOTIFY | 5065 | INOTIFY |
5051 | M: John McCutchan <john@johnmccutchan.com> | 5066 | M: John McCutchan <john@johnmccutchan.com> |
@@ -5798,6 +5813,7 @@ F: drivers/scsi/53c700* | |||
5798 | LED SUBSYSTEM | 5813 | LED SUBSYSTEM |
5799 | M: Bryan Wu <cooloney@gmail.com> | 5814 | M: Bryan Wu <cooloney@gmail.com> |
5800 | M: Richard Purdie <rpurdie@rpsys.net> | 5815 | M: Richard Purdie <rpurdie@rpsys.net> |
5816 | M: Jacek Anaszewski <j.anaszewski@samsung.com> | ||
5801 | L: linux-leds@vger.kernel.org | 5817 | L: linux-leds@vger.kernel.org |
5802 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git | 5818 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git |
5803 | S: Maintained | 5819 | S: Maintained |
@@ -10534,7 +10550,6 @@ F: include/linux/virtio_console.h | |||
10534 | F: include/uapi/linux/virtio_console.h | 10550 | F: include/uapi/linux/virtio_console.h |
10535 | 10551 | ||
10536 | VIRTIO CORE, NET AND BLOCK DRIVERS | 10552 | VIRTIO CORE, NET AND BLOCK DRIVERS |
10537 | M: Rusty Russell <rusty@rustcorp.com.au> | ||
10538 | M: "Michael S. Tsirkin" <mst@redhat.com> | 10553 | M: "Michael S. Tsirkin" <mst@redhat.com> |
10539 | L: virtualization@lists.linux-foundation.org | 10554 | L: virtualization@lists.linux-foundation.org |
10540 | S: Maintained | 10555 | S: Maintained |
@@ -11042,6 +11057,7 @@ F: drivers/media/pci/zoran/ | |||
11042 | ZRAM COMPRESSED RAM BLOCK DEVICE DRVIER | 11057 | ZRAM COMPRESSED RAM BLOCK DEVICE DRVIER |
11043 | M: Minchan Kim <minchan@kernel.org> | 11058 | M: Minchan Kim <minchan@kernel.org> |
11044 | M: Nitin Gupta <ngupta@vflare.org> | 11059 | M: Nitin Gupta <ngupta@vflare.org> |
11060 | R: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com> | ||
11045 | L: linux-kernel@vger.kernel.org | 11061 | L: linux-kernel@vger.kernel.org |
11046 | S: Maintained | 11062 | S: Maintained |
11047 | F: drivers/block/zram/ | 11063 | F: drivers/block/zram/ |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 1 | 2 | PATCHLEVEL = 1 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc1 | 4 | EXTRAVERSION = -rc3 |
5 | NAME = Hurr durr I'ma sheep | 5 | NAME = Hurr durr I'ma sheep |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 8ae29c955c11..c17097d2c167 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -49,7 +49,7 @@ | |||
49 | pinctrl-0 = <&matrix_keypad_pins>; | 49 | pinctrl-0 = <&matrix_keypad_pins>; |
50 | 50 | ||
51 | debounce-delay-ms = <5>; | 51 | debounce-delay-ms = <5>; |
52 | col-scan-delay-us = <1500>; | 52 | col-scan-delay-us = <5>; |
53 | 53 | ||
54 | row-gpios = <&gpio5 5 GPIO_ACTIVE_HIGH /* Bank5, pin5 */ | 54 | row-gpios = <&gpio5 5 GPIO_ACTIVE_HIGH /* Bank5, pin5 */ |
55 | &gpio5 6 GPIO_ACTIVE_HIGH>; /* Bank5, pin6 */ | 55 | &gpio5 6 GPIO_ACTIVE_HIGH>; /* Bank5, pin6 */ |
@@ -473,7 +473,7 @@ | |||
473 | interrupt-parent = <&gpio0>; | 473 | interrupt-parent = <&gpio0>; |
474 | interrupts = <31 0>; | 474 | interrupts = <31 0>; |
475 | 475 | ||
476 | wake-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>; | 476 | reset-gpios = <&gpio1 28 GPIO_ACTIVE_LOW>; |
477 | 477 | ||
478 | touchscreen-size-x = <480>; | 478 | touchscreen-size-x = <480>; |
479 | touchscreen-size-y = <272>; | 479 | touchscreen-size-y = <272>; |
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 15f198e4864d..7128fad991ac 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -18,6 +18,7 @@ | |||
18 | aliases { | 18 | aliases { |
19 | rtc0 = &mcp_rtc; | 19 | rtc0 = &mcp_rtc; |
20 | rtc1 = &tps659038_rtc; | 20 | rtc1 = &tps659038_rtc; |
21 | rtc2 = &rtc; | ||
21 | }; | 22 | }; |
22 | 23 | ||
23 | memory { | 24 | memory { |
@@ -83,7 +84,7 @@ | |||
83 | gpio_fan: gpio_fan { | 84 | gpio_fan: gpio_fan { |
84 | /* Based on 5v 500mA AFB02505HHB */ | 85 | /* Based on 5v 500mA AFB02505HHB */ |
85 | compatible = "gpio-fan"; | 86 | compatible = "gpio-fan"; |
86 | gpios = <&tps659038_gpio 1 GPIO_ACTIVE_HIGH>; | 87 | gpios = <&tps659038_gpio 2 GPIO_ACTIVE_HIGH>; |
87 | gpio-fan,speed-map = <0 0>, | 88 | gpio-fan,speed-map = <0 0>, |
88 | <13000 1>; | 89 | <13000 1>; |
89 | #cooling-cells = <2>; | 90 | #cooling-cells = <2>; |
@@ -130,8 +131,8 @@ | |||
130 | 131 | ||
131 | uart3_pins_default: uart3_pins_default { | 132 | uart3_pins_default: uart3_pins_default { |
132 | pinctrl-single,pins = < | 133 | pinctrl-single,pins = < |
133 | 0x248 (PIN_INPUT_SLEW | MUX_MODE0) /* uart3_rxd.rxd */ | 134 | 0x3f8 (PIN_INPUT_SLEW | MUX_MODE2) /* uart2_ctsn.uart3_rxd */ |
134 | 0x24c (PIN_INPUT_SLEW | MUX_MODE0) /* uart3_txd.txd */ | 135 | 0x3fc (PIN_INPUT_SLEW | MUX_MODE1) /* uart2_rtsn.uart3_txd */ |
135 | >; | 136 | >; |
136 | }; | 137 | }; |
137 | 138 | ||
@@ -455,7 +456,7 @@ | |||
455 | mcp_rtc: rtc@6f { | 456 | mcp_rtc: rtc@6f { |
456 | compatible = "microchip,mcp7941x"; | 457 | compatible = "microchip,mcp7941x"; |
457 | reg = <0x6f>; | 458 | reg = <0x6f>; |
458 | interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_LOW>; /* IRQ_SYS_1N */ | 459 | interrupts = <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>; /* IRQ_SYS_1N */ |
459 | 460 | ||
460 | pinctrl-names = "default"; | 461 | pinctrl-names = "default"; |
461 | pinctrl-0 = <&mcp79410_pins_default>; | 462 | pinctrl-0 = <&mcp79410_pins_default>; |
@@ -478,7 +479,7 @@ | |||
478 | &uart3 { | 479 | &uart3 { |
479 | status = "okay"; | 480 | status = "okay"; |
480 | interrupts-extended = <&crossbar_mpu GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>, | 481 | interrupts-extended = <&crossbar_mpu GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>, |
481 | <&dra7_pmx_core 0x248>; | 482 | <&dra7_pmx_core 0x3f8>; |
482 | 483 | ||
483 | pinctrl-names = "default"; | 484 | pinctrl-names = "default"; |
484 | pinctrl-0 = <&uart3_pins_default>; | 485 | pinctrl-0 = <&uart3_pins_default>; |
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index e3b08fb959e5..990e8a2100f0 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -105,6 +105,10 @@ | |||
105 | }; | 105 | }; |
106 | 106 | ||
107 | internal-regs { | 107 | internal-regs { |
108 | rtc@10300 { | ||
109 | /* No crystal connected to the internal RTC */ | ||
110 | status = "disabled"; | ||
111 | }; | ||
108 | serial@12000 { | 112 | serial@12000 { |
109 | status = "okay"; | 113 | status = "okay"; |
110 | }; | 114 | }; |
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 5332b57b4950..f03a091cd076 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -911,7 +911,7 @@ | |||
911 | ti,clock-cycles = <16>; | 911 | ti,clock-cycles = <16>; |
912 | 912 | ||
913 | reg = <0x4ae07ddc 0x4>, <0x4ae07de0 0x4>, | 913 | reg = <0x4ae07ddc 0x4>, <0x4ae07de0 0x4>, |
914 | <0x4ae06014 0x4>, <0x4a003b20 0x8>, | 914 | <0x4ae06014 0x4>, <0x4a003b20 0xc>, |
915 | <0x4ae0c158 0x4>; | 915 | <0x4ae0c158 0x4>; |
916 | reg-names = "setup-address", "control-address", | 916 | reg-names = "setup-address", "control-address", |
917 | "int-address", "efuse-address", | 917 | "int-address", "efuse-address", |
@@ -944,7 +944,7 @@ | |||
944 | ti,clock-cycles = <16>; | 944 | ti,clock-cycles = <16>; |
945 | 945 | ||
946 | reg = <0x4ae07e34 0x4>, <0x4ae07e24 0x4>, | 946 | reg = <0x4ae07e34 0x4>, <0x4ae07e24 0x4>, |
947 | <0x4ae06010 0x4>, <0x4a0025cc 0x8>, | 947 | <0x4ae06010 0x4>, <0x4a0025cc 0xc>, |
948 | <0x4a002470 0x4>; | 948 | <0x4a002470 0x4>; |
949 | reg-names = "setup-address", "control-address", | 949 | reg-names = "setup-address", "control-address", |
950 | "int-address", "efuse-address", | 950 | "int-address", "efuse-address", |
@@ -977,7 +977,7 @@ | |||
977 | ti,clock-cycles = <16>; | 977 | ti,clock-cycles = <16>; |
978 | 978 | ||
979 | reg = <0x4ae07e30 0x4>, <0x4ae07e20 0x4>, | 979 | reg = <0x4ae07e30 0x4>, <0x4ae07e20 0x4>, |
980 | <0x4ae06010 0x4>, <0x4a0025e0 0x8>, | 980 | <0x4ae06010 0x4>, <0x4a0025e0 0xc>, |
981 | <0x4a00246c 0x4>; | 981 | <0x4a00246c 0x4>; |
982 | reg-names = "setup-address", "control-address", | 982 | reg-names = "setup-address", "control-address", |
983 | "int-address", "efuse-address", | 983 | "int-address", "efuse-address", |
@@ -1010,7 +1010,7 @@ | |||
1010 | ti,clock-cycles = <16>; | 1010 | ti,clock-cycles = <16>; |
1011 | 1011 | ||
1012 | reg = <0x4ae07de4 0x4>, <0x4ae07de8 0x4>, | 1012 | reg = <0x4ae07de4 0x4>, <0x4ae07de8 0x4>, |
1013 | <0x4ae06010 0x4>, <0x4a003b08 0x8>, | 1013 | <0x4ae06010 0x4>, <0x4a003b08 0xc>, |
1014 | <0x4ae0c154 0x4>; | 1014 | <0x4ae0c154 0x4>; |
1015 | reg-names = "setup-address", "control-address", | 1015 | reg-names = "setup-address", "control-address", |
1016 | "int-address", "efuse-address", | 1016 | "int-address", "efuse-address", |
@@ -1203,7 +1203,7 @@ | |||
1203 | status = "disabled"; | 1203 | status = "disabled"; |
1204 | }; | 1204 | }; |
1205 | 1205 | ||
1206 | rtc@48838000 { | 1206 | rtc: rtc@48838000 { |
1207 | compatible = "ti,am3352-rtc"; | 1207 | compatible = "ti,am3352-rtc"; |
1208 | reg = <0x48838000 0x100>; | 1208 | reg = <0x48838000 0x100>; |
1209 | interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>, | 1209 | interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>, |
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 8de12af7c276..d6b49e5b32e9 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -9,6 +9,7 @@ | |||
9 | 9 | ||
10 | #include <dt-bindings/sound/samsung-i2s.h> | 10 | #include <dt-bindings/sound/samsung-i2s.h> |
11 | #include <dt-bindings/input/input.h> | 11 | #include <dt-bindings/input/input.h> |
12 | #include <dt-bindings/clock/maxim,max77686.h> | ||
12 | #include "exynos4412.dtsi" | 13 | #include "exynos4412.dtsi" |
13 | 14 | ||
14 | / { | 15 | / { |
@@ -105,6 +106,8 @@ | |||
105 | 106 | ||
106 | rtc@10070000 { | 107 | rtc@10070000 { |
107 | status = "okay"; | 108 | status = "okay"; |
109 | clocks = <&clock CLK_RTC>, <&max77686 MAX77686_CLK_AP>; | ||
110 | clock-names = "rtc", "rtc_src"; | ||
108 | }; | 111 | }; |
109 | 112 | ||
110 | g2d@10800000 { | 113 | g2d@10800000 { |
diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts
index 2657e842e5a5..1eca97ee4bd6 100644
--- a/arch/arm/boot/dts/exynos5250-snow.dts
+++ b/arch/arm/boot/dts/exynos5250-snow.dts
@@ -567,6 +567,7 @@ | |||
567 | num-slots = <1>; | 567 | num-slots = <1>; |
568 | broken-cd; | 568 | broken-cd; |
569 | cap-sdio-irq; | 569 | cap-sdio-irq; |
570 | keep-power-in-suspend; | ||
570 | card-detect-delay = <200>; | 571 | card-detect-delay = <200>; |
571 | samsung,dw-mshc-ciu-div = <3>; | 572 | samsung,dw-mshc-ciu-div = <3>; |
572 | samsung,dw-mshc-sdr-timing = <2 3>; | 573 | samsung,dw-mshc-sdr-timing = <2 3>; |
diff --git a/arch/arm/boot/dts/exynos5420-trip-points.dtsi b/arch/arm/boot/dts/exynos5420-trip-points.dtsi
index 5d31fc140823..2180a0152c9b 100644
--- a/arch/arm/boot/dts/exynos5420-trip-points.dtsi
+++ b/arch/arm/boot/dts/exynos5420-trip-points.dtsi
@@ -28,7 +28,7 @@ trips { | |||
28 | type = "active"; | 28 | type = "active"; |
29 | }; | 29 | }; |
30 | cpu-crit-0 { | 30 | cpu-crit-0 { |
31 | temperature = <1200000>; /* millicelsius */ | 31 | temperature = <120000>; /* millicelsius */ |
32 | hysteresis = <0>; /* millicelsius */ | 32 | hysteresis = <0>; /* millicelsius */ |
33 | type = "critical"; | 33 | type = "critical"; |
34 | }; | 34 | }; |
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index f67b23f303c3..45317538bbae 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -536,6 +536,7 @@ | |||
536 | clock-names = "dp"; | 536 | clock-names = "dp"; |
537 | phys = <&dp_phy>; | 537 | phys = <&dp_phy>; |
538 | phy-names = "dp"; | 538 | phy-names = "dp"; |
539 | power-domains = <&disp_pd>; | ||
539 | }; | 540 | }; |
540 | 541 | ||
541 | mipi_phy: video-phy@10040714 { | 542 | mipi_phy: video-phy@10040714 { |
diff --git a/arch/arm/boot/dts/exynos5440-trip-points.dtsi b/arch/arm/boot/dts/exynos5440-trip-points.dtsi
index 48adfa8f4300..356e963edf11 100644
--- a/arch/arm/boot/dts/exynos5440-trip-points.dtsi
+++ b/arch/arm/boot/dts/exynos5440-trip-points.dtsi
@@ -18,7 +18,7 @@ trips { | |||
18 | type = "active"; | 18 | type = "active"; |
19 | }; | 19 | }; |
20 | cpu-crit-0 { | 20 | cpu-crit-0 { |
21 | temperature = <1050000>; /* millicelsius */ | 21 | temperature = <105000>; /* millicelsius */ |
22 | hysteresis = <0>; /* millicelsius */ | 22 | hysteresis = <0>; /* millicelsius */ |
23 | type = "critical"; | 23 | type = "critical"; |
24 | }; | 24 | }; |
diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts
index 7e6eef2488e8..82045398bf1f 100644
--- a/arch/arm/boot/dts/imx23-olinuxino.dts
+++ b/arch/arm/boot/dts/imx23-olinuxino.dts
@@ -12,6 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | 13 | ||
14 | /dts-v1/; | 14 | /dts-v1/; |
15 | #include <dt-bindings/gpio/gpio.h> | ||
15 | #include "imx23.dtsi" | 16 | #include "imx23.dtsi" |
16 | 17 | ||
17 | / { | 18 | / { |
@@ -93,6 +94,7 @@ | |||
93 | 94 | ||
94 | ahb@80080000 { | 95 | ahb@80080000 { |
95 | usb0: usb@80080000 { | 96 | usb0: usb@80080000 { |
97 | dr_mode = "host"; | ||
96 | vbus-supply = <®_usb0_vbus>; | 98 | vbus-supply = <®_usb0_vbus>; |
97 | status = "okay"; | 99 | status = "okay"; |
98 | }; | 100 | }; |
@@ -122,7 +124,7 @@ | |||
122 | 124 | ||
123 | user { | 125 | user { |
124 | label = "green"; | 126 | label = "green"; |
125 | gpios = <&gpio2 1 1>; | 127 | gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; |
126 | }; | 128 | }; |
127 | }; | 129 | }; |
128 | }; | 130 | }; |
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index e4d3aecc4ed2..677f81d9dcd5 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -428,6 +428,7 @@ | |||
428 | 428 | ||
429 | pwm4: pwm@53fc8000 { | 429 | pwm4: pwm@53fc8000 { |
430 | compatible = "fsl,imx25-pwm", "fsl,imx27-pwm"; | 430 | compatible = "fsl,imx25-pwm", "fsl,imx27-pwm"; |
431 | #pwm-cells = <2>; | ||
431 | reg = <0x53fc8000 0x4000>; | 432 | reg = <0x53fc8000 0x4000>; |
432 | clocks = <&clks 108>, <&clks 52>; | 433 | clocks = <&clks 108>, <&clks 52>; |
433 | clock-names = "ipg", "per"; | 434 | clock-names = "ipg", "per"; |
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 25e25f82fbae..4e073e854742 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -913,7 +913,7 @@ | |||
913 | 80 81 68 69 | 913 | 80 81 68 69 |
914 | 70 71 72 73 | 914 | 70 71 72 73 |
915 | 74 75 76 77>; | 915 | 74 75 76 77>; |
916 | interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty", | 916 | interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty", |
917 | "saif0", "saif1", "i2c0", "i2c1", | 917 | "saif0", "saif1", "i2c0", "i2c1", |
918 | "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx", | 918 | "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx", |
919 | "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx"; | 919 | "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx"; |
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 19cc269a08d4..1ce6133b67f5 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -31,6 +31,7 @@ | |||
31 | regulator-min-microvolt = <5000000>; | 31 | regulator-min-microvolt = <5000000>; |
32 | regulator-max-microvolt = <5000000>; | 32 | regulator-max-microvolt = <5000000>; |
33 | gpio = <&gpio4 15 0>; | 33 | gpio = <&gpio4 15 0>; |
34 | enable-active-high; | ||
34 | }; | 35 | }; |
35 | 36 | ||
36 | reg_usb_h1_vbus: regulator@1 { | 37 | reg_usb_h1_vbus: regulator@1 { |
@@ -40,6 +41,7 @@ | |||
40 | regulator-min-microvolt = <5000000>; | 41 | regulator-min-microvolt = <5000000>; |
41 | regulator-max-microvolt = <5000000>; | 42 | regulator-max-microvolt = <5000000>; |
42 | gpio = <&gpio1 0 0>; | 43 | gpio = <&gpio1 0 0>; |
44 | enable-active-high; | ||
43 | }; | 45 | }; |
44 | }; | 46 | }; |
45 | 47 | ||
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 46b2fed7c319..3b24b12651b2 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -185,7 +185,6 @@ | |||
185 | &i2c3 { | 185 | &i2c3 { |
186 | pinctrl-names = "default"; | 186 | pinctrl-names = "default"; |
187 | pinctrl-0 = <&pinctrl_i2c3>; | 187 | pinctrl-0 = <&pinctrl_i2c3>; |
188 | pinctrl-assert-gpios = <&gpio5 4 GPIO_ACTIVE_HIGH>; | ||
189 | status = "okay"; | 188 | status = "okay"; |
190 | 189 | ||
191 | max7310_a: gpio@30 { | 190 | max7310_a: gpio@30 { |
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index a29315833ecd..5c16145920ea 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -498,6 +498,8 @@ | |||
498 | DRVDD-supply = <&vmmc2>; | 498 | DRVDD-supply = <&vmmc2>; |
499 | IOVDD-supply = <&vio>; | 499 | IOVDD-supply = <&vio>; |
500 | DVDD-supply = <&vio>; | 500 | DVDD-supply = <&vio>; |
501 | |||
502 | ai3x-micbias-vg = <1>; | ||
501 | }; | 503 | }; |
502 | 504 | ||
503 | tlv320aic3x_aux: tlv320aic3x@19 { | 505 | tlv320aic3x_aux: tlv320aic3x@19 { |
@@ -509,6 +511,8 @@ | |||
509 | DRVDD-supply = <&vmmc2>; | 511 | DRVDD-supply = <&vmmc2>; |
510 | IOVDD-supply = <&vio>; | 512 | IOVDD-supply = <&vio>; |
511 | DVDD-supply = <&vio>; | 513 | DVDD-supply = <&vio>; |
514 | |||
515 | ai3x-micbias-vg = <2>; | ||
512 | }; | 516 | }; |
513 | 517 | ||
514 | tsl2563: tsl2563@29 { | 518 | tsl2563: tsl2563@29 { |
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index d18a90f5eca3..69a40cfc1f29 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -456,6 +456,7 @@ | |||
456 | }; | 456 | }; |
457 | 457 | ||
458 | mmu_isp: mmu@480bd400 { | 458 | mmu_isp: mmu@480bd400 { |
459 | #iommu-cells = <0>; | ||
459 | compatible = "ti,omap2-iommu"; | 460 | compatible = "ti,omap2-iommu"; |
460 | reg = <0x480bd400 0x80>; | 461 | reg = <0x480bd400 0x80>; |
461 | interrupts = <24>; | 462 | interrupts = <24>; |
@@ -464,6 +465,7 @@ | |||
464 | }; | 465 | }; |
465 | 466 | ||
466 | mmu_iva: mmu@5d000000 { | 467 | mmu_iva: mmu@5d000000 { |
468 | #iommu-cells = <0>; | ||
467 | compatible = "ti,omap2-iommu"; | 469 | compatible = "ti,omap2-iommu"; |
468 | reg = <0x5d000000 0x80>; | 470 | reg = <0x5d000000 0x80>; |
469 | interrupts = <28>; | 471 | interrupts = <28>; |
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index efe5f737f39b..7d24ae0306b5 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -128,7 +128,7 @@ | |||
128 | * hierarchy. | 128 | * hierarchy. |
129 | */ | 129 | */ |
130 | ocp { | 130 | ocp { |
131 | compatible = "ti,omap4-l3-noc", "simple-bus"; | 131 | compatible = "ti,omap5-l3-noc", "simple-bus"; |
132 | #address-cells = <1>; | 132 | #address-cells = <1>; |
133 | #size-cells = <1>; | 133 | #size-cells = <1>; |
134 | ranges; | 134 | ranges; |
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index 74c3212f1f11..824ddab9c3ad 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -545,7 +545,7 @@ | |||
545 | compatible = "adi,adv7511w"; | 545 | compatible = "adi,adv7511w"; |
546 | reg = <0x39>; | 546 | reg = <0x39>; |
547 | interrupt-parent = <&gpio3>; | 547 | interrupt-parent = <&gpio3>; |
548 | interrupts = <29 IRQ_TYPE_EDGE_FALLING>; | 548 | interrupts = <29 IRQ_TYPE_LEVEL_LOW>; |
549 | 549 | ||
550 | adi,input-depth = <8>; | 550 | adi,input-depth = <8>; |
551 | adi,input-colorspace = "rgb"; | 551 | adi,input-colorspace = "rgb"; |
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index bfd3f1c734b8..2201cd5da3bb 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -1017,23 +1017,6 @@ | |||
1017 | status = "disabled"; | 1017 | status = "disabled"; |
1018 | }; | 1018 | }; |
1019 | 1019 | ||
1020 | vmmci: regulator-gpio { | ||
1021 | compatible = "regulator-gpio"; | ||
1022 | |||
1023 | regulator-min-microvolt = <1800000>; | ||
1024 | regulator-max-microvolt = <2900000>; | ||
1025 | regulator-name = "mmci-reg"; | ||
1026 | regulator-type = "voltage"; | ||
1027 | |||
1028 | startup-delay-us = <100>; | ||
1029 | enable-active-high; | ||
1030 | |||
1031 | states = <1800000 0x1 | ||
1032 | 2900000 0x0>; | ||
1033 | |||
1034 | status = "disabled"; | ||
1035 | }; | ||
1036 | |||
1037 | mcde@a0350000 { | 1020 | mcde@a0350000 { |
1038 | compatible = "stericsson,mcde"; | 1021 | compatible = "stericsson,mcde"; |
1039 | reg = <0xa0350000 0x1000>, /* MCDE */ | 1022 | reg = <0xa0350000 0x1000>, /* MCDE */ |
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index bf8f0eddc2c0..744c1e3a744d 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -111,6 +111,21 @@ | |||
111 | pinctrl-1 = <&i2c3_sleep_mode>; | 111 | pinctrl-1 = <&i2c3_sleep_mode>; |
112 | }; | 112 | }; |
113 | 113 | ||
114 | vmmci: regulator-gpio { | ||
115 | compatible = "regulator-gpio"; | ||
116 | |||
117 | regulator-min-microvolt = <1800000>; | ||
118 | regulator-max-microvolt = <2900000>; | ||
119 | regulator-name = "mmci-reg"; | ||
120 | regulator-type = "voltage"; | ||
121 | |||
122 | startup-delay-us = <100>; | ||
123 | enable-active-high; | ||
124 | |||
125 | states = <1800000 0x1 | ||
126 | 2900000 0x0>; | ||
127 | }; | ||
128 | |||
114 | // External Micro SD slot | 129 | // External Micro SD slot |
115 | sdi0_per1@80126000 { | 130 | sdi0_per1@80126000 { |
116 | arm,primecell-periphid = <0x10480180>; | 131 | arm,primecell-periphid = <0x10480180>; |
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index 206826a855c0..1bc84ebdccaa 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -146,8 +146,21 @@ | |||
146 | }; | 146 | }; |
147 | 147 | ||
148 | vmmci: regulator-gpio { | 148 | vmmci: regulator-gpio { |
149 | compatible = "regulator-gpio"; | ||
150 | |||
149 | gpios = <&gpio7 4 0x4>; | 151 | gpios = <&gpio7 4 0x4>; |
150 | enable-gpio = <&gpio6 25 0x4>; | 152 | enable-gpio = <&gpio6 25 0x4>; |
153 | |||
154 | regulator-min-microvolt = <1800000>; | ||
155 | regulator-max-microvolt = <2900000>; | ||
156 | regulator-name = "mmci-reg"; | ||
157 | regulator-type = "voltage"; | ||
158 | |||
159 | startup-delay-us = <100>; | ||
160 | enable-active-high; | ||
161 | |||
162 | states = <1800000 0x1 | ||
163 | 2900000 0x0>; | ||
151 | }; | 164 | }; |
152 | 165 | ||
153 | // External Micro SD slot | 166 | // External Micro SD slot |
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index ab86655c1f4b..0ca4a3eaf65d 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -39,11 +39,14 @@ CONFIG_ARCH_HIP04=y | |||
39 | CONFIG_ARCH_KEYSTONE=y | 39 | CONFIG_ARCH_KEYSTONE=y |
40 | CONFIG_ARCH_MESON=y | 40 | CONFIG_ARCH_MESON=y |
41 | CONFIG_ARCH_MXC=y | 41 | CONFIG_ARCH_MXC=y |
42 | CONFIG_SOC_IMX50=y | ||
42 | CONFIG_SOC_IMX51=y | 43 | CONFIG_SOC_IMX51=y |
43 | CONFIG_SOC_IMX53=y | 44 | CONFIG_SOC_IMX53=y |
44 | CONFIG_SOC_IMX6Q=y | 45 | CONFIG_SOC_IMX6Q=y |
45 | CONFIG_SOC_IMX6SL=y | 46 | CONFIG_SOC_IMX6SL=y |
47 | CONFIG_SOC_IMX6SX=y | ||
46 | CONFIG_SOC_VF610=y | 48 | CONFIG_SOC_VF610=y |
49 | CONFIG_SOC_LS1021A=y | ||
47 | CONFIG_ARCH_OMAP3=y | 50 | CONFIG_ARCH_OMAP3=y |
48 | CONFIG_ARCH_OMAP4=y | 51 | CONFIG_ARCH_OMAP4=y |
49 | CONFIG_SOC_OMAP5=y | 52 | CONFIG_SOC_OMAP5=y |
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 9ff7b54b2a83..3743ca221d40 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -393,7 +393,7 @@ CONFIG_TI_EDMA=y | |||
393 | CONFIG_DMA_OMAP=y | 393 | CONFIG_DMA_OMAP=y |
394 | # CONFIG_IOMMU_SUPPORT is not set | 394 | # CONFIG_IOMMU_SUPPORT is not set |
395 | CONFIG_EXTCON=m | 395 | CONFIG_EXTCON=m |
396 | CONFIG_EXTCON_GPIO=m | 396 | CONFIG_EXTCON_USB_GPIO=m |
397 | CONFIG_EXTCON_PALMAS=m | 397 | CONFIG_EXTCON_PALMAS=m |
398 | CONFIG_TI_EMIF=m | 398 | CONFIG_TI_EMIF=m |
399 | CONFIG_PWM=y | 399 | CONFIG_PWM=y |
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 8e3fcb924db6..2ef282f96651 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -25,7 +25,7 @@ struct dma_iommu_mapping { | |||
25 | }; | 25 | }; |
26 | 26 | ||
27 | struct dma_iommu_mapping * | 27 | struct dma_iommu_mapping * |
28 | arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size); | 28 | arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size); |
29 | 29 | ||
30 | void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping); | 30 | void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping); |
31 | 31 | ||
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 2f7e6ff67d51..0b579b2f4e0e 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -110,5 +110,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) | |||
110 | bool xen_arch_need_swiotlb(struct device *dev, | 110 | bool xen_arch_need_swiotlb(struct device *dev, |
111 | unsigned long pfn, | 111 | unsigned long pfn, |
112 | unsigned long mfn); | 112 | unsigned long mfn); |
113 | unsigned long xen_get_swiotlb_free_pages(unsigned int order); | ||
113 | 114 | ||
114 | #endif /* _ASM_ARM_XEN_PAGE_H */ | 115 | #endif /* _ASM_ARM_XEN_PAGE_H */ |
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 91c7ba182dcd..213919ba326f 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -303,12 +303,17 @@ static int probe_current_pmu(struct arm_pmu *pmu) | |||
303 | 303 | ||
304 | static int of_pmu_irq_cfg(struct platform_device *pdev) | 304 | static int of_pmu_irq_cfg(struct platform_device *pdev) |
305 | { | 305 | { |
306 | int i; | 306 | int i, irq; |
307 | int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); | 307 | int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); |
308 | 308 | ||
309 | if (!irqs) | 309 | if (!irqs) |
310 | return -ENOMEM; | 310 | return -ENOMEM; |
311 | 311 | ||
312 | /* Don't bother with PPIs; they're already affine */ | ||
313 | irq = platform_get_irq(pdev, 0); | ||
314 | if (irq >= 0 && irq_is_percpu(irq)) | ||
315 | return 0; | ||
316 | |||
312 | for (i = 0; i < pdev->num_resources; ++i) { | 317 | for (i = 0; i < pdev->num_resources; ++i) { |
313 | struct device_node *dn; | 318 | struct device_node *dn; |
314 | int cpu; | 319 | int cpu; |
@@ -317,7 +322,7 @@ static int of_pmu_irq_cfg(struct platform_device *pdev) | |||
317 | i); | 322 | i); |
318 | if (!dn) { | 323 | if (!dn) { |
319 | pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", | 324 | pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", |
320 | of_node_full_name(dn), i); | 325 | of_node_full_name(pdev->dev.of_node), i); |
321 | break; | 326 | break; |
322 | } | 327 | } |
323 | 328 | ||
diff --git a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
index fb8d4a2ad48c..a5edd7d60266 100644
--- a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
+++ b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2010 Pengutronix, Wolfram Sang <w.sang@pengutronix.de> | 2 | * Copyright (C) 2010 Pengutronix, Wolfram Sang <kernel@pengutronix.de> |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it under | 4 | * This program is free software; you can redistribute it and/or modify it under |
5 | * the terms of the GNU General Public License version 2 as published by the | 5 | * the terms of the GNU General Public License version 2 as published by the |
diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
index cbefbd7cfdb5..661d753df584 100644
--- a/arch/arm/mach-omap2/prm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
@@ -112,6 +112,7 @@ | |||
112 | #define OMAP3430_VC_CMD_ONLP_SHIFT 16 | 112 | #define OMAP3430_VC_CMD_ONLP_SHIFT 16 |
113 | #define OMAP3430_VC_CMD_RET_SHIFT 8 | 113 | #define OMAP3430_VC_CMD_RET_SHIFT 8 |
114 | #define OMAP3430_VC_CMD_OFF_SHIFT 0 | 114 | #define OMAP3430_VC_CMD_OFF_SHIFT 0 |
115 | #define OMAP3430_SREN_MASK (1 << 4) | ||
115 | #define OMAP3430_HSEN_MASK (1 << 3) | 116 | #define OMAP3430_HSEN_MASK (1 << 3) |
116 | #define OMAP3430_MCODE_MASK (0x7 << 0) | 117 | #define OMAP3430_MCODE_MASK (0x7 << 0) |
117 | #define OMAP3430_VALID_MASK (1 << 24) | 118 | #define OMAP3430_VALID_MASK (1 << 24) |
diff --git a/arch/arm/mach-omap2/prm-regbits-44xx.h b/arch/arm/mach-omap2/prm-regbits-44xx.h
index b1c7a33e00e7..e794828dee55 100644
--- a/arch/arm/mach-omap2/prm-regbits-44xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-44xx.h
@@ -35,6 +35,7 @@ | |||
35 | #define OMAP4430_GLOBAL_WARM_SW_RST_SHIFT 1 | 35 | #define OMAP4430_GLOBAL_WARM_SW_RST_SHIFT 1 |
36 | #define OMAP4430_GLOBAL_WUEN_MASK (1 << 16) | 36 | #define OMAP4430_GLOBAL_WUEN_MASK (1 << 16) |
37 | #define OMAP4430_HSMCODE_MASK (0x7 << 0) | 37 | #define OMAP4430_HSMCODE_MASK (0x7 << 0) |
38 | #define OMAP4430_SRMODEEN_MASK (1 << 4) | ||
38 | #define OMAP4430_HSMODEEN_MASK (1 << 3) | 39 | #define OMAP4430_HSMODEEN_MASK (1 << 3) |
39 | #define OMAP4430_HSSCLL_SHIFT 24 | 40 | #define OMAP4430_HSSCLL_SHIFT 24 |
40 | #define OMAP4430_ICEPICK_RST_SHIFT 9 | 41 | #define OMAP4430_ICEPICK_RST_SHIFT 9 |
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
index be9ef834fa81..076fd20d7e5a 100644
--- a/arch/arm/mach-omap2/vc.c
+++ b/arch/arm/mach-omap2/vc.c
@@ -316,7 +316,8 @@ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm) | |||
316 | * idle. And we can also scale voltages to zero for off-idle. | 316 | * idle. And we can also scale voltages to zero for off-idle. |
317 | * Note that no actual voltage scaling during off-idle will | 317 | * Note that no actual voltage scaling during off-idle will |
318 | * happen unless the board specific twl4030 PMIC scripts are | 318 | * happen unless the board specific twl4030 PMIC scripts are |
319 | * loaded. | 319 | * loaded. See also omap_vc_i2c_init for comments regarding |
320 | * erratum i531. | ||
320 | */ | 321 | */ |
321 | val = voltdm->read(OMAP3_PRM_VOLTCTRL_OFFSET); | 322 | val = voltdm->read(OMAP3_PRM_VOLTCTRL_OFFSET); |
322 | if (!(val & OMAP3430_PRM_VOLTCTRL_SEL_OFF)) { | 323 | if (!(val & OMAP3430_PRM_VOLTCTRL_SEL_OFF)) { |
@@ -704,9 +705,16 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm) | |||
704 | return; | 705 | return; |
705 | } | 706 | } |
706 | 707 | ||
708 | /* | ||
709 | * Note that for omap3 OMAP3430_SREN_MASK clears SREN to work around | ||
710 | * erratum i531 "Extra Power Consumed When Repeated Start Operation | ||
711 | * Mode Is Enabled on I2C Interface Dedicated for Smart Reflex (I2C4)". | ||
712 | * Otherwise I2C4 eventually leads into about 23mW extra power being | ||
713 | * consumed even during off idle using VMODE. | ||
714 | */ | ||
707 | i2c_high_speed = voltdm->pmic->i2c_high_speed; | 715 | i2c_high_speed = voltdm->pmic->i2c_high_speed; |
708 | if (i2c_high_speed) | 716 | if (i2c_high_speed) |
709 | voltdm->rmw(vc->common->i2c_cfg_hsen_mask, | 717 | voltdm->rmw(vc->common->i2c_cfg_clear_mask, |
710 | vc->common->i2c_cfg_hsen_mask, | 718 | vc->common->i2c_cfg_hsen_mask, |
711 | vc->common->i2c_cfg_reg); | 719 | vc->common->i2c_cfg_reg); |
712 | 720 | ||
diff --git a/arch/arm/mach-omap2/vc.h b/arch/arm/mach-omap2/vc.h
index cdbdd78e755e..89b83b7ff3ec 100644
--- a/arch/arm/mach-omap2/vc.h
+++ b/arch/arm/mach-omap2/vc.h
@@ -34,6 +34,7 @@ struct voltagedomain; | |||
34 | * @cmd_ret_shift: RET field shift in PRM_VC_CMD_VAL_* register | 34 | * @cmd_ret_shift: RET field shift in PRM_VC_CMD_VAL_* register |
35 | * @cmd_off_shift: OFF field shift in PRM_VC_CMD_VAL_* register | 35 | * @cmd_off_shift: OFF field shift in PRM_VC_CMD_VAL_* register |
36 | * @i2c_cfg_reg: I2C configuration register offset | 36 | * @i2c_cfg_reg: I2C configuration register offset |
37 | * @i2c_cfg_clear_mask: high-speed mode bit clear mask in I2C config register | ||
37 | * @i2c_cfg_hsen_mask: high-speed mode bit field mask in I2C config register | 38 | * @i2c_cfg_hsen_mask: high-speed mode bit field mask in I2C config register |
38 | * @i2c_mcode_mask: MCODE field mask for I2C config register | 39 | * @i2c_mcode_mask: MCODE field mask for I2C config register |
39 | * | 40 | * |
@@ -52,6 +53,7 @@ struct omap_vc_common { | |||
52 | u8 cmd_ret_shift; | 53 | u8 cmd_ret_shift; |
53 | u8 cmd_off_shift; | 54 | u8 cmd_off_shift; |
54 | u8 i2c_cfg_reg; | 55 | u8 i2c_cfg_reg; |
56 | u8 i2c_cfg_clear_mask; | ||
55 | u8 i2c_cfg_hsen_mask; | 57 | u8 i2c_cfg_hsen_mask; |
56 | u8 i2c_mcode_mask; | 58 | u8 i2c_mcode_mask; |
57 | }; | 59 | }; |
diff --git a/arch/arm/mach-omap2/vc3xxx_data.c b/arch/arm/mach-omap2/vc3xxx_data.c
index 75bc4aa22b3a..71d74c9172c1 100644
--- a/arch/arm/mach-omap2/vc3xxx_data.c
+++ b/arch/arm/mach-omap2/vc3xxx_data.c
@@ -40,6 +40,7 @@ static struct omap_vc_common omap3_vc_common = { | |||
40 | .cmd_onlp_shift = OMAP3430_VC_CMD_ONLP_SHIFT, | 40 | .cmd_onlp_shift = OMAP3430_VC_CMD_ONLP_SHIFT, |
41 | .cmd_ret_shift = OMAP3430_VC_CMD_RET_SHIFT, | 41 | .cmd_ret_shift = OMAP3430_VC_CMD_RET_SHIFT, |
42 | .cmd_off_shift = OMAP3430_VC_CMD_OFF_SHIFT, | 42 | .cmd_off_shift = OMAP3430_VC_CMD_OFF_SHIFT, |
43 | .i2c_cfg_clear_mask = OMAP3430_SREN_MASK | OMAP3430_HSEN_MASK, | ||
43 | .i2c_cfg_hsen_mask = OMAP3430_HSEN_MASK, | 44 | .i2c_cfg_hsen_mask = OMAP3430_HSEN_MASK, |
44 | .i2c_cfg_reg = OMAP3_PRM_VC_I2C_CFG_OFFSET, | 45 | .i2c_cfg_reg = OMAP3_PRM_VC_I2C_CFG_OFFSET, |
45 | .i2c_mcode_mask = OMAP3430_MCODE_MASK, | 46 | .i2c_mcode_mask = OMAP3430_MCODE_MASK, |
diff --git a/arch/arm/mach-omap2/vc44xx_data.c b/arch/arm/mach-omap2/vc44xx_data.c index 085e5d6a04fd..2abd5fa8a697 100644 --- a/arch/arm/mach-omap2/vc44xx_data.c +++ b/arch/arm/mach-omap2/vc44xx_data.c | |||
@@ -42,6 +42,7 @@ static const struct omap_vc_common omap4_vc_common = { | |||
42 | .cmd_ret_shift = OMAP4430_RET_SHIFT, | 42 | .cmd_ret_shift = OMAP4430_RET_SHIFT, |
43 | .cmd_off_shift = OMAP4430_OFF_SHIFT, | 43 | .cmd_off_shift = OMAP4430_OFF_SHIFT, |
44 | .i2c_cfg_reg = OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET, | 44 | .i2c_cfg_reg = OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET, |
45 | .i2c_cfg_clear_mask = OMAP4430_SRMODEEN_MASK | OMAP4430_HSMODEEN_MASK, | ||
45 | .i2c_cfg_hsen_mask = OMAP4430_HSMODEEN_MASK, | 46 | .i2c_cfg_hsen_mask = OMAP4430_HSMODEEN_MASK, |
46 | .i2c_mcode_mask = OMAP4430_HSMCODE_MASK, | 47 | .i2c_mcode_mask = OMAP4430_HSMCODE_MASK, |
47 | }; | 48 | }; |
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index 8896e71586f5..f09683687963 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig | |||
@@ -691,4 +691,13 @@ config SHARPSL_PM_MAX1111 | |||
691 | config PXA310_ULPI | 691 | config PXA310_ULPI |
692 | bool | 692 | bool |
693 | 693 | ||
694 | config PXA_SYSTEMS_CPLDS | ||
695 | tristate "Motherboard cplds" | ||
696 | default ARCH_LUBBOCK || MACH_MAINSTONE | ||
697 | help | ||
698 | This driver supports the Lubbock and Mainstone multifunction chips | ||
699 | found on the pxa25x development platform system (Lubbock) and the pxa27x | ||
700 | development platform system (Mainstone). This IO board provides | ||
701 | interrupt handling, an ethernet controller, flash chips, etc. | ||
702 | |||
694 | endif | 703 | endif |
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile index eb0bf7678a99..4087d334ecdf 100644 --- a/arch/arm/mach-pxa/Makefile +++ b/arch/arm/mach-pxa/Makefile | |||
@@ -90,4 +90,5 @@ obj-$(CONFIG_MACH_RAUMFELD_CONNECTOR) += raumfeld.o | |||
90 | obj-$(CONFIG_MACH_RAUMFELD_SPEAKER) += raumfeld.o | 90 | obj-$(CONFIG_MACH_RAUMFELD_SPEAKER) += raumfeld.o |
91 | obj-$(CONFIG_MACH_ZIPIT2) += z2.o | 91 | obj-$(CONFIG_MACH_ZIPIT2) += z2.o |
92 | 92 | ||
93 | obj-$(CONFIG_PXA_SYSTEMS_CPLDS) += pxa_cplds_irqs.o | ||
93 | obj-$(CONFIG_TOSA_BT) += tosa-bt.o | 94 | obj-$(CONFIG_TOSA_BT) += tosa-bt.o |
diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h index 958cd6af9384..1eecf794acd2 100644 --- a/arch/arm/mach-pxa/include/mach/lubbock.h +++ b/arch/arm/mach-pxa/include/mach/lubbock.h | |||
@@ -37,7 +37,9 @@ | |||
37 | #define LUB_GP __LUB_REG(LUBBOCK_FPGA_PHYS + 0x100) | 37 | #define LUB_GP __LUB_REG(LUBBOCK_FPGA_PHYS + 0x100) |
38 | 38 | ||
39 | /* Board specific IRQs */ | 39 | /* Board specific IRQs */ |
40 | #define LUBBOCK_IRQ(x) (IRQ_BOARD_START + (x)) | 40 | #define LUBBOCK_NR_IRQS IRQ_BOARD_START |
41 | |||
42 | #define LUBBOCK_IRQ(x) (LUBBOCK_NR_IRQS + (x)) | ||
41 | #define LUBBOCK_SD_IRQ LUBBOCK_IRQ(0) | 43 | #define LUBBOCK_SD_IRQ LUBBOCK_IRQ(0) |
42 | #define LUBBOCK_SA1111_IRQ LUBBOCK_IRQ(1) | 44 | #define LUBBOCK_SA1111_IRQ LUBBOCK_IRQ(1) |
43 | #define LUBBOCK_USB_IRQ LUBBOCK_IRQ(2) /* usb connect */ | 45 | #define LUBBOCK_USB_IRQ LUBBOCK_IRQ(2) /* usb connect */ |
@@ -47,8 +49,7 @@ | |||
47 | #define LUBBOCK_USB_DISC_IRQ LUBBOCK_IRQ(6) /* usb disconnect */ | 49 | #define LUBBOCK_USB_DISC_IRQ LUBBOCK_IRQ(6) /* usb disconnect */ |
48 | #define LUBBOCK_LAST_IRQ LUBBOCK_IRQ(6) | 50 | #define LUBBOCK_LAST_IRQ LUBBOCK_IRQ(6) |
49 | 51 | ||
50 | #define LUBBOCK_SA1111_IRQ_BASE (IRQ_BOARD_START + 16) | 52 | #define LUBBOCK_SA1111_IRQ_BASE (LUBBOCK_NR_IRQS + 32) |
51 | #define LUBBOCK_NR_IRQS (IRQ_BOARD_START + 16 + 55) | ||
52 | 53 | ||
53 | #ifndef __ASSEMBLY__ | 54 | #ifndef __ASSEMBLY__ |
54 | extern void lubbock_set_misc_wr(unsigned int mask, unsigned int set); | 55 | extern void lubbock_set_misc_wr(unsigned int mask, unsigned int set); |
diff --git a/arch/arm/mach-pxa/include/mach/mainstone.h b/arch/arm/mach-pxa/include/mach/mainstone.h index 1bfc4e822a41..e82a7d31104e 100644 --- a/arch/arm/mach-pxa/include/mach/mainstone.h +++ b/arch/arm/mach-pxa/include/mach/mainstone.h | |||
@@ -120,7 +120,9 @@ | |||
120 | #define MST_PCMCIA_PWR_VCC_50 0x4 /* voltage VCC = 5.0V */ | 120 | #define MST_PCMCIA_PWR_VCC_50 0x4 /* voltage VCC = 5.0V */ |
121 | 121 | ||
122 | /* board specific IRQs */ | 122 | /* board specific IRQs */ |
123 | #define MAINSTONE_IRQ(x) (IRQ_BOARD_START + (x)) | 123 | #define MAINSTONE_NR_IRQS IRQ_BOARD_START |
124 | |||
125 | #define MAINSTONE_IRQ(x) (MAINSTONE_NR_IRQS + (x)) | ||
124 | #define MAINSTONE_MMC_IRQ MAINSTONE_IRQ(0) | 126 | #define MAINSTONE_MMC_IRQ MAINSTONE_IRQ(0) |
125 | #define MAINSTONE_USIM_IRQ MAINSTONE_IRQ(1) | 127 | #define MAINSTONE_USIM_IRQ MAINSTONE_IRQ(1) |
126 | #define MAINSTONE_USBC_IRQ MAINSTONE_IRQ(2) | 128 | #define MAINSTONE_USBC_IRQ MAINSTONE_IRQ(2) |
@@ -136,6 +138,4 @@ | |||
136 | #define MAINSTONE_S1_STSCHG_IRQ MAINSTONE_IRQ(14) | 138 | #define MAINSTONE_S1_STSCHG_IRQ MAINSTONE_IRQ(14) |
137 | #define MAINSTONE_S1_IRQ MAINSTONE_IRQ(15) | 139 | #define MAINSTONE_S1_IRQ MAINSTONE_IRQ(15) |
138 | 140 | ||
139 | #define MAINSTONE_NR_IRQS (IRQ_BOARD_START + 16) | ||
140 | |||
141 | #endif | 141 | #endif |
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c index d8a1be619f21..4ac9ab80d24b 100644 --- a/arch/arm/mach-pxa/lubbock.c +++ b/arch/arm/mach-pxa/lubbock.c | |||
@@ -12,6 +12,7 @@ | |||
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | */ | 13 | */ |
14 | #include <linux/gpio.h> | 14 | #include <linux/gpio.h> |
15 | #include <linux/gpio/machine.h> | ||
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
16 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
17 | #include <linux/init.h> | 18 | #include <linux/init.h> |
@@ -123,84 +124,6 @@ void lubbock_set_misc_wr(unsigned int mask, unsigned int set) | |||
123 | } | 124 | } |
124 | EXPORT_SYMBOL(lubbock_set_misc_wr); | 125 | EXPORT_SYMBOL(lubbock_set_misc_wr); |
125 | 126 | ||
126 | static unsigned long lubbock_irq_enabled; | ||
127 | |||
128 | static void lubbock_mask_irq(struct irq_data *d) | ||
129 | { | ||
130 | int lubbock_irq = (d->irq - LUBBOCK_IRQ(0)); | ||
131 | LUB_IRQ_MASK_EN = (lubbock_irq_enabled &= ~(1 << lubbock_irq)); | ||
132 | } | ||
133 | |||
134 | static void lubbock_unmask_irq(struct irq_data *d) | ||
135 | { | ||
136 | int lubbock_irq = (d->irq - LUBBOCK_IRQ(0)); | ||
137 | /* the irq can be acknowledged only if deasserted, so it's done here */ | ||
138 | LUB_IRQ_SET_CLR &= ~(1 << lubbock_irq); | ||
139 | LUB_IRQ_MASK_EN = (lubbock_irq_enabled |= (1 << lubbock_irq)); | ||
140 | } | ||
141 | |||
142 | static struct irq_chip lubbock_irq_chip = { | ||
143 | .name = "FPGA", | ||
144 | .irq_ack = lubbock_mask_irq, | ||
145 | .irq_mask = lubbock_mask_irq, | ||
146 | .irq_unmask = lubbock_unmask_irq, | ||
147 | }; | ||
148 | |||
149 | static void lubbock_irq_handler(unsigned int irq, struct irq_desc *desc) | ||
150 | { | ||
151 | unsigned long pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled; | ||
152 | do { | ||
153 | /* clear our parent irq */ | ||
154 | desc->irq_data.chip->irq_ack(&desc->irq_data); | ||
155 | if (likely(pending)) { | ||
156 | irq = LUBBOCK_IRQ(0) + __ffs(pending); | ||
157 | generic_handle_irq(irq); | ||
158 | } | ||
159 | pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled; | ||
160 | } while (pending); | ||
161 | } | ||
162 | |||
163 | static void __init lubbock_init_irq(void) | ||
164 | { | ||
165 | int irq; | ||
166 | |||
167 | pxa25x_init_irq(); | ||
168 | |||
169 | /* setup extra lubbock irqs */ | ||
170 | for (irq = LUBBOCK_IRQ(0); irq <= LUBBOCK_LAST_IRQ; irq++) { | ||
171 | irq_set_chip_and_handler(irq, &lubbock_irq_chip, | ||
172 | handle_level_irq); | ||
173 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | ||
174 | } | ||
175 | |||
176 | irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), lubbock_irq_handler); | ||
177 | irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING); | ||
178 | } | ||
179 | |||
180 | #ifdef CONFIG_PM | ||
181 | |||
182 | static void lubbock_irq_resume(void) | ||
183 | { | ||
184 | LUB_IRQ_MASK_EN = lubbock_irq_enabled; | ||
185 | } | ||
186 | |||
187 | static struct syscore_ops lubbock_irq_syscore_ops = { | ||
188 | .resume = lubbock_irq_resume, | ||
189 | }; | ||
190 | |||
191 | static int __init lubbock_irq_device_init(void) | ||
192 | { | ||
193 | if (machine_is_lubbock()) { | ||
194 | register_syscore_ops(&lubbock_irq_syscore_ops); | ||
195 | return 0; | ||
196 | } | ||
197 | return -ENODEV; | ||
198 | } | ||
199 | |||
200 | device_initcall(lubbock_irq_device_init); | ||
201 | |||
202 | #endif | ||
203 | |||
204 | static int lubbock_udc_is_connected(void) | 127 | static int lubbock_udc_is_connected(void) |
205 | { | 128 | { |
206 | return (LUB_MISC_RD & (1 << 9)) == 0; | 129 | return (LUB_MISC_RD & (1 << 9)) == 0; |
@@ -383,11 +306,38 @@ static struct platform_device lubbock_flash_device[2] = { | |||
383 | }, | 306 | }, |
384 | }; | 307 | }; |
385 | 308 | ||
309 | static struct resource lubbock_cplds_resources[] = { | ||
310 | [0] = { | ||
311 | .start = LUBBOCK_FPGA_PHYS + 0xc0, | ||
312 | .end = LUBBOCK_FPGA_PHYS + 0xe0 - 1, | ||
313 | .flags = IORESOURCE_MEM, | ||
314 | }, | ||
315 | [1] = { | ||
316 | .start = PXA_GPIO_TO_IRQ(0), | ||
317 | .end = PXA_GPIO_TO_IRQ(0), | ||
318 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, | ||
319 | }, | ||
320 | [2] = { | ||
321 | .start = LUBBOCK_IRQ(0), | ||
322 | .end = LUBBOCK_IRQ(6), | ||
323 | .flags = IORESOURCE_IRQ, | ||
324 | }, | ||
325 | }; | ||
326 | |||
327 | static struct platform_device lubbock_cplds_device = { | ||
328 | .name = "pxa_cplds_irqs", | ||
329 | .id = -1, | ||
330 | .resource = &lubbock_cplds_resources[0], | ||
331 | .num_resources = 3, | ||
332 | }; | ||
333 | |||
334 | |||
386 | static struct platform_device *devices[] __initdata = { | 335 | static struct platform_device *devices[] __initdata = { |
387 | &sa1111_device, | 336 | &sa1111_device, |
388 | &smc91x_device, | 337 | &smc91x_device, |
389 | &lubbock_flash_device[0], | 338 | &lubbock_flash_device[0], |
390 | &lubbock_flash_device[1], | 339 | &lubbock_flash_device[1], |
340 | &lubbock_cplds_device, | ||
391 | }; | 341 | }; |
392 | 342 | ||
393 | static struct pxafb_mode_info sharp_lm8v31_mode = { | 343 | static struct pxafb_mode_info sharp_lm8v31_mode = { |
@@ -648,7 +598,7 @@ MACHINE_START(LUBBOCK, "Intel DBPXA250 Development Platform (aka Lubbock)") | |||
648 | /* Maintainer: MontaVista Software Inc. */ | 598 | /* Maintainer: MontaVista Software Inc. */ |
649 | .map_io = lubbock_map_io, | 599 | .map_io = lubbock_map_io, |
650 | .nr_irqs = LUBBOCK_NR_IRQS, | 600 | .nr_irqs = LUBBOCK_NR_IRQS, |
651 | .init_irq = lubbock_init_irq, | 601 | .init_irq = pxa25x_init_irq, |
652 | .handle_irq = pxa25x_handle_irq, | 602 | .handle_irq = pxa25x_handle_irq, |
653 | .init_time = pxa_timer_init, | 603 | .init_time = pxa_timer_init, |
654 | .init_machine = lubbock_init, | 604 | .init_machine = lubbock_init, |
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c index 78b84c0dfc79..2c0658cf6be2 100644 --- a/arch/arm/mach-pxa/mainstone.c +++ b/arch/arm/mach-pxa/mainstone.c | |||
@@ -13,6 +13,7 @@ | |||
13 | * published by the Free Software Foundation. | 13 | * published by the Free Software Foundation. |
14 | */ | 14 | */ |
15 | #include <linux/gpio.h> | 15 | #include <linux/gpio.h> |
16 | #include <linux/gpio/machine.h> | ||
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
17 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
18 | #include <linux/syscore_ops.h> | 19 | #include <linux/syscore_ops.h> |
@@ -122,92 +123,6 @@ static unsigned long mainstone_pin_config[] = { | |||
122 | GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH, | 123 | GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH, |
123 | }; | 124 | }; |
124 | 125 | ||
125 | static unsigned long mainstone_irq_enabled; | ||
126 | |||
127 | static void mainstone_mask_irq(struct irq_data *d) | ||
128 | { | ||
129 | int mainstone_irq = (d->irq - MAINSTONE_IRQ(0)); | ||
130 | MST_INTMSKENA = (mainstone_irq_enabled &= ~(1 << mainstone_irq)); | ||
131 | } | ||
132 | |||
133 | static void mainstone_unmask_irq(struct irq_data *d) | ||
134 | { | ||
135 | int mainstone_irq = (d->irq - MAINSTONE_IRQ(0)); | ||
136 | /* the irq can be acknowledged only if deasserted, so it's done here */ | ||
137 | MST_INTSETCLR &= ~(1 << mainstone_irq); | ||
138 | MST_INTMSKENA = (mainstone_irq_enabled |= (1 << mainstone_irq)); | ||
139 | } | ||
140 | |||
141 | static struct irq_chip mainstone_irq_chip = { | ||
142 | .name = "FPGA", | ||
143 | .irq_ack = mainstone_mask_irq, | ||
144 | .irq_mask = mainstone_mask_irq, | ||
145 | .irq_unmask = mainstone_unmask_irq, | ||
146 | }; | ||
147 | |||
148 | static void mainstone_irq_handler(unsigned int irq, struct irq_desc *desc) | ||
149 | { | ||
150 | unsigned long pending = MST_INTSETCLR & mainstone_irq_enabled; | ||
151 | do { | ||
152 | /* clear useless edge notification */ | ||
153 | desc->irq_data.chip->irq_ack(&desc->irq_data); | ||
154 | if (likely(pending)) { | ||
155 | irq = MAINSTONE_IRQ(0) + __ffs(pending); | ||
156 | generic_handle_irq(irq); | ||
157 | } | ||
158 | pending = MST_INTSETCLR & mainstone_irq_enabled; | ||
159 | } while (pending); | ||
160 | } | ||
161 | |||
162 | static void __init mainstone_init_irq(void) | ||
163 | { | ||
164 | int irq; | ||
165 | |||
166 | pxa27x_init_irq(); | ||
167 | |||
168 | /* setup extra Mainstone irqs */ | ||
169 | for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) { | ||
170 | irq_set_chip_and_handler(irq, &mainstone_irq_chip, | ||
171 | handle_level_irq); | ||
172 | if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14)) | ||
173 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN); | ||
174 | else | ||
175 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | ||
176 | } | ||
177 | set_irq_flags(MAINSTONE_IRQ(8), 0); | ||
178 | set_irq_flags(MAINSTONE_IRQ(12), 0); | ||
179 | |||
180 | MST_INTMSKENA = 0; | ||
181 | MST_INTSETCLR = 0; | ||
182 | |||
183 | irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), mainstone_irq_handler); | ||
184 | irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING); | ||
185 | } | ||
186 | |||
187 | #ifdef CONFIG_PM | ||
188 | |||
189 | static void mainstone_irq_resume(void) | ||
190 | { | ||
191 | MST_INTMSKENA = mainstone_irq_enabled; | ||
192 | } | ||
193 | |||
194 | static struct syscore_ops mainstone_irq_syscore_ops = { | ||
195 | .resume = mainstone_irq_resume, | ||
196 | }; | ||
197 | |||
198 | static int __init mainstone_irq_device_init(void) | ||
199 | { | ||
200 | if (machine_is_mainstone()) | ||
201 | register_syscore_ops(&mainstone_irq_syscore_ops); | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | device_initcall(mainstone_irq_device_init); | ||
207 | |||
208 | #endif | ||
209 | |||
210 | |||
211 | static struct resource smc91x_resources[] = { | 126 | static struct resource smc91x_resources[] = { |
212 | [0] = { | 127 | [0] = { |
213 | .start = (MST_ETH_PHYS + 0x300), | 128 | .start = (MST_ETH_PHYS + 0x300), |
@@ -487,11 +402,37 @@ static struct platform_device mst_gpio_keys_device = { | |||
487 | }, | 402 | }, |
488 | }; | 403 | }; |
489 | 404 | ||
405 | static struct resource mst_cplds_resources[] = { | ||
406 | [0] = { | ||
407 | .start = MST_FPGA_PHYS + 0xc0, | ||
408 | .end = MST_FPGA_PHYS + 0xe0 - 1, | ||
409 | .flags = IORESOURCE_MEM, | ||
410 | }, | ||
411 | [1] = { | ||
412 | .start = PXA_GPIO_TO_IRQ(0), | ||
413 | .end = PXA_GPIO_TO_IRQ(0), | ||
414 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, | ||
415 | }, | ||
416 | [2] = { | ||
417 | .start = MAINSTONE_IRQ(0), | ||
418 | .end = MAINSTONE_IRQ(15), | ||
419 | .flags = IORESOURCE_IRQ, | ||
420 | }, | ||
421 | }; | ||
422 | |||
423 | static struct platform_device mst_cplds_device = { | ||
424 | .name = "pxa_cplds_irqs", | ||
425 | .id = -1, | ||
426 | .resource = &mst_cplds_resources[0], | ||
427 | .num_resources = 3, | ||
428 | }; | ||
429 | |||
490 | static struct platform_device *platform_devices[] __initdata = { | 430 | static struct platform_device *platform_devices[] __initdata = { |
491 | &smc91x_device, | 431 | &smc91x_device, |
492 | &mst_flash_device[0], | 432 | &mst_flash_device[0], |
493 | &mst_flash_device[1], | 433 | &mst_flash_device[1], |
494 | &mst_gpio_keys_device, | 434 | &mst_gpio_keys_device, |
435 | &mst_cplds_device, | ||
495 | }; | 436 | }; |
496 | 437 | ||
497 | static struct pxaohci_platform_data mainstone_ohci_platform_data = { | 438 | static struct pxaohci_platform_data mainstone_ohci_platform_data = { |
@@ -718,7 +659,7 @@ MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)") | |||
718 | .atag_offset = 0x100, /* BLOB boot parameter setting */ | 659 | .atag_offset = 0x100, /* BLOB boot parameter setting */ |
719 | .map_io = mainstone_map_io, | 660 | .map_io = mainstone_map_io, |
720 | .nr_irqs = MAINSTONE_NR_IRQS, | 661 | .nr_irqs = MAINSTONE_NR_IRQS, |
721 | .init_irq = mainstone_init_irq, | 662 | .init_irq = pxa27x_init_irq, |
722 | .handle_irq = pxa27x_handle_irq, | 663 | .handle_irq = pxa27x_handle_irq, |
723 | .init_time = pxa_timer_init, | 664 | .init_time = pxa_timer_init, |
724 | .init_machine = mainstone_init, | 665 | .init_machine = mainstone_init, |
diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c new file mode 100644 index 000000000000..f1aeb54fabe3 --- /dev/null +++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c | |||
@@ -0,0 +1,200 @@ | |||
1 | /* | ||
2 | * Intel Reference Systems cplds | ||
3 | * | ||
4 | * Copyright (C) 2014 Robert Jarzmik | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * CPLDs motherboard driver, supporting the Lubbock and Mainstone SoC boards. | ||
12 | */ | ||
13 | |||
14 | #include <linux/bitops.h> | ||
15 | #include <linux/gpio.h> | ||
16 | #include <linux/gpio/consumer.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/io.h> | ||
19 | #include <linux/irq.h> | ||
20 | #include <linux/irqdomain.h> | ||
21 | #include <linux/mfd/core.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/of_platform.h> | ||
24 | |||
25 | #define FPGA_IRQ_MASK_EN 0x0 | ||
26 | #define FPGA_IRQ_SET_CLR 0x10 | ||
27 | |||
28 | #define CPLDS_NB_IRQ 32 | ||
29 | |||
30 | struct cplds { | ||
31 | void __iomem *base; | ||
32 | int irq; | ||
33 | unsigned int irq_mask; | ||
34 | struct gpio_desc *gpio0; | ||
35 | struct irq_domain *irqdomain; | ||
36 | }; | ||
37 | |||
38 | static irqreturn_t cplds_irq_handler(int in_irq, void *d) | ||
39 | { | ||
40 | struct cplds *fpga = d; | ||
41 | unsigned long pending; | ||
42 | unsigned int bit; | ||
43 | |||
44 | pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask; | ||
45 | for_each_set_bit(bit, &pending, CPLDS_NB_IRQ) | ||
46 | generic_handle_irq(irq_find_mapping(fpga->irqdomain, bit)); | ||
47 | |||
48 | return IRQ_HANDLED; | ||
49 | } | ||
50 | |||
51 | static void cplds_irq_mask_ack(struct irq_data *d) | ||
52 | { | ||
53 | struct cplds *fpga = irq_data_get_irq_chip_data(d); | ||
54 | unsigned int cplds_irq = irqd_to_hwirq(d); | ||
55 | unsigned int set, bit = BIT(cplds_irq); | ||
56 | |||
57 | fpga->irq_mask &= ~bit; | ||
58 | writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN); | ||
59 | set = readl(fpga->base + FPGA_IRQ_SET_CLR); | ||
60 | writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR); | ||
61 | } | ||
62 | |||
63 | static void cplds_irq_unmask(struct irq_data *d) | ||
64 | { | ||
65 | struct cplds *fpga = irq_data_get_irq_chip_data(d); | ||
66 | unsigned int cplds_irq = irqd_to_hwirq(d); | ||
67 | unsigned int bit = BIT(cplds_irq); | ||
68 | |||
69 | fpga->irq_mask |= bit; | ||
70 | writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN); | ||
71 | } | ||
72 | |||
73 | static struct irq_chip cplds_irq_chip = { | ||
74 | .name = "pxa_cplds", | ||
75 | .irq_mask_ack = cplds_irq_mask_ack, | ||
76 | .irq_unmask = cplds_irq_unmask, | ||
77 | .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE, | ||
78 | }; | ||
79 | |||
80 | static int cplds_irq_domain_map(struct irq_domain *d, unsigned int irq, | ||
81 | irq_hw_number_t hwirq) | ||
82 | { | ||
83 | struct cplds *fpga = d->host_data; | ||
84 | |||
85 | irq_set_chip_and_handler(irq, &cplds_irq_chip, handle_level_irq); | ||
86 | irq_set_chip_data(irq, fpga); | ||
87 | |||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static const struct irq_domain_ops cplds_irq_domain_ops = { | ||
92 | .xlate = irq_domain_xlate_twocell, | ||
93 | .map = cplds_irq_domain_map, | ||
94 | }; | ||
95 | |||
96 | static int cplds_resume(struct platform_device *pdev) | ||
97 | { | ||
98 | struct cplds *fpga = platform_get_drvdata(pdev); | ||
99 | |||
100 | writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN); | ||
101 | |||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | static int cplds_probe(struct platform_device *pdev) | ||
106 | { | ||
107 | struct resource *res; | ||
108 | struct cplds *fpga; | ||
109 | int ret; | ||
110 | int base_irq = 0; | ||
111 | unsigned long irqflags = 0; | ||
112 | |||
113 | fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL); | ||
114 | if (!fpga) | ||
115 | return -ENOMEM; | ||
116 | |||
117 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
118 | if (res) { | ||
119 | fpga->irq = (unsigned int)res->start; | ||
120 | irqflags = res->flags; | ||
121 | } | ||
122 | if (!fpga->irq) | ||
123 | return -ENODEV; | ||
124 | |||
125 | base_irq = platform_get_irq(pdev, 1); | ||
126 | if (base_irq < 0) | ||
127 | base_irq = 0; | ||
128 | |||
129 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
130 | fpga->base = devm_ioremap_resource(&pdev->dev, res); | ||
131 | if (IS_ERR(fpga->base)) | ||
132 | return PTR_ERR(fpga->base); | ||
133 | |||
134 | platform_set_drvdata(pdev, fpga); | ||
135 | |||
136 | writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN); | ||
137 | writel(0, fpga->base + FPGA_IRQ_SET_CLR); | ||
138 | |||
139 | ret = devm_request_irq(&pdev->dev, fpga->irq, cplds_irq_handler, | ||
140 | irqflags, dev_name(&pdev->dev), fpga); | ||
141 | if (ret == -ENOSYS) | ||
142 | return -EPROBE_DEFER; | ||
143 | |||
144 | if (ret) { | ||
145 | dev_err(&pdev->dev, "couldn't request main irq%d: %d\n", | ||
146 | fpga->irq, ret); | ||
147 | return ret; | ||
148 | } | ||
149 | |||
150 | irq_set_irq_wake(fpga->irq, 1); | ||
151 | fpga->irqdomain = irq_domain_add_linear(pdev->dev.of_node, | ||
152 | CPLDS_NB_IRQ, | ||
153 | &cplds_irq_domain_ops, fpga); | ||
154 | if (!fpga->irqdomain) | ||
155 | return -ENODEV; | ||
156 | |||
157 | if (base_irq) { | ||
158 | ret = irq_create_strict_mappings(fpga->irqdomain, base_irq, 0, | ||
159 | CPLDS_NB_IRQ); | ||
160 | if (ret) { | ||
161 | dev_err(&pdev->dev, "couldn't create the irq mapping %d..%d\n", | ||
162 | base_irq, base_irq + CPLDS_NB_IRQ); | ||
163 | return ret; | ||
164 | } | ||
165 | } | ||
166 | |||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | static int cplds_remove(struct platform_device *pdev) | ||
171 | { | ||
172 | struct cplds *fpga = platform_get_drvdata(pdev); | ||
173 | |||
174 | irq_set_chip_and_handler(fpga->irq, NULL, NULL); | ||
175 | |||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | static const struct of_device_id cplds_id_table[] = { | ||
180 | { .compatible = "intel,lubbock-cplds-irqs", }, | ||
181 | { .compatible = "intel,mainstone-cplds-irqs", }, | ||
182 | { } | ||
183 | }; | ||
184 | MODULE_DEVICE_TABLE(of, cplds_id_table); | ||
185 | |||
186 | static struct platform_driver cplds_driver = { | ||
187 | .driver = { | ||
188 | .name = "pxa_cplds_irqs", | ||
189 | .of_match_table = of_match_ptr(cplds_id_table), | ||
190 | }, | ||
191 | .probe = cplds_probe, | ||
192 | .remove = cplds_remove, | ||
193 | .resume = cplds_resume, | ||
194 | }; | ||
195 | |||
196 | module_platform_driver(cplds_driver); | ||
197 | |||
198 | MODULE_DESCRIPTION("PXA Cplds interrupts driver"); | ||
199 | MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>"); | ||
200 | MODULE_LICENSE("GPL"); | ||
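Since the probe creates strict mappings starting at the legacy base passed in via the third resource (LUBBOCK_IRQ(0)..LUBBOCK_IRQ(6), MAINSTONE_IRQ(0)..MAINSTONE_IRQ(15)), board code that still uses the fixed IRQ numbers should keep working. A hedged sketch of such a consumer; the handler and its name are illustrative, not part of the patch:

static irqreturn_t sd_detect_isr(int irq, void *dev_id)
{
	/* card-detect line toggled; real code would schedule a rescan here */
	return IRQ_HANDLED;
}

static int __init sd_detect_attach(void)
{
	return request_irq(LUBBOCK_SD_IRQ, sd_detect_isr, 0,
			   "lubbock-sd-detect", NULL);
}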
diff --git a/arch/arm/mach-rockchip/pm.c b/arch/arm/mach-rockchip/pm.c index b07d88602073..22812fe06460 100644 --- a/arch/arm/mach-rockchip/pm.c +++ b/arch/arm/mach-rockchip/pm.c | |||
@@ -44,9 +44,11 @@ static void __iomem *rk3288_bootram_base; | |||
44 | static phys_addr_t rk3288_bootram_phy; | 44 | static phys_addr_t rk3288_bootram_phy; |
45 | 45 | ||
46 | static struct regmap *pmu_regmap; | 46 | static struct regmap *pmu_regmap; |
47 | static struct regmap *grf_regmap; | ||
47 | static struct regmap *sgrf_regmap; | 48 | static struct regmap *sgrf_regmap; |
48 | 49 | ||
49 | static u32 rk3288_pmu_pwr_mode_con; | 50 | static u32 rk3288_pmu_pwr_mode_con; |
51 | static u32 rk3288_grf_soc_con0; | ||
50 | static u32 rk3288_sgrf_soc_con0; | 52 | static u32 rk3288_sgrf_soc_con0; |
51 | 53 | ||
52 | static inline u32 rk3288_l2_config(void) | 54 | static inline u32 rk3288_l2_config(void) |
@@ -70,12 +72,26 @@ static void rk3288_slp_mode_set(int level) | |||
70 | { | 72 | { |
71 | u32 mode_set, mode_set1; | 73 | u32 mode_set, mode_set1; |
72 | 74 | ||
75 | regmap_read(grf_regmap, RK3288_GRF_SOC_CON0, &rk3288_grf_soc_con0); | ||
76 | |||
73 | regmap_read(sgrf_regmap, RK3288_SGRF_SOC_CON0, &rk3288_sgrf_soc_con0); | 77 | regmap_read(sgrf_regmap, RK3288_SGRF_SOC_CON0, &rk3288_sgrf_soc_con0); |
74 | 78 | ||
75 | regmap_read(pmu_regmap, RK3288_PMU_PWRMODE_CON, | 79 | regmap_read(pmu_regmap, RK3288_PMU_PWRMODE_CON, |
76 | &rk3288_pmu_pwr_mode_con); | 80 | &rk3288_pmu_pwr_mode_con); |
77 | 81 | ||
78 | /* | 82 | /* |
83 | * We need to set the GRF_FORCE_JTAG bit here for the debug module, | ||
84 | * otherwise it may become inaccessible after resume. | ||
85 | * This creates a potential security issue, as the sdmmc pins may | ||
86 | * accept jtag data for a short time during resume if no card is | ||
87 | * inserted. | ||
88 | * But this is of course also true for the regular boot, before we | ||
89 | * turn off the jtag/sdmmc autodetect. | ||
90 | */ | ||
91 | regmap_write(grf_regmap, RK3288_GRF_SOC_CON0, GRF_FORCE_JTAG | | ||
92 | GRF_FORCE_JTAG_WRITE); | ||
93 | |||
94 | /* | ||
79 | * SGRF_FAST_BOOT_EN - system to boot from FAST_BOOT_ADDR | 95 | * SGRF_FAST_BOOT_EN - system to boot from FAST_BOOT_ADDR |
80 | * PCLK_WDT_GATE - disable WDT during suspend. | 96 | * PCLK_WDT_GATE - disable WDT during suspend. |
81 | */ | 97 | */ |
@@ -83,6 +99,13 @@ static void rk3288_slp_mode_set(int level) | |||
83 | SGRF_PCLK_WDT_GATE | SGRF_FAST_BOOT_EN | 99 | SGRF_PCLK_WDT_GATE | SGRF_FAST_BOOT_EN |
84 | | SGRF_PCLK_WDT_GATE_WRITE | SGRF_FAST_BOOT_EN_WRITE); | 100 | | SGRF_PCLK_WDT_GATE_WRITE | SGRF_FAST_BOOT_EN_WRITE); |
85 | 101 | ||
102 | /* | ||
103 | * The dapswjdp cannot auto-reset before resume, which may cause it to | ||
104 | * access an illegal address during resume. Let's disable it before | ||
105 | * suspend; the MASKROM will enable it again on resume. | ||
106 | */ | ||
107 | regmap_write(sgrf_regmap, RK3288_SGRF_CPU_CON0, SGRF_DAPDEVICEEN_WRITE); | ||
108 | |||
86 | /* booting address of resuming system is from this register value */ | 109 | /* booting address of resuming system is from this register value */ |
87 | regmap_write(sgrf_regmap, RK3288_SGRF_FAST_BOOT_ADDR, | 110 | regmap_write(sgrf_regmap, RK3288_SGRF_FAST_BOOT_ADDR, |
88 | rk3288_bootram_phy); | 111 | rk3288_bootram_phy); |
@@ -128,6 +151,9 @@ static void rk3288_slp_mode_set_resume(void) | |||
128 | regmap_write(sgrf_regmap, RK3288_SGRF_SOC_CON0, | 151 | regmap_write(sgrf_regmap, RK3288_SGRF_SOC_CON0, |
129 | rk3288_sgrf_soc_con0 | SGRF_PCLK_WDT_GATE_WRITE | 152 | rk3288_sgrf_soc_con0 | SGRF_PCLK_WDT_GATE_WRITE |
130 | | SGRF_FAST_BOOT_EN_WRITE); | 153 | | SGRF_FAST_BOOT_EN_WRITE); |
154 | |||
155 | regmap_write(grf_regmap, RK3288_GRF_SOC_CON0, rk3288_grf_soc_con0 | | ||
156 | GRF_FORCE_JTAG_WRITE); | ||
131 | } | 157 | } |
132 | 158 | ||
133 | static int rockchip_lpmode_enter(unsigned long arg) | 159 | static int rockchip_lpmode_enter(unsigned long arg) |
@@ -186,6 +212,13 @@ static int rk3288_suspend_init(struct device_node *np) | |||
186 | return PTR_ERR(pmu_regmap); | 212 | return PTR_ERR(pmu_regmap); |
187 | } | 213 | } |
188 | 214 | ||
215 | grf_regmap = syscon_regmap_lookup_by_compatible( | ||
216 | "rockchip,rk3288-grf"); | ||
217 | if (IS_ERR(grf_regmap)) { | ||
218 | pr_err("%s: could not find grf regmap\n", __func__); | ||
219 | return PTR_ERR(grf_regmap); | ||
220 | } | ||
221 | |||
189 | sram_np = of_find_compatible_node(NULL, NULL, | 222 | sram_np = of_find_compatible_node(NULL, NULL, |
190 | "rockchip,rk3288-pmu-sram"); | 223 | "rockchip,rk3288-pmu-sram"); |
191 | if (!sram_np) { | 224 | if (!sram_np) { |
diff --git a/arch/arm/mach-rockchip/pm.h b/arch/arm/mach-rockchip/pm.h index 03ff31d8282d..f8a747bc1437 100644 --- a/arch/arm/mach-rockchip/pm.h +++ b/arch/arm/mach-rockchip/pm.h | |||
@@ -48,6 +48,10 @@ static inline void rockchip_suspend_init(void) | |||
48 | #define RK3288_PMU_WAKEUP_RST_CLR_CNT 0x44 | 48 | #define RK3288_PMU_WAKEUP_RST_CLR_CNT 0x44 |
49 | #define RK3288_PMU_PWRMODE_CON1 0x90 | 49 | #define RK3288_PMU_PWRMODE_CON1 0x90 |
50 | 50 | ||
51 | #define RK3288_GRF_SOC_CON0 0x244 | ||
52 | #define GRF_FORCE_JTAG BIT(12) | ||
53 | #define GRF_FORCE_JTAG_WRITE BIT(28) | ||
54 | |||
51 | #define RK3288_SGRF_SOC_CON0 (0x0000) | 55 | #define RK3288_SGRF_SOC_CON0 (0x0000) |
52 | #define RK3288_SGRF_FAST_BOOT_ADDR (0x0120) | 56 | #define RK3288_SGRF_FAST_BOOT_ADDR (0x0120) |
53 | #define SGRF_PCLK_WDT_GATE BIT(6) | 57 | #define SGRF_PCLK_WDT_GATE BIT(6) |
@@ -55,6 +59,10 @@ static inline void rockchip_suspend_init(void) | |||
55 | #define SGRF_FAST_BOOT_EN BIT(8) | 59 | #define SGRF_FAST_BOOT_EN BIT(8) |
56 | #define SGRF_FAST_BOOT_EN_WRITE BIT(24) | 60 | #define SGRF_FAST_BOOT_EN_WRITE BIT(24) |
57 | 61 | ||
62 | #define RK3288_SGRF_CPU_CON0 (0x40) | ||
63 | #define SGRF_DAPDEVICEEN BIT(0) | ||
64 | #define SGRF_DAPDEVICEEN_WRITE BIT(16) | ||
65 | |||
58 | #define RK3288_CRU_MODE_CON 0x50 | 66 | #define RK3288_CRU_MODE_CON 0x50 |
59 | #define RK3288_CRU_SEL0_CON 0x60 | 67 | #define RK3288_CRU_SEL0_CON 0x60 |
60 | #define RK3288_CRU_SEL1_CON 0x64 | 68 | #define RK3288_CRU_SEL1_CON 0x64 |
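Each new *_WRITE define sits exactly 16 bits above its value bit (BIT(28) over BIT(12), BIT(16) over BIT(0)), which matches the usual rk3288 GRF/SGRF convention of a per-bit write-enable mask in the upper half-word; treat that reading as an assumption, since the patch does not spell it out. Under that assumption, a tiny helper with an illustrative name shows how the regmap_write() values above are formed:

/* value bit in the low half-word, matching write-enable bit in the high one */
static inline u32 rk3288_field(u32 bit, bool set)
{
	return (bit << 16) | (set ? bit : 0);
}

/*
 * rk3288_field(GRF_FORCE_JTAG, true)    == GRF_FORCE_JTAG_WRITE | GRF_FORCE_JTAG
 * rk3288_field(SGRF_DAPDEVICEEN, false) == SGRF_DAPDEVICEEN_WRITE alone
 */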
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c index d360ec044b66..b6cf3b449428 100644 --- a/arch/arm/mach-rockchip/rockchip.c +++ b/arch/arm/mach-rockchip/rockchip.c | |||
@@ -30,11 +30,30 @@ | |||
30 | #include "pm.h" | 30 | #include "pm.h" |
31 | 31 | ||
32 | #define RK3288_GRF_SOC_CON0 0x244 | 32 | #define RK3288_GRF_SOC_CON0 0x244 |
33 | #define RK3288_TIMER6_7_PHYS 0xff810000 | ||
33 | 34 | ||
34 | static void __init rockchip_timer_init(void) | 35 | static void __init rockchip_timer_init(void) |
35 | { | 36 | { |
36 | if (of_machine_is_compatible("rockchip,rk3288")) { | 37 | if (of_machine_is_compatible("rockchip,rk3288")) { |
37 | struct regmap *grf; | 38 | struct regmap *grf; |
39 | void __iomem *reg_base; | ||
40 | |||
41 | /* | ||
42 | * Most/all uboot versions for rk3288 don't enable timer7 | ||
43 | * which is needed for the architected timer to work. | ||
44 | * So make sure it is running during early boot. | ||
45 | */ | ||
46 | reg_base = ioremap(RK3288_TIMER6_7_PHYS, SZ_16K); | ||
47 | if (reg_base) { | ||
48 | writel(0, reg_base + 0x30); | ||
49 | writel(0xffffffff, reg_base + 0x20); | ||
50 | writel(0xffffffff, reg_base + 0x24); | ||
51 | writel(1, reg_base + 0x30); | ||
52 | dsb(); | ||
53 | iounmap(reg_base); | ||
54 | } else { | ||
55 | pr_err("rockchip: could not map timer7 registers\n"); | ||
56 | } | ||
38 | 57 | ||
39 | /* | 58 | /* |
40 | * Disable auto jtag/sdmmc switching that causes issues | 59 | * Disable auto jtag/sdmmc switching that causes issues |
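The raw offsets in the timer7 bring-up above look like timer7's registers inside the shared timer6/7 block: 0x20/0x24 as the two load-count words and 0x30 as the control register, with bit 0 acting as the enable. That mapping is inferred from the rockchip timer layout, not stated in the patch; named here purely for illustration:

#define RK_TIMER7_LOAD_COUNT0	0x20	/* low word of the reload value */
#define RK_TIMER7_LOAD_COUNT1	0x24	/* high word of the reload value */
#define RK_TIMER7_CTRL		0x30	/* bit 0: timer enable */

/* disable, program a free-running maximum count, then re-enable */
writel(0, reg_base + RK_TIMER7_CTRL);
writel(0xffffffff, reg_base + RK_TIMER7_LOAD_COUNT0);
writel(0xffffffff, reg_base + RK_TIMER7_LOAD_COUNT1);
writel(1, reg_base + RK_TIMER7_CTRL);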
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 09c5fe3d30c2..7e7583ddd607 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -1878,7 +1878,7 @@ struct dma_map_ops iommu_coherent_ops = { | |||
1878 | * arm_iommu_attach_device function. | 1878 | * arm_iommu_attach_device function. |
1879 | */ | 1879 | */ |
1880 | struct dma_iommu_mapping * | 1880 | struct dma_iommu_mapping * |
1881 | arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size) | 1881 | arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size) |
1882 | { | 1882 | { |
1883 | unsigned int bits = size >> PAGE_SHIFT; | 1883 | unsigned int bits = size >> PAGE_SHIFT; |
1884 | unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long); | 1884 | unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long); |
@@ -1886,6 +1886,10 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size) | |||
1886 | int extensions = 1; | 1886 | int extensions = 1; |
1887 | int err = -ENOMEM; | 1887 | int err = -ENOMEM; |
1888 | 1888 | ||
1889 | /* currently only 32-bit DMA address space is supported */ | ||
1890 | if (size > DMA_BIT_MASK(32) + 1) | ||
1891 | return ERR_PTR(-ERANGE); | ||
1892 | |||
1889 | if (!bitmap_size) | 1893 | if (!bitmap_size) |
1890 | return ERR_PTR(-EINVAL); | 1894 | return ERR_PTR(-EINVAL); |
1891 | 1895 | ||
@@ -2057,13 +2061,6 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, | |||
2057 | if (!iommu) | 2061 | if (!iommu) |
2058 | return false; | 2062 | return false; |
2059 | 2063 | ||
2060 | /* | ||
2061 | * currently arm_iommu_create_mapping() takes a max of size_t | ||
2062 | * for size param. So check this limit for now. | ||
2063 | */ | ||
2064 | if (size > SIZE_MAX) | ||
2065 | return false; | ||
2066 | |||
2067 | mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); | 2064 | mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); |
2068 | if (IS_ERR(mapping)) { | 2065 | if (IS_ERR(mapping)) { |
2069 | pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", | 2066 | pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", |
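With the size parameter widened to u64 and the explicit cap added, a caller can request a full 4 GiB window, and anything larger now fails with -ERANGE instead of being silently mishandled. A hedged usage sketch; the base address and function name are made up for illustration:

static int example_setup_iommu(struct device *dev)
{
	struct dma_iommu_mapping *map;

	/* exactly 4 GiB: DMA_BIT_MASK(32) + 1 is the largest accepted size */
	map = arm_iommu_create_mapping(&platform_bus_type, 0x80000000,
				       DMA_BIT_MASK(32) + 1);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return arm_iommu_attach_device(dev, map);
}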
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index aa0519eed698..774ef1323554 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S | |||
@@ -22,8 +22,6 @@ | |||
22 | * | 22 | * |
23 | * These are the low level assembler for performing cache and TLB | 23 | * These are the low level assembler for performing cache and TLB |
24 | * functions on the arm1020. | 24 | * functions on the arm1020. |
25 | * | ||
26 | * CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt | ||
27 | */ | 25 | */ |
28 | #include <linux/linkage.h> | 26 | #include <linux/linkage.h> |
29 | #include <linux/init.h> | 27 | #include <linux/init.h> |
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index bff4c7f70fd6..ae3c27b71594 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S | |||
@@ -22,8 +22,6 @@ | |||
22 | * | 22 | * |
23 | * These are the low level assembler for performing cache and TLB | 23 | * These are the low level assembler for performing cache and TLB |
24 | * functions on the arm1020e. | 24 | * functions on the arm1020e. |
25 | * | ||
26 | * CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt | ||
27 | */ | 25 | */ |
28 | #include <linux/linkage.h> | 26 | #include <linux/linkage.h> |
29 | #include <linux/init.h> | 27 | #include <linux/init.h> |
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index ede8c54ab4aa..32a47cc19076 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S | |||
@@ -441,9 +441,6 @@ ENTRY(cpu_arm925_set_pte_ext) | |||
441 | .type __arm925_setup, #function | 441 | .type __arm925_setup, #function |
442 | __arm925_setup: | 442 | __arm925_setup: |
443 | mov r0, #0 | 443 | mov r0, #0 |
444 | #if defined(CONFIG_CPU_ICACHE_STREAMING_DISABLE) | ||
445 | orr r0,r0,#1 << 7 | ||
446 | #endif | ||
447 | 444 | ||
448 | /* Transparent on, D-cache clean & flush mode. See NOTE2 above */ | 445 | /* Transparent on, D-cache clean & flush mode. See NOTE2 above */ |
449 | orr r0,r0,#1 << 1 @ transparent mode on | 446 | orr r0,r0,#1 << 1 @ transparent mode on |
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index e494d6d6acbe..92e08bf37aad 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S | |||
@@ -602,7 +602,6 @@ __\name\()_proc_info: | |||
602 | PMD_SECT_AP_WRITE | \ | 602 | PMD_SECT_AP_WRITE | \ |
603 | PMD_SECT_AP_READ | 603 | PMD_SECT_AP_READ |
604 | initfn __feroceon_setup, __\name\()_proc_info | 604 | initfn __feroceon_setup, __\name\()_proc_info |
605 | .long __feroceon_setup | ||
606 | .long cpu_arch_name | 605 | .long cpu_arch_name |
607 | .long cpu_elf_name | 606 | .long cpu_elf_name |
608 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | 607 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP |
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index 793551d15f1d..498325074a06 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/gfp.h> | 4 | #include <linux/gfp.h> |
5 | #include <linux/highmem.h> | 5 | #include <linux/highmem.h> |
6 | #include <linux/export.h> | 6 | #include <linux/export.h> |
7 | #include <linux/memblock.h> | ||
7 | #include <linux/of_address.h> | 8 | #include <linux/of_address.h> |
8 | #include <linux/slab.h> | 9 | #include <linux/slab.h> |
9 | #include <linux/types.h> | 10 | #include <linux/types.h> |
@@ -21,6 +22,20 @@ | |||
21 | #include <asm/xen/hypercall.h> | 22 | #include <asm/xen/hypercall.h> |
22 | #include <asm/xen/interface.h> | 23 | #include <asm/xen/interface.h> |
23 | 24 | ||
25 | unsigned long xen_get_swiotlb_free_pages(unsigned int order) | ||
26 | { | ||
27 | struct memblock_region *reg; | ||
28 | gfp_t flags = __GFP_NOWARN; | ||
29 | |||
30 | for_each_memblock(memory, reg) { | ||
31 | if (reg->base < (phys_addr_t)0xffffffff) { | ||
32 | flags |= __GFP_DMA; | ||
33 | break; | ||
34 | } | ||
35 | } | ||
36 | return __get_free_pages(flags, order); | ||
37 | } | ||
38 | |||
24 | enum dma_cache_op { | 39 | enum dma_cache_op { |
25 | DMA_UNMAP, | 40 | DMA_UNMAP, |
26 | DMA_MAP, | 41 | DMA_MAP, |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 4269dba63cf1..7796af4b1d6f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -31,6 +31,7 @@ config ARM64 | |||
31 | select GENERIC_EARLY_IOREMAP | 31 | select GENERIC_EARLY_IOREMAP |
32 | select GENERIC_IRQ_PROBE | 32 | select GENERIC_IRQ_PROBE |
33 | select GENERIC_IRQ_SHOW | 33 | select GENERIC_IRQ_SHOW |
34 | select GENERIC_IRQ_SHOW_LEVEL | ||
34 | select GENERIC_PCI_IOMAP | 35 | select GENERIC_PCI_IOMAP |
35 | select GENERIC_SCHED_CLOCK | 36 | select GENERIC_SCHED_CLOCK |
36 | select GENERIC_SMP_IDLE_THREAD | 37 | select GENERIC_SMP_IDLE_THREAD |
diff --git a/arch/arm64/crypto/crc32-arm64.c b/arch/arm64/crypto/crc32-arm64.c index 9499199924ae..6a37c3c6b11d 100644 --- a/arch/arm64/crypto/crc32-arm64.c +++ b/arch/arm64/crypto/crc32-arm64.c | |||
@@ -147,13 +147,21 @@ static int chksum_final(struct shash_desc *desc, u8 *out) | |||
147 | { | 147 | { |
148 | struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); | 148 | struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); |
149 | 149 | ||
150 | put_unaligned_le32(ctx->crc, out); | ||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | static int chksumc_final(struct shash_desc *desc, u8 *out) | ||
155 | { | ||
156 | struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); | ||
157 | |||
150 | put_unaligned_le32(~ctx->crc, out); | 158 | put_unaligned_le32(~ctx->crc, out); |
151 | return 0; | 159 | return 0; |
152 | } | 160 | } |
153 | 161 | ||
154 | static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out) | 162 | static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out) |
155 | { | 163 | { |
156 | put_unaligned_le32(~crc32_arm64_le_hw(crc, data, len), out); | 164 | put_unaligned_le32(crc32_arm64_le_hw(crc, data, len), out); |
157 | return 0; | 165 | return 0; |
158 | } | 166 | } |
159 | 167 | ||
@@ -199,6 +207,14 @@ static int crc32_cra_init(struct crypto_tfm *tfm) | |||
199 | { | 207 | { |
200 | struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); | 208 | struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); |
201 | 209 | ||
210 | mctx->key = 0; | ||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | static int crc32c_cra_init(struct crypto_tfm *tfm) | ||
215 | { | ||
216 | struct chksum_ctx *mctx = crypto_tfm_ctx(tfm); | ||
217 | |||
202 | mctx->key = ~0; | 218 | mctx->key = ~0; |
203 | return 0; | 219 | return 0; |
204 | } | 220 | } |
@@ -229,7 +245,7 @@ static struct shash_alg crc32c_alg = { | |||
229 | .setkey = chksum_setkey, | 245 | .setkey = chksum_setkey, |
230 | .init = chksum_init, | 246 | .init = chksum_init, |
231 | .update = chksumc_update, | 247 | .update = chksumc_update, |
232 | .final = chksum_final, | 248 | .final = chksumc_final, |
233 | .finup = chksumc_finup, | 249 | .finup = chksumc_finup, |
234 | .digest = chksumc_digest, | 250 | .digest = chksumc_digest, |
235 | .descsize = sizeof(struct chksum_desc_ctx), | 251 | .descsize = sizeof(struct chksum_desc_ctx), |
@@ -241,7 +257,7 @@ static struct shash_alg crc32c_alg = { | |||
241 | .cra_alignmask = 0, | 257 | .cra_alignmask = 0, |
242 | .cra_ctxsize = sizeof(struct chksum_ctx), | 258 | .cra_ctxsize = sizeof(struct chksum_ctx), |
243 | .cra_module = THIS_MODULE, | 259 | .cra_module = THIS_MODULE, |
244 | .cra_init = crc32_cra_init, | 260 | .cra_init = crc32c_cra_init, |
245 | } | 261 | } |
246 | }; | 262 | }; |
247 | 263 | ||
diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index 114e7cc5de8c..aefda9868627 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ b/arch/arm64/crypto/sha1-ce-glue.c | |||
@@ -74,6 +74,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, | |||
74 | 74 | ||
75 | static int sha1_ce_final(struct shash_desc *desc, u8 *out) | 75 | static int sha1_ce_final(struct shash_desc *desc, u8 *out) |
76 | { | 76 | { |
77 | struct sha1_ce_state *sctx = shash_desc_ctx(desc); | ||
78 | |||
79 | sctx->finalize = 0; | ||
77 | kernel_neon_begin_partial(16); | 80 | kernel_neon_begin_partial(16); |
78 | sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform); | 81 | sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform); |
79 | kernel_neon_end(); | 82 | kernel_neon_end(); |
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index 1340e44c048b..7cd587564a41 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c | |||
@@ -75,6 +75,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data, | |||
75 | 75 | ||
76 | static int sha256_ce_final(struct shash_desc *desc, u8 *out) | 76 | static int sha256_ce_final(struct shash_desc *desc, u8 *out) |
77 | { | 77 | { |
78 | struct sha256_ce_state *sctx = shash_desc_ctx(desc); | ||
79 | |||
80 | sctx->finalize = 0; | ||
78 | kernel_neon_begin_partial(28); | 81 | kernel_neon_begin_partial(28); |
79 | sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform); | 82 | sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform); |
80 | kernel_neon_end(); | 83 | kernel_neon_end(); |
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index a5abb0062d6e..71f19c4dc0de 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h | |||
@@ -65,6 +65,14 @@ do { \ | |||
65 | do { \ | 65 | do { \ |
66 | compiletime_assert_atomic_type(*p); \ | 66 | compiletime_assert_atomic_type(*p); \ |
67 | switch (sizeof(*p)) { \ | 67 | switch (sizeof(*p)) { \ |
68 | case 1: \ | ||
69 | asm volatile ("stlrb %w1, %0" \ | ||
70 | : "=Q" (*p) : "r" (v) : "memory"); \ | ||
71 | break; \ | ||
72 | case 2: \ | ||
73 | asm volatile ("stlrh %w1, %0" \ | ||
74 | : "=Q" (*p) : "r" (v) : "memory"); \ | ||
75 | break; \ | ||
68 | case 4: \ | 76 | case 4: \ |
69 | asm volatile ("stlr %w1, %0" \ | 77 | asm volatile ("stlr %w1, %0" \ |
70 | : "=Q" (*p) : "r" (v) : "memory"); \ | 78 | : "=Q" (*p) : "r" (v) : "memory"); \ |
@@ -81,6 +89,14 @@ do { \ | |||
81 | typeof(*p) ___p1; \ | 89 | typeof(*p) ___p1; \ |
82 | compiletime_assert_atomic_type(*p); \ | 90 | compiletime_assert_atomic_type(*p); \ |
83 | switch (sizeof(*p)) { \ | 91 | switch (sizeof(*p)) { \ |
92 | case 1: \ | ||
93 | asm volatile ("ldarb %w0, %1" \ | ||
94 | : "=r" (___p1) : "Q" (*p) : "memory"); \ | ||
95 | break; \ | ||
96 | case 2: \ | ||
97 | asm volatile ("ldarh %w0, %1" \ | ||
98 | : "=r" (___p1) : "Q" (*p) : "memory"); \ | ||
99 | break; \ | ||
84 | case 4: \ | 100 | case 4: \ |
85 | asm volatile ("ldar %w0, %1" \ | 101 | asm volatile ("ldar %w0, %1" \ |
86 | : "=r" (___p1) : "Q" (*p) : "memory"); \ | 102 | : "=r" (___p1) : "Q" (*p) : "memory"); \ |
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 195991dadc37..23f25acf43a9 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -1310,7 +1310,7 @@ static const struct of_device_id armpmu_of_device_ids[] = { | |||
1310 | 1310 | ||
1311 | static int armpmu_device_probe(struct platform_device *pdev) | 1311 | static int armpmu_device_probe(struct platform_device *pdev) |
1312 | { | 1312 | { |
1313 | int i, *irqs; | 1313 | int i, irq, *irqs; |
1314 | 1314 | ||
1315 | if (!cpu_pmu) | 1315 | if (!cpu_pmu) |
1316 | return -ENODEV; | 1316 | return -ENODEV; |
@@ -1319,6 +1319,11 @@ static int armpmu_device_probe(struct platform_device *pdev) | |||
1319 | if (!irqs) | 1319 | if (!irqs) |
1320 | return -ENOMEM; | 1320 | return -ENOMEM; |
1321 | 1321 | ||
1322 | /* Don't bother with PPIs; they're already affine */ | ||
1323 | irq = platform_get_irq(pdev, 0); | ||
1324 | if (irq >= 0 && irq_is_percpu(irq)) | ||
1325 | return 0; | ||
1326 | |||
1322 | for (i = 0; i < pdev->num_resources; ++i) { | 1327 | for (i = 0; i < pdev->num_resources; ++i) { |
1323 | struct device_node *dn; | 1328 | struct device_node *dn; |
1324 | int cpu; | 1329 | int cpu; |
@@ -1327,7 +1332,7 @@ static int armpmu_device_probe(struct platform_device *pdev) | |||
1327 | i); | 1332 | i); |
1328 | if (!dn) { | 1333 | if (!dn) { |
1329 | pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", | 1334 | pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", |
1330 | of_node_full_name(dn), i); | 1335 | of_node_full_name(pdev->dev.of_node), i); |
1331 | break; | 1336 | break; |
1332 | } | 1337 | } |
1333 | 1338 | ||
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index ef7d112f5ce0..b0bd4e5fd5cf 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c | |||
@@ -67,8 +67,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) | |||
67 | 67 | ||
68 | *ret_page = phys_to_page(phys); | 68 | *ret_page = phys_to_page(phys); |
69 | ptr = (void *)val; | 69 | ptr = (void *)val; |
70 | if (flags & __GFP_ZERO) | 70 | memset(ptr, 0, size); |
71 | memset(ptr, 0, size); | ||
72 | } | 71 | } |
73 | 72 | ||
74 | return ptr; | 73 | return ptr; |
@@ -105,7 +104,6 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, | |||
105 | struct page *page; | 104 | struct page *page; |
106 | void *addr; | 105 | void *addr; |
107 | 106 | ||
108 | size = PAGE_ALIGN(size); | ||
109 | page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, | 107 | page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, |
110 | get_order(size)); | 108 | get_order(size)); |
111 | if (!page) | 109 | if (!page) |
@@ -113,8 +111,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size, | |||
113 | 111 | ||
114 | *dma_handle = phys_to_dma(dev, page_to_phys(page)); | 112 | *dma_handle = phys_to_dma(dev, page_to_phys(page)); |
115 | addr = page_address(page); | 113 | addr = page_address(page); |
116 | if (flags & __GFP_ZERO) | 114 | memset(addr, 0, size); |
117 | memset(addr, 0, size); | ||
118 | return addr; | 115 | return addr; |
119 | } else { | 116 | } else { |
120 | return swiotlb_alloc_coherent(dev, size, dma_handle, flags); | 117 | return swiotlb_alloc_coherent(dev, size, dma_handle, flags); |
@@ -195,6 +192,8 @@ static void __dma_free(struct device *dev, size_t size, | |||
195 | { | 192 | { |
196 | void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); | 193 | void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle)); |
197 | 194 | ||
195 | size = PAGE_ALIGN(size); | ||
196 | |||
198 | if (!is_device_dma_coherent(dev)) { | 197 | if (!is_device_dma_coherent(dev)) { |
199 | if (__free_from_pool(vaddr, size)) | 198 | if (__free_from_pool(vaddr, size)) |
200 | return; | 199 | return; |
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c index ce7aea34fdf4..c18ddc74ef9a 100644 --- a/arch/m32r/kernel/smp.c +++ b/arch/m32r/kernel/smp.c | |||
@@ -45,7 +45,7 @@ static volatile unsigned long flushcache_cpumask = 0; | |||
45 | /* | 45 | /* |
46 | * For flush_tlb_others() | 46 | * For flush_tlb_others() |
47 | */ | 47 | */ |
48 | static volatile cpumask_t flush_cpumask; | 48 | static cpumask_t flush_cpumask; |
49 | static struct mm_struct *flush_mm; | 49 | static struct mm_struct *flush_mm; |
50 | static struct vm_area_struct *flush_vma; | 50 | static struct vm_area_struct *flush_vma; |
51 | static volatile unsigned long flush_va; | 51 | static volatile unsigned long flush_va; |
@@ -415,7 +415,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, | |||
415 | */ | 415 | */ |
416 | send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0); | 416 | send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0); |
417 | 417 | ||
418 | while (!cpumask_empty((cpumask_t*)&flush_cpumask)) { | 418 | while (!cpumask_empty(&flush_cpumask)) { |
419 | /* nothing. lockup detection does not belong here */ | 419 | /* nothing. lockup detection does not belong here */ |
420 | mb(); | 420 | mb(); |
421 | } | 421 | } |
@@ -468,7 +468,7 @@ void smp_invalidate_interrupt(void) | |||
468 | __flush_tlb_page(va); | 468 | __flush_tlb_page(va); |
469 | } | 469 | } |
470 | } | 470 | } |
471 | cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask); | 471 | cpumask_clear_cpu(cpu_id, &flush_cpumask); |
472 | } | 472 | } |
473 | 473 | ||
474 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | 474 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ |
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h index 5047659815a5..5d836b7c1176 100644 --- a/arch/powerpc/include/uapi/asm/tm.h +++ b/arch/powerpc/include/uapi/asm/tm.h | |||
@@ -11,7 +11,7 @@ | |||
11 | #define TM_CAUSE_RESCHED 0xde | 11 | #define TM_CAUSE_RESCHED 0xde |
12 | #define TM_CAUSE_TLBI 0xdc | 12 | #define TM_CAUSE_TLBI 0xdc |
13 | #define TM_CAUSE_FAC_UNAV 0xda | 13 | #define TM_CAUSE_FAC_UNAV 0xda |
14 | #define TM_CAUSE_SYSCALL 0xd8 | 14 | #define TM_CAUSE_SYSCALL 0xd8 /* future use */ |
15 | #define TM_CAUSE_MISC 0xd6 /* future use */ | 15 | #define TM_CAUSE_MISC 0xd6 /* future use */ |
16 | #define TM_CAUSE_SIGNAL 0xd4 | 16 | #define TM_CAUSE_SIGNAL 0xd4 |
17 | #define TM_CAUSE_ALIGNMENT 0xd2 | 17 | #define TM_CAUSE_ALIGNMENT 0xd2 |
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 44b480e3a5af..9ee61d15653d 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c | |||
@@ -749,21 +749,24 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat | |||
749 | eeh_unfreeze_pe(pe, false); | 749 | eeh_unfreeze_pe(pe, false); |
750 | eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); | 750 | eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); |
751 | eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev); | 751 | eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev); |
752 | eeh_pe_state_clear(pe, EEH_PE_ISOLATED); | ||
752 | break; | 753 | break; |
753 | case pcie_hot_reset: | 754 | case pcie_hot_reset: |
755 | eeh_pe_state_mark(pe, EEH_PE_ISOLATED); | ||
754 | eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); | 756 | eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); |
755 | eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); | 757 | eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); |
756 | eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); | 758 | eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); |
757 | eeh_ops->reset(pe, EEH_RESET_HOT); | 759 | eeh_ops->reset(pe, EEH_RESET_HOT); |
758 | break; | 760 | break; |
759 | case pcie_warm_reset: | 761 | case pcie_warm_reset: |
762 | eeh_pe_state_mark(pe, EEH_PE_ISOLATED); | ||
760 | eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); | 763 | eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); |
761 | eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); | 764 | eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev); |
762 | eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); | 765 | eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); |
763 | eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); | 766 | eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); |
764 | break; | 767 | break; |
765 | default: | 768 | default: |
766 | eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); | 769 | eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED); |
767 | return -EINVAL; | 770 | return -EINVAL; |
768 | }; | 771 | }; |
769 | 772 | ||
@@ -1058,6 +1061,9 @@ void eeh_add_device_early(struct pci_dn *pdn) | |||
1058 | if (!edev || !eeh_enabled()) | 1061 | if (!edev || !eeh_enabled()) |
1059 | return; | 1062 | return; |
1060 | 1063 | ||
1064 | if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE)) | ||
1065 | return; | ||
1066 | |||
1061 | /* USB Bus children of PCI devices will not have BUID's */ | 1067 | /* USB Bus children of PCI devices will not have BUID's */ |
1062 | phb = edev->phb; | 1068 | phb = edev->phb; |
1063 | if (NULL == phb || | 1069 | if (NULL == phb || |
@@ -1112,6 +1118,9 @@ void eeh_add_device_late(struct pci_dev *dev) | |||
1112 | return; | 1118 | return; |
1113 | } | 1119 | } |
1114 | 1120 | ||
1121 | if (eeh_has_flag(EEH_PROBE_MODE_DEV)) | ||
1122 | eeh_ops->probe(pdn, NULL); | ||
1123 | |||
1115 | /* | 1124 | /* |
1116 | * The EEH cache might not be removed correctly because of | 1125 | * The EEH cache might not be removed correctly because of |
1117 | * unbalanced kref to the device during unplug time, which | 1126 | * unbalanced kref to the device during unplug time, which |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 8ca9434c40e6..afbc20019c2e 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <asm/ftrace.h> | 34 | #include <asm/ftrace.h> |
35 | #include <asm/hw_irq.h> | 35 | #include <asm/hw_irq.h> |
36 | #include <asm/context_tracking.h> | 36 | #include <asm/context_tracking.h> |
37 | #include <asm/tm.h> | ||
38 | 37 | ||
39 | /* | 38 | /* |
40 | * System calls. | 39 | * System calls. |
@@ -146,24 +145,6 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) | |||
146 | andi. r11,r10,_TIF_SYSCALL_DOTRACE | 145 | andi. r11,r10,_TIF_SYSCALL_DOTRACE |
147 | bne syscall_dotrace | 146 | bne syscall_dotrace |
148 | .Lsyscall_dotrace_cont: | 147 | .Lsyscall_dotrace_cont: |
149 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
150 | BEGIN_FTR_SECTION | ||
151 | b 1f | ||
152 | END_FTR_SECTION_IFCLR(CPU_FTR_TM) | ||
153 | extrdi. r11, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */ | ||
154 | beq+ 1f | ||
155 | |||
156 | /* Doom the transaction and don't perform the syscall: */ | ||
157 | mfmsr r11 | ||
158 | li r12, 1 | ||
159 | rldimi r11, r12, MSR_TM_LG, 63-MSR_TM_LG | ||
160 | mtmsrd r11, 0 | ||
161 | li r11, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT) | ||
162 | TABORT(R11) | ||
163 | |||
164 | b .Lsyscall_exit | ||
165 | 1: | ||
166 | #endif | ||
167 | cmpldi 0,r0,NR_syscalls | 148 | cmpldi 0,r0,NR_syscalls |
168 | bge- syscall_enosys | 149 | bge- syscall_enosys |
169 | 150 | ||
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index eeaa0d5f69d5..ccde8f084ce4 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S | |||
@@ -501,9 +501,11 @@ BEGIN_FTR_SECTION | |||
501 | CHECK_HMI_INTERRUPT | 501 | CHECK_HMI_INTERRUPT |
502 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) | 502 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) |
503 | ld r1,PACAR1(r13) | 503 | ld r1,PACAR1(r13) |
504 | ld r6,_CCR(r1) | ||
504 | ld r4,_MSR(r1) | 505 | ld r4,_MSR(r1) |
505 | ld r5,_NIP(r1) | 506 | ld r5,_NIP(r1) |
506 | addi r1,r1,INT_FRAME_SIZE | 507 | addi r1,r1,INT_FRAME_SIZE |
508 | mtcr r6 | ||
507 | mtspr SPRN_SRR1,r4 | 509 | mtspr SPRN_SRR1,r4 |
508 | mtspr SPRN_SRR0,r5 | 510 | mtspr SPRN_SRR0,r5 |
509 | rfid | 511 | rfid |
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index 8f3e6cc54d95..c6ca7db64673 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/err.h> | 12 | #include <linux/err.h> |
13 | #include <linux/gfp.h> | 13 | #include <linux/gfp.h> |
14 | #include <linux/anon_inodes.h> | 14 | #include <linux/anon_inodes.h> |
15 | #include <linux/spinlock.h> | ||
15 | 16 | ||
16 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
17 | #include <asm/kvm_book3s.h> | 18 | #include <asm/kvm_book3s.h> |
@@ -20,7 +21,6 @@ | |||
20 | #include <asm/xics.h> | 21 | #include <asm/xics.h> |
21 | #include <asm/debug.h> | 22 | #include <asm/debug.h> |
22 | #include <asm/time.h> | 23 | #include <asm/time.h> |
23 | #include <asm/spinlock.h> | ||
24 | 24 | ||
25 | #include <linux/debugfs.h> | 25 | #include <linux/debugfs.h> |
26 | #include <linux/seq_file.h> | 26 | #include <linux/seq_file.h> |
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 920c252d1f49..f8bc950efcae 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -2693,7 +2693,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, | |||
2693 | hose->last_busno = 0xff; | 2693 | hose->last_busno = 0xff; |
2694 | } | 2694 | } |
2695 | hose->private_data = phb; | 2695 | hose->private_data = phb; |
2696 | hose->controller_ops = pnv_pci_controller_ops; | ||
2697 | phb->hub_id = hub_id; | 2696 | phb->hub_id = hub_id; |
2698 | phb->opal_id = phb_id; | 2697 | phb->opal_id = phb_id; |
2699 | phb->type = ioda_type; | 2698 | phb->type = ioda_type; |
@@ -2812,6 +2811,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np, | |||
2812 | pnv_pci_controller_ops.enable_device_hook = pnv_pci_enable_device_hook; | 2811 | pnv_pci_controller_ops.enable_device_hook = pnv_pci_enable_device_hook; |
2813 | pnv_pci_controller_ops.window_alignment = pnv_pci_window_alignment; | 2812 | pnv_pci_controller_ops.window_alignment = pnv_pci_window_alignment; |
2814 | pnv_pci_controller_ops.reset_secondary_bus = pnv_pci_reset_secondary_bus; | 2813 | pnv_pci_controller_ops.reset_secondary_bus = pnv_pci_reset_secondary_bus; |
2814 | hose->controller_ops = pnv_pci_controller_ops; | ||
2815 | 2815 | ||
2816 | #ifdef CONFIG_PCI_IOV | 2816 | #ifdef CONFIG_PCI_IOV |
2817 | ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources; | 2817 | ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources; |
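The pci-ioda.c hunks above move the hose->controller_ops assignment below the statements that fill in the pnv_pci_controller_ops hooks. Assuming controller_ops is embedded in the controller by value (which the reordering implies), copying the template before its hooks are set would capture empty function pointers. A minimal user-space sketch of that pitfall, illustrative only and not taken from the kernel tree:

#include <stdio.h>

struct ops  { void (*hook)(void); };
struct host { struct ops ops; };

static void real_hook(void) { puts("hook called"); }

int main(void)
{
	struct ops template = { 0 };
	struct host early, late;

	early.ops = template;        /* copy before the hook is filled in */
	template.hook = real_hook;   /* now populate the template         */
	late.ops = template;         /* copy after it is populated        */

	printf("early copy: hook %s\n", early.ops.hook ? "set" : "unset");
	printf("late copy:  hook %s\n", late.ops.hook  ? "set" : "unset");
	return 0;
}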
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index b4b11096ea8b..019d34aaf054 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c | |||
@@ -412,6 +412,10 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) | |||
412 | if (rc) | 412 | if (rc) |
413 | return -EINVAL; | 413 | return -EINVAL; |
414 | 414 | ||
415 | rc = dlpar_acquire_drc(drc_index); | ||
416 | if (rc) | ||
417 | return -EINVAL; | ||
418 | |||
415 | parent = of_find_node_by_path("/cpus"); | 419 | parent = of_find_node_by_path("/cpus"); |
416 | if (!parent) | 420 | if (!parent) |
417 | return -ENODEV; | 421 | return -ENODEV; |
@@ -422,12 +426,6 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) | |||
422 | 426 | ||
423 | of_node_put(parent); | 427 | of_node_put(parent); |
424 | 428 | ||
425 | rc = dlpar_acquire_drc(drc_index); | ||
426 | if (rc) { | ||
427 | dlpar_free_cc_nodes(dn); | ||
428 | return -EINVAL; | ||
429 | } | ||
430 | |||
431 | rc = dlpar_attach_node(dn); | 429 | rc = dlpar_attach_node(dn); |
432 | if (rc) { | 430 | if (rc) { |
433 | dlpar_release_drc(drc_index); | 431 | dlpar_release_drc(drc_index); |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 8e58c614c37d..b06dc3839268 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -115,7 +115,7 @@ config S390 | |||
115 | select HAVE_ARCH_SECCOMP_FILTER | 115 | select HAVE_ARCH_SECCOMP_FILTER |
116 | select HAVE_ARCH_TRACEHOOK | 116 | select HAVE_ARCH_TRACEHOOK |
117 | select HAVE_ARCH_TRANSPARENT_HUGEPAGE | 117 | select HAVE_ARCH_TRANSPARENT_HUGEPAGE |
118 | select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z9_109_FEATURES | 118 | select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES |
119 | select HAVE_CMPXCHG_DOUBLE | 119 | select HAVE_CMPXCHG_DOUBLE |
120 | select HAVE_CMPXCHG_LOCAL | 120 | select HAVE_CMPXCHG_LOCAL |
121 | select HAVE_DEBUG_KMEMLEAK | 121 | select HAVE_DEBUG_KMEMLEAK |
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h index ba3b2aefddf5..d9c4c313fbc6 100644 --- a/arch/s390/crypto/crypt_s390.h +++ b/arch/s390/crypto/crypt_s390.h | |||
@@ -3,9 +3,10 @@ | |||
3 | * | 3 | * |
4 | * Support for s390 cryptographic instructions. | 4 | * Support for s390 cryptographic instructions. |
5 | * | 5 | * |
6 | * Copyright IBM Corp. 2003, 2007 | 6 | * Copyright IBM Corp. 2003, 2015 |
7 | * Author(s): Thomas Spatzier | 7 | * Author(s): Thomas Spatzier |
8 | * Jan Glauber (jan.glauber@de.ibm.com) | 8 | * Jan Glauber (jan.glauber@de.ibm.com) |
9 | * Harald Freudenberger (freude@de.ibm.com) | ||
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify it | 11 | * This program is free software; you can redistribute it and/or modify it |
11 | * under the terms of the GNU General Public License as published by the Free | 12 | * under the terms of the GNU General Public License as published by the Free |
@@ -28,15 +29,17 @@ | |||
28 | #define CRYPT_S390_MSA 0x1 | 29 | #define CRYPT_S390_MSA 0x1 |
29 | #define CRYPT_S390_MSA3 0x2 | 30 | #define CRYPT_S390_MSA3 0x2 |
30 | #define CRYPT_S390_MSA4 0x4 | 31 | #define CRYPT_S390_MSA4 0x4 |
32 | #define CRYPT_S390_MSA5 0x8 | ||
31 | 33 | ||
32 | /* s390 cryptographic operations */ | 34 | /* s390 cryptographic operations */ |
33 | enum crypt_s390_operations { | 35 | enum crypt_s390_operations { |
34 | CRYPT_S390_KM = 0x0100, | 36 | CRYPT_S390_KM = 0x0100, |
35 | CRYPT_S390_KMC = 0x0200, | 37 | CRYPT_S390_KMC = 0x0200, |
36 | CRYPT_S390_KIMD = 0x0300, | 38 | CRYPT_S390_KIMD = 0x0300, |
37 | CRYPT_S390_KLMD = 0x0400, | 39 | CRYPT_S390_KLMD = 0x0400, |
38 | CRYPT_S390_KMAC = 0x0500, | 40 | CRYPT_S390_KMAC = 0x0500, |
39 | CRYPT_S390_KMCTR = 0x0600 | 41 | CRYPT_S390_KMCTR = 0x0600, |
42 | CRYPT_S390_PPNO = 0x0700 | ||
40 | }; | 43 | }; |
41 | 44 | ||
42 | /* | 45 | /* |
@@ -138,6 +141,16 @@ enum crypt_s390_kmac_func { | |||
138 | KMAC_TDEA_192 = CRYPT_S390_KMAC | 3 | 141 | KMAC_TDEA_192 = CRYPT_S390_KMAC | 3 |
139 | }; | 142 | }; |
140 | 143 | ||
144 | /* | ||
145 | * function codes for PPNO (PERFORM PSEUDORANDOM NUMBER | ||
146 | * OPERATION) instruction | ||
147 | */ | ||
148 | enum crypt_s390_ppno_func { | ||
149 | PPNO_QUERY = CRYPT_S390_PPNO | 0, | ||
150 | PPNO_SHA512_DRNG_GEN = CRYPT_S390_PPNO | 3, | ||
151 | PPNO_SHA512_DRNG_SEED = CRYPT_S390_PPNO | 0x83 | ||
152 | }; | ||
153 | |||
141 | /** | 154 | /** |
142 | * crypt_s390_km: | 155 | * crypt_s390_km: |
143 | * @func: the function code passed to KM; see crypt_s390_km_func | 156 | * @func: the function code passed to KM; see crypt_s390_km_func |
@@ -162,11 +175,11 @@ static inline int crypt_s390_km(long func, void *param, | |||
162 | int ret; | 175 | int ret; |
163 | 176 | ||
164 | asm volatile( | 177 | asm volatile( |
165 | "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */ | 178 | "0: .insn rre,0xb92e0000,%3,%1\n" /* KM opcode */ |
166 | "1: brc 1,0b \n" /* handle partial completion */ | 179 | "1: brc 1,0b\n" /* handle partial completion */ |
167 | " la %0,0\n" | 180 | " la %0,0\n" |
168 | "2:\n" | 181 | "2:\n" |
169 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) | 182 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) |
170 | : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) | 183 | : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) |
171 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); | 184 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); |
172 | if (ret < 0) | 185 | if (ret < 0) |
@@ -198,11 +211,11 @@ static inline int crypt_s390_kmc(long func, void *param, | |||
198 | int ret; | 211 | int ret; |
199 | 212 | ||
200 | asm volatile( | 213 | asm volatile( |
201 | "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */ | 214 | "0: .insn rre,0xb92f0000,%3,%1\n" /* KMC opcode */ |
202 | "1: brc 1,0b \n" /* handle partial completion */ | 215 | "1: brc 1,0b\n" /* handle partial completion */ |
203 | " la %0,0\n" | 216 | " la %0,0\n" |
204 | "2:\n" | 217 | "2:\n" |
205 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) | 218 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) |
206 | : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) | 219 | : "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest) |
207 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); | 220 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); |
208 | if (ret < 0) | 221 | if (ret < 0) |
@@ -233,11 +246,11 @@ static inline int crypt_s390_kimd(long func, void *param, | |||
233 | int ret; | 246 | int ret; |
234 | 247 | ||
235 | asm volatile( | 248 | asm volatile( |
236 | "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */ | 249 | "0: .insn rre,0xb93e0000,%1,%1\n" /* KIMD opcode */ |
237 | "1: brc 1,0b \n" /* handle partial completion */ | 250 | "1: brc 1,0b\n" /* handle partial completion */ |
238 | " la %0,0\n" | 251 | " la %0,0\n" |
239 | "2:\n" | 252 | "2:\n" |
240 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) | 253 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) |
241 | : "=d" (ret), "+a" (__src), "+d" (__src_len) | 254 | : "=d" (ret), "+a" (__src), "+d" (__src_len) |
242 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); | 255 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); |
243 | if (ret < 0) | 256 | if (ret < 0) |
@@ -267,11 +280,11 @@ static inline int crypt_s390_klmd(long func, void *param, | |||
267 | int ret; | 280 | int ret; |
268 | 281 | ||
269 | asm volatile( | 282 | asm volatile( |
270 | "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */ | 283 | "0: .insn rre,0xb93f0000,%1,%1\n" /* KLMD opcode */ |
271 | "1: brc 1,0b \n" /* handle partial completion */ | 284 | "1: brc 1,0b\n" /* handle partial completion */ |
272 | " la %0,0\n" | 285 | " la %0,0\n" |
273 | "2:\n" | 286 | "2:\n" |
274 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) | 287 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) |
275 | : "=d" (ret), "+a" (__src), "+d" (__src_len) | 288 | : "=d" (ret), "+a" (__src), "+d" (__src_len) |
276 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); | 289 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); |
277 | if (ret < 0) | 290 | if (ret < 0) |
@@ -302,11 +315,11 @@ static inline int crypt_s390_kmac(long func, void *param, | |||
302 | int ret; | 315 | int ret; |
303 | 316 | ||
304 | asm volatile( | 317 | asm volatile( |
305 | "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */ | 318 | "0: .insn rre,0xb91e0000,%1,%1\n" /* KLAC opcode */ |
306 | "1: brc 1,0b \n" /* handle partial completion */ | 319 | "1: brc 1,0b\n" /* handle partial completion */ |
307 | " la %0,0\n" | 320 | " la %0,0\n" |
308 | "2:\n" | 321 | "2:\n" |
309 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) | 322 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) |
310 | : "=d" (ret), "+a" (__src), "+d" (__src_len) | 323 | : "=d" (ret), "+a" (__src), "+d" (__src_len) |
311 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); | 324 | : "d" (__func), "a" (__param), "0" (-1) : "cc", "memory"); |
312 | if (ret < 0) | 325 | if (ret < 0) |
@@ -340,11 +353,11 @@ static inline int crypt_s390_kmctr(long func, void *param, u8 *dest, | |||
340 | int ret = -1; | 353 | int ret = -1; |
341 | 354 | ||
342 | asm volatile( | 355 | asm volatile( |
343 | "0: .insn rrf,0xb92d0000,%3,%1,%4,0 \n" /* KMCTR opcode */ | 356 | "0: .insn rrf,0xb92d0000,%3,%1,%4,0\n" /* KMCTR opcode */ |
344 | "1: brc 1,0b \n" /* handle partial completion */ | 357 | "1: brc 1,0b\n" /* handle partial completion */ |
345 | " la %0,0\n" | 358 | " la %0,0\n" |
346 | "2:\n" | 359 | "2:\n" |
347 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) | 360 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) |
348 | : "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest), | 361 | : "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest), |
349 | "+a" (__ctr) | 362 | "+a" (__ctr) |
350 | : "d" (__func), "a" (__param) : "cc", "memory"); | 363 | : "d" (__func), "a" (__param) : "cc", "memory"); |
@@ -354,6 +367,47 @@ static inline int crypt_s390_kmctr(long func, void *param, u8 *dest, | |||
354 | } | 367 | } |
355 | 368 | ||
356 | /** | 369 | /** |
370 | * crypt_s390_ppno: | ||
371 | * @func: the function code passed to PPNO; see crypt_s390_ppno_func | ||
372 | * @param: address of parameter block; see POP for details on each func | ||
373 | * @dest: address of destination memory area | ||
374 | * @dest_len: size of destination memory area in bytes | ||
375 | * @seed: address of seed data | ||
376 | * @seed_len: size of seed data in bytes | ||
377 | * | ||
378 | * Executes the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION) | ||
379 | * operation of the CPU. | ||
380 | * | ||
381 | * Returns -1 for failure, 0 for the query func, number of random | ||
382 | * bytes stored in dest buffer for generate function | ||
383 | */ | ||
384 | static inline int crypt_s390_ppno(long func, void *param, | ||
385 | u8 *dest, long dest_len, | ||
386 | const u8 *seed, long seed_len) | ||
387 | { | ||
388 | register long __func asm("0") = func & CRYPT_S390_FUNC_MASK; | ||
389 | register void *__param asm("1") = param; /* param block (240 bytes) */ | ||
390 | register u8 *__dest asm("2") = dest; /* buf for recv random bytes */ | ||
391 | register long __dest_len asm("3") = dest_len; /* requested random bytes */ | ||
392 | register const u8 *__seed asm("4") = seed; /* buf with seed data */ | ||
393 | register long __seed_len asm("5") = seed_len; /* bytes in seed buf */ | ||
394 | int ret = -1; | ||
395 | |||
396 | asm volatile ( | ||
397 | "0: .insn rre,0xb93c0000,%1,%5\n" /* PPNO opcode */ | ||
398 | "1: brc 1,0b\n" /* handle partial completion */ | ||
399 | " la %0,0\n" | ||
400 | "2:\n" | ||
401 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) | ||
402 | : "+d" (ret), "+a"(__dest), "+d"(__dest_len) | ||
403 | : "d"(__func), "a"(__param), "a"(__seed), "d"(__seed_len) | ||
404 | : "cc", "memory"); | ||
405 | if (ret < 0) | ||
406 | return ret; | ||
407 | return (func & CRYPT_S390_FUNC_MASK) ? dest_len - __dest_len : 0; | ||
408 | } | ||
409 | |||
410 | /** | ||
357 | * crypt_s390_func_available: | 411 | * crypt_s390_func_available: |
358 | * @func: the function code of the specific function; 0 if op in general | 412 | * @func: the function code of the specific function; 0 if op in general |
359 | * | 413 | * |
@@ -373,6 +427,9 @@ static inline int crypt_s390_func_available(int func, | |||
373 | return 0; | 427 | return 0; |
374 | if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77)) | 428 | if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77)) |
375 | return 0; | 429 | return 0; |
430 | if (facility_mask & CRYPT_S390_MSA5 && !test_facility(57)) | ||
431 | return 0; | ||
432 | |||
376 | switch (func & CRYPT_S390_OP_MASK) { | 433 | switch (func & CRYPT_S390_OP_MASK) { |
377 | case CRYPT_S390_KM: | 434 | case CRYPT_S390_KM: |
378 | ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); | 435 | ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0); |
@@ -390,8 +447,12 @@ static inline int crypt_s390_func_available(int func, | |||
390 | ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0); | 447 | ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0); |
391 | break; | 448 | break; |
392 | case CRYPT_S390_KMCTR: | 449 | case CRYPT_S390_KMCTR: |
393 | ret = crypt_s390_kmctr(KMCTR_QUERY, &status, NULL, NULL, 0, | 450 | ret = crypt_s390_kmctr(KMCTR_QUERY, &status, |
394 | NULL); | 451 | NULL, NULL, 0, NULL); |
452 | break; | ||
453 | case CRYPT_S390_PPNO: | ||
454 | ret = crypt_s390_ppno(PPNO_QUERY, &status, | ||
455 | NULL, 0, NULL, 0); | ||
395 | break; | 456 | break; |
396 | default: | 457 | default: |
397 | return 0; | 458 | return 0; |
@@ -419,15 +480,14 @@ static inline int crypt_s390_pcc(long func, void *param) | |||
419 | int ret = -1; | 480 | int ret = -1; |
420 | 481 | ||
421 | asm volatile( | 482 | asm volatile( |
422 | "0: .insn rre,0xb92c0000,0,0 \n" /* PCC opcode */ | 483 | "0: .insn rre,0xb92c0000,0,0\n" /* PCC opcode */ |
423 | "1: brc 1,0b \n" /* handle partial completion */ | 484 | "1: brc 1,0b\n" /* handle partial completion */ |
424 | " la %0,0\n" | 485 | " la %0,0\n" |
425 | "2:\n" | 486 | "2:\n" |
426 | EX_TABLE(0b,2b) EX_TABLE(1b,2b) | 487 | EX_TABLE(0b, 2b) EX_TABLE(1b, 2b) |
427 | : "+d" (ret) | 488 | : "+d" (ret) |
428 | : "d" (__func), "a" (__param) : "cc", "memory"); | 489 | : "d" (__func), "a" (__param) : "cc", "memory"); |
429 | return ret; | 490 | return ret; |
430 | } | 491 | } |
431 | 492 | ||
432 | |||
433 | #endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */ | 493 | #endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */ |
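The crypt_s390.h hunks above add the CRYPT_S390_MSA5 facility bit, the PPNO function codes and the crypt_s390_ppno() wrapper; the prng driver further down in this patch is their consumer. A hedged sketch of the calling sequence it follows (ppno_sha512_example is a hypothetical helper, kernel context assumed, error handling trimmed to the essentials):

#include <linux/types.h>
#include <linux/errno.h>
#include "crypt_s390.h"

/* sketch only: seed the SHA-512 DRNG once, then pull outlen bytes */
static int ppno_sha512_example(u8 *out, long outlen,
			       const u8 *seed, long seedlen)
{
	u8 ws[240] = { 0 };	/* PPNO parameter block / working state */
	long ret;

	/* MSA5 (facility 57) and the PPNO query bit must both be present */
	if (!crypt_s390_func_available(PPNO_SHA512_DRNG_GEN, CRYPT_S390_MSA5))
		return -EOPNOTSUPP;

	/* instantiate: feed an entropy string into the working state */
	ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED, ws, NULL, 0,
			      seed, seedlen);
	if (ret < 0)
		return -EIO;

	/* generate: the return value is the number of bytes produced */
	ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN, ws, out, outlen,
			      NULL, 0);
	return ret == outlen ? 0 : -EIO;
}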
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c index 94a35a4c1b48..1f374b39a4ec 100644 --- a/arch/s390/crypto/prng.c +++ b/arch/s390/crypto/prng.c | |||
@@ -1,106 +1,529 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright IBM Corp. 2006, 2007 | 2 | * Copyright IBM Corp. 2006, 2015 |
3 | * Author(s): Jan Glauber <jan.glauber@de.ibm.com> | 3 | * Author(s): Jan Glauber <jan.glauber@de.ibm.com> |
4 | * Harald Freudenberger <freude@de.ibm.com> | ||
4 | * Driver for the s390 pseudo random number generator | 5 | * Driver for the s390 pseudo random number generator |
5 | */ | 6 | */ |
7 | |||
8 | #define KMSG_COMPONENT "prng" | ||
9 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
10 | |||
6 | #include <linux/fs.h> | 11 | #include <linux/fs.h> |
12 | #include <linux/fips.h> | ||
7 | #include <linux/init.h> | 13 | #include <linux/init.h> |
8 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
15 | #include <linux/device.h> | ||
9 | #include <linux/miscdevice.h> | 16 | #include <linux/miscdevice.h> |
10 | #include <linux/module.h> | 17 | #include <linux/module.h> |
11 | #include <linux/moduleparam.h> | 18 | #include <linux/moduleparam.h> |
19 | #include <linux/mutex.h> | ||
12 | #include <linux/random.h> | 20 | #include <linux/random.h> |
13 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
14 | #include <asm/debug.h> | 22 | #include <asm/debug.h> |
15 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
24 | #include <asm/timex.h> | ||
16 | 25 | ||
17 | #include "crypt_s390.h" | 26 | #include "crypt_s390.h" |
18 | 27 | ||
19 | MODULE_LICENSE("GPL"); | 28 | MODULE_LICENSE("GPL"); |
20 | MODULE_AUTHOR("Jan Glauber <jan.glauber@de.ibm.com>"); | 29 | MODULE_AUTHOR("IBM Corporation"); |
21 | MODULE_DESCRIPTION("s390 PRNG interface"); | 30 | MODULE_DESCRIPTION("s390 PRNG interface"); |
22 | 31 | ||
23 | static int prng_chunk_size = 256; | 32 | |
24 | module_param(prng_chunk_size, int, S_IRUSR | S_IRGRP | S_IROTH); | 33 | #define PRNG_MODE_AUTO 0 |
34 | #define PRNG_MODE_TDES 1 | ||
35 | #define PRNG_MODE_SHA512 2 | ||
36 | |||
37 | static unsigned int prng_mode = PRNG_MODE_AUTO; | ||
38 | module_param_named(mode, prng_mode, int, 0); | ||
39 | MODULE_PARM_DESC(prng_mode, "PRNG mode: 0 - auto, 1 - TDES, 2 - SHA512"); | ||
40 | |||
41 | |||
42 | #define PRNG_CHUNKSIZE_TDES_MIN 8 | ||
43 | #define PRNG_CHUNKSIZE_TDES_MAX (64*1024) | ||
44 | #define PRNG_CHUNKSIZE_SHA512_MIN 64 | ||
45 | #define PRNG_CHUNKSIZE_SHA512_MAX (64*1024) | ||
46 | |||
47 | static unsigned int prng_chunk_size = 256; | ||
48 | module_param_named(chunksize, prng_chunk_size, int, 0); | ||
25 | MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes"); | 49 | MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes"); |
26 | 50 | ||
27 | static int prng_entropy_limit = 4096; | 51 | |
28 | module_param(prng_entropy_limit, int, S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR); | 52 | #define PRNG_RESEED_LIMIT_TDES 4096 |
29 | MODULE_PARM_DESC(prng_entropy_limit, | 53 | #define PRNG_RESEED_LIMIT_TDES_LOWER 4096 |
30 | "PRNG add entropy after that much bytes were produced"); | 54 | #define PRNG_RESEED_LIMIT_SHA512 100000 |
55 | #define PRNG_RESEED_LIMIT_SHA512_LOWER 10000 | ||
56 | |||
57 | static unsigned int prng_reseed_limit; | ||
58 | module_param_named(reseed_limit, prng_reseed_limit, int, 0); | ||
59 | MODULE_PARM_DESC(prng_reseed_limit, "PRNG reseed limit"); | ||
60 | |||
31 | 61 | ||
32 | /* | 62 | /* |
33 | * Any one who considers arithmetical methods of producing random digits is, | 63 | * Any one who considers arithmetical methods of producing random digits is, |
34 | * of course, in a state of sin. -- John von Neumann | 64 | * of course, in a state of sin. -- John von Neumann |
35 | */ | 65 | */ |
36 | 66 | ||
37 | struct s390_prng_data { | 67 | static int prng_errorflag; |
38 | unsigned long count; /* how many bytes were produced */ | 68 | |
39 | char *buf; | 69 | #define PRNG_GEN_ENTROPY_FAILED 1 |
70 | #define PRNG_SELFTEST_FAILED 2 | ||
71 | #define PRNG_INSTANTIATE_FAILED 3 | ||
72 | #define PRNG_SEED_FAILED 4 | ||
73 | #define PRNG_RESEED_FAILED 5 | ||
74 | #define PRNG_GEN_FAILED 6 | ||
75 | |||
76 | struct prng_ws_s { | ||
77 | u8 parm_block[32]; | ||
78 | u32 reseed_counter; | ||
79 | u64 byte_counter; | ||
40 | }; | 80 | }; |
41 | 81 | ||
42 | static struct s390_prng_data *p; | 82 | struct ppno_ws_s { |
83 | u32 res; | ||
84 | u32 reseed_counter; | ||
85 | u64 stream_bytes; | ||
86 | u8 V[112]; | ||
87 | u8 C[112]; | ||
88 | }; | ||
43 | 89 | ||
44 | /* copied from libica, use a non-zero initial parameter block */ | 90 | struct prng_data_s { |
45 | static unsigned char parm_block[32] = { | 91 | struct mutex mutex; |
46 | 0x0F,0x2B,0x8E,0x63,0x8C,0x8E,0xD2,0x52,0x64,0xB7,0xA0,0x7B,0x75,0x28,0xB8,0xF4, | 92 | union { |
47 | 0x75,0x5F,0xD2,0xA6,0x8D,0x97,0x11,0xFF,0x49,0xD8,0x23,0xF3,0x7E,0x21,0xEC,0xA0, | 93 | struct prng_ws_s prngws; |
94 | struct ppno_ws_s ppnows; | ||
95 | }; | ||
96 | u8 *buf; | ||
97 | u32 rest; | ||
98 | u8 *prev; | ||
48 | }; | 99 | }; |
49 | 100 | ||
50 | static int prng_open(struct inode *inode, struct file *file) | 101 | static struct prng_data_s *prng_data; |
102 | |||
103 | /* initial parameter block for tdes mode, copied from libica */ | ||
104 | static const u8 initial_parm_block[32] __initconst = { | ||
105 | 0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52, | ||
106 | 0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4, | ||
107 | 0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF, | ||
108 | 0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0 }; | ||
109 | |||
110 | |||
111 | /*** helper functions ***/ | ||
112 | |||
113 | static int generate_entropy(u8 *ebuf, size_t nbytes) | ||
51 | { | 114 | { |
52 | return nonseekable_open(inode, file); | 115 | int n, ret = 0; |
116 | u8 *pg, *h, hash[32]; | ||
117 | |||
118 | pg = (u8 *) __get_free_page(GFP_KERNEL); | ||
119 | if (!pg) { | ||
120 | prng_errorflag = PRNG_GEN_ENTROPY_FAILED; | ||
121 | return -ENOMEM; | ||
122 | } | ||
123 | |||
124 | while (nbytes) { | ||
125 | /* fill page with urandom bytes */ | ||
126 | get_random_bytes(pg, PAGE_SIZE); | ||
127 | /* exor page with stckf values */ | ||
128 | for (n = 0; n < PAGE_SIZE/sizeof(u64); n++) { | ||
129 | u64 *p = ((u64 *)pg) + n; | ||
130 | *p ^= get_tod_clock_fast(); | ||
131 | } | ||
132 | n = (nbytes < sizeof(hash)) ? nbytes : sizeof(hash); | ||
133 | if (n < sizeof(hash)) | ||
134 | h = hash; | ||
135 | else | ||
136 | h = ebuf; | ||
137 | /* generate sha256 from this page */ | ||
138 | if (crypt_s390_kimd(KIMD_SHA_256, h, | ||
139 | pg, PAGE_SIZE) != PAGE_SIZE) { | ||
140 | prng_errorflag = PRNG_GEN_ENTROPY_FAILED; | ||
141 | ret = -EIO; | ||
142 | goto out; | ||
143 | } | ||
144 | if (n < sizeof(hash)) | ||
145 | memcpy(ebuf, hash, n); | ||
146 | ret += n; | ||
147 | ebuf += n; | ||
148 | nbytes -= n; | ||
149 | } | ||
150 | |||
151 | out: | ||
152 | free_page((unsigned long)pg); | ||
153 | return ret; | ||
53 | } | 154 | } |
54 | 155 | ||
55 | static void prng_add_entropy(void) | 156 | |
157 | /*** tdes functions ***/ | ||
158 | |||
159 | static void prng_tdes_add_entropy(void) | ||
56 | { | 160 | { |
57 | __u64 entropy[4]; | 161 | __u64 entropy[4]; |
58 | unsigned int i; | 162 | unsigned int i; |
59 | int ret; | 163 | int ret; |
60 | 164 | ||
61 | for (i = 0; i < 16; i++) { | 165 | for (i = 0; i < 16; i++) { |
62 | ret = crypt_s390_kmc(KMC_PRNG, parm_block, (char *)entropy, | 166 | ret = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block, |
63 | (char *)entropy, sizeof(entropy)); | 167 | (char *)entropy, (char *)entropy, |
168 | sizeof(entropy)); | ||
64 | BUG_ON(ret < 0 || ret != sizeof(entropy)); | 169 | BUG_ON(ret < 0 || ret != sizeof(entropy)); |
65 | memcpy(parm_block, entropy, sizeof(entropy)); | 170 | memcpy(prng_data->prngws.parm_block, entropy, sizeof(entropy)); |
66 | } | 171 | } |
67 | } | 172 | } |
68 | 173 | ||
69 | static void prng_seed(int nbytes) | 174 | |
175 | static void prng_tdes_seed(int nbytes) | ||
70 | { | 176 | { |
71 | char buf[16]; | 177 | char buf[16]; |
72 | int i = 0; | 178 | int i = 0; |
73 | 179 | ||
74 | BUG_ON(nbytes > 16); | 180 | BUG_ON(nbytes > sizeof(buf)); |
181 | |||
75 | get_random_bytes(buf, nbytes); | 182 | get_random_bytes(buf, nbytes); |
76 | 183 | ||
77 | /* Add the entropy */ | 184 | /* Add the entropy */ |
78 | while (nbytes >= 8) { | 185 | while (nbytes >= 8) { |
79 | *((__u64 *)parm_block) ^= *((__u64 *)(buf+i)); | 186 | *((__u64 *)prng_data->prngws.parm_block) ^= *((__u64 *)(buf+i)); |
80 | prng_add_entropy(); | 187 | prng_tdes_add_entropy(); |
81 | i += 8; | 188 | i += 8; |
82 | nbytes -= 8; | 189 | nbytes -= 8; |
83 | } | 190 | } |
84 | prng_add_entropy(); | 191 | prng_tdes_add_entropy(); |
192 | prng_data->prngws.reseed_counter = 0; | ||
193 | } | ||
194 | |||
195 | |||
196 | static int __init prng_tdes_instantiate(void) | ||
197 | { | ||
198 | int datalen; | ||
199 | |||
200 | pr_debug("prng runs in TDES mode with " | ||
201 | "chunksize=%d and reseed_limit=%u\n", | ||
202 | prng_chunk_size, prng_reseed_limit); | ||
203 | |||
204 | /* memory allocation, prng_data struct init, mutex init */ | ||
205 | datalen = sizeof(struct prng_data_s) + prng_chunk_size; | ||
206 | prng_data = kzalloc(datalen, GFP_KERNEL); | ||
207 | if (!prng_data) { | ||
208 | prng_errorflag = PRNG_INSTANTIATE_FAILED; | ||
209 | return -ENOMEM; | ||
210 | } | ||
211 | mutex_init(&prng_data->mutex); | ||
212 | prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s); | ||
213 | memcpy(prng_data->prngws.parm_block, initial_parm_block, 32); | ||
214 | |||
215 | /* initialize the PRNG, add 128 bits of entropy */ | ||
216 | prng_tdes_seed(16); | ||
217 | |||
218 | return 0; | ||
85 | } | 219 | } |
86 | 220 | ||
87 | static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes, | 221 | |
88 | loff_t *ppos) | 222 | static void prng_tdes_deinstantiate(void) |
223 | { | ||
224 | pr_debug("The prng module stopped " | ||
225 | "after running in triple DES mode\n"); | ||
226 | kzfree(prng_data); | ||
227 | } | ||
228 | |||
229 | |||
230 | /*** sha512 functions ***/ | ||
231 | |||
232 | static int __init prng_sha512_selftest(void) | ||
89 | { | 233 | { |
90 | int chunk, n; | 234 | /* NIST DRBG test vector for Hash DRBG, SHA-512, Count #0 */ |
235 | static const u8 seed[] __initconst = { | ||
236 | 0x6b, 0x50, 0xa7, 0xd8, 0xf8, 0xa5, 0x5d, 0x7a, | ||
237 | 0x3d, 0xf8, 0xbb, 0x40, 0xbc, 0xc3, 0xb7, 0x22, | ||
238 | 0xd8, 0x70, 0x8d, 0xe6, 0x7f, 0xda, 0x01, 0x0b, | ||
239 | 0x03, 0xc4, 0xc8, 0x4d, 0x72, 0x09, 0x6f, 0x8c, | ||
240 | 0x3e, 0xc6, 0x49, 0xcc, 0x62, 0x56, 0xd9, 0xfa, | ||
241 | 0x31, 0xdb, 0x7a, 0x29, 0x04, 0xaa, 0xf0, 0x25 }; | ||
242 | static const u8 V0[] __initconst = { | ||
243 | 0x00, 0xad, 0xe3, 0x6f, 0x9a, 0x01, 0xc7, 0x76, | ||
244 | 0x61, 0x34, 0x35, 0xf5, 0x4e, 0x24, 0x74, 0x22, | ||
245 | 0x21, 0x9a, 0x29, 0x89, 0xc7, 0x93, 0x2e, 0x60, | ||
246 | 0x1e, 0xe8, 0x14, 0x24, 0x8d, 0xd5, 0x03, 0xf1, | ||
247 | 0x65, 0x5d, 0x08, 0x22, 0x72, 0xd5, 0xad, 0x95, | ||
248 | 0xe1, 0x23, 0x1e, 0x8a, 0xa7, 0x13, 0xd9, 0x2b, | ||
249 | 0x5e, 0xbc, 0xbb, 0x80, 0xab, 0x8d, 0xe5, 0x79, | ||
250 | 0xab, 0x5b, 0x47, 0x4e, 0xdd, 0xee, 0x6b, 0x03, | ||
251 | 0x8f, 0x0f, 0x5c, 0x5e, 0xa9, 0x1a, 0x83, 0xdd, | ||
252 | 0xd3, 0x88, 0xb2, 0x75, 0x4b, 0xce, 0x83, 0x36, | ||
253 | 0x57, 0x4b, 0xf1, 0x5c, 0xca, 0x7e, 0x09, 0xc0, | ||
254 | 0xd3, 0x89, 0xc6, 0xe0, 0xda, 0xc4, 0x81, 0x7e, | ||
255 | 0x5b, 0xf9, 0xe1, 0x01, 0xc1, 0x92, 0x05, 0xea, | ||
256 | 0xf5, 0x2f, 0xc6, 0xc6, 0xc7, 0x8f, 0xbc, 0xf4 }; | ||
257 | static const u8 C0[] __initconst = { | ||
258 | 0x00, 0xf4, 0xa3, 0xe5, 0xa0, 0x72, 0x63, 0x95, | ||
259 | 0xc6, 0x4f, 0x48, 0xd0, 0x8b, 0x5b, 0x5f, 0x8e, | ||
260 | 0x6b, 0x96, 0x1f, 0x16, 0xed, 0xbc, 0x66, 0x94, | ||
261 | 0x45, 0x31, 0xd7, 0x47, 0x73, 0x22, 0xa5, 0x86, | ||
262 | 0xce, 0xc0, 0x4c, 0xac, 0x63, 0xb8, 0x39, 0x50, | ||
263 | 0xbf, 0xe6, 0x59, 0x6c, 0x38, 0x58, 0x99, 0x1f, | ||
264 | 0x27, 0xa7, 0x9d, 0x71, 0x2a, 0xb3, 0x7b, 0xf9, | ||
265 | 0xfb, 0x17, 0x86, 0xaa, 0x99, 0x81, 0xaa, 0x43, | ||
266 | 0xe4, 0x37, 0xd3, 0x1e, 0x6e, 0xe5, 0xe6, 0xee, | ||
267 | 0xc2, 0xed, 0x95, 0x4f, 0x53, 0x0e, 0x46, 0x8a, | ||
268 | 0xcc, 0x45, 0xa5, 0xdb, 0x69, 0x0d, 0x81, 0xc9, | ||
269 | 0x32, 0x92, 0xbc, 0x8f, 0x33, 0xe6, 0xf6, 0x09, | ||
270 | 0x7c, 0x8e, 0x05, 0x19, 0x0d, 0xf1, 0xb6, 0xcc, | ||
271 | 0xf3, 0x02, 0x21, 0x90, 0x25, 0xec, 0xed, 0x0e }; | ||
272 | static const u8 random[] __initconst = { | ||
273 | 0x95, 0xb7, 0xf1, 0x7e, 0x98, 0x02, 0xd3, 0x57, | ||
274 | 0x73, 0x92, 0xc6, 0xa9, 0xc0, 0x80, 0x83, 0xb6, | ||
275 | 0x7d, 0xd1, 0x29, 0x22, 0x65, 0xb5, 0xf4, 0x2d, | ||
276 | 0x23, 0x7f, 0x1c, 0x55, 0xbb, 0x9b, 0x10, 0xbf, | ||
277 | 0xcf, 0xd8, 0x2c, 0x77, 0xa3, 0x78, 0xb8, 0x26, | ||
278 | 0x6a, 0x00, 0x99, 0x14, 0x3b, 0x3c, 0x2d, 0x64, | ||
279 | 0x61, 0x1e, 0xee, 0xb6, 0x9a, 0xcd, 0xc0, 0x55, | ||
280 | 0x95, 0x7c, 0x13, 0x9e, 0x8b, 0x19, 0x0c, 0x7a, | ||
281 | 0x06, 0x95, 0x5f, 0x2c, 0x79, 0x7c, 0x27, 0x78, | ||
282 | 0xde, 0x94, 0x03, 0x96, 0xa5, 0x01, 0xf4, 0x0e, | ||
283 | 0x91, 0x39, 0x6a, 0xcf, 0x8d, 0x7e, 0x45, 0xeb, | ||
284 | 0xdb, 0xb5, 0x3b, 0xbf, 0x8c, 0x97, 0x52, 0x30, | ||
285 | 0xd2, 0xf0, 0xff, 0x91, 0x06, 0xc7, 0x61, 0x19, | ||
286 | 0xae, 0x49, 0x8e, 0x7f, 0xbc, 0x03, 0xd9, 0x0f, | ||
287 | 0x8e, 0x4c, 0x51, 0x62, 0x7a, 0xed, 0x5c, 0x8d, | ||
288 | 0x42, 0x63, 0xd5, 0xd2, 0xb9, 0x78, 0x87, 0x3a, | ||
289 | 0x0d, 0xe5, 0x96, 0xee, 0x6d, 0xc7, 0xf7, 0xc2, | ||
290 | 0x9e, 0x37, 0xee, 0xe8, 0xb3, 0x4c, 0x90, 0xdd, | ||
291 | 0x1c, 0xf6, 0xa9, 0xdd, 0xb2, 0x2b, 0x4c, 0xbd, | ||
292 | 0x08, 0x6b, 0x14, 0xb3, 0x5d, 0xe9, 0x3d, 0xa2, | ||
293 | 0xd5, 0xcb, 0x18, 0x06, 0x69, 0x8c, 0xbd, 0x7b, | ||
294 | 0xbb, 0x67, 0xbf, 0xe3, 0xd3, 0x1f, 0xd2, 0xd1, | ||
295 | 0xdb, 0xd2, 0xa1, 0xe0, 0x58, 0xa3, 0xeb, 0x99, | ||
296 | 0xd7, 0xe5, 0x1f, 0x1a, 0x93, 0x8e, 0xed, 0x5e, | ||
297 | 0x1c, 0x1d, 0xe2, 0x3a, 0x6b, 0x43, 0x45, 0xd3, | ||
298 | 0x19, 0x14, 0x09, 0xf9, 0x2f, 0x39, 0xb3, 0x67, | ||
299 | 0x0d, 0x8d, 0xbf, 0xb6, 0x35, 0xd8, 0xe6, 0xa3, | ||
300 | 0x69, 0x32, 0xd8, 0x10, 0x33, 0xd1, 0x44, 0x8d, | ||
301 | 0x63, 0xb4, 0x03, 0xdd, 0xf8, 0x8e, 0x12, 0x1b, | ||
302 | 0x6e, 0x81, 0x9a, 0xc3, 0x81, 0x22, 0x6c, 0x13, | ||
303 | 0x21, 0xe4, 0xb0, 0x86, 0x44, 0xf6, 0x72, 0x7c, | ||
304 | 0x36, 0x8c, 0x5a, 0x9f, 0x7a, 0x4b, 0x3e, 0xe2 }; | ||
305 | |||
91 | int ret = 0; | 306 | int ret = 0; |
92 | int tmp; | 307 | u8 buf[sizeof(random)]; |
308 | struct ppno_ws_s ws; | ||
309 | |||
310 | memset(&ws, 0, sizeof(ws)); | ||
311 | |||
312 | /* initial seed */ | ||
313 | ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED, | ||
314 | &ws, NULL, 0, | ||
315 | seed, sizeof(seed)); | ||
316 | if (ret < 0) { | ||
317 | pr_err("The prng self test seed operation for the " | ||
318 | "SHA-512 mode failed with rc=%d\n", ret); | ||
319 | prng_errorflag = PRNG_SELFTEST_FAILED; | ||
320 | return -EIO; | ||
321 | } | ||
322 | |||
323 | /* check working states V and C */ | ||
324 | if (memcmp(ws.V, V0, sizeof(V0)) != 0 | ||
325 | || memcmp(ws.C, C0, sizeof(C0)) != 0) { | ||
326 | pr_err("The prng self test state test " | ||
327 | "for the SHA-512 mode failed\n"); | ||
328 | prng_errorflag = PRNG_SELFTEST_FAILED; | ||
329 | return -EIO; | ||
330 | } | ||
331 | |||
332 | /* generate random bytes */ | ||
333 | ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN, | ||
334 | &ws, buf, sizeof(buf), | ||
335 | NULL, 0); | ||
336 | if (ret < 0) { | ||
337 | pr_err("The prng self test generate operation for " | ||
338 | "the SHA-512 mode failed with rc=%d\n", ret); | ||
339 | prng_errorflag = PRNG_SELFTEST_FAILED; | ||
340 | return -EIO; | ||
341 | } | ||
342 | ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN, | ||
343 | &ws, buf, sizeof(buf), | ||
344 | NULL, 0); | ||
345 | if (ret < 0) { | ||
346 | pr_err("The prng self test generate operation for " | ||
347 | "the SHA-512 mode failed with rc=%d\n", ret); | ||
348 | prng_errorflag = PRNG_SELFTEST_FAILED; | ||
349 | return -EIO; | ||
350 | } | ||
351 | |||
352 | /* check against expected data */ | ||
353 | if (memcmp(buf, random, sizeof(random)) != 0) { | ||
354 | pr_err("The prng self test data test " | ||
355 | "for the SHA-512 mode failed\n"); | ||
356 | prng_errorflag = PRNG_SELFTEST_FAILED; | ||
357 | return -EIO; | ||
358 | } | ||
359 | |||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | |||
364 | static int __init prng_sha512_instantiate(void) | ||
365 | { | ||
366 | int ret, datalen; | ||
367 | u8 seed[64]; | ||
368 | |||
369 | pr_debug("prng runs in SHA-512 mode " | ||
370 | "with chunksize=%d and reseed_limit=%u\n", | ||
371 | prng_chunk_size, prng_reseed_limit); | ||
372 | |||
373 | /* memory allocation, prng_data struct init, mutex init */ | ||
374 | datalen = sizeof(struct prng_data_s) + prng_chunk_size; | ||
375 | if (fips_enabled) | ||
376 | datalen += prng_chunk_size; | ||
377 | prng_data = kzalloc(datalen, GFP_KERNEL); | ||
378 | if (!prng_data) { | ||
379 | prng_errorflag = PRNG_INSTANTIATE_FAILED; | ||
380 | return -ENOMEM; | ||
381 | } | ||
382 | mutex_init(&prng_data->mutex); | ||
383 | prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s); | ||
384 | |||
385 | /* selftest */ | ||
386 | ret = prng_sha512_selftest(); | ||
387 | if (ret) | ||
388 | goto outfree; | ||
389 | |||
390 | /* generate initial seed bytestring, first 48 bytes of entropy */ | ||
391 | ret = generate_entropy(seed, 48); | ||
392 | if (ret != 48) | ||
393 | goto outfree; | ||
394 | /* followed by 16 bytes of unique nonce */ | ||
395 | get_tod_clock_ext(seed + 48); | ||
396 | |||
397 | /* initial seed of the ppno drng */ | ||
398 | ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED, | ||
399 | &prng_data->ppnows, NULL, 0, | ||
400 | seed, sizeof(seed)); | ||
401 | if (ret < 0) { | ||
402 | prng_errorflag = PRNG_SEED_FAILED; | ||
403 | ret = -EIO; | ||
404 | goto outfree; | ||
405 | } | ||
406 | |||
407 | /* if fips mode is enabled, generate a first block of random | ||
408 | bytes for the FIPS 140-2 Conditional Self Test */ | ||
409 | if (fips_enabled) { | ||
410 | prng_data->prev = prng_data->buf + prng_chunk_size; | ||
411 | ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN, | ||
412 | &prng_data->ppnows, | ||
413 | prng_data->prev, | ||
414 | prng_chunk_size, | ||
415 | NULL, 0); | ||
416 | if (ret < 0 || ret != prng_chunk_size) { | ||
417 | prng_errorflag = PRNG_GEN_FAILED; | ||
418 | ret = -EIO; | ||
419 | goto outfree; | ||
420 | } | ||
421 | } | ||
422 | |||
423 | return 0; | ||
424 | |||
425 | outfree: | ||
426 | kfree(prng_data); | ||
427 | return ret; | ||
428 | } | ||
429 | |||
430 | |||
431 | static void prng_sha512_deinstantiate(void) | ||
432 | { | ||
433 | pr_debug("The prng module stopped after running in SHA-512 mode\n"); | ||
434 | kzfree(prng_data); | ||
435 | } | ||
436 | |||
437 | |||
438 | static int prng_sha512_reseed(void) | ||
439 | { | ||
440 | int ret; | ||
441 | u8 seed[32]; | ||
442 | |||
443 | /* generate 32 bytes of fresh entropy */ | ||
444 | ret = generate_entropy(seed, sizeof(seed)); | ||
445 | if (ret != sizeof(seed)) | ||
446 | return ret; | ||
447 | |||
448 | /* do a reseed of the ppno drng with this bytestring */ | ||
449 | ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED, | ||
450 | &prng_data->ppnows, NULL, 0, | ||
451 | seed, sizeof(seed)); | ||
452 | if (ret) { | ||
453 | prng_errorflag = PRNG_RESEED_FAILED; | ||
454 | return -EIO; | ||
455 | } | ||
456 | |||
457 | return 0; | ||
458 | } | ||
459 | |||
460 | |||
461 | static int prng_sha512_generate(u8 *buf, size_t nbytes) | ||
462 | { | ||
463 | int ret; | ||
464 | |||
465 | /* reseed needed ? */ | ||
466 | if (prng_data->ppnows.reseed_counter > prng_reseed_limit) { | ||
467 | ret = prng_sha512_reseed(); | ||
468 | if (ret) | ||
469 | return ret; | ||
470 | } | ||
471 | |||
472 | /* PPNO generate */ | ||
473 | ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN, | ||
474 | &prng_data->ppnows, buf, nbytes, | ||
475 | NULL, 0); | ||
476 | if (ret < 0 || ret != nbytes) { | ||
477 | prng_errorflag = PRNG_GEN_FAILED; | ||
478 | return -EIO; | ||
479 | } | ||
480 | |||
481 | /* FIPS 140-2 Conditional Self Test */ | ||
482 | if (fips_enabled) { | ||
483 | if (!memcmp(prng_data->prev, buf, nbytes)) { | ||
484 | prng_errorflag = PRNG_GEN_FAILED; | ||
485 | return -EILSEQ; | ||
486 | } | ||
487 | memcpy(prng_data->prev, buf, nbytes); | ||
488 | } | ||
489 | |||
490 | return ret; | ||
491 | } | ||
492 | |||
493 | |||
494 | /*** file io functions ***/ | ||
495 | |||
496 | static int prng_open(struct inode *inode, struct file *file) | ||
497 | { | ||
498 | return nonseekable_open(inode, file); | ||
499 | } | ||
500 | |||
501 | |||
502 | static ssize_t prng_tdes_read(struct file *file, char __user *ubuf, | ||
503 | size_t nbytes, loff_t *ppos) | ||
504 | { | ||
505 | int chunk, n, tmp, ret = 0; | ||
506 | |||
507 | /* lock prng_data struct */ | ||
508 | if (mutex_lock_interruptible(&prng_data->mutex)) | ||
509 | return -ERESTARTSYS; | ||
93 | 510 | ||
94 | /* nbytes can be arbitrary length, we split it into chunks */ | ||
95 | while (nbytes) { | 511 | while (nbytes) { |
96 | /* same as in extract_entropy_user in random.c */ | ||
97 | if (need_resched()) { | 512 | if (need_resched()) { |
98 | if (signal_pending(current)) { | 513 | if (signal_pending(current)) { |
99 | if (ret == 0) | 514 | if (ret == 0) |
100 | ret = -ERESTARTSYS; | 515 | ret = -ERESTARTSYS; |
101 | break; | 516 | break; |
102 | } | 517 | } |
518 | /* release the mutex before calling schedule() */ | ||
519 | mutex_unlock(&prng_data->mutex); | ||
103 | schedule(); | 520 | schedule(); |
521 | /* occopy mutex again */ | ||
522 | if (mutex_lock_interruptible(&prng_data->mutex)) { | ||
523 | if (ret == 0) | ||
524 | ret = -ERESTARTSYS; | ||
525 | return ret; | ||
526 | } | ||
104 | } | 527 | } |
105 | 528 | ||
106 | /* | 529 | /* |
@@ -112,12 +535,11 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes, | |||
112 | /* PRNG only likes multiples of 8 bytes */ | 535 | /* PRNG only likes multiples of 8 bytes */ |
113 | n = (chunk + 7) & -8; | 536 | n = (chunk + 7) & -8; |
114 | 537 | ||
115 | if (p->count > prng_entropy_limit) | 538 | if (prng_data->prngws.reseed_counter > prng_reseed_limit) |
116 | prng_seed(8); | 539 | prng_tdes_seed(8); |
117 | 540 | ||
118 | /* if the CPU supports PRNG stckf is present too */ | 541 | /* if the CPU supports PRNG stckf is present too */ |
119 | asm volatile(".insn s,0xb27c0000,%0" | 542 | *((unsigned long long *)prng_data->buf) = get_tod_clock_fast(); |
120 | : "=m" (*((unsigned long long *)p->buf)) : : "cc"); | ||
121 | 543 | ||
122 | /* | 544 | /* |
123 | * Beside the STCKF the input for the TDES-EDE is the output | 545 | * Beside the STCKF the input for the TDES-EDE is the output |
@@ -132,35 +554,259 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes, | |||
132 | * Note: you can still get strict X9.17 conformity by setting | 554 | * Note: you can still get strict X9.17 conformity by setting |
133 | * prng_chunk_size to 8 bytes. | 555 | * prng_chunk_size to 8 bytes. |
134 | */ | 556 | */ |
135 | tmp = crypt_s390_kmc(KMC_PRNG, parm_block, p->buf, p->buf, n); | 557 | tmp = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block, |
136 | BUG_ON((tmp < 0) || (tmp != n)); | 558 | prng_data->buf, prng_data->buf, n); |
559 | if (tmp < 0 || tmp != n) { | ||
560 | ret = -EIO; | ||
561 | break; | ||
562 | } | ||
137 | 563 | ||
138 | p->count += n; | 564 | prng_data->prngws.byte_counter += n; |
565 | prng_data->prngws.reseed_counter += n; | ||
139 | 566 | ||
140 | if (copy_to_user(ubuf, p->buf, chunk)) | 567 | if (copy_to_user(ubuf, prng_data->buf, chunk)) |
141 | return -EFAULT; | 568 | return -EFAULT; |
142 | 569 | ||
143 | nbytes -= chunk; | 570 | nbytes -= chunk; |
144 | ret += chunk; | 571 | ret += chunk; |
145 | ubuf += chunk; | 572 | ubuf += chunk; |
146 | } | 573 | } |
574 | |||
575 | /* unlock prng_data struct */ | ||
576 | mutex_unlock(&prng_data->mutex); | ||
577 | |||
147 | return ret; | 578 | return ret; |
148 | } | 579 | } |
149 | 580 | ||
150 | static const struct file_operations prng_fops = { | 581 | |
582 | static ssize_t prng_sha512_read(struct file *file, char __user *ubuf, | ||
583 | size_t nbytes, loff_t *ppos) | ||
584 | { | ||
585 | int n, ret = 0; | ||
586 | u8 *p; | ||
587 | |||
588 | /* if errorflag is set do nothing and return 'broken pipe' */ | ||
589 | if (prng_errorflag) | ||
590 | return -EPIPE; | ||
591 | |||
592 | /* lock prng_data struct */ | ||
593 | if (mutex_lock_interruptible(&prng_data->mutex)) | ||
594 | return -ERESTARTSYS; | ||
595 | |||
596 | while (nbytes) { | ||
597 | if (need_resched()) { | ||
598 | if (signal_pending(current)) { | ||
599 | if (ret == 0) | ||
600 | ret = -ERESTARTSYS; | ||
601 | break; | ||
602 | } | ||
603 | /* release the mutex before calling schedule() */ | ||
604 | mutex_unlock(&prng_data->mutex); | ||
605 | schedule(); | ||
606 | /* occopy mutex again */ | ||
607 | if (mutex_lock_interruptible(&prng_data->mutex)) { | ||
608 | if (ret == 0) | ||
609 | ret = -ERESTARTSYS; | ||
610 | return ret; | ||
611 | } | ||
612 | } | ||
613 | if (prng_data->rest) { | ||
614 | /* push left over random bytes from the previous read */ | ||
615 | p = prng_data->buf + prng_chunk_size - prng_data->rest; | ||
616 | n = (nbytes < prng_data->rest) ? | ||
617 | nbytes : prng_data->rest; | ||
618 | prng_data->rest -= n; | ||
619 | } else { | ||
620 | /* generate one chunk of random bytes into read buf */ | ||
621 | p = prng_data->buf; | ||
622 | n = prng_sha512_generate(p, prng_chunk_size); | ||
623 | if (n < 0) { | ||
624 | ret = n; | ||
625 | break; | ||
626 | } | ||
627 | if (nbytes < prng_chunk_size) { | ||
628 | n = nbytes; | ||
629 | prng_data->rest = prng_chunk_size - n; | ||
630 | } else { | ||
631 | n = prng_chunk_size; | ||
632 | prng_data->rest = 0; | ||
633 | } | ||
634 | } | ||
635 | if (copy_to_user(ubuf, p, n)) { | ||
636 | ret = -EFAULT; | ||
637 | break; | ||
638 | } | ||
639 | ubuf += n; | ||
640 | nbytes -= n; | ||
641 | ret += n; | ||
642 | } | ||
643 | |||
644 | /* unlock prng_data struct */ | ||
645 | mutex_unlock(&prng_data->mutex); | ||
646 | |||
647 | return ret; | ||
648 | } | ||
649 | |||
650 | |||
651 | /*** sysfs stuff ***/ | ||
652 | |||
653 | static const struct file_operations prng_sha512_fops = { | ||
654 | .owner = THIS_MODULE, | ||
655 | .open = &prng_open, | ||
656 | .release = NULL, | ||
657 | .read = &prng_sha512_read, | ||
658 | .llseek = noop_llseek, | ||
659 | }; | ||
660 | static const struct file_operations prng_tdes_fops = { | ||
151 | .owner = THIS_MODULE, | 661 | .owner = THIS_MODULE, |
152 | .open = &prng_open, | 662 | .open = &prng_open, |
153 | .release = NULL, | 663 | .release = NULL, |
154 | .read = &prng_read, | 664 | .read = &prng_tdes_read, |
155 | .llseek = noop_llseek, | 665 | .llseek = noop_llseek, |
156 | }; | 666 | }; |
157 | 667 | ||
158 | static struct miscdevice prng_dev = { | 668 | static struct miscdevice prng_sha512_dev = { |
669 | .name = "prandom", | ||
670 | .minor = MISC_DYNAMIC_MINOR, | ||
671 | .fops = &prng_sha512_fops, | ||
672 | }; | ||
673 | static struct miscdevice prng_tdes_dev = { | ||
159 | .name = "prandom", | 674 | .name = "prandom", |
160 | .minor = MISC_DYNAMIC_MINOR, | 675 | .minor = MISC_DYNAMIC_MINOR, |
161 | .fops = &prng_fops, | 676 | .fops = &prng_tdes_fops, |
162 | }; | 677 | }; |
163 | 678 | ||
679 | |||
680 | /* chunksize attribute (ro) */ | ||
681 | static ssize_t prng_chunksize_show(struct device *dev, | ||
682 | struct device_attribute *attr, | ||
683 | char *buf) | ||
684 | { | ||
685 | return snprintf(buf, PAGE_SIZE, "%u\n", prng_chunk_size); | ||
686 | } | ||
687 | static DEVICE_ATTR(chunksize, 0444, prng_chunksize_show, NULL); | ||
688 | |||
689 | /* counter attribute (ro) */ | ||
690 | static ssize_t prng_counter_show(struct device *dev, | ||
691 | struct device_attribute *attr, | ||
692 | char *buf) | ||
693 | { | ||
694 | u64 counter; | ||
695 | |||
696 | if (mutex_lock_interruptible(&prng_data->mutex)) | ||
697 | return -ERESTARTSYS; | ||
698 | if (prng_mode == PRNG_MODE_SHA512) | ||
699 | counter = prng_data->ppnows.stream_bytes; | ||
700 | else | ||
701 | counter = prng_data->prngws.byte_counter; | ||
702 | mutex_unlock(&prng_data->mutex); | ||
703 | |||
704 | return snprintf(buf, PAGE_SIZE, "%llu\n", counter); | ||
705 | } | ||
706 | static DEVICE_ATTR(byte_counter, 0444, prng_counter_show, NULL); | ||
707 | |||
708 | /* errorflag attribute (ro) */ | ||
709 | static ssize_t prng_errorflag_show(struct device *dev, | ||
710 | struct device_attribute *attr, | ||
711 | char *buf) | ||
712 | { | ||
713 | return snprintf(buf, PAGE_SIZE, "%d\n", prng_errorflag); | ||
714 | } | ||
715 | static DEVICE_ATTR(errorflag, 0444, prng_errorflag_show, NULL); | ||
716 | |||
717 | /* mode attribute (ro) */ | ||
718 | static ssize_t prng_mode_show(struct device *dev, | ||
719 | struct device_attribute *attr, | ||
720 | char *buf) | ||
721 | { | ||
722 | if (prng_mode == PRNG_MODE_TDES) | ||
723 | return snprintf(buf, PAGE_SIZE, "TDES\n"); | ||
724 | else | ||
725 | return snprintf(buf, PAGE_SIZE, "SHA512\n"); | ||
726 | } | ||
727 | static DEVICE_ATTR(mode, 0444, prng_mode_show, NULL); | ||
728 | |||
729 | /* reseed attribute (w) */ | ||
730 | static ssize_t prng_reseed_store(struct device *dev, | ||
731 | struct device_attribute *attr, | ||
732 | const char *buf, size_t count) | ||
733 | { | ||
734 | if (mutex_lock_interruptible(&prng_data->mutex)) | ||
735 | return -ERESTARTSYS; | ||
736 | prng_sha512_reseed(); | ||
737 | mutex_unlock(&prng_data->mutex); | ||
738 | |||
739 | return count; | ||
740 | } | ||
741 | static DEVICE_ATTR(reseed, 0200, NULL, prng_reseed_store); | ||
742 | |||
743 | /* reseed limit attribute (rw) */ | ||
744 | static ssize_t prng_reseed_limit_show(struct device *dev, | ||
745 | struct device_attribute *attr, | ||
746 | char *buf) | ||
747 | { | ||
748 | return snprintf(buf, PAGE_SIZE, "%u\n", prng_reseed_limit); | ||
749 | } | ||
750 | static ssize_t prng_reseed_limit_store(struct device *dev, | ||
751 | struct device_attribute *attr, | ||
752 | const char *buf, size_t count) | ||
753 | { | ||
754 | unsigned limit; | ||
755 | |||
756 | if (sscanf(buf, "%u\n", &limit) != 1) | ||
757 | return -EINVAL; | ||
758 | |||
759 | if (prng_mode == PRNG_MODE_SHA512) { | ||
760 | if (limit < PRNG_RESEED_LIMIT_SHA512_LOWER) | ||
761 | return -EINVAL; | ||
762 | } else { | ||
763 | if (limit < PRNG_RESEED_LIMIT_TDES_LOWER) | ||
764 | return -EINVAL; | ||
765 | } | ||
766 | |||
767 | prng_reseed_limit = limit; | ||
768 | |||
769 | return count; | ||
770 | } | ||
771 | static DEVICE_ATTR(reseed_limit, 0644, | ||
772 | prng_reseed_limit_show, prng_reseed_limit_store); | ||
773 | |||
774 | /* strength attribute (ro) */ | ||
775 | static ssize_t prng_strength_show(struct device *dev, | ||
776 | struct device_attribute *attr, | ||
777 | char *buf) | ||
778 | { | ||
779 | return snprintf(buf, PAGE_SIZE, "256\n"); | ||
780 | } | ||
781 | static DEVICE_ATTR(strength, 0444, prng_strength_show, NULL); | ||
782 | |||
783 | static struct attribute *prng_sha512_dev_attrs[] = { | ||
784 | &dev_attr_errorflag.attr, | ||
785 | &dev_attr_chunksize.attr, | ||
786 | &dev_attr_byte_counter.attr, | ||
787 | &dev_attr_mode.attr, | ||
788 | &dev_attr_reseed.attr, | ||
789 | &dev_attr_reseed_limit.attr, | ||
790 | &dev_attr_strength.attr, | ||
791 | NULL | ||
792 | }; | ||
793 | static struct attribute *prng_tdes_dev_attrs[] = { | ||
794 | &dev_attr_chunksize.attr, | ||
795 | &dev_attr_byte_counter.attr, | ||
796 | &dev_attr_mode.attr, | ||
797 | NULL | ||
798 | }; | ||
799 | |||
800 | static struct attribute_group prng_sha512_dev_attr_group = { | ||
801 | .attrs = prng_sha512_dev_attrs | ||
802 | }; | ||
803 | static struct attribute_group prng_tdes_dev_attr_group = { | ||
804 | .attrs = prng_tdes_dev_attrs | ||
805 | }; | ||
806 | |||
807 | |||
808 | /*** module init and exit ***/ | ||
809 | |||
164 | static int __init prng_init(void) | 810 | static int __init prng_init(void) |
165 | { | 811 | { |
166 | int ret; | 812 | int ret; |
@@ -169,43 +815,105 @@ static int __init prng_init(void) | |||
169 | if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA)) | 815 | if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA)) |
170 | return -EOPNOTSUPP; | 816 | return -EOPNOTSUPP; |
171 | 817 | ||
172 | if (prng_chunk_size < 8) | 818 | /* choose prng mode */ |
173 | return -EINVAL; | 819 | if (prng_mode != PRNG_MODE_TDES) { |
820 | /* check for MSA5 support for PPNO operations */ | ||
821 | if (!crypt_s390_func_available(PPNO_SHA512_DRNG_GEN, | ||
822 | CRYPT_S390_MSA5)) { | ||
823 | if (prng_mode == PRNG_MODE_SHA512) { | ||
824 | pr_err("The prng module cannot " | ||
825 | "start in SHA-512 mode\n"); | ||
826 | return -EOPNOTSUPP; | ||
827 | } | ||
828 | prng_mode = PRNG_MODE_TDES; | ||
829 | } else | ||
830 | prng_mode = PRNG_MODE_SHA512; | ||
831 | } | ||
174 | 832 | ||
175 | p = kmalloc(sizeof(struct s390_prng_data), GFP_KERNEL); | 833 | if (prng_mode == PRNG_MODE_SHA512) { |
176 | if (!p) | ||
177 | return -ENOMEM; | ||
178 | p->count = 0; | ||
179 | 834 | ||
180 | p->buf = kmalloc(prng_chunk_size, GFP_KERNEL); | 835 | /* SHA512 mode */ |
181 | if (!p->buf) { | ||
182 | ret = -ENOMEM; | ||
183 | goto out_free; | ||
184 | } | ||
185 | 836 | ||
186 | /* initialize the PRNG, add 128 bits of entropy */ | 837 | if (prng_chunk_size < PRNG_CHUNKSIZE_SHA512_MIN |
187 | prng_seed(16); | 838 | || prng_chunk_size > PRNG_CHUNKSIZE_SHA512_MAX) |
839 | return -EINVAL; | ||
840 | prng_chunk_size = (prng_chunk_size + 0x3f) & ~0x3f; | ||
188 | 841 | ||
189 | ret = misc_register(&prng_dev); | 842 | if (prng_reseed_limit == 0) |
190 | if (ret) | 843 | prng_reseed_limit = PRNG_RESEED_LIMIT_SHA512; |
191 | goto out_buf; | 844 | else if (prng_reseed_limit < PRNG_RESEED_LIMIT_SHA512_LOWER) |
192 | return 0; | 845 | return -EINVAL; |
846 | |||
847 | ret = prng_sha512_instantiate(); | ||
848 | if (ret) | ||
849 | goto out; | ||
850 | |||
851 | ret = misc_register(&prng_sha512_dev); | ||
852 | if (ret) { | ||
853 | prng_sha512_deinstantiate(); | ||
854 | goto out; | ||
855 | } | ||
856 | ret = sysfs_create_group(&prng_sha512_dev.this_device->kobj, | ||
857 | &prng_sha512_dev_attr_group); | ||
858 | if (ret) { | ||
859 | misc_deregister(&prng_sha512_dev); | ||
860 | prng_sha512_deinstantiate(); | ||
861 | goto out; | ||
862 | } | ||
193 | 863 | ||
194 | out_buf: | 864 | } else { |
195 | kfree(p->buf); | 865 | |
196 | out_free: | 866 | /* TDES mode */ |
197 | kfree(p); | 867 | |
868 | if (prng_chunk_size < PRNG_CHUNKSIZE_TDES_MIN | ||
869 | || prng_chunk_size > PRNG_CHUNKSIZE_TDES_MAX) | ||
870 | return -EINVAL; | ||
871 | prng_chunk_size = (prng_chunk_size + 0x07) & ~0x07; | ||
872 | |||
873 | if (prng_reseed_limit == 0) | ||
874 | prng_reseed_limit = PRNG_RESEED_LIMIT_TDES; | ||
875 | else if (prng_reseed_limit < PRNG_RESEED_LIMIT_TDES_LOWER) | ||
876 | return -EINVAL; | ||
877 | |||
878 | ret = prng_tdes_instantiate(); | ||
879 | if (ret) | ||
880 | goto out; | ||
881 | |||
882 | ret = misc_register(&prng_tdes_dev); | ||
883 | if (ret) { | ||
884 | prng_tdes_deinstantiate(); | ||
885 | goto out; | ||
886 | } | ||
887 | ret = sysfs_create_group(&prng_tdes_dev.this_device->kobj, | ||
888 | &prng_tdes_dev_attr_group); | ||
889 | if (ret) { | ||
890 | misc_deregister(&prng_tdes_dev); | ||
891 | prng_tdes_deinstantiate(); | ||
892 | goto out; | ||
893 | } | ||
894 | |||
895 | } | ||
896 | |||
897 | out: | ||
198 | return ret; | 898 | return ret; |
199 | } | 899 | } |
200 | 900 | ||
901 | |||
201 | static void __exit prng_exit(void) | 902 | static void __exit prng_exit(void) |
202 | { | 903 | { |
203 | /* wipe me */ | 904 | if (prng_mode == PRNG_MODE_SHA512) { |
204 | kzfree(p->buf); | 905 | sysfs_remove_group(&prng_sha512_dev.this_device->kobj, |
205 | kfree(p); | 906 | &prng_sha512_dev_attr_group); |
206 | 907 | misc_deregister(&prng_sha512_dev); | |
207 | misc_deregister(&prng_dev); | 908 | prng_sha512_deinstantiate(); |
909 | } else { | ||
910 | sysfs_remove_group(&prng_tdes_dev.this_device->kobj, | ||
911 | &prng_tdes_dev_attr_group); | ||
912 | misc_deregister(&prng_tdes_dev); | ||
913 | prng_tdes_deinstantiate(); | ||
914 | } | ||
208 | } | 915 | } |
209 | 916 | ||
917 | |||
210 | module_init(prng_init); | 918 | module_init(prng_init); |
211 | module_exit(prng_exit); | 919 | module_exit(prng_exit); |
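The rewritten prng driver keeps registering a misc device named "prandom" and, through the attribute groups above, exposes chunksize, byte_counter, mode, reseed, reseed_limit and strength in sysfs. A small user-space sketch of how that interface could be exercised; the /dev and /sys paths are assumptions derived from the misc-device name, not something stated in the patch:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

static void show_attr(const char *name)
{
	char path[128], line[64];
	FILE *f;

	/* device attributes of a misc device usually show up under here */
	snprintf(path, sizeof(path), "/sys/class/misc/prandom/%s", name);
	f = fopen(path, "r");
	if (!f)
		return;		/* e.g. "strength" exists only in SHA-512 mode */
	if (fgets(line, sizeof(line), f))
		printf("%-13s %s", name, line);
	fclose(f);
}

int main(void)
{
	/* 256 is the default chunksize; the driver rounds the module
	 * parameter up to a multiple of 64 (SHA-512) or 8 (TDES),
	 * e.g. chunksize=100 becomes (100 + 0x3f) & ~0x3f = 128 */
	unsigned char buf[256];
	ssize_t n;
	int fd = open("/dev/prandom", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/prandom");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	close(fd);
	if (n < 0) {
		perror("read");
		return 1;
	}
	printf("read %zd pseudo-random bytes, first byte 0x%02x\n", n, buf[0]);

	show_attr("mode");		/* "TDES" or "SHA512"    */
	show_attr("chunksize");
	show_attr("byte_counter");
	show_attr("strength");		/* "256" in SHA-512 mode */
	return 0;
}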
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h index 694bcd6bd927..2f924bc30e35 100644 --- a/arch/s390/include/asm/kexec.h +++ b/arch/s390/include/asm/kexec.h | |||
@@ -26,6 +26,9 @@ | |||
26 | /* Not more than 2GB */ | 26 | /* Not more than 2GB */ |
27 | #define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31) | 27 | #define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31) |
28 | 28 | ||
29 | /* Allocate control page with GFP_DMA */ | ||
30 | #define KEXEC_CONTROL_MEMORY_GFP GFP_DMA | ||
31 | |||
29 | /* Maximum address we can use for the crash control pages */ | 32 | /* Maximum address we can use for the crash control pages */ |
30 | #define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL) | 33 | #define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL) |
31 | 34 | ||
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h index a5e656260a70..d29ad9545b41 100644 --- a/arch/s390/include/asm/mmu.h +++ b/arch/s390/include/asm/mmu.h | |||
@@ -14,7 +14,9 @@ typedef struct { | |||
14 | unsigned long asce_bits; | 14 | unsigned long asce_bits; |
15 | unsigned long asce_limit; | 15 | unsigned long asce_limit; |
16 | unsigned long vdso_base; | 16 | unsigned long vdso_base; |
17 | /* The mmu context has extended page tables. */ | 17 | /* The mmu context allocates 4K page tables. */ |
18 | unsigned int alloc_pgste:1; | ||
19 | /* The mmu context uses extended page tables. */ | ||
18 | unsigned int has_pgste:1; | 20 | unsigned int has_pgste:1; |
19 | /* The mmu context uses storage keys. */ | 21 | /* The mmu context uses storage keys. */ |
20 | unsigned int use_skey:1; | 22 | unsigned int use_skey:1; |
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index d25d9ff10ba8..fb1b93ea3e3f 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
@@ -20,8 +20,11 @@ static inline int init_new_context(struct task_struct *tsk, | |||
20 | mm->context.flush_mm = 0; | 20 | mm->context.flush_mm = 0; |
21 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; | 21 | mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS; |
22 | mm->context.asce_bits |= _ASCE_TYPE_REGION3; | 22 | mm->context.asce_bits |= _ASCE_TYPE_REGION3; |
23 | #ifdef CONFIG_PGSTE | ||
24 | mm->context.alloc_pgste = page_table_allocate_pgste; | ||
23 | mm->context.has_pgste = 0; | 25 | mm->context.has_pgste = 0; |
24 | mm->context.use_skey = 0; | 26 | mm->context.use_skey = 0; |
27 | #endif | ||
25 | mm->context.asce_limit = STACK_TOP_MAX; | 28 | mm->context.asce_limit = STACK_TOP_MAX; |
26 | crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); | 29 | crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); |
27 | return 0; | 30 | return 0; |
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 51e7fb634ebc..7b7858f158b4 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h | |||
@@ -21,6 +21,7 @@ void crst_table_free(struct mm_struct *, unsigned long *); | |||
21 | unsigned long *page_table_alloc(struct mm_struct *); | 21 | unsigned long *page_table_alloc(struct mm_struct *); |
22 | void page_table_free(struct mm_struct *, unsigned long *); | 22 | void page_table_free(struct mm_struct *, unsigned long *); |
23 | void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long); | 23 | void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long); |
24 | extern int page_table_allocate_pgste; | ||
24 | 25 | ||
25 | int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, | 26 | int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, |
26 | unsigned long key, bool nq); | 27 | unsigned long key, bool nq); |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 989cfae9e202..fc642399b489 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
@@ -12,12 +12,9 @@ | |||
12 | #define _ASM_S390_PGTABLE_H | 12 | #define _ASM_S390_PGTABLE_H |
13 | 13 | ||
14 | /* | 14 | /* |
15 | * The Linux memory management assumes a three-level page table setup. For | 15 | * The Linux memory management assumes a three-level page table setup. |
16 | * s390 31 bit we "fold" the mid level into the top-level page table, so | 16 | * For s390 64 bit we use up to four of the five levels the hardware |
17 | * that we physically have the same two-level page table as the s390 mmu | 17 | * provides (region first tables are not used). |
18 | * expects in 31 bit mode. For s390 64 bit we use three of the five levels | ||
19 | * the hardware provides (region first and region second tables are not | ||
20 | * used). | ||
21 | * | 18 | * |
22 | * The "pgd_xxx()" functions are trivial for a folded two-level | 19 | * The "pgd_xxx()" functions are trivial for a folded two-level |
23 | * setup: the pgd is never bad, and a pmd always exists (as it's folded | 20 | * setup: the pgd is never bad, and a pmd always exists (as it's folded |
@@ -101,8 +98,8 @@ extern unsigned long zero_page_mask; | |||
101 | 98 | ||
102 | #ifndef __ASSEMBLY__ | 99 | #ifndef __ASSEMBLY__ |
103 | /* | 100 | /* |
104 | * The vmalloc and module area will always be on the topmost area of the kernel | 101 | * The vmalloc and module area will always be on the topmost area of the |
105 | * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules. | 102 | * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules. |
106 | * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where | 103 | * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where |
107 | * modules will reside. That makes sure that inter module branches always | 104 | * modules will reside. That makes sure that inter module branches always |
108 | * happen without trampolines and in addition the placement within a 2GB frame | 105 | * happen without trampolines and in addition the placement within a 2GB frame |
@@ -131,38 +128,6 @@ static inline int is_module_addr(void *addr) | |||
131 | } | 128 | } |
132 | 129 | ||
133 | /* | 130 | /* |
134 | * A 31 bit pagetable entry of S390 has following format: | ||
135 | * | PFRA | | OS | | ||
136 | * 0 0IP0 | ||
137 | * 00000000001111111111222222222233 | ||
138 | * 01234567890123456789012345678901 | ||
139 | * | ||
140 | * I Page-Invalid Bit: Page is not available for address-translation | ||
141 | * P Page-Protection Bit: Store access not possible for page | ||
142 | * | ||
143 | * A 31 bit segmenttable entry of S390 has following format: | ||
144 | * | P-table origin | |PTL | ||
145 | * 0 IC | ||
146 | * 00000000001111111111222222222233 | ||
147 | * 01234567890123456789012345678901 | ||
148 | * | ||
149 | * I Segment-Invalid Bit: Segment is not available for address-translation | ||
150 | * C Common-Segment Bit: Segment is not private (PoP 3-30) | ||
151 | * PTL Page-Table-Length: Page-table length (PTL+1*16 entries -> up to 256) | ||
152 | * | ||
153 | * The 31 bit segmenttable origin of S390 has following format: | ||
154 | * | ||
155 | * |S-table origin | | STL | | ||
156 | * X **GPS | ||
157 | * 00000000001111111111222222222233 | ||
158 | * 01234567890123456789012345678901 | ||
159 | * | ||
160 | * X Space-Switch event: | ||
161 | * G Segment-Invalid Bit: * | ||
162 | * P Private-Space Bit: Segment is not private (PoP 3-30) | ||
163 | * S Storage-Alteration: | ||
164 | * STL Segment-Table-Length: Segment-table length (STL+1*16 entries -> up to 2048) | ||
165 | * | ||
166 | * A 64 bit pagetable entry of S390 has following format: | 131 | * A 64 bit pagetable entry of S390 has following format: |
167 | * | PFRA |0IPC| OS | | 132 | * | PFRA |0IPC| OS | |
168 | * 0000000000111111111122222222223333333333444444444455555555556666 | 133 | * 0000000000111111111122222222223333333333444444444455555555556666 |
@@ -220,7 +185,6 @@ static inline int is_module_addr(void *addr) | |||
220 | 185 | ||
221 | /* Software bits in the page table entry */ | 186 | /* Software bits in the page table entry */ |
222 | #define _PAGE_PRESENT 0x001 /* SW pte present bit */ | 187 | #define _PAGE_PRESENT 0x001 /* SW pte present bit */ |
223 | #define _PAGE_TYPE 0x002 /* SW pte type bit */ | ||
224 | #define _PAGE_YOUNG 0x004 /* SW pte young bit */ | 188 | #define _PAGE_YOUNG 0x004 /* SW pte young bit */ |
225 | #define _PAGE_DIRTY 0x008 /* SW pte dirty bit */ | 189 | #define _PAGE_DIRTY 0x008 /* SW pte dirty bit */ |
226 | #define _PAGE_READ 0x010 /* SW pte read bit */ | 190 | #define _PAGE_READ 0x010 /* SW pte read bit */ |
@@ -240,31 +204,34 @@ static inline int is_module_addr(void *addr) | |||
240 | * table lock held. | 204 | * table lock held. |
241 | * | 205 | * |
242 | * The following table gives the different possible bit combinations for | 206 | * The following table gives the different possible bit combinations for |
243 | * the pte hardware and software bits in the last 12 bits of a pte: | 207 | * the pte hardware and software bits in the last 12 bits of a pte |
208 | * (. unassigned bit, x don't care, t swap type): | ||
244 | * | 209 | * |
245 | * 842100000000 | 210 | * 842100000000 |
246 | * 000084210000 | 211 | * 000084210000 |
247 | * 000000008421 | 212 | * 000000008421 |
248 | * .IR...wrdytp | 213 | * .IR.uswrdy.p |
249 | * empty .10...000000 | 214 | * empty .10.00000000 |
250 | * swap .10...xxxx10 | 215 | * swap .11..ttttt.0 |
251 | * file .11...xxxxx0 | 216 | * prot-none, clean, old .11.xx0000.1 |
252 | * prot-none, clean, old .11...000001 | 217 | * prot-none, clean, young .11.xx0001.1 |
253 | * prot-none, clean, young .11...000101 | 218 | * prot-none, dirty, old .10.xx0010.1 |
254 | * prot-none, dirty, old .10...001001 | 219 | * prot-none, dirty, young .10.xx0011.1 |
255 | * prot-none, dirty, young .10...001101 | 220 | * read-only, clean, old .11.xx0100.1 |
256 | * read-only, clean, old .11...010001 | 221 | * read-only, clean, young .01.xx0101.1 |
257 | * read-only, clean, young .01...010101 | 222 | * read-only, dirty, old .11.xx0110.1 |
258 | * read-only, dirty, old .11...011001 | 223 | * read-only, dirty, young .01.xx0111.1 |
259 | * read-only, dirty, young .01...011101 | 224 | * read-write, clean, old .11.xx1100.1 |
260 | * read-write, clean, old .11...110001 | 225 | * read-write, clean, young .01.xx1101.1 |
261 | * read-write, clean, young .01...110101 | 226 | * read-write, dirty, old .10.xx1110.1 |
262 | * read-write, dirty, old .10...111001 | 227 | * read-write, dirty, young .00.xx1111.1 |
263 | * read-write, dirty, young .00...111101 | 228 | * HW-bits: R read-only, I invalid |
229 | * SW-bits: p present, y young, d dirty, r read, w write, s special, | ||
230 | * u unused, l large | ||
264 | * | 231 | * |
265 | * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001 | 232 | * pte_none is true for the bit pattern .10.00000000, pte == 0x400 |
266 | * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400 | 233 | * pte_swap is true for the bit pattern .11..ooooo.0, (pte & 0x201) == 0x200 |
267 | * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402 | 234 | * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001 |
268 | */ | 235 | */ |
269 | 236 | ||
270 | /* Bits in the segment/region table address-space-control-element */ | 237 | /* Bits in the segment/region table address-space-control-element */ |
@@ -335,6 +302,8 @@ static inline int is_module_addr(void *addr) | |||
335 | * read-write, dirty, young 11..0...0...11 | 302 | * read-write, dirty, young 11..0...0...11 |
336 | * The segment table origin is used to distinguish empty (origin==0) from | 303 | * The segment table origin is used to distinguish empty (origin==0) from |
337 | * read-write, old segment table entries (origin!=0) | 304 | * read-write, old segment table entries (origin!=0) |
305 | * HW-bits: R read-only, I invalid | ||
306 | * SW-bits: y young, d dirty, r read, w write | ||
338 | */ | 307 | */ |
339 | 308 | ||
340 | #define _SEGMENT_ENTRY_SPLIT_BIT 11 /* THP splitting bit number */ | 309 | #define _SEGMENT_ENTRY_SPLIT_BIT 11 /* THP splitting bit number */ |
@@ -423,6 +392,15 @@ static inline int mm_has_pgste(struct mm_struct *mm) | |||
423 | return 0; | 392 | return 0; |
424 | } | 393 | } |
425 | 394 | ||
395 | static inline int mm_alloc_pgste(struct mm_struct *mm) | ||
396 | { | ||
397 | #ifdef CONFIG_PGSTE | ||
398 | if (unlikely(mm->context.alloc_pgste)) | ||
399 | return 1; | ||
400 | #endif | ||
401 | return 0; | ||
402 | } | ||
403 | |||
426 | /* | 404 | /* |
427 | * In the case that a guest uses storage keys | 405 | * In the case that a guest uses storage keys |
428 | * faults should no longer be backed by zero pages | 406 | * faults should no longer be backed by zero pages |
@@ -582,10 +560,9 @@ static inline int pte_none(pte_t pte) | |||
582 | 560 | ||
583 | static inline int pte_swap(pte_t pte) | 561 | static inline int pte_swap(pte_t pte) |
584 | { | 562 | { |
585 | /* Bit pattern: (pte & 0x603) == 0x402 */ | 563 | /* Bit pattern: (pte & 0x201) == 0x200 */ |
586 | return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | | 564 | return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT)) |
587 | _PAGE_TYPE | _PAGE_PRESENT)) | 565 | == _PAGE_PROTECT; |
588 | == (_PAGE_INVALID | _PAGE_TYPE); | ||
589 | } | 566 | } |
590 | 567 | ||
591 | static inline int pte_special(pte_t pte) | 568 | static inline int pte_special(pte_t pte) |
@@ -1586,51 +1563,51 @@ static inline int has_transparent_hugepage(void) | |||
1586 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | 1563 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
1587 | 1564 | ||
1588 | /* | 1565 | /* |
1589 | * 31 bit swap entry format: | ||
1590 | * A page-table entry has some bits we have to treat in a special way. | ||
1591 | * Bits 0, 20 and bit 23 have to be zero, otherwise an specification | ||
1592 | * exception will occur instead of a page translation exception. The | ||
1593 | * specifiation exception has the bad habit not to store necessary | ||
1594 | * information in the lowcore. | ||
1595 | * Bits 21, 22, 30 and 31 are used to indicate the page type. | ||
1596 | * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402 | ||
1597 | * This leaves the bits 1-19 and bits 24-29 to store type and offset. | ||
1598 | * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19 | ||
1599 | * plus 24 for the offset. | ||
1600 | * 0| offset |0110|o|type |00| | ||
1601 | * 0 0000000001111111111 2222 2 22222 33 | ||
1602 | * 0 1234567890123456789 0123 4 56789 01 | ||
1603 | * | ||
1604 | * 64 bit swap entry format: | 1566 | * 64 bit swap entry format: |
1605 | * A page-table entry has some bits we have to treat in a special way. | 1567 | * A page-table entry has some bits we have to treat in a special way. |
1606 | * Bits 52 and 55 have to be zero, otherwise a specification | 1568 | * Bits 52 and 55 have to be zero, otherwise a specification |
1607 | * exception will occur instead of a page translation exception. The | 1569 | * exception will occur instead of a page translation exception. The |
1608 | * specification exception has the bad habit not to store necessary | 1570 | * specification exception has the bad habit not to store necessary |
1609 | * information in the lowcore. | 1571 | * information in the lowcore. |
1610 | * Bits 53, 54, 62 and 63 are used to indicate the page type. | 1572 | * Bits 54 and 63 are used to indicate the page type. |
1611 | * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402 | 1573 | * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200 |
1612 | * This leaves the bits 0-51 and bits 56-61 to store type and offset. | 1574 | * This leaves the bits 0-51 and bits 56-62 to store type and offset. |
1613 | * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51 | 1575 | * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51 |
1614 | * plus 56 for the offset. | 1576 | * for the offset. |
1615 | * | offset |0110|o|type |00| | 1577 | * | offset |01100|type |00| |
1616 | * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 | 1578 | * |0000000000111111111122222222223333333333444444444455|55555|55566|66| |
1617 | * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 | 1579 | * |0123456789012345678901234567890123456789012345678901|23456|78901|23| |
1618 | */ | 1580 | */ |
1619 | 1581 | ||
1620 | #define __SWP_OFFSET_MASK (~0UL >> 11) | 1582 | #define __SWP_OFFSET_MASK ((1UL << 52) - 1) |
1583 | #define __SWP_OFFSET_SHIFT 12 | ||
1584 | #define __SWP_TYPE_MASK ((1UL << 5) - 1) | ||
1585 | #define __SWP_TYPE_SHIFT 2 | ||
1621 | 1586 | ||
1622 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) | 1587 | static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) |
1623 | { | 1588 | { |
1624 | pte_t pte; | 1589 | pte_t pte; |
1625 | offset &= __SWP_OFFSET_MASK; | 1590 | |
1626 | pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) | | 1591 | pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT; |
1627 | ((offset & 1UL) << 7) | ((offset & ~1UL) << 11); | 1592 | pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT; |
1593 | pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT; | ||
1628 | return pte; | 1594 | return pte; |
1629 | } | 1595 | } |
1630 | 1596 | ||
1631 | #define __swp_type(entry) (((entry).val >> 2) & 0x1f) | 1597 | static inline unsigned long __swp_type(swp_entry_t entry) |
1632 | #define __swp_offset(entry) (((entry).val >> 11) | (((entry).val >> 7) & 1)) | 1598 | { |
1633 | #define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) }) | 1599 | return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK; |
1600 | } | ||
1601 | |||
1602 | static inline unsigned long __swp_offset(swp_entry_t entry) | ||
1603 | { | ||
1604 | return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK; | ||
1605 | } | ||
1606 | |||
1607 | static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset) | ||
1608 | { | ||
1609 | return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) }; | ||
1610 | } | ||
1634 | 1611 | ||
1635 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) | 1612 | #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) |
1636 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) | 1613 | #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) |
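To make the new swap-pte layout concrete, the following user-space sketch round-trips a (type, offset) pair through the encoding added above. The _PAGE_INVALID and _PAGE_PROTECT values (0x400 and 0x200) are read off the bit table earlier in this hunk; the rest mirrors mk_swap_pte()/__swp_type()/__swp_offset(), but it is an illustration, not the kernel code itself.

	#include <assert.h>
	#include <stdio.h>

	#define _PAGE_INVALID		0x400UL	/* per the bit table above */
	#define _PAGE_PROTECT		0x200UL
	#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
	#define __SWP_OFFSET_SHIFT	12
	#define __SWP_TYPE_MASK		((1UL << 5) - 1)
	#define __SWP_TYPE_SHIFT	2

	static unsigned long mk_swap_pte(unsigned long type, unsigned long offset)
	{
		unsigned long pte = _PAGE_INVALID | _PAGE_PROTECT;

		pte |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
		pte |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
		return pte;
	}

	int main(void)
	{
		unsigned long pte = mk_swap_pte(3, 0x12345);

		assert((pte & 0x201) == 0x200);		/* pte_swap() pattern */
		assert(((pte >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) == 3);
		assert(((pte >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK) == 0x12345);
		printf("swap pte = %#lx\n", pte);
		return 0;
	}

Compared with the old layout, the offset no longer has to be split around bit 7, which is why __swp_offset() collapses to a single shift-and-mask.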
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 210ffede0153..e617e74b7be2 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c | |||
@@ -14,20 +14,23 @@ static inline pmd_t __pte_to_pmd(pte_t pte) | |||
14 | 14 | ||
15 | /* | 15 | /* |
16 | * Convert encoding pte bits pmd bits | 16 | * Convert encoding pte bits pmd bits |
17 | * .IR...wrdytp dy..R...I...wr | 17 | * lIR.uswrdy.p dy..R...I...wr |
18 | * empty .10...000000 -> 00..0...1...00 | 18 | * empty 010.000000.0 -> 00..0...1...00 |
19 | * prot-none, clean, old .11...000001 -> 00..1...1...00 | 19 | * prot-none, clean, old 111.000000.1 -> 00..1...1...00 |
20 | * prot-none, clean, young .11...000101 -> 01..1...1...00 | 20 | * prot-none, clean, young 111.000001.1 -> 01..1...1...00 |
21 | * prot-none, dirty, old .10...001001 -> 10..1...1...00 | 21 | * prot-none, dirty, old 111.000010.1 -> 10..1...1...00 |
22 | * prot-none, dirty, young .10...001101 -> 11..1...1...00 | 22 | * prot-none, dirty, young 111.000011.1 -> 11..1...1...00 |
23 | * read-only, clean, old .11...010001 -> 00..1...1...01 | 23 | * read-only, clean, old 111.000100.1 -> 00..1...1...01 |
24 | * read-only, clean, young .01...010101 -> 01..1...0...01 | 24 | * read-only, clean, young 101.000101.1 -> 01..1...0...01 |
25 | * read-only, dirty, old .11...011001 -> 10..1...1...01 | 25 | * read-only, dirty, old 111.000110.1 -> 10..1...1...01 |
26 | * read-only, dirty, young .01...011101 -> 11..1...0...01 | 26 | * read-only, dirty, young 101.000111.1 -> 11..1...0...01 |
27 | * read-write, clean, old .11...110001 -> 00..0...1...11 | 27 | * read-write, clean, old 111.001100.1 -> 00..1...1...11 |
28 | * read-write, clean, young .01...110101 -> 01..0...0...11 | 28 | * read-write, clean, young 101.001101.1 -> 01..1...0...11 |
29 | * read-write, dirty, old .10...111001 -> 10..0...1...11 | 29 | * read-write, dirty, old 110.001110.1 -> 10..0...1...11 |
30 | * read-write, dirty, young .00...111101 -> 11..0...0...11 | 30 | * read-write, dirty, young 100.001111.1 -> 11..0...0...11 |
31 | * HW-bits: R read-only, I invalid | ||
32 | * SW-bits: p present, y young, d dirty, r read, w write, s special, | ||
33 | * u unused, l large | ||
31 | */ | 34 | */ |
32 | if (pte_present(pte)) { | 35 | if (pte_present(pte)) { |
33 | pmd_val(pmd) = pte_val(pte) & PAGE_MASK; | 36 | pmd_val(pmd) = pte_val(pte) & PAGE_MASK; |
@@ -48,20 +51,23 @@ static inline pte_t __pmd_to_pte(pmd_t pmd) | |||
48 | 51 | ||
49 | /* | 52 | /* |
50 | * Convert encoding pmd bits pte bits | 53 | * Convert encoding pmd bits pte bits |
51 | * dy..R...I...wr .IR...wrdytp | 54 | * dy..R...I...wr lIR.uswrdy.p |
52 | * empty 00..0...1...00 -> .10...001100 | 55 | * empty 00..0...1...00 -> 010.000000.0 |
53 | * prot-none, clean, old 00..0...1...00 -> .10...000001 | 56 | * prot-none, clean, old 00..1...1...00 -> 111.000000.1 |
54 | * prot-none, clean, young 01..0...1...00 -> .10...000101 | 57 | * prot-none, clean, young 01..1...1...00 -> 111.000001.1 |
55 | * prot-none, dirty, old 10..0...1...00 -> .10...001001 | 58 | * prot-none, dirty, old 10..1...1...00 -> 111.000010.1 |
56 | * prot-none, dirty, young 11..0...1...00 -> .10...001101 | 59 | * prot-none, dirty, young 11..1...1...00 -> 111.000011.1 |
57 | * read-only, clean, old 00..1...1...01 -> .11...010001 | 60 | * read-only, clean, old 00..1...1...01 -> 111.000100.1 |
58 | * read-only, clean, young 01..1...1...01 -> .11...010101 | 61 | * read-only, clean, young 01..1...0...01 -> 101.000101.1 |
59 | * read-only, dirty, old 10..1...1...01 -> .11...011001 | 62 | * read-only, dirty, old 10..1...1...01 -> 111.000110.1 |
60 | * read-only, dirty, young 11..1...1...01 -> .11...011101 | 63 | * read-only, dirty, young 11..1...0...01 -> 101.000111.1 |
61 | * read-write, clean, old 00..0...1...11 -> .10...110001 | 64 | * read-write, clean, old 00..1...1...11 -> 111.001100.1 |
62 | * read-write, clean, young 01..0...1...11 -> .10...110101 | 65 | * read-write, clean, young 01..1...0...11 -> 101.001101.1 |
63 | * read-write, dirty, old 10..0...1...11 -> .10...111001 | 66 | * read-write, dirty, old 10..0...1...11 -> 110.001110.1 |
64 | * read-write, dirty, young 11..0...1...11 -> .10...111101 | 67 | * read-write, dirty, young 11..0...0...11 -> 100.001111.1 |
68 | * HW-bits: R read-only, I invalid | ||
69 | * SW-bits: p present, y young, d dirty, r read, w write, s special, | ||
70 | * u unused, l large | ||
65 | */ | 71 | */ |
66 | if (pmd_present(pmd)) { | 72 | if (pmd_present(pmd)) { |
67 | pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE; | 73 | pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE; |
@@ -70,8 +76,8 @@ static inline pte_t __pmd_to_pte(pmd_t pmd) | |||
70 | pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4; | 76 | pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4; |
71 | pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5; | 77 | pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5; |
72 | pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT); | 78 | pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT); |
73 | pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10; | 79 | pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10; |
74 | pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10; | 80 | pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10; |
75 | } else | 81 | } else |
76 | pte_val(pte) = _PAGE_INVALID; | 82 | pte_val(pte) = _PAGE_INVALID; |
77 | return pte; | 83 | return pte; |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 33f589459113..b33f66110ca9 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/rcupdate.h> | 18 | #include <linux/rcupdate.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/swapops.h> | 20 | #include <linux/swapops.h> |
21 | #include <linux/sysctl.h> | ||
21 | #include <linux/ksm.h> | 22 | #include <linux/ksm.h> |
22 | #include <linux/mman.h> | 23 | #include <linux/mman.h> |
23 | 24 | ||
@@ -920,6 +921,40 @@ unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr) | |||
920 | } | 921 | } |
921 | EXPORT_SYMBOL(get_guest_storage_key); | 922 | EXPORT_SYMBOL(get_guest_storage_key); |
922 | 923 | ||
924 | static int page_table_allocate_pgste_min = 0; | ||
925 | static int page_table_allocate_pgste_max = 1; | ||
926 | int page_table_allocate_pgste = 0; | ||
927 | EXPORT_SYMBOL(page_table_allocate_pgste); | ||
928 | |||
929 | static struct ctl_table page_table_sysctl[] = { | ||
930 | { | ||
931 | .procname = "allocate_pgste", | ||
932 | .data = &page_table_allocate_pgste, | ||
933 | .maxlen = sizeof(int), | ||
934 | .mode = S_IRUGO | S_IWUSR, | ||
935 | .proc_handler = proc_dointvec, | ||
936 | .extra1 = &page_table_allocate_pgste_min, | ||
937 | .extra2 = &page_table_allocate_pgste_max, | ||
938 | }, | ||
939 | { } | ||
940 | }; | ||
941 | |||
942 | static struct ctl_table page_table_sysctl_dir[] = { | ||
943 | { | ||
944 | .procname = "vm", | ||
945 | .maxlen = 0, | ||
946 | .mode = 0555, | ||
947 | .child = page_table_sysctl, | ||
948 | }, | ||
949 | { } | ||
950 | }; | ||
951 | |||
952 | static int __init page_table_register_sysctl(void) | ||
953 | { | ||
954 | return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM; | ||
955 | } | ||
956 | __initcall(page_table_register_sysctl); | ||
957 | |||
923 | #else /* CONFIG_PGSTE */ | 958 | #else /* CONFIG_PGSTE */ |
924 | 959 | ||
925 | static inline int page_table_with_pgste(struct page *page) | 960 | static inline int page_table_with_pgste(struct page *page) |
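The sysctl table above exposes the new allocation policy as vm.allocate_pgste. Because page tables are only allocated with pgstes when the flag is already set at mm creation time, a KVM host has to enable it before launching guests; the s390_enable_sie() change later in this file makes SIE fail with -EINVAL otherwise. A hypothetical user-space helper (the /proc/sys/vm/allocate_pgste path follows from the ctl_table entries above):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/vm/allocate_pgste", "w");

		if (!f) {
			perror("allocate_pgste");
			return 1;
		}
		fputs("1\n", f);	/* allocate full 4K page tables with pgstes */
		fclose(f);
		return 0;
	}

In practice the knob would normally be set via sysctl.conf or by the management stack rather than a dedicated binary.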
@@ -963,7 +998,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) | |||
963 | struct page *uninitialized_var(page); | 998 | struct page *uninitialized_var(page); |
964 | unsigned int mask, bit; | 999 | unsigned int mask, bit; |
965 | 1000 | ||
966 | if (mm_has_pgste(mm)) | 1001 | if (mm_alloc_pgste(mm)) |
967 | return page_table_alloc_pgste(mm); | 1002 | return page_table_alloc_pgste(mm); |
968 | /* Allocate fragments of a 4K page as 1K/2K page table */ | 1003 | /* Allocate fragments of a 4K page as 1K/2K page table */ |
969 | spin_lock_bh(&mm->context.list_lock); | 1004 | spin_lock_bh(&mm->context.list_lock); |
@@ -1165,116 +1200,25 @@ static inline void thp_split_mm(struct mm_struct *mm) | |||
1165 | } | 1200 | } |
1166 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | 1201 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
1167 | 1202 | ||
1168 | static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb, | ||
1169 | struct mm_struct *mm, pud_t *pud, | ||
1170 | unsigned long addr, unsigned long end) | ||
1171 | { | ||
1172 | unsigned long next, *table, *new; | ||
1173 | struct page *page; | ||
1174 | spinlock_t *ptl; | ||
1175 | pmd_t *pmd; | ||
1176 | |||
1177 | pmd = pmd_offset(pud, addr); | ||
1178 | do { | ||
1179 | next = pmd_addr_end(addr, end); | ||
1180 | again: | ||
1181 | if (pmd_none_or_clear_bad(pmd)) | ||
1182 | continue; | ||
1183 | table = (unsigned long *) pmd_deref(*pmd); | ||
1184 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | ||
1185 | if (page_table_with_pgste(page)) | ||
1186 | continue; | ||
1187 | /* Allocate new page table with pgstes */ | ||
1188 | new = page_table_alloc_pgste(mm); | ||
1189 | if (!new) | ||
1190 | return -ENOMEM; | ||
1191 | |||
1192 | ptl = pmd_lock(mm, pmd); | ||
1193 | if (likely((unsigned long *) pmd_deref(*pmd) == table)) { | ||
1194 | /* Nuke pmd entry pointing to the "short" page table */ | ||
1195 | pmdp_flush_lazy(mm, addr, pmd); | ||
1196 | pmd_clear(pmd); | ||
1197 | /* Copy ptes from old table to new table */ | ||
1198 | memcpy(new, table, PAGE_SIZE/2); | ||
1199 | clear_table(table, _PAGE_INVALID, PAGE_SIZE/2); | ||
1200 | /* Establish new table */ | ||
1201 | pmd_populate(mm, pmd, (pte_t *) new); | ||
1202 | /* Free old table with rcu, there might be a walker! */ | ||
1203 | page_table_free_rcu(tlb, table, addr); | ||
1204 | new = NULL; | ||
1205 | } | ||
1206 | spin_unlock(ptl); | ||
1207 | if (new) { | ||
1208 | page_table_free_pgste(new); | ||
1209 | goto again; | ||
1210 | } | ||
1211 | } while (pmd++, addr = next, addr != end); | ||
1212 | |||
1213 | return addr; | ||
1214 | } | ||
1215 | |||
1216 | static unsigned long page_table_realloc_pud(struct mmu_gather *tlb, | ||
1217 | struct mm_struct *mm, pgd_t *pgd, | ||
1218 | unsigned long addr, unsigned long end) | ||
1219 | { | ||
1220 | unsigned long next; | ||
1221 | pud_t *pud; | ||
1222 | |||
1223 | pud = pud_offset(pgd, addr); | ||
1224 | do { | ||
1225 | next = pud_addr_end(addr, end); | ||
1226 | if (pud_none_or_clear_bad(pud)) | ||
1227 | continue; | ||
1228 | next = page_table_realloc_pmd(tlb, mm, pud, addr, next); | ||
1229 | if (unlikely(IS_ERR_VALUE(next))) | ||
1230 | return next; | ||
1231 | } while (pud++, addr = next, addr != end); | ||
1232 | |||
1233 | return addr; | ||
1234 | } | ||
1235 | |||
1236 | static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm, | ||
1237 | unsigned long addr, unsigned long end) | ||
1238 | { | ||
1239 | unsigned long next; | ||
1240 | pgd_t *pgd; | ||
1241 | |||
1242 | pgd = pgd_offset(mm, addr); | ||
1243 | do { | ||
1244 | next = pgd_addr_end(addr, end); | ||
1245 | if (pgd_none_or_clear_bad(pgd)) | ||
1246 | continue; | ||
1247 | next = page_table_realloc_pud(tlb, mm, pgd, addr, next); | ||
1248 | if (unlikely(IS_ERR_VALUE(next))) | ||
1249 | return next; | ||
1250 | } while (pgd++, addr = next, addr != end); | ||
1251 | |||
1252 | return 0; | ||
1253 | } | ||
1254 | |||
1255 | /* | 1203 | /* |
1256 | * switch on pgstes for its userspace process (for kvm) | 1204 | * switch on pgstes for its userspace process (for kvm) |
1257 | */ | 1205 | */ |
1258 | int s390_enable_sie(void) | 1206 | int s390_enable_sie(void) |
1259 | { | 1207 | { |
1260 | struct task_struct *tsk = current; | 1208 | struct mm_struct *mm = current->mm; |
1261 | struct mm_struct *mm = tsk->mm; | ||
1262 | struct mmu_gather tlb; | ||
1263 | 1209 | ||
1264 | /* Do we have pgstes? if yes, we are done */ | 1210 | /* Do we have pgstes? if yes, we are done */ |
1265 | if (mm_has_pgste(tsk->mm)) | 1211 | if (mm_has_pgste(mm)) |
1266 | return 0; | 1212 | return 0; |
1267 | 1213 | /* Fail if the page tables are 2K */ | |
1214 | if (!mm_alloc_pgste(mm)) | ||
1215 | return -EINVAL; | ||
1268 | down_write(&mm->mmap_sem); | 1216 | down_write(&mm->mmap_sem); |
1217 | mm->context.has_pgste = 1; | ||
1269 | /* split thp mappings and disable thp for future mappings */ | 1218 | /* split thp mappings and disable thp for future mappings */ |
1270 | thp_split_mm(mm); | 1219 | thp_split_mm(mm); |
1271 | /* Reallocate the page tables with pgstes */ | ||
1272 | tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE); | ||
1273 | if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE)) | ||
1274 | mm->context.has_pgste = 1; | ||
1275 | tlb_finish_mmu(&tlb, 0, TASK_SIZE); | ||
1276 | up_write(&mm->mmap_sem); | 1220 | up_write(&mm->mmap_sem); |
1277 | return mm->context.has_pgste ? 0 : -ENOMEM; | 1221 | return 0; |
1278 | } | 1222 | } |
1279 | EXPORT_SYMBOL_GPL(s390_enable_sie); | 1223 | EXPORT_SYMBOL_GPL(s390_enable_sie); |
1280 | 1224 | ||
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 6873f006f7d0..d366675e4bf8 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
@@ -774,7 +774,7 @@ static void __init zone_sizes_init(void) | |||
774 | * though, there'll be no lowmem, so we just alloc_bootmem | 774 | * though, there'll be no lowmem, so we just alloc_bootmem |
775 | * the memmap. There will be no percpu memory either. | 775 | * the memmap. There will be no percpu memory either. |
776 | */ | 776 | */ |
777 | if (i != 0 && cpumask_test_cpu(i, &isolnodes)) { | 777 | if (i != 0 && node_isset(i, isolnodes)) { |
778 | node_memmap_pfn[i] = | 778 | node_memmap_pfn[i] = |
779 | alloc_bootmem_pfn(0, memmap_size, 0); | 779 | alloc_bootmem_pfn(0, memmap_size, 0); |
780 | BUG_ON(node_percpu[i] != 0); | 780 | BUG_ON(node_percpu[i] != 0); |
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index ef17683484e9..48304b89b601 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
@@ -1109,6 +1109,8 @@ struct boot_params *make_boot_params(struct efi_config *c) | |||
1109 | if (!cmdline_ptr) | 1109 | if (!cmdline_ptr) |
1110 | goto fail; | 1110 | goto fail; |
1111 | hdr->cmd_line_ptr = (unsigned long)cmdline_ptr; | 1111 | hdr->cmd_line_ptr = (unsigned long)cmdline_ptr; |
1112 | /* Fill in upper bits of command line address, NOP on 32 bit */ | ||
1113 | boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32; | ||
1112 | 1114 | ||
1113 | hdr->ramdisk_image = 0; | 1115 | hdr->ramdisk_image = 0; |
1114 | hdr->ramdisk_size = 0; | 1116 | hdr->ramdisk_size = 0; |
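The extra store makes command lines located above 4 GB reachable: the low 32 bits stay in hdr->cmd_line_ptr and the high bits go into boot_params->ext_cmd_line_ptr (a no-op on 32-bit, where the shift yields 0). A tiny check of the split-and-recombine arithmetic with an illustrative address:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t cmdline = 0x1234567890ULL;		/* example pointer above 4 GB */
		uint32_t cmd_line_ptr = (uint32_t)cmdline;	/* low 32 bits */
		uint32_t ext_cmd_line_ptr = cmdline >> 32;	/* high 32 bits */

		/* the kernel rebuilds the full pointer from the two fields */
		assert((((uint64_t)ext_cmd_line_ptr << 32) | cmd_line_ptr) == cmdline);
		return 0;
	}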
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index e42f758a0fbd..055ea9941dd5 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h | |||
@@ -50,7 +50,7 @@ extern const struct hypervisor_x86 *x86_hyper; | |||
50 | /* Recognized hypervisors */ | 50 | /* Recognized hypervisors */ |
51 | extern const struct hypervisor_x86 x86_hyper_vmware; | 51 | extern const struct hypervisor_x86 x86_hyper_vmware; |
52 | extern const struct hypervisor_x86 x86_hyper_ms_hyperv; | 52 | extern const struct hypervisor_x86 x86_hyper_ms_hyperv; |
53 | extern const struct hypervisor_x86 x86_hyper_xen_hvm; | 53 | extern const struct hypervisor_x86 x86_hyper_xen; |
54 | extern const struct hypervisor_x86 x86_hyper_kvm; | 54 | extern const struct hypervisor_x86 x86_hyper_kvm; |
55 | 55 | ||
56 | extern void init_hypervisor(struct cpuinfo_x86 *c); | 56 | extern void init_hypervisor(struct cpuinfo_x86 *c); |
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h index 25b1cc07d496..d6b078e9fa28 100644 --- a/arch/x86/include/asm/pvclock.h +++ b/arch/x86/include/asm/pvclock.h | |||
@@ -95,7 +95,6 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, | |||
95 | 95 | ||
96 | struct pvclock_vsyscall_time_info { | 96 | struct pvclock_vsyscall_time_info { |
97 | struct pvclock_vcpu_time_info pvti; | 97 | struct pvclock_vcpu_time_info pvti; |
98 | u32 migrate_count; | ||
99 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); | 98 | } __attribute__((__aligned__(SMP_CACHE_BYTES))); |
100 | 99 | ||
101 | #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info) | 100 | #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info) |
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index cf87de3fc390..64b611782ef0 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -169,7 +169,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock) | |||
169 | struct __raw_tickets tmp = READ_ONCE(lock->tickets); | 169 | struct __raw_tickets tmp = READ_ONCE(lock->tickets); |
170 | 170 | ||
171 | tmp.head &= ~TICKET_SLOWPATH_FLAG; | 171 | tmp.head &= ~TICKET_SLOWPATH_FLAG; |
172 | return (tmp.tail - tmp.head) > TICKET_LOCK_INC; | 172 | return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC; |
173 | } | 173 | } |
174 | #define arch_spin_is_contended arch_spin_is_contended | 174 | #define arch_spin_is_contended arch_spin_is_contended |
175 | 175 | ||
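The cast matters because the two __ticket_t fields are promoted to int before the subtraction; once the ticket counters wrap around, the signed difference goes negative and the lock is wrongly reported as uncontended. A user-space sketch of the behaviour, assuming a 16-bit __ticket_t and TICKET_LOCK_INC of 1 purely for illustration:

	#include <assert.h>

	typedef unsigned short __ticket_t;
	#define TICKET_LOCK_INC 1

	int main(void)
	{
		__ticket_t head = 0xFFFF;	/* head counter about to wrap */
		__ticket_t tail = 0x0001;	/* two tickets ahead, i.e. contended */

		assert(!((tail - head) > TICKET_LOCK_INC));		/* int math: -65534, looks idle */
		assert((__ticket_t)(tail - head) > TICKET_LOCK_INC);	/* wraps to 2, reports contention */
		return 0;
	}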
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 358dcd338915..c44a5d53e464 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h | |||
@@ -269,4 +269,9 @@ static inline bool xen_arch_need_swiotlb(struct device *dev, | |||
269 | return false; | 269 | return false; |
270 | } | 270 | } |
271 | 271 | ||
272 | static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order) | ||
273 | { | ||
274 | return __get_free_pages(__GFP_NOWARN, order); | ||
275 | } | ||
276 | |||
272 | #endif /* _ASM_X86_XEN_PAGE_H */ | 277 | #endif /* _ASM_X86_XEN_PAGE_H */ |
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index 36ce402a3fa5..d820d8eae96b 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c | |||
@@ -27,8 +27,8 @@ | |||
27 | 27 | ||
28 | static const __initconst struct hypervisor_x86 * const hypervisors[] = | 28 | static const __initconst struct hypervisor_x86 * const hypervisors[] = |
29 | { | 29 | { |
30 | #ifdef CONFIG_XEN_PVHVM | 30 | #ifdef CONFIG_XEN |
31 | &x86_hyper_xen_hvm, | 31 | &x86_hyper_xen, |
32 | #endif | 32 | #endif |
33 | &x86_hyper_vmware, | 33 | &x86_hyper_vmware, |
34 | &x86_hyper_ms_hyperv, | 34 | &x86_hyper_ms_hyperv, |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 219d3fb423a1..960e85de13fb 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -2533,34 +2533,6 @@ ssize_t intel_event_sysfs_show(char *page, u64 config) | |||
2533 | return x86_event_sysfs_show(page, config, event); | 2533 | return x86_event_sysfs_show(page, config, event); |
2534 | } | 2534 | } |
2535 | 2535 | ||
2536 | static __initconst const struct x86_pmu core_pmu = { | ||
2537 | .name = "core", | ||
2538 | .handle_irq = x86_pmu_handle_irq, | ||
2539 | .disable_all = x86_pmu_disable_all, | ||
2540 | .enable_all = core_pmu_enable_all, | ||
2541 | .enable = core_pmu_enable_event, | ||
2542 | .disable = x86_pmu_disable_event, | ||
2543 | .hw_config = x86_pmu_hw_config, | ||
2544 | .schedule_events = x86_schedule_events, | ||
2545 | .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, | ||
2546 | .perfctr = MSR_ARCH_PERFMON_PERFCTR0, | ||
2547 | .event_map = intel_pmu_event_map, | ||
2548 | .max_events = ARRAY_SIZE(intel_perfmon_event_map), | ||
2549 | .apic = 1, | ||
2550 | /* | ||
2551 | * Intel PMCs cannot be accessed sanely above 32 bit width, | ||
2552 | * so we install an artificial 1<<31 period regardless of | ||
2553 | * the generic event period: | ||
2554 | */ | ||
2555 | .max_period = (1ULL << 31) - 1, | ||
2556 | .get_event_constraints = intel_get_event_constraints, | ||
2557 | .put_event_constraints = intel_put_event_constraints, | ||
2558 | .event_constraints = intel_core_event_constraints, | ||
2559 | .guest_get_msrs = core_guest_get_msrs, | ||
2560 | .format_attrs = intel_arch_formats_attr, | ||
2561 | .events_sysfs_show = intel_event_sysfs_show, | ||
2562 | }; | ||
2563 | |||
2564 | struct intel_shared_regs *allocate_shared_regs(int cpu) | 2536 | struct intel_shared_regs *allocate_shared_regs(int cpu) |
2565 | { | 2537 | { |
2566 | struct intel_shared_regs *regs; | 2538 | struct intel_shared_regs *regs; |
@@ -2743,6 +2715,44 @@ static struct attribute *intel_arch3_formats_attr[] = { | |||
2743 | NULL, | 2715 | NULL, |
2744 | }; | 2716 | }; |
2745 | 2717 | ||
2718 | static __initconst const struct x86_pmu core_pmu = { | ||
2719 | .name = "core", | ||
2720 | .handle_irq = x86_pmu_handle_irq, | ||
2721 | .disable_all = x86_pmu_disable_all, | ||
2722 | .enable_all = core_pmu_enable_all, | ||
2723 | .enable = core_pmu_enable_event, | ||
2724 | .disable = x86_pmu_disable_event, | ||
2725 | .hw_config = x86_pmu_hw_config, | ||
2726 | .schedule_events = x86_schedule_events, | ||
2727 | .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, | ||
2728 | .perfctr = MSR_ARCH_PERFMON_PERFCTR0, | ||
2729 | .event_map = intel_pmu_event_map, | ||
2730 | .max_events = ARRAY_SIZE(intel_perfmon_event_map), | ||
2731 | .apic = 1, | ||
2732 | /* | ||
2733 | * Intel PMCs cannot be accessed sanely above 32-bit width, | ||
2734 | * so we install an artificial 1<<31 period regardless of | ||
2735 | * the generic event period: | ||
2736 | */ | ||
2737 | .max_period = (1ULL<<31) - 1, | ||
2738 | .get_event_constraints = intel_get_event_constraints, | ||
2739 | .put_event_constraints = intel_put_event_constraints, | ||
2740 | .event_constraints = intel_core_event_constraints, | ||
2741 | .guest_get_msrs = core_guest_get_msrs, | ||
2742 | .format_attrs = intel_arch_formats_attr, | ||
2743 | .events_sysfs_show = intel_event_sysfs_show, | ||
2744 | |||
2745 | /* | ||
2746 | * Virtual (or funny metal) CPU can define x86_pmu.extra_regs | ||
2747 | * together with PMU version 1 and thus be using core_pmu with | ||
2748 | * shared_regs. We need the following callbacks here to allocate | ||
2749 | * it properly. | ||
2750 | */ | ||
2751 | .cpu_prepare = intel_pmu_cpu_prepare, | ||
2752 | .cpu_starting = intel_pmu_cpu_starting, | ||
2753 | .cpu_dying = intel_pmu_cpu_dying, | ||
2754 | }; | ||
2755 | |||
2746 | static __initconst const struct x86_pmu intel_pmu = { | 2756 | static __initconst const struct x86_pmu intel_pmu = { |
2747 | .name = "Intel", | 2757 | .name = "Intel", |
2748 | .handle_irq = intel_pmu_handle_irq, | 2758 | .handle_irq = intel_pmu_handle_irq, |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c index 3001015b755c..4562e9e22c60 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c | |||
@@ -1,6 +1,13 @@ | |||
1 | /* Nehalem/SandyBridge/Haswell uncore support */ | 1 | /* Nehalem/SandyBridge/Haswell uncore support */ |
2 | #include "perf_event_intel_uncore.h" | 2 | #include "perf_event_intel_uncore.h" |
3 | 3 | ||
4 | /* Uncore IMC PCI IDs */ | ||
5 | #define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100 | ||
6 | #define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154 | ||
7 | #define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150 | ||
8 | #define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 | ||
9 | #define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04 | ||
10 | |||
4 | /* SNB event control */ | 11 | /* SNB event control */ |
5 | #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff | 12 | #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff |
6 | #define SNB_UNC_CTL_UMASK_MASK 0x0000ff00 | 13 | #define SNB_UNC_CTL_UMASK_MASK 0x0000ff00 |
@@ -472,6 +479,10 @@ static const struct pci_device_id hsw_uncore_pci_ids[] = { | |||
472 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC), | 479 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC), |
473 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | 480 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), |
474 | }, | 481 | }, |
482 | { /* IMC */ | ||
483 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC), | ||
484 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
485 | }, | ||
475 | { /* end: all zeroes */ }, | 486 | { /* end: all zeroes */ }, |
476 | }; | 487 | }; |
477 | 488 | ||
@@ -502,6 +513,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { | |||
502 | IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver), /* 3rd Gen Core processor */ | 513 | IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver), /* 3rd Gen Core processor */ |
503 | IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */ | 514 | IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */ |
504 | IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */ | 515 | IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */ |
516 | IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */ | ||
505 | { /* end marker */ } | 517 | { /* end marker */ } |
506 | }; | 518 | }; |
507 | 519 | ||
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 8213da62b1b7..6e338e3b1dc0 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -57,7 +57,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = { | |||
57 | .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, | 57 | .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 }, |
58 | #endif | 58 | #endif |
59 | }; | 59 | }; |
60 | EXPORT_PER_CPU_SYMBOL_GPL(cpu_tss); | 60 | EXPORT_PER_CPU_SYMBOL(cpu_tss); |
61 | 61 | ||
62 | #ifdef CONFIG_X86_64 | 62 | #ifdef CONFIG_X86_64 |
63 | static DEFINE_PER_CPU(unsigned char, is_idle); | 63 | static DEFINE_PER_CPU(unsigned char, is_idle); |
@@ -156,11 +156,13 @@ void flush_thread(void) | |||
156 | /* FPU state will be reallocated lazily at the first use. */ | 156 | /* FPU state will be reallocated lazily at the first use. */ |
157 | drop_fpu(tsk); | 157 | drop_fpu(tsk); |
158 | free_thread_xstate(tsk); | 158 | free_thread_xstate(tsk); |
159 | } else if (!used_math()) { | 159 | } else { |
160 | /* kthread execs. TODO: cleanup this horror. */ | 160 | if (!tsk_used_math(tsk)) { |
161 | if (WARN_ON(init_fpu(tsk))) | 161 | /* kthread execs. TODO: cleanup this horror. */ |
162 | force_sig(SIGKILL, tsk); | 162 | if (WARN_ON(init_fpu(tsk))) |
163 | user_fpu_begin(); | 163 | force_sig(SIGKILL, tsk); |
164 | user_fpu_begin(); | ||
165 | } | ||
164 | restore_init_xstate(); | 166 | restore_init_xstate(); |
165 | } | 167 | } |
166 | } | 168 | } |
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c index e5ecd20e72dd..2f355d229a58 100644 --- a/arch/x86/kernel/pvclock.c +++ b/arch/x86/kernel/pvclock.c | |||
@@ -141,46 +141,7 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock, | |||
141 | set_normalized_timespec(ts, now.tv_sec, now.tv_nsec); | 141 | set_normalized_timespec(ts, now.tv_sec, now.tv_nsec); |
142 | } | 142 | } |
143 | 143 | ||
144 | static struct pvclock_vsyscall_time_info *pvclock_vdso_info; | ||
145 | |||
146 | static struct pvclock_vsyscall_time_info * | ||
147 | pvclock_get_vsyscall_user_time_info(int cpu) | ||
148 | { | ||
149 | if (!pvclock_vdso_info) { | ||
150 | BUG(); | ||
151 | return NULL; | ||
152 | } | ||
153 | |||
154 | return &pvclock_vdso_info[cpu]; | ||
155 | } | ||
156 | |||
157 | struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu) | ||
158 | { | ||
159 | return &pvclock_get_vsyscall_user_time_info(cpu)->pvti; | ||
160 | } | ||
161 | |||
162 | #ifdef CONFIG_X86_64 | 144 | #ifdef CONFIG_X86_64 |
163 | static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l, | ||
164 | void *v) | ||
165 | { | ||
166 | struct task_migration_notifier *mn = v; | ||
167 | struct pvclock_vsyscall_time_info *pvti; | ||
168 | |||
169 | pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu); | ||
170 | |||
171 | /* this is NULL when pvclock vsyscall is not initialized */ | ||
172 | if (unlikely(pvti == NULL)) | ||
173 | return NOTIFY_DONE; | ||
174 | |||
175 | pvti->migrate_count++; | ||
176 | |||
177 | return NOTIFY_DONE; | ||
178 | } | ||
179 | |||
180 | static struct notifier_block pvclock_migrate = { | ||
181 | .notifier_call = pvclock_task_migrate, | ||
182 | }; | ||
183 | |||
184 | /* | 145 | /* |
185 | * Initialize the generic pvclock vsyscall state. This will allocate | 146 | * Initialize the generic pvclock vsyscall state. This will allocate |
186 | * a/some page(s) for the per-vcpu pvclock information, set up a | 147 | * a/some page(s) for the per-vcpu pvclock information, set up a |
@@ -194,17 +155,12 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i, | |||
194 | 155 | ||
195 | WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE); | 156 | WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE); |
196 | 157 | ||
197 | pvclock_vdso_info = i; | ||
198 | |||
199 | for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) { | 158 | for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) { |
200 | __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx, | 159 | __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx, |
201 | __pa(i) + (idx*PAGE_SIZE), | 160 | __pa(i) + (idx*PAGE_SIZE), |
202 | PAGE_KERNEL_VVAR); | 161 | PAGE_KERNEL_VVAR); |
203 | } | 162 | } |
204 | 163 | ||
205 | |||
206 | register_task_migration_notifier(&pvclock_migrate); | ||
207 | |||
208 | return 0; | 164 | return 0; |
209 | } | 165 | } |
210 | #endif | 166 | #endif |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ed31c31b2485..c73efcd03e29 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1669,12 +1669,28 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1669 | &guest_hv_clock, sizeof(guest_hv_clock)))) | 1669 | &guest_hv_clock, sizeof(guest_hv_clock)))) |
1670 | return 0; | 1670 | return 0; |
1671 | 1671 | ||
1672 | /* | 1672 | /* This VCPU is paused, but it's legal for a guest to read another |
1673 | * The interface expects us to write an even number signaling that the | 1673 | * VCPU's kvmclock, so we really have to follow the specification where |
1674 | * update is finished. Since the guest won't see the intermediate | 1674 | * it says that version is odd if data is being modified, and even after |
1675 | * state, we just increase by 2 at the end. | 1675 | * it is consistent. |
1676 | * | ||
1677 | * Version field updates must be kept separate. This is because | ||
1678 | * kvm_write_guest_cached might use a "rep movs" instruction, and | ||
1679 | * writes within a string instruction are weakly ordered. So there | ||
1680 | * are three writes overall. | ||
1681 | * | ||
1682 | * As a small optimization, only write the version field in the first | ||
1683 | * and third write. The vcpu->pv_time cache is still valid, because the | ||
1684 | * version field is the first in the struct. | ||
1676 | */ | 1685 | */ |
1677 | vcpu->hv_clock.version = guest_hv_clock.version + 2; | 1686 | BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0); |
1687 | |||
1688 | vcpu->hv_clock.version = guest_hv_clock.version + 1; | ||
1689 | kvm_write_guest_cached(v->kvm, &vcpu->pv_time, | ||
1690 | &vcpu->hv_clock, | ||
1691 | sizeof(vcpu->hv_clock.version)); | ||
1692 | |||
1693 | smp_wmb(); | ||
1678 | 1694 | ||
1679 | /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ | 1695 | /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ |
1680 | pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); | 1696 | pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); |
@@ -1695,6 +1711,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1695 | kvm_write_guest_cached(v->kvm, &vcpu->pv_time, | 1711 | kvm_write_guest_cached(v->kvm, &vcpu->pv_time, |
1696 | &vcpu->hv_clock, | 1712 | &vcpu->hv_clock, |
1697 | sizeof(vcpu->hv_clock)); | 1713 | sizeof(vcpu->hv_clock)); |
1714 | |||
1715 | smp_wmb(); | ||
1716 | |||
1717 | vcpu->hv_clock.version++; | ||
1718 | kvm_write_guest_cached(v->kvm, &vcpu->pv_time, | ||
1719 | &vcpu->hv_clock, | ||
1720 | sizeof(vcpu->hv_clock.version)); | ||
1698 | return 0; | 1721 | return 0; |
1699 | } | 1722 | } |
1700 | 1723 | ||
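For reference, the guest side of the protocol described in the new comment behaves like a seqcount reader: it rejects odd version values and retries if the version changed while the payload was read. A simplified user-space sketch, not the kernel's actual pvclock reader, with the structure trimmed to the fields used here:

	#include <stdint.h>

	struct hv_clock {
		volatile uint32_t version;	/* first field, as the BUILD_BUG_ON above asserts */
		uint64_t system_time;
	};

	static uint64_t read_system_time(struct hv_clock *c)
	{
		uint32_t v;
		uint64_t t;

		for (;;) {
			v = c->version;
			if (v & 1)			/* odd: host is mid-update */
				continue;
			__sync_synchronize();		/* pairs with the host's smp_wmb() */
			t = c->system_time;
			__sync_synchronize();
			if (c->version == v)		/* unchanged: consistent snapshot */
				return t;
		}
	}

	int main(void)
	{
		struct hv_clock c = { .version = 2, .system_time = 123456789ULL };

		return read_system_time(&c) == 123456789ULL ? 0 : 1;
	}

The three host-side writes (version, payload, version) separated by smp_wmb() are exactly what makes such a reader safe even when kvm_write_guest_cached() uses weakly ordered string moves.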
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 5ead4d6cf3a7..70e7444c6835 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -351,18 +351,20 @@ int arch_ioremap_pmd_supported(void) | |||
351 | */ | 351 | */ |
352 | void *xlate_dev_mem_ptr(phys_addr_t phys) | 352 | void *xlate_dev_mem_ptr(phys_addr_t phys) |
353 | { | 353 | { |
354 | void *addr; | 354 | unsigned long start = phys & PAGE_MASK; |
355 | unsigned long start = phys & PAGE_MASK; | 355 | unsigned long offset = phys & ~PAGE_MASK; |
356 | unsigned long vaddr; | ||
356 | 357 | ||
357 | /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */ | 358 | /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */ |
358 | if (page_is_ram(start >> PAGE_SHIFT)) | 359 | if (page_is_ram(start >> PAGE_SHIFT)) |
359 | return __va(phys); | 360 | return __va(phys); |
360 | 361 | ||
361 | addr = (void __force *)ioremap_cache(start, PAGE_SIZE); | 362 | vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE); |
362 | if (addr) | 363 | /* Only add the offset on success and return NULL if the ioremap() failed: */ |
363 | addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK)); | 364 | if (vaddr) |
365 | vaddr += offset; | ||
364 | 366 | ||
365 | return addr; | 367 | return (void *)vaddr; |
366 | } | 368 | } |
367 | 369 | ||
368 | void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) | 370 | void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) |
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index e4695985f9de..d93963340c3c 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -325,6 +325,26 @@ static void release_pci_root_info(struct pci_host_bridge *bridge) | |||
325 | kfree(info); | 325 | kfree(info); |
326 | } | 326 | } |
327 | 327 | ||
328 | /* | ||
329 | * An IO port or MMIO resource assigned to a PCI host bridge may be | ||
330 | * consumed by the host bridge itself or available to its child | ||
331 | * bus/devices. The ACPI specification defines a bit (Producer/Consumer) | ||
332 | * to tell whether the resource is consumed by the host bridge itself, | ||
333 | * but firmware hasn't used that bit consistently, so we can't rely on it. | ||
334 | * | ||
335 | * On x86 and IA64 platforms, all IO port and MMIO resources are assumed | ||
336 | * to be available to child bus/devices except one special case: | ||
337 | * IO port [0xCF8-0xCFF] is consumed by the host bridge itself | ||
338 | * to access PCI configuration space. | ||
339 | * | ||
340 | * So explicitly filter out PCI CFG IO ports[0xCF8-0xCFF]. | ||
341 | */ | ||
342 | static bool resource_is_pcicfg_ioport(struct resource *res) | ||
343 | { | ||
344 | return (res->flags & IORESOURCE_IO) && | ||
345 | res->start == 0xCF8 && res->end == 0xCFF; | ||
346 | } | ||
347 | |||
328 | static void probe_pci_root_info(struct pci_root_info *info, | 348 | static void probe_pci_root_info(struct pci_root_info *info, |
329 | struct acpi_device *device, | 349 | struct acpi_device *device, |
330 | int busnum, int domain, | 350 | int busnum, int domain, |
@@ -346,8 +366,8 @@ static void probe_pci_root_info(struct pci_root_info *info, | |||
346 | "no IO and memory resources present in _CRS\n"); | 366 | "no IO and memory resources present in _CRS\n"); |
347 | else | 367 | else |
348 | resource_list_for_each_entry_safe(entry, tmp, list) { | 368 | resource_list_for_each_entry_safe(entry, tmp, list) { |
349 | if ((entry->res->flags & IORESOURCE_WINDOW) == 0 || | 369 | if ((entry->res->flags & IORESOURCE_DISABLED) || |
350 | (entry->res->flags & IORESOURCE_DISABLED)) | 370 | resource_is_pcicfg_ioport(entry->res)) |
351 | resource_list_destroy_entry(entry); | 371 | resource_list_destroy_entry(entry); |
352 | else | 372 | else |
353 | entry->res->name = info->name; | 373 | entry->res->name = info->name; |
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 40d2473836c9..9793322751e0 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c | |||
@@ -82,15 +82,18 @@ static notrace cycle_t vread_pvclock(int *mode) | |||
82 | cycle_t ret; | 82 | cycle_t ret; |
83 | u64 last; | 83 | u64 last; |
84 | u32 version; | 84 | u32 version; |
85 | u32 migrate_count; | ||
86 | u8 flags; | 85 | u8 flags; |
87 | unsigned cpu, cpu1; | 86 | unsigned cpu, cpu1; |
88 | 87 | ||
89 | 88 | ||
90 | /* | 89 | /* |
91 | * When looping to get a consistent (time-info, tsc) pair, we | 90 | * Note: hypervisor must guarantee that: |
92 | * also need to deal with the possibility we can switch vcpus, | 91 | * 1. cpu ID number maps 1:1 to per-CPU pvclock time info. |
93 | * so make sure we always re-fetch time-info for the current vcpu. | 92 | * 2. that per-CPU pvclock time info is updated if the |
93 | * underlying CPU changes. | ||
94 | * 3. that version is increased whenever underlying CPU | ||
95 | * changes. | ||
96 | * | ||
94 | */ | 97 | */ |
95 | do { | 98 | do { |
96 | cpu = __getcpu() & VGETCPU_CPU_MASK; | 99 | cpu = __getcpu() & VGETCPU_CPU_MASK; |
@@ -99,27 +102,20 @@ static notrace cycle_t vread_pvclock(int *mode) | |||
99 | * __getcpu() calls (Gleb). | 102 | * __getcpu() calls (Gleb). |
100 | */ | 103 | */ |
101 | 104 | ||
102 | /* Make sure migrate_count will change if we leave the VCPU. */ | 105 | pvti = get_pvti(cpu); |
103 | do { | ||
104 | pvti = get_pvti(cpu); | ||
105 | migrate_count = pvti->migrate_count; | ||
106 | |||
107 | cpu1 = cpu; | ||
108 | cpu = __getcpu() & VGETCPU_CPU_MASK; | ||
109 | } while (unlikely(cpu != cpu1)); | ||
110 | 106 | ||
111 | version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags); | 107 | version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags); |
112 | 108 | ||
113 | /* | 109 | /* |
114 | * Test we're still on the cpu as well as the version. | 110 | * Test we're still on the cpu as well as the version. |
115 | * - We must read TSC of pvti's VCPU. | 111 | * We could have been migrated just after the first |
116 | * - KVM doesn't follow the versioning protocol, so data could | 112 | * vgetcpu but before fetching the version, so we |
117 | * change before version if we left the VCPU. | 113 | * wouldn't notice a version change. |
118 | */ | 114 | */ |
119 | smp_rmb(); | 115 | cpu1 = __getcpu() & VGETCPU_CPU_MASK; |
120 | } while (unlikely((pvti->pvti.version & 1) || | 116 | } while (unlikely(cpu != cpu1 || |
121 | pvti->pvti.version != version || | 117 | (pvti->pvti.version & 1) || |
122 | pvti->migrate_count != migrate_count)); | 118 | pvti->pvti.version != version)); |
123 | 119 | ||
124 | if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT))) | 120 | if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT))) |
125 | *mode = VCLOCK_NONE; | 121 | *mode = VCLOCK_NONE; |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 94578efd3067..46957ead3060 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -1760,6 +1760,9 @@ static struct notifier_block xen_hvm_cpu_notifier = { | |||
1760 | 1760 | ||
1761 | static void __init xen_hvm_guest_init(void) | 1761 | static void __init xen_hvm_guest_init(void) |
1762 | { | 1762 | { |
1763 | if (xen_pv_domain()) | ||
1764 | return; | ||
1765 | |||
1763 | init_hvm_pv_info(); | 1766 | init_hvm_pv_info(); |
1764 | 1767 | ||
1765 | xen_hvm_init_shared_info(); | 1768 | xen_hvm_init_shared_info(); |
@@ -1775,6 +1778,7 @@ static void __init xen_hvm_guest_init(void) | |||
1775 | xen_hvm_init_time_ops(); | 1778 | xen_hvm_init_time_ops(); |
1776 | xen_hvm_init_mmu_ops(); | 1779 | xen_hvm_init_mmu_ops(); |
1777 | } | 1780 | } |
1781 | #endif | ||
1778 | 1782 | ||
1779 | static bool xen_nopv = false; | 1783 | static bool xen_nopv = false; |
1780 | static __init int xen_parse_nopv(char *arg) | 1784 | static __init int xen_parse_nopv(char *arg) |
@@ -1784,14 +1788,11 @@ static __init int xen_parse_nopv(char *arg) | |||
1784 | } | 1788 | } |
1785 | early_param("xen_nopv", xen_parse_nopv); | 1789 | early_param("xen_nopv", xen_parse_nopv); |
1786 | 1790 | ||
1787 | static uint32_t __init xen_hvm_platform(void) | 1791 | static uint32_t __init xen_platform(void) |
1788 | { | 1792 | { |
1789 | if (xen_nopv) | 1793 | if (xen_nopv) |
1790 | return 0; | 1794 | return 0; |
1791 | 1795 | ||
1792 | if (xen_pv_domain()) | ||
1793 | return 0; | ||
1794 | |||
1795 | return xen_cpuid_base(); | 1796 | return xen_cpuid_base(); |
1796 | } | 1797 | } |
1797 | 1798 | ||
@@ -1809,11 +1810,19 @@ bool xen_hvm_need_lapic(void) | |||
1809 | } | 1810 | } |
1810 | EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); | 1811 | EXPORT_SYMBOL_GPL(xen_hvm_need_lapic); |
1811 | 1812 | ||
1812 | const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = { | 1813 | static void xen_set_cpu_features(struct cpuinfo_x86 *c) |
1813 | .name = "Xen HVM", | 1814 | { |
1814 | .detect = xen_hvm_platform, | 1815 | if (xen_pv_domain()) |
1816 | clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); | ||
1817 | } | ||
1818 | |||
1819 | const struct hypervisor_x86 x86_hyper_xen = { | ||
1820 | .name = "Xen", | ||
1821 | .detect = xen_platform, | ||
1822 | #ifdef CONFIG_XEN_PVHVM | ||
1815 | .init_platform = xen_hvm_guest_init, | 1823 | .init_platform = xen_hvm_guest_init, |
1824 | #endif | ||
1816 | .x2apic_available = xen_x2apic_para_available, | 1825 | .x2apic_available = xen_x2apic_para_available, |
1826 | .set_cpu_features = xen_set_cpu_features, | ||
1817 | }; | 1827 | }; |
1818 | EXPORT_SYMBOL(x86_hyper_xen_hvm); | 1828 | EXPORT_SYMBOL(x86_hyper_xen); |
1819 | #endif | ||
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index d9497698645a..53b4c0811f4f 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c | |||
@@ -88,7 +88,17 @@ static void xen_vcpu_notify_restore(void *data) | |||
88 | tick_resume_local(); | 88 | tick_resume_local(); |
89 | } | 89 | } |
90 | 90 | ||
91 | static void xen_vcpu_notify_suspend(void *data) | ||
92 | { | ||
93 | tick_suspend_local(); | ||
94 | } | ||
95 | |||
91 | void xen_arch_resume(void) | 96 | void xen_arch_resume(void) |
92 | { | 97 | { |
93 | on_each_cpu(xen_vcpu_notify_restore, NULL, 1); | 98 | on_each_cpu(xen_vcpu_notify_restore, NULL, 1); |
94 | } | 99 | } |
100 | |||
101 | void xen_arch_suspend(void) | ||
102 | { | ||
103 | on_each_cpu(xen_vcpu_notify_suspend, NULL, 1); | ||
104 | } | ||
diff --git a/block/blk-core.c b/block/blk-core.c index fd154b94447a..7871603f0a29 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -552,6 +552,8 @@ void blk_cleanup_queue(struct request_queue *q) | |||
552 | q->queue_lock = &q->__queue_lock; | 552 | q->queue_lock = &q->__queue_lock; |
553 | spin_unlock_irq(lock); | 553 | spin_unlock_irq(lock); |
554 | 554 | ||
555 | bdi_destroy(&q->backing_dev_info); | ||
556 | |||
555 | /* @q is and will stay empty, shutdown and put */ | 557 | /* @q is and will stay empty, shutdown and put */ |
556 | blk_put_queue(q); | 558 | blk_put_queue(q); |
557 | } | 559 | } |
diff --git a/block/blk-mq.c b/block/blk-mq.c index ade8a2d1b0aa..e68b71b85a7e 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -677,8 +677,11 @@ static void blk_mq_rq_timer(unsigned long priv) | |||
677 | data.next = blk_rq_timeout(round_jiffies_up(data.next)); | 677 | data.next = blk_rq_timeout(round_jiffies_up(data.next)); |
678 | mod_timer(&q->timeout, data.next); | 678 | mod_timer(&q->timeout, data.next); |
679 | } else { | 679 | } else { |
680 | queue_for_each_hw_ctx(q, hctx, i) | 680 | queue_for_each_hw_ctx(q, hctx, i) { |
681 | blk_mq_tag_idle(hctx); | 681 | /* the hctx may be unmapped, so check it here */ |
682 | if (blk_mq_hw_queue_mapped(hctx)) | ||
683 | blk_mq_tag_idle(hctx); | ||
684 | } | ||
682 | } | 685 | } |
683 | } | 686 | } |
684 | 687 | ||
@@ -855,6 +858,16 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx) | |||
855 | spin_lock(&hctx->lock); | 858 | spin_lock(&hctx->lock); |
856 | list_splice(&rq_list, &hctx->dispatch); | 859 | list_splice(&rq_list, &hctx->dispatch); |
857 | spin_unlock(&hctx->lock); | 860 | spin_unlock(&hctx->lock); |
861 | /* | ||
862 | * the queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but | ||
863 | * it's possible the queue is stopped and restarted again | ||
864 | * before this. Queue restart will dispatch requests. And since | ||
865 | * requests in rq_list aren't added into hctx->dispatch yet, | ||
866 | * the requests in rq_list might get lost. | ||
867 | * | ||
868 | * blk_mq_run_hw_queue() already checks the STOPPED bit | ||
869 | */ | ||
870 | blk_mq_run_hw_queue(hctx, true); | ||
858 | } | 871 | } |
859 | } | 872 | } |
860 | 873 | ||
@@ -1571,22 +1584,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu) | |||
1571 | return NOTIFY_OK; | 1584 | return NOTIFY_OK; |
1572 | } | 1585 | } |
1573 | 1586 | ||
1574 | static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu) | ||
1575 | { | ||
1576 | struct request_queue *q = hctx->queue; | ||
1577 | struct blk_mq_tag_set *set = q->tag_set; | ||
1578 | |||
1579 | if (set->tags[hctx->queue_num]) | ||
1580 | return NOTIFY_OK; | ||
1581 | |||
1582 | set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num); | ||
1583 | if (!set->tags[hctx->queue_num]) | ||
1584 | return NOTIFY_STOP; | ||
1585 | |||
1586 | hctx->tags = set->tags[hctx->queue_num]; | ||
1587 | return NOTIFY_OK; | ||
1588 | } | ||
1589 | |||
1590 | static int blk_mq_hctx_notify(void *data, unsigned long action, | 1587 | static int blk_mq_hctx_notify(void *data, unsigned long action, |
1591 | unsigned int cpu) | 1588 | unsigned int cpu) |
1592 | { | 1589 | { |
@@ -1594,8 +1591,11 @@ static int blk_mq_hctx_notify(void *data, unsigned long action, | |||
1594 | 1591 | ||
1595 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) | 1592 | if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) |
1596 | return blk_mq_hctx_cpu_offline(hctx, cpu); | 1593 | return blk_mq_hctx_cpu_offline(hctx, cpu); |
1597 | else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) | 1594 | |
1598 | return blk_mq_hctx_cpu_online(hctx, cpu); | 1595 | /* |
1596 | * In case of CPU online, tags may be reallocated | ||
1597 | * in blk_mq_map_swqueue() after mapping is updated. | ||
1598 | */ | ||
1599 | 1599 | ||
1600 | return NOTIFY_OK; | 1600 | return NOTIFY_OK; |
1601 | } | 1601 | } |
@@ -1775,6 +1775,7 @@ static void blk_mq_map_swqueue(struct request_queue *q) | |||
1775 | unsigned int i; | 1775 | unsigned int i; |
1776 | struct blk_mq_hw_ctx *hctx; | 1776 | struct blk_mq_hw_ctx *hctx; |
1777 | struct blk_mq_ctx *ctx; | 1777 | struct blk_mq_ctx *ctx; |
1778 | struct blk_mq_tag_set *set = q->tag_set; | ||
1778 | 1779 | ||
1779 | queue_for_each_hw_ctx(q, hctx, i) { | 1780 | queue_for_each_hw_ctx(q, hctx, i) { |
1780 | cpumask_clear(hctx->cpumask); | 1781 | cpumask_clear(hctx->cpumask); |
@@ -1803,16 +1804,20 @@ static void blk_mq_map_swqueue(struct request_queue *q) | |||
1803 | * disable it and free the request entries. | 1804 | * disable it and free the request entries. |
1804 | */ | 1805 | */ |
1805 | if (!hctx->nr_ctx) { | 1806 | if (!hctx->nr_ctx) { |
1806 | struct blk_mq_tag_set *set = q->tag_set; | ||
1807 | |||
1808 | if (set->tags[i]) { | 1807 | if (set->tags[i]) { |
1809 | blk_mq_free_rq_map(set, set->tags[i], i); | 1808 | blk_mq_free_rq_map(set, set->tags[i], i); |
1810 | set->tags[i] = NULL; | 1809 | set->tags[i] = NULL; |
1811 | hctx->tags = NULL; | ||
1812 | } | 1810 | } |
1811 | hctx->tags = NULL; | ||
1813 | continue; | 1812 | continue; |
1814 | } | 1813 | } |
1815 | 1814 | ||
1815 | /* unmapped hw queue can be remapped after CPU topo changed */ | ||
1816 | if (!set->tags[i]) | ||
1817 | set->tags[i] = blk_mq_init_rq_map(set, i); | ||
1818 | hctx->tags = set->tags[i]; | ||
1819 | WARN_ON(!hctx->tags); | ||
1820 | |||
1816 | /* | 1821 | /* |
1817 | * Set the map size to the number of mapped software queues. | 1822 | * Set the map size to the number of mapped software queues. |
1818 | * This is more accurate and more efficient than looping | 1823 | * This is more accurate and more efficient than looping |
@@ -2090,9 +2095,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, | |||
2090 | */ | 2095 | */ |
2091 | list_for_each_entry(q, &all_q_list, all_q_node) | 2096 | list_for_each_entry(q, &all_q_list, all_q_node) |
2092 | blk_mq_freeze_queue_start(q); | 2097 | blk_mq_freeze_queue_start(q); |
2093 | list_for_each_entry(q, &all_q_list, all_q_node) | 2098 | list_for_each_entry(q, &all_q_list, all_q_node) { |
2094 | blk_mq_freeze_queue_wait(q); | 2099 | blk_mq_freeze_queue_wait(q); |
2095 | 2100 | ||
2101 | /* | ||
2102 | * timeout handler can't touch hw queue during the | ||
2103 | * reinitialization | ||
2104 | */ | ||
2105 | del_timer_sync(&q->timeout); | ||
2106 | } | ||
2107 | |||
2096 | list_for_each_entry(q, &all_q_list, all_q_node) | 2108 | list_for_each_entry(q, &all_q_list, all_q_node) |
2097 | blk_mq_queue_reinit(q); | 2109 | blk_mq_queue_reinit(q); |
2098 | 2110 | ||
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index faaf36ade7eb..2b8fd302f677 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -522,8 +522,6 @@ static void blk_release_queue(struct kobject *kobj) | |||
522 | 522 | ||
523 | blk_trace_shutdown(q); | 523 | blk_trace_shutdown(q); |
524 | 524 | ||
525 | bdi_destroy(&q->backing_dev_info); | ||
526 | |||
527 | ida_simple_remove(&blk_queue_ida, q->id); | 525 | ida_simple_remove(&blk_queue_ida, q->id); |
528 | call_rcu(&q->rcu_head, blk_free_queue_rcu); | 526 | call_rcu(&q->rcu_head, blk_free_queue_rcu); |
529 | } | 527 | } |
diff --git a/block/bounce.c b/block/bounce.c index ab21ba203d5c..ed9dd8067120 100644 --- a/block/bounce.c +++ b/block/bounce.c | |||
@@ -221,8 +221,8 @@ bounce: | |||
221 | if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force) | 221 | if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force) |
222 | continue; | 222 | continue; |
223 | 223 | ||
224 | inc_zone_page_state(to->bv_page, NR_BOUNCE); | ||
225 | to->bv_page = mempool_alloc(pool, q->bounce_gfp); | 224 | to->bv_page = mempool_alloc(pool, q->bounce_gfp); |
225 | inc_zone_page_state(to->bv_page, NR_BOUNCE); | ||
226 | 226 | ||
227 | if (rw == WRITE) { | 227 | if (rw == WRITE) { |
228 | char *vto, *vfrom; | 228 | char *vto, *vfrom; |
diff --git a/block/elevator.c b/block/elevator.c index 59794d0d38e3..8985038f398c 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -157,7 +157,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q, | |||
157 | 157 | ||
158 | eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node); | 158 | eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node); |
159 | if (unlikely(!eq)) | 159 | if (unlikely(!eq)) |
160 | goto err; | 160 | return NULL; |
161 | 161 | ||
162 | eq->type = e; | 162 | eq->type = e; |
163 | kobject_init(&eq->kobj, &elv_ktype); | 163 | kobject_init(&eq->kobj, &elv_ktype); |
@@ -165,10 +165,6 @@ struct elevator_queue *elevator_alloc(struct request_queue *q, | |||
165 | hash_init(eq->hash); | 165 | hash_init(eq->hash); |
166 | 166 | ||
167 | return eq; | 167 | return eq; |
168 | err: | ||
169 | kfree(eq); | ||
170 | elevator_put(e); | ||
171 | return NULL; | ||
172 | } | 168 | } |
173 | EXPORT_SYMBOL(elevator_alloc); | 169 | EXPORT_SYMBOL(elevator_alloc); |
174 | 170 | ||
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c index b193f8425999..ff6d8adc9cda 100644 --- a/drivers/acpi/acpi_pnp.c +++ b/drivers/acpi/acpi_pnp.c | |||
@@ -304,6 +304,8 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = { | |||
304 | {"PNPb006"}, | 304 | {"PNPb006"}, |
305 | /* cs423x-pnpbios */ | 305 | /* cs423x-pnpbios */ |
306 | {"CSC0100"}, | 306 | {"CSC0100"}, |
307 | {"CSC0103"}, | ||
308 | {"CSC0110"}, | ||
307 | {"CSC0000"}, | 309 | {"CSC0000"}, |
308 | {"GIM0100"}, /* Guillemot Turtlebeach something appears to be cs4232 compatible */ | 310 | {"GIM0100"}, /* Guillemot Turtlebeach something appears to be cs4232 compatible */ |
309 | /* es18xx-pnpbios */ | 311 | /* es18xx-pnpbios */ |
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 5589a6e2a023..8244f013f210 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c | |||
@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_get_resources); | |||
573 | * @ares: Input ACPI resource object. | 573 | * @ares: Input ACPI resource object. |
574 | * @types: Valid resource types of IORESOURCE_XXX | 574 | * @types: Valid resource types of IORESOURCE_XXX |
575 | * | 575 | * |
576 | * This is a hepler function to support acpi_dev_get_resources(), which filters | 576 | * This is a helper function to support acpi_dev_get_resources(), which filters |
577 | * ACPI resource objects according to resource types. | 577 | * ACPI resource objects according to resource types. |
578 | */ | 578 | */ |
579 | int acpi_dev_filter_resource_type(struct acpi_resource *ares, | 579 | int acpi_dev_filter_resource_type(struct acpi_resource *ares, |
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index cd827625cf07..01504c819e8f 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c | |||
@@ -684,7 +684,7 @@ static int acpi_sbs_add(struct acpi_device *device) | |||
684 | if (!sbs_manager_broken) { | 684 | if (!sbs_manager_broken) { |
685 | result = acpi_manager_get_info(sbs); | 685 | result = acpi_manager_get_info(sbs); |
686 | if (!result) { | 686 | if (!result) { |
687 | sbs->manager_present = 0; | 687 | sbs->manager_present = 1; |
688 | for (id = 0; id < MAX_SBS_BAT; ++id) | 688 | for (id = 0; id < MAX_SBS_BAT; ++id) |
689 | if ((sbs->batteries_supported & (1 << id))) | 689 | if ((sbs->batteries_supported & (1 << id))) |
690 | acpi_battery_add(sbs, id); | 690 | acpi_battery_add(sbs, id); |
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index 26e5b5060523..bf034f8b7c1a 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/dmi.h> | ||
17 | #include "sbshc.h" | 18 | #include "sbshc.h" |
18 | 19 | ||
19 | #define PREFIX "ACPI: " | 20 | #define PREFIX "ACPI: " |
@@ -87,6 +88,8 @@ enum acpi_smb_offset { | |||
87 | ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */ | 88 | ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */ |
88 | }; | 89 | }; |
89 | 90 | ||
91 | static bool macbook; | ||
92 | |||
90 | static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data) | 93 | static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data) |
91 | { | 94 | { |
92 | return ec_read(hc->offset + address, data); | 95 | return ec_read(hc->offset + address, data); |
@@ -132,6 +135,8 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, | |||
132 | } | 135 | } |
133 | 136 | ||
134 | mutex_lock(&hc->lock); | 137 | mutex_lock(&hc->lock); |
138 | if (macbook) | ||
139 | udelay(5); | ||
135 | if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp)) | 140 | if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp)) |
136 | goto end; | 141 | goto end; |
137 | if (temp) { | 142 | if (temp) { |
@@ -257,12 +262,29 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, | |||
257 | acpi_handle handle, acpi_ec_query_func func, | 262 | acpi_handle handle, acpi_ec_query_func func, |
258 | void *data); | 263 | void *data); |
259 | 264 | ||
265 | static int macbook_dmi_match(const struct dmi_system_id *d) | ||
266 | { | ||
267 | pr_debug("Detected MacBook, enabling workaround\n"); | ||
268 | macbook = true; | ||
269 | return 0; | ||
270 | } | ||
271 | |||
272 | static struct dmi_system_id acpi_smbus_dmi_table[] = { | ||
273 | { macbook_dmi_match, "Apple MacBook", { | ||
274 | DMI_MATCH(DMI_BOARD_VENDOR, "Apple"), | ||
275 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") }, | ||
276 | }, | ||
277 | { }, | ||
278 | }; | ||
279 | |||
260 | static int acpi_smbus_hc_add(struct acpi_device *device) | 280 | static int acpi_smbus_hc_add(struct acpi_device *device) |
261 | { | 281 | { |
262 | int status; | 282 | int status; |
263 | unsigned long long val; | 283 | unsigned long long val; |
264 | struct acpi_smb_hc *hc; | 284 | struct acpi_smb_hc *hc; |
265 | 285 | ||
286 | dmi_check_system(acpi_smbus_dmi_table); | ||
287 | |||
266 | if (!device) | 288 | if (!device) |
267 | return -EINVAL; | 289 | return -EINVAL; |
268 | 290 | ||
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 5f601553b9b0..9dca4b995be0 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
@@ -270,6 +270,7 @@ config ATA_PIIX | |||
270 | config SATA_DWC | 270 | config SATA_DWC |
271 | tristate "DesignWare Cores SATA support" | 271 | tristate "DesignWare Cores SATA support" |
272 | depends on 460EX | 272 | depends on 460EX |
273 | select DW_DMAC | ||
273 | help | 274 | help |
274 | This option enables support for the on-chip SATA controller of the | 275 | This option enables support for the on-chip SATA controller of the |
275 | AppliedMicro processor 460EX. | 276 | AppliedMicro processor 460EX. |
@@ -729,15 +730,6 @@ config PATA_SC1200 | |||
729 | 730 | ||
730 | If unsure, say N. | 731 | If unsure, say N. |
731 | 732 | ||
732 | config PATA_SCC | ||
733 | tristate "Toshiba's Cell Reference Set IDE support" | ||
734 | depends on PCI && PPC_CELLEB | ||
735 | help | ||
736 | This option enables support for the built-in IDE controller on | ||
737 | Toshiba Cell Reference Board. | ||
738 | |||
739 | If unsure, say N. | ||
740 | |||
741 | config PATA_SCH | 733 | config PATA_SCH |
742 | tristate "Intel SCH PATA support" | 734 | tristate "Intel SCH PATA support" |
743 | depends on PCI | 735 | depends on PCI |
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index b67e995179a9..40f7865f20a1 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile | |||
@@ -75,7 +75,6 @@ obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o | |||
75 | obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o | 75 | obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o |
76 | obj-$(CONFIG_PATA_RDC) += pata_rdc.o | 76 | obj-$(CONFIG_PATA_RDC) += pata_rdc.o |
77 | obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o | 77 | obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o |
78 | obj-$(CONFIG_PATA_SCC) += pata_scc.o | ||
79 | obj-$(CONFIG_PATA_SCH) += pata_sch.o | 78 | obj-$(CONFIG_PATA_SCH) += pata_sch.o |
80 | obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o | 79 | obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o |
81 | obj-$(CONFIG_PATA_SIL680) += pata_sil680.o | 80 | obj-$(CONFIG_PATA_SIL680) += pata_sil680.o |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index c7a92a743ed0..65ee94454bbd 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -66,6 +66,7 @@ enum board_ids { | |||
66 | board_ahci_yes_fbs, | 66 | board_ahci_yes_fbs, |
67 | 67 | ||
68 | /* board IDs for specific chipsets in alphabetical order */ | 68 | /* board IDs for specific chipsets in alphabetical order */ |
69 | board_ahci_avn, | ||
69 | board_ahci_mcp65, | 70 | board_ahci_mcp65, |
70 | board_ahci_mcp77, | 71 | board_ahci_mcp77, |
71 | board_ahci_mcp89, | 72 | board_ahci_mcp89, |
@@ -84,6 +85,8 @@ enum board_ids { | |||
84 | static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); | 85 | static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
85 | static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, | 86 | static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, |
86 | unsigned long deadline); | 87 | unsigned long deadline); |
88 | static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, | ||
89 | unsigned long deadline); | ||
87 | static void ahci_mcp89_apple_enable(struct pci_dev *pdev); | 90 | static void ahci_mcp89_apple_enable(struct pci_dev *pdev); |
88 | static bool is_mcp89_apple(struct pci_dev *pdev); | 91 | static bool is_mcp89_apple(struct pci_dev *pdev); |
89 | static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, | 92 | static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, |
@@ -107,6 +110,11 @@ static struct ata_port_operations ahci_p5wdh_ops = { | |||
107 | .hardreset = ahci_p5wdh_hardreset, | 110 | .hardreset = ahci_p5wdh_hardreset, |
108 | }; | 111 | }; |
109 | 112 | ||
113 | static struct ata_port_operations ahci_avn_ops = { | ||
114 | .inherits = &ahci_ops, | ||
115 | .hardreset = ahci_avn_hardreset, | ||
116 | }; | ||
117 | |||
110 | static const struct ata_port_info ahci_port_info[] = { | 118 | static const struct ata_port_info ahci_port_info[] = { |
111 | /* by features */ | 119 | /* by features */ |
112 | [board_ahci] = { | 120 | [board_ahci] = { |
@@ -151,6 +159,12 @@ static const struct ata_port_info ahci_port_info[] = { | |||
151 | .port_ops = &ahci_ops, | 159 | .port_ops = &ahci_ops, |
152 | }, | 160 | }, |
153 | /* by chipsets */ | 161 | /* by chipsets */ |
162 | [board_ahci_avn] = { | ||
163 | .flags = AHCI_FLAG_COMMON, | ||
164 | .pio_mask = ATA_PIO4, | ||
165 | .udma_mask = ATA_UDMA6, | ||
166 | .port_ops = &ahci_avn_ops, | ||
167 | }, | ||
154 | [board_ahci_mcp65] = { | 168 | [board_ahci_mcp65] = { |
155 | AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP | | 169 | AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP | |
156 | AHCI_HFLAG_YES_NCQ), | 170 | AHCI_HFLAG_YES_NCQ), |
@@ -290,14 +304,14 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
290 | { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */ | 304 | { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */ |
291 | { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */ | 305 | { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */ |
292 | { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */ | 306 | { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */ |
293 | { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */ | 307 | { PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */ |
294 | { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */ | 308 | { PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */ |
295 | { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */ | 309 | { PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */ |
296 | { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */ | 310 | { PCI_VDEVICE(INTEL, 0x1f35), board_ahci_avn }, /* Avoton RAID */ |
297 | { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */ | 311 | { PCI_VDEVICE(INTEL, 0x1f36), board_ahci_avn }, /* Avoton RAID */ |
298 | { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */ | 312 | { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */ |
299 | { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */ | 313 | { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */ |
300 | { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */ | 314 | { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */ |
301 | { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */ | 315 | { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */ |
302 | { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */ | 316 | { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */ |
303 | { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ | 317 | { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ |
@@ -670,6 +684,79 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, | |||
670 | return rc; | 684 | return rc; |
671 | } | 685 | } |
672 | 686 | ||
687 | /* | ||
688 | * ahci_avn_hardreset - attempt more aggressive recovery of Avoton ports. | ||
689 | * | ||
690 | * It has been observed with some SSDs that the timing of events in the | ||
691 | * link synchronization phase can leave the port in a state that cannot | ||
692 | * be recovered by a SATA hard reset alone. The failing signature is | ||
693 | * SStatus.DET stuck at 1 ("Device presence detected but Phy | ||
694 | * communication not established"). It was found that unloading and | ||
695 | * reloading the driver when this problem occurs allows the drive | ||
696 | * connection to be recovered (DET advanced to 0x3). The critical | ||
697 | * component of reloading the driver is that the port state machines are | ||
698 | * reset by bouncing "port enable" in the AHCI PCS configuration | ||
699 | * register. So, reproduce that effect by bouncing a port whenever we | ||
700 | * see DET==1 after a reset. | ||
701 | */ | ||
702 | static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, | ||
703 | unsigned long deadline) | ||
704 | { | ||
705 | const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); | ||
706 | struct ata_port *ap = link->ap; | ||
707 | struct ahci_port_priv *pp = ap->private_data; | ||
708 | struct ahci_host_priv *hpriv = ap->host->private_data; | ||
709 | u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; | ||
710 | unsigned long tmo = deadline - jiffies; | ||
711 | struct ata_taskfile tf; | ||
712 | bool online; | ||
713 | int rc, i; | ||
714 | |||
715 | DPRINTK("ENTER\n"); | ||
716 | |||
717 | ahci_stop_engine(ap); | ||
718 | |||
719 | for (i = 0; i < 2; i++) { | ||
720 | u16 val; | ||
721 | u32 sstatus; | ||
722 | int port = ap->port_no; | ||
723 | struct ata_host *host = ap->host; | ||
724 | struct pci_dev *pdev = to_pci_dev(host->dev); | ||
725 | |||
726 | /* clear D2H reception area to properly wait for D2H FIS */ | ||
727 | ata_tf_init(link->device, &tf); | ||
728 | tf.command = ATA_BUSY; | ||
729 | ata_tf_to_fis(&tf, 0, 0, d2h_fis); | ||
730 | |||
731 | rc = sata_link_hardreset(link, timing, deadline, &online, | ||
732 | ahci_check_ready); | ||
733 | |||
734 | if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0 || | ||
735 | (sstatus & 0xf) != 1) | ||
736 | break; | ||
737 | |||
738 | ata_link_printk(link, KERN_INFO, "avn bounce port%d\n", | ||
739 | port); | ||
740 | |||
741 | pci_read_config_word(pdev, 0x92, &val); | ||
742 | val &= ~(1 << port); | ||
743 | pci_write_config_word(pdev, 0x92, val); | ||
744 | ata_msleep(ap, 1000); | ||
745 | val |= 1 << port; | ||
746 | pci_write_config_word(pdev, 0x92, val); | ||
747 | deadline += tmo; | ||
748 | } | ||
749 | |||
750 | hpriv->start_engine(ap); | ||
751 | |||
752 | if (online) | ||
753 | *class = ahci_dev_classify(ap); | ||
754 | |||
755 | DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); | ||
756 | return rc; | ||
757 | } | ||
758 | |||
759 | |||
673 | #ifdef CONFIG_PM | 760 | #ifdef CONFIG_PM |
674 | static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) | 761 | static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) |
675 | { | 762 | { |
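
The Avoton hardreset above hinges on one recovery step: when SStatus.DET sticks at 1 after a hard reset, bounce the port-enable bit in the PCS configuration register and try again. The following is a minimal sketch of just that step, assuming the Avoton-specific PCS location at PCI config offset 0x92 and the one-second settle delay used in the hunk above (neither is generic AHCI behaviour), and using plain msleep() in place of ata_msleep():

#include <linux/pci.h>
#include <linux/delay.h>

/*
 * Bounce "port enable" for one Avoton AHCI port.  The 0x92 PCS offset
 * and the one second settle time are taken from ahci_avn_hardreset()
 * above; they are Avoton-specific assumptions, not general AHCI rules.
 */
static void avn_pcs_bounce_port(struct pci_dev *pdev, int port)
{
	u16 pcs;

	pci_read_config_word(pdev, 0x92, &pcs);
	pcs &= ~(1 << port);			/* disable the port */
	pci_write_config_word(pdev, 0x92, pcs);
	msleep(1000);				/* let the port state machines reset */
	pcs |= 1 << port;			/* re-enable the port */
	pci_write_config_word(pdev, 0x92, pcs);
}
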
diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c index ea0ff005b86c..8ff428fe8e0f 100644 --- a/drivers/ata/ahci_st.c +++ b/drivers/ata/ahci_st.c | |||
@@ -37,7 +37,6 @@ struct st_ahci_drv_data { | |||
37 | struct reset_control *pwr; | 37 | struct reset_control *pwr; |
38 | struct reset_control *sw_rst; | 38 | struct reset_control *sw_rst; |
39 | struct reset_control *pwr_rst; | 39 | struct reset_control *pwr_rst; |
40 | struct ahci_host_priv *hpriv; | ||
41 | }; | 40 | }; |
42 | 41 | ||
43 | static void st_ahci_configure_oob(void __iomem *mmio) | 42 | static void st_ahci_configure_oob(void __iomem *mmio) |
@@ -55,9 +54,10 @@ static void st_ahci_configure_oob(void __iomem *mmio) | |||
55 | writel(new_val, mmio + ST_AHCI_OOBR); | 54 | writel(new_val, mmio + ST_AHCI_OOBR); |
56 | } | 55 | } |
57 | 56 | ||
58 | static int st_ahci_deassert_resets(struct device *dev) | 57 | static int st_ahci_deassert_resets(struct ahci_host_priv *hpriv, |
58 | struct device *dev) | ||
59 | { | 59 | { |
60 | struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev); | 60 | struct st_ahci_drv_data *drv_data = hpriv->plat_data; |
61 | int err; | 61 | int err; |
62 | 62 | ||
63 | if (drv_data->pwr) { | 63 | if (drv_data->pwr) { |
@@ -90,8 +90,8 @@ static int st_ahci_deassert_resets(struct device *dev) | |||
90 | static void st_ahci_host_stop(struct ata_host *host) | 90 | static void st_ahci_host_stop(struct ata_host *host) |
91 | { | 91 | { |
92 | struct ahci_host_priv *hpriv = host->private_data; | 92 | struct ahci_host_priv *hpriv = host->private_data; |
93 | struct st_ahci_drv_data *drv_data = hpriv->plat_data; | ||
93 | struct device *dev = host->dev; | 94 | struct device *dev = host->dev; |
94 | struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev); | ||
95 | int err; | 95 | int err; |
96 | 96 | ||
97 | if (drv_data->pwr) { | 97 | if (drv_data->pwr) { |
@@ -103,29 +103,30 @@ static void st_ahci_host_stop(struct ata_host *host) | |||
103 | ahci_platform_disable_resources(hpriv); | 103 | ahci_platform_disable_resources(hpriv); |
104 | } | 104 | } |
105 | 105 | ||
106 | static int st_ahci_probe_resets(struct platform_device *pdev) | 106 | static int st_ahci_probe_resets(struct ahci_host_priv *hpriv, |
107 | struct device *dev) | ||
107 | { | 108 | { |
108 | struct st_ahci_drv_data *drv_data = platform_get_drvdata(pdev); | 109 | struct st_ahci_drv_data *drv_data = hpriv->plat_data; |
109 | 110 | ||
110 | drv_data->pwr = devm_reset_control_get(&pdev->dev, "pwr-dwn"); | 111 | drv_data->pwr = devm_reset_control_get(dev, "pwr-dwn"); |
111 | if (IS_ERR(drv_data->pwr)) { | 112 | if (IS_ERR(drv_data->pwr)) { |
112 | dev_info(&pdev->dev, "power reset control not defined\n"); | 113 | dev_info(dev, "power reset control not defined\n"); |
113 | drv_data->pwr = NULL; | 114 | drv_data->pwr = NULL; |
114 | } | 115 | } |
115 | 116 | ||
116 | drv_data->sw_rst = devm_reset_control_get(&pdev->dev, "sw-rst"); | 117 | drv_data->sw_rst = devm_reset_control_get(dev, "sw-rst"); |
117 | if (IS_ERR(drv_data->sw_rst)) { | 118 | if (IS_ERR(drv_data->sw_rst)) { |
118 | dev_info(&pdev->dev, "soft reset control not defined\n"); | 119 | dev_info(dev, "soft reset control not defined\n"); |
119 | drv_data->sw_rst = NULL; | 120 | drv_data->sw_rst = NULL; |
120 | } | 121 | } |
121 | 122 | ||
122 | drv_data->pwr_rst = devm_reset_control_get(&pdev->dev, "pwr-rst"); | 123 | drv_data->pwr_rst = devm_reset_control_get(dev, "pwr-rst"); |
123 | if (IS_ERR(drv_data->pwr_rst)) { | 124 | if (IS_ERR(drv_data->pwr_rst)) { |
124 | dev_dbg(&pdev->dev, "power soft reset control not defined\n"); | 125 | dev_dbg(dev, "power soft reset control not defined\n"); |
125 | drv_data->pwr_rst = NULL; | 126 | drv_data->pwr_rst = NULL; |
126 | } | 127 | } |
127 | 128 | ||
128 | return st_ahci_deassert_resets(&pdev->dev); | 129 | return st_ahci_deassert_resets(hpriv, dev); |
129 | } | 130 | } |
130 | 131 | ||
131 | static struct ata_port_operations st_ahci_port_ops = { | 132 | static struct ata_port_operations st_ahci_port_ops = { |
@@ -154,15 +155,12 @@ static int st_ahci_probe(struct platform_device *pdev) | |||
154 | if (!drv_data) | 155 | if (!drv_data) |
155 | return -ENOMEM; | 156 | return -ENOMEM; |
156 | 157 | ||
157 | platform_set_drvdata(pdev, drv_data); | ||
158 | |||
159 | hpriv = ahci_platform_get_resources(pdev); | 158 | hpriv = ahci_platform_get_resources(pdev); |
160 | if (IS_ERR(hpriv)) | 159 | if (IS_ERR(hpriv)) |
161 | return PTR_ERR(hpriv); | 160 | return PTR_ERR(hpriv); |
161 | hpriv->plat_data = drv_data; | ||
162 | 162 | ||
163 | drv_data->hpriv = hpriv; | 163 | err = st_ahci_probe_resets(hpriv, &pdev->dev); |
164 | |||
165 | err = st_ahci_probe_resets(pdev); | ||
166 | if (err) | 164 | if (err) |
167 | return err; | 165 | return err; |
168 | 166 | ||
@@ -170,7 +168,7 @@ static int st_ahci_probe(struct platform_device *pdev) | |||
170 | if (err) | 168 | if (err) |
171 | return err; | 169 | return err; |
172 | 170 | ||
173 | st_ahci_configure_oob(drv_data->hpriv->mmio); | 171 | st_ahci_configure_oob(hpriv->mmio); |
174 | 172 | ||
175 | err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, | 173 | err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, |
176 | &ahci_platform_sht); | 174 | &ahci_platform_sht); |
@@ -185,8 +183,9 @@ static int st_ahci_probe(struct platform_device *pdev) | |||
185 | #ifdef CONFIG_PM_SLEEP | 183 | #ifdef CONFIG_PM_SLEEP |
186 | static int st_ahci_suspend(struct device *dev) | 184 | static int st_ahci_suspend(struct device *dev) |
187 | { | 185 | { |
188 | struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev); | 186 | struct ata_host *host = dev_get_drvdata(dev); |
189 | struct ahci_host_priv *hpriv = drv_data->hpriv; | 187 | struct ahci_host_priv *hpriv = host->private_data; |
188 | struct st_ahci_drv_data *drv_data = hpriv->plat_data; | ||
190 | int err; | 189 | int err; |
191 | 190 | ||
192 | err = ahci_platform_suspend_host(dev); | 191 | err = ahci_platform_suspend_host(dev); |
@@ -208,21 +207,21 @@ static int st_ahci_suspend(struct device *dev) | |||
208 | 207 | ||
209 | static int st_ahci_resume(struct device *dev) | 208 | static int st_ahci_resume(struct device *dev) |
210 | { | 209 | { |
211 | struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev); | 210 | struct ata_host *host = dev_get_drvdata(dev); |
212 | struct ahci_host_priv *hpriv = drv_data->hpriv; | 211 | struct ahci_host_priv *hpriv = host->private_data; |
213 | int err; | 212 | int err; |
214 | 213 | ||
215 | err = ahci_platform_enable_resources(hpriv); | 214 | err = ahci_platform_enable_resources(hpriv); |
216 | if (err) | 215 | if (err) |
217 | return err; | 216 | return err; |
218 | 217 | ||
219 | err = st_ahci_deassert_resets(dev); | 218 | err = st_ahci_deassert_resets(hpriv, dev); |
220 | if (err) { | 219 | if (err) { |
221 | ahci_platform_disable_resources(hpriv); | 220 | ahci_platform_disable_resources(hpriv); |
222 | return err; | 221 | return err; |
223 | } | 222 | } |
224 | 223 | ||
225 | st_ahci_configure_oob(drv_data->hpriv->mmio); | 224 | st_ahci_configure_oob(hpriv->mmio); |
226 | 225 | ||
227 | return ahci_platform_resume_host(dev); | 226 | return ahci_platform_resume_host(dev); |
228 | } | 227 | } |
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 61a9c07e0dff..287c4ba0219f 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c | |||
@@ -1707,8 +1707,7 @@ static void ahci_handle_port_interrupt(struct ata_port *ap, | |||
1707 | if (unlikely(resetting)) | 1707 | if (unlikely(resetting)) |
1708 | status &= ~PORT_IRQ_BAD_PMP; | 1708 | status &= ~PORT_IRQ_BAD_PMP; |
1709 | 1709 | ||
1710 | /* if LPM is enabled, PHYRDY doesn't mean anything */ | 1710 | if (sata_lpm_ignore_phy_events(&ap->link)) { |
1711 | if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) { | ||
1712 | status &= ~PORT_IRQ_PHYRDY; | 1711 | status &= ~PORT_IRQ_PHYRDY; |
1713 | ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG); | 1712 | ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG); |
1714 | } | 1713 | } |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index f6cb1f1b30b7..577849c6611a 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -4235,7 +4235,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4235 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4235 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
4236 | { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | | 4236 | { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | |
4237 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4237 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
4238 | { "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | 4238 | { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
4239 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4239 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
4240 | 4240 | ||
4241 | /* | 4241 | /* |
@@ -6752,6 +6752,38 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, | |||
6752 | return tmp; | 6752 | return tmp; |
6753 | } | 6753 | } |
6754 | 6754 | ||
6755 | /** | ||
6756 | * sata_lpm_ignore_phy_events - test if PHY event should be ignored | ||
6757 | * @link: Link receiving the event | ||
6758 | * | ||
6759 | * Test whether the received PHY event has to be ignored or not. | ||
6760 | * | ||
6761 | * LOCKING: | ||
6762 | * None. | ||
6763 | * | ||
6764 | * RETURNS: | ||
6765 | * True if the event has to be ignored. | ||
6766 | */ | ||
6767 | bool sata_lpm_ignore_phy_events(struct ata_link *link) | ||
6768 | { | ||
6769 | unsigned long lpm_timeout = link->last_lpm_change + | ||
6770 | msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY); | ||
6771 | |||
6772 | /* if LPM is enabled, PHYRDY doesn't mean anything */ | ||
6773 | if (link->lpm_policy > ATA_LPM_MAX_POWER) | ||
6774 | return true; | ||
6775 | |||
6776 | /* ignore the first PHY event after the LPM policy changed | ||
6777 | * as it might be spurious | ||
6778 | */ | ||
6779 | if ((link->flags & ATA_LFLAG_CHANGED) && | ||
6780 | time_before(jiffies, lpm_timeout)) | ||
6781 | return true; | ||
6782 | |||
6783 | return false; | ||
6784 | } | ||
6785 | EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events); | ||
6786 | |||
6755 | /* | 6787 | /* |
6756 | * Dummy port_ops | 6788 | * Dummy port_ops |
6757 | */ | 6789 | */ |
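
sata_lpm_ignore_phy_events() is meant to be consulted from a driver's interrupt path, as the libahci hunk earlier in this series does. A minimal usage sketch, assuming a hypothetical SATA LLD whose caller has already decoded a PHYRDY-change interrupt into a boolean (the handler name and parameter are illustrative only):

#include <linux/libata.h>

/* Hypothetical per-port handler; phyrdy_changed is assumed to have been
 * decoded from the controller's interrupt status by the caller. */
static void example_handle_phy_event(struct ata_port *ap, bool phyrdy_changed)
{
	if (!phyrdy_changed)
		return;

	if (sata_lpm_ignore_phy_events(&ap->link)) {
		/* Spurious while LPM settles: clear the PHYRDY-change
		 * error source and skip hotplug handling. */
		sata_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
		return;
	}

	/* Otherwise treat it as a real hotplug event. */
	ata_ehi_hotplugged(&ap->link.eh_info);
	ata_port_freeze(ap);
}
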
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 07f41be38fbe..cf0022ec07f2 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c | |||
@@ -3597,6 +3597,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, | |||
3597 | } | 3597 | } |
3598 | } | 3598 | } |
3599 | 3599 | ||
3600 | link->last_lpm_change = jiffies; | ||
3601 | link->flags |= ATA_LFLAG_CHANGED; | ||
3602 | |||
3600 | return 0; | 3603 | return 0; |
3601 | 3604 | ||
3602 | fail: | 3605 | fail: |
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c deleted file mode 100644 index 5cd60d6388ec..000000000000 --- a/drivers/ata/pata_scc.c +++ /dev/null | |||
@@ -1,1110 +0,0 @@ | |||
1 | /* | ||
2 | * Support for IDE interfaces on Celleb platform | ||
3 | * | ||
4 | * (C) Copyright 2006 TOSHIBA CORPORATION | ||
5 | * | ||
6 | * This code is based on drivers/ata/ata_piix.c: | ||
7 | * Copyright 2003-2005 Red Hat Inc | ||
8 | * Copyright 2003-2005 Jeff Garzik | ||
9 | * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer | ||
10 | * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> | ||
11 | * Copyright (C) 2003 Red Hat Inc | ||
12 | * | ||
13 | * and drivers/ata/ahci.c: | ||
14 | * Copyright 2004-2005 Red Hat, Inc. | ||
15 | * | ||
16 | * and drivers/ata/libata-core.c: | ||
17 | * Copyright 2003-2004 Red Hat, Inc. All rights reserved. | ||
18 | * Copyright 2003-2004 Jeff Garzik | ||
19 | * | ||
20 | * This program is free software; you can redistribute it and/or modify | ||
21 | * it under the terms of the GNU General Public License as published by | ||
22 | * the Free Software Foundation; either version 2 of the License, or | ||
23 | * (at your option) any later version. | ||
24 | * | ||
25 | * This program is distributed in the hope that it will be useful, | ||
26 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
27 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
28 | * GNU General Public License for more details. | ||
29 | * | ||
30 | * You should have received a copy of the GNU General Public License along | ||
31 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
32 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
33 | */ | ||
34 | |||
35 | #include <linux/kernel.h> | ||
36 | #include <linux/module.h> | ||
37 | #include <linux/pci.h> | ||
38 | #include <linux/blkdev.h> | ||
39 | #include <linux/delay.h> | ||
40 | #include <linux/device.h> | ||
41 | #include <scsi/scsi_host.h> | ||
42 | #include <linux/libata.h> | ||
43 | |||
44 | #define DRV_NAME "pata_scc" | ||
45 | #define DRV_VERSION "0.3" | ||
46 | |||
47 | #define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4 | ||
48 | |||
49 | /* PCI BARs */ | ||
50 | #define SCC_CTRL_BAR 0 | ||
51 | #define SCC_BMID_BAR 1 | ||
52 | |||
53 | /* offset of CTRL registers */ | ||
54 | #define SCC_CTL_PIOSHT 0x000 | ||
55 | #define SCC_CTL_PIOCT 0x004 | ||
56 | #define SCC_CTL_MDMACT 0x008 | ||
57 | #define SCC_CTL_MCRCST 0x00C | ||
58 | #define SCC_CTL_SDMACT 0x010 | ||
59 | #define SCC_CTL_SCRCST 0x014 | ||
60 | #define SCC_CTL_UDENVT 0x018 | ||
61 | #define SCC_CTL_TDVHSEL 0x020 | ||
62 | #define SCC_CTL_MODEREG 0x024 | ||
63 | #define SCC_CTL_ECMODE 0xF00 | ||
64 | #define SCC_CTL_MAEA0 0xF50 | ||
65 | #define SCC_CTL_MAEC0 0xF54 | ||
66 | #define SCC_CTL_CCKCTRL 0xFF0 | ||
67 | |||
68 | /* offset of BMID registers */ | ||
69 | #define SCC_DMA_CMD 0x000 | ||
70 | #define SCC_DMA_STATUS 0x004 | ||
71 | #define SCC_DMA_TABLE_OFS 0x008 | ||
72 | #define SCC_DMA_INTMASK 0x010 | ||
73 | #define SCC_DMA_INTST 0x014 | ||
74 | #define SCC_DMA_PTERADD 0x018 | ||
75 | #define SCC_REG_CMD_ADDR 0x020 | ||
76 | #define SCC_REG_DATA 0x000 | ||
77 | #define SCC_REG_ERR 0x004 | ||
78 | #define SCC_REG_FEATURE 0x004 | ||
79 | #define SCC_REG_NSECT 0x008 | ||
80 | #define SCC_REG_LBAL 0x00C | ||
81 | #define SCC_REG_LBAM 0x010 | ||
82 | #define SCC_REG_LBAH 0x014 | ||
83 | #define SCC_REG_DEVICE 0x018 | ||
84 | #define SCC_REG_STATUS 0x01C | ||
85 | #define SCC_REG_CMD 0x01C | ||
86 | #define SCC_REG_ALTSTATUS 0x020 | ||
87 | |||
88 | /* register value */ | ||
89 | #define TDVHSEL_MASTER 0x00000001 | ||
90 | #define TDVHSEL_SLAVE 0x00000004 | ||
91 | |||
92 | #define MODE_JCUSFEN 0x00000080 | ||
93 | |||
94 | #define ECMODE_VALUE 0x01 | ||
95 | |||
96 | #define CCKCTRL_ATARESET 0x00040000 | ||
97 | #define CCKCTRL_BUFCNT 0x00020000 | ||
98 | #define CCKCTRL_CRST 0x00010000 | ||
99 | #define CCKCTRL_OCLKEN 0x00000100 | ||
100 | #define CCKCTRL_ATACLKOEN 0x00000002 | ||
101 | #define CCKCTRL_LCLKEN 0x00000001 | ||
102 | |||
103 | #define QCHCD_IOS_SS 0x00000001 | ||
104 | |||
105 | #define QCHSD_STPDIAG 0x00020000 | ||
106 | |||
107 | #define INTMASK_MSK 0xD1000012 | ||
108 | #define INTSTS_SERROR 0x80000000 | ||
109 | #define INTSTS_PRERR 0x40000000 | ||
110 | #define INTSTS_RERR 0x10000000 | ||
111 | #define INTSTS_ICERR 0x01000000 | ||
112 | #define INTSTS_BMSINT 0x00000010 | ||
113 | #define INTSTS_BMHE 0x00000008 | ||
114 | #define INTSTS_IOIRQS 0x00000004 | ||
115 | #define INTSTS_INTRQ 0x00000002 | ||
116 | #define INTSTS_ACTEINT 0x00000001 | ||
117 | |||
118 | |||
119 | /* PIO transfer mode table */ | ||
120 | /* JCHST */ | ||
121 | static const unsigned long JCHSTtbl[2][7] = { | ||
122 | {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */ | ||
123 | {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 133MHz */ | ||
124 | }; | ||
125 | |||
126 | /* JCHHT */ | ||
127 | static const unsigned long JCHHTtbl[2][7] = { | ||
128 | {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */ | ||
129 | {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00} /* 133MHz */ | ||
130 | }; | ||
131 | |||
132 | /* JCHCT */ | ||
133 | static const unsigned long JCHCTtbl[2][7] = { | ||
134 | {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */ | ||
135 | {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00} /* 133MHz */ | ||
136 | }; | ||
137 | |||
138 | /* DMA transfer mode table */ | ||
139 | /* JCHDCTM/JCHDCTS */ | ||
140 | static const unsigned long JCHDCTxtbl[2][7] = { | ||
141 | {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00}, /* 100MHz */ | ||
142 | {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00} /* 133MHz */ | ||
143 | }; | ||
144 | |||
145 | /* JCSTWTM/JCSTWTS */ | ||
146 | static const unsigned long JCSTWTxtbl[2][7] = { | ||
147 | {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00}, /* 100MHz */ | ||
148 | {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02} /* 133MHz */ | ||
149 | }; | ||
150 | |||
151 | /* JCTSS */ | ||
152 | static const unsigned long JCTSStbl[2][7] = { | ||
153 | {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */ | ||
154 | {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05} /* 133MHz */ | ||
155 | }; | ||
156 | |||
157 | /* JCENVT */ | ||
158 | static const unsigned long JCENVTtbl[2][7] = { | ||
159 | {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */ | ||
160 | {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} /* 133MHz */ | ||
161 | }; | ||
162 | |||
163 | /* JCACTSELS/JCACTSELM */ | ||
164 | static const unsigned long JCACTSELtbl[2][7] = { | ||
165 | {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */ | ||
166 | {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} /* 133MHz */ | ||
167 | }; | ||
168 | |||
169 | static const struct pci_device_id scc_pci_tbl[] = { | ||
170 | { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0}, | ||
171 | { } /* terminate list */ | ||
172 | }; | ||
173 | |||
174 | /** | ||
175 | * scc_set_piomode - Initialize host controller PATA PIO timings | ||
176 | * @ap: Port whose timings we are configuring | ||
177 | * @adev: um | ||
178 | * | ||
179 | * Set PIO mode for device. | ||
180 | * | ||
181 | * LOCKING: | ||
182 | * None (inherited from caller). | ||
183 | */ | ||
184 | |||
185 | static void scc_set_piomode (struct ata_port *ap, struct ata_device *adev) | ||
186 | { | ||
187 | unsigned int pio = adev->pio_mode - XFER_PIO_0; | ||
188 | void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR]; | ||
189 | void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL; | ||
190 | void __iomem *piosht_port = ctrl_base + SCC_CTL_PIOSHT; | ||
191 | void __iomem *pioct_port = ctrl_base + SCC_CTL_PIOCT; | ||
192 | unsigned long reg; | ||
193 | int offset; | ||
194 | |||
195 | reg = in_be32(cckctrl_port); | ||
196 | if (reg & CCKCTRL_ATACLKOEN) | ||
197 | offset = 1; /* 133MHz */ | ||
198 | else | ||
199 | offset = 0; /* 100MHz */ | ||
200 | |||
201 | reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio]; | ||
202 | out_be32(piosht_port, reg); | ||
203 | reg = JCHCTtbl[offset][pio]; | ||
204 | out_be32(pioct_port, reg); | ||
205 | } | ||
206 | |||
207 | /** | ||
208 | * scc_set_dmamode - Initialize host controller PATA DMA timings | ||
209 | * @ap: Port whose timings we are configuring | ||
210 | * @adev: um | ||
211 | * | ||
212 | * Set UDMA mode for device. | ||
213 | * | ||
214 | * LOCKING: | ||
215 | * None (inherited from caller). | ||
216 | */ | ||
217 | |||
218 | static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev) | ||
219 | { | ||
220 | unsigned int udma = adev->dma_mode; | ||
221 | unsigned int is_slave = (adev->devno != 0); | ||
222 | u8 speed = udma; | ||
223 | void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR]; | ||
224 | void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL; | ||
225 | void __iomem *mdmact_port = ctrl_base + SCC_CTL_MDMACT; | ||
226 | void __iomem *mcrcst_port = ctrl_base + SCC_CTL_MCRCST; | ||
227 | void __iomem *sdmact_port = ctrl_base + SCC_CTL_SDMACT; | ||
228 | void __iomem *scrcst_port = ctrl_base + SCC_CTL_SCRCST; | ||
229 | void __iomem *udenvt_port = ctrl_base + SCC_CTL_UDENVT; | ||
230 | void __iomem *tdvhsel_port = ctrl_base + SCC_CTL_TDVHSEL; | ||
231 | int offset, idx; | ||
232 | |||
233 | if (in_be32(cckctrl_port) & CCKCTRL_ATACLKOEN) | ||
234 | offset = 1; /* 133MHz */ | ||
235 | else | ||
236 | offset = 0; /* 100MHz */ | ||
237 | |||
238 | if (speed >= XFER_UDMA_0) | ||
239 | idx = speed - XFER_UDMA_0; | ||
240 | else | ||
241 | return; | ||
242 | |||
243 | if (is_slave) { | ||
244 | out_be32(sdmact_port, JCHDCTxtbl[offset][idx]); | ||
245 | out_be32(scrcst_port, JCSTWTxtbl[offset][idx]); | ||
246 | out_be32(tdvhsel_port, | ||
247 | (in_be32(tdvhsel_port) & ~TDVHSEL_SLAVE) | (JCACTSELtbl[offset][idx] << 2)); | ||
248 | } else { | ||
249 | out_be32(mdmact_port, JCHDCTxtbl[offset][idx]); | ||
250 | out_be32(mcrcst_port, JCSTWTxtbl[offset][idx]); | ||
251 | out_be32(tdvhsel_port, | ||
252 | (in_be32(tdvhsel_port) & ~TDVHSEL_MASTER) | JCACTSELtbl[offset][idx]); | ||
253 | } | ||
254 | out_be32(udenvt_port, | ||
255 | JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]); | ||
256 | } | ||
257 | |||
258 | unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask) | ||
259 | { | ||
260 | /* errata A308 workaround: limit ATAPI UDMA mode to UDMA4 */ | ||
261 | if (adev->class == ATA_DEV_ATAPI && | ||
262 | (mask & (0xE0 << ATA_SHIFT_UDMA))) { | ||
263 | printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME); | ||
264 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); | ||
265 | } | ||
266 | return mask; | ||
267 | } | ||
268 | |||
269 | /** | ||
270 | * scc_tf_load - send taskfile registers to host controller | ||
271 | * @ap: Port to which output is sent | ||
272 | * @tf: ATA taskfile register set | ||
273 | * | ||
274 | * Note: Original code is ata_sff_tf_load(). | ||
275 | */ | ||
276 | |||
277 | static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf) | ||
278 | { | ||
279 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
280 | unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; | ||
281 | |||
282 | if (tf->ctl != ap->last_ctl) { | ||
283 | out_be32(ioaddr->ctl_addr, tf->ctl); | ||
284 | ap->last_ctl = tf->ctl; | ||
285 | ata_wait_idle(ap); | ||
286 | } | ||
287 | |||
288 | if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { | ||
289 | out_be32(ioaddr->feature_addr, tf->hob_feature); | ||
290 | out_be32(ioaddr->nsect_addr, tf->hob_nsect); | ||
291 | out_be32(ioaddr->lbal_addr, tf->hob_lbal); | ||
292 | out_be32(ioaddr->lbam_addr, tf->hob_lbam); | ||
293 | out_be32(ioaddr->lbah_addr, tf->hob_lbah); | ||
294 | VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", | ||
295 | tf->hob_feature, | ||
296 | tf->hob_nsect, | ||
297 | tf->hob_lbal, | ||
298 | tf->hob_lbam, | ||
299 | tf->hob_lbah); | ||
300 | } | ||
301 | |||
302 | if (is_addr) { | ||
303 | out_be32(ioaddr->feature_addr, tf->feature); | ||
304 | out_be32(ioaddr->nsect_addr, tf->nsect); | ||
305 | out_be32(ioaddr->lbal_addr, tf->lbal); | ||
306 | out_be32(ioaddr->lbam_addr, tf->lbam); | ||
307 | out_be32(ioaddr->lbah_addr, tf->lbah); | ||
308 | VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", | ||
309 | tf->feature, | ||
310 | tf->nsect, | ||
311 | tf->lbal, | ||
312 | tf->lbam, | ||
313 | tf->lbah); | ||
314 | } | ||
315 | |||
316 | if (tf->flags & ATA_TFLAG_DEVICE) { | ||
317 | out_be32(ioaddr->device_addr, tf->device); | ||
318 | VPRINTK("device 0x%X\n", tf->device); | ||
319 | } | ||
320 | |||
321 | ata_wait_idle(ap); | ||
322 | } | ||
323 | |||
324 | /** | ||
325 | * scc_check_status - Read device status reg & clear interrupt | ||
326 | * @ap: port where the device is | ||
327 | * | ||
328 | * Note: Original code is ata_check_status(). | ||
329 | */ | ||
330 | |||
331 | static u8 scc_check_status (struct ata_port *ap) | ||
332 | { | ||
333 | return in_be32(ap->ioaddr.status_addr); | ||
334 | } | ||
335 | |||
336 | /** | ||
337 | * scc_tf_read - input device's ATA taskfile shadow registers | ||
338 | * @ap: Port from which input is read | ||
339 | * @tf: ATA taskfile register set for storing input | ||
340 | * | ||
341 | * Note: Original code is ata_sff_tf_read(). | ||
342 | */ | ||
343 | |||
344 | static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf) | ||
345 | { | ||
346 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
347 | |||
348 | tf->command = scc_check_status(ap); | ||
349 | tf->feature = in_be32(ioaddr->error_addr); | ||
350 | tf->nsect = in_be32(ioaddr->nsect_addr); | ||
351 | tf->lbal = in_be32(ioaddr->lbal_addr); | ||
352 | tf->lbam = in_be32(ioaddr->lbam_addr); | ||
353 | tf->lbah = in_be32(ioaddr->lbah_addr); | ||
354 | tf->device = in_be32(ioaddr->device_addr); | ||
355 | |||
356 | if (tf->flags & ATA_TFLAG_LBA48) { | ||
357 | out_be32(ioaddr->ctl_addr, tf->ctl | ATA_HOB); | ||
358 | tf->hob_feature = in_be32(ioaddr->error_addr); | ||
359 | tf->hob_nsect = in_be32(ioaddr->nsect_addr); | ||
360 | tf->hob_lbal = in_be32(ioaddr->lbal_addr); | ||
361 | tf->hob_lbam = in_be32(ioaddr->lbam_addr); | ||
362 | tf->hob_lbah = in_be32(ioaddr->lbah_addr); | ||
363 | out_be32(ioaddr->ctl_addr, tf->ctl); | ||
364 | ap->last_ctl = tf->ctl; | ||
365 | } | ||
366 | } | ||
367 | |||
368 | /** | ||
369 | * scc_exec_command - issue ATA command to host controller | ||
370 | * @ap: port to which command is being issued | ||
371 | * @tf: ATA taskfile register set | ||
372 | * | ||
373 | * Note: Original code is ata_sff_exec_command(). | ||
374 | */ | ||
375 | |||
376 | static void scc_exec_command (struct ata_port *ap, | ||
377 | const struct ata_taskfile *tf) | ||
378 | { | ||
379 | DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); | ||
380 | |||
381 | out_be32(ap->ioaddr.command_addr, tf->command); | ||
382 | ata_sff_pause(ap); | ||
383 | } | ||
384 | |||
385 | /** | ||
386 | * scc_check_altstatus - Read device alternate status reg | ||
387 | * @ap: port where the device is | ||
388 | */ | ||
389 | |||
390 | static u8 scc_check_altstatus (struct ata_port *ap) | ||
391 | { | ||
392 | return in_be32(ap->ioaddr.altstatus_addr); | ||
393 | } | ||
394 | |||
395 | /** | ||
396 | * scc_dev_select - Select device 0/1 on ATA bus | ||
397 | * @ap: ATA channel to manipulate | ||
398 | * @device: ATA device (numbered from zero) to select | ||
399 | * | ||
400 | * Note: Original code is ata_sff_dev_select(). | ||
401 | */ | ||
402 | |||
403 | static void scc_dev_select (struct ata_port *ap, unsigned int device) | ||
404 | { | ||
405 | u8 tmp; | ||
406 | |||
407 | if (device == 0) | ||
408 | tmp = ATA_DEVICE_OBS; | ||
409 | else | ||
410 | tmp = ATA_DEVICE_OBS | ATA_DEV1; | ||
411 | |||
412 | out_be32(ap->ioaddr.device_addr, tmp); | ||
413 | ata_sff_pause(ap); | ||
414 | } | ||
415 | |||
416 | /** | ||
417 | * scc_set_devctl - Write device control reg | ||
418 | * @ap: port where the device is | ||
419 | * @ctl: value to write | ||
420 | */ | ||
421 | |||
422 | static void scc_set_devctl(struct ata_port *ap, u8 ctl) | ||
423 | { | ||
424 | out_be32(ap->ioaddr.ctl_addr, ctl); | ||
425 | } | ||
426 | |||
427 | /** | ||
428 | * scc_bmdma_setup - Set up PCI IDE BMDMA transaction | ||
429 | * @qc: Info associated with this ATA transaction. | ||
430 | * | ||
431 | * Note: Original code is ata_bmdma_setup(). | ||
432 | */ | ||
433 | |||
434 | static void scc_bmdma_setup (struct ata_queued_cmd *qc) | ||
435 | { | ||
436 | struct ata_port *ap = qc->ap; | ||
437 | unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); | ||
438 | u8 dmactl; | ||
439 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | ||
440 | |||
441 | /* load PRD table addr */ | ||
442 | out_be32(mmio + SCC_DMA_TABLE_OFS, ap->bmdma_prd_dma); | ||
443 | |||
444 | /* specify data direction, triple-check start bit is clear */ | ||
445 | dmactl = in_be32(mmio + SCC_DMA_CMD); | ||
446 | dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); | ||
447 | if (!rw) | ||
448 | dmactl |= ATA_DMA_WR; | ||
449 | out_be32(mmio + SCC_DMA_CMD, dmactl); | ||
450 | |||
451 | /* issue r/w command */ | ||
452 | ap->ops->sff_exec_command(ap, &qc->tf); | ||
453 | } | ||
454 | |||
455 | /** | ||
456 | * scc_bmdma_start - Start a PCI IDE BMDMA transaction | ||
457 | * @qc: Info associated with this ATA transaction. | ||
458 | * | ||
459 | * Note: Original code is ata_bmdma_start(). | ||
460 | */ | ||
461 | |||
462 | static void scc_bmdma_start (struct ata_queued_cmd *qc) | ||
463 | { | ||
464 | struct ata_port *ap = qc->ap; | ||
465 | u8 dmactl; | ||
466 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | ||
467 | |||
468 | /* start host DMA transaction */ | ||
469 | dmactl = in_be32(mmio + SCC_DMA_CMD); | ||
470 | out_be32(mmio + SCC_DMA_CMD, dmactl | ATA_DMA_START); | ||
471 | } | ||
472 | |||
473 | /** | ||
474 | * scc_devchk - PATA device presence detection | ||
475 | * @ap: ATA channel to examine | ||
476 | * @device: Device to examine (starting at zero) | ||
477 | * | ||
478 | * Note: Original code is ata_devchk(). | ||
479 | */ | ||
480 | |||
481 | static unsigned int scc_devchk (struct ata_port *ap, | ||
482 | unsigned int device) | ||
483 | { | ||
484 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
485 | u8 nsect, lbal; | ||
486 | |||
487 | ap->ops->sff_dev_select(ap, device); | ||
488 | |||
489 | out_be32(ioaddr->nsect_addr, 0x55); | ||
490 | out_be32(ioaddr->lbal_addr, 0xaa); | ||
491 | |||
492 | out_be32(ioaddr->nsect_addr, 0xaa); | ||
493 | out_be32(ioaddr->lbal_addr, 0x55); | ||
494 | |||
495 | out_be32(ioaddr->nsect_addr, 0x55); | ||
496 | out_be32(ioaddr->lbal_addr, 0xaa); | ||
497 | |||
498 | nsect = in_be32(ioaddr->nsect_addr); | ||
499 | lbal = in_be32(ioaddr->lbal_addr); | ||
500 | |||
501 | if ((nsect == 0x55) && (lbal == 0xaa)) | ||
502 | return 1; /* we found a device */ | ||
503 | |||
504 | return 0; /* nothing found */ | ||
505 | } | ||
506 | |||
507 | /** | ||
508 | * scc_wait_after_reset - wait for devices to become ready after reset | ||
509 | * | ||
510 | * Note: Original code is ata_sff_wait_after_reset | ||
511 | */ | ||
512 | |||
513 | static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask, | ||
514 | unsigned long deadline) | ||
515 | { | ||
516 | struct ata_port *ap = link->ap; | ||
517 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
518 | unsigned int dev0 = devmask & (1 << 0); | ||
519 | unsigned int dev1 = devmask & (1 << 1); | ||
520 | int rc, ret = 0; | ||
521 | |||
522 | /* Spec mandates ">= 2ms" before checking status. We wait | ||
523 | * 150ms, because that was the magic delay used for ATAPI | ||
524 | * devices in Hale Landis's ATADRVR, for the period of time | ||
525 | * between when the ATA command register is written, and then | ||
526 | * status is checked. Because waiting for "a while" before | ||
527 | * checking status is fine, post SRST, we perform this magic | ||
528 | * delay here as well. | ||
529 | * | ||
530 | * Old drivers/ide uses the 2mS rule and then waits for ready. | ||
531 | */ | ||
532 | ata_msleep(ap, 150); | ||
533 | |||
534 | /* always check readiness of the master device */ | ||
535 | rc = ata_sff_wait_ready(link, deadline); | ||
536 | /* -ENODEV means the odd clown forgot the D7 pulldown resistor | ||
537 | * and TF status is 0xff, bail out on it too. | ||
538 | */ | ||
539 | if (rc) | ||
540 | return rc; | ||
541 | |||
542 | /* if device 1 was found in ata_devchk, wait for register | ||
543 | * access briefly, then wait for BSY to clear. | ||
544 | */ | ||
545 | if (dev1) { | ||
546 | int i; | ||
547 | |||
548 | ap->ops->sff_dev_select(ap, 1); | ||
549 | |||
550 | /* Wait for register access. Some ATAPI devices fail | ||
551 | * to set nsect/lbal after reset, so don't waste too | ||
552 | * much time on it. We're gonna wait for !BSY anyway. | ||
553 | */ | ||
554 | for (i = 0; i < 2; i++) { | ||
555 | u8 nsect, lbal; | ||
556 | |||
557 | nsect = in_be32(ioaddr->nsect_addr); | ||
558 | lbal = in_be32(ioaddr->lbal_addr); | ||
559 | if ((nsect == 1) && (lbal == 1)) | ||
560 | break; | ||
561 | ata_msleep(ap, 50); /* give drive a breather */ | ||
562 | } | ||
563 | |||
564 | rc = ata_sff_wait_ready(link, deadline); | ||
565 | if (rc) { | ||
566 | if (rc != -ENODEV) | ||
567 | return rc; | ||
568 | ret = rc; | ||
569 | } | ||
570 | } | ||
571 | |||
572 | /* is all this really necessary? */ | ||
573 | ap->ops->sff_dev_select(ap, 0); | ||
574 | if (dev1) | ||
575 | ap->ops->sff_dev_select(ap, 1); | ||
576 | if (dev0) | ||
577 | ap->ops->sff_dev_select(ap, 0); | ||
578 | |||
579 | return ret; | ||
580 | } | ||
581 | |||
582 | /** | ||
583 | * scc_bus_softreset - PATA device software reset | ||
584 | * | ||
585 | * Note: Original code is ata_bus_softreset(). | ||
586 | */ | ||
587 | |||
588 | static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask, | ||
589 | unsigned long deadline) | ||
590 | { | ||
591 | struct ata_ioports *ioaddr = &ap->ioaddr; | ||
592 | |||
593 | DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); | ||
594 | |||
595 | /* software reset. causes dev0 to be selected */ | ||
596 | out_be32(ioaddr->ctl_addr, ap->ctl); | ||
597 | udelay(20); | ||
598 | out_be32(ioaddr->ctl_addr, ap->ctl | ATA_SRST); | ||
599 | udelay(20); | ||
600 | out_be32(ioaddr->ctl_addr, ap->ctl); | ||
601 | |||
602 | return scc_wait_after_reset(&ap->link, devmask, deadline); | ||
603 | } | ||
604 | |||
605 | /** | ||
606 | * scc_softreset - reset host port via ATA SRST | ||
607 | * @ap: port to reset | ||
608 | * @classes: resulting classes of attached devices | ||
609 | * @deadline: deadline jiffies for the operation | ||
610 | * | ||
611 | * Note: Original code is ata_sff_softreset(). | ||
612 | */ | ||
613 | |||
614 | static int scc_softreset(struct ata_link *link, unsigned int *classes, | ||
615 | unsigned long deadline) | ||
616 | { | ||
617 | struct ata_port *ap = link->ap; | ||
618 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; | ||
619 | unsigned int devmask = 0; | ||
620 | int rc; | ||
621 | u8 err; | ||
622 | |||
623 | DPRINTK("ENTER\n"); | ||
624 | |||
625 | /* determine if device 0/1 are present */ | ||
626 | if (scc_devchk(ap, 0)) | ||
627 | devmask |= (1 << 0); | ||
628 | if (slave_possible && scc_devchk(ap, 1)) | ||
629 | devmask |= (1 << 1); | ||
630 | |||
631 | /* select device 0 again */ | ||
632 | ap->ops->sff_dev_select(ap, 0); | ||
633 | |||
634 | /* issue bus reset */ | ||
635 | DPRINTK("about to softreset, devmask=%x\n", devmask); | ||
636 | rc = scc_bus_softreset(ap, devmask, deadline); | ||
637 | if (rc) { | ||
638 | ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc); | ||
639 | return -EIO; | ||
640 | } | ||
641 | |||
642 | /* determine by signature whether we have ATA or ATAPI devices */ | ||
643 | classes[0] = ata_sff_dev_classify(&ap->link.device[0], | ||
644 | devmask & (1 << 0), &err); | ||
645 | if (slave_possible && err != 0x81) | ||
646 | classes[1] = ata_sff_dev_classify(&ap->link.device[1], | ||
647 | devmask & (1 << 1), &err); | ||
648 | |||
649 | DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); | ||
650 | return 0; | ||
651 | } | ||
652 | |||
653 | /** | ||
654 | * scc_bmdma_stop - Stop PCI IDE BMDMA transfer | ||
655 | * @qc: Command we are ending DMA for | ||
656 | */ | ||
657 | |||
658 | static void scc_bmdma_stop (struct ata_queued_cmd *qc) | ||
659 | { | ||
660 | struct ata_port *ap = qc->ap; | ||
661 | void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR]; | ||
662 | void __iomem *bmid_base = ap->host->iomap[SCC_BMID_BAR]; | ||
663 | u32 reg; | ||
664 | |||
665 | while (1) { | ||
666 | reg = in_be32(bmid_base + SCC_DMA_INTST); | ||
667 | |||
668 | if (reg & INTSTS_SERROR) { | ||
669 | printk(KERN_WARNING "%s: SERROR\n", DRV_NAME); | ||
670 | out_be32(bmid_base + SCC_DMA_INTST, INTSTS_SERROR|INTSTS_BMSINT); | ||
671 | out_be32(bmid_base + SCC_DMA_CMD, | ||
672 | in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); | ||
673 | continue; | ||
674 | } | ||
675 | |||
676 | if (reg & INTSTS_PRERR) { | ||
677 | u32 maea0, maec0; | ||
678 | maea0 = in_be32(ctrl_base + SCC_CTL_MAEA0); | ||
679 | maec0 = in_be32(ctrl_base + SCC_CTL_MAEC0); | ||
680 | printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", DRV_NAME, maea0, maec0); | ||
681 | out_be32(bmid_base + SCC_DMA_INTST, INTSTS_PRERR|INTSTS_BMSINT); | ||
682 | out_be32(bmid_base + SCC_DMA_CMD, | ||
683 | in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); | ||
684 | continue; | ||
685 | } | ||
686 | |||
687 | if (reg & INTSTS_RERR) { | ||
688 | printk(KERN_WARNING "%s: Response Error\n", DRV_NAME); | ||
689 | out_be32(bmid_base + SCC_DMA_INTST, INTSTS_RERR|INTSTS_BMSINT); | ||
690 | out_be32(bmid_base + SCC_DMA_CMD, | ||
691 | in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); | ||
692 | continue; | ||
693 | } | ||
694 | |||
695 | if (reg & INTSTS_ICERR) { | ||
696 | out_be32(bmid_base + SCC_DMA_CMD, | ||
697 | in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); | ||
698 | printk(KERN_WARNING "%s: Illegal Configuration\n", DRV_NAME); | ||
699 | out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ICERR|INTSTS_BMSINT); | ||
700 | continue; | ||
701 | } | ||
702 | |||
703 | if (reg & INTSTS_BMSINT) { | ||
704 | unsigned int classes; | ||
705 | unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT); | ||
706 | printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME); | ||
707 | out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT); | ||
708 | /* TBD: SW reset */ | ||
709 | scc_softreset(&ap->link, &classes, deadline); | ||
710 | continue; | ||
711 | } | ||
712 | |||
713 | if (reg & INTSTS_BMHE) { | ||
714 | out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMHE); | ||
715 | continue; | ||
716 | } | ||
717 | |||
718 | if (reg & INTSTS_ACTEINT) { | ||
719 | out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ACTEINT); | ||
720 | continue; | ||
721 | } | ||
722 | |||
723 | if (reg & INTSTS_IOIRQS) { | ||
724 | out_be32(bmid_base + SCC_DMA_INTST, INTSTS_IOIRQS); | ||
725 | continue; | ||
726 | } | ||
727 | break; | ||
728 | } | ||
729 | |||
730 | /* clear start/stop bit */ | ||
731 | out_be32(bmid_base + SCC_DMA_CMD, | ||
732 | in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START); | ||
733 | |||
734 | /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ | ||
735 | ata_sff_dma_pause(ap); /* dummy read */ | ||
736 | } | ||
737 | |||
738 | /** | ||
739 | * scc_bmdma_status - Read PCI IDE BMDMA status | ||
740 | * @ap: Port associated with this ATA transaction. | ||
741 | */ | ||
742 | |||
743 | static u8 scc_bmdma_status (struct ata_port *ap) | ||
744 | { | ||
745 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | ||
746 | u8 host_stat = in_be32(mmio + SCC_DMA_STATUS); | ||
747 | u32 int_status = in_be32(mmio + SCC_DMA_INTST); | ||
748 | struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); | ||
749 | static int retry = 0; | ||
750 | |||
751 | /* return if IOS_SS is cleared */ | ||
752 | if (!(in_be32(mmio + SCC_DMA_CMD) & ATA_DMA_START)) | ||
753 | return host_stat; | ||
754 | |||
755 | /* errata A252,A308 workaround: Step4 */ | ||
756 | if ((scc_check_altstatus(ap) & ATA_ERR) | ||
757 | && (int_status & INTSTS_INTRQ)) | ||
758 | return (host_stat | ATA_DMA_INTR); | ||
759 | |||
760 | /* errata A308 workaround Step5 */ | ||
761 | if (int_status & INTSTS_IOIRQS) { | ||
762 | host_stat |= ATA_DMA_INTR; | ||
763 | |||
764 | /* We don't check ATAPI DMA because it is limited to UDMA4 */ | ||
765 | if ((qc->tf.protocol == ATA_PROT_DMA && | ||
766 | qc->dev->xfer_mode > XFER_UDMA_4)) { | ||
767 | if (!(int_status & INTSTS_ACTEINT)) { | ||
768 | printk(KERN_WARNING "ata%u: operation failed (transfer data loss)\n", | ||
769 | ap->print_id); | ||
770 | host_stat |= ATA_DMA_ERR; | ||
771 | if (retry++) | ||
772 | ap->udma_mask &= ~(1 << qc->dev->xfer_mode); | ||
773 | } else | ||
774 | retry = 0; | ||
775 | } | ||
776 | } | ||
777 | |||
778 | return host_stat; | ||
779 | } | ||
780 | |||
781 | /** | ||
782 | * scc_data_xfer - Transfer data by PIO | ||
783 | * @dev: device for this I/O | ||
784 | * @buf: data buffer | ||
785 | * @buflen: buffer length | ||
786 | * @rw: read/write | ||
787 | * | ||
788 | * Note: Original code is ata_sff_data_xfer(). | ||
789 | */ | ||
790 | |||
791 | static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf, | ||
792 | unsigned int buflen, int rw) | ||
793 | { | ||
794 | struct ata_port *ap = dev->link->ap; | ||
795 | unsigned int words = buflen >> 1; | ||
796 | unsigned int i; | ||
797 | __le16 *buf16 = (__le16 *) buf; | ||
798 | void __iomem *mmio = ap->ioaddr.data_addr; | ||
799 | |||
800 | /* Transfer multiple of 2 bytes */ | ||
801 | if (rw == READ) | ||
802 | for (i = 0; i < words; i++) | ||
803 | buf16[i] = cpu_to_le16(in_be32(mmio)); | ||
804 | else | ||
805 | for (i = 0; i < words; i++) | ||
806 | out_be32(mmio, le16_to_cpu(buf16[i])); | ||
807 | |||
808 | /* Transfer trailing 1 byte, if any. */ | ||
809 | if (unlikely(buflen & 0x01)) { | ||
810 | __le16 align_buf[1] = { 0 }; | ||
811 | unsigned char *trailing_buf = buf + buflen - 1; | ||
812 | |||
813 | if (rw == READ) { | ||
814 | align_buf[0] = cpu_to_le16(in_be32(mmio)); | ||
815 | memcpy(trailing_buf, align_buf, 1); | ||
816 | } else { | ||
817 | memcpy(align_buf, trailing_buf, 1); | ||
818 | out_be32(mmio, le16_to_cpu(align_buf[0])); | ||
819 | } | ||
820 | words++; | ||
821 | } | ||
822 | |||
823 | return words << 1; | ||
824 | } | ||
825 | |||
826 | /** | ||
827 | * scc_postreset - standard postreset callback | ||
828 | * @ap: the target ata_port | ||
829 | * @classes: classes of attached devices | ||
830 | * | ||
831 | * Note: Original code is ata_sff_postreset(). | ||
832 | */ | ||
833 | |||
834 | static void scc_postreset(struct ata_link *link, unsigned int *classes) | ||
835 | { | ||
836 | struct ata_port *ap = link->ap; | ||
837 | |||
838 | DPRINTK("ENTER\n"); | ||
839 | |||
840 | /* is double-select really necessary? */ | ||
841 | if (classes[0] != ATA_DEV_NONE) | ||
842 | ap->ops->sff_dev_select(ap, 1); | ||
843 | if (classes[1] != ATA_DEV_NONE) | ||
844 | ap->ops->sff_dev_select(ap, 0); | ||
845 | |||
846 | /* bail out if no device is present */ | ||
847 | if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { | ||
848 | DPRINTK("EXIT, no device\n"); | ||
849 | return; | ||
850 | } | ||
851 | |||
852 | /* set up device control */ | ||
853 | out_be32(ap->ioaddr.ctl_addr, ap->ctl); | ||
854 | |||
855 | DPRINTK("EXIT\n"); | ||
856 | } | ||
857 | |||
858 | /** | ||
859 | * scc_irq_clear - Clear PCI IDE BMDMA interrupt. | ||
860 | * @ap: Port associated with this ATA transaction. | ||
861 | * | ||
862 | * Note: Original code is ata_bmdma_irq_clear(). | ||
863 | */ | ||
864 | |||
865 | static void scc_irq_clear (struct ata_port *ap) | ||
866 | { | ||
867 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | ||
868 | |||
869 | if (!mmio) | ||
870 | return; | ||
871 | |||
872 | out_be32(mmio + SCC_DMA_STATUS, in_be32(mmio + SCC_DMA_STATUS)); | ||
873 | } | ||
874 | |||
875 | /** | ||
876 | * scc_port_start - Set port up for dma. | ||
877 | * @ap: Port to initialize | ||
878 | * | ||
879 | * Allocate space for PRD table using ata_bmdma_port_start(). | ||
880 | * Set PRD table address for PTERADD. (PRD Transfer End Read) | ||
881 | */ | ||
882 | |||
883 | static int scc_port_start (struct ata_port *ap) | ||
884 | { | ||
885 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | ||
886 | int rc; | ||
887 | |||
888 | rc = ata_bmdma_port_start(ap); | ||
889 | if (rc) | ||
890 | return rc; | ||
891 | |||
892 | out_be32(mmio + SCC_DMA_PTERADD, ap->bmdma_prd_dma); | ||
893 | return 0; | ||
894 | } | ||
895 | |||
896 | /** | ||
897 | * scc_port_stop - Undo scc_port_start() | ||
898 | * @ap: Port to shut down | ||
899 | * | ||
900 | * Reset PTERADD. | ||
901 | */ | ||
902 | |||
903 | static void scc_port_stop (struct ata_port *ap) | ||
904 | { | ||
905 | void __iomem *mmio = ap->ioaddr.bmdma_addr; | ||
906 | |||
907 | out_be32(mmio + SCC_DMA_PTERADD, 0); | ||
908 | } | ||
909 | |||
910 | static struct scsi_host_template scc_sht = { | ||
911 | ATA_BMDMA_SHT(DRV_NAME), | ||
912 | }; | ||
913 | |||
914 | static struct ata_port_operations scc_pata_ops = { | ||
915 | .inherits = &ata_bmdma_port_ops, | ||
916 | |||
917 | .set_piomode = scc_set_piomode, | ||
918 | .set_dmamode = scc_set_dmamode, | ||
919 | .mode_filter = scc_mode_filter, | ||
920 | |||
921 | .sff_tf_load = scc_tf_load, | ||
922 | .sff_tf_read = scc_tf_read, | ||
923 | .sff_exec_command = scc_exec_command, | ||
924 | .sff_check_status = scc_check_status, | ||
925 | .sff_check_altstatus = scc_check_altstatus, | ||
926 | .sff_dev_select = scc_dev_select, | ||
927 | .sff_set_devctl = scc_set_devctl, | ||
928 | |||
929 | .bmdma_setup = scc_bmdma_setup, | ||
930 | .bmdma_start = scc_bmdma_start, | ||
931 | .bmdma_stop = scc_bmdma_stop, | ||
932 | .bmdma_status = scc_bmdma_status, | ||
933 | .sff_data_xfer = scc_data_xfer, | ||
934 | |||
935 | .cable_detect = ata_cable_80wire, | ||
936 | .softreset = scc_softreset, | ||
937 | .postreset = scc_postreset, | ||
938 | |||
939 | .sff_irq_clear = scc_irq_clear, | ||
940 | |||
941 | .port_start = scc_port_start, | ||
942 | .port_stop = scc_port_stop, | ||
943 | }; | ||
944 | |||
945 | static struct ata_port_info scc_port_info[] = { | ||
946 | { | ||
947 | .flags = ATA_FLAG_SLAVE_POSS, | ||
948 | .pio_mask = ATA_PIO4, | ||
949 | /* No MWDMA */ | ||
950 | .udma_mask = ATA_UDMA6, | ||
951 | .port_ops = &scc_pata_ops, | ||
952 | }, | ||
953 | }; | ||
954 | |||
955 | /** | ||
956 | * scc_reset_controller - initialize SCC PATA controller. | ||
957 | */ | ||
958 | |||
959 | static int scc_reset_controller(struct ata_host *host) | ||
960 | { | ||
961 | void __iomem *ctrl_base = host->iomap[SCC_CTRL_BAR]; | ||
962 | void __iomem *bmid_base = host->iomap[SCC_BMID_BAR]; | ||
963 | void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL; | ||
964 | void __iomem *mode_port = ctrl_base + SCC_CTL_MODEREG; | ||
965 | void __iomem *ecmode_port = ctrl_base + SCC_CTL_ECMODE; | ||
966 | void __iomem *intmask_port = bmid_base + SCC_DMA_INTMASK; | ||
967 | void __iomem *dmastatus_port = bmid_base + SCC_DMA_STATUS; | ||
968 | u32 reg = 0; | ||
969 | |||
970 | out_be32(cckctrl_port, reg); | ||
971 | reg |= CCKCTRL_ATACLKOEN; | ||
972 | out_be32(cckctrl_port, reg); | ||
973 | reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN; | ||
974 | out_be32(cckctrl_port, reg); | ||
975 | reg |= CCKCTRL_CRST; | ||
976 | out_be32(cckctrl_port, reg); | ||
977 | |||
978 | for (;;) { | ||
979 | reg = in_be32(cckctrl_port); | ||
980 | if (reg & CCKCTRL_CRST) | ||
981 | break; | ||
982 | udelay(5000); | ||
983 | } | ||
984 | |||
985 | reg |= CCKCTRL_ATARESET; | ||
986 | out_be32(cckctrl_port, reg); | ||
987 | out_be32(ecmode_port, ECMODE_VALUE); | ||
988 | out_be32(mode_port, MODE_JCUSFEN); | ||
989 | out_be32(intmask_port, INTMASK_MSK); | ||
990 | |||
991 | if (in_be32(dmastatus_port) & QCHSD_STPDIAG) { | ||
992 | printk(KERN_WARNING "%s: failed to detect 80c cable. (PDIAG# is high)\n", DRV_NAME); | ||
993 | return -EIO; | ||
994 | } | ||
995 | |||
996 | return 0; | ||
997 | } | ||
998 | |||
999 | /** | ||
1000 | * scc_setup_ports - initialize ioaddr with SCC PATA port offsets. | ||
1001 | * @ioaddr: IO address structure to be initialized | ||
1002 | * @base: base address of BMID region | ||
1003 | */ | ||
1004 | |||
1005 | static void scc_setup_ports (struct ata_ioports *ioaddr, void __iomem *base) | ||
1006 | { | ||
1007 | ioaddr->cmd_addr = base + SCC_REG_CMD_ADDR; | ||
1008 | ioaddr->altstatus_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS; | ||
1009 | ioaddr->ctl_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS; | ||
1010 | ioaddr->bmdma_addr = base; | ||
1011 | ioaddr->data_addr = ioaddr->cmd_addr + SCC_REG_DATA; | ||
1012 | ioaddr->error_addr = ioaddr->cmd_addr + SCC_REG_ERR; | ||
1013 | ioaddr->feature_addr = ioaddr->cmd_addr + SCC_REG_FEATURE; | ||
1014 | ioaddr->nsect_addr = ioaddr->cmd_addr + SCC_REG_NSECT; | ||
1015 | ioaddr->lbal_addr = ioaddr->cmd_addr + SCC_REG_LBAL; | ||
1016 | ioaddr->lbam_addr = ioaddr->cmd_addr + SCC_REG_LBAM; | ||
1017 | ioaddr->lbah_addr = ioaddr->cmd_addr + SCC_REG_LBAH; | ||
1018 | ioaddr->device_addr = ioaddr->cmd_addr + SCC_REG_DEVICE; | ||
1019 | ioaddr->status_addr = ioaddr->cmd_addr + SCC_REG_STATUS; | ||
1020 | ioaddr->command_addr = ioaddr->cmd_addr + SCC_REG_CMD; | ||
1021 | } | ||
1022 | |||
1023 | static int scc_host_init(struct ata_host *host) | ||
1024 | { | ||
1025 | struct pci_dev *pdev = to_pci_dev(host->dev); | ||
1026 | int rc; | ||
1027 | |||
1028 | rc = scc_reset_controller(host); | ||
1029 | if (rc) | ||
1030 | return rc; | ||
1031 | |||
1032 | rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK); | ||
1033 | if (rc) | ||
1034 | return rc; | ||
1035 | rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK); | ||
1036 | if (rc) | ||
1037 | return rc; | ||
1038 | |||
1039 | scc_setup_ports(&host->ports[0]->ioaddr, host->iomap[SCC_BMID_BAR]); | ||
1040 | |||
1041 | pci_set_master(pdev); | ||
1042 | |||
1043 | return 0; | ||
1044 | } | ||
1045 | |||
1046 | /** | ||
1047 | * scc_init_one - Register SCC PATA device with kernel services | ||
1048 | * @pdev: PCI device to register | ||
1049 | * @ent: Entry in scc_pci_tbl matching with @pdev | ||
1050 | * | ||
1051 | * LOCKING: | ||
1052 | * Inherited from PCI layer (may sleep). | ||
1053 | * | ||
1054 | * RETURNS: | ||
1055 | * Zero on success, or -ERRNO value. | ||
1056 | */ | ||
1057 | |||
1058 | static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | ||
1059 | { | ||
1060 | unsigned int board_idx = (unsigned int) ent->driver_data; | ||
1061 | const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL }; | ||
1062 | struct ata_host *host; | ||
1063 | int rc; | ||
1064 | |||
1065 | ata_print_version_once(&pdev->dev, DRV_VERSION); | ||
1066 | |||
1067 | host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1); | ||
1068 | if (!host) | ||
1069 | return -ENOMEM; | ||
1070 | |||
1071 | rc = pcim_enable_device(pdev); | ||
1072 | if (rc) | ||
1073 | return rc; | ||
1074 | |||
1075 | rc = pcim_iomap_regions(pdev, (1 << SCC_CTRL_BAR) | (1 << SCC_BMID_BAR), DRV_NAME); | ||
1076 | if (rc == -EBUSY) | ||
1077 | pcim_pin_device(pdev); | ||
1078 | if (rc) | ||
1079 | return rc; | ||
1080 | host->iomap = pcim_iomap_table(pdev); | ||
1081 | |||
1082 | ata_port_pbar_desc(host->ports[0], SCC_CTRL_BAR, -1, "ctrl"); | ||
1083 | ata_port_pbar_desc(host->ports[0], SCC_BMID_BAR, -1, "bmid"); | ||
1084 | |||
1085 | rc = scc_host_init(host); | ||
1086 | if (rc) | ||
1087 | return rc; | ||
1088 | |||
1089 | return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, | ||
1090 | IRQF_SHARED, &scc_sht); | ||
1091 | } | ||
1092 | |||
1093 | static struct pci_driver scc_pci_driver = { | ||
1094 | .name = DRV_NAME, | ||
1095 | .id_table = scc_pci_tbl, | ||
1096 | .probe = scc_init_one, | ||
1097 | .remove = ata_pci_remove_one, | ||
1098 | #ifdef CONFIG_PM_SLEEP | ||
1099 | .suspend = ata_pci_device_suspend, | ||
1100 | .resume = ata_pci_device_resume, | ||
1101 | #endif | ||
1102 | }; | ||
1103 | |||
1104 | module_pci_driver(scc_pci_driver); | ||
1105 | |||
1106 | MODULE_AUTHOR("Toshiba corp"); | ||
1107 | MODULE_DESCRIPTION("SCSI low-level driver for Toshiba SCC PATA controller"); | ||
1108 | MODULE_LICENSE("GPL"); | ||
1109 | MODULE_DEVICE_TABLE(pci, scc_pci_tbl); | ||
1110 | MODULE_VERSION(DRV_VERSION); | ||
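The removed scc_devchk() above relies on the classic ATA presence probe: write complementary 0x55/0xaa patterns into the sector-count and LBA-low taskfile registers and check whether the last pair reads back, the same trick as the generic ata_devchk() it was copied from. Below is a minimal user-space model of that probe; the taskfile_model struct and tf_write() helper are hypothetical stand-ins for the SCC's big-endian MMIO registers and in_be32()/out_be32() accessors.

#include <stdint.h>
#include <stdio.h>

/* Model of the nsect/lbal taskfile registers. A real drive latches the
 * last values written; an empty bus typically reads back 0xff. */
struct taskfile_model {
    int present;            /* simulate whether a drive is attached */
    uint8_t nsect, lbal;
};

static void tf_write(struct taskfile_model *tf, uint8_t nsect, uint8_t lbal)
{
    if (tf->present) {       /* only a real device latches the values */
        tf->nsect = nsect;
        tf->lbal = lbal;
    }
}

static int devchk(struct taskfile_model *tf)
{
    tf_write(tf, 0x55, 0xaa);
    tf_write(tf, 0xaa, 0x55);
    tf_write(tf, 0x55, 0xaa);
    /* Only a real device echoes the final 0x55/0xaa pair back. */
    return tf->nsect == 0x55 && tf->lbal == 0xaa;
}

int main(void)
{
    struct taskfile_model absent = { .present = 0, .nsect = 0xff, .lbal = 0xff };
    struct taskfile_model attached = { .present = 1 };

    printf("empty bus: %s\n", devchk(&absent) ? "found" : "nothing");
    printf("drive:     %s\n", devchk(&attached) ? "found" : "nothing");
    return 0;
}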
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index ae3fcb4199e9..d7173cb1ea76 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -1620,8 +1620,8 @@ out: | |||
1620 | 1620 | ||
1621 | static void loop_remove(struct loop_device *lo) | 1621 | static void loop_remove(struct loop_device *lo) |
1622 | { | 1622 | { |
1623 | del_gendisk(lo->lo_disk); | ||
1624 | blk_cleanup_queue(lo->lo_queue); | 1623 | blk_cleanup_queue(lo->lo_queue); |
1624 | del_gendisk(lo->lo_disk); | ||
1625 | blk_mq_free_tag_set(&lo->tag_set); | 1625 | blk_mq_free_tag_set(&lo->tag_set); |
1626 | put_disk(lo->lo_disk); | 1626 | put_disk(lo->lo_disk); |
1627 | kfree(lo); | 1627 | kfree(lo); |
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c index 6b736b00f63e..88f13c525712 100644 --- a/drivers/block/nvme-scsi.c +++ b/drivers/block/nvme-scsi.c | |||
@@ -944,7 +944,8 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, | |||
944 | static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, | 944 | static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, |
945 | u8 *inq_response, int alloc_len) | 945 | u8 *inq_response, int alloc_len) |
946 | { | 946 | { |
947 | __be32 max_sectors = cpu_to_be32(queue_max_hw_sectors(ns->queue)); | 947 | __be32 max_sectors = cpu_to_be32( |
948 | nvme_block_nr(ns, queue_max_hw_sectors(ns->queue))); | ||
948 | __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors); | 949 | __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors); |
949 | __be32 discard_desc_count = cpu_to_be32(0x100); | 950 | __be32 discard_desc_count = cpu_to_be32(0x100); |
950 | 951 | ||
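The nvme-scsi hunk above matters because queue_max_hw_sectors() counts 512-byte sectors, while the MAXIMUM TRANSFER LENGTH field in the SCSI Block Limits VPD page is expressed in logical blocks. A small worked example of the conversion, assuming nvme_block_nr() shifts right by (lba_shift - 9) as in kernels of this vintage:

#include <stdio.h>

int main(void)
{
    unsigned int max_hw_sectors = 2048;   /* 512-byte units => 1 MiB transfer */
    unsigned int lba_shift = 12;          /* 4 KiB logical blocks             */
    unsigned int max_blocks = max_hw_sectors >> (lba_shift - 9);

    /* Prints: 2048 sectors -> 256 logical blocks */
    printf("%u sectors -> %u logical blocks\n", max_hw_sectors, max_blocks);
    return 0;
}

Without the conversion, a 4 KiB-block namespace would advertise a transfer limit eight times larger than the hardware actually accepts.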
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 812523330a78..ec6c5c6e1ac9 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -2264,6 +2264,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) | |||
2264 | result, xferred); | 2264 | result, xferred); |
2265 | if (!img_request->result) | 2265 | if (!img_request->result) |
2266 | img_request->result = result; | 2266 | img_request->result = result; |
2267 | /* | ||
2268 | * Need to end I/O on the entire obj_request worth of | ||
2269 | * bytes in case of error. | ||
2270 | */ | ||
2271 | xferred = obj_request->length; | ||
2267 | } | 2272 | } |
2268 | 2273 | ||
2269 | /* Image object requests don't own their page array */ | 2274 | /* Image object requests don't own their page array */ |
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index bd2b3bbbb22c..713fc9ff1149 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
@@ -265,17 +265,6 @@ static void put_persistent_gnt(struct xen_blkif *blkif, | |||
265 | atomic_dec(&blkif->persistent_gnt_in_use); | 265 | atomic_dec(&blkif->persistent_gnt_in_use); |
266 | } | 266 | } |
267 | 267 | ||
268 | static void free_persistent_gnts_unmap_callback(int result, | ||
269 | struct gntab_unmap_queue_data *data) | ||
270 | { | ||
271 | struct completion *c = data->data; | ||
272 | |||
273 | /* BUG_ON used to reproduce existing behaviour, | ||
274 | but is this the best way to deal with this? */ | ||
275 | BUG_ON(result); | ||
276 | complete(c); | ||
277 | } | ||
278 | |||
279 | static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, | 268 | static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, |
280 | unsigned int num) | 269 | unsigned int num) |
281 | { | 270 | { |
@@ -285,12 +274,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, | |||
285 | struct rb_node *n; | 274 | struct rb_node *n; |
286 | int segs_to_unmap = 0; | 275 | int segs_to_unmap = 0; |
287 | struct gntab_unmap_queue_data unmap_data; | 276 | struct gntab_unmap_queue_data unmap_data; |
288 | struct completion unmap_completion; | ||
289 | 277 | ||
290 | init_completion(&unmap_completion); | ||
291 | |||
292 | unmap_data.data = &unmap_completion; | ||
293 | unmap_data.done = &free_persistent_gnts_unmap_callback; | ||
294 | unmap_data.pages = pages; | 278 | unmap_data.pages = pages; |
295 | unmap_data.unmap_ops = unmap; | 279 | unmap_data.unmap_ops = unmap; |
296 | unmap_data.kunmap_ops = NULL; | 280 | unmap_data.kunmap_ops = NULL; |
@@ -310,8 +294,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root, | |||
310 | !rb_next(&persistent_gnt->node)) { | 294 | !rb_next(&persistent_gnt->node)) { |
311 | 295 | ||
312 | unmap_data.count = segs_to_unmap; | 296 | unmap_data.count = segs_to_unmap; |
313 | gnttab_unmap_refs_async(&unmap_data); | 297 | BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); |
314 | wait_for_completion(&unmap_completion); | ||
315 | 298 | ||
316 | put_free_pages(blkif, pages, segs_to_unmap); | 299 | put_free_pages(blkif, pages, segs_to_unmap); |
317 | segs_to_unmap = 0; | 300 | segs_to_unmap = 0; |
@@ -329,8 +312,13 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work) | |||
329 | struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 312 | struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
330 | struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 313 | struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
331 | struct persistent_gnt *persistent_gnt; | 314 | struct persistent_gnt *persistent_gnt; |
332 | int ret, segs_to_unmap = 0; | 315 | int segs_to_unmap = 0; |
333 | struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work); | 316 | struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work); |
317 | struct gntab_unmap_queue_data unmap_data; | ||
318 | |||
319 | unmap_data.pages = pages; | ||
320 | unmap_data.unmap_ops = unmap; | ||
321 | unmap_data.kunmap_ops = NULL; | ||
334 | 322 | ||
335 | while(!list_empty(&blkif->persistent_purge_list)) { | 323 | while(!list_empty(&blkif->persistent_purge_list)) { |
336 | persistent_gnt = list_first_entry(&blkif->persistent_purge_list, | 324 | persistent_gnt = list_first_entry(&blkif->persistent_purge_list, |
@@ -346,17 +334,16 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work) | |||
346 | pages[segs_to_unmap] = persistent_gnt->page; | 334 | pages[segs_to_unmap] = persistent_gnt->page; |
347 | 335 | ||
348 | if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { | 336 | if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) { |
349 | ret = gnttab_unmap_refs(unmap, NULL, pages, | 337 | unmap_data.count = segs_to_unmap; |
350 | segs_to_unmap); | 338 | BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); |
351 | BUG_ON(ret); | ||
352 | put_free_pages(blkif, pages, segs_to_unmap); | 339 | put_free_pages(blkif, pages, segs_to_unmap); |
353 | segs_to_unmap = 0; | 340 | segs_to_unmap = 0; |
354 | } | 341 | } |
355 | kfree(persistent_gnt); | 342 | kfree(persistent_gnt); |
356 | } | 343 | } |
357 | if (segs_to_unmap > 0) { | 344 | if (segs_to_unmap > 0) { |
358 | ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap); | 345 | unmap_data.count = segs_to_unmap; |
359 | BUG_ON(ret); | 346 | BUG_ON(gnttab_unmap_refs_sync(&unmap_data)); |
360 | put_free_pages(blkif, pages, segs_to_unmap); | 347 | put_free_pages(blkif, pages, segs_to_unmap); |
361 | } | 348 | } |
362 | } | 349 | } |
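The xen-blkback hunks replace the open-coded completion dance with gnttab_unmap_refs_sync(). The sketch below shows what that helper presumably wraps internally; it is built only from the calls visible in the removed code (gnttab_unmap_refs_async() plus a completion signalled from the done callback), and the unmap_sync_ctx struct is introduced here purely for illustration.

struct unmap_sync_ctx {
	struct completion completion;
	int result;
};

static void unmap_done(int result, struct gntab_unmap_queue_data *data)
{
	struct unmap_sync_ctx *ctx = data->data;

	ctx->result = result;
	complete(&ctx->completion);
}

/* Turn the asynchronous unmap into a synchronous call for the caller. */
static int unmap_refs_sync_sketch(struct gntab_unmap_queue_data *d)
{
	struct unmap_sync_ctx ctx;

	init_completion(&ctx.completion);
	d->data = &ctx;
	d->done = unmap_done;
	gnttab_unmap_refs_async(d);
	wait_for_completion(&ctx.completion);
	return ctx.result;
}

Centralising this pattern lets both free_persistent_gnts() and xen_blkbk_unmap_purged_grants() drop their per-call completion bookkeeping, as the hunks above show.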
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index c94386aa563d..8dcbced0eafd 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c | |||
@@ -74,6 +74,27 @@ static inline struct zram *dev_to_zram(struct device *dev) | |||
74 | return (struct zram *)dev_to_disk(dev)->private_data; | 74 | return (struct zram *)dev_to_disk(dev)->private_data; |
75 | } | 75 | } |
76 | 76 | ||
77 | static ssize_t compact_store(struct device *dev, | ||
78 | struct device_attribute *attr, const char *buf, size_t len) | ||
79 | { | ||
80 | unsigned long nr_migrated; | ||
81 | struct zram *zram = dev_to_zram(dev); | ||
82 | struct zram_meta *meta; | ||
83 | |||
84 | down_read(&zram->init_lock); | ||
85 | if (!init_done(zram)) { | ||
86 | up_read(&zram->init_lock); | ||
87 | return -EINVAL; | ||
88 | } | ||
89 | |||
90 | meta = zram->meta; | ||
91 | nr_migrated = zs_compact(meta->mem_pool); | ||
92 | atomic64_add(nr_migrated, &zram->stats.num_migrated); | ||
93 | up_read(&zram->init_lock); | ||
94 | |||
95 | return len; | ||
96 | } | ||
97 | |||
77 | static ssize_t disksize_show(struct device *dev, | 98 | static ssize_t disksize_show(struct device *dev, |
78 | struct device_attribute *attr, char *buf) | 99 | struct device_attribute *attr, char *buf) |
79 | { | 100 | { |
@@ -1038,6 +1059,7 @@ static const struct block_device_operations zram_devops = { | |||
1038 | .owner = THIS_MODULE | 1059 | .owner = THIS_MODULE |
1039 | }; | 1060 | }; |
1040 | 1061 | ||
1062 | static DEVICE_ATTR_WO(compact); | ||
1041 | static DEVICE_ATTR_RW(disksize); | 1063 | static DEVICE_ATTR_RW(disksize); |
1042 | static DEVICE_ATTR_RO(initstate); | 1064 | static DEVICE_ATTR_RO(initstate); |
1043 | static DEVICE_ATTR_WO(reset); | 1065 | static DEVICE_ATTR_WO(reset); |
@@ -1114,6 +1136,7 @@ static struct attribute *zram_disk_attrs[] = { | |||
1114 | &dev_attr_num_writes.attr, | 1136 | &dev_attr_num_writes.attr, |
1115 | &dev_attr_failed_reads.attr, | 1137 | &dev_attr_failed_reads.attr, |
1116 | &dev_attr_failed_writes.attr, | 1138 | &dev_attr_failed_writes.attr, |
1139 | &dev_attr_compact.attr, | ||
1117 | &dev_attr_invalid_io.attr, | 1140 | &dev_attr_invalid_io.attr, |
1118 | &dev_attr_notify_free.attr, | 1141 | &dev_attr_notify_free.attr, |
1119 | &dev_attr_zero_pages.attr, | 1142 | &dev_attr_zero_pages.attr, |
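The new zram "compact" attribute is write-only and ignores the value written; any write simply runs zs_compact() on the device's zsmalloc pool and accounts the migrated objects. A user-space sketch of kicking it, assuming the first device's node at /sys/block/zram0/compact:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* Path assumes the first zram device; adjust for zram1, zram2, ... */
    int fd = open("/sys/block/zram0/compact", O_WRONLY);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    /* The written value is ignored; the write itself triggers compaction. */
    if (write(fd, "1", 1) != 1)
        perror("write");
    close(fd);
    return 0;
}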
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index b854125e4831..5340604b23a4 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c | |||
@@ -660,7 +660,7 @@ validate_group(struct perf_event *event) | |||
660 | * Initialise the fake PMU. We only need to populate the | 660 | * Initialise the fake PMU. We only need to populate the |
661 | * used_mask for the purposes of validation. | 661 | * used_mask for the purposes of validation. |
662 | */ | 662 | */ |
663 | .used_mask = CPU_BITS_NONE, | 663 | .used_mask = { 0 }, |
664 | }; | 664 | }; |
665 | 665 | ||
666 | if (!validate_event(event->pmu, &fake_pmu, leader)) | 666 | if (!validate_event(event->pmu, &fake_pmu, leader)) |
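The arm-cci change swaps CPU_BITS_NONE for a plain zero initializer, presumably because used_mask in the fake PMU is a counter bitmap whose length has nothing to do with NR_CPUS, whereas CPU_BITS_NONE expands to an initializer sized for an NR_CPUS bitmap. A trivial illustration that "{ 0 }" zero-fills whatever length the field actually has; the struct is hypothetical:

#include <stdio.h>

struct fake_hw_events {
    unsigned long used_mask[1];   /* room for a handful of counters, not CPUs */
};

int main(void)
{
    /* { 0 } adapts to the field's real length, unlike a fixed-size initializer. */
    struct fake_hw_events fake = { .used_mask = { 0 } };

    printf("used_mask[0] = %lu\n", fake.used_mask[0]);
    return 0;
}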
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c index 11f7982cbdb3..ebee57d715d2 100644 --- a/drivers/bus/omap_l3_noc.c +++ b/drivers/bus/omap_l3_noc.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * OMAP L3 Interconnect error handling driver | 2 | * OMAP L3 Interconnect error handling driver |
3 | * | 3 | * |
4 | * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/ | 4 | * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/ |
5 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | 5 | * Santosh Shilimkar <santosh.shilimkar@ti.com> |
6 | * Sricharan <r.sricharan@ti.com> | 6 | * Sricharan <r.sricharan@ti.com> |
7 | * | 7 | * |
@@ -233,7 +233,8 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3) | |||
233 | } | 233 | } |
234 | 234 | ||
235 | static const struct of_device_id l3_noc_match[] = { | 235 | static const struct of_device_id l3_noc_match[] = { |
236 | {.compatible = "ti,omap4-l3-noc", .data = &omap_l3_data}, | 236 | {.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data}, |
237 | {.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data}, | ||
237 | {.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data}, | 238 | {.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data}, |
238 | {.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data}, | 239 | {.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data}, |
239 | {}, | 240 | {}, |
diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h index 95254585db86..73431f81da28 100644 --- a/drivers/bus/omap_l3_noc.h +++ b/drivers/bus/omap_l3_noc.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * OMAP L3 Interconnect error handling driver header | 2 | * OMAP L3 Interconnect error handling driver header |
3 | * | 3 | * |
4 | * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/ | 4 | * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/ |
5 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | 5 | * Santosh Shilimkar <santosh.shilimkar@ti.com> |
6 | * sricharan <r.sricharan@ti.com> | 6 | * sricharan <r.sricharan@ti.com> |
7 | * | 7 | * |
@@ -175,16 +175,14 @@ static struct l3_flagmux_data omap_l3_flagmux_clk2 = { | |||
175 | }; | 175 | }; |
176 | 176 | ||
177 | 177 | ||
178 | static struct l3_target_data omap_l3_target_data_clk3[] = { | 178 | static struct l3_target_data omap4_l3_target_data_clk3[] = { |
179 | {0x0100, "EMUSS",}, | 179 | {0x0100, "DEBUGSS",}, |
180 | {0x0300, "DEBUG SOURCE",}, | ||
181 | {0x0, "HOST CLK3",}, | ||
182 | }; | 180 | }; |
183 | 181 | ||
184 | static struct l3_flagmux_data omap_l3_flagmux_clk3 = { | 182 | static struct l3_flagmux_data omap4_l3_flagmux_clk3 = { |
185 | .offset = 0x0200, | 183 | .offset = 0x0200, |
186 | .l3_targ = omap_l3_target_data_clk3, | 184 | .l3_targ = omap4_l3_target_data_clk3, |
187 | .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk3), | 185 | .num_targ_data = ARRAY_SIZE(omap4_l3_target_data_clk3), |
188 | }; | 186 | }; |
189 | 187 | ||
190 | static struct l3_masters_data omap_l3_masters[] = { | 188 | static struct l3_masters_data omap_l3_masters[] = { |
@@ -215,21 +213,49 @@ static struct l3_masters_data omap_l3_masters[] = { | |||
215 | { 0x32, "USBHOSTFS"} | 213 | { 0x32, "USBHOSTFS"} |
216 | }; | 214 | }; |
217 | 215 | ||
218 | static struct l3_flagmux_data *omap_l3_flagmux[] = { | 216 | static struct l3_flagmux_data *omap4_l3_flagmux[] = { |
219 | &omap_l3_flagmux_clk1, | 217 | &omap_l3_flagmux_clk1, |
220 | &omap_l3_flagmux_clk2, | 218 | &omap_l3_flagmux_clk2, |
221 | &omap_l3_flagmux_clk3, | 219 | &omap4_l3_flagmux_clk3, |
222 | }; | 220 | }; |
223 | 221 | ||
224 | static const struct omap_l3 omap_l3_data = { | 222 | static const struct omap_l3 omap4_l3_data = { |
225 | .l3_flagmux = omap_l3_flagmux, | 223 | .l3_flagmux = omap4_l3_flagmux, |
226 | .num_modules = ARRAY_SIZE(omap_l3_flagmux), | 224 | .num_modules = ARRAY_SIZE(omap4_l3_flagmux), |
227 | .l3_masters = omap_l3_masters, | 225 | .l3_masters = omap_l3_masters, |
228 | .num_masters = ARRAY_SIZE(omap_l3_masters), | 226 | .num_masters = ARRAY_SIZE(omap_l3_masters), |
229 | /* The 6 MSBs of register field used to distinguish initiator */ | 227 | /* The 6 MSBs of register field used to distinguish initiator */ |
230 | .mst_addr_mask = 0xFC, | 228 | .mst_addr_mask = 0xFC, |
231 | }; | 229 | }; |
232 | 230 | ||
231 | /* OMAP5 data */ | ||
232 | static struct l3_target_data omap5_l3_target_data_clk3[] = { | ||
233 | {0x0100, "L3INSTR",}, | ||
234 | {0x0300, "DEBUGSS",}, | ||
235 | {0x0, "HOSTCLK3",}, | ||
236 | }; | ||
237 | |||
238 | static struct l3_flagmux_data omap5_l3_flagmux_clk3 = { | ||
239 | .offset = 0x0200, | ||
240 | .l3_targ = omap5_l3_target_data_clk3, | ||
241 | .num_targ_data = ARRAY_SIZE(omap5_l3_target_data_clk3), | ||
242 | }; | ||
243 | |||
244 | static struct l3_flagmux_data *omap5_l3_flagmux[] = { | ||
245 | &omap_l3_flagmux_clk1, | ||
246 | &omap_l3_flagmux_clk2, | ||
247 | &omap5_l3_flagmux_clk3, | ||
248 | }; | ||
249 | |||
250 | static const struct omap_l3 omap5_l3_data = { | ||
251 | .l3_flagmux = omap5_l3_flagmux, | ||
252 | .num_modules = ARRAY_SIZE(omap5_l3_flagmux), | ||
253 | .l3_masters = omap_l3_masters, | ||
254 | .num_masters = ARRAY_SIZE(omap_l3_masters), | ||
255 | /* The 6 MSBs of register field used to distinguish initiator */ | ||
256 | .mst_addr_mask = 0x7E0, | ||
257 | }; | ||
258 | |||
233 | /* DRA7 data */ | 259 | /* DRA7 data */ |
234 | static struct l3_target_data dra_l3_target_data_clk1[] = { | 260 | static struct l3_target_data dra_l3_target_data_clk1[] = { |
235 | {0x2a00, "AES1",}, | 261 | {0x2a00, "AES1",}, |
@@ -274,7 +300,7 @@ static struct l3_flagmux_data dra_l3_flagmux_clk1 = { | |||
274 | 300 | ||
275 | static struct l3_target_data dra_l3_target_data_clk2[] = { | 301 | static struct l3_target_data dra_l3_target_data_clk2[] = { |
276 | {0x0, "HOST CLK1",}, | 302 | {0x0, "HOST CLK1",}, |
277 | {0x0, "HOST CLK2",}, | 303 | {0x800000, "HOST CLK2",}, |
278 | {0xdead, L3_TARGET_NOT_SUPPORTED,}, | 304 | {0xdead, L3_TARGET_NOT_SUPPORTED,}, |
279 | {0x3400, "SHA2_2",}, | 305 | {0x3400, "SHA2_2",}, |
280 | {0x0900, "BB2D",}, | 306 | {0x0900, "BB2D",}, |
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c index d1494ecd9e11..4b31f1387f37 100644 --- a/drivers/char/hw_random/bcm63xx-rng.c +++ b/drivers/char/hw_random/bcm63xx-rng.c | |||
@@ -57,7 +57,7 @@ static void bcm63xx_rng_cleanup(struct hwrng *rng) | |||
57 | val &= ~RNG_EN; | 57 | val &= ~RNG_EN; |
58 | __raw_writel(val, priv->regs + RNG_CTRL); | 58 | __raw_writel(val, priv->regs + RNG_CTRL); |
59 | 59 | ||
60 | clk_didsable_unprepare(prov->clk); | 60 | clk_disable_unprepare(priv->clk); |
61 | } | 61 | } |
62 | 62 | ||
63 | static int bcm63xx_rng_data_present(struct hwrng *rng, int wait) | 63 | static int bcm63xx_rng_data_present(struct hwrng *rng, int wait) |
@@ -97,14 +97,14 @@ static int bcm63xx_rng_probe(struct platform_device *pdev) | |||
97 | priv->rng.name = pdev->name; | 97 | priv->rng.name = pdev->name; |
98 | priv->rng.init = bcm63xx_rng_init; | 98 | priv->rng.init = bcm63xx_rng_init; |
99 | priv->rng.cleanup = bcm63xx_rng_cleanup; | 99 | priv->rng.cleanup = bcm63xx_rng_cleanup; |
100 | prov->rng.data_present = bcm63xx_rng_data_present; | 100 | priv->rng.data_present = bcm63xx_rng_data_present; |
101 | priv->rng.data_read = bcm63xx_rng_data_read; | 101 | priv->rng.data_read = bcm63xx_rng_data_read; |
102 | 102 | ||
103 | priv->clk = devm_clk_get(&pdev->dev, "ipsec"); | 103 | priv->clk = devm_clk_get(&pdev->dev, "ipsec"); |
104 | if (IS_ERR(priv->clk)) { | 104 | if (IS_ERR(priv->clk)) { |
105 | error = PTR_ERR(priv->clk); | 105 | ret = PTR_ERR(priv->clk); |
106 | dev_err(&pdev->dev, "no clock for device: %d\n", error); | 106 | dev_err(&pdev->dev, "no clock for device: %d\n", ret); |
107 | return error; | 107 | return ret; |
108 | } | 108 | } |
109 | 109 | ||
110 | if (!devm_request_mem_region(&pdev->dev, r->start, | 110 | if (!devm_request_mem_region(&pdev->dev, r->start, |
@@ -120,11 +120,11 @@ static int bcm63xx_rng_probe(struct platform_device *pdev) | |||
120 | return -ENOMEM; | 120 | return -ENOMEM; |
121 | } | 121 | } |
122 | 122 | ||
123 | error = devm_hwrng_register(&pdev->dev, &priv->rng); | 123 | ret = devm_hwrng_register(&pdev->dev, &priv->rng); |
124 | if (error) { | 124 | if (ret) { |
125 | dev_err(&pdev->dev, "failed to register rng device: %d\n", | 125 | dev_err(&pdev->dev, "failed to register rng device: %d\n", |
126 | error); | 126 | ret); |
127 | return error; | 127 | return ret; |
128 | } | 128 | } |
129 | 129 | ||
130 | dev_info(&pdev->dev, "registered RNG driver\n"); | 130 | dev_info(&pdev->dev, "registered RNG driver\n"); |
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 9bb592872532..bf75f6361773 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
@@ -2000,7 +2000,7 @@ static int smi_ipmb_proc_show(struct seq_file *m, void *v) | |||
2000 | seq_printf(m, " %x", intf->channels[i].address); | 2000 | seq_printf(m, " %x", intf->channels[i].address); |
2001 | seq_putc(m, '\n'); | 2001 | seq_putc(m, '\n'); |
2002 | 2002 | ||
2003 | return seq_has_overflowed(m); | 2003 | return 0; |
2004 | } | 2004 | } |
2005 | 2005 | ||
2006 | static int smi_ipmb_proc_open(struct inode *inode, struct file *file) | 2006 | static int smi_ipmb_proc_open(struct inode *inode, struct file *file) |
@@ -2023,7 +2023,7 @@ static int smi_version_proc_show(struct seq_file *m, void *v) | |||
2023 | ipmi_version_major(&intf->bmc->id), | 2023 | ipmi_version_major(&intf->bmc->id), |
2024 | ipmi_version_minor(&intf->bmc->id)); | 2024 | ipmi_version_minor(&intf->bmc->id)); |
2025 | 2025 | ||
2026 | return seq_has_overflowed(m); | 2026 | return 0; |
2027 | } | 2027 | } |
2028 | 2028 | ||
2029 | static int smi_version_proc_open(struct inode *inode, struct file *file) | 2029 | static int smi_version_proc_open(struct inode *inode, struct file *file) |
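The ipmi proc hunks here (and the matching ones in ipmi_si_intf.c and ipmi_ssif.c below) restore the usual seq_file convention: a show callback returns 0 and lets the seq_file core handle buffer overflow by retrying with a larger buffer, rather than propagating seq_has_overflowed(). A minimal sketch of that convention with single_open(), mirroring the proc_open/proc_ops pattern already visible in these files; names are illustrative only:

static int demo_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "demo: %d\n", 42);
	return 0;	/* overflow is the seq_file core's problem, not ours */
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_ops = {
	.owner		= THIS_MODULE,
	.open		= demo_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};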
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 5e90a18afbaf..8a45e92ff60c 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -942,8 +942,7 @@ static void sender(void *send_info, | |||
942 | * If we are running to completion, start it and run | 942 | * If we are running to completion, start it and run |
943 | * transactions until everything is clear. | 943 | * transactions until everything is clear. |
944 | */ | 944 | */ |
945 | smi_info->curr_msg = msg; | 945 | smi_info->waiting_msg = msg; |
946 | smi_info->waiting_msg = NULL; | ||
947 | 946 | ||
948 | /* | 947 | /* |
949 | * Run to completion means we are single-threaded, no | 948 | * Run to completion means we are single-threaded, no |
@@ -2244,7 +2243,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev, | |||
2244 | acpi_handle handle; | 2243 | acpi_handle handle; |
2245 | acpi_status status; | 2244 | acpi_status status; |
2246 | unsigned long long tmp; | 2245 | unsigned long long tmp; |
2247 | int rv; | 2246 | int rv = -EINVAL; |
2248 | 2247 | ||
2249 | acpi_dev = pnp_acpi_device(dev); | 2248 | acpi_dev = pnp_acpi_device(dev); |
2250 | if (!acpi_dev) | 2249 | if (!acpi_dev) |
@@ -2262,8 +2261,10 @@ static int ipmi_pnp_probe(struct pnp_dev *dev, | |||
2262 | 2261 | ||
2263 | /* _IFT tells us the interface type: KCS, BT, etc */ | 2262 | /* _IFT tells us the interface type: KCS, BT, etc */ |
2264 | status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); | 2263 | status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp); |
2265 | if (ACPI_FAILURE(status)) | 2264 | if (ACPI_FAILURE(status)) { |
2265 | dev_err(&dev->dev, "Could not find ACPI IPMI interface type\n"); | ||
2266 | goto err_free; | 2266 | goto err_free; |
2267 | } | ||
2267 | 2268 | ||
2268 | switch (tmp) { | 2269 | switch (tmp) { |
2269 | case 1: | 2270 | case 1: |
@@ -2276,6 +2277,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev, | |||
2276 | info->si_type = SI_BT; | 2277 | info->si_type = SI_BT; |
2277 | break; | 2278 | break; |
2278 | case 4: /* SSIF, just ignore */ | 2279 | case 4: /* SSIF, just ignore */ |
2280 | rv = -ENODEV; | ||
2279 | goto err_free; | 2281 | goto err_free; |
2280 | default: | 2282 | default: |
2281 | dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); | 2283 | dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp); |
@@ -2336,7 +2338,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev, | |||
2336 | 2338 | ||
2337 | err_free: | 2339 | err_free: |
2338 | kfree(info); | 2340 | kfree(info); |
2339 | return -EINVAL; | 2341 | return rv; |
2340 | } | 2342 | } |
2341 | 2343 | ||
2342 | static void ipmi_pnp_remove(struct pnp_dev *dev) | 2344 | static void ipmi_pnp_remove(struct pnp_dev *dev) |
@@ -3080,7 +3082,7 @@ static int smi_type_proc_show(struct seq_file *m, void *v) | |||
3080 | 3082 | ||
3081 | seq_printf(m, "%s\n", si_to_str[smi->si_type]); | 3083 | seq_printf(m, "%s\n", si_to_str[smi->si_type]); |
3082 | 3084 | ||
3083 | return seq_has_overflowed(m); | 3085 | return 0; |
3084 | } | 3086 | } |
3085 | 3087 | ||
3086 | static int smi_type_proc_open(struct inode *inode, struct file *file) | 3088 | static int smi_type_proc_open(struct inode *inode, struct file *file) |
@@ -3153,7 +3155,7 @@ static int smi_params_proc_show(struct seq_file *m, void *v) | |||
3153 | smi->irq, | 3155 | smi->irq, |
3154 | smi->slave_addr); | 3156 | smi->slave_addr); |
3155 | 3157 | ||
3156 | return seq_has_overflowed(m); | 3158 | return 0; |
3157 | } | 3159 | } |
3158 | 3160 | ||
3159 | static int smi_params_proc_open(struct inode *inode, struct file *file) | 3161 | static int smi_params_proc_open(struct inode *inode, struct file *file) |
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index f40e3bd2c69c..207689c444a8 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c | |||
@@ -31,7 +31,6 @@ | |||
31 | * interface into the I2C driver, I believe. | 31 | * interface into the I2C driver, I believe. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/version.h> | ||
35 | #if defined(MODVERSIONS) | 34 | #if defined(MODVERSIONS) |
36 | #include <linux/modversions.h> | 35 | #include <linux/modversions.h> |
37 | #endif | 36 | #endif |
@@ -166,6 +165,9 @@ enum ssif_stat_indexes { | |||
166 | /* Number of watchdog pretimeouts. */ | 165 | /* Number of watchdog pretimeouts. */ |
167 | SSIF_STAT_watchdog_pretimeouts, | 166 | SSIF_STAT_watchdog_pretimeouts, |
168 | 167 | ||
168 | /* Number of alerts received. */ | ||
169 | SSIF_STAT_alerts, | ||
170 | |||
169 | /* Always add statistics before this value, it must be last. */ | 171 | /* Always add statistics before this value, it must be last. */ |
170 | SSIF_NUM_STATS | 172 | SSIF_NUM_STATS |
171 | }; | 173 | }; |
@@ -214,7 +216,16 @@ struct ssif_info { | |||
214 | #define WDT_PRE_TIMEOUT_INT 0x08 | 216 | #define WDT_PRE_TIMEOUT_INT 0x08 |
215 | unsigned char msg_flags; | 217 | unsigned char msg_flags; |
216 | 218 | ||
219 | u8 global_enables; | ||
217 | bool has_event_buffer; | 220 | bool has_event_buffer; |
221 | bool supports_alert; | ||
222 | |||
223 | /* | ||
224 | * Used to tell what we should do with alerts. If we are | ||
225 | * waiting on a response, read the data immediately. | ||
226 | */ | ||
227 | bool got_alert; | ||
228 | bool waiting_alert; | ||
218 | 229 | ||
219 | /* | 230 | /* |
220 | * If set to true, this will request events the next time the | 231 | * If set to true, this will request events the next time the |
@@ -478,13 +489,13 @@ static int ipmi_ssif_thread(void *data) | |||
478 | 489 | ||
479 | if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) { | 490 | if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) { |
480 | result = i2c_smbus_write_block_data( | 491 | result = i2c_smbus_write_block_data( |
481 | ssif_info->client, SSIF_IPMI_REQUEST, | 492 | ssif_info->client, ssif_info->i2c_command, |
482 | ssif_info->i2c_data[0], | 493 | ssif_info->i2c_data[0], |
483 | ssif_info->i2c_data + 1); | 494 | ssif_info->i2c_data + 1); |
484 | ssif_info->done_handler(ssif_info, result, NULL, 0); | 495 | ssif_info->done_handler(ssif_info, result, NULL, 0); |
485 | } else { | 496 | } else { |
486 | result = i2c_smbus_read_block_data( | 497 | result = i2c_smbus_read_block_data( |
487 | ssif_info->client, SSIF_IPMI_RESPONSE, | 498 | ssif_info->client, ssif_info->i2c_command, |
488 | ssif_info->i2c_data); | 499 | ssif_info->i2c_data); |
489 | if (result < 0) | 500 | if (result < 0) |
490 | ssif_info->done_handler(ssif_info, result, | 501 | ssif_info->done_handler(ssif_info, result, |
@@ -518,15 +529,12 @@ static int ssif_i2c_send(struct ssif_info *ssif_info, | |||
518 | static void msg_done_handler(struct ssif_info *ssif_info, int result, | 529 | static void msg_done_handler(struct ssif_info *ssif_info, int result, |
519 | unsigned char *data, unsigned int len); | 530 | unsigned char *data, unsigned int len); |
520 | 531 | ||
521 | static void retry_timeout(unsigned long data) | 532 | static void start_get(struct ssif_info *ssif_info) |
522 | { | 533 | { |
523 | struct ssif_info *ssif_info = (void *) data; | ||
524 | int rv; | 534 | int rv; |
525 | 535 | ||
526 | if (ssif_info->stopping) | ||
527 | return; | ||
528 | |||
529 | ssif_info->rtc_us_timer = 0; | 536 | ssif_info->rtc_us_timer = 0; |
537 | ssif_info->multi_pos = 0; | ||
530 | 538 | ||
531 | rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ, | 539 | rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ, |
532 | SSIF_IPMI_RESPONSE, | 540 | SSIF_IPMI_RESPONSE, |
@@ -540,6 +548,46 @@ static void retry_timeout(unsigned long data) | |||
540 | } | 548 | } |
541 | } | 549 | } |
542 | 550 | ||
551 | static void retry_timeout(unsigned long data) | ||
552 | { | ||
553 | struct ssif_info *ssif_info = (void *) data; | ||
554 | unsigned long oflags, *flags; | ||
555 | bool waiting; | ||
556 | |||
557 | if (ssif_info->stopping) | ||
558 | return; | ||
559 | |||
560 | flags = ipmi_ssif_lock_cond(ssif_info, &oflags); | ||
561 | waiting = ssif_info->waiting_alert; | ||
562 | ssif_info->waiting_alert = false; | ||
563 | ipmi_ssif_unlock_cond(ssif_info, flags); | ||
564 | |||
565 | if (waiting) | ||
566 | start_get(ssif_info); | ||
567 | } | ||
568 | |||
569 | |||
570 | static void ssif_alert(struct i2c_client *client, unsigned int data) | ||
571 | { | ||
572 | struct ssif_info *ssif_info = i2c_get_clientdata(client); | ||
573 | unsigned long oflags, *flags; | ||
574 | bool do_get = false; | ||
575 | |||
576 | ssif_inc_stat(ssif_info, alerts); | ||
577 | |||
578 | flags = ipmi_ssif_lock_cond(ssif_info, &oflags); | ||
579 | if (ssif_info->waiting_alert) { | ||
580 | ssif_info->waiting_alert = false; | ||
581 | del_timer(&ssif_info->retry_timer); | ||
582 | do_get = true; | ||
583 | } else if (ssif_info->curr_msg) { | ||
584 | ssif_info->got_alert = true; | ||
585 | } | ||
586 | ipmi_ssif_unlock_cond(ssif_info, flags); | ||
587 | if (do_get) | ||
588 | start_get(ssif_info); | ||
589 | } | ||
590 | |||
543 | static int start_resend(struct ssif_info *ssif_info); | 591 | static int start_resend(struct ssif_info *ssif_info); |
544 | 592 | ||
545 | static void msg_done_handler(struct ssif_info *ssif_info, int result, | 593 | static void msg_done_handler(struct ssif_info *ssif_info, int result, |
@@ -559,9 +607,12 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, | |||
559 | if (ssif_info->retries_left > 0) { | 607 | if (ssif_info->retries_left > 0) { |
560 | ssif_inc_stat(ssif_info, receive_retries); | 608 | ssif_inc_stat(ssif_info, receive_retries); |
561 | 609 | ||
610 | flags = ipmi_ssif_lock_cond(ssif_info, &oflags); | ||
611 | ssif_info->waiting_alert = true; | ||
612 | ssif_info->rtc_us_timer = SSIF_MSG_USEC; | ||
562 | mod_timer(&ssif_info->retry_timer, | 613 | mod_timer(&ssif_info->retry_timer, |
563 | jiffies + SSIF_MSG_JIFFIES); | 614 | jiffies + SSIF_MSG_JIFFIES); |
564 | ssif_info->rtc_us_timer = SSIF_MSG_USEC; | 615 | ipmi_ssif_unlock_cond(ssif_info, flags); |
565 | return; | 616 | return; |
566 | } | 617 | } |
567 | 618 | ||
@@ -581,9 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, | |||
581 | ssif_inc_stat(ssif_info, received_message_parts); | 632 | ssif_inc_stat(ssif_info, received_message_parts); |
582 | 633 | ||
583 | /* Remove the multi-part read marker. */ | 634 | /* Remove the multi-part read marker. */ |
584 | for (i = 0; i < (len-2); i++) | ||
585 | ssif_info->data[i] = data[i+2]; | ||
586 | len -= 2; | 635 | len -= 2; |
636 | for (i = 0; i < len; i++) | ||
637 | ssif_info->data[i] = data[i+2]; | ||
587 | ssif_info->multi_len = len; | 638 | ssif_info->multi_len = len; |
588 | ssif_info->multi_pos = 1; | 639 | ssif_info->multi_pos = 1; |
589 | 640 | ||
@@ -610,9 +661,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, | |||
610 | goto continue_op; | 661 | goto continue_op; |
611 | } | 662 | } |
612 | 663 | ||
613 | blocknum = data[ssif_info->multi_len]; | 664 | blocknum = data[0]; |
614 | 665 | ||
615 | if (ssif_info->multi_len+len-1 > IPMI_MAX_MSG_LENGTH) { | 666 | if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) { |
616 | /* Received message too big, abort the operation. */ | 667 | /* Received message too big, abort the operation. */ |
617 | result = -E2BIG; | 668 | result = -E2BIG; |
618 | if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) | 669 | if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) |
@@ -622,15 +673,15 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, | |||
622 | } | 673 | } |
623 | 674 | ||
624 | /* Remove the blocknum from the data. */ | 675 | /* Remove the blocknum from the data. */ |
625 | for (i = 0; i < (len-1); i++) | ||
626 | ssif_info->data[i+ssif_info->multi_len] = data[i+1]; | ||
627 | len--; | 676 | len--; |
677 | for (i = 0; i < len; i++) | ||
678 | ssif_info->data[i + ssif_info->multi_len] = data[i + 1]; | ||
628 | ssif_info->multi_len += len; | 679 | ssif_info->multi_len += len; |
629 | if (blocknum == 0xff) { | 680 | if (blocknum == 0xff) { |
630 | /* End of read */ | 681 | /* End of read */ |
631 | len = ssif_info->multi_len; | 682 | len = ssif_info->multi_len; |
632 | data = ssif_info->data; | 683 | data = ssif_info->data; |
633 | } else if ((blocknum+1) != ssif_info->multi_pos) { | 684 | } else if (blocknum + 1 != ssif_info->multi_pos) { |
634 | /* | 685 | /* |
635 | * Out of sequence block, just abort. Block | 686 | * Out of sequence block, just abort. Block |
636 | * numbers start at zero for the second block, | 687 | * numbers start at zero for the second block, |
@@ -650,7 +701,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, | |||
650 | if (rv < 0) { | 701 | if (rv < 0) { |
651 | if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) | 702 | if (ssif_info->ssif_debug & SSIF_DEBUG_MSG) |
652 | pr_info(PFX | 703 | pr_info(PFX |
653 | "Error from i2c_non_blocking_op(2)\n"); | 704 | "Error from ssif_i2c_send\n"); |
654 | 705 | ||
655 | result = -EIO; | 706 | result = -EIO; |
656 | } else | 707 | } else |
@@ -830,7 +881,11 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, | |||
830 | } | 881 | } |
831 | 882 | ||
832 | if (ssif_info->multi_data) { | 883 | if (ssif_info->multi_data) { |
833 | /* In the middle of a multi-data write. */ | 884 | /* |
885 | * In the middle of a multi-data write. See the comment | ||
886 | * in the SSIF_MULTI_n_PART case in the probe function | ||
887 | * for details on the intricacies of this. | ||
888 | */ | ||
834 | int left; | 889 | int left; |
835 | 890 | ||
836 | ssif_inc_stat(ssif_info, sent_messages_parts); | 891 | ssif_inc_stat(ssif_info, sent_messages_parts); |
@@ -864,15 +919,32 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result, | |||
864 | msg_done_handler(ssif_info, -EIO, NULL, 0); | 919 | msg_done_handler(ssif_info, -EIO, NULL, 0); |
865 | } | 920 | } |
866 | } else { | 921 | } else { |
922 | unsigned long oflags, *flags; | ||
923 | bool got_alert; | ||
924 | |||
867 | ssif_inc_stat(ssif_info, sent_messages); | 925 | ssif_inc_stat(ssif_info, sent_messages); |
868 | ssif_inc_stat(ssif_info, sent_messages_parts); | 926 | ssif_inc_stat(ssif_info, sent_messages_parts); |
869 | 927 | ||
870 | /* Wait a jiffie then request the next message */ | 928 | flags = ipmi_ssif_lock_cond(ssif_info, &oflags); |
871 | ssif_info->retries_left = SSIF_RECV_RETRIES; | 929 | got_alert = ssif_info->got_alert; |
872 | ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC; | 930 | if (got_alert) { |
873 | mod_timer(&ssif_info->retry_timer, | 931 | ssif_info->got_alert = false; |
874 | jiffies + SSIF_MSG_PART_JIFFIES); | 932 | ssif_info->waiting_alert = false; |
875 | return; | 933 | } |
934 | |||
935 | if (got_alert) { | ||
936 | ipmi_ssif_unlock_cond(ssif_info, flags); | ||
937 | /* The alert already happened, try now. */ | ||
938 | retry_timeout((unsigned long) ssif_info); | ||
939 | } else { | ||
940 | /* Wait a jiffie then request the next message */ | ||
941 | ssif_info->waiting_alert = true; | ||
942 | ssif_info->retries_left = SSIF_RECV_RETRIES; | ||
943 | ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC; | ||
944 | mod_timer(&ssif_info->retry_timer, | ||
945 | jiffies + SSIF_MSG_PART_JIFFIES); | ||
946 | ipmi_ssif_unlock_cond(ssif_info, flags); | ||
947 | } | ||
876 | } | 948 | } |
877 | } | 949 | } |
878 | 950 | ||
@@ -881,6 +953,8 @@ static int start_resend(struct ssif_info *ssif_info) | |||
881 | int rv; | 953 | int rv; |
882 | int command; | 954 | int command; |
883 | 955 | ||
956 | ssif_info->got_alert = false; | ||
957 | |||
884 | if (ssif_info->data_len > 32) { | 958 | if (ssif_info->data_len > 32) { |
885 | command = SSIF_IPMI_MULTI_PART_REQUEST_START; | 959 | command = SSIF_IPMI_MULTI_PART_REQUEST_START; |
886 | ssif_info->multi_data = ssif_info->data; | 960 | ssif_info->multi_data = ssif_info->data; |
@@ -915,7 +989,7 @@ static int start_send(struct ssif_info *ssif_info, | |||
915 | return -E2BIG; | 989 | return -E2BIG; |
916 | 990 | ||
917 | ssif_info->retries_left = SSIF_SEND_RETRIES; | 991 | ssif_info->retries_left = SSIF_SEND_RETRIES; |
918 | memcpy(ssif_info->data+1, data, len); | 992 | memcpy(ssif_info->data + 1, data, len); |
919 | ssif_info->data_len = len; | 993 | ssif_info->data_len = len; |
920 | return start_resend(ssif_info); | 994 | return start_resend(ssif_info); |
921 | } | 995 | } |
@@ -1200,7 +1274,7 @@ static int smi_type_proc_show(struct seq_file *m, void *v) | |||
1200 | { | 1274 | { |
1201 | seq_puts(m, "ssif\n"); | 1275 | seq_puts(m, "ssif\n"); |
1202 | 1276 | ||
1203 | return seq_has_overflowed(m); | 1277 | return 0; |
1204 | } | 1278 | } |
1205 | 1279 | ||
1206 | static int smi_type_proc_open(struct inode *inode, struct file *file) | 1280 | static int smi_type_proc_open(struct inode *inode, struct file *file) |
@@ -1243,6 +1317,8 @@ static int smi_stats_proc_show(struct seq_file *m, void *v) | |||
1243 | ssif_get_stat(ssif_info, events)); | 1317 | ssif_get_stat(ssif_info, events)); |
1244 | seq_printf(m, "watchdog_pretimeouts: %u\n", | 1318 | seq_printf(m, "watchdog_pretimeouts: %u\n", |
1245 | ssif_get_stat(ssif_info, watchdog_pretimeouts)); | 1319 | ssif_get_stat(ssif_info, watchdog_pretimeouts)); |
1320 | seq_printf(m, "alerts: %u\n", | ||
1321 | ssif_get_stat(ssif_info, alerts)); | ||
1246 | return 0; | 1322 | return 0; |
1247 | } | 1323 | } |
1248 | 1324 | ||
@@ -1258,6 +1334,23 @@ static const struct file_operations smi_stats_proc_ops = { | |||
1258 | .release = single_release, | 1334 | .release = single_release, |
1259 | }; | 1335 | }; |
1260 | 1336 | ||
1337 | static int strcmp_nospace(char *s1, char *s2) | ||
1338 | { | ||
1339 | while (*s1 && *s2) { | ||
1340 | while (isspace(*s1)) | ||
1341 | s1++; | ||
1342 | while (isspace(*s2)) | ||
1343 | s2++; | ||
1344 | if (*s1 > *s2) | ||
1345 | return 1; | ||
1346 | if (*s1 < *s2) | ||
1347 | return -1; | ||
1348 | s1++; | ||
1349 | s2++; | ||
1350 | } | ||
1351 | return 0; | ||
1352 | } | ||
1353 | |||
1261 | static struct ssif_addr_info *ssif_info_find(unsigned short addr, | 1354 | static struct ssif_addr_info *ssif_info_find(unsigned short addr, |
1262 | char *adapter_name, | 1355 | char *adapter_name, |
1263 | bool match_null_name) | 1356 | bool match_null_name) |
@@ -1272,8 +1365,10 @@ restart: | |||
1272 | /* One is NULL and one is not */ | 1365 | /* One is NULL and one is not */ |
1273 | continue; | 1366 | continue; |
1274 | } | 1367 | } |
1275 | if (strcmp(info->adapter_name, adapter_name)) | 1368 | if (adapter_name && |
1276 | /* Names to not match */ | 1369 | strcmp_nospace(info->adapter_name, |
1370 | adapter_name)) | ||
1371 | /* Names do not match */ | ||
1277 | continue; | 1372 | continue; |
1278 | } | 1373 | } |
1279 | found = info; | 1374 | found = info; |
@@ -1306,6 +1401,12 @@ static bool check_acpi(struct ssif_info *ssif_info, struct device *dev) | |||
1306 | return false; | 1401 | return false; |
1307 | } | 1402 | } |
1308 | 1403 | ||
1404 | /* | ||
1405 | * Global enables we care about. | ||
1406 | */ | ||
1407 | #define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \ | ||
1408 | IPMI_BMC_EVT_MSG_INTR) | ||
1409 | |||
1309 | static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) | 1410 | static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) |
1310 | { | 1411 | { |
1311 | unsigned char msg[3]; | 1412 | unsigned char msg[3]; |
@@ -1391,13 +1492,33 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
1391 | break; | 1492 | break; |
1392 | 1493 | ||
1393 | case SSIF_MULTI_2_PART: | 1494 | case SSIF_MULTI_2_PART: |
1394 | if (ssif_info->max_xmit_msg_size > 64) | 1495 | if (ssif_info->max_xmit_msg_size > 63) |
1395 | ssif_info->max_xmit_msg_size = 64; | 1496 | ssif_info->max_xmit_msg_size = 63; |
1396 | if (ssif_info->max_recv_msg_size > 62) | 1497 | if (ssif_info->max_recv_msg_size > 62) |
1397 | ssif_info->max_recv_msg_size = 62; | 1498 | ssif_info->max_recv_msg_size = 62; |
1398 | break; | 1499 | break; |
1399 | 1500 | ||
1400 | case SSIF_MULTI_n_PART: | 1501 | case SSIF_MULTI_n_PART: |
1502 | /* | ||
1503 | * The specification is rather confusing at | ||
1504 | * this point, but I think I understand what | ||
1505 | * is meant. At least I have a workable | ||
1506 | * solution. With multi-part messages, you | ||
1507 | * cannot send a message that is a multiple of | ||
1508 | * 32 bytes in length, because the start and | ||
1509 | * middle messages are 32 bytes and the end | ||
1510 | * message must be at least one byte. You | ||
1511 | * can't fudge in an extra byte; that would | ||
1512 | * screw up things like FRU data writes. So | ||
1513 | * we limit the length to 63 bytes. That way | ||
1514 | * a 32-byte message gets sent as a single | ||
1515 | * part. A larger message will be a 32-byte | ||
1516 | * start and the next message is always going | ||
1517 | * to be 1-31 bytes in length. Not ideal, but | ||
1518 | * it should work. | ||
1519 | */ | ||
1520 | if (ssif_info->max_xmit_msg_size > 63) | ||
1521 | ssif_info->max_xmit_msg_size = 63; | ||
1401 | break; | 1522 | break; |
1402 | 1523 | ||
1403 | default: | 1524 | default: |
@@ -1407,7 +1528,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
1407 | } else { | 1528 | } else { |
1408 | no_support: | 1529 | no_support: |
1409 | /* Assume no multi-part or PEC support */ | 1530 | /* Assume no multi-part or PEC support */ |
1410 | pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n", | 1531 | pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n", |
1411 | rv, len, resp[2]); | 1532 | rv, len, resp[2]); |
1412 | 1533 | ||
1413 | ssif_info->max_xmit_msg_size = 32; | 1534 | ssif_info->max_xmit_msg_size = 32; |
@@ -1436,6 +1557,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
1436 | goto found; | 1557 | goto found; |
1437 | } | 1558 | } |
1438 | 1559 | ||
1560 | ssif_info->global_enables = resp[3]; | ||
1561 | |||
1439 | if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) { | 1562 | if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) { |
1440 | ssif_info->has_event_buffer = true; | 1563 | ssif_info->has_event_buffer = true; |
1441 | /* buffer is already enabled, nothing to do. */ | 1564 | /* buffer is already enabled, nothing to do. */ |
@@ -1444,18 +1567,37 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
1444 | 1567 | ||
1445 | msg[0] = IPMI_NETFN_APP_REQUEST << 2; | 1568 | msg[0] = IPMI_NETFN_APP_REQUEST << 2; |
1446 | msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; | 1569 | msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; |
1447 | msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF; | 1570 | msg[2] = ssif_info->global_enables | IPMI_BMC_EVT_MSG_BUFF; |
1448 | rv = do_cmd(client, 3, msg, &len, resp); | 1571 | rv = do_cmd(client, 3, msg, &len, resp); |
1449 | if (rv || (len < 2)) { | 1572 | if (rv || (len < 2)) { |
1450 | pr_warn(PFX "Error getting global enables: %d %d %2.2x\n", | 1573 | pr_warn(PFX "Error setting global enables: %d %d %2.2x\n", |
1451 | rv, len, resp[2]); | 1574 | rv, len, resp[2]); |
1452 | rv = 0; /* Not fatal */ | 1575 | rv = 0; /* Not fatal */ |
1453 | goto found; | 1576 | goto found; |
1454 | } | 1577 | } |
1455 | 1578 | ||
1456 | if (resp[2] == 0) | 1579 | if (resp[2] == 0) { |
1457 | /* A successful return means the event buffer is supported. */ | 1580 | /* A successful return means the event buffer is supported. */ |
1458 | ssif_info->has_event_buffer = true; | 1581 | ssif_info->has_event_buffer = true; |
1582 | ssif_info->global_enables |= IPMI_BMC_EVT_MSG_BUFF; | ||
1583 | } | ||
1584 | |||
1585 | msg[0] = IPMI_NETFN_APP_REQUEST << 2; | ||
1586 | msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD; | ||
1587 | msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR; | ||
1588 | rv = do_cmd(client, 3, msg, &len, resp); | ||
1589 | if (rv || (len < 2)) { | ||
1590 | pr_warn(PFX "Error setting global enables: %d %d %2.2x\n", | ||
1591 | rv, len, resp[2]); | ||
1592 | rv = 0; /* Not fatal */ | ||
1593 | goto found; | ||
1594 | } | ||
1595 | |||
1596 | if (resp[2] == 0) { | ||
1597 | /* A successful return means the alert is supported. */ | ||
1598 | ssif_info->supports_alert = true; | ||
1599 | ssif_info->global_enables |= IPMI_BMC_RCV_MSG_INTR; | ||
1600 | } | ||
1459 | 1601 | ||
1460 | found: | 1602 | found: |
1461 | ssif_info->intf_num = atomic_inc_return(&next_intf); | 1603 | ssif_info->intf_num = atomic_inc_return(&next_intf); |
@@ -1813,6 +1955,7 @@ static struct i2c_driver ssif_i2c_driver = { | |||
1813 | }, | 1955 | }, |
1814 | .probe = ssif_probe, | 1956 | .probe = ssif_probe, |
1815 | .remove = ssif_remove, | 1957 | .remove = ssif_remove, |
1958 | .alert = ssif_alert, | ||
1816 | .id_table = ssif_id, | 1959 | .id_table = ssif_id, |
1817 | .detect = ssif_detect | 1960 | .detect = ssif_detect |
1818 | }; | 1961 | }; |
@@ -1832,7 +1975,7 @@ static int init_ipmi_ssif(void) | |||
1832 | rv = new_ssif_client(addr[i], adapter_name[i], | 1975 | rv = new_ssif_client(addr[i], adapter_name[i], |
1833 | dbg[i], slave_addrs[i], | 1976 | dbg[i], slave_addrs[i], |
1834 | SI_HARDCODED); | 1977 | SI_HARDCODED); |
1835 | if (!rv) | 1978 | if (rv) |
1836 | pr_err(PFX | 1979 | pr_err(PFX |
1837 | "Couldn't add hardcoded device at addr 0x%x\n", | 1980 | "Couldn't add hardcoded device at addr 0x%x\n", |
1838 | addr[i]); | 1981 | addr[i]); |
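Illustrative sketch (not from the patch series): the SSIF_MULTI_n_PART comment in the probe hunk above reasons that start and middle parts carry exactly 32 bytes while the end part must carry 1-31 bytes, so no total length that is a larger multiple of 32 can be represented, hence the 63-byte cap on max_xmit_msg_size. A minimal userspace walk-through of that arithmetic, with hypothetical sample lengths:

#include <stdio.h>

/*
 * Returns 1 if a write of 'len' bytes fits the comment's reading of the
 * SSIF multi-part rules: one part for lengths up to 32 bytes, otherwise
 * a 32-byte start, optional 32-byte middles, and a 1-31 byte end.
 */
static int representable(int len)
{
	if (len <= 32)
		return 1;		/* goes out as a single part */
	return (len % 32) != 0;		/* end part must be 1-31 bytes */
}

int main(void)
{
	const int lens[] = { 32, 33, 63, 64, 96 };
	size_t i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %3d -> %s\n", lens[i],
		       representable(lens[i]) ? "ok" : "multiple of 32, cannot be split");
	return 0;
}

Capping the transmit size at 63 keeps every message in the "ok" range: 32 bytes is sent as a single part, and 33-63 bytes as a 32-byte start plus a 1-31 byte end.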
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 7a73a279e179..61c417b9e53f 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
@@ -158,9 +158,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, | |||
158 | int entered_state; | 158 | int entered_state; |
159 | 159 | ||
160 | struct cpuidle_state *target_state = &drv->states[index]; | 160 | struct cpuidle_state *target_state = &drv->states[index]; |
161 | bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP); | ||
161 | ktime_t time_start, time_end; | 162 | ktime_t time_start, time_end; |
162 | s64 diff; | 163 | s64 diff; |
163 | 164 | ||
165 | /* | ||
166 | * Tell the time framework to switch to a broadcast timer because our | ||
167 | * local timer will be shut down. If a local timer is used from another | ||
168 | * CPU as a broadcast timer, this call may fail if it is not available. | ||
169 | */ | ||
170 | if (broadcast && tick_broadcast_enter()) | ||
171 | return -EBUSY; | ||
172 | |||
164 | trace_cpu_idle_rcuidle(index, dev->cpu); | 173 | trace_cpu_idle_rcuidle(index, dev->cpu); |
165 | time_start = ktime_get(); | 174 | time_start = ktime_get(); |
166 | 175 | ||
@@ -169,6 +178,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, | |||
169 | time_end = ktime_get(); | 178 | time_end = ktime_get(); |
170 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); | 179 | trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); |
171 | 180 | ||
181 | if (broadcast) { | ||
182 | if (WARN_ON_ONCE(!irqs_disabled())) | ||
183 | local_irq_disable(); | ||
184 | |||
185 | tick_broadcast_exit(); | ||
186 | } | ||
187 | |||
172 | if (!cpuidle_state_is_coupled(dev, drv, entered_state)) | 188 | if (!cpuidle_state_is_coupled(dev, drv, entered_state)) |
173 | local_irq_enable(); | 189 | local_irq_enable(); |
174 | 190 | ||
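Condensed sketch (not from the patch series) of the control flow the cpuidle hunks above add around idle-state entry; the tracing, residency accounting and coupled-state handling of the real cpuidle_enter_state() are omitted:

#include <linux/cpuidle.h>
#include <linux/tick.h>

static int enter_state_sketch(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	struct cpuidle_state *target_state = &drv->states[index];
	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
	int entered_state;

	/*
	 * The local timer is about to be shut down, so hand timekeeping
	 * over to the broadcast device; bail out if none is available.
	 */
	if (broadcast && tick_broadcast_enter())
		return -EBUSY;

	entered_state = target_state->enter(dev, drv, index);

	if (broadcast) {
		/* The state's enter() should return with IRQs disabled. */
		if (WARN_ON_ONCE(!irqs_disabled()))
			local_irq_disable();
		tick_broadcast_exit();
	}

	return entered_state;
}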
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index fd7ac13f2574..bda2cb06dc7a 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -437,6 +437,7 @@ config IMG_MDC_DMA | |||
437 | 437 | ||
438 | config XGENE_DMA | 438 | config XGENE_DMA |
439 | tristate "APM X-Gene DMA support" | 439 | tristate "APM X-Gene DMA support" |
440 | depends on ARCH_XGENE || COMPILE_TEST | ||
440 | select DMA_ENGINE | 441 | select DMA_ENGINE |
441 | select DMA_ENGINE_RAID | 442 | select DMA_ENGINE_RAID |
442 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH | 443 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH |
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 0e035a8cf401..2890d744bb1b 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c | |||
@@ -571,11 +571,15 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device) | |||
571 | 571 | ||
572 | chan = private_candidate(&mask, device, NULL, NULL); | 572 | chan = private_candidate(&mask, device, NULL, NULL); |
573 | if (chan) { | 573 | if (chan) { |
574 | dma_cap_set(DMA_PRIVATE, device->cap_mask); | ||
575 | device->privatecnt++; | ||
574 | err = dma_chan_get(chan); | 576 | err = dma_chan_get(chan); |
575 | if (err) { | 577 | if (err) { |
576 | pr_debug("%s: failed to get %s: (%d)\n", | 578 | pr_debug("%s: failed to get %s: (%d)\n", |
577 | __func__, dma_chan_name(chan), err); | 579 | __func__, dma_chan_name(chan), err); |
578 | chan = NULL; | 580 | chan = NULL; |
581 | if (--device->privatecnt == 0) | ||
582 | dma_cap_clear(DMA_PRIVATE, device->cap_mask); | ||
579 | } | 583 | } |
580 | } | 584 | } |
581 | 585 | ||
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index f705798ce3eb..ebd8a5f398b0 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c | |||
@@ -673,6 +673,7 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec, | |||
673 | * Power management | 673 | * Power management |
674 | */ | 674 | */ |
675 | 675 | ||
676 | #ifdef CONFIG_PM | ||
676 | static int usb_dmac_runtime_suspend(struct device *dev) | 677 | static int usb_dmac_runtime_suspend(struct device *dev) |
677 | { | 678 | { |
678 | struct usb_dmac *dmac = dev_get_drvdata(dev); | 679 | struct usb_dmac *dmac = dev_get_drvdata(dev); |
@@ -690,6 +691,7 @@ static int usb_dmac_runtime_resume(struct device *dev) | |||
690 | 691 | ||
691 | return usb_dmac_init(dmac); | 692 | return usb_dmac_init(dmac); |
692 | } | 693 | } |
694 | #endif /* CONFIG_PM */ | ||
693 | 695 | ||
694 | static const struct dev_pm_ops usb_dmac_pm = { | 696 | static const struct dev_pm_ops usb_dmac_pm = { |
695 | SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, | 697 | SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, |
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c index 87b8e3b900d2..5c55227a34c8 100644 --- a/drivers/firmware/efi/runtime-map.c +++ b/drivers/firmware/efi/runtime-map.c | |||
@@ -120,7 +120,8 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr) | |||
120 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); | 120 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); |
121 | if (!entry) { | 121 | if (!entry) { |
122 | kset_unregister(map_kset); | 122 | kset_unregister(map_kset); |
123 | return entry; | 123 | map_kset = NULL; |
124 | return ERR_PTR(-ENOMEM); | ||
124 | } | 125 | } |
125 | 126 | ||
126 | memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size, | 127 | memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size, |
@@ -132,6 +133,7 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr) | |||
132 | if (ret) { | 133 | if (ret) { |
133 | kobject_put(&entry->kobj); | 134 | kobject_put(&entry->kobj); |
134 | kset_unregister(map_kset); | 135 | kset_unregister(map_kset); |
136 | map_kset = NULL; | ||
135 | return ERR_PTR(ret); | 137 | return ERR_PTR(ret); |
136 | } | 138 | } |
137 | 139 | ||
@@ -195,8 +197,6 @@ out_add_entry: | |||
195 | entry = *(map_entries + j); | 197 | entry = *(map_entries + j); |
196 | kobject_put(&entry->kobj); | 198 | kobject_put(&entry->kobj); |
197 | } | 199 | } |
198 | if (map_kset) | ||
199 | kset_unregister(map_kset); | ||
200 | out: | 200 | out: |
201 | return ret; | 201 | return ret; |
202 | } | 202 | } |
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index cd1d5bf48f36..b232397ad7ec 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c | |||
@@ -1054,38 +1054,8 @@ static void omap_gpio_mod_init(struct gpio_bank *bank) | |||
1054 | dev_err(bank->dev, "Could not get gpio dbck\n"); | 1054 | dev_err(bank->dev, "Could not get gpio dbck\n"); |
1055 | } | 1055 | } |
1056 | 1056 | ||
1057 | static void | ||
1058 | omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start, | ||
1059 | unsigned int num) | ||
1060 | { | ||
1061 | struct irq_chip_generic *gc; | ||
1062 | struct irq_chip_type *ct; | ||
1063 | |||
1064 | gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base, | ||
1065 | handle_simple_irq); | ||
1066 | if (!gc) { | ||
1067 | dev_err(bank->dev, "Memory alloc failed for gc\n"); | ||
1068 | return; | ||
1069 | } | ||
1070 | |||
1071 | ct = gc->chip_types; | ||
1072 | |||
1073 | /* NOTE: No ack required, reading IRQ status clears it. */ | ||
1074 | ct->chip.irq_mask = irq_gc_mask_set_bit; | ||
1075 | ct->chip.irq_unmask = irq_gc_mask_clr_bit; | ||
1076 | ct->chip.irq_set_type = omap_gpio_irq_type; | ||
1077 | |||
1078 | if (bank->regs->wkup_en) | ||
1079 | ct->chip.irq_set_wake = omap_gpio_wake_enable; | ||
1080 | |||
1081 | ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride; | ||
1082 | irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, | ||
1083 | IRQ_NOREQUEST | IRQ_NOPROBE, 0); | ||
1084 | } | ||
1085 | |||
1086 | static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) | 1057 | static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) |
1087 | { | 1058 | { |
1088 | int j; | ||
1089 | static int gpio; | 1059 | static int gpio; |
1090 | int irq_base = 0; | 1060 | int irq_base = 0; |
1091 | int ret; | 1061 | int ret; |
@@ -1132,6 +1102,15 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) | |||
1132 | } | 1102 | } |
1133 | #endif | 1103 | #endif |
1134 | 1104 | ||
1105 | /* MPUIO is a bit different, reading IRQ status clears it */ | ||
1106 | if (bank->is_mpuio) { | ||
1107 | irqc->irq_ack = dummy_irq_chip.irq_ack; | ||
1108 | irqc->irq_mask = irq_gc_mask_set_bit; | ||
1109 | irqc->irq_unmask = irq_gc_mask_clr_bit; | ||
1110 | if (!bank->regs->wkup_en) | ||
1111 | irqc->irq_set_wake = NULL; | ||
1112 | } | ||
1113 | |||
1135 | ret = gpiochip_irqchip_add(&bank->chip, irqc, | 1114 | ret = gpiochip_irqchip_add(&bank->chip, irqc, |
1136 | irq_base, omap_gpio_irq_handler, | 1115 | irq_base, omap_gpio_irq_handler, |
1137 | IRQ_TYPE_NONE); | 1116 | IRQ_TYPE_NONE); |
@@ -1145,15 +1124,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) | |||
1145 | gpiochip_set_chained_irqchip(&bank->chip, irqc, | 1124 | gpiochip_set_chained_irqchip(&bank->chip, irqc, |
1146 | bank->irq, omap_gpio_irq_handler); | 1125 | bank->irq, omap_gpio_irq_handler); |
1147 | 1126 | ||
1148 | for (j = 0; j < bank->width; j++) { | ||
1149 | int irq = irq_find_mapping(bank->chip.irqdomain, j); | ||
1150 | if (bank->is_mpuio) { | ||
1151 | omap_mpuio_alloc_gc(bank, irq, bank->width); | ||
1152 | irq_set_chip_and_handler(irq, NULL, NULL); | ||
1153 | set_irq_flags(irq, 0); | ||
1154 | } | ||
1155 | } | ||
1156 | |||
1157 | return 0; | 1127 | return 0; |
1158 | } | 1128 | } |
1159 | 1129 | ||
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index d2303d50f561..725d16138b74 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
@@ -550,7 +550,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, | |||
550 | 550 | ||
551 | length = min(agpio->pin_table_length, (u16)(pin_index + bits)); | 551 | length = min(agpio->pin_table_length, (u16)(pin_index + bits)); |
552 | for (i = pin_index; i < length; ++i) { | 552 | for (i = pin_index; i < length; ++i) { |
553 | unsigned pin = agpio->pin_table[i]; | 553 | int pin = agpio->pin_table[i]; |
554 | struct acpi_gpio_connection *conn; | 554 | struct acpi_gpio_connection *conn; |
555 | struct gpio_desc *desc; | 555 | struct gpio_desc *desc; |
556 | bool found; | 556 | bool found; |
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index 7722ed53bd65..af3bc7a8033b 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c | |||
@@ -551,6 +551,7 @@ static struct class gpio_class = { | |||
551 | */ | 551 | */ |
552 | int gpiod_export(struct gpio_desc *desc, bool direction_may_change) | 552 | int gpiod_export(struct gpio_desc *desc, bool direction_may_change) |
553 | { | 553 | { |
554 | struct gpio_chip *chip; | ||
554 | unsigned long flags; | 555 | unsigned long flags; |
555 | int status; | 556 | int status; |
556 | const char *ioname = NULL; | 557 | const char *ioname = NULL; |
@@ -568,8 +569,16 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) | |||
568 | return -EINVAL; | 569 | return -EINVAL; |
569 | } | 570 | } |
570 | 571 | ||
572 | chip = desc->chip; | ||
573 | |||
571 | mutex_lock(&sysfs_lock); | 574 | mutex_lock(&sysfs_lock); |
572 | 575 | ||
576 | /* check if chip is being removed */ | ||
577 | if (!chip || !chip->exported) { | ||
578 | status = -ENODEV; | ||
579 | goto fail_unlock; | ||
580 | } | ||
581 | |||
573 | spin_lock_irqsave(&gpio_lock, flags); | 582 | spin_lock_irqsave(&gpio_lock, flags); |
574 | if (!test_bit(FLAG_REQUESTED, &desc->flags) || | 583 | if (!test_bit(FLAG_REQUESTED, &desc->flags) || |
575 | test_bit(FLAG_EXPORT, &desc->flags)) { | 584 | test_bit(FLAG_EXPORT, &desc->flags)) { |
@@ -783,12 +792,15 @@ void gpiochip_unexport(struct gpio_chip *chip) | |||
783 | { | 792 | { |
784 | int status; | 793 | int status; |
785 | struct device *dev; | 794 | struct device *dev; |
795 | struct gpio_desc *desc; | ||
796 | unsigned int i; | ||
786 | 797 | ||
787 | mutex_lock(&sysfs_lock); | 798 | mutex_lock(&sysfs_lock); |
788 | dev = class_find_device(&gpio_class, NULL, chip, match_export); | 799 | dev = class_find_device(&gpio_class, NULL, chip, match_export); |
789 | if (dev) { | 800 | if (dev) { |
790 | put_device(dev); | 801 | put_device(dev); |
791 | device_unregister(dev); | 802 | device_unregister(dev); |
803 | /* prevent further gpiod exports */ | ||
792 | chip->exported = false; | 804 | chip->exported = false; |
793 | status = 0; | 805 | status = 0; |
794 | } else | 806 | } else |
@@ -797,6 +809,13 @@ void gpiochip_unexport(struct gpio_chip *chip) | |||
797 | 809 | ||
798 | if (status) | 810 | if (status) |
799 | chip_dbg(chip, "%s: status %d\n", __func__, status); | 811 | chip_dbg(chip, "%s: status %d\n", __func__, status); |
812 | |||
813 | /* unregister gpiod class devices owned by sysfs */ | ||
814 | for (i = 0; i < chip->ngpio; i++) { | ||
815 | desc = &chip->desc[i]; | ||
816 | if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) | ||
817 | gpiod_free(desc); | ||
818 | } | ||
800 | } | 819 | } |
801 | 820 | ||
802 | static int __init gpiolib_sysfs_init(void) | 821 | static int __init gpiolib_sysfs_init(void) |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 69af73f15310..596ee5cd3b84 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | |||
@@ -430,9 +430,10 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm, | |||
430 | 430 | ||
431 | BUG_ON(!dqm || !qpd); | 431 | BUG_ON(!dqm || !qpd); |
432 | 432 | ||
433 | BUG_ON(!list_empty(&qpd->queues_list)); | 433 | pr_debug("In func %s\n", __func__); |
434 | 434 | ||
435 | pr_debug("kfd: In func %s\n", __func__); | 435 | pr_debug("qpd->queues_list is %s\n", |
436 | list_empty(&qpd->queues_list) ? "empty" : "not empty"); | ||
436 | 437 | ||
437 | retval = 0; | 438 | retval = 0; |
438 | mutex_lock(&dqm->lock); | 439 | mutex_lock(&dqm->lock); |
@@ -882,6 +883,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, | |||
882 | return -ENOMEM; | 883 | return -ENOMEM; |
883 | } | 884 | } |
884 | 885 | ||
886 | init_sdma_vm(dqm, q, qpd); | ||
887 | |||
885 | retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, | 888 | retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, |
886 | &q->gart_mqd_addr, &q->properties); | 889 | &q->gart_mqd_addr, &q->properties); |
887 | if (retval != 0) | 890 | if (retval != 0) |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 661c6605d31b..e469c4b2e8cc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c | |||
@@ -728,9 +728,9 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, | |||
728 | sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute", | 728 | sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute", |
729 | dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz( | 729 | dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz( |
730 | dev->gpu->kgd)); | 730 | dev->gpu->kgd)); |
731 | |||
731 | sysfs_show_64bit_prop(buffer, "local_mem_size", | 732 | sysfs_show_64bit_prop(buffer, "local_mem_size", |
732 | dev->gpu->kfd2kgd->get_vmem_size( | 733 | (unsigned long long int) 0); |
733 | dev->gpu->kgd)); | ||
734 | 734 | ||
735 | sysfs_show_32bit_prop(buffer, "fw_version", | 735 | sysfs_show_32bit_prop(buffer, "fw_version", |
736 | dev->gpu->kfd2kgd->get_fw_version( | 736 | dev->gpu->kfd2kgd->get_fw_version( |
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index c8a34476570a..af9662e58272 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -131,12 +131,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) | |||
131 | 131 | ||
132 | /* Reinitialize corresponding vblank timestamp if high-precision query | 132 | /* Reinitialize corresponding vblank timestamp if high-precision query |
133 | * available. Skip this step if query unsupported or failed. Will | 133 | * available. Skip this step if query unsupported or failed. Will |
134 | * reinitialize delayed at next vblank interrupt in that case. | 134 | * reinitialize delayed at next vblank interrupt in that case and |
135 | * assign 0 for now, to mark the vblanktimestamp as invalid. | ||
135 | */ | 136 | */ |
136 | if (rc) { | 137 | tslot = atomic_read(&vblank->count) + diff; |
137 | tslot = atomic_read(&vblank->count) + diff; | 138 | vblanktimestamp(dev, crtc, tslot) = rc ? t_vblank : (struct timeval) {0, 0}; |
138 | vblanktimestamp(dev, crtc, tslot) = t_vblank; | ||
139 | } | ||
140 | 139 | ||
141 | smp_mb__before_atomic(); | 140 | smp_mb__before_atomic(); |
142 | atomic_add(diff, &vblank->count); | 141 | atomic_add(diff, &vblank->count); |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 3da1af46625c..773d1d24e604 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -6074,6 +6074,8 @@ enum skl_disp_power_wells { | |||
6074 | #define GTFIFOCTL 0x120008 | 6074 | #define GTFIFOCTL 0x120008 |
6075 | #define GT_FIFO_FREE_ENTRIES_MASK 0x7f | 6075 | #define GT_FIFO_FREE_ENTRIES_MASK 0x7f |
6076 | #define GT_FIFO_NUM_RESERVED_ENTRIES 20 | 6076 | #define GT_FIFO_NUM_RESERVED_ENTRIES 20 |
6077 | #define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12) | ||
6078 | #define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11) | ||
6077 | 6079 | ||
6078 | #define HSW_IDICR 0x9008 | 6080 | #define HSW_IDICR 0x9008 |
6079 | #define IDIHASHMSK(x) (((x) & 0x3f) << 16) | 6081 | #define IDIHASHMSK(x) (((x) & 0x3f) << 16) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d547d9c8dda2..d0f3cbc87474 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -13635,9 +13635,6 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = { | |||
13635 | }; | 13635 | }; |
13636 | 13636 | ||
13637 | static struct intel_quirk intel_quirks[] = { | 13637 | static struct intel_quirk intel_quirks[] = { |
13638 | /* HP Mini needs pipe A force quirk (LP: #322104) */ | ||
13639 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, | ||
13640 | |||
13641 | /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ | 13638 | /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ |
13642 | { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, | 13639 | { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, |
13643 | 13640 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d0237102c27e..f27346e907b1 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -1348,7 +1348,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
1348 | 1348 | ||
1349 | pipe_config->has_dp_encoder = true; | 1349 | pipe_config->has_dp_encoder = true; |
1350 | pipe_config->has_drrs = false; | 1350 | pipe_config->has_drrs = false; |
1351 | pipe_config->has_audio = intel_dp->has_audio; | 1351 | pipe_config->has_audio = intel_dp->has_audio && port != PORT_A; |
1352 | 1352 | ||
1353 | if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { | 1353 | if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) { |
1354 | intel_fixed_panel_mode(intel_connector->panel.fixed_mode, | 1354 | intel_fixed_panel_mode(intel_connector->panel.fixed_mode, |
@@ -2211,8 +2211,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder, | |||
2211 | int dotclock; | 2211 | int dotclock; |
2212 | 2212 | ||
2213 | tmp = I915_READ(intel_dp->output_reg); | 2213 | tmp = I915_READ(intel_dp->output_reg); |
2214 | if (tmp & DP_AUDIO_OUTPUT_ENABLE) | 2214 | |
2215 | pipe_config->has_audio = true; | 2215 | pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A; |
2216 | 2216 | ||
2217 | if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { | 2217 | if ((port == PORT_A) || !HAS_PCH_CPT(dev)) { |
2218 | if (tmp & DP_SYNC_HS_HIGH) | 2218 | if (tmp & DP_SYNC_HS_HIGH) |
@@ -3812,7 +3812,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
3812 | if (val == 0) | 3812 | if (val == 0) |
3813 | break; | 3813 | break; |
3814 | 3814 | ||
3815 | intel_dp->sink_rates[i] = val * 200; | 3815 | /* Value read is in kHz while drm clock is saved in deca-kHz */ |
3816 | intel_dp->sink_rates[i] = (val * 200) / 10; | ||
3816 | } | 3817 | } |
3817 | intel_dp->num_sink_rates = i; | 3818 | intel_dp->num_sink_rates = i; |
3818 | } | 3819 | } |
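A short worked check (not from the patch series) of the sink_rates conversion above: the raw table entry scaled by 200 gives the link rate in kHz, and dividing by 10 yields the 10 kHz (deca-kHz) units drm stores, so the old code over-reported each rate by a factor of ten. The sample value below is hypothetical:

#include <stdio.h>

int main(void)
{
	unsigned int val = 13500;		/* hypothetical raw entry */
	unsigned int khz = val * 200;		/* 2,700,000 kHz, i.e. 2.7 GHz */
	unsigned int deca_khz = khz / 10;	/* 270,000, what the patched code stores */

	printf("raw %u -> %u kHz -> %u (10 kHz units)\n", val, khz, deca_khz);
	return 0;
}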
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 5abda1d2c018..fbcc7dff0d63 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -813,12 +813,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id) | |||
813 | static const struct dmi_system_id intel_dual_link_lvds[] = { | 813 | static const struct dmi_system_id intel_dual_link_lvds[] = { |
814 | { | 814 | { |
815 | .callback = intel_dual_link_lvds_callback, | 815 | .callback = intel_dual_link_lvds_callback, |
816 | .ident = "Apple MacBook Pro (Core i5/i7 Series)", | 816 | .ident = "Apple MacBook Pro 15\" (2010)", |
817 | .matches = { | ||
818 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | ||
819 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"), | ||
820 | }, | ||
821 | }, | ||
822 | { | ||
823 | .callback = intel_dual_link_lvds_callback, | ||
824 | .ident = "Apple MacBook Pro 15\" (2011)", | ||
817 | .matches = { | 825 | .matches = { |
818 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | 826 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), |
819 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), | 827 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), |
820 | }, | 828 | }, |
821 | }, | 829 | }, |
830 | { | ||
831 | .callback = intel_dual_link_lvds_callback, | ||
832 | .ident = "Apple MacBook Pro 15\" (2012)", | ||
833 | .matches = { | ||
834 | DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), | ||
835 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"), | ||
836 | }, | ||
837 | }, | ||
822 | { } /* terminating entry */ | 838 | { } /* terminating entry */ |
823 | }; | 839 | }; |
824 | 840 | ||
@@ -848,6 +864,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) | |||
848 | if (i915.lvds_channel_mode > 0) | 864 | if (i915.lvds_channel_mode > 0) |
849 | return i915.lvds_channel_mode == 2; | 865 | return i915.lvds_channel_mode == 2; |
850 | 866 | ||
867 | /* single channel LVDS is limited to 112 MHz */ | ||
868 | if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock | ||
869 | > 112999) | ||
870 | return true; | ||
871 | |||
851 | if (dmi_check_system(intel_dual_link_lvds)) | 872 | if (dmi_check_system(intel_dual_link_lvds)) |
852 | return true; | 873 | return true; |
853 | 874 | ||
@@ -1111,6 +1132,8 @@ void intel_lvds_init(struct drm_device *dev) | |||
1111 | out: | 1132 | out: |
1112 | mutex_unlock(&dev->mode_config.mutex); | 1133 | mutex_unlock(&dev->mode_config.mutex); |
1113 | 1134 | ||
1135 | intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); | ||
1136 | |||
1114 | lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); | 1137 | lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); |
1115 | DRM_DEBUG_KMS("detected %s-link lvds configuration\n", | 1138 | DRM_DEBUG_KMS("detected %s-link lvds configuration\n", |
1116 | lvds_encoder->is_dual_link ? "dual" : "single"); | 1139 | lvds_encoder->is_dual_link ? "dual" : "single"); |
@@ -1125,7 +1148,6 @@ out: | |||
1125 | } | 1148 | } |
1126 | drm_connector_register(connector); | 1149 | drm_connector_register(connector); |
1127 | 1150 | ||
1128 | intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); | ||
1129 | intel_panel_setup_backlight(connector, INVALID_PIPE); | 1151 | intel_panel_setup_backlight(connector, INVALID_PIPE); |
1130 | 1152 | ||
1131 | return; | 1153 | return; |
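A quick check (not from the patch series) of the dual-link threshold added to compute_is_dual_link_lvds() above: any fixed-mode clock above 112999 kHz now forces dual-link, on the comment's reasoning that a single LVDS channel tops out around 112 MHz. The sample clocks are hypothetical:

#include <stdio.h>

int main(void)
{
	const int clocks_khz[] = { 101800, 118800, 154000 };	/* hypothetical panel clocks */
	size_t i;

	for (i = 0; i < sizeof(clocks_khz) / sizeof(clocks_khz[0]); i++)
		printf("%d kHz -> %s-link LVDS\n", clocks_khz[i],
		       clocks_khz[i] > 112999 ? "dual" : "single");
	return 0;
}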
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index ab5cc94588e1..ff2a74651dd4 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -360,6 +360,14 @@ static void __intel_uncore_early_sanitize(struct drm_device *dev, | |||
360 | __raw_i915_write32(dev_priv, GTFIFODBG, | 360 | __raw_i915_write32(dev_priv, GTFIFODBG, |
361 | __raw_i915_read32(dev_priv, GTFIFODBG)); | 361 | __raw_i915_read32(dev_priv, GTFIFODBG)); |
362 | 362 | ||
363 | /* WaDisableShadowRegForCpd:chv */ | ||
364 | if (IS_CHERRYVIEW(dev)) { | ||
365 | __raw_i915_write32(dev_priv, GTFIFOCTL, | ||
366 | __raw_i915_read32(dev_priv, GTFIFOCTL) | | ||
367 | GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | | ||
368 | GT_FIFO_CTL_RC6_POLICY_STALL); | ||
369 | } | ||
370 | |||
363 | intel_uncore_forcewake_reset(dev, restore_forcewake); | 371 | intel_uncore_forcewake_reset(dev, restore_forcewake); |
364 | } | 372 | } |
365 | 373 | ||
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index dac78ad24b31..42b2ea3fdcf3 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -580,6 +580,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
580 | else | 580 | else |
581 | radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; | 581 | radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; |
582 | 582 | ||
583 | /* if there is no audio, set MINM_OVER_MAXP */ | ||
584 | if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) | ||
585 | radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | ||
583 | if (rdev->family < CHIP_RV770) | 586 | if (rdev->family < CHIP_RV770) |
584 | radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | 587 | radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; |
585 | /* use frac fb div on APUs */ | 588 | /* use frac fb div on APUs */ |
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index f57c1ab617bc..dd39f434b4a7 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c | |||
@@ -1761,17 +1761,15 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1761 | struct drm_device *dev = encoder->dev; | 1761 | struct drm_device *dev = encoder->dev; |
1762 | struct radeon_device *rdev = dev->dev_private; | 1762 | struct radeon_device *rdev = dev->dev_private; |
1763 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1763 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1764 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1765 | int encoder_mode = atombios_get_encoder_mode(encoder); | 1764 | int encoder_mode = atombios_get_encoder_mode(encoder); |
1766 | 1765 | ||
1767 | DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", | 1766 | DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", |
1768 | radeon_encoder->encoder_id, mode, radeon_encoder->devices, | 1767 | radeon_encoder->encoder_id, mode, radeon_encoder->devices, |
1769 | radeon_encoder->active_device); | 1768 | radeon_encoder->active_device); |
1770 | 1769 | ||
1771 | if (connector && (radeon_audio != 0) && | 1770 | if ((radeon_audio != 0) && |
1772 | ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || | 1771 | ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || |
1773 | (ENCODER_MODE_IS_DP(encoder_mode) && | 1772 | ENCODER_MODE_IS_DP(encoder_mode))) |
1774 | drm_detect_monitor_audio(radeon_connector_edid(connector))))) | ||
1775 | radeon_audio_dpms(encoder, mode); | 1773 | radeon_audio_dpms(encoder, mode); |
1776 | 1774 | ||
1777 | switch (radeon_encoder->encoder_id) { | 1775 | switch (radeon_encoder->encoder_id) { |
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c index 3adc2afe32aa..68fd9fc677e3 100644 --- a/drivers/gpu/drm/radeon/dce6_afmt.c +++ b/drivers/gpu/drm/radeon/dce6_afmt.c | |||
@@ -295,28 +295,3 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev, | |||
295 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); | 295 | WREG32(DCCG_AUDIO_DTO1_MODULE, clock); |
296 | } | 296 | } |
297 | } | 297 | } |
298 | |||
299 | void dce6_dp_enable(struct drm_encoder *encoder, bool enable) | ||
300 | { | ||
301 | struct drm_device *dev = encoder->dev; | ||
302 | struct radeon_device *rdev = dev->dev_private; | ||
303 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
304 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | ||
305 | |||
306 | if (!dig || !dig->afmt) | ||
307 | return; | ||
308 | |||
309 | if (enable) { | ||
310 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, | ||
311 | EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); | ||
312 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, | ||
313 | EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */ | ||
314 | EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */ | ||
315 | EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */ | ||
316 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ | ||
317 | } else { | ||
318 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); | ||
319 | } | ||
320 | |||
321 | dig->afmt->enabled = enable; | ||
322 | } | ||
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index c18d4ecbd95d..0926739c9fa7 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c | |||
@@ -219,13 +219,9 @@ void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset, | |||
219 | WREG32(AFMT_AVI_INFO3 + offset, | 219 | WREG32(AFMT_AVI_INFO3 + offset, |
220 | frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24)); | 220 | frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24)); |
221 | 221 | ||
222 | WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset, | ||
223 | HDMI_AVI_INFO_SEND | /* enable AVI info frames */ | ||
224 | HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */ | ||
225 | |||
226 | WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset, | 222 | WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset, |
227 | HDMI_AVI_INFO_LINE(2), /* anything other than 0 */ | 223 | HDMI_AVI_INFO_LINE(2), /* anything other than 0 */ |
228 | ~HDMI_AVI_INFO_LINE_MASK); | 224 | ~HDMI_AVI_INFO_LINE_MASK); |
229 | } | 225 | } |
230 | 226 | ||
231 | void dce4_hdmi_audio_set_dto(struct radeon_device *rdev, | 227 | void dce4_hdmi_audio_set_dto(struct radeon_device *rdev, |
@@ -370,9 +366,13 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset) | |||
370 | WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset, | 366 | WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset, |
371 | AFMT_AUDIO_CHANNEL_ENABLE(0xff)); | 367 | AFMT_AUDIO_CHANNEL_ENABLE(0xff)); |
372 | 368 | ||
369 | WREG32(HDMI_AUDIO_PACKET_CONTROL + offset, | ||
370 | HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ | ||
371 | HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */ | ||
372 | |||
373 | /* allow 60958 channel status and send audio packets fields to be updated */ | 373 | /* allow 60958 channel status and send audio packets fields to be updated */ |
374 | WREG32(AFMT_AUDIO_PACKET_CONTROL + offset, | 374 | WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset, |
375 | AFMT_AUDIO_SAMPLE_SEND | AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE); | 375 | AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE); |
376 | } | 376 | } |
377 | 377 | ||
378 | 378 | ||
@@ -398,17 +398,26 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) | |||
398 | return; | 398 | return; |
399 | 399 | ||
400 | if (enable) { | 400 | if (enable) { |
401 | WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, | 401 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
402 | HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */ | ||
403 | |||
404 | WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, | ||
405 | HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */ | ||
406 | HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */ | ||
407 | 402 | ||
408 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, | 403 | if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { |
409 | HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ | 404 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, |
410 | HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ | 405 | HDMI_AVI_INFO_SEND | /* enable AVI info frames */ |
406 | HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */ | ||
407 | HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */ | ||
408 | HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */ | ||
409 | WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, | ||
410 | AFMT_AUDIO_SAMPLE_SEND); | ||
411 | } else { | ||
412 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, | ||
413 | HDMI_AVI_INFO_SEND | /* enable AVI info frames */ | ||
414 | HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */ | ||
415 | WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, | ||
416 | ~AFMT_AUDIO_SAMPLE_SEND); | ||
417 | } | ||
411 | } else { | 418 | } else { |
419 | WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, | ||
420 | ~AFMT_AUDIO_SAMPLE_SEND); | ||
412 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0); | 421 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0); |
413 | } | 422 | } |
414 | 423 | ||
@@ -424,20 +433,24 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) | |||
424 | struct radeon_device *rdev = dev->dev_private; | 433 | struct radeon_device *rdev = dev->dev_private; |
425 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 434 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
426 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 435 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
436 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
427 | 437 | ||
428 | if (!dig || !dig->afmt) | 438 | if (!dig || !dig->afmt) |
429 | return; | 439 | return; |
430 | 440 | ||
431 | if (enable) { | 441 | if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) { |
432 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | 442 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
433 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | 443 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
434 | struct radeon_connector_atom_dig *dig_connector; | 444 | struct radeon_connector_atom_dig *dig_connector; |
435 | uint32_t val; | 445 | uint32_t val; |
436 | 446 | ||
447 | WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, | ||
448 | AFMT_AUDIO_SAMPLE_SEND); | ||
449 | |||
437 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, | 450 | WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset, |
438 | EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); | 451 | EVERGREEN_DP_SEC_TIMESTAMP_MODE(1)); |
439 | 452 | ||
440 | if (radeon_connector->con_priv) { | 453 | if (!ASIC_IS_DCE6(rdev) && radeon_connector->con_priv) { |
441 | dig_connector = radeon_connector->con_priv; | 454 | dig_connector = radeon_connector->con_priv; |
442 | val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset); | 455 | val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset); |
443 | val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); | 456 | val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf); |
@@ -457,6 +470,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) | |||
457 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ | 470 | EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */ |
458 | } else { | 471 | } else { |
459 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); | 472 | WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0); |
473 | WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, | ||
474 | ~AFMT_AUDIO_SAMPLE_SEND); | ||
460 | } | 475 | } |
461 | 476 | ||
462 | dig->afmt->enabled = enable; | 477 | dig->afmt->enabled = enable; |
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index dd6606b8e23c..e85894ade95c 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -228,12 +228,13 @@ void r600_set_avi_packet(struct radeon_device *rdev, u32 offset, | |||
228 | WREG32(HDMI0_AVI_INFO3 + offset, | 228 | WREG32(HDMI0_AVI_INFO3 + offset, |
229 | frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24)); | 229 | frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24)); |
230 | 230 | ||
231 | WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset, | ||
232 | HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */ | ||
233 | |||
231 | WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset, | 234 | WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset, |
232 | HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ | 235 | HDMI0_AVI_INFO_SEND | /* enable AVI info frames */ |
233 | HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */ | 236 | HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */ |
234 | 237 | ||
235 | WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset, | ||
236 | HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */ | ||
237 | } | 238 | } |
238 | 239 | ||
239 | /* | 240 | /* |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index d2abe481954f..46eb0fa75a61 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -1673,7 +1673,6 @@ struct radeon_uvd { | |||
1673 | struct radeon_bo *vcpu_bo; | 1673 | struct radeon_bo *vcpu_bo; |
1674 | void *cpu_addr; | 1674 | void *cpu_addr; |
1675 | uint64_t gpu_addr; | 1675 | uint64_t gpu_addr; |
1676 | void *saved_bo; | ||
1677 | atomic_t handles[RADEON_MAX_UVD_HANDLES]; | 1676 | atomic_t handles[RADEON_MAX_UVD_HANDLES]; |
1678 | struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; | 1677 | struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; |
1679 | unsigned img_size[RADEON_MAX_UVD_HANDLES]; | 1678 | unsigned img_size[RADEON_MAX_UVD_HANDLES]; |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index fafd8ce4d58f..8dbf5083c4ff 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -1202,7 +1202,7 @@ static struct radeon_asic rs780_asic = { | |||
1202 | static struct radeon_asic_ring rv770_uvd_ring = { | 1202 | static struct radeon_asic_ring rv770_uvd_ring = { |
1203 | .ib_execute = &uvd_v1_0_ib_execute, | 1203 | .ib_execute = &uvd_v1_0_ib_execute, |
1204 | .emit_fence = &uvd_v2_2_fence_emit, | 1204 | .emit_fence = &uvd_v2_2_fence_emit, |
1205 | .emit_semaphore = &uvd_v1_0_semaphore_emit, | 1205 | .emit_semaphore = &uvd_v2_2_semaphore_emit, |
1206 | .cs_parse = &radeon_uvd_cs_parse, | 1206 | .cs_parse = &radeon_uvd_cs_parse, |
1207 | .ring_test = &uvd_v1_0_ring_test, | 1207 | .ring_test = &uvd_v1_0_ring_test, |
1208 | .ib_test = &uvd_v1_0_ib_test, | 1208 | .ib_test = &uvd_v1_0_ib_test, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index cf0a90bb61ca..a3ca8cd305c5 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -949,6 +949,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | |||
949 | int uvd_v2_2_resume(struct radeon_device *rdev); | 949 | int uvd_v2_2_resume(struct radeon_device *rdev); |
950 | void uvd_v2_2_fence_emit(struct radeon_device *rdev, | 950 | void uvd_v2_2_fence_emit(struct radeon_device *rdev, |
951 | struct radeon_fence *fence); | 951 | struct radeon_fence *fence); |
952 | bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev, | ||
953 | struct radeon_ring *ring, | ||
954 | struct radeon_semaphore *semaphore, | ||
955 | bool emit_wait); | ||
952 | 956 | ||
953 | /* uvd v3.1 */ | 957 | /* uvd v3.1 */ |
954 | bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, | 958 | bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev, |
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index 48d49e651a30..dcb779647c57 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c | |||
@@ -102,7 +102,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, | |||
102 | void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); | 102 | void r600_hdmi_enable(struct drm_encoder *encoder, bool enable); |
103 | void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); | 103 | void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable); |
104 | void evergreen_dp_enable(struct drm_encoder *encoder, bool enable); | 104 | void evergreen_dp_enable(struct drm_encoder *encoder, bool enable); |
105 | void dce6_dp_enable(struct drm_encoder *encoder, bool enable); | ||
106 | 105 | ||
107 | static const u32 pin_offsets[7] = | 106 | static const u32 pin_offsets[7] = |
108 | { | 107 | { |
@@ -240,7 +239,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = { | |||
240 | .set_avi_packet = evergreen_set_avi_packet, | 239 | .set_avi_packet = evergreen_set_avi_packet, |
241 | .set_audio_packet = dce4_set_audio_packet, | 240 | .set_audio_packet = dce4_set_audio_packet, |
242 | .mode_set = radeon_audio_dp_mode_set, | 241 | .mode_set = radeon_audio_dp_mode_set, |
243 | .dpms = dce6_dp_enable, | 242 | .dpms = evergreen_dp_enable, |
244 | }; | 243 | }; |
245 | 244 | ||
246 | static void radeon_audio_interface_init(struct radeon_device *rdev) | 245 | static void radeon_audio_interface_init(struct radeon_device *rdev) |
@@ -461,30 +460,37 @@ void radeon_audio_detect(struct drm_connector *connector, | |||
461 | if (!connector || !connector->encoder) | 460 | if (!connector || !connector->encoder) |
462 | return; | 461 | return; |
463 | 462 | ||
463 | if (!radeon_encoder_is_digital(connector->encoder)) | ||
464 | return; | ||
465 | |||
464 | rdev = connector->encoder->dev->dev_private; | 466 | rdev = connector->encoder->dev->dev_private; |
467 | |||
468 | if (!radeon_audio_chipset_supported(rdev)) | ||
469 | return; | ||
470 | |||
465 | radeon_encoder = to_radeon_encoder(connector->encoder); | 471 | radeon_encoder = to_radeon_encoder(connector->encoder); |
466 | dig = radeon_encoder->enc_priv; | 472 | dig = radeon_encoder->enc_priv; |
467 | 473 | ||
468 | if (status == connector_status_connected) { | 474 | if (!dig->afmt) |
469 | struct radeon_connector *radeon_connector; | 475 | return; |
470 | int sink_type; | ||
471 | |||
472 | if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) { | ||
473 | radeon_encoder->audio = NULL; | ||
474 | return; | ||
475 | } | ||
476 | 476 | ||
477 | radeon_connector = to_radeon_connector(connector); | 477 | if (status == connector_status_connected) { |
478 | sink_type = radeon_dp_getsinktype(radeon_connector); | 478 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
479 | 479 | ||
480 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && | 480 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && |
481 | sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) | 481 | radeon_dp_getsinktype(radeon_connector) == |
482 | CONNECTOR_OBJECT_ID_DISPLAYPORT) | ||
482 | radeon_encoder->audio = rdev->audio.dp_funcs; | 483 | radeon_encoder->audio = rdev->audio.dp_funcs; |
483 | else | 484 | else |
484 | radeon_encoder->audio = rdev->audio.hdmi_funcs; | 485 | radeon_encoder->audio = rdev->audio.hdmi_funcs; |
485 | 486 | ||
486 | dig->afmt->pin = radeon_audio_get_pin(connector->encoder); | 487 | dig->afmt->pin = radeon_audio_get_pin(connector->encoder); |
487 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | 488 | if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { |
489 | radeon_audio_enable(rdev, dig->afmt->pin, 0xf); | ||
490 | } else { | ||
491 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | ||
492 | dig->afmt->pin = NULL; | ||
493 | } | ||
488 | } else { | 494 | } else { |
489 | radeon_audio_enable(rdev, dig->afmt->pin, 0); | 495 | radeon_audio_enable(rdev, dig->afmt->pin, 0); |
490 | dig->afmt->pin = NULL; | 496 | dig->afmt->pin = NULL; |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index cebb65e07e1d..d17d251dbd4f 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -1379,8 +1379,10 @@ out: | |||
1379 | /* updated in get modes as well since we need to know if it's analog or digital */ | 1379 | /* updated in get modes as well since we need to know if it's analog or digital */ |
1380 | radeon_connector_update_scratch_regs(connector, ret); | 1380 | radeon_connector_update_scratch_regs(connector, ret); |
1381 | 1381 | ||
1382 | if (radeon_audio != 0) | 1382 | if (radeon_audio != 0) { |
1383 | radeon_connector_get_edid(connector); | ||
1383 | radeon_audio_detect(connector, ret); | 1384 | radeon_audio_detect(connector, ret); |
1385 | } | ||
1384 | 1386 | ||
1385 | exit: | 1387 | exit: |
1386 | pm_runtime_mark_last_busy(connector->dev->dev); | 1388 | pm_runtime_mark_last_busy(connector->dev->dev); |
@@ -1717,8 +1719,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1717 | 1719 | ||
1718 | radeon_connector_update_scratch_regs(connector, ret); | 1720 | radeon_connector_update_scratch_regs(connector, ret); |
1719 | 1721 | ||
1720 | if (radeon_audio != 0) | 1722 | if (radeon_audio != 0) { |
1723 | radeon_connector_get_edid(connector); | ||
1721 | radeon_audio_detect(connector, ret); | 1724 | radeon_audio_detect(connector, ret); |
1725 | } | ||
1722 | 1726 | ||
1723 | out: | 1727 | out: |
1724 | pm_runtime_mark_last_busy(connector->dev->dev); | 1728 | pm_runtime_mark_last_busy(connector->dev->dev); |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 4d0f96cc3da4..ab39b85e0f76 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -88,7 +88,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
88 | p->dma_reloc_idx = 0; | 88 | p->dma_reloc_idx = 0; |
89 | /* FIXME: we assume that each relocs use 4 dwords */ | 89 | /* FIXME: we assume that each relocs use 4 dwords */ |
90 | p->nrelocs = chunk->length_dw / 4; | 90 | p->nrelocs = chunk->length_dw / 4; |
91 | p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL); | 91 | p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list)); |
92 | if (p->relocs == NULL) { | 92 | if (p->relocs == NULL) { |
93 | return -ENOMEM; | 93 | return -ENOMEM; |
94 | } | 94 | } |
@@ -428,7 +428,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo | |||
428 | } | 428 | } |
429 | } | 429 | } |
430 | kfree(parser->track); | 430 | kfree(parser->track); |
431 | kfree(parser->relocs); | 431 | drm_free_large(parser->relocs); |
432 | drm_free_large(parser->vm_bos); | 432 | drm_free_large(parser->vm_bos); |
433 | for (i = 0; i < parser->nchunks; i++) | 433 | for (i = 0; i < parser->nchunks; i++) |
434 | drm_free_large(parser->chunks[i].kdata); | 434 | drm_free_large(parser->chunks[i].kdata); |
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index 01701376b239..eef006c48584 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c | |||
@@ -135,28 +135,31 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn, | |||
135 | while (it) { | 135 | while (it) { |
136 | struct radeon_mn_node *node; | 136 | struct radeon_mn_node *node; |
137 | struct radeon_bo *bo; | 137 | struct radeon_bo *bo; |
138 | int r; | 138 | long r; |
139 | 139 | ||
140 | node = container_of(it, struct radeon_mn_node, it); | 140 | node = container_of(it, struct radeon_mn_node, it); |
141 | it = interval_tree_iter_next(it, start, end); | 141 | it = interval_tree_iter_next(it, start, end); |
142 | 142 | ||
143 | list_for_each_entry(bo, &node->bos, mn_list) { | 143 | list_for_each_entry(bo, &node->bos, mn_list) { |
144 | 144 | ||
145 | if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) | ||
146 | continue; | ||
147 | |||
145 | r = radeon_bo_reserve(bo, true); | 148 | r = radeon_bo_reserve(bo, true); |
146 | if (r) { | 149 | if (r) { |
147 | DRM_ERROR("(%d) failed to reserve user bo\n", r); | 150 | DRM_ERROR("(%ld) failed to reserve user bo\n", r); |
148 | continue; | 151 | continue; |
149 | } | 152 | } |
150 | 153 | ||
151 | r = reservation_object_wait_timeout_rcu(bo->tbo.resv, | 154 | r = reservation_object_wait_timeout_rcu(bo->tbo.resv, |
152 | true, false, MAX_SCHEDULE_TIMEOUT); | 155 | true, false, MAX_SCHEDULE_TIMEOUT); |
153 | if (r) | 156 | if (r <= 0) |
154 | DRM_ERROR("(%d) failed to wait for user bo\n", r); | 157 | DRM_ERROR("(%ld) failed to wait for user bo\n", r); |
155 | 158 | ||
156 | radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); | 159 | radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU); |
157 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | 160 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); |
158 | if (r) | 161 | if (r) |
159 | DRM_ERROR("(%d) failed to validate user bo\n", r); | 162 | DRM_ERROR("(%ld) failed to validate user bo\n", r); |
160 | 163 | ||
161 | radeon_bo_unreserve(bo); | 164 | radeon_bo_unreserve(bo); |
162 | } | 165 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index b292aca0f342..edafd3c2b170 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -591,8 +591,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) | |||
591 | { | 591 | { |
592 | struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); | 592 | struct radeon_device *rdev = radeon_get_rdev(ttm->bdev); |
593 | struct radeon_ttm_tt *gtt = (void *)ttm; | 593 | struct radeon_ttm_tt *gtt = (void *)ttm; |
594 | struct scatterlist *sg; | 594 | struct sg_page_iter sg_iter; |
595 | int i; | ||
596 | 595 | ||
597 | int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY); | 596 | int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY); |
598 | enum dma_data_direction direction = write ? | 597 | enum dma_data_direction direction = write ? |
@@ -605,9 +604,8 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) | |||
605 | /* free the sg table and pages again */ | 604 | /* free the sg table and pages again */ |
606 | dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); | 605 | dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction); |
607 | 606 | ||
608 | for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) { | 607 | for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) { |
609 | struct page *page = sg_page(sg); | 608 | struct page *page = sg_page_iter_page(&sg_iter); |
610 | |||
611 | if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY)) | 609 | if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY)) |
612 | set_page_dirty(page); | 610 | set_page_dirty(page); |
613 | 611 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index c10b2aec6450..6edcb5485092 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
@@ -204,28 +204,32 @@ void radeon_uvd_fini(struct radeon_device *rdev) | |||
204 | 204 | ||
205 | int radeon_uvd_suspend(struct radeon_device *rdev) | 205 | int radeon_uvd_suspend(struct radeon_device *rdev) |
206 | { | 206 | { |
207 | unsigned size; | 207 | int i, r; |
208 | void *ptr; | ||
209 | int i; | ||
210 | 208 | ||
211 | if (rdev->uvd.vcpu_bo == NULL) | 209 | if (rdev->uvd.vcpu_bo == NULL) |
212 | return 0; | 210 | return 0; |
213 | 211 | ||
214 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) | 212 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { |
215 | if (atomic_read(&rdev->uvd.handles[i])) | 213 | uint32_t handle = atomic_read(&rdev->uvd.handles[i]); |
216 | break; | 214 | if (handle != 0) { |
215 | struct radeon_fence *fence; | ||
217 | 216 | ||
218 | if (i == RADEON_MAX_UVD_HANDLES) | 217 | radeon_uvd_note_usage(rdev); |
219 | return 0; | ||
220 | 218 | ||
221 | size = radeon_bo_size(rdev->uvd.vcpu_bo); | 219 | r = radeon_uvd_get_destroy_msg(rdev, |
222 | size -= rdev->uvd_fw->size; | 220 | R600_RING_TYPE_UVD_INDEX, handle, &fence); |
221 | if (r) { | ||
222 | DRM_ERROR("Error destroying UVD (%d)!\n", r); | ||
223 | continue; | ||
224 | } | ||
223 | 225 | ||
224 | ptr = rdev->uvd.cpu_addr; | 226 | radeon_fence_wait(fence, false); |
225 | ptr += rdev->uvd_fw->size; | 227 | radeon_fence_unref(&fence); |
226 | 228 | ||
227 | rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); | 229 | rdev->uvd.filp[i] = NULL; |
228 | memcpy(rdev->uvd.saved_bo, ptr, size); | 230 | atomic_set(&rdev->uvd.handles[i], 0); |
231 | } | ||
232 | } | ||
229 | 233 | ||
230 | return 0; | 234 | return 0; |
231 | } | 235 | } |
@@ -246,12 +250,7 @@ int radeon_uvd_resume(struct radeon_device *rdev) | |||
246 | ptr = rdev->uvd.cpu_addr; | 250 | ptr = rdev->uvd.cpu_addr; |
247 | ptr += rdev->uvd_fw->size; | 251 | ptr += rdev->uvd_fw->size; |
248 | 252 | ||
249 | if (rdev->uvd.saved_bo != NULL) { | 253 | memset(ptr, 0, size); |
250 | memcpy(ptr, rdev->uvd.saved_bo, size); | ||
251 | kfree(rdev->uvd.saved_bo); | ||
252 | rdev->uvd.saved_bo = NULL; | ||
253 | } else | ||
254 | memset(ptr, 0, size); | ||
255 | 254 | ||
256 | return 0; | 255 | return 0; |
257 | } | 256 | } |
@@ -396,6 +395,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) | |||
396 | return 0; | 395 | return 0; |
397 | } | 396 | } |
398 | 397 | ||
398 | static int radeon_uvd_validate_codec(struct radeon_cs_parser *p, | ||
399 | unsigned stream_type) | ||
400 | { | ||
401 | switch (stream_type) { | ||
402 | case 0: /* H264 */ | ||
403 | case 1: /* VC1 */ | ||
404 | /* always supported */ | ||
405 | return 0; | ||
406 | |||
407 | case 3: /* MPEG2 */ | ||
408 | case 4: /* MPEG4 */ | ||
409 | /* only since UVD 3 */ | ||
410 | if (p->rdev->family >= CHIP_PALM) | ||
411 | return 0; | ||
412 | |||
413 | /* fall through */ | ||
414 | default: | ||
415 | DRM_ERROR("UVD codec not supported by hardware %d!\n", | ||
416 | stream_type); | ||
417 | return -EINVAL; | ||
418 | } | ||
419 | } | ||
420 | |||
399 | static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | 421 | static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, |
400 | unsigned offset, unsigned buf_sizes[]) | 422 | unsigned offset, unsigned buf_sizes[]) |
401 | { | 423 | { |
@@ -436,50 +458,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
436 | return -EINVAL; | 458 | return -EINVAL; |
437 | } | 459 | } |
438 | 460 | ||
439 | if (msg_type == 1) { | 461 | switch (msg_type) { |
440 | /* it's a decode msg, calc buffer sizes */ | 462 | case 0: |
441 | r = radeon_uvd_cs_msg_decode(msg, buf_sizes); | 463 | /* it's a create msg, calc image size (width * height) */ |
442 | /* calc image size (width * height) */ | 464 | img_size = msg[7] * msg[8]; |
443 | img_size = msg[6] * msg[7]; | 465 | |
466 | r = radeon_uvd_validate_codec(p, msg[4]); | ||
444 | radeon_bo_kunmap(bo); | 467 | radeon_bo_kunmap(bo); |
445 | if (r) | 468 | if (r) |
446 | return r; | 469 | return r; |
447 | 470 | ||
448 | } else if (msg_type == 2) { | 471 | /* try to alloc a new handle */ |
472 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { | ||
473 | if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { | ||
474 | DRM_ERROR("Handle 0x%x already in use!\n", handle); | ||
475 | return -EINVAL; | ||
476 | } | ||
477 | |||
478 | if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { | ||
479 | p->rdev->uvd.filp[i] = p->filp; | ||
480 | p->rdev->uvd.img_size[i] = img_size; | ||
481 | return 0; | ||
482 | } | ||
483 | } | ||
484 | |||
485 | DRM_ERROR("No more free UVD handles!\n"); | ||
486 | return -EINVAL; | ||
487 | |||
488 | case 1: | ||
489 | /* it's a decode msg, validate codec and calc buffer sizes */ | ||
490 | r = radeon_uvd_validate_codec(p, msg[4]); | ||
491 | if (!r) | ||
492 | r = radeon_uvd_cs_msg_decode(msg, buf_sizes); | ||
493 | radeon_bo_kunmap(bo); | ||
494 | if (r) | ||
495 | return r; | ||
496 | |||
497 | /* validate the handle */ | ||
498 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { | ||
499 | if (atomic_read(&p->rdev->uvd.handles[i]) == handle) { | ||
500 | if (p->rdev->uvd.filp[i] != p->filp) { | ||
501 | DRM_ERROR("UVD handle collision detected!\n"); | ||
502 | return -EINVAL; | ||
503 | } | ||
504 | return 0; | ||
505 | } | ||
506 | } | ||
507 | |||
508 | DRM_ERROR("Invalid UVD handle 0x%x!\n", handle); | ||
509 | return -ENOENT; | ||
510 | |||
511 | case 2: | ||
449 | /* it's a destroy msg, free the handle */ | 512 | /* it's a destroy msg, free the handle */ |
450 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) | 513 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) |
451 | atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0); | 514 | atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0); |
452 | radeon_bo_kunmap(bo); | 515 | radeon_bo_kunmap(bo); |
453 | return 0; | 516 | return 0; |
454 | } else { | ||
455 | /* it's a create msg, calc image size (width * height) */ | ||
456 | img_size = msg[7] * msg[8]; | ||
457 | radeon_bo_kunmap(bo); | ||
458 | 517 | ||
459 | if (msg_type != 0) { | 518 | default: |
460 | DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); | ||
461 | return -EINVAL; | ||
462 | } | ||
463 | |||
464 | /* it's a create msg, no special handling needed */ | ||
465 | } | ||
466 | |||
467 | /* create or decode, validate the handle */ | ||
468 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { | ||
469 | if (atomic_read(&p->rdev->uvd.handles[i]) == handle) | ||
470 | return 0; | ||
471 | } | ||
472 | 519 | ||
473 | /* handle not found try to alloc a new one */ | 520 | DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); |
474 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { | 521 | return -EINVAL; |
475 | if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) { | ||
476 | p->rdev->uvd.filp[i] = p->filp; | ||
477 | p->rdev->uvd.img_size[i] = img_size; | ||
478 | return 0; | ||
479 | } | ||
480 | } | 522 | } |
481 | 523 | ||
482 | DRM_ERROR("No more free UVD handles!\n"); | 524 | BUG(); |
483 | return -EINVAL; | 525 | return -EINVAL; |
484 | } | 526 | } |
485 | 527 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index 24f849f888bb..0de5711ac508 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c | |||
@@ -493,18 +493,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, | |||
493 | * | 493 | * |
494 | * @p: parser context | 494 | * @p: parser context |
495 | * @handle: handle to validate | 495 | * @handle: handle to validate |
496 | * @allocated: allocated a new handle? | ||
496 | * | 497 | * |
497 | * Validates the handle and returns the found session index or -EINVAL | 498 | * Validates the handle and returns the found session index or -EINVAL |
498 | * if we don't have another free session index. | 499 | * if we don't have another free session index. |
499 | */ | 500 | */ |
500 | int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) | 501 | static int radeon_vce_validate_handle(struct radeon_cs_parser *p, |
502 | uint32_t handle, bool *allocated) | ||
501 | { | 503 | { |
502 | unsigned i; | 504 | unsigned i; |
503 | 505 | ||
506 | *allocated = false; | ||
507 | |||
504 | /* validate the handle */ | 508 | /* validate the handle */ |
505 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { | 509 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { |
506 | if (atomic_read(&p->rdev->vce.handles[i]) == handle) | 510 | if (atomic_read(&p->rdev->vce.handles[i]) == handle) { |
511 | if (p->rdev->vce.filp[i] != p->filp) { | ||
512 | DRM_ERROR("VCE handle collision detected!\n"); | ||
513 | return -EINVAL; | ||
514 | } | ||
507 | return i; | 515 | return i; |
516 | } | ||
508 | } | 517 | } |
509 | 518 | ||
510 | /* handle not found try to alloc a new one */ | 519 | /* handle not found try to alloc a new one */ |
@@ -512,6 +521,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) | |||
512 | if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { | 521 | if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { |
513 | p->rdev->vce.filp[i] = p->filp; | 522 | p->rdev->vce.filp[i] = p->filp; |
514 | p->rdev->vce.img_size[i] = 0; | 523 | p->rdev->vce.img_size[i] = 0; |
524 | *allocated = true; | ||
515 | return i; | 525 | return i; |
516 | } | 526 | } |
517 | } | 527 | } |
@@ -529,10 +539,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) | |||
529 | int radeon_vce_cs_parse(struct radeon_cs_parser *p) | 539 | int radeon_vce_cs_parse(struct radeon_cs_parser *p) |
530 | { | 540 | { |
531 | int session_idx = -1; | 541 | int session_idx = -1; |
532 | bool destroyed = false; | 542 | bool destroyed = false, created = false, allocated = false; |
533 | uint32_t tmp, handle = 0; | 543 | uint32_t tmp, handle = 0; |
534 | uint32_t *size = &tmp; | 544 | uint32_t *size = &tmp; |
535 | int i, r; | 545 | int i, r = 0; |
536 | 546 | ||
537 | while (p->idx < p->chunk_ib->length_dw) { | 547 | while (p->idx < p->chunk_ib->length_dw) { |
538 | uint32_t len = radeon_get_ib_value(p, p->idx); | 548 | uint32_t len = radeon_get_ib_value(p, p->idx); |
@@ -540,18 +550,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) | |||
540 | 550 | ||
541 | if ((len < 8) || (len & 3)) { | 551 | if ((len < 8) || (len & 3)) { |
542 | DRM_ERROR("invalid VCE command length (%d)!\n", len); | 552 | DRM_ERROR("invalid VCE command length (%d)!\n", len); |
543 | return -EINVAL; | 553 | r = -EINVAL; |
554 | goto out; | ||
544 | } | 555 | } |
545 | 556 | ||
546 | if (destroyed) { | 557 | if (destroyed) { |
547 | DRM_ERROR("No other command allowed after destroy!\n"); | 558 | DRM_ERROR("No other command allowed after destroy!\n"); |
548 | return -EINVAL; | 559 | r = -EINVAL; |
560 | goto out; | ||
549 | } | 561 | } |
550 | 562 | ||
551 | switch (cmd) { | 563 | switch (cmd) { |
552 | case 0x00000001: // session | 564 | case 0x00000001: // session |
553 | handle = radeon_get_ib_value(p, p->idx + 2); | 565 | handle = radeon_get_ib_value(p, p->idx + 2); |
554 | session_idx = radeon_vce_validate_handle(p, handle); | 566 | session_idx = radeon_vce_validate_handle(p, handle, |
567 | &allocated); | ||
555 | if (session_idx < 0) | 568 | if (session_idx < 0) |
556 | return session_idx; | 569 | return session_idx; |
557 | size = &p->rdev->vce.img_size[session_idx]; | 570 | size = &p->rdev->vce.img_size[session_idx]; |
@@ -561,6 +574,13 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) | |||
561 | break; | 574 | break; |
562 | 575 | ||
563 | case 0x01000001: // create | 576 | case 0x01000001: // create |
577 | created = true; | ||
578 | if (!allocated) { | ||
579 | DRM_ERROR("Handle already in use!\n"); | ||
580 | r = -EINVAL; | ||
581 | goto out; | ||
582 | } | ||
583 | |||
564 | *size = radeon_get_ib_value(p, p->idx + 8) * | 584 | *size = radeon_get_ib_value(p, p->idx + 8) * |
565 | radeon_get_ib_value(p, p->idx + 10) * | 585 | radeon_get_ib_value(p, p->idx + 10) * |
566 | 8 * 3 / 2; | 586 | 8 * 3 / 2; |
@@ -578,12 +598,12 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) | |||
578 | r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, | 598 | r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, |
579 | *size); | 599 | *size); |
580 | if (r) | 600 | if (r) |
581 | return r; | 601 | goto out; |
582 | 602 | ||
583 | r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, | 603 | r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, |
584 | *size / 3); | 604 | *size / 3); |
585 | if (r) | 605 | if (r) |
586 | return r; | 606 | goto out; |
587 | break; | 607 | break; |
588 | 608 | ||
589 | case 0x02000001: // destroy | 609 | case 0x02000001: // destroy |
@@ -594,7 +614,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) | |||
594 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, | 614 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, |
595 | *size * 2); | 615 | *size * 2); |
596 | if (r) | 616 | if (r) |
597 | return r; | 617 | goto out; |
598 | break; | 618 | break; |
599 | 619 | ||
600 | case 0x05000004: // video bitstream buffer | 620 | case 0x05000004: // video bitstream buffer |
@@ -602,36 +622,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) | |||
602 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, | 622 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, |
603 | tmp); | 623 | tmp); |
604 | if (r) | 624 | if (r) |
605 | return r; | 625 | goto out; |
606 | break; | 626 | break; |
607 | 627 | ||
608 | case 0x05000005: // feedback buffer | 628 | case 0x05000005: // feedback buffer |
609 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, | 629 | r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, |
610 | 4096); | 630 | 4096); |
611 | if (r) | 631 | if (r) |
612 | return r; | 632 | goto out; |
613 | break; | 633 | break; |
614 | 634 | ||
615 | default: | 635 | default: |
616 | DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); | 636 | DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); |
617 | return -EINVAL; | 637 | r = -EINVAL; |
638 | goto out; | ||
618 | } | 639 | } |
619 | 640 | ||
620 | if (session_idx == -1) { | 641 | if (session_idx == -1) { |
621 | DRM_ERROR("no session command at start of IB\n"); | 642 | DRM_ERROR("no session command at start of IB\n"); |
622 | return -EINVAL; | 643 | r = -EINVAL; |
644 | goto out; | ||
623 | } | 645 | } |
624 | 646 | ||
625 | p->idx += len / 4; | 647 | p->idx += len / 4; |
626 | } | 648 | } |
627 | 649 | ||
628 | if (destroyed) { | 650 | if (allocated && !created) { |
629 | /* IB contains a destroy msg, free the handle */ | 651 | DRM_ERROR("New session without create command!\n"); |
652 | r = -ENOENT; | ||
653 | } | ||
654 | |||
655 | out: | ||
656 | if ((!r && destroyed) || (r && allocated)) { | ||
657 | /* | ||
658 | * IB contains a destroy msg or we have allocated a | ||
659 | * handle and got an error, anyway free the handle | ||
660 | */ | ||
630 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) | 661 | for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) |
631 | atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); | 662 | atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); |
632 | } | 663 | } |
633 | 664 | ||
634 | return 0; | 665 | return r; |
635 | } | 666 | } |
636 | 667 | ||
637 | /** | 668 | /** |
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 2a5a4a9e772d..de42fc4a22b8 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c | |||
@@ -473,6 +473,23 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, | |||
473 | } | 473 | } |
474 | 474 | ||
475 | mutex_lock(&vm->mutex); | 475 | mutex_lock(&vm->mutex); |
476 | soffset /= RADEON_GPU_PAGE_SIZE; | ||
477 | eoffset /= RADEON_GPU_PAGE_SIZE; | ||
478 | if (soffset || eoffset) { | ||
479 | struct interval_tree_node *it; | ||
480 | it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1); | ||
481 | if (it && it != &bo_va->it) { | ||
482 | struct radeon_bo_va *tmp; | ||
483 | tmp = container_of(it, struct radeon_bo_va, it); | ||
484 | /* bo and tmp overlap, invalid offset */ | ||
485 | dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with " | ||
486 | "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, | ||
487 | soffset, tmp->bo, tmp->it.start, tmp->it.last); | ||
488 | mutex_unlock(&vm->mutex); | ||
489 | return -EINVAL; | ||
490 | } | ||
491 | } | ||
492 | |||
476 | if (bo_va->it.start || bo_va->it.last) { | 493 | if (bo_va->it.start || bo_va->it.last) { |
477 | if (bo_va->addr) { | 494 | if (bo_va->addr) { |
478 | /* add a clone of the bo_va to clear the old address */ | 495 | /* add a clone of the bo_va to clear the old address */ |
@@ -490,6 +507,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, | |||
490 | spin_lock(&vm->status_lock); | 507 | spin_lock(&vm->status_lock); |
491 | list_add(&tmp->vm_status, &vm->freed); | 508 | list_add(&tmp->vm_status, &vm->freed); |
492 | spin_unlock(&vm->status_lock); | 509 | spin_unlock(&vm->status_lock); |
510 | |||
511 | bo_va->addr = 0; | ||
493 | } | 512 | } |
494 | 513 | ||
495 | interval_tree_remove(&bo_va->it, &vm->va); | 514 | interval_tree_remove(&bo_va->it, &vm->va); |
@@ -497,21 +516,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev, | |||
497 | bo_va->it.last = 0; | 516 | bo_va->it.last = 0; |
498 | } | 517 | } |
499 | 518 | ||
500 | soffset /= RADEON_GPU_PAGE_SIZE; | ||
501 | eoffset /= RADEON_GPU_PAGE_SIZE; | ||
502 | if (soffset || eoffset) { | 519 | if (soffset || eoffset) { |
503 | struct interval_tree_node *it; | ||
504 | it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1); | ||
505 | if (it) { | ||
506 | struct radeon_bo_va *tmp; | ||
507 | tmp = container_of(it, struct radeon_bo_va, it); | ||
508 | /* bo and tmp overlap, invalid offset */ | ||
509 | dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with " | ||
510 | "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, | ||
511 | soffset, tmp->bo, tmp->it.start, tmp->it.last); | ||
512 | mutex_unlock(&vm->mutex); | ||
513 | return -EINVAL; | ||
514 | } | ||
515 | bo_va->it.start = soffset; | 520 | bo_va->it.start = soffset; |
516 | bo_va->it.last = eoffset - 1; | 521 | bo_va->it.last = eoffset - 1; |
517 | interval_tree_insert(&bo_va->it, &vm->va); | 522 | interval_tree_insert(&bo_va->it, &vm->va); |
@@ -1107,7 +1112,8 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev, | |||
1107 | list_del(&bo_va->bo_list); | 1112 | list_del(&bo_va->bo_list); |
1108 | 1113 | ||
1109 | mutex_lock(&vm->mutex); | 1114 | mutex_lock(&vm->mutex); |
1110 | interval_tree_remove(&bo_va->it, &vm->va); | 1115 | if (bo_va->it.start || bo_va->it.last) |
1116 | interval_tree_remove(&bo_va->it, &vm->va); | ||
1111 | spin_lock(&vm->status_lock); | 1117 | spin_lock(&vm->status_lock); |
1112 | list_del(&bo_va->vm_status); | 1118 | list_del(&bo_va->vm_status); |
1113 | 1119 | ||
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 3cf1e2921545..9ef2064b1c9c 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h | |||
@@ -989,6 +989,9 @@ | |||
989 | ((n) & 0x3FFF) << 16) | 989 | ((n) & 0x3FFF) << 16) |
990 | 990 | ||
991 | /* UVD */ | 991 | /* UVD */ |
992 | #define UVD_SEMA_ADDR_LOW 0xef00 | ||
993 | #define UVD_SEMA_ADDR_HIGH 0xef04 | ||
994 | #define UVD_SEMA_CMD 0xef08 | ||
992 | #define UVD_GPCOM_VCPU_CMD 0xef0c | 995 | #define UVD_GPCOM_VCPU_CMD 0xef0c |
993 | #define UVD_GPCOM_VCPU_DATA0 0xef10 | 996 | #define UVD_GPCOM_VCPU_DATA0 0xef10 |
994 | #define UVD_GPCOM_VCPU_DATA1 0xef14 | 997 | #define UVD_GPCOM_VCPU_DATA1 0xef14 |
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index b35bccfeef79..ff8b83f5e929 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
@@ -2924,6 +2924,7 @@ struct si_dpm_quirk { | |||
2924 | static struct si_dpm_quirk si_dpm_quirk_list[] = { | 2924 | static struct si_dpm_quirk si_dpm_quirk_list[] = { |
2925 | /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ | 2925 | /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ |
2926 | { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, | 2926 | { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, |
2927 | { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 }, | ||
2927 | { 0, 0, 0, 0 }, | 2928 | { 0, 0, 0, 0 }, |
2928 | }; | 2929 | }; |
2929 | 2930 | ||
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index e72b3cb59358..c6b1cbca47fc 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c | |||
@@ -466,18 +466,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev, | |||
466 | struct radeon_semaphore *semaphore, | 466 | struct radeon_semaphore *semaphore, |
467 | bool emit_wait) | 467 | bool emit_wait) |
468 | { | 468 | { |
469 | uint64_t addr = semaphore->gpu_addr; | 469 | /* disable semaphores for UVD V1 hardware */ |
470 | 470 | return false; | |
471 | radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); | ||
472 | radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); | ||
473 | |||
474 | radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); | ||
475 | radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); | ||
476 | |||
477 | radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); | ||
478 | radeon_ring_write(ring, emit_wait ? 1 : 0); | ||
479 | |||
480 | return true; | ||
481 | } | 471 | } |
482 | 472 | ||
483 | /** | 473 | /** |
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c index 89193519f8a1..7ed778cec7c6 100644 --- a/drivers/gpu/drm/radeon/uvd_v2_2.c +++ b/drivers/gpu/drm/radeon/uvd_v2_2.c | |||
@@ -60,6 +60,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev, | |||
60 | } | 60 | } |
61 | 61 | ||
62 | /** | 62 | /** |
63 | * uvd_v2_2_semaphore_emit - emit semaphore command | ||
64 | * | ||
65 | * @rdev: radeon_device pointer | ||
66 | * @ring: radeon_ring pointer | ||
67 | * @semaphore: semaphore to emit commands for | ||
68 | * @emit_wait: true if we should emit a wait command | ||
69 | * | ||
70 | * Emit a semaphore command (either wait or signal) to the UVD ring. | ||
71 | */ | ||
72 | bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev, | ||
73 | struct radeon_ring *ring, | ||
74 | struct radeon_semaphore *semaphore, | ||
75 | bool emit_wait) | ||
76 | { | ||
77 | uint64_t addr = semaphore->gpu_addr; | ||
78 | |||
79 | radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0)); | ||
80 | radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF); | ||
81 | |||
82 | radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0)); | ||
83 | radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF); | ||
84 | |||
85 | radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0)); | ||
86 | radeon_ring_write(ring, emit_wait ? 1 : 0); | ||
87 | |||
88 | return true; | ||
89 | } | ||
90 | |||
91 | /** | ||
63 | * uvd_v2_2_resume - memory controller programming | 92 | * uvd_v2_2_resume - memory controller programming |
64 | * | 93 | * |
65 | * @rdev: radeon_device pointer | 94 | * @rdev: radeon_device pointer |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index ccb0ce073ef2..4557f335a8a5 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c | |||
@@ -1409,7 +1409,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data) | |||
1409 | struct vop *vop; | 1409 | struct vop *vop; |
1410 | struct resource *res; | 1410 | struct resource *res; |
1411 | size_t alloc_size; | 1411 | size_t alloc_size; |
1412 | int ret; | 1412 | int ret, irq; |
1413 | 1413 | ||
1414 | of_id = of_match_device(vop_driver_dt_match, dev); | 1414 | of_id = of_match_device(vop_driver_dt_match, dev); |
1415 | vop_data = of_id->data; | 1415 | vop_data = of_id->data; |
@@ -1445,11 +1445,12 @@ static int vop_bind(struct device *dev, struct device *master, void *data) | |||
1445 | return ret; | 1445 | return ret; |
1446 | } | 1446 | } |
1447 | 1447 | ||
1448 | vop->irq = platform_get_irq(pdev, 0); | 1448 | irq = platform_get_irq(pdev, 0); |
1449 | if (vop->irq < 0) { | 1449 | if (irq < 0) { |
1450 | dev_err(dev, "cannot find irq for vop\n"); | 1450 | dev_err(dev, "cannot find irq for vop\n"); |
1451 | return vop->irq; | 1451 | return irq; |
1452 | } | 1452 | } |
1453 | vop->irq = (unsigned int)irq; | ||
1453 | 1454 | ||
1454 | spin_lock_init(&vop->reg_lock); | 1455 | spin_lock_init(&vop->reg_lock); |
1455 | spin_lock_init(&vop->irq_lock); | 1456 | spin_lock_init(&vop->irq_lock); |
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 1833abd7d3aa..bfad15a913a0 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c | |||
@@ -173,7 +173,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags) | |||
173 | drm->irq_enabled = true; | 173 | drm->irq_enabled = true; |
174 | 174 | ||
175 | /* syncpoints are used for full 32-bit hardware VBLANK counters */ | 175 | /* syncpoints are used for full 32-bit hardware VBLANK counters */ |
176 | drm->vblank_disable_immediate = true; | ||
177 | drm->max_vblank_count = 0xffffffff; | 176 | drm->max_vblank_count = 0xffffffff; |
178 | 177 | ||
179 | err = drm_vblank_init(drm, drm->mode_config.num_crtc); | 178 | err = drm_vblank_init(drm, drm->mode_config.num_crtc); |
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index a04c49f2a011..39ea67f9b066 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig | |||
@@ -643,15 +643,6 @@ config BLK_DEV_TC86C001 | |||
643 | help | 643 | help |
644 | This driver adds support for Toshiba TC86C001 GOKU-S chip. | 644 | This driver adds support for Toshiba TC86C001 GOKU-S chip. |
645 | 645 | ||
646 | config BLK_DEV_CELLEB | ||
647 | tristate "Toshiba's Cell Reference Set IDE support" | ||
648 | depends on PPC_CELLEB | ||
649 | select BLK_DEV_IDEDMA_PCI | ||
650 | help | ||
651 | This driver provides support for the on-board IDE controller on | ||
652 | Toshiba Cell Reference Board. | ||
653 | If unsure, say Y. | ||
654 | |||
655 | endif | 646 | endif |
656 | 647 | ||
657 | # TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF | 648 | # TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF |
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile index a04ee82f1c8f..2a8c417d4081 100644 --- a/drivers/ide/Makefile +++ b/drivers/ide/Makefile | |||
@@ -38,7 +38,6 @@ obj-$(CONFIG_BLK_DEV_AEC62XX) += aec62xx.o | |||
38 | obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o | 38 | obj-$(CONFIG_BLK_DEV_ALI15X3) += alim15x3.o |
39 | obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o | 39 | obj-$(CONFIG_BLK_DEV_AMD74XX) += amd74xx.o |
40 | obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o | 40 | obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o |
41 | obj-$(CONFIG_BLK_DEV_CELLEB) += scc_pata.o | ||
42 | obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o | 41 | obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o |
43 | obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o | 42 | obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o |
44 | obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o | 43 | obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o |
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c deleted file mode 100644 index 2a2d188b5d5b..000000000000 --- a/drivers/ide/scc_pata.c +++ /dev/null | |||
@@ -1,887 +0,0 @@ | |||
1 | /* | ||
2 | * Support for IDE interfaces on Celleb platform | ||
3 | * | ||
4 | * (C) Copyright 2006 TOSHIBA CORPORATION | ||
5 | * | ||
6 | * This code is based on drivers/ide/pci/siimage.c: | ||
7 | * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org> | ||
8 | * Copyright (C) 2003 Red Hat | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License along | ||
21 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
22 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
23 | */ | ||
24 | |||
25 | #include <linux/types.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/pci.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/ide.h> | ||
30 | #include <linux/init.h> | ||
31 | |||
32 | #define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4 | ||
33 | |||
34 | #define SCC_PATA_NAME "scc IDE" | ||
35 | |||
36 | #define TDVHSEL_MASTER 0x00000001 | ||
37 | #define TDVHSEL_SLAVE 0x00000004 | ||
38 | |||
39 | #define MODE_JCUSFEN 0x00000080 | ||
40 | |||
41 | #define CCKCTRL_ATARESET 0x00040000 | ||
42 | #define CCKCTRL_BUFCNT 0x00020000 | ||
43 | #define CCKCTRL_CRST 0x00010000 | ||
44 | #define CCKCTRL_OCLKEN 0x00000100 | ||
45 | #define CCKCTRL_ATACLKOEN 0x00000002 | ||
46 | #define CCKCTRL_LCLKEN 0x00000001 | ||
47 | |||
48 | #define QCHCD_IOS_SS 0x00000001 | ||
49 | |||
50 | #define QCHSD_STPDIAG 0x00020000 | ||
51 | |||
52 | #define INTMASK_MSK 0xD1000012 | ||
53 | #define INTSTS_SERROR 0x80000000 | ||
54 | #define INTSTS_PRERR 0x40000000 | ||
55 | #define INTSTS_RERR 0x10000000 | ||
56 | #define INTSTS_ICERR 0x01000000 | ||
57 | #define INTSTS_BMSINT 0x00000010 | ||
58 | #define INTSTS_BMHE 0x00000008 | ||
59 | #define INTSTS_IOIRQS 0x00000004 | ||
60 | #define INTSTS_INTRQ 0x00000002 | ||
61 | #define INTSTS_ACTEINT 0x00000001 | ||
62 | |||
63 | #define ECMODE_VALUE 0x01 | ||
64 | |||
65 | static struct scc_ports { | ||
66 | unsigned long ctl, dma; | ||
67 | struct ide_host *host; /* for removing port from system */ | ||
68 | } scc_ports[MAX_HWIFS]; | ||
69 | |||
70 | /* PIO transfer mode table */ | ||
71 | /* JCHST */ | ||
72 | static unsigned long JCHSTtbl[2][7] = { | ||
73 | {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */ | ||
74 | {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 133MHz */ | ||
75 | }; | ||
76 | |||
77 | /* JCHHT */ | ||
78 | static unsigned long JCHHTtbl[2][7] = { | ||
79 | {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */ | ||
80 | {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00} /* 133MHz */ | ||
81 | }; | ||
82 | |||
83 | /* JCHCT */ | ||
84 | static unsigned long JCHCTtbl[2][7] = { | ||
85 | {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */ | ||
86 | {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00} /* 133MHz */ | ||
87 | }; | ||
88 | |||
89 | |||
90 | /* DMA transfer mode table */ | ||
91 | /* JCHDCTM/JCHDCTS */ | ||
92 | static unsigned long JCHDCTxtbl[2][7] = { | ||
93 | {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00}, /* 100MHz */ | ||
94 | {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00} /* 133MHz */ | ||
95 | }; | ||
96 | |||
97 | /* JCSTWTM/JCSTWTS */ | ||
98 | static unsigned long JCSTWTxtbl[2][7] = { | ||
99 | {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00}, /* 100MHz */ | ||
100 | {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02} /* 133MHz */ | ||
101 | }; | ||
102 | |||
103 | /* JCTSS */ | ||
104 | static unsigned long JCTSStbl[2][7] = { | ||
105 | {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */ | ||
106 | {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05} /* 133MHz */ | ||
107 | }; | ||
108 | |||
109 | /* JCENVT */ | ||
110 | static unsigned long JCENVTtbl[2][7] = { | ||
111 | {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */ | ||
112 | {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} /* 133MHz */ | ||
113 | }; | ||
114 | |||
115 | /* JCACTSELS/JCACTSELM */ | ||
116 | static unsigned long JCACTSELtbl[2][7] = { | ||
117 | {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */ | ||
118 | {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} /* 133MHz */ | ||
119 | }; | ||
120 | |||
121 | |||
122 | static u8 scc_ide_inb(unsigned long port) | ||
123 | { | ||
124 | u32 data = in_be32((void*)port); | ||
125 | return (u8)data; | ||
126 | } | ||
127 | |||
128 | static void scc_exec_command(ide_hwif_t *hwif, u8 cmd) | ||
129 | { | ||
130 | out_be32((void *)hwif->io_ports.command_addr, cmd); | ||
131 | eieio(); | ||
132 | in_be32((void *)(hwif->dma_base + 0x01c)); | ||
133 | eieio(); | ||
134 | } | ||
135 | |||
136 | static u8 scc_read_status(ide_hwif_t *hwif) | ||
137 | { | ||
138 | return (u8)in_be32((void *)hwif->io_ports.status_addr); | ||
139 | } | ||
140 | |||
141 | static u8 scc_read_altstatus(ide_hwif_t *hwif) | ||
142 | { | ||
143 | return (u8)in_be32((void *)hwif->io_ports.ctl_addr); | ||
144 | } | ||
145 | |||
146 | static u8 scc_dma_sff_read_status(ide_hwif_t *hwif) | ||
147 | { | ||
148 | return (u8)in_be32((void *)(hwif->dma_base + 4)); | ||
149 | } | ||
150 | |||
151 | static void scc_write_devctl(ide_hwif_t *hwif, u8 ctl) | ||
152 | { | ||
153 | out_be32((void *)hwif->io_ports.ctl_addr, ctl); | ||
154 | eieio(); | ||
155 | in_be32((void *)(hwif->dma_base + 0x01c)); | ||
156 | eieio(); | ||
157 | } | ||
158 | |||
159 | static void scc_ide_insw(unsigned long port, void *addr, u32 count) | ||
160 | { | ||
161 | u16 *ptr = (u16 *)addr; | ||
162 | while (count--) { | ||
163 | *ptr++ = le16_to_cpu(in_be32((void*)port)); | ||
164 | } | ||
165 | } | ||
166 | |||
167 | static void scc_ide_insl(unsigned long port, void *addr, u32 count) | ||
168 | { | ||
169 | u16 *ptr = (u16 *)addr; | ||
170 | while (count--) { | ||
171 | *ptr++ = le16_to_cpu(in_be32((void*)port)); | ||
172 | *ptr++ = le16_to_cpu(in_be32((void*)port)); | ||
173 | } | ||
174 | } | ||
175 | |||
176 | static void scc_ide_outb(u8 addr, unsigned long port) | ||
177 | { | ||
178 | out_be32((void*)port, addr); | ||
179 | } | ||
180 | |||
181 | static void | ||
182 | scc_ide_outsw(unsigned long port, void *addr, u32 count) | ||
183 | { | ||
184 | u16 *ptr = (u16 *)addr; | ||
185 | while (count--) { | ||
186 | out_be32((void*)port, cpu_to_le16(*ptr++)); | ||
187 | } | ||
188 | } | ||
189 | |||
190 | static void | ||
191 | scc_ide_outsl(unsigned long port, void *addr, u32 count) | ||
192 | { | ||
193 | u16 *ptr = (u16 *)addr; | ||
194 | while (count--) { | ||
195 | out_be32((void*)port, cpu_to_le16(*ptr++)); | ||
196 | out_be32((void*)port, cpu_to_le16(*ptr++)); | ||
197 | } | ||
198 | } | ||
199 | |||
200 | /** | ||
201 | * scc_set_pio_mode - set host controller for PIO mode | ||
202 | * @hwif: port | ||
203 | * @drive: drive | ||
204 | * | ||
205 | * Load the timing settings for this device mode into the | ||
206 | * controller. | ||
207 | */ | ||
208 | |||
209 | static void scc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) | ||
210 | { | ||
211 | struct scc_ports *ports = ide_get_hwifdata(hwif); | ||
212 | unsigned long ctl_base = ports->ctl; | ||
213 | unsigned long cckctrl_port = ctl_base + 0xff0; | ||
214 | unsigned long piosht_port = ctl_base + 0x000; | ||
215 | unsigned long pioct_port = ctl_base + 0x004; | ||
216 | unsigned long reg; | ||
217 | int offset; | ||
218 | const u8 pio = drive->pio_mode - XFER_PIO_0; | ||
219 | |||
220 | reg = in_be32((void __iomem *)cckctrl_port); | ||
221 | if (reg & CCKCTRL_ATACLKOEN) { | ||
222 | offset = 1; /* 133MHz */ | ||
223 | } else { | ||
224 | offset = 0; /* 100MHz */ | ||
225 | } | ||
226 | reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio]; | ||
227 | out_be32((void __iomem *)piosht_port, reg); | ||
228 | reg = JCHCTtbl[offset][pio]; | ||
229 | out_be32((void __iomem *)pioct_port, reg); | ||
230 | } | ||
231 | |||
232 | /** | ||
233 | * scc_set_dma_mode - set host controller for DMA mode | ||
234 | * @hwif: port | ||
235 | * @drive: drive | ||
236 | * | ||
237 | * Load the timing settings for this device mode into the | ||
238 | * controller. | ||
239 | */ | ||
240 | |||
241 | static void scc_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) | ||
242 | { | ||
243 | struct scc_ports *ports = ide_get_hwifdata(hwif); | ||
244 | unsigned long ctl_base = ports->ctl; | ||
245 | unsigned long cckctrl_port = ctl_base + 0xff0; | ||
246 | unsigned long mdmact_port = ctl_base + 0x008; | ||
247 | unsigned long mcrcst_port = ctl_base + 0x00c; | ||
248 | unsigned long sdmact_port = ctl_base + 0x010; | ||
249 | unsigned long scrcst_port = ctl_base + 0x014; | ||
250 | unsigned long udenvt_port = ctl_base + 0x018; | ||
251 | unsigned long tdvhsel_port = ctl_base + 0x020; | ||
252 | int is_slave = drive->dn & 1; | ||
253 | int offset, idx; | ||
254 | unsigned long reg; | ||
255 | unsigned long jcactsel; | ||
256 | const u8 speed = drive->dma_mode; | ||
257 | |||
258 | reg = in_be32((void __iomem *)cckctrl_port); | ||
259 | if (reg & CCKCTRL_ATACLKOEN) { | ||
260 | offset = 1; /* 133MHz */ | ||
261 | } else { | ||
262 | offset = 0; /* 100MHz */ | ||
263 | } | ||
264 | |||
265 | idx = speed - XFER_UDMA_0; | ||
266 | |||
267 | jcactsel = JCACTSELtbl[offset][idx]; | ||
268 | if (is_slave) { | ||
269 | out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]); | ||
270 | out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]); | ||
271 | jcactsel = jcactsel << 2; | ||
272 | out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel); | ||
273 | } else { | ||
274 | out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]); | ||
275 | out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]); | ||
276 | out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel); | ||
277 | } | ||
278 | reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]; | ||
279 | out_be32((void __iomem *)udenvt_port, reg); | ||
280 | } | ||
281 | |||
282 | static void scc_dma_host_set(ide_drive_t *drive, int on) | ||
283 | { | ||
284 | ide_hwif_t *hwif = drive->hwif; | ||
285 | u8 unit = drive->dn & 1; | ||
286 | u8 dma_stat = scc_dma_sff_read_status(hwif); | ||
287 | |||
288 | if (on) | ||
289 | dma_stat |= (1 << (5 + unit)); | ||
290 | else | ||
291 | dma_stat &= ~(1 << (5 + unit)); | ||
292 | |||
293 | scc_ide_outb(dma_stat, hwif->dma_base + 4); | ||
294 | } | ||
295 | |||
296 | /** | ||
297 | * scc_dma_setup - begin a DMA phase | ||
298 | * @drive: target device | ||
299 | * @cmd: command | ||
300 | * | ||
301 | * Build an IDE DMA PRD (IDE speak for scatter gather table) | ||
302 | * and then set up the DMA transfer registers. | ||
303 | * | ||
304 | * Returns 0 on success. If a PIO fallback is required then 1 | ||
305 | * is returned. | ||
306 | */ | ||
307 | |||
308 | static int scc_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd) | ||
309 | { | ||
310 | ide_hwif_t *hwif = drive->hwif; | ||
311 | u32 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR; | ||
312 | u8 dma_stat; | ||
313 | |||
314 | /* fall back to pio! */ | ||
315 | if (ide_build_dmatable(drive, cmd) == 0) | ||
316 | return 1; | ||
317 | |||
318 | /* PRD table */ | ||
319 | out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma); | ||
320 | |||
321 | /* specify r/w */ | ||
322 | out_be32((void __iomem *)hwif->dma_base, rw); | ||
323 | |||
324 | /* read DMA status for INTR & ERROR flags */ | ||
325 | dma_stat = scc_dma_sff_read_status(hwif); | ||
326 | |||
327 | /* clear INTR & ERROR flags */ | ||
328 | out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6); | ||
329 | |||
330 | return 0; | ||
331 | } | ||
332 | |||
333 | static void scc_dma_start(ide_drive_t *drive) | ||
334 | { | ||
335 | ide_hwif_t *hwif = drive->hwif; | ||
336 | u8 dma_cmd = scc_ide_inb(hwif->dma_base); | ||
337 | |||
338 | /* start DMA */ | ||
339 | scc_ide_outb(dma_cmd | 1, hwif->dma_base); | ||
340 | } | ||
341 | |||
342 | static int __scc_dma_end(ide_drive_t *drive) | ||
343 | { | ||
344 | ide_hwif_t *hwif = drive->hwif; | ||
345 | u8 dma_stat, dma_cmd; | ||
346 | |||
347 | /* get DMA command mode */ | ||
348 | dma_cmd = scc_ide_inb(hwif->dma_base); | ||
349 | /* stop DMA */ | ||
350 | scc_ide_outb(dma_cmd & ~1, hwif->dma_base); | ||
351 | /* get DMA status */ | ||
352 | dma_stat = scc_dma_sff_read_status(hwif); | ||
353 | /* clear the INTR & ERROR bits */ | ||
354 | scc_ide_outb(dma_stat | 6, hwif->dma_base + 4); | ||
355 | /* verify good DMA status */ | ||
356 | return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0; | ||
357 | } | ||
358 | |||
359 | /** | ||
360 | * scc_dma_end - Stop DMA | ||
361 | * @drive: IDE drive | ||
362 | * | ||
363 | * Check and clear INT Status register. | ||
364 | * Then call __scc_dma_end(). | ||
365 | */ | ||
366 | |||
367 | static int scc_dma_end(ide_drive_t *drive) | ||
368 | { | ||
369 | ide_hwif_t *hwif = drive->hwif; | ||
370 | void __iomem *dma_base = (void __iomem *)hwif->dma_base; | ||
371 | unsigned long intsts_port = hwif->dma_base + 0x014; | ||
372 | u32 reg; | ||
373 | int dma_stat, data_loss = 0; | ||
374 | static int retry = 0; | ||
375 | |||
376 | /* errata A308 workaround: Step5 (check data loss) */ | ||
377 | /* We don't check non ide_disk because it is limited to UDMA4 */ | ||
378 | if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr) | ||
379 | & ATA_ERR) && | ||
380 | drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) { | ||
381 | reg = in_be32((void __iomem *)intsts_port); | ||
382 | if (!(reg & INTSTS_ACTEINT)) { | ||
383 | printk(KERN_WARNING "%s: operation failed (transfer data loss)\n", | ||
384 | drive->name); | ||
385 | data_loss = 1; | ||
386 | if (retry++) { | ||
387 | struct request *rq = hwif->rq; | ||
388 | ide_drive_t *drive; | ||
389 | int i; | ||
390 | |||
391 | /* ERROR_RESET and drive->crc_count are needed | ||
392 | * to reduce DMA transfer mode in retry process. | ||
393 | */ | ||
394 | if (rq) | ||
395 | rq->errors |= ERROR_RESET; | ||
396 | |||
397 | ide_port_for_each_dev(i, drive, hwif) | ||
398 | drive->crc_count++; | ||
399 | } | ||
400 | } | ||
401 | } | ||
402 | |||
403 | while (1) { | ||
404 | reg = in_be32((void __iomem *)intsts_port); | ||
405 | |||
406 | if (reg & INTSTS_SERROR) { | ||
407 | printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME); | ||
408 | out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT); | ||
409 | |||
410 | out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); | ||
411 | continue; | ||
412 | } | ||
413 | |||
414 | if (reg & INTSTS_PRERR) { | ||
415 | u32 maea0, maec0; | ||
416 | unsigned long ctl_base = hwif->config_data; | ||
417 | |||
418 | maea0 = in_be32((void __iomem *)(ctl_base + 0xF50)); | ||
419 | maec0 = in_be32((void __iomem *)(ctl_base + 0xF54)); | ||
420 | |||
421 | printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0); | ||
422 | |||
423 | out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT); | ||
424 | |||
425 | out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); | ||
426 | continue; | ||
427 | } | ||
428 | |||
429 | if (reg & INTSTS_RERR) { | ||
430 | printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME); | ||
431 | out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT); | ||
432 | |||
433 | out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); | ||
434 | continue; | ||
435 | } | ||
436 | |||
437 | if (reg & INTSTS_ICERR) { | ||
438 | out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS); | ||
439 | |||
440 | printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME); | ||
441 | out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT); | ||
442 | continue; | ||
443 | } | ||
444 | |||
445 | if (reg & INTSTS_BMSINT) { | ||
446 | printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME); | ||
447 | out_be32((void __iomem *)intsts_port, INTSTS_BMSINT); | ||
448 | |||
449 | ide_do_reset(drive); | ||
450 | continue; | ||
451 | } | ||
452 | |||
453 | if (reg & INTSTS_BMHE) { | ||
454 | out_be32((void __iomem *)intsts_port, INTSTS_BMHE); | ||
455 | continue; | ||
456 | } | ||
457 | |||
458 | if (reg & INTSTS_ACTEINT) { | ||
459 | out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT); | ||
460 | continue; | ||
461 | } | ||
462 | |||
463 | if (reg & INTSTS_IOIRQS) { | ||
464 | out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS); | ||
465 | continue; | ||
466 | } | ||
467 | break; | ||
468 | } | ||
469 | |||
470 | dma_stat = __scc_dma_end(drive); | ||
471 | if (data_loss) | ||
472 | dma_stat |= 2; /* emulate DMA error (to retry command) */ | ||
473 | return dma_stat; | ||
474 | } | ||
475 | |||
476 | /* returns 1 if dma irq issued, 0 otherwise */ | ||
477 | static int scc_dma_test_irq(ide_drive_t *drive) | ||
478 | { | ||
479 | ide_hwif_t *hwif = drive->hwif; | ||
480 | u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014); | ||
481 | |||
482 | /* SCC errata A252,A308 workaround: Step4 */ | ||
483 | if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr) | ||
484 | & ATA_ERR) && | ||
485 | (int_stat & INTSTS_INTRQ)) | ||
486 | return 1; | ||
487 | |||
488 | /* SCC errata A308 workaround: Step5 (polling IOIRQS) */ | ||
489 | if (int_stat & INTSTS_IOIRQS) | ||
490 | return 1; | ||
491 | |||
492 | return 0; | ||
493 | } | ||
494 | |||
495 | static u8 scc_udma_filter(ide_drive_t *drive) | ||
496 | { | ||
497 | ide_hwif_t *hwif = drive->hwif; | ||
498 | u8 mask = hwif->ultra_mask; | ||
499 | |||
500 | /* errata A308 workaround: limit non ide_disk drive to UDMA4 */ | ||
501 | if ((drive->media != ide_disk) && (mask & 0xE0)) { | ||
502 | printk(KERN_INFO "%s: limit %s to UDMA4\n", | ||
503 | SCC_PATA_NAME, drive->name); | ||
504 | mask = ATA_UDMA4; | ||
505 | } | ||
506 | |||
507 | return mask; | ||
508 | } | ||
509 | |||
510 | /** | ||
511 | * setup_mmio_scc - map CTRL/BMID region | ||
512 | * @dev: PCI device we are configuring | ||
513 | * @name: device name | ||
514 | * | ||
515 | */ | ||
516 | |||
517 | static int setup_mmio_scc (struct pci_dev *dev, const char *name) | ||
518 | { | ||
519 | void __iomem *ctl_addr; | ||
520 | void __iomem *dma_addr; | ||
521 | int i, ret; | ||
522 | |||
523 | for (i = 0; i < MAX_HWIFS; i++) { | ||
524 | if (scc_ports[i].ctl == 0) | ||
525 | break; | ||
526 | } | ||
527 | if (i >= MAX_HWIFS) | ||
528 | return -ENOMEM; | ||
529 | |||
530 | ret = pci_request_selected_regions(dev, (1 << 2) - 1, name); | ||
531 | if (ret < 0) { | ||
532 | printk(KERN_ERR "%s: can't reserve resources\n", name); | ||
533 | return ret; | ||
534 | } | ||
535 | |||
536 | ctl_addr = pci_ioremap_bar(dev, 0); | ||
537 | if (!ctl_addr) | ||
538 | goto fail_0; | ||
539 | |||
540 | dma_addr = pci_ioremap_bar(dev, 1); | ||
541 | if (!dma_addr) | ||
542 | goto fail_1; | ||
543 | |||
544 | pci_set_master(dev); | ||
545 | scc_ports[i].ctl = (unsigned long)ctl_addr; | ||
546 | scc_ports[i].dma = (unsigned long)dma_addr; | ||
547 | pci_set_drvdata(dev, (void *) &scc_ports[i]); | ||
548 | |||
549 | return 1; | ||
550 | |||
551 | fail_1: | ||
552 | iounmap(ctl_addr); | ||
553 | fail_0: | ||
554 | return -ENOMEM; | ||
555 | } | ||
556 | |||
557 | static int scc_ide_setup_pci_device(struct pci_dev *dev, | ||
558 | const struct ide_port_info *d) | ||
559 | { | ||
560 | struct scc_ports *ports = pci_get_drvdata(dev); | ||
561 | struct ide_host *host; | ||
562 | struct ide_hw hw, *hws[] = { &hw }; | ||
563 | int i, rc; | ||
564 | |||
565 | memset(&hw, 0, sizeof(hw)); | ||
566 | for (i = 0; i <= 8; i++) | ||
567 | hw.io_ports_array[i] = ports->dma + 0x20 + i * 4; | ||
568 | hw.irq = dev->irq; | ||
569 | hw.dev = &dev->dev; | ||
570 | |||
571 | rc = ide_host_add(d, hws, 1, &host); | ||
572 | if (rc) | ||
573 | return rc; | ||
574 | |||
575 | ports->host = host; | ||
576 | |||
577 | return 0; | ||
578 | } | ||
579 | |||
580 | /** | ||
581 | * init_setup_scc - set up an SCC PATA Controller | ||
582 | * @dev: PCI device | ||
583 | * @d: IDE port info | ||
584 | * | ||
585 | * Perform the initial set up for this device. | ||
586 | */ | ||
587 | |||
588 | static int init_setup_scc(struct pci_dev *dev, const struct ide_port_info *d) | ||
589 | { | ||
590 | unsigned long ctl_base; | ||
591 | unsigned long dma_base; | ||
592 | unsigned long cckctrl_port; | ||
593 | unsigned long intmask_port; | ||
594 | unsigned long mode_port; | ||
595 | unsigned long ecmode_port; | ||
596 | u32 reg = 0; | ||
597 | struct scc_ports *ports; | ||
598 | int rc; | ||
599 | |||
600 | rc = pci_enable_device(dev); | ||
601 | if (rc) | ||
602 | goto end; | ||
603 | |||
604 | rc = setup_mmio_scc(dev, d->name); | ||
605 | if (rc < 0) | ||
606 | goto end; | ||
607 | |||
608 | ports = pci_get_drvdata(dev); | ||
609 | ctl_base = ports->ctl; | ||
610 | dma_base = ports->dma; | ||
611 | cckctrl_port = ctl_base + 0xff0; | ||
612 | intmask_port = dma_base + 0x010; | ||
613 | mode_port = ctl_base + 0x024; | ||
614 | ecmode_port = ctl_base + 0xf00; | ||
615 | |||
616 | /* controller initialization */ | ||
617 | reg = 0; | ||
618 | out_be32((void*)cckctrl_port, reg); | ||
619 | reg |= CCKCTRL_ATACLKOEN; | ||
620 | out_be32((void*)cckctrl_port, reg); | ||
621 | reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN; | ||
622 | out_be32((void*)cckctrl_port, reg); | ||
623 | reg |= CCKCTRL_CRST; | ||
624 | out_be32((void*)cckctrl_port, reg); | ||
625 | |||
626 | for (;;) { | ||
627 | reg = in_be32((void*)cckctrl_port); | ||
628 | if (reg & CCKCTRL_CRST) | ||
629 | break; | ||
630 | udelay(5000); | ||
631 | } | ||
632 | |||
633 | reg |= CCKCTRL_ATARESET; | ||
634 | out_be32((void*)cckctrl_port, reg); | ||
635 | |||
636 | out_be32((void*)ecmode_port, ECMODE_VALUE); | ||
637 | out_be32((void*)mode_port, MODE_JCUSFEN); | ||
638 | out_be32((void*)intmask_port, INTMASK_MSK); | ||
639 | |||
640 | rc = scc_ide_setup_pci_device(dev, d); | ||
641 | |||
642 | end: | ||
643 | return rc; | ||
644 | } | ||
645 | |||
646 | static void scc_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) | ||
647 | { | ||
648 | struct ide_io_ports *io_ports = &drive->hwif->io_ports; | ||
649 | |||
650 | if (valid & IDE_VALID_FEATURE) | ||
651 | scc_ide_outb(tf->feature, io_ports->feature_addr); | ||
652 | if (valid & IDE_VALID_NSECT) | ||
653 | scc_ide_outb(tf->nsect, io_ports->nsect_addr); | ||
654 | if (valid & IDE_VALID_LBAL) | ||
655 | scc_ide_outb(tf->lbal, io_ports->lbal_addr); | ||
656 | if (valid & IDE_VALID_LBAM) | ||
657 | scc_ide_outb(tf->lbam, io_ports->lbam_addr); | ||
658 | if (valid & IDE_VALID_LBAH) | ||
659 | scc_ide_outb(tf->lbah, io_ports->lbah_addr); | ||
660 | if (valid & IDE_VALID_DEVICE) | ||
661 | scc_ide_outb(tf->device, io_ports->device_addr); | ||
662 | } | ||
663 | |||
664 | static void scc_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) | ||
665 | { | ||
666 | struct ide_io_ports *io_ports = &drive->hwif->io_ports; | ||
667 | |||
668 | if (valid & IDE_VALID_ERROR) | ||
669 | tf->error = scc_ide_inb(io_ports->feature_addr); | ||
670 | if (valid & IDE_VALID_NSECT) | ||
671 | tf->nsect = scc_ide_inb(io_ports->nsect_addr); | ||
672 | if (valid & IDE_VALID_LBAL) | ||
673 | tf->lbal = scc_ide_inb(io_ports->lbal_addr); | ||
674 | if (valid & IDE_VALID_LBAM) | ||
675 | tf->lbam = scc_ide_inb(io_ports->lbam_addr); | ||
676 | if (valid & IDE_VALID_LBAH) | ||
677 | tf->lbah = scc_ide_inb(io_ports->lbah_addr); | ||
678 | if (valid & IDE_VALID_DEVICE) | ||
679 | tf->device = scc_ide_inb(io_ports->device_addr); | ||
680 | } | ||
681 | |||
682 | static void scc_input_data(ide_drive_t *drive, struct ide_cmd *cmd, | ||
683 | void *buf, unsigned int len) | ||
684 | { | ||
685 | unsigned long data_addr = drive->hwif->io_ports.data_addr; | ||
686 | |||
687 | len++; | ||
688 | |||
689 | if (drive->io_32bit) { | ||
690 | scc_ide_insl(data_addr, buf, len / 4); | ||
691 | |||
692 | if ((len & 3) >= 2) | ||
693 | scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1); | ||
694 | } else | ||
695 | scc_ide_insw(data_addr, buf, len / 2); | ||
696 | } | ||
697 | |||
698 | static void scc_output_data(ide_drive_t *drive, struct ide_cmd *cmd, | ||
699 | void *buf, unsigned int len) | ||
700 | { | ||
701 | unsigned long data_addr = drive->hwif->io_ports.data_addr; | ||
702 | |||
703 | len++; | ||
704 | |||
705 | if (drive->io_32bit) { | ||
706 | scc_ide_outsl(data_addr, buf, len / 4); | ||
707 | |||
708 | if ((len & 3) >= 2) | ||
709 | scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1); | ||
710 | } else | ||
711 | scc_ide_outsw(data_addr, buf, len / 2); | ||
712 | } | ||
713 | |||
714 | /** | ||
715 | * init_mmio_iops_scc - set up the iops for MMIO | ||
716 | * @hwif: interface to set up | ||
717 | * | ||
718 | */ | ||
719 | |||
720 | static void init_mmio_iops_scc(ide_hwif_t *hwif) | ||
721 | { | ||
722 | struct pci_dev *dev = to_pci_dev(hwif->dev); | ||
723 | struct scc_ports *ports = pci_get_drvdata(dev); | ||
724 | unsigned long dma_base = ports->dma; | ||
725 | |||
726 | ide_set_hwifdata(hwif, ports); | ||
727 | |||
728 | hwif->dma_base = dma_base; | ||
729 | hwif->config_data = ports->ctl; | ||
730 | } | ||
731 | |||
732 | /** | ||
733 | * init_iops_scc - set up iops | ||
734 | * @hwif: interface to set up | ||
735 | * | ||
736 | * Do the basic setup for the SCC hardware interface | ||
737 | * and then do the MMIO setup. | ||
738 | */ | ||
739 | |||
740 | static void init_iops_scc(ide_hwif_t *hwif) | ||
741 | { | ||
742 | struct pci_dev *dev = to_pci_dev(hwif->dev); | ||
743 | |||
744 | hwif->hwif_data = NULL; | ||
745 | if (pci_get_drvdata(dev) == NULL) | ||
746 | return; | ||
747 | init_mmio_iops_scc(hwif); | ||
748 | } | ||
749 | |||
750 | static int scc_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d) | ||
751 | { | ||
752 | return ide_allocate_dma_engine(hwif); | ||
753 | } | ||
754 | |||
755 | static u8 scc_cable_detect(ide_hwif_t *hwif) | ||
756 | { | ||
757 | return ATA_CBL_PATA80; | ||
758 | } | ||
759 | |||
760 | /** | ||
761 | * init_hwif_scc - set up hwif | ||
762 | * @hwif: interface to set up | ||
763 | * | ||
764 |  * We do the basic setup of the interface structure. The SCC | ||
765 | * requires several custom handlers so we override the default | ||
766 | * ide DMA handlers appropriately. | ||
767 | */ | ||
768 | |||
769 | static void init_hwif_scc(ide_hwif_t *hwif) | ||
770 | { | ||
771 | /* PTERADD */ | ||
772 | out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma); | ||
773 | |||
774 | if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN) | ||
775 | hwif->ultra_mask = ATA_UDMA6; /* 133MHz */ | ||
776 | else | ||
777 | hwif->ultra_mask = ATA_UDMA5; /* 100MHz */ | ||
778 | } | ||
779 | |||
780 | static const struct ide_tp_ops scc_tp_ops = { | ||
781 | .exec_command = scc_exec_command, | ||
782 | .read_status = scc_read_status, | ||
783 | .read_altstatus = scc_read_altstatus, | ||
784 | .write_devctl = scc_write_devctl, | ||
785 | |||
786 | .dev_select = ide_dev_select, | ||
787 | .tf_load = scc_tf_load, | ||
788 | .tf_read = scc_tf_read, | ||
789 | |||
790 | .input_data = scc_input_data, | ||
791 | .output_data = scc_output_data, | ||
792 | }; | ||
793 | |||
794 | static const struct ide_port_ops scc_port_ops = { | ||
795 | .set_pio_mode = scc_set_pio_mode, | ||
796 | .set_dma_mode = scc_set_dma_mode, | ||
797 | .udma_filter = scc_udma_filter, | ||
798 | .cable_detect = scc_cable_detect, | ||
799 | }; | ||
800 | |||
801 | static const struct ide_dma_ops scc_dma_ops = { | ||
802 | .dma_host_set = scc_dma_host_set, | ||
803 | .dma_setup = scc_dma_setup, | ||
804 | .dma_start = scc_dma_start, | ||
805 | .dma_end = scc_dma_end, | ||
806 | .dma_test_irq = scc_dma_test_irq, | ||
807 | .dma_lost_irq = ide_dma_lost_irq, | ||
808 | .dma_timer_expiry = ide_dma_sff_timer_expiry, | ||
809 | .dma_sff_read_status = scc_dma_sff_read_status, | ||
810 | }; | ||
811 | |||
812 | static const struct ide_port_info scc_chipset = { | ||
813 | .name = "sccIDE", | ||
814 | .init_iops = init_iops_scc, | ||
815 | .init_dma = scc_init_dma, | ||
816 | .init_hwif = init_hwif_scc, | ||
817 | .tp_ops = &scc_tp_ops, | ||
818 | .port_ops = &scc_port_ops, | ||
819 | .dma_ops = &scc_dma_ops, | ||
820 | .host_flags = IDE_HFLAG_SINGLE, | ||
821 | .irq_flags = IRQF_SHARED, | ||
822 | .pio_mask = ATA_PIO4, | ||
823 | .chipset = ide_pci, | ||
824 | }; | ||
825 | |||
826 | /** | ||
827 | * scc_init_one - pci layer discovery entry | ||
828 | * @dev: PCI device | ||
829 | * @id: ident table entry | ||
830 | * | ||
831 | * Called by the PCI code when it finds an SCC PATA controller. | ||
832 | * We then use the IDE PCI generic helper to do most of the work. | ||
833 | */ | ||
834 | |||
835 | static int scc_init_one(struct pci_dev *dev, const struct pci_device_id *id) | ||
836 | { | ||
837 | return init_setup_scc(dev, &scc_chipset); | ||
838 | } | ||
839 | |||
840 | /** | ||
841 | * scc_remove - pci layer remove entry | ||
842 | * @dev: PCI device | ||
843 | * | ||
844 | * Called by the PCI code when it removes an SCC PATA controller. | ||
845 | */ | ||
846 | |||
847 | static void scc_remove(struct pci_dev *dev) | ||
848 | { | ||
849 | struct scc_ports *ports = pci_get_drvdata(dev); | ||
850 | struct ide_host *host = ports->host; | ||
851 | |||
852 | ide_host_remove(host); | ||
853 | |||
854 | iounmap((void __iomem *)ports->dma); | ||
855 | iounmap((void __iomem *)ports->ctl); | ||
856 | pci_release_selected_regions(dev, (1 << 2) - 1); | ||
857 | memset(ports, 0, sizeof(*ports)); | ||
858 | } | ||
859 | |||
860 | static const struct pci_device_id scc_pci_tbl[] = { | ||
861 | { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 }, | ||
862 | { 0, }, | ||
863 | }; | ||
864 | MODULE_DEVICE_TABLE(pci, scc_pci_tbl); | ||
865 | |||
866 | static struct pci_driver scc_pci_driver = { | ||
867 | .name = "SCC IDE", | ||
868 | .id_table = scc_pci_tbl, | ||
869 | .probe = scc_init_one, | ||
870 | .remove = scc_remove, | ||
871 | }; | ||
872 | |||
873 | static int __init scc_ide_init(void) | ||
874 | { | ||
875 | return ide_pci_register_driver(&scc_pci_driver); | ||
876 | } | ||
877 | |||
878 | static void __exit scc_ide_exit(void) | ||
879 | { | ||
880 | pci_unregister_driver(&scc_pci_driver); | ||
881 | } | ||
882 | |||
883 | module_init(scc_ide_init); | ||
884 | module_exit(scc_ide_exit); | ||
885 | |||
886 | MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE"); | ||
887 | MODULE_LICENSE("GPL"); | ||
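
The 32-bit PIO helpers above (scc_input_data()/scc_output_data()) bump the byte count by one and then split it into whole 32-bit transfers plus at most one trailing 16-bit transfer. A stand-alone sketch of just that length arithmetic, using hypothetical names rather than the driver's MMIO accessors:

#include <stdio.h>

/* Mirror the length split in scc_input_data(): len is incremented once so an
 * odd byte count still reaches its final byte; for even counts the padding
 * byte is discarded again by the integer division and the (len & 3) test. */
static void split_pio_len(unsigned int len)
{
	unsigned int longwords, halfwords;

	len++;
	longwords = len / 4;            /* 32-bit transfers (insl/outsl) */
	halfwords = (len & 3) >= 2;     /* at most one trailing 16-bit transfer */

	printf("%u bytes -> %u x 4 + %u x 2\n", len - 1, longwords, halfwords);
}

int main(void)
{
	split_pio_len(512);     /* 128 longwords, no remainder */
	split_pio_len(510);     /* 127 longwords plus one halfword */
	split_pio_len(511);     /* odd count effectively rounds up to 512 bytes */
	return 0;
}
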
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index f80da50d84a5..38339d220d7f 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
@@ -472,13 +472,8 @@ int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac, | |||
472 | } sgid_addr, dgid_addr; | 472 | } sgid_addr, dgid_addr; |
473 | 473 | ||
474 | 474 | ||
475 | ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid); | 475 | rdma_gid2ip(&sgid_addr._sockaddr, sgid); |
476 | if (ret) | 476 | rdma_gid2ip(&dgid_addr._sockaddr, dgid); |
477 | return ret; | ||
478 | |||
479 | ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid); | ||
480 | if (ret) | ||
481 | return ret; | ||
482 | 477 | ||
483 | memset(&dev_addr, 0, sizeof(dev_addr)); | 478 | memset(&dev_addr, 0, sizeof(dev_addr)); |
484 | 479 | ||
@@ -512,10 +507,8 @@ int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id) | |||
512 | struct sockaddr_in6 _sockaddr_in6; | 507 | struct sockaddr_in6 _sockaddr_in6; |
513 | } gid_addr; | 508 | } gid_addr; |
514 | 509 | ||
515 | ret = rdma_gid2ip(&gid_addr._sockaddr, sgid); | 510 | rdma_gid2ip(&gid_addr._sockaddr, sgid); |
516 | 511 | ||
517 | if (ret) | ||
518 | return ret; | ||
519 | memset(&dev_addr, 0, sizeof(dev_addr)); | 512 | memset(&dev_addr, 0, sizeof(dev_addr)); |
520 | ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id); | 513 | ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id); |
521 | if (ret) | 514 | if (ret) |
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index e28a494e2a3a..0c1419105ff0 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
@@ -437,39 +437,38 @@ static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id) | |||
437 | return cm_id_priv; | 437 | return cm_id_priv; |
438 | } | 438 | } |
439 | 439 | ||
440 | static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask) | 440 | static void cm_mask_copy(u32 *dst, const u32 *src, const u32 *mask) |
441 | { | 441 | { |
442 | int i; | 442 | int i; |
443 | 443 | ||
444 | for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++) | 444 | for (i = 0; i < IB_CM_COMPARE_SIZE; i++) |
445 | ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] & | 445 | dst[i] = src[i] & mask[i]; |
446 | ((unsigned long *) mask)[i]; | ||
447 | } | 446 | } |
448 | 447 | ||
449 | static int cm_compare_data(struct ib_cm_compare_data *src_data, | 448 | static int cm_compare_data(struct ib_cm_compare_data *src_data, |
450 | struct ib_cm_compare_data *dst_data) | 449 | struct ib_cm_compare_data *dst_data) |
451 | { | 450 | { |
452 | u8 src[IB_CM_COMPARE_SIZE]; | 451 | u32 src[IB_CM_COMPARE_SIZE]; |
453 | u8 dst[IB_CM_COMPARE_SIZE]; | 452 | u32 dst[IB_CM_COMPARE_SIZE]; |
454 | 453 | ||
455 | if (!src_data || !dst_data) | 454 | if (!src_data || !dst_data) |
456 | return 0; | 455 | return 0; |
457 | 456 | ||
458 | cm_mask_copy(src, src_data->data, dst_data->mask); | 457 | cm_mask_copy(src, src_data->data, dst_data->mask); |
459 | cm_mask_copy(dst, dst_data->data, src_data->mask); | 458 | cm_mask_copy(dst, dst_data->data, src_data->mask); |
460 | return memcmp(src, dst, IB_CM_COMPARE_SIZE); | 459 | return memcmp(src, dst, sizeof(src)); |
461 | } | 460 | } |
462 | 461 | ||
463 | static int cm_compare_private_data(u8 *private_data, | 462 | static int cm_compare_private_data(u32 *private_data, |
464 | struct ib_cm_compare_data *dst_data) | 463 | struct ib_cm_compare_data *dst_data) |
465 | { | 464 | { |
466 | u8 src[IB_CM_COMPARE_SIZE]; | 465 | u32 src[IB_CM_COMPARE_SIZE]; |
467 | 466 | ||
468 | if (!dst_data) | 467 | if (!dst_data) |
469 | return 0; | 468 | return 0; |
470 | 469 | ||
471 | cm_mask_copy(src, private_data, dst_data->mask); | 470 | cm_mask_copy(src, private_data, dst_data->mask); |
472 | return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE); | 471 | return memcmp(src, dst_data->data, sizeof(src)); |
473 | } | 472 | } |
474 | 473 | ||
475 | /* | 474 | /* |
@@ -538,7 +537,7 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv) | |||
538 | 537 | ||
539 | static struct cm_id_private * cm_find_listen(struct ib_device *device, | 538 | static struct cm_id_private * cm_find_listen(struct ib_device *device, |
540 | __be64 service_id, | 539 | __be64 service_id, |
541 | u8 *private_data) | 540 | u32 *private_data) |
542 | { | 541 | { |
543 | struct rb_node *node = cm.listen_service_table.rb_node; | 542 | struct rb_node *node = cm.listen_service_table.rb_node; |
544 | struct cm_id_private *cm_id_priv; | 543 | struct cm_id_private *cm_id_priv; |
@@ -953,7 +952,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask, | |||
953 | cm_mask_copy(cm_id_priv->compare_data->data, | 952 | cm_mask_copy(cm_id_priv->compare_data->data, |
954 | compare_data->data, compare_data->mask); | 953 | compare_data->data, compare_data->mask); |
955 | memcpy(cm_id_priv->compare_data->mask, compare_data->mask, | 954 | memcpy(cm_id_priv->compare_data->mask, compare_data->mask, |
956 | IB_CM_COMPARE_SIZE); | 955 | sizeof(compare_data->mask)); |
957 | } | 956 | } |
958 | 957 | ||
959 | cm_id->state = IB_CM_LISTEN; | 958 | cm_id->state = IB_CM_LISTEN; |
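
The cm.c change above switches the listen compare buffers from byte arrays walked as unsigned long to u32 words, so the mask-and-compare is well defined regardless of the platform word size. A user-space sketch of the same shape; COMPARE_WORDS stands in for IB_CM_COMPARE_SIZE, which after this patch appears to count 32-bit words:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define COMPARE_WORDS 16	/* stand-in for IB_CM_COMPARE_SIZE */

/* dst[i] = src[i] & mask[i], word by word, like cm_mask_copy() */
static void mask_copy(uint32_t *dst, const uint32_t *src, const uint32_t *mask)
{
	int i;

	for (i = 0; i < COMPARE_WORDS; i++)
		dst[i] = src[i] & mask[i];
}

/* Two private_data blobs match when each side's data, masked by the other
 * side's mask, compares equal, mirroring cm_compare_data(). */
static int compare_data(const uint32_t *a_data, const uint32_t *a_mask,
			const uint32_t *b_data, const uint32_t *b_mask)
{
	uint32_t a[COMPARE_WORDS], b[COMPARE_WORDS];

	mask_copy(a, a_data, b_mask);
	mask_copy(b, b_data, a_mask);
	return memcmp(a, b, sizeof(a));
}

int main(void)
{
	uint32_t d1[COMPARE_WORDS] = { 0xdeadbeef }, m1[COMPARE_WORDS] = { 0xffff0000 };
	uint32_t d2[COMPARE_WORDS] = { 0xdead0000 }, m2[COMPARE_WORDS] = { 0xffff0000 };

	printf("match: %s\n", compare_data(d1, m1, d2, m2) ? "no" : "yes");
	return 0;
}
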
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h index be068f47e47e..8b76f0ef965e 100644 --- a/drivers/infiniband/core/cm_msgs.h +++ b/drivers/infiniband/core/cm_msgs.h | |||
@@ -103,7 +103,7 @@ struct cm_req_msg { | |||
103 | /* local ACK timeout:5, rsvd:3 */ | 103 | /* local ACK timeout:5, rsvd:3 */ |
104 | u8 alt_offset139; | 104 | u8 alt_offset139; |
105 | 105 | ||
106 | u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE]; | 106 | u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)]; |
107 | 107 | ||
108 | } __attribute__ ((packed)); | 108 | } __attribute__ ((packed)); |
109 | 109 | ||
@@ -801,7 +801,7 @@ struct cm_sidr_req_msg { | |||
801 | __be16 rsvd; | 801 | __be16 rsvd; |
802 | __be64 service_id; | 802 | __be64 service_id; |
803 | 803 | ||
804 | u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE]; | 804 | u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)]; |
805 | } __attribute__ ((packed)); | 805 | } __attribute__ ((packed)); |
806 | 806 | ||
807 | struct cm_sidr_rep_msg { | 807 | struct cm_sidr_rep_msg { |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index d570030d899c..06441a43c3aa 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -859,19 +859,27 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id | |||
859 | memcpy(&ib->sib_addr, &path->dgid, 16); | 859 | memcpy(&ib->sib_addr, &path->dgid, 16); |
860 | } | 860 | } |
861 | 861 | ||
862 | static __be16 ss_get_port(const struct sockaddr_storage *ss) | ||
863 | { | ||
864 | if (ss->ss_family == AF_INET) | ||
865 | return ((struct sockaddr_in *)ss)->sin_port; | ||
866 | else if (ss->ss_family == AF_INET6) | ||
867 | return ((struct sockaddr_in6 *)ss)->sin6_port; | ||
868 | BUG(); | ||
869 | } | ||
870 | |||
862 | static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, | 871 | static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, |
863 | struct cma_hdr *hdr) | 872 | struct cma_hdr *hdr) |
864 | { | 873 | { |
865 | struct sockaddr_in *listen4, *ip4; | 874 | struct sockaddr_in *ip4; |
866 | 875 | ||
867 | listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr; | ||
868 | ip4 = (struct sockaddr_in *) &id->route.addr.src_addr; | 876 | ip4 = (struct sockaddr_in *) &id->route.addr.src_addr; |
869 | ip4->sin_family = listen4->sin_family; | 877 | ip4->sin_family = AF_INET; |
870 | ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr; | 878 | ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr; |
871 | ip4->sin_port = listen4->sin_port; | 879 | ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr); |
872 | 880 | ||
873 | ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr; | 881 | ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr; |
874 | ip4->sin_family = listen4->sin_family; | 882 | ip4->sin_family = AF_INET; |
875 | ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr; | 883 | ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr; |
876 | ip4->sin_port = hdr->port; | 884 | ip4->sin_port = hdr->port; |
877 | } | 885 | } |
@@ -879,16 +887,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i | |||
879 | static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, | 887 | static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id, |
880 | struct cma_hdr *hdr) | 888 | struct cma_hdr *hdr) |
881 | { | 889 | { |
882 | struct sockaddr_in6 *listen6, *ip6; | 890 | struct sockaddr_in6 *ip6; |
883 | 891 | ||
884 | listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr; | ||
885 | ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr; | 892 | ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr; |
886 | ip6->sin6_family = listen6->sin6_family; | 893 | ip6->sin6_family = AF_INET6; |
887 | ip6->sin6_addr = hdr->dst_addr.ip6; | 894 | ip6->sin6_addr = hdr->dst_addr.ip6; |
888 | ip6->sin6_port = listen6->sin6_port; | 895 | ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr); |
889 | 896 | ||
890 | ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr; | 897 | ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr; |
891 | ip6->sin6_family = listen6->sin6_family; | 898 | ip6->sin6_family = AF_INET6; |
892 | ip6->sin6_addr = hdr->src_addr.ip6; | 899 | ip6->sin6_addr = hdr->src_addr.ip6; |
893 | ip6->sin6_port = hdr->port; | 900 | ip6->sin6_port = hdr->port; |
894 | } | 901 | } |
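
The new ss_get_port() helper lets cma_save_ip4_info() and cma_save_ip6_info() take the listener's port straight from its struct sockaddr_storage, without assuming the listener uses the same address family as the incoming header. A plain BSD-sockets sketch of the same idea (user-space types, not the kernel's):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return the port (network byte order) for an AF_INET or AF_INET6 address. */
static in_port_t ss_port(const struct sockaddr_storage *ss)
{
	if (ss->ss_family == AF_INET)
		return ((const struct sockaddr_in *)ss)->sin_port;
	if (ss->ss_family == AF_INET6)
		return ((const struct sockaddr_in6 *)ss)->sin6_port;
	abort();	/* the kernel helper calls BUG() here */
}

int main(void)
{
	struct sockaddr_storage ss;
	struct sockaddr_in *v4 = (struct sockaddr_in *)&ss;

	memset(&ss, 0, sizeof(ss));
	v4->sin_family = AF_INET;
	v4->sin_port = htons(4791);
	printf("port %u\n", ntohs(ss_port(&ss)));
	return 0;
}
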
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c index b85ddbc979e0..ab081702566f 100644 --- a/drivers/infiniband/core/iwpm_msg.c +++ b/drivers/infiniband/core/iwpm_msg.c | |||
@@ -468,7 +468,8 @@ add_mapping_response_exit: | |||
468 | } | 468 | } |
469 | EXPORT_SYMBOL(iwpm_add_mapping_cb); | 469 | EXPORT_SYMBOL(iwpm_add_mapping_cb); |
470 | 470 | ||
471 | /* netlink attribute policy for the response to add and query mapping request */ | 471 | /* netlink attribute policy for the response to add and query mapping request |
472 |  * and for the message carrying the remote peer's address info */ | ||
472 | static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = { | 473 | static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = { |
473 | [IWPM_NLA_QUERY_MAPPING_SEQ] = { .type = NLA_U32 }, | 474 | [IWPM_NLA_QUERY_MAPPING_SEQ] = { .type = NLA_U32 }, |
474 | [IWPM_NLA_QUERY_LOCAL_ADDR] = { .len = sizeof(struct sockaddr_storage) }, | 475 | [IWPM_NLA_QUERY_LOCAL_ADDR] = { .len = sizeof(struct sockaddr_storage) }, |
@@ -559,6 +560,76 @@ query_mapping_response_exit: | |||
559 | } | 560 | } |
560 | EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb); | 561 | EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb); |
561 | 562 | ||
563 | /* | ||
564 | * iwpm_remote_info_cb - Process a port mapper message, containing | ||
565 | * the remote connecting peer address info | ||
566 | */ | ||
567 | int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb) | ||
568 | { | ||
569 | struct nlattr *nltb[IWPM_NLA_RQUERY_MAPPING_MAX]; | ||
570 | struct sockaddr_storage *local_sockaddr, *remote_sockaddr; | ||
571 | struct sockaddr_storage *mapped_loc_sockaddr, *mapped_rem_sockaddr; | ||
572 | struct iwpm_remote_info *rem_info; | ||
573 | const char *msg_type; | ||
574 | u8 nl_client; | ||
575 | int ret = -EINVAL; | ||
576 | |||
577 | msg_type = "Remote Mapping info"; | ||
578 | if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX, | ||
579 | resp_query_policy, nltb, msg_type)) | ||
580 | return ret; | ||
581 | |||
582 | nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type); | ||
583 | if (!iwpm_valid_client(nl_client)) { | ||
584 | pr_info("%s: Invalid port mapper client = %d\n", | ||
585 | __func__, nl_client); | ||
586 | return ret; | ||
587 | } | ||
588 | atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq); | ||
589 | |||
590 | local_sockaddr = (struct sockaddr_storage *) | ||
591 | nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]); | ||
592 | remote_sockaddr = (struct sockaddr_storage *) | ||
593 | nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]); | ||
594 | mapped_loc_sockaddr = (struct sockaddr_storage *) | ||
595 | nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]); | ||
596 | mapped_rem_sockaddr = (struct sockaddr_storage *) | ||
597 | nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_REM_ADDR]); | ||
598 | |||
599 | if (mapped_loc_sockaddr->ss_family != local_sockaddr->ss_family || | ||
600 | mapped_rem_sockaddr->ss_family != remote_sockaddr->ss_family) { | ||
601 | pr_info("%s: Sockaddr family doesn't match the requested one\n", | ||
602 | __func__); | ||
603 | return ret; | ||
604 | } | ||
605 | rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC); | ||
606 | if (!rem_info) { | ||
607 | pr_err("%s: Unable to allocate a remote info\n", __func__); | ||
608 | ret = -ENOMEM; | ||
609 | return ret; | ||
610 | } | ||
611 | memcpy(&rem_info->mapped_loc_sockaddr, mapped_loc_sockaddr, | ||
612 | sizeof(struct sockaddr_storage)); | ||
613 | memcpy(&rem_info->remote_sockaddr, remote_sockaddr, | ||
614 | sizeof(struct sockaddr_storage)); | ||
615 | memcpy(&rem_info->mapped_rem_sockaddr, mapped_rem_sockaddr, | ||
616 | sizeof(struct sockaddr_storage)); | ||
617 | rem_info->nl_client = nl_client; | ||
618 | |||
619 | iwpm_add_remote_info(rem_info); | ||
620 | |||
621 | iwpm_print_sockaddr(local_sockaddr, | ||
622 | "remote_info: Local sockaddr:"); | ||
623 | iwpm_print_sockaddr(mapped_loc_sockaddr, | ||
624 | "remote_info: Mapped local sockaddr:"); | ||
625 | iwpm_print_sockaddr(remote_sockaddr, | ||
626 | "remote_info: Remote sockaddr:"); | ||
627 | iwpm_print_sockaddr(mapped_rem_sockaddr, | ||
628 | "remote_info: Mapped remote sockaddr:"); | ||
629 | return ret; | ||
630 | } | ||
631 | EXPORT_SYMBOL(iwpm_remote_info_cb); | ||
632 | |||
562 | /* netlink attribute policy for the received request for mapping info */ | 633 | /* netlink attribute policy for the received request for mapping info */ |
563 | static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = { | 634 | static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = { |
564 | [IWPM_NLA_MAPINFO_ULIB_NAME] = { .type = NLA_STRING, | 635 | [IWPM_NLA_MAPINFO_ULIB_NAME] = { .type = NLA_STRING, |
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index 69e9f84c1605..a626795bf9c7 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c | |||
@@ -33,8 +33,10 @@ | |||
33 | 33 | ||
34 | #include "iwpm_util.h" | 34 | #include "iwpm_util.h" |
35 | 35 | ||
36 | #define IWPM_HASH_BUCKET_SIZE 512 | 36 | #define IWPM_MAPINFO_HASH_SIZE 512 |
37 | #define IWPM_HASH_BUCKET_MASK (IWPM_HASH_BUCKET_SIZE - 1) | 37 | #define IWPM_MAPINFO_HASH_MASK (IWPM_MAPINFO_HASH_SIZE - 1) |
38 | #define IWPM_REMINFO_HASH_SIZE 64 | ||
39 | #define IWPM_REMINFO_HASH_MASK (IWPM_REMINFO_HASH_SIZE - 1) | ||
38 | 40 | ||
39 | static LIST_HEAD(iwpm_nlmsg_req_list); | 41 | static LIST_HEAD(iwpm_nlmsg_req_list); |
40 | static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock); | 42 | static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock); |
@@ -42,31 +44,49 @@ static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock); | |||
42 | static struct hlist_head *iwpm_hash_bucket; | 44 | static struct hlist_head *iwpm_hash_bucket; |
43 | static DEFINE_SPINLOCK(iwpm_mapinfo_lock); | 45 | static DEFINE_SPINLOCK(iwpm_mapinfo_lock); |
44 | 46 | ||
47 | static struct hlist_head *iwpm_reminfo_bucket; | ||
48 | static DEFINE_SPINLOCK(iwpm_reminfo_lock); | ||
49 | |||
45 | static DEFINE_MUTEX(iwpm_admin_lock); | 50 | static DEFINE_MUTEX(iwpm_admin_lock); |
46 | static struct iwpm_admin_data iwpm_admin; | 51 | static struct iwpm_admin_data iwpm_admin; |
47 | 52 | ||
48 | int iwpm_init(u8 nl_client) | 53 | int iwpm_init(u8 nl_client) |
49 | { | 54 | { |
55 | int ret = 0; | ||
50 | if (iwpm_valid_client(nl_client)) | 56 | if (iwpm_valid_client(nl_client)) |
51 | return -EINVAL; | 57 | return -EINVAL; |
52 | mutex_lock(&iwpm_admin_lock); | 58 | mutex_lock(&iwpm_admin_lock); |
53 | if (atomic_read(&iwpm_admin.refcount) == 0) { | 59 | if (atomic_read(&iwpm_admin.refcount) == 0) { |
54 | iwpm_hash_bucket = kzalloc(IWPM_HASH_BUCKET_SIZE * | 60 | iwpm_hash_bucket = kzalloc(IWPM_MAPINFO_HASH_SIZE * |
55 | sizeof(struct hlist_head), GFP_KERNEL); | 61 | sizeof(struct hlist_head), GFP_KERNEL); |
56 | if (!iwpm_hash_bucket) { | 62 | if (!iwpm_hash_bucket) { |
57 | mutex_unlock(&iwpm_admin_lock); | 63 | ret = -ENOMEM; |
58 | pr_err("%s Unable to create mapinfo hash table\n", __func__); | 64 | pr_err("%s Unable to create mapinfo hash table\n", __func__); |
59 | return -ENOMEM; | 65 | goto init_exit; |
66 | } | ||
67 | iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE * | ||
68 | sizeof(struct hlist_head), GFP_KERNEL); | ||
69 | if (!iwpm_reminfo_bucket) { | ||
70 | kfree(iwpm_hash_bucket); | ||
71 | ret = -ENOMEM; | ||
72 | pr_err("%s Unable to create reminfo hash table\n", __func__); | ||
73 | goto init_exit; | ||
60 | } | 74 | } |
61 | } | 75 | } |
62 | atomic_inc(&iwpm_admin.refcount); | 76 | atomic_inc(&iwpm_admin.refcount); |
77 | init_exit: | ||
63 | mutex_unlock(&iwpm_admin_lock); | 78 | mutex_unlock(&iwpm_admin_lock); |
64 | iwpm_set_valid(nl_client, 1); | 79 | if (!ret) { |
65 | return 0; | 80 | iwpm_set_valid(nl_client, 1); |
81 | pr_debug("%s: Mapinfo and reminfo tables are created\n", | ||
82 | __func__); | ||
83 | } | ||
84 | return ret; | ||
66 | } | 85 | } |
67 | EXPORT_SYMBOL(iwpm_init); | 86 | EXPORT_SYMBOL(iwpm_init); |
68 | 87 | ||
69 | static void free_hash_bucket(void); | 88 | static void free_hash_bucket(void); |
89 | static void free_reminfo_bucket(void); | ||
70 | 90 | ||
71 | int iwpm_exit(u8 nl_client) | 91 | int iwpm_exit(u8 nl_client) |
72 | { | 92 | { |
@@ -81,7 +101,8 @@ int iwpm_exit(u8 nl_client) | |||
81 | } | 101 | } |
82 | if (atomic_dec_and_test(&iwpm_admin.refcount)) { | 102 | if (atomic_dec_and_test(&iwpm_admin.refcount)) { |
83 | free_hash_bucket(); | 103 | free_hash_bucket(); |
84 | pr_debug("%s: Mapinfo hash table is destroyed\n", __func__); | 104 | free_reminfo_bucket(); |
105 | pr_debug("%s: Resources are destroyed\n", __func__); | ||
85 | } | 106 | } |
86 | mutex_unlock(&iwpm_admin_lock); | 107 | mutex_unlock(&iwpm_admin_lock); |
87 | iwpm_set_valid(nl_client, 0); | 108 | iwpm_set_valid(nl_client, 0); |
@@ -89,7 +110,7 @@ int iwpm_exit(u8 nl_client) | |||
89 | } | 110 | } |
90 | EXPORT_SYMBOL(iwpm_exit); | 111 | EXPORT_SYMBOL(iwpm_exit); |
91 | 112 | ||
92 | static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage *, | 113 | static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *, |
93 | struct sockaddr_storage *); | 114 | struct sockaddr_storage *); |
94 | 115 | ||
95 | int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, | 116 | int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, |
@@ -99,9 +120,10 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, | |||
99 | struct hlist_head *hash_bucket_head; | 120 | struct hlist_head *hash_bucket_head; |
100 | struct iwpm_mapping_info *map_info; | 121 | struct iwpm_mapping_info *map_info; |
101 | unsigned long flags; | 122 | unsigned long flags; |
123 | int ret = -EINVAL; | ||
102 | 124 | ||
103 | if (!iwpm_valid_client(nl_client)) | 125 | if (!iwpm_valid_client(nl_client)) |
104 | return -EINVAL; | 126 | return ret; |
105 | map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL); | 127 | map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL); |
106 | if (!map_info) { | 128 | if (!map_info) { |
107 | pr_err("%s: Unable to allocate a mapping info\n", __func__); | 129 | pr_err("%s: Unable to allocate a mapping info\n", __func__); |
@@ -115,13 +137,16 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr, | |||
115 | 137 | ||
116 | spin_lock_irqsave(&iwpm_mapinfo_lock, flags); | 138 | spin_lock_irqsave(&iwpm_mapinfo_lock, flags); |
117 | if (iwpm_hash_bucket) { | 139 | if (iwpm_hash_bucket) { |
118 | hash_bucket_head = get_hash_bucket_head( | 140 | hash_bucket_head = get_mapinfo_hash_bucket( |
119 | &map_info->local_sockaddr, | 141 | &map_info->local_sockaddr, |
120 | &map_info->mapped_sockaddr); | 142 | &map_info->mapped_sockaddr); |
121 | hlist_add_head(&map_info->hlist_node, hash_bucket_head); | 143 | if (hash_bucket_head) { |
144 | hlist_add_head(&map_info->hlist_node, hash_bucket_head); | ||
145 | ret = 0; | ||
146 | } | ||
122 | } | 147 | } |
123 | spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); | 148 | spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); |
124 | return 0; | 149 | return ret; |
125 | } | 150 | } |
126 | EXPORT_SYMBOL(iwpm_create_mapinfo); | 151 | EXPORT_SYMBOL(iwpm_create_mapinfo); |
127 | 152 | ||
@@ -136,9 +161,12 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr, | |||
136 | 161 | ||
137 | spin_lock_irqsave(&iwpm_mapinfo_lock, flags); | 162 | spin_lock_irqsave(&iwpm_mapinfo_lock, flags); |
138 | if (iwpm_hash_bucket) { | 163 | if (iwpm_hash_bucket) { |
139 | hash_bucket_head = get_hash_bucket_head( | 164 | hash_bucket_head = get_mapinfo_hash_bucket( |
140 | local_sockaddr, | 165 | local_sockaddr, |
141 | mapped_local_addr); | 166 | mapped_local_addr); |
167 | if (!hash_bucket_head) | ||
168 | goto remove_mapinfo_exit; | ||
169 | |||
142 | hlist_for_each_entry_safe(map_info, tmp_hlist_node, | 170 | hlist_for_each_entry_safe(map_info, tmp_hlist_node, |
143 | hash_bucket_head, hlist_node) { | 171 | hash_bucket_head, hlist_node) { |
144 | 172 | ||
@@ -152,6 +180,7 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr, | |||
152 | } | 180 | } |
153 | } | 181 | } |
154 | } | 182 | } |
183 | remove_mapinfo_exit: | ||
155 | spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); | 184 | spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); |
156 | return ret; | 185 | return ret; |
157 | } | 186 | } |
@@ -166,7 +195,7 @@ static void free_hash_bucket(void) | |||
166 | 195 | ||
167 | /* remove all the mapinfo data from the list */ | 196 | /* remove all the mapinfo data from the list */ |
168 | spin_lock_irqsave(&iwpm_mapinfo_lock, flags); | 197 | spin_lock_irqsave(&iwpm_mapinfo_lock, flags); |
169 | for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) { | 198 | for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) { |
170 | hlist_for_each_entry_safe(map_info, tmp_hlist_node, | 199 | hlist_for_each_entry_safe(map_info, tmp_hlist_node, |
171 | &iwpm_hash_bucket[i], hlist_node) { | 200 | &iwpm_hash_bucket[i], hlist_node) { |
172 | 201 | ||
@@ -180,6 +209,96 @@ static void free_hash_bucket(void) | |||
180 | spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); | 209 | spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags); |
181 | } | 210 | } |
182 | 211 | ||
212 | static void free_reminfo_bucket(void) | ||
213 | { | ||
214 | struct hlist_node *tmp_hlist_node; | ||
215 | struct iwpm_remote_info *rem_info; | ||
216 | unsigned long flags; | ||
217 | int i; | ||
218 | |||
219 | /* remove all the remote info from the list */ | ||
220 | spin_lock_irqsave(&iwpm_reminfo_lock, flags); | ||
221 | for (i = 0; i < IWPM_REMINFO_HASH_SIZE; i++) { | ||
222 | hlist_for_each_entry_safe(rem_info, tmp_hlist_node, | ||
223 | &iwpm_reminfo_bucket[i], hlist_node) { | ||
224 | |||
225 | hlist_del_init(&rem_info->hlist_node); | ||
226 | kfree(rem_info); | ||
227 | } | ||
228 | } | ||
229 | /* free the hash list */ | ||
230 | kfree(iwpm_reminfo_bucket); | ||
231 | iwpm_reminfo_bucket = NULL; | ||
232 | spin_unlock_irqrestore(&iwpm_reminfo_lock, flags); | ||
233 | } | ||
234 | |||
235 | static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage *, | ||
236 | struct sockaddr_storage *); | ||
237 | |||
238 | void iwpm_add_remote_info(struct iwpm_remote_info *rem_info) | ||
239 | { | ||
240 | struct hlist_head *hash_bucket_head; | ||
241 | unsigned long flags; | ||
242 | |||
243 | spin_lock_irqsave(&iwpm_reminfo_lock, flags); | ||
244 | if (iwpm_reminfo_bucket) { | ||
245 | hash_bucket_head = get_reminfo_hash_bucket( | ||
246 | &rem_info->mapped_loc_sockaddr, | ||
247 | &rem_info->mapped_rem_sockaddr); | ||
248 | if (hash_bucket_head) | ||
249 | hlist_add_head(&rem_info->hlist_node, hash_bucket_head); | ||
250 | } | ||
251 | spin_unlock_irqrestore(&iwpm_reminfo_lock, flags); | ||
252 | } | ||
253 | |||
254 | int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr, | ||
255 | struct sockaddr_storage *mapped_rem_addr, | ||
256 | struct sockaddr_storage *remote_addr, | ||
257 | u8 nl_client) | ||
258 | { | ||
259 | struct hlist_node *tmp_hlist_node; | ||
260 | struct hlist_head *hash_bucket_head; | ||
261 | struct iwpm_remote_info *rem_info = NULL; | ||
262 | unsigned long flags; | ||
263 | int ret = -EINVAL; | ||
264 | |||
265 | if (!iwpm_valid_client(nl_client)) { | ||
266 | pr_info("%s: Invalid client = %d\n", __func__, nl_client); | ||
267 | return ret; | ||
268 | } | ||
269 | spin_lock_irqsave(&iwpm_reminfo_lock, flags); | ||
270 | if (iwpm_reminfo_bucket) { | ||
271 | hash_bucket_head = get_reminfo_hash_bucket( | ||
272 | mapped_loc_addr, | ||
273 | mapped_rem_addr); | ||
274 | if (!hash_bucket_head) | ||
275 | goto get_remote_info_exit; | ||
276 | hlist_for_each_entry_safe(rem_info, tmp_hlist_node, | ||
277 | hash_bucket_head, hlist_node) { | ||
278 | |||
279 | if (!iwpm_compare_sockaddr(&rem_info->mapped_loc_sockaddr, | ||
280 | mapped_loc_addr) && | ||
281 | !iwpm_compare_sockaddr(&rem_info->mapped_rem_sockaddr, | ||
282 | mapped_rem_addr)) { | ||
283 | |||
284 | memcpy(remote_addr, &rem_info->remote_sockaddr, | ||
285 | sizeof(struct sockaddr_storage)); | ||
286 | iwpm_print_sockaddr(remote_addr, | ||
287 | "get_remote_info: Remote sockaddr:"); | ||
288 | |||
289 | hlist_del_init(&rem_info->hlist_node); | ||
290 | kfree(rem_info); | ||
291 | ret = 0; | ||
292 | break; | ||
293 | } | ||
294 | } | ||
295 | } | ||
296 | get_remote_info_exit: | ||
297 | spin_unlock_irqrestore(&iwpm_reminfo_lock, flags); | ||
298 | return ret; | ||
299 | } | ||
300 | EXPORT_SYMBOL(iwpm_get_remote_info); | ||
301 | |||
183 | struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq, | 302 | struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq, |
184 | u8 nl_client, gfp_t gfp) | 303 | u8 nl_client, gfp_t gfp) |
185 | { | 304 | { |
@@ -409,31 +528,54 @@ static u32 iwpm_ipv4_jhash(struct sockaddr_in *ipv4_sockaddr) | |||
409 | return hash; | 528 | return hash; |
410 | } | 529 | } |
411 | 530 | ||
412 | static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage | 531 | static int get_hash_bucket(struct sockaddr_storage *a_sockaddr, |
413 | *local_sockaddr, | 532 | struct sockaddr_storage *b_sockaddr, u32 *hash) |
414 | struct sockaddr_storage | ||
415 | *mapped_sockaddr) | ||
416 | { | 533 | { |
417 | u32 local_hash, mapped_hash, hash; | 534 | u32 a_hash, b_hash; |
418 | 535 | ||
419 | if (local_sockaddr->ss_family == AF_INET) { | 536 | if (a_sockaddr->ss_family == AF_INET) { |
420 | local_hash = iwpm_ipv4_jhash((struct sockaddr_in *) local_sockaddr); | 537 | a_hash = iwpm_ipv4_jhash((struct sockaddr_in *) a_sockaddr); |
421 | mapped_hash = iwpm_ipv4_jhash((struct sockaddr_in *) mapped_sockaddr); | 538 | b_hash = iwpm_ipv4_jhash((struct sockaddr_in *) b_sockaddr); |
422 | 539 | ||
423 | } else if (local_sockaddr->ss_family == AF_INET6) { | 540 | } else if (a_sockaddr->ss_family == AF_INET6) { |
424 | local_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) local_sockaddr); | 541 | a_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) a_sockaddr); |
425 | mapped_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) mapped_sockaddr); | 542 | b_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) b_sockaddr); |
426 | } else { | 543 | } else { |
427 | pr_err("%s: Invalid sockaddr family\n", __func__); | 544 | pr_err("%s: Invalid sockaddr family\n", __func__); |
428 | return NULL; | 545 | return -EINVAL; |
429 | } | 546 | } |
430 | 547 | ||
431 | if (local_hash == mapped_hash) /* if port mapper isn't available */ | 548 | if (a_hash == b_hash) /* if port mapper isn't available */ |
432 | hash = local_hash; | 549 | *hash = a_hash; |
433 | else | 550 | else |
434 | hash = jhash_2words(local_hash, mapped_hash, 0); | 551 | *hash = jhash_2words(a_hash, b_hash, 0); |
552 | return 0; | ||
553 | } | ||
554 | |||
555 | static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage | ||
556 | *local_sockaddr, struct sockaddr_storage | ||
557 | *mapped_sockaddr) | ||
558 | { | ||
559 | u32 hash; | ||
560 | int ret; | ||
435 | 561 | ||
436 | return &iwpm_hash_bucket[hash & IWPM_HASH_BUCKET_MASK]; | 562 | ret = get_hash_bucket(local_sockaddr, mapped_sockaddr, &hash); |
563 | if (ret) | ||
564 | return NULL; | ||
565 | return &iwpm_hash_bucket[hash & IWPM_MAPINFO_HASH_MASK]; | ||
566 | } | ||
567 | |||
568 | static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage | ||
569 | *mapped_loc_sockaddr, struct sockaddr_storage | ||
570 | *mapped_rem_sockaddr) | ||
571 | { | ||
572 | u32 hash; | ||
573 | int ret; | ||
574 | |||
575 | ret = get_hash_bucket(mapped_loc_sockaddr, mapped_rem_sockaddr, &hash); | ||
576 | if (ret) | ||
577 | return NULL; | ||
578 | return &iwpm_reminfo_bucket[hash & IWPM_REMINFO_HASH_MASK]; | ||
437 | } | 579 | } |
438 | 580 | ||
439 | static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid) | 581 | static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid) |
@@ -512,7 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid) | |||
512 | } | 654 | } |
513 | skb_num++; | 655 | skb_num++; |
514 | spin_lock_irqsave(&iwpm_mapinfo_lock, flags); | 656 | spin_lock_irqsave(&iwpm_mapinfo_lock, flags); |
515 | for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) { | 657 | for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) { |
516 | hlist_for_each_entry(map_info, &iwpm_hash_bucket[i], | 658 | hlist_for_each_entry(map_info, &iwpm_hash_bucket[i], |
517 | hlist_node) { | 659 | hlist_node) { |
518 | if (map_info->nl_client != nl_client) | 660 | if (map_info->nl_client != nl_client) |
@@ -595,7 +737,7 @@ int iwpm_mapinfo_available(void) | |||
595 | 737 | ||
596 | spin_lock_irqsave(&iwpm_mapinfo_lock, flags); | 738 | spin_lock_irqsave(&iwpm_mapinfo_lock, flags); |
597 | if (iwpm_hash_bucket) { | 739 | if (iwpm_hash_bucket) { |
598 | for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) { | 740 | for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) { |
599 | if (!hlist_empty(&iwpm_hash_bucket[i])) { | 741 | if (!hlist_empty(&iwpm_hash_bucket[i])) { |
600 | full_bucket = 1; | 742 | full_bucket = 1; |
601 | break; | 743 | break; |
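
For reference, the reworked bucket selection above hashes each sockaddr on its own and only mixes the two hashes when they differ (identical hashes are taken to mean no mapping was performed), then masks the result into a power-of-two table. A small sketch of that selection logic; mix2() is only a placeholder for the kernel's jhash_2words():

#include <stdint.h>
#include <stdio.h>

#define MAPINFO_HASH_SIZE 512	/* power of two, like IWPM_MAPINFO_HASH_SIZE */
#define MAPINFO_HASH_MASK (MAPINFO_HASH_SIZE - 1)

/* Placeholder mixing step; the driver code uses jhash()/jhash_2words(). */
static uint32_t mix2(uint32_t a, uint32_t b)
{
	uint32_t h = a * 0x9e3779b9u;

	h ^= b + 0x7f4a7c15u + (h << 6) + (h >> 2);
	return h;
}

/* Same shape as get_hash_bucket(): combine the two per-address hashes only
 * when they differ, then mask the result into the bucket array. */
static unsigned int bucket_index(uint32_t local_hash, uint32_t mapped_hash)
{
	uint32_t hash;

	if (local_hash == mapped_hash)
		hash = local_hash;
	else
		hash = mix2(local_hash, mapped_hash);
	return hash & MAPINFO_HASH_MASK;
}

int main(void)
{
	printf("bucket %u\n", bucket_index(0x12345678u, 0x12345678u));
	printf("bucket %u\n", bucket_index(0x12345678u, 0x9abcdef0u));
	return 0;
}
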
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h index 9777c869a140..ee2d9ff095be 100644 --- a/drivers/infiniband/core/iwpm_util.h +++ b/drivers/infiniband/core/iwpm_util.h | |||
@@ -76,6 +76,14 @@ struct iwpm_mapping_info { | |||
76 | u8 nl_client; | 76 | u8 nl_client; |
77 | }; | 77 | }; |
78 | 78 | ||
79 | struct iwpm_remote_info { | ||
80 | struct hlist_node hlist_node; | ||
81 | struct sockaddr_storage remote_sockaddr; | ||
82 | struct sockaddr_storage mapped_loc_sockaddr; | ||
83 | struct sockaddr_storage mapped_rem_sockaddr; | ||
84 | u8 nl_client; | ||
85 | }; | ||
86 | |||
79 | struct iwpm_admin_data { | 87 | struct iwpm_admin_data { |
80 | atomic_t refcount; | 88 | atomic_t refcount; |
81 | atomic_t nlmsg_seq; | 89 | atomic_t nlmsg_seq; |
@@ -128,6 +136,13 @@ int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request); | |||
128 | int iwpm_get_nlmsg_seq(void); | 136 | int iwpm_get_nlmsg_seq(void); |
129 | 137 | ||
130 | /** | 138 | /** |
139 |  * iwpm_add_remote_info - Add remote address info of the connecting peer | ||
140 | * to the remote info hash table | ||
141 | * @reminfo: The remote info to be added | ||
142 | */ | ||
143 | void iwpm_add_remote_info(struct iwpm_remote_info *reminfo); | ||
144 | |||
145 | /** | ||
131 | * iwpm_valid_client - Check if the port mapper client is valid | 146 | * iwpm_valid_client - Check if the port mapper client is valid |
132 | * @nl_client: The index of the netlink client | 147 | * @nl_client: The index of the netlink client |
133 | * | 148 | * |
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 8b8cc6fa0ab0..40becdb3196e 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c | |||
@@ -446,7 +446,6 @@ static int ib_umem_odp_map_dma_single_page( | |||
446 | int remove_existing_mapping = 0; | 446 | int remove_existing_mapping = 0; |
447 | int ret = 0; | 447 | int ret = 0; |
448 | 448 | ||
449 | mutex_lock(&umem->odp_data->umem_mutex); | ||
450 | /* | 449 | /* |
451 | * Note: we avoid writing if seq is different from the initial seq, to | 450 | * Note: we avoid writing if seq is different from the initial seq, to |
452 | * handle case of a racing notifier. This check also allows us to bail | 451 | * handle case of a racing notifier. This check also allows us to bail |
@@ -479,8 +478,6 @@ static int ib_umem_odp_map_dma_single_page( | |||
479 | } | 478 | } |
480 | 479 | ||
481 | out: | 480 | out: |
482 | mutex_unlock(&umem->odp_data->umem_mutex); | ||
483 | |||
484 | /* On Demand Paging - avoid pinning the page */ | 481 | /* On Demand Paging - avoid pinning the page */ |
485 | if (umem->context->invalidate_range || !stored_page) | 482 | if (umem->context->invalidate_range || !stored_page) |
486 | put_page(page); | 483 | put_page(page); |
@@ -586,6 +583,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, | |||
586 | 583 | ||
587 | bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt); | 584 | bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt); |
588 | user_virt += npages << PAGE_SHIFT; | 585 | user_virt += npages << PAGE_SHIFT; |
586 | mutex_lock(&umem->odp_data->umem_mutex); | ||
589 | for (j = 0; j < npages; ++j) { | 587 | for (j = 0; j < npages; ++j) { |
590 | ret = ib_umem_odp_map_dma_single_page( | 588 | ret = ib_umem_odp_map_dma_single_page( |
591 | umem, k, base_virt_addr, local_page_list[j], | 589 | umem, k, base_virt_addr, local_page_list[j], |
@@ -594,6 +592,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt, | |||
594 | break; | 592 | break; |
595 | k++; | 593 | k++; |
596 | } | 594 | } |
595 | mutex_unlock(&umem->odp_data->umem_mutex); | ||
597 | 596 | ||
598 | if (ret < 0) { | 597 | if (ret < 0) { |
599 | /* Release left over pages when handling errors. */ | 598 | /* Release left over pages when handling errors. */ |
@@ -633,12 +632,11 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt, | |||
633 | * faults from completion. We might be racing with other | 632 | * faults from completion. We might be racing with other |
634 | * invalidations, so we must make sure we free each page only | 633 | * invalidations, so we must make sure we free each page only |
635 | * once. */ | 634 | * once. */ |
635 | mutex_lock(&umem->odp_data->umem_mutex); | ||
636 | for (addr = virt; addr < bound; addr += (u64)umem->page_size) { | 636 | for (addr = virt; addr < bound; addr += (u64)umem->page_size) { |
637 | idx = (addr - ib_umem_start(umem)) / PAGE_SIZE; | 637 | idx = (addr - ib_umem_start(umem)) / PAGE_SIZE; |
638 | mutex_lock(&umem->odp_data->umem_mutex); | ||
639 | if (umem->odp_data->page_list[idx]) { | 638 | if (umem->odp_data->page_list[idx]) { |
640 | struct page *page = umem->odp_data->page_list[idx]; | 639 | struct page *page = umem->odp_data->page_list[idx]; |
641 | struct page *head_page = compound_head(page); | ||
642 | dma_addr_t dma = umem->odp_data->dma_list[idx]; | 640 | dma_addr_t dma = umem->odp_data->dma_list[idx]; |
643 | dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK; | 641 | dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK; |
644 | 642 | ||
@@ -646,7 +644,8 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt, | |||
646 | 644 | ||
647 | ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE, | 645 | ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE, |
648 | DMA_BIDIRECTIONAL); | 646 | DMA_BIDIRECTIONAL); |
649 | if (dma & ODP_WRITE_ALLOWED_BIT) | 647 | if (dma & ODP_WRITE_ALLOWED_BIT) { |
648 | struct page *head_page = compound_head(page); | ||
650 | /* | 649 | /* |
651 | * set_page_dirty prefers being called with | 650 | * set_page_dirty prefers being called with |
652 | * the page lock. However, MMU notifiers are | 651 | * the page lock. However, MMU notifiers are |
@@ -657,13 +656,14 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt, | |||
657 | * be removed. | 656 | * be removed. |
658 | */ | 657 | */ |
659 | set_page_dirty(head_page); | 658 | set_page_dirty(head_page); |
659 | } | ||
660 | /* on demand pinning support */ | 660 | /* on demand pinning support */ |
661 | if (!umem->context->invalidate_range) | 661 | if (!umem->context->invalidate_range) |
662 | put_page(page); | 662 | put_page(page); |
663 | umem->odp_data->page_list[idx] = NULL; | 663 | umem->odp_data->page_list[idx] = NULL; |
664 | umem->odp_data->dma_list[idx] = 0; | 664 | umem->odp_data->dma_list[idx] = 0; |
665 | } | 665 | } |
666 | mutex_unlock(&umem->odp_data->umem_mutex); | ||
667 | } | 666 | } |
667 | mutex_unlock(&umem->odp_data->umem_mutex); | ||
668 | } | 668 | } |
669 | EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages); | 669 | EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages); |
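
The umem_odp.c hunks above hoist umem_mutex out to the callers, so one lock/unlock now covers a whole batch of pages instead of each page taking and dropping the mutex on its own. A generic pthreads sketch of that hoisting pattern (illustrative only, not the kernel's locking API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table[64];

/* Callee assumes the caller already holds table_lock, the way
 * ib_umem_odp_map_dma_single_page() does after this patch. */
static void update_one_locked(int idx, int val)
{
	table[idx] = val;
}

static void update_range(int start, int n)
{
	int i;

	pthread_mutex_lock(&table_lock);	/* one lock per batch, not per element */
	for (i = 0; i < n; i++)
		update_one_locked(start + i, i);
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	update_range(0, 8);
	printf("table[7] = %d\n", table[7]);
	return 0;
}
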
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 57176ddd4c50..bb95a6c0477b 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -583,6 +583,22 @@ static void c4iw_record_pm_msg(struct c4iw_ep *ep, | |||
583 | sizeof(ep->com.mapped_remote_addr)); | 583 | sizeof(ep->com.mapped_remote_addr)); |
584 | } | 584 | } |
585 | 585 | ||
586 | static int get_remote_addr(struct c4iw_ep *ep) | ||
587 | { | ||
588 | int ret; | ||
589 | |||
590 | print_addr(&ep->com, __func__, "get_remote_addr"); | ||
591 | |||
592 | ret = iwpm_get_remote_info(&ep->com.mapped_local_addr, | ||
593 | &ep->com.mapped_remote_addr, | ||
594 | &ep->com.remote_addr, RDMA_NL_C4IW); | ||
595 | if (ret) | ||
596 | pr_info(MOD "Unable to find remote peer addr info - err %d\n", | ||
597 | ret); | ||
598 | |||
599 | return ret; | ||
600 | } | ||
601 | |||
586 | static void best_mtu(const unsigned short *mtus, unsigned short mtu, | 602 | static void best_mtu(const unsigned short *mtus, unsigned short mtu, |
587 | unsigned int *idx, int use_ts, int ipv6) | 603 | unsigned int *idx, int use_ts, int ipv6) |
588 | { | 604 | { |
@@ -675,7 +691,7 @@ static int send_connect(struct c4iw_ep *ep) | |||
675 | if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { | 691 | if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { |
676 | opt2 |= T5_OPT_2_VALID_F; | 692 | opt2 |= T5_OPT_2_VALID_F; |
677 | opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); | 693 | opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); |
678 | opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ | 694 | opt2 |= T5_ISS_F; |
679 | } | 695 | } |
680 | t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); | 696 | t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); |
681 | 697 | ||
@@ -2042,9 +2058,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) | |||
2042 | status, status2errno(status)); | 2058 | status, status2errno(status)); |
2043 | 2059 | ||
2044 | if (is_neg_adv(status)) { | 2060 | if (is_neg_adv(status)) { |
2045 | dev_warn(&dev->rdev.lldi.pdev->dev, | 2061 | PDBG("%s Connection problems for atid %u status %u (%s)\n", |
2046 | "Connection problems for atid %u status %u (%s)\n", | 2062 | __func__, atid, status, neg_adv_str(status)); |
2047 | atid, status, neg_adv_str(status)); | 2063 | ep->stats.connect_neg_adv++; |
2064 | mutex_lock(&dev->rdev.stats.lock); | ||
2065 | dev->rdev.stats.neg_adv++; | ||
2066 | mutex_unlock(&dev->rdev.stats.lock); | ||
2048 | return 0; | 2067 | return 0; |
2049 | } | 2068 | } |
2050 | 2069 | ||
@@ -2214,7 +2233,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, | |||
2214 | u32 isn = (prandom_u32() & ~7UL) - 1; | 2233 | u32 isn = (prandom_u32() & ~7UL) - 1; |
2215 | opt2 |= T5_OPT_2_VALID_F; | 2234 | opt2 |= T5_OPT_2_VALID_F; |
2216 | opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); | 2235 | opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); |
2217 | opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */ | 2236 | opt2 |= T5_ISS_F; |
2218 | rpl5 = (void *)rpl; | 2237 | rpl5 = (void *)rpl; |
2219 | memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16)); | 2238 | memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16)); |
2220 | if (peer2peer) | 2239 | if (peer2peer) |
@@ -2352,27 +2371,57 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) | |||
2352 | state_set(&child_ep->com, CONNECTING); | 2371 | state_set(&child_ep->com, CONNECTING); |
2353 | child_ep->com.dev = dev; | 2372 | child_ep->com.dev = dev; |
2354 | child_ep->com.cm_id = NULL; | 2373 | child_ep->com.cm_id = NULL; |
2374 | |||
2375 | /* | ||
2376 |  * The mapped_local and mapped_remote addresses get set up with | ||
2377 | * the actual 4-tuple. The local address will be based on the | ||
2378 | * actual local address of the connection, but on the port number | ||
2379 | * of the parent listening endpoint. The remote address is | ||
2380 |  * set up based on a query to the IWPM since we don't know what it | ||
2381 | * originally was before mapping. If no mapping was done, then | ||
2382 | * mapped_remote == remote, and mapped_local == local. | ||
2383 | */ | ||
2355 | if (iptype == 4) { | 2384 | if (iptype == 4) { |
2356 | struct sockaddr_in *sin = (struct sockaddr_in *) | 2385 | struct sockaddr_in *sin = (struct sockaddr_in *) |
2357 | &child_ep->com.local_addr; | 2386 | &child_ep->com.mapped_local_addr; |
2387 | |||
2358 | sin->sin_family = PF_INET; | 2388 | sin->sin_family = PF_INET; |
2359 | sin->sin_port = local_port; | 2389 | sin->sin_port = local_port; |
2360 | sin->sin_addr.s_addr = *(__be32 *)local_ip; | 2390 | sin->sin_addr.s_addr = *(__be32 *)local_ip; |
2361 | sin = (struct sockaddr_in *)&child_ep->com.remote_addr; | 2391 | |
2392 | sin = (struct sockaddr_in *)&child_ep->com.local_addr; | ||
2393 | sin->sin_family = PF_INET; | ||
2394 | sin->sin_port = ((struct sockaddr_in *) | ||
2395 | &parent_ep->com.local_addr)->sin_port; | ||
2396 | sin->sin_addr.s_addr = *(__be32 *)local_ip; | ||
2397 | |||
2398 | sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr; | ||
2362 | sin->sin_family = PF_INET; | 2399 | sin->sin_family = PF_INET; |
2363 | sin->sin_port = peer_port; | 2400 | sin->sin_port = peer_port; |
2364 | sin->sin_addr.s_addr = *(__be32 *)peer_ip; | 2401 | sin->sin_addr.s_addr = *(__be32 *)peer_ip; |
2365 | } else { | 2402 | } else { |
2366 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) | 2403 | struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) |
2367 | &child_ep->com.local_addr; | 2404 | &child_ep->com.mapped_local_addr; |
2405 | |||
2368 | sin6->sin6_family = PF_INET6; | 2406 | sin6->sin6_family = PF_INET6; |
2369 | sin6->sin6_port = local_port; | 2407 | sin6->sin6_port = local_port; |
2370 | memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); | 2408 | memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); |
2371 | sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr; | 2409 | |
2410 | sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; | ||
2411 | sin6->sin6_family = PF_INET6; | ||
2412 | sin6->sin6_port = ((struct sockaddr_in6 *) | ||
2413 | &parent_ep->com.local_addr)->sin6_port; | ||
2414 | memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); | ||
2415 | |||
2416 | sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr; | ||
2372 | sin6->sin6_family = PF_INET6; | 2417 | sin6->sin6_family = PF_INET6; |
2373 | sin6->sin6_port = peer_port; | 2418 | sin6->sin6_port = peer_port; |
2374 | memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); | 2419 | memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); |
2375 | } | 2420 | } |
2421 | memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr, | ||
2422 | sizeof(child_ep->com.remote_addr)); | ||
2423 | get_remote_addr(child_ep); | ||
2424 | |||
2376 | c4iw_get_ep(&parent_ep->com); | 2425 | c4iw_get_ep(&parent_ep->com); |
2377 | child_ep->parent_ep = parent_ep; | 2426 | child_ep->parent_ep = parent_ep; |
2378 | child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); | 2427 | child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); |
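
Per the comment above, the child endpoint's un-mapped local address combines the IP the connection actually arrived on with the port the parent listener was created on, while the mapped_* fields keep the real 4-tuple. A minimal user-space sketch of building such an address (IPv4 only, hypothetical helper name):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

/* Advertised local address for an accepted child connection: the IP the
 * connection arrived on, paired with the parent listener's port. */
static void build_child_local(struct sockaddr_in *out,
			      in_addr_t conn_ip, in_port_t listen_port)
{
	memset(out, 0, sizeof(*out));
	out->sin_family = AF_INET;
	out->sin_addr.s_addr = conn_ip;
	out->sin_port = listen_port;
}

int main(void)
{
	struct sockaddr_in child;
	char buf[INET_ADDRSTRLEN];

	build_child_local(&child, inet_addr("192.0.2.10"), htons(8080));
	printf("%s:%u\n",
	       inet_ntop(AF_INET, &child.sin_addr, buf, sizeof(buf)),
	       ntohs(child.sin_port));
	return 0;
}
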
@@ -2520,9 +2569,13 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) | |||
2520 | 2569 | ||
2521 | ep = lookup_tid(t, tid); | 2570 | ep = lookup_tid(t, tid); |
2522 | if (is_neg_adv(req->status)) { | 2571 | if (is_neg_adv(req->status)) { |
2523 | dev_warn(&dev->rdev.lldi.pdev->dev, | 2572 | PDBG("%s Negative advice on abort - tid %u status %d (%s)\n", |
2524 | "Negative advice on abort - tid %u status %d (%s)\n", | 2573 | __func__, ep->hwtid, req->status, |
2525 | ep->hwtid, req->status, neg_adv_str(req->status)); | 2574 | neg_adv_str(req->status)); |
2575 | ep->stats.abort_neg_adv++; | ||
2576 | mutex_lock(&dev->rdev.stats.lock); | ||
2577 | dev->rdev.stats.neg_adv++; | ||
2578 | mutex_unlock(&dev->rdev.stats.lock); | ||
2526 | return 0; | 2579 | return 0; |
2527 | } | 2580 | } |
2528 | PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, | 2581 | PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, |
@@ -3571,7 +3624,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb, | |||
3571 | * TP will ignore any value > 0 for MSS index. | 3624 | * TP will ignore any value > 0 for MSS index. |
3572 | */ | 3625 | */ |
3573 | req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF)); | 3626 | req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF)); |
3574 | req->cookie = (unsigned long)skb; | 3627 | req->cookie = (uintptr_t)skb; |
3575 | 3628 | ||
3576 | set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); | 3629 | set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id); |
3577 | ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb); | 3630 | ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb); |
@@ -3931,9 +3984,11 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) | |||
3931 | return 0; | 3984 | return 0; |
3932 | } | 3985 | } |
3933 | if (is_neg_adv(req->status)) { | 3986 | if (is_neg_adv(req->status)) { |
3934 | dev_warn(&dev->rdev.lldi.pdev->dev, | 3987 | PDBG("%s Negative advice on abort - tid %u status %d (%s)\n", |
3935 | "Negative advice on abort - tid %u status %d (%s)\n", | 3988 | __func__, ep->hwtid, req->status, |
3936 | ep->hwtid, req->status, neg_adv_str(req->status)); | 3989 | neg_adv_str(req->status)); |
3990 | ep->stats.abort_neg_adv++; | ||
3991 | dev->rdev.stats.neg_adv++; | ||
3937 | kfree_skb(skb); | 3992 | kfree_skb(skb); |
3938 | return 0; | 3993 | return 0; |
3939 | } | 3994 | } |
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index ab7692ac2044..68ddb3710215 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c | |||
@@ -55,7 +55,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, | |||
55 | FW_RI_RES_WR_NRES_V(1) | | 55 | FW_RI_RES_WR_NRES_V(1) | |
56 | FW_WR_COMPL_F); | 56 | FW_WR_COMPL_F); |
57 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); | 57 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); |
58 | res_wr->cookie = (unsigned long) &wr_wait; | 58 | res_wr->cookie = (uintptr_t)&wr_wait; |
59 | res = res_wr->res; | 59 | res = res_wr->res; |
60 | res->u.cq.restype = FW_RI_RES_TYPE_CQ; | 60 | res->u.cq.restype = FW_RI_RES_TYPE_CQ; |
61 | res->u.cq.op = FW_RI_RES_OP_RESET; | 61 | res->u.cq.op = FW_RI_RES_OP_RESET; |
@@ -125,7 +125,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, | |||
125 | FW_RI_RES_WR_NRES_V(1) | | 125 | FW_RI_RES_WR_NRES_V(1) | |
126 | FW_WR_COMPL_F); | 126 | FW_WR_COMPL_F); |
127 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); | 127 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); |
128 | res_wr->cookie = (unsigned long) &wr_wait; | 128 | res_wr->cookie = (uintptr_t)&wr_wait; |
129 | res = res_wr->res; | 129 | res = res_wr->res; |
130 | res->u.cq.restype = FW_RI_RES_TYPE_CQ; | 130 | res->u.cq.restype = FW_RI_RES_TYPE_CQ; |
131 | res->u.cq.op = FW_RI_RES_OP_WRITE; | 131 | res->u.cq.op = FW_RI_RES_OP_WRITE; |
@@ -156,12 +156,19 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, | |||
156 | goto err4; | 156 | goto err4; |
157 | 157 | ||
158 | cq->gen = 1; | 158 | cq->gen = 1; |
159 | cq->gts = rdev->lldi.gts_reg; | ||
160 | cq->rdev = rdev; | 159 | cq->rdev = rdev; |
161 | if (user) { | 160 | if (user) { |
162 | cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) + | 161 | u32 off = (cq->cqid << rdev->cqshift) & PAGE_MASK; |
163 | (cq->cqid << rdev->cqshift); | 162 | |
164 | cq->ugts &= PAGE_MASK; | 163 | cq->ugts = (u64)rdev->bar2_pa + off; |
164 | } else if (is_t4(rdev->lldi.adapter_type)) { | ||
165 | cq->gts = rdev->lldi.gts_reg; | ||
166 | cq->qid_mask = -1U; | ||
167 | } else { | ||
168 | u32 off = ((cq->cqid << rdev->cqshift) & PAGE_MASK) + 12; | ||
169 | |||
170 | cq->gts = rdev->bar2_kva + off; | ||
171 | cq->qid_mask = rdev->qpmask; | ||
165 | } | 172 | } |
166 | return 0; | 173 | return 0; |
167 | err4: | 174 | err4: |
@@ -970,8 +977,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, | |||
970 | } | 977 | } |
971 | PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n", | 978 | PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n", |
972 | __func__, chp->cq.cqid, chp, chp->cq.size, | 979 | __func__, chp->cq.cqid, chp, chp->cq.size, |
973 | chp->cq.memsize, | 980 | chp->cq.memsize, (unsigned long long) chp->cq.dma_addr); |
974 | (unsigned long long) chp->cq.dma_addr); | ||
975 | return &chp->ibcq; | 981 | return &chp->ibcq; |
976 | err5: | 982 | err5: |
977 | kfree(mm2); | 983 | kfree(mm2); |
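
The create_cq() change above derives the doorbell location from the queue id: shift the cqid by the density shift and page-align the result, with user queues taking that offset relative to the BAR2 physical address. A sketch of just the offset arithmetic (4 KiB pages assumed; names are illustrative, not the driver's):

#include <stdio.h>

#define DB_PAGE_SIZE 4096u
#define DB_PAGE_MASK (~(DB_PAGE_SIZE - 1))	/* like the kernel's PAGE_MASK */

/* Offset of a queue's doorbell page within BAR2, given the density shift. */
static unsigned int bar2_page_offset(unsigned int qid, unsigned int qshift)
{
	return (qid << qshift) & DB_PAGE_MASK;
}

int main(void)
{
	/* with a shift of 7, 32 queue ids share each 4 KiB doorbell page */
	printf("qid 10 -> offset 0x%x\n", bar2_page_offset(10, 7));
	printf("qid 40 -> offset 0x%x\n", bar2_page_offset(40, 7));
	return 0;
}
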
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 8fb295e4a9ab..cf54d6922dc4 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c | |||
@@ -93,6 +93,7 @@ static struct ibnl_client_cbs c4iw_nl_cb_table[] = { | |||
93 | [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, | 93 | [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, |
94 | [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, | 94 | [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, |
95 | [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, | 95 | [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, |
96 | [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb}, | ||
96 | [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, | 97 | [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, |
97 | [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} | 98 | [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} |
98 | }; | 99 | }; |
@@ -151,7 +152,7 @@ static int wr_log_show(struct seq_file *seq, void *v) | |||
151 | int prev_ts_set = 0; | 152 | int prev_ts_set = 0; |
152 | int idx, end; | 153 | int idx, end; |
153 | 154 | ||
154 | #define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000) | 155 | #define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000) |
155 | 156 | ||
156 | idx = atomic_read(&dev->rdev.wr_log_idx) & | 157 | idx = atomic_read(&dev->rdev.wr_log_idx) & |
157 | (dev->rdev.wr_log_size - 1); | 158 | (dev->rdev.wr_log_size - 1); |
@@ -489,6 +490,7 @@ static int stats_show(struct seq_file *seq, void *v) | |||
489 | dev->rdev.stats.act_ofld_conn_fails); | 490 | dev->rdev.stats.act_ofld_conn_fails); |
490 | seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n", | 491 | seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n", |
491 | dev->rdev.stats.pas_ofld_conn_fails); | 492 | dev->rdev.stats.pas_ofld_conn_fails); |
493 | seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv); | ||
492 | seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird); | 494 | seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird); |
493 | return 0; | 495 | return 0; |
494 | } | 496 | } |
@@ -560,10 +562,13 @@ static int dump_ep(int id, void *p, void *data) | |||
560 | cc = snprintf(epd->buf + epd->pos, space, | 562 | cc = snprintf(epd->buf + epd->pos, space, |
561 | "ep %p cm_id %p qp %p state %d flags 0x%lx " | 563 | "ep %p cm_id %p qp %p state %d flags 0x%lx " |
562 | "history 0x%lx hwtid %d atid %d " | 564 | "history 0x%lx hwtid %d atid %d " |
565 | "conn_na %u abort_na %u " | ||
563 | "%pI4:%d/%d <-> %pI4:%d/%d\n", | 566 | "%pI4:%d/%d <-> %pI4:%d/%d\n", |
564 | ep, ep->com.cm_id, ep->com.qp, | 567 | ep, ep->com.cm_id, ep->com.qp, |
565 | (int)ep->com.state, ep->com.flags, | 568 | (int)ep->com.state, ep->com.flags, |
566 | ep->com.history, ep->hwtid, ep->atid, | 569 | ep->com.history, ep->hwtid, ep->atid, |
570 | ep->stats.connect_neg_adv, | ||
571 | ep->stats.abort_neg_adv, | ||
567 | &lsin->sin_addr, ntohs(lsin->sin_port), | 572 | &lsin->sin_addr, ntohs(lsin->sin_port), |
568 | ntohs(mapped_lsin->sin_port), | 573 | ntohs(mapped_lsin->sin_port), |
569 | &rsin->sin_addr, ntohs(rsin->sin_port), | 574 | &rsin->sin_addr, ntohs(rsin->sin_port), |
@@ -581,10 +586,13 @@ static int dump_ep(int id, void *p, void *data) | |||
581 | cc = snprintf(epd->buf + epd->pos, space, | 586 | cc = snprintf(epd->buf + epd->pos, space, |
582 | "ep %p cm_id %p qp %p state %d flags 0x%lx " | 587 | "ep %p cm_id %p qp %p state %d flags 0x%lx " |
583 | "history 0x%lx hwtid %d atid %d " | 588 | "history 0x%lx hwtid %d atid %d " |
589 | "conn_na %u abort_na %u " | ||
584 | "%pI6:%d/%d <-> %pI6:%d/%d\n", | 590 | "%pI6:%d/%d <-> %pI6:%d/%d\n", |
585 | ep, ep->com.cm_id, ep->com.qp, | 591 | ep, ep->com.cm_id, ep->com.qp, |
586 | (int)ep->com.state, ep->com.flags, | 592 | (int)ep->com.state, ep->com.flags, |
587 | ep->com.history, ep->hwtid, ep->atid, | 593 | ep->com.history, ep->hwtid, ep->atid, |
594 | ep->stats.connect_neg_adv, | ||
595 | ep->stats.abort_neg_adv, | ||
588 | &lsin6->sin6_addr, ntohs(lsin6->sin6_port), | 596 | &lsin6->sin6_addr, ntohs(lsin6->sin6_port), |
589 | ntohs(mapped_lsin6->sin6_port), | 597 | ntohs(mapped_lsin6->sin6_port), |
590 | &rsin6->sin6_addr, ntohs(rsin6->sin6_port), | 598 | &rsin6->sin6_addr, ntohs(rsin6->sin6_port), |
@@ -765,6 +773,29 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) | |||
765 | c4iw_init_dev_ucontext(rdev, &rdev->uctx); | 773 | c4iw_init_dev_ucontext(rdev, &rdev->uctx); |
766 | 774 | ||
767 | /* | 775 | /* |
776 | * This implementation assumes udb_density == ucq_density! Eventually | ||
777 | * we might need to support this but for now fail the open. Also the | ||
778 | * cqid and qpid range must match for now. | ||
779 | */ | ||
780 | if (rdev->lldi.udb_density != rdev->lldi.ucq_density) { | ||
781 | pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n", | ||
782 | pci_name(rdev->lldi.pdev), rdev->lldi.udb_density, | ||
783 | rdev->lldi.ucq_density); | ||
784 | err = -EINVAL; | ||
785 | goto err1; | ||
786 | } | ||
787 | if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start || | ||
788 | rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) { | ||
789 | pr_err(MOD "%s: unsupported qp and cq id ranges " | ||
790 | "qp start %u size %u cq start %u size %u\n", | ||
791 | pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start, | ||
792 | rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start, | ||
793 | rdev->lldi.vr->cq.size); | ||
794 | err = -EINVAL; | ||
795 | goto err1; | ||
796 | } | ||
797 | |||
798 | /* | ||
768 | * qpshift is the number of bits to shift the qpid left in order | 799 | * qpshift is the number of bits to shift the qpid left in order |
769 | * to get the correct address of the doorbell for that qp. | 800 | * to get the correct address of the doorbell for that qp. |
770 | */ | 801 | */ |
@@ -784,10 +815,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) | |||
784 | rdev->lldi.vr->qp.size, | 815 | rdev->lldi.vr->qp.size, |
785 | rdev->lldi.vr->cq.start, | 816 | rdev->lldi.vr->cq.start, |
786 | rdev->lldi.vr->cq.size); | 817 | rdev->lldi.vr->cq.size); |
787 | PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu " | 818 | PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu " |
788 | "qpmask 0x%x cqshift %lu cqmask 0x%x\n", | 819 | "qpmask 0x%x cqshift %lu cqmask 0x%x\n", |
789 | (unsigned)pci_resource_len(rdev->lldi.pdev, 2), | 820 | (unsigned)pci_resource_len(rdev->lldi.pdev, 2), |
790 | (u64)pci_resource_start(rdev->lldi.pdev, 2), | 821 | (void *)pci_resource_start(rdev->lldi.pdev, 2), |
791 | rdev->lldi.db_reg, | 822 | rdev->lldi.db_reg, |
792 | rdev->lldi.gts_reg, | 823 | rdev->lldi.gts_reg, |
793 | rdev->qpshift, rdev->qpmask, | 824 | rdev->qpshift, rdev->qpmask, |
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index d87e1650f643..97bb5550a6cf 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
@@ -137,6 +137,7 @@ struct c4iw_stats { | |||
137 | u64 tcam_full; | 137 | u64 tcam_full; |
138 | u64 act_ofld_conn_fails; | 138 | u64 act_ofld_conn_fails; |
139 | u64 pas_ofld_conn_fails; | 139 | u64 pas_ofld_conn_fails; |
140 | u64 neg_adv; | ||
140 | }; | 141 | }; |
141 | 142 | ||
142 | struct c4iw_hw_queue { | 143 | struct c4iw_hw_queue { |
@@ -814,6 +815,11 @@ struct c4iw_listen_ep { | |||
814 | int backlog; | 815 | int backlog; |
815 | }; | 816 | }; |
816 | 817 | ||
818 | struct c4iw_ep_stats { | ||
819 | unsigned connect_neg_adv; | ||
820 | unsigned abort_neg_adv; | ||
821 | }; | ||
822 | |||
817 | struct c4iw_ep { | 823 | struct c4iw_ep { |
818 | struct c4iw_ep_common com; | 824 | struct c4iw_ep_common com; |
819 | struct c4iw_ep *parent_ep; | 825 | struct c4iw_ep *parent_ep; |
@@ -846,6 +852,7 @@ struct c4iw_ep { | |||
846 | unsigned int retry_count; | 852 | unsigned int retry_count; |
847 | int snd_win; | 853 | int snd_win; |
848 | int rcv_win; | 854 | int rcv_win; |
855 | struct c4iw_ep_stats stats; | ||
849 | }; | 856 | }; |
850 | 857 | ||
851 | static inline void print_addr(struct c4iw_ep_common *epc, const char *func, | 858 | static inline void print_addr(struct c4iw_ep_common *epc, const char *func, |
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 3ef0cf9f5c44..cff815b91707 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c | |||
@@ -144,7 +144,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, | |||
144 | if (i == (num_wqe-1)) { | 144 | if (i == (num_wqe-1)) { |
145 | req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | | 145 | req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) | |
146 | FW_WR_COMPL_F); | 146 | FW_WR_COMPL_F); |
147 | req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait; | 147 | req->wr.wr_lo = (__force __be64)&wr_wait; |
148 | } else | 148 | } else |
149 | req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR)); | 149 | req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR)); |
150 | req->wr.wr_mid = cpu_to_be32( | 150 | req->wr.wr_mid = cpu_to_be32( |
@@ -676,12 +676,12 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc) | |||
676 | mhp->attr.zbva = 0; | 676 | mhp->attr.zbva = 0; |
677 | mhp->attr.va_fbo = 0; | 677 | mhp->attr.va_fbo = 0; |
678 | mhp->attr.page_size = 0; | 678 | mhp->attr.page_size = 0; |
679 | mhp->attr.len = ~0UL; | 679 | mhp->attr.len = ~0ULL; |
680 | mhp->attr.pbl_size = 0; | 680 | mhp->attr.pbl_size = 0; |
681 | 681 | ||
682 | ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid, | 682 | ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid, |
683 | FW_RI_STAG_NSMR, mhp->attr.perms, | 683 | FW_RI_STAG_NSMR, mhp->attr.perms, |
684 | mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0); | 684 | mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0); |
685 | if (ret) | 685 | if (ret) |
686 | goto err1; | 686 | goto err1; |
687 | 687 | ||
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 15cae5a31018..389ced335bc5 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
@@ -275,7 +275,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, | |||
275 | FW_RI_RES_WR_NRES_V(2) | | 275 | FW_RI_RES_WR_NRES_V(2) | |
276 | FW_WR_COMPL_F); | 276 | FW_WR_COMPL_F); |
277 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); | 277 | res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16)); |
278 | res_wr->cookie = (unsigned long) &wr_wait; | 278 | res_wr->cookie = (uintptr_t)&wr_wait; |
279 | res = res_wr->res; | 279 | res = res_wr->res; |
280 | res->u.sqrq.restype = FW_RI_RES_TYPE_SQ; | 280 | res->u.sqrq.restype = FW_RI_RES_TYPE_SQ; |
281 | res->u.sqrq.op = FW_RI_RES_OP_WRITE; | 281 | res->u.sqrq.op = FW_RI_RES_OP_WRITE; |
@@ -1209,7 +1209,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, | |||
1209 | wqe->flowid_len16 = cpu_to_be32( | 1209 | wqe->flowid_len16 = cpu_to_be32( |
1210 | FW_WR_FLOWID_V(ep->hwtid) | | 1210 | FW_WR_FLOWID_V(ep->hwtid) | |
1211 | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); | 1211 | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); |
1212 | wqe->cookie = (unsigned long) &ep->com.wr_wait; | 1212 | wqe->cookie = (uintptr_t)&ep->com.wr_wait; |
1213 | 1213 | ||
1214 | wqe->u.fini.type = FW_RI_TYPE_FINI; | 1214 | wqe->u.fini.type = FW_RI_TYPE_FINI; |
1215 | ret = c4iw_ofld_send(&rhp->rdev, skb); | 1215 | ret = c4iw_ofld_send(&rhp->rdev, skb); |
@@ -1279,7 +1279,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) | |||
1279 | FW_WR_FLOWID_V(qhp->ep->hwtid) | | 1279 | FW_WR_FLOWID_V(qhp->ep->hwtid) | |
1280 | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); | 1280 | FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16))); |
1281 | 1281 | ||
1282 | wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait; | 1282 | wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait; |
1283 | 1283 | ||
1284 | wqe->u.init.type = FW_RI_TYPE_INIT; | 1284 | wqe->u.init.type = FW_RI_TYPE_INIT; |
1285 | wqe->u.init.mpareqbit_p2ptype = | 1285 | wqe->u.init.mpareqbit_p2ptype = |
@@ -1766,11 +1766,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, | |||
1766 | mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); | 1766 | mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); |
1767 | insert_mmap(ucontext, mm2); | 1767 | insert_mmap(ucontext, mm2); |
1768 | mm3->key = uresp.sq_db_gts_key; | 1768 | mm3->key = uresp.sq_db_gts_key; |
1769 | mm3->addr = (__force unsigned long) qhp->wq.sq.udb; | 1769 | mm3->addr = (__force unsigned long)qhp->wq.sq.udb; |
1770 | mm3->len = PAGE_SIZE; | 1770 | mm3->len = PAGE_SIZE; |
1771 | insert_mmap(ucontext, mm3); | 1771 | insert_mmap(ucontext, mm3); |
1772 | mm4->key = uresp.rq_db_gts_key; | 1772 | mm4->key = uresp.rq_db_gts_key; |
1773 | mm4->addr = (__force unsigned long) qhp->wq.rq.udb; | 1773 | mm4->addr = (__force unsigned long)qhp->wq.rq.udb; |
1774 | mm4->len = PAGE_SIZE; | 1774 | mm4->len = PAGE_SIZE; |
1775 | insert_mmap(ucontext, mm4); | 1775 | insert_mmap(ucontext, mm4); |
1776 | if (mm5) { | 1776 | if (mm5) { |
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index 871cdcac7be2..7f2a6c244d25 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h | |||
@@ -539,6 +539,7 @@ struct t4_cq { | |||
539 | size_t memsize; | 539 | size_t memsize; |
540 | __be64 bits_type_ts; | 540 | __be64 bits_type_ts; |
541 | u32 cqid; | 541 | u32 cqid; |
542 | u32 qid_mask; | ||
542 | int vector; | 543 | int vector; |
543 | u16 size; /* including status page */ | 544 | u16 size; /* including status page */ |
544 | u16 cidx; | 545 | u16 cidx; |
@@ -563,12 +564,12 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se) | |||
563 | set_bit(CQ_ARMED, &cq->flags); | 564 | set_bit(CQ_ARMED, &cq->flags); |
564 | while (cq->cidx_inc > CIDXINC_M) { | 565 | while (cq->cidx_inc > CIDXINC_M) { |
565 | val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) | | 566 | val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) | |
566 | INGRESSQID_V(cq->cqid); | 567 | INGRESSQID_V(cq->cqid & cq->qid_mask); |
567 | writel(val, cq->gts); | 568 | writel(val, cq->gts); |
568 | cq->cidx_inc -= CIDXINC_M; | 569 | cq->cidx_inc -= CIDXINC_M; |
569 | } | 570 | } |
570 | val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) | | 571 | val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) | |
571 | INGRESSQID_V(cq->cqid); | 572 | INGRESSQID_V(cq->cqid & cq->qid_mask); |
572 | writel(val, cq->gts); | 573 | writel(val, cq->gts); |
573 | cq->cidx_inc = 0; | 574 | cq->cidx_inc = 0; |
574 | return 0; | 575 | return 0; |
@@ -601,7 +602,7 @@ static inline void t4_hwcq_consume(struct t4_cq *cq) | |||
601 | u32 val; | 602 | u32 val; |
602 | 603 | ||
603 | val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) | | 604 | val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) | |
604 | INGRESSQID_V(cq->cqid); | 605 | INGRESSQID_V(cq->cqid & cq->qid_mask); |
605 | writel(val, cq->gts); | 606 | writel(val, cq->gts); |
606 | cq->cidx_inc = 0; | 607 | cq->cidx_inc = 0; |
607 | } | 608 | } |
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h index 5e53327fc647..343e8daf2270 100644 --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | |||
@@ -848,6 +848,8 @@ enum { /* TCP congestion control algorithms */ | |||
848 | #define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S) | 848 | #define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S) |
849 | #define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M) | 849 | #define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M) |
850 | 850 | ||
851 | #define CONG_CNTRL_VALID (1 << 18) | 851 | #define T5_ISS_S 18 |
852 | #define T5_ISS_V(x) ((x) << T5_ISS_S) | ||
853 | #define T5_ISS_F T5_ISS_V(1U) | ||
852 | 854 | ||
853 | #endif /* _T4FW_RI_API_H_ */ | 855 | #endif /* _T4FW_RI_API_H_ */ |
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 3b2a6dc8ea99..9f9d5c563a61 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c | |||
@@ -116,6 +116,7 @@ static struct ibnl_client_cbs nes_nl_cb_table[] = { | |||
116 | [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, | 116 | [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, |
117 | [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, | 117 | [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, |
118 | [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, | 118 | [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, |
119 | [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb}, | ||
119 | [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, | 120 | [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, |
120 | [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, | 121 | [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, |
121 | [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} | 122 | [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} |
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 6f09a72e78d7..72b43417cbe3 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -596,27 +596,52 @@ static void nes_form_reg_msg(struct nes_vnic *nesvnic, | |||
596 | memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE); | 596 | memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE); |
597 | } | 597 | } |
598 | 598 | ||
599 | static void record_sockaddr_info(struct sockaddr_storage *addr_info, | ||
600 | nes_addr_t *ip_addr, u16 *port_num) | ||
601 | { | ||
602 | struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info; | ||
603 | |||
604 | if (in_addr->sin_family == AF_INET) { | ||
605 | *ip_addr = ntohl(in_addr->sin_addr.s_addr); | ||
606 | *port_num = ntohs(in_addr->sin_port); | ||
607 | } | ||
608 | } | ||
609 | |||
599 | /* | 610 | /* |
600 | * nes_record_pm_msg - Save the received mapping info | 611 | * nes_record_pm_msg - Save the received mapping info |
601 | */ | 612 | */ |
602 | static void nes_record_pm_msg(struct nes_cm_info *cm_info, | 613 | static void nes_record_pm_msg(struct nes_cm_info *cm_info, |
603 | struct iwpm_sa_data *pm_msg) | 614 | struct iwpm_sa_data *pm_msg) |
604 | { | 615 | { |
605 | struct sockaddr_in *mapped_loc_addr = | 616 | record_sockaddr_info(&pm_msg->mapped_loc_addr, |
606 | (struct sockaddr_in *)&pm_msg->mapped_loc_addr; | 617 | &cm_info->mapped_loc_addr, &cm_info->mapped_loc_port); |
607 | struct sockaddr_in *mapped_rem_addr = | 618 | |
608 | (struct sockaddr_in *)&pm_msg->mapped_rem_addr; | 619 | record_sockaddr_info(&pm_msg->mapped_rem_addr, |
609 | 620 | &cm_info->mapped_rem_addr, &cm_info->mapped_rem_port); | |
610 | if (mapped_loc_addr->sin_family == AF_INET) { | 621 | } |
611 | cm_info->mapped_loc_addr = | 622 | |
612 | ntohl(mapped_loc_addr->sin_addr.s_addr); | 623 | /* |
613 | cm_info->mapped_loc_port = ntohs(mapped_loc_addr->sin_port); | 624 | * nes_get_remote_addr - Get the address info of the remote connecting peer |
614 | } | 625 | */ |
615 | if (mapped_rem_addr->sin_family == AF_INET) { | 626 | static int nes_get_remote_addr(struct nes_cm_node *cm_node) |
616 | cm_info->mapped_rem_addr = | 627 | { |
617 | ntohl(mapped_rem_addr->sin_addr.s_addr); | 628 | struct sockaddr_storage mapped_loc_addr, mapped_rem_addr; |
618 | cm_info->mapped_rem_port = ntohs(mapped_rem_addr->sin_port); | 629 | struct sockaddr_storage remote_addr; |
619 | } | 630 | int ret; |
631 | |||
632 | nes_create_sockaddr(htonl(cm_node->mapped_loc_addr), | ||
633 | htons(cm_node->mapped_loc_port), &mapped_loc_addr); | ||
634 | nes_create_sockaddr(htonl(cm_node->mapped_rem_addr), | ||
635 | htons(cm_node->mapped_rem_port), &mapped_rem_addr); | ||
636 | |||
637 | ret = iwpm_get_remote_info(&mapped_loc_addr, &mapped_rem_addr, | ||
638 | &remote_addr, RDMA_NL_NES); | ||
639 | if (ret) | ||
640 | nes_debug(NES_DBG_CM, "Unable to find remote peer address info\n"); | ||
641 | else | ||
642 | record_sockaddr_info(&remote_addr, &cm_node->rem_addr, | ||
643 | &cm_node->rem_port); | ||
644 | return ret; | ||
620 | } | 645 | } |
621 | 646 | ||
622 | /** | 647 | /** |
@@ -1566,9 +1591,14 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, | |||
1566 | return NULL; | 1591 | return NULL; |
1567 | 1592 | ||
1568 | /* set our node specific transport info */ | 1593 | /* set our node specific transport info */ |
1569 | cm_node->loc_addr = cm_info->loc_addr; | 1594 | if (listener) { |
1595 | cm_node->loc_addr = listener->loc_addr; | ||
1596 | cm_node->loc_port = listener->loc_port; | ||
1597 | } else { | ||
1598 | cm_node->loc_addr = cm_info->loc_addr; | ||
1599 | cm_node->loc_port = cm_info->loc_port; | ||
1600 | } | ||
1570 | cm_node->rem_addr = cm_info->rem_addr; | 1601 | cm_node->rem_addr = cm_info->rem_addr; |
1571 | cm_node->loc_port = cm_info->loc_port; | ||
1572 | cm_node->rem_port = cm_info->rem_port; | 1602 | cm_node->rem_port = cm_info->rem_port; |
1573 | 1603 | ||
1574 | cm_node->mapped_loc_addr = cm_info->mapped_loc_addr; | 1604 | cm_node->mapped_loc_addr = cm_info->mapped_loc_addr; |
@@ -2151,6 +2181,7 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, | |||
2151 | cm_node->state = NES_CM_STATE_ESTABLISHED; | 2181 | cm_node->state = NES_CM_STATE_ESTABLISHED; |
2152 | if (datasize) { | 2182 | if (datasize) { |
2153 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; | 2183 | cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; |
2184 | nes_get_remote_addr(cm_node); | ||
2154 | handle_rcv_mpa(cm_node, skb); | 2185 | handle_rcv_mpa(cm_node, skb); |
2155 | } else { /* rcvd ACK only */ | 2186 | } else { /* rcvd ACK only */ |
2156 | dev_kfree_skb_any(skb); | 2187 | dev_kfree_skb_any(skb); |
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index ffd48bfc4923..ba5173e24973 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h | |||
@@ -1136,7 +1136,6 @@ extern struct qib_devdata *qib_lookup(int unit); | |||
1136 | extern u32 qib_cpulist_count; | 1136 | extern u32 qib_cpulist_count; |
1137 | extern unsigned long *qib_cpulist; | 1137 | extern unsigned long *qib_cpulist; |
1138 | 1138 | ||
1139 | extern unsigned qib_wc_pat; | ||
1140 | extern unsigned qib_cc_table_size; | 1139 | extern unsigned qib_cc_table_size; |
1141 | int qib_init(struct qib_devdata *, int); | 1140 | int qib_init(struct qib_devdata *, int); |
1142 | int init_chip_wc_pat(struct qib_devdata *dd, u32); | 1141 | int init_chip_wc_pat(struct qib_devdata *dd, u32); |
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 9ea6c440a00c..725881890c4a 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c | |||
@@ -835,7 +835,8 @@ static int mmap_piobufs(struct vm_area_struct *vma, | |||
835 | vma->vm_flags &= ~VM_MAYREAD; | 835 | vma->vm_flags &= ~VM_MAYREAD; |
836 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; | 836 | vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND; |
837 | 837 | ||
838 | if (qib_wc_pat) | 838 | /* We used PAT if wc_cookie == 0 */ |
839 | if (!dd->wc_cookie) | ||
839 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | 840 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); |
840 | 841 | ||
841 | ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, | 842 | ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT, |
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index 0d2ba59af30a..4b927809d1a1 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c | |||
@@ -3315,11 +3315,9 @@ static int init_6120_variables(struct qib_devdata *dd) | |||
3315 | qib_6120_config_ctxts(dd); | 3315 | qib_6120_config_ctxts(dd); |
3316 | qib_set_ctxtcnt(dd); | 3316 | qib_set_ctxtcnt(dd); |
3317 | 3317 | ||
3318 | if (qib_wc_pat) { | 3318 | ret = init_chip_wc_pat(dd, 0); |
3319 | ret = init_chip_wc_pat(dd, 0); | 3319 | if (ret) |
3320 | if (ret) | 3320 | goto bail; |
3321 | goto bail; | ||
3322 | } | ||
3323 | set_6120_baseaddrs(dd); /* set chip access pointers now */ | 3321 | set_6120_baseaddrs(dd); /* set chip access pointers now */ |
3324 | 3322 | ||
3325 | ret = 0; | 3323 | ret = 0; |
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index 22affda8af88..00b2af211157 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c | |||
@@ -4126,11 +4126,9 @@ static int qib_init_7220_variables(struct qib_devdata *dd) | |||
4126 | qib_7220_config_ctxts(dd); | 4126 | qib_7220_config_ctxts(dd); |
4127 | qib_set_ctxtcnt(dd); /* needed for PAT setup */ | 4127 | qib_set_ctxtcnt(dd); /* needed for PAT setup */ |
4128 | 4128 | ||
4129 | if (qib_wc_pat) { | 4129 | ret = init_chip_wc_pat(dd, 0); |
4130 | ret = init_chip_wc_pat(dd, 0); | 4130 | if (ret) |
4131 | if (ret) | 4131 | goto bail; |
4132 | goto bail; | ||
4133 | } | ||
4134 | set_7220_baseaddrs(dd); /* set chip access pointers now */ | 4132 | set_7220_baseaddrs(dd); /* set chip access pointers now */ |
4135 | 4133 | ||
4136 | ret = 0; | 4134 | ret = 0; |
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index ef97b71c8f7d..f32b4628e991 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
@@ -6429,6 +6429,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) | |||
6429 | unsigned features, pidx, sbufcnt; | 6429 | unsigned features, pidx, sbufcnt; |
6430 | int ret, mtu; | 6430 | int ret, mtu; |
6431 | u32 sbufs, updthresh; | 6431 | u32 sbufs, updthresh; |
6432 | resource_size_t vl15off; | ||
6432 | 6433 | ||
6433 | /* pport structs are contiguous, allocated after devdata */ | 6434 | /* pport structs are contiguous, allocated after devdata */ |
6434 | ppd = (struct qib_pportdata *)(dd + 1); | 6435 | ppd = (struct qib_pportdata *)(dd + 1); |
@@ -6677,29 +6678,27 @@ static int qib_init_7322_variables(struct qib_devdata *dd) | |||
6677 | qib_7322_config_ctxts(dd); | 6678 | qib_7322_config_ctxts(dd); |
6678 | qib_set_ctxtcnt(dd); | 6679 | qib_set_ctxtcnt(dd); |
6679 | 6680 | ||
6680 | if (qib_wc_pat) { | 6681 | /* |
6681 | resource_size_t vl15off; | 6682 | * We do not set WC on the VL15 buffers to avoid |
6682 | /* | 6683 | * a rare problem with unaligned writes from |
6683 | * We do not set WC on the VL15 buffers to avoid | 6684 | * interrupt-flushed store buffers, so we need |
6684 | * a rare problem with unaligned writes from | 6685 | * to map those separately here. We can't solve |
6685 | * interrupt-flushed store buffers, so we need | 6686 | * this for the rarely used mtrr case. |
6686 | * to map those separately here. We can't solve | 6687 | */ |
6687 | * this for the rarely used mtrr case. | 6688 | ret = init_chip_wc_pat(dd, 0); |
6688 | */ | 6689 | if (ret) |
6689 | ret = init_chip_wc_pat(dd, 0); | 6690 | goto bail; |
6690 | if (ret) | ||
6691 | goto bail; | ||
6692 | 6691 | ||
6693 | /* vl15 buffers start just after the 4k buffers */ | 6692 | /* vl15 buffers start just after the 4k buffers */ |
6694 | vl15off = dd->physaddr + (dd->piobufbase >> 32) + | 6693 | vl15off = dd->physaddr + (dd->piobufbase >> 32) + |
6695 | dd->piobcnt4k * dd->align4k; | 6694 | dd->piobcnt4k * dd->align4k; |
6696 | dd->piovl15base = ioremap_nocache(vl15off, | 6695 | dd->piovl15base = ioremap_nocache(vl15off, |
6697 | NUM_VL15_BUFS * dd->align4k); | 6696 | NUM_VL15_BUFS * dd->align4k); |
6698 | if (!dd->piovl15base) { | 6697 | if (!dd->piovl15base) { |
6699 | ret = -ENOMEM; | 6698 | ret = -ENOMEM; |
6700 | goto bail; | 6699 | goto bail; |
6701 | } | ||
6702 | } | 6700 | } |
6701 | |||
6703 | qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ | 6702 | qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ |
6704 | 6703 | ||
6705 | ret = 0; | 6704 | ret = 0; |
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 2ee36953e234..7e00470adc30 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c | |||
@@ -91,15 +91,6 @@ MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port"); | |||
91 | unsigned qib_cc_table_size; | 91 | unsigned qib_cc_table_size; |
92 | module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO); | 92 | module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO); |
93 | MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984"); | 93 | MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984"); |
94 | /* | ||
95 | * qib_wc_pat parameter: | ||
96 | * 0 is WC via MTRR | ||
97 | * 1 is WC via PAT | ||
98 | * If PAT initialization fails, code reverts back to MTRR | ||
99 | */ | ||
100 | unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */ | ||
101 | module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO); | ||
102 | MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism"); | ||
103 | 94 | ||
104 | static void verify_interrupt(unsigned long); | 95 | static void verify_interrupt(unsigned long); |
105 | 96 | ||
@@ -1377,8 +1368,7 @@ static void cleanup_device_data(struct qib_devdata *dd) | |||
1377 | spin_unlock(&dd->pport[pidx].cc_shadow_lock); | 1368 | spin_unlock(&dd->pport[pidx].cc_shadow_lock); |
1378 | } | 1369 | } |
1379 | 1370 | ||
1380 | if (!qib_wc_pat) | 1371 | qib_disable_wc(dd); |
1381 | qib_disable_wc(dd); | ||
1382 | 1372 | ||
1383 | if (dd->pioavailregs_dma) { | 1373 | if (dd->pioavailregs_dma) { |
1384 | dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, | 1374 | dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, |
@@ -1547,14 +1537,12 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1547 | goto bail; | 1537 | goto bail; |
1548 | } | 1538 | } |
1549 | 1539 | ||
1550 | if (!qib_wc_pat) { | 1540 | ret = qib_enable_wc(dd); |
1551 | ret = qib_enable_wc(dd); | 1541 | if (ret) { |
1552 | if (ret) { | 1542 | qib_dev_err(dd, |
1553 | qib_dev_err(dd, | 1543 | "Write combining not enabled (err %d): performance may be poor\n", |
1554 | "Write combining not enabled (err %d): performance may be poor\n", | 1544 | -ret); |
1555 | -ret); | 1545 | ret = 0; |
1556 | ret = 0; | ||
1557 | } | ||
1558 | } | 1546 | } |
1559 | 1547 | ||
1560 | qib_verify_pioperf(dd); | 1548 | qib_verify_pioperf(dd); |
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c index 81b225f2300a..6d61ef98721c 100644 --- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c +++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c | |||
@@ -116,21 +116,9 @@ int qib_enable_wc(struct qib_devdata *dd) | |||
116 | } | 116 | } |
117 | 117 | ||
118 | if (!ret) { | 118 | if (!ret) { |
119 | int cookie; | 119 | dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen); |
120 | 120 | if (dd->wc_cookie < 0) | |
121 | cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0); | 121 | ret = -EINVAL; |
122 | if (cookie < 0) { | ||
123 | { | ||
124 | qib_devinfo(dd->pcidev, | ||
125 | "mtrr_add() WC for PIO bufs failed (%d)\n", | ||
126 | cookie); | ||
127 | ret = -EINVAL; | ||
128 | } | ||
129 | } else { | ||
130 | dd->wc_cookie = cookie; | ||
131 | dd->wc_base = (unsigned long) pioaddr; | ||
132 | dd->wc_len = (unsigned long) piolen; | ||
133 | } | ||
134 | } | 122 | } |
135 | 123 | ||
136 | return ret; | 124 | return ret; |
@@ -142,18 +130,7 @@ int qib_enable_wc(struct qib_devdata *dd) | |||
142 | */ | 130 | */ |
143 | void qib_disable_wc(struct qib_devdata *dd) | 131 | void qib_disable_wc(struct qib_devdata *dd) |
144 | { | 132 | { |
145 | if (dd->wc_cookie) { | 133 | arch_phys_wc_del(dd->wc_cookie); |
146 | int r; | ||
147 | |||
148 | r = mtrr_del(dd->wc_cookie, dd->wc_base, | ||
149 | dd->wc_len); | ||
150 | if (r < 0) | ||
151 | qib_devinfo(dd->pcidev, | ||
152 | "mtrr_del(%lx, %lx, %lx) failed: %d\n", | ||
153 | dd->wc_cookie, dd->wc_base, | ||
154 | dd->wc_len, r); | ||
155 | dd->wc_cookie = 0; /* even on failure */ | ||
156 | } | ||
157 | } | 134 | } |
158 | 135 | ||
159 | /** | 136 | /** |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 56959adb6c7d..cf32a778e7d0 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -386,8 +386,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i | |||
386 | rx->rx_ring[i].mapping, | 386 | rx->rx_ring[i].mapping, |
387 | GFP_KERNEL)) { | 387 | GFP_KERNEL)) { |
388 | ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); | 388 | ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); |
389 | ret = -ENOMEM; | 389 | ret = -ENOMEM; |
390 | goto err_count; | 390 | goto err_count; |
391 | } | 391 | } |
392 | ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i); | 392 | ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i); |
393 | if (ret) { | 393 | if (ret) { |
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index a1cbba9056fd..3465faf1809e 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c | |||
@@ -266,6 +266,7 @@ static void put_pasid_state(struct pasid_state *pasid_state) | |||
266 | 266 | ||
267 | static void put_pasid_state_wait(struct pasid_state *pasid_state) | 267 | static void put_pasid_state_wait(struct pasid_state *pasid_state) |
268 | { | 268 | { |
269 | atomic_dec(&pasid_state->count); | ||
269 | wait_event(pasid_state->wq, !atomic_read(&pasid_state->count)); | 270 | wait_event(pasid_state->wq, !atomic_read(&pasid_state->count)); |
270 | free_pasid_state(pasid_state); | 271 | free_pasid_state(pasid_state); |
271 | } | 272 | } |
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 9f7e1d34a32b..66a803b9dd3a 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
@@ -224,14 +224,7 @@ | |||
224 | #define RESUME_TERMINATE (1 << 0) | 224 | #define RESUME_TERMINATE (1 << 0) |
225 | 225 | ||
226 | #define TTBCR2_SEP_SHIFT 15 | 226 | #define TTBCR2_SEP_SHIFT 15 |
227 | #define TTBCR2_SEP_MASK 0x7 | 227 | #define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT) |
228 | |||
229 | #define TTBCR2_ADDR_32 0 | ||
230 | #define TTBCR2_ADDR_36 1 | ||
231 | #define TTBCR2_ADDR_40 2 | ||
232 | #define TTBCR2_ADDR_42 3 | ||
233 | #define TTBCR2_ADDR_44 4 | ||
234 | #define TTBCR2_ADDR_48 5 | ||
235 | 228 | ||
236 | #define TTBRn_HI_ASID_SHIFT 16 | 229 | #define TTBRn_HI_ASID_SHIFT 16 |
237 | 230 | ||
@@ -793,26 +786,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, | |||
793 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); | 786 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); |
794 | if (smmu->version > ARM_SMMU_V1) { | 787 | if (smmu->version > ARM_SMMU_V1) { |
795 | reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; | 788 | reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; |
796 | switch (smmu->va_size) { | 789 | reg |= TTBCR2_SEP_UPSTREAM; |
797 | case 32: | ||
798 | reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT); | ||
799 | break; | ||
800 | case 36: | ||
801 | reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); | ||
802 | break; | ||
803 | case 40: | ||
804 | reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); | ||
805 | break; | ||
806 | case 42: | ||
807 | reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT); | ||
808 | break; | ||
809 | case 44: | ||
810 | reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT); | ||
811 | break; | ||
812 | case 48: | ||
813 | reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT); | ||
814 | break; | ||
815 | } | ||
816 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); | 790 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); |
817 | } | 791 | } |
818 | } else { | 792 | } else { |
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 4015560bf486..cab214544237 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c | |||
@@ -1004,20 +1004,18 @@ static int rk_iommu_remove(struct platform_device *pdev) | |||
1004 | return 0; | 1004 | return 0; |
1005 | } | 1005 | } |
1006 | 1006 | ||
1007 | #ifdef CONFIG_OF | ||
1008 | static const struct of_device_id rk_iommu_dt_ids[] = { | 1007 | static const struct of_device_id rk_iommu_dt_ids[] = { |
1009 | { .compatible = "rockchip,iommu" }, | 1008 | { .compatible = "rockchip,iommu" }, |
1010 | { /* sentinel */ } | 1009 | { /* sentinel */ } |
1011 | }; | 1010 | }; |
1012 | MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids); | 1011 | MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids); |
1013 | #endif | ||
1014 | 1012 | ||
1015 | static struct platform_driver rk_iommu_driver = { | 1013 | static struct platform_driver rk_iommu_driver = { |
1016 | .probe = rk_iommu_probe, | 1014 | .probe = rk_iommu_probe, |
1017 | .remove = rk_iommu_remove, | 1015 | .remove = rk_iommu_remove, |
1018 | .driver = { | 1016 | .driver = { |
1019 | .name = "rk_iommu", | 1017 | .name = "rk_iommu", |
1020 | .of_match_table = of_match_ptr(rk_iommu_dt_ids), | 1018 | .of_match_table = rk_iommu_dt_ids, |
1021 | }, | 1019 | }, |
1022 | }; | 1020 | }; |
1023 | 1021 | ||
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 7b315e385ba3..01999d74bd3a 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -82,19 +82,6 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock); | |||
82 | #define NR_GIC_CPU_IF 8 | 82 | #define NR_GIC_CPU_IF 8 |
83 | static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly; | 83 | static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly; |
84 | 84 | ||
85 | /* | ||
86 | * Supported arch specific GIC irq extension. | ||
87 | * Default make them NULL. | ||
88 | */ | ||
89 | struct irq_chip gic_arch_extn = { | ||
90 | .irq_eoi = NULL, | ||
91 | .irq_mask = NULL, | ||
92 | .irq_unmask = NULL, | ||
93 | .irq_retrigger = NULL, | ||
94 | .irq_set_type = NULL, | ||
95 | .irq_set_wake = NULL, | ||
96 | }; | ||
97 | |||
98 | #ifndef MAX_GIC_NR | 85 | #ifndef MAX_GIC_NR |
99 | #define MAX_GIC_NR 1 | 86 | #define MAX_GIC_NR 1 |
100 | #endif | 87 | #endif |
@@ -167,34 +154,16 @@ static int gic_peek_irq(struct irq_data *d, u32 offset) | |||
167 | 154 | ||
168 | static void gic_mask_irq(struct irq_data *d) | 155 | static void gic_mask_irq(struct irq_data *d) |
169 | { | 156 | { |
170 | unsigned long flags; | ||
171 | |||
172 | raw_spin_lock_irqsave(&irq_controller_lock, flags); | ||
173 | gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR); | 157 | gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR); |
174 | if (gic_arch_extn.irq_mask) | ||
175 | gic_arch_extn.irq_mask(d); | ||
176 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); | ||
177 | } | 158 | } |
178 | 159 | ||
179 | static void gic_unmask_irq(struct irq_data *d) | 160 | static void gic_unmask_irq(struct irq_data *d) |
180 | { | 161 | { |
181 | unsigned long flags; | ||
182 | |||
183 | raw_spin_lock_irqsave(&irq_controller_lock, flags); | ||
184 | if (gic_arch_extn.irq_unmask) | ||
185 | gic_arch_extn.irq_unmask(d); | ||
186 | gic_poke_irq(d, GIC_DIST_ENABLE_SET); | 162 | gic_poke_irq(d, GIC_DIST_ENABLE_SET); |
187 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); | ||
188 | } | 163 | } |
189 | 164 | ||
190 | static void gic_eoi_irq(struct irq_data *d) | 165 | static void gic_eoi_irq(struct irq_data *d) |
191 | { | 166 | { |
192 | if (gic_arch_extn.irq_eoi) { | ||
193 | raw_spin_lock(&irq_controller_lock); | ||
194 | gic_arch_extn.irq_eoi(d); | ||
195 | raw_spin_unlock(&irq_controller_lock); | ||
196 | } | ||
197 | |||
198 | writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); | 167 | writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); |
199 | } | 168 | } |
200 | 169 | ||
@@ -251,8 +220,6 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |||
251 | { | 220 | { |
252 | void __iomem *base = gic_dist_base(d); | 221 | void __iomem *base = gic_dist_base(d); |
253 | unsigned int gicirq = gic_irq(d); | 222 | unsigned int gicirq = gic_irq(d); |
254 | unsigned long flags; | ||
255 | int ret; | ||
256 | 223 | ||
257 | /* Interrupt configuration for SGIs can't be changed */ | 224 | /* Interrupt configuration for SGIs can't be changed */ |
258 | if (gicirq < 16) | 225 | if (gicirq < 16) |
@@ -263,25 +230,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) | |||
263 | type != IRQ_TYPE_EDGE_RISING) | 230 | type != IRQ_TYPE_EDGE_RISING) |
264 | return -EINVAL; | 231 | return -EINVAL; |
265 | 232 | ||
266 | raw_spin_lock_irqsave(&irq_controller_lock, flags); | 233 | return gic_configure_irq(gicirq, type, base, NULL); |
267 | |||
268 | if (gic_arch_extn.irq_set_type) | ||
269 | gic_arch_extn.irq_set_type(d, type); | ||
270 | |||
271 | ret = gic_configure_irq(gicirq, type, base, NULL); | ||
272 | |||
273 | raw_spin_unlock_irqrestore(&irq_controller_lock, flags); | ||
274 | |||
275 | return ret; | ||
276 | } | ||
277 | |||
278 | static int gic_retrigger(struct irq_data *d) | ||
279 | { | ||
280 | if (gic_arch_extn.irq_retrigger) | ||
281 | return gic_arch_extn.irq_retrigger(d); | ||
282 | |||
283 | /* the genirq layer expects 0 if we can't retrigger in hardware */ | ||
284 | return 0; | ||
285 | } | 234 | } |
286 | 235 | ||
287 | #ifdef CONFIG_SMP | 236 | #ifdef CONFIG_SMP |
@@ -312,21 +261,6 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
312 | } | 261 | } |
313 | #endif | 262 | #endif |
314 | 263 | ||
315 | #ifdef CONFIG_PM | ||
316 | static int gic_set_wake(struct irq_data *d, unsigned int on) | ||
317 | { | ||
318 | int ret = -ENXIO; | ||
319 | |||
320 | if (gic_arch_extn.irq_set_wake) | ||
321 | ret = gic_arch_extn.irq_set_wake(d, on); | ||
322 | |||
323 | return ret; | ||
324 | } | ||
325 | |||
326 | #else | ||
327 | #define gic_set_wake NULL | ||
328 | #endif | ||
329 | |||
330 | static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) | 264 | static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) |
331 | { | 265 | { |
332 | u32 irqstat, irqnr; | 266 | u32 irqstat, irqnr; |
@@ -385,11 +319,9 @@ static struct irq_chip gic_chip = { | |||
385 | .irq_unmask = gic_unmask_irq, | 319 | .irq_unmask = gic_unmask_irq, |
386 | .irq_eoi = gic_eoi_irq, | 320 | .irq_eoi = gic_eoi_irq, |
387 | .irq_set_type = gic_set_type, | 321 | .irq_set_type = gic_set_type, |
388 | .irq_retrigger = gic_retrigger, | ||
389 | #ifdef CONFIG_SMP | 322 | #ifdef CONFIG_SMP |
390 | .irq_set_affinity = gic_set_affinity, | 323 | .irq_set_affinity = gic_set_affinity, |
391 | #endif | 324 | #endif |
392 | .irq_set_wake = gic_set_wake, | ||
393 | .irq_get_irqchip_state = gic_irq_get_irqchip_state, | 325 | .irq_get_irqchip_state = gic_irq_get_irqchip_state, |
394 | .irq_set_irqchip_state = gic_irq_set_irqchip_state, | 326 | .irq_set_irqchip_state = gic_irq_set_irqchip_state, |
395 | }; | 327 | }; |
@@ -1055,7 +987,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, | |||
1055 | set_handle_irq(gic_handle_irq); | 987 | set_handle_irq(gic_handle_irq); |
1056 | } | 988 | } |
1057 | 989 | ||
1058 | gic_chip.flags |= gic_arch_extn.flags; | ||
1059 | gic_dist_init(gic); | 990 | gic_dist_init(gic); |
1060 | gic_cpu_init(gic); | 991 | gic_cpu_init(gic); |
1061 | gic_pm_init(gic); | 992 | gic_pm_init(gic); |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 9eeea196328a..5503e43e5f28 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -925,10 +925,11 @@ static int crypt_convert(struct crypt_config *cc, | |||
925 | 925 | ||
926 | switch (r) { | 926 | switch (r) { |
927 | /* async */ | 927 | /* async */ |
928 | case -EINPROGRESS: | ||
929 | case -EBUSY: | 928 | case -EBUSY: |
930 | wait_for_completion(&ctx->restart); | 929 | wait_for_completion(&ctx->restart); |
931 | reinit_completion(&ctx->restart); | 930 | reinit_completion(&ctx->restart); |
931 | /* fall through */ | ||
932 | case -EINPROGRESS: | ||
932 | ctx->req = NULL; | 933 | ctx->req = NULL; |
933 | ctx->cc_sector++; | 934 | ctx->cc_sector++; |
934 | continue; | 935 | continue; |
@@ -1345,8 +1346,10 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, | |||
1345 | struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); | 1346 | struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx); |
1346 | struct crypt_config *cc = io->cc; | 1347 | struct crypt_config *cc = io->cc; |
1347 | 1348 | ||
1348 | if (error == -EINPROGRESS) | 1349 | if (error == -EINPROGRESS) { |
1350 | complete(&ctx->restart); | ||
1349 | return; | 1351 | return; |
1352 | } | ||
1350 | 1353 | ||
1351 | if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) | 1354 | if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) |
1352 | error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); | 1355 | error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); |
@@ -1357,15 +1360,12 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, | |||
1357 | crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); | 1360 | crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio); |
1358 | 1361 | ||
1359 | if (!atomic_dec_and_test(&ctx->cc_pending)) | 1362 | if (!atomic_dec_and_test(&ctx->cc_pending)) |
1360 | goto done; | 1363 | return; |
1361 | 1364 | ||
1362 | if (bio_data_dir(io->base_bio) == READ) | 1365 | if (bio_data_dir(io->base_bio) == READ) |
1363 | kcryptd_crypt_read_done(io); | 1366 | kcryptd_crypt_read_done(io); |
1364 | else | 1367 | else |
1365 | kcryptd_crypt_write_io_submit(io, 1); | 1368 | kcryptd_crypt_write_io_submit(io, 1); |
1366 | done: | ||
1367 | if (!completion_done(&ctx->restart)) | ||
1368 | complete(&ctx->restart); | ||
1369 | } | 1369 | } |
1370 | 1370 | ||
1371 | static void kcryptd_crypt(struct work_struct *work) | 1371 | static void kcryptd_crypt(struct work_struct *work) |
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index c8a18e4ee9dc..720ceeb7fa9b 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c | |||
@@ -1298,21 +1298,22 @@ static int table_load(struct dm_ioctl *param, size_t param_size) | |||
1298 | goto err_unlock_md_type; | 1298 | goto err_unlock_md_type; |
1299 | } | 1299 | } |
1300 | 1300 | ||
1301 | if (dm_get_md_type(md) == DM_TYPE_NONE) | 1301 | if (dm_get_md_type(md) == DM_TYPE_NONE) { |
1302 | /* Initial table load: acquire type of table. */ | 1302 | /* Initial table load: acquire type of table. */ |
1303 | dm_set_md_type(md, dm_table_get_type(t)); | 1303 | dm_set_md_type(md, dm_table_get_type(t)); |
1304 | else if (dm_get_md_type(md) != dm_table_get_type(t)) { | 1304 | |
1305 | /* setup md->queue to reflect md's type (may block) */ | ||
1306 | r = dm_setup_md_queue(md); | ||
1307 | if (r) { | ||
1308 | DMWARN("unable to set up device queue for new table."); | ||
1309 | goto err_unlock_md_type; | ||
1310 | } | ||
1311 | } else if (dm_get_md_type(md) != dm_table_get_type(t)) { | ||
1305 | DMWARN("can't change device type after initial table load."); | 1312 | DMWARN("can't change device type after initial table load."); |
1306 | r = -EINVAL; | 1313 | r = -EINVAL; |
1307 | goto err_unlock_md_type; | 1314 | goto err_unlock_md_type; |
1308 | } | 1315 | } |
1309 | 1316 | ||
1310 | /* setup md->queue to reflect md's type (may block) */ | ||
1311 | r = dm_setup_md_queue(md); | ||
1312 | if (r) { | ||
1313 | DMWARN("unable to set up device queue for new table."); | ||
1314 | goto err_unlock_md_type; | ||
1315 | } | ||
1316 | dm_unlock_md_type(md); | 1317 | dm_unlock_md_type(md); |
1317 | 1318 | ||
1318 | /* stage inactive table */ | 1319 | /* stage inactive table */ |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index f8c7ca3e8947..a930b72314ac 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -1082,18 +1082,26 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue) | |||
1082 | dm_put(md); | 1082 | dm_put(md); |
1083 | } | 1083 | } |
1084 | 1084 | ||
1085 | static void free_rq_clone(struct request *clone) | 1085 | static void free_rq_clone(struct request *clone, bool must_be_mapped) |
1086 | { | 1086 | { |
1087 | struct dm_rq_target_io *tio = clone->end_io_data; | 1087 | struct dm_rq_target_io *tio = clone->end_io_data; |
1088 | struct mapped_device *md = tio->md; | 1088 | struct mapped_device *md = tio->md; |
1089 | 1089 | ||
1090 | WARN_ON_ONCE(must_be_mapped && !clone->q); | ||
1091 | |||
1090 | blk_rq_unprep_clone(clone); | 1092 | blk_rq_unprep_clone(clone); |
1091 | 1093 | ||
1092 | if (clone->q->mq_ops) | 1094 | if (md->type == DM_TYPE_MQ_REQUEST_BASED) |
1095 | /* stacked on blk-mq queue(s) */ | ||
1093 | tio->ti->type->release_clone_rq(clone); | 1096 | tio->ti->type->release_clone_rq(clone); |
1094 | else if (!md->queue->mq_ops) | 1097 | else if (!md->queue->mq_ops) |
1095 | /* request_fn queue stacked on request_fn queue(s) */ | 1098 | /* request_fn queue stacked on request_fn queue(s) */ |
1096 | free_clone_request(md, clone); | 1099 | free_clone_request(md, clone); |
1100 | /* | ||
1101 | * NOTE: for the blk-mq queue stacked on request_fn queue(s) case: | ||
1102 | * no need to call free_clone_request() because we leverage blk-mq by | ||
1103 | * allocating the clone at the end of the blk-mq pdu (see: clone_rq) | ||
1104 | */ | ||
1097 | 1105 | ||
1098 | if (!md->queue->mq_ops) | 1106 | if (!md->queue->mq_ops) |
1099 | free_rq_tio(tio); | 1107 | free_rq_tio(tio); |
@@ -1124,7 +1132,7 @@ static void dm_end_request(struct request *clone, int error) | |||
1124 | rq->sense_len = clone->sense_len; | 1132 | rq->sense_len = clone->sense_len; |
1125 | } | 1133 | } |
1126 | 1134 | ||
1127 | free_rq_clone(clone); | 1135 | free_rq_clone(clone, true); |
1128 | if (!rq->q->mq_ops) | 1136 | if (!rq->q->mq_ops) |
1129 | blk_end_request_all(rq, error); | 1137 | blk_end_request_all(rq, error); |
1130 | else | 1138 | else |
@@ -1143,7 +1151,7 @@ static void dm_unprep_request(struct request *rq) | |||
1143 | } | 1151 | } |
1144 | 1152 | ||
1145 | if (clone) | 1153 | if (clone) |
1146 | free_rq_clone(clone); | 1154 | free_rq_clone(clone, false); |
1147 | } | 1155 | } |
1148 | 1156 | ||
1149 | /* | 1157 | /* |
@@ -2662,9 +2670,6 @@ static int dm_init_request_based_queue(struct mapped_device *md) | |||
2662 | { | 2670 | { |
2663 | struct request_queue *q = NULL; | 2671 | struct request_queue *q = NULL; |
2664 | 2672 | ||
2665 | if (md->queue->elevator) | ||
2666 | return 0; | ||
2667 | |||
2668 | /* Fully initialize the queue */ | 2673 | /* Fully initialize the queue */ |
2669 | q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); | 2674 | q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL); |
2670 | if (!q) | 2675 | if (!q) |
diff --git a/drivers/md/md.c b/drivers/md/md.c index d4f31e195e26..593a02476c78 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -4818,12 +4818,12 @@ static void md_free(struct kobject *ko) | |||
4818 | if (mddev->sysfs_state) | 4818 | if (mddev->sysfs_state) |
4819 | sysfs_put(mddev->sysfs_state); | 4819 | sysfs_put(mddev->sysfs_state); |
4820 | 4820 | ||
4821 | if (mddev->queue) | ||
4822 | blk_cleanup_queue(mddev->queue); | ||
4821 | if (mddev->gendisk) { | 4823 | if (mddev->gendisk) { |
4822 | del_gendisk(mddev->gendisk); | 4824 | del_gendisk(mddev->gendisk); |
4823 | put_disk(mddev->gendisk); | 4825 | put_disk(mddev->gendisk); |
4824 | } | 4826 | } |
4825 | if (mddev->queue) | ||
4826 | blk_cleanup_queue(mddev->queue); | ||
4827 | 4827 | ||
4828 | kfree(mddev); | 4828 | kfree(mddev); |
4829 | } | 4829 | } |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 2cb59a641cd2..6a68ef5246d4 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -188,8 +188,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) | |||
188 | } | 188 | } |
189 | dev[j] = rdev1; | 189 | dev[j] = rdev1; |
190 | 190 | ||
191 | disk_stack_limits(mddev->gendisk, rdev1->bdev, | 191 | if (mddev->queue) |
192 | rdev1->data_offset << 9); | 192 | disk_stack_limits(mddev->gendisk, rdev1->bdev, |
193 | rdev1->data_offset << 9); | ||
193 | 194 | ||
194 | if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) | 195 | if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) |
195 | conf->has_merge_bvec = 1; | 196 | conf->has_merge_bvec = 1; |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 77dfd720aaa0..1ba97fdc6df1 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -1078,9 +1078,6 @@ again: | |||
1078 | pr_debug("skip op %ld on disc %d for sector %llu\n", | 1078 | pr_debug("skip op %ld on disc %d for sector %llu\n", |
1079 | bi->bi_rw, i, (unsigned long long)sh->sector); | 1079 | bi->bi_rw, i, (unsigned long long)sh->sector); |
1080 | clear_bit(R5_LOCKED, &sh->dev[i].flags); | 1080 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
1081 | if (sh->batch_head) | ||
1082 | set_bit(STRIPE_BATCH_ERR, | ||
1083 | &sh->batch_head->state); | ||
1084 | set_bit(STRIPE_HANDLE, &sh->state); | 1081 | set_bit(STRIPE_HANDLE, &sh->state); |
1085 | } | 1082 | } |
1086 | 1083 | ||
@@ -1971,17 +1968,30 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) | |||
1971 | put_cpu(); | 1968 | put_cpu(); |
1972 | } | 1969 | } |
1973 | 1970 | ||
1971 | static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp) | ||
1972 | { | ||
1973 | struct stripe_head *sh; | ||
1974 | |||
1975 | sh = kmem_cache_zalloc(sc, gfp); | ||
1976 | if (sh) { | ||
1977 | spin_lock_init(&sh->stripe_lock); | ||
1978 | spin_lock_init(&sh->batch_lock); | ||
1979 | INIT_LIST_HEAD(&sh->batch_list); | ||
1980 | INIT_LIST_HEAD(&sh->lru); | ||
1981 | atomic_set(&sh->count, 1); | ||
1982 | } | ||
1983 | return sh; | ||
1984 | } | ||
1974 | static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) | 1985 | static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) |
1975 | { | 1986 | { |
1976 | struct stripe_head *sh; | 1987 | struct stripe_head *sh; |
1977 | sh = kmem_cache_zalloc(conf->slab_cache, gfp); | 1988 | |
1989 | sh = alloc_stripe(conf->slab_cache, gfp); | ||
1978 | if (!sh) | 1990 | if (!sh) |
1979 | return 0; | 1991 | return 0; |
1980 | 1992 | ||
1981 | sh->raid_conf = conf; | 1993 | sh->raid_conf = conf; |
1982 | 1994 | ||
1983 | spin_lock_init(&sh->stripe_lock); | ||
1984 | |||
1985 | if (grow_buffers(sh, gfp)) { | 1995 | if (grow_buffers(sh, gfp)) { |
1986 | shrink_buffers(sh); | 1996 | shrink_buffers(sh); |
1987 | kmem_cache_free(conf->slab_cache, sh); | 1997 | kmem_cache_free(conf->slab_cache, sh); |
@@ -1990,13 +2000,8 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) | |||
1990 | sh->hash_lock_index = | 2000 | sh->hash_lock_index = |
1991 | conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; | 2001 | conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; |
1992 | /* we just created an active stripe so... */ | 2002 | /* we just created an active stripe so... */ |
1993 | atomic_set(&sh->count, 1); | ||
1994 | atomic_inc(&conf->active_stripes); | 2003 | atomic_inc(&conf->active_stripes); |
1995 | INIT_LIST_HEAD(&sh->lru); | ||
1996 | 2004 | ||
1997 | spin_lock_init(&sh->batch_lock); | ||
1998 | INIT_LIST_HEAD(&sh->batch_list); | ||
1999 | sh->batch_head = NULL; | ||
2000 | release_stripe(sh); | 2005 | release_stripe(sh); |
2001 | conf->max_nr_stripes++; | 2006 | conf->max_nr_stripes++; |
2002 | return 1; | 2007 | return 1; |
@@ -2060,6 +2065,35 @@ static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags) | |||
2060 | return ret; | 2065 | return ret; |
2061 | } | 2066 | } |
2062 | 2067 | ||
2068 | static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) | ||
2069 | { | ||
2070 | unsigned long cpu; | ||
2071 | int err = 0; | ||
2072 | |||
2073 | mddev_suspend(conf->mddev); | ||
2074 | get_online_cpus(); | ||
2075 | for_each_present_cpu(cpu) { | ||
2076 | struct raid5_percpu *percpu; | ||
2077 | struct flex_array *scribble; | ||
2078 | |||
2079 | percpu = per_cpu_ptr(conf->percpu, cpu); | ||
2080 | scribble = scribble_alloc(new_disks, | ||
2081 | new_sectors / STRIPE_SECTORS, | ||
2082 | GFP_NOIO); | ||
2083 | |||
2084 | if (scribble) { | ||
2085 | flex_array_free(percpu->scribble); | ||
2086 | percpu->scribble = scribble; | ||
2087 | } else { | ||
2088 | err = -ENOMEM; | ||
2089 | break; | ||
2090 | } | ||
2091 | } | ||
2092 | put_online_cpus(); | ||
2093 | mddev_resume(conf->mddev); | ||
2094 | return err; | ||
2095 | } | ||
2096 | |||
2063 | static int resize_stripes(struct r5conf *conf, int newsize) | 2097 | static int resize_stripes(struct r5conf *conf, int newsize) |
2064 | { | 2098 | { |
2065 | /* Make all the stripes able to hold 'newsize' devices. | 2099 | /* Make all the stripes able to hold 'newsize' devices. |
@@ -2088,7 +2122,6 @@ static int resize_stripes(struct r5conf *conf, int newsize) | |||
2088 | struct stripe_head *osh, *nsh; | 2122 | struct stripe_head *osh, *nsh; |
2089 | LIST_HEAD(newstripes); | 2123 | LIST_HEAD(newstripes); |
2090 | struct disk_info *ndisks; | 2124 | struct disk_info *ndisks; |
2091 | unsigned long cpu; | ||
2092 | int err; | 2125 | int err; |
2093 | struct kmem_cache *sc; | 2126 | struct kmem_cache *sc; |
2094 | int i; | 2127 | int i; |
@@ -2109,13 +2142,11 @@ static int resize_stripes(struct r5conf *conf, int newsize) | |||
2109 | return -ENOMEM; | 2142 | return -ENOMEM; |
2110 | 2143 | ||
2111 | for (i = conf->max_nr_stripes; i; i--) { | 2144 | for (i = conf->max_nr_stripes; i; i--) { |
2112 | nsh = kmem_cache_zalloc(sc, GFP_KERNEL); | 2145 | nsh = alloc_stripe(sc, GFP_KERNEL); |
2113 | if (!nsh) | 2146 | if (!nsh) |
2114 | break; | 2147 | break; |
2115 | 2148 | ||
2116 | nsh->raid_conf = conf; | 2149 | nsh->raid_conf = conf; |
2117 | spin_lock_init(&nsh->stripe_lock); | ||
2118 | |||
2119 | list_add(&nsh->lru, &newstripes); | 2150 | list_add(&nsh->lru, &newstripes); |
2120 | } | 2151 | } |
2121 | if (i) { | 2152 | if (i) { |
@@ -2142,13 +2173,11 @@ static int resize_stripes(struct r5conf *conf, int newsize) | |||
2142 | lock_device_hash_lock(conf, hash)); | 2173 | lock_device_hash_lock(conf, hash)); |
2143 | osh = get_free_stripe(conf, hash); | 2174 | osh = get_free_stripe(conf, hash); |
2144 | unlock_device_hash_lock(conf, hash); | 2175 | unlock_device_hash_lock(conf, hash); |
2145 | atomic_set(&nsh->count, 1); | 2176 | |
2146 | for(i=0; i<conf->pool_size; i++) { | 2177 | for(i=0; i<conf->pool_size; i++) { |
2147 | nsh->dev[i].page = osh->dev[i].page; | 2178 | nsh->dev[i].page = osh->dev[i].page; |
2148 | nsh->dev[i].orig_page = osh->dev[i].page; | 2179 | nsh->dev[i].orig_page = osh->dev[i].page; |
2149 | } | 2180 | } |
2150 | for( ; i<newsize; i++) | ||
2151 | nsh->dev[i].page = NULL; | ||
2152 | nsh->hash_lock_index = hash; | 2181 | nsh->hash_lock_index = hash; |
2153 | kmem_cache_free(conf->slab_cache, osh); | 2182 | kmem_cache_free(conf->slab_cache, osh); |
2154 | cnt++; | 2183 | cnt++; |
@@ -2174,25 +2203,6 @@ static int resize_stripes(struct r5conf *conf, int newsize) | |||
2174 | } else | 2203 | } else |
2175 | err = -ENOMEM; | 2204 | err = -ENOMEM; |
2176 | 2205 | ||
2177 | get_online_cpus(); | ||
2178 | for_each_present_cpu(cpu) { | ||
2179 | struct raid5_percpu *percpu; | ||
2180 | struct flex_array *scribble; | ||
2181 | |||
2182 | percpu = per_cpu_ptr(conf->percpu, cpu); | ||
2183 | scribble = scribble_alloc(newsize, conf->chunk_sectors / | ||
2184 | STRIPE_SECTORS, GFP_NOIO); | ||
2185 | |||
2186 | if (scribble) { | ||
2187 | flex_array_free(percpu->scribble); | ||
2188 | percpu->scribble = scribble; | ||
2189 | } else { | ||
2190 | err = -ENOMEM; | ||
2191 | break; | ||
2192 | } | ||
2193 | } | ||
2194 | put_online_cpus(); | ||
2195 | |||
2196 | /* Step 4, return new stripes to service */ | 2206 | /* Step 4, return new stripes to service */ |
2197 | while(!list_empty(&newstripes)) { | 2207 | while(!list_empty(&newstripes)) { |
2198 | nsh = list_entry(newstripes.next, struct stripe_head, lru); | 2208 | nsh = list_entry(newstripes.next, struct stripe_head, lru); |
@@ -2212,7 +2222,8 @@ static int resize_stripes(struct r5conf *conf, int newsize) | |||
2212 | 2222 | ||
2213 | conf->slab_cache = sc; | 2223 | conf->slab_cache = sc; |
2214 | conf->active_name = 1-conf->active_name; | 2224 | conf->active_name = 1-conf->active_name; |
2215 | conf->pool_size = newsize; | 2225 | if (!err) |
2226 | conf->pool_size = newsize; | ||
2216 | return err; | 2227 | return err; |
2217 | } | 2228 | } |
2218 | 2229 | ||
@@ -2434,7 +2445,7 @@ static void raid5_end_write_request(struct bio *bi, int error) | |||
2434 | } | 2445 | } |
2435 | rdev_dec_pending(rdev, conf->mddev); | 2446 | rdev_dec_pending(rdev, conf->mddev); |
2436 | 2447 | ||
2437 | if (sh->batch_head && !uptodate) | 2448 | if (sh->batch_head && !uptodate && !replacement) |
2438 | set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); | 2449 | set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); |
2439 | 2450 | ||
2440 | if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) | 2451 | if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) |
@@ -3278,7 +3289,9 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, | |||
3278 | /* reconstruct-write isn't being forced */ | 3289 | /* reconstruct-write isn't being forced */ |
3279 | return 0; | 3290 | return 0; |
3280 | for (i = 0; i < s->failed; i++) { | 3291 | for (i = 0; i < s->failed; i++) { |
3281 | if (!test_bit(R5_UPTODATE, &fdev[i]->flags) && | 3292 | if (s->failed_num[i] != sh->pd_idx && |
3293 | s->failed_num[i] != sh->qd_idx && | ||
3294 | !test_bit(R5_UPTODATE, &fdev[i]->flags) && | ||
3282 | !test_bit(R5_OVERWRITE, &fdev[i]->flags)) | 3295 | !test_bit(R5_OVERWRITE, &fdev[i]->flags)) |
3283 | return 1; | 3296 | return 1; |
3284 | } | 3297 | } |
@@ -3298,6 +3311,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, | |||
3298 | */ | 3311 | */ |
3299 | BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); | 3312 | BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); |
3300 | BUG_ON(test_bit(R5_Wantread, &dev->flags)); | 3313 | BUG_ON(test_bit(R5_Wantread, &dev->flags)); |
3314 | BUG_ON(sh->batch_head); | ||
3301 | if ((s->uptodate == disks - 1) && | 3315 | if ((s->uptodate == disks - 1) && |
3302 | (s->failed && (disk_idx == s->failed_num[0] || | 3316 | (s->failed && (disk_idx == s->failed_num[0] || |
3303 | disk_idx == s->failed_num[1]))) { | 3317 | disk_idx == s->failed_num[1]))) { |
@@ -3366,7 +3380,6 @@ static void handle_stripe_fill(struct stripe_head *sh, | |||
3366 | { | 3380 | { |
3367 | int i; | 3381 | int i; |
3368 | 3382 | ||
3369 | BUG_ON(sh->batch_head); | ||
3370 | /* look for blocks to read/compute, skip this if a compute | 3383 | /* look for blocks to read/compute, skip this if a compute |
3371 | * is already in flight, or if the stripe contents are in the | 3384 | * is already in flight, or if the stripe contents are in the |
3372 | * midst of changing due to a write | 3385 | * midst of changing due to a write |
@@ -4198,15 +4211,9 @@ static void check_break_stripe_batch_list(struct stripe_head *sh) | |||
4198 | return; | 4211 | return; |
4199 | 4212 | ||
4200 | head_sh = sh; | 4213 | head_sh = sh; |
4201 | do { | ||
4202 | sh = list_first_entry(&sh->batch_list, | ||
4203 | struct stripe_head, batch_list); | ||
4204 | BUG_ON(sh == head_sh); | ||
4205 | } while (!test_bit(STRIPE_DEGRADED, &sh->state)); | ||
4206 | 4214 | ||
4207 | while (sh != head_sh) { | 4215 | list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { |
4208 | next = list_first_entry(&sh->batch_list, | 4216 | |
4209 | struct stripe_head, batch_list); | ||
4210 | list_del_init(&sh->batch_list); | 4217 | list_del_init(&sh->batch_list); |
4211 | 4218 | ||
4212 | set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG, | 4219 | set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG, |
@@ -4226,8 +4233,6 @@ static void check_break_stripe_batch_list(struct stripe_head *sh) | |||
4226 | 4233 | ||
4227 | set_bit(STRIPE_HANDLE, &sh->state); | 4234 | set_bit(STRIPE_HANDLE, &sh->state); |
4228 | release_stripe(sh); | 4235 | release_stripe(sh); |
4229 | |||
4230 | sh = next; | ||
4231 | } | 4236 | } |
4232 | } | 4237 | } |
4233 | 4238 | ||
@@ -6221,8 +6226,11 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu | |||
6221 | percpu->spare_page = alloc_page(GFP_KERNEL); | 6226 | percpu->spare_page = alloc_page(GFP_KERNEL); |
6222 | if (!percpu->scribble) | 6227 | if (!percpu->scribble) |
6223 | percpu->scribble = scribble_alloc(max(conf->raid_disks, | 6228 | percpu->scribble = scribble_alloc(max(conf->raid_disks, |
6224 | conf->previous_raid_disks), conf->chunk_sectors / | 6229 | conf->previous_raid_disks), |
6225 | STRIPE_SECTORS, GFP_KERNEL); | 6230 | max(conf->chunk_sectors, |
6231 | conf->prev_chunk_sectors) | ||
6232 | / STRIPE_SECTORS, | ||
6233 | GFP_KERNEL); | ||
6226 | 6234 | ||
6227 | if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { | 6235 | if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { |
6228 | free_scratch_buffer(conf, percpu); | 6236 | free_scratch_buffer(conf, percpu); |
@@ -7198,6 +7206,15 @@ static int check_reshape(struct mddev *mddev) | |||
7198 | if (!check_stripe_cache(mddev)) | 7206 | if (!check_stripe_cache(mddev)) |
7199 | return -ENOSPC; | 7207 | return -ENOSPC; |
7200 | 7208 | ||
7209 | if (mddev->new_chunk_sectors > mddev->chunk_sectors || | ||
7210 | mddev->delta_disks > 0) | ||
7211 | if (resize_chunks(conf, | ||
7212 | conf->previous_raid_disks | ||
7213 | + max(0, mddev->delta_disks), | ||
7214 | max(mddev->new_chunk_sectors, | ||
7215 | mddev->chunk_sectors) | ||
7216 | ) < 0) | ||
7217 | return -ENOMEM; | ||
7201 | return resize_stripes(conf, (conf->previous_raid_disks | 7218 | return resize_stripes(conf, (conf->previous_raid_disks |
7202 | + mddev->delta_disks)); | 7219 | + mddev->delta_disks)); |
7203 | } | 7220 | } |
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c index 9c64b5d01c6a..110fd70c7326 100644 --- a/drivers/media/platform/marvell-ccic/mcam-core.c +++ b/drivers/media/platform/marvell-ccic/mcam-core.c | |||
@@ -116,8 +116,8 @@ static struct mcam_format_struct { | |||
116 | .planar = false, | 116 | .planar = false, |
117 | }, | 117 | }, |
118 | { | 118 | { |
119 | .desc = "UYVY 4:2:2", | 119 | .desc = "YVYU 4:2:2", |
120 | .pixelformat = V4L2_PIX_FMT_UYVY, | 120 | .pixelformat = V4L2_PIX_FMT_YVYU, |
121 | .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8, | 121 | .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8, |
122 | .bpp = 2, | 122 | .bpp = 2, |
123 | .planar = false, | 123 | .planar = false, |
@@ -748,7 +748,7 @@ static void mcam_ctlr_image(struct mcam_camera *cam) | |||
748 | 748 | ||
749 | switch (fmt->pixelformat) { | 749 | switch (fmt->pixelformat) { |
750 | case V4L2_PIX_FMT_YUYV: | 750 | case V4L2_PIX_FMT_YUYV: |
751 | case V4L2_PIX_FMT_UYVY: | 751 | case V4L2_PIX_FMT_YVYU: |
752 | widthy = fmt->width * 2; | 752 | widthy = fmt->width * 2; |
753 | widthuv = 0; | 753 | widthuv = 0; |
754 | break; | 754 | break; |
@@ -784,15 +784,15 @@ static void mcam_ctlr_image(struct mcam_camera *cam) | |||
784 | case V4L2_PIX_FMT_YUV420: | 784 | case V4L2_PIX_FMT_YUV420: |
785 | case V4L2_PIX_FMT_YVU420: | 785 | case V4L2_PIX_FMT_YVU420: |
786 | mcam_reg_write_mask(cam, REG_CTRL0, | 786 | mcam_reg_write_mask(cam, REG_CTRL0, |
787 | C0_DF_YUV | C0_YUV_420PL | C0_YUVE_YVYU, C0_DF_MASK); | 787 | C0_DF_YUV | C0_YUV_420PL | C0_YUVE_VYUY, C0_DF_MASK); |
788 | break; | 788 | break; |
789 | case V4L2_PIX_FMT_YUYV: | 789 | case V4L2_PIX_FMT_YUYV: |
790 | mcam_reg_write_mask(cam, REG_CTRL0, | 790 | mcam_reg_write_mask(cam, REG_CTRL0, |
791 | C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_UYVY, C0_DF_MASK); | 791 | C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_NOSWAP, C0_DF_MASK); |
792 | break; | 792 | break; |
793 | case V4L2_PIX_FMT_UYVY: | 793 | case V4L2_PIX_FMT_YVYU: |
794 | mcam_reg_write_mask(cam, REG_CTRL0, | 794 | mcam_reg_write_mask(cam, REG_CTRL0, |
795 | C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK); | 795 | C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_SWAP24, C0_DF_MASK); |
796 | break; | 796 | break; |
797 | case V4L2_PIX_FMT_JPEG: | 797 | case V4L2_PIX_FMT_JPEG: |
798 | mcam_reg_write_mask(cam, REG_CTRL0, | 798 | mcam_reg_write_mask(cam, REG_CTRL0, |
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h index aa0c6eac254a..7ffdf4dbaf8c 100644 --- a/drivers/media/platform/marvell-ccic/mcam-core.h +++ b/drivers/media/platform/marvell-ccic/mcam-core.h | |||
@@ -330,10 +330,10 @@ int mccic_resume(struct mcam_camera *cam); | |||
330 | #define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */ | 330 | #define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */ |
331 | #define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */ | 331 | #define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */ |
332 | #define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */ | 332 | #define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */ |
333 | #define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */ | 333 | #define C0_YUVE_NOSWAP 0x00000000 /* no bytes swapping */ |
334 | #define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */ | 334 | #define C0_YUVE_SWAP13 0x00010000 /* swap byte 1 and 3 */ |
335 | #define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */ | 335 | #define C0_YUVE_SWAP24 0x00020000 /* swap byte 2 and 4 */ |
336 | #define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */ | 336 | #define C0_YUVE_SWAP1324 0x00030000 /* swap bytes 1&3 and 2&4 */ |
337 | /* Bayer bits 18,19 if needed */ | 337 | /* Bayer bits 18,19 if needed */ |
338 | #define C0_EOF_VSYNC 0x00400000 /* Generate EOF by VSYNC */ | 338 | #define C0_EOF_VSYNC 0x00400000 /* Generate EOF by VSYNC */ |
339 | #define C0_VEDGE_CTRL 0x00800000 /* Detect falling edge of VSYNC */ | 339 | #define C0_VEDGE_CTRL 0x00800000 /* Detect falling edge of VSYNC */ |
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c index 9351f64dee7b..6460f8e1b07f 100644 --- a/drivers/media/platform/soc_camera/rcar_vin.c +++ b/drivers/media/platform/soc_camera/rcar_vin.c | |||
@@ -135,6 +135,8 @@ | |||
135 | #define VIN_MAX_WIDTH 2048 | 135 | #define VIN_MAX_WIDTH 2048 |
136 | #define VIN_MAX_HEIGHT 2048 | 136 | #define VIN_MAX_HEIGHT 2048 |
137 | 137 | ||
138 | #define TIMEOUT_MS 100 | ||
139 | |||
138 | enum chip_id { | 140 | enum chip_id { |
139 | RCAR_GEN2, | 141 | RCAR_GEN2, |
140 | RCAR_H1, | 142 | RCAR_H1, |
@@ -820,7 +822,10 @@ static void rcar_vin_wait_stop_streaming(struct rcar_vin_priv *priv) | |||
820 | if (priv->state == STOPPING) { | 822 | if (priv->state == STOPPING) { |
821 | priv->request_to_stop = true; | 823 | priv->request_to_stop = true; |
822 | spin_unlock_irq(&priv->lock); | 824 | spin_unlock_irq(&priv->lock); |
823 | wait_for_completion(&priv->capture_stop); | 825 | if (!wait_for_completion_timeout( |
826 | &priv->capture_stop, | ||
827 | msecs_to_jiffies(TIMEOUT_MS))) | ||
828 | priv->state = STOPPED; | ||
824 | spin_lock_irq(&priv->lock); | 829 | spin_lock_irq(&priv->lock); |
825 | } | 830 | } |
826 | } | 831 | } |
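The rcar_vin change above stops trusting the hardware to always signal capture-stop and bounds the wait with wait_for_completion_timeout(). A small sketch of that pattern, using a placeholder completion and timeout rather than the driver's own state machine:

    #include <linux/completion.h>
    #include <linux/jiffies.h>

    /* Returns true if the event arrived, false if we gave up after timeout_ms. */
    static bool demo_wait_for_stop(struct completion *capture_stop,
                                   unsigned int timeout_ms)
    {
            /* wait_for_completion_timeout() returns 0 on timeout,
             * otherwise the number of jiffies remaining. */
            return wait_for_completion_timeout(capture_stop,
                                               msecs_to_jiffies(timeout_ms)) != 0;
    }

On timeout the driver above simply forces its state to STOPPED instead of blocking forever under a lock.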
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 2c25271f8c41..60f7141a6b02 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
@@ -1029,6 +1029,18 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) | |||
1029 | md->reset_done &= ~type; | 1029 | md->reset_done &= ~type; |
1030 | } | 1030 | } |
1031 | 1031 | ||
1032 | int mmc_access_rpmb(struct mmc_queue *mq) | ||
1033 | { | ||
1034 | struct mmc_blk_data *md = mq->data; | ||
1035 | /* | ||
1036 | * If this is an RPMB partition access, return true | ||
1037 | */ | ||
1038 | if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) | ||
1039 | return true; | ||
1040 | |||
1041 | return false; | ||
1042 | } | ||
1043 | |||
1032 | static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) | 1044 | static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) |
1033 | { | 1045 | { |
1034 | struct mmc_blk_data *md = mq->data; | 1046 | struct mmc_blk_data *md = mq->data; |
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 236d194c2883..8efa3684aef8 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c | |||
@@ -38,7 +38,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) | |||
38 | return BLKPREP_KILL; | 38 | return BLKPREP_KILL; |
39 | } | 39 | } |
40 | 40 | ||
41 | if (mq && mmc_card_removed(mq->card)) | 41 | if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq))) |
42 | return BLKPREP_KILL; | 42 | return BLKPREP_KILL; |
43 | 43 | ||
44 | req->cmd_flags |= REQ_DONTPREP; | 44 | req->cmd_flags |= REQ_DONTPREP; |
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index 5752d50049a3..99e6521e6169 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h | |||
@@ -73,4 +73,6 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *); | |||
73 | extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *); | 73 | extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *); |
74 | extern void mmc_packed_clean(struct mmc_queue *); | 74 | extern void mmc_packed_clean(struct mmc_queue *); |
75 | 75 | ||
76 | extern int mmc_access_rpmb(struct mmc_queue *); | ||
77 | |||
76 | #endif | 78 | #endif |
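The block.c/queue.c changes above keep ordinary block requests away from the RPMB partition by failing request preparation whenever the queue currently points at it. A sketch of the underlying test, assuming only the recorded partition type is needed (EXT_CSD_PART_CONFIG_ACC_RPMB is taken from <linux/mmc/mmc.h>):

    #include <linux/mmc/mmc.h>      /* EXT_CSD_PART_CONFIG_ACC_RPMB */
    #include <linux/types.h>

    /* RPMB is identified purely by the partition type noted at the last
     * partition switch; such accesses must use the authenticated ioctl
     * path, never the regular block I/O path. */
    static bool demo_is_rpmb_partition(unsigned int part_type)
    {
            return part_type == EXT_CSD_PART_CONFIG_ACC_RPMB;
    }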
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index c296bc098fe2..92e7671426eb 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c | |||
@@ -2651,6 +2651,7 @@ int mmc_pm_notify(struct notifier_block *notify_block, | |||
2651 | switch (mode) { | 2651 | switch (mode) { |
2652 | case PM_HIBERNATION_PREPARE: | 2652 | case PM_HIBERNATION_PREPARE: |
2653 | case PM_SUSPEND_PREPARE: | 2653 | case PM_SUSPEND_PREPARE: |
2654 | case PM_RESTORE_PREPARE: | ||
2654 | spin_lock_irqsave(&host->lock, flags); | 2655 | spin_lock_irqsave(&host->lock, flags); |
2655 | host->rescan_disable = 1; | 2656 | host->rescan_disable = 1; |
2656 | spin_unlock_irqrestore(&host->lock, flags); | 2657 | spin_unlock_irqrestore(&host->lock, flags); |
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 38b29265cc7c..5f5adafb253a 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -589,9 +589,11 @@ static int dw_mci_idmac_init(struct dw_mci *host) | |||
589 | host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); | 589 | host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); |
590 | 590 | ||
591 | /* Forward link the descriptor list */ | 591 | /* Forward link the descriptor list */ |
592 | for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) | 592 | for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) { |
593 | p->des3 = cpu_to_le32(host->sg_dma + | 593 | p->des3 = cpu_to_le32(host->sg_dma + |
594 | (sizeof(struct idmac_desc) * (i + 1))); | 594 | (sizeof(struct idmac_desc) * (i + 1))); |
595 | p->des1 = 0; | ||
596 | } | ||
595 | 597 | ||
596 | /* Set the last descriptor as the end-of-ring descriptor */ | 598 | /* Set the last descriptor as the end-of-ring descriptor */ |
597 | p->des3 = cpu_to_le32(host->sg_dma); | 599 | p->des3 = cpu_to_le32(host->sg_dma); |
@@ -1300,7 +1302,8 @@ static int dw_mci_get_cd(struct mmc_host *mmc) | |||
1300 | int gpio_cd = mmc_gpio_get_cd(mmc); | 1302 | int gpio_cd = mmc_gpio_get_cd(mmc); |
1301 | 1303 | ||
1302 | /* Use platform get_cd function, else try onboard card detect */ | 1304 | /* Use platform get_cd function, else try onboard card detect */ |
1303 | if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) | 1305 | if ((brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) || |
1306 | (mmc->caps & MMC_CAP_NONREMOVABLE)) | ||
1304 | present = 1; | 1307 | present = 1; |
1305 | else if (!IS_ERR_VALUE(gpio_cd)) | 1308 | else if (!IS_ERR_VALUE(gpio_cd)) |
1306 | present = gpio_cd; | 1309 | present = gpio_cd; |
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 2b6ef6bd5d5f..7eff087cf515 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c | |||
@@ -1408,7 +1408,7 @@ static int sh_mmcif_probe(struct platform_device *pdev) | |||
1408 | host = mmc_priv(mmc); | 1408 | host = mmc_priv(mmc); |
1409 | host->mmc = mmc; | 1409 | host->mmc = mmc; |
1410 | host->addr = reg; | 1410 | host->addr = reg; |
1411 | host->timeout = msecs_to_jiffies(1000); | 1411 | host->timeout = msecs_to_jiffies(10000); |
1412 | host->ccs_enable = !pd || !pd->ccs_unsupported; | 1412 | host->ccs_enable = !pd || !pd->ccs_unsupported; |
1413 | host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present; | 1413 | host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present; |
1414 | 1414 | ||
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 78dde56ae6e6..d5fe5d5f490f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -82,6 +82,8 @@ | |||
82 | #include <net/bond_3ad.h> | 82 | #include <net/bond_3ad.h> |
83 | #include <net/bond_alb.h> | 83 | #include <net/bond_alb.h> |
84 | 84 | ||
85 | #include "bonding_priv.h" | ||
86 | |||
85 | /*---------------------------- Module parameters ----------------------------*/ | 87 | /*---------------------------- Module parameters ----------------------------*/ |
86 | 88 | ||
87 | /* monitor all links that often (in milliseconds). <=0 disables monitoring */ | 89 | /* monitor all links that often (in milliseconds). <=0 disables monitoring */ |
@@ -4542,6 +4544,8 @@ unsigned int bond_get_num_tx_queues(void) | |||
4542 | int bond_create(struct net *net, const char *name) | 4544 | int bond_create(struct net *net, const char *name) |
4543 | { | 4545 | { |
4544 | struct net_device *bond_dev; | 4546 | struct net_device *bond_dev; |
4547 | struct bonding *bond; | ||
4548 | struct alb_bond_info *bond_info; | ||
4545 | int res; | 4549 | int res; |
4546 | 4550 | ||
4547 | rtnl_lock(); | 4551 | rtnl_lock(); |
@@ -4555,6 +4559,14 @@ int bond_create(struct net *net, const char *name) | |||
4555 | return -ENOMEM; | 4559 | return -ENOMEM; |
4556 | } | 4560 | } |
4557 | 4561 | ||
4562 | /* | ||
4563 | * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX. | ||
4564 | * It is set to 0 by default, which is wrong. | ||
4565 | */ | ||
4566 | bond = netdev_priv(bond_dev); | ||
4567 | bond_info = &(BOND_ALB_INFO(bond)); | ||
4568 | bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX; | ||
4569 | |||
4558 | dev_net_set(bond_dev, net); | 4570 | dev_net_set(bond_dev, net); |
4559 | bond_dev->rtnl_link_ops = &bond_link_ops; | 4571 | bond_dev->rtnl_link_ops = &bond_link_ops; |
4560 | 4572 | ||
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c index 62694cfc05b6..b20b35acb47d 100644 --- a/drivers/net/bonding/bond_procfs.c +++ b/drivers/net/bonding/bond_procfs.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <net/netns/generic.h> | 4 | #include <net/netns/generic.h> |
5 | #include <net/bonding.h> | 5 | #include <net/bonding.h> |
6 | 6 | ||
7 | #include "bonding_priv.h" | ||
7 | 8 | ||
8 | static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) | 9 | static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) |
9 | __acquires(RCU) | 10 | __acquires(RCU) |
diff --git a/drivers/net/bonding/bonding_priv.h b/drivers/net/bonding/bonding_priv.h new file mode 100644 index 000000000000..5a4d81a9437c --- /dev/null +++ b/drivers/net/bonding/bonding_priv.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'. | ||
3 | * | ||
4 | * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes | ||
5 | * NCM: Network and Communications Management, Inc. | ||
6 | * | ||
7 | * BUT, I'm the one who modified it for ethernet, so: | ||
8 | * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov | ||
9 | * | ||
10 | * This software may be used and distributed according to the terms | ||
11 | * of the GNU Public License, incorporated herein by reference. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #ifndef _BONDING_PRIV_H | ||
16 | #define _BONDING_PRIV_H | ||
17 | |||
18 | #define DRV_VERSION "3.7.1" | ||
19 | #define DRV_RELDATE "April 27, 2011" | ||
20 | #define DRV_NAME "bonding" | ||
21 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" | ||
22 | |||
23 | #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" | ||
24 | |||
25 | #endif | ||
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 58808f651452..e8c96b8e86f4 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig | |||
@@ -112,7 +112,7 @@ config PCH_CAN | |||
112 | 112 | ||
113 | config CAN_GRCAN | 113 | config CAN_GRCAN |
114 | tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices" | 114 | tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices" |
115 | depends on OF | 115 | depends on OF && HAS_DMA |
116 | ---help--- | 116 | ---help--- |
117 | Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN. | 117 | Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN. |
118 | Note that the driver supports little endian, even though little | 118 | Note that the driver supports little endian, even though little |
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 4643914859b2..8b17a9065b0b 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c | |||
@@ -1102,7 +1102,7 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, | |||
1102 | 1102 | ||
1103 | if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | | 1103 | if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | |
1104 | MSG_FLAG_NERR)) { | 1104 | MSG_FLAG_NERR)) { |
1105 | netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n", | 1105 | netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n", |
1106 | msg->u.rx_can_header.flag); | 1106 | msg->u.rx_can_header.flag); |
1107 | 1107 | ||
1108 | stats->rx_errors++; | 1108 | stats->rx_errors++; |
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c index b36ee9e0d220..d686b9cac29f 100644 --- a/drivers/net/ethernet/8390/etherh.c +++ b/drivers/net/ethernet/8390/etherh.c | |||
@@ -523,7 +523,7 @@ static int etherh_addr(char *addr, struct expansion_card *ec) | |||
523 | char *s; | 523 | char *s; |
524 | 524 | ||
525 | if (!ecard_readchunk(&cd, ec, 0xf5, 0)) { | 525 | if (!ecard_readchunk(&cd, ec, 0xf5, 0)) { |
526 | printk(KERN_ERR "%s: unable to read podule description string\n", | 526 | printk(KERN_ERR "%s: unable to read module description string\n", |
527 | dev_name(&ec->dev)); | 527 | dev_name(&ec->dev)); |
528 | goto no_addr; | 528 | goto no_addr; |
529 | } | 529 | } |
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h index eba070f16782..89cd11d86642 100644 --- a/drivers/net/ethernet/altera/altera_msgdmahw.h +++ b/drivers/net/ethernet/altera/altera_msgdmahw.h | |||
@@ -58,15 +58,12 @@ struct msgdma_extended_desc { | |||
58 | /* Tx buffer control flags | 58 | /* Tx buffer control flags |
59 | */ | 59 | */ |
60 | #define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \ | 60 | #define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \ |
61 | MSGDMA_DESC_CTL_TR_ERR_IRQ | \ | ||
62 | MSGDMA_DESC_CTL_GO) | 61 | MSGDMA_DESC_CTL_GO) |
63 | 62 | ||
64 | #define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \ | 63 | #define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_GO) |
65 | MSGDMA_DESC_CTL_GO) | ||
66 | 64 | ||
67 | #define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \ | 65 | #define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \ |
68 | MSGDMA_DESC_CTL_TR_COMP_IRQ | \ | 66 | MSGDMA_DESC_CTL_TR_COMP_IRQ | \ |
69 | MSGDMA_DESC_CTL_TR_ERR_IRQ | \ | ||
70 | MSGDMA_DESC_CTL_GO) | 67 | MSGDMA_DESC_CTL_GO) |
71 | 68 | ||
72 | #define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \ | 69 | #define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \ |
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 90a76306ad0f..da48e66377b5 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c | |||
@@ -391,6 +391,12 @@ static int tse_rx(struct altera_tse_private *priv, int limit) | |||
391 | "RCV pktstatus %08X pktlength %08X\n", | 391 | "RCV pktstatus %08X pktlength %08X\n", |
392 | pktstatus, pktlength); | 392 | pktstatus, pktlength); |
393 | 393 | ||
394 | /* DMA transfer from TSE starts with 2 additional bytes for | ||
395 | * IP payload alignment. Status returned by get_rx_status() | ||
396 | * contains DMA transfer length. Packet is 2 bytes shorter. | ||
397 | */ | ||
398 | pktlength -= 2; | ||
399 | |||
394 | count++; | 400 | count++; |
395 | next_entry = (++priv->rx_cons) % priv->rx_ring_size; | 401 | next_entry = (++priv->rx_cons) % priv->rx_ring_size; |
396 | 402 | ||
@@ -777,6 +783,8 @@ static int init_phy(struct net_device *dev) | |||
777 | struct altera_tse_private *priv = netdev_priv(dev); | 783 | struct altera_tse_private *priv = netdev_priv(dev); |
778 | struct phy_device *phydev; | 784 | struct phy_device *phydev; |
779 | struct device_node *phynode; | 785 | struct device_node *phynode; |
786 | bool fixed_link = false; | ||
787 | int rc = 0; | ||
780 | 788 | ||
781 | /* Avoid init phy in case of no phy present */ | 789 | /* Avoid init phy in case of no phy present */ |
782 | if (!priv->phy_iface) | 790 | if (!priv->phy_iface) |
@@ -789,13 +797,32 @@ static int init_phy(struct net_device *dev) | |||
789 | phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0); | 797 | phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0); |
790 | 798 | ||
791 | if (!phynode) { | 799 | if (!phynode) { |
792 | netdev_dbg(dev, "no phy-handle found\n"); | 800 | /* check if a fixed-link is defined in device-tree */ |
793 | if (!priv->mdio) { | 801 | if (of_phy_is_fixed_link(priv->device->of_node)) { |
794 | netdev_err(dev, | 802 | rc = of_phy_register_fixed_link(priv->device->of_node); |
795 | "No phy-handle nor local mdio specified\n"); | 803 | if (rc < 0) { |
796 | return -ENODEV; | 804 | netdev_err(dev, "cannot register fixed PHY\n"); |
805 | return rc; | ||
806 | } | ||
807 | |||
808 | /* In the case of a fixed PHY, the DT node associated | ||
809 | * with the PHY is the Ethernet MAC DT node. | ||
810 | */ | ||
811 | phynode = of_node_get(priv->device->of_node); | ||
812 | fixed_link = true; | ||
813 | |||
814 | netdev_dbg(dev, "fixed-link detected\n"); | ||
815 | phydev = of_phy_connect(dev, phynode, | ||
816 | &altera_tse_adjust_link, | ||
817 | 0, priv->phy_iface); | ||
818 | } else { | ||
819 | netdev_dbg(dev, "no phy-handle found\n"); | ||
820 | if (!priv->mdio) { | ||
821 | netdev_err(dev, "No phy-handle nor local mdio specified\n"); | ||
822 | return -ENODEV; | ||
823 | } | ||
824 | phydev = connect_local_phy(dev); | ||
797 | } | 825 | } |
798 | phydev = connect_local_phy(dev); | ||
799 | } else { | 826 | } else { |
800 | netdev_dbg(dev, "phy-handle found\n"); | 827 | netdev_dbg(dev, "phy-handle found\n"); |
801 | phydev = of_phy_connect(dev, phynode, | 828 | phydev = of_phy_connect(dev, phynode, |
@@ -819,10 +846,10 @@ static int init_phy(struct net_device *dev) | |||
819 | /* Broken HW is sometimes missing the pull-up resistor on the | 846 | /* Broken HW is sometimes missing the pull-up resistor on the |
820 | * MDIO line, which results in reads to non-existent devices returning | 847 | * MDIO line, which results in reads to non-existent devices returning |
821 | * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent | 848 | * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent |
822 | * device as well. | 849 | * device as well. If a fixed-link is used, the phy_id is always 0. |
823 | * Note: phydev->phy_id is the result of reading the UID PHY registers. | 850 | * Note: phydev->phy_id is the result of reading the UID PHY registers. |
824 | */ | 851 | */ |
825 | if (phydev->phy_id == 0) { | 852 | if ((phydev->phy_id == 0) && !fixed_link) { |
826 | netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id); | 853 | netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id); |
827 | phy_disconnect(phydev); | 854 | phy_disconnect(phydev); |
828 | return -ENODEV; | 855 | return -ENODEV; |
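The altera_tse change above adds a devicetree "fixed-link" fallback when no phy-handle is present. A minimal sketch of that of_mdio pattern, assuming a generic adjust-link callback and omitting the driver's error unwinding (the demo_* names are placeholders):

    #include <linux/netdevice.h>
    #include <linux/of.h>
    #include <linux/of_mdio.h>
    #include <linux/phy.h>

    static struct phy_device *demo_connect_phy(struct net_device *ndev,
                                               struct device_node *np,
                                               phy_interface_t iface,
                                               void (*adjust_link)(struct net_device *))
    {
            struct device_node *phynode = of_parse_phandle(np, "phy-handle", 0);

            if (!phynode && of_phy_is_fixed_link(np)) {
                    if (of_phy_register_fixed_link(np) < 0)
                            return NULL;
                    /* for a fixed link, the MAC node itself describes the PHY */
                    phynode = of_node_get(np);
            }
            if (!phynode)
                    return NULL;    /* caller may fall back to a local MDIO scan */

            return of_phy_connect(ndev, phynode, adjust_link, 0, iface);
    }

Because a fixed PHY legitimately reports phy_id 0, the patch also exempts that case from the "bad PHY UID" sanity check.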
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index c638c85f3954..089c269637b7 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig | |||
@@ -179,7 +179,7 @@ config SUNLANCE | |||
179 | 179 | ||
180 | config AMD_XGBE | 180 | config AMD_XGBE |
181 | tristate "AMD 10GbE Ethernet driver" | 181 | tristate "AMD 10GbE Ethernet driver" |
182 | depends on (OF_NET || ACPI) && HAS_IOMEM | 182 | depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA |
183 | select PHYLIB | 183 | select PHYLIB |
184 | select AMD_XGBE_PHY | 184 | select AMD_XGBE_PHY |
185 | select BITREVERSE | 185 | select BITREVERSE |
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig index 8e262e2b39b6..dea29ee24da4 100644 --- a/drivers/net/ethernet/arc/Kconfig +++ b/drivers/net/ethernet/arc/Kconfig | |||
@@ -25,8 +25,7 @@ config ARC_EMAC_CORE | |||
25 | config ARC_EMAC | 25 | config ARC_EMAC |
26 | tristate "ARC EMAC support" | 26 | tristate "ARC EMAC support" |
27 | select ARC_EMAC_CORE | 27 | select ARC_EMAC_CORE |
28 | depends on OF_IRQ | 28 | depends on OF_IRQ && OF_NET && HAS_DMA |
29 | depends on OF_NET | ||
30 | ---help--- | 29 | ---help--- |
31 | On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x | 30 | On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x |
32 | non-standard on-chip ethernet device ARC EMAC 10/100 is used. | 31 | non-standard on-chip ethernet device ARC EMAC 10/100 is used. |
@@ -35,7 +34,7 @@ config ARC_EMAC | |||
35 | config EMAC_ROCKCHIP | 34 | config EMAC_ROCKCHIP |
36 | tristate "Rockchip EMAC support" | 35 | tristate "Rockchip EMAC support" |
37 | select ARC_EMAC_CORE | 36 | select ARC_EMAC_CORE |
38 | depends on OF_IRQ && OF_NET && REGULATOR | 37 | depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA |
39 | ---help--- | 38 | ---help--- |
40 | Support for Rockchip RK3066/RK3188 EMAC ethernet controllers. | 39 | Support for Rockchip RK3066/RK3188 EMAC ethernet controllers. |
41 | This selects Rockchip SoC glue layer support for the | 40 | This selects Rockchip SoC glue layer support for the |
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h index 74df16aef793..88a6271de5bc 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h | |||
@@ -129,7 +129,7 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw); | |||
129 | #define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 | 129 | #define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 |
130 | #define TWSI_CTRL_SW_LDSTART 0x800 | 130 | #define TWSI_CTRL_SW_LDSTART 0x800 |
131 | #define TWSI_CTRL_HW_LDSTART 0x1000 | 131 | #define TWSI_CTRL_HW_LDSTART 0x1000 |
132 | #define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x0x7F | 132 | #define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F |
133 | #define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 | 133 | #define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 |
134 | #define TWSI_CTRL_LD_EXIST 0x400000 | 134 | #define TWSI_CTRL_LD_EXIST 0x400000 |
135 | #define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 | 135 | #define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 7e3d87a88c76..e2c043eabbf3 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h | |||
@@ -543,7 +543,7 @@ struct bcm_sysport_tx_counters { | |||
543 | u32 jbr; /* RO # of xmited jabber count*/ | 543 | u32 jbr; /* RO # of xmited jabber count*/ |
544 | u32 bytes; /* RO # of xmited byte count */ | 544 | u32 bytes; /* RO # of xmited byte count */ |
545 | u32 pok; /* RO # of xmited good pkt */ | 545 | u32 pok; /* RO # of xmited good pkt */ |
546 | u32 uc; /* RO (0x0x4f0)# of xmited unitcast pkt */ | 546 | u32 uc; /* RO (0x4f0) # of xmited unicast pkt */ |
547 | }; | 547 | }; |
548 | 548 | ||
549 | struct bcm_sysport_mib { | 549 | struct bcm_sysport_mib { |
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index de77d3a74abc..21e3c38c7c75 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c | |||
@@ -1260,7 +1260,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight) | |||
1260 | 1260 | ||
1261 | /* Poll again if more events arrived in the meantime */ | 1261 | /* Poll again if more events arrived in the meantime */ |
1262 | if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX)) | 1262 | if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX)) |
1263 | return handled; | 1263 | return weight; |
1264 | 1264 | ||
1265 | if (handled < weight) { | 1265 | if (handled < weight) { |
1266 | napi_complete(napi); | 1266 | napi_complete(napi); |
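The bgmac fix above returns the full weight from the poll callback when more events are pending, instead of the partial count, so NAPI stays scheduled. A sketch of that contract with stubbed-out driver internals (demo_rx(), demo_irq_pending() and demo_enable_irqs() stand in for the real routines):

    #include <linux/netdevice.h>

    static int demo_rx(struct napi_struct *napi, int budget) { return 0; }
    static bool demo_irq_pending(struct napi_struct *napi) { return false; }
    static void demo_enable_irqs(struct napi_struct *napi) { }

    static int demo_poll(struct napi_struct *napi, int budget)
    {
            int handled = demo_rx(napi, budget);

            /* more events arrived meanwhile: report the full budget so the
             * core keeps polling rather than treating the queue as drained */
            if (demo_irq_pending(napi))
                    return budget;

            if (handled < budget) {
                    napi_complete(napi);
                    demo_enable_irqs(napi); /* interrupts only once NAPI is done */
            }
            return handled;
    }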
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 355d5fea5be9..a3b0f7a0c61e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
@@ -521,6 +521,7 @@ struct bnx2x_fp_txdata { | |||
521 | }; | 521 | }; |
522 | 522 | ||
523 | enum bnx2x_tpa_mode_t { | 523 | enum bnx2x_tpa_mode_t { |
524 | TPA_MODE_DISABLED, | ||
524 | TPA_MODE_LRO, | 525 | TPA_MODE_LRO, |
525 | TPA_MODE_GRO | 526 | TPA_MODE_GRO |
526 | }; | 527 | }; |
@@ -589,7 +590,6 @@ struct bnx2x_fastpath { | |||
589 | 590 | ||
590 | /* TPA related */ | 591 | /* TPA related */ |
591 | struct bnx2x_agg_info *tpa_info; | 592 | struct bnx2x_agg_info *tpa_info; |
592 | u8 disable_tpa; | ||
593 | #ifdef BNX2X_STOP_ON_ERROR | 593 | #ifdef BNX2X_STOP_ON_ERROR |
594 | u64 tpa_queue_used; | 594 | u64 tpa_queue_used; |
595 | #endif | 595 | #endif |
@@ -1545,9 +1545,7 @@ struct bnx2x { | |||
1545 | #define USING_MSIX_FLAG (1 << 5) | 1545 | #define USING_MSIX_FLAG (1 << 5) |
1546 | #define USING_MSI_FLAG (1 << 6) | 1546 | #define USING_MSI_FLAG (1 << 6) |
1547 | #define DISABLE_MSI_FLAG (1 << 7) | 1547 | #define DISABLE_MSI_FLAG (1 << 7) |
1548 | #define TPA_ENABLE_FLAG (1 << 8) | ||
1549 | #define NO_MCP_FLAG (1 << 9) | 1548 | #define NO_MCP_FLAG (1 << 9) |
1550 | #define GRO_ENABLE_FLAG (1 << 10) | ||
1551 | #define MF_FUNC_DIS (1 << 11) | 1549 | #define MF_FUNC_DIS (1 << 11) |
1552 | #define OWN_CNIC_IRQ (1 << 12) | 1550 | #define OWN_CNIC_IRQ (1 << 12) |
1553 | #define NO_ISCSI_OOO_FLAG (1 << 13) | 1551 | #define NO_ISCSI_OOO_FLAG (1 << 13) |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 2f63467bce46..a8bb8f664d3d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -947,10 +947,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) | |||
947 | u16 frag_size, pages; | 947 | u16 frag_size, pages; |
948 | #ifdef BNX2X_STOP_ON_ERROR | 948 | #ifdef BNX2X_STOP_ON_ERROR |
949 | /* sanity check */ | 949 | /* sanity check */ |
950 | if (fp->disable_tpa && | 950 | if (fp->mode == TPA_MODE_DISABLED && |
951 | (CQE_TYPE_START(cqe_fp_type) || | 951 | (CQE_TYPE_START(cqe_fp_type) || |
952 | CQE_TYPE_STOP(cqe_fp_type))) | 952 | CQE_TYPE_STOP(cqe_fp_type))) |
953 | BNX2X_ERR("START/STOP packet while disable_tpa type %x\n", | 953 | BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n", |
954 | CQE_TYPE(cqe_fp_type)); | 954 | CQE_TYPE(cqe_fp_type)); |
955 | #endif | 955 | #endif |
956 | 956 | ||
@@ -1396,7 +1396,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
1396 | DP(NETIF_MSG_IFUP, | 1396 | DP(NETIF_MSG_IFUP, |
1397 | "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); | 1397 | "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size); |
1398 | 1398 | ||
1399 | if (!fp->disable_tpa) { | 1399 | if (fp->mode != TPA_MODE_DISABLED) { |
1400 | /* Fill the per-aggregation pool */ | 1400 | /* Fill the per-aggregation pool */ |
1401 | for (i = 0; i < MAX_AGG_QS(bp); i++) { | 1401 | for (i = 0; i < MAX_AGG_QS(bp); i++) { |
1402 | struct bnx2x_agg_info *tpa_info = | 1402 | struct bnx2x_agg_info *tpa_info = |
@@ -1410,7 +1410,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
1410 | BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n", | 1410 | BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n", |
1411 | j); | 1411 | j); |
1412 | bnx2x_free_tpa_pool(bp, fp, i); | 1412 | bnx2x_free_tpa_pool(bp, fp, i); |
1413 | fp->disable_tpa = 1; | 1413 | fp->mode = TPA_MODE_DISABLED; |
1414 | break; | 1414 | break; |
1415 | } | 1415 | } |
1416 | dma_unmap_addr_set(first_buf, mapping, 0); | 1416 | dma_unmap_addr_set(first_buf, mapping, 0); |
@@ -1438,7 +1438,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp) | |||
1438 | ring_prod); | 1438 | ring_prod); |
1439 | bnx2x_free_tpa_pool(bp, fp, | 1439 | bnx2x_free_tpa_pool(bp, fp, |
1440 | MAX_AGG_QS(bp)); | 1440 | MAX_AGG_QS(bp)); |
1441 | fp->disable_tpa = 1; | 1441 | fp->mode = TPA_MODE_DISABLED; |
1442 | ring_prod = 0; | 1442 | ring_prod = 0; |
1443 | break; | 1443 | break; |
1444 | } | 1444 | } |
@@ -1560,7 +1560,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp) | |||
1560 | 1560 | ||
1561 | bnx2x_free_rx_bds(fp); | 1561 | bnx2x_free_rx_bds(fp); |
1562 | 1562 | ||
1563 | if (!fp->disable_tpa) | 1563 | if (fp->mode != TPA_MODE_DISABLED) |
1564 | bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); | 1564 | bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp)); |
1565 | } | 1565 | } |
1566 | } | 1566 | } |
@@ -2477,17 +2477,19 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) | |||
2477 | /* set the tpa flag for each queue. The tpa flag determines the queue | 2477 | /* set the tpa flag for each queue. The tpa flag determines the queue |
2478 | * minimal size so it must be set prior to queue memory allocation | 2478 | * minimal size so it must be set prior to queue memory allocation |
2479 | */ | 2479 | */ |
2480 | fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG || | 2480 | if (bp->dev->features & NETIF_F_LRO) |
2481 | (bp->flags & GRO_ENABLE_FLAG && | ||
2482 | bnx2x_mtu_allows_gro(bp->dev->mtu))); | ||
2483 | if (bp->flags & TPA_ENABLE_FLAG) | ||
2484 | fp->mode = TPA_MODE_LRO; | 2481 | fp->mode = TPA_MODE_LRO; |
2485 | else if (bp->flags & GRO_ENABLE_FLAG) | 2482 | else if (bp->dev->features & NETIF_F_GRO && |
2483 | bnx2x_mtu_allows_gro(bp->dev->mtu)) | ||
2486 | fp->mode = TPA_MODE_GRO; | 2484 | fp->mode = TPA_MODE_GRO; |
2485 | else | ||
2486 | fp->mode = TPA_MODE_DISABLED; | ||
2487 | 2487 | ||
2488 | /* We don't want TPA on an FCoE L2 ring */ | 2488 | /* We don't want TPA if it's disabled in bp |
2489 | if (IS_FCOE_FP(fp)) | 2489 | * or if this is an FCoE L2 ring. |
2490 | fp->disable_tpa = 1; | 2490 | */ |
2491 | if (bp->disable_tpa || IS_FCOE_FP(fp)) | ||
2492 | fp->mode = TPA_MODE_DISABLED; | ||
2491 | } | 2493 | } |
2492 | 2494 | ||
2493 | int bnx2x_load_cnic(struct bnx2x *bp) | 2495 | int bnx2x_load_cnic(struct bnx2x *bp) |
@@ -2608,7 +2610,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
2608 | /* | 2610 | /* |
2609 | * Zero fastpath structures preserving invariants like napi, which are | 2611 | * Zero fastpath structures preserving invariants like napi, which are |
2610 | * allocated only once, fp index, max_cos, bp pointer. | 2612 | * allocated only once, fp index, max_cos, bp pointer. |
2611 | * Also set fp->disable_tpa and txdata_ptr. | 2613 | * Also set fp->mode and txdata_ptr. |
2612 | */ | 2614 | */ |
2613 | DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); | 2615 | DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); |
2614 | for_each_queue(bp, i) | 2616 | for_each_queue(bp, i) |
@@ -3247,7 +3249,7 @@ int bnx2x_low_latency_recv(struct napi_struct *napi) | |||
3247 | 3249 | ||
3248 | if ((bp->state == BNX2X_STATE_CLOSED) || | 3250 | if ((bp->state == BNX2X_STATE_CLOSED) || |
3249 | (bp->state == BNX2X_STATE_ERROR) || | 3251 | (bp->state == BNX2X_STATE_ERROR) || |
3250 | (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG))) | 3252 | (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO))) |
3251 | return LL_FLUSH_FAILED; | 3253 | return LL_FLUSH_FAILED; |
3252 | 3254 | ||
3253 | if (!bnx2x_fp_lock_poll(fp)) | 3255 | if (!bnx2x_fp_lock_poll(fp)) |
@@ -4543,7 +4545,7 @@ alloc_mem_err: | |||
4543 | * In these cases we disable the queue | 4545 | * In these cases we disable the queue |
4544 | * Min size is different for OOO, TPA and non-TPA queues | 4546 | * Min size is different for OOO, TPA and non-TPA queues |
4545 | */ | 4547 | */ |
4546 | if (ring_size < (fp->disable_tpa ? | 4548 | if (ring_size < (fp->mode == TPA_MODE_DISABLED ? |
4547 | MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { | 4549 | MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { |
4548 | /* release memory allocated for this queue */ | 4550 | /* release memory allocated for this queue */ |
4549 | bnx2x_free_fp_mem_at(bp, index); | 4551 | bnx2x_free_fp_mem_at(bp, index); |
@@ -4809,66 +4811,71 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev, | |||
4809 | { | 4811 | { |
4810 | struct bnx2x *bp = netdev_priv(dev); | 4812 | struct bnx2x *bp = netdev_priv(dev); |
4811 | 4813 | ||
4814 | if (pci_num_vf(bp->pdev)) { | ||
4815 | netdev_features_t changed = dev->features ^ features; | ||
4816 | |||
4817 | /* Revert the requested changes in features if they | ||
4818 | * would require internal reload of PF in bnx2x_set_features(). | ||
4819 | */ | ||
4820 | if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) { | ||
4821 | features &= ~NETIF_F_RXCSUM; | ||
4822 | features |= dev->features & NETIF_F_RXCSUM; | ||
4823 | } | ||
4824 | |||
4825 | if (changed & NETIF_F_LOOPBACK) { | ||
4826 | features &= ~NETIF_F_LOOPBACK; | ||
4827 | features |= dev->features & NETIF_F_LOOPBACK; | ||
4828 | } | ||
4829 | } | ||
4830 | |||
4812 | /* TPA requires Rx CSUM offloading */ | 4831 | /* TPA requires Rx CSUM offloading */ |
4813 | if (!(features & NETIF_F_RXCSUM)) { | 4832 | if (!(features & NETIF_F_RXCSUM)) { |
4814 | features &= ~NETIF_F_LRO; | 4833 | features &= ~NETIF_F_LRO; |
4815 | features &= ~NETIF_F_GRO; | 4834 | features &= ~NETIF_F_GRO; |
4816 | } | 4835 | } |
4817 | 4836 | ||
4818 | /* Note: do not disable SW GRO in kernel when HW GRO is off */ | ||
4819 | if (bp->disable_tpa) | ||
4820 | features &= ~NETIF_F_LRO; | ||
4821 | |||
4822 | return features; | 4837 | return features; |
4823 | } | 4838 | } |
4824 | 4839 | ||
4825 | int bnx2x_set_features(struct net_device *dev, netdev_features_t features) | 4840 | int bnx2x_set_features(struct net_device *dev, netdev_features_t features) |
4826 | { | 4841 | { |
4827 | struct bnx2x *bp = netdev_priv(dev); | 4842 | struct bnx2x *bp = netdev_priv(dev); |
4828 | u32 flags = bp->flags; | 4843 | netdev_features_t changes = features ^ dev->features; |
4829 | u32 changes; | ||
4830 | bool bnx2x_reload = false; | 4844 | bool bnx2x_reload = false; |
4845 | int rc; | ||
4831 | 4846 | ||
4832 | if (features & NETIF_F_LRO) | 4847 | /* VFs or non-SRIOV PFs should be able to change the loopback feature */ |
4833 | flags |= TPA_ENABLE_FLAG; | 4848 | if (!pci_num_vf(bp->pdev)) { |
4834 | else | 4849 | if (features & NETIF_F_LOOPBACK) { |
4835 | flags &= ~TPA_ENABLE_FLAG; | 4850 | if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { |
4836 | 4851 | bp->link_params.loopback_mode = LOOPBACK_BMAC; | |
4837 | if (features & NETIF_F_GRO) | 4852 | bnx2x_reload = true; |
4838 | flags |= GRO_ENABLE_FLAG; | 4853 | } |
4839 | else | 4854 | } else { |
4840 | flags &= ~GRO_ENABLE_FLAG; | 4855 | if (bp->link_params.loopback_mode != LOOPBACK_NONE) { |
4841 | 4856 | bp->link_params.loopback_mode = LOOPBACK_NONE; | |
4842 | if (features & NETIF_F_LOOPBACK) { | 4857 | bnx2x_reload = true; |
4843 | if (bp->link_params.loopback_mode != LOOPBACK_BMAC) { | 4858 | } |
4844 | bp->link_params.loopback_mode = LOOPBACK_BMAC; | ||
4845 | bnx2x_reload = true; | ||
4846 | } | ||
4847 | } else { | ||
4848 | if (bp->link_params.loopback_mode != LOOPBACK_NONE) { | ||
4849 | bp->link_params.loopback_mode = LOOPBACK_NONE; | ||
4850 | bnx2x_reload = true; | ||
4851 | } | 4859 | } |
4852 | } | 4860 | } |
4853 | 4861 | ||
4854 | changes = flags ^ bp->flags; | ||
4855 | |||
4856 | /* if GRO is changed while LRO is enabled, don't force a reload */ | 4862 | /* if GRO is changed while LRO is enabled, don't force a reload */ |
4857 | if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG)) | 4863 | if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO)) |
4858 | changes &= ~GRO_ENABLE_FLAG; | 4864 | changes &= ~NETIF_F_GRO; |
4859 | 4865 | ||
4860 | /* if GRO is changed while HW TPA is off, don't force a reload */ | 4866 | /* if GRO is changed while HW TPA is off, don't force a reload */ |
4861 | if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa) | 4867 | if ((changes & NETIF_F_GRO) && bp->disable_tpa) |
4862 | changes &= ~GRO_ENABLE_FLAG; | 4868 | changes &= ~NETIF_F_GRO; |
4863 | 4869 | ||
4864 | if (changes) | 4870 | if (changes) |
4865 | bnx2x_reload = true; | 4871 | bnx2x_reload = true; |
4866 | 4872 | ||
4867 | bp->flags = flags; | ||
4868 | |||
4869 | if (bnx2x_reload) { | 4873 | if (bnx2x_reload) { |
4870 | if (bp->recovery_state == BNX2X_RECOVERY_DONE) | 4874 | if (bp->recovery_state == BNX2X_RECOVERY_DONE) { |
4871 | return bnx2x_reload_if_running(dev); | 4875 | dev->features = features; |
4876 | rc = bnx2x_reload_if_running(dev); | ||
4877 | return rc ? rc : 1; | ||
4878 | } | ||
4872 | /* else: bnx2x_nic_load() will be called at end of recovery */ | 4879 | /* else: bnx2x_nic_load() will be called at end of recovery */ |
4873 | } | 4880 | } |
4874 | 4881 | ||
@@ -4931,6 +4938,11 @@ int bnx2x_resume(struct pci_dev *pdev) | |||
4931 | } | 4938 | } |
4932 | bp = netdev_priv(dev); | 4939 | bp = netdev_priv(dev); |
4933 | 4940 | ||
4941 | if (pci_num_vf(bp->pdev)) { | ||
4942 | DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n"); | ||
4943 | return -EPERM; | ||
4944 | } | ||
4945 | |||
4934 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | 4946 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { |
4935 | BNX2X_ERR("Handling parity error recovery. Try again later\n"); | 4947 | BNX2X_ERR("Handling parity error recovery. Try again later\n"); |
4936 | return -EAGAIN; | 4948 | return -EAGAIN; |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index adcacda7af7b..d7a71758e876 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | |||
@@ -969,7 +969,7 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp, | |||
969 | { | 969 | { |
970 | int i; | 970 | int i; |
971 | 971 | ||
972 | if (fp->disable_tpa) | 972 | if (fp->mode == TPA_MODE_DISABLED) |
973 | return; | 973 | return; |
974 | 974 | ||
975 | for (i = 0; i < last; i++) | 975 | for (i = 0; i < last; i++) |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index e3d853cab7c9..48ed005ba73f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | |||
@@ -1843,6 +1843,12 @@ static int bnx2x_set_ringparam(struct net_device *dev, | |||
1843 | "set ring params command parameters: rx_pending = %d, tx_pending = %d\n", | 1843 | "set ring params command parameters: rx_pending = %d, tx_pending = %d\n", |
1844 | ering->rx_pending, ering->tx_pending); | 1844 | ering->rx_pending, ering->tx_pending); |
1845 | 1845 | ||
1846 | if (pci_num_vf(bp->pdev)) { | ||
1847 | DP(BNX2X_MSG_IOV, | ||
1848 | "VFs are enabled, can not change ring parameters\n"); | ||
1849 | return -EPERM; | ||
1850 | } | ||
1851 | |||
1846 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | 1852 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { |
1847 | DP(BNX2X_MSG_ETHTOOL, | 1853 | DP(BNX2X_MSG_ETHTOOL, |
1848 | "Handling parity error recovery. Try again later\n"); | 1854 | "Handling parity error recovery. Try again later\n"); |
@@ -2899,6 +2905,12 @@ static void bnx2x_self_test(struct net_device *dev, | |||
2899 | u8 is_serdes, link_up; | 2905 | u8 is_serdes, link_up; |
2900 | int rc, cnt = 0; | 2906 | int rc, cnt = 0; |
2901 | 2907 | ||
2908 | if (pci_num_vf(bp->pdev)) { | ||
2909 | DP(BNX2X_MSG_IOV, | ||
2910 | "VFs are enabled, can not perform self test\n"); | ||
2911 | return; | ||
2912 | } | ||
2913 | |||
2902 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { | 2914 | if (bp->recovery_state != BNX2X_RECOVERY_DONE) { |
2903 | netdev_err(bp->dev, | 2915 | netdev_err(bp->dev, |
2904 | "Handling parity error recovery. Try again later\n"); | 2916 | "Handling parity error recovery. Try again later\n"); |
@@ -3468,6 +3480,11 @@ static int bnx2x_set_channels(struct net_device *dev, | |||
3468 | channels->rx_count, channels->tx_count, channels->other_count, | 3480 | channels->rx_count, channels->tx_count, channels->other_count, |
3469 | channels->combined_count); | 3481 | channels->combined_count); |
3470 | 3482 | ||
3483 | if (pci_num_vf(bp->pdev)) { | ||
3484 | DP(BNX2X_MSG_IOV, "VFs are enabled, can not set channels\n"); | ||
3485 | return -EPERM; | ||
3486 | } | ||
3487 | |||
3471 | /* We don't support separate rx / tx channels. | 3488 | /* We don't support separate rx / tx channels. |
3472 | * We don't allow setting 'other' channels. | 3489 | * We don't allow setting 'other' channels. |
3473 | */ | 3490 | */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index b9f85fccb419..556dcc162a62 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -3128,7 +3128,7 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp, | |||
3128 | __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags); | 3128 | __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags); |
3129 | } | 3129 | } |
3130 | 3130 | ||
3131 | if (!fp->disable_tpa) { | 3131 | if (fp->mode != TPA_MODE_DISABLED) { |
3132 | __set_bit(BNX2X_Q_FLG_TPA, &flags); | 3132 | __set_bit(BNX2X_Q_FLG_TPA, &flags); |
3133 | __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); | 3133 | __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags); |
3134 | if (fp->mode == TPA_MODE_GRO) | 3134 | if (fp->mode == TPA_MODE_GRO) |
@@ -3176,7 +3176,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp, | |||
3176 | u16 sge_sz = 0; | 3176 | u16 sge_sz = 0; |
3177 | u16 tpa_agg_size = 0; | 3177 | u16 tpa_agg_size = 0; |
3178 | 3178 | ||
3179 | if (!fp->disable_tpa) { | 3179 | if (fp->mode != TPA_MODE_DISABLED) { |
3180 | pause->sge_th_lo = SGE_TH_LO(bp); | 3180 | pause->sge_th_lo = SGE_TH_LO(bp); |
3181 | pause->sge_th_hi = SGE_TH_HI(bp); | 3181 | pause->sge_th_hi = SGE_TH_HI(bp); |
3182 | 3182 | ||
@@ -3304,7 +3304,7 @@ static void bnx2x_pf_init(struct bnx2x *bp) | |||
3304 | /* This flag is relevant for E1x only. | 3304 | /* This flag is relevant for E1x only. |
3305 | * E2 doesn't have a TPA configuration in a function level. | 3305 | * E2 doesn't have a TPA configuration in a function level. |
3306 | */ | 3306 | */ |
3307 | flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0; | 3307 | flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0; |
3308 | 3308 | ||
3309 | func_init.func_flgs = flags; | 3309 | func_init.func_flgs = flags; |
3310 | func_init.pf_id = BP_FUNC(bp); | 3310 | func_init.pf_id = BP_FUNC(bp); |
@@ -12107,11 +12107,8 @@ static int bnx2x_init_bp(struct bnx2x *bp) | |||
12107 | 12107 | ||
12108 | /* Set TPA flags */ | 12108 | /* Set TPA flags */ |
12109 | if (bp->disable_tpa) { | 12109 | if (bp->disable_tpa) { |
12110 | bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); | 12110 | bp->dev->hw_features &= ~NETIF_F_LRO; |
12111 | bp->dev->features &= ~NETIF_F_LRO; | 12111 | bp->dev->features &= ~NETIF_F_LRO; |
12112 | } else { | ||
12113 | bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG); | ||
12114 | bp->dev->features |= NETIF_F_LRO; | ||
12115 | } | 12112 | } |
12116 | 12113 | ||
12117 | if (CHIP_IS_E1(bp)) | 12114 | if (CHIP_IS_E1(bp)) |
@@ -13371,6 +13368,12 @@ static int bnx2x_init_one(struct pci_dev *pdev, | |||
13371 | bool is_vf; | 13368 | bool is_vf; |
13372 | int cnic_cnt; | 13369 | int cnic_cnt; |
13373 | 13370 | ||
13371 | /* Management FW 'remembers' living interfaces. Allow it some time | ||
13372 | * to forget previously living interfaces, allowing a proper re-load. | ||
13373 | */ | ||
13374 | if (is_kdump_kernel()) | ||
13375 | msleep(5000); | ||
13376 | |||
13374 | /* An estimated maximum supported CoS number according to the chip | 13377 | /* An estimated maximum supported CoS number according to the chip |
13375 | * version. | 13378 | * version. |
13376 | * We will try to roughly estimate the maximum number of CoSes this chip | 13379 | * We will try to roughly estimate the maximum number of CoSes this chip |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 15b2d1647560..06b8c0d8fd3b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | |||
@@ -594,7 +594,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
594 | bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req)); | 594 | bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req)); |
595 | 595 | ||
596 | /* select tpa mode to request */ | 596 | /* select tpa mode to request */ |
597 | if (!fp->disable_tpa) { | 597 | if (fp->mode != TPA_MODE_DISABLED) { |
598 | flags |= VFPF_QUEUE_FLG_TPA; | 598 | flags |= VFPF_QUEUE_FLG_TPA; |
599 | flags |= VFPF_QUEUE_FLG_TPA_IPV6; | 599 | flags |= VFPF_QUEUE_FLG_TPA_IPV6; |
600 | if (fp->mode == TPA_MODE_GRO) | 600 | if (fp->mode == TPA_MODE_GRO) |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 1270b189a9a2..069952fa5d64 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -18129,7 +18129,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, | |||
18129 | 18129 | ||
18130 | rtnl_lock(); | 18130 | rtnl_lock(); |
18131 | 18131 | ||
18132 | tp->pcierr_recovery = true; | 18132 | /* We needn't recover from permanent error */ |
18133 | if (state == pci_channel_io_frozen) | ||
18134 | tp->pcierr_recovery = true; | ||
18133 | 18135 | ||
18134 | /* We probably don't have netdev yet */ | 18136 | /* We probably don't have netdev yet */ |
18135 | if (!netdev || !netif_running(netdev)) | 18137 | if (!netdev || !netif_running(netdev)) |
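The tg3 change above enters recovery handling only for a frozen channel; a permanently failed channel is simply torn down. A sketch of how an error_detected() callback commonly distinguishes the two states (return codes per <linux/pci.h>, all driver-specific teardown omitted):

    #include <linux/pci.h>

    static pci_ers_result_t demo_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
    {
            if (state == pci_channel_io_perm_failure)
                    return PCI_ERS_RESULT_DISCONNECT;       /* no recovery possible */

            /* frozen: MMIO/DMA are blocked, ask the PCI core for a reset */
            return PCI_ERS_RESULT_NEED_RESET;
    }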
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 9f5387249f24..4104d49f005d 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -707,6 +707,9 @@ static void gem_rx_refill(struct macb *bp) | |||
707 | 707 | ||
708 | /* properly align Ethernet header */ | 708 | /* properly align Ethernet header */ |
709 | skb_reserve(skb, NET_IP_ALIGN); | 709 | skb_reserve(skb, NET_IP_ALIGN); |
710 | } else { | ||
711 | bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED); | ||
712 | bp->rx_ring[entry].ctrl = 0; | ||
710 | } | 713 | } |
711 | } | 714 | } |
712 | 715 | ||
@@ -1473,9 +1476,9 @@ static void macb_init_rings(struct macb *bp) | |||
1473 | for (i = 0; i < TX_RING_SIZE; i++) { | 1476 | for (i = 0; i < TX_RING_SIZE; i++) { |
1474 | bp->queues[0].tx_ring[i].addr = 0; | 1477 | bp->queues[0].tx_ring[i].addr = 0; |
1475 | bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); | 1478 | bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); |
1476 | bp->queues[0].tx_head = 0; | ||
1477 | bp->queues[0].tx_tail = 0; | ||
1478 | } | 1479 | } |
1480 | bp->queues[0].tx_head = 0; | ||
1481 | bp->queues[0].tx_tail = 0; | ||
1479 | bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); | 1482 | bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); |
1480 | 1483 | ||
1481 | bp->rx_tail = 0; | 1484 | bp->rx_tail = 0; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 5959e3ae72da..e8578a742f2a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -492,7 +492,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, | |||
492 | memoffset = (mtype * (edc_size * 1024 * 1024)); | 492 | memoffset = (mtype * (edc_size * 1024 * 1024)); |
493 | else { | 493 | else { |
494 | mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap, | 494 | mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap, |
495 | MA_EXT_MEMORY1_BAR_A)); | 495 | MA_EXT_MEMORY0_BAR_A)); |
496 | memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; | 496 | memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; |
497 | } | 497 | } |
498 | 498 | ||
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index fb0bc3c3620e..a6dcbf850c1f 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -4846,7 +4846,8 @@ err: | |||
4846 | } | 4846 | } |
4847 | 4847 | ||
4848 | static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 4848 | static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
4849 | struct net_device *dev, u32 filter_mask) | 4849 | struct net_device *dev, u32 filter_mask, |
4850 | int nlflags) | ||
4850 | { | 4851 | { |
4851 | struct be_adapter *adapter = netdev_priv(dev); | 4852 | struct be_adapter *adapter = netdev_priv(dev); |
4852 | int status = 0; | 4853 | int status = 0; |
@@ -4868,7 +4869,7 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | |||
4868 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, | 4869 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, |
4869 | hsw_mode == PORT_FWD_TYPE_VEPA ? | 4870 | hsw_mode == PORT_FWD_TYPE_VEPA ? |
4870 | BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB, | 4871 | BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB, |
4871 | 0, 0); | 4872 | 0, 0, nlflags); |
4872 | } | 4873 | } |
4873 | 4874 | ||
4874 | #ifdef CONFIG_BE2NET_VXLAN | 4875 | #ifdef CONFIG_BE2NET_VXLAN |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index f6a3a7abd468..66d47e448e4d 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -988,7 +988,10 @@ fec_restart(struct net_device *ndev) | |||
988 | rcntl |= 0x40000000 | 0x00000020; | 988 | rcntl |= 0x40000000 | 0x00000020; |
989 | 989 | ||
990 | /* RGMII, RMII or MII */ | 990 | /* RGMII, RMII or MII */ |
991 | if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII) | 991 | if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || |
992 | fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || | ||
993 | fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || | ||
994 | fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) | ||
992 | rcntl |= (1 << 6); | 995 | rcntl |= (1 << 6); |
993 | else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) | 996 | else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) |
994 | rcntl |= (1 << 8); | 997 | rcntl |= (1 << 8); |
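
The fec_restart() hunk above widens the MII-mode test so that every RGMII variant (plain, -id, -rxid, -txid) programs the same RCR bit, not just PHY_INTERFACE_MODE_RGMII. As a rough, hedged illustration of that predicate, here is a standalone helper; the enum values are stand-ins for the kernel's phy_interface_t, not its real definition:

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-ins for the kernel's phy_interface_t values. */
	enum phy_mode {
		PHY_MODE_MII,
		PHY_MODE_RMII,
		PHY_MODE_RGMII,
		PHY_MODE_RGMII_ID,
		PHY_MODE_RGMII_RXID,
		PHY_MODE_RGMII_TXID,
	};

	/* True for any RGMII flavour, mirroring the four-way check in the hunk. */
	static bool mode_is_rgmii(enum phy_mode mode)
	{
		switch (mode) {
		case PHY_MODE_RGMII:
		case PHY_MODE_RGMII_ID:
		case PHY_MODE_RGMII_RXID:
		case PHY_MODE_RGMII_TXID:
			return true;
		default:
			return false;
		}
	}

	int main(void)
	{
		printf("%d %d\n", mode_is_rgmii(PHY_MODE_RGMII_TXID),
		       mode_is_rgmii(PHY_MODE_RMII));	/* prints "1 0" */
		return 0;
	}

Later kernels grew a phy_interface_mode_is_rgmii() helper for exactly this test; the driver here simply open-codes the four comparisons.
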
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 291c87036e17..2a0dc127df3f 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c | |||
@@ -3347,7 +3347,7 @@ static int ehea_register_memory_hooks(void) | |||
3347 | { | 3347 | { |
3348 | int ret = 0; | 3348 | int ret = 0; |
3349 | 3349 | ||
3350 | if (atomic_inc_and_test(&ehea_memory_hooks_registered)) | 3350 | if (atomic_inc_return(&ehea_memory_hooks_registered) > 1) |
3351 | return 0; | 3351 | return 0; |
3352 | 3352 | ||
3353 | ret = ehea_create_busmap(); | 3353 | ret = ehea_create_busmap(); |
@@ -3381,12 +3381,14 @@ out3: | |||
3381 | out2: | 3381 | out2: |
3382 | unregister_reboot_notifier(&ehea_reboot_nb); | 3382 | unregister_reboot_notifier(&ehea_reboot_nb); |
3383 | out: | 3383 | out: |
3384 | atomic_dec(&ehea_memory_hooks_registered); | ||
3384 | return ret; | 3385 | return ret; |
3385 | } | 3386 | } |
3386 | 3387 | ||
3387 | static void ehea_unregister_memory_hooks(void) | 3388 | static void ehea_unregister_memory_hooks(void) |
3388 | { | 3389 | { |
3389 | if (atomic_read(&ehea_memory_hooks_registered)) | 3390 | /* Only remove the hooks if we've registered them */ |
3391 | if (atomic_read(&ehea_memory_hooks_registered) == 0) | ||
3390 | return; | 3392 | return; |
3391 | 3393 | ||
3392 | unregister_reboot_notifier(&ehea_reboot_nb); | 3394 | unregister_reboot_notifier(&ehea_reboot_nb); |
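
The ehea hunks fix the one-time registration guard: atomic_inc_return() > 1 lets only the first caller do the setup work (atomic_inc_and_test() is true only when the counter reaches zero, so every caller used to fall through), the error path now decrements the counter so a later attempt can retry, and unregistration bails out only when nothing was registered. A hedged userspace sketch of the same pattern with C11 atomics; register_hooks()/do_register() are illustrative names, not the driver's API:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int hooks_registered;

	/* Illustrative stand-in for the real busmap/notifier setup. */
	static int do_register(void)
	{
		return 0;	/* pretend it succeeded */
	}

	static int register_hooks(void)
	{
		int ret;

		/* Only the caller that moves the counter 0 -> 1 does the work. */
		if (atomic_fetch_add(&hooks_registered, 1) > 0)
			return 0;

		ret = do_register();
		if (ret)
			/* Roll back so a later caller may try again. */
			atomic_fetch_sub(&hooks_registered, 1);
		return ret;
	}

	static void unregister_hooks(void)
	{
		/* Only tear down if somebody actually registered. */
		if (atomic_load(&hooks_registered) == 0)
			return;
		/* ... undo do_register() here ... */
	}

	int main(void)
	{
		printf("first:  %d\n", register_hooks());	/* does the work */
		printf("second: %d\n", register_hooks());	/* no-op, returns 0 */
		unregister_hooks();
		return 0;
	}
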
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index cd7675ac5bf9..18134766a114 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c | |||
@@ -1238,7 +1238,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | |||
1238 | return -EINVAL; | 1238 | return -EINVAL; |
1239 | 1239 | ||
1240 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) | 1240 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) |
1241 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) | 1241 | if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) |
1242 | break; | 1242 | break; |
1243 | 1243 | ||
1244 | if (i == IBMVETH_NUM_BUFF_POOLS) | 1244 | if (i == IBMVETH_NUM_BUFF_POOLS) |
@@ -1257,7 +1257,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu) | |||
1257 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { | 1257 | for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { |
1258 | adapter->rx_buff_pool[i].active = 1; | 1258 | adapter->rx_buff_pool[i].active = 1; |
1259 | 1259 | ||
1260 | if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) { | 1260 | if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) { |
1261 | dev->mtu = new_mtu; | 1261 | dev->mtu = new_mtu; |
1262 | vio_cmo_set_dev_desired(viodev, | 1262 | vio_cmo_set_dev_desired(viodev, |
1263 | ibmveth_get_desired_dma | 1263 | ibmveth_get_desired_dma |
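
Both ibmveth hunks change a strict `<` to `<=`, so a requested MTU (plus overhead) that lands exactly on a pool's buffer size now selects that pool instead of skipping to a larger one or failing. A small hedged sketch of the selection rule; the pool sizes below are made-up example values, not the driver's table:

	#include <stdio.h>

	#define NUM_POOLS 5

	/* Example buffer-pool sizes in bytes (illustrative only). */
	static const int pool_buff_size[NUM_POOLS] = { 512, 2048, 4096, 9216, 65536 };

	/* Return the first pool whose buffers can hold new_mtu plus overhead,
	 * or -1 if none fits.  Note the <=: an exact fit is acceptable.
	 */
	static int pick_pool(int new_mtu_with_overhead)
	{
		for (int i = 0; i < NUM_POOLS; i++)
			if (new_mtu_with_overhead <= pool_buff_size[i])
				return i;
		return -1;
	}

	int main(void)
	{
		/* With '<' this would wrongly skip pool 1 and pick pool 2. */
		printf("pool for 2048 bytes: %d\n", pick_pool(2048));
		return 0;
	}
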
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 24481cd7e59a..a54c14491e3b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -8053,10 +8053,10 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, | |||
8053 | #ifdef HAVE_BRIDGE_FILTER | 8053 | #ifdef HAVE_BRIDGE_FILTER |
8054 | static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 8054 | static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
8055 | struct net_device *dev, | 8055 | struct net_device *dev, |
8056 | u32 __always_unused filter_mask) | 8056 | u32 __always_unused filter_mask, int nlflags) |
8057 | #else | 8057 | #else |
8058 | static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 8058 | static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
8059 | struct net_device *dev) | 8059 | struct net_device *dev, int nlflags) |
8060 | #endif /* HAVE_BRIDGE_FILTER */ | 8060 | #endif /* HAVE_BRIDGE_FILTER */ |
8061 | { | 8061 | { |
8062 | struct i40e_netdev_priv *np = netdev_priv(dev); | 8062 | struct i40e_netdev_priv *np = netdev_priv(dev); |
@@ -8078,7 +8078,8 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | |||
8078 | if (!veb) | 8078 | if (!veb) |
8079 | return 0; | 8079 | return 0; |
8080 | 8080 | ||
8081 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode); | 8081 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, |
8082 | nlflags); | ||
8082 | } | 8083 | } |
8083 | #endif /* HAVE_BRIDGE_ATTRIBS */ | 8084 | #endif /* HAVE_BRIDGE_ATTRIBS */ |
8084 | 8085 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d3f4b0ceb3f7..5be12a00e1f4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -8044,7 +8044,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, | |||
8044 | 8044 | ||
8045 | static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 8045 | static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
8046 | struct net_device *dev, | 8046 | struct net_device *dev, |
8047 | u32 filter_mask) | 8047 | u32 filter_mask, int nlflags) |
8048 | { | 8048 | { |
8049 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 8049 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
8050 | 8050 | ||
@@ -8052,7 +8052,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | |||
8052 | return 0; | 8052 | return 0; |
8053 | 8053 | ||
8054 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, | 8054 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, |
8055 | adapter->bridge_mode, 0, 0); | 8055 | adapter->bridge_mode, 0, 0, nlflags); |
8056 | } | 8056 | } |
8057 | 8057 | ||
8058 | static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) | 8058 | static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) |
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index af829c578400..7ace07dad6a3 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c | |||
@@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev) | |||
1508 | np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); | 1508 | np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); |
1509 | if (!np) { | 1509 | if (!np) { |
1510 | dev_err(&pdev->dev, "missing phy-handle\n"); | 1510 | dev_err(&pdev->dev, "missing phy-handle\n"); |
1511 | return -EINVAL; | 1511 | err = -EINVAL; |
1512 | goto err_netdev; | ||
1512 | } | 1513 | } |
1513 | of_property_read_u32(np, "reg", &pep->phy_addr); | 1514 | of_property_read_u32(np, "reg", &pep->phy_addr); |
1514 | pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); | 1515 | pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); |
@@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) | |||
1526 | pep->smi_bus = mdiobus_alloc(); | 1527 | pep->smi_bus = mdiobus_alloc(); |
1527 | if (pep->smi_bus == NULL) { | 1528 | if (pep->smi_bus == NULL) { |
1528 | err = -ENOMEM; | 1529 | err = -ENOMEM; |
1529 | goto err_base; | 1530 | goto err_netdev; |
1530 | } | 1531 | } |
1531 | pep->smi_bus->priv = pep; | 1532 | pep->smi_bus->priv = pep; |
1532 | pep->smi_bus->name = "pxa168_eth smi"; | 1533 | pep->smi_bus->name = "pxa168_eth smi"; |
@@ -1551,13 +1552,10 @@ err_mdiobus: | |||
1551 | mdiobus_unregister(pep->smi_bus); | 1552 | mdiobus_unregister(pep->smi_bus); |
1552 | err_free_mdio: | 1553 | err_free_mdio: |
1553 | mdiobus_free(pep->smi_bus); | 1554 | mdiobus_free(pep->smi_bus); |
1554 | err_base: | ||
1555 | iounmap(pep->base); | ||
1556 | err_netdev: | 1555 | err_netdev: |
1557 | free_netdev(dev); | 1556 | free_netdev(dev); |
1558 | err_clk: | 1557 | err_clk: |
1559 | clk_disable(clk); | 1558 | clk_disable_unprepare(clk); |
1560 | clk_put(clk); | ||
1561 | return err; | 1559 | return err; |
1562 | } | 1560 | } |
1563 | 1561 | ||
@@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev) | |||
1574 | if (pep->phy) | 1572 | if (pep->phy) |
1575 | phy_disconnect(pep->phy); | 1573 | phy_disconnect(pep->phy); |
1576 | if (pep->clk) { | 1574 | if (pep->clk) { |
1577 | clk_disable(pep->clk); | 1575 | clk_disable_unprepare(pep->clk); |
1578 | clk_put(pep->clk); | ||
1579 | pep->clk = NULL; | ||
1580 | } | 1576 | } |
1581 | 1577 | ||
1582 | iounmap(pep->base); | ||
1583 | pep->base = NULL; | ||
1584 | mdiobus_unregister(pep->smi_bus); | 1578 | mdiobus_unregister(pep->smi_bus); |
1585 | mdiobus_free(pep->smi_bus); | 1579 | mdiobus_free(pep->smi_bus); |
1586 | unregister_netdev(dev); | 1580 | unregister_netdev(dev); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 3f44e2bbb982..a2ddf3d75ff8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
@@ -1102,20 +1102,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc) | |||
1102 | struct mlx4_en_priv *priv = netdev_priv(dev); | 1102 | struct mlx4_en_priv *priv = netdev_priv(dev); |
1103 | 1103 | ||
1104 | /* check if requested function is supported by the device */ | 1104 | /* check if requested function is supported by the device */ |
1105 | if ((hfunc == ETH_RSS_HASH_TOP && | 1105 | if (hfunc == ETH_RSS_HASH_TOP) { |
1106 | !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) || | 1106 | if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) |
1107 | (hfunc == ETH_RSS_HASH_XOR && | 1107 | return -EINVAL; |
1108 | !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))) | 1108 | if (!(dev->features & NETIF_F_RXHASH)) |
1109 | return -EINVAL; | 1109 | en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); |
1110 | return 0; | ||
1111 | } else if (hfunc == ETH_RSS_HASH_XOR) { | ||
1112 | if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)) | ||
1113 | return -EINVAL; | ||
1114 | if (dev->features & NETIF_F_RXHASH) | ||
1115 | en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); | ||
1116 | return 0; | ||
1117 | } | ||
1110 | 1118 | ||
1111 | priv->rss_hash_fn = hfunc; | 1119 | return -EINVAL; |
1112 | if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH)) | ||
1113 | en_warn(priv, | ||
1114 | "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n"); | ||
1115 | if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH)) | ||
1116 | en_warn(priv, | ||
1117 | "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n"); | ||
1118 | return 0; | ||
1119 | } | 1120 | } |
1120 | 1121 | ||
1121 | static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key, | 1122 | static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key, |
@@ -1189,6 +1190,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, | |||
1189 | priv->prof->rss_rings = rss_rings; | 1190 | priv->prof->rss_rings = rss_rings; |
1190 | if (key) | 1191 | if (key) |
1191 | memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE); | 1192 | memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE); |
1193 | if (hfunc != ETH_RSS_HASH_NO_CHANGE) | ||
1194 | priv->rss_hash_fn = hfunc; | ||
1192 | 1195 | ||
1193 | if (port_up) { | 1196 | if (port_up) { |
1194 | err = mlx4_en_start_port(dev); | 1197 | err = mlx4_en_start_port(dev); |
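
The en_ethtool.c hunks split mlx4_en_check_rxfh_func() into per-function branches, so each supported hash function gets its own capability check and warning and anything unrecognized falls through to -EINVAL; the actual priv->rss_hash_fn assignment moves into mlx4_en_set_rxfh() and is skipped for ETH_RSS_HASH_NO_CHANGE. A hedged sketch of the validation shape only; the flag names, values and check_rxfh_func() itself are illustrative, not mlx4's capability bits:

	#include <errno.h>
	#include <stdio.h>

	#define CAP_RSS_TOP	0x1
	#define CAP_RSS_XOR	0x2

	enum hfunc { HASH_TOP, HASH_XOR, HASH_OTHER };

	static int check_rxfh_func(unsigned caps, enum hfunc hfunc, int rxhash_on)
	{
		switch (hfunc) {
		case HASH_TOP:
			if (!(caps & CAP_RSS_TOP))
				return -EINVAL;
			if (!rxhash_on)
				fprintf(stderr, "warn: Toeplitz works best with RX hashing on\n");
			return 0;
		case HASH_XOR:
			if (!(caps & CAP_RSS_XOR))
				return -EINVAL;
			if (rxhash_on)
				fprintf(stderr, "warn: XOR hash can limit RPS\n");
			return 0;
		default:
			return -EINVAL;	/* unknown hash function */
		}
	}

	int main(void)
	{
		/* Device supports only XOR; Toeplitz must be rejected. */
		printf("%d\n", check_rxfh_func(CAP_RSS_XOR, HASH_TOP, 1));	/* -EINVAL */
		printf("%d\n", check_rxfh_func(CAP_RSS_XOR, HASH_XOR, 0));	/* 0 */
		return 0;
	}
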
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 0f1afc085d58..32f5ec737472 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct work_struct *work) | |||
1467 | if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) | 1467 | if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) |
1468 | mlx4_en_ptp_overflow_check(mdev); | 1468 | mlx4_en_ptp_overflow_check(mdev); |
1469 | 1469 | ||
1470 | mlx4_en_recover_from_oom(priv); | ||
1470 | queue_delayed_work(mdev->workqueue, &priv->service_task, | 1471 | queue_delayed_work(mdev->workqueue, &priv->service_task, |
1471 | SERVICE_TASK_DELAY); | 1472 | SERVICE_TASK_DELAY); |
1472 | } | 1473 | } |
@@ -1721,7 +1722,7 @@ mac_err: | |||
1721 | cq_err: | 1722 | cq_err: |
1722 | while (rx_index--) { | 1723 | while (rx_index--) { |
1723 | mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); | 1724 | mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); |
1724 | mlx4_en_free_affinity_hint(priv, i); | 1725 | mlx4_en_free_affinity_hint(priv, rx_index); |
1725 | } | 1726 | } |
1726 | for (i = 0; i < priv->rx_ring_num; i++) | 1727 | for (i = 0; i < priv->rx_ring_num; i++) |
1727 | mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); | 1728 | mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 4fdd3c37e47b..2a77a6b19121 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c | |||
@@ -244,6 +244,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, | |||
244 | return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); | 244 | return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); |
245 | } | 245 | } |
246 | 246 | ||
247 | static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) | ||
248 | { | ||
249 | BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size); | ||
250 | return ring->prod == ring->cons; | ||
251 | } | ||
252 | |||
247 | static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) | 253 | static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) |
248 | { | 254 | { |
249 | *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); | 255 | *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff); |
@@ -315,8 +321,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, | |||
315 | ring->cons, ring->prod); | 321 | ring->cons, ring->prod); |
316 | 322 | ||
317 | /* Unmap and free Rx buffers */ | 323 | /* Unmap and free Rx buffers */ |
318 | BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size); | 324 | while (!mlx4_en_is_ring_empty(ring)) { |
319 | while (ring->cons != ring->prod) { | ||
320 | index = ring->cons & ring->size_mask; | 325 | index = ring->cons & ring->size_mask; |
321 | en_dbg(DRV, priv, "Processing descriptor:%d\n", index); | 326 | en_dbg(DRV, priv, "Processing descriptor:%d\n", index); |
322 | mlx4_en_free_rx_desc(priv, ring, index); | 327 | mlx4_en_free_rx_desc(priv, ring, index); |
@@ -491,6 +496,23 @@ err_allocator: | |||
491 | return err; | 496 | return err; |
492 | } | 497 | } |
493 | 498 | ||
499 | /* We recover from out of memory by scheduling our napi poll | ||
500 | * function (mlx4_en_process_cq), which tries to allocate | ||
501 | * all missing RX buffers (call to mlx4_en_refill_rx_buffers). | ||
502 | */ | ||
503 | void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) | ||
504 | { | ||
505 | int ring; | ||
506 | |||
507 | if (!priv->port_up) | ||
508 | return; | ||
509 | |||
510 | for (ring = 0; ring < priv->rx_ring_num; ring++) { | ||
511 | if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) | ||
512 | napi_reschedule(&priv->rx_cq[ring]->napi); | ||
513 | } | ||
514 | } | ||
515 | |||
494 | void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, | 516 | void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, |
495 | struct mlx4_en_rx_ring **pring, | 517 | struct mlx4_en_rx_ring **pring, |
496 | u32 size, u16 stride) | 518 | u32 size, u16 stride) |
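
The en_rx.c hunks add mlx4_en_is_ring_empty(), which centralises the producer-equals-consumer test that mlx4_en_free_rx_buf() used to open-code, and mlx4_en_recover_from_oom(), which the service task uses to re-arm NAPI on any ring that drained completely after an allocation failure. A hedged sketch of the prod/cons bookkeeping it relies on; the struct is a simplification of the driver's ring, and unsigned subtraction keeps the fill level correct even after the 32-bit counters wrap:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct rx_ring {
		uint32_t prod;		/* descriptors posted by the driver */
		uint32_t cons;		/* descriptors consumed              */
		uint32_t actual_size;	/* ring capacity                     */
	};

	static bool ring_empty(const struct rx_ring *ring)
	{
		/* Unsigned subtraction stays valid across 32-bit wraparound. */
		assert((uint32_t)(ring->prod - ring->cons) <= ring->actual_size);
		return ring->prod == ring->cons;
	}

	int main(void)
	{
		struct rx_ring r = { .prod = 5, .cons = 3, .actual_size = 1024 };

		printf("empty: %d\n", ring_empty(&r));	/* 0: two buffers outstanding */
		r.cons = r.prod;
		printf("empty: %d\n", ring_empty(&r));	/* 1: fully drained */
		return 0;
	}
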
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 1783705273d8..f7bf312fb443 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c | |||
@@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, | |||
143 | ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; | 143 | ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type; |
144 | ring->queue_index = queue_index; | 144 | ring->queue_index = queue_index; |
145 | 145 | ||
146 | if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index)) | 146 | if (queue_index < priv->num_tx_rings_p_up) |
147 | cpumask_set_cpu(queue_index, &ring->affinity_mask); | 147 | cpumask_set_cpu_local_first(queue_index, |
148 | priv->mdev->dev->numa_node, | ||
149 | &ring->affinity_mask); | ||
148 | 150 | ||
149 | *pring = ring; | 151 | *pring = ring; |
150 | return 0; | 152 | return 0; |
@@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | |||
213 | 215 | ||
214 | err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, | 216 | err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, |
215 | &ring->qp, &ring->qp_state); | 217 | &ring->qp, &ring->qp_state); |
216 | if (!user_prio && cpu_online(ring->queue_index)) | 218 | if (!cpumask_empty(&ring->affinity_mask)) |
217 | netif_set_xps_queue(priv->dev, &ring->affinity_mask, | 219 | netif_set_xps_queue(priv->dev, &ring->affinity_mask, |
218 | ring->queue_index); | 220 | ring->queue_index); |
219 | 221 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index a4079811b176..e30bf57ad7a1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c | |||
@@ -56,11 +56,13 @@ MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)"); | |||
56 | #define MLX4_GET(dest, source, offset) \ | 56 | #define MLX4_GET(dest, source, offset) \ |
57 | do { \ | 57 | do { \ |
58 | void *__p = (char *) (source) + (offset); \ | 58 | void *__p = (char *) (source) + (offset); \ |
59 | u64 val; \ | ||
59 | switch (sizeof (dest)) { \ | 60 | switch (sizeof (dest)) { \ |
60 | case 1: (dest) = *(u8 *) __p; break; \ | 61 | case 1: (dest) = *(u8 *) __p; break; \ |
61 | case 2: (dest) = be16_to_cpup(__p); break; \ | 62 | case 2: (dest) = be16_to_cpup(__p); break; \ |
62 | case 4: (dest) = be32_to_cpup(__p); break; \ | 63 | case 4: (dest) = be32_to_cpup(__p); break; \ |
63 | case 8: (dest) = be64_to_cpup(__p); break; \ | 64 | case 8: val = get_unaligned((u64 *)__p); \ |
65 | (dest) = be64_to_cpu(val); break; \ | ||
64 | default: __buggy_use_of_MLX4_GET(); \ | 66 | default: __buggy_use_of_MLX4_GET(); \ |
65 | } \ | 67 | } \ |
66 | } while (0) | 68 | } while (0) |
@@ -1605,9 +1607,17 @@ static void get_board_id(void *vsd, char *board_id) | |||
1605 | * swaps each 4-byte word before passing it back to | 1607 | * swaps each 4-byte word before passing it back to |
1606 | * us. Therefore we need to swab it before printing. | 1608 | * us. Therefore we need to swab it before printing. |
1607 | */ | 1609 | */ |
1608 | for (i = 0; i < 4; ++i) | 1610 | u32 *bid_u32 = (u32 *)board_id; |
1609 | ((u32 *) board_id)[i] = | 1611 | |
1610 | swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); | 1612 | for (i = 0; i < 4; ++i) { |
1613 | u32 *addr; | ||
1614 | u32 val; | ||
1615 | |||
1616 | addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4); | ||
1617 | val = get_unaligned(addr); | ||
1618 | val = swab32(val); | ||
1619 | put_unaligned(val, &bid_u32[i]); | ||
1620 | } | ||
1611 | } | 1621 | } |
1612 | } | 1622 | } |
1613 | 1623 | ||
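
Both fw.c hunks replace direct dereferences of possibly unaligned pointers with get_unaligned()/put_unaligned(), which matters on architectures that fault on, or silently mangle, unaligned 64-bit loads. A hedged userspace sketch of the portable equivalent — memcpy-based accessors, which compilers lower to plain loads/stores where alignment allows; __builtin_bswap32 stands in for the kernel's swab32:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Read a 64-bit value from a possibly unaligned address. */
	static uint64_t get_unaligned_u64(const void *p)
	{
		uint64_t v;
		memcpy(&v, p, sizeof(v));
		return v;
	}

	/* Write a 32-bit value to a possibly unaligned address. */
	static void put_unaligned_u32(uint32_t v, void *p)
	{
		memcpy(p, &v, sizeof(v));
	}

	int main(void)
	{
		unsigned char buf[16] = { 0 };

		/* Offset 3 is deliberately misaligned for 4- and 8-byte accesses. */
		put_unaligned_u32(__builtin_bswap32(0x11223344u), buf + 3);
		printf("0x%016llx\n",
		       (unsigned long long)get_unaligned_u64(buf + 1));
		return 0;
	}
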
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 9de30216b146..d021f079f181 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
@@ -774,6 +774,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | |||
774 | void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, | 774 | void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, |
775 | struct mlx4_en_tx_ring *ring); | 775 | struct mlx4_en_tx_ring *ring); |
776 | void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev); | 776 | void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev); |
777 | void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv); | ||
777 | int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, | 778 | int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, |
778 | struct mlx4_en_rx_ring **pring, | 779 | struct mlx4_en_rx_ring **pring, |
779 | u32 size, u16 stride, int node); | 780 | u32 size, u16 stride, int node); |
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 1412f5af05ec..2bae50292dcd 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c | |||
@@ -69,11 +69,7 @@ | |||
69 | #include <net/ip.h> | 69 | #include <net/ip.h> |
70 | #include <net/tcp.h> | 70 | #include <net/tcp.h> |
71 | #include <asm/byteorder.h> | 71 | #include <asm/byteorder.h> |
72 | #include <asm/io.h> | ||
73 | #include <asm/processor.h> | 72 | #include <asm/processor.h> |
74 | #ifdef CONFIG_MTRR | ||
75 | #include <asm/mtrr.h> | ||
76 | #endif | ||
77 | #include <net/busy_poll.h> | 73 | #include <net/busy_poll.h> |
78 | 74 | ||
79 | #include "myri10ge_mcp.h" | 75 | #include "myri10ge_mcp.h" |
@@ -242,8 +238,7 @@ struct myri10ge_priv { | |||
242 | unsigned int rdma_tags_available; | 238 | unsigned int rdma_tags_available; |
243 | int intr_coal_delay; | 239 | int intr_coal_delay; |
244 | __be32 __iomem *intr_coal_delay_ptr; | 240 | __be32 __iomem *intr_coal_delay_ptr; |
245 | int mtrr; | 241 | int wc_cookie; |
246 | int wc_enabled; | ||
247 | int down_cnt; | 242 | int down_cnt; |
248 | wait_queue_head_t down_wq; | 243 | wait_queue_head_t down_wq; |
249 | struct work_struct watchdog_work; | 244 | struct work_struct watchdog_work; |
@@ -1905,7 +1900,7 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = { | |||
1905 | "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", | 1900 | "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors", |
1906 | "tx_heartbeat_errors", "tx_window_errors", | 1901 | "tx_heartbeat_errors", "tx_window_errors", |
1907 | /* device-specific stats */ | 1902 | /* device-specific stats */ |
1908 | "tx_boundary", "WC", "irq", "MSI", "MSIX", | 1903 | "tx_boundary", "irq", "MSI", "MSIX", |
1909 | "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", | 1904 | "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", |
1910 | "serial_number", "watchdog_resets", | 1905 | "serial_number", "watchdog_resets", |
1911 | #ifdef CONFIG_MYRI10GE_DCA | 1906 | #ifdef CONFIG_MYRI10GE_DCA |
@@ -1984,7 +1979,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev, | |||
1984 | data[i] = ((u64 *)&link_stats)[i]; | 1979 | data[i] = ((u64 *)&link_stats)[i]; |
1985 | 1980 | ||
1986 | data[i++] = (unsigned int)mgp->tx_boundary; | 1981 | data[i++] = (unsigned int)mgp->tx_boundary; |
1987 | data[i++] = (unsigned int)mgp->wc_enabled; | ||
1988 | data[i++] = (unsigned int)mgp->pdev->irq; | 1982 | data[i++] = (unsigned int)mgp->pdev->irq; |
1989 | data[i++] = (unsigned int)mgp->msi_enabled; | 1983 | data[i++] = (unsigned int)mgp->msi_enabled; |
1990 | data[i++] = (unsigned int)mgp->msix_enabled; | 1984 | data[i++] = (unsigned int)mgp->msix_enabled; |
@@ -4040,14 +4034,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
4040 | 4034 | ||
4041 | mgp->board_span = pci_resource_len(pdev, 0); | 4035 | mgp->board_span = pci_resource_len(pdev, 0); |
4042 | mgp->iomem_base = pci_resource_start(pdev, 0); | 4036 | mgp->iomem_base = pci_resource_start(pdev, 0); |
4043 | mgp->mtrr = -1; | 4037 | mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span); |
4044 | mgp->wc_enabled = 0; | ||
4045 | #ifdef CONFIG_MTRR | ||
4046 | mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span, | ||
4047 | MTRR_TYPE_WRCOMB, 1); | ||
4048 | if (mgp->mtrr >= 0) | ||
4049 | mgp->wc_enabled = 1; | ||
4050 | #endif | ||
4051 | mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span); | 4038 | mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span); |
4052 | if (mgp->sram == NULL) { | 4039 | if (mgp->sram == NULL) { |
4053 | dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n", | 4040 | dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n", |
@@ -4146,14 +4133,14 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
4146 | goto abort_with_state; | 4133 | goto abort_with_state; |
4147 | } | 4134 | } |
4148 | if (mgp->msix_enabled) | 4135 | if (mgp->msix_enabled) |
4149 | dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n", | 4136 | dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n", |
4150 | mgp->num_slices, mgp->tx_boundary, mgp->fw_name, | 4137 | mgp->num_slices, mgp->tx_boundary, mgp->fw_name, |
4151 | (mgp->wc_enabled ? "Enabled" : "Disabled")); | 4138 | (mgp->wc_cookie > 0 ? "Enabled" : "Disabled")); |
4152 | else | 4139 | else |
4153 | dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", | 4140 | dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n", |
4154 | mgp->msi_enabled ? "MSI" : "xPIC", | 4141 | mgp->msi_enabled ? "MSI" : "xPIC", |
4155 | pdev->irq, mgp->tx_boundary, mgp->fw_name, | 4142 | pdev->irq, mgp->tx_boundary, mgp->fw_name, |
4156 | (mgp->wc_enabled ? "Enabled" : "Disabled")); | 4143 | (mgp->wc_cookie > 0 ? "Enabled" : "Disabled")); |
4157 | 4144 | ||
4158 | board_number++; | 4145 | board_number++; |
4159 | return 0; | 4146 | return 0; |
@@ -4175,10 +4162,7 @@ abort_with_ioremap: | |||
4175 | iounmap(mgp->sram); | 4162 | iounmap(mgp->sram); |
4176 | 4163 | ||
4177 | abort_with_mtrr: | 4164 | abort_with_mtrr: |
4178 | #ifdef CONFIG_MTRR | 4165 | arch_phys_wc_del(mgp->wc_cookie); |
4179 | if (mgp->mtrr >= 0) | ||
4180 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); | ||
4181 | #endif | ||
4182 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), | 4166 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), |
4183 | mgp->cmd, mgp->cmd_bus); | 4167 | mgp->cmd, mgp->cmd_bus); |
4184 | 4168 | ||
@@ -4220,11 +4204,7 @@ static void myri10ge_remove(struct pci_dev *pdev) | |||
4220 | pci_restore_state(pdev); | 4204 | pci_restore_state(pdev); |
4221 | 4205 | ||
4222 | iounmap(mgp->sram); | 4206 | iounmap(mgp->sram); |
4223 | 4207 | arch_phys_wc_del(mgp->wc_cookie); | |
4224 | #ifdef CONFIG_MTRR | ||
4225 | if (mgp->mtrr >= 0) | ||
4226 | mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); | ||
4227 | #endif | ||
4228 | myri10ge_free_slices(mgp); | 4208 | myri10ge_free_slices(mgp); |
4229 | kfree(mgp->msix_vectors); | 4209 | kfree(mgp->msix_vectors); |
4230 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), | 4210 | dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), |
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c index 5c4068353f66..8da7c3faf817 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | |||
@@ -135,7 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) | |||
135 | int i, j; | 135 | int i, j; |
136 | struct nx_host_tx_ring *tx_ring = adapter->tx_ring; | 136 | struct nx_host_tx_ring *tx_ring = adapter->tx_ring; |
137 | 137 | ||
138 | spin_lock(&adapter->tx_clean_lock); | 138 | spin_lock_bh(&adapter->tx_clean_lock); |
139 | cmd_buf = tx_ring->cmd_buf_arr; | 139 | cmd_buf = tx_ring->cmd_buf_arr; |
140 | for (i = 0; i < tx_ring->num_desc; i++) { | 140 | for (i = 0; i < tx_ring->num_desc; i++) { |
141 | buffrag = cmd_buf->frag_array; | 141 | buffrag = cmd_buf->frag_array; |
@@ -159,7 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) | |||
159 | } | 159 | } |
160 | cmd_buf++; | 160 | cmd_buf++; |
161 | } | 161 | } |
162 | spin_unlock(&adapter->tx_clean_lock); | 162 | spin_unlock_bh(&adapter->tx_clean_lock); |
163 | } | 163 | } |
164 | 164 | ||
165 | void netxen_free_sw_resources(struct netxen_adapter *adapter) | 165 | void netxen_free_sw_resources(struct netxen_adapter *adapter) |
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index a570a60533be..ec251531bd9f 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c | |||
@@ -4176,14 +4176,15 @@ static int rocker_port_bridge_setlink(struct net_device *dev, | |||
4176 | 4176 | ||
4177 | static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 4177 | static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
4178 | struct net_device *dev, | 4178 | struct net_device *dev, |
4179 | u32 filter_mask) | 4179 | u32 filter_mask, int nlflags) |
4180 | { | 4180 | { |
4181 | struct rocker_port *rocker_port = netdev_priv(dev); | 4181 | struct rocker_port *rocker_port = netdev_priv(dev); |
4182 | u16 mode = BRIDGE_MODE_UNDEF; | 4182 | u16 mode = BRIDGE_MODE_UNDEF; |
4183 | u32 mask = BR_LEARNING | BR_LEARNING_SYNC; | 4183 | u32 mask = BR_LEARNING | BR_LEARNING_SYNC; |
4184 | 4184 | ||
4185 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, | 4185 | return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, |
4186 | rocker_port->brport_flags, mask); | 4186 | rocker_port->brport_flags, mask, |
4187 | nlflags); | ||
4187 | } | 4188 | } |
4188 | 4189 | ||
4189 | static int rocker_port_get_phys_port_name(struct net_device *dev, | 4190 | static int rocker_port_get_phys_port_name(struct net_device *dev, |
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 2bef655279f3..9b7e0a34c98b 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c | |||
@@ -1765,7 +1765,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev, | |||
1765 | ALE_PORT_STATE, | 1765 | ALE_PORT_STATE, |
1766 | ALE_PORT_STATE_FORWARD); | 1766 | ALE_PORT_STATE_FORWARD); |
1767 | 1767 | ||
1768 | if (ndev && slave->open) | 1768 | if (ndev && slave->open && |
1769 | slave->link_interface != SGMII_LINK_MAC_PHY && | ||
1770 | slave->link_interface != XGMII_LINK_MAC_PHY) | ||
1769 | netif_carrier_on(ndev); | 1771 | netif_carrier_on(ndev); |
1770 | } else { | 1772 | } else { |
1771 | writel(mac_control, GBE_REG_ADDR(slave, emac_regs, | 1773 | writel(mac_control, GBE_REG_ADDR(slave, emac_regs, |
@@ -1773,7 +1775,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev, | |||
1773 | cpsw_ale_control_set(gbe_dev->ale, slave->port_num, | 1775 | cpsw_ale_control_set(gbe_dev->ale, slave->port_num, |
1774 | ALE_PORT_STATE, | 1776 | ALE_PORT_STATE, |
1775 | ALE_PORT_STATE_DISABLE); | 1777 | ALE_PORT_STATE_DISABLE); |
1776 | if (ndev) | 1778 | if (ndev && |
1779 | slave->link_interface != SGMII_LINK_MAC_PHY && | ||
1780 | slave->link_interface != XGMII_LINK_MAC_PHY) | ||
1777 | netif_carrier_off(ndev); | 1781 | netif_carrier_off(ndev); |
1778 | } | 1782 | } |
1779 | 1783 | ||
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index a10b31664709..41071d32bc8e 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h | |||
@@ -128,7 +128,6 @@ struct ndis_tcp_ip_checksum_info; | |||
128 | struct hv_netvsc_packet { | 128 | struct hv_netvsc_packet { |
129 | /* Bookkeeping stuff */ | 129 | /* Bookkeeping stuff */ |
130 | u32 status; | 130 | u32 status; |
131 | bool part_of_skb; | ||
132 | 131 | ||
133 | bool is_data_pkt; | 132 | bool is_data_pkt; |
134 | bool xmit_more; /* from skb */ | 133 | bool xmit_more; /* from skb */ |
@@ -612,6 +611,15 @@ struct multi_send_data { | |||
612 | u32 count; /* counter of batched packets */ | 611 | u32 count; /* counter of batched packets */ |
613 | }; | 612 | }; |
614 | 613 | ||
614 | /* The context of the netvsc device */ | ||
615 | struct net_device_context { | ||
616 | /* point back to our device context */ | ||
617 | struct hv_device *device_ctx; | ||
618 | struct delayed_work dwork; | ||
619 | struct work_struct work; | ||
620 | u32 msg_enable; /* debug level */ | ||
621 | }; | ||
622 | |||
615 | /* Per netvsc device */ | 623 | /* Per netvsc device */ |
616 | struct netvsc_device { | 624 | struct netvsc_device { |
617 | struct hv_device *dev; | 625 | struct hv_device *dev; |
@@ -667,6 +675,9 @@ struct netvsc_device { | |||
667 | struct multi_send_data msd[NR_CPUS]; | 675 | struct multi_send_data msd[NR_CPUS]; |
668 | u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ | 676 | u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ |
669 | u32 pkt_align; /* alignment bytes, e.g. 8 */ | 677 | u32 pkt_align; /* alignment bytes, e.g. 8 */ |
678 | |||
679 | /* The net device context */ | ||
680 | struct net_device_context *nd_ctx; | ||
670 | }; | 681 | }; |
671 | 682 | ||
672 | /* NdisInitialize message */ | 683 | /* NdisInitialize message */ |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 2e8ad0636b46..2d9ef533cc48 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -889,11 +889,6 @@ int netvsc_send(struct hv_device *device, | |||
889 | } else { | 889 | } else { |
890 | packet->page_buf_cnt = 0; | 890 | packet->page_buf_cnt = 0; |
891 | packet->total_data_buflen += msd_len; | 891 | packet->total_data_buflen += msd_len; |
892 | if (!packet->part_of_skb) { | ||
893 | skb = (struct sk_buff *)(unsigned long)packet-> | ||
894 | send_completion_tid; | ||
895 | packet->send_completion_tid = 0; | ||
896 | } | ||
897 | } | 892 | } |
898 | 893 | ||
899 | if (msdp->pkt) | 894 | if (msdp->pkt) |
@@ -1197,6 +1192,9 @@ int netvsc_device_add(struct hv_device *device, void *additional_info) | |||
1197 | */ | 1192 | */ |
1198 | ndev = net_device->ndev; | 1193 | ndev = net_device->ndev; |
1199 | 1194 | ||
1195 | /* Add netvsc_device context to netvsc_device */ | ||
1196 | net_device->nd_ctx = netdev_priv(ndev); | ||
1197 | |||
1200 | /* Initialize the NetVSC channel extension */ | 1198 | /* Initialize the NetVSC channel extension */ |
1201 | init_completion(&net_device->channel_init_wait); | 1199 | init_completion(&net_device->channel_init_wait); |
1202 | 1200 | ||
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index a3a9d3898a6e..5993c7e2d723 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -40,18 +40,21 @@ | |||
40 | 40 | ||
41 | #include "hyperv_net.h" | 41 | #include "hyperv_net.h" |
42 | 42 | ||
43 | struct net_device_context { | ||
44 | /* point back to our device context */ | ||
45 | struct hv_device *device_ctx; | ||
46 | struct delayed_work dwork; | ||
47 | struct work_struct work; | ||
48 | }; | ||
49 | 43 | ||
50 | #define RING_SIZE_MIN 64 | 44 | #define RING_SIZE_MIN 64 |
51 | static int ring_size = 128; | 45 | static int ring_size = 128; |
52 | module_param(ring_size, int, S_IRUGO); | 46 | module_param(ring_size, int, S_IRUGO); |
53 | MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); | 47 | MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); |
54 | 48 | ||
49 | static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | | ||
50 | NETIF_MSG_LINK | NETIF_MSG_IFUP | | ||
51 | NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | | ||
52 | NETIF_MSG_TX_ERR; | ||
53 | |||
54 | static int debug = -1; | ||
55 | module_param(debug, int, S_IRUGO); | ||
56 | MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); | ||
57 | |||
55 | static void do_set_multicast(struct work_struct *w) | 58 | static void do_set_multicast(struct work_struct *w) |
56 | { | 59 | { |
57 | struct net_device_context *ndevctx = | 60 | struct net_device_context *ndevctx = |
@@ -235,9 +238,6 @@ void netvsc_xmit_completion(void *context) | |||
235 | struct sk_buff *skb = (struct sk_buff *) | 238 | struct sk_buff *skb = (struct sk_buff *) |
236 | (unsigned long)packet->send_completion_tid; | 239 | (unsigned long)packet->send_completion_tid; |
237 | 240 | ||
238 | if (!packet->part_of_skb) | ||
239 | kfree(packet); | ||
240 | |||
241 | if (skb) | 241 | if (skb) |
242 | dev_kfree_skb_any(skb); | 242 | dev_kfree_skb_any(skb); |
243 | } | 243 | } |
@@ -389,7 +389,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) | |||
389 | u32 net_trans_info; | 389 | u32 net_trans_info; |
390 | u32 hash; | 390 | u32 hash; |
391 | u32 skb_length; | 391 | u32 skb_length; |
392 | u32 head_room; | ||
393 | u32 pkt_sz; | 392 | u32 pkt_sz; |
394 | struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; | 393 | struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; |
395 | 394 | ||
@@ -402,7 +401,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) | |||
402 | 401 | ||
403 | check_size: | 402 | check_size: |
404 | skb_length = skb->len; | 403 | skb_length = skb->len; |
405 | head_room = skb_headroom(skb); | ||
406 | num_data_pgs = netvsc_get_slots(skb) + 2; | 404 | num_data_pgs = netvsc_get_slots(skb) + 2; |
407 | if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) { | 405 | if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) { |
408 | net_alert_ratelimited("packet too big: %u pages (%u bytes)\n", | 406 | net_alert_ratelimited("packet too big: %u pages (%u bytes)\n", |
@@ -421,20 +419,14 @@ check_size: | |||
421 | 419 | ||
422 | pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE; | 420 | pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE; |
423 | 421 | ||
424 | if (head_room < pkt_sz) { | 422 | ret = skb_cow_head(skb, pkt_sz); |
425 | packet = kmalloc(pkt_sz, GFP_ATOMIC); | 423 | if (ret) { |
426 | if (!packet) { | 424 | netdev_err(net, "unable to alloc hv_netvsc_packet\n"); |
427 | /* out of memory, drop packet */ | 425 | ret = -ENOMEM; |
428 | netdev_err(net, "unable to alloc hv_netvsc_packet\n"); | 426 | goto drop; |
429 | ret = -ENOMEM; | ||
430 | goto drop; | ||
431 | } | ||
432 | packet->part_of_skb = false; | ||
433 | } else { | ||
434 | /* Use the headroom for building up the packet */ | ||
435 | packet = (struct hv_netvsc_packet *)skb->head; | ||
436 | packet->part_of_skb = true; | ||
437 | } | 427 | } |
428 | /* Use the headroom for building up the packet */ | ||
429 | packet = (struct hv_netvsc_packet *)skb->head; | ||
438 | 430 | ||
439 | packet->status = 0; | 431 | packet->status = 0; |
440 | packet->xmit_more = skb->xmit_more; | 432 | packet->xmit_more = skb->xmit_more; |
@@ -591,8 +583,6 @@ drop: | |||
591 | net->stats.tx_bytes += skb_length; | 583 | net->stats.tx_bytes += skb_length; |
592 | net->stats.tx_packets++; | 584 | net->stats.tx_packets++; |
593 | } else { | 585 | } else { |
594 | if (packet && !packet->part_of_skb) | ||
595 | kfree(packet); | ||
596 | if (ret != -EAGAIN) { | 586 | if (ret != -EAGAIN) { |
597 | dev_kfree_skb_any(skb); | 587 | dev_kfree_skb_any(skb); |
598 | net->stats.tx_dropped++; | 588 | net->stats.tx_dropped++; |
@@ -888,6 +878,11 @@ static int netvsc_probe(struct hv_device *dev, | |||
888 | 878 | ||
889 | net_device_ctx = netdev_priv(net); | 879 | net_device_ctx = netdev_priv(net); |
890 | net_device_ctx->device_ctx = dev; | 880 | net_device_ctx->device_ctx = dev; |
881 | net_device_ctx->msg_enable = netif_msg_init(debug, default_msg); | ||
882 | if (netif_msg_probe(net_device_ctx)) | ||
883 | netdev_dbg(net, "netvsc msg_enable: %d\n", | ||
884 | net_device_ctx->msg_enable); | ||
885 | |||
891 | hv_set_drvdata(dev, net); | 886 | hv_set_drvdata(dev, net); |
892 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); | 887 | INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); |
893 | INIT_WORK(&net_device_ctx->work, do_set_multicast); | 888 | INIT_WORK(&net_device_ctx->work, do_set_multicast); |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 0d92efefd796..9118cea91882 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
@@ -429,7 +429,8 @@ int rndis_filter_receive(struct hv_device *dev, | |||
429 | 429 | ||
430 | rndis_msg = pkt->data; | 430 | rndis_msg = pkt->data; |
431 | 431 | ||
432 | dump_rndis_message(dev, rndis_msg); | 432 | if (netif_msg_rx_err(net_dev->nd_ctx)) |
433 | dump_rndis_message(dev, rndis_msg); | ||
433 | 434 | ||
434 | switch (rndis_msg->ndis_msg_type) { | 435 | switch (rndis_msg->ndis_msg_type) { |
435 | case RNDIS_MSG_PACKET: | 436 | case RNDIS_MSG_PACKET: |
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 49ce7ece5af3..c9cb486c753d 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c | |||
@@ -80,7 +80,8 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) | |||
80 | * assume the pin serves as pull-up. If direction is | 80 | * assume the pin serves as pull-up. If direction is |
81 | * output, the default value is high. | 81 | * output, the default value is high. |
82 | */ | 82 | */ |
83 | gpio_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low); | 83 | gpio_set_value_cansleep(bitbang->mdo, |
84 | 1 ^ bitbang->mdo_active_low); | ||
84 | return; | 85 | return; |
85 | } | 86 | } |
86 | 87 | ||
@@ -96,7 +97,8 @@ static int mdio_get(struct mdiobb_ctrl *ctrl) | |||
96 | struct mdio_gpio_info *bitbang = | 97 | struct mdio_gpio_info *bitbang = |
97 | container_of(ctrl, struct mdio_gpio_info, ctrl); | 98 | container_of(ctrl, struct mdio_gpio_info, ctrl); |
98 | 99 | ||
99 | return gpio_get_value(bitbang->mdio) ^ bitbang->mdio_active_low; | 100 | return gpio_get_value_cansleep(bitbang->mdio) ^ |
101 | bitbang->mdio_active_low; | ||
100 | } | 102 | } |
101 | 103 | ||
102 | static void mdio_set(struct mdiobb_ctrl *ctrl, int what) | 104 | static void mdio_set(struct mdiobb_ctrl *ctrl, int what) |
@@ -105,9 +107,11 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what) | |||
105 | container_of(ctrl, struct mdio_gpio_info, ctrl); | 107 | container_of(ctrl, struct mdio_gpio_info, ctrl); |
106 | 108 | ||
107 | if (bitbang->mdo) | 109 | if (bitbang->mdo) |
108 | gpio_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low); | 110 | gpio_set_value_cansleep(bitbang->mdo, |
111 | what ^ bitbang->mdo_active_low); | ||
109 | else | 112 | else |
110 | gpio_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low); | 113 | gpio_set_value_cansleep(bitbang->mdio, |
114 | what ^ bitbang->mdio_active_low); | ||
111 | } | 115 | } |
112 | 116 | ||
113 | static void mdc_set(struct mdiobb_ctrl *ctrl, int what) | 117 | static void mdc_set(struct mdiobb_ctrl *ctrl, int what) |
@@ -115,7 +119,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) | |||
115 | struct mdio_gpio_info *bitbang = | 119 | struct mdio_gpio_info *bitbang = |
116 | container_of(ctrl, struct mdio_gpio_info, ctrl); | 120 | container_of(ctrl, struct mdio_gpio_info, ctrl); |
117 | 121 | ||
118 | gpio_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low); | 122 | gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low); |
119 | } | 123 | } |
120 | 124 | ||
121 | static struct mdiobb_ops mdio_gpio_ops = { | 125 | static struct mdiobb_ops mdio_gpio_ops = { |
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index 1a87a585e74d..66edd99bc302 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c | |||
@@ -12,33 +12,30 @@ | |||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/phy.h> | 13 | #include <linux/phy.h> |
14 | #include <linux/mdio-mux.h> | 14 | #include <linux/mdio-mux.h> |
15 | #include <linux/of_gpio.h> | 15 | #include <linux/gpio/consumer.h> |
16 | 16 | ||
17 | #define DRV_VERSION "1.1" | 17 | #define DRV_VERSION "1.1" |
18 | #define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver" | 18 | #define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver" |
19 | 19 | ||
20 | #define MDIO_MUX_GPIO_MAX_BITS 8 | ||
21 | |||
22 | struct mdio_mux_gpio_state { | 20 | struct mdio_mux_gpio_state { |
23 | struct gpio_desc *gpio[MDIO_MUX_GPIO_MAX_BITS]; | 21 | struct gpio_descs *gpios; |
24 | unsigned int num_gpios; | ||
25 | void *mux_handle; | 22 | void *mux_handle; |
26 | }; | 23 | }; |
27 | 24 | ||
28 | static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, | 25 | static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, |
29 | void *data) | 26 | void *data) |
30 | { | 27 | { |
31 | int values[MDIO_MUX_GPIO_MAX_BITS]; | ||
32 | unsigned int n; | ||
33 | struct mdio_mux_gpio_state *s = data; | 28 | struct mdio_mux_gpio_state *s = data; |
29 | int values[s->gpios->ndescs]; | ||
30 | unsigned int n; | ||
34 | 31 | ||
35 | if (current_child == desired_child) | 32 | if (current_child == desired_child) |
36 | return 0; | 33 | return 0; |
37 | 34 | ||
38 | for (n = 0; n < s->num_gpios; n++) { | 35 | for (n = 0; n < s->gpios->ndescs; n++) |
39 | values[n] = (desired_child >> n) & 1; | 36 | values[n] = (desired_child >> n) & 1; |
40 | } | 37 | |
41 | gpiod_set_array_cansleep(s->num_gpios, s->gpio, values); | 38 | gpiod_set_array_cansleep(s->gpios->ndescs, s->gpios->desc, values); |
42 | 39 | ||
43 | return 0; | 40 | return 0; |
44 | } | 41 | } |
@@ -46,56 +43,33 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child, | |||
46 | static int mdio_mux_gpio_probe(struct platform_device *pdev) | 43 | static int mdio_mux_gpio_probe(struct platform_device *pdev) |
47 | { | 44 | { |
48 | struct mdio_mux_gpio_state *s; | 45 | struct mdio_mux_gpio_state *s; |
49 | int num_gpios; | ||
50 | unsigned int n; | ||
51 | int r; | 46 | int r; |
52 | 47 | ||
53 | if (!pdev->dev.of_node) | ||
54 | return -ENODEV; | ||
55 | |||
56 | num_gpios = of_gpio_count(pdev->dev.of_node); | ||
57 | if (num_gpios <= 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS) | ||
58 | return -ENODEV; | ||
59 | |||
60 | s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); | 48 | s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); |
61 | if (!s) | 49 | if (!s) |
62 | return -ENOMEM; | 50 | return -ENOMEM; |
63 | 51 | ||
64 | s->num_gpios = num_gpios; | 52 | s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW); |
65 | 53 | if (IS_ERR(s->gpios)) | |
66 | for (n = 0; n < num_gpios; ) { | 54 | return PTR_ERR(s->gpios); |
67 | struct gpio_desc *gpio = gpiod_get_index(&pdev->dev, NULL, n, | ||
68 | GPIOD_OUT_LOW); | ||
69 | if (IS_ERR(gpio)) { | ||
70 | r = PTR_ERR(gpio); | ||
71 | goto err; | ||
72 | } | ||
73 | s->gpio[n] = gpio; | ||
74 | n++; | ||
75 | } | ||
76 | 55 | ||
77 | r = mdio_mux_init(&pdev->dev, | 56 | r = mdio_mux_init(&pdev->dev, |
78 | mdio_mux_gpio_switch_fn, &s->mux_handle, s); | 57 | mdio_mux_gpio_switch_fn, &s->mux_handle, s); |
79 | 58 | ||
80 | if (r == 0) { | 59 | if (r != 0) { |
81 | pdev->dev.platform_data = s; | 60 | gpiod_put_array(s->gpios); |
82 | return 0; | 61 | return r; |
83 | } | ||
84 | err: | ||
85 | while (n) { | ||
86 | n--; | ||
87 | gpiod_put(s->gpio[n]); | ||
88 | } | 62 | } |
89 | return r; | 63 | |
64 | pdev->dev.platform_data = s; | ||
65 | return 0; | ||
90 | } | 66 | } |
91 | 67 | ||
92 | static int mdio_mux_gpio_remove(struct platform_device *pdev) | 68 | static int mdio_mux_gpio_remove(struct platform_device *pdev) |
93 | { | 69 | { |
94 | unsigned int n; | ||
95 | struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev); | 70 | struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev); |
96 | mdio_mux_uninit(s->mux_handle); | 71 | mdio_mux_uninit(s->mux_handle); |
97 | for (n = 0; n < s->num_gpios; n++) | 72 | gpiod_put_array(s->gpios); |
98 | gpiod_put(s->gpio[n]); | ||
99 | return 0; | 73 | return 0; |
100 | } | 74 | } |
101 | 75 | ||
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c index 911b21602ff2..05005c660d4d 100644 --- a/drivers/net/ppp/ppp_mppe.c +++ b/drivers/net/ppp/ppp_mppe.c | |||
@@ -478,7 +478,6 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
478 | struct blkcipher_desc desc = { .tfm = state->arc4 }; | 478 | struct blkcipher_desc desc = { .tfm = state->arc4 }; |
479 | unsigned ccount; | 479 | unsigned ccount; |
480 | int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; | 480 | int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; |
481 | int sanity = 0; | ||
482 | struct scatterlist sg_in[1], sg_out[1]; | 481 | struct scatterlist sg_in[1], sg_out[1]; |
483 | 482 | ||
484 | if (isize <= PPP_HDRLEN + MPPE_OVHD) { | 483 | if (isize <= PPP_HDRLEN + MPPE_OVHD) { |
@@ -514,31 +513,19 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
514 | "mppe_decompress[%d]: ENCRYPTED bit not set!\n", | 513 | "mppe_decompress[%d]: ENCRYPTED bit not set!\n", |
515 | state->unit); | 514 | state->unit); |
516 | state->sanity_errors += 100; | 515 | state->sanity_errors += 100; |
517 | sanity = 1; | 516 | goto sanity_error; |
518 | } | 517 | } |
519 | if (!state->stateful && !flushed) { | 518 | if (!state->stateful && !flushed) { |
520 | printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in " | 519 | printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in " |
521 | "stateless mode!\n", state->unit); | 520 | "stateless mode!\n", state->unit); |
522 | state->sanity_errors += 100; | 521 | state->sanity_errors += 100; |
523 | sanity = 1; | 522 | goto sanity_error; |
524 | } | 523 | } |
525 | if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) { | 524 | if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) { |
526 | printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on " | 525 | printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on " |
527 | "flag packet!\n", state->unit); | 526 | "flag packet!\n", state->unit); |
528 | state->sanity_errors += 100; | 527 | state->sanity_errors += 100; |
529 | sanity = 1; | 528 | goto sanity_error; |
530 | } | ||
531 | |||
532 | if (sanity) { | ||
533 | if (state->sanity_errors < SANITY_MAX) | ||
534 | return DECOMP_ERROR; | ||
535 | else | ||
536 | /* | ||
537 | * Take LCP down if the peer is sending too many bogons. | ||
538 | * We don't want to do this for a single or just a few | ||
539 | * instances since it could just be due to packet corruption. | ||
540 | */ | ||
541 | return DECOMP_FATALERROR; | ||
542 | } | 529 | } |
543 | 530 | ||
544 | /* | 531 | /* |
@@ -546,6 +533,13 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
546 | */ | 533 | */ |
547 | 534 | ||
548 | if (!state->stateful) { | 535 | if (!state->stateful) { |
536 | /* Discard late packet */ | ||
537 | if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE | ||
538 | > MPPE_CCOUNT_SPACE / 2) { | ||
539 | state->sanity_errors++; | ||
540 | goto sanity_error; | ||
541 | } | ||
542 | |||
549 | /* RFC 3078, sec 8.1. Rekey for every packet. */ | 543 | /* RFC 3078, sec 8.1. Rekey for every packet. */ |
550 | while (state->ccount != ccount) { | 544 | while (state->ccount != ccount) { |
551 | mppe_rekey(state, 0); | 545 | mppe_rekey(state, 0); |
@@ -649,6 +643,16 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, | |||
649 | state->sanity_errors >>= 1; | 643 | state->sanity_errors >>= 1; |
650 | 644 | ||
651 | return osize; | 645 | return osize; |
646 | |||
647 | sanity_error: | ||
648 | if (state->sanity_errors < SANITY_MAX) | ||
649 | return DECOMP_ERROR; | ||
650 | else | ||
651 | /* Take LCP down if the peer is sending too many bogons. | ||
652 | * We don't want to do this for a single or just a few | ||
653 | * instances since it could just be due to packet corruption. | ||
654 | */ | ||
655 | return DECOMP_FATALERROR; | ||
652 | } | 656 | } |
653 | 657 | ||
654 | /* | 658 | /* |
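
The ppp_mppe hunks collapse the scattered sanity checks onto a single sanity_error label and, in stateless mode, additionally discard packets whose coherency count is behind the expected one rather than rekeying backwards through the entire counter space. The "late" test is a distance check modulo the 12-bit coherency-count space: anything more than half the space away is treated as behind. A hedged sketch of that comparison; 4096 matches MPPE's counter space, the function name is illustrative:

	#include <stdbool.h>
	#include <stdio.h>

	#define CCOUNT_SPACE 4096	/* 12-bit coherency counter wraps here */

	/* True if 'got' lies in the half of the counter space behind 'expected',
	 * i.e. the packet arrived late and should be discarded in stateless mode.
	 */
	static bool ccount_is_late(unsigned expected, unsigned got)
	{
		return ((got - expected) % CCOUNT_SPACE) > CCOUNT_SPACE / 2;
	}

	int main(void)
	{
		printf("%d\n", ccount_is_late(10, 9));		/* 1: one behind -> late */
		printf("%d\n", ccount_is_late(10, 12));		/* 0: slightly ahead      */
		printf("%d\n", ccount_is_late(4095, 2));	/* 0: ahead across wrap   */
		return 0;
	}
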
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 154116aafd0d..27a5f954f8e9 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -730,12 +730,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, | |||
730 | /* Only change unicasts */ | 730 | /* Only change unicasts */ |
731 | if (!(is_multicast_ether_addr(f->eth_addr) || | 731 | if (!(is_multicast_ether_addr(f->eth_addr) || |
732 | is_zero_ether_addr(f->eth_addr))) { | 732 | is_zero_ether_addr(f->eth_addr))) { |
733 | int rc = vxlan_fdb_replace(f, ip, port, vni, | 733 | notify |= vxlan_fdb_replace(f, ip, port, vni, |
734 | ifindex); | 734 | ifindex); |
735 | |||
736 | if (rc < 0) | ||
737 | return rc; | ||
738 | notify |= rc; | ||
739 | } else | 735 | } else |
740 | return -EOPNOTSUPP; | 736 | return -EOPNOTSUPP; |
741 | } | 737 | } |
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 89dca77ca038..18ee2089df4a 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c | |||
@@ -1110,7 +1110,7 @@ void devm_pinctrl_put(struct pinctrl *p) | |||
1110 | EXPORT_SYMBOL_GPL(devm_pinctrl_put); | 1110 | EXPORT_SYMBOL_GPL(devm_pinctrl_put); |
1111 | 1111 | ||
1112 | int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, | 1112 | int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, |
1113 | bool dup, bool locked) | 1113 | bool dup) |
1114 | { | 1114 | { |
1115 | int i, ret; | 1115 | int i, ret; |
1116 | struct pinctrl_maps *maps_node; | 1116 | struct pinctrl_maps *maps_node; |
@@ -1178,11 +1178,9 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, | |||
1178 | maps_node->maps = maps; | 1178 | maps_node->maps = maps; |
1179 | } | 1179 | } |
1180 | 1180 | ||
1181 | if (!locked) | 1181 | mutex_lock(&pinctrl_maps_mutex); |
1182 | mutex_lock(&pinctrl_maps_mutex); | ||
1183 | list_add_tail(&maps_node->node, &pinctrl_maps); | 1182 | list_add_tail(&maps_node->node, &pinctrl_maps); |
1184 | if (!locked) | 1183 | mutex_unlock(&pinctrl_maps_mutex); |
1185 | mutex_unlock(&pinctrl_maps_mutex); | ||
1186 | 1184 | ||
1187 | return 0; | 1185 | return 0; |
1188 | } | 1186 | } |
@@ -1197,7 +1195,7 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, | |||
1197 | int pinctrl_register_mappings(struct pinctrl_map const *maps, | 1195 | int pinctrl_register_mappings(struct pinctrl_map const *maps, |
1198 | unsigned num_maps) | 1196 | unsigned num_maps) |
1199 | { | 1197 | { |
1200 | return pinctrl_register_map(maps, num_maps, true, false); | 1198 | return pinctrl_register_map(maps, num_maps, true); |
1201 | } | 1199 | } |
1202 | 1200 | ||
1203 | void pinctrl_unregister_map(struct pinctrl_map const *map) | 1201 | void pinctrl_unregister_map(struct pinctrl_map const *map) |
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h index 75476b3d87da..b24ea846c867 100644 --- a/drivers/pinctrl/core.h +++ b/drivers/pinctrl/core.h | |||
@@ -183,7 +183,7 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, | |||
183 | } | 183 | } |
184 | 184 | ||
185 | int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, | 185 | int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps, |
186 | bool dup, bool locked); | 186 | bool dup); |
187 | void pinctrl_unregister_map(struct pinctrl_map const *map); | 187 | void pinctrl_unregister_map(struct pinctrl_map const *map); |
188 | 188 | ||
189 | extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev); | 189 | extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev); |
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index eda13de2e7c0..0bbf7d71b281 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c | |||
@@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename, | |||
92 | dt_map->num_maps = num_maps; | 92 | dt_map->num_maps = num_maps; |
93 | list_add_tail(&dt_map->node, &p->dt_maps); | 93 | list_add_tail(&dt_map->node, &p->dt_maps); |
94 | 94 | ||
95 | return pinctrl_register_map(map, num_maps, false, true); | 95 | return pinctrl_register_map(map, num_maps, false); |
96 | } | 96 | } |
97 | 97 | ||
98 | struct pinctrl_dev *of_pinctrl_get(struct device_node *np) | 98 | struct pinctrl_dev *of_pinctrl_get(struct device_node *np) |
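The core.c, core.h and devicetree.c hunks drop the "locked" flag, so pinctrl_register_map() now always takes pinctrl_maps_mutex itself instead of doing so conditionally on a caller-supplied argument. A minimal user-space sketch of that simplification, with a counter and a pthread mutex standing in for the pinctrl list and lock (all names below are illustrative, not the subsystem's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t maps_mutex = PTHREAD_MUTEX_INITIALIZER;
static int num_registered;               /* stands in for the pinctrl_maps list */

/*
 * The registration helper owns the lock, so callers no longer have to say
 * whether they already hold it.
 */
static int register_map(int num_maps)
{
        pthread_mutex_lock(&maps_mutex);
        num_registered += num_maps;      /* list_add_tail() in the real code */
        pthread_mutex_unlock(&maps_mutex);
        return 0;
}

/* Both former call sites now look identical. */
static int register_mappings(int num_maps) { return register_map(num_maps); }
static int dt_remember_map(int num_maps)   { return register_map(num_maps); }

int main(void)
{
        register_mappings(2);
        dt_remember_map(3);
        printf("%d maps registered\n", num_registered);
        return 0;
}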
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index 493294c0ebe6..474812e2b0cb 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c | |||
@@ -881,6 +881,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset, | |||
881 | if (!mtk_eint_get_mask(pctl, eint_num)) { | 881 | if (!mtk_eint_get_mask(pctl, eint_num)) { |
882 | mtk_eint_mask(d); | 882 | mtk_eint_mask(d); |
883 | unmask = 1; | 883 | unmask = 1; |
884 | } else { | ||
885 | unmask = 0; | ||
884 | } | 886 | } |
885 | 887 | ||
886 | clr_bit = 0xff << eint_offset; | 888 | clr_bit = 0xff << eint_offset; |
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c index 42f930f70de3..03aa58c4cb85 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c | |||
@@ -364,7 +364,7 @@ static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = { | |||
364 | MPP_FUNCTION(0x5, "audio", "mclk"), | 364 | MPP_FUNCTION(0x5, "audio", "mclk"), |
365 | MPP_FUNCTION(0x6, "uart0", "cts")), | 365 | MPP_FUNCTION(0x6, "uart0", "cts")), |
366 | MPP_MODE(63, | 366 | MPP_MODE(63, |
367 | MPP_FUNCTION(0x0, "gpo", NULL), | 367 | MPP_FUNCTION(0x0, "gpio", NULL), |
368 | MPP_FUNCTION(0x1, "spi0", "sck"), | 368 | MPP_FUNCTION(0x1, "spi0", "sck"), |
369 | MPP_FUNCTION(0x2, "tclk", NULL)), | 369 | MPP_FUNCTION(0x2, "tclk", NULL)), |
370 | MPP_MODE(64, | 370 | MPP_MODE(64, |
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index b2d22218a258..ae4115e4b4ef 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c | |||
@@ -260,6 +260,7 @@ static int pmic_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned function, | |||
260 | val = 1; | 260 | val = 1; |
261 | } | 261 | } |
262 | 262 | ||
263 | val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT; | ||
263 | val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT; | 264 | val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT; |
264 | val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT; | 265 | val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT; |
265 | 266 | ||
@@ -417,7 +418,7 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin, | |||
417 | return ret; | 418 | return ret; |
418 | 419 | ||
419 | val = pad->buffer_type << PMIC_GPIO_REG_OUT_TYPE_SHIFT; | 420 | val = pad->buffer_type << PMIC_GPIO_REG_OUT_TYPE_SHIFT; |
420 | val = pad->strength << PMIC_GPIO_REG_OUT_STRENGTH_SHIFT; | 421 | val |= pad->strength << PMIC_GPIO_REG_OUT_STRENGTH_SHIFT; |
421 | 422 | ||
422 | ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_DIG_OUT_CTL, val); | 423 | ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_DIG_OUT_CTL, val); |
423 | if (ret < 0) | 424 | if (ret < 0) |
@@ -466,12 +467,13 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev, | |||
466 | seq_puts(s, " ---"); | 467 | seq_puts(s, " ---"); |
467 | } else { | 468 | } else { |
468 | 469 | ||
469 | if (!pad->input_enabled) { | 470 | if (pad->input_enabled) { |
470 | ret = pmic_gpio_read(state, pad, PMIC_MPP_REG_RT_STS); | 471 | ret = pmic_gpio_read(state, pad, PMIC_MPP_REG_RT_STS); |
471 | if (!ret) { | 472 | if (ret < 0) |
472 | ret &= PMIC_MPP_REG_RT_STS_VAL_MASK; | 473 | return; |
473 | pad->out_value = ret; | 474 | |
474 | } | 475 | ret &= PMIC_MPP_REG_RT_STS_VAL_MASK; |
476 | pad->out_value = ret; | ||
475 | } | 477 | } |
476 | 478 | ||
477 | seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in"); | 479 | seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in"); |
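Two of the spmi-gpio hunks fix the same class of bug: a plain assignment ("val = ...") where an OR ("val |= ...") was intended, which silently discards the bits composed on the previous line. A tiny standalone illustration with made-up shift values; the real PMIC_GPIO_* shift constants are defined elsewhere in the driver and are not reproduced here.

#include <stdio.h>

/* Hypothetical field positions, for illustration only. */
#define OUT_TYPE_SHIFT     4
#define OUT_STRENGTH_SHIFT 0

int main(void)
{
        unsigned buffer_type = 0x2, strength = 0x3;
        unsigned buggy, fixed;

        buggy = buffer_type << OUT_TYPE_SHIFT;
        buggy = strength << OUT_STRENGTH_SHIFT;     /* overwrites the type bits */

        fixed  = buffer_type << OUT_TYPE_SHIFT;
        fixed |= strength << OUT_STRENGTH_SHIFT;    /* keeps both fields */

        printf("buggy=0x%02x fixed=0x%02x\n", buggy, fixed); /* 0x03 vs 0x23 */
        return 0;
}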
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c index 8f36c5f91949..211b942ad6d5 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c | |||
@@ -370,6 +370,7 @@ static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function, | |||
370 | } | 370 | } |
371 | } | 371 | } |
372 | 372 | ||
373 | val = val << PMIC_MPP_REG_MODE_DIR_SHIFT; | ||
373 | val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT; | 374 | val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT; |
374 | val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK; | 375 | val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK; |
375 | 376 | ||
@@ -576,10 +577,11 @@ static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev, | |||
576 | 577 | ||
577 | if (pad->input_enabled) { | 578 | if (pad->input_enabled) { |
578 | ret = pmic_mpp_read(state, pad, PMIC_MPP_REG_RT_STS); | 579 | ret = pmic_mpp_read(state, pad, PMIC_MPP_REG_RT_STS); |
579 | if (!ret) { | 580 | if (ret < 0) |
580 | ret &= PMIC_MPP_REG_RT_STS_VAL_MASK; | 581 | return; |
581 | pad->out_value = ret; | 582 | |
582 | } | 583 | ret &= PMIC_MPP_REG_RT_STS_VAL_MASK; |
584 | pad->out_value = ret; | ||
583 | } | 585 | } |
584 | 586 | ||
585 | seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in"); | 587 | seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in"); |
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index b3d419a84723..b496db87bc05 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c | |||
@@ -830,6 +830,13 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) | |||
830 | */ | 830 | */ |
831 | static const struct dmi_system_id no_hw_rfkill_list[] = { | 831 | static const struct dmi_system_id no_hw_rfkill_list[] = { |
832 | { | 832 | { |
833 | .ident = "Lenovo G40-30", | ||
834 | .matches = { | ||
835 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
836 | DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G40-30"), | ||
837 | }, | ||
838 | }, | ||
839 | { | ||
833 | .ident = "Lenovo Yoga 2 11 / 13 / Pro", | 840 | .ident = "Lenovo Yoga 2 11 / 13 / Pro", |
834 | .matches = { | 841 | .matches = { |
835 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | 842 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 7769575345d8..9bb9ad6d4a1b 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -2115,7 +2115,7 @@ static int hotkey_mask_get(void) | |||
2115 | return 0; | 2115 | return 0; |
2116 | } | 2116 | } |
2117 | 2117 | ||
2118 | void static hotkey_mask_warn_incomplete_mask(void) | 2118 | static void hotkey_mask_warn_incomplete_mask(void) |
2119 | { | 2119 | { |
2120 | /* log only what the user can fix... */ | 2120 | /* log only what the user can fix... */ |
2121 | const u32 wantedmask = hotkey_driver_mask & | 2121 | const u32 wantedmask = hotkey_driver_mask & |
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 6149ae01e11f..0fe4ad8826b2 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -164,6 +164,16 @@ config RTC_DRV_ABB5ZES3 | |||
164 | This driver can also be built as a module. If so, the module | 164 | This driver can also be built as a module. If so, the module |
165 | will be called rtc-ab-b5ze-s3. | 165 | will be called rtc-ab-b5ze-s3. |
166 | 166 | ||
167 | config RTC_DRV_ABX80X | ||
168 | tristate "Abracon ABx80x" | ||
169 | help | ||
170 | If you say yes here you get support for Abracon AB080X and AB180X | ||
171 | families of ultra-low-power battery- and capacitor-backed real-time | ||
172 | clock chips. | ||
173 | |||
174 | This driver can also be built as a module. If so, the module | ||
175 | will be called rtc-abx80x. | ||
176 | |||
167 | config RTC_DRV_AS3722 | 177 | config RTC_DRV_AS3722 |
168 | tristate "ams AS3722 RTC driver" | 178 | tristate "ams AS3722 RTC driver" |
169 | depends on MFD_AS3722 | 179 | depends on MFD_AS3722 |
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index c31731c29762..2b82e2b0311b 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile | |||
@@ -25,6 +25,7 @@ obj-$(CONFIG_RTC_DRV_88PM80X) += rtc-88pm80x.o | |||
25 | obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o | 25 | obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o |
26 | obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o | 26 | obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o |
27 | obj-$(CONFIG_RTC_DRV_ABB5ZES3) += rtc-ab-b5ze-s3.o | 27 | obj-$(CONFIG_RTC_DRV_ABB5ZES3) += rtc-ab-b5ze-s3.o |
28 | obj-$(CONFIG_RTC_DRV_ABX80X) += rtc-abx80x.o | ||
28 | obj-$(CONFIG_RTC_DRV_ARMADA38X) += rtc-armada38x.o | 29 | obj-$(CONFIG_RTC_DRV_ARMADA38X) += rtc-armada38x.o |
29 | obj-$(CONFIG_RTC_DRV_AS3722) += rtc-as3722.o | 30 | obj-$(CONFIG_RTC_DRV_AS3722) += rtc-as3722.o |
30 | obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o | 31 | obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o |
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c new file mode 100644 index 000000000000..4337c3bc6ace --- /dev/null +++ b/drivers/rtc/rtc-abx80x.c | |||
@@ -0,0 +1,307 @@ | |||
1 | /* | ||
2 | * A driver for the I2C members of the Abracon AB x8xx RTC family, | ||
3 | * and compatible: AB 1805 and AB 0805 | ||
4 | * | ||
5 | * Copyright 2014-2015 Macq S.A. | ||
6 | * | ||
7 | * Author: Philippe De Muyter <phdm@macqel.be> | ||
8 | * Author: Alexandre Belloni <alexandre.belloni@free-electrons.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/bcd.h> | ||
17 | #include <linux/i2c.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/rtc.h> | ||
20 | |||
21 | #define ABX8XX_REG_HTH 0x00 | ||
22 | #define ABX8XX_REG_SC 0x01 | ||
23 | #define ABX8XX_REG_MN 0x02 | ||
24 | #define ABX8XX_REG_HR 0x03 | ||
25 | #define ABX8XX_REG_DA 0x04 | ||
26 | #define ABX8XX_REG_MO 0x05 | ||
27 | #define ABX8XX_REG_YR 0x06 | ||
28 | #define ABX8XX_REG_WD 0x07 | ||
29 | |||
30 | #define ABX8XX_REG_CTRL1 0x10 | ||
31 | #define ABX8XX_CTRL_WRITE BIT(1) | ||
32 | #define ABX8XX_CTRL_12_24 BIT(6) | ||
33 | |||
34 | #define ABX8XX_REG_CFG_KEY 0x1f | ||
35 | #define ABX8XX_CFG_KEY_MISC 0x9d | ||
36 | |||
37 | #define ABX8XX_REG_ID0 0x28 | ||
38 | |||
39 | #define ABX8XX_REG_TRICKLE 0x20 | ||
40 | #define ABX8XX_TRICKLE_CHARGE_ENABLE 0xa0 | ||
41 | #define ABX8XX_TRICKLE_STANDARD_DIODE 0x8 | ||
42 | #define ABX8XX_TRICKLE_SCHOTTKY_DIODE 0x4 | ||
43 | |||
44 | static u8 trickle_resistors[] = {0, 3, 6, 11}; | ||
45 | |||
46 | enum abx80x_chip {AB0801, AB0803, AB0804, AB0805, | ||
47 | AB1801, AB1803, AB1804, AB1805, ABX80X}; | ||
48 | |||
49 | struct abx80x_cap { | ||
50 | u16 pn; | ||
51 | bool has_tc; | ||
52 | }; | ||
53 | |||
54 | static struct abx80x_cap abx80x_caps[] = { | ||
55 | [AB0801] = {.pn = 0x0801}, | ||
56 | [AB0803] = {.pn = 0x0803}, | ||
57 | [AB0804] = {.pn = 0x0804, .has_tc = true}, | ||
58 | [AB0805] = {.pn = 0x0805, .has_tc = true}, | ||
59 | [AB1801] = {.pn = 0x1801}, | ||
60 | [AB1803] = {.pn = 0x1803}, | ||
61 | [AB1804] = {.pn = 0x1804, .has_tc = true}, | ||
62 | [AB1805] = {.pn = 0x1805, .has_tc = true}, | ||
63 | [ABX80X] = {.pn = 0} | ||
64 | }; | ||
65 | |||
66 | static struct i2c_driver abx80x_driver; | ||
67 | |||
68 | static int abx80x_enable_trickle_charger(struct i2c_client *client, | ||
69 | u8 trickle_cfg) | ||
70 | { | ||
71 | int err; | ||
72 | |||
73 | /* | ||
74 | * Write the configuration key register to enable access to the Trickle | ||
75 | * register | ||
76 | */ | ||
77 | err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY, | ||
78 | ABX8XX_CFG_KEY_MISC); | ||
79 | if (err < 0) { | ||
80 | dev_err(&client->dev, "Unable to write configuration key\n"); | ||
81 | return -EIO; | ||
82 | } | ||
83 | |||
84 | err = i2c_smbus_write_byte_data(client, ABX8XX_REG_TRICKLE, | ||
85 | ABX8XX_TRICKLE_CHARGE_ENABLE | | ||
86 | trickle_cfg); | ||
87 | if (err < 0) { | ||
88 | dev_err(&client->dev, "Unable to write trickle register\n"); | ||
89 | return -EIO; | ||
90 | } | ||
91 | |||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | static int abx80x_rtc_read_time(struct device *dev, struct rtc_time *tm) | ||
96 | { | ||
97 | struct i2c_client *client = to_i2c_client(dev); | ||
98 | unsigned char buf[8]; | ||
99 | int err; | ||
100 | |||
101 | err = i2c_smbus_read_i2c_block_data(client, ABX8XX_REG_HTH, | ||
102 | sizeof(buf), buf); | ||
103 | if (err < 0) { | ||
104 | dev_err(&client->dev, "Unable to read date\n"); | ||
105 | return -EIO; | ||
106 | } | ||
107 | |||
108 | tm->tm_sec = bcd2bin(buf[ABX8XX_REG_SC] & 0x7F); | ||
109 | tm->tm_min = bcd2bin(buf[ABX8XX_REG_MN] & 0x7F); | ||
110 | tm->tm_hour = bcd2bin(buf[ABX8XX_REG_HR] & 0x3F); | ||
111 | tm->tm_wday = buf[ABX8XX_REG_WD] & 0x7; | ||
112 | tm->tm_mday = bcd2bin(buf[ABX8XX_REG_DA] & 0x3F); | ||
113 | tm->tm_mon = bcd2bin(buf[ABX8XX_REG_MO] & 0x1F) - 1; | ||
114 | tm->tm_year = bcd2bin(buf[ABX8XX_REG_YR]) + 100; | ||
115 | |||
116 | err = rtc_valid_tm(tm); | ||
117 | if (err < 0) | ||
118 | dev_err(&client->dev, "retrieved date/time is not valid.\n"); | ||
119 | |||
120 | return err; | ||
121 | } | ||
122 | |||
123 | static int abx80x_rtc_set_time(struct device *dev, struct rtc_time *tm) | ||
124 | { | ||
125 | struct i2c_client *client = to_i2c_client(dev); | ||
126 | unsigned char buf[8]; | ||
127 | int err; | ||
128 | |||
129 | if (tm->tm_year < 100) | ||
130 | return -EINVAL; | ||
131 | |||
132 | buf[ABX8XX_REG_HTH] = 0; | ||
133 | buf[ABX8XX_REG_SC] = bin2bcd(tm->tm_sec); | ||
134 | buf[ABX8XX_REG_MN] = bin2bcd(tm->tm_min); | ||
135 | buf[ABX8XX_REG_HR] = bin2bcd(tm->tm_hour); | ||
136 | buf[ABX8XX_REG_DA] = bin2bcd(tm->tm_mday); | ||
137 | buf[ABX8XX_REG_MO] = bin2bcd(tm->tm_mon + 1); | ||
138 | buf[ABX8XX_REG_YR] = bin2bcd(tm->tm_year - 100); | ||
139 | buf[ABX8XX_REG_WD] = tm->tm_wday; | ||
140 | |||
141 | err = i2c_smbus_write_i2c_block_data(client, ABX8XX_REG_HTH, | ||
142 | sizeof(buf), buf); | ||
143 | if (err < 0) { | ||
144 | dev_err(&client->dev, "Unable to write to date registers\n"); | ||
145 | return -EIO; | ||
146 | } | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static const struct rtc_class_ops abx80x_rtc_ops = { | ||
152 | .read_time = abx80x_rtc_read_time, | ||
153 | .set_time = abx80x_rtc_set_time, | ||
154 | }; | ||
155 | |||
156 | static int abx80x_dt_trickle_cfg(struct device_node *np) | ||
157 | { | ||
158 | const char *diode; | ||
159 | int trickle_cfg = 0; | ||
160 | int i, ret; | ||
161 | u32 tmp; | ||
162 | |||
163 | ret = of_property_read_string(np, "abracon,tc-diode", &diode); | ||
164 | if (ret) | ||
165 | return ret; | ||
166 | |||
167 | if (!strcmp(diode, "standard")) | ||
168 | trickle_cfg |= ABX8XX_TRICKLE_STANDARD_DIODE; | ||
169 | else if (!strcmp(diode, "schottky")) | ||
170 | trickle_cfg |= ABX8XX_TRICKLE_SCHOTTKY_DIODE; | ||
171 | else | ||
172 | return -EINVAL; | ||
173 | |||
174 | ret = of_property_read_u32(np, "abracon,tc-resistor", &tmp); | ||
175 | if (ret) | ||
176 | return ret; | ||
177 | |||
178 | for (i = 0; i < sizeof(trickle_resistors); i++) | ||
179 | if (trickle_resistors[i] == tmp) | ||
180 | break; | ||
181 | |||
182 | if (i == sizeof(trickle_resistors)) | ||
183 | return -EINVAL; | ||
184 | |||
185 | return (trickle_cfg | i); | ||
186 | } | ||
187 | |||
188 | static int abx80x_probe(struct i2c_client *client, | ||
189 | const struct i2c_device_id *id) | ||
190 | { | ||
191 | struct device_node *np = client->dev.of_node; | ||
192 | struct rtc_device *rtc; | ||
193 | int i, data, err, trickle_cfg = -EINVAL; | ||
194 | char buf[7]; | ||
195 | unsigned int part = id->driver_data; | ||
196 | unsigned int partnumber; | ||
197 | unsigned int majrev, minrev; | ||
198 | unsigned int lot; | ||
199 | unsigned int wafer; | ||
200 | unsigned int uid; | ||
201 | |||
202 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) | ||
203 | return -ENODEV; | ||
204 | |||
205 | err = i2c_smbus_read_i2c_block_data(client, ABX8XX_REG_ID0, | ||
206 | sizeof(buf), buf); | ||
207 | if (err < 0) { | ||
208 | dev_err(&client->dev, "Unable to read partnumber\n"); | ||
209 | return -EIO; | ||
210 | } | ||
211 | |||
212 | partnumber = (buf[0] << 8) | buf[1]; | ||
213 | majrev = buf[2] >> 3; | ||
214 | minrev = buf[2] & 0x7; | ||
215 | lot = ((buf[4] & 0x80) << 2) | ((buf[6] & 0x80) << 1) | buf[3]; | ||
216 | uid = ((buf[4] & 0x7f) << 8) | buf[5]; | ||
217 | wafer = (buf[6] & 0x7c) >> 2; | ||
218 | dev_info(&client->dev, "model %04x, revision %u.%u, lot %x, wafer %x, uid %x\n", | ||
219 | partnumber, majrev, minrev, lot, wafer, uid); | ||
220 | |||
221 | data = i2c_smbus_read_byte_data(client, ABX8XX_REG_CTRL1); | ||
222 | if (data < 0) { | ||
223 | dev_err(&client->dev, "Unable to read control register\n"); | ||
224 | return -EIO; | ||
225 | } | ||
226 | |||
227 | err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CTRL1, | ||
228 | ((data & ~ABX8XX_CTRL_12_24) | | ||
229 | ABX8XX_CTRL_WRITE)); | ||
230 | if (err < 0) { | ||
231 | dev_err(&client->dev, "Unable to write control register\n"); | ||
232 | return -EIO; | ||
233 | } | ||
234 | |||
235 | /* part autodetection */ | ||
236 | if (part == ABX80X) { | ||
237 | for (i = 0; abx80x_caps[i].pn; i++) | ||
238 | if (partnumber == abx80x_caps[i].pn) | ||
239 | break; | ||
240 | if (abx80x_caps[i].pn == 0) { | ||
241 | dev_err(&client->dev, "Unknown part: %04x\n", | ||
242 | partnumber); | ||
243 | return -EINVAL; | ||
244 | } | ||
245 | part = i; | ||
246 | } | ||
247 | |||
248 | if (partnumber != abx80x_caps[part].pn) { | ||
249 | dev_err(&client->dev, "partnumber mismatch %04x != %04x\n", | ||
250 | partnumber, abx80x_caps[part].pn); | ||
251 | return -EINVAL; | ||
252 | } | ||
253 | |||
254 | if (np && abx80x_caps[part].has_tc) | ||
255 | trickle_cfg = abx80x_dt_trickle_cfg(np); | ||
256 | |||
257 | if (trickle_cfg > 0) { | ||
258 | dev_info(&client->dev, "Enabling trickle charger: %02x\n", | ||
259 | trickle_cfg); | ||
260 | abx80x_enable_trickle_charger(client, trickle_cfg); | ||
261 | } | ||
262 | |||
263 | rtc = devm_rtc_device_register(&client->dev, abx80x_driver.driver.name, | ||
264 | &abx80x_rtc_ops, THIS_MODULE); | ||
265 | |||
266 | if (IS_ERR(rtc)) | ||
267 | return PTR_ERR(rtc); | ||
268 | |||
269 | i2c_set_clientdata(client, rtc); | ||
270 | |||
271 | return 0; | ||
272 | } | ||
273 | |||
274 | static int abx80x_remove(struct i2c_client *client) | ||
275 | { | ||
276 | return 0; | ||
277 | } | ||
278 | |||
279 | static const struct i2c_device_id abx80x_id[] = { | ||
280 | { "abx80x", ABX80X }, | ||
281 | { "ab0801", AB0801 }, | ||
282 | { "ab0803", AB0803 }, | ||
283 | { "ab0804", AB0804 }, | ||
284 | { "ab0805", AB0805 }, | ||
285 | { "ab1801", AB1801 }, | ||
286 | { "ab1803", AB1803 }, | ||
287 | { "ab1804", AB1804 }, | ||
288 | { "ab1805", AB1805 }, | ||
289 | { } | ||
290 | }; | ||
291 | MODULE_DEVICE_TABLE(i2c, abx80x_id); | ||
292 | |||
293 | static struct i2c_driver abx80x_driver = { | ||
294 | .driver = { | ||
295 | .name = "rtc-abx80x", | ||
296 | }, | ||
297 | .probe = abx80x_probe, | ||
298 | .remove = abx80x_remove, | ||
299 | .id_table = abx80x_id, | ||
300 | }; | ||
301 | |||
302 | module_i2c_driver(abx80x_driver); | ||
303 | |||
304 | MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>"); | ||
305 | MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>"); | ||
306 | MODULE_DESCRIPTION("Abracon ABX80X RTC driver"); | ||
307 | MODULE_LICENSE("GPL v2"); | ||
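In the new rtc-abx80x.c, abx80x_dt_trickle_cfg() turns two device-tree properties into the low bits of the trickle register, and abx80x_enable_trickle_charger() ORs in the enable pattern. The worked example below recomputes one configuration in user space using the constants and the trickle_resistors[] table copied from the file; the chosen inputs ("standard" diode, resistor value 3) are only an example, not a recommendation.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ABX8XX_TRICKLE_CHARGE_ENABLE  0xa0
#define ABX8XX_TRICKLE_STANDARD_DIODE 0x8
#define ABX8XX_TRICKLE_SCHOTTKY_DIODE 0x4

static const uint8_t trickle_resistors[] = {0, 3, 6, 11};

static int trickle_cfg(const char *diode, unsigned resistor)
{
        int cfg, i;

        if (!strcmp(diode, "standard"))
                cfg = ABX8XX_TRICKLE_STANDARD_DIODE;
        else if (!strcmp(diode, "schottky"))
                cfg = ABX8XX_TRICKLE_SCHOTTKY_DIODE;
        else
                return -1;

        for (i = 0; i < (int)sizeof(trickle_resistors); i++)
                if (trickle_resistors[i] == resistor)
                        return cfg | i;     /* table index lands in the low bits */
        return -1;
}

int main(void)
{
        int cfg = trickle_cfg("standard", 3);     /* 0x8 | 1 == 0x09 */

        if (cfg >= 0)
                printf("trickle register value: 0x%02x\n",
                       ABX8XX_TRICKLE_CHARGE_ENABLE | cfg);   /* 0xa9 */
        return 0;
}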
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c index 43e04af39e09..cb70ced7e0db 100644 --- a/drivers/rtc/rtc-armada38x.c +++ b/drivers/rtc/rtc-armada38x.c | |||
@@ -40,6 +40,13 @@ struct armada38x_rtc { | |||
40 | void __iomem *regs; | 40 | void __iomem *regs; |
41 | void __iomem *regs_soc; | 41 | void __iomem *regs_soc; |
42 | spinlock_t lock; | 42 | spinlock_t lock; |
43 | /* | ||
44 | * While setting the time, the RTC TIME register should not be | ||
45 | * accessed. Setting the RTC time involves sleeping for | ||
46 | * 100 ms, so a mutex instead of a spinlock is used to protect | ||
47 | * it | ||
48 | */ | ||
49 | struct mutex mutex_time; | ||
43 | int irq; | 50 | int irq; |
44 | }; | 51 | }; |
45 | 52 | ||
@@ -59,8 +66,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm) | |||
59 | struct armada38x_rtc *rtc = dev_get_drvdata(dev); | 66 | struct armada38x_rtc *rtc = dev_get_drvdata(dev); |
60 | unsigned long time, time_check, flags; | 67 | unsigned long time, time_check, flags; |
61 | 68 | ||
62 | spin_lock_irqsave(&rtc->lock, flags); | 69 | mutex_lock(&rtc->mutex_time); |
63 | |||
64 | time = readl(rtc->regs + RTC_TIME); | 70 | time = readl(rtc->regs + RTC_TIME); |
65 | /* | 71 | /* |
66 | * WA for failing time set attempts. As stated in HW ERRATA if | 72 | * WA for failing time set attempts. As stated in HW ERRATA if |
@@ -71,7 +77,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm) | |||
71 | if ((time_check - time) > 1) | 77 | if ((time_check - time) > 1) |
72 | time_check = readl(rtc->regs + RTC_TIME); | 78 | time_check = readl(rtc->regs + RTC_TIME); |
73 | 79 | ||
74 | spin_unlock_irqrestore(&rtc->lock, flags); | 80 | mutex_unlock(&rtc->mutex_time); |
75 | 81 | ||
76 | rtc_time_to_tm(time_check, tm); | 82 | rtc_time_to_tm(time_check, tm); |
77 | 83 | ||
@@ -94,19 +100,12 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
94 | * then wait for 100ms before writing to the time register to be | 100 | * then wait for 100ms before writing to the time register to be |
95 | * sure that the data will be taken into account. | 101 | * sure that the data will be taken into account. |
96 | */ | 102 | */ |
97 | spin_lock_irqsave(&rtc->lock, flags); | 103 | mutex_lock(&rtc->mutex_time); |
98 | |||
99 | rtc_delayed_write(0, rtc, RTC_STATUS); | 104 | rtc_delayed_write(0, rtc, RTC_STATUS); |
100 | |||
101 | spin_unlock_irqrestore(&rtc->lock, flags); | ||
102 | |||
103 | msleep(100); | 105 | msleep(100); |
104 | |||
105 | spin_lock_irqsave(&rtc->lock, flags); | ||
106 | |||
107 | rtc_delayed_write(time, rtc, RTC_TIME); | 106 | rtc_delayed_write(time, rtc, RTC_TIME); |
107 | mutex_unlock(&rtc->mutex_time); | ||
108 | 108 | ||
109 | spin_unlock_irqrestore(&rtc->lock, flags); | ||
110 | out: | 109 | out: |
111 | return ret; | 110 | return ret; |
112 | } | 111 | } |
@@ -230,6 +229,7 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev) | |||
230 | return -ENOMEM; | 229 | return -ENOMEM; |
231 | 230 | ||
232 | spin_lock_init(&rtc->lock); | 231 | spin_lock_init(&rtc->lock); |
232 | mutex_init(&rtc->mutex_time); | ||
233 | 233 | ||
234 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc"); | 234 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc"); |
235 | rtc->regs = devm_ioremap_resource(&pdev->dev, res); | 235 | rtc->regs = devm_ioremap_resource(&pdev->dev, res); |
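The armada38x hunks swap the spinlock around the time-set sequence for a mutex because the errata workaround has to sleep 100 ms between the two writes, and sleeping while holding a spinlock is not allowed. A user-space sketch of the resulting sequence, pieced together from the hunks; the register writes are stand-ins, not the driver's rtc_delayed_write():

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t mutex_time = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for the RTC_STATUS / RTC_TIME register writes. */
static void delayed_write(unsigned long val, const char *reg)
{
        printf("write 0x%lx to %s\n", val, reg);
}

/* Same shape as the reworked armada38x_rtc_set_time(): lock, write, sleep
 * 100 ms, write, unlock -- legal only because the lock is now a mutex. */
static void set_time(unsigned long time)
{
        pthread_mutex_lock(&mutex_time);
        delayed_write(0, "RTC_STATUS");
        usleep(100 * 1000);                 /* msleep(100) in the driver */
        delayed_write(time, "RTC_TIME");
        pthread_mutex_unlock(&mutex_time);
}

int main(void)
{
        set_time(1431648000UL);
        return 0;
}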
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index c43aca69fb30..0fc3fe5fd5b8 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c | |||
@@ -667,6 +667,8 @@ static struct raw3215_info *raw3215_alloc_info(void) | |||
667 | info->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA); | 667 | info->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA); |
668 | info->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA); | 668 | info->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA); |
669 | if (!info->buffer || !info->inbuf) { | 669 | if (!info->buffer || !info->inbuf) { |
670 | kfree(info->inbuf); | ||
671 | kfree(info->buffer); | ||
670 | kfree(info); | 672 | kfree(info); |
671 | return NULL; | 673 | return NULL; |
672 | } | 674 | } |
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 7600639db4c4..add419d6ff34 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c | |||
@@ -149,7 +149,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset); | |||
149 | static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg); | 149 | static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg); |
150 | static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id); | 150 | static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id); |
151 | static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code); | 151 | static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code); |
152 | static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id); | ||
153 | 152 | ||
154 | /* Functions */ | 153 | /* Functions */ |
155 | 154 | ||
@@ -1340,11 +1339,11 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance) | |||
1340 | } | 1339 | } |
1341 | 1340 | ||
1342 | /* Now complete the io */ | 1341 | /* Now complete the io */ |
1342 | scsi_dma_unmap(cmd); | ||
1343 | cmd->scsi_done(cmd); | ||
1343 | tw_dev->state[request_id] = TW_S_COMPLETED; | 1344 | tw_dev->state[request_id] = TW_S_COMPLETED; |
1344 | twa_free_request_id(tw_dev, request_id); | 1345 | twa_free_request_id(tw_dev, request_id); |
1345 | tw_dev->posted_request_count--; | 1346 | tw_dev->posted_request_count--; |
1346 | tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); | ||
1347 | twa_unmap_scsi_data(tw_dev, request_id); | ||
1348 | } | 1347 | } |
1349 | 1348 | ||
1350 | /* Check for valid status after each drain */ | 1349 | /* Check for valid status after each drain */ |
@@ -1402,26 +1401,6 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm | |||
1402 | } | 1401 | } |
1403 | } /* End twa_load_sgl() */ | 1402 | } /* End twa_load_sgl() */ |
1404 | 1403 | ||
1405 | /* This function will perform a pci-dma mapping for a scatter gather list */ | ||
1406 | static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) | ||
1407 | { | ||
1408 | int use_sg; | ||
1409 | struct scsi_cmnd *cmd = tw_dev->srb[request_id]; | ||
1410 | |||
1411 | use_sg = scsi_dma_map(cmd); | ||
1412 | if (!use_sg) | ||
1413 | return 0; | ||
1414 | else if (use_sg < 0) { | ||
1415 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list"); | ||
1416 | return 0; | ||
1417 | } | ||
1418 | |||
1419 | cmd->SCp.phase = TW_PHASE_SGLIST; | ||
1420 | cmd->SCp.have_data_in = use_sg; | ||
1421 | |||
1422 | return use_sg; | ||
1423 | } /* End twa_map_scsi_sg_data() */ | ||
1424 | |||
1425 | /* This function will poll for a response interrupt of a request */ | 1404 | /* This function will poll for a response interrupt of a request */ |
1426 | static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) | 1405 | static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) |
1427 | { | 1406 | { |
@@ -1600,9 +1579,11 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev) | |||
1600 | (tw_dev->state[i] != TW_S_INITIAL) && | 1579 | (tw_dev->state[i] != TW_S_INITIAL) && |
1601 | (tw_dev->state[i] != TW_S_COMPLETED)) { | 1580 | (tw_dev->state[i] != TW_S_COMPLETED)) { |
1602 | if (tw_dev->srb[i]) { | 1581 | if (tw_dev->srb[i]) { |
1603 | tw_dev->srb[i]->result = (DID_RESET << 16); | 1582 | struct scsi_cmnd *cmd = tw_dev->srb[i]; |
1604 | tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); | 1583 | |
1605 | twa_unmap_scsi_data(tw_dev, i); | 1584 | cmd->result = (DID_RESET << 16); |
1585 | scsi_dma_unmap(cmd); | ||
1586 | cmd->scsi_done(cmd); | ||
1606 | } | 1587 | } |
1607 | } | 1588 | } |
1608 | } | 1589 | } |
@@ -1781,21 +1762,18 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ | |||
1781 | /* Save the scsi command for use by the ISR */ | 1762 | /* Save the scsi command for use by the ISR */ |
1782 | tw_dev->srb[request_id] = SCpnt; | 1763 | tw_dev->srb[request_id] = SCpnt; |
1783 | 1764 | ||
1784 | /* Initialize phase to zero */ | ||
1785 | SCpnt->SCp.phase = TW_PHASE_INITIAL; | ||
1786 | |||
1787 | retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); | 1765 | retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); |
1788 | switch (retval) { | 1766 | switch (retval) { |
1789 | case SCSI_MLQUEUE_HOST_BUSY: | 1767 | case SCSI_MLQUEUE_HOST_BUSY: |
1768 | scsi_dma_unmap(SCpnt); | ||
1790 | twa_free_request_id(tw_dev, request_id); | 1769 | twa_free_request_id(tw_dev, request_id); |
1791 | twa_unmap_scsi_data(tw_dev, request_id); | ||
1792 | break; | 1770 | break; |
1793 | case 1: | 1771 | case 1: |
1794 | tw_dev->state[request_id] = TW_S_COMPLETED; | ||
1795 | twa_free_request_id(tw_dev, request_id); | ||
1796 | twa_unmap_scsi_data(tw_dev, request_id); | ||
1797 | SCpnt->result = (DID_ERROR << 16); | 1772 | SCpnt->result = (DID_ERROR << 16); |
1773 | scsi_dma_unmap(SCpnt); | ||
1798 | done(SCpnt); | 1774 | done(SCpnt); |
1775 | tw_dev->state[request_id] = TW_S_COMPLETED; | ||
1776 | twa_free_request_id(tw_dev, request_id); | ||
1799 | retval = 0; | 1777 | retval = 0; |
1800 | } | 1778 | } |
1801 | out: | 1779 | out: |
@@ -1863,8 +1841,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, | |||
1863 | command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); | 1841 | command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]); |
1864 | command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH); | 1842 | command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH); |
1865 | } else { | 1843 | } else { |
1866 | sg_count = twa_map_scsi_sg_data(tw_dev, request_id); | 1844 | sg_count = scsi_dma_map(srb); |
1867 | if (sg_count == 0) | 1845 | if (sg_count < 0) |
1868 | goto out; | 1846 | goto out; |
1869 | 1847 | ||
1870 | scsi_for_each_sg(srb, sg, sg_count, i) { | 1848 | scsi_for_each_sg(srb, sg, sg_count, i) { |
@@ -1979,15 +1957,6 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code) | |||
1979 | return(table[index].text); | 1957 | return(table[index].text); |
1980 | } /* End twa_string_lookup() */ | 1958 | } /* End twa_string_lookup() */ |
1981 | 1959 | ||
1982 | /* This function will perform a pci-dma unmap */ | ||
1983 | static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) | ||
1984 | { | ||
1985 | struct scsi_cmnd *cmd = tw_dev->srb[request_id]; | ||
1986 | |||
1987 | if (cmd->SCp.phase == TW_PHASE_SGLIST) | ||
1988 | scsi_dma_unmap(cmd); | ||
1989 | } /* End twa_unmap_scsi_data() */ | ||
1990 | |||
1991 | /* This function gets called when a disk is coming on-line */ | 1960 | /* This function gets called when a disk is coming on-line */ |
1992 | static int twa_slave_configure(struct scsi_device *sdev) | 1961 | static int twa_slave_configure(struct scsi_device *sdev) |
1993 | { | 1962 | { |
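The 3w-9xxx conversion leans on the return convention of scsi_dma_map(): negative on mapping failure, zero when the command carries no data, positive for the number of mapped segments, with scsi_dma_unmap() called unconditionally on the completion and reset paths. A user-space sketch of that convention; the functions below are stand-ins, not the real SCSI midlayer calls.

#include <stdio.h>

/* Toy stand-in for scsi_dma_map(): <0 error, 0 no data, >0 segment count. */
static int fake_dma_map(int nr_segments, int fail)
{
        if (fail)
                return -1;
        return nr_segments;
}

static int queue_command(int nr_segments, int fail)
{
        int sg_count = fake_dma_map(nr_segments, fail);

        if (sg_count < 0)
                return -1;                  /* "goto out" in the driver */
        if (sg_count == 0)
                printf("no data phase, nothing to map\n");
        else
                printf("building SG list with %d entries\n", sg_count);

        /* ... hand the request to the firmware ... */
        /* completion path: unmap, then scsi_done(), unconditionally */
        return 0;
}

int main(void)
{
        queue_command(4, 0);    /* normal data command */
        queue_command(0, 0);    /* no data phase */
        queue_command(4, 1);    /* mapping failure */
        return 0;
}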
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h index 040f7214e5b7..0fdc83cfa0e1 100644 --- a/drivers/scsi/3w-9xxx.h +++ b/drivers/scsi/3w-9xxx.h | |||
@@ -324,11 +324,6 @@ static twa_message_type twa_error_table[] = { | |||
324 | #define TW_CURRENT_DRIVER_BUILD 0 | 324 | #define TW_CURRENT_DRIVER_BUILD 0 |
325 | #define TW_CURRENT_DRIVER_BRANCH 0 | 325 | #define TW_CURRENT_DRIVER_BRANCH 0 |
326 | 326 | ||
327 | /* Phase defines */ | ||
328 | #define TW_PHASE_INITIAL 0 | ||
329 | #define TW_PHASE_SINGLE 1 | ||
330 | #define TW_PHASE_SGLIST 2 | ||
331 | |||
332 | /* Misc defines */ | 327 | /* Misc defines */ |
333 | #define TW_9550SX_DRAIN_COMPLETED 0xFFFF | 328 | #define TW_9550SX_DRAIN_COMPLETED 0xFFFF |
334 | #define TW_SECTOR_SIZE 512 | 329 | #define TW_SECTOR_SIZE 512 |
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index 2361772d5909..f8374850f714 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c | |||
@@ -290,26 +290,6 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id) | |||
290 | return 0; | 290 | return 0; |
291 | } /* End twl_post_command_packet() */ | 291 | } /* End twl_post_command_packet() */ |
292 | 292 | ||
293 | /* This function will perform a pci-dma mapping for a scatter gather list */ | ||
294 | static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id) | ||
295 | { | ||
296 | int use_sg; | ||
297 | struct scsi_cmnd *cmd = tw_dev->srb[request_id]; | ||
298 | |||
299 | use_sg = scsi_dma_map(cmd); | ||
300 | if (!use_sg) | ||
301 | return 0; | ||
302 | else if (use_sg < 0) { | ||
303 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list"); | ||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | cmd->SCp.phase = TW_PHASE_SGLIST; | ||
308 | cmd->SCp.have_data_in = use_sg; | ||
309 | |||
310 | return use_sg; | ||
311 | } /* End twl_map_scsi_sg_data() */ | ||
312 | |||
313 | /* This function hands scsi cdb's to the firmware */ | 293 | /* This function hands scsi cdb's to the firmware */ |
314 | static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg) | 294 | static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg) |
315 | { | 295 | { |
@@ -357,8 +337,8 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, | |||
357 | if (!sglistarg) { | 337 | if (!sglistarg) { |
358 | /* Map sglist from scsi layer to cmd packet */ | 338 | /* Map sglist from scsi layer to cmd packet */ |
359 | if (scsi_sg_count(srb)) { | 339 | if (scsi_sg_count(srb)) { |
360 | sg_count = twl_map_scsi_sg_data(tw_dev, request_id); | 340 | sg_count = scsi_dma_map(srb); |
361 | if (sg_count == 0) | 341 | if (sg_count <= 0) |
362 | goto out; | 342 | goto out; |
363 | 343 | ||
364 | scsi_for_each_sg(srb, sg, sg_count, i) { | 344 | scsi_for_each_sg(srb, sg, sg_count, i) { |
@@ -1102,15 +1082,6 @@ out: | |||
1102 | return retval; | 1082 | return retval; |
1103 | } /* End twl_initialize_device_extension() */ | 1083 | } /* End twl_initialize_device_extension() */ |
1104 | 1084 | ||
1105 | /* This function will perform a pci-dma unmap */ | ||
1106 | static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) | ||
1107 | { | ||
1108 | struct scsi_cmnd *cmd = tw_dev->srb[request_id]; | ||
1109 | |||
1110 | if (cmd->SCp.phase == TW_PHASE_SGLIST) | ||
1111 | scsi_dma_unmap(cmd); | ||
1112 | } /* End twl_unmap_scsi_data() */ | ||
1113 | |||
1114 | /* This function will handle attention interrupts */ | 1085 | /* This function will handle attention interrupts */ |
1115 | static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev) | 1086 | static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev) |
1116 | { | 1087 | { |
@@ -1251,11 +1222,11 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance) | |||
1251 | } | 1222 | } |
1252 | 1223 | ||
1253 | /* Now complete the io */ | 1224 | /* Now complete the io */ |
1225 | scsi_dma_unmap(cmd); | ||
1226 | cmd->scsi_done(cmd); | ||
1254 | tw_dev->state[request_id] = TW_S_COMPLETED; | 1227 | tw_dev->state[request_id] = TW_S_COMPLETED; |
1255 | twl_free_request_id(tw_dev, request_id); | 1228 | twl_free_request_id(tw_dev, request_id); |
1256 | tw_dev->posted_request_count--; | 1229 | tw_dev->posted_request_count--; |
1257 | tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); | ||
1258 | twl_unmap_scsi_data(tw_dev, request_id); | ||
1259 | } | 1230 | } |
1260 | 1231 | ||
1261 | /* Check for another response interrupt */ | 1232 | /* Check for another response interrupt */ |
@@ -1400,10 +1371,12 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res | |||
1400 | if ((tw_dev->state[i] != TW_S_FINISHED) && | 1371 | if ((tw_dev->state[i] != TW_S_FINISHED) && |
1401 | (tw_dev->state[i] != TW_S_INITIAL) && | 1372 | (tw_dev->state[i] != TW_S_INITIAL) && |
1402 | (tw_dev->state[i] != TW_S_COMPLETED)) { | 1373 | (tw_dev->state[i] != TW_S_COMPLETED)) { |
1403 | if (tw_dev->srb[i]) { | 1374 | struct scsi_cmnd *cmd = tw_dev->srb[i]; |
1404 | tw_dev->srb[i]->result = (DID_RESET << 16); | 1375 | |
1405 | tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); | 1376 | if (cmd) { |
1406 | twl_unmap_scsi_data(tw_dev, i); | 1377 | cmd->result = (DID_RESET << 16); |
1378 | scsi_dma_unmap(cmd); | ||
1379 | cmd->scsi_done(cmd); | ||
1407 | } | 1380 | } |
1408 | } | 1381 | } |
1409 | } | 1382 | } |
@@ -1507,9 +1480,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ | |||
1507 | /* Save the scsi command for use by the ISR */ | 1480 | /* Save the scsi command for use by the ISR */ |
1508 | tw_dev->srb[request_id] = SCpnt; | 1481 | tw_dev->srb[request_id] = SCpnt; |
1509 | 1482 | ||
1510 | /* Initialize phase to zero */ | ||
1511 | SCpnt->SCp.phase = TW_PHASE_INITIAL; | ||
1512 | |||
1513 | retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); | 1483 | retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); |
1514 | if (retval) { | 1484 | if (retval) { |
1515 | tw_dev->state[request_id] = TW_S_COMPLETED; | 1485 | tw_dev->state[request_id] = TW_S_COMPLETED; |
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h index d474892701d4..fec6449c7595 100644 --- a/drivers/scsi/3w-sas.h +++ b/drivers/scsi/3w-sas.h | |||
@@ -103,10 +103,6 @@ static char *twl_aen_severity_table[] = | |||
103 | #define TW_CURRENT_DRIVER_BUILD 0 | 103 | #define TW_CURRENT_DRIVER_BUILD 0 |
104 | #define TW_CURRENT_DRIVER_BRANCH 0 | 104 | #define TW_CURRENT_DRIVER_BRANCH 0 |
105 | 105 | ||
106 | /* Phase defines */ | ||
107 | #define TW_PHASE_INITIAL 0 | ||
108 | #define TW_PHASE_SGLIST 2 | ||
109 | |||
110 | /* Misc defines */ | 106 | /* Misc defines */ |
111 | #define TW_SECTOR_SIZE 512 | 107 | #define TW_SECTOR_SIZE 512 |
112 | #define TW_MAX_UNITS 32 | 108 | #define TW_MAX_UNITS 32 |
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index c75f2048319f..2940bd769936 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c | |||
@@ -1271,32 +1271,6 @@ static int tw_initialize_device_extension(TW_Device_Extension *tw_dev) | |||
1271 | return 0; | 1271 | return 0; |
1272 | } /* End tw_initialize_device_extension() */ | 1272 | } /* End tw_initialize_device_extension() */ |
1273 | 1273 | ||
1274 | static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) | ||
1275 | { | ||
1276 | int use_sg; | ||
1277 | |||
1278 | dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n"); | ||
1279 | |||
1280 | use_sg = scsi_dma_map(cmd); | ||
1281 | if (use_sg < 0) { | ||
1282 | printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n"); | ||
1283 | return 0; | ||
1284 | } | ||
1285 | |||
1286 | cmd->SCp.phase = TW_PHASE_SGLIST; | ||
1287 | cmd->SCp.have_data_in = use_sg; | ||
1288 | |||
1289 | return use_sg; | ||
1290 | } /* End tw_map_scsi_sg_data() */ | ||
1291 | |||
1292 | static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) | ||
1293 | { | ||
1294 | dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n"); | ||
1295 | |||
1296 | if (cmd->SCp.phase == TW_PHASE_SGLIST) | ||
1297 | scsi_dma_unmap(cmd); | ||
1298 | } /* End tw_unmap_scsi_data() */ | ||
1299 | |||
1300 | /* This function will reset a device extension */ | 1274 | /* This function will reset a device extension */ |
1301 | static int tw_reset_device_extension(TW_Device_Extension *tw_dev) | 1275 | static int tw_reset_device_extension(TW_Device_Extension *tw_dev) |
1302 | { | 1276 | { |
@@ -1319,8 +1293,8 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev) | |||
1319 | srb = tw_dev->srb[i]; | 1293 | srb = tw_dev->srb[i]; |
1320 | if (srb != NULL) { | 1294 | if (srb != NULL) { |
1321 | srb->result = (DID_RESET << 16); | 1295 | srb->result = (DID_RESET << 16); |
1322 | tw_dev->srb[i]->scsi_done(tw_dev->srb[i]); | 1296 | scsi_dma_unmap(srb); |
1323 | tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]); | 1297 | srb->scsi_done(srb); |
1324 | } | 1298 | } |
1325 | } | 1299 | } |
1326 | } | 1300 | } |
@@ -1767,8 +1741,8 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id) | |||
1767 | command_packet->byte8.io.lba = lba; | 1741 | command_packet->byte8.io.lba = lba; |
1768 | command_packet->byte6.block_count = num_sectors; | 1742 | command_packet->byte6.block_count = num_sectors; |
1769 | 1743 | ||
1770 | use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]); | 1744 | use_sg = scsi_dma_map(srb); |
1771 | if (!use_sg) | 1745 | if (use_sg <= 0) |
1772 | return 1; | 1746 | return 1; |
1773 | 1747 | ||
1774 | scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) { | 1748 | scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) { |
@@ -1955,9 +1929,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c | |||
1955 | /* Save the scsi command for use by the ISR */ | 1929 | /* Save the scsi command for use by the ISR */ |
1956 | tw_dev->srb[request_id] = SCpnt; | 1930 | tw_dev->srb[request_id] = SCpnt; |
1957 | 1931 | ||
1958 | /* Initialize phase to zero */ | ||
1959 | SCpnt->SCp.phase = TW_PHASE_INITIAL; | ||
1960 | |||
1961 | switch (*command) { | 1932 | switch (*command) { |
1962 | case READ_10: | 1933 | case READ_10: |
1963 | case READ_6: | 1934 | case READ_6: |
@@ -2185,12 +2156,11 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance) | |||
2185 | 2156 | ||
2186 | /* Now complete the io */ | 2157 | /* Now complete the io */ |
2187 | if ((error != TW_ISR_DONT_COMPLETE)) { | 2158 | if ((error != TW_ISR_DONT_COMPLETE)) { |
2159 | scsi_dma_unmap(tw_dev->srb[request_id]); | ||
2160 | tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); | ||
2188 | tw_dev->state[request_id] = TW_S_COMPLETED; | 2161 | tw_dev->state[request_id] = TW_S_COMPLETED; |
2189 | tw_state_request_finish(tw_dev, request_id); | 2162 | tw_state_request_finish(tw_dev, request_id); |
2190 | tw_dev->posted_request_count--; | 2163 | tw_dev->posted_request_count--; |
2191 | tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]); | ||
2192 | |||
2193 | tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]); | ||
2194 | } | 2164 | } |
2195 | } | 2165 | } |
2196 | 2166 | ||
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h index 29b0b84ed69e..6f65e663d393 100644 --- a/drivers/scsi/3w-xxxx.h +++ b/drivers/scsi/3w-xxxx.h | |||
@@ -195,11 +195,6 @@ static unsigned char tw_sense_table[][4] = | |||
195 | #define TW_AEN_SMART_FAIL 0x000F | 195 | #define TW_AEN_SMART_FAIL 0x000F |
196 | #define TW_AEN_SBUF_FAIL 0x0024 | 196 | #define TW_AEN_SBUF_FAIL 0x0024 |
197 | 197 | ||
198 | /* Phase defines */ | ||
199 | #define TW_PHASE_INITIAL 0 | ||
200 | #define TW_PHASE_SINGLE 1 | ||
201 | #define TW_PHASE_SGLIST 2 | ||
202 | |||
203 | /* Misc defines */ | 198 | /* Misc defines */ |
204 | #define TW_ALIGNMENT_6000 64 /* 64 bytes */ | 199 | #define TW_ALIGNMENT_6000 64 /* 64 bytes */ |
205 | #define TW_ALIGNMENT_7000 4 /* 4 bytes */ | 200 | #define TW_ALIGNMENT_7000 4 /* 4 bytes */ |
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index ec432763a29a..b95d2779f467 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c | |||
@@ -375,9 +375,10 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) | |||
375 | u8 lun = cmd->device->lun; | 375 | u8 lun = cmd->device->lun; |
376 | unsigned long flags; | 376 | unsigned long flags; |
377 | int bufflen = scsi_bufflen(cmd); | 377 | int bufflen = scsi_bufflen(cmd); |
378 | int mbo; | 378 | int mbo, sg_count; |
379 | struct mailbox *mb = aha1542->mb; | 379 | struct mailbox *mb = aha1542->mb; |
380 | struct ccb *ccb = aha1542->ccb; | 380 | struct ccb *ccb = aha1542->ccb; |
381 | struct chain *cptr; | ||
381 | 382 | ||
382 | if (*cmd->cmnd == REQUEST_SENSE) { | 383 | if (*cmd->cmnd == REQUEST_SENSE) { |
383 | /* Don't do the command - we have the sense data already */ | 384 | /* Don't do the command - we have the sense data already */ |
@@ -397,6 +398,13 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) | |||
397 | print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len); | 398 | print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len); |
398 | } | 399 | } |
399 | #endif | 400 | #endif |
401 | if (bufflen) { /* allocate memory before taking host_lock */ | ||
402 | sg_count = scsi_sg_count(cmd); | ||
403 | cptr = kmalloc(sizeof(*cptr) * sg_count, GFP_KERNEL | GFP_DMA); | ||
404 | if (!cptr) | ||
405 | return SCSI_MLQUEUE_HOST_BUSY; | ||
406 | } | ||
407 | |||
400 | /* Use the outgoing mailboxes in a round-robin fashion, because this | 408 | /* Use the outgoing mailboxes in a round-robin fashion, because this |
401 | is how the host adapter will scan for them */ | 409 | is how the host adapter will scan for them */ |
402 | 410 | ||
@@ -441,19 +449,10 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) | |||
441 | 449 | ||
442 | if (bufflen) { | 450 | if (bufflen) { |
443 | struct scatterlist *sg; | 451 | struct scatterlist *sg; |
444 | struct chain *cptr; | 452 | int i; |
445 | int i, sg_count = scsi_sg_count(cmd); | ||
446 | 453 | ||
447 | ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */ | 454 | ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */ |
448 | cmd->host_scribble = kmalloc(sizeof(*cptr)*sg_count, | 455 | cmd->host_scribble = (void *)cptr; |
449 | GFP_KERNEL | GFP_DMA); | ||
450 | cptr = (struct chain *) cmd->host_scribble; | ||
451 | if (cptr == NULL) { | ||
452 | /* free the claimed mailbox slot */ | ||
453 | aha1542->int_cmds[mbo] = NULL; | ||
454 | spin_unlock_irqrestore(sh->host_lock, flags); | ||
455 | return SCSI_MLQUEUE_HOST_BUSY; | ||
456 | } | ||
457 | scsi_for_each_sg(cmd, sg, sg_count, i) { | 456 | scsi_for_each_sg(cmd, sg, sg_count, i) { |
458 | any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg)) | 457 | any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg)) |
459 | + sg->offset); | 458 | + sg->offset); |
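The aha1542 hunks move the kmalloc() of the scatter-gather chain to before the host lock is taken, so an allocation failure can be reported as SCSI_MLQUEUE_HOST_BUSY without unwinding a claimed mailbox slot, and the allocation is no longer made with a spinlock held. A standalone sketch of that allocate-first, lock-later pattern; the lock and structures below are stand-ins for the driver's.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;

struct chain { unsigned dataptr; unsigned datalen; };

#define QUEUE_BUSY 1

static int queue_command(int sg_count)
{
        struct chain *cptr = NULL;

        if (sg_count) {                 /* allocate before taking host_lock */
                cptr = malloc(sizeof(*cptr) * sg_count);
                if (!cptr)
                        return QUEUE_BUSY;   /* nothing claimed yet, easy exit */
        }

        pthread_mutex_lock(&host_lock);
        /* ... claim a mailbox slot and fill cptr[] under the lock ... */
        pthread_mutex_unlock(&host_lock);

        free(cptr);     /* the driver instead parks it in cmd->host_scribble */
        return 0;
}

int main(void)
{
        return queue_command(4);
}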
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 262ab837a704..9f77d23239a2 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
@@ -226,6 +226,7 @@ static struct { | |||
226 | {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, | 226 | {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, |
227 | {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, | 227 | {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, |
228 | {"Promise", "", NULL, BLIST_SPARSELUN}, | 228 | {"Promise", "", NULL, BLIST_SPARSELUN}, |
229 | {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, | ||
229 | {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, | 230 | {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, |
230 | {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, | 231 | {"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN}, |
231 | {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN}, | 232 | {"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN}, |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 60aae01caa89..6efab1c455e1 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -897,6 +897,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
897 | */ | 897 | */ |
898 | if (*bflags & BLIST_MAX_512) | 898 | if (*bflags & BLIST_MAX_512) |
899 | blk_queue_max_hw_sectors(sdev->request_queue, 512); | 899 | blk_queue_max_hw_sectors(sdev->request_queue, 512); |
900 | /* | ||
901 | * Max 1024 sector transfer length for targets that report incorrect | ||
902 | * max/optimal lengths and rely on the old block layer safe default | ||
903 | */ | ||
904 | else if (*bflags & BLIST_MAX_1024) | ||
905 | blk_queue_max_hw_sectors(sdev->request_queue, 1024); | ||
900 | 906 | ||
901 | /* | 907 | /* |
902 | * Some devices may not want to have a start command automatically | 908 | * Some devices may not want to have a start command automatically |
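The scsi_devinfo entry for "QNAP" / "iSCSI Storage" added earlier in this series is what sets BLIST_MAX_1024, and the new branch above then caps max_hw_sectors at 1024 sectors. For scale, that is 1024 x 512 = 524288 bytes, i.e. 512 KiB per request with 512-byte sectors, twice the 256 KiB allowed by the existing BLIST_MAX_512 branch directly above it.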
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c index cd4c293f0dd0..fe8875f0d7be 100644 --- a/drivers/sh/pm_runtime.c +++ b/drivers/sh/pm_runtime.c | |||
@@ -80,9 +80,10 @@ static int __init sh_pm_runtime_init(void) | |||
80 | if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { | 80 | if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { |
81 | if (!of_machine_is_compatible("renesas,emev2") && | 81 | if (!of_machine_is_compatible("renesas,emev2") && |
82 | !of_machine_is_compatible("renesas,r7s72100") && | 82 | !of_machine_is_compatible("renesas,r7s72100") && |
83 | !of_machine_is_compatible("renesas,r8a73a4") && | ||
84 | #ifndef CONFIG_PM_GENERIC_DOMAINS_OF | 83 | #ifndef CONFIG_PM_GENERIC_DOMAINS_OF |
84 | !of_machine_is_compatible("renesas,r8a73a4") && | ||
85 | !of_machine_is_compatible("renesas,r8a7740") && | 85 | !of_machine_is_compatible("renesas,r8a7740") && |
86 | !of_machine_is_compatible("renesas,sh73a0") && | ||
86 | #endif | 87 | #endif |
87 | !of_machine_is_compatible("renesas,r8a7778") && | 88 | !of_machine_is_compatible("renesas,r8a7778") && |
88 | !of_machine_is_compatible("renesas,r8a7779") && | 89 | !of_machine_is_compatible("renesas,r8a7779") && |
@@ -90,9 +91,7 @@ static int __init sh_pm_runtime_init(void) | |||
90 | !of_machine_is_compatible("renesas,r8a7791") && | 91 | !of_machine_is_compatible("renesas,r8a7791") && |
91 | !of_machine_is_compatible("renesas,r8a7792") && | 92 | !of_machine_is_compatible("renesas,r8a7792") && |
92 | !of_machine_is_compatible("renesas,r8a7793") && | 93 | !of_machine_is_compatible("renesas,r8a7793") && |
93 | !of_machine_is_compatible("renesas,r8a7794") && | 94 | !of_machine_is_compatible("renesas,r8a7794")) |
94 | !of_machine_is_compatible("renesas,sh7372") && | ||
95 | !of_machine_is_compatible("renesas,sh73a0")) | ||
96 | return 0; | 95 | return 0; |
97 | } | 96 | } |
98 | 97 | ||
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 198f96b7fb45..72b059081559 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -78,6 +78,7 @@ config SPI_ATMEL | |||
78 | config SPI_BCM2835 | 78 | config SPI_BCM2835 |
79 | tristate "BCM2835 SPI controller" | 79 | tristate "BCM2835 SPI controller" |
80 | depends on ARCH_BCM2835 || COMPILE_TEST | 80 | depends on ARCH_BCM2835 || COMPILE_TEST |
81 | depends on GPIOLIB | ||
81 | help | 82 | help |
82 | This selects a driver for the Broadcom BCM2835 SPI master. | 83 | This selects a driver for the Broadcom BCM2835 SPI master. |
83 | 84 | ||
@@ -302,7 +303,7 @@ config SPI_FSL_SPI | |||
302 | config SPI_FSL_DSPI | 303 | config SPI_FSL_DSPI |
303 | tristate "Freescale DSPI controller" | 304 | tristate "Freescale DSPI controller" |
304 | select REGMAP_MMIO | 305 | select REGMAP_MMIO |
305 | depends on SOC_VF610 || COMPILE_TEST | 306 | depends on SOC_VF610 || SOC_LS1021A || COMPILE_TEST |
306 | help | 307 | help |
307 | This enables support for the Freescale DSPI controller in master | 308 | This enables support for the Freescale DSPI controller in master |
308 | mode. VF610 platform uses the controller. | 309 | mode. VF610 platform uses the controller. |
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index f63864a893c5..37875cf942f7 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c | |||
@@ -164,13 +164,12 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master, | |||
164 | unsigned long xfer_time_us) | 164 | unsigned long xfer_time_us) |
165 | { | 165 | { |
166 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | 166 | struct bcm2835_spi *bs = spi_master_get_devdata(master); |
167 | unsigned long timeout = jiffies + | 167 | /* set timeout to 1 second of maximum polling */ |
168 | max(4 * xfer_time_us * HZ / 1000000, 2uL); | 168 | unsigned long timeout = jiffies + HZ; |
169 | 169 | ||
170 | /* enable HW block without interrupts */ | 170 | /* enable HW block without interrupts */ |
171 | bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA); | 171 | bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA); |
172 | 172 | ||
173 | /* set timeout to 4x the expected time, or 2 jiffies */ | ||
174 | /* loop until finished the transfer */ | 173 | /* loop until finished the transfer */ |
175 | while (bs->rx_len) { | 174 | while (bs->rx_len) { |
176 | /* read from fifo as much as possible */ | 175 | /* read from fifo as much as possible */ |
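For a sense of scale of the dropped bcm2835 expression: it computed the poll deadline as max(4 * xfer_time_us * HZ / 1000000, 2uL) jiffies. As a worked example (assuming HZ = 100 purely for illustration), a transfer expected to take 96 us gives 4 * 96 * 100 / 1000000 = 0 after integer division, so the max() clamps the budget to 2 jiffies, about 20 ms of polling. The replacement uses a flat jiffies + HZ, i.e. one second, independent of the transfer length.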
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index 5ef6638d5e8a..840a4984d365 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c | |||
@@ -180,7 +180,6 @@ int spi_bitbang_setup(struct spi_device *spi) | |||
180 | { | 180 | { |
181 | struct spi_bitbang_cs *cs = spi->controller_state; | 181 | struct spi_bitbang_cs *cs = spi->controller_state; |
182 | struct spi_bitbang *bitbang; | 182 | struct spi_bitbang *bitbang; |
183 | int retval; | ||
184 | unsigned long flags; | 183 | unsigned long flags; |
185 | 184 | ||
186 | bitbang = spi_master_get_devdata(spi->master); | 185 | bitbang = spi_master_get_devdata(spi->master); |
@@ -197,9 +196,11 @@ int spi_bitbang_setup(struct spi_device *spi) | |||
197 | if (!cs->txrx_word) | 196 | if (!cs->txrx_word) |
198 | return -EINVAL; | 197 | return -EINVAL; |
199 | 198 | ||
200 | retval = bitbang->setup_transfer(spi, NULL); | 199 | if (bitbang->setup_transfer) { |
201 | if (retval < 0) | 200 | int retval = bitbang->setup_transfer(spi, NULL); |
202 | return retval; | 201 | if (retval < 0) |
202 | return retval; | ||
203 | } | ||
203 | 204 | ||
204 | dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs); | 205 | dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs); |
205 | 206 | ||
@@ -295,9 +296,11 @@ static int spi_bitbang_transfer_one(struct spi_master *master, | |||
295 | 296 | ||
296 | /* init (-1) or override (1) transfer params */ | 297 | /* init (-1) or override (1) transfer params */ |
297 | if (do_setup != 0) { | 298 | if (do_setup != 0) { |
298 | status = bitbang->setup_transfer(spi, t); | 299 | if (bitbang->setup_transfer) { |
299 | if (status < 0) | 300 | status = bitbang->setup_transfer(spi, t); |
300 | break; | 301 | if (status < 0) |
302 | break; | ||
303 | } | ||
301 | if (do_setup == -1) | 304 | if (do_setup == -1) |
302 | do_setup = 0; | 305 | do_setup = 0; |
303 | } | 306 | } |
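
Both spi-bitbang hunks above make the setup_transfer() hook optional. A small sketch of the guarded-callback shape, using stand-in types rather than the real spi structs:

/* Stand-in types; only the NULL-check pattern matters here. */
struct bitbang_like {
	int (*setup_transfer)(void *spi, void *xfer);
};

static int call_optional_setup(struct bitbang_like *bb, void *spi, void *xfer)
{
	if (!bb->setup_transfer)
		return 0;	/* hook not provided: nothing to configure */
	return bb->setup_transfer(spi, xfer);
}
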
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c index 9c46a3058743..896add8cfd3b 100644 --- a/drivers/spi/spi-fsl-cpm.c +++ b/drivers/spi/spi-fsl-cpm.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/of_address.h> | 24 | #include <linux/of_address.h> |
25 | #include <linux/spi/spi.h> | 25 | #include <linux/spi/spi.h> |
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <linux/platform_device.h> | ||
27 | 28 | ||
28 | #include "spi-fsl-cpm.h" | 29 | #include "spi-fsl-cpm.h" |
29 | #include "spi-fsl-lib.h" | 30 | #include "spi-fsl-lib.h" |
@@ -269,17 +270,6 @@ static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) | |||
269 | if (mspi->flags & SPI_CPM2) { | 270 | if (mspi->flags & SPI_CPM2) { |
270 | pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); | 271 | pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); |
271 | out_be16(spi_base, pram_ofs); | 272 | out_be16(spi_base, pram_ofs); |
272 | } else { | ||
273 | struct spi_pram __iomem *pram = spi_base; | ||
274 | u16 rpbase = in_be16(&pram->rpbase); | ||
275 | |||
276 | /* Microcode relocation patch applied? */ | ||
277 | if (rpbase) { | ||
278 | pram_ofs = rpbase; | ||
279 | } else { | ||
280 | pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); | ||
281 | out_be16(spi_base, pram_ofs); | ||
282 | } | ||
283 | } | 273 | } |
284 | 274 | ||
285 | iounmap(spi_base); | 275 | iounmap(spi_base); |
@@ -292,7 +282,6 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) | |||
292 | struct device_node *np = dev->of_node; | 282 | struct device_node *np = dev->of_node; |
293 | const u32 *iprop; | 283 | const u32 *iprop; |
294 | int size; | 284 | int size; |
295 | unsigned long pram_ofs; | ||
296 | unsigned long bds_ofs; | 285 | unsigned long bds_ofs; |
297 | 286 | ||
298 | if (!(mspi->flags & SPI_CPM_MODE)) | 287 | if (!(mspi->flags & SPI_CPM_MODE)) |
@@ -319,8 +308,26 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) | |||
319 | } | 308 | } |
320 | } | 309 | } |
321 | 310 | ||
322 | pram_ofs = fsl_spi_cpm_get_pram(mspi); | 311 | if (mspi->flags & SPI_CPM1) { |
323 | if (IS_ERR_VALUE(pram_ofs)) { | 312 | struct resource *res; |
313 | void *pram; | ||
314 | |||
315 | res = platform_get_resource(to_platform_device(dev), | ||
316 | IORESOURCE_MEM, 1); | ||
317 | pram = devm_ioremap_resource(dev, res); | ||
318 | if (IS_ERR(pram)) | ||
319 | mspi->pram = NULL; | ||
320 | else | ||
321 | mspi->pram = pram; | ||
322 | } else { | ||
323 | unsigned long pram_ofs = fsl_spi_cpm_get_pram(mspi); | ||
324 | |||
325 | if (IS_ERR_VALUE(pram_ofs)) | ||
326 | mspi->pram = NULL; | ||
327 | else | ||
328 | mspi->pram = cpm_muram_addr(pram_ofs); | ||
329 | } | ||
330 | if (mspi->pram == NULL) { | ||
324 | dev_err(dev, "can't allocate spi parameter ram\n"); | 331 | dev_err(dev, "can't allocate spi parameter ram\n"); |
325 | goto err_pram; | 332 | goto err_pram; |
326 | } | 333 | } |
@@ -346,8 +353,6 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) | |||
346 | goto err_dummy_rx; | 353 | goto err_dummy_rx; |
347 | } | 354 | } |
348 | 355 | ||
349 | mspi->pram = cpm_muram_addr(pram_ofs); | ||
350 | |||
351 | mspi->tx_bd = cpm_muram_addr(bds_ofs); | 356 | mspi->tx_bd = cpm_muram_addr(bds_ofs); |
352 | mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd)); | 357 | mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd)); |
353 | 358 | ||
@@ -375,7 +380,8 @@ err_dummy_rx: | |||
375 | err_dummy_tx: | 380 | err_dummy_tx: |
376 | cpm_muram_free(bds_ofs); | 381 | cpm_muram_free(bds_ofs); |
377 | err_bds: | 382 | err_bds: |
378 | cpm_muram_free(pram_ofs); | 383 | if (!(mspi->flags & SPI_CPM1)) |
384 | cpm_muram_free(cpm_muram_offset(mspi->pram)); | ||
379 | err_pram: | 385 | err_pram: |
380 | fsl_spi_free_dummy_rx(); | 386 | fsl_spi_free_dummy_rx(); |
381 | return -ENOMEM; | 387 | return -ENOMEM; |
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c index d0a73a09a9bd..80d245ac846f 100644 --- a/drivers/spi/spi-fsl-espi.c +++ b/drivers/spi/spi-fsl-espi.c | |||
@@ -359,14 +359,16 @@ static void fsl_espi_rw_trans(struct spi_message *m, | |||
359 | struct fsl_espi_transfer *trans, u8 *rx_buff) | 359 | struct fsl_espi_transfer *trans, u8 *rx_buff) |
360 | { | 360 | { |
361 | struct fsl_espi_transfer *espi_trans = trans; | 361 | struct fsl_espi_transfer *espi_trans = trans; |
362 | unsigned int n_tx = espi_trans->n_tx; | 362 | unsigned int total_len = espi_trans->len; |
363 | unsigned int n_rx = espi_trans->n_rx; | ||
364 | struct spi_transfer *t; | 363 | struct spi_transfer *t; |
365 | u8 *local_buf; | 364 | u8 *local_buf; |
366 | u8 *rx_buf = rx_buff; | 365 | u8 *rx_buf = rx_buff; |
367 | unsigned int trans_len; | 366 | unsigned int trans_len; |
368 | unsigned int addr; | 367 | unsigned int addr; |
369 | int i, pos, loop; | 368 | unsigned int tx_only; |
369 | unsigned int rx_pos = 0; | ||
370 | unsigned int pos; | ||
371 | int i, loop; | ||
370 | 372 | ||
371 | local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); | 373 | local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); |
372 | if (!local_buf) { | 374 | if (!local_buf) { |
@@ -374,36 +376,48 @@ static void fsl_espi_rw_trans(struct spi_message *m, | |||
374 | return; | 376 | return; |
375 | } | 377 | } |
376 | 378 | ||
377 | for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) { | 379 | for (pos = 0, loop = 0; pos < total_len; pos += trans_len, loop++) { |
378 | trans_len = n_rx - pos; | 380 | trans_len = total_len - pos; |
379 | if (trans_len > SPCOM_TRANLEN_MAX - n_tx) | ||
380 | trans_len = SPCOM_TRANLEN_MAX - n_tx; | ||
381 | 381 | ||
382 | i = 0; | 382 | i = 0; |
383 | tx_only = 0; | ||
383 | list_for_each_entry(t, &m->transfers, transfer_list) { | 384 | list_for_each_entry(t, &m->transfers, transfer_list) { |
384 | if (t->tx_buf) { | 385 | if (t->tx_buf) { |
385 | memcpy(local_buf + i, t->tx_buf, t->len); | 386 | memcpy(local_buf + i, t->tx_buf, t->len); |
386 | i += t->len; | 387 | i += t->len; |
388 | if (!t->rx_buf) | ||
389 | tx_only += t->len; | ||
387 | } | 390 | } |
388 | } | 391 | } |
389 | 392 | ||
393 | /* Add additional TX bytes to compensate for SPCOM_TRANLEN_MAX */ | ||
394 | if (loop > 0) | ||
395 | trans_len += tx_only; | ||
396 | |||
397 | if (trans_len > SPCOM_TRANLEN_MAX) | ||
398 | trans_len = SPCOM_TRANLEN_MAX; | ||
399 | |||
400 | /* Update device offset */ | ||
390 | if (pos > 0) { | 401 | if (pos > 0) { |
391 | addr = fsl_espi_cmd2addr(local_buf); | 402 | addr = fsl_espi_cmd2addr(local_buf); |
392 | addr += pos; | 403 | addr += rx_pos; |
393 | fsl_espi_addr2cmd(addr, local_buf); | 404 | fsl_espi_addr2cmd(addr, local_buf); |
394 | } | 405 | } |
395 | 406 | ||
396 | espi_trans->n_tx = n_tx; | 407 | espi_trans->len = trans_len; |
397 | espi_trans->n_rx = trans_len; | ||
398 | espi_trans->len = trans_len + n_tx; | ||
399 | espi_trans->tx_buf = local_buf; | 408 | espi_trans->tx_buf = local_buf; |
400 | espi_trans->rx_buf = local_buf; | 409 | espi_trans->rx_buf = local_buf; |
401 | fsl_espi_do_trans(m, espi_trans); | 410 | fsl_espi_do_trans(m, espi_trans); |
402 | 411 | ||
403 | memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len); | 412 | /* If there is at least one RX byte then copy it to rx_buf */ |
413 | if (tx_only < SPCOM_TRANLEN_MAX) | ||
414 | memcpy(rx_buf + rx_pos, espi_trans->rx_buf + tx_only, | ||
415 | trans_len - tx_only); | ||
416 | |||
417 | rx_pos += trans_len - tx_only; | ||
404 | 418 | ||
405 | if (loop > 0) | 419 | if (loop > 0) |
406 | espi_trans->actual_length += espi_trans->len - n_tx; | 420 | espi_trans->actual_length += espi_trans->len - tx_only; |
407 | else | 421 | else |
408 | espi_trans->actual_length += espi_trans->len; | 422 | espi_trans->actual_length += espi_trans->len; |
409 | } | 423 | } |
@@ -418,6 +432,7 @@ static int fsl_espi_do_one_msg(struct spi_master *master, | |||
418 | u8 *rx_buf = NULL; | 432 | u8 *rx_buf = NULL; |
419 | unsigned int n_tx = 0; | 433 | unsigned int n_tx = 0; |
420 | unsigned int n_rx = 0; | 434 | unsigned int n_rx = 0; |
435 | unsigned int xfer_len = 0; | ||
421 | struct fsl_espi_transfer espi_trans; | 436 | struct fsl_espi_transfer espi_trans; |
422 | 437 | ||
423 | list_for_each_entry(t, &m->transfers, transfer_list) { | 438 | list_for_each_entry(t, &m->transfers, transfer_list) { |
@@ -427,11 +442,13 @@ static int fsl_espi_do_one_msg(struct spi_master *master, | |||
427 | n_rx += t->len; | 442 | n_rx += t->len; |
428 | rx_buf = t->rx_buf; | 443 | rx_buf = t->rx_buf; |
429 | } | 444 | } |
445 | if ((t->tx_buf) || (t->rx_buf)) | ||
446 | xfer_len += t->len; | ||
430 | } | 447 | } |
431 | 448 | ||
432 | espi_trans.n_tx = n_tx; | 449 | espi_trans.n_tx = n_tx; |
433 | espi_trans.n_rx = n_rx; | 450 | espi_trans.n_rx = n_rx; |
434 | espi_trans.len = n_tx + n_rx; | 451 | espi_trans.len = xfer_len; |
435 | espi_trans.actual_length = 0; | 452 | espi_trans.actual_length = 0; |
436 | espi_trans.status = 0; | 453 | espi_trans.status = 0; |
437 | 454 | ||
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 4df8942058de..d1a5b9fc3eba 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c | |||
@@ -1210,6 +1210,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master, | |||
1210 | struct omap2_mcspi *mcspi; | 1210 | struct omap2_mcspi *mcspi; |
1211 | struct omap2_mcspi_dma *mcspi_dma; | 1211 | struct omap2_mcspi_dma *mcspi_dma; |
1212 | struct spi_transfer *t; | 1212 | struct spi_transfer *t; |
1213 | int status; | ||
1213 | 1214 | ||
1214 | spi = m->spi; | 1215 | spi = m->spi; |
1215 | mcspi = spi_master_get_devdata(master); | 1216 | mcspi = spi_master_get_devdata(master); |
@@ -1229,7 +1230,8 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master, | |||
1229 | tx_buf ? "tx" : "", | 1230 | tx_buf ? "tx" : "", |
1230 | rx_buf ? "rx" : "", | 1231 | rx_buf ? "rx" : "", |
1231 | t->bits_per_word); | 1232 | t->bits_per_word); |
1232 | return -EINVAL; | 1233 | status = -EINVAL; |
1234 | goto out; | ||
1233 | } | 1235 | } |
1234 | 1236 | ||
1235 | if (m->is_dma_mapped || len < DMA_MIN_BYTES) | 1237 | if (m->is_dma_mapped || len < DMA_MIN_BYTES) |
@@ -1241,7 +1243,8 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master, | |||
1241 | if (dma_mapping_error(mcspi->dev, t->tx_dma)) { | 1243 | if (dma_mapping_error(mcspi->dev, t->tx_dma)) { |
1242 | dev_dbg(mcspi->dev, "dma %cX %d bytes error\n", | 1244 | dev_dbg(mcspi->dev, "dma %cX %d bytes error\n", |
1243 | 'T', len); | 1245 | 'T', len); |
1244 | return -EINVAL; | 1246 | status = -EINVAL; |
1247 | goto out; | ||
1245 | } | 1248 | } |
1246 | } | 1249 | } |
1247 | if (mcspi_dma->dma_rx && rx_buf != NULL) { | 1250 | if (mcspi_dma->dma_rx && rx_buf != NULL) { |
@@ -1253,14 +1256,19 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master, | |||
1253 | if (tx_buf != NULL) | 1256 | if (tx_buf != NULL) |
1254 | dma_unmap_single(mcspi->dev, t->tx_dma, | 1257 | dma_unmap_single(mcspi->dev, t->tx_dma, |
1255 | len, DMA_TO_DEVICE); | 1258 | len, DMA_TO_DEVICE); |
1256 | return -EINVAL; | 1259 | status = -EINVAL; |
1260 | goto out; | ||
1257 | } | 1261 | } |
1258 | } | 1262 | } |
1259 | } | 1263 | } |
1260 | 1264 | ||
1261 | omap2_mcspi_work(mcspi, m); | 1265 | omap2_mcspi_work(mcspi, m); |
1266 | /* spi_finalize_current_message() changes the status inside the | ||
1267 | * spi_message, save the status here. */ | ||
1268 | status = m->status; | ||
1269 | out: | ||
1262 | spi_finalize_current_message(master); | 1270 | spi_finalize_current_message(master); |
1263 | return 0; | 1271 | return status; |
1264 | } | 1272 | } |
1265 | 1273 | ||
1266 | static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi) | 1274 | static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi) |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index d5d7d2235163..50910d85df5a 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -583,6 +583,15 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg) | |||
583 | rx_dev = master->dma_rx->device->dev; | 583 | rx_dev = master->dma_rx->device->dev; |
584 | 584 | ||
585 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | 585 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
586 | /* | ||
587 | * Restore the original value of tx_buf or rx_buf if they are | ||
588 | * NULL. | ||
589 | */ | ||
590 | if (xfer->tx_buf == master->dummy_tx) | ||
591 | xfer->tx_buf = NULL; | ||
592 | if (xfer->rx_buf == master->dummy_rx) | ||
593 | xfer->rx_buf = NULL; | ||
594 | |||
586 | if (!master->can_dma(master, msg->spi, xfer)) | 595 | if (!master->can_dma(master, msg->spi, xfer)) |
587 | continue; | 596 | continue; |
588 | 597 | ||
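
The spi.c hunk above resets tx_buf/rx_buf to NULL when they still point at the core's shared dummy buffers, so callers never see the substitution that was made only for DMA mapping. A simplified sketch with assumed field names:

/* Simplified stand-ins for spi_transfer and the controller's dummies. */
struct xfer_like {
	const void *tx_buf;
	void *rx_buf;
};

static void restore_dummy_buffers(struct xfer_like *x,
				  const void *dummy_tx, void *dummy_rx)
{
	if (x->tx_buf == dummy_tx)
		x->tx_buf = NULL;	/* caller originally passed no TX data */
	if (x->rx_buf == dummy_rx)
		x->rx_buf = NULL;	/* caller originally passed no RX buffer */
}
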
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig index b78643f907e7..072dac04a750 100644 --- a/drivers/staging/media/omap4iss/Kconfig +++ b/drivers/staging/media/omap4iss/Kconfig | |||
@@ -2,6 +2,7 @@ config VIDEO_OMAP4 | |||
2 | bool "OMAP 4 Camera support" | 2 | bool "OMAP 4 Camera support" |
3 | depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4 | 3 | depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4 |
4 | depends on HAS_DMA | 4 | depends on HAS_DMA |
5 | select MFD_SYSCON | ||
5 | select VIDEOBUF2_DMA_CONTIG | 6 | select VIDEOBUF2_DMA_CONTIG |
6 | ---help--- | 7 | ---help--- |
7 | Driver for an OMAP 4 ISS controller. | 8 | Driver for an OMAP 4 ISS controller. |
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c index e0ad5e520e2d..7ced940bd807 100644 --- a/drivers/staging/media/omap4iss/iss.c +++ b/drivers/staging/media/omap4iss/iss.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/dma-mapping.h> | 17 | #include <linux/dma-mapping.h> |
18 | #include <linux/i2c.h> | 18 | #include <linux/i2c.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/mfd/syscon.h> | ||
20 | #include <linux/module.h> | 21 | #include <linux/module.h> |
21 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
22 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
@@ -1386,6 +1387,16 @@ static int iss_probe(struct platform_device *pdev) | |||
1386 | 1387 | ||
1387 | platform_set_drvdata(pdev, iss); | 1388 | platform_set_drvdata(pdev, iss); |
1388 | 1389 | ||
1390 | /* | ||
1391 | * TODO: When implementing DT support switch to syscon regmap lookup by | ||
1392 | * phandle. | ||
1393 | */ | ||
1394 | iss->syscon = syscon_regmap_lookup_by_compatible("syscon"); | ||
1395 | if (IS_ERR(iss->syscon)) { | ||
1396 | ret = PTR_ERR(iss->syscon); | ||
1397 | goto error; | ||
1398 | } | ||
1399 | |||
1389 | /* Clocks */ | 1400 | /* Clocks */ |
1390 | ret = iss_map_mem_resource(pdev, iss, OMAP4_ISS_MEM_TOP); | 1401 | ret = iss_map_mem_resource(pdev, iss, OMAP4_ISS_MEM_TOP); |
1391 | if (ret < 0) | 1402 | if (ret < 0) |
diff --git a/drivers/staging/media/omap4iss/iss.h b/drivers/staging/media/omap4iss/iss.h index 734cfeeb0314..35df8b4709e6 100644 --- a/drivers/staging/media/omap4iss/iss.h +++ b/drivers/staging/media/omap4iss/iss.h | |||
@@ -29,6 +29,8 @@ | |||
29 | #include "iss_ipipe.h" | 29 | #include "iss_ipipe.h" |
30 | #include "iss_resizer.h" | 30 | #include "iss_resizer.h" |
31 | 31 | ||
32 | struct regmap; | ||
33 | |||
32 | #define to_iss_device(ptr_module) \ | 34 | #define to_iss_device(ptr_module) \ |
33 | container_of(ptr_module, struct iss_device, ptr_module) | 35 | container_of(ptr_module, struct iss_device, ptr_module) |
34 | #define to_device(ptr_module) \ | 36 | #define to_device(ptr_module) \ |
@@ -79,6 +81,7 @@ struct iss_reg { | |||
79 | 81 | ||
80 | /* | 82 | /* |
81 | * struct iss_device - ISS device structure. | 83 | * struct iss_device - ISS device structure. |
84 | * @syscon: Regmap for the syscon register space | ||
82 | * @crashed: Bitmask of crashed entities (indexed by entity ID) | 85 | * @crashed: Bitmask of crashed entities (indexed by entity ID) |
83 | */ | 86 | */ |
84 | struct iss_device { | 87 | struct iss_device { |
@@ -93,6 +96,7 @@ struct iss_device { | |||
93 | 96 | ||
94 | struct resource *res[OMAP4_ISS_MEM_LAST]; | 97 | struct resource *res[OMAP4_ISS_MEM_LAST]; |
95 | void __iomem *regs[OMAP4_ISS_MEM_LAST]; | 98 | void __iomem *regs[OMAP4_ISS_MEM_LAST]; |
99 | struct regmap *syscon; | ||
96 | 100 | ||
97 | u64 raw_dmamask; | 101 | u64 raw_dmamask; |
98 | 102 | ||
diff --git a/drivers/staging/media/omap4iss/iss_csiphy.c b/drivers/staging/media/omap4iss/iss_csiphy.c index 7c3d55d811ef..748607f8918f 100644 --- a/drivers/staging/media/omap4iss/iss_csiphy.c +++ b/drivers/staging/media/omap4iss/iss_csiphy.c | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
15 | #include <linux/device.h> | 15 | #include <linux/device.h> |
16 | #include <linux/regmap.h> | ||
16 | 17 | ||
17 | #include "../../../../arch/arm/mach-omap2/control.h" | 18 | #include "../../../../arch/arm/mach-omap2/control.h" |
18 | 19 | ||
@@ -140,9 +141,11 @@ int omap4iss_csiphy_config(struct iss_device *iss, | |||
140 | * - bit [18] : CSIPHY1 CTRLCLK enable | 141 | * - bit [18] : CSIPHY1 CTRLCLK enable |
141 | * - bit [17:16] : CSIPHY1 config: 00 d-phy, 01/10 ccp2 | 142 | * - bit [17:16] : CSIPHY1 config: 00 d-phy, 01/10 ccp2 |
142 | */ | 143 | */ |
143 | cam_rx_ctrl = omap4_ctrl_pad_readl( | 144 | /* |
144 | OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX); | 145 | * TODO: When implementing DT support specify the CONTROL_CAMERA_RX |
145 | 146 | * register offset in the syscon property instead of hardcoding it. | |
147 | */ | ||
148 | regmap_read(iss->syscon, 0x68, &cam_rx_ctrl); | ||
146 | 149 | ||
147 | if (subdevs->interface == ISS_INTERFACE_CSI2A_PHY1) { | 150 | if (subdevs->interface == ISS_INTERFACE_CSI2A_PHY1) { |
148 | cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI21_LANEENABLE_MASK | | 151 | cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI21_LANEENABLE_MASK | |
@@ -166,8 +169,7 @@ int omap4iss_csiphy_config(struct iss_device *iss, | |||
166 | cam_rx_ctrl |= OMAP4_CAMERARX_CSI22_CTRLCLKEN_MASK; | 169 | cam_rx_ctrl |= OMAP4_CAMERARX_CSI22_CTRLCLKEN_MASK; |
167 | } | 170 | } |
168 | 171 | ||
169 | omap4_ctrl_pad_writel(cam_rx_ctrl, | 172 | regmap_write(iss->syscon, 0x68, cam_rx_ctrl); |
170 | OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX); | ||
171 | 173 | ||
172 | /* Reset used lane count */ | 174 | /* Reset used lane count */ |
173 | csi2->phy->used_data_lanes = 0; | 175 | csi2->phy->used_data_lanes = 0; |
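
The csiphy hunk above replaces the OMAP control-pad accessors with reads and writes through the syscon regmap. The same read-modify-write can be expressed in one call with regmap_update_bits(); this is only a sketch, the 0x68 offset is the hardcoded value flagged by the TODO, and mask/bits stand for whatever lane-enable and CTRLCLK bits apply:

#include <linux/regmap.h>

/* Sketch: one regmap call covers the read-modify-write done above. */
static int camera_rx_update(struct regmap *syscon,
			    unsigned int mask, unsigned int bits)
{
	return regmap_update_bits(syscon, 0x68, mask, bits);
}
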
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c index f1e57425e39f..5bab1c684bb1 100644 --- a/drivers/tty/hvc/hvc_xen.c +++ b/drivers/tty/hvc/hvc_xen.c | |||
@@ -299,11 +299,27 @@ static int xen_initial_domain_console_init(void) | |||
299 | return 0; | 299 | return 0; |
300 | } | 300 | } |
301 | 301 | ||
302 | static void xen_console_update_evtchn(struct xencons_info *info) | ||
303 | { | ||
304 | if (xen_hvm_domain()) { | ||
305 | uint64_t v; | ||
306 | int err; | ||
307 | |||
308 | err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); | ||
309 | if (!err && v) | ||
310 | info->evtchn = v; | ||
311 | } else | ||
312 | info->evtchn = xen_start_info->console.domU.evtchn; | ||
313 | } | ||
314 | |||
302 | void xen_console_resume(void) | 315 | void xen_console_resume(void) |
303 | { | 316 | { |
304 | struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE); | 317 | struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE); |
305 | if (info != NULL && info->irq) | 318 | if (info != NULL && info->irq) { |
319 | if (!xen_initial_domain()) | ||
320 | xen_console_update_evtchn(info); | ||
306 | rebind_evtchn_irq(info->evtchn, info->irq); | 321 | rebind_evtchn_irq(info->evtchn, info->irq); |
322 | } | ||
307 | } | 323 | } |
308 | 324 | ||
309 | static void xencons_disconnect_backend(struct xencons_info *info) | 325 | static void xencons_disconnect_backend(struct xencons_info *info) |
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 08da4d3e2162..46bcebba54b2 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
@@ -1998,6 +1998,8 @@ pci_wch_ch38x_setup(struct serial_private *priv, | |||
1998 | #define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250 | 1998 | #define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250 |
1999 | #define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 | 1999 | #define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 |
2000 | 2000 | ||
2001 | #define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358 | ||
2002 | |||
2001 | /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ | 2003 | /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ |
2002 | #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 | 2004 | #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 |
2003 | #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 | 2005 | #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588 |
@@ -2520,6 +2522,13 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { | |||
2520 | .subdevice = PCI_ANY_ID, | 2522 | .subdevice = PCI_ANY_ID, |
2521 | .setup = pci_xr17v35x_setup, | 2523 | .setup = pci_xr17v35x_setup, |
2522 | }, | 2524 | }, |
2525 | { | ||
2526 | .vendor = PCI_VENDOR_ID_EXAR, | ||
2527 | .device = PCI_DEVICE_ID_EXAR_XR17V8358, | ||
2528 | .subvendor = PCI_ANY_ID, | ||
2529 | .subdevice = PCI_ANY_ID, | ||
2530 | .setup = pci_xr17v35x_setup, | ||
2531 | }, | ||
2523 | /* | 2532 | /* |
2524 | * Xircom cards | 2533 | * Xircom cards |
2525 | */ | 2534 | */ |
@@ -2999,6 +3008,7 @@ enum pci_board_num_t { | |||
2999 | pbn_exar_XR17V352, | 3008 | pbn_exar_XR17V352, |
3000 | pbn_exar_XR17V354, | 3009 | pbn_exar_XR17V354, |
3001 | pbn_exar_XR17V358, | 3010 | pbn_exar_XR17V358, |
3011 | pbn_exar_XR17V8358, | ||
3002 | pbn_exar_ibm_saturn, | 3012 | pbn_exar_ibm_saturn, |
3003 | pbn_pasemi_1682M, | 3013 | pbn_pasemi_1682M, |
3004 | pbn_ni8430_2, | 3014 | pbn_ni8430_2, |
@@ -3685,6 +3695,14 @@ static struct pciserial_board pci_boards[] = { | |||
3685 | .reg_shift = 0, | 3695 | .reg_shift = 0, |
3686 | .first_offset = 0, | 3696 | .first_offset = 0, |
3687 | }, | 3697 | }, |
3698 | [pbn_exar_XR17V8358] = { | ||
3699 | .flags = FL_BASE0, | ||
3700 | .num_ports = 16, | ||
3701 | .base_baud = 7812500, | ||
3702 | .uart_offset = 0x400, | ||
3703 | .reg_shift = 0, | ||
3704 | .first_offset = 0, | ||
3705 | }, | ||
3688 | [pbn_exar_ibm_saturn] = { | 3706 | [pbn_exar_ibm_saturn] = { |
3689 | .flags = FL_BASE0, | 3707 | .flags = FL_BASE0, |
3690 | .num_ports = 1, | 3708 | .num_ports = 1, |
@@ -5080,7 +5098,7 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
5080 | 0, | 5098 | 0, |
5081 | 0, pbn_exar_XR17C158 }, | 5099 | 0, pbn_exar_XR17C158 }, |
5082 | /* | 5100 | /* |
5083 | * Exar Corp. XR17V35[248] Dual/Quad/Octal PCIe UARTs | 5101 | * Exar Corp. XR17V[48]35[248] Dual/Quad/Octal/Hexa PCIe UARTs |
5084 | */ | 5102 | */ |
5085 | { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V352, | 5103 | { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V352, |
5086 | PCI_ANY_ID, PCI_ANY_ID, | 5104 | PCI_ANY_ID, PCI_ANY_ID, |
@@ -5094,7 +5112,10 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
5094 | PCI_ANY_ID, PCI_ANY_ID, | 5112 | PCI_ANY_ID, PCI_ANY_ID, |
5095 | 0, | 5113 | 0, |
5096 | 0, pbn_exar_XR17V358 }, | 5114 | 0, pbn_exar_XR17V358 }, |
5097 | 5115 | { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V8358, | |
5116 | PCI_ANY_ID, PCI_ANY_ID, | ||
5117 | 0, | ||
5118 | 0, pbn_exar_XR17V8358 }, | ||
5098 | /* | 5119 | /* |
5099 | * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke) | 5120 | * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke) |
5100 | */ | 5121 | */ |
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index d58fe4763d9e..27dade29646b 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c | |||
@@ -880,6 +880,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port) | |||
880 | config.direction = DMA_MEM_TO_DEV; | 880 | config.direction = DMA_MEM_TO_DEV; |
881 | config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; | 881 | config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; |
882 | config.dst_addr = port->mapbase + ATMEL_US_THR; | 882 | config.dst_addr = port->mapbase + ATMEL_US_THR; |
883 | config.dst_maxburst = 1; | ||
883 | 884 | ||
884 | ret = dmaengine_slave_config(atmel_port->chan_tx, | 885 | ret = dmaengine_slave_config(atmel_port->chan_tx, |
885 | &config); | 886 | &config); |
@@ -1059,6 +1060,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port) | |||
1059 | config.direction = DMA_DEV_TO_MEM; | 1060 | config.direction = DMA_DEV_TO_MEM; |
1060 | config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; | 1061 | config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; |
1061 | config.src_addr = port->mapbase + ATMEL_US_RHR; | 1062 | config.src_addr = port->mapbase + ATMEL_US_RHR; |
1063 | config.src_maxburst = 1; | ||
1062 | 1064 | ||
1063 | ret = dmaengine_slave_config(atmel_port->chan_rx, | 1065 | ret = dmaengine_slave_config(atmel_port->chan_rx, |
1064 | &config); | 1066 | &config); |
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c index 5b73afb9f9f3..137381e649e5 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/of_serial.c | |||
@@ -346,7 +346,6 @@ static const struct of_device_id of_platform_serial_table[] = { | |||
346 | { .compatible = "ibm,qpace-nwp-serial", | 346 | { .compatible = "ibm,qpace-nwp-serial", |
347 | .data = (void *)PORT_NWPSERIAL, }, | 347 | .data = (void *)PORT_NWPSERIAL, }, |
348 | #endif | 348 | #endif |
349 | { .type = "serial", .data = (void *)PORT_UNKNOWN, }, | ||
350 | { /* end of list */ }, | 349 | { /* end of list */ }, |
351 | }; | 350 | }; |
352 | 351 | ||
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index cf08876922f1..a0ae942d9562 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c | |||
@@ -1068,8 +1068,9 @@ static int s3c64xx_serial_startup(struct uart_port *port) | |||
1068 | spin_lock_irqsave(&port->lock, flags); | 1068 | spin_lock_irqsave(&port->lock, flags); |
1069 | 1069 | ||
1070 | ufcon = rd_regl(port, S3C2410_UFCON); | 1070 | ufcon = rd_regl(port, S3C2410_UFCON); |
1071 | ufcon |= S3C2410_UFCON_RESETRX | S3C2410_UFCON_RESETTX | | 1071 | ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8; |
1072 | S5PV210_UFCON_RXTRIG8; | 1072 | if (!uart_console(port)) |
1073 | ufcon |= S3C2410_UFCON_RESETTX; | ||
1073 | wr_regl(port, S3C2410_UFCON, ufcon); | 1074 | wr_regl(port, S3C2410_UFCON, ufcon); |
1074 | 1075 | ||
1075 | enable_rx_pio(ourport); | 1076 | enable_rx_pio(ourport); |
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index eb5b03be9dfd..0b7bb12dfc68 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c | |||
@@ -1770,7 +1770,7 @@ static const struct file_operations uart_proc_fops = { | |||
1770 | * @port: the port to write the message | 1770 | * @port: the port to write the message |
1771 | * @s: array of characters | 1771 | * @s: array of characters |
1772 | * @count: number of characters in string to write | 1772 | * @count: number of characters in string to write |
1773 | * @write: function to write character to port | 1773 | * @putchar: function to write character to port |
1774 | */ | 1774 | */ |
1775 | void uart_console_write(struct uart_port *port, const char *s, | 1775 | void uart_console_write(struct uart_port *port, const char *s, |
1776 | unsigned int count, | 1776 | unsigned int count, |
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c index 708eead850b0..b1c6bd3d483f 100644 --- a/drivers/tty/serial/uartlite.c +++ b/drivers/tty/serial/uartlite.c | |||
@@ -632,7 +632,8 @@ MODULE_DEVICE_TABLE(of, ulite_of_match); | |||
632 | 632 | ||
633 | static int ulite_probe(struct platform_device *pdev) | 633 | static int ulite_probe(struct platform_device *pdev) |
634 | { | 634 | { |
635 | struct resource *res, *res2; | 635 | struct resource *res; |
636 | int irq; | ||
636 | int id = pdev->id; | 637 | int id = pdev->id; |
637 | #ifdef CONFIG_OF | 638 | #ifdef CONFIG_OF |
638 | const __be32 *prop; | 639 | const __be32 *prop; |
@@ -646,11 +647,11 @@ static int ulite_probe(struct platform_device *pdev) | |||
646 | if (!res) | 647 | if (!res) |
647 | return -ENODEV; | 648 | return -ENODEV; |
648 | 649 | ||
649 | res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 650 | irq = platform_get_irq(pdev, 0); |
650 | if (!res2) | 651 | if (irq <= 0) |
651 | return -ENODEV; | 652 | return -ENXIO; |
652 | 653 | ||
653 | return ulite_assign(&pdev->dev, id, res->start, res2->start); | 654 | return ulite_assign(&pdev->dev, id, res->start, irq); |
654 | } | 655 | } |
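
This uartlite change, like the xilinx_uartps one below, moves from reading the IORESOURCE_IRQ resource to platform_get_irq(). A minimal probe-side sketch of that pattern (names are placeholders):

#include <linux/platform_device.h>
#include <linux/errno.h>

/* Sketch: fetch the first interrupt and treat <= 0 as "no usable IRQ". */
static int example_get_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq <= 0)
		return -ENXIO;
	return irq;
}
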
655 | 656 | ||
656 | static int ulite_remove(struct platform_device *pdev) | 657 | static int ulite_remove(struct platform_device *pdev) |
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index f218ec658f5d..3ddbac767db3 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c | |||
@@ -1331,9 +1331,9 @@ static SIMPLE_DEV_PM_OPS(cdns_uart_dev_pm_ops, cdns_uart_suspend, | |||
1331 | */ | 1331 | */ |
1332 | static int cdns_uart_probe(struct platform_device *pdev) | 1332 | static int cdns_uart_probe(struct platform_device *pdev) |
1333 | { | 1333 | { |
1334 | int rc, id; | 1334 | int rc, id, irq; |
1335 | struct uart_port *port; | 1335 | struct uart_port *port; |
1336 | struct resource *res, *res2; | 1336 | struct resource *res; |
1337 | struct cdns_uart *cdns_uart_data; | 1337 | struct cdns_uart *cdns_uart_data; |
1338 | 1338 | ||
1339 | cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data), | 1339 | cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data), |
@@ -1380,9 +1380,9 @@ static int cdns_uart_probe(struct platform_device *pdev) | |||
1380 | goto err_out_clk_disable; | 1380 | goto err_out_clk_disable; |
1381 | } | 1381 | } |
1382 | 1382 | ||
1383 | res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 1383 | irq = platform_get_irq(pdev, 0); |
1384 | if (!res2) { | 1384 | if (irq <= 0) { |
1385 | rc = -ENODEV; | 1385 | rc = -ENXIO; |
1386 | goto err_out_clk_disable; | 1386 | goto err_out_clk_disable; |
1387 | } | 1387 | } |
1388 | 1388 | ||
@@ -1411,7 +1411,7 @@ static int cdns_uart_probe(struct platform_device *pdev) | |||
1411 | * and triggers invocation of the config_port() entry point. | 1411 | * and triggers invocation of the config_port() entry point. |
1412 | */ | 1412 | */ |
1413 | port->mapbase = res->start; | 1413 | port->mapbase = res->start; |
1414 | port->irq = res2->start; | 1414 | port->irq = irq; |
1415 | port->dev = &pdev->dev; | 1415 | port->dev = &pdev->dev; |
1416 | port->uartclk = clk_get_rate(cdns_uart_data->uartclk); | 1416 | port->uartclk = clk_get_rate(cdns_uart_data->uartclk); |
1417 | port->private_data = cdns_uart_data; | 1417 | port->private_data = cdns_uart_data; |
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c index 632fc8152061..8e53fe469664 100644 --- a/drivers/tty/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c | |||
@@ -536,7 +536,7 @@ EXPORT_SYMBOL(tty_termios_hw_change); | |||
536 | * Locking: termios_rwsem | 536 | * Locking: termios_rwsem |
537 | */ | 537 | */ |
538 | 538 | ||
539 | static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) | 539 | int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) |
540 | { | 540 | { |
541 | struct ktermios old_termios; | 541 | struct ktermios old_termios; |
542 | struct tty_ldisc *ld; | 542 | struct tty_ldisc *ld; |
@@ -569,6 +569,7 @@ static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios) | |||
569 | up_write(&tty->termios_rwsem); | 569 | up_write(&tty->termios_rwsem); |
570 | return 0; | 570 | return 0; |
571 | } | 571 | } |
572 | EXPORT_SYMBOL_GPL(tty_set_termios); | ||
572 | 573 | ||
573 | /** | 574 | /** |
574 | * set_termios - set termios values for a tty | 575 | * set_termios - set termios values for a tty |
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c index 083acf45ad5a..19d655a743b5 100644 --- a/drivers/usb/chipidea/otg_fsm.c +++ b/drivers/usb/chipidea/otg_fsm.c | |||
@@ -520,7 +520,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on) | |||
520 | { | 520 | { |
521 | struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm); | 521 | struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm); |
522 | 522 | ||
523 | mutex_unlock(&fsm->lock); | ||
524 | if (on) { | 523 | if (on) { |
525 | ci_role_stop(ci); | 524 | ci_role_stop(ci); |
526 | ci_role_start(ci, CI_ROLE_HOST); | 525 | ci_role_start(ci, CI_ROLE_HOST); |
@@ -529,7 +528,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on) | |||
529 | hw_device_reset(ci); | 528 | hw_device_reset(ci); |
530 | ci_role_start(ci, CI_ROLE_GADGET); | 529 | ci_role_start(ci, CI_ROLE_GADGET); |
531 | } | 530 | } |
532 | mutex_lock(&fsm->lock); | ||
533 | return 0; | 531 | return 0; |
534 | } | 532 | } |
535 | 533 | ||
@@ -537,12 +535,10 @@ static int ci_otg_start_gadget(struct otg_fsm *fsm, int on) | |||
537 | { | 535 | { |
538 | struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm); | 536 | struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm); |
539 | 537 | ||
540 | mutex_unlock(&fsm->lock); | ||
541 | if (on) | 538 | if (on) |
542 | usb_gadget_vbus_connect(&ci->gadget); | 539 | usb_gadget_vbus_connect(&ci->gadget); |
543 | else | 540 | else |
544 | usb_gadget_vbus_disconnect(&ci->gadget); | 541 | usb_gadget_vbus_disconnect(&ci->gadget); |
545 | mutex_lock(&fsm->lock); | ||
546 | 542 | ||
547 | return 0; | 543 | return 0; |
548 | } | 544 | } |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 3e15add665e2..5c8f58114677 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -1142,11 +1142,16 @@ static int acm_probe(struct usb_interface *intf, | |||
1142 | } | 1142 | } |
1143 | 1143 | ||
1144 | while (buflen > 0) { | 1144 | while (buflen > 0) { |
1145 | elength = buffer[0]; | ||
1146 | if (!elength) { | ||
1147 | dev_err(&intf->dev, "skipping garbage byte\n"); | ||
1148 | elength = 1; | ||
1149 | goto next_desc; | ||
1150 | } | ||
1145 | if (buffer[1] != USB_DT_CS_INTERFACE) { | 1151 | if (buffer[1] != USB_DT_CS_INTERFACE) { |
1146 | dev_err(&intf->dev, "skipping garbage\n"); | 1152 | dev_err(&intf->dev, "skipping garbage\n"); |
1147 | goto next_desc; | 1153 | goto next_desc; |
1148 | } | 1154 | } |
1149 | elength = buffer[0]; | ||
1150 | 1155 | ||
1151 | switch (buffer[2]) { | 1156 | switch (buffer[2]) { |
1152 | case USB_CDC_UNION_TYPE: /* we've found it */ | 1157 | case USB_CDC_UNION_TYPE: /* we've found it */ |
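
The cdc-acm hunk above reads the descriptor length byte before anything else and steps over a zero-length entry one byte at a time, so a malformed descriptor can no longer stall the parser. A compact sketch of that walk (the extra truncation check here is added for illustration and is not part of the patch):

/* Sketch of a defensive class-descriptor walk over a raw byte buffer. */
static void walk_class_descriptors(const unsigned char *buf, int buflen)
{
	while (buflen > 0) {
		int elength = buf[0];

		if (!elength)
			elength = 1;		/* garbage byte: skip it */
		else if (elength > buflen)
			break;			/* truncated descriptor */
		/* ... dispatch on buf[2] (the descriptor subtype) here ... */
		buf += elength;
		buflen -= elength;
	}
}
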
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c index 9db74ca7e5b9..275c92e53a59 100644 --- a/drivers/usb/host/ehci-msm.c +++ b/drivers/usb/host/ehci-msm.c | |||
@@ -88,13 +88,20 @@ static int ehci_msm_probe(struct platform_device *pdev) | |||
88 | } | 88 | } |
89 | 89 | ||
90 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 90 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
91 | hcd->regs = devm_ioremap_resource(&pdev->dev, res); | 91 | if (!res) { |
92 | if (IS_ERR(hcd->regs)) { | 92 | dev_err(&pdev->dev, "Unable to get memory resource\n"); |
93 | ret = PTR_ERR(hcd->regs); | 93 | ret = -ENODEV; |
94 | goto put_hcd; | 94 | goto put_hcd; |
95 | } | 95 | } |
96 | |||
96 | hcd->rsrc_start = res->start; | 97 | hcd->rsrc_start = res->start; |
97 | hcd->rsrc_len = resource_size(res); | 98 | hcd->rsrc_len = resource_size(res); |
99 | hcd->regs = devm_ioremap(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len); | ||
100 | if (!hcd->regs) { | ||
101 | dev_err(&pdev->dev, "ioremap failed\n"); | ||
102 | ret = -ENOMEM; | ||
103 | goto put_hcd; | ||
104 | } | ||
98 | 105 | ||
99 | /* | 106 | /* |
100 | * OTG driver takes care of PHY initialization, clock management, | 107 | * OTG driver takes care of PHY initialization, clock management, |
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h index 9893d696fc97..f58caa9e6a27 100644 --- a/drivers/usb/storage/uas-detect.h +++ b/drivers/usb/storage/uas-detect.h | |||
@@ -51,7 +51,8 @@ static int uas_find_endpoints(struct usb_host_interface *alt, | |||
51 | } | 51 | } |
52 | 52 | ||
53 | static int uas_use_uas_driver(struct usb_interface *intf, | 53 | static int uas_use_uas_driver(struct usb_interface *intf, |
54 | const struct usb_device_id *id) | 54 | const struct usb_device_id *id, |
55 | unsigned long *flags_ret) | ||
55 | { | 56 | { |
56 | struct usb_host_endpoint *eps[4] = { }; | 57 | struct usb_host_endpoint *eps[4] = { }; |
57 | struct usb_device *udev = interface_to_usbdev(intf); | 58 | struct usb_device *udev = interface_to_usbdev(intf); |
@@ -73,7 +74,7 @@ static int uas_use_uas_driver(struct usb_interface *intf, | |||
73 | * this writing the following versions exist: | 74 | * this writing the following versions exist: |
74 | * ASM1051 - no uas support version | 75 | * ASM1051 - no uas support version |
75 | * ASM1051 - with broken (*) uas support | 76 | * ASM1051 - with broken (*) uas support |
76 | * ASM1053 - with working uas support | 77 | * ASM1053 - with working uas support, but problems with large xfers |
77 | * ASM1153 - with working uas support | 78 | * ASM1153 - with working uas support |
78 | * | 79 | * |
79 | * Devices with these chips re-use a number of device-ids over the | 80 | * Devices with these chips re-use a number of device-ids over the |
@@ -103,6 +104,9 @@ static int uas_use_uas_driver(struct usb_interface *intf, | |||
103 | } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) { | 104 | } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) { |
104 | /* Possibly an ASM1051, disable uas */ | 105 | /* Possibly an ASM1051, disable uas */ |
105 | flags |= US_FL_IGNORE_UAS; | 106 | flags |= US_FL_IGNORE_UAS; |
107 | } else { | ||
108 | /* ASM1053, these have issues with large transfers */ | ||
109 | flags |= US_FL_MAX_SECTORS_240; | ||
106 | } | 110 | } |
107 | } | 111 | } |
108 | 112 | ||
@@ -132,5 +136,8 @@ static int uas_use_uas_driver(struct usb_interface *intf, | |||
132 | return 0; | 136 | return 0; |
133 | } | 137 | } |
134 | 138 | ||
139 | if (flags_ret) | ||
140 | *flags_ret = flags; | ||
141 | |||
135 | return 1; | 142 | return 1; |
136 | } | 143 | } |
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 6cdabdc119a7..6d3122afeed3 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c | |||
@@ -759,7 +759,10 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd) | |||
759 | 759 | ||
760 | static int uas_slave_alloc(struct scsi_device *sdev) | 760 | static int uas_slave_alloc(struct scsi_device *sdev) |
761 | { | 761 | { |
762 | sdev->hostdata = (void *)sdev->host->hostdata; | 762 | struct uas_dev_info *devinfo = |
763 | (struct uas_dev_info *)sdev->host->hostdata; | ||
764 | |||
765 | sdev->hostdata = devinfo; | ||
763 | 766 | ||
764 | /* USB has unusual DMA-alignment requirements: Although the | 767 | /* USB has unusual DMA-alignment requirements: Although the |
765 | * starting address of each scatter-gather element doesn't matter, | 768 | * starting address of each scatter-gather element doesn't matter, |
@@ -778,6 +781,11 @@ static int uas_slave_alloc(struct scsi_device *sdev) | |||
778 | */ | 781 | */ |
779 | blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); | 782 | blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); |
780 | 783 | ||
784 | if (devinfo->flags & US_FL_MAX_SECTORS_64) | ||
785 | blk_queue_max_hw_sectors(sdev->request_queue, 64); | ||
786 | else if (devinfo->flags & US_FL_MAX_SECTORS_240) | ||
787 | blk_queue_max_hw_sectors(sdev->request_queue, 240); | ||
788 | |||
781 | return 0; | 789 | return 0; |
782 | } | 790 | } |
783 | 791 | ||
@@ -887,8 +895,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id) | |||
887 | struct Scsi_Host *shost = NULL; | 895 | struct Scsi_Host *shost = NULL; |
888 | struct uas_dev_info *devinfo; | 896 | struct uas_dev_info *devinfo; |
889 | struct usb_device *udev = interface_to_usbdev(intf); | 897 | struct usb_device *udev = interface_to_usbdev(intf); |
898 | unsigned long dev_flags; | ||
890 | 899 | ||
891 | if (!uas_use_uas_driver(intf, id)) | 900 | if (!uas_use_uas_driver(intf, id, &dev_flags)) |
892 | return -ENODEV; | 901 | return -ENODEV; |
893 | 902 | ||
894 | if (uas_switch_interface(udev, intf)) | 903 | if (uas_switch_interface(udev, intf)) |
@@ -910,8 +919,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id) | |||
910 | devinfo->udev = udev; | 919 | devinfo->udev = udev; |
911 | devinfo->resetting = 0; | 920 | devinfo->resetting = 0; |
912 | devinfo->shutdown = 0; | 921 | devinfo->shutdown = 0; |
913 | devinfo->flags = id->driver_info; | 922 | devinfo->flags = dev_flags; |
914 | usb_stor_adjust_quirks(udev, &devinfo->flags); | ||
915 | init_usb_anchor(&devinfo->cmd_urbs); | 923 | init_usb_anchor(&devinfo->cmd_urbs); |
916 | init_usb_anchor(&devinfo->sense_urbs); | 924 | init_usb_anchor(&devinfo->sense_urbs); |
917 | init_usb_anchor(&devinfo->data_urbs); | 925 | init_usb_anchor(&devinfo->data_urbs); |
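
In the uas hunks above, the per-device flags computed by uas_use_uas_driver() now reach uas_slave_alloc(), where they translate into a request-queue limit. A sketch of that last step, reusing the flag names from the patch:

#include <linux/blkdev.h>
#include <linux/usb_usual.h>

/* Sketch: clamp the SCSI device's max transfer size from quirk flags. */
static void apply_sector_limit(struct request_queue *q, unsigned long flags)
{
	if (flags & US_FL_MAX_SECTORS_64)
		blk_queue_max_hw_sectors(q, 64);
	else if (flags & US_FL_MAX_SECTORS_240)
		blk_queue_max_hw_sectors(q, 240);	/* ASM1053 workaround */
}
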
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 5600c33fcadb..6c10c888f35f 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c | |||
@@ -479,7 +479,8 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags) | |||
479 | US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT | | 479 | US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT | |
480 | US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 | | 480 | US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 | |
481 | US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE | | 481 | US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE | |
482 | US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES); | 482 | US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES | |
483 | US_FL_MAX_SECTORS_240); | ||
483 | 484 | ||
484 | p = quirks; | 485 | p = quirks; |
485 | while (*p) { | 486 | while (*p) { |
@@ -520,6 +521,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags) | |||
520 | case 'f': | 521 | case 'f': |
521 | f |= US_FL_NO_REPORT_OPCODES; | 522 | f |= US_FL_NO_REPORT_OPCODES; |
522 | break; | 523 | break; |
524 | case 'g': | ||
525 | f |= US_FL_MAX_SECTORS_240; | ||
526 | break; | ||
523 | case 'h': | 527 | case 'h': |
524 | f |= US_FL_CAPACITY_HEURISTICS; | 528 | f |= US_FL_CAPACITY_HEURISTICS; |
525 | break; | 529 | break; |
@@ -1080,7 +1084,7 @@ static int storage_probe(struct usb_interface *intf, | |||
1080 | 1084 | ||
1081 | /* If uas is enabled and this device can do uas then ignore it. */ | 1085 | /* If uas is enabled and this device can do uas then ignore it. */ |
1082 | #if IS_ENABLED(CONFIG_USB_UAS) | 1086 | #if IS_ENABLED(CONFIG_USB_UAS) |
1083 | if (uas_use_uas_driver(intf, id)) | 1087 | if (uas_use_uas_driver(intf, id, NULL)) |
1084 | return -ENXIO; | 1088 | return -ENXIO; |
1085 | #endif | 1089 | #endif |
1086 | 1090 | ||
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 69fab0fd15ae..e9851add6f4e 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c | |||
@@ -907,8 +907,14 @@ static void vfio_pci_request(void *device_data, unsigned int count) | |||
907 | mutex_lock(&vdev->igate); | 907 | mutex_lock(&vdev->igate); |
908 | 908 | ||
909 | if (vdev->req_trigger) { | 909 | if (vdev->req_trigger) { |
910 | dev_dbg(&vdev->pdev->dev, "Requesting device from user\n"); | 910 | if (!(count % 10)) |
911 | dev_notice_ratelimited(&vdev->pdev->dev, | ||
912 | "Relaying device request to user (#%u)\n", | ||
913 | count); | ||
911 | eventfd_signal(vdev->req_trigger, 1); | 914 | eventfd_signal(vdev->req_trigger, 1); |
915 | } else if (count == 0) { | ||
916 | dev_warn(&vdev->pdev->dev, | ||
917 | "No device request channel registered, blocked until released by user\n"); | ||
912 | } | 918 | } |
913 | 919 | ||
914 | mutex_unlock(&vdev->igate); | 920 | mutex_unlock(&vdev->igate); |
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 0d336625ac71..e1278fe04b1e 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c | |||
@@ -710,6 +710,8 @@ void *vfio_del_group_dev(struct device *dev) | |||
710 | void *device_data = device->device_data; | 710 | void *device_data = device->device_data; |
711 | struct vfio_unbound_dev *unbound; | 711 | struct vfio_unbound_dev *unbound; |
712 | unsigned int i = 0; | 712 | unsigned int i = 0; |
713 | long ret; | ||
714 | bool interrupted = false; | ||
713 | 715 | ||
714 | /* | 716 | /* |
715 | * The group exists so long as we have a device reference. Get | 717 | * The group exists so long as we have a device reference. Get |
@@ -755,9 +757,22 @@ void *vfio_del_group_dev(struct device *dev) | |||
755 | 757 | ||
756 | vfio_device_put(device); | 758 | vfio_device_put(device); |
757 | 759 | ||
758 | } while (wait_event_interruptible_timeout(vfio.release_q, | 760 | if (interrupted) { |
759 | !vfio_dev_present(group, dev), | 761 | ret = wait_event_timeout(vfio.release_q, |
760 | HZ * 10) <= 0); | 762 | !vfio_dev_present(group, dev), HZ * 10); |
763 | } else { | ||
764 | ret = wait_event_interruptible_timeout(vfio.release_q, | ||
765 | !vfio_dev_present(group, dev), HZ * 10); | ||
766 | if (ret == -ERESTARTSYS) { | ||
767 | interrupted = true; | ||
768 | dev_warn(dev, | ||
769 | "Device is currently in use, task" | ||
770 | " \"%s\" (%d) " | ||
771 | "blocked until device is released", | ||
772 | current->comm, task_pid_nr(current)); | ||
773 | } | ||
774 | } | ||
775 | } while (ret <= 0); | ||
761 | 776 | ||
762 | vfio_group_put(group); | 777 | vfio_group_put(group); |
763 | 778 | ||
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c index 5db43fc100a4..7dd46312c180 100644 --- a/drivers/xen/events/events_2l.c +++ b/drivers/xen/events/events_2l.c | |||
@@ -345,6 +345,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id) | |||
345 | return IRQ_HANDLED; | 345 | return IRQ_HANDLED; |
346 | } | 346 | } |
347 | 347 | ||
348 | static void evtchn_2l_resume(void) | ||
349 | { | ||
350 | int i; | ||
351 | |||
352 | for_each_online_cpu(i) | ||
353 | memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) * | ||
354 | EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD); | ||
355 | } | ||
356 | |||
348 | static const struct evtchn_ops evtchn_ops_2l = { | 357 | static const struct evtchn_ops evtchn_ops_2l = { |
349 | .max_channels = evtchn_2l_max_channels, | 358 | .max_channels = evtchn_2l_max_channels, |
350 | .nr_channels = evtchn_2l_max_channels, | 359 | .nr_channels = evtchn_2l_max_channels, |
@@ -356,6 +365,7 @@ static const struct evtchn_ops evtchn_ops_2l = { | |||
356 | .mask = evtchn_2l_mask, | 365 | .mask = evtchn_2l_mask, |
357 | .unmask = evtchn_2l_unmask, | 366 | .unmask = evtchn_2l_unmask, |
358 | .handle_events = evtchn_2l_handle_events, | 367 | .handle_events = evtchn_2l_handle_events, |
368 | .resume = evtchn_2l_resume, | ||
359 | }; | 369 | }; |
360 | 370 | ||
361 | void __init xen_evtchn_2l_init(void) | 371 | void __init xen_evtchn_2l_init(void) |
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 70fba973a107..2b8553bd8715 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
@@ -529,8 +529,8 @@ static unsigned int __startup_pirq(unsigned int irq) | |||
529 | if (rc) | 529 | if (rc) |
530 | goto err; | 530 | goto err; |
531 | 531 | ||
532 | bind_evtchn_to_cpu(evtchn, 0); | ||
533 | info->evtchn = evtchn; | 532 | info->evtchn = evtchn; |
533 | bind_evtchn_to_cpu(evtchn, 0); | ||
534 | 534 | ||
535 | rc = xen_evtchn_port_setup(info); | 535 | rc = xen_evtchn_port_setup(info); |
536 | if (rc) | 536 | if (rc) |
@@ -1279,8 +1279,9 @@ void rebind_evtchn_irq(int evtchn, int irq) | |||
1279 | 1279 | ||
1280 | mutex_unlock(&irq_mapping_update_lock); | 1280 | mutex_unlock(&irq_mapping_update_lock); |
1281 | 1281 | ||
1282 | /* new event channels are always bound to cpu 0 */ | 1282 | bind_evtchn_to_cpu(evtchn, info->cpu); |
1283 | irq_set_affinity(irq, cpumask_of(0)); | 1283 | /* This will be deferred until interrupt is processed */ |
1284 | irq_set_affinity(irq, cpumask_of(info->cpu)); | ||
1284 | 1285 | ||
1285 | /* Unmask the event channel. */ | 1286 | /* Unmask the event channel. */ |
1286 | enable_irq(irq); | 1287 | enable_irq(irq); |
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index d5bb1a33d0a3..89274850741b 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
@@ -327,30 +327,10 @@ static int map_grant_pages(struct grant_map *map) | |||
327 | return err; | 327 | return err; |
328 | } | 328 | } |
329 | 329 | ||
330 | struct unmap_grant_pages_callback_data | ||
331 | { | ||
332 | struct completion completion; | ||
333 | int result; | ||
334 | }; | ||
335 | |||
336 | static void unmap_grant_callback(int result, | ||
337 | struct gntab_unmap_queue_data *data) | ||
338 | { | ||
339 | struct unmap_grant_pages_callback_data* d = data->data; | ||
340 | |||
341 | d->result = result; | ||
342 | complete(&d->completion); | ||
343 | } | ||
344 | |||
345 | static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) | 330 | static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) |
346 | { | 331 | { |
347 | int i, err = 0; | 332 | int i, err = 0; |
348 | struct gntab_unmap_queue_data unmap_data; | 333 | struct gntab_unmap_queue_data unmap_data; |
349 | struct unmap_grant_pages_callback_data data; | ||
350 | |||
351 | init_completion(&data.completion); | ||
352 | unmap_data.data = &data; | ||
353 | unmap_data.done= &unmap_grant_callback; | ||
354 | 334 | ||
355 | if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { | 335 | if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { |
356 | int pgno = (map->notify.addr >> PAGE_SHIFT); | 336 | int pgno = (map->notify.addr >> PAGE_SHIFT); |
@@ -367,11 +347,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) | |||
367 | unmap_data.pages = map->pages + offset; | 347 | unmap_data.pages = map->pages + offset; |
368 | unmap_data.count = pages; | 348 | unmap_data.count = pages; |
369 | 349 | ||
370 | gnttab_unmap_refs_async(&unmap_data); | 350 | err = gnttab_unmap_refs_sync(&unmap_data); |
371 | 351 | if (err) | |
372 | wait_for_completion(&data.completion); | 352 | return err; |
373 | if (data.result) | ||
374 | return data.result; | ||
375 | 353 | ||
376 | for (i = 0; i < pages; i++) { | 354 | for (i = 0; i < pages; i++) { |
377 | if (map->unmap_ops[offset+i].status) | 355 | if (map->unmap_ops[offset+i].status) |
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 17972fbacddc..b1c7170e5c9e 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c | |||
@@ -123,6 +123,11 @@ struct gnttab_ops { | |||
123 | int (*query_foreign_access)(grant_ref_t ref); | 123 | int (*query_foreign_access)(grant_ref_t ref); |
124 | }; | 124 | }; |
125 | 125 | ||
126 | struct unmap_refs_callback_data { | ||
127 | struct completion completion; | ||
128 | int result; | ||
129 | }; | ||
130 | |||
126 | static struct gnttab_ops *gnttab_interface; | 131 | static struct gnttab_ops *gnttab_interface; |
127 | 132 | ||
128 | static int grant_table_version; | 133 | static int grant_table_version; |
@@ -863,6 +868,29 @@ void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item) | |||
863 | } | 868 | } |
864 | EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async); | 869 | EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async); |
865 | 870 | ||
871 | static void unmap_refs_callback(int result, | ||
872 | struct gntab_unmap_queue_data *data) | ||
873 | { | ||
874 | struct unmap_refs_callback_data *d = data->data; | ||
875 | |||
876 | d->result = result; | ||
877 | complete(&d->completion); | ||
878 | } | ||
879 | |||
880 | int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item) | ||
881 | { | ||
882 | struct unmap_refs_callback_data data; | ||
883 | |||
884 | init_completion(&data.completion); | ||
885 | item->data = &data; | ||
886 | item->done = &unmap_refs_callback; | ||
887 | gnttab_unmap_refs_async(item); | ||
888 | wait_for_completion(&data.completion); | ||
889 | |||
890 | return data.result; | ||
891 | } | ||
892 | EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync); | ||
893 | |||
866 | static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) | 894 | static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes) |
867 | { | 895 | { |
868 | int rc; | 896 | int rc; |
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index bf1940706422..9e6a85104a20 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -131,6 +131,8 @@ static void do_suspend(void) | |||
131 | goto out_resume; | 131 | goto out_resume; |
132 | } | 132 | } |
133 | 133 | ||
134 | xen_arch_suspend(); | ||
135 | |||
134 | si.cancelled = 1; | 136 | si.cancelled = 1; |
135 | 137 | ||
136 | err = stop_machine(xen_suspend, &si, cpumask_of(0)); | 138 | err = stop_machine(xen_suspend, &si, cpumask_of(0)); |
@@ -148,11 +150,12 @@ static void do_suspend(void) | |||
148 | si.cancelled = 1; | 150 | si.cancelled = 1; |
149 | } | 151 | } |
150 | 152 | ||
153 | xen_arch_resume(); | ||
154 | |||
151 | out_resume: | 155 | out_resume: |
152 | if (!si.cancelled) { | 156 | if (!si.cancelled) |
153 | xen_arch_resume(); | ||
154 | xs_resume(); | 157 | xs_resume(); |
155 | } else | 158 | else |
156 | xs_suspend_cancel(); | 159 | xs_suspend_cancel(); |
157 | 160 | ||
158 | dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE); | 161 | dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE); |
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 810ad419e34c..4c549323c605 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c | |||
@@ -235,7 +235,7 @@ retry: | |||
235 | #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) | 235 | #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT)) |
236 | #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) | 236 | #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT) |
237 | while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { | 237 | while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { |
238 | xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order); | 238 | xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order); |
239 | if (xen_io_tlb_start) | 239 | if (xen_io_tlb_start) |
240 | break; | 240 | break; |
241 | order--; | 241 | order--; |
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index 75fe3d466515..9c234209d8b5 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c | |||
@@ -16,8 +16,8 @@ | |||
16 | #include "conf_space.h" | 16 | #include "conf_space.h" |
17 | #include "conf_space_quirks.h" | 17 | #include "conf_space_quirks.h" |
18 | 18 | ||
19 | bool permissive; | 19 | bool xen_pcibk_permissive; |
20 | module_param(permissive, bool, 0644); | 20 | module_param_named(permissive, xen_pcibk_permissive, bool, 0644); |
21 | 21 | ||
22 | /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, | 22 | /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, |
23 | * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */ | 23 | * xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */ |
@@ -262,7 +262,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value) | |||
262 | * This means that some fields may still be read-only because | 262 | * This means that some fields may still be read-only because |
263 | * they have entries in the config_field list that intercept | 263 | * they have entries in the config_field list that intercept |
264 | * the write and do nothing. */ | 264 | * the write and do nothing. */ |
265 | if (dev_data->permissive || permissive) { | 265 | if (dev_data->permissive || xen_pcibk_permissive) { |
266 | switch (size) { | 266 | switch (size) { |
267 | case 1: | 267 | case 1: |
268 | err = pci_write_config_byte(dev, offset, | 268 | err = pci_write_config_byte(dev, offset, |
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h index 2e1d73d1d5d0..62461a8ba1d6 100644 --- a/drivers/xen/xen-pciback/conf_space.h +++ b/drivers/xen/xen-pciback/conf_space.h | |||
@@ -64,7 +64,7 @@ struct config_field_entry { | |||
64 | void *data; | 64 | void *data; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | extern bool permissive; | 67 | extern bool xen_pcibk_permissive; |
68 | 68 | ||
69 | #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) | 69 | #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) |
70 | 70 | ||
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c index c2260a0456c9..ad3d17d29c81 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c | |||
@@ -118,7 +118,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) | |||
118 | 118 | ||
119 | cmd->val = value; | 119 | cmd->val = value; |
120 | 120 | ||
121 | if (!permissive && (!dev_data || !dev_data->permissive)) | 121 | if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive)) |
122 | return 0; | 122 | return 0; |
123 | 123 | ||
124 | /* Only allow the guest to control certain bits. */ | 124 | /* Only allow the guest to control certain bits. */ |
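
The rename above works without breaking existing users because module_param_named() decouples the user-visible parameter name from the C symbol: the option is still passed as permissive=1, while the variable gains the xen_pcibk_ prefix. A minimal sketch of the same pattern in an unrelated, made-up module (module and symbol names are invented for illustration):

/* sketch_param.c - minimal sketch of module_param_named(); the module and
 * symbol names here are invented, this is not xen-pciback code. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static bool sketch_permissive;  /* prefixed internal symbol */

/* Still shows up as /sys/module/sketch_param/parameters/permissive */
module_param_named(permissive, sketch_permissive, bool, 0644);
MODULE_PARM_DESC(permissive, "Allow writes that would otherwise be filtered");

static int __init sketch_init(void)
{
        pr_info("sketch_param: permissive=%d\n", sketch_permissive);
        return 0;
}

static void __exit sketch_exit(void)
{
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");
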
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 564b31584860..5390a674b5e3 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #include <xen/xen.h> | 57 | #include <xen/xen.h> |
58 | #include <xen/xenbus.h> | 58 | #include <xen/xenbus.h> |
59 | #include <xen/events.h> | 59 | #include <xen/events.h> |
60 | #include <xen/xen-ops.h> | ||
60 | #include <xen/page.h> | 61 | #include <xen/page.h> |
61 | 62 | ||
62 | #include <xen/hvm.h> | 63 | #include <xen/hvm.h> |
@@ -735,6 +736,30 @@ static int __init xenstored_local_init(void) | |||
735 | return err; | 736 | return err; |
736 | } | 737 | } |
737 | 738 | ||
739 | static int xenbus_resume_cb(struct notifier_block *nb, | ||
740 | unsigned long action, void *data) | ||
741 | { | ||
742 | int err = 0; | ||
743 | |||
744 | if (xen_hvm_domain()) { | ||
745 | uint64_t v; | ||
746 | |||
747 | err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); | ||
748 | if (!err && v) | ||
749 | xen_store_evtchn = v; | ||
750 | else | ||
751 | pr_warn("Cannot update xenstore event channel: %d\n", | ||
752 | err); | ||
753 | } else | ||
754 | xen_store_evtchn = xen_start_info->store_evtchn; | ||
755 | |||
756 | return err; | ||
757 | } | ||
758 | |||
759 | static struct notifier_block xenbus_resume_nb = { | ||
760 | .notifier_call = xenbus_resume_cb, | ||
761 | }; | ||
762 | |||
738 | static int __init xenbus_init(void) | 763 | static int __init xenbus_init(void) |
739 | { | 764 | { |
740 | int err = 0; | 765 | int err = 0; |
@@ -793,6 +818,10 @@ static int __init xenbus_init(void) | |||
793 | goto out_error; | 818 | goto out_error; |
794 | } | 819 | } |
795 | 820 | ||
821 | if ((xen_store_domain_type != XS_LOCAL) && | ||
822 | (xen_store_domain_type != XS_UNKNOWN)) | ||
823 | xen_resume_notifier_register(&xenbus_resume_nb); | ||
824 | |||
796 | #ifdef CONFIG_XEN_COMPAT_XENFS | 825 | #ifdef CONFIG_XEN_COMPAT_XENFS |
797 | /* | 826 | /* |
798 | * Create xenfs mountpoint in /proc for compatibility with | 827 | * Create xenfs mountpoint in /proc for compatibility with |
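
For context on the xenbus change above: a notifier_block is just a struct carrying a callback pointer, and the subsystem that owns the chain (here the Xen resume path) invokes every registered callback at the right moment. A stripped-down sketch of the shape of such a callback follows; my_subsys_register_notifier() is a placeholder for whichever registration helper a given chain provides (this patch uses xen_resume_notifier_register()).

/* Sketch of the notifier_block callback pattern; the registration helper
 * named in the comment below is a placeholder, not a real API. */
#include <linux/notifier.h>
#include <linux/printk.h>

static int my_resume_cb(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        /* Re-read any state the hypervisor or firmware may have changed
         * across suspend, then let the chain continue. */
        pr_info("resumed, refreshing cached state\n");
        return NOTIFY_OK;
}

static struct notifier_block my_resume_nb = {
        .notifier_call = my_resume_cb,
};

/* Somewhere in init code:
 *      my_subsys_register_notifier(&my_resume_nb);
 */
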
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index cde698a07d21..a2ae42720a6a 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c | |||
@@ -1802,6 +1802,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev) | |||
1802 | set_nlink(inode, btrfs_stack_inode_nlink(inode_item)); | 1802 | set_nlink(inode, btrfs_stack_inode_nlink(inode_item)); |
1803 | inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item)); | 1803 | inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item)); |
1804 | BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item); | 1804 | BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item); |
1805 | BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item); | ||
1806 | |||
1805 | inode->i_version = btrfs_stack_inode_sequence(inode_item); | 1807 | inode->i_version = btrfs_stack_inode_sequence(inode_item); |
1806 | inode->i_rdev = 0; | 1808 | inode->i_rdev = 0; |
1807 | *rdev = btrfs_stack_inode_rdev(inode_item); | 1809 | *rdev = btrfs_stack_inode_rdev(inode_item); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1eef4ee01d1a..0ec8e228b89f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -3178,8 +3178,8 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, | |||
3178 | bi = btrfs_item_ptr_offset(leaf, path->slots[0]); | 3178 | bi = btrfs_item_ptr_offset(leaf, path->slots[0]); |
3179 | write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); | 3179 | write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); |
3180 | btrfs_mark_buffer_dirty(leaf); | 3180 | btrfs_mark_buffer_dirty(leaf); |
3181 | btrfs_release_path(path); | ||
3182 | fail: | 3181 | fail: |
3182 | btrfs_release_path(path); | ||
3183 | if (ret) | 3183 | if (ret) |
3184 | btrfs_abort_transaction(trans, root, ret); | 3184 | btrfs_abort_transaction(trans, root, ret); |
3185 | return ret; | 3185 | return ret; |
@@ -3305,8 +3305,7 @@ again: | |||
3305 | 3305 | ||
3306 | spin_lock(&block_group->lock); | 3306 | spin_lock(&block_group->lock); |
3307 | if (block_group->cached != BTRFS_CACHE_FINISHED || | 3307 | if (block_group->cached != BTRFS_CACHE_FINISHED || |
3308 | !btrfs_test_opt(root, SPACE_CACHE) || | 3308 | !btrfs_test_opt(root, SPACE_CACHE)) { |
3309 | block_group->delalloc_bytes) { | ||
3310 | /* | 3309 | /* |
3311 | * don't bother trying to write stuff out _if_ | 3310 | * don't bother trying to write stuff out _if_ |
3312 | * a) we're not cached, | 3311 | * a) we're not cached, |
@@ -3408,17 +3407,14 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans, | |||
3408 | int loops = 0; | 3407 | int loops = 0; |
3409 | 3408 | ||
3410 | spin_lock(&cur_trans->dirty_bgs_lock); | 3409 | spin_lock(&cur_trans->dirty_bgs_lock); |
3411 | if (!list_empty(&cur_trans->dirty_bgs)) { | 3410 | if (list_empty(&cur_trans->dirty_bgs)) { |
3412 | list_splice_init(&cur_trans->dirty_bgs, &dirty); | 3411 | spin_unlock(&cur_trans->dirty_bgs_lock); |
3412 | return 0; | ||
3413 | } | 3413 | } |
3414 | list_splice_init(&cur_trans->dirty_bgs, &dirty); | ||
3414 | spin_unlock(&cur_trans->dirty_bgs_lock); | 3415 | spin_unlock(&cur_trans->dirty_bgs_lock); |
3415 | 3416 | ||
3416 | again: | 3417 | again: |
3417 | if (list_empty(&dirty)) { | ||
3418 | btrfs_free_path(path); | ||
3419 | return 0; | ||
3420 | } | ||
3421 | |||
3422 | /* | 3418 | /* |
3423 | * make sure all the block groups on our dirty list actually | 3419 | * make sure all the block groups on our dirty list actually |
3424 | * exist | 3420 | * exist |
@@ -3431,18 +3427,16 @@ again: | |||
3431 | return -ENOMEM; | 3427 | return -ENOMEM; |
3432 | } | 3428 | } |
3433 | 3429 | ||
3430 | /* | ||
3431 | * cache_write_mutex is here only to save us from balance or automatic | ||
3432 | * removal of empty block groups deleting this block group while we are | ||
3433 | * writing out the cache | ||
3434 | */ | ||
3435 | mutex_lock(&trans->transaction->cache_write_mutex); | ||
3434 | while (!list_empty(&dirty)) { | 3436 | while (!list_empty(&dirty)) { |
3435 | cache = list_first_entry(&dirty, | 3437 | cache = list_first_entry(&dirty, |
3436 | struct btrfs_block_group_cache, | 3438 | struct btrfs_block_group_cache, |
3437 | dirty_list); | 3439 | dirty_list); |
3438 | |||
3439 | /* | ||
3440 | * cache_write_mutex is here only to save us from balance | ||
3441 | * deleting this block group while we are writing out the | ||
3442 | * cache | ||
3443 | */ | ||
3444 | mutex_lock(&trans->transaction->cache_write_mutex); | ||
3445 | |||
3446 | /* | 3440 | /* |
3447 | * this can happen if something re-dirties a block | 3441 | * this can happen if something re-dirties a block |
3448 | * group that is already under IO. Just wait for it to | 3442 | * group that is already under IO. Just wait for it to |
@@ -3495,7 +3489,6 @@ again: | |||
3495 | } | 3489 | } |
3496 | if (!ret) | 3490 | if (!ret) |
3497 | ret = write_one_cache_group(trans, root, path, cache); | 3491 | ret = write_one_cache_group(trans, root, path, cache); |
3498 | mutex_unlock(&trans->transaction->cache_write_mutex); | ||
3499 | 3492 | ||
3500 | /* if its not on the io list, we need to put the block group */ | 3493 | /* if its not on the io list, we need to put the block group */ |
3501 | if (should_put) | 3494 | if (should_put) |
@@ -3503,7 +3496,16 @@ again: | |||
3503 | 3496 | ||
3504 | if (ret) | 3497 | if (ret) |
3505 | break; | 3498 | break; |
3499 | |||
3500 | /* | ||
3501 | * Avoid blocking other tasks for too long. It might even save | ||
3502 | * us from writing caches for block groups that are going to be | ||
3503 | * removed. | ||
3504 | */ | ||
3505 | mutex_unlock(&trans->transaction->cache_write_mutex); | ||
3506 | mutex_lock(&trans->transaction->cache_write_mutex); | ||
3506 | } | 3507 | } |
3508 | mutex_unlock(&trans->transaction->cache_write_mutex); | ||
3507 | 3509 | ||
3508 | /* | 3510 | /* |
3509 | * go through delayed refs for all the stuff we've just kicked off | 3511 | * go through delayed refs for all the stuff we've just kicked off |
@@ -3514,8 +3516,15 @@ again: | |||
3514 | loops++; | 3516 | loops++; |
3515 | spin_lock(&cur_trans->dirty_bgs_lock); | 3517 | spin_lock(&cur_trans->dirty_bgs_lock); |
3516 | list_splice_init(&cur_trans->dirty_bgs, &dirty); | 3518 | list_splice_init(&cur_trans->dirty_bgs, &dirty); |
3519 | /* | ||
3520 | * dirty_bgs_lock protects us from concurrent block group | ||
3521 | * deletes too (not just cache_write_mutex). | ||
3522 | */ | ||
3523 | if (!list_empty(&dirty)) { | ||
3524 | spin_unlock(&cur_trans->dirty_bgs_lock); | ||
3525 | goto again; | ||
3526 | } | ||
3517 | spin_unlock(&cur_trans->dirty_bgs_lock); | 3527 | spin_unlock(&cur_trans->dirty_bgs_lock); |
3518 | goto again; | ||
3519 | } | 3528 | } |
3520 | 3529 | ||
3521 | btrfs_free_path(path); | 3530 | btrfs_free_path(path); |
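
The reworked locking above takes cache_write_mutex once for the whole batch, but drops and immediately re-takes it at the end of every iteration so that tasks waiting on the mutex (such as block-group removal) are not starved for the duration of the batch. A userspace sketch of the same idea using pthreads; all names are invented for illustration and this is not btrfs code.

/* Sketch of "hold a lock across a batch, but yield it between items". */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t batch_lock = PTHREAD_MUTEX_INITIALIZER;

static void process_batch(int *items, int n)
{
        pthread_mutex_lock(&batch_lock);
        for (int i = 0; i < n; i++) {
                printf("processing item %d under the lock\n", items[i]);

                /* Briefly release the lock so other threads waiting on
                 * batch_lock can slip in between items. */
                pthread_mutex_unlock(&batch_lock);
                pthread_mutex_lock(&batch_lock);
        }
        pthread_mutex_unlock(&batch_lock);
}

int main(void)
{
        int items[] = { 1, 2, 3 };

        process_batch(items, 3);
        return 0;
}
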
@@ -7537,7 +7546,7 @@ static void unuse_block_rsv(struct btrfs_fs_info *fs_info, | |||
7537 | * returns the key for the extent through ins, and a tree buffer for | 7546 | * returns the key for the extent through ins, and a tree buffer for |
7538 | * the first block of the extent through buf. | 7547 | * the first block of the extent through buf. |
7539 | * | 7548 | * |
7540 | * returns the tree buffer or NULL. | 7549 | * returns the tree buffer or an ERR_PTR on error. |
7541 | */ | 7550 | */ |
7542 | struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, | 7551 | struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, |
7543 | struct btrfs_root *root, | 7552 | struct btrfs_root *root, |
@@ -7548,6 +7557,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, | |||
7548 | struct btrfs_key ins; | 7557 | struct btrfs_key ins; |
7549 | struct btrfs_block_rsv *block_rsv; | 7558 | struct btrfs_block_rsv *block_rsv; |
7550 | struct extent_buffer *buf; | 7559 | struct extent_buffer *buf; |
7560 | struct btrfs_delayed_extent_op *extent_op; | ||
7551 | u64 flags = 0; | 7561 | u64 flags = 0; |
7552 | int ret; | 7562 | int ret; |
7553 | u32 blocksize = root->nodesize; | 7563 | u32 blocksize = root->nodesize; |
@@ -7568,13 +7578,14 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, | |||
7568 | 7578 | ||
7569 | ret = btrfs_reserve_extent(root, blocksize, blocksize, | 7579 | ret = btrfs_reserve_extent(root, blocksize, blocksize, |
7570 | empty_size, hint, &ins, 0, 0); | 7580 | empty_size, hint, &ins, 0, 0); |
7571 | if (ret) { | 7581 | if (ret) |
7572 | unuse_block_rsv(root->fs_info, block_rsv, blocksize); | 7582 | goto out_unuse; |
7573 | return ERR_PTR(ret); | ||
7574 | } | ||
7575 | 7583 | ||
7576 | buf = btrfs_init_new_buffer(trans, root, ins.objectid, level); | 7584 | buf = btrfs_init_new_buffer(trans, root, ins.objectid, level); |
7577 | BUG_ON(IS_ERR(buf)); /* -ENOMEM */ | 7585 | if (IS_ERR(buf)) { |
7586 | ret = PTR_ERR(buf); | ||
7587 | goto out_free_reserved; | ||
7588 | } | ||
7578 | 7589 | ||
7579 | if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { | 7590 | if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { |
7580 | if (parent == 0) | 7591 | if (parent == 0) |
@@ -7584,9 +7595,11 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, | |||
7584 | BUG_ON(parent > 0); | 7595 | BUG_ON(parent > 0); |
7585 | 7596 | ||
7586 | if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { | 7597 | if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { |
7587 | struct btrfs_delayed_extent_op *extent_op; | ||
7588 | extent_op = btrfs_alloc_delayed_extent_op(); | 7598 | extent_op = btrfs_alloc_delayed_extent_op(); |
7589 | BUG_ON(!extent_op); /* -ENOMEM */ | 7599 | if (!extent_op) { |
7600 | ret = -ENOMEM; | ||
7601 | goto out_free_buf; | ||
7602 | } | ||
7590 | if (key) | 7603 | if (key) |
7591 | memcpy(&extent_op->key, key, sizeof(extent_op->key)); | 7604 | memcpy(&extent_op->key, key, sizeof(extent_op->key)); |
7592 | else | 7605 | else |
@@ -7601,13 +7614,24 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, | |||
7601 | extent_op->level = level; | 7614 | extent_op->level = level; |
7602 | 7615 | ||
7603 | ret = btrfs_add_delayed_tree_ref(root->fs_info, trans, | 7616 | ret = btrfs_add_delayed_tree_ref(root->fs_info, trans, |
7604 | ins.objectid, | 7617 | ins.objectid, ins.offset, |
7605 | ins.offset, parent, root_objectid, | 7618 | parent, root_objectid, level, |
7606 | level, BTRFS_ADD_DELAYED_EXTENT, | 7619 | BTRFS_ADD_DELAYED_EXTENT, |
7607 | extent_op, 0); | 7620 | extent_op, 0); |
7608 | BUG_ON(ret); /* -ENOMEM */ | 7621 | if (ret) |
7622 | goto out_free_delayed; | ||
7609 | } | 7623 | } |
7610 | return buf; | 7624 | return buf; |
7625 | |||
7626 | out_free_delayed: | ||
7627 | btrfs_free_delayed_extent_op(extent_op); | ||
7628 | out_free_buf: | ||
7629 | free_extent_buffer(buf); | ||
7630 | out_free_reserved: | ||
7631 | btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0); | ||
7632 | out_unuse: | ||
7633 | unuse_block_rsv(root->fs_info, block_rsv, blocksize); | ||
7634 | return ERR_PTR(ret); | ||
7611 | } | 7635 | } |
7612 | 7636 | ||
7613 | struct walk_control { | 7637 | struct walk_control { |
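
The new error handling in btrfs_alloc_tree_block() above replaces the BUG_ON() calls with the usual staged-goto unwind: each failure jumps to a label that releases only what has already been acquired, and the labels fall through in reverse order of acquisition. A generic sketch of the idiom; the resources and helpers here are stand-ins, not btrfs code.

/* Generic sketch of staged goto-based cleanup, mirroring the shape of the
 * reworked btrfs_alloc_tree_block() error paths. */
#include <stdio.h>
#include <stdlib.h>

static int do_setup(void)
{
        void *reservation, *buffer, *op;
        int ret;

        reservation = malloc(32);               /* step 1: reserve space   */
        if (!reservation) {
                ret = -1;
                goto out;
        }
        buffer = malloc(64);                    /* step 2: init new buffer */
        if (!buffer) {
                ret = -2;
                goto out_free_reserved;
        }
        op = malloc(16);                        /* step 3: delayed op      */
        if (!op) {
                ret = -3;
                goto out_free_buf;
        }
        printf("all three steps succeeded\n");
        free(op);
        free(buffer);
        free(reservation);
        return 0;

out_free_buf:
        free(buffer);                           /* undo step 2 */
out_free_reserved:
        free(reservation);                      /* undo step 1 */
out:
        return ret;
}

int main(void)
{
        return do_setup() ? 1 : 0;
}
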
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 782f3bc4651d..43af5a61ad25 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -4560,36 +4560,37 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb) | |||
4560 | do { | 4560 | do { |
4561 | index--; | 4561 | index--; |
4562 | page = eb->pages[index]; | 4562 | page = eb->pages[index]; |
4563 | if (page && mapped) { | 4563 | if (!page) |
4564 | continue; | ||
4565 | if (mapped) | ||
4564 | spin_lock(&page->mapping->private_lock); | 4566 | spin_lock(&page->mapping->private_lock); |
4567 | /* | ||
4568 | * We do this since we'll remove the pages after we've | ||
4569 | * removed the eb from the radix tree, so we could race | ||
4570 | * and have this page now attached to the new eb. So | ||
4571 | * only clear page_private if it's still connected to | ||
4572 | * this eb. | ||
4573 | */ | ||
4574 | if (PagePrivate(page) && | ||
4575 | page->private == (unsigned long)eb) { | ||
4576 | BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); | ||
4577 | BUG_ON(PageDirty(page)); | ||
4578 | BUG_ON(PageWriteback(page)); | ||
4565 | /* | 4579 | /* |
4566 | * We do this since we'll remove the pages after we've | 4580 | * We need to make sure we haven't been attached |
4567 | * removed the eb from the radix tree, so we could race | 4581 | * to a new eb. |
4568 | * and have this page now attached to the new eb. So | ||
4569 | * only clear page_private if it's still connected to | ||
4570 | * this eb. | ||
4571 | */ | 4582 | */ |
4572 | if (PagePrivate(page) && | 4583 | ClearPagePrivate(page); |
4573 | page->private == (unsigned long)eb) { | 4584 | set_page_private(page, 0); |
4574 | BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); | 4585 | /* One for the page private */ |
4575 | BUG_ON(PageDirty(page)); | ||
4576 | BUG_ON(PageWriteback(page)); | ||
4577 | /* | ||
4578 | * We need to make sure we haven't been attached | ||
4579 | * to a new eb. | ||
4580 | */ | ||
4581 | ClearPagePrivate(page); | ||
4582 | set_page_private(page, 0); | ||
4583 | /* One for the page private */ | ||
4584 | page_cache_release(page); | ||
4585 | } | ||
4586 | spin_unlock(&page->mapping->private_lock); | ||
4587 | |||
4588 | } | ||
4589 | if (page) { | ||
4590 | /* One for when we alloced the page */ | ||
4591 | page_cache_release(page); | 4586 | page_cache_release(page); |
4592 | } | 4587 | } |
4588 | |||
4589 | if (mapped) | ||
4590 | spin_unlock(&page->mapping->private_lock); | ||
4591 | |||
4592 | /* One for when we alloced the page */ | ||
4593 | page_cache_release(page); | ||
4593 | } while (index != 0); | 4594 | } while (index != 0); |
4594 | } | 4595 | } |
4595 | 4596 | ||
@@ -4870,6 +4871,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, | |||
4870 | mark_extent_buffer_accessed(exists, p); | 4871 | mark_extent_buffer_accessed(exists, p); |
4871 | goto free_eb; | 4872 | goto free_eb; |
4872 | } | 4873 | } |
4874 | exists = NULL; | ||
4873 | 4875 | ||
4874 | /* | 4876 | /* |
4875 | * Do this so attach doesn't complain and we need to | 4877 | * Do this so attach doesn't complain and we need to |
@@ -4933,12 +4935,12 @@ again: | |||
4933 | return eb; | 4935 | return eb; |
4934 | 4936 | ||
4935 | free_eb: | 4937 | free_eb: |
4938 | WARN_ON(!atomic_dec_and_test(&eb->refs)); | ||
4936 | for (i = 0; i < num_pages; i++) { | 4939 | for (i = 0; i < num_pages; i++) { |
4937 | if (eb->pages[i]) | 4940 | if (eb->pages[i]) |
4938 | unlock_page(eb->pages[i]); | 4941 | unlock_page(eb->pages[i]); |
4939 | } | 4942 | } |
4940 | 4943 | ||
4941 | WARN_ON(!atomic_dec_and_test(&eb->refs)); | ||
4942 | btrfs_release_extent_buffer(eb); | 4944 | btrfs_release_extent_buffer(eb); |
4943 | return exists; | 4945 | return exists; |
4944 | } | 4946 | } |
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 81fa75a8e1f3..5e020d76fd07 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -86,7 +86,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root, | |||
86 | 86 | ||
87 | mapping_set_gfp_mask(inode->i_mapping, | 87 | mapping_set_gfp_mask(inode->i_mapping, |
88 | mapping_gfp_mask(inode->i_mapping) & | 88 | mapping_gfp_mask(inode->i_mapping) & |
89 | ~(GFP_NOFS & ~__GFP_HIGHMEM)); | 89 | ~(__GFP_FS | __GFP_HIGHMEM)); |
90 | 90 | ||
91 | return inode; | 91 | return inode; |
92 | } | 92 | } |
@@ -1218,7 +1218,7 @@ out: | |||
1218 | * | 1218 | * |
1219 | * This function writes out a free space cache struct to disk for quick recovery | 1219 | * This function writes out a free space cache struct to disk for quick recovery |
1220 | * on mount. This will return 0 if it was successful in writing the cache out, | 1220 | * on mount. This will return 0 if it was successful in writing the cache out, |

1221 | * and -1 if it was not. | 1221 | * or an errno if it was not. |
1222 | */ | 1222 | */ |
1223 | static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, | 1223 | static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, |
1224 | struct btrfs_free_space_ctl *ctl, | 1224 | struct btrfs_free_space_ctl *ctl, |
@@ -1235,12 +1235,12 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, | |||
1235 | int must_iput = 0; | 1235 | int must_iput = 0; |
1236 | 1236 | ||
1237 | if (!i_size_read(inode)) | 1237 | if (!i_size_read(inode)) |
1238 | return -1; | 1238 | return -EIO; |
1239 | 1239 | ||
1240 | WARN_ON(io_ctl->pages); | 1240 | WARN_ON(io_ctl->pages); |
1241 | ret = io_ctl_init(io_ctl, inode, root, 1); | 1241 | ret = io_ctl_init(io_ctl, inode, root, 1); |
1242 | if (ret) | 1242 | if (ret) |
1243 | return -1; | 1243 | return ret; |
1244 | 1244 | ||
1245 | if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) { | 1245 | if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) { |
1246 | down_write(&block_group->data_rwsem); | 1246 | down_write(&block_group->data_rwsem); |
@@ -1258,7 +1258,9 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, | |||
1258 | } | 1258 | } |
1259 | 1259 | ||
1260 | /* Lock all pages first so we can lock the extent safely. */ | 1260 | /* Lock all pages first so we can lock the extent safely. */ |
1261 | io_ctl_prepare_pages(io_ctl, inode, 0); | 1261 | ret = io_ctl_prepare_pages(io_ctl, inode, 0); |
1262 | if (ret) | ||
1263 | goto out; | ||
1262 | 1264 | ||
1263 | lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, | 1265 | lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1, |
1264 | 0, &cached_state); | 1266 | 0, &cached_state); |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ada4d24ed11b..8bb013672aee 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -3632,25 +3632,28 @@ static void btrfs_read_locked_inode(struct inode *inode) | |||
3632 | BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); | 3632 | BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); |
3633 | BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); | 3633 | BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); |
3634 | 3634 | ||
3635 | inode->i_version = btrfs_inode_sequence(leaf, inode_item); | ||
3636 | inode->i_generation = BTRFS_I(inode)->generation; | ||
3637 | inode->i_rdev = 0; | ||
3638 | rdev = btrfs_inode_rdev(leaf, inode_item); | ||
3639 | |||
3640 | BTRFS_I(inode)->index_cnt = (u64)-1; | ||
3641 | BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); | ||
3642 | |||
3643 | cache_index: | ||
3635 | /* | 3644 | /* |
3636 | * If we were modified in the current generation and evicted from memory | 3645 | * If we were modified in the current generation and evicted from memory |
3637 | * and then re-read we need to do a full sync since we don't have any | 3646 | * and then re-read we need to do a full sync since we don't have any |
3638 | * idea about which extents were modified before we were evicted from | 3647 | * idea about which extents were modified before we were evicted from |
3639 | * cache. | 3648 | * cache. |
3649 | * | ||
3650 | * This is required for both inode re-read from disk and delayed inode | ||
3651 | * in delayed_nodes_tree. | ||
3640 | */ | 3652 | */ |
3641 | if (BTRFS_I(inode)->last_trans == root->fs_info->generation) | 3653 | if (BTRFS_I(inode)->last_trans == root->fs_info->generation) |
3642 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, | 3654 | set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, |
3643 | &BTRFS_I(inode)->runtime_flags); | 3655 | &BTRFS_I(inode)->runtime_flags); |
3644 | 3656 | ||
3645 | inode->i_version = btrfs_inode_sequence(leaf, inode_item); | ||
3646 | inode->i_generation = BTRFS_I(inode)->generation; | ||
3647 | inode->i_rdev = 0; | ||
3648 | rdev = btrfs_inode_rdev(leaf, inode_item); | ||
3649 | |||
3650 | BTRFS_I(inode)->index_cnt = (u64)-1; | ||
3651 | BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); | ||
3652 | |||
3653 | cache_index: | ||
3654 | path->slots[0]++; | 3657 | path->slots[0]++; |
3655 | if (inode->i_nlink != 1 || | 3658 | if (inode->i_nlink != 1 || |
3656 | path->slots[0] >= btrfs_header_nritems(leaf)) | 3659 | path->slots[0] >= btrfs_header_nritems(leaf)) |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index b05653f182c2..1c22c6518504 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -2410,7 +2410,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, | |||
2410 | "Attempt to delete subvolume %llu during send", | 2410 | "Attempt to delete subvolume %llu during send", |
2411 | dest->root_key.objectid); | 2411 | dest->root_key.objectid); |
2412 | err = -EPERM; | 2412 | err = -EPERM; |
2413 | goto out_dput; | 2413 | goto out_unlock_inode; |
2414 | } | 2414 | } |
2415 | 2415 | ||
2416 | d_invalidate(dentry); | 2416 | d_invalidate(dentry); |
@@ -2505,6 +2505,7 @@ out_up_write: | |||
2505 | root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); | 2505 | root_flags & ~BTRFS_ROOT_SUBVOL_DEAD); |
2506 | spin_unlock(&dest->root_item_lock); | 2506 | spin_unlock(&dest->root_item_lock); |
2507 | } | 2507 | } |
2508 | out_unlock_inode: | ||
2508 | mutex_unlock(&inode->i_mutex); | 2509 | mutex_unlock(&inode->i_mutex); |
2509 | if (!err) { | 2510 | if (!err) { |
2510 | shrink_dcache_sb(root->fs_info->sb); | 2511 | shrink_dcache_sb(root->fs_info->sb); |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8bcd2a007517..96aebf3bcd5b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -1058,6 +1058,7 @@ static int contains_pending_extent(struct btrfs_trans_handle *trans, | |||
1058 | struct extent_map *em; | 1058 | struct extent_map *em; |
1059 | struct list_head *search_list = &trans->transaction->pending_chunks; | 1059 | struct list_head *search_list = &trans->transaction->pending_chunks; |
1060 | int ret = 0; | 1060 | int ret = 0; |
1061 | u64 physical_start = *start; | ||
1061 | 1062 | ||
1062 | again: | 1063 | again: |
1063 | list_for_each_entry(em, search_list, list) { | 1064 | list_for_each_entry(em, search_list, list) { |
@@ -1068,9 +1069,9 @@ again: | |||
1068 | for (i = 0; i < map->num_stripes; i++) { | 1069 | for (i = 0; i < map->num_stripes; i++) { |
1069 | if (map->stripes[i].dev != device) | 1070 | if (map->stripes[i].dev != device) |
1070 | continue; | 1071 | continue; |
1071 | if (map->stripes[i].physical >= *start + len || | 1072 | if (map->stripes[i].physical >= physical_start + len || |
1072 | map->stripes[i].physical + em->orig_block_len <= | 1073 | map->stripes[i].physical + em->orig_block_len <= |
1073 | *start) | 1074 | physical_start) |
1074 | continue; | 1075 | continue; |
1075 | *start = map->stripes[i].physical + | 1076 | *start = map->stripes[i].physical + |
1076 | em->orig_block_len; | 1077 | em->orig_block_len; |
@@ -1193,8 +1194,14 @@ again: | |||
1193 | */ | 1194 | */ |
1194 | if (contains_pending_extent(trans, device, | 1195 | if (contains_pending_extent(trans, device, |
1195 | &search_start, | 1196 | &search_start, |
1196 | hole_size)) | 1197 | hole_size)) { |
1197 | hole_size = 0; | 1198 | if (key.offset >= search_start) { |
1199 | hole_size = key.offset - search_start; | ||
1200 | } else { | ||
1201 | WARN_ON_ONCE(1); | ||
1202 | hole_size = 0; | ||
1203 | } | ||
1204 | } | ||
1198 | 1205 | ||
1199 | if (hole_size > max_hole_size) { | 1206 | if (hole_size > max_hole_size) { |
1200 | max_hole_start = search_start; | 1207 | max_hole_start = search_start; |
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c index da94e41bdbf6..537356742091 100644 --- a/fs/configfs/mount.c +++ b/fs/configfs/mount.c | |||
@@ -173,5 +173,5 @@ MODULE_LICENSE("GPL"); | |||
173 | MODULE_VERSION("0.0.2"); | 173 | MODULE_VERSION("0.0.2"); |
174 | MODULE_DESCRIPTION("Simple RAM filesystem for user driven kernel subsystem configuration."); | 174 | MODULE_DESCRIPTION("Simple RAM filesystem for user driven kernel subsystem configuration."); |
175 | 175 | ||
176 | module_init(configfs_init); | 176 | core_initcall(configfs_init); |
177 | module_exit(configfs_exit); | 177 | module_exit(configfs_exit); |
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index 59fedbcf8798..86a2121828c3 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c | |||
@@ -121,7 +121,7 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor, | |||
121 | int len, i; | 121 | int len, i; |
122 | int err = -ENOMEM; | 122 | int err = -ENOMEM; |
123 | 123 | ||
124 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | 124 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); |
125 | if (!entry) | 125 | if (!entry) |
126 | return err; | 126 | return err; |
127 | 127 | ||
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 18228c201f7f..024f2284d3f6 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig | |||
@@ -64,8 +64,8 @@ config EXT4_FS_SECURITY | |||
64 | If you are not using a security module that requires using | 64 | If you are not using a security module that requires using |
65 | extended attributes for file security labels, say N. | 65 | extended attributes for file security labels, say N. |
66 | 66 | ||
67 | config EXT4_FS_ENCRYPTION | 67 | config EXT4_ENCRYPTION |
68 | bool "Ext4 Encryption" | 68 | tristate "Ext4 Encryption" |
69 | depends on EXT4_FS | 69 | depends on EXT4_FS |
70 | select CRYPTO_AES | 70 | select CRYPTO_AES |
71 | select CRYPTO_CBC | 71 | select CRYPTO_CBC |
@@ -81,6 +81,11 @@ config EXT4_FS_ENCRYPTION | |||
81 | efficient since it avoids caching the encrypted and | 81 | efficient since it avoids caching the encrypted and |
82 | decrypted pages in the page cache. | 82 | decrypted pages in the page cache. |
83 | 83 | ||
84 | config EXT4_FS_ENCRYPTION | ||
85 | bool | ||
86 | default y | ||
87 | depends on EXT4_ENCRYPTION | ||
88 | |||
84 | config EXT4_DEBUG | 89 | config EXT4_DEBUG |
85 | bool "EXT4 debugging support" | 90 | bool "EXT4 debugging support" |
86 | depends on EXT4_FS | 91 | depends on EXT4_FS |
diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c index ca2f5948c1ac..fded02f72299 100644 --- a/fs/ext4/crypto_fname.c +++ b/fs/ext4/crypto_fname.c | |||
@@ -66,6 +66,7 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx, | |||
66 | int res = 0; | 66 | int res = 0; |
67 | char iv[EXT4_CRYPTO_BLOCK_SIZE]; | 67 | char iv[EXT4_CRYPTO_BLOCK_SIZE]; |
68 | struct scatterlist sg[1]; | 68 | struct scatterlist sg[1]; |
69 | int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK); | ||
69 | char *workbuf; | 70 | char *workbuf; |
70 | 71 | ||
71 | if (iname->len <= 0 || iname->len > ctx->lim) | 72 | if (iname->len <= 0 || iname->len > ctx->lim) |
@@ -73,6 +74,7 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx, | |||
73 | 74 | ||
74 | ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ? | 75 | ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ? |
75 | EXT4_CRYPTO_BLOCK_SIZE : iname->len; | 76 | EXT4_CRYPTO_BLOCK_SIZE : iname->len; |
77 | ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding); | ||
76 | ciphertext_len = (ciphertext_len > ctx->lim) | 78 | ciphertext_len = (ciphertext_len > ctx->lim) |
77 | ? ctx->lim : ciphertext_len; | 79 | ? ctx->lim : ciphertext_len; |
78 | 80 | ||
@@ -101,7 +103,7 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx, | |||
101 | /* Create encryption request */ | 103 | /* Create encryption request */ |
102 | sg_init_table(sg, 1); | 104 | sg_init_table(sg, 1); |
103 | sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0); | 105 | sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0); |
104 | ablkcipher_request_set_crypt(req, sg, sg, iname->len, iv); | 106 | ablkcipher_request_set_crypt(req, sg, sg, ciphertext_len, iv); |
105 | res = crypto_ablkcipher_encrypt(req); | 107 | res = crypto_ablkcipher_encrypt(req); |
106 | if (res == -EINPROGRESS || res == -EBUSY) { | 108 | if (res == -EINPROGRESS || res == -EBUSY) { |
107 | BUG_ON(req->base.data != &ecr); | 109 | BUG_ON(req->base.data != &ecr); |
@@ -198,106 +200,57 @@ static int ext4_fname_decrypt(struct ext4_fname_crypto_ctx *ctx, | |||
198 | return oname->len; | 200 | return oname->len; |
199 | } | 201 | } |
200 | 202 | ||
203 | static const char *lookup_table = | ||
204 | "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,"; | ||
205 | |||
201 | /** | 206 | /** |
202 | * ext4_fname_encode_digest() - | 207 | * ext4_fname_encode_digest() - |
203 | * | 208 | * |
204 | * Encodes the input digest using characters from the set [a-zA-Z0-9_+]. | 209 | * Encodes the input digest using characters from the set [a-zA-Z0-9_+]. |
205 | * The encoded string is roughly 4/3 times the size of the input string. | 210 | * The encoded string is roughly 4/3 times the size of the input string. |
206 | */ | 211 | */ |
207 | int ext4_fname_encode_digest(char *dst, char *src, u32 len) | 212 | static int digest_encode(const char *src, int len, char *dst) |
208 | { | 213 | { |
209 | static const char *lookup_table = | 214 | int i = 0, bits = 0, ac = 0; |
210 | "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+"; | 215 | char *cp = dst; |
211 | u32 current_chunk, num_chunks, i; | 216 | |
212 | char tmp_buf[3]; | 217 | while (i < len) { |
213 | u32 c0, c1, c2, c3; | 218 | ac += (((unsigned char) src[i]) << bits); |
214 | 219 | bits += 8; | |
215 | current_chunk = 0; | 220 | do { |
216 | num_chunks = len/3; | 221 | *cp++ = lookup_table[ac & 0x3f]; |
217 | for (i = 0; i < num_chunks; i++) { | 222 | ac >>= 6; |
218 | c0 = src[3*i] & 0x3f; | 223 | bits -= 6; |
219 | c1 = (((src[3*i]>>6)&0x3) | ((src[3*i+1] & 0xf)<<2)) & 0x3f; | 224 | } while (bits >= 6); |
220 | c2 = (((src[3*i+1]>>4)&0xf) | ((src[3*i+2] & 0x3)<<4)) & 0x3f; | ||
221 | c3 = (src[3*i+2]>>2) & 0x3f; | ||
222 | dst[4*i] = lookup_table[c0]; | ||
223 | dst[4*i+1] = lookup_table[c1]; | ||
224 | dst[4*i+2] = lookup_table[c2]; | ||
225 | dst[4*i+3] = lookup_table[c3]; | ||
226 | } | ||
227 | if (i*3 < len) { | ||
228 | memset(tmp_buf, 0, 3); | ||
229 | memcpy(tmp_buf, &src[3*i], len-3*i); | ||
230 | c0 = tmp_buf[0] & 0x3f; | ||
231 | c1 = (((tmp_buf[0]>>6)&0x3) | ((tmp_buf[1] & 0xf)<<2)) & 0x3f; | ||
232 | c2 = (((tmp_buf[1]>>4)&0xf) | ((tmp_buf[2] & 0x3)<<4)) & 0x3f; | ||
233 | c3 = (tmp_buf[2]>>2) & 0x3f; | ||
234 | dst[4*i] = lookup_table[c0]; | ||
235 | dst[4*i+1] = lookup_table[c1]; | ||
236 | dst[4*i+2] = lookup_table[c2]; | ||
237 | dst[4*i+3] = lookup_table[c3]; | ||
238 | i++; | 225 | i++; |
239 | } | 226 | } |
240 | return (i * 4); | 227 | if (bits) |
228 | *cp++ = lookup_table[ac & 0x3f]; | ||
229 | return cp - dst; | ||
241 | } | 230 | } |
242 | 231 | ||
243 | /** | 232 | static int digest_decode(const char *src, int len, char *dst) |
244 | * ext4_fname_hash() - | ||
245 | * | ||
246 | * This function computes the hash of the input filename, and sets the output | ||
247 | * buffer to the *encoded* digest. It returns the length of the digest as its | ||
248 | * return value. Errors are returned as negative numbers. We trust the caller | ||
249 | * to allocate sufficient memory to oname string. | ||
250 | */ | ||
251 | static int ext4_fname_hash(struct ext4_fname_crypto_ctx *ctx, | ||
252 | const struct ext4_str *iname, | ||
253 | struct ext4_str *oname) | ||
254 | { | 233 | { |
255 | struct scatterlist sg; | 234 | int i = 0, bits = 0, ac = 0; |
256 | struct hash_desc desc = { | 235 | const char *p; |
257 | .tfm = (struct crypto_hash *)ctx->htfm, | 236 | char *cp = dst; |
258 | .flags = CRYPTO_TFM_REQ_MAY_SLEEP | 237 | |
259 | }; | 238 | while (i < len) { |
260 | int res = 0; | 239 | p = strchr(lookup_table, src[i]); |
261 | 240 | if (p == NULL || src[i] == 0) | |
262 | if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) { | 241 | return -2; |
263 | res = ext4_fname_encode_digest(oname->name, iname->name, | 242 | ac += (p - lookup_table) << bits; |
264 | iname->len); | 243 | bits += 6; |
265 | oname->len = res; | 244 | if (bits >= 8) { |
266 | return res; | 245 | *cp++ = ac & 0xff; |
267 | } | 246 | ac >>= 8; |
268 | 247 | bits -= 8; | |
269 | sg_init_one(&sg, iname->name, iname->len); | 248 | } |
270 | res = crypto_hash_init(&desc); | 249 | i++; |
271 | if (res) { | ||
272 | printk(KERN_ERR | ||
273 | "%s: Error initializing crypto hash; res = [%d]\n", | ||
274 | __func__, res); | ||
275 | goto out; | ||
276 | } | ||
277 | res = crypto_hash_update(&desc, &sg, iname->len); | ||
278 | if (res) { | ||
279 | printk(KERN_ERR | ||
280 | "%s: Error updating crypto hash; res = [%d]\n", | ||
281 | __func__, res); | ||
282 | goto out; | ||
283 | } | ||
284 | res = crypto_hash_final(&desc, | ||
285 | &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE]); | ||
286 | if (res) { | ||
287 | printk(KERN_ERR | ||
288 | "%s: Error finalizing crypto hash; res = [%d]\n", | ||
289 | __func__, res); | ||
290 | goto out; | ||
291 | } | 250 | } |
292 | /* Encode the digest as a printable string--this will increase the | 251 | if (ac) |
293 | * size of the digest */ | 252 | return -1; |
294 | oname->name[0] = 'I'; | 253 | return cp - dst; |
295 | res = ext4_fname_encode_digest(oname->name+1, | ||
296 | &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE], | ||
297 | EXT4_FNAME_CRYPTO_DIGEST_SIZE) + 1; | ||
298 | oname->len = res; | ||
299 | out: | ||
300 | return res; | ||
301 | } | 254 | } |
302 | 255 | ||
303 | /** | 256 | /** |
@@ -405,6 +358,7 @@ struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx( | |||
405 | if (IS_ERR(ctx)) | 358 | if (IS_ERR(ctx)) |
406 | return ctx; | 359 | return ctx; |
407 | 360 | ||
361 | ctx->flags = ei->i_crypt_policy_flags; | ||
408 | if (ctx->has_valid_key) { | 362 | if (ctx->has_valid_key) { |
409 | if (ctx->key.mode != EXT4_ENCRYPTION_MODE_AES_256_CTS) { | 363 | if (ctx->key.mode != EXT4_ENCRYPTION_MODE_AES_256_CTS) { |
410 | printk_once(KERN_WARNING | 364 | printk_once(KERN_WARNING |
@@ -517,6 +471,7 @@ int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx, | |||
517 | u32 namelen) | 471 | u32 namelen) |
518 | { | 472 | { |
519 | u32 ciphertext_len; | 473 | u32 ciphertext_len; |
474 | int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK); | ||
520 | 475 | ||
521 | if (ctx == NULL) | 476 | if (ctx == NULL) |
522 | return -EIO; | 477 | return -EIO; |
@@ -524,6 +479,7 @@ int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx, | |||
524 | return -EACCES; | 479 | return -EACCES; |
525 | ciphertext_len = (namelen < EXT4_CRYPTO_BLOCK_SIZE) ? | 480 | ciphertext_len = (namelen < EXT4_CRYPTO_BLOCK_SIZE) ? |
526 | EXT4_CRYPTO_BLOCK_SIZE : namelen; | 481 | EXT4_CRYPTO_BLOCK_SIZE : namelen; |
482 | ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding); | ||
527 | ciphertext_len = (ciphertext_len > ctx->lim) | 483 | ciphertext_len = (ciphertext_len > ctx->lim) |
528 | ? ctx->lim : ciphertext_len; | 484 | ? ctx->lim : ciphertext_len; |
529 | return (int) ciphertext_len; | 485 | return (int) ciphertext_len; |
@@ -539,10 +495,13 @@ int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx, | |||
539 | u32 ilen, struct ext4_str *crypto_str) | 495 | u32 ilen, struct ext4_str *crypto_str) |
540 | { | 496 | { |
541 | unsigned int olen; | 497 | unsigned int olen; |
498 | int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK); | ||
542 | 499 | ||
543 | if (!ctx) | 500 | if (!ctx) |
544 | return -EIO; | 501 | return -EIO; |
545 | olen = ext4_fname_crypto_round_up(ilen, EXT4_CRYPTO_BLOCK_SIZE); | 502 | if (padding < EXT4_CRYPTO_BLOCK_SIZE) |
503 | padding = EXT4_CRYPTO_BLOCK_SIZE; | ||
504 | olen = ext4_fname_crypto_round_up(ilen, padding); | ||
546 | crypto_str->len = olen; | 505 | crypto_str->len = olen; |
547 | if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) | 506 | if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) |
548 | olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2; | 507 | olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2; |
@@ -571,9 +530,13 @@ void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str) | |||
571 | * ext4_fname_disk_to_usr() - converts a filename from disk space to user space | 530 | * ext4_fname_disk_to_usr() - converts a filename from disk space to user space |
572 | */ | 531 | */ |
573 | int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, | 532 | int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, |
574 | const struct ext4_str *iname, | 533 | struct dx_hash_info *hinfo, |
575 | struct ext4_str *oname) | 534 | const struct ext4_str *iname, |
535 | struct ext4_str *oname) | ||
576 | { | 536 | { |
537 | char buf[24]; | ||
538 | int ret; | ||
539 | |||
577 | if (ctx == NULL) | 540 | if (ctx == NULL) |
578 | return -EIO; | 541 | return -EIO; |
579 | if (iname->len < 3) { | 542 | if (iname->len < 3) { |
@@ -587,18 +550,33 @@ int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, | |||
587 | } | 550 | } |
588 | if (ctx->has_valid_key) | 551 | if (ctx->has_valid_key) |
589 | return ext4_fname_decrypt(ctx, iname, oname); | 552 | return ext4_fname_decrypt(ctx, iname, oname); |
590 | else | 553 | |
591 | return ext4_fname_hash(ctx, iname, oname); | 554 | if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) { |
555 | ret = digest_encode(iname->name, iname->len, oname->name); | ||
556 | oname->len = ret; | ||
557 | return ret; | ||
558 | } | ||
559 | if (hinfo) { | ||
560 | memcpy(buf, &hinfo->hash, 4); | ||
561 | memcpy(buf+4, &hinfo->minor_hash, 4); | ||
562 | } else | ||
563 | memset(buf, 0, 8); | ||
564 | memcpy(buf + 8, iname->name + iname->len - 16, 16); | ||
565 | oname->name[0] = '_'; | ||
566 | ret = digest_encode(buf, 24, oname->name+1); | ||
567 | oname->len = ret + 1; | ||
568 | return ret + 1; | ||
592 | } | 569 | } |
593 | 570 | ||
594 | int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, | 571 | int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, |
572 | struct dx_hash_info *hinfo, | ||
595 | const struct ext4_dir_entry_2 *de, | 573 | const struct ext4_dir_entry_2 *de, |
596 | struct ext4_str *oname) | 574 | struct ext4_str *oname) |
597 | { | 575 | { |
598 | struct ext4_str iname = {.name = (unsigned char *) de->name, | 576 | struct ext4_str iname = {.name = (unsigned char *) de->name, |
599 | .len = de->name_len }; | 577 | .len = de->name_len }; |
600 | 578 | ||
601 | return _ext4_fname_disk_to_usr(ctx, &iname, oname); | 579 | return _ext4_fname_disk_to_usr(ctx, hinfo, &iname, oname); |
602 | } | 580 | } |
603 | 581 | ||
604 | 582 | ||
@@ -640,10 +618,11 @@ int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx, | |||
640 | const struct qstr *iname, | 618 | const struct qstr *iname, |
641 | struct dx_hash_info *hinfo) | 619 | struct dx_hash_info *hinfo) |
642 | { | 620 | { |
643 | struct ext4_str tmp, tmp2; | 621 | struct ext4_str tmp; |
644 | int ret = 0; | 622 | int ret = 0; |
623 | char buf[EXT4_FNAME_CRYPTO_DIGEST_SIZE+1]; | ||
645 | 624 | ||
646 | if (!ctx || !ctx->has_valid_key || | 625 | if (!ctx || |
647 | ((iname->name[0] == '.') && | 626 | ((iname->name[0] == '.') && |
648 | ((iname->len == 1) || | 627 | ((iname->len == 1) || |
649 | ((iname->name[1] == '.') && (iname->len == 2))))) { | 628 | ((iname->name[1] == '.') && (iname->len == 2))))) { |
@@ -651,59 +630,90 @@ int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx, | |||
651 | return 0; | 630 | return 0; |
652 | } | 631 | } |
653 | 632 | ||
633 | if (!ctx->has_valid_key && iname->name[0] == '_') { | ||
634 | if (iname->len != 33) | ||
635 | return -ENOENT; | ||
636 | ret = digest_decode(iname->name+1, iname->len, buf); | ||
637 | if (ret != 24) | ||
638 | return -ENOENT; | ||
639 | memcpy(&hinfo->hash, buf, 4); | ||
640 | memcpy(&hinfo->minor_hash, buf + 4, 4); | ||
641 | return 0; | ||
642 | } | ||
643 | |||
644 | if (!ctx->has_valid_key && iname->name[0] != '_') { | ||
645 | if (iname->len > 43) | ||
646 | return -ENOENT; | ||
647 | ret = digest_decode(iname->name, iname->len, buf); | ||
648 | ext4fs_dirhash(buf, ret, hinfo); | ||
649 | return 0; | ||
650 | } | ||
651 | |||
654 | /* First encrypt the plaintext name */ | 652 | /* First encrypt the plaintext name */ |
655 | ret = ext4_fname_crypto_alloc_buffer(ctx, iname->len, &tmp); | 653 | ret = ext4_fname_crypto_alloc_buffer(ctx, iname->len, &tmp); |
656 | if (ret < 0) | 654 | if (ret < 0) |
657 | return ret; | 655 | return ret; |
658 | 656 | ||
659 | ret = ext4_fname_encrypt(ctx, iname, &tmp); | 657 | ret = ext4_fname_encrypt(ctx, iname, &tmp); |
660 | if (ret < 0) | 658 | if (ret >= 0) { |
661 | goto out; | 659 | ext4fs_dirhash(tmp.name, tmp.len, hinfo); |
662 | 660 | ret = 0; | |
663 | tmp2.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1; | ||
664 | tmp2.name = kmalloc(tmp2.len + 1, GFP_KERNEL); | ||
665 | if (tmp2.name == NULL) { | ||
666 | ret = -ENOMEM; | ||
667 | goto out; | ||
668 | } | 661 | } |
669 | 662 | ||
670 | ret = ext4_fname_hash(ctx, &tmp, &tmp2); | ||
671 | if (ret > 0) | ||
672 | ext4fs_dirhash(tmp2.name, tmp2.len, hinfo); | ||
673 | ext4_fname_crypto_free_buffer(&tmp2); | ||
674 | out: | ||
675 | ext4_fname_crypto_free_buffer(&tmp); | 663 | ext4_fname_crypto_free_buffer(&tmp); |
676 | return ret; | 664 | return ret; |
677 | } | 665 | } |
678 | 666 | ||
679 | /** | 667 | int ext4_fname_match(struct ext4_fname_crypto_ctx *ctx, struct ext4_str *cstr, |
680 | * ext4_fname_disk_to_htree() - converts a filename from disk space to htree-access string | 668 | int len, const char * const name, |
681 | */ | 669 | struct ext4_dir_entry_2 *de) |
682 | int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx, | ||
683 | const struct ext4_dir_entry_2 *de, | ||
684 | struct dx_hash_info *hinfo) | ||
685 | { | 670 | { |
686 | struct ext4_str iname = {.name = (unsigned char *) de->name, | 671 | int ret = -ENOENT; |
687 | .len = de->name_len}; | 672 | int bigname = (*name == '_'); |
688 | struct ext4_str tmp; | ||
689 | int ret; | ||
690 | 673 | ||
691 | if (!ctx || | 674 | if (ctx->has_valid_key) { |
692 | ((iname.name[0] == '.') && | 675 | if (cstr->name == NULL) { |
693 | ((iname.len == 1) || | 676 | struct qstr istr; |
694 | ((iname.name[1] == '.') && (iname.len == 2))))) { | 677 | |
695 | ext4fs_dirhash(iname.name, iname.len, hinfo); | 678 | ret = ext4_fname_crypto_alloc_buffer(ctx, len, cstr); |
696 | return 0; | 679 | if (ret < 0) |
680 | goto errout; | ||
681 | istr.name = name; | ||
682 | istr.len = len; | ||
683 | ret = ext4_fname_encrypt(ctx, &istr, cstr); | ||
684 | if (ret < 0) | ||
685 | goto errout; | ||
686 | } | ||
687 | } else { | ||
688 | if (cstr->name == NULL) { | ||
689 | cstr->name = kmalloc(32, GFP_KERNEL); | ||
690 | if (cstr->name == NULL) | ||
691 | return -ENOMEM; | ||
692 | if ((bigname && (len != 33)) || | ||
693 | (!bigname && (len > 43))) | ||
694 | goto errout; | ||
695 | ret = digest_decode(name+bigname, len-bigname, | ||
696 | cstr->name); | ||
697 | if (ret < 0) { | ||
698 | ret = -ENOENT; | ||
699 | goto errout; | ||
700 | } | ||
701 | cstr->len = ret; | ||
702 | } | ||
703 | if (bigname) { | ||
704 | if (de->name_len < 16) | ||
705 | return 0; | ||
706 | ret = memcmp(de->name + de->name_len - 16, | ||
707 | cstr->name + 8, 16); | ||
708 | return (ret == 0) ? 1 : 0; | ||
709 | } | ||
697 | } | 710 | } |
698 | 711 | if (de->name_len != cstr->len) | |
699 | tmp.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1; | 712 | return 0; |
700 | tmp.name = kmalloc(tmp.len + 1, GFP_KERNEL); | 713 | ret = memcmp(de->name, cstr->name, cstr->len); |
701 | if (tmp.name == NULL) | 714 | return (ret == 0) ? 1 : 0; |
702 | return -ENOMEM; | 715 | errout: |
703 | 716 | kfree(cstr->name); | |
704 | ret = ext4_fname_hash(ctx, &iname, &tmp); | 717 | cstr->name = NULL; |
705 | if (ret > 0) | ||
706 | ext4fs_dirhash(tmp.name, tmp.len, hinfo); | ||
707 | ext4_fname_crypto_free_buffer(&tmp); | ||
708 | return ret; | 718 | return ret; |
709 | } | 719 | } |
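
The digest_encode()/digest_decode() pair introduced in this file is essentially base64 with a custom alphabet and no padding characters: bytes are fed into an accumulator and emitted six bits at a time, least-significant bits first. A self-contained userspace sketch of the same scheme, kept deliberately close to the kernel version so the round trip can be verified.

/* Userspace sketch of the 6-bit encoding used by digest_encode() and
 * digest_decode(); same alphabet, little-endian bit accumulation. */
#include <stdio.h>
#include <string.h>

static const char *lookup_table =
        "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";

static int encode(const char *src, int len, char *dst)
{
        int i = 0, bits = 0, ac = 0;
        char *cp = dst;

        while (i < len) {
                ac += (((unsigned char) src[i]) << bits);
                bits += 8;
                do {
                        *cp++ = lookup_table[ac & 0x3f];
                        ac >>= 6;
                        bits -= 6;
                } while (bits >= 6);
                i++;
        }
        if (bits)
                *cp++ = lookup_table[ac & 0x3f];
        return cp - dst;
}

static int decode(const char *src, int len, char *dst)
{
        int i = 0, bits = 0, ac = 0;
        const char *p;
        char *cp = dst;

        while (i < len) {
                p = strchr(lookup_table, src[i]);
                if (p == NULL || src[i] == 0)
                        return -2;
                ac += (int)(p - lookup_table) << bits;
                bits += 6;
                if (bits >= 8) {
                        *cp++ = ac & 0xff;
                        ac >>= 8;
                        bits -= 8;
                }
                i++;
        }
        if (ac)
                return -1;
        return cp - dst;
}

int main(void)
{
        char enc[64], dec[64];
        int elen = encode("hash", 4, enc);
        int dlen = decode(enc, elen, dec);

        /* 4 input bytes -> ceil(32 / 6) = 6 output characters */
        printf("encoded %d chars, decoded %d bytes: %.4s\n", elen, dlen, dec);
        return 0;
}
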
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c index c8392af8abbb..52170d0b7c40 100644 --- a/fs/ext4/crypto_key.c +++ b/fs/ext4/crypto_key.c | |||
@@ -110,6 +110,7 @@ int ext4_generate_encryption_key(struct inode *inode) | |||
110 | } | 110 | } |
111 | res = 0; | 111 | res = 0; |
112 | 112 | ||
113 | ei->i_crypt_policy_flags = ctx.flags; | ||
113 | if (S_ISREG(inode->i_mode)) | 114 | if (S_ISREG(inode->i_mode)) |
114 | crypt_key->mode = ctx.contents_encryption_mode; | 115 | crypt_key->mode = ctx.contents_encryption_mode; |
115 | else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) | 116 | else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) |
diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c index 30eaf9e9864a..a6d6291aea16 100644 --- a/fs/ext4/crypto_policy.c +++ b/fs/ext4/crypto_policy.c | |||
@@ -37,6 +37,8 @@ static int ext4_is_encryption_context_consistent_with_policy( | |||
37 | return 0; | 37 | return 0; |
38 | return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor, | 38 | return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor, |
39 | EXT4_KEY_DESCRIPTOR_SIZE) == 0 && | 39 | EXT4_KEY_DESCRIPTOR_SIZE) == 0 && |
40 | (ctx.flags == | ||
41 | policy->flags) && | ||
40 | (ctx.contents_encryption_mode == | 42 | (ctx.contents_encryption_mode == |
41 | policy->contents_encryption_mode) && | 43 | policy->contents_encryption_mode) && |
42 | (ctx.filenames_encryption_mode == | 44 | (ctx.filenames_encryption_mode == |
@@ -56,25 +58,25 @@ static int ext4_create_encryption_context_from_policy( | |||
56 | printk(KERN_WARNING | 58 | printk(KERN_WARNING |
57 | "%s: Invalid contents encryption mode %d\n", __func__, | 59 | "%s: Invalid contents encryption mode %d\n", __func__, |
58 | policy->contents_encryption_mode); | 60 | policy->contents_encryption_mode); |
59 | res = -EINVAL; | 61 | return -EINVAL; |
60 | goto out; | ||
61 | } | 62 | } |
62 | if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) { | 63 | if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) { |
63 | printk(KERN_WARNING | 64 | printk(KERN_WARNING |
64 | "%s: Invalid filenames encryption mode %d\n", __func__, | 65 | "%s: Invalid filenames encryption mode %d\n", __func__, |
65 | policy->filenames_encryption_mode); | 66 | policy->filenames_encryption_mode); |
66 | res = -EINVAL; | 67 | return -EINVAL; |
67 | goto out; | ||
68 | } | 68 | } |
69 | if (policy->flags & ~EXT4_POLICY_FLAGS_VALID) | ||
70 | return -EINVAL; | ||
69 | ctx.contents_encryption_mode = policy->contents_encryption_mode; | 71 | ctx.contents_encryption_mode = policy->contents_encryption_mode; |
70 | ctx.filenames_encryption_mode = policy->filenames_encryption_mode; | 72 | ctx.filenames_encryption_mode = policy->filenames_encryption_mode; |
73 | ctx.flags = policy->flags; | ||
71 | BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE); | 74 | BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE); |
72 | get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE); | 75 | get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE); |
73 | 76 | ||
74 | res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION, | 77 | res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION, |
75 | EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, | 78 | EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, |
76 | sizeof(ctx), 0); | 79 | sizeof(ctx), 0); |
77 | out: | ||
78 | if (!res) | 80 | if (!res) |
79 | ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); | 81 | ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); |
80 | return res; | 82 | return res; |
@@ -115,6 +117,7 @@ int ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy) | |||
115 | policy->version = 0; | 117 | policy->version = 0; |
116 | policy->contents_encryption_mode = ctx.contents_encryption_mode; | 118 | policy->contents_encryption_mode = ctx.contents_encryption_mode; |
117 | policy->filenames_encryption_mode = ctx.filenames_encryption_mode; | 119 | policy->filenames_encryption_mode = ctx.filenames_encryption_mode; |
120 | policy->flags = ctx.flags; | ||
118 | memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor, | 121 | memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor, |
119 | EXT4_KEY_DESCRIPTOR_SIZE); | 122 | EXT4_KEY_DESCRIPTOR_SIZE); |
120 | return 0; | 123 | return 0; |
@@ -176,6 +179,7 @@ int ext4_inherit_context(struct inode *parent, struct inode *child) | |||
176 | EXT4_ENCRYPTION_MODE_AES_256_XTS; | 179 | EXT4_ENCRYPTION_MODE_AES_256_XTS; |
177 | ctx.filenames_encryption_mode = | 180 | ctx.filenames_encryption_mode = |
178 | EXT4_ENCRYPTION_MODE_AES_256_CTS; | 181 | EXT4_ENCRYPTION_MODE_AES_256_CTS; |
182 | ctx.flags = 0; | ||
179 | memset(ctx.master_key_descriptor, 0x42, | 183 | memset(ctx.master_key_descriptor, 0x42, |
180 | EXT4_KEY_DESCRIPTOR_SIZE); | 184 | EXT4_KEY_DESCRIPTOR_SIZE); |
181 | res = 0; | 185 | res = 0; |
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 61db51a5ce4c..5665d82d2332 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c | |||
@@ -249,7 +249,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) | |||
249 | } else { | 249 | } else { |
250 | /* Directory is encrypted */ | 250 | /* Directory is encrypted */ |
251 | err = ext4_fname_disk_to_usr(enc_ctx, | 251 | err = ext4_fname_disk_to_usr(enc_ctx, |
252 | de, &fname_crypto_str); | 252 | NULL, de, &fname_crypto_str); |
253 | if (err < 0) | 253 | if (err < 0) |
254 | goto errout; | 254 | goto errout; |
255 | if (!dir_emit(ctx, | 255 | if (!dir_emit(ctx, |
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index ef267adce19a..009a0590b20f 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -911,6 +911,7 @@ struct ext4_inode_info { | |||
911 | 911 | ||
912 | /* on-disk additional length */ | 912 | /* on-disk additional length */ |
913 | __u16 i_extra_isize; | 913 | __u16 i_extra_isize; |
914 | char i_crypt_policy_flags; | ||
914 | 915 | ||
915 | /* Indicate the inline data space. */ | 916 | /* Indicate the inline data space. */ |
916 | u16 i_inline_off; | 917 | u16 i_inline_off; |
@@ -1066,12 +1067,6 @@ extern void ext4_set_bits(void *bm, int cur, int len); | |||
1066 | /* Metadata checksum algorithm codes */ | 1067 | /* Metadata checksum algorithm codes */ |
1067 | #define EXT4_CRC32C_CHKSUM 1 | 1068 | #define EXT4_CRC32C_CHKSUM 1 |
1068 | 1069 | ||
1069 | /* Encryption algorithms */ | ||
1070 | #define EXT4_ENCRYPTION_MODE_INVALID 0 | ||
1071 | #define EXT4_ENCRYPTION_MODE_AES_256_XTS 1 | ||
1072 | #define EXT4_ENCRYPTION_MODE_AES_256_GCM 2 | ||
1073 | #define EXT4_ENCRYPTION_MODE_AES_256_CBC 3 | ||
1074 | |||
1075 | /* | 1070 | /* |
1076 | * Structure of the super block | 1071 | * Structure of the super block |
1077 | */ | 1072 | */ |
@@ -2093,9 +2088,11 @@ u32 ext4_fname_crypto_round_up(u32 size, u32 blksize); | |||
2093 | int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx, | 2088 | int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx, |
2094 | u32 ilen, struct ext4_str *crypto_str); | 2089 | u32 ilen, struct ext4_str *crypto_str); |
2095 | int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, | 2090 | int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, |
2091 | struct dx_hash_info *hinfo, | ||
2096 | const struct ext4_str *iname, | 2092 | const struct ext4_str *iname, |
2097 | struct ext4_str *oname); | 2093 | struct ext4_str *oname); |
2098 | int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, | 2094 | int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, |
2095 | struct dx_hash_info *hinfo, | ||
2099 | const struct ext4_dir_entry_2 *de, | 2096 | const struct ext4_dir_entry_2 *de, |
2100 | struct ext4_str *oname); | 2097 | struct ext4_str *oname); |
2101 | int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx, | 2098 | int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx, |
@@ -2104,11 +2101,12 @@ int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx, | |||
2104 | int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx, | 2101 | int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx, |
2105 | const struct qstr *iname, | 2102 | const struct qstr *iname, |
2106 | struct dx_hash_info *hinfo); | 2103 | struct dx_hash_info *hinfo); |
2107 | int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx, | ||
2108 | const struct ext4_dir_entry_2 *de, | ||
2109 | struct dx_hash_info *hinfo); | ||
2110 | int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx, | 2104 | int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx, |
2111 | u32 namelen); | 2105 | u32 namelen); |
2106 | int ext4_fname_match(struct ext4_fname_crypto_ctx *ctx, struct ext4_str *cstr, | ||
2107 | int len, const char * const name, | ||
2108 | struct ext4_dir_entry_2 *de); | ||
2109 | |||
2112 | 2110 | ||
2113 | #ifdef CONFIG_EXT4_FS_ENCRYPTION | 2111 | #ifdef CONFIG_EXT4_FS_ENCRYPTION |
2114 | void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx); | 2112 | void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx); |
diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h index c2ba35a914b6..d75159c101ce 100644 --- a/fs/ext4/ext4_crypto.h +++ b/fs/ext4/ext4_crypto.h | |||
@@ -20,12 +20,20 @@ struct ext4_encryption_policy { | |||
20 | char version; | 20 | char version; |
21 | char contents_encryption_mode; | 21 | char contents_encryption_mode; |
22 | char filenames_encryption_mode; | 22 | char filenames_encryption_mode; |
23 | char flags; | ||
23 | char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE]; | 24 | char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE]; |
24 | } __attribute__((__packed__)); | 25 | } __attribute__((__packed__)); |
25 | 26 | ||
26 | #define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1 | 27 | #define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1 |
27 | #define EXT4_KEY_DERIVATION_NONCE_SIZE 16 | 28 | #define EXT4_KEY_DERIVATION_NONCE_SIZE 16 |
28 | 29 | ||
30 | #define EXT4_POLICY_FLAGS_PAD_4 0x00 | ||
31 | #define EXT4_POLICY_FLAGS_PAD_8 0x01 | ||
32 | #define EXT4_POLICY_FLAGS_PAD_16 0x02 | ||
33 | #define EXT4_POLICY_FLAGS_PAD_32 0x03 | ||
34 | #define EXT4_POLICY_FLAGS_PAD_MASK 0x03 | ||
35 | #define EXT4_POLICY_FLAGS_VALID 0x03 | ||
36 | |||
29 | /** | 37 | /** |
30 | * Encryption context for inode | 38 | * Encryption context for inode |
31 | * | 39 | * |
@@ -41,7 +49,7 @@ struct ext4_encryption_context { | |||
41 | char format; | 49 | char format; |
42 | char contents_encryption_mode; | 50 | char contents_encryption_mode; |
43 | char filenames_encryption_mode; | 51 | char filenames_encryption_mode; |
44 | char reserved; | 52 | char flags; |
45 | char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE]; | 53 | char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE]; |
46 | char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE]; | 54 | char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE]; |
47 | } __attribute__((__packed__)); | 55 | } __attribute__((__packed__)); |
@@ -120,6 +128,7 @@ struct ext4_fname_crypto_ctx { | |||
120 | struct crypto_hash *htfm; | 128 | struct crypto_hash *htfm; |
121 | struct page *workpage; | 129 | struct page *workpage; |
122 | struct ext4_encryption_key key; | 130 | struct ext4_encryption_key key; |
131 | unsigned flags : 8; | ||
123 | unsigned has_valid_key : 1; | 132 | unsigned has_valid_key : 1; |
124 | unsigned ctfm_key_is_ready : 1; | 133 | unsigned ctfm_key_is_ready : 1; |
125 | }; | 134 | }; |
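Aside: the new "flags" byte added above (in both the user-visible policy and the on-disk context) carries the filename padding mode in its low two bits. As an illustrative reading of the EXT4_POLICY_FLAGS_PAD_* defines, here is a sketch with a hypothetical helper name; it is not code from this patch:

    /*
     * Hypothetical helper, for illustration only. The PAD_4..PAD_32 values
     * occupy the two bits covered by EXT4_POLICY_FLAGS_PAD_MASK, so shifting
     * 4 left by that value yields a padding width of 4, 8, 16 or 32 bytes.
     */
    static inline int ext4_policy_padding_bytes(char flags)
    {
            return 4 << (flags & EXT4_POLICY_FLAGS_PAD_MASK);
    }

This is also why EXT4_POLICY_FLAGS_VALID masks exactly those two bits: no other policy flag bits are defined at this point.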
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 973816bfe4a9..d74e08029643 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -4927,13 +4927,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) | |||
4927 | if (ret) | 4927 | if (ret) |
4928 | return ret; | 4928 | return ret; |
4929 | 4929 | ||
4930 | /* | ||
4931 | * currently supporting (pre)allocate mode for extent-based | ||
4932 | * files _only_ | ||
4933 | */ | ||
4934 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) | ||
4935 | return -EOPNOTSUPP; | ||
4936 | |||
4937 | if (mode & FALLOC_FL_COLLAPSE_RANGE) | 4930 | if (mode & FALLOC_FL_COLLAPSE_RANGE) |
4938 | return ext4_collapse_range(inode, offset, len); | 4931 | return ext4_collapse_range(inode, offset, len); |
4939 | 4932 | ||
@@ -4955,6 +4948,14 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) | |||
4955 | 4948 | ||
4956 | mutex_lock(&inode->i_mutex); | 4949 | mutex_lock(&inode->i_mutex); |
4957 | 4950 | ||
4951 | /* | ||
4952 | * We only support preallocation for extent-based files ||
4953 | */ | ||
4954 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { | ||
4955 | ret = -EOPNOTSUPP; | ||
4956 | goto out; | ||
4957 | } | ||
4958 | |||
4958 | if (!(mode & FALLOC_FL_KEEP_SIZE) && | 4959 | if (!(mode & FALLOC_FL_KEEP_SIZE) && |
4959 | offset + len > i_size_read(inode)) { | 4960 | offset + len > i_size_read(inode)) { |
4960 | new_size = offset + len; | 4961 | new_size = offset + len; |
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index d33d5a6852b9..26724aeece73 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c | |||
@@ -703,6 +703,14 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, | |||
703 | 703 | ||
704 | BUG_ON(end < lblk); | 704 | BUG_ON(end < lblk); |
705 | 705 | ||
706 | if ((status & EXTENT_STATUS_DELAYED) && | ||
707 | (status & EXTENT_STATUS_WRITTEN)) { | ||
708 | ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as " | ||
709 | "delayed and written which can potentially " ||
710 | "cause data loss.\n", lblk, len); ||
711 | WARN_ON(1); | ||
712 | } | ||
713 | |||
706 | newes.es_lblk = lblk; | 714 | newes.es_lblk = lblk; |
707 | newes.es_len = len; | 715 | newes.es_len = len; |
708 | ext4_es_store_pblock_status(&newes, pblk, status); | 716 | ext4_es_store_pblock_status(&newes, pblk, status); |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index cbd0654a2675..55b187c3bac1 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -531,6 +531,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, | |||
531 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? | 531 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? |
532 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; | 532 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; |
533 | if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && | 533 | if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && |
534 | !(status & EXTENT_STATUS_WRITTEN) && | ||
534 | ext4_find_delalloc_range(inode, map->m_lblk, | 535 | ext4_find_delalloc_range(inode, map->m_lblk, |
535 | map->m_lblk + map->m_len - 1)) | 536 | map->m_lblk + map->m_len - 1)) |
536 | status |= EXTENT_STATUS_DELAYED; | 537 | status |= EXTENT_STATUS_DELAYED; |
@@ -635,6 +636,7 @@ found: | |||
635 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? | 636 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? |
636 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; | 637 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; |
637 | if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && | 638 | if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && |
639 | !(status & EXTENT_STATUS_WRITTEN) && | ||
638 | ext4_find_delalloc_range(inode, map->m_lblk, | 640 | ext4_find_delalloc_range(inode, map->m_lblk, |
639 | map->m_lblk + map->m_len - 1)) | 641 | map->m_lblk + map->m_len - 1)) |
640 | status |= EXTENT_STATUS_DELAYED; | 642 | status |= EXTENT_STATUS_DELAYED; |
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 7223b0b4bc38..814f3beb4369 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
@@ -640,7 +640,7 @@ static struct stats dx_show_leaf(struct inode *dir, | |||
640 | ext4_put_fname_crypto_ctx(&ctx); | 640 | ext4_put_fname_crypto_ctx(&ctx); |
641 | ctx = NULL; | 641 | ctx = NULL; |
642 | } | 642 | } |
643 | res = ext4_fname_disk_to_usr(ctx, de, | 643 | res = ext4_fname_disk_to_usr(ctx, NULL, de, |
644 | &fname_crypto_str); | 644 | &fname_crypto_str); |
645 | if (res < 0) { | 645 | if (res < 0) { |
646 | printk(KERN_WARNING "Error " | 646 | printk(KERN_WARNING "Error " |
@@ -653,15 +653,8 @@ static struct stats dx_show_leaf(struct inode *dir, | |||
653 | name = fname_crypto_str.name; | 653 | name = fname_crypto_str.name; |
654 | len = fname_crypto_str.len; | 654 | len = fname_crypto_str.len; |
655 | } | 655 | } |
656 | res = ext4_fname_disk_to_hash(ctx, de, | 656 | ext4fs_dirhash(de->name, de->name_len, |
657 | &h); | 657 | &h); |
658 | if (res < 0) { | ||
659 | printk(KERN_WARNING "Error " | ||
660 | "converting filename " | ||
661 | "from disk to htree" | ||
662 | "\n"); | ||
663 | h.hash = 0xDEADBEEF; | ||
664 | } | ||
665 | printk("%*.s:(E)%x.%u ", len, name, | 658 | printk("%*.s:(E)%x.%u ", len, name, |
666 | h.hash, (unsigned) ((char *) de | 659 | h.hash, (unsigned) ((char *) de |
667 | - base)); | 660 | - base)); |
@@ -1008,15 +1001,7 @@ static int htree_dirblock_to_tree(struct file *dir_file, | |||
1008 | /* silently ignore the rest of the block */ | 1001 | /* silently ignore the rest of the block */ |
1009 | break; | 1002 | break; |
1010 | } | 1003 | } |
1011 | #ifdef CONFIG_EXT4_FS_ENCRYPTION | ||
1012 | err = ext4_fname_disk_to_hash(ctx, de, hinfo); | ||
1013 | if (err < 0) { | ||
1014 | count = err; | ||
1015 | goto errout; | ||
1016 | } | ||
1017 | #else | ||
1018 | ext4fs_dirhash(de->name, de->name_len, hinfo); | 1004 | ext4fs_dirhash(de->name, de->name_len, hinfo); |
1019 | #endif | ||
1020 | if ((hinfo->hash < start_hash) || | 1005 | if ((hinfo->hash < start_hash) || |
1021 | ((hinfo->hash == start_hash) && | 1006 | ((hinfo->hash == start_hash) && |
1022 | (hinfo->minor_hash < start_minor_hash))) | 1007 | (hinfo->minor_hash < start_minor_hash))) |
@@ -1032,7 +1017,7 @@ static int htree_dirblock_to_tree(struct file *dir_file, | |||
1032 | &tmp_str); | 1017 | &tmp_str); |
1033 | } else { | 1018 | } else { |
1034 | /* Directory is encrypted */ | 1019 | /* Directory is encrypted */ |
1035 | err = ext4_fname_disk_to_usr(ctx, de, | 1020 | err = ext4_fname_disk_to_usr(ctx, hinfo, de, |
1036 | &fname_crypto_str); | 1021 | &fname_crypto_str); |
1037 | if (err < 0) { | 1022 | if (err < 0) { |
1038 | count = err; | 1023 | count = err; |
@@ -1193,26 +1178,10 @@ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de, | |||
1193 | int count = 0; | 1178 | int count = 0; |
1194 | char *base = (char *) de; | 1179 | char *base = (char *) de; |
1195 | struct dx_hash_info h = *hinfo; | 1180 | struct dx_hash_info h = *hinfo; |
1196 | #ifdef CONFIG_EXT4_FS_ENCRYPTION | ||
1197 | struct ext4_fname_crypto_ctx *ctx = NULL; | ||
1198 | int err; | ||
1199 | |||
1200 | ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN); | ||
1201 | if (IS_ERR(ctx)) | ||
1202 | return PTR_ERR(ctx); | ||
1203 | #endif | ||
1204 | 1181 | ||
1205 | while ((char *) de < base + blocksize) { | 1182 | while ((char *) de < base + blocksize) { |
1206 | if (de->name_len && de->inode) { | 1183 | if (de->name_len && de->inode) { |
1207 | #ifdef CONFIG_EXT4_FS_ENCRYPTION | ||
1208 | err = ext4_fname_disk_to_hash(ctx, de, &h); | ||
1209 | if (err < 0) { | ||
1210 | ext4_put_fname_crypto_ctx(&ctx); | ||
1211 | return err; | ||
1212 | } | ||
1213 | #else | ||
1214 | ext4fs_dirhash(de->name, de->name_len, &h); | 1184 | ext4fs_dirhash(de->name, de->name_len, &h); |
1215 | #endif | ||
1216 | map_tail--; | 1185 | map_tail--; |
1217 | map_tail->hash = h.hash; | 1186 | map_tail->hash = h.hash; |
1218 | map_tail->offs = ((char *) de - base)>>2; | 1187 | map_tail->offs = ((char *) de - base)>>2; |
@@ -1223,9 +1192,6 @@ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de, | |||
1223 | /* XXX: do we need to check rec_len == 0 case? -Chris */ | 1192 | /* XXX: do we need to check rec_len == 0 case? -Chris */ |
1224 | de = ext4_next_entry(de, blocksize); | 1193 | de = ext4_next_entry(de, blocksize); |
1225 | } | 1194 | } |
1226 | #ifdef CONFIG_EXT4_FS_ENCRYPTION | ||
1227 | ext4_put_fname_crypto_ctx(&ctx); | ||
1228 | #endif | ||
1229 | return count; | 1195 | return count; |
1230 | } | 1196 | } |
1231 | 1197 | ||
@@ -1287,16 +1253,8 @@ static inline int ext4_match(struct ext4_fname_crypto_ctx *ctx, | |||
1287 | return 0; | 1253 | return 0; |
1288 | 1254 | ||
1289 | #ifdef CONFIG_EXT4_FS_ENCRYPTION | 1255 | #ifdef CONFIG_EXT4_FS_ENCRYPTION |
1290 | if (ctx) { | 1256 | if (ctx) |
1291 | /* Directory is encrypted */ | 1257 | return ext4_fname_match(ctx, fname_crypto_str, len, name, de); |
1292 | res = ext4_fname_disk_to_usr(ctx, de, fname_crypto_str); | ||
1293 | if (res < 0) | ||
1294 | return res; | ||
1295 | if (len != res) | ||
1296 | return 0; | ||
1297 | res = memcmp(name, fname_crypto_str->name, len); | ||
1298 | return (res == 0) ? 1 : 0; | ||
1299 | } | ||
1300 | #endif | 1258 | #endif |
1301 | if (len != de->name_len) | 1259 | if (len != de->name_len) |
1302 | return 0; | 1260 | return 0; |
@@ -1324,16 +1282,6 @@ int search_dir(struct buffer_head *bh, char *search_buf, int buf_size, | |||
1324 | if (IS_ERR(ctx)) | 1282 | if (IS_ERR(ctx)) |
1325 | return -1; | 1283 | return -1; |
1326 | 1284 | ||
1327 | if (ctx != NULL) { | ||
1328 | /* Allocate buffer to hold maximum name length */ | ||
1329 | res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN, | ||
1330 | &fname_crypto_str); | ||
1331 | if (res < 0) { | ||
1332 | ext4_put_fname_crypto_ctx(&ctx); | ||
1333 | return -1; | ||
1334 | } | ||
1335 | } | ||
1336 | |||
1337 | de = (struct ext4_dir_entry_2 *)search_buf; | 1285 | de = (struct ext4_dir_entry_2 *)search_buf; |
1338 | dlimit = search_buf + buf_size; | 1286 | dlimit = search_buf + buf_size; |
1339 | while ((char *) de < dlimit) { | 1287 | while ((char *) de < dlimit) { |
@@ -1872,14 +1820,6 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode, | |||
1872 | return res; | 1820 | return res; |
1873 | } | 1821 | } |
1874 | reclen = EXT4_DIR_REC_LEN(res); | 1822 | reclen = EXT4_DIR_REC_LEN(res); |
1875 | |||
1876 | /* Allocate buffer to hold maximum name length */ | ||
1877 | res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN, | ||
1878 | &fname_crypto_str); | ||
1879 | if (res < 0) { | ||
1880 | ext4_put_fname_crypto_ctx(&ctx); | ||
1881 | return -1; | ||
1882 | } | ||
1883 | } | 1823 | } |
1884 | 1824 | ||
1885 | de = (struct ext4_dir_entry_2 *)buf; | 1825 | de = (struct ext4_dir_entry_2 *)buf; |
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index 8a8ec6293b19..cf0c472047e3 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c | |||
@@ -1432,12 +1432,15 @@ static int ext4_flex_group_add(struct super_block *sb, | |||
1432 | goto exit; | 1432 | goto exit; |
1433 | /* | 1433 | /* |
1434 | * We will always be modifying at least the superblock and GDT | 1434 | * We will always be modifying at least the superblock and GDT |
1435 | * block. If we are adding a group past the last current GDT block, | 1435 | * blocks. If we are adding a group past the last current GDT block, |
1436 | * we will also modify the inode and the dindirect block. If we | 1436 | * we will also modify the inode and the dindirect block. If we |
1437 | * are adding a group with superblock/GDT backups we will also | 1437 | * are adding a group with superblock/GDT backups we will also |
1438 | * modify each of the reserved GDT dindirect blocks. | 1438 | * modify each of the reserved GDT dindirect blocks. |
1439 | */ | 1439 | */ |
1440 | credit = flex_gd->count * 4 + reserved_gdb; | 1440 | credit = 3; /* sb, resize inode, resize inode dindirect */ |
1441 | /* GDT blocks */ | ||
1442 | credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb)); | ||
1443 | credit += reserved_gdb; /* Reserved GDT dindirect blocks */ | ||
1441 | handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit); | 1444 | handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit); |
1442 | if (IS_ERR(handle)) { | 1445 | if (IS_ERR(handle)) { |
1443 | err = PTR_ERR(handle); | 1446 | err = PTR_ERR(handle); |
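To make the effect of the tighter credit estimate concrete, here is a worked example with assumed numbers (16 groups added at once, EXT4_DESC_PER_BLOCK(sb) = 128, reserved_gdb = 2; the values are illustrative, not taken from the patch):

    old: credit = flex_gd->count * 4 + reserved_gdb
                = 16 * 4 + 2                         = 66
    new: credit = 3                          /* sb, resize inode, its dindirect */
                + 1 + DIV_ROUND_UP(16, 128)  /* GDT blocks: 1 + 1 */
                + 2                          /* reserved GDT dindirect blocks */
                                             = 7

The estimate now grows with the number of group-descriptor blocks actually touched rather than with the raw group count, so large flex-group additions no longer request far more journal credits than they use.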
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c index 19f78f20975e..187b78920314 100644 --- a/fs/ext4/symlink.c +++ b/fs/ext4/symlink.c | |||
@@ -74,7 +74,7 @@ static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
74 | goto errout; | 74 | goto errout; |
75 | } | 75 | } |
76 | pstr.name = paddr; | 76 | pstr.name = paddr; |
77 | res = _ext4_fname_disk_to_usr(ctx, &cstr, &pstr); | 77 | res = _ext4_fname_disk_to_usr(ctx, NULL, &cstr, &pstr); |
78 | if (res < 0) | 78 | if (res < 0) |
79 | goto errout; | 79 | goto errout; |
80 | /* Null-terminate the name */ | 80 | /* Null-terminate the name */ |
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index b91b0e10678e..1e1aae669fa8 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c | |||
@@ -1513,6 +1513,7 @@ static int f2fs_write_data_pages(struct address_space *mapping, | |||
1513 | { | 1513 | { |
1514 | struct inode *inode = mapping->host; | 1514 | struct inode *inode = mapping->host; |
1515 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | 1515 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); |
1516 | bool locked = false; | ||
1516 | int ret; | 1517 | int ret; |
1517 | long diff; | 1518 | long diff; |
1518 | 1519 | ||
@@ -1533,7 +1534,13 @@ static int f2fs_write_data_pages(struct address_space *mapping, | |||
1533 | 1534 | ||
1534 | diff = nr_pages_to_write(sbi, DATA, wbc); | 1535 | diff = nr_pages_to_write(sbi, DATA, wbc); |
1535 | 1536 | ||
1537 | if (!S_ISDIR(inode->i_mode)) { | ||
1538 | mutex_lock(&sbi->writepages); | ||
1539 | locked = true; | ||
1540 | } | ||
1536 | ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping); | 1541 | ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping); |
1542 | if (locked) | ||
1543 | mutex_unlock(&sbi->writepages); | ||
1537 | 1544 | ||
1538 | f2fs_submit_merged_bio(sbi, DATA, WRITE); | 1545 | f2fs_submit_merged_bio(sbi, DATA, WRITE); |
1539 | 1546 | ||
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index d8921cf2ba9a..8de34ab6d5b1 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h | |||
@@ -625,6 +625,7 @@ struct f2fs_sb_info { | |||
625 | struct mutex cp_mutex; /* checkpoint procedure lock */ | 625 | struct mutex cp_mutex; /* checkpoint procedure lock */ |
626 | struct rw_semaphore cp_rwsem; /* blocking FS operations */ | 626 | struct rw_semaphore cp_rwsem; /* blocking FS operations */ |
627 | struct rw_semaphore node_write; /* locking node writes */ | 627 | struct rw_semaphore node_write; /* locking node writes */ |
628 | struct mutex writepages; /* mutex for writepages() */ | ||
628 | wait_queue_head_t cp_wait; | 629 | wait_queue_head_t cp_wait; |
629 | 630 | ||
630 | struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */ | 631 | struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */ |
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 7e3794edae42..658e8079aaf9 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c | |||
@@ -298,16 +298,14 @@ fail: | |||
298 | 298 | ||
299 | static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd) | 299 | static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd) |
300 | { | 300 | { |
301 | struct page *page; | 301 | struct page *page = page_follow_link_light(dentry, nd); |
302 | 302 | ||
303 | page = page_follow_link_light(dentry, nd); | 303 | if (IS_ERR_OR_NULL(page)) |
304 | if (IS_ERR(page)) | ||
305 | return page; | 304 | return page; |
306 | 305 | ||
307 | /* this is broken symlink case */ | 306 | /* this is broken symlink case */ |
308 | if (*nd_get_link(nd) == 0) { | 307 | if (*nd_get_link(nd) == 0) { |
309 | kunmap(page); | 308 | page_put_link(dentry, nd, page); |
310 | page_cache_release(page); | ||
311 | return ERR_PTR(-ENOENT); | 309 | return ERR_PTR(-ENOENT); |
312 | } | 310 | } |
313 | return page; | 311 | return page; |
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 160b88346b24..b2dd1b01f076 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c | |||
@@ -1035,6 +1035,7 @@ try_onemore: | |||
1035 | sbi->raw_super = raw_super; | 1035 | sbi->raw_super = raw_super; |
1036 | sbi->raw_super_buf = raw_super_buf; | 1036 | sbi->raw_super_buf = raw_super_buf; |
1037 | mutex_init(&sbi->gc_mutex); | 1037 | mutex_init(&sbi->gc_mutex); |
1038 | mutex_init(&sbi->writepages); | ||
1038 | mutex_init(&sbi->cp_mutex); | 1039 | mutex_init(&sbi->cp_mutex); |
1039 | init_rwsem(&sbi->node_write); | 1040 | init_rwsem(&sbi->node_write); |
1040 | clear_sbi_flag(sbi, SBI_POR_DOING); | 1041 | clear_sbi_flag(sbi, SBI_POR_DOING); |
diff --git a/fs/namei.c b/fs/namei.c index 4a8d998b7274..fe30d3be43a8 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1415,6 +1415,7 @@ static int lookup_fast(struct nameidata *nd, | |||
1415 | */ | 1415 | */ |
1416 | if (nd->flags & LOOKUP_RCU) { | 1416 | if (nd->flags & LOOKUP_RCU) { |
1417 | unsigned seq; | 1417 | unsigned seq; |
1418 | bool negative; | ||
1418 | dentry = __d_lookup_rcu(parent, &nd->last, &seq); | 1419 | dentry = __d_lookup_rcu(parent, &nd->last, &seq); |
1419 | if (!dentry) | 1420 | if (!dentry) |
1420 | goto unlazy; | 1421 | goto unlazy; |
@@ -1424,8 +1425,11 @@ static int lookup_fast(struct nameidata *nd, | |||
1424 | * the dentry name information from lookup. | 1425 | * the dentry name information from lookup. |
1425 | */ | 1426 | */ |
1426 | *inode = dentry->d_inode; | 1427 | *inode = dentry->d_inode; |
1428 | negative = d_is_negative(dentry); | ||
1427 | if (read_seqcount_retry(&dentry->d_seq, seq)) | 1429 | if (read_seqcount_retry(&dentry->d_seq, seq)) |
1428 | return -ECHILD; | 1430 | return -ECHILD; |
1431 | if (negative) | ||
1432 | return -ENOENT; | ||
1429 | 1433 | ||
1430 | /* | 1434 | /* |
1431 | * This sequence count validates that the parent had no | 1435 | * This sequence count validates that the parent had no |
@@ -1472,6 +1476,10 @@ unlazy: | |||
1472 | goto need_lookup; | 1476 | goto need_lookup; |
1473 | } | 1477 | } |
1474 | 1478 | ||
1479 | if (unlikely(d_is_negative(dentry))) { | ||
1480 | dput(dentry); | ||
1481 | return -ENOENT; | ||
1482 | } | ||
1475 | path->mnt = mnt; | 1483 | path->mnt = mnt; |
1476 | path->dentry = dentry; | 1484 | path->dentry = dentry; |
1477 | err = follow_managed(path, nd->flags); | 1485 | err = follow_managed(path, nd->flags); |
@@ -1583,10 +1591,10 @@ static inline int walk_component(struct nameidata *nd, struct path *path, | |||
1583 | goto out_err; | 1591 | goto out_err; |
1584 | 1592 | ||
1585 | inode = path->dentry->d_inode; | 1593 | inode = path->dentry->d_inode; |
1594 | err = -ENOENT; | ||
1595 | if (d_is_negative(path->dentry)) | ||
1596 | goto out_path_put; | ||
1586 | } | 1597 | } |
1587 | err = -ENOENT; | ||
1588 | if (d_is_negative(path->dentry)) | ||
1589 | goto out_path_put; | ||
1590 | 1598 | ||
1591 | if (should_follow_link(path->dentry, follow)) { | 1599 | if (should_follow_link(path->dentry, follow)) { |
1592 | if (nd->flags & LOOKUP_RCU) { | 1600 | if (nd->flags & LOOKUP_RCU) { |
@@ -3036,14 +3044,13 @@ retry_lookup: | |||
3036 | 3044 | ||
3037 | BUG_ON(nd->flags & LOOKUP_RCU); | 3045 | BUG_ON(nd->flags & LOOKUP_RCU); |
3038 | inode = path->dentry->d_inode; | 3046 | inode = path->dentry->d_inode; |
3039 | finish_lookup: | ||
3040 | /* we _can_ be in RCU mode here */ | ||
3041 | error = -ENOENT; | 3047 | error = -ENOENT; |
3042 | if (d_is_negative(path->dentry)) { | 3048 | if (d_is_negative(path->dentry)) { |
3043 | path_to_nameidata(path, nd); | 3049 | path_to_nameidata(path, nd); |
3044 | goto out; | 3050 | goto out; |
3045 | } | 3051 | } |
3046 | 3052 | finish_lookup: | |
3053 | /* we _can_ be in RCU mode here */ | ||
3047 | if (should_follow_link(path->dentry, !symlink_ok)) { | 3054 | if (should_follow_link(path->dentry, !symlink_ok)) { |
3048 | if (nd->flags & LOOKUP_RCU) { | 3055 | if (nd->flags & LOOKUP_RCU) { |
3049 | if (unlikely(nd->path.mnt != path->mnt || | 3056 | if (unlikely(nd->path.mnt != path->mnt || |
@@ -3226,7 +3233,7 @@ static struct file *path_openat(int dfd, struct filename *pathname, | |||
3226 | 3233 | ||
3227 | if (unlikely(file->f_flags & __O_TMPFILE)) { | 3234 | if (unlikely(file->f_flags & __O_TMPFILE)) { |
3228 | error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened); | 3235 | error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened); |
3229 | goto out; | 3236 | goto out2; |
3230 | } | 3237 | } |
3231 | 3238 | ||
3232 | error = path_init(dfd, pathname, flags, nd); | 3239 | error = path_init(dfd, pathname, flags, nd); |
@@ -3256,6 +3263,7 @@ static struct file *path_openat(int dfd, struct filename *pathname, | |||
3256 | } | 3263 | } |
3257 | out: | 3264 | out: |
3258 | path_cleanup(nd); | 3265 | path_cleanup(nd); |
3266 | out2: | ||
3259 | if (!(opened & FILE_OPENED)) { | 3267 | if (!(opened & FILE_OPENED)) { |
3260 | BUG_ON(!error); | 3268 | BUG_ON(!error); |
3261 | put_filp(file); | 3269 | put_filp(file); |
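The lookup_fast() changes above follow the usual RCU-walk discipline: sample what you need from the dentry, validate d_seq, and only then act on the samples. A simplified, comment-annotated sketch of that ordering (distilled from the hunk for illustration, not the complete function):

    /*
     * Only reads performed before read_seqcount_retry() are covered by the
     * validation, so the negative flag is captured first and acted on
     * afterwards: -ENOENT is then based on a snapshot known to be consistent
     * with the inode pointer sampled alongside it.
     */
    *inode = dentry->d_inode;
    negative = d_is_negative(dentry);
    if (read_seqcount_retry(&dentry->d_seq, seq))
            return -ECHILD;     /* dentry changed under us, retry in ref-walk */
    if (negative)
            return -ENOENT;     /* validated snapshot: the name does not exist */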
diff --git a/fs/namespace.c b/fs/namespace.c index 1f4f9dac6e5a..1b9e11167bae 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -3179,6 +3179,12 @@ bool fs_fully_visible(struct file_system_type *type) | |||
3179 | if (mnt->mnt.mnt_sb->s_type != type) | 3179 | if (mnt->mnt.mnt_sb->s_type != type) |
3180 | continue; | 3180 | continue; |
3181 | 3181 | ||
3182 | /* This mount is not fully visible if its root directory ||
3183 | * is not the root directory of the filesystem. | ||
3184 | */ | ||
3185 | if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root) | ||
3186 | continue; | ||
3187 | |||
3182 | /* This mount is not fully visible if there are any child mounts | 3188 | /* This mount is not fully visible if there are any child mounts |
3183 | * that cover anything except for empty directories. | 3189 | * that cover anything except for empty directories. |
3184 | */ | 3190 | */ |
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index 03d647bf195d..cdefaa331a07 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c | |||
@@ -181,6 +181,17 @@ nfsd4_block_proc_layoutcommit(struct inode *inode, | |||
181 | } | 181 | } |
182 | 182 | ||
183 | const struct nfsd4_layout_ops bl_layout_ops = { | 183 | const struct nfsd4_layout_ops bl_layout_ops = { |
184 | /* | ||
185 | * Pretend that we send notifications to the client. This is a blatant ||
186 | * lie to force recent Linux clients to cache our device IDs. | ||
187 | * We rarely ever change the device ID, so the harm of leaking deviceids | ||
188 | * for a while isn't too bad. Unfortunately RFC5661 is a complete mess | ||
189 | * in this regard, but I filed errata 4119 for this a while ago, and | ||
190 | * hopefully the Linux client will eventually start caching deviceids | ||
191 | * without this again. | ||
192 | */ | ||
193 | .notify_types = | ||
194 | NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE, | ||
184 | .proc_getdeviceinfo = nfsd4_block_proc_getdeviceinfo, | 195 | .proc_getdeviceinfo = nfsd4_block_proc_getdeviceinfo, |
185 | .encode_getdeviceinfo = nfsd4_block_encode_getdeviceinfo, | 196 | .encode_getdeviceinfo = nfsd4_block_encode_getdeviceinfo, |
186 | .proc_layoutget = nfsd4_block_proc_layoutget, | 197 | .proc_layoutget = nfsd4_block_proc_layoutget, |
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 58277859a467..5694cfb7a47b 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c | |||
@@ -224,7 +224,7 @@ static int nfs_cb_stat_to_errno(int status) | |||
224 | } | 224 | } |
225 | 225 | ||
226 | static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected, | 226 | static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected, |
227 | enum nfsstat4 *status) | 227 | int *status) |
228 | { | 228 | { |
229 | __be32 *p; | 229 | __be32 *p; |
230 | u32 op; | 230 | u32 op; |
@@ -235,7 +235,7 @@ static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected, | |||
235 | op = be32_to_cpup(p++); | 235 | op = be32_to_cpup(p++); |
236 | if (unlikely(op != expected)) | 236 | if (unlikely(op != expected)) |
237 | goto out_unexpected; | 237 | goto out_unexpected; |
238 | *status = be32_to_cpup(p); | 238 | *status = nfs_cb_stat_to_errno(be32_to_cpup(p)); |
239 | return 0; | 239 | return 0; |
240 | out_overflow: | 240 | out_overflow: |
241 | print_overflow_msg(__func__, xdr); | 241 | print_overflow_msg(__func__, xdr); |
@@ -446,22 +446,16 @@ out_overflow: | |||
446 | static int decode_cb_sequence4res(struct xdr_stream *xdr, | 446 | static int decode_cb_sequence4res(struct xdr_stream *xdr, |
447 | struct nfsd4_callback *cb) | 447 | struct nfsd4_callback *cb) |
448 | { | 448 | { |
449 | enum nfsstat4 nfserr; | ||
450 | int status; | 449 | int status; |
451 | 450 | ||
452 | if (cb->cb_minorversion == 0) | 451 | if (cb->cb_minorversion == 0) |
453 | return 0; | 452 | return 0; |
454 | 453 | ||
455 | status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &nfserr); | 454 | status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_status); |
456 | if (unlikely(status)) | 455 | if (unlikely(status || cb->cb_status)) |
457 | goto out; | 456 | return status; |
458 | if (unlikely(nfserr != NFS4_OK)) | 457 | |
459 | goto out_default; | 458 | return decode_cb_sequence4resok(xdr, cb); |
460 | status = decode_cb_sequence4resok(xdr, cb); | ||
461 | out: | ||
462 | return status; | ||
463 | out_default: | ||
464 | return nfs_cb_stat_to_errno(nfserr); | ||
465 | } | 459 | } |
466 | 460 | ||
467 | /* | 461 | /* |
@@ -524,26 +518,19 @@ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, | |||
524 | struct nfsd4_callback *cb) | 518 | struct nfsd4_callback *cb) |
525 | { | 519 | { |
526 | struct nfs4_cb_compound_hdr hdr; | 520 | struct nfs4_cb_compound_hdr hdr; |
527 | enum nfsstat4 nfserr; | ||
528 | int status; | 521 | int status; |
529 | 522 | ||
530 | status = decode_cb_compound4res(xdr, &hdr); | 523 | status = decode_cb_compound4res(xdr, &hdr); |
531 | if (unlikely(status)) | 524 | if (unlikely(status)) |
532 | goto out; | 525 | return status; |
533 | 526 | ||
534 | if (cb != NULL) { | 527 | if (cb != NULL) { |
535 | status = decode_cb_sequence4res(xdr, cb); | 528 | status = decode_cb_sequence4res(xdr, cb); |
536 | if (unlikely(status)) | 529 | if (unlikely(status || cb->cb_status)) |
537 | goto out; | 530 | return status; |
538 | } | 531 | } |
539 | 532 | ||
540 | status = decode_cb_op_status(xdr, OP_CB_RECALL, &nfserr); | 533 | return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status); |
541 | if (unlikely(status)) | ||
542 | goto out; | ||
543 | if (unlikely(nfserr != NFS4_OK)) | ||
544 | status = nfs_cb_stat_to_errno(nfserr); | ||
545 | out: | ||
546 | return status; | ||
547 | } | 534 | } |
548 | 535 | ||
549 | #ifdef CONFIG_NFSD_PNFS | 536 | #ifdef CONFIG_NFSD_PNFS |
@@ -621,24 +608,18 @@ static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp, | |||
621 | struct nfsd4_callback *cb) | 608 | struct nfsd4_callback *cb) |
622 | { | 609 | { |
623 | struct nfs4_cb_compound_hdr hdr; | 610 | struct nfs4_cb_compound_hdr hdr; |
624 | enum nfsstat4 nfserr; | ||
625 | int status; | 611 | int status; |
626 | 612 | ||
627 | status = decode_cb_compound4res(xdr, &hdr); | 613 | status = decode_cb_compound4res(xdr, &hdr); |
628 | if (unlikely(status)) | 614 | if (unlikely(status)) |
629 | goto out; | 615 | return status; |
616 | |||
630 | if (cb) { | 617 | if (cb) { |
631 | status = decode_cb_sequence4res(xdr, cb); | 618 | status = decode_cb_sequence4res(xdr, cb); |
632 | if (unlikely(status)) | 619 | if (unlikely(status || cb->cb_status)) |
633 | goto out; | 620 | return status; |
634 | } | 621 | } |
635 | status = decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &nfserr); | 622 | return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status); |
636 | if (unlikely(status)) | ||
637 | goto out; | ||
638 | if (unlikely(nfserr != NFS4_OK)) | ||
639 | status = nfs_cb_stat_to_errno(nfserr); | ||
640 | out: | ||
641 | return status; | ||
642 | } | 623 | } |
643 | #endif /* CONFIG_NFSD_PNFS */ | 624 | #endif /* CONFIG_NFSD_PNFS */ |
644 | 625 | ||
@@ -898,13 +879,6 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) | |||
898 | if (!nfsd41_cb_get_slot(clp, task)) | 879 | if (!nfsd41_cb_get_slot(clp, task)) |
899 | return; | 880 | return; |
900 | } | 881 | } |
901 | spin_lock(&clp->cl_lock); | ||
902 | if (list_empty(&cb->cb_per_client)) { | ||
903 | /* This is the first call, not a restart */ | ||
904 | cb->cb_done = false; | ||
905 | list_add(&cb->cb_per_client, &clp->cl_callbacks); | ||
906 | } | ||
907 | spin_unlock(&clp->cl_lock); | ||
908 | rpc_call_start(task); | 882 | rpc_call_start(task); |
909 | } | 883 | } |
910 | 884 | ||
@@ -918,22 +892,33 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata) | |||
918 | 892 | ||
919 | if (clp->cl_minorversion) { | 893 | if (clp->cl_minorversion) { |
920 | /* No need for lock, access serialized in nfsd4_cb_prepare */ | 894 | /* No need for lock, access serialized in nfsd4_cb_prepare */ |
921 | ++clp->cl_cb_session->se_cb_seq_nr; | 895 | if (!task->tk_status) |
896 | ++clp->cl_cb_session->se_cb_seq_nr; | ||
922 | clear_bit(0, &clp->cl_cb_slot_busy); | 897 | clear_bit(0, &clp->cl_cb_slot_busy); |
923 | rpc_wake_up_next(&clp->cl_cb_waitq); | 898 | rpc_wake_up_next(&clp->cl_cb_waitq); |
924 | dprintk("%s: freed slot, new seqid=%d\n", __func__, | 899 | dprintk("%s: freed slot, new seqid=%d\n", __func__, |
925 | clp->cl_cb_session->se_cb_seq_nr); | 900 | clp->cl_cb_session->se_cb_seq_nr); |
926 | } | 901 | } |
927 | 902 | ||
928 | if (clp->cl_cb_client != task->tk_client) { | 903 | /* |
929 | /* We're shutting down or changing cl_cb_client; leave | 904 | * If the backchannel connection was shut down while this |
930 | * it to nfsd4_process_cb_update to restart the call if | 905 | * task was queued, we need to resubmit it after setting up |
931 | * necessary. */ | 906 | * a new backchannel connection. |
907 | * | ||
908 | * Note that if we lost our callback connection permanently | ||
909 | * the submission code will error out, so we don't need to | ||
910 | * handle that case here. | ||
911 | */ | ||
912 | if (task->tk_flags & RPC_TASK_KILLED) { | ||
913 | task->tk_status = 0; | ||
914 | cb->cb_need_restart = true; | ||
932 | return; | 915 | return; |
933 | } | 916 | } |
934 | 917 | ||
935 | if (cb->cb_done) | 918 | if (cb->cb_status) { |
936 | return; | 919 | WARN_ON_ONCE(task->tk_status); |
920 | task->tk_status = cb->cb_status; | ||
921 | } | ||
937 | 922 | ||
938 | switch (cb->cb_ops->done(cb, task)) { | 923 | switch (cb->cb_ops->done(cb, task)) { |
939 | case 0: | 924 | case 0: |
@@ -949,21 +934,17 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata) | |||
949 | default: | 934 | default: |
950 | BUG(); | 935 | BUG(); |
951 | } | 936 | } |
952 | cb->cb_done = true; | ||
953 | } | 937 | } |
954 | 938 | ||
955 | static void nfsd4_cb_release(void *calldata) | 939 | static void nfsd4_cb_release(void *calldata) |
956 | { | 940 | { |
957 | struct nfsd4_callback *cb = calldata; | 941 | struct nfsd4_callback *cb = calldata; |
958 | struct nfs4_client *clp = cb->cb_clp; | ||
959 | |||
960 | if (cb->cb_done) { | ||
961 | spin_lock(&clp->cl_lock); | ||
962 | list_del(&cb->cb_per_client); | ||
963 | spin_unlock(&clp->cl_lock); | ||
964 | 942 | ||
943 | if (cb->cb_need_restart) | ||
944 | nfsd4_run_cb(cb); | ||
945 | else | ||
965 | cb->cb_ops->release(cb); | 946 | cb->cb_ops->release(cb); |
966 | } | 947 | |
967 | } | 948 | } |
968 | 949 | ||
969 | static const struct rpc_call_ops nfsd4_cb_ops = { | 950 | static const struct rpc_call_ops nfsd4_cb_ops = { |
@@ -1058,9 +1039,6 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb) | |||
1058 | nfsd4_mark_cb_down(clp, err); | 1039 | nfsd4_mark_cb_down(clp, err); |
1059 | return; | 1040 | return; |
1060 | } | 1041 | } |
1061 | /* Yay, the callback channel's back! Restart any callbacks: */ | ||
1062 | list_for_each_entry(cb, &clp->cl_callbacks, cb_per_client) | ||
1063 | queue_work(callback_wq, &cb->cb_work); | ||
1064 | } | 1042 | } |
1065 | 1043 | ||
1066 | static void | 1044 | static void |
@@ -1071,8 +1049,12 @@ nfsd4_run_cb_work(struct work_struct *work) | |||
1071 | struct nfs4_client *clp = cb->cb_clp; | 1049 | struct nfs4_client *clp = cb->cb_clp; |
1072 | struct rpc_clnt *clnt; | 1050 | struct rpc_clnt *clnt; |
1073 | 1051 | ||
1074 | if (cb->cb_ops && cb->cb_ops->prepare) | 1052 | if (cb->cb_need_restart) { |
1075 | cb->cb_ops->prepare(cb); | 1053 | cb->cb_need_restart = false; |
1054 | } else { | ||
1055 | if (cb->cb_ops && cb->cb_ops->prepare) | ||
1056 | cb->cb_ops->prepare(cb); | ||
1057 | } | ||
1076 | 1058 | ||
1077 | if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK) | 1059 | if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK) |
1078 | nfsd4_process_cb_update(cb); | 1060 | nfsd4_process_cb_update(cb); |
@@ -1084,6 +1066,15 @@ nfsd4_run_cb_work(struct work_struct *work) | |||
1084 | cb->cb_ops->release(cb); | 1066 | cb->cb_ops->release(cb); |
1085 | return; | 1067 | return; |
1086 | } | 1068 | } |
1069 | |||
1070 | /* | ||
1071 | * Don't send probe messages for 4.1 or later. | ||
1072 | */ | ||
1073 | if (!cb->cb_ops && clp->cl_minorversion) { | ||
1074 | clp->cl_cb_state = NFSD4_CB_UP; | ||
1075 | return; | ||
1076 | } | ||
1077 | |||
1087 | cb->cb_msg.rpc_cred = clp->cl_cb_cred; | 1078 | cb->cb_msg.rpc_cred = clp->cl_cb_cred; |
1088 | rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN, | 1079 | rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN, |
1089 | cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb); | 1080 | cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb); |
@@ -1098,8 +1089,8 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp, | |||
1098 | cb->cb_msg.rpc_resp = cb; | 1089 | cb->cb_msg.rpc_resp = cb; |
1099 | cb->cb_ops = ops; | 1090 | cb->cb_ops = ops; |
1100 | INIT_WORK(&cb->cb_work, nfsd4_run_cb_work); | 1091 | INIT_WORK(&cb->cb_work, nfsd4_run_cb_work); |
1101 | INIT_LIST_HEAD(&cb->cb_per_client); | 1092 | cb->cb_status = 0; |
1102 | cb->cb_done = true; | 1093 | cb->cb_need_restart = false; |
1103 | } | 1094 | } |
1104 | 1095 | ||
1105 | void nfsd4_run_cb(struct nfsd4_callback *cb) | 1096 | void nfsd4_run_cb(struct nfsd4_callback *cb) |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 38f2d7abe3a7..039f9c8a95e8 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -94,6 +94,7 @@ static struct kmem_cache *lockowner_slab; | |||
94 | static struct kmem_cache *file_slab; | 94 | static struct kmem_cache *file_slab; |
95 | static struct kmem_cache *stateid_slab; | 95 | static struct kmem_cache *stateid_slab; |
96 | static struct kmem_cache *deleg_slab; | 96 | static struct kmem_cache *deleg_slab; |
97 | static struct kmem_cache *odstate_slab; | ||
97 | 98 | ||
98 | static void free_session(struct nfsd4_session *); | 99 | static void free_session(struct nfsd4_session *); |
99 | 100 | ||
@@ -281,6 +282,7 @@ put_nfs4_file(struct nfs4_file *fi) | |||
281 | if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) { | 282 | if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) { |
282 | hlist_del_rcu(&fi->fi_hash); | 283 | hlist_del_rcu(&fi->fi_hash); |
283 | spin_unlock(&state_lock); | 284 | spin_unlock(&state_lock); |
285 | WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate)); | ||
284 | WARN_ON_ONCE(!list_empty(&fi->fi_delegations)); | 286 | WARN_ON_ONCE(!list_empty(&fi->fi_delegations)); |
285 | call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu); | 287 | call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu); |
286 | } | 288 | } |
@@ -471,6 +473,86 @@ static void nfs4_file_put_access(struct nfs4_file *fp, u32 access) | |||
471 | __nfs4_file_put_access(fp, O_RDONLY); | 473 | __nfs4_file_put_access(fp, O_RDONLY); |
472 | } | 474 | } |
473 | 475 | ||
476 | /* | ||
477 | * Allocate a new open/delegation state counter. This is needed for | ||
478 | * pNFS for proper return on close semantics. | ||
479 | * | ||
480 | * Note that we only allocate it for pNFS-enabled exports, otherwise | ||
481 | * all pointers to struct nfs4_clnt_odstate are always NULL. | ||
482 | */ | ||
483 | static struct nfs4_clnt_odstate * | ||
484 | alloc_clnt_odstate(struct nfs4_client *clp) | ||
485 | { | ||
486 | struct nfs4_clnt_odstate *co; | ||
487 | |||
488 | co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL); | ||
489 | if (co) { | ||
490 | co->co_client = clp; | ||
491 | atomic_set(&co->co_odcount, 1); | ||
492 | } | ||
493 | return co; | ||
494 | } | ||
495 | |||
496 | static void | ||
497 | hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co) | ||
498 | { | ||
499 | struct nfs4_file *fp = co->co_file; | ||
500 | |||
501 | lockdep_assert_held(&fp->fi_lock); | ||
502 | list_add(&co->co_perfile, &fp->fi_clnt_odstate); | ||
503 | } | ||
504 | |||
505 | static inline void | ||
506 | get_clnt_odstate(struct nfs4_clnt_odstate *co) | ||
507 | { | ||
508 | if (co) | ||
509 | atomic_inc(&co->co_odcount); | ||
510 | } | ||
511 | |||
512 | static void | ||
513 | put_clnt_odstate(struct nfs4_clnt_odstate *co) | ||
514 | { | ||
515 | struct nfs4_file *fp; | ||
516 | |||
517 | if (!co) | ||
518 | return; | ||
519 | |||
520 | fp = co->co_file; | ||
521 | if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) { | ||
522 | list_del(&co->co_perfile); | ||
523 | spin_unlock(&fp->fi_lock); | ||
524 | |||
525 | nfsd4_return_all_file_layouts(co->co_client, fp); | ||
526 | kmem_cache_free(odstate_slab, co); | ||
527 | } | ||
528 | } | ||
529 | |||
530 | static struct nfs4_clnt_odstate * | ||
531 | find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new) | ||
532 | { | ||
533 | struct nfs4_clnt_odstate *co; | ||
534 | struct nfs4_client *cl; | ||
535 | |||
536 | if (!new) | ||
537 | return NULL; | ||
538 | |||
539 | cl = new->co_client; | ||
540 | |||
541 | spin_lock(&fp->fi_lock); | ||
542 | list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) { | ||
543 | if (co->co_client == cl) { | ||
544 | get_clnt_odstate(co); | ||
545 | goto out; | ||
546 | } | ||
547 | } | ||
548 | co = new; | ||
549 | co->co_file = fp; | ||
550 | hash_clnt_odstate_locked(new); | ||
551 | out: | ||
552 | spin_unlock(&fp->fi_lock); | ||
553 | return co; | ||
554 | } | ||
555 | |||
474 | struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, | 556 | struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, |
475 | struct kmem_cache *slab) | 557 | struct kmem_cache *slab) |
476 | { | 558 | { |
@@ -606,7 +688,8 @@ static void block_delegations(struct knfsd_fh *fh) | |||
606 | } | 688 | } |
607 | 689 | ||
608 | static struct nfs4_delegation * | 690 | static struct nfs4_delegation * |
609 | alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh) | 691 | alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh, |
692 | struct nfs4_clnt_odstate *odstate) | ||
610 | { | 693 | { |
611 | struct nfs4_delegation *dp; | 694 | struct nfs4_delegation *dp; |
612 | long n; | 695 | long n; |
@@ -631,6 +714,8 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh) | |||
631 | INIT_LIST_HEAD(&dp->dl_perfile); | 714 | INIT_LIST_HEAD(&dp->dl_perfile); |
632 | INIT_LIST_HEAD(&dp->dl_perclnt); | 715 | INIT_LIST_HEAD(&dp->dl_perclnt); |
633 | INIT_LIST_HEAD(&dp->dl_recall_lru); | 716 | INIT_LIST_HEAD(&dp->dl_recall_lru); |
717 | dp->dl_clnt_odstate = odstate; | ||
718 | get_clnt_odstate(odstate); | ||
634 | dp->dl_type = NFS4_OPEN_DELEGATE_READ; | 719 | dp->dl_type = NFS4_OPEN_DELEGATE_READ; |
635 | dp->dl_retries = 1; | 720 | dp->dl_retries = 1; |
636 | nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client, | 721 | nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client, |
@@ -714,6 +799,7 @@ static void destroy_delegation(struct nfs4_delegation *dp) | |||
714 | spin_lock(&state_lock); | 799 | spin_lock(&state_lock); |
715 | unhash_delegation_locked(dp); | 800 | unhash_delegation_locked(dp); |
716 | spin_unlock(&state_lock); | 801 | spin_unlock(&state_lock); |
802 | put_clnt_odstate(dp->dl_clnt_odstate); | ||
717 | nfs4_put_deleg_lease(dp->dl_stid.sc_file); | 803 | nfs4_put_deleg_lease(dp->dl_stid.sc_file); |
718 | nfs4_put_stid(&dp->dl_stid); | 804 | nfs4_put_stid(&dp->dl_stid); |
719 | } | 805 | } |
@@ -724,6 +810,7 @@ static void revoke_delegation(struct nfs4_delegation *dp) | |||
724 | 810 | ||
725 | WARN_ON(!list_empty(&dp->dl_recall_lru)); | 811 | WARN_ON(!list_empty(&dp->dl_recall_lru)); |
726 | 812 | ||
813 | put_clnt_odstate(dp->dl_clnt_odstate); | ||
727 | nfs4_put_deleg_lease(dp->dl_stid.sc_file); | 814 | nfs4_put_deleg_lease(dp->dl_stid.sc_file); |
728 | 815 | ||
729 | if (clp->cl_minorversion == 0) | 816 | if (clp->cl_minorversion == 0) |
@@ -933,6 +1020,7 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid) | |||
933 | { | 1020 | { |
934 | struct nfs4_ol_stateid *stp = openlockstateid(stid); | 1021 | struct nfs4_ol_stateid *stp = openlockstateid(stid); |
935 | 1022 | ||
1023 | put_clnt_odstate(stp->st_clnt_odstate); | ||
936 | release_all_access(stp); | 1024 | release_all_access(stp); |
937 | if (stp->st_stateowner) | 1025 | if (stp->st_stateowner) |
938 | nfs4_put_stateowner(stp->st_stateowner); | 1026 | nfs4_put_stateowner(stp->st_stateowner); |
@@ -1538,7 +1626,6 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name) | |||
1538 | INIT_LIST_HEAD(&clp->cl_openowners); | 1626 | INIT_LIST_HEAD(&clp->cl_openowners); |
1539 | INIT_LIST_HEAD(&clp->cl_delegations); | 1627 | INIT_LIST_HEAD(&clp->cl_delegations); |
1540 | INIT_LIST_HEAD(&clp->cl_lru); | 1628 | INIT_LIST_HEAD(&clp->cl_lru); |
1541 | INIT_LIST_HEAD(&clp->cl_callbacks); | ||
1542 | INIT_LIST_HEAD(&clp->cl_revoked); | 1629 | INIT_LIST_HEAD(&clp->cl_revoked); |
1543 | #ifdef CONFIG_NFSD_PNFS | 1630 | #ifdef CONFIG_NFSD_PNFS |
1544 | INIT_LIST_HEAD(&clp->cl_lo_states); | 1631 | INIT_LIST_HEAD(&clp->cl_lo_states); |
@@ -1634,6 +1721,7 @@ __destroy_client(struct nfs4_client *clp) | |||
1634 | while (!list_empty(&reaplist)) { | 1721 | while (!list_empty(&reaplist)) { |
1635 | dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); | 1722 | dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); |
1636 | list_del_init(&dp->dl_recall_lru); | 1723 | list_del_init(&dp->dl_recall_lru); |
1724 | put_clnt_odstate(dp->dl_clnt_odstate); | ||
1637 | nfs4_put_deleg_lease(dp->dl_stid.sc_file); | 1725 | nfs4_put_deleg_lease(dp->dl_stid.sc_file); |
1638 | nfs4_put_stid(&dp->dl_stid); | 1726 | nfs4_put_stid(&dp->dl_stid); |
1639 | } | 1727 | } |
@@ -3057,6 +3145,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval, | |||
3057 | spin_lock_init(&fp->fi_lock); | 3145 | spin_lock_init(&fp->fi_lock); |
3058 | INIT_LIST_HEAD(&fp->fi_stateids); | 3146 | INIT_LIST_HEAD(&fp->fi_stateids); |
3059 | INIT_LIST_HEAD(&fp->fi_delegations); | 3147 | INIT_LIST_HEAD(&fp->fi_delegations); |
3148 | INIT_LIST_HEAD(&fp->fi_clnt_odstate); | ||
3060 | fh_copy_shallow(&fp->fi_fhandle, fh); | 3149 | fh_copy_shallow(&fp->fi_fhandle, fh); |
3061 | fp->fi_deleg_file = NULL; | 3150 | fp->fi_deleg_file = NULL; |
3062 | fp->fi_had_conflict = false; | 3151 | fp->fi_had_conflict = false; |
@@ -3073,6 +3162,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval, | |||
3073 | void | 3162 | void |
3074 | nfsd4_free_slabs(void) | 3163 | nfsd4_free_slabs(void) |
3075 | { | 3164 | { |
3165 | kmem_cache_destroy(odstate_slab); | ||
3076 | kmem_cache_destroy(openowner_slab); | 3166 | kmem_cache_destroy(openowner_slab); |
3077 | kmem_cache_destroy(lockowner_slab); | 3167 | kmem_cache_destroy(lockowner_slab); |
3078 | kmem_cache_destroy(file_slab); | 3168 | kmem_cache_destroy(file_slab); |
@@ -3103,8 +3193,14 @@ nfsd4_init_slabs(void) | |||
3103 | sizeof(struct nfs4_delegation), 0, 0, NULL); | 3193 | sizeof(struct nfs4_delegation), 0, 0, NULL); |
3104 | if (deleg_slab == NULL) | 3194 | if (deleg_slab == NULL) |
3105 | goto out_free_stateid_slab; | 3195 | goto out_free_stateid_slab; |
3196 | odstate_slab = kmem_cache_create("nfsd4_odstate", | ||
3197 | sizeof(struct nfs4_clnt_odstate), 0, 0, NULL); | ||
3198 | if (odstate_slab == NULL) | ||
3199 | goto out_free_deleg_slab; | ||
3106 | return 0; | 3200 | return 0; |
3107 | 3201 | ||
3202 | out_free_deleg_slab: | ||
3203 | kmem_cache_destroy(deleg_slab); | ||
3108 | out_free_stateid_slab: | 3204 | out_free_stateid_slab: |
3109 | kmem_cache_destroy(stateid_slab); | 3205 | kmem_cache_destroy(stateid_slab); |
3110 | out_free_file_slab: | 3206 | out_free_file_slab: |
@@ -3581,6 +3677,14 @@ alloc_stateid: | |||
3581 | open->op_stp = nfs4_alloc_open_stateid(clp); | 3677 | open->op_stp = nfs4_alloc_open_stateid(clp); |
3582 | if (!open->op_stp) | 3678 | if (!open->op_stp) |
3583 | return nfserr_jukebox; | 3679 | return nfserr_jukebox; |
3680 | |||
3681 | if (nfsd4_has_session(cstate) && | ||
3682 | (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) { | ||
3683 | open->op_odstate = alloc_clnt_odstate(clp); | ||
3684 | if (!open->op_odstate) | ||
3685 | return nfserr_jukebox; | ||
3686 | } | ||
3687 | |||
3584 | return nfs_ok; | 3688 | return nfs_ok; |
3585 | } | 3689 | } |
3586 | 3690 | ||
@@ -3869,7 +3973,7 @@ out_fput: | |||
3869 | 3973 | ||
3870 | static struct nfs4_delegation * | 3974 | static struct nfs4_delegation * |
3871 | nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, | 3975 | nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, |
3872 | struct nfs4_file *fp) | 3976 | struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate) |
3873 | { | 3977 | { |
3874 | int status; | 3978 | int status; |
3875 | struct nfs4_delegation *dp; | 3979 | struct nfs4_delegation *dp; |
@@ -3877,7 +3981,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, | |||
3877 | if (fp->fi_had_conflict) | 3981 | if (fp->fi_had_conflict) |
3878 | return ERR_PTR(-EAGAIN); | 3982 | return ERR_PTR(-EAGAIN); |
3879 | 3983 | ||
3880 | dp = alloc_init_deleg(clp, fh); | 3984 | dp = alloc_init_deleg(clp, fh, odstate); |
3881 | if (!dp) | 3985 | if (!dp) |
3882 | return ERR_PTR(-ENOMEM); | 3986 | return ERR_PTR(-ENOMEM); |
3883 | 3987 | ||
@@ -3903,6 +4007,7 @@ out_unlock: | |||
3903 | spin_unlock(&state_lock); | 4007 | spin_unlock(&state_lock); |
3904 | out: | 4008 | out: |
3905 | if (status) { | 4009 | if (status) { |
4010 | put_clnt_odstate(dp->dl_clnt_odstate); | ||
3906 | nfs4_put_stid(&dp->dl_stid); | 4011 | nfs4_put_stid(&dp->dl_stid); |
3907 | return ERR_PTR(status); | 4012 | return ERR_PTR(status); |
3908 | } | 4013 | } |
@@ -3980,7 +4085,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, | |||
3980 | default: | 4085 | default: |
3981 | goto out_no_deleg; | 4086 | goto out_no_deleg; |
3982 | } | 4087 | } |
3983 | dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file); | 4088 | dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate); |
3984 | if (IS_ERR(dp)) | 4089 | if (IS_ERR(dp)) |
3985 | goto out_no_deleg; | 4090 | goto out_no_deleg; |
3986 | 4091 | ||
@@ -4069,6 +4174,11 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf | |||
4069 | release_open_stateid(stp); | 4174 | release_open_stateid(stp); |
4070 | goto out; | 4175 | goto out; |
4071 | } | 4176 | } |
4177 | |||
4178 | stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp, | ||
4179 | open->op_odstate); | ||
4180 | if (stp->st_clnt_odstate == open->op_odstate) | ||
4181 | open->op_odstate = NULL; | ||
4072 | } | 4182 | } |
4073 | update_stateid(&stp->st_stid.sc_stateid); | 4183 | update_stateid(&stp->st_stid.sc_stateid); |
4074 | memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); | 4184 | memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); |
@@ -4129,6 +4239,8 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate, | |||
4129 | kmem_cache_free(file_slab, open->op_file); | 4239 | kmem_cache_free(file_slab, open->op_file); |
4130 | if (open->op_stp) | 4240 | if (open->op_stp) |
4131 | nfs4_put_stid(&open->op_stp->st_stid); | 4241 | nfs4_put_stid(&open->op_stp->st_stid); |
4242 | if (open->op_odstate) | ||
4243 | kmem_cache_free(odstate_slab, open->op_odstate); | ||
4132 | } | 4244 | } |
4133 | 4245 | ||
4134 | __be32 | 4246 | __be32 |
@@ -4385,10 +4497,17 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s | |||
4385 | return nfserr_old_stateid; | 4497 | return nfserr_old_stateid; |
4386 | } | 4498 | } |
4387 | 4499 | ||
4500 | static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols) | ||
4501 | { | ||
4502 | if (ols->st_stateowner->so_is_open_owner && | ||
4503 | !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) | ||
4504 | return nfserr_bad_stateid; | ||
4505 | return nfs_ok; | ||
4506 | } | ||
4507 | |||
4388 | static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) | 4508 | static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) |
4389 | { | 4509 | { |
4390 | struct nfs4_stid *s; | 4510 | struct nfs4_stid *s; |
4391 | struct nfs4_ol_stateid *ols; | ||
4392 | __be32 status = nfserr_bad_stateid; | 4511 | __be32 status = nfserr_bad_stateid; |
4393 | 4512 | ||
4394 | if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) | 4513 | if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) |
@@ -4418,13 +4537,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) | |||
4418 | break; | 4537 | break; |
4419 | case NFS4_OPEN_STID: | 4538 | case NFS4_OPEN_STID: |
4420 | case NFS4_LOCK_STID: | 4539 | case NFS4_LOCK_STID: |
4421 | ols = openlockstateid(s); | 4540 | status = nfsd4_check_openowner_confirmed(openlockstateid(s)); |
4422 | if (ols->st_stateowner->so_is_open_owner | ||
4423 | && !(openowner(ols->st_stateowner)->oo_flags | ||
4424 | & NFS4_OO_CONFIRMED)) | ||
4425 | status = nfserr_bad_stateid; | ||
4426 | else | ||
4427 | status = nfs_ok; | ||
4428 | break; | 4541 | break; |
4429 | default: | 4542 | default: |
4430 | printk("unknown stateid type %x\n", s->sc_type); | 4543 | printk("unknown stateid type %x\n", s->sc_type); |
@@ -4516,8 +4629,8 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate, | |||
4516 | status = nfs4_check_fh(current_fh, stp); | 4629 | status = nfs4_check_fh(current_fh, stp); |
4517 | if (status) | 4630 | if (status) |
4518 | goto out; | 4631 | goto out; |
4519 | if (stp->st_stateowner->so_is_open_owner | 4632 | status = nfsd4_check_openowner_confirmed(stp); |
4520 | && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) | 4633 | if (status) |
4521 | goto out; | 4634 | goto out; |
4522 | status = nfs4_check_openmode(stp, flags); | 4635 | status = nfs4_check_openmode(stp, flags); |
4523 | if (status) | 4636 | if (status) |
@@ -4852,9 +4965,6 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4852 | update_stateid(&stp->st_stid.sc_stateid); | 4965 | update_stateid(&stp->st_stid.sc_stateid); |
4853 | memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); | 4966 | memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); |
4854 | 4967 | ||
4855 | nfsd4_return_all_file_layouts(stp->st_stateowner->so_client, | ||
4856 | stp->st_stid.sc_file); | ||
4857 | |||
4858 | nfsd4_close_open_stateid(stp); | 4968 | nfsd4_close_open_stateid(stp); |
4859 | 4969 | ||
4860 | /* put reference from nfs4_preprocess_seqid_op */ | 4970 | /* put reference from nfs4_preprocess_seqid_op */ |
@@ -6488,6 +6598,7 @@ nfs4_state_shutdown_net(struct net *net) | |||
6488 | list_for_each_safe(pos, next, &reaplist) { | 6598 | list_for_each_safe(pos, next, &reaplist) { |
6489 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); | 6599 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); |
6490 | list_del_init(&dp->dl_recall_lru); | 6600 | list_del_init(&dp->dl_recall_lru); |
6601 | put_clnt_odstate(dp->dl_clnt_odstate); | ||
6491 | nfs4_put_deleg_lease(dp->dl_stid.sc_file); | 6602 | nfs4_put_deleg_lease(dp->dl_stid.sc_file); |
6492 | nfs4_put_stid(&dp->dl_stid); | 6603 | nfs4_put_stid(&dp->dl_stid); |
6493 | } | 6604 | } |
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 4f3bfeb11766..dbc4f85a5008 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h | |||
@@ -63,12 +63,12 @@ typedef struct { | |||
63 | 63 | ||
64 | struct nfsd4_callback { | 64 | struct nfsd4_callback { |
65 | struct nfs4_client *cb_clp; | 65 | struct nfs4_client *cb_clp; |
66 | struct list_head cb_per_client; | ||
67 | u32 cb_minorversion; | 66 | u32 cb_minorversion; |
68 | struct rpc_message cb_msg; | 67 | struct rpc_message cb_msg; |
69 | struct nfsd4_callback_ops *cb_ops; | 68 | struct nfsd4_callback_ops *cb_ops; |
70 | struct work_struct cb_work; | 69 | struct work_struct cb_work; |
71 | bool cb_done; | 70 | int cb_status; |
71 | bool cb_need_restart; | ||
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct nfsd4_callback_ops { | 74 | struct nfsd4_callback_ops { |
@@ -126,6 +126,7 @@ struct nfs4_delegation { | |||
126 | struct list_head dl_perfile; | 126 | struct list_head dl_perfile; |
127 | struct list_head dl_perclnt; | 127 | struct list_head dl_perclnt; |
128 | struct list_head dl_recall_lru; /* delegation recalled */ | 128 | struct list_head dl_recall_lru; /* delegation recalled */ |
129 | struct nfs4_clnt_odstate *dl_clnt_odstate; | ||
129 | u32 dl_type; | 130 | u32 dl_type; |
130 | time_t dl_time; | 131 | time_t dl_time; |
131 | /* For recall: */ | 132 | /* For recall: */ |
@@ -332,7 +333,6 @@ struct nfs4_client { | |||
332 | int cl_cb_state; | 333 | int cl_cb_state; |
333 | struct nfsd4_callback cl_cb_null; | 334 | struct nfsd4_callback cl_cb_null; |
334 | struct nfsd4_session *cl_cb_session; | 335 | struct nfsd4_session *cl_cb_session; |
335 | struct list_head cl_callbacks; /* list of in-progress callbacks */ | ||
336 | 336 | ||
337 | /* for all client information that callback code might need: */ | 337 | /* for all client information that callback code might need: */ |
338 | spinlock_t cl_lock; | 338 | spinlock_t cl_lock; |
@@ -465,6 +465,17 @@ static inline struct nfs4_lockowner * lockowner(struct nfs4_stateowner *so) | |||
465 | } | 465 | } |
466 | 466 | ||
467 | /* | 467 | /* |
468 | * Per-client state indicating no. of opens and outstanding delegations | ||
469 | * on a file from a particular client. 'od' stands for 'open & delegation'. ||
470 | */ | ||
471 | struct nfs4_clnt_odstate { | ||
472 | struct nfs4_client *co_client; | ||
473 | struct nfs4_file *co_file; | ||
474 | struct list_head co_perfile; | ||
475 | atomic_t co_odcount; | ||
476 | }; | ||
477 | |||
478 | /* | ||
468 | * nfs4_file: a file opened by some number of (open) nfs4_stateowners. | 479 | * nfs4_file: a file opened by some number of (open) nfs4_stateowners. |
469 | * | 480 | * |
470 | * These objects are global. nfsd keeps one instance of a nfs4_file per | 481 | * These objects are global. nfsd keeps one instance of a nfs4_file per |
@@ -485,6 +496,7 @@ struct nfs4_file { | |||
485 | struct list_head fi_delegations; | 496 | struct list_head fi_delegations; |
486 | struct rcu_head fi_rcu; | 497 | struct rcu_head fi_rcu; |
487 | }; | 498 | }; |
499 | struct list_head fi_clnt_odstate; | ||
488 | /* One each for O_RDONLY, O_WRONLY, O_RDWR: */ | 500 | /* One each for O_RDONLY, O_WRONLY, O_RDWR: */ |
489 | struct file * fi_fds[3]; | 501 | struct file * fi_fds[3]; |
490 | /* | 502 | /* |
@@ -526,6 +538,7 @@ struct nfs4_ol_stateid { | |||
526 | struct list_head st_perstateowner; | 538 | struct list_head st_perstateowner; |
527 | struct list_head st_locks; | 539 | struct list_head st_locks; |
528 | struct nfs4_stateowner * st_stateowner; | 540 | struct nfs4_stateowner * st_stateowner; |
541 | struct nfs4_clnt_odstate * st_clnt_odstate; | ||
529 | unsigned char st_access_bmap; | 542 | unsigned char st_access_bmap; |
530 | unsigned char st_deny_bmap; | 543 | unsigned char st_deny_bmap; |
531 | struct nfs4_ol_stateid * st_openstp; | 544 | struct nfs4_ol_stateid * st_openstp; |
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index f982ae84f0cd..2f8c092be2b3 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h | |||
@@ -247,6 +247,7 @@ struct nfsd4_open { | |||
247 | struct nfs4_openowner *op_openowner; /* used during processing */ | 247 | struct nfs4_openowner *op_openowner; /* used during processing */ |
248 | struct nfs4_file *op_file; /* used during processing */ | 248 | struct nfs4_file *op_file; /* used during processing */ |
249 | struct nfs4_ol_stateid *op_stp; /* used during processing */ | 249 | struct nfs4_ol_stateid *op_stp; /* used during processing */ |
250 | struct nfs4_clnt_odstate *op_odstate; /* used during processing */ | ||
250 | struct nfs4_acl *op_acl; | 251 | struct nfs4_acl *op_acl; |
251 | struct xdr_netobj op_label; | 252 | struct xdr_netobj op_label; |
252 | }; | 253 | }; |
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 059f37137f9a..919fd5bb14a8 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c | |||
@@ -388,7 +388,7 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, | |||
388 | nchildren = nilfs_btree_node_get_nchildren(node); | 388 | nchildren = nilfs_btree_node_get_nchildren(node); |
389 | 389 | ||
390 | if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || | 390 | if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || |
391 | level > NILFS_BTREE_LEVEL_MAX || | 391 | level >= NILFS_BTREE_LEVEL_MAX || |
392 | nchildren < 0 || | 392 | nchildren < 0 || |
393 | nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { | 393 | nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { |
394 | pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n", | 394 | pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n", |
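
The one-character change above tightens an off-by-one: NILFS_BTREE_LEVEL_MAX is an exclusive bound (see the header comment added to nilfs2_fs.h later in this series), and the level-indexed arrays in the btree code are sized with it, so accepting level == NILFS_BTREE_LEVEL_MAX could index one slot past the end. A minimal sketch of the corrected validity test; the helper name is made up for illustration:

static inline int nilfs_btree_level_is_valid(int level)	/* hypothetical helper */
{
	/* valid root levels form the half-open range [NODE_MIN, LEVEL_MAX) */
	return level >= NILFS_BTREE_LEVEL_NODE_MIN &&
	       level <  NILFS_BTREE_LEVEL_MAX;
}
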
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index a6944b25fd5b..fdf4b41d0609 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -757,6 +757,19 @@ lookup: | |||
757 | if (tmpres) { | 757 | if (tmpres) { |
758 | spin_unlock(&dlm->spinlock); | 758 | spin_unlock(&dlm->spinlock); |
759 | spin_lock(&tmpres->spinlock); | 759 | spin_lock(&tmpres->spinlock); |
760 | |||
761 | /* | ||
762 | * Right after dlm spinlock was released, dlm_thread could have | ||
763 | * purged the lockres. Check if the lockres got unhashed. If so, | ||
764 | * start over. | ||
765 | */ | ||
766 | if (hlist_unhashed(&tmpres->hash_node)) { | ||
767 | spin_unlock(&tmpres->spinlock); | ||
768 | dlm_lockres_put(tmpres); | ||
769 | tmpres = NULL; | ||
770 | goto lookup; | ||
771 | } | ||
772 | |||
760 | /* Wait on the thread that is mastering the resource */ | 773 | /* Wait on the thread that is mastering the resource */ |
761 | if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { | 774 | if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { |
762 | __dlm_wait_on_lockres(tmpres); | 775 | __dlm_wait_on_lockres(tmpres); |
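
The new hlist_unhashed() check closes a window: once dlm->spinlock is dropped, dlm_thread may purge and unhash the looked-up lockres before tmpres->spinlock is taken, so the reference must be dropped and the lookup restarted. A generic sketch of this lookup/revalidate/retry shape, assuming a hypothetical struct cache with cache_lookup()/obj_put() helpers that take and drop a reference:

struct obj {
	struct hlist_node	hash_node;
	spinlock_t		lock;
	/* ... payload ... */
};

struct cache {
	spinlock_t		lock;
	/* ... hash buckets ... */
};

/* placeholders for whatever lookup/refcount helpers the subsystem provides */
struct obj *cache_lookup(struct cache *c, const char *name);
void obj_put(struct obj *o);

static struct obj *find_live_object(struct cache *c, const char *name)
{
	struct obj *o;

retry:
	spin_lock(&c->lock);
	o = cache_lookup(c, name);	/* returns a referenced object or NULL */
	spin_unlock(&c->lock);
	if (!o)
		return NULL;

	spin_lock(&o->lock);
	if (hlist_unhashed(&o->hash_node)) {
		/* purged after c->lock was dropped; drop the ref, retry */
		spin_unlock(&o->lock);
		obj_put(o);
		goto retry;
	}

	return o;			/* returned with o->lock held */
}
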
diff --git a/fs/splice.c b/fs/splice.c index 476024bb6546..bfe62ae40f40 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
@@ -1161,7 +1161,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, | |||
1161 | long ret, bytes; | 1161 | long ret, bytes; |
1162 | umode_t i_mode; | 1162 | umode_t i_mode; |
1163 | size_t len; | 1163 | size_t len; |
1164 | int i, flags; | 1164 | int i, flags, more; |
1165 | 1165 | ||
1166 | /* | 1166 | /* |
1167 | * We require the input being a regular file, as we don't want to | 1167 | * We require the input being a regular file, as we don't want to |
@@ -1204,6 +1204,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, | |||
1204 | * Don't block on output, we have to drain the direct pipe. | 1204 | * Don't block on output, we have to drain the direct pipe. |
1205 | */ | 1205 | */ |
1206 | sd->flags &= ~SPLICE_F_NONBLOCK; | 1206 | sd->flags &= ~SPLICE_F_NONBLOCK; |
1207 | more = sd->flags & SPLICE_F_MORE; | ||
1207 | 1208 | ||
1208 | while (len) { | 1209 | while (len) { |
1209 | size_t read_len; | 1210 | size_t read_len; |
@@ -1217,6 +1218,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd, | |||
1217 | sd->total_len = read_len; | 1218 | sd->total_len = read_len; |
1218 | 1219 | ||
1219 | /* | 1220 | /* |
1221 | * If more data is pending, set SPLICE_F_MORE. | ||
1222 | * If this is the last of the data and SPLICE_F_MORE was not set | ||
1223 | * initially, clear it. | ||
1224 | */ | ||
1225 | if (read_len < len) | ||
1226 | sd->flags |= SPLICE_F_MORE; | ||
1227 | else if (!more) | ||
1228 | sd->flags &= ~SPLICE_F_MORE; | ||
1229 | /* | ||
1220 | * NOTE: nonblocking mode only applies to the input. We | 1230 | * NOTE: nonblocking mode only applies to the input. We |
1221 | * must not do the output in nonblocking mode as then we | 1231 | * must not do the output in nonblocking mode as then we |
1222 | * could get stuck data in the internal pipe: | 1232 | * could get stuck data in the internal pipe: |
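
The hunk above keeps SPLICE_F_MORE set while further chunks of the sendfile() loop remain, and only restores the caller's original setting on the final chunk. The reason this matters is on the output side, where the splice actor can translate SPLICE_F_MORE into MSG_MORE so the network stack does not flush a short segment after every internal-pipe chunk. An illustrative mapping (not the actual pipe_to_sendpage() implementation):

static int push_chunk_to_socket(struct socket *sock, struct page *page,
				int offset, size_t len,
				unsigned int splice_flags)
{
	int msg_flags = 0;

	/* more splice chunks follow: let the stack keep coalescing */
	if (splice_flags & SPLICE_F_MORE)
		msg_flags |= MSG_MORE;

	return kernel_sendpage(sock, page, offset, len, msg_flags);
}
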
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index f5ca0e989bba..1c3002e1db20 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h | |||
@@ -124,7 +124,6 @@ | |||
124 | #ifndef ACPI_USE_SYSTEM_INTTYPES | 124 | #ifndef ACPI_USE_SYSTEM_INTTYPES |
125 | 125 | ||
126 | typedef unsigned char u8; | 126 | typedef unsigned char u8; |
127 | typedef unsigned char u8; | ||
128 | typedef unsigned short u16; | 127 | typedef unsigned short u16; |
129 | typedef short s16; | 128 | typedef short s16; |
130 | typedef COMPILER_DEPENDENT_UINT64 u64; | 129 | typedef COMPILER_DEPENDENT_UINT64 u64; |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index a1b25e35ea5f..b7299febc4b4 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -220,7 +220,7 @@ enum rq_flag_bits { | |||
220 | 220 | ||
221 | /* This mask is used for both bio and request merge checking */ | 221 | /* This mask is used for both bio and request merge checking */ |
222 | #define REQ_NOMERGE_FLAGS \ | 222 | #define REQ_NOMERGE_FLAGS \ |
223 | (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) | 223 | (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ) |
224 | 224 | ||
225 | #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) | 225 | #define REQ_RAHEAD (1ULL << __REQ_RAHEAD) |
226 | #define REQ_THROTTLED (1ULL << __REQ_THROTTLED) | 226 | #define REQ_THROTTLED (1ULL << __REQ_THROTTLED) |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index cdf13ca7cac3..371e560d13cf 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
@@ -9,10 +9,24 @@ | |||
9 | + __GNUC_MINOR__ * 100 \ | 9 | + __GNUC_MINOR__ * 100 \ |
10 | + __GNUC_PATCHLEVEL__) | 10 | + __GNUC_PATCHLEVEL__) |
11 | 11 | ||
12 | |||
13 | /* Optimization barrier */ | 12 | /* Optimization barrier */ |
13 | |||
14 | /* The "volatile" is due to gcc bugs */ | 14 | /* The "volatile" is due to gcc bugs */ |
15 | #define barrier() __asm__ __volatile__("": : :"memory") | 15 | #define barrier() __asm__ __volatile__("": : :"memory") |
16 | /* | ||
17 | * This version is used e.g. to prevent dead store elimination on @ptr | ||
18 | * where gcc and llvm may behave differently when otherwise using a | ||
19 | * normal barrier(): gcc gets along with a plain barrier(), but llvm | ||
20 | * needs an explicit input variable to be assumed clobbered. The | ||
21 | * issue is as follows: while the inline asm might access any memory | ||
22 | * it wants, the compiler could have fit all of @ptr into registers | ||
23 | * instead, and since @ptr never escaped from there, it proved that | ||
24 | * the inline asm wasn't touching any of it. This version works with | ||
25 | * both compilers, i.e. we're telling the compiler that the inline | ||
26 | * asm absolutely may see the contents of @ptr. See also: | ||
27 | * https://llvm.org/bugs/show_bug.cgi?id=15495 | ||
28 | */ | ||
29 | #define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory") | ||
16 | 30 | ||
17 | /* | 31 | /* |
18 | * This macro obfuscates arithmetic on a variable address so that gcc | 32 | * This macro obfuscates arithmetic on a variable address so that gcc |
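
barrier_data() is aimed at the case where a buffer is cleared just before it goes out of scope: the compiler is entitled to drop that memset() as a dead store unless something can observe the memory, and the "r"(ptr) input makes the asm such an observer for llvm as well. A minimal sketch of the intended use, along the lines of what memzero_explicit() is expected to rely on:

static void scrub_key(void)
{
	u8 key[32];

	/* ... derive and use key ... */

	memset(key, 0, sizeof(key));
	barrier_data(key);	/* the clear above can no longer be elided */
}
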
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index ba147a1727e6..0c9a2f2c2802 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h | |||
@@ -13,9 +13,12 @@ | |||
13 | /* Intel ECC compiler doesn't support gcc specific asm stmts. | 13 | /* Intel ECC compiler doesn't support gcc specific asm stmts. |
14 | * It uses intrinsics to do the equivalent things. | 14 | * It uses intrinsics to do the equivalent things. |
15 | */ | 15 | */ |
16 | #undef barrier_data | ||
16 | #undef RELOC_HIDE | 17 | #undef RELOC_HIDE |
17 | #undef OPTIMIZER_HIDE_VAR | 18 | #undef OPTIMIZER_HIDE_VAR |
18 | 19 | ||
20 | #define barrier_data(ptr) barrier() | ||
21 | |||
19 | #define RELOC_HIDE(ptr, off) \ | 22 | #define RELOC_HIDE(ptr, off) \ |
20 | ({ unsigned long __ptr; \ | 23 | ({ unsigned long __ptr; \ |
21 | __ptr = (unsigned long) (ptr); \ | 24 | __ptr = (unsigned long) (ptr); \ |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 0e41ca0e5927..867722591be2 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -169,6 +169,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
169 | # define barrier() __memory_barrier() | 169 | # define barrier() __memory_barrier() |
170 | #endif | 170 | #endif |
171 | 171 | ||
172 | #ifndef barrier_data | ||
173 | # define barrier_data(ptr) barrier() | ||
174 | #endif | ||
175 | |||
172 | /* Unreachable code */ | 176 | /* Unreachable code */ |
173 | #ifndef unreachable | 177 | #ifndef unreachable |
174 | # define unreachable() do { } while (1) | 178 | # define unreachable() do { } while (1) |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 46e83c2156c6..f9ecf63d47f1 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -46,7 +46,7 @@ const char *ftrace_print_hex_seq(struct trace_seq *p, | |||
46 | const unsigned char *buf, int len); | 46 | const unsigned char *buf, int len); |
47 | 47 | ||
48 | const char *ftrace_print_array_seq(struct trace_seq *p, | 48 | const char *ftrace_print_array_seq(struct trace_seq *p, |
49 | const void *buf, int buf_len, | 49 | const void *buf, int count, |
50 | size_t el_size); | 50 | size_t el_size); |
51 | 51 | ||
52 | struct trace_iterator; | 52 | struct trace_iterator; |
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 36ec4ae74634..9de976b4f9a7 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h | |||
@@ -95,8 +95,6 @@ | |||
95 | 95 | ||
96 | struct device_node; | 96 | struct device_node; |
97 | 97 | ||
98 | extern struct irq_chip gic_arch_extn; | ||
99 | |||
100 | void gic_set_irqchip_flags(unsigned long flags); | 98 | void gic_set_irqchip_flags(unsigned long flags); |
101 | void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, | 99 | void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, |
102 | u32 offset, struct device_node *); | 100 | u32 offset, struct device_node *); |
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index e60a745ac198..e804306ef5e8 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h | |||
@@ -40,6 +40,10 @@ | |||
40 | #error KEXEC_CONTROL_MEMORY_LIMIT not defined | 40 | #error KEXEC_CONTROL_MEMORY_LIMIT not defined |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | #ifndef KEXEC_CONTROL_MEMORY_GFP | ||
44 | #define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL | ||
45 | #endif | ||
46 | |||
43 | #ifndef KEXEC_CONTROL_PAGE_SIZE | 47 | #ifndef KEXEC_CONTROL_PAGE_SIZE |
44 | #error KEXEC_CONTROL_PAGE_SIZE not defined | 48 | #error KEXEC_CONTROL_PAGE_SIZE not defined |
45 | #endif | 49 | #endif |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 8dad4a307bb8..28aeae46f355 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -205,6 +205,7 @@ enum { | |||
205 | ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */ | 205 | ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */ |
206 | ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */ | 206 | ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */ |
207 | ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */ | 207 | ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */ |
208 | ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */ | ||
208 | 209 | ||
209 | /* struct ata_port flags */ | 210 | /* struct ata_port flags */ |
210 | ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ | 211 | ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ |
@@ -309,6 +310,12 @@ enum { | |||
309 | */ | 310 | */ |
310 | ATA_TMOUT_PMP_SRST_WAIT = 5000, | 311 | ATA_TMOUT_PMP_SRST_WAIT = 5000, |
311 | 312 | ||
313 | /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might | ||
314 | * be a spurious PHY event, so ignore the first PHY event that | ||
315 | * occurs within 10s after the policy change. | ||
316 | */ | ||
317 | ATA_TMOUT_SPURIOUS_PHY = 10000, | ||
318 | |||
312 | /* ATA bus states */ | 319 | /* ATA bus states */ |
313 | BUS_UNKNOWN = 0, | 320 | BUS_UNKNOWN = 0, |
314 | BUS_DMA = 1, | 321 | BUS_DMA = 1, |
@@ -788,6 +795,8 @@ struct ata_link { | |||
788 | struct ata_eh_context eh_context; | 795 | struct ata_eh_context eh_context; |
789 | 796 | ||
790 | struct ata_device device[ATA_MAX_DEVICES]; | 797 | struct ata_device device[ATA_MAX_DEVICES]; |
798 | |||
799 | unsigned long last_lpm_change; /* when last LPM change happened */ | ||
791 | }; | 800 | }; |
792 | #define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag) | 801 | #define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag) |
793 | #define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0]) | 802 | #define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0]) |
@@ -1201,6 +1210,7 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev); | |||
1201 | extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); | 1210 | extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); |
1202 | extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); | 1211 | extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); |
1203 | extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q); | 1212 | extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q); |
1213 | extern bool sata_lpm_ignore_phy_events(struct ata_link *link); | ||
1204 | 1214 | ||
1205 | extern int ata_cable_40wire(struct ata_port *ap); | 1215 | extern int ata_cable_40wire(struct ata_port *ap); |
1206 | extern int ata_cable_80wire(struct ata_port *ap); | 1216 | extern int ata_cable_80wire(struct ata_port *ap); |
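
ATA_LFLAG_CHANGED, last_lpm_change and ATA_TMOUT_SPURIOUS_PHY together let error handling discard the PHY event that some devices raise right after an LPM policy change; the exported helper for that decision is sata_lpm_ignore_phy_events() declared above. A rough sketch of the time-window test these fields imply (the function name and details here are illustrative, not the exact helper body):

static bool lpm_phy_event_in_quiet_window(struct ata_link *link)
{
	unsigned long quiet_end = link->last_lpm_change +
				  msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);

	/* only suspect events on links whose LPM policy recently changed */
	return (link->flags & ATA_LFLAG_CHANGED) &&
	       time_before(jiffies, quiet_end);
}
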
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index bcbde799ec69..1899c74a7127 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -60,6 +60,7 @@ struct phy_device; | |||
60 | struct wireless_dev; | 60 | struct wireless_dev; |
61 | /* 802.15.4 specific */ | 61 | /* 802.15.4 specific */ |
62 | struct wpan_dev; | 62 | struct wpan_dev; |
63 | struct mpls_dev; | ||
63 | 64 | ||
64 | void netdev_set_default_ethtool_ops(struct net_device *dev, | 65 | void netdev_set_default_ethtool_ops(struct net_device *dev, |
65 | const struct ethtool_ops *ops); | 66 | const struct ethtool_ops *ops); |
@@ -976,7 +977,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, | |||
976 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, | 977 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh, |
977 | * u16 flags) | 978 | * u16 flags) |
978 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, | 979 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, |
979 | * struct net_device *dev, u32 filter_mask) | 980 | * struct net_device *dev, u32 filter_mask, |
981 | * int nlflags) | ||
980 | * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, | 982 | * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, |
981 | * u16 flags); | 983 | * u16 flags); |
982 | * | 984 | * |
@@ -1172,7 +1174,8 @@ struct net_device_ops { | |||
1172 | int (*ndo_bridge_getlink)(struct sk_buff *skb, | 1174 | int (*ndo_bridge_getlink)(struct sk_buff *skb, |
1173 | u32 pid, u32 seq, | 1175 | u32 pid, u32 seq, |
1174 | struct net_device *dev, | 1176 | struct net_device *dev, |
1175 | u32 filter_mask); | 1177 | u32 filter_mask, |
1178 | int nlflags); | ||
1176 | int (*ndo_bridge_dellink)(struct net_device *dev, | 1179 | int (*ndo_bridge_dellink)(struct net_device *dev, |
1177 | struct nlmsghdr *nlh, | 1180 | struct nlmsghdr *nlh, |
1178 | u16 flags); | 1181 | u16 flags); |
@@ -1627,6 +1630,9 @@ struct net_device { | |||
1627 | void *ax25_ptr; | 1630 | void *ax25_ptr; |
1628 | struct wireless_dev *ieee80211_ptr; | 1631 | struct wireless_dev *ieee80211_ptr; |
1629 | struct wpan_dev *ieee802154_ptr; | 1632 | struct wpan_dev *ieee802154_ptr; |
1633 | #if IS_ENABLED(CONFIG_MPLS_ROUTING) | ||
1634 | struct mpls_dev __rcu *mpls_ptr; | ||
1635 | #endif | ||
1630 | 1636 | ||
1631 | /* | 1637 | /* |
1632 | * Cache lines mostly used on receive path (including eth_type_trans()) | 1638 | * Cache lines mostly used on receive path (including eth_type_trans()) |
@@ -2021,10 +2027,10 @@ struct pcpu_sw_netstats { | |||
2021 | ({ \ | 2027 | ({ \ |
2022 | typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \ | 2028 | typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \ |
2023 | if (pcpu_stats) { \ | 2029 | if (pcpu_stats) { \ |
2024 | int i; \ | 2030 | int __cpu; \ |
2025 | for_each_possible_cpu(i) { \ | 2031 | for_each_possible_cpu(__cpu) { \ |
2026 | typeof(type) *stat; \ | 2032 | typeof(type) *stat; \ |
2027 | stat = per_cpu_ptr(pcpu_stats, i); \ | 2033 | stat = per_cpu_ptr(pcpu_stats, __cpu); \ |
2028 | u64_stats_init(&stat->syncp); \ | 2034 | u64_stats_init(&stat->syncp); \ |
2029 | } \ | 2035 | } \ |
2030 | } \ | 2036 | } \ |
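
The rename of the loop variable to __cpu in netdev_alloc_pcpu_stats() is macro hygiene: a statement-expression macro that declares a plain i will shadow any i at the expansion site, which at minimum can trip shadow warnings and makes the macro fragile to reuse. A contrived sketch of the failure mode (COUNT_ONLINE() is made up for illustration):

#define COUNT_ONLINE()					\
({							\
	int i, n = 0;	/* shadows the caller's i */	\
	for_each_online_cpu(i)				\
		n++;					\
	n;						\
})

static int example(void)
{
	int i, total = 0;

	for (i = 0; i < 4; i++)
		total += COUNT_ONLINE();	/* shadow warning fires here */
	return total;
}
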
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index ab8f76dba668..f2fdb5a52070 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h | |||
@@ -39,12 +39,24 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb) | |||
39 | 39 | ||
40 | static inline int nf_bridge_get_physinif(const struct sk_buff *skb) | 40 | static inline int nf_bridge_get_physinif(const struct sk_buff *skb) |
41 | { | 41 | { |
42 | return skb->nf_bridge ? skb->nf_bridge->physindev->ifindex : 0; | 42 | struct nf_bridge_info *nf_bridge; |
43 | |||
44 | if (skb->nf_bridge == NULL) | ||
45 | return 0; | ||
46 | |||
47 | nf_bridge = skb->nf_bridge; | ||
48 | return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0; | ||
43 | } | 49 | } |
44 | 50 | ||
45 | static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) | 51 | static inline int nf_bridge_get_physoutif(const struct sk_buff *skb) |
46 | { | 52 | { |
47 | return skb->nf_bridge ? skb->nf_bridge->physoutdev->ifindex : 0; | 53 | struct nf_bridge_info *nf_bridge; |
54 | |||
55 | if (skb->nf_bridge == NULL) | ||
56 | return 0; | ||
57 | |||
58 | nf_bridge = skb->nf_bridge; | ||
59 | return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0; | ||
48 | } | 60 | } |
49 | 61 | ||
50 | static inline struct net_device * | 62 | static inline struct net_device * |
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index ff3fea3194c6..9abb763e4b86 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h | |||
@@ -460,7 +460,7 @@ struct nilfs_btree_node { | |||
460 | /* level */ | 460 | /* level */ |
461 | #define NILFS_BTREE_LEVEL_DATA 0 | 461 | #define NILFS_BTREE_LEVEL_DATA 0 |
462 | #define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1) | 462 | #define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1) |
463 | #define NILFS_BTREE_LEVEL_MAX 14 | 463 | #define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */ |
464 | 464 | ||
465 | /** | 465 | /** |
466 | * struct nilfs_palloc_group_desc - block group descriptor | 466 | * struct nilfs_palloc_group_desc - block group descriptor |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 38cff8f6716d..2f7b9a40f627 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2541,10 +2541,6 @@ | |||
2541 | 2541 | ||
2542 | #define PCI_VENDOR_ID_INTEL 0x8086 | 2542 | #define PCI_VENDOR_ID_INTEL 0x8086 |
2543 | #define PCI_DEVICE_ID_INTEL_EESSC 0x0008 | 2543 | #define PCI_DEVICE_ID_INTEL_EESSC 0x0008 |
2544 | #define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100 | ||
2545 | #define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154 | ||
2546 | #define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150 | ||
2547 | #define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 | ||
2548 | #define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320 | 2544 | #define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320 |
2549 | #define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321 | 2545 | #define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321 |
2550 | #define PCI_DEVICE_ID_INTEL_PXH_0 0x0329 | 2546 | #define PCI_DEVICE_ID_INTEL_PXH_0 0x0329 |
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index e23d242d1230..dbcbcc59aa92 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h | |||
@@ -282,7 +282,8 @@ static inline bool rht_shrink_below_30(const struct rhashtable *ht, | |||
282 | static inline bool rht_grow_above_100(const struct rhashtable *ht, | 282 | static inline bool rht_grow_above_100(const struct rhashtable *ht, |
283 | const struct bucket_table *tbl) | 283 | const struct bucket_table *tbl) |
284 | { | 284 | { |
285 | return atomic_read(&ht->nelems) > tbl->size; | 285 | return atomic_read(&ht->nelems) > tbl->size && |
286 | (!ht->p.max_size || tbl->size < ht->p.max_size); | ||
286 | } | 287 | } |
287 | 288 | ||
288 | /* The bucket lock is selected based on the hash and protects mutations | 289 | /* The bucket lock is selected based on the hash and protects mutations |
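
The extra condition means a table capped with max_size stops expanding once it reaches that size, even when the element count exceeds 100% of the buckets; further inserts simply lengthen the chains instead of forcing growth. An illustrative parameter block showing where that cap comes from (struct example_obj and the sizes are made up):

struct example_obj {
	u32			key;
	struct rhash_head	node;
};

static const struct rhashtable_params example_params = {
	.head_offset	= offsetof(struct example_obj, node),
	.key_offset	= offsetof(struct example_obj, key),
	.key_len	= sizeof(u32),
	.max_size	= 1024,	/* never grow beyond 1024 buckets */
};
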
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 2da5d1081ad9..7b8e260c4a27 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
@@ -122,5 +122,5 @@ extern int ndo_dflt_fdb_del(struct ndmsg *ndm, | |||
122 | 122 | ||
123 | extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 123 | extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
124 | struct net_device *dev, u16 mode, | 124 | struct net_device *dev, u16 mode, |
125 | u32 flags, u32 mask); | 125 | u32 flags, u32 mask, int nlflags); |
126 | #endif /* __LINUX_RTNETLINK_H */ | 126 | #endif /* __LINUX_RTNETLINK_H */ |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 8222ae40ecb0..26a2e6122734 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -175,14 +175,6 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); | |||
175 | extern void calc_global_load(unsigned long ticks); | 175 | extern void calc_global_load(unsigned long ticks); |
176 | extern void update_cpu_load_nohz(void); | 176 | extern void update_cpu_load_nohz(void); |
177 | 177 | ||
178 | /* Notifier for when a task gets migrated to a new CPU */ | ||
179 | struct task_migration_notifier { | ||
180 | struct task_struct *task; | ||
181 | int from_cpu; | ||
182 | int to_cpu; | ||
183 | }; | ||
184 | extern void register_task_migration_notifier(struct notifier_block *n); | ||
185 | |||
186 | extern unsigned long get_parent_ip(unsigned long addr); | 178 | extern unsigned long get_parent_ip(unsigned long addr); |
187 | 179 | ||
188 | extern void dump_cpu_task(int cpu); | 180 | extern void dump_cpu_task(int cpu); |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 06793b598f44..66e374d62f64 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -773,6 +773,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, | |||
773 | 773 | ||
774 | struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, | 774 | struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, |
775 | int node); | 775 | int node); |
776 | struct sk_buff *__build_skb(void *data, unsigned int frag_size); | ||
776 | struct sk_buff *build_skb(void *data, unsigned int frag_size); | 777 | struct sk_buff *build_skb(void *data, unsigned int frag_size); |
777 | static inline struct sk_buff *alloc_skb(unsigned int size, | 778 | static inline struct sk_buff *alloc_skb(unsigned int size, |
778 | gfp_t priority) | 779 | gfp_t priority) |
diff --git a/include/linux/tty.h b/include/linux/tty.h index 358a337af598..fe5623c9af71 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -491,6 +491,7 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty) | |||
491 | 491 | ||
492 | extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); | 492 | extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); |
493 | extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b); | 493 | extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b); |
494 | extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); | ||
494 | 495 | ||
495 | extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); | 496 | extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); |
496 | extern void tty_ldisc_deref(struct tty_ldisc *); | 497 | extern void tty_ldisc_deref(struct tty_ldisc *); |
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index a7f2604c5f25..7f5f78bd15ad 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h | |||
@@ -77,6 +77,8 @@ | |||
77 | /* Cannot handle ATA_12 or ATA_16 CDBs */ \ | 77 | /* Cannot handle ATA_12 or ATA_16 CDBs */ \ |
78 | US_FLAG(NO_REPORT_OPCODES, 0x04000000) \ | 78 | US_FLAG(NO_REPORT_OPCODES, 0x04000000) \ |
79 | /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \ | 79 | /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \ |
80 | US_FLAG(MAX_SECTORS_240, 0x08000000) \ | ||
81 | /* Sets max_sectors to 240 */ \ | ||
80 | 82 | ||
81 | #define US_FLAG(name, value) US_FL_##name = value , | 83 | #define US_FLAG(name, value) US_FL_##name = value , |
82 | enum { US_DO_ALL_FLAGS }; | 84 | enum { US_DO_ALL_FLAGS }; |
diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h index d5f4fb69dba3..f9b2ce58039b 100644 --- a/include/linux/util_macros.h +++ b/include/linux/util_macros.h | |||
@@ -5,7 +5,7 @@ | |||
5 | ({ \ | 5 | ({ \ |
6 | typeof(as) __fc_i, __fc_as = (as) - 1; \ | 6 | typeof(as) __fc_i, __fc_as = (as) - 1; \ |
7 | typeof(x) __fc_x = (x); \ | 7 | typeof(x) __fc_x = (x); \ |
8 | typeof(*a) *__fc_a = (a); \ | 8 | typeof(*a) const *__fc_a = (a); \ |
9 | for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \ | 9 | for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \ |
10 | if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \ | 10 | if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \ |
11 | __fc_a[__fc_i + 1], 2)) \ | 11 | __fc_a[__fc_i + 1], 2)) \ |
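
Adding const to the local pointer lets find_closest() be used directly on const lookup tables, which is how such tables are normally declared in drivers. A small usage sketch (the table values are illustrative):

static const int update_intervals_ms[] = { 125, 250, 500, 1000, 2000 };

static int pick_interval(int requested_ms)
{
	int idx = find_closest(requested_ms, update_intervals_ms,
			       ARRAY_SIZE(update_intervals_ms));

	return update_intervals_ms[idx];	/* nearest supported interval */
}
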
diff --git a/include/net/bonding.h b/include/net/bonding.h index fda6feeb6c1f..78ed135e9dea 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h | |||
@@ -30,13 +30,6 @@ | |||
30 | #include <net/bond_alb.h> | 30 | #include <net/bond_alb.h> |
31 | #include <net/bond_options.h> | 31 | #include <net/bond_options.h> |
32 | 32 | ||
33 | #define DRV_VERSION "3.7.1" | ||
34 | #define DRV_RELDATE "April 27, 2011" | ||
35 | #define DRV_NAME "bonding" | ||
36 | #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" | ||
37 | |||
38 | #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" | ||
39 | |||
40 | #define BOND_MAX_ARP_TARGETS 16 | 33 | #define BOND_MAX_ARP_TARGETS 16 |
41 | 34 | ||
42 | #define BOND_DEFAULT_MIIMON 100 | 35 | #define BOND_DEFAULT_MIIMON 100 |
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 7b5887cd1172..48a815823587 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h | |||
@@ -279,12 +279,6 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk, | |||
279 | void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, | 279 | void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, |
280 | unsigned long timeout); | 280 | unsigned long timeout); |
281 | 281 | ||
282 | static inline void inet_csk_reqsk_queue_removed(struct sock *sk, | ||
283 | struct request_sock *req) | ||
284 | { | ||
285 | reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); | ||
286 | } | ||
287 | |||
288 | static inline void inet_csk_reqsk_queue_added(struct sock *sk, | 282 | static inline void inet_csk_reqsk_queue_added(struct sock *sk, |
289 | const unsigned long timeout) | 283 | const unsigned long timeout) |
290 | { | 284 | { |
@@ -306,19 +300,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) | |||
306 | return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue); | 300 | return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue); |
307 | } | 301 | } |
308 | 302 | ||
309 | static inline void inet_csk_reqsk_queue_unlink(struct sock *sk, | 303 | void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); |
310 | struct request_sock *req) | ||
311 | { | ||
312 | reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req); | ||
313 | } | ||
314 | |||
315 | static inline void inet_csk_reqsk_queue_drop(struct sock *sk, | ||
316 | struct request_sock *req) | ||
317 | { | ||
318 | inet_csk_reqsk_queue_unlink(sk, req); | ||
319 | inet_csk_reqsk_queue_removed(sk, req); | ||
320 | reqsk_put(req); | ||
321 | } | ||
322 | 304 | ||
323 | void inet_csk_destroy_sock(struct sock *sk); | 305 | void inet_csk_destroy_sock(struct sock *sk); |
324 | void inet_csk_prepare_forced_close(struct sock *sk); | 306 | void inet_csk_prepare_forced_close(struct sock *sk); |
diff --git a/include/net/request_sock.h b/include/net/request_sock.h index fe41f3ceb008..9f4265ce8892 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h | |||
@@ -212,24 +212,6 @@ static inline int reqsk_queue_empty(struct request_sock_queue *queue) | |||
212 | return queue->rskq_accept_head == NULL; | 212 | return queue->rskq_accept_head == NULL; |
213 | } | 213 | } |
214 | 214 | ||
215 | static inline void reqsk_queue_unlink(struct request_sock_queue *queue, | ||
216 | struct request_sock *req) | ||
217 | { | ||
218 | struct listen_sock *lopt = queue->listen_opt; | ||
219 | struct request_sock **prev; | ||
220 | |||
221 | spin_lock(&queue->syn_wait_lock); | ||
222 | |||
223 | prev = &lopt->syn_table[req->rsk_hash]; | ||
224 | while (*prev != req) | ||
225 | prev = &(*prev)->dl_next; | ||
226 | *prev = req->dl_next; | ||
227 | |||
228 | spin_unlock(&queue->syn_wait_lock); | ||
229 | if (del_timer(&req->rsk_timer)) | ||
230 | reqsk_put(req); | ||
231 | } | ||
232 | |||
233 | static inline void reqsk_queue_add(struct request_sock_queue *queue, | 215 | static inline void reqsk_queue_add(struct request_sock_queue *queue, |
234 | struct request_sock *req, | 216 | struct request_sock *req, |
235 | struct sock *parent, | 217 | struct sock *parent, |
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h index ce55906b54a0..ac54c27a2bfd 100644 --- a/include/rdma/ib_addr.h +++ b/include/rdma/ib_addr.h | |||
@@ -160,7 +160,7 @@ static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid) | |||
160 | } | 160 | } |
161 | 161 | ||
162 | /* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */ | 162 | /* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */ |
163 | static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid) | 163 | static inline void rdma_gid2ip(struct sockaddr *out, union ib_gid *gid) |
164 | { | 164 | { |
165 | if (ipv6_addr_v4mapped((struct in6_addr *)gid)) { | 165 | if (ipv6_addr_v4mapped((struct in6_addr *)gid)) { |
166 | struct sockaddr_in *out_in = (struct sockaddr_in *)out; | 166 | struct sockaddr_in *out_in = (struct sockaddr_in *)out; |
@@ -173,7 +173,6 @@ static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid) | |||
173 | out_in->sin6_family = AF_INET6; | 173 | out_in->sin6_family = AF_INET6; |
174 | memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16); | 174 | memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16); |
175 | } | 175 | } |
176 | return 0; | ||
177 | } | 176 | } |
178 | 177 | ||
179 | static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr, | 178 | static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr, |
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h index 0e3ff30647d5..39ed2d2fbd51 100644 --- a/include/rdma/ib_cm.h +++ b/include/rdma/ib_cm.h | |||
@@ -105,7 +105,8 @@ enum ib_cm_data_size { | |||
105 | IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216, | 105 | IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216, |
106 | IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136, | 106 | IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136, |
107 | IB_CM_SIDR_REP_INFO_LENGTH = 72, | 107 | IB_CM_SIDR_REP_INFO_LENGTH = 72, |
108 | IB_CM_COMPARE_SIZE = 64 | 108 | /* compare done u32 at a time */ |
109 | IB_CM_COMPARE_SIZE = (64 / sizeof(u32)) | ||
109 | }; | 110 | }; |
110 | 111 | ||
111 | struct ib_cm_id; | 112 | struct ib_cm_id; |
@@ -337,8 +338,8 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id); | |||
337 | #define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL) | 338 | #define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL) |
338 | 339 | ||
339 | struct ib_cm_compare_data { | 340 | struct ib_cm_compare_data { |
340 | u8 data[IB_CM_COMPARE_SIZE]; | 341 | u32 data[IB_CM_COMPARE_SIZE]; |
341 | u8 mask[IB_CM_COMPARE_SIZE]; | 342 | u32 mask[IB_CM_COMPARE_SIZE]; |
342 | }; | 343 | }; |
343 | 344 | ||
344 | /** | 345 | /** |
diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h index 928b2775e992..fda31673a562 100644 --- a/include/rdma/iw_portmap.h +++ b/include/rdma/iw_portmap.h | |||
@@ -148,6 +148,16 @@ int iwpm_add_mapping_cb(struct sk_buff *, struct netlink_callback *); | |||
148 | int iwpm_add_and_query_mapping_cb(struct sk_buff *, struct netlink_callback *); | 148 | int iwpm_add_and_query_mapping_cb(struct sk_buff *, struct netlink_callback *); |
149 | 149 | ||
150 | /** | 150 | /** |
151 | * iwpm_remote_info_cb - Process remote connecting peer address info, which | ||
152 | * the port mapper has received from the connecting peer | ||
153 | * | ||
154 | * @cb: Contains the received message (payload and netlink header) | ||
155 | * | ||
156 | * Stores the IPv4/IPv6 address info in a hash table | ||
157 | */ | ||
158 | int iwpm_remote_info_cb(struct sk_buff *, struct netlink_callback *); | ||
159 | |||
160 | /** | ||
151 | * iwpm_mapping_error_cb - Process port mapper notification for error | 161 | * iwpm_mapping_error_cb - Process port mapper notification for error |
152 | * | 162 | * |
153 | * @skb: | 163 | * @skb: |
@@ -175,6 +185,21 @@ int iwpm_mapping_info_cb(struct sk_buff *, struct netlink_callback *); | |||
175 | int iwpm_ack_mapping_info_cb(struct sk_buff *, struct netlink_callback *); | 185 | int iwpm_ack_mapping_info_cb(struct sk_buff *, struct netlink_callback *); |
176 | 186 | ||
177 | /** | 187 | /** |
188 | * iwpm_get_remote_info - Get the remote connecting peer address info | ||
189 | * | ||
190 | * @mapped_loc_addr: Mapped local address of the listening peer | ||
191 | * @mapped_rem_addr: Mapped remote address of the connecting peer | ||
192 | * @remote_addr: To store the remote address of the connecting peer | ||
193 | * @nl_client: The index of the netlink client | ||
194 | * | ||
195 | * The remote address info is retrieved and provided to the client in | ||
196 | * the remote_addr. After that, it is removed from the hash table. | ||
197 | */ | ||
198 | int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr, | ||
199 | struct sockaddr_storage *mapped_rem_addr, | ||
200 | struct sockaddr_storage *remote_addr, u8 nl_client); | ||
201 | |||
202 | /** | ||
178 | * iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address | 203 | * iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address |
179 | * info in a hash table | 204 | * info in a hash table |
180 | * @local_addr: Local ip/tcp address | 205 | * @local_addr: Local ip/tcp address |
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h index 183eaab7c380..96e3f56519e7 100644 --- a/include/scsi/scsi_devinfo.h +++ b/include/scsi/scsi_devinfo.h | |||
@@ -36,5 +36,6 @@ | |||
36 | for sequential scan */ | 36 | for sequential scan */ |
37 | #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */ | 37 | #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */ |
38 | #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */ | 38 | #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */ |
39 | #define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */ | ||
39 | 40 | ||
40 | #endif | 41 | #endif |
diff --git a/include/sound/designware_i2s.h b/include/sound/designware_i2s.h index 26f406e0f673..3a8fca9409a7 100644 --- a/include/sound/designware_i2s.h +++ b/include/sound/designware_i2s.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com) | 2 | * Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com) |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 5 | * it under the terms of the GNU General Public License as published by |
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h index 0de95ccb92cf..5bd134651f5e 100644 --- a/include/sound/emu10k1.h +++ b/include/sound/emu10k1.h | |||
@@ -41,7 +41,8 @@ | |||
41 | 41 | ||
42 | #define EMUPAGESIZE 4096 | 42 | #define EMUPAGESIZE 4096 |
43 | #define MAXREQVOICES 8 | 43 | #define MAXREQVOICES 8 |
44 | #define MAXPAGES 8192 | 44 | #define MAXPAGES0 4096 /* 32 bit mode */ |
45 | #define MAXPAGES1 8192 /* 31 bit mode */ | ||
45 | #define RESERVED 0 | 46 | #define RESERVED 0 |
46 | #define NUM_MIDI 16 | 47 | #define NUM_MIDI 16 |
47 | #define NUM_G 64 /* use all channels */ | 48 | #define NUM_G 64 /* use all channels */ |
@@ -50,8 +51,7 @@ | |||
50 | 51 | ||
51 | /* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */ | 52 | /* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */ |
52 | #define EMU10K1_DMA_MASK 0x7fffffffUL /* 31bit */ | 53 | #define EMU10K1_DMA_MASK 0x7fffffffUL /* 31bit */ |
53 | #define AUDIGY_DMA_MASK 0x7fffffffUL /* 31bit FIXME - 32 should work? */ | 54 | #define AUDIGY_DMA_MASK 0xffffffffUL /* 32bit mode */ |
54 | /* See ALSA bug #1276 - rlrevell */ | ||
55 | 55 | ||
56 | #define TMEMSIZE 256*1024 | 56 | #define TMEMSIZE 256*1024 |
57 | #define TMEMSIZEREG 4 | 57 | #define TMEMSIZEREG 4 |
@@ -466,8 +466,11 @@ | |||
466 | 466 | ||
467 | #define MAPB 0x0d /* Cache map B */ | 467 | #define MAPB 0x0d /* Cache map B */ |
468 | 468 | ||
469 | #define MAP_PTE_MASK 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */ | 469 | #define MAP_PTE_MASK0 0xfffff000 /* The 20 MSBs of the PTE indexed by the PTI */ |
470 | #define MAP_PTI_MASK 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */ | 470 | #define MAP_PTI_MASK0 0x00000fff /* The 12 bit index to one of the 4096 PTE dwords */ |
471 | |||
472 | #define MAP_PTE_MASK1 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */ | ||
473 | #define MAP_PTI_MASK1 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */ | ||
471 | 474 | ||
472 | /* 0x0e, 0x0f: Not used */ | 475 | /* 0x0e, 0x0f: Not used */ |
473 | 476 | ||
@@ -1704,6 +1707,7 @@ struct snd_emu10k1 { | |||
1704 | unsigned short model; /* subsystem id */ | 1707 | unsigned short model; /* subsystem id */ |
1705 | unsigned int card_type; /* EMU10K1_CARD_* */ | 1708 | unsigned int card_type; /* EMU10K1_CARD_* */ |
1706 | unsigned int ecard_ctrl; /* ecard control bits */ | 1709 | unsigned int ecard_ctrl; /* ecard control bits */ |
1710 | unsigned int address_mode; /* address mode */ | ||
1707 | unsigned long dma_mask; /* PCI DMA mask */ | 1711 | unsigned long dma_mask; /* PCI DMA mask */ |
1708 | unsigned int delay_pcm_irq; /* in samples */ | 1712 | unsigned int delay_pcm_irq; /* in samples */ |
1709 | int max_cache_pages; /* max memory size / PAGE_SIZE */ | 1713 | int max_cache_pages; /* max memory size / PAGE_SIZE */ |
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 0bc83647d3fa..1065095c6973 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h | |||
@@ -287,7 +287,7 @@ struct device; | |||
287 | .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\ | 287 | .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\ |
288 | .tlv.p = (tlv_array), \ | 288 | .tlv.p = (tlv_array), \ |
289 | .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \ | 289 | .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \ |
290 | .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) } | 290 | .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) } |
291 | #define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \ | 291 | #define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \ |
292 | SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array) | 292 | SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array) |
293 | #define SOC_DAPM_ENUM(xname, xenum) \ | 293 | #define SOC_DAPM_ENUM(xname, xenum) \ |
diff --git a/include/sound/soc.h b/include/sound/soc.h index fcb312b3f258..f6226914acfe 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h | |||
@@ -387,8 +387,20 @@ int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source, | |||
387 | int snd_soc_register_card(struct snd_soc_card *card); | 387 | int snd_soc_register_card(struct snd_soc_card *card); |
388 | int snd_soc_unregister_card(struct snd_soc_card *card); | 388 | int snd_soc_unregister_card(struct snd_soc_card *card); |
389 | int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card); | 389 | int devm_snd_soc_register_card(struct device *dev, struct snd_soc_card *card); |
390 | #ifdef CONFIG_PM_SLEEP | ||
390 | int snd_soc_suspend(struct device *dev); | 391 | int snd_soc_suspend(struct device *dev); |
391 | int snd_soc_resume(struct device *dev); | 392 | int snd_soc_resume(struct device *dev); |
393 | #else | ||
394 | static inline int snd_soc_suspend(struct device *dev) | ||
395 | { | ||
396 | return 0; | ||
397 | } | ||
398 | |||
399 | static inline int snd_soc_resume(struct device *dev) | ||
400 | { | ||
401 | return 0; | ||
402 | } | ||
403 | #endif | ||
392 | int snd_soc_poweroff(struct device *dev); | 404 | int snd_soc_poweroff(struct device *dev); |
393 | int snd_soc_register_platform(struct device *dev, | 405 | int snd_soc_register_platform(struct device *dev, |
394 | const struct snd_soc_platform_driver *platform_drv); | 406 | const struct snd_soc_platform_driver *platform_drv); |
diff --git a/include/sound/spear_dma.h b/include/sound/spear_dma.h index 65aca51fe255..e290de4e7e82 100644 --- a/include/sound/spear_dma.h +++ b/include/sound/spear_dma.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * linux/spear_dma.h | 2 | * linux/spear_dma.h |
3 | * | 3 | * |
4 | * Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com) | 4 | * Copyright (ST) 2012 Rajeev Kumar (rajeevkumar.linux@gmail.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h index a3318f31e8e7..915980ac68df 100644 --- a/include/uapi/linux/virtio_ring.h +++ b/include/uapi/linux/virtio_ring.h | |||
@@ -155,7 +155,7 @@ static inline unsigned vring_size(unsigned int num, unsigned long align) | |||
155 | } | 155 | } |
156 | 156 | ||
157 | /* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */ | 157 | /* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */ |
158 | /* Assuming a given event_idx value from the other size, if | 158 | /* Assuming a given event_idx value from the other side, if |
159 | * we have just incremented index from old to new_idx, | 159 | * we have just incremented index from old to new_idx, |
160 | * should we trigger an event? */ | 160 | * should we trigger an event? */ |
161 | static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old) | 161 | static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old) |
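
The corrected comment ("from the other side") refers to the event-index handshake: the other side publishes the index after which it wants an interrupt, and the producer only signals when that index falls inside the range of entries it just made available. The test is done entirely in 16-bit unsigned arithmetic so index wrap-around needs no special casing; a worked sketch of the comparison the helper documents:

/* Did event_idx land in the half-open range (old, new_idx], mod 2^16?
 *
 *   old = 0xfffe, new_idx = 0x0001, event_idx = 0xffff
 *   (__u16)(new_idx - event_idx - 1) = 0x0001
 *   (__u16)(new_idx - old)           = 0x0003
 *   0x0001 < 0x0003  ->  signal the other side
 */
static inline int event_idx_in_range(__u16 event_idx, __u16 new_idx, __u16 old)
{
	return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
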
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h index de69170a30ce..6e4bb4270ca2 100644 --- a/include/uapi/rdma/rdma_netlink.h +++ b/include/uapi/rdma/rdma_netlink.h | |||
@@ -37,6 +37,7 @@ enum { | |||
37 | RDMA_NL_IWPM_ADD_MAPPING, | 37 | RDMA_NL_IWPM_ADD_MAPPING, |
38 | RDMA_NL_IWPM_QUERY_MAPPING, | 38 | RDMA_NL_IWPM_QUERY_MAPPING, |
39 | RDMA_NL_IWPM_REMOVE_MAPPING, | 39 | RDMA_NL_IWPM_REMOVE_MAPPING, |
40 | RDMA_NL_IWPM_REMOTE_INFO, | ||
40 | RDMA_NL_IWPM_HANDLE_ERR, | 41 | RDMA_NL_IWPM_HANDLE_ERR, |
41 | RDMA_NL_IWPM_MAPINFO, | 42 | RDMA_NL_IWPM_MAPINFO, |
42 | RDMA_NL_IWPM_MAPINFO_NUM, | 43 | RDMA_NL_IWPM_MAPINFO_NUM, |
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 143ca5ffab7a..4478f4b4aae2 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h | |||
@@ -191,6 +191,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, | |||
191 | struct gnttab_unmap_grant_ref *kunmap_ops, | 191 | struct gnttab_unmap_grant_ref *kunmap_ops, |
192 | struct page **pages, unsigned int count); | 192 | struct page **pages, unsigned int count); |
193 | void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item); | 193 | void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item); |
194 | int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item); | ||
194 | 195 | ||
195 | 196 | ||
196 | /* Perform a batch of grant map/copy operations. Retry every batch slot | 197 | /* Perform a batch of grant map/copy operations. Retry every batch slot |
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index c643e6a94c9a..0ce4f32017ea 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h | |||
@@ -13,6 +13,7 @@ void xen_arch_post_suspend(int suspend_cancelled); | |||
13 | 13 | ||
14 | void xen_timer_resume(void); | 14 | void xen_timer_resume(void); |
15 | void xen_arch_resume(void); | 15 | void xen_arch_resume(void); |
16 | void xen_arch_suspend(void); | ||
16 | 17 | ||
17 | void xen_resume_notifier_register(struct notifier_block *nb); | 18 | void xen_resume_notifier_register(struct notifier_block *nb); |
18 | void xen_resume_notifier_unregister(struct notifier_block *nb); | 19 | void xen_resume_notifier_unregister(struct notifier_block *nb); |
diff --git a/init/do_mounts.c b/init/do_mounts.c index 8369ffa5f33d..a95bbdb2a502 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c | |||
@@ -225,10 +225,11 @@ dev_t name_to_dev_t(const char *name) | |||
225 | #endif | 225 | #endif |
226 | 226 | ||
227 | if (strncmp(name, "/dev/", 5) != 0) { | 227 | if (strncmp(name, "/dev/", 5) != 0) { |
228 | unsigned maj, min; | 228 | unsigned maj, min, offset; |
229 | char dummy; | 229 | char dummy; |
230 | 230 | ||
231 | if (sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2) { | 231 | if ((sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2) || |
232 | (sscanf(name, "%u:%u:%u:%c", &maj, &min, &offset, &dummy) == 3)) { | ||
232 | res = MKDEV(maj, min); | 233 | res = MKDEV(maj, min); |
233 | if (maj != MAJOR(res) || min != MINOR(res)) | 234 | if (maj != MAJOR(res) || min != MINOR(res)) |
234 | goto fail; | 235 | goto fail; |
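
Both sscanf() patterns end in a %c dummy so that unexpected trailing characters are detected: the expected count (2 or 3) is only produced when nothing stray follows the numbers, since a leftover character would also satisfy %c and raise the count by one. A small sketch of the trick in isolation:

static bool parse_maj_min(const char *name, unsigned int *maj, unsigned int *min)
{
	char dummy;

	/* 2 == both numbers parsed and nothing followed them;
	 * "8:1x" would return 3 because 'x' matches the %c dummy. */
	return sscanf(name, "%u:%u%c", maj, min, &dummy) == 2;
}
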
diff --git a/kernel/Makefile b/kernel/Makefile index 0f8f8b0bc1bf..60c302cfb4d3 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -197,9 +197,9 @@ x509.genkey: | |||
197 | @echo >>x509.genkey "x509_extensions = myexts" | 197 | @echo >>x509.genkey "x509_extensions = myexts" |
198 | @echo >>x509.genkey | 198 | @echo >>x509.genkey |
199 | @echo >>x509.genkey "[ req_distinguished_name ]" | 199 | @echo >>x509.genkey "[ req_distinguished_name ]" |
200 | @echo >>x509.genkey "O = Magrathea" | 200 | @echo >>x509.genkey "#O = Unspecified company" |
201 | @echo >>x509.genkey "CN = Glacier signing key" | 201 | @echo >>x509.genkey "CN = Build time autogenerated kernel key" |
202 | @echo >>x509.genkey "emailAddress = slartibartfast@magrathea.h2g2" | 202 | @echo >>x509.genkey "#emailAddress = unspecified.user@unspecified.company" |
203 | @echo >>x509.genkey | 203 | @echo >>x509.genkey |
204 | @echo >>x509.genkey "[ myexts ]" | 204 | @echo >>x509.genkey "[ myexts ]" |
205 | @echo >>x509.genkey "basicConstraints=critical,CA:FALSE" | 205 | @echo >>x509.genkey "basicConstraints=critical,CA:FALSE" |
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 4139a0f8b558..54f0e7fcd0e2 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c | |||
@@ -357,8 +357,8 @@ select_insn: | |||
357 | ALU64_MOD_X: | 357 | ALU64_MOD_X: |
358 | if (unlikely(SRC == 0)) | 358 | if (unlikely(SRC == 0)) |
359 | return 0; | 359 | return 0; |
360 | tmp = DST; | 360 | div64_u64_rem(DST, SRC, &tmp); |
361 | DST = do_div(tmp, SRC); | 361 | DST = tmp; |
362 | CONT; | 362 | CONT; |
363 | ALU_MOD_X: | 363 | ALU_MOD_X: |
364 | if (unlikely(SRC == 0)) | 364 | if (unlikely(SRC == 0)) |
@@ -367,8 +367,8 @@ select_insn: | |||
367 | DST = do_div(tmp, (u32) SRC); | 367 | DST = do_div(tmp, (u32) SRC); |
368 | CONT; | 368 | CONT; |
369 | ALU64_MOD_K: | 369 | ALU64_MOD_K: |
370 | tmp = DST; | 370 | div64_u64_rem(DST, IMM, &tmp); |
371 | DST = do_div(tmp, IMM); | 371 | DST = tmp; |
372 | CONT; | 372 | CONT; |
373 | ALU_MOD_K: | 373 | ALU_MOD_K: |
374 | tmp = (u32) DST; | 374 | tmp = (u32) DST; |
@@ -377,7 +377,7 @@ select_insn: | |||
377 | ALU64_DIV_X: | 377 | ALU64_DIV_X: |
378 | if (unlikely(SRC == 0)) | 378 | if (unlikely(SRC == 0)) |
379 | return 0; | 379 | return 0; |
380 | do_div(DST, SRC); | 380 | DST = div64_u64(DST, SRC); |
381 | CONT; | 381 | CONT; |
382 | ALU_DIV_X: | 382 | ALU_DIV_X: |
383 | if (unlikely(SRC == 0)) | 383 | if (unlikely(SRC == 0)) |
@@ -387,7 +387,7 @@ select_insn: | |||
387 | DST = (u32) tmp; | 387 | DST = (u32) tmp; |
388 | CONT; | 388 | CONT; |
389 | ALU64_DIV_K: | 389 | ALU64_DIV_K: |
390 | do_div(DST, IMM); | 390 | DST = div64_u64(DST, IMM); |
391 | CONT; | 391 | CONT; |
392 | ALU_DIV_K: | 392 | ALU_DIV_K: |
393 | tmp = (u32) DST; | 393 | tmp = (u32) DST; |
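
The switch from do_div() to div64_u64()/div64_u64_rem() matters because do_div() takes a 32-bit divisor: passing the 64-bit SRC or IMM silently truncates it, so 64-bit BPF division and modulo by values above 32 bits produced wrong results (and a divisor whose low 32 bits are zero degenerates into a divide by zero). A small sketch contrasting the helpers, with an assumed example value:

/* dst = 1ULL << 42, src = 1ULL << 33:
 *   do_div(dst, src)    -- src truncated to a u32 of 0: broken
 *   div64_u64(dst, src) -- 512, the expected quotient
 */
static u64 quotient64(u64 dst, u64 src)
{
	return div64_u64(dst, src);		/* full 64-bit divisor */
}

static u64 remainder64(u64 dst, u64 src)
{
	u64 rem;

	div64_u64_rem(dst, src, &rem);		/* quotient returned, remainder stored */
	return rem;
}
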
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c index 988dc58e8847..2feb6feca0cc 100644 --- a/kernel/irq/dummychip.c +++ b/kernel/irq/dummychip.c | |||
@@ -57,5 +57,6 @@ struct irq_chip dummy_irq_chip = { | |||
57 | .irq_ack = noop, | 57 | .irq_ack = noop, |
58 | .irq_mask = noop, | 58 | .irq_mask = noop, |
59 | .irq_unmask = noop, | 59 | .irq_unmask = noop, |
60 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
60 | }; | 61 | }; |
61 | EXPORT_SYMBOL_GPL(dummy_irq_chip); | 62 | EXPORT_SYMBOL_GPL(dummy_irq_chip); |
diff --git a/kernel/kexec.c b/kernel/kexec.c index 38c25b1f2fd5..7a36fdcca5bf 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -707,7 +707,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image, | |||
707 | do { | 707 | do { |
708 | unsigned long pfn, epfn, addr, eaddr; | 708 | unsigned long pfn, epfn, addr, eaddr; |
709 | 709 | ||
710 | pages = kimage_alloc_pages(GFP_KERNEL, order); | 710 | pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order); |
711 | if (!pages) | 711 | if (!pages) |
712 | break; | 712 | break; |
713 | pfn = page_to_pfn(pages); | 713 | pfn = page_to_pfn(pages); |
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 233165da782f..8cf7304b2867 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
@@ -162,11 +162,14 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); | |||
162 | static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO; | 162 | static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO; |
163 | module_param(kthread_prio, int, 0644); | 163 | module_param(kthread_prio, int, 0644); |
164 | 164 | ||
165 | /* Delay in jiffies for grace-period initialization delays. */ | 165 | /* Delay in jiffies for grace-period initialization delays, debug only. */ |
166 | static int gp_init_delay = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT) | 166 | #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT |
167 | ? CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY | 167 | static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY; |
168 | : 0; | ||
169 | module_param(gp_init_delay, int, 0644); | 168 | module_param(gp_init_delay, int, 0644); |
169 | #else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */ | ||
170 | static const int gp_init_delay; | ||
171 | #endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */ | ||
172 | #define PER_RCU_NODE_PERIOD 10 /* Number of grace periods between delays. */ | ||
170 | 173 | ||
171 | /* | 174 | /* |
172 | * Track the rcutorture test sequence number and the update version | 175 | * Track the rcutorture test sequence number and the update version |
@@ -1843,9 +1846,8 @@ static int rcu_gp_init(struct rcu_state *rsp) | |||
1843 | raw_spin_unlock_irq(&rnp->lock); | 1846 | raw_spin_unlock_irq(&rnp->lock); |
1844 | cond_resched_rcu_qs(); | 1847 | cond_resched_rcu_qs(); |
1845 | ACCESS_ONCE(rsp->gp_activity) = jiffies; | 1848 | ACCESS_ONCE(rsp->gp_activity) = jiffies; |
1846 | if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT) && | 1849 | if (gp_init_delay > 0 && |
1847 | gp_init_delay > 0 && | 1850 | !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD))) |
1848 | !(rsp->gpnum % (rcu_num_nodes * 10))) | ||
1849 | schedule_timeout_uninterruptible(gp_init_delay); | 1851 | schedule_timeout_uninterruptible(gp_init_delay); |
1850 | } | 1852 | } |
1851 | 1853 | ||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f9123a82cbb6..fe22f7510bce 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -1016,13 +1016,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) | |||
1016 | rq_clock_skip_update(rq, true); | 1016 | rq_clock_skip_update(rq, true); |
1017 | } | 1017 | } |
1018 | 1018 | ||
1019 | static ATOMIC_NOTIFIER_HEAD(task_migration_notifier); | ||
1020 | |||
1021 | void register_task_migration_notifier(struct notifier_block *n) | ||
1022 | { | ||
1023 | atomic_notifier_chain_register(&task_migration_notifier, n); | ||
1024 | } | ||
1025 | |||
1026 | #ifdef CONFIG_SMP | 1019 | #ifdef CONFIG_SMP |
1027 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | 1020 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) |
1028 | { | 1021 | { |
@@ -1053,18 +1046,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
1053 | trace_sched_migrate_task(p, new_cpu); | 1046 | trace_sched_migrate_task(p, new_cpu); |
1054 | 1047 | ||
1055 | if (task_cpu(p) != new_cpu) { | 1048 | if (task_cpu(p) != new_cpu) { |
1056 | struct task_migration_notifier tmn; | ||
1057 | |||
1058 | if (p->sched_class->migrate_task_rq) | 1049 | if (p->sched_class->migrate_task_rq) |
1059 | p->sched_class->migrate_task_rq(p, new_cpu); | 1050 | p->sched_class->migrate_task_rq(p, new_cpu); |
1060 | p->se.nr_migrations++; | 1051 | p->se.nr_migrations++; |
1061 | perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); | 1052 | perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0); |
1062 | |||
1063 | tmn.task = p; | ||
1064 | tmn.from_cpu = task_cpu(p); | ||
1065 | tmn.to_cpu = new_cpu; | ||
1066 | |||
1067 | atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn); | ||
1068 | } | 1053 | } |
1069 | 1054 | ||
1070 | __set_task_cpu(p, new_cpu); | 1055 | __set_task_cpu(p, new_cpu); |
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index deef1caa94c6..fefcb1fa5160 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c | |||
@@ -81,7 +81,6 @@ static void cpuidle_idle_call(void) | |||
81 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 81 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
82 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | 82 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
83 | int next_state, entered_state; | 83 | int next_state, entered_state; |
84 | unsigned int broadcast; | ||
85 | bool reflect; | 84 | bool reflect; |
86 | 85 | ||
87 | /* | 86 | /* |
@@ -150,17 +149,6 @@ static void cpuidle_idle_call(void) | |||
150 | goto exit_idle; | 149 | goto exit_idle; |
151 | } | 150 | } |
152 | 151 | ||
153 | broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP; | ||
154 | |||
155 | /* | ||
156 | * Tell the time framework to switch to a broadcast timer | ||
157 | * because our local timer will be shutdown. If a local timer | ||
158 | * is used from another cpu as a broadcast timer, this call may | ||
159 | * fail if it is not available | ||
160 | */ | ||
161 | if (broadcast && tick_broadcast_enter()) | ||
162 | goto use_default; | ||
163 | |||
164 | /* Take note of the planned idle state. */ | 152 | /* Take note of the planned idle state. */ |
165 | idle_set_state(this_rq(), &drv->states[next_state]); | 153 | idle_set_state(this_rq(), &drv->states[next_state]); |
166 | 154 | ||
@@ -174,8 +162,8 @@ static void cpuidle_idle_call(void) | |||
174 | /* The cpu is no longer idle or about to enter idle. */ | 162 | /* The cpu is no longer idle or about to enter idle. */ |
175 | idle_set_state(this_rq(), NULL); | 163 | idle_set_state(this_rq(), NULL); |
176 | 164 | ||
177 | if (broadcast) | 165 | if (entered_state == -EBUSY) |
178 | tick_broadcast_exit(); | 166 | goto use_default; |
179 | 167 | ||
180 | /* | 168 | /* |
181 | * Give the governor an opportunity to reflect on the outcome | 169 | * Give the governor an opportunity to reflect on the outcome |
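The idle loop stops driving the broadcast timer itself; judging by the new error check, that work now happens when the state is actually entered, and the only remnant here is the fallback. A sketch of the resulting flow, assuming the state-entry path reports a failed broadcast switch as -EBUSY:

    entered_state = cpuidle_enter(drv, dev, next_state);
    /* If the broadcast timer could not be engaged for this state, the entry
     * fails with -EBUSY and we retreat to the default arch idle routine
     * instead of bailing out up front as the removed code did.
     */
    if (entered_state == -EBUSY)
            goto use_default;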
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 11dc22a6983b..637a09461c1d 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
@@ -117,11 +117,7 @@ static int __clockevents_set_state(struct clock_event_device *dev, | |||
117 | /* Transition with new state-specific callbacks */ | 117 | /* Transition with new state-specific callbacks */ |
118 | switch (state) { | 118 | switch (state) { |
119 | case CLOCK_EVT_STATE_DETACHED: | 119 | case CLOCK_EVT_STATE_DETACHED: |
120 | /* | 120 | /* The clockevent device is getting replaced. Shut it down. */ |
121 | * This is an internal state, which is guaranteed to go from | ||
122 | * SHUTDOWN to DETACHED. No driver interaction required. | ||
123 | */ | ||
124 | return 0; | ||
125 | 121 | ||
126 | case CLOCK_EVT_STATE_SHUTDOWN: | 122 | case CLOCK_EVT_STATE_SHUTDOWN: |
127 | return dev->set_state_shutdown(dev); | 123 | return dev->set_state_shutdown(dev); |
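Dropping the early return means CLOCK_EVT_STATE_DETACHED now falls through to the shutdown callback, so detaching a device actively shuts it down instead of silently succeeding:

    case CLOCK_EVT_STATE_DETACHED:
            /* The clockevent device is getting replaced. Shut it down. */
    case CLOCK_EVT_STATE_SHUTDOWN:
            return dev->set_state_shutdown(dev);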
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 692bf7184c8c..25a086bcb700 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -178,12 +178,13 @@ ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len) | |||
178 | EXPORT_SYMBOL(ftrace_print_hex_seq); | 178 | EXPORT_SYMBOL(ftrace_print_hex_seq); |
179 | 179 | ||
180 | const char * | 180 | const char * |
181 | ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len, | 181 | ftrace_print_array_seq(struct trace_seq *p, const void *buf, int count, |
182 | size_t el_size) | 182 | size_t el_size) |
183 | { | 183 | { |
184 | const char *ret = trace_seq_buffer_ptr(p); | 184 | const char *ret = trace_seq_buffer_ptr(p); |
185 | const char *prefix = ""; | 185 | const char *prefix = ""; |
186 | void *ptr = (void *)buf; | 186 | void *ptr = (void *)buf; |
187 | size_t buf_len = count * el_size; | ||
187 | 188 | ||
188 | trace_seq_putc(p, '{'); | 189 | trace_seq_putc(p, '{'); |
189 | 190 | ||
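ftrace_print_array_seq() now takes the number of elements instead of a byte length and computes buf_len itself as count * el_size. A hypothetical caller, with invented data purely for illustration:

    u32 regs[4] = { 0x10, 0x20, 0x30, 0x40 };

    /* before this change: ftrace_print_array_seq(p, regs, sizeof(regs), sizeof(u32)) */
    ftrace_print_array_seq(p, regs, ARRAY_SIZE(regs), sizeof(u32));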
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 17670573dda8..ba2b0c87e65b 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -1281,6 +1281,7 @@ config RCU_TORTURE_TEST_SLOW_INIT_DELAY | |||
1281 | int "How much to slow down RCU grace-period initialization" | 1281 | int "How much to slow down RCU grace-period initialization" |
1282 | range 0 5 | 1282 | range 0 5 |
1283 | default 3 | 1283 | default 3 |
1284 | depends on RCU_TORTURE_TEST_SLOW_INIT | ||
1284 | help | 1285 | help |
1285 | This option specifies the number of jiffies to wait between | 1286 | This option specifies the number of jiffies to wait between |
1286 | each rcu_node structure initialization. | 1287 | each rcu_node structure initialization. |
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 4fecaedc80a2..777eda7d1ab4 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan | |||
@@ -10,8 +10,11 @@ config KASAN | |||
10 | help | 10 | help |
11 | Enables kernel address sanitizer - runtime memory debugger, | 11 | Enables kernel address sanitizer - runtime memory debugger, |
12 | designed to find out-of-bounds accesses and use-after-free bugs. | 12 | designed to find out-of-bounds accesses and use-after-free bugs. |
13 | This is strictly debugging feature. It consumes about 1/8 | 13 | This is strictly a debugging feature and it requires a gcc version |
14 | of available memory and brings about ~x3 performance slowdown. | 14 | of 4.9.2 or later. Detection of out of bounds accesses to stack or |
15 | global variables requires gcc 5.0 or later. | ||
16 | This feature consumes about 1/8 of available memory and brings about | ||
17 | ~x3 performance slowdown. | ||
15 | For better error detection enable CONFIG_STACKTRACE, | 18 | For better error detection enable CONFIG_STACKTRACE, |
16 | and add slub_debug=U to boot cmdline. | 19 | and add slub_debug=U to boot cmdline. |
17 | 20 | ||
@@ -40,6 +43,7 @@ config KASAN_INLINE | |||
40 | memory accesses. This is faster than outline (in some workloads | 43 | memory accesses. This is faster than outline (in some workloads |
41 | it gives about x2 boost over outline instrumentation), but | 44 | it gives about x2 boost over outline instrumentation), but |
42 | make kernel's .text size much bigger. | 45 | make kernel's .text size much bigger. |
46 | This requires a gcc version of 5.0 or later. | ||
43 | 47 | ||
44 | endchoice | 48 | endchoice |
45 | 49 | ||
diff --git a/lib/find_last_bit.c b/lib/find_last_bit.c deleted file mode 100644 index 3e3be40c6a6e..000000000000 --- a/lib/find_last_bit.c +++ /dev/null | |||
@@ -1,41 +0,0 @@ | |||
1 | /* find_last_bit.c: fallback find next bit implementation | ||
2 | * | ||
3 | * Copyright (C) 2008 IBM Corporation | ||
4 | * Written by Rusty Russell <rusty@rustcorp.com.au> | ||
5 | * (Inspired by David Howell's find_next_bit implementation) | ||
6 | * | ||
7 | * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease | ||
8 | * size and improve performance, 2015. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/bitops.h> | ||
17 | #include <linux/bitmap.h> | ||
18 | #include <linux/export.h> | ||
19 | #include <linux/kernel.h> | ||
20 | |||
21 | #ifndef find_last_bit | ||
22 | |||
23 | unsigned long find_last_bit(const unsigned long *addr, unsigned long size) | ||
24 | { | ||
25 | if (size) { | ||
26 | unsigned long val = BITMAP_LAST_WORD_MASK(size); | ||
27 | unsigned long idx = (size-1) / BITS_PER_LONG; | ||
28 | |||
29 | do { | ||
30 | val &= addr[idx]; | ||
31 | if (val) | ||
32 | return idx * BITS_PER_LONG + __fls(val); | ||
33 | |||
34 | val = ~0ul; | ||
35 | } while (idx--); | ||
36 | } | ||
37 | return size; | ||
38 | } | ||
39 | EXPORT_SYMBOL(find_last_bit); | ||
40 | |||
41 | #endif | ||
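The generic fallback is deleted outright; the rewritten helper presumably lives on with the other find_*_bit implementations, since no caller is touched here. For reference, the semantics the removed code provided:

    DECLARE_BITMAP(map, 64);

    bitmap_zero(map, 64);
    set_bit(5, map);
    set_bit(42, map);
    find_last_bit(map, 64);   /* returns 42, the highest set bit below size */

    bitmap_zero(map, 64);
    find_last_bit(map, 64);   /* returns 64: "nothing found" is reported as size */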
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 4898442b837f..b28df4019ade 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
@@ -405,13 +405,18 @@ int rhashtable_insert_rehash(struct rhashtable *ht) | |||
405 | 405 | ||
406 | if (rht_grow_above_75(ht, tbl)) | 406 | if (rht_grow_above_75(ht, tbl)) |
407 | size *= 2; | 407 | size *= 2; |
408 | /* More than two rehashes (not resizes) detected. */ | 408 | /* Do not schedule more than one rehash */ |
409 | else if (WARN_ON(old_tbl != tbl && old_tbl->size == size)) | 409 | else if (old_tbl != tbl) |
410 | return -EBUSY; | 410 | return -EBUSY; |
411 | 411 | ||
412 | new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); | 412 | new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); |
413 | if (new_tbl == NULL) | 413 | if (new_tbl == NULL) { |
414 | /* Schedule async resize/rehash to try allocation | ||
415 | * non-atomic context. | ||
416 | */ | ||
417 | schedule_work(&ht->run_work); | ||
414 | return -ENOMEM; | 418 | return -ENOMEM; |
419 | } | ||
415 | 420 | ||
416 | err = rhashtable_rehash_attach(ht, tbl, new_tbl); | 421 | err = rhashtable_rehash_attach(ht, tbl, new_tbl); |
417 | if (err) { | 422 | if (err) { |
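The WARN on a second pending rehash becomes a quiet -EBUSY, and an atomic allocation failure no longer leaves the table stuck: the insert path defers the grow/rehash to the worker, which retries the allocation from process context. The fallback in isolation:

    new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
    if (new_tbl == NULL) {
            /* Cannot grow from atomic context right now; let the deferred
             * worker redo the allocation where it is allowed to sleep.
             */
            schedule_work(&ht->run_work);
            return -ENOMEM;
    }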
diff --git a/lib/string.c b/lib/string.c index a5792019193c..bb3d4b6993c4 100644 --- a/lib/string.c +++ b/lib/string.c | |||
@@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset); | |||
607 | void memzero_explicit(void *s, size_t count) | 607 | void memzero_explicit(void *s, size_t count) |
608 | { | 608 | { |
609 | memset(s, 0, count); | 609 | memset(s, 0, count); |
610 | barrier(); | 610 | barrier_data(s); |
611 | } | 611 | } |
612 | EXPORT_SYMBOL(memzero_explicit); | 612 | EXPORT_SYMBOL(memzero_explicit); |
613 | 613 | ||
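barrier() only tells the compiler that memory may have changed; since *s is dead once memzero_explicit() returns, the memset() can still be proven side-effect free and deleted. barrier_data(s) additionally feeds the pointer into the asm, so the cleared bytes count as used. A sketch of the definition this relies on, assumed to match the compiler header:

    /* Assumed shape of barrier_data(): an empty asm taking the pointer as an
     * input plus a memory clobber, which pins the preceding stores to *ptr.
     */
    #define barrier_data(ptr) __asm__ __volatile__("" : : "r" (ptr) : "memory")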
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c index 329caf56df22..4ca5fe0042e1 100644 --- a/mm/hwpoison-inject.c +++ b/mm/hwpoison-inject.c | |||
@@ -34,13 +34,13 @@ static int hwpoison_inject(void *data, u64 val) | |||
34 | if (!hwpoison_filter_enable) | 34 | if (!hwpoison_filter_enable) |
35 | goto inject; | 35 | goto inject; |
36 | 36 | ||
37 | if (!PageLRU(p) && !PageHuge(p)) | 37 | if (!PageLRU(hpage) && !PageHuge(p)) |
38 | shake_page(p, 0); | 38 | shake_page(hpage, 0); |
39 | /* | 39 | /* |
40 | * This implies unable to support non-LRU pages. | 40 | * This implies unable to support non-LRU pages. |
41 | */ | 41 | */ |
42 | if (!PageLRU(p) && !PageHuge(p)) | 42 | if (!PageLRU(hpage) && !PageHuge(p)) |
43 | return 0; | 43 | goto put_out; |
44 | 44 | ||
45 | /* | 45 | /* |
46 | * do a racy check with elevated page count, to make sure PG_hwpoison | 46 | * do a racy check with elevated page count, to make sure PG_hwpoison |
@@ -52,11 +52,14 @@ static int hwpoison_inject(void *data, u64 val) | |||
52 | err = hwpoison_filter(hpage); | 52 | err = hwpoison_filter(hpage); |
53 | unlock_page(hpage); | 53 | unlock_page(hpage); |
54 | if (err) | 54 | if (err) |
55 | return 0; | 55 | goto put_out; |
56 | 56 | ||
57 | inject: | 57 | inject: |
58 | pr_info("Injecting memory failure at pfn %#lx\n", pfn); | 58 | pr_info("Injecting memory failure at pfn %#lx\n", pfn); |
59 | return memory_failure(pfn, 18, MF_COUNT_INCREASED); | 59 | return memory_failure(pfn, 18, MF_COUNT_INCREASED); |
60 | put_out: | ||
61 | put_page(hpage); | ||
62 | return 0; | ||
60 | } | 63 | } |
61 | 64 | ||
62 | static int hwpoison_unpoison(void *data, u64 val) | 65 | static int hwpoison_unpoison(void *data, u64 val) |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index d9359b770cd9..501820c815b3 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -1187,10 +1187,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags) | |||
1187 | * The check (unnecessarily) ignores LRU pages being isolated and | 1187 | * The check (unnecessarily) ignores LRU pages being isolated and |
1188 | * walked by the page reclaim code, however that's not a big loss. | 1188 | * walked by the page reclaim code, however that's not a big loss. |
1189 | */ | 1189 | */ |
1190 | if (!PageHuge(p) && !PageTransTail(p)) { | 1190 | if (!PageHuge(p)) { |
1191 | if (!PageLRU(p)) | 1191 | if (!PageLRU(hpage)) |
1192 | shake_page(p, 0); | 1192 | shake_page(hpage, 0); |
1193 | if (!PageLRU(p)) { | 1193 | if (!PageLRU(hpage)) { |
1194 | /* | 1194 | /* |
1195 | * shake_page could have turned it free. | 1195 | * shake_page could have turned it free. |
1196 | */ | 1196 | */ |
@@ -1777,12 +1777,12 @@ int soft_offline_page(struct page *page, int flags) | |||
1777 | } else if (ret == 0) { /* for free pages */ | 1777 | } else if (ret == 0) { /* for free pages */ |
1778 | if (PageHuge(page)) { | 1778 | if (PageHuge(page)) { |
1779 | set_page_hwpoison_huge_page(hpage); | 1779 | set_page_hwpoison_huge_page(hpage); |
1780 | dequeue_hwpoisoned_huge_page(hpage); | 1780 | if (!dequeue_hwpoisoned_huge_page(hpage)) |
1781 | atomic_long_add(1 << compound_order(hpage), | 1781 | atomic_long_add(1 << compound_order(hpage), |
1782 | &num_poisoned_pages); | 1782 | &num_poisoned_pages); |
1783 | } else { | 1783 | } else { |
1784 | SetPageHWPoison(page); | 1784 | if (!TestSetPageHWPoison(page)) |
1785 | atomic_long_inc(&num_poisoned_pages); | 1785 | atomic_long_inc(&num_poisoned_pages); |
1786 | } | 1786 | } |
1787 | } | 1787 | } |
1788 | unset_migratetype_isolate(page, MIGRATE_MOVABLE); | 1788 | unset_migratetype_isolate(page, MIGRATE_MOVABLE); |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 5daf5568b9e1..eb59f7eea508 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -580,7 +580,7 @@ static long long pos_ratio_polynom(unsigned long setpoint, | |||
580 | long x; | 580 | long x; |
581 | 581 | ||
582 | x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, | 582 | x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT, |
583 | limit - setpoint + 1); | 583 | (limit - setpoint) | 1); |
584 | pos_ratio = x; | 584 | pos_ratio = x; |
585 | pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; | 585 | pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; |
586 | pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; | 586 | pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT; |
@@ -807,7 +807,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, | |||
807 | * scale global setpoint to bdi's: | 807 | * scale global setpoint to bdi's: |
808 | * bdi_setpoint = setpoint * bdi_thresh / thresh | 808 | * bdi_setpoint = setpoint * bdi_thresh / thresh |
809 | */ | 809 | */ |
810 | x = div_u64((u64)bdi_thresh << 16, thresh + 1); | 810 | x = div_u64((u64)bdi_thresh << 16, thresh | 1); |
811 | bdi_setpoint = setpoint * (u64)x >> 16; | 811 | bdi_setpoint = setpoint * (u64)x >> 16; |
812 | /* | 812 | /* |
813 | * Use span=(8*write_bw) in single bdi case as indicated by | 813 | * Use span=(8*write_bw) in single bdi case as indicated by |
@@ -822,7 +822,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi, | |||
822 | 822 | ||
823 | if (bdi_dirty < x_intercept - span / 4) { | 823 | if (bdi_dirty < x_intercept - span / 4) { |
824 | pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty), | 824 | pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty), |
825 | x_intercept - bdi_setpoint + 1); | 825 | (x_intercept - bdi_setpoint) | 1); |
826 | } else | 826 | } else |
827 | pos_ratio /= 4; | 827 | pos_ratio /= 4; |
828 | 828 | ||
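All three divisors move from "+ 1" to "| 1". If any of these quantities can end up as the all-ones value (for example a subtraction that races and wraps), adding 1 overflows back to 0 and the division traps, whereas OR-ing in 1 can never produce 0 and perturbs a non-zero divisor by at most 1, which is noise at these magnitudes:

    unsigned long d = limit - setpoint;     /* may wrap if setpoint > limit */
    unsigned long div_old = d + 1;          /* becomes 0 when d == ULONG_MAX */
    unsigned long div_new = d | 1;          /* never 0; d for odd d, d + 1 for even d */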
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 409608960899..e29ad70b3000 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c | |||
@@ -170,7 +170,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb, | |||
170 | struct br_port_msg *bpm; | 170 | struct br_port_msg *bpm; |
171 | struct nlattr *nest, *nest2; | 171 | struct nlattr *nest, *nest2; |
172 | 172 | ||
173 | nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI); | 173 | nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0); |
174 | if (!nlh) | 174 | if (!nlh) |
175 | return -EMSGSIZE; | 175 | return -EMSGSIZE; |
176 | 176 | ||
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 0e4ddb81610d..4b5c236998ff 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -394,7 +394,7 @@ errout: | |||
394 | * Dump information about all ports, in response to GETLINK | 394 | * Dump information about all ports, in response to GETLINK |
395 | */ | 395 | */ |
396 | int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 396 | int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
397 | struct net_device *dev, u32 filter_mask) | 397 | struct net_device *dev, u32 filter_mask, int nlflags) |
398 | { | 398 | { |
399 | struct net_bridge_port *port = br_port_get_rtnl(dev); | 399 | struct net_bridge_port *port = br_port_get_rtnl(dev); |
400 | 400 | ||
@@ -402,7 +402,7 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, | |||
402 | !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) | 402 | !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) |
403 | return 0; | 403 | return 0; |
404 | 404 | ||
405 | return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI, | 405 | return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags, |
406 | filter_mask, dev); | 406 | filter_mask, dev); |
407 | } | 407 | } |
408 | 408 | ||
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 6ca0251cb478..3362c29400f1 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -828,7 +828,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port); | |||
828 | int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags); | 828 | int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags); |
829 | int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags); | 829 | int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags); |
830 | int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, | 830 | int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev, |
831 | u32 filter_mask); | 831 | u32 filter_mask, int nlflags); |
832 | 832 | ||
833 | #ifdef CONFIG_SYSFS | 833 | #ifdef CONFIG_SYSFS |
834 | /* br_sysfs_if.c */ | 834 | /* br_sysfs_if.c */ |
diff --git a/net/core/dev.c b/net/core/dev.c index 1796cef55ab5..c7ba0388f1be 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -3079,7 +3079,7 @@ static struct rps_dev_flow * | |||
3079 | set_rps_cpu(struct net_device *dev, struct sk_buff *skb, | 3079 | set_rps_cpu(struct net_device *dev, struct sk_buff *skb, |
3080 | struct rps_dev_flow *rflow, u16 next_cpu) | 3080 | struct rps_dev_flow *rflow, u16 next_cpu) |
3081 | { | 3081 | { |
3082 | if (next_cpu != RPS_NO_CPU) { | 3082 | if (next_cpu < nr_cpu_ids) { |
3083 | #ifdef CONFIG_RFS_ACCEL | 3083 | #ifdef CONFIG_RFS_ACCEL |
3084 | struct netdev_rx_queue *rxqueue; | 3084 | struct netdev_rx_queue *rxqueue; |
3085 | struct rps_dev_flow_table *flow_table; | 3085 | struct rps_dev_flow_table *flow_table; |
@@ -3184,7 +3184,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, | |||
3184 | * If the desired CPU (where last recvmsg was done) is | 3184 | * If the desired CPU (where last recvmsg was done) is |
3185 | * different from current CPU (one in the rx-queue flow | 3185 | * different from current CPU (one in the rx-queue flow |
3186 | * table entry), switch if one of the following holds: | 3186 | * table entry), switch if one of the following holds: |
3187 | * - Current CPU is unset (equal to RPS_NO_CPU). | 3187 | * - Current CPU is unset (>= nr_cpu_ids). |
3188 | * - Current CPU is offline. | 3188 | * - Current CPU is offline. |
3189 | * - The current CPU's queue tail has advanced beyond the | 3189 | * - The current CPU's queue tail has advanced beyond the |
3190 | * last packet that was enqueued using this table entry. | 3190 | * last packet that was enqueued using this table entry. |
@@ -3192,14 +3192,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, | |||
3192 | * have been dequeued, thus preserving in order delivery. | 3192 | * have been dequeued, thus preserving in order delivery. |
3193 | */ | 3193 | */ |
3194 | if (unlikely(tcpu != next_cpu) && | 3194 | if (unlikely(tcpu != next_cpu) && |
3195 | (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || | 3195 | (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || |
3196 | ((int)(per_cpu(softnet_data, tcpu).input_queue_head - | 3196 | ((int)(per_cpu(softnet_data, tcpu).input_queue_head - |
3197 | rflow->last_qtail)) >= 0)) { | 3197 | rflow->last_qtail)) >= 0)) { |
3198 | tcpu = next_cpu; | 3198 | tcpu = next_cpu; |
3199 | rflow = set_rps_cpu(dev, skb, rflow, next_cpu); | 3199 | rflow = set_rps_cpu(dev, skb, rflow, next_cpu); |
3200 | } | 3200 | } |
3201 | 3201 | ||
3202 | if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { | 3202 | if (tcpu < nr_cpu_ids && cpu_online(tcpu)) { |
3203 | *rflowp = rflow; | 3203 | *rflowp = rflow; |
3204 | cpu = tcpu; | 3204 | cpu = tcpu; |
3205 | goto done; | 3205 | goto done; |
@@ -3240,14 +3240,14 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, | |||
3240 | struct rps_dev_flow_table *flow_table; | 3240 | struct rps_dev_flow_table *flow_table; |
3241 | struct rps_dev_flow *rflow; | 3241 | struct rps_dev_flow *rflow; |
3242 | bool expire = true; | 3242 | bool expire = true; |
3243 | int cpu; | 3243 | unsigned int cpu; |
3244 | 3244 | ||
3245 | rcu_read_lock(); | 3245 | rcu_read_lock(); |
3246 | flow_table = rcu_dereference(rxqueue->rps_flow_table); | 3246 | flow_table = rcu_dereference(rxqueue->rps_flow_table); |
3247 | if (flow_table && flow_id <= flow_table->mask) { | 3247 | if (flow_table && flow_id <= flow_table->mask) { |
3248 | rflow = &flow_table->flows[flow_id]; | 3248 | rflow = &flow_table->flows[flow_id]; |
3249 | cpu = ACCESS_ONCE(rflow->cpu); | 3249 | cpu = ACCESS_ONCE(rflow->cpu); |
3250 | if (rflow->filter == filter_id && cpu != RPS_NO_CPU && | 3250 | if (rflow->filter == filter_id && cpu < nr_cpu_ids && |
3251 | ((int)(per_cpu(softnet_data, cpu).input_queue_head - | 3251 | ((int)(per_cpu(softnet_data, cpu).input_queue_head - |
3252 | rflow->last_qtail) < | 3252 | rflow->last_qtail) < |
3253 | (int)(10 * flow_table->mask))) | 3253 | (int)(10 * flow_table->mask))) |
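The RPS code drops the RPS_NO_CPU sentinel in favour of treating every value >= nr_cpu_ids as "no CPU recorded", which is why rflow->cpu is now read into an unsigned variable. The validity test after the change, with a hypothetical consumer:

    /* A recorded CPU is only honoured if it is a real, online CPU id; the
     * old sentinel, like any other out-of-range value, now simply fails the
     * range check.
     */
    if (cpu < nr_cpu_ids && cpu_online(cpu))
            steer_to(cpu);          /* invented placeholder for the real use */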
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 358d52a38533..666e0928ba40 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -2854,7 +2854,7 @@ static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask, | |||
2854 | 2854 | ||
2855 | int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 2855 | int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
2856 | struct net_device *dev, u16 mode, | 2856 | struct net_device *dev, u16 mode, |
2857 | u32 flags, u32 mask) | 2857 | u32 flags, u32 mask, int nlflags) |
2858 | { | 2858 | { |
2859 | struct nlmsghdr *nlh; | 2859 | struct nlmsghdr *nlh; |
2860 | struct ifinfomsg *ifm; | 2860 | struct ifinfomsg *ifm; |
@@ -2863,7 +2863,7 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | |||
2863 | u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; | 2863 | u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; |
2864 | struct net_device *br_dev = netdev_master_upper_dev_get(dev); | 2864 | struct net_device *br_dev = netdev_master_upper_dev_get(dev); |
2865 | 2865 | ||
2866 | nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI); | 2866 | nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags); |
2867 | if (nlh == NULL) | 2867 | if (nlh == NULL) |
2868 | return -EMSGSIZE; | 2868 | return -EMSGSIZE; |
2869 | 2869 | ||
@@ -2969,7 +2969,8 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) | |||
2969 | if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { | 2969 | if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { |
2970 | if (idx >= cb->args[0] && | 2970 | if (idx >= cb->args[0] && |
2971 | br_dev->netdev_ops->ndo_bridge_getlink( | 2971 | br_dev->netdev_ops->ndo_bridge_getlink( |
2972 | skb, portid, seq, dev, filter_mask) < 0) | 2972 | skb, portid, seq, dev, filter_mask, |
2973 | NLM_F_MULTI) < 0) | ||
2973 | break; | 2974 | break; |
2974 | idx++; | 2975 | idx++; |
2975 | } | 2976 | } |
@@ -2977,7 +2978,8 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) | |||
2977 | if (ops->ndo_bridge_getlink) { | 2978 | if (ops->ndo_bridge_getlink) { |
2978 | if (idx >= cb->args[0] && | 2979 | if (idx >= cb->args[0] && |
2979 | ops->ndo_bridge_getlink(skb, portid, seq, dev, | 2980 | ops->ndo_bridge_getlink(skb, portid, seq, dev, |
2980 | filter_mask) < 0) | 2981 | filter_mask, |
2982 | NLM_F_MULTI) < 0) | ||
2981 | break; | 2983 | break; |
2982 | idx++; | 2984 | idx++; |
2983 | } | 2985 | } |
@@ -3018,7 +3020,7 @@ static int rtnl_bridge_notify(struct net_device *dev) | |||
3018 | goto errout; | 3020 | goto errout; |
3019 | } | 3021 | } |
3020 | 3022 | ||
3021 | err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0); | 3023 | err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0); |
3022 | if (err < 0) | 3024 | if (err < 0) |
3023 | goto errout; | 3025 | goto errout; |
3024 | 3026 | ||
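ndo_bridge_getlink() gains an nlflags argument so NLM_F_MULTI is only set on replies that really are part of a multipart dump; the single-message notification path passes 0. The two call shapes after the change, lifted from the hunks above:

    /* dump path: part of a multipart reply */
    err = ops->ndo_bridge_getlink(skb, portid, seq, dev, filter_mask,
                                  NLM_F_MULTI);

    /* notification path: one self-contained message */
    err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);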
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index d1967dab9cc6..3cfff2a3d651 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -280,13 +280,14 @@ nodata: | |||
280 | EXPORT_SYMBOL(__alloc_skb); | 280 | EXPORT_SYMBOL(__alloc_skb); |
281 | 281 | ||
282 | /** | 282 | /** |
283 | * build_skb - build a network buffer | 283 | * __build_skb - build a network buffer |
284 | * @data: data buffer provided by caller | 284 | * @data: data buffer provided by caller |
285 | * @frag_size: size of fragment, or 0 if head was kmalloced | 285 | * @frag_size: size of data, or 0 if head was kmalloced |
286 | * | 286 | * |
287 | * Allocate a new &sk_buff. Caller provides space holding head and | 287 | * Allocate a new &sk_buff. Caller provides space holding head and |
288 | * skb_shared_info. @data must have been allocated by kmalloc() only if | 288 | * skb_shared_info. @data must have been allocated by kmalloc() only if |
289 | * @frag_size is 0, otherwise data should come from the page allocator. | 289 | * @frag_size is 0, otherwise data should come from the page allocator |
290 | * or vmalloc() | ||
290 | * The return is the new skb buffer. | 291 | * The return is the new skb buffer. |
291 | * On a failure the return is %NULL, and @data is not freed. | 292 | * On a failure the return is %NULL, and @data is not freed. |
292 | * Notes : | 293 | * Notes : |
@@ -297,7 +298,7 @@ EXPORT_SYMBOL(__alloc_skb); | |||
297 | * before giving packet to stack. | 298 | * before giving packet to stack. |
298 | * RX rings only contains data buffers, not full skbs. | 299 | * RX rings only contains data buffers, not full skbs. |
299 | */ | 300 | */ |
300 | struct sk_buff *build_skb(void *data, unsigned int frag_size) | 301 | struct sk_buff *__build_skb(void *data, unsigned int frag_size) |
301 | { | 302 | { |
302 | struct skb_shared_info *shinfo; | 303 | struct skb_shared_info *shinfo; |
303 | struct sk_buff *skb; | 304 | struct sk_buff *skb; |
@@ -311,7 +312,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size) | |||
311 | 312 | ||
312 | memset(skb, 0, offsetof(struct sk_buff, tail)); | 313 | memset(skb, 0, offsetof(struct sk_buff, tail)); |
313 | skb->truesize = SKB_TRUESIZE(size); | 314 | skb->truesize = SKB_TRUESIZE(size); |
314 | skb->head_frag = frag_size != 0; | ||
315 | atomic_set(&skb->users, 1); | 315 | atomic_set(&skb->users, 1); |
316 | skb->head = data; | 316 | skb->head = data; |
317 | skb->data = data; | 317 | skb->data = data; |
@@ -328,6 +328,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size) | |||
328 | 328 | ||
329 | return skb; | 329 | return skb; |
330 | } | 330 | } |
331 | |||
332 | /* build_skb() is wrapper over __build_skb(), that specifically | ||
333 | * takes care of skb->head and skb->pfmemalloc | ||
334 | * This means that if @frag_size is not zero, then @data must be backed | ||
335 | * by a page fragment, not kmalloc() or vmalloc() | ||
336 | */ | ||
337 | struct sk_buff *build_skb(void *data, unsigned int frag_size) | ||
338 | { | ||
339 | struct sk_buff *skb = __build_skb(data, frag_size); | ||
340 | |||
341 | if (skb && frag_size) { | ||
342 | skb->head_frag = 1; | ||
343 | if (virt_to_head_page(data)->pfmemalloc) | ||
344 | skb->pfmemalloc = 1; | ||
345 | } | ||
346 | return skb; | ||
347 | } | ||
331 | EXPORT_SYMBOL(build_skb); | 348 | EXPORT_SYMBOL(build_skb); |
332 | 349 | ||
333 | struct netdev_alloc_cache { | 350 | struct netdev_alloc_cache { |
@@ -348,7 +365,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc, | |||
348 | gfp_t gfp = gfp_mask; | 365 | gfp_t gfp = gfp_mask; |
349 | 366 | ||
350 | if (order) { | 367 | if (order) { |
351 | gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY; | 368 | gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | |
369 | __GFP_NOMEMALLOC; | ||
352 | page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); | 370 | page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); |
353 | nc->frag.size = PAGE_SIZE << (page ? order : 0); | 371 | nc->frag.size = PAGE_SIZE << (page ? order : 0); |
354 | } | 372 | } |
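__build_skb() is now the bare constructor and build_skb() a wrapper that additionally marks page-fragment heads and propagates pfmemalloc. A minimal caller-side sketch; the from_page_frag flag and free_buf() helper are invented for illustration:

    struct sk_buff *skb;

    if (from_page_frag)
            skb = build_skb(data, frag_size);   /* sets head_frag, pfmemalloc */
    else
            skb = __build_skb(data, size);      /* kmalloc()/vmalloc() backed head */
    if (!skb)
            free_buf(data);                     /* @data is not freed on failure */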
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 2b4f21d34df6..ccf4c5629b3c 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -453,7 +453,8 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) | |||
453 | iph->saddr, iph->daddr); | 453 | iph->saddr, iph->daddr); |
454 | if (req) { | 454 | if (req) { |
455 | nsk = dccp_check_req(sk, skb, req); | 455 | nsk = dccp_check_req(sk, skb, req); |
456 | reqsk_put(req); | 456 | if (!nsk) |
457 | reqsk_put(req); | ||
457 | return nsk; | 458 | return nsk; |
458 | } | 459 | } |
459 | nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo, | 460 | nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo, |
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 9d0551092c6c..5165571f397a 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -301,7 +301,8 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) | |||
301 | &iph->daddr, inet6_iif(skb)); | 301 | &iph->daddr, inet6_iif(skb)); |
302 | if (req) { | 302 | if (req) { |
303 | nsk = dccp_check_req(sk, skb, req); | 303 | nsk = dccp_check_req(sk, skb, req); |
304 | reqsk_put(req); | 304 | if (!nsk) |
305 | reqsk_put(req); | ||
305 | return nsk; | 306 | return nsk; |
306 | } | 307 | } |
307 | nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo, | 308 | nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo, |
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 5f566663e47f..30addee2dd03 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c | |||
@@ -186,8 +186,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb, | |||
186 | if (child == NULL) | 186 | if (child == NULL) |
187 | goto listen_overflow; | 187 | goto listen_overflow; |
188 | 188 | ||
189 | inet_csk_reqsk_queue_unlink(sk, req); | 189 | inet_csk_reqsk_queue_drop(sk, req); |
190 | inet_csk_reqsk_queue_removed(sk, req); | ||
191 | inet_csk_reqsk_queue_add(sk, req, child); | 190 | inet_csk_reqsk_queue_add(sk, req, child); |
192 | out: | 191 | out: |
193 | return child; | 192 | return child; |
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 079a224471e7..e6f6cc3a1bcf 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c | |||
@@ -633,7 +633,7 @@ static int dsa_of_probe(struct device *dev) | |||
633 | if (cd->sw_addr > PHY_MAX_ADDR) | 633 | if (cd->sw_addr > PHY_MAX_ADDR) |
634 | continue; | 634 | continue; |
635 | 635 | ||
636 | if (!of_property_read_u32(np, "eeprom-length", &eeprom_len)) | 636 | if (!of_property_read_u32(child, "eeprom-length", &eeprom_len)) |
637 | cd->eeprom_len = eeprom_len; | 637 | cd->eeprom_len = eeprom_len; |
638 | 638 | ||
639 | for_each_available_child_of_node(child, port) { | 639 | for_each_available_child_of_node(child, port) { |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 5c3dd6267ed3..8976ca423a07 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -564,6 +564,40 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req) | |||
564 | } | 564 | } |
565 | EXPORT_SYMBOL(inet_rtx_syn_ack); | 565 | EXPORT_SYMBOL(inet_rtx_syn_ack); |
566 | 566 | ||
567 | /* return true if req was found in the syn_table[] */ | ||
568 | static bool reqsk_queue_unlink(struct request_sock_queue *queue, | ||
569 | struct request_sock *req) | ||
570 | { | ||
571 | struct listen_sock *lopt = queue->listen_opt; | ||
572 | struct request_sock **prev; | ||
573 | bool found = false; | ||
574 | |||
575 | spin_lock(&queue->syn_wait_lock); | ||
576 | |||
577 | for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL; | ||
578 | prev = &(*prev)->dl_next) { | ||
579 | if (*prev == req) { | ||
580 | *prev = req->dl_next; | ||
581 | found = true; | ||
582 | break; | ||
583 | } | ||
584 | } | ||
585 | |||
586 | spin_unlock(&queue->syn_wait_lock); | ||
587 | if (del_timer(&req->rsk_timer)) | ||
588 | reqsk_put(req); | ||
589 | return found; | ||
590 | } | ||
591 | |||
592 | void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req) | ||
593 | { | ||
594 | if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) { | ||
595 | reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); | ||
596 | reqsk_put(req); | ||
597 | } | ||
598 | } | ||
599 | EXPORT_SYMBOL(inet_csk_reqsk_queue_drop); | ||
600 | |||
567 | static void reqsk_timer_handler(unsigned long data) | 601 | static void reqsk_timer_handler(unsigned long data) |
568 | { | 602 | { |
569 | struct request_sock *req = (struct request_sock *)data; | 603 | struct request_sock *req = (struct request_sock *)data; |
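inet_csk_reqsk_queue_drop() folds the syn-table unlink, the request-timer cancel, the queue accounting and the reference drops into one exported helper. The DCCP and TCP minisock callers further down therefore collapse to:

    /* replaces the old unlink + removed pair; the child now holds the last
     * reference, so the caller must not reqsk_put(req) afterwards.
     */
    inet_csk_reqsk_queue_drop(sk, req);
    inet_csk_reqsk_queue_add(sk, req, child);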
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index a93f260cf24c..05ff44b758df 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c | |||
@@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk) | |||
158 | if (sk_hashed(sk)) { | 158 | if (sk_hashed(sk)) { |
159 | write_lock_bh(&ping_table.lock); | 159 | write_lock_bh(&ping_table.lock); |
160 | hlist_nulls_del(&sk->sk_nulls_node); | 160 | hlist_nulls_del(&sk->sk_nulls_node); |
161 | sk_nulls_node_init(&sk->sk_nulls_node); | ||
161 | sock_put(sk); | 162 | sock_put(sk); |
162 | isk->inet_num = 0; | 163 | isk->inet_num = 0; |
163 | isk->inet_sport = 0; | 164 | isk->inet_sport = 0; |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index a78540f28276..bff62fc87b8e 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -962,10 +962,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) | |||
962 | if (dst_metric_locked(dst, RTAX_MTU)) | 962 | if (dst_metric_locked(dst, RTAX_MTU)) |
963 | return; | 963 | return; |
964 | 964 | ||
965 | if (dst->dev->mtu < mtu) | 965 | if (ipv4_mtu(dst) < mtu) |
966 | return; | ||
967 | |||
968 | if (rt->rt_pmtu && rt->rt_pmtu < mtu) | ||
969 | return; | 966 | return; |
970 | 967 | ||
971 | if (mtu < ip_rt_min_pmtu) | 968 | if (mtu < ip_rt_min_pmtu) |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3571f2be4470..fc1c658ec6c1 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1348,7 +1348,8 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) | |||
1348 | req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); | 1348 | req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); |
1349 | if (req) { | 1349 | if (req) { |
1350 | nsk = tcp_check_req(sk, skb, req, false); | 1350 | nsk = tcp_check_req(sk, skb, req, false); |
1351 | reqsk_put(req); | 1351 | if (!nsk) |
1352 | reqsk_put(req); | ||
1352 | return nsk; | 1353 | return nsk; |
1353 | } | 1354 | } |
1354 | 1355 | ||
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 63d6311b5365..e5d7649136fc 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -755,10 +755,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, | |||
755 | if (!child) | 755 | if (!child) |
756 | goto listen_overflow; | 756 | goto listen_overflow; |
757 | 757 | ||
758 | inet_csk_reqsk_queue_unlink(sk, req); | 758 | inet_csk_reqsk_queue_drop(sk, req); |
759 | inet_csk_reqsk_queue_removed(sk, req); | ||
760 | |||
761 | inet_csk_reqsk_queue_add(sk, req, child); | 759 | inet_csk_reqsk_queue_add(sk, req, child); |
760 | /* Warning: caller must not call reqsk_put(req); | ||
761 | * child stole last reference on it. | ||
762 | */ | ||
762 | return child; | 763 | return child; |
763 | 764 | ||
764 | listen_overflow: | 765 | listen_overflow: |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 8c8d7e06b72f..a369e8a70b2c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2812,39 +2812,65 @@ begin_fwd: | |||
2812 | } | 2812 | } |
2813 | } | 2813 | } |
2814 | 2814 | ||
2815 | /* Send a fin. The caller locks the socket for us. This cannot be | 2815 | /* We allow to exceed memory limits for FIN packets to expedite |
2816 | * allowed to fail queueing a FIN frame under any circumstances. | 2816 | * connection tear down and (memory) recovery. |
2817 | * Otherwise tcp_send_fin() could be tempted to either delay FIN | ||
2818 | * or even be forced to close flow without any FIN. | ||
2819 | */ | ||
2820 | static void sk_forced_wmem_schedule(struct sock *sk, int size) | ||
2821 | { | ||
2822 | int amt, status; | ||
2823 | |||
2824 | if (size <= sk->sk_forward_alloc) | ||
2825 | return; | ||
2826 | amt = sk_mem_pages(size); | ||
2827 | sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; | ||
2828 | sk_memory_allocated_add(sk, amt, &status); | ||
2829 | } | ||
2830 | |||
2831 | /* Send a FIN. The caller locks the socket for us. | ||
2832 | * We should try to send a FIN packet really hard, but eventually give up. | ||
2817 | */ | 2833 | */ |
2818 | void tcp_send_fin(struct sock *sk) | 2834 | void tcp_send_fin(struct sock *sk) |
2819 | { | 2835 | { |
2836 | struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk); | ||
2820 | struct tcp_sock *tp = tcp_sk(sk); | 2837 | struct tcp_sock *tp = tcp_sk(sk); |
2821 | struct sk_buff *skb = tcp_write_queue_tail(sk); | ||
2822 | int mss_now; | ||
2823 | 2838 | ||
2824 | /* Optimization, tack on the FIN if we have a queue of | 2839 | /* Optimization, tack on the FIN if we have one skb in write queue and |
2825 | * unsent frames. But be careful about outgoing SACKS | 2840 | * this skb was not yet sent, or we are under memory pressure. |
2826 | * and IP options. | 2841 | * Note: in the latter case, FIN packet will be sent after a timeout, |
2842 | * as TCP stack thinks it has already been transmitted. | ||
2827 | */ | 2843 | */ |
2828 | mss_now = tcp_current_mss(sk); | 2844 | if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) { |
2829 | 2845 | coalesce: | |
2830 | if (tcp_send_head(sk)) { | 2846 | TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN; |
2831 | TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN; | 2847 | TCP_SKB_CB(tskb)->end_seq++; |
2832 | TCP_SKB_CB(skb)->end_seq++; | ||
2833 | tp->write_seq++; | 2848 | tp->write_seq++; |
2849 | if (!tcp_send_head(sk)) { | ||
2850 | /* This means tskb was already sent. | ||
2851 | * Pretend we included the FIN on previous transmit. | ||
2852 | * We need to set tp->snd_nxt to the value it would have | ||
2853 | * if FIN had been sent. This is because retransmit path | ||
2854 | * does not change tp->snd_nxt. | ||
2855 | */ | ||
2856 | tp->snd_nxt++; | ||
2857 | return; | ||
2858 | } | ||
2834 | } else { | 2859 | } else { |
2835 | /* Socket is locked, keep trying until memory is available. */ | 2860 | skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation); |
2836 | for (;;) { | 2861 | if (unlikely(!skb)) { |
2837 | skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); | 2862 | if (tskb) |
2838 | if (skb) | 2863 | goto coalesce; |
2839 | break; | 2864 | return; |
2840 | yield(); | ||
2841 | } | 2865 | } |
2866 | skb_reserve(skb, MAX_TCP_HEADER); | ||
2867 | sk_forced_wmem_schedule(sk, skb->truesize); | ||
2842 | /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ | 2868 | /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ |
2843 | tcp_init_nondata_skb(skb, tp->write_seq, | 2869 | tcp_init_nondata_skb(skb, tp->write_seq, |
2844 | TCPHDR_ACK | TCPHDR_FIN); | 2870 | TCPHDR_ACK | TCPHDR_FIN); |
2845 | tcp_queue_skb(sk, skb); | 2871 | tcp_queue_skb(sk, skb); |
2846 | } | 2872 | } |
2847 | __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); | 2873 | __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF); |
2848 | } | 2874 | } |
2849 | 2875 | ||
2850 | /* We get here when a process closes a file descriptor (either due to | 2876 | /* We get here when a process closes a file descriptor (either due to |
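tcp_send_fin() now prefers to piggy-back the FIN on the tail skb whenever that skb is still unsent or the socket is under memory pressure, and only otherwise allocates a fresh skb whose memory is force-charged by sk_forced_wmem_schedule(). The subtle part is the already-sent tail: the retransmit path never advances snd_nxt, so the function bumps it by hand to cover the sequence byte the FIN consumes:

    TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
    TCP_SKB_CB(tskb)->end_seq++;
    tp->write_seq++;
    if (!tcp_send_head(sk))
            tp->snd_nxt++;  /* pretend the FIN went out with that transmit */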
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index b5e6cc1d4a73..a38d3ac0f18f 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -1246,7 +1246,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev) | |||
1246 | static int ip6gre_tunnel_init(struct net_device *dev) | 1246 | static int ip6gre_tunnel_init(struct net_device *dev) |
1247 | { | 1247 | { |
1248 | struct ip6_tnl *tunnel; | 1248 | struct ip6_tnl *tunnel; |
1249 | int i; | ||
1250 | 1249 | ||
1251 | tunnel = netdev_priv(dev); | 1250 | tunnel = netdev_priv(dev); |
1252 | 1251 | ||
@@ -1260,16 +1259,10 @@ static int ip6gre_tunnel_init(struct net_device *dev) | |||
1260 | if (ipv6_addr_any(&tunnel->parms.raddr)) | 1259 | if (ipv6_addr_any(&tunnel->parms.raddr)) |
1261 | dev->header_ops = &ip6gre_header_ops; | 1260 | dev->header_ops = &ip6gre_header_ops; |
1262 | 1261 | ||
1263 | dev->tstats = alloc_percpu(struct pcpu_sw_netstats); | 1262 | dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); |
1264 | if (!dev->tstats) | 1263 | if (!dev->tstats) |
1265 | return -ENOMEM; | 1264 | return -ENOMEM; |
1266 | 1265 | ||
1267 | for_each_possible_cpu(i) { | ||
1268 | struct pcpu_sw_netstats *ip6gre_tunnel_stats; | ||
1269 | ip6gre_tunnel_stats = per_cpu_ptr(dev->tstats, i); | ||
1270 | u64_stats_init(&ip6gre_tunnel_stats->syncp); | ||
1271 | } | ||
1272 | |||
1273 | return 0; | 1266 | return 0; |
1274 | } | 1267 | } |
1275 | 1268 | ||
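The open-coded alloc_percpu() plus per-CPU u64_stats_init() loop is replaced by netdev_alloc_pcpu_stats(), which performs that same per-CPU seqcount initialization internally:

    dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
    if (!dev->tstats)
            return -ENOMEM;
    /* every per-CPU pcpu_sw_netstats now starts with its syncp initialized */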
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index ad51df85aa00..b6575d665568 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -946,7 +946,8 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb) | |||
946 | &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb)); | 946 | &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb)); |
947 | if (req) { | 947 | if (req) { |
948 | nsk = tcp_check_req(sk, skb, req, false); | 948 | nsk = tcp_check_req(sk, skb, req, false); |
949 | reqsk_put(req); | 949 | if (!nsk) |
950 | reqsk_put(req); | ||
950 | return nsk; | 951 | return nsk; |
951 | } | 952 | } |
952 | nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo, | 953 | nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo, |
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index db8a2ea6d4de..954810c76a86 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c | |||
@@ -53,6 +53,11 @@ static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index) | |||
53 | return rt; | 53 | return rt; |
54 | } | 54 | } |
55 | 55 | ||
56 | static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev) | ||
57 | { | ||
58 | return rcu_dereference_rtnl(dev->mpls_ptr); | ||
59 | } | ||
60 | |||
56 | static bool mpls_output_possible(const struct net_device *dev) | 61 | static bool mpls_output_possible(const struct net_device *dev) |
57 | { | 62 | { |
58 | return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev); | 63 | return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev); |
@@ -136,6 +141,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, | |||
136 | struct mpls_route *rt; | 141 | struct mpls_route *rt; |
137 | struct mpls_entry_decoded dec; | 142 | struct mpls_entry_decoded dec; |
138 | struct net_device *out_dev; | 143 | struct net_device *out_dev; |
144 | struct mpls_dev *mdev; | ||
139 | unsigned int hh_len; | 145 | unsigned int hh_len; |
140 | unsigned int new_header_size; | 146 | unsigned int new_header_size; |
141 | unsigned int mtu; | 147 | unsigned int mtu; |
@@ -143,6 +149,10 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, | |||
143 | 149 | ||
144 | /* Careful this entire function runs inside of an rcu critical section */ | 150 | /* Careful this entire function runs inside of an rcu critical section */ |
145 | 151 | ||
152 | mdev = mpls_dev_get(dev); | ||
153 | if (!mdev || !mdev->input_enabled) | ||
154 | goto drop; | ||
155 | |||
146 | if (skb->pkt_type != PACKET_HOST) | 156 | if (skb->pkt_type != PACKET_HOST) |
147 | goto drop; | 157 | goto drop; |
148 | 158 | ||
@@ -352,9 +362,9 @@ static int mpls_route_add(struct mpls_route_config *cfg) | |||
352 | if (!dev) | 362 | if (!dev) |
353 | goto errout; | 363 | goto errout; |
354 | 364 | ||
355 | /* For now just support ethernet devices */ | 365 | /* Ensure this is a supported device */ |
356 | err = -EINVAL; | 366 | err = -EINVAL; |
357 | if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK)) | 367 | if (!mpls_dev_get(dev)) |
358 | goto errout; | 368 | goto errout; |
359 | 369 | ||
360 | err = -EINVAL; | 370 | err = -EINVAL; |
@@ -428,10 +438,89 @@ errout: | |||
428 | return err; | 438 | return err; |
429 | } | 439 | } |
430 | 440 | ||
441 | #define MPLS_PERDEV_SYSCTL_OFFSET(field) \ | ||
442 | (&((struct mpls_dev *)0)->field) | ||
443 | |||
444 | static const struct ctl_table mpls_dev_table[] = { | ||
445 | { | ||
446 | .procname = "input", | ||
447 | .maxlen = sizeof(int), | ||
448 | .mode = 0644, | ||
449 | .proc_handler = proc_dointvec, | ||
450 | .data = MPLS_PERDEV_SYSCTL_OFFSET(input_enabled), | ||
451 | }, | ||
452 | { } | ||
453 | }; | ||
454 | |||
455 | static int mpls_dev_sysctl_register(struct net_device *dev, | ||
456 | struct mpls_dev *mdev) | ||
457 | { | ||
458 | char path[sizeof("net/mpls/conf/") + IFNAMSIZ]; | ||
459 | struct ctl_table *table; | ||
460 | int i; | ||
461 | |||
462 | table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL); | ||
463 | if (!table) | ||
464 | goto out; | ||
465 | |||
466 | /* Table data contains only offsets relative to the base of | ||
467 | * the mdev at this point, so make them absolute. | ||
468 | */ | ||
469 | for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++) | ||
470 | table[i].data = (char *)mdev + (uintptr_t)table[i].data; | ||
471 | |||
472 | snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name); | ||
473 | |||
474 | mdev->sysctl = register_net_sysctl(dev_net(dev), path, table); | ||
475 | if (!mdev->sysctl) | ||
476 | goto free; | ||
477 | |||
478 | return 0; | ||
479 | |||
480 | free: | ||
481 | kfree(table); | ||
482 | out: | ||
483 | return -ENOBUFS; | ||
484 | } | ||
485 | |||
486 | static void mpls_dev_sysctl_unregister(struct mpls_dev *mdev) | ||
487 | { | ||
488 | struct ctl_table *table; | ||
489 | |||
490 | table = mdev->sysctl->ctl_table_arg; | ||
491 | unregister_net_sysctl_table(mdev->sysctl); | ||
492 | kfree(table); | ||
493 | } | ||
494 | |||
495 | static struct mpls_dev *mpls_add_dev(struct net_device *dev) | ||
496 | { | ||
497 | struct mpls_dev *mdev; | ||
498 | int err = -ENOMEM; | ||
499 | |||
500 | ASSERT_RTNL(); | ||
501 | |||
502 | mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); | ||
503 | if (!mdev) | ||
504 | return ERR_PTR(err); | ||
505 | |||
506 | err = mpls_dev_sysctl_register(dev, mdev); | ||
507 | if (err) | ||
508 | goto free; | ||
509 | |||
510 | rcu_assign_pointer(dev->mpls_ptr, mdev); | ||
511 | |||
512 | return mdev; | ||
513 | |||
514 | free: | ||
515 | kfree(mdev); | ||
516 | return ERR_PTR(err); | ||
517 | } | ||
518 | |||
431 | static void mpls_ifdown(struct net_device *dev) | 519 | static void mpls_ifdown(struct net_device *dev) |
432 | { | 520 | { |
433 | struct mpls_route __rcu **platform_label; | 521 | struct mpls_route __rcu **platform_label; |
434 | struct net *net = dev_net(dev); | 522 | struct net *net = dev_net(dev); |
523 | struct mpls_dev *mdev; | ||
435 | unsigned index; | 524 | unsigned index; |
436 | 525 | ||
437 | platform_label = rtnl_dereference(net->mpls.platform_label); | 526 | platform_label = rtnl_dereference(net->mpls.platform_label); |
@@ -443,14 +532,35 @@ static void mpls_ifdown(struct net_device *dev) | |||
443 | continue; | 532 | continue; |
444 | rt->rt_dev = NULL; | 533 | rt->rt_dev = NULL; |
445 | } | 534 | } |
535 | |||
536 | mdev = mpls_dev_get(dev); | ||
537 | if (!mdev) | ||
538 | return; | ||
539 | |||
540 | mpls_dev_sysctl_unregister(mdev); | ||
541 | |||
542 | RCU_INIT_POINTER(dev->mpls_ptr, NULL); | ||
543 | |||
544 | kfree(mdev); | ||
446 | } | 545 | } |
447 | 546 | ||
448 | static int mpls_dev_notify(struct notifier_block *this, unsigned long event, | 547 | static int mpls_dev_notify(struct notifier_block *this, unsigned long event, |
449 | void *ptr) | 548 | void *ptr) |
450 | { | 549 | { |
451 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); | 550 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
551 | struct mpls_dev *mdev; | ||
452 | 552 | ||
453 | switch(event) { | 553 | switch(event) { |
554 | case NETDEV_REGISTER: | ||
555 | /* For now just support ethernet devices */ | ||
556 | if ((dev->type == ARPHRD_ETHER) || | ||
557 | (dev->type == ARPHRD_LOOPBACK)) { | ||
558 | mdev = mpls_add_dev(dev); | ||
559 | if (IS_ERR(mdev)) | ||
560 | return notifier_from_errno(PTR_ERR(mdev)); | ||
561 | } | ||
562 | break; | ||
563 | |||
454 | case NETDEV_UNREGISTER: | 564 | case NETDEV_UNREGISTER: |
455 | mpls_ifdown(dev); | 565 | mpls_ifdown(dev); |
456 | break; | 566 | break; |
@@ -536,6 +646,15 @@ int nla_get_labels(const struct nlattr *nla, | |||
536 | if ((dec.bos != bos) || dec.ttl || dec.tc) | 646 | if ((dec.bos != bos) || dec.ttl || dec.tc) |
537 | return -EINVAL; | 647 | return -EINVAL; |
538 | 648 | ||
649 | switch (dec.label) { | ||
650 | case LABEL_IMPLICIT_NULL: | ||
651 | /* RFC3032: This is a label that an LSR may | ||
652 | * assign and distribute, but which never | ||
653 | * actually appears in the encapsulation. | ||
654 | */ | ||
655 | return -EINVAL; | ||
656 | } | ||
657 | |||
539 | label[i] = dec.label; | 658 | label[i] = dec.label; |
540 | } | 659 | } |
541 | *labels = nla_labels; | 660 | *labels = nla_labels; |
@@ -912,7 +1031,7 @@ static int mpls_platform_labels(struct ctl_table *table, int write, | |||
912 | return ret; | 1031 | return ret; |
913 | } | 1032 | } |
914 | 1033 | ||
915 | static struct ctl_table mpls_table[] = { | 1034 | static const struct ctl_table mpls_table[] = { |
916 | { | 1035 | { |
917 | .procname = "platform_labels", | 1036 | .procname = "platform_labels", |
918 | .data = NULL, | 1037 | .data = NULL, |
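MPLS input becomes opt-in per device: a struct mpls_dev is attached to dev->mpls_ptr at NETDEV_REGISTER time for ethernet and loopback devices, mpls_forward() drops packets unless its input_enabled flag is set, and the flag is exported as the net/mpls/conf/<ifname>/input sysctl. The MPLS_PERDEV_SYSCTL_OFFSET() trick stores field offsets in .data and rebases them once the real mdev exists, which is morally the same as:

    /* equivalent rebasing written with offsetof(), for clarity only */
    table[i].data = (char *)mdev + offsetof(struct mpls_dev, input_enabled);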
diff --git a/net/mpls/internal.h b/net/mpls/internal.h index fb6de92052c4..693877d69606 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h | |||
@@ -22,6 +22,12 @@ struct mpls_entry_decoded { | |||
22 | u8 bos; | 22 | u8 bos; |
23 | }; | 23 | }; |
24 | 24 | ||
25 | struct mpls_dev { | ||
26 | int input_enabled; | ||
27 | |||
28 | struct ctl_table_header *sysctl; | ||
29 | }; | ||
30 | |||
25 | struct sk_buff; | 31 | struct sk_buff; |
26 | 32 | ||
27 | static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb) | 33 | static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb) |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 78af83bc9c8e..ad9d11fb29fd 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -4340,7 +4340,6 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, | |||
4340 | case NFT_CONTINUE: | 4340 | case NFT_CONTINUE: |
4341 | case NFT_BREAK: | 4341 | case NFT_BREAK: |
4342 | case NFT_RETURN: | 4342 | case NFT_RETURN: |
4343 | desc->len = sizeof(data->verdict); | ||
4344 | break; | 4343 | break; |
4345 | case NFT_JUMP: | 4344 | case NFT_JUMP: |
4346 | case NFT_GOTO: | 4345 | case NFT_GOTO: |
@@ -4355,10 +4354,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, | |||
4355 | 4354 | ||
4356 | chain->use++; | 4355 | chain->use++; |
4357 | data->verdict.chain = chain; | 4356 | data->verdict.chain = chain; |
4358 | desc->len = sizeof(data); | ||
4359 | break; | 4357 | break; |
4360 | } | 4358 | } |
4361 | 4359 | ||
4360 | desc->len = sizeof(data->verdict); | ||
4362 | desc->type = NFT_DATA_VERDICT; | 4361 | desc->type = NFT_DATA_VERDICT; |
4363 | return 0; | 4362 | return 0; |
4364 | } | 4363 | } |
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c index 57d3e1af5630..0522fc9bfb0a 100644 --- a/net/netfilter/nft_reject.c +++ b/net/netfilter/nft_reject.c | |||
@@ -63,6 +63,8 @@ int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
63 | if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code)) | 63 | if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code)) |
64 | goto nla_put_failure; | 64 | goto nla_put_failure; |
65 | break; | 65 | break; |
66 | default: | ||
67 | break; | ||
66 | } | 68 | } |
67 | 69 | ||
68 | return 0; | 70 | return 0; |
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c index 62cabee42fbe..635dbba93d01 100644 --- a/net/netfilter/nft_reject_inet.c +++ b/net/netfilter/nft_reject_inet.c | |||
@@ -108,6 +108,8 @@ static int nft_reject_inet_dump(struct sk_buff *skb, | |||
108 | if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code)) | 108 | if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code)) |
109 | goto nla_put_failure; | 109 | goto nla_put_failure; |
110 | break; | 110 | break; |
111 | default: | ||
112 | break; | ||
111 | } | 113 | } |
112 | 114 | ||
113 | return 0; | 115 | return 0; |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 19909d0786a2..ec4adbdcb9b4 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -1629,13 +1629,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size, | |||
1629 | if (data == NULL) | 1629 | if (data == NULL) |
1630 | return NULL; | 1630 | return NULL; |
1631 | 1631 | ||
1632 | skb = build_skb(data, size); | 1632 | skb = __build_skb(data, size); |
1633 | if (skb == NULL) | 1633 | if (skb == NULL) |
1634 | vfree(data); | 1634 | vfree(data); |
1635 | else { | 1635 | else |
1636 | skb->head_frag = 0; | ||
1637 | skb->destructor = netlink_skb_destructor; | 1636 | skb->destructor = netlink_skb_destructor; |
1638 | } | ||
1639 | 1637 | ||
1640 | return skb; | 1638 | return skb; |
1641 | } | 1639 | } |
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c index 8e472518f9f6..295d14bd6c67 100644 --- a/net/sched/act_connmark.c +++ b/net/sched/act_connmark.c | |||
@@ -63,7 +63,6 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a, | |||
63 | skb->mark = c->mark; | 63 | skb->mark = c->mark; |
64 | /* using overlimits stats to count how many packets marked */ | 64 | /* using overlimits stats to count how many packets marked */ |
65 | ca->tcf_qstats.overlimits++; | 65 | ca->tcf_qstats.overlimits++; |
66 | nf_ct_put(c); | ||
67 | goto out; | 66 | goto out; |
68 | } | 67 | } |
69 | 68 | ||
@@ -82,7 +81,6 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a, | |||
82 | nf_ct_put(c); | 81 | nf_ct_put(c); |
83 | 82 | ||
84 | out: | 83 | out: |
85 | skb->nfct = NULL; | ||
86 | spin_unlock(&ca->tcf_lock); | 84 | spin_unlock(&ca->tcf_lock); |
87 | return ca->tcf_action; | 85 | return ca->tcf_action; |
88 | } | 86 | } |
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c index 1ec19f6f0c2b..eeeba5adee6d 100644 --- a/net/sunrpc/auth_gss/gss_rpc_xdr.c +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c | |||
@@ -793,20 +793,26 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, | |||
793 | { | 793 | { |
794 | u32 value_follows; | 794 | u32 value_follows; |
795 | int err; | 795 | int err; |
796 | struct page *scratch; | ||
797 | |||
798 | scratch = alloc_page(GFP_KERNEL); | ||
799 | if (!scratch) | ||
800 | return -ENOMEM; | ||
801 | xdr_set_scratch_buffer(xdr, page_address(scratch), PAGE_SIZE); | ||
796 | 802 | ||
797 | /* res->status */ | 803 | /* res->status */ |
798 | err = gssx_dec_status(xdr, &res->status); | 804 | err = gssx_dec_status(xdr, &res->status); |
799 | if (err) | 805 | if (err) |
800 | return err; | 806 | goto out_free; |
801 | 807 | ||
802 | /* res->context_handle */ | 808 | /* res->context_handle */ |
803 | err = gssx_dec_bool(xdr, &value_follows); | 809 | err = gssx_dec_bool(xdr, &value_follows); |
804 | if (err) | 810 | if (err) |
805 | return err; | 811 | goto out_free; |
806 | if (value_follows) { | 812 | if (value_follows) { |
807 | err = gssx_dec_ctx(xdr, res->context_handle); | 813 | err = gssx_dec_ctx(xdr, res->context_handle); |
808 | if (err) | 814 | if (err) |
809 | return err; | 815 | goto out_free; |
810 | } else { | 816 | } else { |
811 | res->context_handle = NULL; | 817 | res->context_handle = NULL; |
812 | } | 818 | } |
@@ -814,11 +820,11 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, | |||
814 | /* res->output_token */ | 820 | /* res->output_token */ |
815 | err = gssx_dec_bool(xdr, &value_follows); | 821 | err = gssx_dec_bool(xdr, &value_follows); |
816 | if (err) | 822 | if (err) |
817 | return err; | 823 | goto out_free; |
818 | if (value_follows) { | 824 | if (value_follows) { |
819 | err = gssx_dec_buffer(xdr, res->output_token); | 825 | err = gssx_dec_buffer(xdr, res->output_token); |
820 | if (err) | 826 | if (err) |
821 | return err; | 827 | goto out_free; |
822 | } else { | 828 | } else { |
823 | res->output_token = NULL; | 829 | res->output_token = NULL; |
824 | } | 830 | } |
@@ -826,14 +832,17 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, | |||
826 | /* res->delegated_cred_handle */ | 832 | /* res->delegated_cred_handle */ |
827 | err = gssx_dec_bool(xdr, &value_follows); | 833 | err = gssx_dec_bool(xdr, &value_follows); |
828 | if (err) | 834 | if (err) |
829 | return err; | 835 | goto out_free; |
830 | if (value_follows) { | 836 | if (value_follows) { |
831 | /* we do not support upcall servers sending this data. */ | 837 | /* we do not support upcall servers sending this data. */ |
832 | return -EINVAL; | 838 | err = -EINVAL; |
839 | goto out_free; | ||
833 | } | 840 | } |
834 | 841 | ||
835 | /* res->options */ | 842 | /* res->options */ |
836 | err = gssx_dec_option_array(xdr, &res->options); | 843 | err = gssx_dec_option_array(xdr, &res->options); |
837 | 844 | ||
845 | out_free: | ||
846 | __free_page(scratch); | ||
838 | return err; | 847 | return err; |
839 | } | 848 | } |
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 3613e72e858e..70e3dacbf84a 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -591,14 +591,14 @@ void tipc_bearer_stop(struct net *net) | |||
591 | 591 | ||
592 | /* Caller should hold rtnl_lock to protect the bearer */ | 592 | /* Caller should hold rtnl_lock to protect the bearer */ |
593 | static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg, | 593 | static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg, |
594 | struct tipc_bearer *bearer) | 594 | struct tipc_bearer *bearer, int nlflags) |
595 | { | 595 | { |
596 | void *hdr; | 596 | void *hdr; |
597 | struct nlattr *attrs; | 597 | struct nlattr *attrs; |
598 | struct nlattr *prop; | 598 | struct nlattr *prop; |
599 | 599 | ||
600 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, | 600 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, |
601 | NLM_F_MULTI, TIPC_NL_BEARER_GET); | 601 | nlflags, TIPC_NL_BEARER_GET); |
602 | if (!hdr) | 602 | if (!hdr) |
603 | return -EMSGSIZE; | 603 | return -EMSGSIZE; |
604 | 604 | ||
@@ -657,7 +657,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
657 | if (!bearer) | 657 | if (!bearer) |
658 | continue; | 658 | continue; |
659 | 659 | ||
660 | err = __tipc_nl_add_bearer(&msg, bearer); | 660 | err = __tipc_nl_add_bearer(&msg, bearer, NLM_F_MULTI); |
661 | if (err) | 661 | if (err) |
662 | break; | 662 | break; |
663 | } | 663 | } |
@@ -705,7 +705,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info) | |||
705 | goto err_out; | 705 | goto err_out; |
706 | } | 706 | } |
707 | 707 | ||
708 | err = __tipc_nl_add_bearer(&msg, bearer); | 708 | err = __tipc_nl_add_bearer(&msg, bearer, 0); |
709 | if (err) | 709 | if (err) |
710 | goto err_out; | 710 | goto err_out; |
711 | rtnl_unlock(); | 711 | rtnl_unlock(); |
@@ -857,14 +857,14 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) | |||
857 | } | 857 | } |
858 | 858 | ||
859 | static int __tipc_nl_add_media(struct tipc_nl_msg *msg, | 859 | static int __tipc_nl_add_media(struct tipc_nl_msg *msg, |
860 | struct tipc_media *media) | 860 | struct tipc_media *media, int nlflags) |
861 | { | 861 | { |
862 | void *hdr; | 862 | void *hdr; |
863 | struct nlattr *attrs; | 863 | struct nlattr *attrs; |
864 | struct nlattr *prop; | 864 | struct nlattr *prop; |
865 | 865 | ||
866 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, | 866 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, |
867 | NLM_F_MULTI, TIPC_NL_MEDIA_GET); | 867 | nlflags, TIPC_NL_MEDIA_GET); |
868 | if (!hdr) | 868 | if (!hdr) |
869 | return -EMSGSIZE; | 869 | return -EMSGSIZE; |
870 | 870 | ||
@@ -916,7 +916,8 @@ int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
916 | 916 | ||
917 | rtnl_lock(); | 917 | rtnl_lock(); |
918 | for (; media_info_array[i] != NULL; i++) { | 918 | for (; media_info_array[i] != NULL; i++) { |
919 | err = __tipc_nl_add_media(&msg, media_info_array[i]); | 919 | err = __tipc_nl_add_media(&msg, media_info_array[i], |
920 | NLM_F_MULTI); | ||
920 | if (err) | 921 | if (err) |
921 | break; | 922 | break; |
922 | } | 923 | } |
@@ -963,7 +964,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info) | |||
963 | goto err_out; | 964 | goto err_out; |
964 | } | 965 | } |
965 | 966 | ||
966 | err = __tipc_nl_add_media(&msg, media); | 967 | err = __tipc_nl_add_media(&msg, media, 0); |
967 | if (err) | 968 | if (err) |
968 | goto err_out; | 969 | goto err_out; |
969 | rtnl_unlock(); | 970 | rtnl_unlock(); |
diff --git a/net/tipc/link.c b/net/tipc/link.c index a6b30df6ec02..43a515dc97b0 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1145,11 +1145,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr) | |||
1145 | } | 1145 | } |
1146 | /* Synchronize with parallel link if applicable */ | 1146 | /* Synchronize with parallel link if applicable */ |
1147 | if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) { | 1147 | if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) { |
1148 | link_handle_out_of_seq_msg(l_ptr, skb); | 1148 | if (!link_synch(l_ptr)) |
1149 | if (link_synch(l_ptr)) | 1149 | goto unlock; |
1150 | link_retrieve_defq(l_ptr, &head); | ||
1151 | skb = NULL; | ||
1152 | goto unlock; | ||
1153 | } | 1150 | } |
1154 | l_ptr->next_in_no++; | 1151 | l_ptr->next_in_no++; |
1155 | if (unlikely(!skb_queue_empty(&l_ptr->deferdq))) | 1152 | if (unlikely(!skb_queue_empty(&l_ptr->deferdq))) |
@@ -2013,7 +2010,7 @@ msg_full: | |||
2013 | 2010 | ||
2014 | /* Caller should hold appropriate locks to protect the link */ | 2011 | /* Caller should hold appropriate locks to protect the link */ |
2015 | static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, | 2012 | static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, |
2016 | struct tipc_link *link) | 2013 | struct tipc_link *link, int nlflags) |
2017 | { | 2014 | { |
2018 | int err; | 2015 | int err; |
2019 | void *hdr; | 2016 | void *hdr; |
@@ -2022,7 +2019,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, | |||
2022 | struct tipc_net *tn = net_generic(net, tipc_net_id); | 2019 | struct tipc_net *tn = net_generic(net, tipc_net_id); |
2023 | 2020 | ||
2024 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, | 2021 | hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family, |
2025 | NLM_F_MULTI, TIPC_NL_LINK_GET); | 2022 | nlflags, TIPC_NL_LINK_GET); |
2026 | if (!hdr) | 2023 | if (!hdr) |
2027 | return -EMSGSIZE; | 2024 | return -EMSGSIZE; |
2028 | 2025 | ||
@@ -2095,7 +2092,7 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg, | |||
2095 | if (!node->links[i]) | 2092 | if (!node->links[i]) |
2096 | continue; | 2093 | continue; |
2097 | 2094 | ||
2098 | err = __tipc_nl_add_link(net, msg, node->links[i]); | 2095 | err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI); |
2099 | if (err) | 2096 | if (err) |
2100 | return err; | 2097 | return err; |
2101 | } | 2098 | } |
@@ -2143,7 +2140,6 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2143 | err = __tipc_nl_add_node_links(net, &msg, node, | 2140 | err = __tipc_nl_add_node_links(net, &msg, node, |
2144 | &prev_link); | 2141 | &prev_link); |
2145 | tipc_node_unlock(node); | 2142 | tipc_node_unlock(node); |
2146 | tipc_node_put(node); | ||
2147 | if (err) | 2143 | if (err) |
2148 | goto out; | 2144 | goto out; |
2149 | 2145 | ||
@@ -2210,7 +2206,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info) | |||
2210 | goto err_out; | 2206 | goto err_out; |
2211 | } | 2207 | } |
2212 | 2208 | ||
2213 | err = __tipc_nl_add_link(net, &msg, link); | 2209 | err = __tipc_nl_add_link(net, &msg, link, 0); |
2214 | if (err) | 2210 | if (err) |
2215 | goto err_out; | 2211 | goto err_out; |
2216 | 2212 | ||
diff --git a/net/tipc/server.c b/net/tipc/server.c index ab6183cdb121..77ff03ed1e18 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c | |||
@@ -102,7 +102,7 @@ static void tipc_conn_kref_release(struct kref *kref) | |||
102 | } | 102 | } |
103 | saddr->scope = -TIPC_NODE_SCOPE; | 103 | saddr->scope = -TIPC_NODE_SCOPE; |
104 | kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr)); | 104 | kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr)); |
105 | sk_release_kernel(sk); | 105 | sock_release(sock); |
106 | con->sock = NULL; | 106 | con->sock = NULL; |
107 | } | 107 | } |
108 | 108 | ||
@@ -321,12 +321,9 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con) | |||
321 | struct socket *sock = NULL; | 321 | struct socket *sock = NULL; |
322 | int ret; | 322 | int ret; |
323 | 323 | ||
324 | ret = sock_create_kern(AF_TIPC, SOCK_SEQPACKET, 0, &sock); | 324 | ret = __sock_create(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock, 1); |
325 | if (ret < 0) | 325 | if (ret < 0) |
326 | return NULL; | 326 | return NULL; |
327 | |||
328 | sk_change_net(sock->sk, s->net); | ||
329 | |||
330 | ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE, | 327 | ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE, |
331 | (char *)&s->imp, sizeof(s->imp)); | 328 | (char *)&s->imp, sizeof(s->imp)); |
332 | if (ret < 0) | 329 | if (ret < 0) |
@@ -376,7 +373,7 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con) | |||
376 | 373 | ||
377 | create_err: | 374 | create_err: |
378 | kernel_sock_shutdown(sock, SHUT_RDWR); | 375 | kernel_sock_shutdown(sock, SHUT_RDWR); |
379 | sk_release_kernel(sock->sk); | 376 | sock_release(sock); |
380 | return NULL; | 377 | return NULL; |
381 | } | 378 | } |
382 | 379 | ||
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index ee90d74d7516..9074b5cede38 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1764,13 +1764,14 @@ static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk, | |||
1764 | int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) | 1764 | int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq) |
1765 | { | 1765 | { |
1766 | u32 dnode, dport = 0; | 1766 | u32 dnode, dport = 0; |
1767 | int err = -TIPC_ERR_NO_PORT; | 1767 | int err; |
1768 | struct sk_buff *skb; | 1768 | struct sk_buff *skb; |
1769 | struct tipc_sock *tsk; | 1769 | struct tipc_sock *tsk; |
1770 | struct tipc_net *tn; | 1770 | struct tipc_net *tn; |
1771 | struct sock *sk; | 1771 | struct sock *sk; |
1772 | 1772 | ||
1773 | while (skb_queue_len(inputq)) { | 1773 | while (skb_queue_len(inputq)) { |
1774 | err = -TIPC_ERR_NO_PORT; | ||
1774 | skb = NULL; | 1775 | skb = NULL; |
1775 | dport = tipc_skb_peek_port(inputq, dport); | 1776 | dport = tipc_skb_peek_port(inputq, dport); |
1776 | tsk = tipc_sk_lookup(net, dport); | 1777 | tsk = tipc_sk_lookup(net, dport); |
diff --git a/net/unix/garbage.c b/net/unix/garbage.c index 99f7012b23b9..a73a226f2d33 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c | |||
@@ -95,39 +95,36 @@ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); | |||
95 | 95 | ||
96 | unsigned int unix_tot_inflight; | 96 | unsigned int unix_tot_inflight; |
97 | 97 | ||
98 | |||
99 | struct sock *unix_get_socket(struct file *filp) | 98 | struct sock *unix_get_socket(struct file *filp) |
100 | { | 99 | { |
101 | struct sock *u_sock = NULL; | 100 | struct sock *u_sock = NULL; |
102 | struct inode *inode = file_inode(filp); | 101 | struct inode *inode = file_inode(filp); |
103 | 102 | ||
104 | /* | 103 | /* Socket ? */ |
105 | * Socket ? | ||
106 | */ | ||
107 | if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { | 104 | if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { |
108 | struct socket *sock = SOCKET_I(inode); | 105 | struct socket *sock = SOCKET_I(inode); |
109 | struct sock *s = sock->sk; | 106 | struct sock *s = sock->sk; |
110 | 107 | ||
111 | /* | 108 | /* PF_UNIX ? */ |
112 | * PF_UNIX ? | ||
113 | */ | ||
114 | if (s && sock->ops && sock->ops->family == PF_UNIX) | 109 | if (s && sock->ops && sock->ops->family == PF_UNIX) |
115 | u_sock = s; | 110 | u_sock = s; |
116 | } | 111 | } |
117 | return u_sock; | 112 | return u_sock; |
118 | } | 113 | } |
119 | 114 | ||
120 | /* | 115 | /* Keep the number of times in flight count for the file |
121 | * Keep the number of times in flight count for the file | 116 | * descriptor if it is for an AF_UNIX socket. |
122 | * descriptor if it is for an AF_UNIX socket. | ||
123 | */ | 117 | */ |
124 | 118 | ||
125 | void unix_inflight(struct file *fp) | 119 | void unix_inflight(struct file *fp) |
126 | { | 120 | { |
127 | struct sock *s = unix_get_socket(fp); | 121 | struct sock *s = unix_get_socket(fp); |
122 | |||
128 | if (s) { | 123 | if (s) { |
129 | struct unix_sock *u = unix_sk(s); | 124 | struct unix_sock *u = unix_sk(s); |
125 | |||
130 | spin_lock(&unix_gc_lock); | 126 | spin_lock(&unix_gc_lock); |
127 | |||
131 | if (atomic_long_inc_return(&u->inflight) == 1) { | 128 | if (atomic_long_inc_return(&u->inflight) == 1) { |
132 | BUG_ON(!list_empty(&u->link)); | 129 | BUG_ON(!list_empty(&u->link)); |
133 | list_add_tail(&u->link, &gc_inflight_list); | 130 | list_add_tail(&u->link, &gc_inflight_list); |
@@ -142,10 +139,13 @@ void unix_inflight(struct file *fp) | |||
142 | void unix_notinflight(struct file *fp) | 139 | void unix_notinflight(struct file *fp) |
143 | { | 140 | { |
144 | struct sock *s = unix_get_socket(fp); | 141 | struct sock *s = unix_get_socket(fp); |
142 | |||
145 | if (s) { | 143 | if (s) { |
146 | struct unix_sock *u = unix_sk(s); | 144 | struct unix_sock *u = unix_sk(s); |
145 | |||
147 | spin_lock(&unix_gc_lock); | 146 | spin_lock(&unix_gc_lock); |
148 | BUG_ON(list_empty(&u->link)); | 147 | BUG_ON(list_empty(&u->link)); |
148 | |||
149 | if (atomic_long_dec_and_test(&u->inflight)) | 149 | if (atomic_long_dec_and_test(&u->inflight)) |
150 | list_del_init(&u->link); | 150 | list_del_init(&u->link); |
151 | unix_tot_inflight--; | 151 | unix_tot_inflight--; |
@@ -161,32 +161,27 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), | |||
161 | 161 | ||
162 | spin_lock(&x->sk_receive_queue.lock); | 162 | spin_lock(&x->sk_receive_queue.lock); |
163 | skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { | 163 | skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { |
164 | /* | 164 | /* Do we have file descriptors ? */ |
165 | * Do we have file descriptors ? | ||
166 | */ | ||
167 | if (UNIXCB(skb).fp) { | 165 | if (UNIXCB(skb).fp) { |
168 | bool hit = false; | 166 | bool hit = false; |
169 | /* | 167 | /* Process the descriptors of this socket */ |
170 | * Process the descriptors of this socket | ||
171 | */ | ||
172 | int nfd = UNIXCB(skb).fp->count; | 168 | int nfd = UNIXCB(skb).fp->count; |
173 | struct file **fp = UNIXCB(skb).fp->fp; | 169 | struct file **fp = UNIXCB(skb).fp->fp; |
170 | |||
174 | while (nfd--) { | 171 | while (nfd--) { |
175 | /* | 172 | /* Get the socket the fd matches if it indeed does so */ |
176 | * Get the socket the fd matches | ||
177 | * if it indeed does so | ||
178 | */ | ||
179 | struct sock *sk = unix_get_socket(*fp++); | 173 | struct sock *sk = unix_get_socket(*fp++); |
174 | |||
180 | if (sk) { | 175 | if (sk) { |
181 | struct unix_sock *u = unix_sk(sk); | 176 | struct unix_sock *u = unix_sk(sk); |
182 | 177 | ||
183 | /* | 178 | /* Ignore non-candidates, they could |
184 | * Ignore non-candidates, they could | ||
185 | * have been added to the queues after | 179 | * have been added to the queues after |
186 | * starting the garbage collection | 180 | * starting the garbage collection |
187 | */ | 181 | */ |
188 | if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) { | 182 | if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) { |
189 | hit = true; | 183 | hit = true; |
184 | |||
190 | func(u); | 185 | func(u); |
191 | } | 186 | } |
192 | } | 187 | } |
@@ -203,24 +198,22 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), | |||
203 | static void scan_children(struct sock *x, void (*func)(struct unix_sock *), | 198 | static void scan_children(struct sock *x, void (*func)(struct unix_sock *), |
204 | struct sk_buff_head *hitlist) | 199 | struct sk_buff_head *hitlist) |
205 | { | 200 | { |
206 | if (x->sk_state != TCP_LISTEN) | 201 | if (x->sk_state != TCP_LISTEN) { |
207 | scan_inflight(x, func, hitlist); | 202 | scan_inflight(x, func, hitlist); |
208 | else { | 203 | } else { |
209 | struct sk_buff *skb; | 204 | struct sk_buff *skb; |
210 | struct sk_buff *next; | 205 | struct sk_buff *next; |
211 | struct unix_sock *u; | 206 | struct unix_sock *u; |
212 | LIST_HEAD(embryos); | 207 | LIST_HEAD(embryos); |
213 | 208 | ||
214 | /* | 209 | /* For a listening socket collect the queued embryos |
215 | * For a listening socket collect the queued embryos | ||
216 | * and perform a scan on them as well. | 210 | * and perform a scan on them as well. |
217 | */ | 211 | */ |
218 | spin_lock(&x->sk_receive_queue.lock); | 212 | spin_lock(&x->sk_receive_queue.lock); |
219 | skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { | 213 | skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { |
220 | u = unix_sk(skb->sk); | 214 | u = unix_sk(skb->sk); |
221 | 215 | ||
222 | /* | 216 | /* An embryo cannot be in-flight, so it's safe |
223 | * An embryo cannot be in-flight, so it's safe | ||
224 | * to use the list link. | 217 | * to use the list link. |
225 | */ | 218 | */ |
226 | BUG_ON(!list_empty(&u->link)); | 219 | BUG_ON(!list_empty(&u->link)); |
@@ -249,8 +242,7 @@ static void inc_inflight(struct unix_sock *usk) | |||
249 | static void inc_inflight_move_tail(struct unix_sock *u) | 242 | static void inc_inflight_move_tail(struct unix_sock *u) |
250 | { | 243 | { |
251 | atomic_long_inc(&u->inflight); | 244 | atomic_long_inc(&u->inflight); |
252 | /* | 245 | /* If this still might be part of a cycle, move it to the end |
253 | * If this still might be part of a cycle, move it to the end | ||
254 | * of the list, so that it's checked even if it was already | 246 | * of the list, so that it's checked even if it was already |
255 | * passed over | 247 | * passed over |
256 | */ | 248 | */ |
@@ -263,8 +255,7 @@ static bool gc_in_progress; | |||
263 | 255 | ||
264 | void wait_for_unix_gc(void) | 256 | void wait_for_unix_gc(void) |
265 | { | 257 | { |
266 | /* | 258 | /* If number of inflight sockets is insane, |
267 | * If number of inflight sockets is insane, | ||
268 | * force a garbage collect right now. | 259 | * force a garbage collect right now. |
269 | */ | 260 | */ |
270 | if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress) | 261 | if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress) |
@@ -288,8 +279,7 @@ void unix_gc(void) | |||
288 | goto out; | 279 | goto out; |
289 | 280 | ||
290 | gc_in_progress = true; | 281 | gc_in_progress = true; |
291 | /* | 282 | /* First, select candidates for garbage collection. Only |
292 | * First, select candidates for garbage collection. Only | ||
293 | * in-flight sockets are considered, and from those only ones | 283 | * in-flight sockets are considered, and from those only ones |
294 | * which don't have any external reference. | 284 | * which don't have any external reference. |
295 | * | 285 | * |
@@ -320,15 +310,13 @@ void unix_gc(void) | |||
320 | } | 310 | } |
321 | } | 311 | } |
322 | 312 | ||
323 | /* | 313 | /* Now remove all internal in-flight reference to children of |
324 | * Now remove all internal in-flight reference to children of | ||
325 | * the candidates. | 314 | * the candidates. |
326 | */ | 315 | */ |
327 | list_for_each_entry(u, &gc_candidates, link) | 316 | list_for_each_entry(u, &gc_candidates, link) |
328 | scan_children(&u->sk, dec_inflight, NULL); | 317 | scan_children(&u->sk, dec_inflight, NULL); |
329 | 318 | ||
330 | /* | 319 | /* Restore the references for children of all candidates, |
331 | * Restore the references for children of all candidates, | ||
332 | * which have remaining references. Do this recursively, so | 320 | * which have remaining references. Do this recursively, so |
333 | * only those remain, which form cyclic references. | 321 | * only those remain, which form cyclic references. |
334 | * | 322 | * |
@@ -350,8 +338,7 @@ void unix_gc(void) | |||
350 | } | 338 | } |
351 | list_del(&cursor); | 339 | list_del(&cursor); |
352 | 340 | ||
353 | /* | 341 | /* not_cycle_list contains those sockets which do not make up a |
354 | * not_cycle_list contains those sockets which do not make up a | ||
355 | * cycle. Restore these to the inflight list. | 342 | * cycle. Restore these to the inflight list. |
356 | */ | 343 | */ |
357 | while (!list_empty(¬_cycle_list)) { | 344 | while (!list_empty(¬_cycle_list)) { |
@@ -360,8 +347,7 @@ void unix_gc(void) | |||
360 | list_move_tail(&u->link, &gc_inflight_list); | 347 | list_move_tail(&u->link, &gc_inflight_list); |
361 | } | 348 | } |
362 | 349 | ||
363 | /* | 350 | /* Now gc_candidates contains only garbage. Restore original |
364 | * Now gc_candidates contains only garbage. Restore original | ||
365 | * inflight counters for these as well, and remove the skbuffs | 351 | * inflight counters for these as well, and remove the skbuffs |
366 | * which are creating the cycle(s). | 352 | * which are creating the cycle(s). |
367 | */ | 353 | */ |
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c index 37d0220a094c..db7a2e5e4a14 100644 --- a/sound/pci/emu10k1/emu10k1.c +++ b/sound/pci/emu10k1/emu10k1.c | |||
@@ -183,8 +183,10 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci, | |||
183 | } | 183 | } |
184 | #endif | 184 | #endif |
185 | 185 | ||
186 | strcpy(card->driver, emu->card_capabilities->driver); | 186 | strlcpy(card->driver, emu->card_capabilities->driver, |
187 | strcpy(card->shortname, emu->card_capabilities->name); | 187 | sizeof(card->driver)); |
188 | strlcpy(card->shortname, emu->card_capabilities->name, | ||
189 | sizeof(card->shortname)); | ||
188 | snprintf(card->longname, sizeof(card->longname), | 190 | snprintf(card->longname, sizeof(card->longname), |
189 | "%s (rev.%d, serial:0x%x) at 0x%lx, irq %i", | 191 | "%s (rev.%d, serial:0x%x) at 0x%lx, irq %i", |
190 | card->shortname, emu->revision, emu->serial, emu->port, emu->irq); | 192 | card->shortname, emu->revision, emu->serial, emu->port, emu->irq); |
diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c index 874cd76c7b7f..d2c7ea3a7610 100644 --- a/sound/pci/emu10k1/emu10k1_callback.c +++ b/sound/pci/emu10k1/emu10k1_callback.c | |||
@@ -415,7 +415,7 @@ start_voice(struct snd_emux_voice *vp) | |||
415 | snd_emu10k1_ptr_write(hw, Z2, ch, 0); | 415 | snd_emu10k1_ptr_write(hw, Z2, ch, 0); |
416 | 416 | ||
417 | /* invalidate maps */ | 417 | /* invalidate maps */ |
418 | temp = (hw->silent_page.addr << 1) | MAP_PTI_MASK; | 418 | temp = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0); |
419 | snd_emu10k1_ptr_write(hw, MAPA, ch, temp); | 419 | snd_emu10k1_ptr_write(hw, MAPA, ch, temp); |
420 | snd_emu10k1_ptr_write(hw, MAPB, ch, temp); | 420 | snd_emu10k1_ptr_write(hw, MAPB, ch, temp); |
421 | #if 0 | 421 | #if 0 |
@@ -436,7 +436,7 @@ start_voice(struct snd_emux_voice *vp) | |||
436 | snd_emu10k1_ptr_write(hw, CDF, ch, sample); | 436 | snd_emu10k1_ptr_write(hw, CDF, ch, sample); |
437 | 437 | ||
438 | /* invalidate maps */ | 438 | /* invalidate maps */ |
439 | temp = ((unsigned int)hw->silent_page.addr << 1) | MAP_PTI_MASK; | 439 | temp = ((unsigned int)hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0); |
440 | snd_emu10k1_ptr_write(hw, MAPA, ch, temp); | 440 | snd_emu10k1_ptr_write(hw, MAPA, ch, temp); |
441 | snd_emu10k1_ptr_write(hw, MAPB, ch, temp); | 441 | snd_emu10k1_ptr_write(hw, MAPB, ch, temp); |
442 | 442 | ||
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c index 54079f5d5673..a4548147c621 100644 --- a/sound/pci/emu10k1/emu10k1_main.c +++ b/sound/pci/emu10k1/emu10k1_main.c | |||
@@ -282,7 +282,7 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume) | |||
282 | snd_emu10k1_ptr_write(emu, TCB, 0, 0); /* taken from original driver */ | 282 | snd_emu10k1_ptr_write(emu, TCB, 0, 0); /* taken from original driver */ |
283 | snd_emu10k1_ptr_write(emu, TCBS, 0, 4); /* taken from original driver */ | 283 | snd_emu10k1_ptr_write(emu, TCBS, 0, 4); /* taken from original driver */ |
284 | 284 | ||
285 | silent_page = (emu->silent_page.addr << 1) | MAP_PTI_MASK; | 285 | silent_page = (emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0); |
286 | for (ch = 0; ch < NUM_G; ch++) { | 286 | for (ch = 0; ch < NUM_G; ch++) { |
287 | snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page); | 287 | snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page); |
288 | snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page); | 288 | snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page); |
@@ -348,6 +348,11 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume) | |||
348 | outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG); | 348 | outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG); |
349 | } | 349 | } |
350 | 350 | ||
351 | if (emu->address_mode == 0) { | ||
352 | /* use 16M in 4G */ | ||
353 | outl(inl(emu->port + HCFG) | HCFG_EXPANDED_MEM, emu->port + HCFG); | ||
354 | } | ||
355 | |||
351 | return 0; | 356 | return 0; |
352 | } | 357 | } |
353 | 358 | ||
@@ -1446,7 +1451,7 @@ static struct snd_emu_chip_details emu_chip_details[] = { | |||
1446 | * | 1451 | * |
1447 | */ | 1452 | */ |
1448 | {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102, | 1453 | {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102, |
1449 | .driver = "Audigy2", .name = "SB Audigy 2 ZS Notebook [SB0530]", | 1454 | .driver = "Audigy2", .name = "Audigy 2 ZS Notebook [SB0530]", |
1450 | .id = "Audigy2", | 1455 | .id = "Audigy2", |
1451 | .emu10k2_chip = 1, | 1456 | .emu10k2_chip = 1, |
1452 | .ca0108_chip = 1, | 1457 | .ca0108_chip = 1, |
@@ -1596,7 +1601,7 @@ static struct snd_emu_chip_details emu_chip_details[] = { | |||
1596 | .adc_1361t = 1, /* 24 bit capture instead of 16bit */ | 1601 | .adc_1361t = 1, /* 24 bit capture instead of 16bit */ |
1597 | .ac97_chip = 1} , | 1602 | .ac97_chip = 1} , |
1598 | {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102, | 1603 | {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102, |
1599 | .driver = "Audigy2", .name = "SB Audigy 2 Platinum EX [SB0280]", | 1604 | .driver = "Audigy2", .name = "Audigy 2 Platinum EX [SB0280]", |
1600 | .id = "Audigy2", | 1605 | .id = "Audigy2", |
1601 | .emu10k2_chip = 1, | 1606 | .emu10k2_chip = 1, |
1602 | .ca0102_chip = 1, | 1607 | .ca0102_chip = 1, |
@@ -1902,8 +1907,10 @@ int snd_emu10k1_create(struct snd_card *card, | |||
1902 | 1907 | ||
1903 | is_audigy = emu->audigy = c->emu10k2_chip; | 1908 | is_audigy = emu->audigy = c->emu10k2_chip; |
1904 | 1909 | ||
1910 | /* set addressing mode */ | ||
1911 | emu->address_mode = is_audigy ? 0 : 1; | ||
1905 | /* set the DMA transfer mask */ | 1912 | /* set the DMA transfer mask */ |
1906 | emu->dma_mask = is_audigy ? AUDIGY_DMA_MASK : EMU10K1_DMA_MASK; | 1913 | emu->dma_mask = emu->address_mode ? EMU10K1_DMA_MASK : AUDIGY_DMA_MASK; |
1907 | if (pci_set_dma_mask(pci, emu->dma_mask) < 0 || | 1914 | if (pci_set_dma_mask(pci, emu->dma_mask) < 0 || |
1908 | pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) { | 1915 | pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) { |
1909 | dev_err(card->dev, | 1916 | dev_err(card->dev, |
@@ -1928,7 +1935,7 @@ int snd_emu10k1_create(struct snd_card *card, | |||
1928 | 1935 | ||
1929 | emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT; | 1936 | emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT; |
1930 | if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), | 1937 | if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci), |
1931 | 32 * 1024, &emu->ptb_pages) < 0) { | 1938 | (emu->address_mode ? 32 : 16) * 1024, &emu->ptb_pages) < 0) { |
1932 | err = -ENOMEM; | 1939 | err = -ENOMEM; |
1933 | goto error; | 1940 | goto error; |
1934 | } | 1941 | } |
@@ -2027,8 +2034,8 @@ int snd_emu10k1_create(struct snd_card *card, | |||
2027 | 2034 | ||
2028 | /* Clear silent pages and set up pointers */ | 2035 | /* Clear silent pages and set up pointers */ |
2029 | memset(emu->silent_page.area, 0, PAGE_SIZE); | 2036 | memset(emu->silent_page.area, 0, PAGE_SIZE); |
2030 | silent_page = emu->silent_page.addr << 1; | 2037 | silent_page = emu->silent_page.addr << emu->address_mode; |
2031 | for (idx = 0; idx < MAXPAGES; idx++) | 2038 | for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++) |
2032 | ((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx); | 2039 | ((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx); |
2033 | 2040 | ||
2034 | /* set up voice indices */ | 2041 | /* set up voice indices */ |
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c index 0dc07385af0e..14a305bd8a98 100644 --- a/sound/pci/emu10k1/emupcm.c +++ b/sound/pci/emu10k1/emupcm.c | |||
@@ -380,7 +380,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu, | |||
380 | snd_emu10k1_ptr_write(emu, Z1, voice, 0); | 380 | snd_emu10k1_ptr_write(emu, Z1, voice, 0); |
381 | snd_emu10k1_ptr_write(emu, Z2, voice, 0); | 381 | snd_emu10k1_ptr_write(emu, Z2, voice, 0); |
382 | /* invalidate maps */ | 382 | /* invalidate maps */ |
383 | silent_page = ((unsigned int)emu->silent_page.addr << 1) | MAP_PTI_MASK; | 383 | silent_page = ((unsigned int)emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0); |
384 | snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page); | 384 | snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page); |
385 | snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page); | 385 | snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page); |
386 | /* modulation envelope */ | 386 | /* modulation envelope */ |
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c index c68e6dd2fa67..4f1f69be1865 100644 --- a/sound/pci/emu10k1/memory.c +++ b/sound/pci/emu10k1/memory.c | |||
@@ -34,10 +34,11 @@ | |||
34 | * aligned pages in others | 34 | * aligned pages in others |
35 | */ | 35 | */ |
36 | #define __set_ptb_entry(emu,page,addr) \ | 36 | #define __set_ptb_entry(emu,page,addr) \ |
37 | (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page))) | 37 | (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page))) |
38 | 38 | ||
39 | #define UNIT_PAGES (PAGE_SIZE / EMUPAGESIZE) | 39 | #define UNIT_PAGES (PAGE_SIZE / EMUPAGESIZE) |
40 | #define MAX_ALIGN_PAGES (MAXPAGES / UNIT_PAGES) | 40 | #define MAX_ALIGN_PAGES0 (MAXPAGES0 / UNIT_PAGES) |
41 | #define MAX_ALIGN_PAGES1 (MAXPAGES1 / UNIT_PAGES) | ||
41 | /* get aligned page from offset address */ | 42 | /* get aligned page from offset address */ |
42 | #define get_aligned_page(offset) ((offset) >> PAGE_SHIFT) | 43 | #define get_aligned_page(offset) ((offset) >> PAGE_SHIFT) |
43 | /* get offset address from aligned page */ | 44 | /* get offset address from aligned page */ |
@@ -124,7 +125,7 @@ static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct lis | |||
124 | } | 125 | } |
125 | page = blk->mapped_page + blk->pages; | 126 | page = blk->mapped_page + blk->pages; |
126 | } | 127 | } |
127 | size = MAX_ALIGN_PAGES - page; | 128 | size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page; |
128 | if (size >= max_size) { | 129 | if (size >= max_size) { |
129 | *nextp = pos; | 130 | *nextp = pos; |
130 | return page; | 131 | return page; |
@@ -181,7 +182,7 @@ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk) | |||
181 | q = get_emu10k1_memblk(p, mapped_link); | 182 | q = get_emu10k1_memblk(p, mapped_link); |
182 | end_page = q->mapped_page; | 183 | end_page = q->mapped_page; |
183 | } else | 184 | } else |
184 | end_page = MAX_ALIGN_PAGES; | 185 | end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0); |
185 | 186 | ||
186 | /* remove links */ | 187 | /* remove links */ |
187 | list_del(&blk->mapped_link); | 188 | list_del(&blk->mapped_link); |
@@ -307,7 +308,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst | |||
307 | if (snd_BUG_ON(!emu)) | 308 | if (snd_BUG_ON(!emu)) |
308 | return NULL; | 309 | return NULL; |
309 | if (snd_BUG_ON(runtime->dma_bytes <= 0 || | 310 | if (snd_BUG_ON(runtime->dma_bytes <= 0 || |
310 | runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE)) | 311 | runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE)) |
311 | return NULL; | 312 | return NULL; |
312 | hdr = emu->memhdr; | 313 | hdr = emu->memhdr; |
313 | if (snd_BUG_ON(!hdr)) | 314 | if (snd_BUG_ON(!hdr)) |
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 873ed1bce12b..b49feff0a319 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -873,14 +873,15 @@ struct hda_pcm *snd_hda_codec_pcm_new(struct hda_codec *codec, | |||
873 | struct hda_pcm *pcm; | 873 | struct hda_pcm *pcm; |
874 | va_list args; | 874 | va_list args; |
875 | 875 | ||
876 | va_start(args, fmt); | ||
877 | pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); | 876 | pcm = kzalloc(sizeof(*pcm), GFP_KERNEL); |
878 | if (!pcm) | 877 | if (!pcm) |
879 | return NULL; | 878 | return NULL; |
880 | 879 | ||
881 | pcm->codec = codec; | 880 | pcm->codec = codec; |
882 | kref_init(&pcm->kref); | 881 | kref_init(&pcm->kref); |
882 | va_start(args, fmt); | ||
883 | pcm->name = kvasprintf(GFP_KERNEL, fmt, args); | 883 | pcm->name = kvasprintf(GFP_KERNEL, fmt, args); |
884 | va_end(args); | ||
884 | if (!pcm->name) { | 885 | if (!pcm->name) { |
885 | kfree(pcm); | 886 | kfree(pcm); |
886 | return NULL; | 887 | return NULL; |
@@ -2082,6 +2083,16 @@ static struct snd_kcontrol_new vmaster_mute_mode = { | |||
2082 | .put = vmaster_mute_mode_put, | 2083 | .put = vmaster_mute_mode_put, |
2083 | }; | 2084 | }; |
2084 | 2085 | ||
2086 | /* meta hook to call each driver's vmaster hook */ | ||
2087 | static void vmaster_hook(void *private_data, int enabled) | ||
2088 | { | ||
2089 | struct hda_vmaster_mute_hook *hook = private_data; | ||
2090 | |||
2091 | if (hook->mute_mode != HDA_VMUTE_FOLLOW_MASTER) | ||
2092 | enabled = hook->mute_mode; | ||
2093 | hook->hook(hook->codec, enabled); | ||
2094 | } | ||
2095 | |||
2085 | /** | 2096 | /** |
2086 | * snd_hda_add_vmaster_hook - Add a vmaster hook for mute-LED | 2097 | * snd_hda_add_vmaster_hook - Add a vmaster hook for mute-LED |
2087 | * @codec: the HDA codec | 2098 | * @codec: the HDA codec |
@@ -2100,9 +2111,9 @@ int snd_hda_add_vmaster_hook(struct hda_codec *codec, | |||
2100 | 2111 | ||
2101 | if (!hook->hook || !hook->sw_kctl) | 2112 | if (!hook->hook || !hook->sw_kctl) |
2102 | return 0; | 2113 | return 0; |
2103 | snd_ctl_add_vmaster_hook(hook->sw_kctl, hook->hook, codec); | ||
2104 | hook->codec = codec; | 2114 | hook->codec = codec; |
2105 | hook->mute_mode = HDA_VMUTE_FOLLOW_MASTER; | 2115 | hook->mute_mode = HDA_VMUTE_FOLLOW_MASTER; |
2116 | snd_ctl_add_vmaster_hook(hook->sw_kctl, vmaster_hook, hook); | ||
2106 | if (!expose_enum_ctl) | 2117 | if (!expose_enum_ctl) |
2107 | return 0; | 2118 | return 0; |
2108 | kctl = snd_ctl_new1(&vmaster_mute_mode, hook); | 2119 | kctl = snd_ctl_new1(&vmaster_mute_mode, hook); |
@@ -2128,14 +2139,7 @@ void snd_hda_sync_vmaster_hook(struct hda_vmaster_mute_hook *hook) | |||
2128 | */ | 2139 | */ |
2129 | if (hook->codec->bus->shutdown) | 2140 | if (hook->codec->bus->shutdown) |
2130 | return; | 2141 | return; |
2131 | switch (hook->mute_mode) { | 2142 | snd_ctl_sync_vmaster_hook(hook->sw_kctl); |
2132 | case HDA_VMUTE_FOLLOW_MASTER: | ||
2133 | snd_ctl_sync_vmaster_hook(hook->sw_kctl); | ||
2134 | break; | ||
2135 | default: | ||
2136 | hook->hook(hook->codec, hook->mute_mode); | ||
2137 | break; | ||
2138 | } | ||
2139 | } | 2143 | } |
2140 | EXPORT_SYMBOL_GPL(snd_hda_sync_vmaster_hook); | 2144 | EXPORT_SYMBOL_GPL(snd_hda_sync_vmaster_hook); |
2141 | 2145 | ||
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 3d2597b7037b..788f969b1a68 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c | |||
@@ -3259,7 +3259,8 @@ static int create_input_ctls(struct hda_codec *codec) | |||
3259 | val = PIN_IN; | 3259 | val = PIN_IN; |
3260 | if (cfg->inputs[i].type == AUTO_PIN_MIC) | 3260 | if (cfg->inputs[i].type == AUTO_PIN_MIC) |
3261 | val |= snd_hda_get_default_vref(codec, pin); | 3261 | val |= snd_hda_get_default_vref(codec, pin); |
3262 | if (pin != spec->hp_mic_pin) | 3262 | if (pin != spec->hp_mic_pin && |
3263 | !snd_hda_codec_get_pin_target(codec, pin)) | ||
3263 | set_pin_target(codec, pin, val, false); | 3264 | set_pin_target(codec, pin, val, false); |
3264 | 3265 | ||
3265 | if (mixer) { | 3266 | if (mixer) { |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 06199e4e930f..e2afd53cc14c 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -4190,11 +4190,18 @@ static void alc_shutup_dell_xps13(struct hda_codec *codec) | |||
4190 | static void alc_fixup_dell_xps13(struct hda_codec *codec, | 4190 | static void alc_fixup_dell_xps13(struct hda_codec *codec, |
4191 | const struct hda_fixup *fix, int action) | 4191 | const struct hda_fixup *fix, int action) |
4192 | { | 4192 | { |
4193 | if (action == HDA_FIXUP_ACT_PROBE) { | 4193 | struct alc_spec *spec = codec->spec; |
4194 | struct alc_spec *spec = codec->spec; | 4194 | struct hda_input_mux *imux = &spec->gen.input_mux; |
4195 | struct hda_input_mux *imux = &spec->gen.input_mux; | 4195 | int i; |
4196 | int i; | ||
4197 | 4196 | ||
4197 | switch (action) { | ||
4198 | case HDA_FIXUP_ACT_PRE_PROBE: | ||
4199 | /* mic pin 0x19 must be initialized with Vref Hi-Z, otherwise | ||
4200 | * it causes a click noise at start up | ||
4201 | */ | ||
4202 | snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ); | ||
4203 | break; | ||
4204 | case HDA_FIXUP_ACT_PROBE: | ||
4198 | spec->shutup = alc_shutup_dell_xps13; | 4205 | spec->shutup = alc_shutup_dell_xps13; |
4199 | 4206 | ||
4200 | /* Make the internal mic the default input source. */ | 4207 | /* Make the internal mic the default input source. */ |
@@ -4204,6 +4211,7 @@ static void alc_fixup_dell_xps13(struct hda_codec *codec, | |||
4204 | break; | 4211 | break; |
4205 | } | 4212 | } |
4206 | } | 4213 | } |
4214 | break; | ||
4207 | } | 4215 | } |
4208 | } | 4216 | } |
4209 | 4217 | ||
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c index 0a4ad5feb82e..d51703e30523 100644 --- a/sound/pci/hda/thinkpad_helper.c +++ b/sound/pci/hda/thinkpad_helper.c | |||
@@ -72,6 +72,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec, | |||
72 | if (led_set_func(TPACPI_LED_MUTE, false) >= 0) { | 72 | if (led_set_func(TPACPI_LED_MUTE, false) >= 0) { |
73 | old_vmaster_hook = spec->vmaster_mute.hook; | 73 | old_vmaster_hook = spec->vmaster_mute.hook; |
74 | spec->vmaster_mute.hook = update_tpacpi_mute_led; | 74 | spec->vmaster_mute.hook = update_tpacpi_mute_led; |
75 | spec->vmaster_mute_enum = 1; | ||
75 | removefunc = false; | 76 | removefunc = false; |
76 | } | 77 | } |
77 | if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) { | 78 | if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) { |
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index 69528ae5410c..be4d741c45ba 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
19 | #include <linux/spi/spi.h> | 19 | #include <linux/spi/spi.h> |
20 | #include <linux/gpio.h> | 20 | #include <linux/gpio.h> |
21 | #include <linux/acpi.h> | ||
21 | #include <sound/core.h> | 22 | #include <sound/core.h> |
22 | #include <sound/pcm.h> | 23 | #include <sound/pcm.h> |
23 | #include <sound/pcm_params.h> | 24 | #include <sound/pcm_params.h> |
@@ -2656,6 +2657,15 @@ static const struct i2c_device_id rt5645_i2c_id[] = { | |||
2656 | }; | 2657 | }; |
2657 | MODULE_DEVICE_TABLE(i2c, rt5645_i2c_id); | 2658 | MODULE_DEVICE_TABLE(i2c, rt5645_i2c_id); |
2658 | 2659 | ||
2660 | #ifdef CONFIG_ACPI | ||
2661 | static struct acpi_device_id rt5645_acpi_match[] = { | ||
2662 | { "10EC5645", 0 }, | ||
2663 | { "10EC5650", 0 }, | ||
2664 | {}, | ||
2665 | }; | ||
2666 | MODULE_DEVICE_TABLE(acpi, rt5645_acpi_match); | ||
2667 | #endif | ||
2668 | |||
2659 | static int rt5645_i2c_probe(struct i2c_client *i2c, | 2669 | static int rt5645_i2c_probe(struct i2c_client *i2c, |
2660 | const struct i2c_device_id *id) | 2670 | const struct i2c_device_id *id) |
2661 | { | 2671 | { |
@@ -2770,7 +2780,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c, | |||
2770 | 2780 | ||
2771 | case RT5645_DMIC_DATA_GPIO12: | 2781 | case RT5645_DMIC_DATA_GPIO12: |
2772 | regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1, | 2782 | regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1, |
2773 | RT5645_DMIC_1_DP_MASK, RT5645_DMIC_2_DP_GPIO12); | 2783 | RT5645_DMIC_2_DP_MASK, RT5645_DMIC_2_DP_GPIO12); |
2774 | regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1, | 2784 | regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1, |
2775 | RT5645_GP12_PIN_MASK, | 2785 | RT5645_GP12_PIN_MASK, |
2776 | RT5645_GP12_PIN_DMIC2_SDA); | 2786 | RT5645_GP12_PIN_DMIC2_SDA); |
@@ -2872,6 +2882,7 @@ static struct i2c_driver rt5645_i2c_driver = { | |||
2872 | .driver = { | 2882 | .driver = { |
2873 | .name = "rt5645", | 2883 | .name = "rt5645", |
2874 | .owner = THIS_MODULE, | 2884 | .owner = THIS_MODULE, |
2885 | .acpi_match_table = ACPI_PTR(rt5645_acpi_match), | ||
2875 | }, | 2886 | }, |
2876 | .probe = rt5645_i2c_probe, | 2887 | .probe = rt5645_i2c_probe, |
2877 | .remove = rt5645_i2c_remove, | 2888 | .remove = rt5645_i2c_remove, |
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c index af182586712d..169aa471ffbd 100644 --- a/sound/soc/codecs/rt5677.c +++ b/sound/soc/codecs/rt5677.c | |||
@@ -62,6 +62,9 @@ static const struct reg_default init_list[] = { | |||
62 | {RT5677_PR_BASE + 0x1e, 0x0000}, | 62 | {RT5677_PR_BASE + 0x1e, 0x0000}, |
63 | {RT5677_PR_BASE + 0x12, 0x0eaa}, | 63 | {RT5677_PR_BASE + 0x12, 0x0eaa}, |
64 | {RT5677_PR_BASE + 0x14, 0x018a}, | 64 | {RT5677_PR_BASE + 0x14, 0x018a}, |
65 | {RT5677_PR_BASE + 0x15, 0x0490}, | ||
66 | {RT5677_PR_BASE + 0x38, 0x0f71}, | ||
67 | {RT5677_PR_BASE + 0x39, 0x0f71}, | ||
65 | }; | 68 | }; |
66 | #define RT5677_INIT_REG_LEN ARRAY_SIZE(init_list) | 69 | #define RT5677_INIT_REG_LEN ARRAY_SIZE(init_list) |
67 | 70 | ||
@@ -914,7 +917,7 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w, | |||
914 | { | 917 | { |
915 | struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); | 918 | struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); |
916 | struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); | 919 | struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec); |
917 | int idx = rl6231_calc_dmic_clk(rt5677->sysclk); | 920 | int idx = rl6231_calc_dmic_clk(rt5677->lrck[RT5677_AIF1] << 8); |
918 | 921 | ||
919 | if (idx < 0) | 922 | if (idx < 0) |
920 | dev_err(codec->dev, "Failed to set DMIC clock\n"); | 923 | dev_err(codec->dev, "Failed to set DMIC clock\n"); |
diff --git a/sound/soc/codecs/tfa9879.c b/sound/soc/codecs/tfa9879.c index 16f1b71edb55..aab0af681e8c 100644 --- a/sound/soc/codecs/tfa9879.c +++ b/sound/soc/codecs/tfa9879.c | |||
@@ -280,8 +280,8 @@ static int tfa9879_i2c_probe(struct i2c_client *i2c, | |||
280 | int i; | 280 | int i; |
281 | 281 | ||
282 | tfa9879 = devm_kzalloc(&i2c->dev, sizeof(*tfa9879), GFP_KERNEL); | 282 | tfa9879 = devm_kzalloc(&i2c->dev, sizeof(*tfa9879), GFP_KERNEL); |
283 | if (IS_ERR(tfa9879)) | 283 | if (!tfa9879) |
284 | return PTR_ERR(tfa9879); | 284 | return -ENOMEM; |
285 | 285 | ||
286 | i2c_set_clientdata(i2c, tfa9879); | 286 | i2c_set_clientdata(i2c, tfa9879); |
287 | 287 | ||
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index e8bb8eef1d16..0d48804218b1 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c | |||
@@ -1357,7 +1357,7 @@ static int fsl_ssi_probe(struct platform_device *pdev) | |||
1357 | } | 1357 | } |
1358 | 1358 | ||
1359 | ssi_private->irq = platform_get_irq(pdev, 0); | 1359 | ssi_private->irq = platform_get_irq(pdev, 0); |
1360 | if (!ssi_private->irq) { | 1360 | if (ssi_private->irq < 0) { |
1361 | dev_err(&pdev->dev, "no irq for node %s\n", pdev->name); | 1361 | dev_err(&pdev->dev, "no irq for node %s\n", pdev->name); |
1362 | return ssi_private->irq; | 1362 | return ssi_private->irq; |
1363 | } | 1363 | } |
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile index cd9aee9871a3..3853ec2ddbc7 100644 --- a/sound/soc/intel/Makefile +++ b/sound/soc/intel/Makefile | |||
@@ -4,7 +4,7 @@ obj-$(CONFIG_SND_SOC_INTEL_SST) += common/ | |||
4 | # Platform Support | 4 | # Platform Support |
5 | obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/ | 5 | obj-$(CONFIG_SND_SOC_INTEL_HASWELL) += haswell/ |
6 | obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/ | 6 | obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/ |
7 | obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += atom/ | 7 | obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/ |
8 | 8 | ||
9 | # Machine support | 9 | # Machine support |
10 | obj-$(CONFIG_SND_SOC_INTEL_SST) += boards/ | 10 | obj-$(CONFIG_SND_SOC_INTEL_SST) += boards/ |
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c index 1efb33b36303..a839dbfa5218 100644 --- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c +++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c | |||
@@ -759,7 +759,6 @@ fw_err: | |||
759 | dsp_new_err: | 759 | dsp_new_err: |
760 | sst_ipc_fini(ipc); | 760 | sst_ipc_fini(ipc); |
761 | ipc_init_err: | 761 | ipc_init_err: |
762 | kfree(byt); | ||
763 | 762 | ||
764 | return err; | 763 | return err; |
765 | } | 764 | } |
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c index 344a1e9bbce5..324eceb07b25 100644 --- a/sound/soc/intel/haswell/sst-haswell-ipc.c +++ b/sound/soc/intel/haswell/sst-haswell-ipc.c | |||
@@ -2201,7 +2201,6 @@ dma_err: | |||
2201 | dsp_new_err: | 2201 | dsp_new_err: |
2202 | sst_ipc_fini(ipc); | 2202 | sst_ipc_fini(ipc); |
2203 | ipc_init_err: | 2203 | ipc_init_err: |
2204 | kfree(hsw); | ||
2205 | return ret; | 2204 | return ret; |
2206 | } | 2205 | } |
2207 | EXPORT_SYMBOL_GPL(sst_hsw_dsp_init); | 2206 | EXPORT_SYMBOL_GPL(sst_hsw_dsp_init); |
diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c index 6698d058de29..dc790abaa331 100644 --- a/sound/soc/qcom/lpass-cpu.c +++ b/sound/soc/qcom/lpass-cpu.c | |||
@@ -194,7 +194,7 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream, | |||
194 | int cmd, struct snd_soc_dai *dai) | 194 | int cmd, struct snd_soc_dai *dai) |
195 | { | 195 | { |
196 | struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai); | 196 | struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai); |
197 | int ret; | 197 | int ret = -EINVAL; |
198 | 198 | ||
199 | switch (cmd) { | 199 | switch (cmd) { |
200 | case SNDRV_PCM_TRIGGER_START: | 200 | case SNDRV_PCM_TRIGGER_START: |
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c index 326d3c3804e3..5bf723689692 100644 --- a/sound/soc/samsung/s3c24xx-i2s.c +++ b/sound/soc/samsung/s3c24xx-i2s.c | |||
@@ -461,8 +461,8 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev) | |||
461 | return -ENOENT; | 461 | return -ENOENT; |
462 | } | 462 | } |
463 | s3c24xx_i2s.regs = devm_ioremap_resource(&pdev->dev, res); | 463 | s3c24xx_i2s.regs = devm_ioremap_resource(&pdev->dev, res); |
464 | if (s3c24xx_i2s.regs == NULL) | 464 | if (IS_ERR(s3c24xx_i2s.regs)) |
465 | return -ENXIO; | 465 | return PTR_ERR(s3c24xx_i2s.regs); |
466 | 466 | ||
467 | s3c24xx_i2s_pcm_stereo_out.dma_addr = res->start + S3C2410_IISFIFO; | 467 | s3c24xx_i2s_pcm_stereo_out.dma_addr = res->start + S3C2410_IISFIFO; |
468 | s3c24xx_i2s_pcm_stereo_in.dma_addr = res->start + S3C2410_IISFIFO; | 468 | s3c24xx_i2s_pcm_stereo_in.dma_addr = res->start + S3C2410_IISFIFO; |
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c index ac3756f6af60..144308f15fb3 100644 --- a/sound/soc/sh/rcar/dma.c +++ b/sound/soc/sh/rcar/dma.c | |||
@@ -156,6 +156,7 @@ static int rsnd_dmaen_init(struct rsnd_priv *priv, struct rsnd_dma *dma, int id, | |||
156 | (void *)id); | 156 | (void *)id); |
157 | } | 157 | } |
158 | if (IS_ERR_OR_NULL(dmaen->chan)) { | 158 | if (IS_ERR_OR_NULL(dmaen->chan)) { |
159 | dmaen->chan = NULL; | ||
159 | dev_err(dev, "can't get dma channel\n"); | 160 | dev_err(dev, "can't get dma channel\n"); |
160 | goto rsnd_dma_channel_err; | 161 | goto rsnd_dma_channel_err; |
161 | } | 162 | } |
diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c index ab37add269ae..82e350e9501c 100644 --- a/sound/synth/emux/emux_oss.c +++ b/sound/synth/emux/emux_oss.c | |||
@@ -118,12 +118,8 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure) | |||
118 | if (snd_BUG_ON(!arg || !emu)) | 118 | if (snd_BUG_ON(!arg || !emu)) |
119 | return -ENXIO; | 119 | return -ENXIO; |
120 | 120 | ||
121 | mutex_lock(&emu->register_mutex); | 121 | if (!snd_emux_inc_count(emu)) |
122 | |||
123 | if (!snd_emux_inc_count(emu)) { | ||
124 | mutex_unlock(&emu->register_mutex); | ||
125 | return -EFAULT; | 122 | return -EFAULT; |
126 | } | ||
127 | 123 | ||
128 | memset(&callback, 0, sizeof(callback)); | 124 | memset(&callback, 0, sizeof(callback)); |
129 | callback.owner = THIS_MODULE; | 125 | callback.owner = THIS_MODULE; |
@@ -135,7 +131,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure) | |||
135 | if (p == NULL) { | 131 | if (p == NULL) { |
136 | snd_printk(KERN_ERR "can't create port\n"); | 132 | snd_printk(KERN_ERR "can't create port\n"); |
137 | snd_emux_dec_count(emu); | 133 | snd_emux_dec_count(emu); |
138 | mutex_unlock(&emu->register_mutex); | ||
139 | return -ENOMEM; | 134 | return -ENOMEM; |
140 | } | 135 | } |
141 | 136 | ||
@@ -148,8 +143,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure) | |||
148 | reset_port_mode(p, arg->seq_mode); | 143 | reset_port_mode(p, arg->seq_mode); |
149 | 144 | ||
150 | snd_emux_reset_port(p); | 145 | snd_emux_reset_port(p); |
151 | |||
152 | mutex_unlock(&emu->register_mutex); | ||
153 | return 0; | 146 | return 0; |
154 | } | 147 | } |
155 | 148 | ||
@@ -195,13 +188,11 @@ snd_emux_close_seq_oss(struct snd_seq_oss_arg *arg) | |||
195 | if (snd_BUG_ON(!emu)) | 188 | if (snd_BUG_ON(!emu)) |
196 | return -ENXIO; | 189 | return -ENXIO; |
197 | 190 | ||
198 | mutex_lock(&emu->register_mutex); | ||
199 | snd_emux_sounds_off_all(p); | 191 | snd_emux_sounds_off_all(p); |
200 | snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port)); | 192 | snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port)); |
201 | snd_seq_event_port_detach(p->chset.client, p->chset.port); | 193 | snd_seq_event_port_detach(p->chset.client, p->chset.port); |
202 | snd_emux_dec_count(emu); | 194 | snd_emux_dec_count(emu); |
203 | 195 | ||
204 | mutex_unlock(&emu->register_mutex); | ||
205 | return 0; | 196 | return 0; |
206 | } | 197 | } |
207 | 198 | ||
diff --git a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c index 7778b8e19782..a0209204ae48 100644 --- a/sound/synth/emux/emux_seq.c +++ b/sound/synth/emux/emux_seq.c | |||
@@ -124,12 +124,10 @@ snd_emux_detach_seq(struct snd_emux *emu) | |||
124 | if (emu->voices) | 124 | if (emu->voices) |
125 | snd_emux_terminate_all(emu); | 125 | snd_emux_terminate_all(emu); |
126 | 126 | ||
127 | mutex_lock(&emu->register_mutex); | ||
128 | if (emu->client >= 0) { | 127 | if (emu->client >= 0) { |
129 | snd_seq_delete_kernel_client(emu->client); | 128 | snd_seq_delete_kernel_client(emu->client); |
130 | emu->client = -1; | 129 | emu->client = -1; |
131 | } | 130 | } |
132 | mutex_unlock(&emu->register_mutex); | ||
133 | } | 131 | } |
134 | 132 | ||
135 | 133 | ||
@@ -269,8 +267,8 @@ snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data, | |||
269 | /* | 267 | /* |
270 | * increment usage count | 268 | * increment usage count |
271 | */ | 269 | */ |
272 | int | 270 | static int |
273 | snd_emux_inc_count(struct snd_emux *emu) | 271 | __snd_emux_inc_count(struct snd_emux *emu) |
274 | { | 272 | { |
275 | emu->used++; | 273 | emu->used++; |
276 | if (!try_module_get(emu->ops.owner)) | 274 | if (!try_module_get(emu->ops.owner)) |
@@ -284,12 +282,21 @@ snd_emux_inc_count(struct snd_emux *emu) | |||
284 | return 1; | 282 | return 1; |
285 | } | 283 | } |
286 | 284 | ||
285 | int snd_emux_inc_count(struct snd_emux *emu) | ||
286 | { | ||
287 | int ret; | ||
288 | |||
289 | mutex_lock(&emu->register_mutex); | ||
290 | ret = __snd_emux_inc_count(emu); | ||
291 | mutex_unlock(&emu->register_mutex); | ||
292 | return ret; | ||
293 | } | ||
287 | 294 | ||
288 | /* | 295 | /* |
289 | * decrease usage count | 296 | * decrease usage count |
290 | */ | 297 | */ |
291 | void | 298 | static void |
292 | snd_emux_dec_count(struct snd_emux *emu) | 299 | __snd_emux_dec_count(struct snd_emux *emu) |
293 | { | 300 | { |
294 | module_put(emu->card->module); | 301 | module_put(emu->card->module); |
295 | emu->used--; | 302 | emu->used--; |
@@ -298,6 +305,12 @@ snd_emux_dec_count(struct snd_emux *emu) | |||
298 | module_put(emu->ops.owner); | 305 | module_put(emu->ops.owner); |
299 | } | 306 | } |
300 | 307 | ||
308 | void snd_emux_dec_count(struct snd_emux *emu) | ||
309 | { | ||
310 | mutex_lock(&emu->register_mutex); | ||
311 | __snd_emux_dec_count(emu); | ||
312 | mutex_unlock(&emu->register_mutex); | ||
313 | } | ||
301 | 314 | ||
302 | /* | 315 | /* |
303 | * Routine that is called upon a first use of a particular port | 316 | * Routine that is called upon a first use of a particular port |
@@ -317,7 +330,7 @@ snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info) | |||
317 | 330 | ||
318 | mutex_lock(&emu->register_mutex); | 331 | mutex_lock(&emu->register_mutex); |
319 | snd_emux_init_port(p); | 332 | snd_emux_init_port(p); |
320 | snd_emux_inc_count(emu); | 333 | __snd_emux_inc_count(emu); |
321 | mutex_unlock(&emu->register_mutex); | 334 | mutex_unlock(&emu->register_mutex); |
322 | return 0; | 335 | return 0; |
323 | } | 336 | } |
@@ -340,7 +353,7 @@ snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info) | |||
340 | 353 | ||
341 | mutex_lock(&emu->register_mutex); | 354 | mutex_lock(&emu->register_mutex); |
342 | snd_emux_sounds_off_all(p); | 355 | snd_emux_sounds_off_all(p); |
343 | snd_emux_dec_count(emu); | 356 | __snd_emux_dec_count(emu); |
344 | mutex_unlock(&emu->register_mutex); | 357 | mutex_unlock(&emu->register_mutex); |
345 | return 0; | 358 | return 0; |
346 | } | 359 | } |
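
The two emux hunks above fix an ALSA sequencer deadlock by moving the register_mutex out of the close paths and into locked wrappers: snd_emux_inc_count()/snd_emux_dec_count() now take the mutex themselves, while callers that already hold it (snd_emux_use()/snd_emux_unuse()) switch to the unlocked __snd_emux_* variants. A minimal user-space sketch of that locked-wrapper split, with hypothetical names and a pthread mutex standing in for the kernel mutex:

#include <pthread.h>

struct emu_counter {
	pthread_mutex_t lock;   /* assumed initialised with pthread_mutex_init() */
	int used;
};

/* Core operation; the caller must already hold c->lock. */
static int __counter_inc(struct emu_counter *c)
{
	return ++c->used;
}

/* Public wrapper for callers that do not hold the lock. */
int counter_inc(struct emu_counter *c)
{
	int ret;

	pthread_mutex_lock(&c->lock);
	ret = __counter_inc(c);
	pthread_mutex_unlock(&c->lock);
	return ret;
}

/* A path that already holds the lock calls the __ variant directly,
 * mirroring what snd_emux_use()/snd_emux_unuse() do after the change;
 * calling the locking wrapper from here would deadlock. */
int counter_use(struct emu_counter *c)
{
	int ret;

	pthread_mutex_lock(&c->lock);
	/* ... port setup done under the same lock ... */
	ret = __counter_inc(c);
	pthread_mutex_unlock(&c->lock);
	return ret;
}

The double-underscore prefix follows the kernel's usual convention: the __ variant documents that locking is the caller's responsibility.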
diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile index d8fe29fc19a4..8bd960658463 100644 --- a/tools/lib/api/Makefile +++ b/tools/lib/api/Makefile | |||
@@ -16,7 +16,7 @@ MAKEFLAGS += --no-print-directory | |||
16 | LIBFILE = $(OUTPUT)libapi.a | 16 | LIBFILE = $(OUTPUT)libapi.a |
17 | 17 | ||
18 | CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) | 18 | CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) |
19 | CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 -fPIC | 19 | CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC |
20 | CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 | 20 | CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 |
21 | 21 | ||
22 | RM = rm -f | 22 | RM = rm -f |
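
The libapi Makefile change adds -U_FORTIFY_SOURCE ahead of -D_FORTIFY_SOURCE=2 so that toolchains or distributions which already predefine the macro do not produce a redefinition warning, which -Werror would turn into a build failure. The same undef-then-define idea expressed in source form (a sketch only; _FORTIFY_SOURCE also needs the headers included after it and an optimised build to have any effect):

/* Drop any pre-existing definition before setting our own, so a
 * -Werror build does not fail on a macro-redefinition warning. */
#undef _FORTIFY_SOURCE
#define _FORTIFY_SOURCE 2

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16];

	strcpy(buf, "hello");   /* bounds-checked by the fortified strcpy at -O2 */
	puts(buf);
	return 0;
}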
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c index e0917c0f5d9f..29f94f6f0d9e 100644 --- a/tools/lib/traceevent/event-parse.c +++ b/tools/lib/traceevent/event-parse.c | |||
@@ -3865,7 +3865,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, | |||
3865 | } else if (el_size == 4) { | 3865 | } else if (el_size == 4) { |
3866 | trace_seq_printf(s, "%u", *(uint32_t *)num); | 3866 | trace_seq_printf(s, "%u", *(uint32_t *)num); |
3867 | } else if (el_size == 8) { | 3867 | } else if (el_size == 8) { |
3868 | trace_seq_printf(s, "%lu", *(uint64_t *)num); | 3868 | trace_seq_printf(s, "%"PRIu64, *(uint64_t *)num); |
3869 | } else { | 3869 | } else { |
3870 | trace_seq_printf(s, "BAD SIZE:%d 0x%x", | 3870 | trace_seq_printf(s, "BAD SIZE:%d 0x%x", |
3871 | el_size, *(uint8_t *)num); | 3871 | el_size, *(uint8_t *)num); |
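
The event-parse.c fix prints a uint64_t with the PRIu64 macro from <inttypes.h> instead of a hard-coded "%lu": uint64_t is unsigned long on most 64-bit ABIs but unsigned long long on 32-bit ones, so only the fixed-width macro is portable. A minimal illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t num = 123456789012345ULL;

	/* "%lu" only happens to match uint64_t where it is typedef'd to
	 * unsigned long; PRIu64 expands to the correct conversion
	 * specifier on every platform. */
	printf("%" PRIu64 "\n", num);
	return 0;
}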
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c index bedff6b5b3cf..ad0d9b5342fb 100644 --- a/tools/perf/bench/futex-requeue.c +++ b/tools/perf/bench/futex-requeue.c | |||
@@ -132,6 +132,9 @@ int bench_futex_requeue(int argc, const char **argv, | |||
132 | if (!fshared) | 132 | if (!fshared) |
133 | futex_flag = FUTEX_PRIVATE_FLAG; | 133 | futex_flag = FUTEX_PRIVATE_FLAG; |
134 | 134 | ||
135 | if (nrequeue > nthreads) | ||
136 | nrequeue = nthreads; | ||
137 | |||
135 | printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), " | 138 | printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), " |
136 | "%d at a time.\n\n", getpid(), nthreads, | 139 | "%d at a time.\n\n", getpid(), nthreads, |
137 | fshared ? "shared":"private", &futex1, &futex2, nrequeue); | 140 | fshared ? "shared":"private", &futex1, &futex2, nrequeue); |
@@ -161,20 +164,18 @@ int bench_futex_requeue(int argc, const char **argv, | |||
161 | 164 | ||
162 | /* Ok, all threads are patiently blocked, start requeueing */ | 165 | /* Ok, all threads are patiently blocked, start requeueing */ |
163 | gettimeofday(&start, NULL); | 166 | gettimeofday(&start, NULL); |
164 | for (nrequeued = 0; nrequeued < nthreads; nrequeued += nrequeue) { | 167 | while (nrequeued < nthreads) { |
165 | /* | 168 | /* |
166 | * Do not wakeup any tasks blocked on futex1, allowing | 169 | * Do not wakeup any tasks blocked on futex1, allowing |
167 | * us to really measure futex_wait functionality. | 170 | * us to really measure futex_wait functionality. |
168 | */ | 171 | */ |
169 | futex_cmp_requeue(&futex1, 0, &futex2, 0, | 172 | nrequeued += futex_cmp_requeue(&futex1, 0, &futex2, 0, |
170 | nrequeue, futex_flag); | 173 | nrequeue, futex_flag); |
171 | } | 174 | } |
175 | |||
172 | gettimeofday(&end, NULL); | 176 | gettimeofday(&end, NULL); |
173 | timersub(&end, &start, &runtime); | 177 | timersub(&end, &start, &runtime); |
174 | 178 | ||
175 | if (nrequeued > nthreads) | ||
176 | nrequeued = nthreads; | ||
177 | |||
178 | update_stats(&requeued_stats, nrequeued); | 179 | update_stats(&requeued_stats, nrequeued); |
179 | update_stats(&requeuetime_stats, runtime.tv_usec); | 180 | update_stats(&requeuetime_stats, runtime.tv_usec); |
180 | 181 | ||
@@ -184,7 +185,7 @@ int bench_futex_requeue(int argc, const char **argv, | |||
184 | } | 185 | } |
185 | 186 | ||
186 | /* everybody should be blocked on futex2, wake'em up */ | 187 | /* everybody should be blocked on futex2, wake'em up */ |
187 | nrequeued = futex_wake(&futex2, nthreads, futex_flag); | 188 | nrequeued = futex_wake(&futex2, nrequeued, futex_flag); |
188 | if (nthreads != nrequeued) | 189 | if (nthreads != nrequeued) |
189 | warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads); | 190 | warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads); |
190 | 191 | ||
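
The futex-requeue benchmark fixes stop assuming that each FUTEX_CMP_REQUEUE call moves exactly nrequeue waiters: nrequeue is clamped to the thread count up front, and the loop accumulates the kernel's actual return value until every thread has been requeued. A rough user-space sketch of that accumulation loop, using a raw syscall wrapper (hypothetical helper names, error handling omitted):

#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Requeue up to nr_requeue waiters from f1 to f2 (waking nr_wake of
 * them), provided *f1 still equals val.  Returns how many waiters the
 * kernel actually woke or requeued. */
static long cmp_requeue(uint32_t *f1, uint32_t val, uint32_t *f2,
			int nr_wake, int nr_requeue)
{
	return syscall(SYS_futex, f1, FUTEX_CMP_REQUEUE_PRIVATE,
		       nr_wake, nr_requeue, f2, val);
}

void requeue_all(uint32_t *f1, uint32_t *f2, int nthreads, int nrequeue)
{
	int nrequeued = 0;

	if (nrequeue > nthreads)        /* no point asking for more than exist */
		nrequeue = nthreads;

	while (nrequeued < nthreads)    /* count what actually moved each round */
		nrequeued += cmp_requeue(f1, 0, f2, 0, nrequeue);
}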
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c index ebfa163b80b5..ba5efa4710b5 100644 --- a/tools/perf/bench/numa.c +++ b/tools/perf/bench/numa.c | |||
@@ -180,7 +180,7 @@ static const struct option options[] = { | |||
180 | OPT_INTEGER('H', "thp" , &p0.thp, "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"), | 180 | OPT_INTEGER('H', "thp" , &p0.thp, "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"), |
181 | OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details"), | 181 | OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details"), |
182 | OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"), | 182 | OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"), |
183 | OPT_BOOLEAN('q', "quiet" , &p0.show_quiet, "bzero the initial allocations"), | 183 | OPT_BOOLEAN('q', "quiet" , &p0.show_quiet, "quiet mode"), |
184 | OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"), | 184 | OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"), |
185 | 185 | ||
186 | /* Special option string parsing callbacks: */ | 186 | /* Special option string parsing callbacks: */ |
@@ -828,6 +828,9 @@ static int count_process_nodes(int process_nr) | |||
828 | td = g->threads + task_nr; | 828 | td = g->threads + task_nr; |
829 | 829 | ||
830 | node = numa_node_of_cpu(td->curr_cpu); | 830 | node = numa_node_of_cpu(td->curr_cpu); |
831 | if (node < 0) /* curr_cpu was likely still -1 */ | ||
832 | return 0; | ||
833 | |||
831 | node_present[node] = 1; | 834 | node_present[node] = 1; |
832 | } | 835 | } |
833 | 836 | ||
@@ -882,6 +885,11 @@ static void calc_convergence_compression(int *strong) | |||
882 | for (p = 0; p < g->p.nr_proc; p++) { | 885 | for (p = 0; p < g->p.nr_proc; p++) { |
883 | unsigned int nodes = count_process_nodes(p); | 886 | unsigned int nodes = count_process_nodes(p); |
884 | 887 | ||
888 | if (!nodes) { | ||
889 | *strong = 0; | ||
890 | return; | ||
891 | } | ||
892 | |||
885 | nodes_min = min(nodes, nodes_min); | 893 | nodes_min = min(nodes, nodes_min); |
886 | nodes_max = max(nodes, nodes_max); | 894 | nodes_max = max(nodes, nodes_max); |
887 | } | 895 | } |
@@ -1395,7 +1403,7 @@ static void print_res(const char *name, double val, | |||
1395 | if (!name) | 1403 | if (!name) |
1396 | name = "main,"; | 1404 | name = "main,"; |
1397 | 1405 | ||
1398 | if (g->p.show_quiet) | 1406 | if (!g->p.show_quiet) |
1399 | printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short); | 1407 | printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short); |
1400 | else | 1408 | else |
1401 | printf(" %14.3f %s\n", val, txt_long); | 1409 | printf(" %14.3f %s\n", val, txt_long); |
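
The perf bench numa hunks guard against numa_node_of_cpu() returning a negative error when a worker's curr_cpu is still -1 (the thread has not been scheduled yet), which the old code would have used as an array index; a process with no known placement is then treated as not converged. The -q change in print_res() is the separate inverted-condition fix: the detailed line should print when quiet mode is off. A small guard sketch using libnuma (MAX_NR_NODES is an illustrative bound, not from the source):

#include <numa.h>               /* libnuma; link with -lnuma */

#define MAX_NR_NODES 64         /* illustrative bound */

/* Count the distinct nodes a set of CPUs currently maps to; returns 0
 * if any CPU is still unknown (e.g. recorded as -1 before the thread
 * was first scheduled), so the caller can treat it as "no data yet". */
int count_nodes(const int *cpus, int ncpus)
{
	char present[MAX_NR_NODES] = { 0 };
	int i, n, nodes = 0;

	for (i = 0; i < ncpus; i++) {
		n = numa_node_of_cpu(cpus[i]);
		if (n < 0)      /* invalid CPU number: bail out early */
			return 0;
		if (n < MAX_NR_NODES)
			present[n] = 1;
	}
	for (n = 0; n < MAX_NR_NODES; n++)
		nodes += present[n];
	return nodes;
}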
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c index 63ea01349b6e..1634186d537c 100644 --- a/tools/perf/builtin-kmem.c +++ b/tools/perf/builtin-kmem.c | |||
@@ -319,7 +319,7 @@ static int page_stat_cmp(struct page_stat *a, struct page_stat *b) | |||
319 | return 0; | 319 | return 0; |
320 | } | 320 | } |
321 | 321 | ||
322 | static struct page_stat *search_page_alloc_stat(struct page_stat *stat, bool create) | 322 | static struct page_stat *search_page_alloc_stat(struct page_stat *pstat, bool create) |
323 | { | 323 | { |
324 | struct rb_node **node = &page_alloc_tree.rb_node; | 324 | struct rb_node **node = &page_alloc_tree.rb_node; |
325 | struct rb_node *parent = NULL; | 325 | struct rb_node *parent = NULL; |
@@ -331,7 +331,7 @@ static struct page_stat *search_page_alloc_stat(struct page_stat *stat, bool cre | |||
331 | parent = *node; | 331 | parent = *node; |
332 | data = rb_entry(*node, struct page_stat, node); | 332 | data = rb_entry(*node, struct page_stat, node); |
333 | 333 | ||
334 | cmp = page_stat_cmp(data, stat); | 334 | cmp = page_stat_cmp(data, pstat); |
335 | if (cmp < 0) | 335 | if (cmp < 0) |
336 | node = &parent->rb_left; | 336 | node = &parent->rb_left; |
337 | else if (cmp > 0) | 337 | else if (cmp > 0) |
@@ -345,10 +345,10 @@ static struct page_stat *search_page_alloc_stat(struct page_stat *stat, bool cre | |||
345 | 345 | ||
346 | data = zalloc(sizeof(*data)); | 346 | data = zalloc(sizeof(*data)); |
347 | if (data != NULL) { | 347 | if (data != NULL) { |
348 | data->page = stat->page; | 348 | data->page = pstat->page; |
349 | data->order = stat->order; | 349 | data->order = pstat->order; |
350 | data->gfp_flags = stat->gfp_flags; | 350 | data->gfp_flags = pstat->gfp_flags; |
351 | data->migrate_type = stat->migrate_type; | 351 | data->migrate_type = pstat->migrate_type; |
352 | 352 | ||
353 | rb_link_node(&data->node, parent, node); | 353 | rb_link_node(&data->node, parent, node); |
354 | rb_insert_color(&data->node, &page_alloc_tree); | 354 | rb_insert_color(&data->node, &page_alloc_tree); |
@@ -375,7 +375,7 @@ static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel, | |||
375 | unsigned int migrate_type = perf_evsel__intval(evsel, sample, | 375 | unsigned int migrate_type = perf_evsel__intval(evsel, sample, |
376 | "migratetype"); | 376 | "migratetype"); |
377 | u64 bytes = kmem_page_size << order; | 377 | u64 bytes = kmem_page_size << order; |
378 | struct page_stat *stat; | 378 | struct page_stat *pstat; |
379 | struct page_stat this = { | 379 | struct page_stat this = { |
380 | .order = order, | 380 | .order = order, |
381 | .gfp_flags = gfp_flags, | 381 | .gfp_flags = gfp_flags, |
@@ -401,21 +401,21 @@ static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel, | |||
401 | * This is to find the current page (with correct gfp flags and | 401 | * This is to find the current page (with correct gfp flags and |
402 | * migrate type) at free event. | 402 | * migrate type) at free event. |
403 | */ | 403 | */ |
404 | stat = search_page(page, true); | 404 | pstat = search_page(page, true); |
405 | if (stat == NULL) | 405 | if (pstat == NULL) |
406 | return -ENOMEM; | 406 | return -ENOMEM; |
407 | 407 | ||
408 | stat->order = order; | 408 | pstat->order = order; |
409 | stat->gfp_flags = gfp_flags; | 409 | pstat->gfp_flags = gfp_flags; |
410 | stat->migrate_type = migrate_type; | 410 | pstat->migrate_type = migrate_type; |
411 | 411 | ||
412 | this.page = page; | 412 | this.page = page; |
413 | stat = search_page_alloc_stat(&this, true); | 413 | pstat = search_page_alloc_stat(&this, true); |
414 | if (stat == NULL) | 414 | if (pstat == NULL) |
415 | return -ENOMEM; | 415 | return -ENOMEM; |
416 | 416 | ||
417 | stat->nr_alloc++; | 417 | pstat->nr_alloc++; |
418 | stat->alloc_bytes += bytes; | 418 | pstat->alloc_bytes += bytes; |
419 | 419 | ||
420 | order_stats[order][migrate_type]++; | 420 | order_stats[order][migrate_type]++; |
421 | 421 | ||
@@ -428,7 +428,7 @@ static int perf_evsel__process_page_free_event(struct perf_evsel *evsel, | |||
428 | u64 page; | 428 | u64 page; |
429 | unsigned int order = perf_evsel__intval(evsel, sample, "order"); | 429 | unsigned int order = perf_evsel__intval(evsel, sample, "order"); |
430 | u64 bytes = kmem_page_size << order; | 430 | u64 bytes = kmem_page_size << order; |
431 | struct page_stat *stat; | 431 | struct page_stat *pstat; |
432 | struct page_stat this = { | 432 | struct page_stat this = { |
433 | .order = order, | 433 | .order = order, |
434 | }; | 434 | }; |
@@ -441,8 +441,8 @@ static int perf_evsel__process_page_free_event(struct perf_evsel *evsel, | |||
441 | nr_page_frees++; | 441 | nr_page_frees++; |
442 | total_page_free_bytes += bytes; | 442 | total_page_free_bytes += bytes; |
443 | 443 | ||
444 | stat = search_page(page, false); | 444 | pstat = search_page(page, false); |
445 | if (stat == NULL) { | 445 | if (pstat == NULL) { |
446 | pr_debug2("missing free at page %"PRIx64" (order: %d)\n", | 446 | pr_debug2("missing free at page %"PRIx64" (order: %d)\n", |
447 | page, order); | 447 | page, order); |
448 | 448 | ||
@@ -453,18 +453,18 @@ static int perf_evsel__process_page_free_event(struct perf_evsel *evsel, | |||
453 | } | 453 | } |
454 | 454 | ||
455 | this.page = page; | 455 | this.page = page; |
456 | this.gfp_flags = stat->gfp_flags; | 456 | this.gfp_flags = pstat->gfp_flags; |
457 | this.migrate_type = stat->migrate_type; | 457 | this.migrate_type = pstat->migrate_type; |
458 | 458 | ||
459 | rb_erase(&stat->node, &page_tree); | 459 | rb_erase(&pstat->node, &page_tree); |
460 | free(stat); | 460 | free(pstat); |
461 | 461 | ||
462 | stat = search_page_alloc_stat(&this, false); | 462 | pstat = search_page_alloc_stat(&this, false); |
463 | if (stat == NULL) | 463 | if (pstat == NULL) |
464 | return -ENOENT; | 464 | return -ENOENT; |
465 | 465 | ||
466 | stat->nr_free++; | 466 | pstat->nr_free++; |
467 | stat->free_bytes += bytes; | 467 | pstat->free_bytes += bytes; |
468 | 468 | ||
469 | return 0; | 469 | return 0; |
470 | } | 470 | } |
@@ -640,9 +640,9 @@ static void print_page_summary(void) | |||
640 | nr_page_frees, total_page_free_bytes / 1024); | 640 | nr_page_frees, total_page_free_bytes / 1024); |
641 | printf("\n"); | 641 | printf("\n"); |
642 | 642 | ||
643 | printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests", | 643 | printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests", |
644 | nr_alloc_freed, (total_alloc_freed_bytes) / 1024); | 644 | nr_alloc_freed, (total_alloc_freed_bytes) / 1024); |
645 | printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total alloc-only requests", | 645 | printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc-only requests", |
646 | nr_page_allocs - nr_alloc_freed, | 646 | nr_page_allocs - nr_alloc_freed, |
647 | (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024); | 647 | (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024); |
648 | printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests", | 648 | printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests", |
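
The builtin-kmem hunks bundle two cleanups: locals named stat become pstat, avoiding a name that shadows the stat(2) declaration pulled in from libc headers (which can trigger shadow warnings or build failures on some toolchains), and the u64 counters in print_page_summary() move to PRIu64 conversions for the same portability reason as the traceevent fix above. A toy example of the shadowing issue (compiler behaviour varies with flags, so treat this as illustrative):

#include <sys/stat.h>

int order_of(unsigned long x)
{
	/* A local named "stat" shadows the stat() declaration brought in
	 * by <sys/stat.h>; depending on the compiler and warning level
	 * this can be rejected outright, hence the rename to "pstat". */
	int stat = 0;

	while (x > 1) {
		x >>= 1;
		stat++;
	}
	return stat;
}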
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 476cdf7afcca..b63aeda719be 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c | |||
@@ -329,7 +329,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist, | |||
329 | fprintf(stdout, "\n\n"); | 329 | fprintf(stdout, "\n\n"); |
330 | } | 330 | } |
331 | 331 | ||
332 | if (sort_order == default_sort_order && | 332 | if (sort_order == NULL && |
333 | parent_pattern == default_parent_pattern) { | 333 | parent_pattern == default_parent_pattern) { |
334 | fprintf(stdout, "#\n# (%s)\n#\n", help); | 334 | fprintf(stdout, "#\n# (%s)\n#\n", help); |
335 | 335 | ||
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 1cb3436276d1..6a4d5d41c671 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -733,7 +733,7 @@ static void perf_event__process_sample(struct perf_tool *tool, | |||
733 | "Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" | 733 | "Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n" |
734 | "Check /proc/sys/kernel/kptr_restrict.\n\n" | 734 | "Check /proc/sys/kernel/kptr_restrict.\n\n" |
735 | "Kernel%s samples will not be resolved.\n", | 735 | "Kernel%s samples will not be resolved.\n", |
736 | !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ? | 736 | al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ? |
737 | " modules" : ""); | 737 | " modules" : ""); |
738 | if (use_browser <= 0) | 738 | if (use_browser <= 0) |
739 | sleep(5); | 739 | sleep(5); |
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index e124741be187..e122970361f2 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c | |||
@@ -2241,10 +2241,11 @@ static int trace__run(struct trace *trace, int argc, const char **argv) | |||
2241 | if (err < 0) | 2241 | if (err < 0) |
2242 | goto out_error_mmap; | 2242 | goto out_error_mmap; |
2243 | 2243 | ||
2244 | if (!target__none(&trace->opts.target)) | ||
2245 | perf_evlist__enable(evlist); | ||
2246 | |||
2244 | if (forks) | 2247 | if (forks) |
2245 | perf_evlist__start_workload(evlist); | 2248 | perf_evlist__start_workload(evlist); |
2246 | else | ||
2247 | perf_evlist__enable(evlist); | ||
2248 | 2249 | ||
2249 | trace->multiple_threads = evlist->threads->map[0] == -1 || | 2250 | trace->multiple_threads = evlist->threads->map[0] == -1 || |
2250 | evlist->threads->nr > 1 || | 2251 | evlist->threads->nr > 1 || |
@@ -2272,6 +2273,11 @@ next_event: | |||
2272 | 2273 | ||
2273 | if (interrupted) | 2274 | if (interrupted) |
2274 | goto out_disable; | 2275 | goto out_disable; |
2276 | |||
2277 | if (done && !draining) { | ||
2278 | perf_evlist__disable(evlist); | ||
2279 | draining = true; | ||
2280 | } | ||
2275 | } | 2281 | } |
2276 | } | 2282 | } |
2277 | 2283 | ||
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index d8bb616ff57c..d05b77cf35f7 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c | |||
@@ -1084,6 +1084,8 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev) | |||
1084 | * | 1084 | * |
1085 | * TODO:Group name support | 1085 | * TODO:Group name support |
1086 | */ | 1086 | */ |
1087 | if (!arg) | ||
1088 | return -EINVAL; | ||
1087 | 1089 | ||
1088 | ptr = strpbrk(arg, ";=@+%"); | 1090 | ptr = strpbrk(arg, ";=@+%"); |
1089 | if (ptr && *ptr == '=') { /* Event name */ | 1091 | if (ptr && *ptr == '=') { /* Event name */ |
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index b5bf9d5efeaf..2a76e14db732 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c | |||
@@ -578,10 +578,12 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf) | |||
578 | /* Search child die for local variables and parameters. */ | 578 | /* Search child die for local variables and parameters. */ |
579 | if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) { | 579 | if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) { |
580 | /* Search again in global variables */ | 580 | /* Search again in global variables */ |
581 | if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die)) | 581 | if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, |
582 | 0, &vr_die)) { | ||
582 | pr_warning("Failed to find '%s' in this function.\n", | 583 | pr_warning("Failed to find '%s' in this function.\n", |
583 | pf->pvar->var); | 584 | pf->pvar->var); |
584 | ret = -ENOENT; | 585 | ret = -ENOENT; |
586 | } | ||
585 | } | 587 | } |
586 | if (ret >= 0) | 588 | if (ret >= 0) |
587 | ret = convert_variable(&vr_die, pf); | 589 | ret = convert_variable(&vr_die, pf); |
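
The probe-finder.c hunk is a missing-braces fix: without braces only the pr_warning() call was governed by the if, so ret was set to -ENOENT even when the global-variable fallback lookup succeeded. A compact before/after illustration of the pattern (hypothetical names):

#include <errno.h>
#include <stdio.h>

int lookup(int found)
{
	int ret = 0;

	/*
	 * The buggy shape: without braces only the first statement is
	 * conditional, so the error code is set on every path even
	 * though the indentation suggests otherwise.
	 *
	 *	if (!found)
	 *		fprintf(stderr, "lookup failed\n");
	 *		ret = -ENOENT;
	 */

	/* With braces, both statements belong to the failure case. */
	if (!found) {
		fprintf(stderr, "lookup failed\n");
		ret = -ENOENT;
	}
	return ret;
}

int main(void)
{
	printf("found: %d, missing: %d\n", lookup(1), lookup(0));
	return 0;
}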
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile index 5a161175bbd4..a9099d9f8f39 100644 --- a/tools/testing/selftests/powerpc/pmu/Makefile +++ b/tools/testing/selftests/powerpc/pmu/Makefile | |||
@@ -26,7 +26,7 @@ override define EMIT_TESTS | |||
26 | $(MAKE) -s -C ebb emit_tests | 26 | $(MAKE) -s -C ebb emit_tests |
27 | endef | 27 | endef |
28 | 28 | ||
29 | DEFAULT_INSTALL := $(INSTALL_RULE) | 29 | DEFAULT_INSTALL_RULE := $(INSTALL_RULE) |
30 | override define INSTALL_RULE | 30 | override define INSTALL_RULE |
31 | $(DEFAULT_INSTALL_RULE) | 31 | $(DEFAULT_INSTALL_RULE) |
32 | $(MAKE) -C ebb install | 32 | $(MAKE) -C ebb install |
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile index 1b616fa79e93..6bff955e1d55 100644 --- a/tools/testing/selftests/powerpc/tm/Makefile +++ b/tools/testing/selftests/powerpc/tm/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | TEST_PROGS := tm-resched-dscr tm-syscall | 1 | TEST_PROGS := tm-resched-dscr |
2 | 2 | ||
3 | all: $(TEST_PROGS) | 3 | all: $(TEST_PROGS) |
4 | 4 | ||