 311 files changed, 3192 insertions(+), 1608 deletions(-)
@@ -1510,6 +1510,14 @@ D: Natsemi ethernet
 D: Cobalt Networks (x86) support
 D: This-and-That
 
+N: Mark M. Hoffman
+E: mhoffman@lightlink.com
+D: asb100, lm93 and smsc47b397 hardware monitoring drivers
+D: hwmon subsystem core
+D: hwmon subsystem maintainer
+D: i2c-sis96x and i2c-stub SMBus drivers
+S: USA
+
 N: Dirk Hohndel
 E: hohndel@suse.de
 D: The XFree86[tm] Project
diff --git a/Documentation/hwmon/lm75 b/Documentation/hwmon/lm75
index c91a1d15fa28..69af1c7db6b7 100644
--- a/Documentation/hwmon/lm75
+++ b/Documentation/hwmon/lm75
@@ -23,7 +23,7 @@ Supported chips:
 Datasheet: Publicly available at the Maxim website
 http://www.maxim-ic.com/
 * Microchip (TelCom) TCN75
-Prefix: 'lm75'
+Prefix: 'tcn75'
 Addresses scanned: none
 Datasheet: Publicly available at the Microchip website
 http://www.microchip.com/
diff --git a/Documentation/i2c/busses/i2c-diolan-u2c b/Documentation/i2c/busses/i2c-diolan-u2c
index 30fe4bb9a069..0d6018c316c7 100644
--- a/Documentation/i2c/busses/i2c-diolan-u2c
+++ b/Documentation/i2c/busses/i2c-diolan-u2c
@@ -5,7 +5,7 @@ Supported adapters:
 Documentation:
 http://www.diolan.com/i2c/u2c12.html
 
-Author: Guenter Roeck <guenter.roeck@ericsson.com>
+Author: Guenter Roeck <linux@roeck-us.net>
 
 Description
 -----------
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index ce6581c8ca26..4499bd948860 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -912,7 +912,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
 models depending on the codec chip. The list of available models
 is found in HD-Audio-Models.txt
 
-The model name "genric" is treated as a special case. When this
+The model name "generic" is treated as a special case. When this
 model is given, the driver uses the generic codec parser without
 "codec-patch". It's sometimes good for testing and debugging.
 
diff --git a/Documentation/sound/alsa/seq_oss.html b/Documentation/sound/alsa/seq_oss.html
index d9776cf60c07..9663b45f6fde 100644
--- a/Documentation/sound/alsa/seq_oss.html
+++ b/Documentation/sound/alsa/seq_oss.html
@@ -285,7 +285,7 @@ sample data.
 <H4>
 7.2.4 Close Callback</H4>
 The <TT>close</TT> callback is called when this device is closed by the
-applicaion. If any private data was allocated in open callback, it must
+application. If any private data was allocated in open callback, it must
 be released in the close callback. The deletion of ALSA port should be
 done here, too. This callback must not be NULL.
 <H4>
diff --git a/MAINTAINERS b/MAINTAINERS
index 50b4d735f961..4cf5fd334a06 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1338,12 +1338,6 @@ S: Maintained
 F: drivers/platform/x86/asus*.c
 F: drivers/platform/x86/eeepc*.c
 
-ASUS ASB100 HARDWARE MONITOR DRIVER
-M: "Mark M. Hoffman" <mhoffman@lightlink.com>
-L: lm-sensors@lm-sensors.org
-S: Maintained
-F: drivers/hwmon/asb100.c
-
 ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
 M: Dan Williams <djbw@fb.com>
 W: http://sourceforge.net/projects/xscaleiop
@@ -1467,6 +1461,12 @@ F: drivers/dma/at_hdmac.c
 F: drivers/dma/at_hdmac_regs.h
 F: include/linux/platform_data/dma-atmel.h
 
+ATMEL I2C DRIVER
+M: Ludovic Desroches <ludovic.desroches@atmel.com>
+L: linux-i2c@vger.kernel.org
+S: Supported
+F: drivers/i2c/busses/i2c-at91.c
+
 ATMEL ISI DRIVER
 M: Josh Wu <josh.wu@atmel.com>
 L: linux-media@vger.kernel.org
@@ -2629,7 +2629,7 @@ F: include/uapi/drm/
 
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
 M: Daniel Vetter <daniel.vetter@ffwll.ch>
-L: intel-gfx@lists.freedesktop.org (subscribers-only)
+L: intel-gfx@lists.freedesktop.org
 L: dri-devel@lists.freedesktop.org
 T: git git://people.freedesktop.org/~danvet/drm-intel
 S: Supported
@@ -3851,7 +3851,7 @@ F: drivers/i2c/busses/i2c-ismt.c
 F: Documentation/i2c/busses/i2c-ismt
 
 I2C/SMBUS STUB DRIVER
-M: "Mark M. Hoffman" <mhoffman@lightlink.com>
+M: Jean Delvare <khali@linux-fr.org>
 L: linux-i2c@vger.kernel.org
 S: Maintained
 F: drivers/i2c/i2c-stub.c
@@ -5647,6 +5647,14 @@ S: Maintained
 F: drivers/video/riva/
 F: drivers/video/nvidia/
 
+NVM EXPRESS DRIVER
+M: Matthew Wilcox <willy@linux.intel.com>
+L: linux-nvme@lists.infradead.org
+T: git git://git.infradead.org/users/willy/linux-nvme.git
+S: Supported
+F: drivers/block/nvme.c
+F: include/linux/nvme.h
+
 OMAP SUPPORT
 M: Tony Lindgren <tony@atomide.com>
 L: linux-omap@vger.kernel.org
@@ -7198,13 +7206,6 @@ L: netdev@vger.kernel.org
 S: Maintained
 F: drivers/net/ethernet/sis/sis900.*
 
-SIS 96X I2C/SMBUS DRIVER
-M: "Mark M. Hoffman" <mhoffman@lightlink.com>
-L: linux-i2c@vger.kernel.org
-S: Maintained
-F: Documentation/i2c/busses/i2c-sis96x
-F: drivers/i2c/busses/i2c-sis96x.c
-
 SIS FRAMEBUFFER DRIVER
 M: Thomas Winischhofer <thomas@winischhofer.net>
 W: http://www.winischhofer.net/linuxsisvga.shtml
@@ -7282,7 +7283,7 @@ F: Documentation/hwmon/sch5627
 F: drivers/hwmon/sch5627.c
 
 SMSC47B397 HARDWARE MONITOR DRIVER
-M: "Mark M. Hoffman" <mhoffman@lightlink.com>
+M: Jean Delvare <khali@linux-fr.org>
 L: lm-sensors@lm-sensors.org
 S: Maintained
 F: Documentation/hwmon/smsc47b397
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2c3bdce15134..13b739469c51 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -49,7 +49,6 @@ config ARM
 select HAVE_REGS_AND_STACK_ACCESS_API
 select HAVE_SYSCALL_TRACEPOINTS
 select HAVE_UID16
-select VIRT_TO_BUS
 select KTIME_SCALAR
 select PERF_USE_VMALLOC
 select RTC_LIB
@@ -743,6 +742,7 @@ config ARCH_RPC
 select NEED_MACH_IO_H
 select NEED_MACH_MEMORY_H
 select NO_IOPORT
+select VIRT_TO_BUS
 help
 On the Acorn Risc-PC, Linux can support the internal IDE disk and
 CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -878,6 +878,7 @@ config ARCH_SHARK
 select ISA_DMA
 select NEED_MACH_MEMORY_H
 select PCI
+select VIRT_TO_BUS
 select ZONE_DMA
 help
 Support for the StrongARM based Digital DNARD machine, also known
@@ -1005,12 +1006,12 @@ config ARCH_MULTI_V4_V5
 bool
 
 config ARCH_MULTI_V6
-bool "ARMv6 based platforms (ARM11, Scorpion, ...)"
+bool "ARMv6 based platforms (ARM11)"
 select ARCH_MULTI_V6_V7
 select CPU_V6
 
 config ARCH_MULTI_V7
-bool "ARMv7 based platforms (Cortex-A, PJ4, Krait)"
+bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)"
 default y
 select ARCH_MULTI_V6_V7
 select ARCH_VEXPRESS
@@ -1461,10 +1462,6 @@ config ISA_DMA
 bool
 select ISA_DMA_API
 
-config ARCH_NO_VIRT_TO_BUS
-def_bool y
-depends on !ARCH_RPC && !ARCH_NETWINDER && !ARCH_SHARK
-
 # Select ISA DMA interface
 config ISA_DMA_API
 bool
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index aa98e641931f..a98c0d50fbbe 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -238,8 +238,32 @@
 nand {
 pinctrl_nand: nand-0 {
 atmel,pins =
-<3 4 0x0 0x1 /* PD5 gpio RDY pin pull_up */
-3 5 0x0 0x1>; /* PD4 gpio enable pin pull_up */
+<3 0 0x1 0x0 /* PD0 periph A Read Enable */
+3 1 0x1 0x0 /* PD1 periph A Write Enable */
+3 2 0x1 0x0 /* PD2 periph A Address Latch Enable */
+3 3 0x1 0x0 /* PD3 periph A Command Latch Enable */
+3 4 0x0 0x1 /* PD4 gpio Chip Enable pin pull_up */
+3 5 0x0 0x1 /* PD5 gpio RDY/BUSY pin pull_up */
+3 6 0x1 0x0 /* PD6 periph A Data bit 0 */
+3 7 0x1 0x0 /* PD7 periph A Data bit 1 */
+3 8 0x1 0x0 /* PD8 periph A Data bit 2 */
+3 9 0x1 0x0 /* PD9 periph A Data bit 3 */
+3 10 0x1 0x0 /* PD10 periph A Data bit 4 */
+3 11 0x1 0x0 /* PD11 periph A Data bit 5 */
+3 12 0x1 0x0 /* PD12 periph A Data bit 6 */
+3 13 0x1 0x0>; /* PD13 periph A Data bit 7 */
+};
+
+pinctrl_nand_16bits: nand_16bits-0 {
+atmel,pins =
+<3 14 0x1 0x0 /* PD14 periph A Data bit 8 */
+3 15 0x1 0x0 /* PD15 periph A Data bit 9 */
+3 16 0x1 0x0 /* PD16 periph A Data bit 10 */
+3 17 0x1 0x0 /* PD17 periph A Data bit 11 */
+3 18 0x1 0x0 /* PD18 periph A Data bit 12 */
+3 19 0x1 0x0 /* PD19 periph A Data bit 13 */
+3 20 0x1 0x0 /* PD20 periph A Data bit 14 */
+3 21 0x1 0x0>; /* PD21 periph A Data bit 15 */
 };
 };
 
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index e1347fceb5bc..1a62bcf18aa3 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -275,18 +275,27 @@
 compatible = "arm,pl330", "arm,primecell";
 reg = <0x12680000 0x1000>;
 interrupts = <0 35 0>;
+#dma-cells = <1>;
+#dma-channels = <8>;
+#dma-requests = <32>;
 };
 
 pdma1: pdma@12690000 {
 compatible = "arm,pl330", "arm,primecell";
 reg = <0x12690000 0x1000>;
 interrupts = <0 36 0>;
+#dma-cells = <1>;
+#dma-channels = <8>;
+#dma-requests = <32>;
 };
 
 mdma1: mdma@12850000 {
 compatible = "arm,pl330", "arm,primecell";
 reg = <0x12850000 0x1000>;
 interrupts = <0 34 0>;
+#dma-cells = <1>;
+#dma-channels = <8>;
+#dma-requests = <1>;
 };
 };
 };
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index 5f3562ad6746..9a99755920c0 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -142,12 +142,18 @@
 compatible = "arm,pl330", "arm,primecell";
 reg = <0x120000 0x1000>;
 interrupts = <0 34 0>;
+#dma-cells = <1>;
+#dma-channels = <8>;
+#dma-requests = <32>;
 };
 
 pdma1: pdma@121B0000 {
 compatible = "arm,pl330", "arm,primecell";
 reg = <0x121000 0x1000>;
 interrupts = <0 35 0>;
+#dma-cells = <1>;
+#dma-channels = <8>;
+#dma-requests = <32>;
 };
 };
 
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 31644f1978d5..79078edbb9bc 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -480,7 +480,7 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
 evt->features = CLOCK_EVT_FEAT_ONESHOT |
 CLOCK_EVT_FEAT_PERIODIC |
 CLOCK_EVT_FEAT_DUMMY;
-evt->rating = 400;
+evt->rating = 100;
 evt->mult = 1;
 evt->set_mode = broadcast_timer_set_mode;
 
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index d912e7397ecc..94b0650ea98f 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -14,31 +14,15 @@
 
 .text
 .align 5
-.word 0
-
-1: subs r2, r2, #4 @ 1 do we have enough
-blt 5f @ 1 bytes to align with?
-cmp r3, #2 @ 1
-strltb r1, [ip], #1 @ 1
-strleb r1, [ip], #1 @ 1
-strb r1, [ip], #1 @ 1
-add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
-/*
- * The pointer is now aligned and the length is adjusted. Try doing the
- * memset again.
- */
 
 ENTRY(memset)
-/*
- * Preserve the contents of r0 for the return value.
- */
-mov ip, r0
-ands r3, ip, #3 @ 1 unaligned?
-bne 1b @ 1
+ands r3, r0, #3 @ 1 unaligned?
+mov ip, r0 @ preserve r0 as return value
+bne 6f @ 1
 /*
 * we know that the pointer in ip is aligned to a word boundary.
 */
-orr r1, r1, r1, lsl #8
+1: orr r1, r1, r1, lsl #8
 orr r1, r1, r1, lsl #16
 mov r3, r1
 cmp r2, #16
@@ -127,4 +111,13 @@ ENTRY(memset)
 tst r2, #1
 strneb r1, [ip], #1
 mov pc, lr
+
+6: subs r2, r2, #4 @ 1 do we have enough
+blt 5b @ 1 bytes to align with?
+cmp r3, #2 @ 1
+strltb r1, [ip], #1 @ 1
+strleb r1, [ip], #1 @ 1
+strb r1, [ip], #1 @ 1
+add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
+b 1b
 ENDPROC(memset)
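Editor's note (not part of the patch): the hunks above move memset()'s head-alignment code behind the new "6:" label instead of placing it before the ENTRY point. A rough C model of what that alignment path does, written only as an illustration (function and variable names here are made up, not the kernel's):

#include <stddef.h>
#include <stdint.h>

/* Illustrative C model of the assembly's flow: store up to 3 single bytes
 * until the destination is word aligned, shrink the count accordingly,
 * then switch to word-sized stores of the replicated fill byte. */
static void *memset_model(void *dst, int c, size_t n)
{
	uint8_t *p = dst;
	uint32_t pattern = (uint8_t)c * 0x01010101u;	/* orr r1,r1,r1,lsl #8 / #16 */

	while (((uintptr_t)p & 3) && n) {	/* "do we have enough bytes to align with?" */
		*p++ = (uint8_t)c;
		n--;
	}
	while (n >= 4) {			/* aligned word loop */
		*(uint32_t *)p = pattern;
		p += 4;
		n -= 4;
	}
	while (n--)				/* trailing 1-3 bytes */
		*p++ = (uint8_t)c;
	return dst;				/* r0 is preserved as the return value */
}
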
diff --git a/arch/arm/mach-at91/include/mach/gpio.h b/arch/arm/mach-at91/include/mach/gpio.h
index eed465ab0dd7..5fc23771c154 100644
--- a/arch/arm/mach-at91/include/mach/gpio.h
+++ b/arch/arm/mach-at91/include/mach/gpio.h
@@ -209,6 +209,14 @@ extern int at91_get_gpio_value(unsigned pin);
 extern void at91_gpio_suspend(void);
 extern void at91_gpio_resume(void);
 
+#ifdef CONFIG_PINCTRL_AT91
+extern void at91_pinctrl_gpio_suspend(void);
+extern void at91_pinctrl_gpio_resume(void);
+#else
+static inline void at91_pinctrl_gpio_suspend(void) {}
+static inline void at91_pinctrl_gpio_resume(void) {}
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c
index 8e210262aeee..e0ca59171022 100644
--- a/arch/arm/mach-at91/irq.c
+++ b/arch/arm/mach-at91/irq.c
@@ -92,23 +92,21 @@ static int at91_aic_set_wake(struct irq_data *d, unsigned value)
 
 void at91_irq_suspend(void)
 {
-int i = 0, bit;
+int bit = -1;
 
 if (has_aic5()) {
 /* disable enabled irqs */
-while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
+while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
 at91_aic_write(AT91_AIC5_SSR,
 bit & AT91_AIC5_INTSEL_MSK);
 at91_aic_write(AT91_AIC5_IDCR, 1);
-i = bit;
 }
 /* enable wakeup irqs */
-i = 0;
-while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
+bit = -1;
+while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
 at91_aic_write(AT91_AIC5_SSR,
 bit & AT91_AIC5_INTSEL_MSK);
 at91_aic_write(AT91_AIC5_IECR, 1);
-i = bit;
 }
 } else {
 at91_aic_write(AT91_AIC_IDCR, *backups);
@@ -118,23 +116,21 @@ void at91_irq_suspend(void)
 
 void at91_irq_resume(void)
 {
-int i = 0, bit;
+int bit = -1;
 
 if (has_aic5()) {
 /* disable wakeup irqs */
-while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
+while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) {
 at91_aic_write(AT91_AIC5_SSR,
 bit & AT91_AIC5_INTSEL_MSK);
 at91_aic_write(AT91_AIC5_IDCR, 1);
-i = bit;
 }
 /* enable irqs disabled for suspend */
-i = 0;
-while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
+bit = -1;
+while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) {
 at91_aic_write(AT91_AIC5_SSR,
 bit & AT91_AIC5_INTSEL_MSK);
 at91_aic_write(AT91_AIC5_IECR, 1);
-i = bit;
 }
 } else {
 at91_aic_write(AT91_AIC_IDCR, *wakeups);
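Editor's note (not part of the patch): the old loops restarted find_next_bit() at the bit just handled, so the same bit was found again on every pass; restarting at bit + 1, seeded with -1, visits each set bit exactly once. A small stand-alone illustration of the fixed pattern, using a simplified stand-in for find_next_bit():

#include <stdio.h>

/* simplified stand-in for the kernel's find_next_bit(): returns the index
 * of the first set bit at or after 'start', or 'size' if none remain */
static int find_next_bit_sim(unsigned long map, int size, int start)
{
	for (int i = start; i < size; i++)
		if (map & (1UL << i))
			return i;
	return size;
}

int main(void)
{
	unsigned long backups = (1UL << 0) | (1UL << 3) | (1UL << 7);
	int bit = -1;

	/* same shape as the fixed loops above: resume just past the last hit */
	while ((bit = find_next_bit_sim(backups, 32, bit + 1)) < 32)
		printf("disable irq %d\n", bit);	/* prints 0, 3, 7 once each */
	return 0;
}
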
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index adb6db888a1f..73f1f250403a 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -201,7 +201,10 @@ extern u32 at91_slow_clock_sz;
 
 static int at91_pm_enter(suspend_state_t state)
 {
-at91_gpio_suspend();
+if (of_have_populated_dt())
+at91_pinctrl_gpio_suspend();
+else
+at91_gpio_suspend();
 at91_irq_suspend();
 
 pr_debug("AT91: PM - wake mask %08x, pm state %d\n",
@@ -286,7 +289,10 @@ static int at91_pm_enter(suspend_state_t state)
 error:
 target_state = PM_SUSPEND_ON;
 at91_irq_resume();
-at91_gpio_resume();
+if (of_have_populated_dt())
+at91_pinctrl_gpio_resume();
+else
+at91_gpio_resume();
 return 0;
 }
 
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index a685e9706b7b..45b7c71d9cc1 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -743,6 +743,9 @@ EXPORT_SYMBOL(edma_free_channel);
 */
 int edma_alloc_slot(unsigned ctlr, int slot)
 {
+if (!edma_cc[ctlr])
+return -EINVAL;
+
 if (slot >= 0)
 slot = EDMA_CHAN_SLOT(slot);
 
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index abda5a18a664..0f2111a11315 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -67,6 +67,7 @@ config ARCH_NETWINDER
 select ISA
 select ISA_DMA
 select PCI
+select VIRT_TO_BUS
 help
 Say Y here if you intend to run this kernel on the Rebel.COM
 NetWinder. Information about this machine can be found at:
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index 74e3a34d78b8..e13a8fa5e62c 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -264,6 +264,7 @@ int __init mx35_clocks_init(void)
 clk_prepare_enable(clk[gpio3_gate]);
 clk_prepare_enable(clk[iim_gate]);
 clk_prepare_enable(clk[emi_gate]);
+clk_prepare_enable(clk[max_gate]);
 
 /*
 * SCC is needed to boot via mmc after a watchdog reset. The clock code
diff --git a/arch/arm/mach-imx/imx25-dt.c b/arch/arm/mach-imx/imx25-dt.c
index 03b65e5ea541..82348391582a 100644
--- a/arch/arm/mach-imx/imx25-dt.c
+++ b/arch/arm/mach-imx/imx25-dt.c
@@ -27,6 +27,11 @@ static const char * const imx25_dt_board_compat[] __initconst = {
 NULL
 };
 
+static void __init imx25_timer_init(void)
+{
+mx25_clocks_init_dt();
+}
+
 DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
 .map_io = mx25_map_io,
 .init_early = imx25_init_early,
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c
index d1e2d595e79c..f62b68d926f4 100644
--- a/arch/arm/mach-mmp/gplugd.c
+++ b/arch/arm/mach-mmp/gplugd.c
@@ -9,6 +9,7 @@
 */
 
 #include <linux/init.h>
+#include <linux/platform_device.h>
 #include <linux/gpio.h>
 
 #include <asm/mach/arch.h>
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index fcdf52dbcc49..f051f53e35b7 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -214,11 +214,6 @@ static struct clk clk_pcmcdclk2 = {
 .name = "pcmcdclk",
 };
 
-static struct clk dummy_apb_pclk = {
-.name = "apb_pclk",
-.id = -1,
-};
-
 static struct clk *clkset_vpllsrc_list[] = {
 [0] = &clk_fin_vpll,
 [1] = &clk_sclk_hdmi27m,
@@ -305,18 +300,6 @@
 
 static struct clk init_clocks_off[] = {
 {
-.name = "dma",
-.devname = "dma-pl330.0",
-.parent = &clk_hclk_psys.clk,
-.enable = s5pv210_clk_ip0_ctrl,
-.ctrlbit = (1 << 3),
-}, {
-.name = "dma",
-.devname = "dma-pl330.1",
-.parent = &clk_hclk_psys.clk,
-.enable = s5pv210_clk_ip0_ctrl,
-.ctrlbit = (1 << 4),
-}, {
 .name = "rot",
 .parent = &clk_hclk_dsys.clk,
 .enable = s5pv210_clk_ip0_ctrl,
@@ -573,6 +556,20 @@
 .ctrlbit = (1<<19),
 };
 
+static struct clk clk_pdma0 = {
+.name = "pdma0",
+.parent = &clk_hclk_psys.clk,
+.enable = s5pv210_clk_ip0_ctrl,
+.ctrlbit = (1 << 3),
+};
+
+static struct clk clk_pdma1 = {
+.name = "pdma1",
+.parent = &clk_hclk_psys.clk,
+.enable = s5pv210_clk_ip0_ctrl,
+.ctrlbit = (1 << 4),
+};
+
 static struct clk *clkset_uart_list[] = {
 [6] = &clk_mout_mpll.clk,
 [7] = &clk_mout_epll.clk,
@@ -1075,6 +1072,8 @@ static struct clk *clk_cdev[] = {
 &clk_hsmmc1,
 &clk_hsmmc2,
 &clk_hsmmc3,
+&clk_pdma0,
+&clk_pdma1,
 };
 
 /* Clock initialisation code */
@@ -1333,6 +1332,8 @@ static struct clk_lookup s5pv210_clk_lookup[] = {
 CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
 CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
 CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
+CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0),
+CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1),
 };
 
 void __init s5pv210_register_clocks(void)
@@ -1361,6 +1362,5 @@ void __init s5pv210_register_clocks(void)
 for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++)
 s3c_disable_clocks(clk_cdev[ptr], 1);
 
-s3c24xx_register_clock(&dummy_apb_pclk);
 s3c_pwmclk_init();
 }
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index 3a38f7b34b94..e373de44a8b6 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -845,7 +845,7 @@ static struct fimc_source_info goni_camera_sensors[] = {
 .mux_id = 0,
 .flags = V4L2_MBUS_PCLK_SAMPLE_FALLING |
 V4L2_MBUS_VSYNC_ACTIVE_LOW,
-.bus_type = FIMC_BUS_TYPE_ITU_601,
+.fimc_bus_type = FIMC_BUS_TYPE_ITU_601,
 .board_info = &noon010pc30_board_info,
 .i2c_bus_num = 0,
 .clk_frequency = 16000000UL,
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index cdcb799e802f..fec49ebc359a 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -32,6 +32,7 @@
 #include <linux/smsc911x.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/sh_hspi.h>
+#include <linux/mmc/host.h>
 #include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mfd/tmio.h>
 #include <linux/usb/otg.h>
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6828ef6ce80e..a0bd8a755bdf 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -576,7 +576,7 @@ load_ind:
 /* x = ((*(frame + k)) & 0xf) << 2; */
 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
 /* the interpreter should deal with the negative K */
-if (k < 0)
+if ((int)k < 0)
 return -1;
 /* offset in r1: we might have to take the slow path */
 emit_mov_i(r_off, k, ctx);
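Editor's note (not part of the patch): the cast matters because the BPF constant k is carried in an unsigned 32-bit field, so a plain `k < 0` can never be true; casting to int makes the sign of the original constant visible again. A minimal stand-alone illustration (the variable name mirrors the hunk, the surrounding types are assumptions of this sketch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t k = (uint32_t)-4;	/* a "negative K" stored in an unsigned field */

	if (k < 0)			/* unsigned compare: always false */
		puts("unsigned compare says negative");
	if ((int32_t)k < 0)		/* the cast recovers the sign: true */
		puts("cast to signed says negative");
	return 0;
}
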
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index fd70a68387eb..9b6d19f74078 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -9,7 +9,6 @@ config ARM64
 select CLONE_BACKWARDS
 select COMMON_CLK
 select GENERIC_CLOCKEVENTS
-select GENERIC_HARDIRQS_NO_DEPRECATED
 select GENERIC_IOMAP
 select GENERIC_IRQ_PROBE
 select GENERIC_IRQ_SHOW
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 51493430f142..1a6bfe954d49 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -6,17 +6,6 @@ config FRAME_POINTER
 bool
 default y
 
-config DEBUG_ERRORS
-bool "Verbose kernel error messages"
-depends on DEBUG_KERNEL
-help
-This option controls verbose debugging information which can be
-printed when the kernel detects an internal error. This debugging
-information is useful to kernel hackers when tracking down problems,
-but mostly meaningless to other people. It's safe to say Y unless
-you are concerned with the code size or don't want to see these
-messages.
-
 config DEBUG_STACK_USAGE
 bool "Enable stack utilization instrumentation"
 depends on DEBUG_KERNEL
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 9212c7880da7..09bef29f3a09 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -82,4 +82,3 @@ CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_INFO=y
 # CONFIG_FTRACE is not set
 CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_DEBUG_ERRORS=y
diff --git a/arch/arm64/include/asm/ucontext.h b/arch/arm64/include/asm/ucontext.h
index bde960720892..42e04c877428 100644
--- a/arch/arm64/include/asm/ucontext.h
+++ b/arch/arm64/include/asm/ucontext.h
@@ -22,7 +22,7 @@ struct ucontext {
 stack_t uc_stack;
 sigset_t uc_sigmask;
 /* glibc uses a 1024-bit sigset_t */
-__u8 __unused[(1024 - sizeof(sigset_t)) / 8];
+__u8 __unused[1024 / 8 - sizeof(sigset_t)];
 /* last for future expansion */
 struct sigcontext uc_mcontext;
 };
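Editor's note (not part of the patch): the intent is that uc_sigmask plus the padding together span the 1024 bits (128 bytes) glibc reserves. A quick self-contained check of the two expressions, assuming the kernel-side sigset_t is 8 bytes on arm64 (that size is an assumption of this sketch, not stated in the hunk):

#include <assert.h>

#define SIGSET_BYTES 8				/* assumed sizeof(sigset_t) on arm64 */

int main(void)
{
	int pad_old = (1024 - SIGSET_BYTES) / 8;	/* old formula: 127 */
	int pad_new = 1024 / 8 - SIGSET_BYTES;		/* new formula: 120 */

	/* only the new padding keeps sigmask + padding at 128 bytes */
	assert(SIGSET_BYTES + pad_new == 1024 / 8);	/* 8 + 120 == 128 */
	assert(SIGSET_BYTES + pad_old != 1024 / 8);	/* 8 + 127 == 135 */
	return 0;
}
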
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index cef3925eaf60..aa3e948f7885 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -40,7 +40,9 @@ EXPORT_SYMBOL(__copy_to_user);
 EXPORT_SYMBOL(__clear_user);
 
 /* bitops */
+#ifdef CONFIG_SMP
 EXPORT_SYMBOL(__atomic_hash);
+#endif
 
 /* physical memory */
 EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 7f4f3673f2bc..e393174fe859 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -549,7 +549,6 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
 sigset_t *set, struct pt_regs *regs)
 {
 struct compat_rt_sigframe __user *frame;
-compat_stack_t stack;
 int err = 0;
 
 frame = compat_get_sigframe(ka, regs, sizeof(*frame));
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 80821512e9cc..ea5bb045983a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -90,6 +90,7 @@ config GENERIC_GPIO
 config PPC
 bool
 default y
+select BINFMT_ELF
 select OF
 select OF_EARLY_FLATTREE
 select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 2fdb47a19efd..b59e06f507ea 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -343,17 +343,16 @@ extern void slb_set_size(u16 size);
 /*
 * VSID allocation (256MB segment)
 *
-* We first generate a 38-bit "proto-VSID". For kernel addresses this
-* is equal to the ESID | 1 << 37, for user addresses it is:
-* (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1)
+* We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
+* from mmu context id and effective segment id of the address.
 *
-* This splits the proto-VSID into the below range
-* 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range
-* 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range
-*
-* We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1
-* That is, we assign half of the space to user processes and half
-* to the kernel.
+* For user processes max context id is limited to ((1ul << 19) - 5)
+* for kernel space, we use the top 4 context ids to map address as below
+* NOTE: each context only support 64TB now.
+* 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
+* 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
+* 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
+* 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
@@ -363,41 +362,49 @@ extern void slb_set_size(u16 size);
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
-* a divide or extra multiply (see below).
-*
-* This scheme has several advantages over older methods:
-*
-* - We have VSIDs allocated for every kernel address
-* (i.e. everything above 0xC000000000000000), except the very top
-* segment, which simplifies several things.
+* a divide or extra multiply (see below). The scramble function gives
+* robust scattering in the hash table (at least based on some initial
+* results).
 *
-* - We allow for USER_ESID_BITS significant bits of ESID and
-* CONTEXT_BITS bits of context for user addresses.
-* i.e. 64T (46 bits) of address space for up to half a million contexts.
+* We also consider VSID 0 special. We use VSID 0 for slb entries mapping
+* bad address. This enables us to consolidate bad address handling in
+* hash_page.
 *
-* - The scramble function gives robust scattering in the hash
-* table (at least based on some initial results). The previous
-* method was more susceptible to pathological cases giving excessive
-* hash collisions.
+* We also need to avoid the last segment of the last context, because that
+* would give a protovsid of 0x1fffffffff. That will result in a VSID 0
+* because of the modulo operation in vsid scramble. But the vmemmap
+* (which is what uses region 0xf) will never be close to 64TB in size
+* (it's 56 bytes per page of system memory).
 */
 
+#define CONTEXT_BITS 19
+#define ESID_BITS 18
+#define ESID_BITS_1T 6
+
+/*
+ * 256MB segment
+ * The proto-VSID space has 2^(CONTEX_BITS + ESID_BITS) - 1 segments
+ * available for user + kernel mapping. The top 4 contexts are used for
+ * kernel mapping. Each segment contains 2^28 bytes. Each
+ * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
+ * (19 == 37 + 28 - 46).
+ */
+#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5)
+
 /*
 * This should be computed such that protovosid * vsid_mulitplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
 */
 #define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
-#define VSID_BITS_256M 38
+#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS)
 #define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
 
 #define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
-#define VSID_BITS_1T 26
+#define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T)
 #define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
 
-#define CONTEXT_BITS 19
-#define USER_ESID_BITS 18
-#define USER_ESID_BITS_1T 6
 
-#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
+#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
 
 /*
 * This macro generates asm code to compute the VSID scramble
@@ -421,7 +428,8 @@ extern void slb_set_size(u16 size);
 srdi rx,rt,VSID_BITS_##size; \
 clrldi rt,rt,(64-VSID_BITS_##size); \
 add rt,rt,rx; /* add high and low bits */ \
-/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
+/* NOTE: explanation based on VSID_BITS_##size = 36 \
+ * Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
 * 2^36-1+2^28-1. That in particular means that if r3 >= \
 * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \
 * the bit clear, r3 already has the answer we want, if it \
@@ -513,34 +521,6 @@ typedef struct {
 })
 #endif /* 1 */
 
-/*
- * This is only valid for addresses >= PAGE_OFFSET
- * The proto-VSID space is divided into two class
- * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1
- * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
- *
- * With KERNEL_START at 0xc000000000000000, the proto vsid for
- * the kernel ends up with 0xc00000000 (36 bits). With 64TB
- * support we need to have kernel proto-VSID in the
- * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
- */
-static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
-{
-unsigned long proto_vsid;
-/*
- * We need to make sure proto_vsid for the kernel is
- * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
- */
-if (ssize == MMU_SEGSIZE_256M) {
-proto_vsid = ea >> SID_SHIFT;
-proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
-return vsid_scramble(proto_vsid, 256M);
-}
-proto_vsid = ea >> SID_SHIFT_1T;
-proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
-return vsid_scramble(proto_vsid, 1T);
-}
-
 /* Returns the segment size indicator for a user address */
 static inline int user_segment_size(unsigned long addr)
 {
@@ -550,17 +530,41 @@ static inline int user_segment_size(unsigned long addr)
 return MMU_SEGSIZE_256M;
 }
 
-/* This is only valid for user addresses (which are below 2^44) */
 static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
 int ssize)
 {
+/*
+ * Bad address. We return VSID 0 for that
+ */
+if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
+return 0;
+
 if (ssize == MMU_SEGSIZE_256M)
-return vsid_scramble((context << USER_ESID_BITS)
+return vsid_scramble((context << ESID_BITS)
 | (ea >> SID_SHIFT), 256M);
-return vsid_scramble((context << USER_ESID_BITS_1T)
+return vsid_scramble((context << ESID_BITS_1T)
 | (ea >> SID_SHIFT_1T), 1T);
 }
 
+/*
+ * This is only valid for addresses >= PAGE_OFFSET
+ *
+ * For kernel space, we use the top 4 context ids to map address as below
+ * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
+ * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
+ * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
+ * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
+ */
+static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
+{
+unsigned long context;
+
+/*
+ * kernel take the top 4 context from the available range
+ */
+context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
+return get_vsid(context, ea, ssize);
+}
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_MMU_HASH64_H_ */
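Editor's note (not part of the patch): to make the comment's arithmetic concrete, here is a rough user-space sketch of the 256MB-segment path using only the constants introduced in the hunk above (CONTEXT_BITS = 19, ESID_BITS = 18, 24-bit prime multiplier). The plain modulo stands in for the fold the kernel uses to avoid a divide, and the explicit ESID mask is a simplification of this sketch, not the kernel's exact code:

#include <stdint.h>
#include <stdio.h>

/* Constants from the hunk above (256MB segment case). */
#define CONTEXT_BITS          19
#define ESID_BITS             18
#define SID_SHIFT             28                              /* 256MB = 2^28 */
#define VSID_BITS_256M        (CONTEXT_BITS + ESID_BITS)      /* 37 */
#define VSID_MODULUS_256M     ((1UL << VSID_BITS_256M) - 1)
#define VSID_MULTIPLIER_256M  12538073UL                      /* 24-bit prime */

/* Multiplicative hash modulo 2^37 - 1; the kernel folds high and low
 * halves instead of dividing, which yields the same residue. */
static uint64_t vsid_scramble_256m(uint64_t proto_vsid)
{
	return (proto_vsid * VSID_MULTIPLIER_256M) % VSID_MODULUS_256M;
}

/* proto-VSID = (context << ESID_BITS) | esid, as the comment describes. */
static uint64_t user_vsid(uint64_t context, uint64_t ea)
{
	uint64_t esid = (ea >> SID_SHIFT) & ((1UL << ESID_BITS) - 1);
	return vsid_scramble_256m((context << ESID_BITS) | esid);
}

int main(void)
{
	/* hypothetical context id and effective address, for illustration only */
	printf("vsid = 0x%llx\n",
	       (unsigned long long)user_vsid(42, 0x0000123456789000UL));
	return 0;
}
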
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 75a3d71b895d..19599ef352bc 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -275,7 +275,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 .cpu_features = CPU_FTRS_PPC970,
 .cpu_user_features = COMMON_USER_POWER4 |
 PPC_FEATURE_HAS_ALTIVEC_COMP,
-.mmu_features = MMU_FTR_HPTE_TABLE,
+.mmu_features = MMU_FTRS_PPC970,
 .icache_bsize = 128,
 .dcache_bsize = 128,
 .num_pmcs = 8,
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 87ef8f5ee5bc..200afa5bcfb7 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1452,20 +1452,36 @@ do_ste_alloc:
 _GLOBAL(do_stab_bolted)
 stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
 std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
+mfspr r11,SPRN_DAR /* ea */
 
+/*
+ * check for bad kernel/user address
+ * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+ */
+rldicr. r9,r11,4,(63 - 46 - 4)
+li r9,0 /* VSID = 0 for bad address */
+bne- 0f
+
+/*
+ * Calculate VSID:
+ * This is the kernel vsid, we take the top for context from
+ * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+ * Here we know that (ea >> 60) == 0xc
+ */
+lis r9,(MAX_USER_CONTEXT + 1)@ha
+addi r9,r9,(MAX_USER_CONTEXT + 1)@l
+
+srdi r10,r11,SID_SHIFT
+rldimi r10,r9,ESID_BITS,0 /* proto vsid */
+ASM_VSID_SCRAMBLE(r10, r9, 256M)
+rldic r9,r10,12,16 /* r9 = vsid << 12 */
+
+0:
 /* Hash to the primary group */
 ld r10,PACASTABVIRT(r13)
-mfspr r11,SPRN_DAR
-srdi r11,r11,28
+srdi r11,r11,SID_SHIFT
 rldimi r10,r11,7,52 /* r10 = first ste of the group */
 
-/* Calculate VSID */
-/* This is a kernel address, so protovsid = ESID | 1 << 37 */
-li r9,0x1
-rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
-ASM_VSID_SCRAMBLE(r11, r9, 256M)
-rldic r9,r11,12,16 /* r9 = vsid << 12 */
-
 /* Search the primary group for a free entry */
 1: ld r11,0(r10) /* Test valid bit of the current ste */
 andi. r11,r11,0x80
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 7f7fb7fd991b..13f8d168b3f1 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2832,11 +2832,13 @@ static void unreloc_toc(void)
 {
 }
 #else
-static void __reloc_toc(void *tocstart, unsigned long offset,
-unsigned long nr_entries)
+static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
 {
 unsigned long i;
-unsigned long *toc_entry = (unsigned long *)tocstart;
+unsigned long *toc_entry;
+
+/* Get the start of the TOC by using r2 directly. */
+asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
 
 for (i = 0; i < nr_entries; i++) {
 *toc_entry = *toc_entry + offset;
@@ -2850,8 +2852,7 @@ static void reloc_toc(void)
 unsigned long nr_entries =
 (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
 
-/* Need to add offset to get at __prom_init_toc_start */
-__reloc_toc(__prom_init_toc_start + offset, offset, nr_entries);
+__reloc_toc(offset, nr_entries);
 
 mb();
 }
@@ -2864,8 +2865,7 @@ static void unreloc_toc(void)
 
 mb();
 
-/* __prom_init_toc_start has been relocated, no need to add offset */
-__reloc_toc(__prom_init_toc_start, -offset, nr_entries);
+__reloc_toc(-offset, nr_entries);
 }
 #endif
 #endif
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 245c1b6a0858..f9b30c68ba47 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1428,6 +1428,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
 
 brk.address = bp_info->addr & ~7UL;
 brk.type = HW_BRK_TYPE_TRANSLATE;
+brk.len = 8;
 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
 brk.type |= HW_BRK_TYPE_READ;
 if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index ead58e317294..5d7d29a313eb 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 vcpu3s->context_id[0] = err;
 
 vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
-<< USER_ESID_BITS) - 1;
-vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+<< ESID_BITS) - 1;
+vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS;
 vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
 
 kvmppc_mmu_hpte_init(vcpu);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 1b6e1271719f..f410c3e12c1e 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
@@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, | |||
195 | unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); | 195 | unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); |
196 | unsigned long tprot = prot; | 196 | unsigned long tprot = prot; |
197 | 197 | ||
198 | /* | ||
199 | * If we hit a bad address return error. | ||
200 | */ | ||
201 | if (!vsid) | ||
202 | return -1; | ||
198 | /* Make kernel text executable */ | 203 | /* Make kernel text executable */ |
199 | if (overlaps_kernel_text(vaddr, vaddr + step)) | 204 | if (overlaps_kernel_text(vaddr, vaddr + step)) |
200 | tprot &= ~HPTE_R_N; | 205 | tprot &= ~HPTE_R_N; |
@@ -759,6 +764,8 @@ void __init early_init_mmu(void) | |||
759 | /* Initialize stab / SLB management */ | 764 | /* Initialize stab / SLB management */ |
760 | if (mmu_has_feature(MMU_FTR_SLB)) | 765 | if (mmu_has_feature(MMU_FTR_SLB)) |
761 | slb_initialize(); | 766 | slb_initialize(); |
767 | else | ||
768 | stab_initialize(get_paca()->stab_real); | ||
762 | } | 769 | } |
763 | 770 | ||
764 | #ifdef CONFIG_SMP | 771 | #ifdef CONFIG_SMP |
@@ -922,11 +929,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
922 | DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", | 929 | DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", |
923 | ea, access, trap); | 930 | ea, access, trap); |
924 | 931 | ||
925 | if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) { | ||
926 | DBG_LOW(" out of pgtable range !\n"); | ||
927 | return 1; | ||
928 | } | ||
929 | |||
930 | /* Get region & vsid */ | 932 | /* Get region & vsid */ |
931 | switch (REGION_ID(ea)) { | 933 | switch (REGION_ID(ea)) { |
932 | case USER_REGION_ID: | 934 | case USER_REGION_ID: |
@@ -957,6 +959,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
957 | } | 959 | } |
958 | DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); | 960 | DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); |
959 | 961 | ||
962 | /* Bad address. */ | ||
963 | if (!vsid) { | ||
964 | DBG_LOW("Bad address!\n"); | ||
965 | return 1; | ||
966 | } | ||
960 | /* Get pgdir */ | 967 | /* Get pgdir */ |
961 | pgdir = mm->pgd; | 968 | pgdir = mm->pgd; |
962 | if (pgdir == NULL) | 969 | if (pgdir == NULL) |
@@ -1126,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, | |||
1126 | /* Get VSID */ | 1133 | /* Get VSID */ |
1127 | ssize = user_segment_size(ea); | 1134 | ssize = user_segment_size(ea); |
1128 | vsid = get_vsid(mm->context.id, ea, ssize); | 1135 | vsid = get_vsid(mm->context.id, ea, ssize); |
1136 | if (!vsid) | ||
1137 | return; | ||
1129 | 1138 | ||
1130 | /* Hash doesn't like irqs */ | 1139 | /* Hash doesn't like irqs */ |
1131 | local_irq_save(flags); | 1140 | local_irq_save(flags); |
@@ -1233,6 +1242,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) | |||
1233 | hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); | 1242 | hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); |
1234 | hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); | 1243 | hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); |
1235 | 1244 | ||
1245 | /* Don't create HPTE entries for a bad address */ | ||
1246 | if (!vsid) | ||
1247 | return; | ||
1236 | ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr), | 1248 | ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr), |
1237 | mode, HPTE_V_BOLTED, | 1249 | mode, HPTE_V_BOLTED, |
1238 | mmu_linear_psize, mmu_kernel_ssize); | 1250 | mmu_linear_psize, mmu_kernel_ssize); |
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c index 40bc5b0ace54..d1d1b92c5b99 100644 --- a/arch/powerpc/mm/mmu_context_hash64.c +++ b/arch/powerpc/mm/mmu_context_hash64.c | |||
@@ -29,15 +29,6 @@ | |||
29 | static DEFINE_SPINLOCK(mmu_context_lock); | 29 | static DEFINE_SPINLOCK(mmu_context_lock); |
30 | static DEFINE_IDA(mmu_context_ida); | 30 | static DEFINE_IDA(mmu_context_ida); |
31 | 31 | ||
32 | /* | ||
33 | * 256MB segment | ||
34 | * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments | ||
35 | * available for user mappings. Each segment contains 2^28 bytes. Each | ||
36 | * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts | ||
37 | * (19 == 37 + 28 - 46). | ||
38 | */ | ||
39 | #define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1) | ||
40 | |||
41 | int __init_new_context(void) | 32 | int __init_new_context(void) |
42 | { | 33 | { |
43 | int index; | 34 | int index; |
@@ -56,7 +47,7 @@ again: | |||
56 | else if (err) | 47 | else if (err) |
57 | return err; | 48 | return err; |
58 | 49 | ||
59 | if (index > MAX_CONTEXT) { | 50 | if (index > MAX_USER_CONTEXT) { |
60 | spin_lock(&mmu_context_lock); | 51 | spin_lock(&mmu_context_lock); |
61 | ida_remove(&mmu_context_ida, index); | 52 | ida_remove(&mmu_context_ida, index); |
62 | spin_unlock(&mmu_context_lock); | 53 | spin_unlock(&mmu_context_lock); |
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index e212a271c7a4..654258f165ae 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c | |||
@@ -61,7 +61,7 @@ | |||
61 | #endif | 61 | #endif |
62 | 62 | ||
63 | #ifdef CONFIG_PPC_STD_MMU_64 | 63 | #ifdef CONFIG_PPC_STD_MMU_64 |
64 | #if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) | 64 | #if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT)) |
65 | #error TASK_SIZE_USER64 exceeds user VSID range | 65 | #error TASK_SIZE_USER64 exceeds user VSID range |
66 | #endif | 66 | #endif |
67 | #endif | 67 | #endif |
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index 1a16ca227757..17aa6dfceb34 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S | |||
@@ -31,10 +31,15 @@ | |||
31 | * No other registers are examined or changed. | 31 | * No other registers are examined or changed. |
32 | */ | 32 | */ |
33 | _GLOBAL(slb_allocate_realmode) | 33 | _GLOBAL(slb_allocate_realmode) |
34 | /* r3 = faulting address */ | 34 | /* |
35 | * check for bad kernel/user address | ||
36 | * (ea & ~REGION_MASK) >= PGTABLE_RANGE | ||
37 | */ | ||
38 | rldicr. r9,r3,4,(63 - 46 - 4) | ||
39 | bne- 8f | ||
35 | 40 | ||
36 | srdi r9,r3,60 /* get region */ | 41 | srdi r9,r3,60 /* get region */ |
37 | srdi r10,r3,28 /* get esid */ | 42 | srdi r10,r3,SID_SHIFT /* get esid */ |
38 | cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */ | 43 | cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */ |
39 | 44 | ||
40 | /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */ | 45 | /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */ |
@@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode) | |||
56 | */ | 61 | */ |
57 | _GLOBAL(slb_miss_kernel_load_linear) | 62 | _GLOBAL(slb_miss_kernel_load_linear) |
58 | li r11,0 | 63 | li r11,0 |
59 | li r9,0x1 | ||
60 | /* | 64 | /* |
61 | * for 1T we shift 12 bits more. slb_finish_load_1T will do | 65 | * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 |
62 | * the necessary adjustment | 66 | * r9 = region id. |
63 | */ | 67 | */ |
64 | rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 | 68 | addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha |
69 | addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l | ||
70 | |||
71 | |||
65 | BEGIN_FTR_SECTION | 72 | BEGIN_FTR_SECTION |
66 | b slb_finish_load | 73 | b slb_finish_load |
67 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) | 74 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) |
@@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap) | |||
91 | _GLOBAL(slb_miss_kernel_load_io) | 98 | _GLOBAL(slb_miss_kernel_load_io) |
92 | li r11,0 | 99 | li r11,0 |
93 | 6: | 100 | 6: |
94 | li r9,0x1 | ||
95 | /* | 101 | /* |
96 | * for 1T we shift 12 bits more. slb_finish_load_1T will do | 102 | * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 |
97 | * the necessary adjustment | 103 | * r9 = region id. |
98 | */ | 104 | */ |
99 | rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 | 105 | addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha |
106 | addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l | ||
107 | |||
100 | BEGIN_FTR_SECTION | 108 | BEGIN_FTR_SECTION |
101 | b slb_finish_load | 109 | b slb_finish_load |
102 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) | 110 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) |
103 | b slb_finish_load_1T | 111 | b slb_finish_load_1T |
104 | 112 | ||
105 | 0: /* user address: proto-VSID = context << 15 | ESID. First check | 113 | 0: |
106 | * if the address is within the boundaries of the user region | ||
107 | */ | ||
108 | srdi. r9,r10,USER_ESID_BITS | ||
109 | bne- 8f /* invalid ea bits set */ | ||
110 | |||
111 | |||
112 | /* when using slices, we extract the psize off the slice bitmaps | 114 | /* when using slices, we extract the psize off the slice bitmaps |
113 | * and then we need to get the sllp encoding off the mmu_psize_defs | 115 | * and then we need to get the sllp encoding off the mmu_psize_defs |
114 | * array. | 116 | * array. |
@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) | |||
164 | ld r9,PACACONTEXTID(r13) | 166 | ld r9,PACACONTEXTID(r13) |
165 | BEGIN_FTR_SECTION | 167 | BEGIN_FTR_SECTION |
166 | cmpldi r10,0x1000 | 168 | cmpldi r10,0x1000 |
167 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | ||
168 | rldimi r10,r9,USER_ESID_BITS,0 | ||
169 | BEGIN_FTR_SECTION | ||
170 | bge slb_finish_load_1T | 169 | bge slb_finish_load_1T |
171 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | 170 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) |
172 | b slb_finish_load | 171 | b slb_finish_load |
173 | 172 | ||
174 | 8: /* invalid EA */ | 173 | 8: /* invalid EA */ |
175 | li r10,0 /* BAD_VSID */ | 174 | li r10,0 /* BAD_VSID */ |
175 | li r9,0 /* BAD_VSID */ | ||
176 | li r11,SLB_VSID_USER /* flags don't much matter */ | 176 | li r11,SLB_VSID_USER /* flags don't much matter */ |
177 | b slb_finish_load | 177 | b slb_finish_load |
178 | 178 | ||
@@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user) | |||
221 | 221 | ||
222 | /* get context to calculate proto-VSID */ | 222 | /* get context to calculate proto-VSID */ |
223 | ld r9,PACACONTEXTID(r13) | 223 | ld r9,PACACONTEXTID(r13) |
224 | rldimi r10,r9,USER_ESID_BITS,0 | ||
225 | |||
226 | /* fall through slb_finish_load */ | 224 | /* fall through slb_finish_load */ |
227 | 225 | ||
228 | #endif /* __DISABLED__ */ | 226 | #endif /* __DISABLED__ */ |
@@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user) | |||
231 | /* | 229 | /* |
232 | * Finish loading of an SLB entry and return | 230 | * Finish loading of an SLB entry and return |
233 | * | 231 | * |
234 | * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET | 232 | * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET |
235 | */ | 233 | */ |
236 | slb_finish_load: | 234 | slb_finish_load: |
235 | rldimi r10,r9,ESID_BITS,0 | ||
237 | ASM_VSID_SCRAMBLE(r10,r9,256M) | 236 | ASM_VSID_SCRAMBLE(r10,r9,256M) |
238 | /* | 237 | /* |
239 | * bits above VSID_BITS_256M need to be ignored from r10 | 238 | * bits above VSID_BITS_256M need to be ignored from r10 |
@@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size) | |||
298 | /* | 297 | /* |
299 | * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return. | 298 | * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return. |
300 | * | 299 | * |
301 | * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9 | 300 | * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9 |
302 | */ | 301 | */ |
303 | slb_finish_load_1T: | 302 | slb_finish_load_1T: |
304 | srdi r10,r10,40-28 /* get 1T ESID */ | 303 | srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */ |
304 | rldimi r10,r9,ESID_BITS_1T,0 | ||
305 | ASM_VSID_SCRAMBLE(r10,r9,1T) | 305 | ASM_VSID_SCRAMBLE(r10,r9,1T) |
306 | /* | 306 | /* |
307 | * bits above VSID_BITS_1T need to be ignored from r10 | 307 | * bits above VSID_BITS_1T need to be ignored from r10 |
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 0d82ef50dc3f..023ec8a13f38 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c | |||
@@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | |||
82 | if (!is_kernel_addr(addr)) { | 82 | if (!is_kernel_addr(addr)) { |
83 | ssize = user_segment_size(addr); | 83 | ssize = user_segment_size(addr); |
84 | vsid = get_vsid(mm->context.id, addr, ssize); | 84 | vsid = get_vsid(mm->context.id, addr, ssize); |
85 | WARN_ON(vsid == 0); | ||
86 | } else { | 85 | } else { |
87 | vsid = get_kernel_vsid(addr, mmu_kernel_ssize); | 86 | vsid = get_kernel_vsid(addr, mmu_kernel_ssize); |
88 | ssize = mmu_kernel_ssize; | 87 | ssize = mmu_kernel_ssize; |
89 | } | 88 | } |
89 | WARN_ON(vsid == 0); | ||
90 | vpn = hpt_vpn(addr, vsid, ssize); | 90 | vpn = hpt_vpn(addr, vsid, ssize); |
91 | rpte = __real_pte(__pte(pte), ptep); | 91 | rpte = __real_pte(__pte(pte), ptep); |
92 | 92 | ||
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index b554879bd31e..3c475d6267c7 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c | |||
@@ -420,7 +420,20 @@ static struct attribute_group power7_pmu_events_group = { | |||
420 | .attrs = power7_events_attr, | 420 | .attrs = power7_events_attr, |
421 | }; | 421 | }; |
422 | 422 | ||
423 | PMU_FORMAT_ATTR(event, "config:0-19"); | ||
424 | |||
425 | static struct attribute *power7_pmu_format_attr[] = { | ||
426 | &format_attr_event.attr, | ||
427 | NULL, | ||
428 | }; | ||
429 | |||
430 | struct attribute_group power7_pmu_format_group = { | ||
431 | .name = "format", | ||
432 | .attrs = power7_pmu_format_attr, | ||
433 | }; | ||
434 | |||
423 | static const struct attribute_group *power7_pmu_attr_groups[] = { | 435 | static const struct attribute_group *power7_pmu_attr_groups[] = { |
436 | &power7_pmu_format_group, | ||
424 | &power7_pmu_events_group, | 437 | &power7_pmu_events_group, |
425 | NULL, | 438 | NULL, |
426 | }; | 439 | }; |
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c index 611e92f291c4..7179726ba5c5 100644 --- a/arch/powerpc/platforms/85xx/sgy_cts1000.c +++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c | |||
@@ -69,7 +69,7 @@ static irqreturn_t gpio_halt_irq(int irq, void *__data) | |||
69 | return IRQ_HANDLED; | 69 | return IRQ_HANDLED; |
70 | }; | 70 | }; |
71 | 71 | ||
72 | static int __devinit gpio_halt_probe(struct platform_device *pdev) | 72 | static int gpio_halt_probe(struct platform_device *pdev) |
73 | { | 73 | { |
74 | enum of_gpio_flags flags; | 74 | enum of_gpio_flags flags; |
75 | struct device_node *node = pdev->dev.of_node; | 75 | struct device_node *node = pdev->dev.of_node; |
@@ -128,7 +128,7 @@ static int __devinit gpio_halt_probe(struct platform_device *pdev) | |||
128 | return 0; | 128 | return 0; |
129 | } | 129 | } |
130 | 130 | ||
131 | static int __devexit gpio_halt_remove(struct platform_device *pdev) | 131 | static int gpio_halt_remove(struct platform_device *pdev) |
132 | { | 132 | { |
133 | if (halt_node) { | 133 | if (halt_node) { |
134 | int gpio = of_get_gpio(halt_node, 0); | 134 | int gpio = of_get_gpio(halt_node, 0); |
@@ -165,7 +165,7 @@ static struct platform_driver gpio_halt_driver = { | |||
165 | .of_match_table = gpio_halt_match, | 165 | .of_match_table = gpio_halt_match, |
166 | }, | 166 | }, |
167 | .probe = gpio_halt_probe, | 167 | .probe = gpio_halt_probe, |
168 | .remove = __devexit_p(gpio_halt_remove), | 168 | .remove = gpio_halt_remove, |
169 | }; | 169 | }; |
170 | 170 | ||
171 | module_platform_driver(gpio_halt_driver); | 171 | module_platform_driver(gpio_halt_driver); |
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index cea2f09c4241..18e3b76c78d7 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype | |||
@@ -124,9 +124,8 @@ config 6xx | |||
124 | select PPC_HAVE_PMU_SUPPORT | 124 | select PPC_HAVE_PMU_SUPPORT |
125 | 125 | ||
126 | config POWER3 | 126 | config POWER3 |
127 | bool | ||
128 | depends on PPC64 && PPC_BOOK3S | 127 | depends on PPC64 && PPC_BOOK3S |
129 | default y if !POWER4_ONLY | 128 | def_bool y |
130 | 129 | ||
131 | config POWER4 | 130 | config POWER4 |
132 | depends on PPC64 && PPC_BOOK3S | 131 | depends on PPC64 && PPC_BOOK3S |
@@ -145,8 +144,7 @@ config TUNE_CELL | |||
145 | but somewhat slower on other machines. This option only changes | 144 | but somewhat slower on other machines. This option only changes |
146 | the scheduling of instructions, not the selection of instructions | 145 | the scheduling of instructions, not the selection of instructions |
147 | itself, so the resulting kernel will keep running on all other | 146 | itself, so the resulting kernel will keep running on all other |
148 | machines. When building a kernel that is supposed to run only | 147 | machines. |
149 | on Cell, you should also select the POWER4_ONLY option. | ||
150 | 148 | ||
151 | # this is temp to handle compat with arch=ppc | 149 | # this is temp to handle compat with arch=ppc |
152 | config 8xx | 150 | config 8xx |
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h index 8d4847191ecc..dc9200ca32ed 100644 --- a/arch/s390/include/asm/eadm.h +++ b/arch/s390/include/asm/eadm.h | |||
@@ -34,6 +34,8 @@ struct arsb { | |||
34 | u32 reserved[4]; | 34 | u32 reserved[4]; |
35 | } __packed; | 35 | } __packed; |
36 | 36 | ||
37 | #define EQC_WR_PROHIBIT 22 | ||
38 | |||
37 | struct msb { | 39 | struct msb { |
38 | u8 fmt:4; | 40 | u8 fmt:4; |
39 | u8 oc:4; | 41 | u8 oc:4; |
@@ -96,11 +98,13 @@ struct scm_device { | |||
96 | #define OP_STATE_TEMP_ERR 2 | 98 | #define OP_STATE_TEMP_ERR 2 |
97 | #define OP_STATE_PERM_ERR 3 | 99 | #define OP_STATE_PERM_ERR 3 |
98 | 100 | ||
101 | enum scm_event {SCM_CHANGE, SCM_AVAIL}; | ||
102 | |||
99 | struct scm_driver { | 103 | struct scm_driver { |
100 | struct device_driver drv; | 104 | struct device_driver drv; |
101 | int (*probe) (struct scm_device *scmdev); | 105 | int (*probe) (struct scm_device *scmdev); |
102 | int (*remove) (struct scm_device *scmdev); | 106 | int (*remove) (struct scm_device *scmdev); |
103 | void (*notify) (struct scm_device *scmdev); | 107 | void (*notify) (struct scm_device *scmdev, enum scm_event event); |
104 | void (*handler) (struct scm_device *scmdev, void *data, int error); | 108 | void (*handler) (struct scm_device *scmdev, void *data, int error); |
105 | }; | 109 | }; |
106 | 110 | ||
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index 1d8fe2b17ef6..6b32af30878c 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h | |||
@@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsigned long asce) | |||
74 | 74 | ||
75 | static inline void __tlb_flush_mm(struct mm_struct * mm) | 75 | static inline void __tlb_flush_mm(struct mm_struct * mm) |
76 | { | 76 | { |
77 | if (unlikely(cpumask_empty(mm_cpumask(mm)))) | ||
78 | return; | ||
79 | /* | 77 | /* |
80 | * If the machine has IDTE we prefer to do a per mm flush | 78 | * If the machine has IDTE we prefer to do a per mm flush |
81 | * on all cpus instead of doing a local flush if the mm | 79 | * on all cpus instead of doing a local flush if the mm |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 550228523267..94feff7d6132 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -636,7 +636,8 @@ ENTRY(mcck_int_handler) | |||
636 | UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER | 636 | UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER |
637 | mcck_skip: | 637 | mcck_skip: |
638 | SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT | 638 | SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT |
639 | mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA | 639 | stm %r0,%r7,__PT_R0(%r11) |
640 | mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 | ||
640 | stm %r8,%r9,__PT_PSW(%r11) | 641 | stm %r8,%r9,__PT_PSW(%r11) |
641 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 642 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
642 | l %r1,BASED(.Ldo_machine_check) | 643 | l %r1,BASED(.Ldo_machine_check) |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 9c837c101297..2e6d60c55f90 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -678,8 +678,9 @@ ENTRY(mcck_int_handler) | |||
678 | UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER | 678 | UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER |
679 | LAST_BREAK %r14 | 679 | LAST_BREAK %r14 |
680 | mcck_skip: | 680 | mcck_skip: |
681 | lghi %r14,__LC_GPREGS_SAVE_AREA | 681 | lghi %r14,__LC_GPREGS_SAVE_AREA+64 |
682 | mvc __PT_R0(128,%r11),0(%r14) | 682 | stmg %r0,%r7,__PT_R0(%r11) |
683 | mvc __PT_R8(64,%r11),0(%r14) | ||
683 | stmg %r8,%r9,__PT_PSW(%r11) | 684 | stmg %r8,%r9,__PT_PSW(%r11) |
684 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 685 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
685 | lgr %r2,%r11 # pass pointer to pt_regs | 686 | lgr %r2,%r11 # pass pointer to pt_regs |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index a5360de85ec7..29268859d8ee 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -571,6 +571,8 @@ static void __init setup_memory_end(void) | |||
571 | 571 | ||
572 | /* Split remaining virtual space between 1:1 mapping & vmemmap array */ | 572 | /* Split remaining virtual space between 1:1 mapping & vmemmap array */ |
573 | tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); | 573 | tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); |
574 | /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */ | ||
575 | tmp = SECTION_ALIGN_UP(tmp); | ||
574 | tmp = VMALLOC_START - tmp * sizeof(struct page); | 576 | tmp = VMALLOC_START - tmp * sizeof(struct page); |
575 | tmp &= ~((vmax >> 11) - 1); /* align to page table level */ | 577 | tmp &= ~((vmax >> 11) - 1); /* align to page table level */ |
576 | tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS); | 578 | tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS); |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 289127d5241c..3d361f236308 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -84,12 +84,6 @@ config ARCH_DEFCONFIG | |||
84 | default "arch/sparc/configs/sparc32_defconfig" if SPARC32 | 84 | default "arch/sparc/configs/sparc32_defconfig" if SPARC32 |
85 | default "arch/sparc/configs/sparc64_defconfig" if SPARC64 | 85 | default "arch/sparc/configs/sparc64_defconfig" if SPARC64 |
86 | 86 | ||
87 | # CONFIG_BITS can be used at source level to get 32/64 bits | ||
88 | config BITS | ||
89 | int | ||
90 | default 32 if SPARC32 | ||
91 | default 64 if SPARC64 | ||
92 | |||
93 | config IOMMU_HELPER | 87 | config IOMMU_HELPER |
94 | bool | 88 | bool |
95 | default y if SPARC64 | 89 | default y if SPARC64 |
@@ -197,7 +191,7 @@ config RWSEM_XCHGADD_ALGORITHM | |||
197 | 191 | ||
198 | config GENERIC_HWEIGHT | 192 | config GENERIC_HWEIGHT |
199 | bool | 193 | bool |
200 | default y if !ULTRA_HAS_POPULATION_COUNT | 194 | default y |
201 | 195 | ||
202 | config GENERIC_CALIBRATE_DELAY | 196 | config GENERIC_CALIBRATE_DELAY |
203 | bool | 197 | bool |
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h index d06a26601753..6b67e50fb9b4 100644 --- a/arch/sparc/include/asm/spitfire.h +++ b/arch/sparc/include/asm/spitfire.h | |||
@@ -45,6 +45,7 @@ | |||
45 | #define SUN4V_CHIP_NIAGARA3 0x03 | 45 | #define SUN4V_CHIP_NIAGARA3 0x03 |
46 | #define SUN4V_CHIP_NIAGARA4 0x04 | 46 | #define SUN4V_CHIP_NIAGARA4 0x04 |
47 | #define SUN4V_CHIP_NIAGARA5 0x05 | 47 | #define SUN4V_CHIP_NIAGARA5 0x05 |
48 | #define SUN4V_CHIP_SPARC64X 0x8a | ||
48 | #define SUN4V_CHIP_UNKNOWN 0xff | 49 | #define SUN4V_CHIP_UNKNOWN 0xff |
49 | 50 | ||
50 | #ifndef __ASSEMBLY__ | 51 | #ifndef __ASSEMBLY__ |
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index a6c94a2bf9d4..5c5125895db8 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c | |||
@@ -493,6 +493,12 @@ static void __init sun4v_cpu_probe(void) | |||
493 | sparc_pmu_type = "niagara5"; | 493 | sparc_pmu_type = "niagara5"; |
494 | break; | 494 | break; |
495 | 495 | ||
496 | case SUN4V_CHIP_SPARC64X: | ||
497 | sparc_cpu_type = "SPARC64-X"; | ||
498 | sparc_fpu_type = "SPARC64-X integrated FPU"; | ||
499 | sparc_pmu_type = "sparc64-x"; | ||
500 | break; | ||
501 | |||
496 | default: | 502 | default: |
497 | printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", | 503 | printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", |
498 | prom_cpu_compatible); | 504 | prom_cpu_compatible); |
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 2feb15c35d9e..26b706a1867d 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S | |||
@@ -134,6 +134,8 @@ prom_niagara_prefix: | |||
134 | .asciz "SUNW,UltraSPARC-T" | 134 | .asciz "SUNW,UltraSPARC-T" |
135 | prom_sparc_prefix: | 135 | prom_sparc_prefix: |
136 | .asciz "SPARC-" | 136 | .asciz "SPARC-" |
137 | prom_sparc64x_prefix: | ||
138 | .asciz "SPARC64-X" | ||
137 | .align 4 | 139 | .align 4 |
138 | prom_root_compatible: | 140 | prom_root_compatible: |
139 | .skip 64 | 141 | .skip 64 |
@@ -412,7 +414,7 @@ sun4v_chip_type: | |||
412 | cmp %g2, 'T' | 414 | cmp %g2, 'T' |
413 | be,pt %xcc, 70f | 415 | be,pt %xcc, 70f |
414 | cmp %g2, 'M' | 416 | cmp %g2, 'M' |
415 | bne,pn %xcc, 4f | 417 | bne,pn %xcc, 49f |
416 | nop | 418 | nop |
417 | 419 | ||
418 | 70: ldub [%g1 + 7], %g2 | 420 | 70: ldub [%g1 + 7], %g2 |
@@ -425,7 +427,7 @@ sun4v_chip_type: | |||
425 | cmp %g2, '5' | 427 | cmp %g2, '5' |
426 | be,pt %xcc, 5f | 428 | be,pt %xcc, 5f |
427 | mov SUN4V_CHIP_NIAGARA5, %g4 | 429 | mov SUN4V_CHIP_NIAGARA5, %g4 |
428 | ba,pt %xcc, 4f | 430 | ba,pt %xcc, 49f |
429 | nop | 431 | nop |
430 | 432 | ||
431 | 91: sethi %hi(prom_cpu_compatible), %g1 | 433 | 91: sethi %hi(prom_cpu_compatible), %g1 |
@@ -439,6 +441,25 @@ sun4v_chip_type: | |||
439 | mov SUN4V_CHIP_NIAGARA2, %g4 | 441 | mov SUN4V_CHIP_NIAGARA2, %g4 |
440 | 442 | ||
441 | 4: | 443 | 4: |
444 | /* Athena */ | ||
445 | sethi %hi(prom_cpu_compatible), %g1 | ||
446 | or %g1, %lo(prom_cpu_compatible), %g1 | ||
447 | sethi %hi(prom_sparc64x_prefix), %g7 | ||
448 | or %g7, %lo(prom_sparc64x_prefix), %g7 | ||
449 | mov 9, %g3 | ||
450 | 41: ldub [%g7], %g2 | ||
451 | ldub [%g1], %g4 | ||
452 | cmp %g2, %g4 | ||
453 | bne,pn %icc, 49f | ||
454 | add %g7, 1, %g7 | ||
455 | subcc %g3, 1, %g3 | ||
456 | bne,pt %xcc, 41b | ||
457 | add %g1, 1, %g1 | ||
458 | mov SUN4V_CHIP_SPARC64X, %g4 | ||
459 | ba,pt %xcc, 5f | ||
460 | nop | ||
461 | |||
462 | 49: | ||
442 | mov SUN4V_CHIP_UNKNOWN, %g4 | 463 | mov SUN4V_CHIP_UNKNOWN, %g4 |
443 | 5: sethi %hi(sun4v_chip_type), %g2 | 464 | 5: sethi %hi(sun4v_chip_type), %g2 |
444 | or %g2, %lo(sun4v_chip_type), %g2 | 465 | or %g2, %lo(sun4v_chip_type), %g2 |
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c index fc4320886a3a..4d1487138d26 100644 --- a/arch/sparc/kernel/leon_pci_grpci2.c +++ b/arch/sparc/kernel/leon_pci_grpci2.c | |||
@@ -186,6 +186,8 @@ struct grpci2_cap_first { | |||
186 | #define CAP9_IOMAP_OFS 0x20 | 186 | #define CAP9_IOMAP_OFS 0x20 |
187 | #define CAP9_BARSIZE_OFS 0x24 | 187 | #define CAP9_BARSIZE_OFS 0x24 |
188 | 188 | ||
189 | #define TGT 256 | ||
190 | |||
189 | struct grpci2_priv { | 191 | struct grpci2_priv { |
190 | struct leon_pci_info info; /* must be on top of this structure */ | 192 | struct leon_pci_info info; /* must be on top of this structure */ |
191 | struct grpci2_regs *regs; | 193 | struct grpci2_regs *regs; |
@@ -237,8 +239,12 @@ static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus, | |||
237 | if (where & 0x3) | 239 | if (where & 0x3) |
238 | return -EINVAL; | 240 | return -EINVAL; |
239 | 241 | ||
240 | if (bus == 0 && PCI_SLOT(devfn) != 0) | 242 | if (bus == 0) { |
241 | devfn += (0x8 * 6); | 243 | devfn += (0x8 * 6); /* start at AD16=Device0 */ |
244 | } else if (bus == TGT) { | ||
245 | bus = 0; | ||
246 | devfn = 0; /* special case: bridge controller itself */ | ||
247 | } | ||
242 | 248 | ||
243 | /* Select bus */ | 249 | /* Select bus */ |
244 | spin_lock_irqsave(&grpci2_dev_lock, flags); | 250 | spin_lock_irqsave(&grpci2_dev_lock, flags); |
@@ -303,8 +309,12 @@ static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus, | |||
303 | if (where & 0x3) | 309 | if (where & 0x3) |
304 | return -EINVAL; | 310 | return -EINVAL; |
305 | 311 | ||
306 | if (bus == 0 && PCI_SLOT(devfn) != 0) | 312 | if (bus == 0) { |
307 | devfn += (0x8 * 6); | 313 | devfn += (0x8 * 6); /* start at AD16=Device0 */ |
314 | } else if (bus == TGT) { | ||
315 | bus = 0; | ||
316 | devfn = 0; /* special case: bridge controller itself */ | ||
317 | } | ||
308 | 318 | ||
309 | /* Select bus */ | 319 | /* Select bus */ |
310 | spin_lock_irqsave(&grpci2_dev_lock, flags); | 320 | spin_lock_irqsave(&grpci2_dev_lock, flags); |
@@ -368,7 +378,7 @@ static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn, | |||
368 | unsigned int busno = bus->number; | 378 | unsigned int busno = bus->number; |
369 | int ret; | 379 | int ret; |
370 | 380 | ||
371 | if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) { | 381 | if (PCI_SLOT(devfn) > 15 || busno > 255) { |
372 | *val = ~0; | 382 | *val = ~0; |
373 | return 0; | 383 | return 0; |
374 | } | 384 | } |
@@ -406,7 +416,7 @@ static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn, | |||
406 | struct grpci2_priv *priv = grpci2priv; | 416 | struct grpci2_priv *priv = grpci2priv; |
407 | unsigned int busno = bus->number; | 417 | unsigned int busno = bus->number; |
408 | 418 | ||
409 | if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) | 419 | if (PCI_SLOT(devfn) > 15 || busno > 255) |
410 | return 0; | 420 | return 0; |
411 | 421 | ||
412 | #ifdef GRPCI2_DEBUG_CFGACCESS | 422 | #ifdef GRPCI2_DEBUG_CFGACCESS |
@@ -578,15 +588,15 @@ void grpci2_hw_init(struct grpci2_priv *priv) | |||
578 | REGSTORE(regs->ahbmst_map[i], priv->pci_area); | 588 | REGSTORE(regs->ahbmst_map[i], priv->pci_area); |
579 | 589 | ||
580 | /* Get the GRPCI2 Host PCI ID */ | 590 | /* Get the GRPCI2 Host PCI ID */ |
581 | grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid); | 591 | grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid); |
582 | 592 | ||
583 | /* Get address to first (always defined) capability structure */ | 593 | /* Get address to first (always defined) capability structure */ |
584 | grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr); | 594 | grpci2_cfg_r8(priv, TGT, 0, PCI_CAPABILITY_LIST, &capptr); |
585 | 595 | ||
586 | /* Enable/Disable Byte twisting */ | 596 | /* Enable/Disable Byte twisting */ |
587 | grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map); | 597 | grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map); |
588 | io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); | 598 | io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); |
589 | grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map); | 599 | grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map); |
590 | 600 | ||
591 | /* Setup the Host's PCI Target BARs for other peripherals to access, | 601 | /* Setup the Host's PCI Target BARs for other peripherals to access, |
592 | * and do DMA to the host's memory. The target BARs can be sized and | 602 | * and do DMA to the host's memory. The target BARs can be sized and |
@@ -617,17 +627,18 @@ void grpci2_hw_init(struct grpci2_priv *priv) | |||
617 | pciadr = 0; | 627 | pciadr = 0; |
618 | } | 628 | } |
619 | } | 629 | } |
620 | grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz); | 630 | grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4, |
621 | grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); | 631 | bar_sz); |
622 | grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); | 632 | grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); |
633 | grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); | ||
623 | printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n", | 634 | printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n", |
624 | i, pciadr, ahbadr); | 635 | i, pciadr, ahbadr); |
625 | } | 636 | } |
626 | 637 | ||
627 | /* set as bus master and enable pci memory responses */ | 638 | /* set as bus master and enable pci memory responses */ |
628 | grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data); | 639 | grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data); |
629 | data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); | 640 | data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); |
630 | grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data); | 641 | grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data); |
631 | 642 | ||
632 | /* Enable Error respone (CPU-TRAP) on illegal memory access. */ | 643 | /* Enable Error respone (CPU-TRAP) on illegal memory access. */ |
633 | REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE); | 644 | REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE); |
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig index 8c5eff6d6df5..47684815e5c8 100644 --- a/arch/tile/configs/tilegx_defconfig +++ b/arch/tile/configs/tilegx_defconfig | |||
@@ -330,7 +330,6 @@ CONFIG_MD_RAID0=m | |||
330 | CONFIG_MD_RAID1=m | 330 | CONFIG_MD_RAID1=m |
331 | CONFIG_MD_RAID10=m | 331 | CONFIG_MD_RAID10=m |
332 | CONFIG_MD_RAID456=m | 332 | CONFIG_MD_RAID456=m |
333 | CONFIG_MULTICORE_RAID456=y | ||
334 | CONFIG_MD_FAULTY=m | 333 | CONFIG_MD_FAULTY=m |
335 | CONFIG_BLK_DEV_DM=m | 334 | CONFIG_BLK_DEV_DM=m |
336 | CONFIG_DM_DEBUG=y | 335 | CONFIG_DM_DEBUG=y |
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig index e7a3dfcbcda7..dd2b8f0c631f 100644 --- a/arch/tile/configs/tilepro_defconfig +++ b/arch/tile/configs/tilepro_defconfig | |||
@@ -324,7 +324,6 @@ CONFIG_MD_RAID0=m | |||
324 | CONFIG_MD_RAID1=m | 324 | CONFIG_MD_RAID1=m |
325 | CONFIG_MD_RAID10=m | 325 | CONFIG_MD_RAID10=m |
326 | CONFIG_MD_RAID456=m | 326 | CONFIG_MD_RAID456=m |
327 | CONFIG_MULTICORE_RAID456=y | ||
328 | CONFIG_MD_FAULTY=m | 327 | CONFIG_MD_FAULTY=m |
329 | CONFIG_BLK_DEV_DM=m | 328 | CONFIG_BLK_DEV_DM=m |
330 | CONFIG_DM_DEBUG=y | 329 | CONFIG_DM_DEBUG=y |
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index d3ddd17405d0..5a6d2873f80e 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h | |||
@@ -77,6 +77,7 @@ struct arch_specific_insn { | |||
77 | * a post_handler or break_handler). | 77 | * a post_handler or break_handler). |
78 | */ | 78 | */ |
79 | int boostable; | 79 | int boostable; |
80 | bool if_modifier; | ||
80 | }; | 81 | }; |
81 | 82 | ||
82 | struct arch_optimized_insn { | 83 | struct arch_optimized_insn { |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 635a74d22409..4979778cc7fb 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -414,8 +414,8 @@ struct kvm_vcpu_arch { | |||
414 | gpa_t time; | 414 | gpa_t time; |
415 | struct pvclock_vcpu_time_info hv_clock; | 415 | struct pvclock_vcpu_time_info hv_clock; |
416 | unsigned int hw_tsc_khz; | 416 | unsigned int hw_tsc_khz; |
417 | unsigned int time_offset; | 417 | struct gfn_to_hva_cache pv_time; |
418 | struct page *time_page; | 418 | bool pv_time_enabled; |
419 | /* set guest stopped flag in pvclock flags field */ | 419 | /* set guest stopped flag in pvclock flags field */ |
420 | bool pvclock_set_guest_stopped_request; | 420 | bool pvclock_set_guest_stopped_request; |
421 | 421 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 529c8931fc02..dab7580c47ae 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -101,6 +101,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly = | |||
101 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 101 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
102 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 102 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
103 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ | 103 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ |
104 | INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */ | ||
105 | INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */ | ||
106 | INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ | ||
107 | INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ | ||
104 | INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ | 108 | INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ |
105 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ | 109 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ |
106 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ | 110 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ |
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 3f06e6149981..7bfe318d3d8a 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
@@ -375,6 +375,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p) | |||
375 | else | 375 | else |
376 | p->ainsn.boostable = -1; | 376 | p->ainsn.boostable = -1; |
377 | 377 | ||
378 | /* Check whether the instruction modifies Interrupt Flag or not */ | ||
379 | p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn); | ||
380 | |||
378 | /* Also, displacement change doesn't affect the first byte */ | 381 | /* Also, displacement change doesn't affect the first byte */ |
379 | p->opcode = p->ainsn.insn[0]; | 382 | p->opcode = p->ainsn.insn[0]; |
380 | } | 383 | } |
@@ -434,7 +437,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | |||
434 | __this_cpu_write(current_kprobe, p); | 437 | __this_cpu_write(current_kprobe, p); |
435 | kcb->kprobe_saved_flags = kcb->kprobe_old_flags | 438 | kcb->kprobe_saved_flags = kcb->kprobe_old_flags |
436 | = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); | 439 | = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); |
437 | if (is_IF_modifier(p->ainsn.insn)) | 440 | if (p->ainsn.if_modifier) |
438 | kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; | 441 | kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; |
439 | } | 442 | } |
440 | 443 | ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f71500af1f81..f19ac0aca60d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1406 | unsigned long flags, this_tsc_khz; | 1406 | unsigned long flags, this_tsc_khz; |
1407 | struct kvm_vcpu_arch *vcpu = &v->arch; | 1407 | struct kvm_vcpu_arch *vcpu = &v->arch; |
1408 | struct kvm_arch *ka = &v->kvm->arch; | 1408 | struct kvm_arch *ka = &v->kvm->arch; |
1409 | void *shared_kaddr; | ||
1410 | s64 kernel_ns, max_kernel_ns; | 1409 | s64 kernel_ns, max_kernel_ns; |
1411 | u64 tsc_timestamp, host_tsc; | 1410 | u64 tsc_timestamp, host_tsc; |
1412 | struct pvclock_vcpu_time_info *guest_hv_clock; | 1411 | struct pvclock_vcpu_time_info guest_hv_clock; |
1413 | u8 pvclock_flags; | 1412 | u8 pvclock_flags; |
1414 | bool use_master_clock; | 1413 | bool use_master_clock; |
1415 | 1414 | ||
1416 | kernel_ns = 0; | 1415 | kernel_ns = 0; |
1417 | host_tsc = 0; | 1416 | host_tsc = 0; |
1418 | 1417 | ||
1419 | /* Keep irq disabled to prevent changes to the clock */ | ||
1420 | local_irq_save(flags); | ||
1421 | this_tsc_khz = __get_cpu_var(cpu_tsc_khz); | ||
1422 | if (unlikely(this_tsc_khz == 0)) { | ||
1423 | local_irq_restore(flags); | ||
1424 | kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); | ||
1425 | return 1; | ||
1426 | } | ||
1427 | |||
1428 | /* | 1418 | /* |
1429 | * If the host uses TSC clock, then passthrough TSC as stable | 1419 | * If the host uses TSC clock, then passthrough TSC as stable |
1430 | * to the guest. | 1420 | * to the guest. |
@@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1436 | kernel_ns = ka->master_kernel_ns; | 1426 | kernel_ns = ka->master_kernel_ns; |
1437 | } | 1427 | } |
1438 | spin_unlock(&ka->pvclock_gtod_sync_lock); | 1428 | spin_unlock(&ka->pvclock_gtod_sync_lock); |
1429 | |||
1430 | /* Keep irq disabled to prevent changes to the clock */ | ||
1431 | local_irq_save(flags); | ||
1432 | this_tsc_khz = __get_cpu_var(cpu_tsc_khz); | ||
1433 | if (unlikely(this_tsc_khz == 0)) { | ||
1434 | local_irq_restore(flags); | ||
1435 | kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); | ||
1436 | return 1; | ||
1437 | } | ||
1439 | if (!use_master_clock) { | 1438 | if (!use_master_clock) { |
1440 | host_tsc = native_read_tsc(); | 1439 | host_tsc = native_read_tsc(); |
1441 | kernel_ns = get_kernel_ns(); | 1440 | kernel_ns = get_kernel_ns(); |
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1463 | 1462 | ||
1464 | local_irq_restore(flags); | 1463 | local_irq_restore(flags); |
1465 | 1464 | ||
1466 | if (!vcpu->time_page) | 1465 | if (!vcpu->pv_time_enabled) |
1467 | return 0; | 1466 | return 0; |
1468 | 1467 | ||
1469 | /* | 1468 | /* |
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1525 | */ | 1524 | */ |
1526 | vcpu->hv_clock.version += 2; | 1525 | vcpu->hv_clock.version += 2; |
1527 | 1526 | ||
1528 | shared_kaddr = kmap_atomic(vcpu->time_page); | 1527 | if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, |
1529 | 1528 | &guest_hv_clock, sizeof(guest_hv_clock)))) | |
1530 | guest_hv_clock = shared_kaddr + vcpu->time_offset; | 1529 | return 0; |
1531 | 1530 | ||
1532 | /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ | 1531 | /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ |
1533 | pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); | 1532 | pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); |
1534 | 1533 | ||
1535 | if (vcpu->pvclock_set_guest_stopped_request) { | 1534 | if (vcpu->pvclock_set_guest_stopped_request) { |
1536 | pvclock_flags |= PVCLOCK_GUEST_STOPPED; | 1535 | pvclock_flags |= PVCLOCK_GUEST_STOPPED; |
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1543 | 1542 | ||
1544 | vcpu->hv_clock.flags = pvclock_flags; | 1543 | vcpu->hv_clock.flags = pvclock_flags; |
1545 | 1544 | ||
1546 | memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, | 1545 | kvm_write_guest_cached(v->kvm, &vcpu->pv_time, |
1547 | sizeof(vcpu->hv_clock)); | 1546 | &vcpu->hv_clock, |
1548 | 1547 | sizeof(vcpu->hv_clock)); | |
1549 | kunmap_atomic(shared_kaddr); | ||
1550 | |||
1551 | mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); | ||
1552 | return 0; | 1548 | return 0; |
1553 | } | 1549 | } |
1554 | 1550 | ||
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) | |||
1837 | 1833 | ||
1838 | static void kvmclock_reset(struct kvm_vcpu *vcpu) | 1834 | static void kvmclock_reset(struct kvm_vcpu *vcpu) |
1839 | { | 1835 | { |
1840 | if (vcpu->arch.time_page) { | 1836 | vcpu->arch.pv_time_enabled = false; |
1841 | kvm_release_page_dirty(vcpu->arch.time_page); | ||
1842 | vcpu->arch.time_page = NULL; | ||
1843 | } | ||
1844 | } | 1837 | } |
1845 | 1838 | ||
1846 | static void accumulate_steal_time(struct kvm_vcpu *vcpu) | 1839 | static void accumulate_steal_time(struct kvm_vcpu *vcpu) |
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
1947 | break; | 1940 | break; |
1948 | case MSR_KVM_SYSTEM_TIME_NEW: | 1941 | case MSR_KVM_SYSTEM_TIME_NEW: |
1949 | case MSR_KVM_SYSTEM_TIME: { | 1942 | case MSR_KVM_SYSTEM_TIME: { |
1943 | u64 gpa_offset; | ||
1950 | kvmclock_reset(vcpu); | 1944 | kvmclock_reset(vcpu); |
1951 | 1945 | ||
1952 | vcpu->arch.time = data; | 1946 | vcpu->arch.time = data; |
@@ -1956,14 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
1956 | if (!(data & 1)) | 1950 | if (!(data & 1)) |
1957 | break; | 1951 | break; |
1958 | 1952 | ||
1959 | /* ...but clean it before doing the actual write */ | 1953 | gpa_offset = data & ~(PAGE_MASK | 1); |
1960 | vcpu->arch.time_offset = data & ~(PAGE_MASK | 1); | ||
1961 | 1954 | ||
1962 | vcpu->arch.time_page = | 1955 | /* Check that the address is 32-byte aligned. */ |
1963 | gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); | 1956 | if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1)) |
1957 | break; | ||
1964 | 1958 | ||
1965 | if (is_error_page(vcpu->arch.time_page)) | 1959 | if (kvm_gfn_to_hva_cache_init(vcpu->kvm, |
1966 | vcpu->arch.time_page = NULL; | 1960 | &vcpu->arch.pv_time, data & ~1ULL)) |
1961 | vcpu->arch.pv_time_enabled = false; | ||
1962 | else | ||
1963 | vcpu->arch.pv_time_enabled = true; | ||
1967 | 1964 | ||
1968 | break; | 1965 | break; |
1969 | } | 1966 | } |
@@ -2967,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, | |||
2967 | */ | 2964 | */ |
2968 | static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) | 2965 | static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) |
2969 | { | 2966 | { |
2970 | if (!vcpu->arch.time_page) | 2967 | if (!vcpu->arch.pv_time_enabled) |
2971 | return -EINVAL; | 2968 | return -EINVAL; |
2972 | vcpu->arch.pvclock_set_guest_stopped_request = true; | 2969 | vcpu->arch.pvclock_set_guest_stopped_request = true; |
2973 | kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); | 2970 | kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); |
@@ -6718,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
6718 | goto fail_free_wbinvd_dirty_mask; | 6715 | goto fail_free_wbinvd_dirty_mask; |
6719 | 6716 | ||
6720 | vcpu->arch.ia32_tsc_adjust_msr = 0x0; | 6717 | vcpu->arch.ia32_tsc_adjust_msr = 0x0; |
6718 | vcpu->arch.pv_time_enabled = false; | ||
6721 | kvm_async_pf_hash_reset(vcpu); | 6719 | kvm_async_pf_hash_reset(vcpu); |
6722 | kvm_pmu_init(vcpu); | 6720 | kvm_pmu_init(vcpu); |
6723 | 6721 | ||
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c index 093c43554963..1f44e56cc65d 100644 --- a/drivers/amba/tegra-ahb.c +++ b/drivers/amba/tegra-ahb.c | |||
@@ -158,7 +158,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn) | |||
158 | EXPORT_SYMBOL(tegra_ahb_enable_smmu); | 158 | EXPORT_SYMBOL(tegra_ahb_enable_smmu); |
159 | #endif | 159 | #endif |
160 | 160 | ||
161 | #ifdef CONFIG_PM_SLEEP | 161 | #ifdef CONFIG_PM |
162 | static int tegra_ahb_suspend(struct device *dev) | 162 | static int tegra_ahb_suspend(struct device *dev) |
163 | { | 163 | { |
164 | int i; | 164 | int i; |
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 3e751b74615e..a5a3ebcbdd2c 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
@@ -59,15 +59,16 @@ config ATA_ACPI | |||
59 | option libata.noacpi=1 | 59 | option libata.noacpi=1 |
60 | 60 | ||
61 | config SATA_ZPODD | 61 | config SATA_ZPODD |
62 | bool "SATA Zero Power ODD Support" | 62 | bool "SATA Zero Power Optical Disc Drive (ZPODD) support" |
63 | depends on ATA_ACPI | 63 | depends on ATA_ACPI |
64 | default n | 64 | default n |
65 | help | 65 | help |
66 | This option adds support for SATA ZPODD. It requires both | 66 | This option adds support for SATA Zero Power Optical Disc |
67 | ODD and the platform support, and if enabled, will automatically | 67 | Drive (ZPODD). It requires both the ODD and the platform |
68 | power on/off the ODD when certain condition is satisfied. This | 68 | support, and if enabled, will automatically power on/off the |
69 | does not impact user's experience of the ODD, only power is saved | 69 | ODD when a certain condition is satisfied. This does not impact |
70 | when ODD is not in use(i.e. no disc inside). | 70 | end user's experience of the ODD, only power is saved when |
71 | the ODD is not in use (i.e. no disc inside). | ||
71 | 72 | ||
72 | If unsure, say N. | 73 | If unsure, say N. |
73 | 74 | ||
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index a99112cfd8b1..6a67b07de494 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -281,6 +281,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
281 | { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */ | 281 | { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */ |
282 | { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */ | 282 | { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */ |
283 | { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */ | 283 | { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */ |
284 | { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */ | ||
285 | { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */ | ||
284 | { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ | 286 | { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ |
285 | { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */ | 287 | { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */ |
286 | { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */ | 288 | { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */ |
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index d2ba439cfe54..ffdd32d22602 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c | |||
@@ -1547,6 +1547,10 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev) | |||
1547 | 1547 | ||
1548 | static int prefer_ms_hyperv = 1; | 1548 | static int prefer_ms_hyperv = 1; |
1549 | module_param(prefer_ms_hyperv, int, 0); | 1549 | module_param(prefer_ms_hyperv, int, 0); |
1550 | MODULE_PARM_DESC(prefer_ms_hyperv, | ||
1551 | "Prefer Hyper-V paravirtualization drivers instead of ATA, " | ||
1552 | "0 - Use ATA drivers, " | ||
1553 | "1 (Default) - Use the paravirtualization drivers."); | ||
1550 | 1554 | ||
1551 | static void piix_ignore_devices_quirk(struct ata_host *host) | 1555 | static void piix_ignore_devices_quirk(struct ata_host *host) |
1552 | { | 1556 | { |
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index beea3115577e..8a52dab412e2 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c | |||
@@ -1027,7 +1027,7 @@ static void ata_acpi_register_power_resource(struct ata_device *dev) | |||
1027 | 1027 | ||
1028 | handle = ata_dev_acpi_handle(dev); | 1028 | handle = ata_dev_acpi_handle(dev); |
1029 | if (handle) | 1029 | if (handle) |
1030 | acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev); | 1030 | acpi_dev_pm_add_dependent(handle, &sdev->sdev_gendev); |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | static void ata_acpi_unregister_power_resource(struct ata_device *dev) | 1033 | static void ata_acpi_unregister_power_resource(struct ata_device *dev) |
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c index 70b0e01372b3..6ef27e98c508 100644 --- a/drivers/ata/pata_samsung_cf.c +++ b/drivers/ata/pata_samsung_cf.c | |||
@@ -661,18 +661,7 @@ static struct platform_driver pata_s3c_driver = { | |||
661 | }, | 661 | }, |
662 | }; | 662 | }; |
663 | 663 | ||
664 | static int __init pata_s3c_init(void) | 664 | module_platform_driver_probe(pata_s3c_driver, pata_s3c_probe); |
665 | { | ||
666 | return platform_driver_probe(&pata_s3c_driver, pata_s3c_probe); | ||
667 | } | ||
668 | |||
669 | static void __exit pata_s3c_exit(void) | ||
670 | { | ||
671 | platform_driver_unregister(&pata_s3c_driver); | ||
672 | } | ||
673 | |||
674 | module_init(pata_s3c_init); | ||
675 | module_exit(pata_s3c_exit); | ||
676 | 665 | ||
677 | MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>"); | 666 | MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>"); |
678 | MODULE_DESCRIPTION("low-level driver for Samsung PATA controller"); | 667 | MODULE_DESCRIPTION("low-level driver for Samsung PATA controller"); |
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 124b2c1d9c0b..608f82fed632 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c | |||
@@ -1511,8 +1511,7 @@ error_exit_with_cleanup: | |||
1511 | 1511 | ||
1512 | if (hcr_base) | 1512 | if (hcr_base) |
1513 | iounmap(hcr_base); | 1513 | iounmap(hcr_base); |
1514 | if (host_priv) | 1514 | kfree(host_priv); |
1515 | kfree(host_priv); | ||
1516 | 1515 | ||
1517 | return retval; | 1516 | return retval; |
1518 | } | 1517 | } |
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c index 07fb2dfaae13..9dcefe40380b 100644 --- a/drivers/block/nvme.c +++ b/drivers/block/nvme.c | |||
@@ -135,6 +135,7 @@ static inline void _nvme_check_size(void) | |||
135 | BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); | 135 | BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); |
136 | BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); | 136 | BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); |
137 | BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); | 137 | BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); |
138 | BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); | ||
138 | } | 139 | } |
139 | 140 | ||
140 | typedef void (*nvme_completion_fn)(struct nvme_dev *, void *, | 141 | typedef void (*nvme_completion_fn)(struct nvme_dev *, void *, |
@@ -237,7 +238,8 @@ static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid, | |||
237 | *fn = special_completion; | 238 | *fn = special_completion; |
238 | return CMD_CTX_INVALID; | 239 | return CMD_CTX_INVALID; |
239 | } | 240 | } |
240 | *fn = info[cmdid].fn; | 241 | if (fn) |
242 | *fn = info[cmdid].fn; | ||
241 | ctx = info[cmdid].ctx; | 243 | ctx = info[cmdid].ctx; |
242 | info[cmdid].fn = special_completion; | 244 | info[cmdid].fn = special_completion; |
243 | info[cmdid].ctx = CMD_CTX_COMPLETED; | 245 | info[cmdid].ctx = CMD_CTX_COMPLETED; |
@@ -335,6 +337,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp) | |||
335 | iod->offset = offsetof(struct nvme_iod, sg[nseg]); | 337 | iod->offset = offsetof(struct nvme_iod, sg[nseg]); |
336 | iod->npages = -1; | 338 | iod->npages = -1; |
337 | iod->length = nbytes; | 339 | iod->length = nbytes; |
340 | iod->nents = 0; | ||
338 | } | 341 | } |
339 | 342 | ||
340 | return iod; | 343 | return iod; |
@@ -375,7 +378,8 @@ static void bio_completion(struct nvme_dev *dev, void *ctx, | |||
375 | struct bio *bio = iod->private; | 378 | struct bio *bio = iod->private; |
376 | u16 status = le16_to_cpup(&cqe->status) >> 1; | 379 | u16 status = le16_to_cpup(&cqe->status) >> 1; |
377 | 380 | ||
378 | dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, | 381 | if (iod->nents) |
382 | dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, | ||
379 | bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | 383 | bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); |
380 | nvme_free_iod(dev, iod); | 384 | nvme_free_iod(dev, iod); |
381 | if (status) { | 385 | if (status) { |
@@ -589,7 +593,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, | |||
589 | 593 | ||
590 | result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs); | 594 | result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs); |
591 | if (result < 0) | 595 | if (result < 0) |
592 | goto free_iod; | 596 | goto free_cmdid; |
593 | length = result; | 597 | length = result; |
594 | 598 | ||
595 | cmnd->rw.command_id = cmdid; | 599 | cmnd->rw.command_id = cmdid; |
@@ -609,6 +613,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, | |||
609 | 613 | ||
610 | return 0; | 614 | return 0; |
611 | 615 | ||
616 | free_cmdid: | ||
617 | free_cmdid(nvmeq, cmdid, NULL); | ||
612 | free_iod: | 618 | free_iod: |
613 | nvme_free_iod(nvmeq->dev, iod); | 619 | nvme_free_iod(nvmeq->dev, iod); |
614 | nomem: | 620 | nomem: |
@@ -835,8 +841,8 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns, | |||
835 | return nvme_submit_admin_cmd(dev, &c, NULL); | 841 | return nvme_submit_admin_cmd(dev, &c, NULL); |
836 | } | 842 | } |
837 | 843 | ||
838 | static int nvme_get_features(struct nvme_dev *dev, unsigned fid, | 844 | static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, |
839 | unsigned nsid, dma_addr_t dma_addr) | 845 | dma_addr_t dma_addr, u32 *result) |
840 | { | 846 | { |
841 | struct nvme_command c; | 847 | struct nvme_command c; |
842 | 848 | ||
@@ -846,7 +852,7 @@ static int nvme_get_features(struct nvme_dev *dev, unsigned fid, | |||
846 | c.features.prp1 = cpu_to_le64(dma_addr); | 852 | c.features.prp1 = cpu_to_le64(dma_addr); |
847 | c.features.fid = cpu_to_le32(fid); | 853 | c.features.fid = cpu_to_le32(fid); |
848 | 854 | ||
849 | return nvme_submit_admin_cmd(dev, &c, NULL); | 855 | return nvme_submit_admin_cmd(dev, &c, result); |
850 | } | 856 | } |
851 | 857 | ||
852 | static int nvme_set_features(struct nvme_dev *dev, unsigned fid, | 858 | static int nvme_set_features(struct nvme_dev *dev, unsigned fid, |
@@ -906,6 +912,10 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid) | |||
906 | 912 | ||
907 | spin_lock_irq(&nvmeq->q_lock); | 913 | spin_lock_irq(&nvmeq->q_lock); |
908 | nvme_cancel_ios(nvmeq, false); | 914 | nvme_cancel_ios(nvmeq, false); |
915 | while (bio_list_peek(&nvmeq->sq_cong)) { | ||
916 | struct bio *bio = bio_list_pop(&nvmeq->sq_cong); | ||
917 | bio_endio(bio, -EIO); | ||
918 | } | ||
909 | spin_unlock_irq(&nvmeq->q_lock); | 919 | spin_unlock_irq(&nvmeq->q_lock); |
910 | 920 | ||
911 | irq_set_affinity_hint(vector, NULL); | 921 | irq_set_affinity_hint(vector, NULL); |
@@ -1230,12 +1240,17 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev, | |||
1230 | if (length != cmd.data_len) | 1240 | if (length != cmd.data_len) |
1231 | status = -ENOMEM; | 1241 | status = -ENOMEM; |
1232 | else | 1242 | else |
1233 | status = nvme_submit_admin_cmd(dev, &c, NULL); | 1243 | status = nvme_submit_admin_cmd(dev, &c, &cmd.result); |
1234 | 1244 | ||
1235 | if (cmd.data_len) { | 1245 | if (cmd.data_len) { |
1236 | nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); | 1246 | nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); |
1237 | nvme_free_iod(dev, iod); | 1247 | nvme_free_iod(dev, iod); |
1238 | } | 1248 | } |
1249 | |||
1250 | if (!status && copy_to_user(&ucmd->result, &cmd.result, | ||
1251 | sizeof(cmd.result))) | ||
1252 | status = -EFAULT; | ||
1253 | |||
1239 | return status; | 1254 | return status; |
1240 | } | 1255 | } |
1241 | 1256 | ||
@@ -1523,9 +1538,9 @@ static int nvme_dev_add(struct nvme_dev *dev) | |||
1523 | continue; | 1538 | continue; |
1524 | 1539 | ||
1525 | res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i, | 1540 | res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i, |
1526 | dma_addr + 4096); | 1541 | dma_addr + 4096, NULL); |
1527 | if (res) | 1542 | if (res) |
1528 | continue; | 1543 | memset(mem + 4096, 0, 4096); |
1529 | 1544 | ||
1530 | ns = nvme_alloc_ns(dev, i, mem, mem + 4096); | 1545 | ns = nvme_alloc_ns(dev, i, mem, mem + 4096); |
1531 | if (ns) | 1546 | if (ns) |
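
The nvme_submit_bio_queue() hunk above adds a free_cmdid label so that a failed nvme_map_bio() releases the command id as well as the iod, and bio_completion() now skips dma_unmap_sg() when nothing was mapped (iod->nents == 0). The cascading-goto unwind pattern this relies on is sketched below in a minimal, self-contained form; the resource names and malloc() stand-ins are illustrative, not the driver's actual allocators.

#include <stdlib.h>

/* Acquire resources in order; on failure, release only what was acquired,
 * in reverse order, via cascading labels. */
static int submit(int map_fails)
{
	char *iod, *cmdid;

	iod = malloc(64);                 /* stands in for nvme_alloc_iod()    */
	if (!iod)
		goto nomem;

	cmdid = malloc(4);                /* stands in for alloc_cmdid()       */
	if (!cmdid)
		goto free_iod;

	if (map_fails)                    /* stands in for nvme_map_bio() < 0  */
		goto free_cmdid;

	free(cmdid);                      /* the real driver keeps these alive */
	free(iod);                        /* until command completion          */
	return 0;

free_cmdid:
	free(cmdid);
free_iod:
	free(iod);
nomem:
	return -1;
}

int main(void)
{
	return (submit(0) == 0 && submit(1) == -1) ? EXIT_SUCCESS : EXIT_FAILURE;
}
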
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index a8a41e07a221..b282af181b44 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c | |||
@@ -74,8 +74,10 @@ static struct usb_device_id ath3k_table[] = { | |||
74 | 74 | ||
75 | /* Atheros AR3012 with sflash firmware*/ | 75 | /* Atheros AR3012 with sflash firmware*/ |
76 | { USB_DEVICE(0x0CF3, 0x3004) }, | 76 | { USB_DEVICE(0x0CF3, 0x3004) }, |
77 | { USB_DEVICE(0x0CF3, 0x3008) }, | ||
77 | { USB_DEVICE(0x0CF3, 0x311D) }, | 78 | { USB_DEVICE(0x0CF3, 0x311D) }, |
78 | { USB_DEVICE(0x13d3, 0x3375) }, | 79 | { USB_DEVICE(0x13d3, 0x3375) }, |
80 | { USB_DEVICE(0x04CA, 0x3004) }, | ||
79 | { USB_DEVICE(0x04CA, 0x3005) }, | 81 | { USB_DEVICE(0x04CA, 0x3005) }, |
80 | { USB_DEVICE(0x04CA, 0x3006) }, | 82 | { USB_DEVICE(0x04CA, 0x3006) }, |
81 | { USB_DEVICE(0x04CA, 0x3008) }, | 83 | { USB_DEVICE(0x04CA, 0x3008) }, |
@@ -106,8 +108,10 @@ static struct usb_device_id ath3k_blist_tbl[] = { | |||
106 | 108 | ||
107 | /* Atheros AR3012 with sflash firmware*/ | 109 | /* Atheros AR3012 with sflash firmware*/ |
108 | { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, | 110 | { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, |
111 | { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, | ||
109 | { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, | 112 | { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, |
110 | { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, | 113 | { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, |
114 | { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, | ||
111 | { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, | 115 | { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, |
112 | { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, | 116 | { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, |
113 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, | 117 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 7e351e345476..e547851870e7 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -132,8 +132,10 @@ static struct usb_device_id blacklist_table[] = { | |||
132 | 132 | ||
133 | /* Atheros 3012 with sflash firmware */ | 133 | /* Atheros 3012 with sflash firmware */ |
134 | { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, | 134 | { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, |
135 | { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, | ||
135 | { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, | 136 | { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, |
136 | { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, | 137 | { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, |
138 | { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, | ||
137 | { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, | 139 | { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, |
138 | { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, | 140 | { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, |
139 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, | 141 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, |
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c index b5538bba7a10..09c63315e579 100644 --- a/drivers/clk/clk-vt8500.c +++ b/drivers/clk/clk-vt8500.c | |||
@@ -157,7 +157,7 @@ static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate, | |||
157 | divisor = parent_rate / rate; | 157 | divisor = parent_rate / rate; |
158 | 158 | ||
159 | /* If prate / rate would be decimal, incr the divisor */ | 159 | /* If prate / rate would be decimal, incr the divisor */ |
160 | if (rate * divisor < *prate) | 160 | if (rate * divisor < parent_rate) |
161 | divisor++; | 161 | divisor++; |
162 | 162 | ||
163 | if (divisor == cdev->div_mask + 1) | 163 | if (divisor == cdev->div_mask + 1) |
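
The clk-vt8500 fix above compares against parent_rate, the value actually used to compute the divisor, rather than the stale *prate. The rounding rule itself is just ceiling division: bump the divisor whenever the integer division truncated, so the output clock never exceeds the requested rate. A standalone sketch:

#include <assert.h>

/* Round parent_rate / rate up; equivalent to DIV_ROUND_UP(parent_rate, rate). */
static unsigned long pick_divisor(unsigned long parent_rate, unsigned long rate)
{
	unsigned long divisor = parent_rate / rate;

	/* if parent_rate / rate would be fractional, increment the divisor */
	if (rate * divisor < parent_rate)
		divisor++;
	return divisor;
}

int main(void)
{
	assert(pick_divisor(24000000, 7000000) == 4);  /* 24/7 = 3.43 -> round up */
	assert(pick_divisor(24000000, 6000000) == 4);  /* exact division unchanged */
	return 0;
}
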
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 910b0116c128..e1d13c463c90 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
@@ -2048,12 +2048,18 @@ static int init_csrows(struct mem_ctl_info *mci) | |||
2048 | edac_dbg(1, "MC node: %d, csrow: %d\n", | 2048 | edac_dbg(1, "MC node: %d, csrow: %d\n", |
2049 | pvt->mc_node_id, i); | 2049 | pvt->mc_node_id, i); |
2050 | 2050 | ||
2051 | if (row_dct0) | 2051 | if (row_dct0) { |
2052 | nr_pages = amd64_csrow_nr_pages(pvt, 0, i); | 2052 | nr_pages = amd64_csrow_nr_pages(pvt, 0, i); |
2053 | csrow->channels[0]->dimm->nr_pages = nr_pages; | ||
2054 | } | ||
2053 | 2055 | ||
2054 | /* K8 has only one DCT */ | 2056 | /* K8 has only one DCT */ |
2055 | if (boot_cpu_data.x86 != 0xf && row_dct1) | 2057 | if (boot_cpu_data.x86 != 0xf && row_dct1) { |
2056 | nr_pages += amd64_csrow_nr_pages(pvt, 1, i); | 2058 | int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i); |
2059 | |||
2060 | csrow->channels[1]->dimm->nr_pages = row_dct1_pages; | ||
2061 | nr_pages += row_dct1_pages; | ||
2062 | } | ||
2057 | 2063 | ||
2058 | mtype = amd64_determine_memory_type(pvt, i); | 2064 | mtype = amd64_determine_memory_type(pvt, i); |
2059 | 2065 | ||
@@ -2072,9 +2078,7 @@ static int init_csrows(struct mem_ctl_info *mci) | |||
2072 | dimm = csrow->channels[j]->dimm; | 2078 | dimm = csrow->channels[j]->dimm; |
2073 | dimm->mtype = mtype; | 2079 | dimm->mtype = mtype; |
2074 | dimm->edac_mode = edac_mode; | 2080 | dimm->edac_mode = edac_mode; |
2075 | dimm->nr_pages = nr_pages; | ||
2076 | } | 2081 | } |
2077 | csrow->nr_pages = nr_pages; | ||
2078 | } | 2082 | } |
2079 | 2083 | ||
2080 | return empty; | 2084 | return empty; |
@@ -2419,7 +2423,6 @@ static int amd64_init_one_instance(struct pci_dev *F2) | |||
2419 | 2423 | ||
2420 | mci->pvt_info = pvt; | 2424 | mci->pvt_info = pvt; |
2421 | mci->pdev = &pvt->F2->dev; | 2425 | mci->pdev = &pvt->F2->dev; |
2422 | mci->csbased = 1; | ||
2423 | 2426 | ||
2424 | setup_mci_misc_attrs(mci, fam_type); | 2427 | setup_mci_misc_attrs(mci, fam_type); |
2425 | 2428 | ||
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index cdb81aa73ab7..27e86d938262 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -86,7 +86,7 @@ static void edac_mc_dump_dimm(struct dimm_info *dimm, int number) | |||
86 | edac_dimm_info_location(dimm, location, sizeof(location)); | 86 | edac_dimm_info_location(dimm, location, sizeof(location)); |
87 | 87 | ||
88 | edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n", | 88 | edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n", |
89 | dimm->mci->mem_is_per_rank ? "rank" : "dimm", | 89 | dimm->mci->csbased ? "rank" : "dimm", |
90 | number, location, dimm->csrow, dimm->cschannel); | 90 | number, location, dimm->csrow, dimm->cschannel); |
91 | edac_dbg(4, " dimm = %p\n", dimm); | 91 | edac_dbg(4, " dimm = %p\n", dimm); |
92 | edac_dbg(4, " dimm->label = '%s'\n", dimm->label); | 92 | edac_dbg(4, " dimm->label = '%s'\n", dimm->label); |
@@ -341,7 +341,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, | |||
341 | memcpy(mci->layers, layers, sizeof(*layer) * n_layers); | 341 | memcpy(mci->layers, layers, sizeof(*layer) * n_layers); |
342 | mci->nr_csrows = tot_csrows; | 342 | mci->nr_csrows = tot_csrows; |
343 | mci->num_cschannel = tot_channels; | 343 | mci->num_cschannel = tot_channels; |
344 | mci->mem_is_per_rank = per_rank; | 344 | mci->csbased = per_rank; |
345 | 345 | ||
346 | /* | 346 | /* |
347 | * Allocate and fill the csrow/channels structs | 347 | * Allocate and fill the csrow/channels structs |
@@ -1235,7 +1235,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type, | |||
1235 | * incrementing the compat API counters | 1235 | * incrementing the compat API counters |
1236 | */ | 1236 | */ |
1237 | edac_dbg(4, "%s csrows map: (%d,%d)\n", | 1237 | edac_dbg(4, "%s csrows map: (%d,%d)\n", |
1238 | mci->mem_is_per_rank ? "rank" : "dimm", | 1238 | mci->csbased ? "rank" : "dimm", |
1239 | dimm->csrow, dimm->cschannel); | 1239 | dimm->csrow, dimm->cschannel); |
1240 | if (row == -1) | 1240 | if (row == -1) |
1241 | row = dimm->csrow; | 1241 | row = dimm->csrow; |
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 4f4b6137d74e..5899a76eec3b 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c | |||
@@ -143,7 +143,7 @@ static const char *edac_caps[] = { | |||
143 | * and the per-dimm/per-rank one | 143 | * and the per-dimm/per-rank one |
144 | */ | 144 | */ |
145 | #define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \ | 145 | #define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \ |
146 | struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store) | 146 | static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store) |
147 | 147 | ||
148 | struct dev_ch_attribute { | 148 | struct dev_ch_attribute { |
149 | struct device_attribute attr; | 149 | struct device_attribute attr; |
@@ -180,9 +180,6 @@ static ssize_t csrow_size_show(struct device *dev, | |||
180 | int i; | 180 | int i; |
181 | u32 nr_pages = 0; | 181 | u32 nr_pages = 0; |
182 | 182 | ||
183 | if (csrow->mci->csbased) | ||
184 | return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages)); | ||
185 | |||
186 | for (i = 0; i < csrow->nr_channels; i++) | 183 | for (i = 0; i < csrow->nr_channels; i++) |
187 | nr_pages += csrow->channels[i]->dimm->nr_pages; | 184 | nr_pages += csrow->channels[i]->dimm->nr_pages; |
188 | return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); | 185 | return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); |
@@ -612,7 +609,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci, | |||
612 | device_initialize(&dimm->dev); | 609 | device_initialize(&dimm->dev); |
613 | 610 | ||
614 | dimm->dev.parent = &mci->dev; | 611 | dimm->dev.parent = &mci->dev; |
615 | if (mci->mem_is_per_rank) | 612 | if (mci->csbased) |
616 | dev_set_name(&dimm->dev, "rank%d", index); | 613 | dev_set_name(&dimm->dev, "rank%d", index); |
617 | else | 614 | else |
618 | dev_set_name(&dimm->dev, "dimm%d", index); | 615 | dev_set_name(&dimm->dev, "dimm%d", index); |
@@ -778,14 +775,10 @@ static ssize_t mci_size_mb_show(struct device *dev, | |||
778 | for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) { | 775 | for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) { |
779 | struct csrow_info *csrow = mci->csrows[csrow_idx]; | 776 | struct csrow_info *csrow = mci->csrows[csrow_idx]; |
780 | 777 | ||
781 | if (csrow->mci->csbased) { | 778 | for (j = 0; j < csrow->nr_channels; j++) { |
782 | total_pages += csrow->nr_pages; | 779 | struct dimm_info *dimm = csrow->channels[j]->dimm; |
783 | } else { | ||
784 | for (j = 0; j < csrow->nr_channels; j++) { | ||
785 | struct dimm_info *dimm = csrow->channels[j]->dimm; | ||
786 | 780 | ||
787 | total_pages += dimm->nr_pages; | 781 | total_pages += dimm->nr_pages; |
788 | } | ||
789 | } | 782 | } |
790 | } | 783 | } |
791 | 784 | ||
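
With the amd64_edac and edac_mc_sysfs changes above, a csrow's size is no longer cached in csrow->nr_pages; it is always derived by summing the dimm->nr_pages of its channels, which each driver now fills in per DCT. The accounting is sketched below with deliberately simplified structures (these are not the real EDAC types):

#include <stdio.h>

#define MAX_CHANNELS 2

struct dimm  { unsigned nr_pages; };
struct csrow { struct dimm dimm[MAX_CHANNELS]; unsigned nr_channels; };

/* Size of a chip-select row = sum of the pages behind each channel. */
static unsigned csrow_pages(const struct csrow *cs)
{
	unsigned pages = 0;
	unsigned i;

	for (i = 0; i < cs->nr_channels; i++)
		pages += cs->dimm[i].nr_pages;
	return pages;
}

int main(void)
{
	struct csrow cs = { .dimm = { { 1024 }, { 2048 } }, .nr_channels = 2 };

	printf("csrow size: %u pages\n", csrow_pages(&cs));   /* prints 3072 */
	return 0;
}
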
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index c194f4e680ad..e2acfdbf7d3c 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -1634,7 +1634,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, | |||
1634 | unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; | 1634 | unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; |
1635 | unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; | 1635 | unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; |
1636 | unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; | 1636 | unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; |
1637 | unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; | 1637 | unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4; |
1638 | unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); | 1638 | unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); |
1639 | 1639 | ||
1640 | /* ignore tiny modes */ | 1640 | /* ignore tiny modes */ |
@@ -1715,6 +1715,7 @@ set_size: | |||
1715 | } | 1715 | } |
1716 | 1716 | ||
1717 | mode->type = DRM_MODE_TYPE_DRIVER; | 1717 | mode->type = DRM_MODE_TYPE_DRIVER; |
1718 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
1718 | drm_mode_set_name(mode); | 1719 | drm_mode_set_name(mode); |
1719 | 1720 | ||
1720 | return mode; | 1721 | return mode; |
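
In the drm_edid fix above, the two high bits of the vertical sync offset live in bits 3:2 of the shared high byte and must land in bits 5:4 of the 6-bit result, hence the shift left by 2 rather than right. A standalone sketch of the extraction, with parameter names mirroring the hunk:

#include <assert.h>
#include <stdint.h>

/* The detailed-timing high byte packs four 2-bit fields; bits 3:2 hold the
 * top of the vsync offset, and the next byte's high nibble holds the low
 * four bits. */
static unsigned vsync_offset(uint8_t hi, uint8_t vsync_offset_pulse_width_lo)
{
	return ((hi & 0x0c) << 2) | (vsync_offset_pulse_width_lo >> 4);
}

int main(void)
{
	/* offset 0b10'0110 = 38: high bits 0b10 in bits 3:2, low bits 0b0110 */
	assert(vsync_offset(0x08, 0x60) == 0x26);
	return 0;
}
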
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index aae31489c893..7299ea45dd03 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -103,7 +103,7 @@ static const char *cache_level_str(int type) | |||
103 | static void | 103 | static void |
104 | describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | 104 | describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) |
105 | { | 105 | { |
106 | seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", | 106 | seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", |
107 | &obj->base, | 107 | &obj->base, |
108 | get_pin_flag(obj), | 108 | get_pin_flag(obj), |
109 | get_tiling_flag(obj), | 109 | get_tiling_flag(obj), |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 2f2daebd0eef..3b11ab0fbc96 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -732,6 +732,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, | |||
732 | int count) | 732 | int count) |
733 | { | 733 | { |
734 | int i; | 734 | int i; |
735 | int relocs_total = 0; | ||
736 | int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); | ||
735 | 737 | ||
736 | for (i = 0; i < count; i++) { | 738 | for (i = 0; i < count; i++) { |
737 | char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; | 739 | char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; |
@@ -740,10 +742,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, | |||
740 | if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) | 742 | if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) |
741 | return -EINVAL; | 743 | return -EINVAL; |
742 | 744 | ||
743 | /* First check for malicious input causing overflow */ | 745 | /* First check for malicious input causing overflow in |
744 | if (exec[i].relocation_count > | 746 | * the worst case where we need to allocate the entire |
745 | INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) | 747 | * relocation tree as a single array. |
748 | */ | ||
749 | if (exec[i].relocation_count > relocs_max - relocs_total) | ||
746 | return -EINVAL; | 750 | return -EINVAL; |
751 | relocs_total += exec[i].relocation_count; | ||
747 | 752 | ||
748 | length = exec[i].relocation_count * | 753 | length = exec[i].relocation_count * |
749 | sizeof(struct drm_i915_gem_relocation_entry); | 754 | sizeof(struct drm_i915_gem_relocation_entry); |
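
The execbuffer fix above caps the running total of relocation entries, not just each individual count, so the eventual total * sizeof(entry) allocation cannot overflow. The guard pattern, comparing against the remaining headroom before adding, is sketched standalone below; the struct and counts are illustrative.

#include <limits.h>
#include <stdio.h>

struct entry { char payload[32]; };

/* Reject counts whose running total would overflow once multiplied by
 * sizeof(struct entry).  Checking "count > max - total" avoids performing
 * the addition (and its own overflow) first. */
static int validate_counts(const unsigned int *counts, int n)
{
	const unsigned int max = INT_MAX / sizeof(struct entry);
	unsigned int total = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (counts[i] > max - total)
			return -1;              /* would overflow */
		total += counts[i];
	}
	return 0;
}

int main(void)
{
	unsigned int ok[]  = { 1000, 2000 };
	unsigned int bad[] = { INT_MAX / 32, INT_MAX / 32 };

	printf("ok:  %d\n", validate_counts(ok, 2));    /* 0  */
	printf("bad: %d\n", validate_counts(bad, 2));   /* -1 */
	return 0;
}
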
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 6f728e5ee793..d7d4afe01341 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -820,6 +820,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
820 | struct intel_link_m_n m_n; | 820 | struct intel_link_m_n m_n; |
821 | int pipe = intel_crtc->pipe; | 821 | int pipe = intel_crtc->pipe; |
822 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | 822 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
823 | int target_clock; | ||
823 | 824 | ||
824 | /* | 825 | /* |
825 | * Find the lane count in the intel_encoder private | 826 | * Find the lane count in the intel_encoder private |
@@ -835,13 +836,22 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
835 | } | 836 | } |
836 | } | 837 | } |
837 | 838 | ||
839 | target_clock = mode->clock; | ||
840 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { | ||
841 | if (intel_encoder->type == INTEL_OUTPUT_EDP) { | ||
842 | target_clock = intel_edp_target_clock(intel_encoder, | ||
843 | mode); | ||
844 | break; | ||
845 | } | ||
846 | } | ||
847 | |||
838 | /* | 848 | /* |
839 | * Compute the GMCH and Link ratios. The '3' here is | 849 | * Compute the GMCH and Link ratios. The '3' here is |
840 | * the number of bytes_per_pixel post-LUT, which we always | 850 | * the number of bytes_per_pixel post-LUT, which we always |
841 | * set up for 8-bits of R/G/B, or 3 bytes total. | 851 | * set up for 8-bits of R/G/B, or 3 bytes total. |
842 | */ | 852 | */ |
843 | intel_link_compute_m_n(intel_crtc->bpp, lane_count, | 853 | intel_link_compute_m_n(intel_crtc->bpp, lane_count, |
844 | mode->clock, adjusted_mode->clock, &m_n); | 854 | target_clock, adjusted_mode->clock, &m_n); |
845 | 855 | ||
846 | if (IS_HASWELL(dev)) { | 856 | if (IS_HASWELL(dev)) { |
847 | I915_WRITE(PIPE_DATA_M1(cpu_transcoder), | 857 | I915_WRITE(PIPE_DATA_M1(cpu_transcoder), |
@@ -1930,7 +1940,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1930 | for (i = 0; i < intel_dp->lane_count; i++) | 1940 | for (i = 0; i < intel_dp->lane_count; i++) |
1931 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | 1941 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
1932 | break; | 1942 | break; |
1933 | if (i == intel_dp->lane_count && voltage_tries == 5) { | 1943 | if (i == intel_dp->lane_count) { |
1934 | ++loop_tries; | 1944 | ++loop_tries; |
1935 | if (loop_tries == 5) { | 1945 | if (loop_tries == 5) { |
1936 | DRM_DEBUG_KMS("too many full retries, give up\n"); | 1946 | DRM_DEBUG_KMS("too many full retries, give up\n"); |
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index acf8aec9ada7..ef4744e1bf0b 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -203,7 +203,13 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin) | |||
203 | algo->data = bus; | 203 | algo->data = bus; |
204 | } | 204 | } |
205 | 205 | ||
206 | #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4) | 206 | /* |
207 | * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI | ||
208 | * mode. This results in spurious interrupt warnings if the legacy irq no. is | ||
209 | * shared with another device. The kernel then disables that interrupt source | ||
210 | * and so prevents the other device from working properly. | ||
211 | */ | ||
212 | #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) | ||
207 | static int | 213 | static int |
208 | gmbus_wait_hw_status(struct drm_i915_private *dev_priv, | 214 | gmbus_wait_hw_status(struct drm_i915_private *dev_priv, |
209 | u32 gmbus2_status, | 215 | u32 gmbus2_status, |
@@ -214,6 +220,9 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv, | |||
214 | u32 gmbus2 = 0; | 220 | u32 gmbus2 = 0; |
215 | DEFINE_WAIT(wait); | 221 | DEFINE_WAIT(wait); |
216 | 222 | ||
223 | if (!HAS_GMBUS_IRQ(dev_priv->dev)) | ||
224 | gmbus4_irq_en = 0; | ||
225 | |||
217 | /* Important: The hw handles only the first bit, so set only one! Since | 226 | /* Important: The hw handles only the first bit, so set only one! Since |
218 | * we also need to check for NAKs besides the hw ready/idle signal, we | 227 | * we also need to check for NAKs besides the hw ready/idle signal, we |
219 | * need to wake up periodically and check that ourselves. */ | 228 | * need to wake up periodically and check that ourselves. */ |
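
The intel_i2c change above stops advertising GMBUS interrupts on gen4 and, in gmbus_wait_hw_status(), clears the interrupt-enable bits so the wait degrades to pure polling. The shape of that fallback, masking out the irq-enable bits when the hardware cannot be trusted to raise them, is sketched below with illustrative names rather than the driver's registers:

#include <stdbool.h>
#include <stdio.h>

#define IRQ_EN_READY 0x1
#define IRQ_EN_NAK   0x2

/* Decide which completion interrupts to arm; when the platform has no
 * usable irq, arm none and let the caller's polling loop do the work. */
static unsigned int irq_enable_bits(bool has_irq, unsigned int wanted)
{
	return has_irq ? wanted : 0;
}

int main(void)
{
	printf("gen4: 0x%x\n", irq_enable_bits(false, IRQ_EN_READY | IRQ_EN_NAK));
	printf("gen5: 0x%x\n", irq_enable_bits(true,  IRQ_EN_READY | IRQ_EN_NAK));
	return 0;
}
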
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index a274b9906ef8..fe22bb780e1d 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c | |||
@@ -382,19 +382,19 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) | |||
382 | m = n = p = 0; | 382 | m = n = p = 0; |
383 | vcomax = 800000; | 383 | vcomax = 800000; |
384 | vcomin = 400000; | 384 | vcomin = 400000; |
385 | pllreffreq = 3333; | 385 | pllreffreq = 33333; |
386 | 386 | ||
387 | delta = 0xffffffff; | 387 | delta = 0xffffffff; |
388 | permitteddelta = clock * 5 / 1000; | 388 | permitteddelta = clock * 5 / 1000; |
389 | 389 | ||
390 | for (testp = 16; testp > 0; testp--) { | 390 | for (testp = 16; testp > 0; testp >>= 1) { |
391 | if (clock * testp > vcomax) | 391 | if (clock * testp > vcomax) |
392 | continue; | 392 | continue; |
393 | if (clock * testp < vcomin) | 393 | if (clock * testp < vcomin) |
394 | continue; | 394 | continue; |
395 | 395 | ||
396 | for (testm = 1; testm < 33; testm++) { | 396 | for (testm = 1; testm < 33; testm++) { |
397 | for (testn = 1; testn < 257; testn++) { | 397 | for (testn = 17; testn < 257; testn++) { |
398 | computed = (pllreffreq * testn) / | 398 | computed = (pllreffreq * testn) / |
399 | (testm * testp); | 399 | (testm * testp); |
400 | if (computed > clock) | 400 | if (computed > clock) |
@@ -404,11 +404,11 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) | |||
404 | if (tmpdelta < delta) { | 404 | if (tmpdelta < delta) { |
405 | delta = tmpdelta; | 405 | delta = tmpdelta; |
406 | n = testn - 1; | 406 | n = testn - 1; |
407 | m = (testm - 1) | ((n >> 1) & 0x80); | 407 | m = (testm - 1); |
408 | p = testp - 1; | 408 | p = testp - 1; |
409 | } | 409 | } |
410 | if ((clock * testp) >= 600000) | 410 | if ((clock * testp) >= 600000) |
411 | p |= 80; | 411 | p |= 0x80; |
412 | } | 412 | } |
413 | } | 413 | } |
414 | } | 414 | } |
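
The mga_g200eh_set_plls() fix above corrects the reference-frequency constant, steps the post-divider through powers of two, and drops the bogus high-bit fold into m. The underlying brute-force search, minimising |ref * n / (m * p) - clock| over the allowed ranges, is sketched standalone below using the same limits as the hunk; register encodings (the -1 offsets and the 0x80 flag) are omitted, and the kHz interpretation is an assumption.

#include <stdio.h>

/* Exhaustive PLL search: f_out = ref * n / (m * p), with the VCO
 * (clock * p) constrained to 400000..800000.  Values treated as kHz. */
static void pick_pll(long clock, unsigned *best_m, unsigned *best_n, unsigned *best_p)
{
	const long pllreffreq = 33333, vcomin = 400000, vcomax = 800000;
	unsigned long delta = ~0UL;
	unsigned testp, testm, testn;

	for (testp = 16; testp > 0; testp >>= 1) {        /* p: 16, 8, 4, 2, 1 */
		if (clock * testp > vcomax || clock * testp < vcomin)
			continue;
		for (testm = 1; testm < 33; testm++) {
			for (testn = 17; testn < 257; testn++) {
				long computed = (pllreffreq * testn) / (testm * testp);
				unsigned long d = computed > clock ?
					computed - clock : clock - computed;
				if (d < delta) {
					delta = d;
					*best_m = testm;
					*best_n = testn;
					*best_p = testp;
				}
			}
		}
	}
}

int main(void)
{
	unsigned m = 1, n = 1, p = 1;

	pick_pll(148500, &m, &n, &p);   /* e.g. a 148.5 MHz pixel clock */
	printf("n=%u m=%u p=%u -> %ld kHz\n", n, m, p, (33333L * n) / (m * p));
	return 0;
}
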
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c index 0daab62ea14c..3b2e7b6304d3 100644 --- a/drivers/gpu/drm/nouveau/core/core/object.c +++ b/drivers/gpu/drm/nouveau/core/core/object.c | |||
@@ -278,7 +278,6 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle) | |||
278 | struct nouveau_object *parent = NULL; | 278 | struct nouveau_object *parent = NULL; |
279 | struct nouveau_object *namedb = NULL; | 279 | struct nouveau_object *namedb = NULL; |
280 | struct nouveau_handle *handle = NULL; | 280 | struct nouveau_handle *handle = NULL; |
281 | int ret = -EINVAL; | ||
282 | 281 | ||
283 | parent = nouveau_handle_ref(client, _parent); | 282 | parent = nouveau_handle_ref(client, _parent); |
284 | if (!parent) | 283 | if (!parent) |
@@ -295,7 +294,7 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle) | |||
295 | } | 294 | } |
296 | 295 | ||
297 | nouveau_object_ref(NULL, &parent); | 296 | nouveau_object_ref(NULL, &parent); |
298 | return ret; | 297 | return handle ? 0 : -EINVAL; |
299 | } | 298 | } |
300 | 299 | ||
301 | int | 300 | int |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h index 6b17b614629f..0b20fc0d19c1 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <core/device.h> | 4 | #include <core/device.h> |
5 | #include <core/subdev.h> | 5 | #include <core/subdev.h> |
6 | 6 | ||
7 | enum nouveau_therm_mode { | 7 | enum nouveau_therm_fan_mode { |
8 | NOUVEAU_THERM_CTRL_NONE = 0, | 8 | NOUVEAU_THERM_CTRL_NONE = 0, |
9 | NOUVEAU_THERM_CTRL_MANUAL = 1, | 9 | NOUVEAU_THERM_CTRL_MANUAL = 1, |
10 | NOUVEAU_THERM_CTRL_AUTO = 2, | 10 | NOUVEAU_THERM_CTRL_AUTO = 2, |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c index f794dc89a3b2..a00a5a76e2d6 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c | |||
@@ -134,7 +134,7 @@ nouveau_therm_alarm(struct nouveau_alarm *alarm) | |||
134 | } | 134 | } |
135 | 135 | ||
136 | int | 136 | int |
137 | nouveau_therm_mode(struct nouveau_therm *therm, int mode) | 137 | nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode) |
138 | { | 138 | { |
139 | struct nouveau_therm_priv *priv = (void *)therm; | 139 | struct nouveau_therm_priv *priv = (void *)therm; |
140 | struct nouveau_device *device = nv_device(therm); | 140 | struct nouveau_device *device = nv_device(therm); |
@@ -149,10 +149,15 @@ nouveau_therm_mode(struct nouveau_therm *therm, int mode) | |||
149 | (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0)) | 149 | (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0)) |
150 | return -EINVAL; | 150 | return -EINVAL; |
151 | 151 | ||
152 | /* do not allow automatic fan management if the thermal sensor is | ||
153 | * not available */ | ||
154 | if (priv->mode == 2 && therm->temp_get(therm) < 0) | ||
155 | return -EINVAL; | ||
156 | |||
152 | if (priv->mode == mode) | 157 | if (priv->mode == mode) |
153 | return 0; | 158 | return 0; |
154 | 159 | ||
155 | nv_info(therm, "Thermal management: %s\n", name[mode]); | 160 | nv_info(therm, "fan management: %s\n", name[mode]); |
156 | nouveau_therm_update(therm, mode); | 161 | nouveau_therm_update(therm, mode); |
157 | return 0; | 162 | return 0; |
158 | } | 163 | } |
@@ -213,7 +218,7 @@ nouveau_therm_attr_set(struct nouveau_therm *therm, | |||
213 | priv->fan->bios.max_duty = value; | 218 | priv->fan->bios.max_duty = value; |
214 | return 0; | 219 | return 0; |
215 | case NOUVEAU_THERM_ATTR_FAN_MODE: | 220 | case NOUVEAU_THERM_ATTR_FAN_MODE: |
216 | return nouveau_therm_mode(therm, value); | 221 | return nouveau_therm_fan_mode(therm, value); |
217 | case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST: | 222 | case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST: |
218 | priv->bios_sensor.thrs_fan_boost.temp = value; | 223 | priv->bios_sensor.thrs_fan_boost.temp = value; |
219 | priv->sensor.program_alarms(therm); | 224 | priv->sensor.program_alarms(therm); |
@@ -263,7 +268,7 @@ _nouveau_therm_init(struct nouveau_object *object) | |||
263 | return ret; | 268 | return ret; |
264 | 269 | ||
265 | if (priv->suspend >= 0) | 270 | if (priv->suspend >= 0) |
266 | nouveau_therm_mode(therm, priv->mode); | 271 | nouveau_therm_fan_mode(therm, priv->mode); |
267 | priv->sensor.program_alarms(therm); | 272 | priv->sensor.program_alarms(therm); |
268 | return 0; | 273 | return 0; |
269 | } | 274 | } |
@@ -313,11 +318,12 @@ nouveau_therm_create_(struct nouveau_object *parent, | |||
313 | int | 318 | int |
314 | nouveau_therm_preinit(struct nouveau_therm *therm) | 319 | nouveau_therm_preinit(struct nouveau_therm *therm) |
315 | { | 320 | { |
316 | nouveau_therm_ic_ctor(therm); | ||
317 | nouveau_therm_sensor_ctor(therm); | 321 | nouveau_therm_sensor_ctor(therm); |
322 | nouveau_therm_ic_ctor(therm); | ||
318 | nouveau_therm_fan_ctor(therm); | 323 | nouveau_therm_fan_ctor(therm); |
319 | 324 | ||
320 | nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_NONE); | 325 | nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE); |
326 | nouveau_therm_sensor_preinit(therm); | ||
321 | return 0; | 327 | return 0; |
322 | } | 328 | } |
323 | 329 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c index e24090bac195..8b3adec5fbb1 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c | |||
@@ -32,6 +32,7 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c, | |||
32 | struct i2c_board_info *info) | 32 | struct i2c_board_info *info) |
33 | { | 33 | { |
34 | struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); | 34 | struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); |
35 | struct nvbios_therm_sensor *sensor = &priv->bios_sensor; | ||
35 | struct i2c_client *client; | 36 | struct i2c_client *client; |
36 | 37 | ||
37 | request_module("%s%s", I2C_MODULE_PREFIX, info->type); | 38 | request_module("%s%s", I2C_MODULE_PREFIX, info->type); |
@@ -46,8 +47,9 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c, | |||
46 | } | 47 | } |
47 | 48 | ||
48 | nv_info(priv, | 49 | nv_info(priv, |
49 | "Found an %s at address 0x%x (controlled by lm_sensors)\n", | 50 | "Found an %s at address 0x%x (controlled by lm_sensors, " |
50 | info->type, info->addr); | 51 | "temp offset %+i C)\n", |
52 | info->type, info->addr, sensor->offset_constant); | ||
51 | priv->ic = client; | 53 | priv->ic = client; |
52 | 54 | ||
53 | return true; | 55 | return true; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c index 0f5363edb964..a70d1b7e397b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c | |||
@@ -29,54 +29,83 @@ struct nv40_therm_priv { | |||
29 | struct nouveau_therm_priv base; | 29 | struct nouveau_therm_priv base; |
30 | }; | 30 | }; |
31 | 31 | ||
32 | enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 }; | ||
33 | |||
34 | static enum nv40_sensor_style | ||
35 | nv40_sensor_style(struct nouveau_therm *therm) | ||
36 | { | ||
37 | struct nouveau_device *device = nv_device(therm); | ||
38 | |||
39 | switch (device->chipset) { | ||
40 | case 0x43: | ||
41 | case 0x44: | ||
42 | case 0x4a: | ||
43 | case 0x47: | ||
44 | return OLD_STYLE; | ||
45 | |||
46 | case 0x46: | ||
47 | case 0x49: | ||
48 | case 0x4b: | ||
49 | case 0x4e: | ||
50 | case 0x4c: | ||
51 | case 0x67: | ||
52 | case 0x68: | ||
53 | case 0x63: | ||
54 | return NEW_STYLE; | ||
55 | default: | ||
56 | return INVALID_STYLE; | ||
57 | } | ||
58 | } | ||
59 | |||
32 | static int | 60 | static int |
33 | nv40_sensor_setup(struct nouveau_therm *therm) | 61 | nv40_sensor_setup(struct nouveau_therm *therm) |
34 | { | 62 | { |
35 | struct nouveau_device *device = nv_device(therm); | 63 | enum nv40_sensor_style style = nv40_sensor_style(therm); |
36 | 64 | ||
37 | /* enable ADC readout and disable the ALARM threshold */ | 65 | /* enable ADC readout and disable the ALARM threshold */ |
38 | if (device->chipset >= 0x46) { | 66 | if (style == NEW_STYLE) { |
39 | nv_mask(therm, 0x15b8, 0x80000000, 0); | 67 | nv_mask(therm, 0x15b8, 0x80000000, 0); |
40 | nv_wr32(therm, 0x15b0, 0x80003fff); | 68 | nv_wr32(therm, 0x15b0, 0x80003fff); |
41 | mdelay(10); /* wait for the temperature to stabilize */ | 69 | mdelay(20); /* wait for the temperature to stabilize */ |
42 | return nv_rd32(therm, 0x15b4) & 0x3fff; | 70 | return nv_rd32(therm, 0x15b4) & 0x3fff; |
43 | } else { | 71 | } else if (style == OLD_STYLE) { |
44 | nv_wr32(therm, 0x15b0, 0xff); | 72 | nv_wr32(therm, 0x15b0, 0xff); |
73 | mdelay(20); /* wait for the temperature to stabilize */ | ||
45 | return nv_rd32(therm, 0x15b4) & 0xff; | 74 | return nv_rd32(therm, 0x15b4) & 0xff; |
46 | } | 75 | } else |
76 | return -ENODEV; | ||
47 | } | 77 | } |
48 | 78 | ||
49 | static int | 79 | static int |
50 | nv40_temp_get(struct nouveau_therm *therm) | 80 | nv40_temp_get(struct nouveau_therm *therm) |
51 | { | 81 | { |
52 | struct nouveau_therm_priv *priv = (void *)therm; | 82 | struct nouveau_therm_priv *priv = (void *)therm; |
53 | struct nouveau_device *device = nv_device(therm); | ||
54 | struct nvbios_therm_sensor *sensor = &priv->bios_sensor; | 83 | struct nvbios_therm_sensor *sensor = &priv->bios_sensor; |
84 | enum nv40_sensor_style style = nv40_sensor_style(therm); | ||
55 | int core_temp; | 85 | int core_temp; |
56 | 86 | ||
57 | if (device->chipset >= 0x46) { | 87 | if (style == NEW_STYLE) { |
58 | nv_wr32(therm, 0x15b0, 0x80003fff); | 88 | nv_wr32(therm, 0x15b0, 0x80003fff); |
59 | core_temp = nv_rd32(therm, 0x15b4) & 0x3fff; | 89 | core_temp = nv_rd32(therm, 0x15b4) & 0x3fff; |
60 | } else { | 90 | } else if (style == OLD_STYLE) { |
61 | nv_wr32(therm, 0x15b0, 0xff); | 91 | nv_wr32(therm, 0x15b0, 0xff); |
62 | core_temp = nv_rd32(therm, 0x15b4) & 0xff; | 92 | core_temp = nv_rd32(therm, 0x15b4) & 0xff; |
63 | } | 93 | } else |
64 | 94 | return -ENODEV; | |
65 | /* Setup the sensor if the temperature is 0 */ | ||
66 | if (core_temp == 0) | ||
67 | core_temp = nv40_sensor_setup(therm); | ||
68 | 95 | ||
69 | if (sensor->slope_div == 0) | 96 | /* if the slope or the offset is unset, do not use the sensor */ |
70 | sensor->slope_div = 1; | 97 | if (!sensor->slope_div || !sensor->slope_mult || |
71 | if (sensor->offset_den == 0) | 98 | !sensor->offset_num || !sensor->offset_den) |
72 | sensor->offset_den = 1; | 99 | return -ENODEV; |
73 | if (sensor->slope_mult < 1) | ||
74 | sensor->slope_mult = 1; | ||
75 | 100 | ||
76 | core_temp = core_temp * sensor->slope_mult / sensor->slope_div; | 101 | core_temp = core_temp * sensor->slope_mult / sensor->slope_div; |
77 | core_temp = core_temp + sensor->offset_num / sensor->offset_den; | 102 | core_temp = core_temp + sensor->offset_num / sensor->offset_den; |
78 | core_temp = core_temp + sensor->offset_constant - 8; | 103 | core_temp = core_temp + sensor->offset_constant - 8; |
79 | 104 | ||
105 | /* reserve negative temperatures for errors */ | ||
106 | if (core_temp < 0) | ||
107 | core_temp = 0; | ||
108 | |||
80 | return core_temp; | 109 | return core_temp; |
81 | } | 110 | } |
82 | 111 | ||
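
The nv40 sensor rework above refuses to report a temperature when the VBIOS calibration (slope and offset) is missing, instead of silently substituting defaults, and reserves negative return values for errors. The calibration itself is a linear transform of the raw ADC reading; the sketch below uses made-up calibration constants purely for illustration.

#include <stdio.h>

struct sensor_calib {
	int slope_mult, slope_div;
	int offset_num, offset_den;
	int offset_constant;
};

/* Convert a raw ADC value to degrees C; negative returns are reserved for
 * "no usable sensor", as in the hunk above. */
static int raw_to_celsius(int raw, const struct sensor_calib *c)
{
	int temp;

	if (!c->slope_div || !c->slope_mult || !c->offset_num || !c->offset_den)
		return -1;                      /* calibration missing */

	temp = raw * c->slope_mult / c->slope_div;
	temp = temp + c->offset_num / c->offset_den;
	temp = temp + c->offset_constant - 8;
	return temp < 0 ? 0 : temp;             /* clamp: negatives mean error */
}

int main(void)
{
	struct sensor_calib c = { 1, 2, 60, 1, 10 };    /* hypothetical values */

	printf("%d C\n", raw_to_celsius(50, &c));       /* 50/2 + 60 + 10 - 8 = 87 */
	return 0;
}
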
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h index 06b98706b3fc..438d9824b774 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h | |||
@@ -102,7 +102,7 @@ struct nouveau_therm_priv { | |||
102 | struct i2c_client *ic; | 102 | struct i2c_client *ic; |
103 | }; | 103 | }; |
104 | 104 | ||
105 | int nouveau_therm_mode(struct nouveau_therm *therm, int mode); | 105 | int nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode); |
106 | int nouveau_therm_attr_get(struct nouveau_therm *therm, | 106 | int nouveau_therm_attr_get(struct nouveau_therm *therm, |
107 | enum nouveau_therm_attr_type type); | 107 | enum nouveau_therm_attr_type type); |
108 | int nouveau_therm_attr_set(struct nouveau_therm *therm, | 108 | int nouveau_therm_attr_set(struct nouveau_therm *therm, |
@@ -122,6 +122,7 @@ int nouveau_therm_fan_sense(struct nouveau_therm *therm); | |||
122 | 122 | ||
123 | int nouveau_therm_preinit(struct nouveau_therm *); | 123 | int nouveau_therm_preinit(struct nouveau_therm *); |
124 | 124 | ||
125 | void nouveau_therm_sensor_preinit(struct nouveau_therm *); | ||
125 | void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm, | 126 | void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm, |
126 | enum nouveau_therm_thrs thrs, | 127 | enum nouveau_therm_thrs thrs, |
127 | enum nouveau_therm_thrs_state st); | 128 | enum nouveau_therm_thrs_state st); |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c index b37624af8297..470f6a47b656 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c | |||
@@ -34,10 +34,6 @@ nouveau_therm_temp_set_defaults(struct nouveau_therm *therm) | |||
34 | { | 34 | { |
35 | struct nouveau_therm_priv *priv = (void *)therm; | 35 | struct nouveau_therm_priv *priv = (void *)therm; |
36 | 36 | ||
37 | priv->bios_sensor.slope_mult = 1; | ||
38 | priv->bios_sensor.slope_div = 1; | ||
39 | priv->bios_sensor.offset_num = 0; | ||
40 | priv->bios_sensor.offset_den = 1; | ||
41 | priv->bios_sensor.offset_constant = 0; | 37 | priv->bios_sensor.offset_constant = 0; |
42 | 38 | ||
43 | priv->bios_sensor.thrs_fan_boost.temp = 90; | 39 | priv->bios_sensor.thrs_fan_boost.temp = 90; |
@@ -60,11 +56,6 @@ nouveau_therm_temp_safety_checks(struct nouveau_therm *therm) | |||
60 | struct nouveau_therm_priv *priv = (void *)therm; | 56 | struct nouveau_therm_priv *priv = (void *)therm; |
61 | struct nvbios_therm_sensor *s = &priv->bios_sensor; | 57 | struct nvbios_therm_sensor *s = &priv->bios_sensor; |
62 | 58 | ||
63 | if (!priv->bios_sensor.slope_div) | ||
64 | priv->bios_sensor.slope_div = 1; | ||
65 | if (!priv->bios_sensor.offset_den) | ||
66 | priv->bios_sensor.offset_den = 1; | ||
67 | |||
68 | /* enforce a minimum hysteresis on thresholds */ | 59 | /* enforce a minimum hysteresis on thresholds */ |
69 | s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2); | 60 | s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2); |
70 | s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2); | 61 | s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2); |
@@ -106,16 +97,16 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm, | |||
106 | const char *thresolds[] = { | 97 | const char *thresolds[] = { |
107 | "fanboost", "downclock", "critical", "shutdown" | 98 | "fanboost", "downclock", "critical", "shutdown" |
108 | }; | 99 | }; |
109 | uint8_t temperature = therm->temp_get(therm); | 100 | int temperature = therm->temp_get(therm); |
110 | 101 | ||
111 | if (thrs < 0 || thrs > 3) | 102 | if (thrs < 0 || thrs > 3) |
112 | return; | 103 | return; |
113 | 104 | ||
114 | if (dir == NOUVEAU_THERM_THRS_FALLING) | 105 | if (dir == NOUVEAU_THERM_THRS_FALLING) |
115 | nv_info(therm, "temperature (%u C) went below the '%s' threshold\n", | 106 | nv_info(therm, "temperature (%i C) went below the '%s' threshold\n", |
116 | temperature, thresolds[thrs]); | 107 | temperature, thresolds[thrs]); |
117 | else | 108 | else |
118 | nv_info(therm, "temperature (%u C) hit the '%s' threshold\n", | 109 | nv_info(therm, "temperature (%i C) hit the '%s' threshold\n", |
119 | temperature, thresolds[thrs]); | 110 | temperature, thresolds[thrs]); |
120 | 111 | ||
121 | active = (dir == NOUVEAU_THERM_THRS_RISING); | 112 | active = (dir == NOUVEAU_THERM_THRS_RISING); |
@@ -123,7 +114,7 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm, | |||
123 | case NOUVEAU_THERM_THRS_FANBOOST: | 114 | case NOUVEAU_THERM_THRS_FANBOOST: |
124 | if (active) { | 115 | if (active) { |
125 | nouveau_therm_fan_set(therm, true, 100); | 116 | nouveau_therm_fan_set(therm, true, 100); |
126 | nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_AUTO); | 117 | nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO); |
127 | } | 118 | } |
128 | break; | 119 | break; |
129 | case NOUVEAU_THERM_THRS_DOWNCLOCK: | 120 | case NOUVEAU_THERM_THRS_DOWNCLOCK: |
@@ -202,7 +193,7 @@ alarm_timer_callback(struct nouveau_alarm *alarm) | |||
202 | NOUVEAU_THERM_THRS_SHUTDOWN); | 193 | NOUVEAU_THERM_THRS_SHUTDOWN); |
203 | 194 | ||
204 | /* schedule the next poll in one second */ | 195 | /* schedule the next poll in one second */ |
205 | if (list_empty(&alarm->head)) | 196 | if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head)) |
206 | ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm); | 197 | ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm); |
207 | 198 | ||
208 | spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); | 199 | spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); |
@@ -225,6 +216,17 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm) | |||
225 | alarm_timer_callback(&priv->sensor.therm_poll_alarm); | 216 | alarm_timer_callback(&priv->sensor.therm_poll_alarm); |
226 | } | 217 | } |
227 | 218 | ||
219 | void | ||
220 | nouveau_therm_sensor_preinit(struct nouveau_therm *therm) | ||
221 | { | ||
222 | const char *sensor_avail = "yes"; | ||
223 | |||
224 | if (therm->temp_get(therm) < 0) | ||
225 | sensor_avail = "no"; | ||
226 | |||
227 | nv_info(therm, "internal sensor: %s\n", sensor_avail); | ||
228 | } | ||
229 | |||
228 | int | 230 | int |
229 | nouveau_therm_sensor_ctor(struct nouveau_therm *therm) | 231 | nouveau_therm_sensor_ctor(struct nouveau_therm *therm) |
230 | { | 232 | { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index bb54098c6d97..936b442a6ab7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c | |||
@@ -402,8 +402,12 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) | |||
402 | struct drm_device *dev = dev_get_drvdata(d); | 402 | struct drm_device *dev = dev_get_drvdata(d); |
403 | struct nouveau_drm *drm = nouveau_drm(dev); | 403 | struct nouveau_drm *drm = nouveau_drm(dev); |
404 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 404 | struct nouveau_therm *therm = nouveau_therm(drm->device); |
405 | int temp = therm->temp_get(therm); | ||
405 | 406 | ||
406 | return snprintf(buf, PAGE_SIZE, "%d\n", therm->temp_get(therm) * 1000); | 407 | if (temp < 0) |
408 | return temp; | ||
409 | |||
410 | return snprintf(buf, PAGE_SIZE, "%d\n", temp * 1000); | ||
407 | } | 411 | } |
408 | static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, | 412 | static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, |
409 | NULL, 0); | 413 | NULL, 0); |
@@ -871,7 +875,12 @@ static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR, | |||
871 | nouveau_hwmon_get_pwm1_max, | 875 | nouveau_hwmon_get_pwm1_max, |
872 | nouveau_hwmon_set_pwm1_max, 0); | 876 | nouveau_hwmon_set_pwm1_max, 0); |
873 | 877 | ||
874 | static struct attribute *hwmon_attributes[] = { | 878 | static struct attribute *hwmon_default_attributes[] = { |
879 | &sensor_dev_attr_name.dev_attr.attr, | ||
880 | &sensor_dev_attr_update_rate.dev_attr.attr, | ||
881 | NULL | ||
882 | }; | ||
883 | static struct attribute *hwmon_temp_attributes[] = { | ||
875 | &sensor_dev_attr_temp1_input.dev_attr.attr, | 884 | &sensor_dev_attr_temp1_input.dev_attr.attr, |
876 | &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr, | 885 | &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr, |
877 | &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr, | 886 | &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr, |
@@ -882,8 +891,6 @@ static struct attribute *hwmon_attributes[] = { | |||
882 | &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, | 891 | &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, |
883 | &sensor_dev_attr_temp1_emergency.dev_attr.attr, | 892 | &sensor_dev_attr_temp1_emergency.dev_attr.attr, |
884 | &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr, | 893 | &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr, |
885 | &sensor_dev_attr_name.dev_attr.attr, | ||
886 | &sensor_dev_attr_update_rate.dev_attr.attr, | ||
887 | NULL | 894 | NULL |
888 | }; | 895 | }; |
889 | static struct attribute *hwmon_fan_rpm_attributes[] = { | 896 | static struct attribute *hwmon_fan_rpm_attributes[] = { |
@@ -898,8 +905,11 @@ static struct attribute *hwmon_pwm_fan_attributes[] = { | |||
898 | NULL | 905 | NULL |
899 | }; | 906 | }; |
900 | 907 | ||
901 | static const struct attribute_group hwmon_attrgroup = { | 908 | static const struct attribute_group hwmon_default_attrgroup = { |
902 | .attrs = hwmon_attributes, | 909 | .attrs = hwmon_default_attributes, |
910 | }; | ||
911 | static const struct attribute_group hwmon_temp_attrgroup = { | ||
912 | .attrs = hwmon_temp_attributes, | ||
903 | }; | 913 | }; |
904 | static const struct attribute_group hwmon_fan_rpm_attrgroup = { | 914 | static const struct attribute_group hwmon_fan_rpm_attrgroup = { |
905 | .attrs = hwmon_fan_rpm_attributes, | 915 | .attrs = hwmon_fan_rpm_attributes, |
@@ -931,13 +941,22 @@ nouveau_hwmon_init(struct drm_device *dev) | |||
931 | } | 941 | } |
932 | dev_set_drvdata(hwmon_dev, dev); | 942 | dev_set_drvdata(hwmon_dev, dev); |
933 | 943 | ||
934 | /* default sysfs entries */ | 944 | /* set the default attributes */ |
935 | ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_attrgroup); | 945 | ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup); |
936 | if (ret) { | 946 | if (ret) { |
937 | if (ret) | 947 | if (ret) |
938 | goto error; | 948 | goto error; |
939 | } | 949 | } |
940 | 950 | ||
951 | /* if the card has a working thermal sensor */ | ||
952 | if (therm->temp_get(therm) >= 0) { | ||
953 | ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup); | ||
954 | if (ret) { | ||
955 | if (ret) | ||
956 | goto error; | ||
957 | } | ||
958 | } | ||
959 | |||
941 | /* if the card has a pwm fan */ | 960 | /* if the card has a pwm fan */ |
942 | /*XXX: incorrect, need better detection for this, some boards have | 961 | /*XXX: incorrect, need better detection for this, some boards have |
943 | * the gpio entries for pwm fan control even when there's no | 962 | * the gpio entries for pwm fan control even when there's no |
@@ -979,11 +998,10 @@ nouveau_hwmon_fini(struct drm_device *dev) | |||
979 | struct nouveau_pm *pm = nouveau_pm(dev); | 998 | struct nouveau_pm *pm = nouveau_pm(dev); |
980 | 999 | ||
981 | if (pm->hwmon) { | 1000 | if (pm->hwmon) { |
982 | sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); | 1001 | sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup); |
983 | sysfs_remove_group(&pm->hwmon->kobj, | 1002 | sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup); |
984 | &hwmon_pwm_fan_attrgroup); | 1003 | sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup); |
985 | sysfs_remove_group(&pm->hwmon->kobj, | 1004 | sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup); |
986 | &hwmon_fan_rpm_attrgroup); | ||
987 | 1005 | ||
988 | hwmon_device_unregister(pm->hwmon); | 1006 | hwmon_device_unregister(pm->hwmon); |
989 | } | 1007 | } |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 2db57990f65c..7f0e6c3f37d1 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -524,6 +524,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
524 | swap_interval <<= 4; | 524 | swap_interval <<= 4; |
525 | if (swap_interval == 0) | 525 | if (swap_interval == 0) |
526 | swap_interval |= 0x100; | 526 | swap_interval |= 0x100; |
527 | if (chan == NULL) | ||
528 | evo_sync(crtc->dev); | ||
527 | 529 | ||
528 | push = evo_wait(sync, 128); | 530 | push = evo_wait(sync, 128); |
529 | if (unlikely(push == NULL)) | 531 | if (unlikely(push == NULL)) |
@@ -586,8 +588,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
586 | sync->addr ^= 0x10; | 588 | sync->addr ^= 0x10; |
587 | sync->data++; | 589 | sync->data++; |
588 | FIRE_RING (chan); | 590 | FIRE_RING (chan); |
589 | } else { | ||
590 | evo_sync(crtc->dev); | ||
591 | } | 591 | } |
592 | 592 | ||
593 | /* queue the flip */ | 593 | /* queue the flip */ |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index d4c633e12863..27769e724b6d 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -468,13 +468,19 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
468 | (rdev->pdev->device == 0x9907) || | 468 | (rdev->pdev->device == 0x9907) || |
469 | (rdev->pdev->device == 0x9908) || | 469 | (rdev->pdev->device == 0x9908) || |
470 | (rdev->pdev->device == 0x9909) || | 470 | (rdev->pdev->device == 0x9909) || |
471 | (rdev->pdev->device == 0x990B) || | ||
472 | (rdev->pdev->device == 0x990C) || | ||
473 | (rdev->pdev->device == 0x990F) || | ||
471 | (rdev->pdev->device == 0x9910) || | 474 | (rdev->pdev->device == 0x9910) || |
472 | (rdev->pdev->device == 0x9917)) { | 475 | (rdev->pdev->device == 0x9917) || |
476 | (rdev->pdev->device == 0x9999)) { | ||
473 | rdev->config.cayman.max_simds_per_se = 6; | 477 | rdev->config.cayman.max_simds_per_se = 6; |
474 | rdev->config.cayman.max_backends_per_se = 2; | 478 | rdev->config.cayman.max_backends_per_se = 2; |
475 | } else if ((rdev->pdev->device == 0x9903) || | 479 | } else if ((rdev->pdev->device == 0x9903) || |
476 | (rdev->pdev->device == 0x9904) || | 480 | (rdev->pdev->device == 0x9904) || |
477 | (rdev->pdev->device == 0x990A) || | 481 | (rdev->pdev->device == 0x990A) || |
482 | (rdev->pdev->device == 0x990D) || | ||
483 | (rdev->pdev->device == 0x990E) || | ||
478 | (rdev->pdev->device == 0x9913) || | 484 | (rdev->pdev->device == 0x9913) || |
479 | (rdev->pdev->device == 0x9918)) { | 485 | (rdev->pdev->device == 0x9918)) { |
480 | rdev->config.cayman.max_simds_per_se = 4; | 486 | rdev->config.cayman.max_simds_per_se = 4; |
@@ -483,6 +489,9 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
483 | (rdev->pdev->device == 0x9990) || | 489 | (rdev->pdev->device == 0x9990) || |
484 | (rdev->pdev->device == 0x9991) || | 490 | (rdev->pdev->device == 0x9991) || |
485 | (rdev->pdev->device == 0x9994) || | 491 | (rdev->pdev->device == 0x9994) || |
492 | (rdev->pdev->device == 0x9995) || | ||
493 | (rdev->pdev->device == 0x9996) || | ||
494 | (rdev->pdev->device == 0x999A) || | ||
486 | (rdev->pdev->device == 0x99A0)) { | 495 | (rdev->pdev->device == 0x99A0)) { |
487 | rdev->config.cayman.max_simds_per_se = 3; | 496 | rdev->config.cayman.max_simds_per_se = 3; |
488 | rdev->config.cayman.max_backends_per_se = 1; | 497 | rdev->config.cayman.max_backends_per_se = 1; |
@@ -616,11 +625,22 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
616 | WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); | 625 | WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); |
617 | WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); | 626 | WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); |
618 | 627 | ||
619 | tmp = gb_addr_config & NUM_PIPES_MASK; | 628 | if ((rdev->config.cayman.max_backends_per_se == 1) && |
620 | tmp = r6xx_remap_render_backend(rdev, tmp, | 629 | (rdev->flags & RADEON_IS_IGP)) { |
621 | rdev->config.cayman.max_backends_per_se * | 630 | if ((disabled_rb_mask & 3) == 1) { |
622 | rdev->config.cayman.max_shader_engines, | 631 | /* RB0 disabled, RB1 enabled */ |
623 | CAYMAN_MAX_BACKENDS, disabled_rb_mask); | 632 | tmp = 0x11111111; |
633 | } else { | ||
634 | /* RB1 disabled, RB0 enabled */ | ||
635 | tmp = 0x00000000; | ||
636 | } | ||
637 | } else { | ||
638 | tmp = gb_addr_config & NUM_PIPES_MASK; | ||
639 | tmp = r6xx_remap_render_backend(rdev, tmp, | ||
640 | rdev->config.cayman.max_backends_per_se * | ||
641 | rdev->config.cayman.max_shader_engines, | ||
642 | CAYMAN_MAX_BACKENDS, disabled_rb_mask); | ||
643 | } | ||
624 | WREG32(GB_BACKEND_MAP, tmp); | 644 | WREG32(GB_BACKEND_MAP, tmp); |
625 | 645 | ||
626 | cgts_tcc_disable = 0xffff0000; | 646 | cgts_tcc_disable = 0xffff0000; |
@@ -1771,6 +1791,7 @@ int cayman_resume(struct radeon_device *rdev) | |||
1771 | int cayman_suspend(struct radeon_device *rdev) | 1791 | int cayman_suspend(struct radeon_device *rdev) |
1772 | { | 1792 | { |
1773 | r600_audio_fini(rdev); | 1793 | r600_audio_fini(rdev); |
1794 | radeon_vm_manager_fini(rdev); | ||
1774 | cayman_cp_enable(rdev, false); | 1795 | cayman_cp_enable(rdev, false); |
1775 | cayman_dma_stop(rdev); | 1796 | cayman_dma_stop(rdev); |
1776 | evergreen_irq_suspend(rdev); | 1797 | evergreen_irq_suspend(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index bedda9caadd9..6e05a2e75a46 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -122,10 +122,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, | |||
122 | goto out_cleanup; | 122 | goto out_cleanup; |
123 | } | 123 | } |
124 | 124 | ||
125 | /* r100 doesn't have dma engine so skip the test */ | 125 | if (rdev->asic->copy.dma) { |
126 | /* also, VRAM-to-VRAM test doesn't make much sense for DMA */ | ||
127 | /* skip it as well if domains are the same */ | ||
128 | if ((rdev->asic->copy.dma) && (sdomain != ddomain)) { | ||
129 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, | 126 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, |
130 | RADEON_BENCHMARK_COPY_DMA, n); | 127 | RADEON_BENCHMARK_COPY_DMA, n); |
131 | if (time < 0) | 128 | if (time < 0) |
@@ -135,13 +132,15 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, | |||
135 | sdomain, ddomain, "dma"); | 132 | sdomain, ddomain, "dma"); |
136 | } | 133 | } |
137 | 134 | ||
138 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, | 135 | if (rdev->asic->copy.blit) { |
139 | RADEON_BENCHMARK_COPY_BLIT, n); | 136 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, |
140 | if (time < 0) | 137 | RADEON_BENCHMARK_COPY_BLIT, n); |
141 | goto out_cleanup; | 138 | if (time < 0) |
142 | if (time > 0) | 139 | goto out_cleanup; |
143 | radeon_benchmark_log_results(n, size, time, | 140 | if (time > 0) |
144 | sdomain, ddomain, "blit"); | 141 | radeon_benchmark_log_results(n, size, time, |
142 | sdomain, ddomain, "blit"); | ||
143 | } | ||
145 | 144 | ||
146 | out_cleanup: | 145 | out_cleanup: |
147 | if (sobj) { | 146 | if (sobj) { |
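
The benchmark change above runs each copy method only when the ASIC actually provides it (the existing DMA-engine guard plus a new guard for blit), rather than assuming blit always exists. The "skip the test when the hook is absent" pattern with function pointers, in a standalone sketch with stand-in names:

#include <stdio.h>

struct copy_ops {
	long (*dma)(unsigned size);    /* NULL when the engine is absent */
	long (*blit)(unsigned size);
};

static long fake_blit(unsigned size) { return size / 100; }

/* Run each benchmark only if the corresponding engine hook exists. */
static void run_benchmarks(const struct copy_ops *ops, unsigned size)
{
	if (ops->dma)
		printf("dma:  %ld\n", ops->dma(size));
	if (ops->blit)
		printf("blit: %ld\n", ops->blit(size));
}

int main(void)
{
	struct copy_ops r100 = { .dma = NULL, .blit = fake_blit };

	run_benchmarks(&r100, 1024 * 1024);    /* only the blit line prints */
	return 0;
}
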
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 9128120da044..bafbe3216952 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -4469,6 +4469,7 @@ int si_resume(struct radeon_device *rdev) | |||
4469 | 4469 | ||
4470 | int si_suspend(struct radeon_device *rdev) | 4470 | int si_suspend(struct radeon_device *rdev) |
4471 | { | 4471 | { |
4472 | radeon_vm_manager_fini(rdev); | ||
4472 | si_cp_enable(rdev, false); | 4473 | si_cp_enable(rdev, false); |
4473 | cayman_dma_stop(rdev); | 4474 | cayman_dma_stop(rdev); |
4474 | si_irq_suspend(rdev); | 4475 | si_irq_suspend(rdev); |
diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h index 668ff4721323..5cde94e56f17 100644 --- a/drivers/hwmon/lm75.h +++ b/drivers/hwmon/lm75.h | |||
@@ -25,7 +25,7 @@ | |||
25 | which contains this code, we don't worry about the wasted space. | 25 | which contains this code, we don't worry about the wasted space. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/hwmon.h> | 28 | #include <linux/kernel.h> |
29 | 29 | ||
30 | /* straight from the datasheet */ | 30 | /* straight from the datasheet */ |
31 | #define LM75_TEMP_MIN (-55000) | 31 | #define LM75_TEMP_MIN (-55000) |
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index 46cde098c11c..e380c6eef3af 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig | |||
@@ -4,7 +4,6 @@ | |||
4 | 4 | ||
5 | menuconfig I2C | 5 | menuconfig I2C |
6 | tristate "I2C support" | 6 | tristate "I2C support" |
7 | depends on !S390 | ||
8 | select RT_MUTEXES | 7 | select RT_MUTEXES |
9 | ---help--- | 8 | ---help--- |
10 | I2C (pronounce: I-squared-C) is a slow serial bus protocol used in | 9 | I2C (pronounce: I-squared-C) is a slow serial bus protocol used in |
@@ -76,6 +75,7 @@ config I2C_HELPER_AUTO | |||
76 | 75 | ||
77 | config I2C_SMBUS | 76 | config I2C_SMBUS |
78 | tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO | 77 | tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO |
78 | depends on GENERIC_HARDIRQS | ||
79 | help | 79 | help |
80 | Say Y here if you want support for SMBus extensions to the I2C | 80 | Say Y here if you want support for SMBus extensions to the I2C |
81 | specification. At the moment, the only supported extension is | 81 | specification. At the moment, the only supported extension is |
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index a3725de92384..adfee98486b1 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -114,7 +114,7 @@ config I2C_I801 | |||
114 | 114 | ||
115 | config I2C_ISCH | 115 | config I2C_ISCH |
116 | tristate "Intel SCH SMBus 1.0" | 116 | tristate "Intel SCH SMBus 1.0" |
117 | depends on PCI | 117 | depends on PCI && GENERIC_HARDIRQS |
118 | select LPC_SCH | 118 | select LPC_SCH |
119 | help | 119 | help |
120 | Say Y here if you want to use SMBus controller on the Intel SCH | 120 | Say Y here if you want to use SMBus controller on the Intel SCH |
@@ -543,6 +543,7 @@ config I2C_NUC900 | |||
543 | 543 | ||
544 | config I2C_OCORES | 544 | config I2C_OCORES |
545 | tristate "OpenCores I2C Controller" | 545 | tristate "OpenCores I2C Controller" |
546 | depends on GENERIC_HARDIRQS | ||
546 | help | 547 | help |
547 | If you say yes to this option, support will be included for the | 548 | If you say yes to this option, support will be included for the |
548 | OpenCores I2C controller. For details see | 549 | OpenCores I2C controller. For details see |
@@ -777,7 +778,7 @@ config I2C_DIOLAN_U2C | |||
777 | 778 | ||
778 | config I2C_PARPORT | 779 | config I2C_PARPORT |
779 | tristate "Parallel port adapter" | 780 | tristate "Parallel port adapter" |
780 | depends on PARPORT | 781 | depends on PARPORT && GENERIC_HARDIRQS |
781 | select I2C_ALGOBIT | 782 | select I2C_ALGOBIT |
782 | select I2C_SMBUS | 783 | select I2C_SMBUS |
783 | help | 784 | help |
@@ -802,6 +803,7 @@ config I2C_PARPORT | |||
802 | 803 | ||
803 | config I2C_PARPORT_LIGHT | 804 | config I2C_PARPORT_LIGHT |
804 | tristate "Parallel port adapter (light)" | 805 | tristate "Parallel port adapter (light)" |
806 | depends on GENERIC_HARDIRQS | ||
805 | select I2C_ALGOBIT | 807 | select I2C_ALGOBIT |
806 | select I2C_SMBUS | 808 | select I2C_SMBUS |
807 | help | 809 | help |
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index e9205ee8cf94..130f02cc9d94 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c | |||
@@ -80,6 +80,7 @@ | |||
80 | /* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ | 80 | /* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ |
81 | #define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 | 81 | #define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 |
82 | #define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a | 82 | #define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a |
83 | #define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15 | ||
83 | 84 | ||
84 | #define ISMT_DESC_ENTRIES 32 /* number of descriptor entries */ | 85 | #define ISMT_DESC_ENTRIES 32 /* number of descriptor entries */ |
85 | #define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */ | 86 | #define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */ |
@@ -185,6 +186,7 @@ struct ismt_priv { | |||
185 | static const DEFINE_PCI_DEVICE_TABLE(ismt_ids) = { | 186 | static const DEFINE_PCI_DEVICE_TABLE(ismt_ids) = { |
186 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, | 187 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, |
187 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, | 188 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, |
189 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) }, | ||
188 | { 0, } | 190 | { 0, } |
189 | }; | 191 | }; |
190 | 192 | ||
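
Note: the i2c-ismt hunks add the Avoton device ID and a matching entry in the zero-terminated PCI ID table, which is what makes the driver probe on that part. A small self-contained sketch of the same table-match pattern; the struct and lookup helper below are hypothetical, only the vendor/device numbers come from the hunk.

#include <stdio.h>
#include <stdint.h>

struct id_entry { uint16_t vendor, device; };

/* Zero entry terminates the table, as in ismt_ids[]. */
static const struct id_entry ismt_like_ids[] = {
	{ 0x8086, 0x0c59 },	/* S1200 SMT0 */
	{ 0x8086, 0x0c5a },	/* S1200 SMT1 */
	{ 0x8086, 0x1f15 },	/* Avoton SMT (newly added) */
	{ 0, 0 }
};

static int id_table_match(const struct id_entry *tbl, uint16_t ven, uint16_t dev)
{
	for (; tbl->vendor; tbl++)
		if (tbl->vendor == ven && tbl->device == dev)
			return 1;
	return 0;
}

int main(void)
{
	printf("Avoton matches: %d\n", id_table_match(ismt_like_ids, 0x8086, 0x1f15));
	return 0;
}
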
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 36704e3ab3fa..b714776b6ddd 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c | |||
@@ -411,7 +411,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) | |||
411 | int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE; | 411 | int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE; |
412 | u32 clk_divisor; | 412 | u32 clk_divisor; |
413 | 413 | ||
414 | tegra_i2c_clock_enable(i2c_dev); | 414 | err = tegra_i2c_clock_enable(i2c_dev); |
415 | if (err < 0) { | ||
416 | dev_err(i2c_dev->dev, "Clock enable failed %d\n", err); | ||
417 | return err; | ||
418 | } | ||
415 | 419 | ||
416 | tegra_periph_reset_assert(i2c_dev->div_clk); | 420 | tegra_periph_reset_assert(i2c_dev->div_clk); |
417 | udelay(2); | 421 | udelay(2); |
@@ -628,7 +632,12 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], | |||
628 | if (i2c_dev->is_suspended) | 632 | if (i2c_dev->is_suspended) |
629 | return -EBUSY; | 633 | return -EBUSY; |
630 | 634 | ||
631 | tegra_i2c_clock_enable(i2c_dev); | 635 | ret = tegra_i2c_clock_enable(i2c_dev); |
636 | if (ret < 0) { | ||
637 | dev_err(i2c_dev->dev, "Clock enable failed %d\n", ret); | ||
638 | return ret; | ||
639 | } | ||
640 | |||
632 | for (i = 0; i < num; i++) { | 641 | for (i = 0; i < num; i++) { |
633 | enum msg_end_type end_type = MSG_END_STOP; | 642 | enum msg_end_type end_type = MSG_END_STOP; |
634 | if (i < (num - 1)) { | 643 | if (i < (num - 1)) { |
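
Note: the tegra hunks stop ignoring the return value of tegra_i2c_clock_enable() and fail early with a diagnostic instead. A brief userspace sketch of that check-and-propagate pattern, with hypothetical names throughout.

#include <stdio.h>
#include <errno.h>

static int clock_enable(int should_fail)
{
	return should_fail ? -EIO : 0;
}

static int controller_init(int should_fail)
{
	int err = clock_enable(should_fail);

	if (err < 0) {
		fprintf(stderr, "Clock enable failed %d\n", err);
		return err;		/* abort instead of continuing with a dead clock */
	}
	/* ... rest of the init sequence ... */
	return 0;
}

int main(void)
{
	printf("init ok:   %d\n", controller_init(0));
	printf("init fail: %d\n", controller_init(1));
	return 0;
}
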
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c index f3b8f9a6a89b..966a18a5d12d 100644 --- a/drivers/i2c/muxes/i2c-mux-pca9541.c +++ b/drivers/i2c/muxes/i2c-mux-pca9541.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (c) 2010 Ericsson AB. | 4 | * Copyright (c) 2010 Ericsson AB. |
5 | * | 5 | * |
6 | * Author: Guenter Roeck <guenter.roeck@ericsson.com> | 6 | * Author: Guenter Roeck <linux@roeck-us.net> |
7 | * | 7 | * |
8 | * Derived from: | 8 | * Derived from: |
9 | * pca954x.c | 9 | * pca954x.c |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 565bfb161c1a..a3fde52840ca 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -1575,6 +1575,12 @@ static int c4iw_reconnect(struct c4iw_ep *ep) | |||
1575 | 1575 | ||
1576 | neigh = dst_neigh_lookup(ep->dst, | 1576 | neigh = dst_neigh_lookup(ep->dst, |
1577 | &ep->com.cm_id->remote_addr.sin_addr.s_addr); | 1577 | &ep->com.cm_id->remote_addr.sin_addr.s_addr); |
1578 | if (!neigh) { | ||
1579 | pr_err("%s - cannot alloc neigh.\n", __func__); | ||
1580 | err = -ENOMEM; | ||
1581 | goto fail4; | ||
1582 | } | ||
1583 | |||
1578 | /* get a l2t entry */ | 1584 | /* get a l2t entry */ |
1579 | if (neigh->dev->flags & IFF_LOOPBACK) { | 1585 | if (neigh->dev->flags & IFF_LOOPBACK) { |
1580 | PDBG("%s LOOPBACK\n", __func__); | 1586 | PDBG("%s LOOPBACK\n", __func__); |
@@ -3053,6 +3059,12 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) | |||
3053 | dst = &rt->dst; | 3059 | dst = &rt->dst; |
3054 | neigh = dst_neigh_lookup_skb(dst, skb); | 3060 | neigh = dst_neigh_lookup_skb(dst, skb); |
3055 | 3061 | ||
3062 | if (!neigh) { | ||
3063 | pr_err("%s - failed to allocate neigh!\n", | ||
3064 | __func__); | ||
3065 | goto free_dst; | ||
3066 | } | ||
3067 | |||
3056 | if (neigh->dev->flags & IFF_LOOPBACK) { | 3068 | if (neigh->dev->flags & IFF_LOOPBACK) { |
3057 | pdev = ip_dev_find(&init_net, iph->daddr); | 3069 | pdev = ip_dev_find(&init_net, iph->daddr); |
3058 | e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, | 3070 | e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, |
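
Note: both cm.c hunks add a NULL check after the neighbour lookup and bail out through the function's existing cleanup label rather than dereferencing a failed allocation. A self-contained sketch of the same check-then-goto-cleanup idiom, with hypothetical names.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static void *neigh_lookup(int should_fail)
{
	return should_fail ? NULL : malloc(16);
}

static int reconnect(int should_fail)
{
	int err = 0;
	void *neigh = neigh_lookup(should_fail);

	if (!neigh) {
		fprintf(stderr, "%s - cannot alloc neigh.\n", __func__);
		err = -ENOMEM;
		goto fail;
	}
	/* ... use the neighbour entry ... */
	free(neigh);
	return 0;
fail:
	/* release anything acquired before the failure */
	return err;
}

int main(void)
{
	printf("ok:   %d\n", reconnect(0));
	printf("fail: %d\n", reconnect(1));
	return 0;
}
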
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 7cd74e29cbc8..9135606c8649 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c | |||
@@ -158,14 +158,10 @@ static unsigned int get_time_pit(void) | |||
158 | #define GET_TIME(x) rdtscl(x) | 158 | #define GET_TIME(x) rdtscl(x) |
159 | #define DELTA(x,y) ((y)-(x)) | 159 | #define DELTA(x,y) ((y)-(x)) |
160 | #define TIME_NAME "TSC" | 160 | #define TIME_NAME "TSC" |
161 | #elif defined(__alpha__) | 161 | #elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_TILE) |
162 | #define GET_TIME(x) do { x = get_cycles(); } while (0) | 162 | #define GET_TIME(x) do { x = get_cycles(); } while (0) |
163 | #define DELTA(x,y) ((y)-(x)) | 163 | #define DELTA(x,y) ((y)-(x)) |
164 | #define TIME_NAME "PCC" | 164 | #define TIME_NAME "get_cycles" |
165 | #elif defined(CONFIG_MN10300) || defined(CONFIG_TILE) | ||
166 | #define GET_TIME(x) do { x = get_cycles(); } while (0) | ||
167 | #define DELTA(x, y) ((x) - (y)) | ||
168 | #define TIME_NAME "TSC" | ||
169 | #else | 165 | #else |
170 | #define FAKE_TIME | 166 | #define FAKE_TIME |
171 | static unsigned long analog_faketime = 0; | 167 | static unsigned long analog_faketime = 0; |
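
Note: the consolidated branch above keeps DELTA(x,y) defined as (y)-(x) on the value returned by get_cycles(). A tiny sketch of why that stays correct even when the counter wraps between the two samples, assuming an unsigned 32-bit counter.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t start = 0xFFFFFFF0u;	/* sample taken just before wrap */
	uint32_t end   = 0x00000010u;	/* sample taken just after wrap */

	/* unsigned subtraction is modular, so the elapsed count is still 32 */
	printf("elapsed cycles: %u\n", end - start);
	return 0;
}
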
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig index 5313c9ea44dc..d9edcc94c2a8 100644 --- a/drivers/isdn/hisax/Kconfig +++ b/drivers/isdn/hisax/Kconfig | |||
@@ -237,7 +237,8 @@ config HISAX_MIC | |||
237 | 237 | ||
238 | config HISAX_NETJET | 238 | config HISAX_NETJET |
239 | bool "NETjet card" | 239 | bool "NETjet card" |
240 | depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) | 240 | depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) |
241 | depends on VIRT_TO_BUS | ||
241 | help | 242 | help |
242 | This enables HiSax support for the NetJet from Traverse | 243 | This enables HiSax support for the NetJet from Traverse |
243 | Technologies. | 244 | Technologies. |
@@ -248,7 +249,8 @@ config HISAX_NETJET | |||
248 | 249 | ||
249 | config HISAX_NETJET_U | 250 | config HISAX_NETJET_U |
250 | bool "NETspider U card" | 251 | bool "NETspider U card" |
251 | depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) | 252 | depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) |
253 | depends on VIRT_TO_BUS | ||
252 | help | 254 | help |
253 | This enables HiSax support for the Netspider U interface ISDN card | 255 | This enables HiSax support for the Netspider U interface ISDN card |
254 | from Traverse Technologies. | 256 | from Traverse Technologies. |
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 3c955e10a618..c6083132c4b8 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -1025,6 +1025,8 @@ void dm_bufio_prefetch(struct dm_bufio_client *c, | |||
1025 | { | 1025 | { |
1026 | struct blk_plug plug; | 1026 | struct blk_plug plug; |
1027 | 1027 | ||
1028 | BUG_ON(dm_bufio_in_request()); | ||
1029 | |||
1028 | blk_start_plug(&plug); | 1030 | blk_start_plug(&plug); |
1029 | dm_bufio_lock(c); | 1031 | dm_bufio_lock(c); |
1030 | 1032 | ||
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index fbd3625f2748..83e995fece88 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c | |||
@@ -83,6 +83,8 @@ struct cache_disk_superblock { | |||
83 | __le32 read_misses; | 83 | __le32 read_misses; |
84 | __le32 write_hits; | 84 | __le32 write_hits; |
85 | __le32 write_misses; | 85 | __le32 write_misses; |
86 | |||
87 | __le32 policy_version[CACHE_POLICY_VERSION_SIZE]; | ||
86 | } __packed; | 88 | } __packed; |
87 | 89 | ||
88 | struct dm_cache_metadata { | 90 | struct dm_cache_metadata { |
@@ -109,6 +111,7 @@ struct dm_cache_metadata { | |||
109 | bool clean_when_opened:1; | 111 | bool clean_when_opened:1; |
110 | 112 | ||
111 | char policy_name[CACHE_POLICY_NAME_SIZE]; | 113 | char policy_name[CACHE_POLICY_NAME_SIZE]; |
114 | unsigned policy_version[CACHE_POLICY_VERSION_SIZE]; | ||
112 | size_t policy_hint_size; | 115 | size_t policy_hint_size; |
113 | struct dm_cache_statistics stats; | 116 | struct dm_cache_statistics stats; |
114 | }; | 117 | }; |
@@ -268,7 +271,8 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) | |||
268 | memset(disk_super->uuid, 0, sizeof(disk_super->uuid)); | 271 | memset(disk_super->uuid, 0, sizeof(disk_super->uuid)); |
269 | disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC); | 272 | disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC); |
270 | disk_super->version = cpu_to_le32(CACHE_VERSION); | 273 | disk_super->version = cpu_to_le32(CACHE_VERSION); |
271 | memset(disk_super->policy_name, 0, CACHE_POLICY_NAME_SIZE); | 274 | memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name)); |
275 | memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version)); | ||
272 | disk_super->policy_hint_size = 0; | 276 | disk_super->policy_hint_size = 0; |
273 | 277 | ||
274 | r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, | 278 | r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, |
@@ -284,7 +288,6 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) | |||
284 | disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); | 288 | disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); |
285 | disk_super->data_block_size = cpu_to_le32(cmd->data_block_size); | 289 | disk_super->data_block_size = cpu_to_le32(cmd->data_block_size); |
286 | disk_super->cache_blocks = cpu_to_le32(0); | 290 | disk_super->cache_blocks = cpu_to_le32(0); |
287 | memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name)); | ||
288 | 291 | ||
289 | disk_super->read_hits = cpu_to_le32(0); | 292 | disk_super->read_hits = cpu_to_le32(0); |
290 | disk_super->read_misses = cpu_to_le32(0); | 293 | disk_super->read_misses = cpu_to_le32(0); |
@@ -478,6 +481,9 @@ static void read_superblock_fields(struct dm_cache_metadata *cmd, | |||
478 | cmd->data_block_size = le32_to_cpu(disk_super->data_block_size); | 481 | cmd->data_block_size = le32_to_cpu(disk_super->data_block_size); |
479 | cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks)); | 482 | cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks)); |
480 | strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name)); | 483 | strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name)); |
484 | cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]); | ||
485 | cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]); | ||
486 | cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]); | ||
481 | cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size); | 487 | cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size); |
482 | 488 | ||
483 | cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits); | 489 | cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits); |
@@ -572,6 +578,9 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, | |||
572 | disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); | 578 | disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); |
573 | disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks)); | 579 | disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks)); |
574 | strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name)); | 580 | strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name)); |
581 | disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]); | ||
582 | disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]); | ||
583 | disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]); | ||
575 | 584 | ||
576 | disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits); | 585 | disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits); |
577 | disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses); | 586 | disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses); |
@@ -854,18 +863,43 @@ struct thunk { | |||
854 | bool hints_valid; | 863 | bool hints_valid; |
855 | }; | 864 | }; |
856 | 865 | ||
866 | static bool policy_unchanged(struct dm_cache_metadata *cmd, | ||
867 | struct dm_cache_policy *policy) | ||
868 | { | ||
869 | const char *policy_name = dm_cache_policy_get_name(policy); | ||
870 | const unsigned *policy_version = dm_cache_policy_get_version(policy); | ||
871 | size_t policy_hint_size = dm_cache_policy_get_hint_size(policy); | ||
872 | |||
873 | /* | ||
874 | * Ensure policy names match. | ||
875 | */ | ||
876 | if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name))) | ||
877 | return false; | ||
878 | |||
879 | /* | ||
880 | * Ensure policy major versions match. | ||
881 | */ | ||
882 | if (cmd->policy_version[0] != policy_version[0]) | ||
883 | return false; | ||
884 | |||
885 | /* | ||
886 | * Ensure policy hint sizes match. | ||
887 | */ | ||
888 | if (cmd->policy_hint_size != policy_hint_size) | ||
889 | return false; | ||
890 | |||
891 | return true; | ||
892 | } | ||
893 | |||
857 | static bool hints_array_initialized(struct dm_cache_metadata *cmd) | 894 | static bool hints_array_initialized(struct dm_cache_metadata *cmd) |
858 | { | 895 | { |
859 | return cmd->hint_root && cmd->policy_hint_size; | 896 | return cmd->hint_root && cmd->policy_hint_size; |
860 | } | 897 | } |
861 | 898 | ||
862 | static bool hints_array_available(struct dm_cache_metadata *cmd, | 899 | static bool hints_array_available(struct dm_cache_metadata *cmd, |
863 | const char *policy_name) | 900 | struct dm_cache_policy *policy) |
864 | { | 901 | { |
865 | bool policy_names_match = !strncmp(cmd->policy_name, policy_name, | 902 | return cmd->clean_when_opened && policy_unchanged(cmd, policy) && |
866 | sizeof(cmd->policy_name)); | ||
867 | |||
868 | return cmd->clean_when_opened && policy_names_match && | ||
869 | hints_array_initialized(cmd); | 903 | hints_array_initialized(cmd); |
870 | } | 904 | } |
871 | 905 | ||
@@ -899,7 +933,8 @@ static int __load_mapping(void *context, uint64_t cblock, void *leaf) | |||
899 | return r; | 933 | return r; |
900 | } | 934 | } |
901 | 935 | ||
902 | static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_name, | 936 | static int __load_mappings(struct dm_cache_metadata *cmd, |
937 | struct dm_cache_policy *policy, | ||
903 | load_mapping_fn fn, void *context) | 938 | load_mapping_fn fn, void *context) |
904 | { | 939 | { |
905 | struct thunk thunk; | 940 | struct thunk thunk; |
@@ -909,18 +944,19 @@ static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_nam | |||
909 | 944 | ||
910 | thunk.cmd = cmd; | 945 | thunk.cmd = cmd; |
911 | thunk.respect_dirty_flags = cmd->clean_when_opened; | 946 | thunk.respect_dirty_flags = cmd->clean_when_opened; |
912 | thunk.hints_valid = hints_array_available(cmd, policy_name); | 947 | thunk.hints_valid = hints_array_available(cmd, policy); |
913 | 948 | ||
914 | return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk); | 949 | return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk); |
915 | } | 950 | } |
916 | 951 | ||
917 | int dm_cache_load_mappings(struct dm_cache_metadata *cmd, const char *policy_name, | 952 | int dm_cache_load_mappings(struct dm_cache_metadata *cmd, |
953 | struct dm_cache_policy *policy, | ||
918 | load_mapping_fn fn, void *context) | 954 | load_mapping_fn fn, void *context) |
919 | { | 955 | { |
920 | int r; | 956 | int r; |
921 | 957 | ||
922 | down_read(&cmd->root_lock); | 958 | down_read(&cmd->root_lock); |
923 | r = __load_mappings(cmd, policy_name, fn, context); | 959 | r = __load_mappings(cmd, policy, fn, context); |
924 | up_read(&cmd->root_lock); | 960 | up_read(&cmd->root_lock); |
925 | 961 | ||
926 | return r; | 962 | return r; |
@@ -979,7 +1015,7 @@ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty | |||
979 | /* nothing to be done */ | 1015 | /* nothing to be done */ |
980 | return 0; | 1016 | return 0; |
981 | 1017 | ||
982 | value = pack_value(oblock, flags | (dirty ? M_DIRTY : 0)); | 1018 | value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0)); |
983 | __dm_bless_for_disk(&value); | 1019 | __dm_bless_for_disk(&value); |
984 | 1020 | ||
985 | r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock), | 1021 | r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock), |
@@ -1070,13 +1106,15 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po | |||
1070 | __le32 value; | 1106 | __le32 value; |
1071 | size_t hint_size; | 1107 | size_t hint_size; |
1072 | const char *policy_name = dm_cache_policy_get_name(policy); | 1108 | const char *policy_name = dm_cache_policy_get_name(policy); |
1109 | const unsigned *policy_version = dm_cache_policy_get_version(policy); | ||
1073 | 1110 | ||
1074 | if (!policy_name[0] || | 1111 | if (!policy_name[0] || |
1075 | (strlen(policy_name) > sizeof(cmd->policy_name) - 1)) | 1112 | (strlen(policy_name) > sizeof(cmd->policy_name) - 1)) |
1076 | return -EINVAL; | 1113 | return -EINVAL; |
1077 | 1114 | ||
1078 | if (strcmp(cmd->policy_name, policy_name)) { | 1115 | if (!policy_unchanged(cmd, policy)) { |
1079 | strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name)); | 1116 | strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name)); |
1117 | memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version)); | ||
1080 | 1118 | ||
1081 | hint_size = dm_cache_policy_get_hint_size(policy); | 1119 | hint_size = dm_cache_policy_get_hint_size(policy); |
1082 | if (!hint_size) | 1120 | if (!hint_size) |
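
Note: the dm-cache-metadata hunks persist a three-element policy version in the superblock, converting each element with cpu_to_le32()/le32_to_cpu() and comparing only the major number in policy_unchanged(). A userspace analogue of that round-trip, using glibc's htole32()/le32toh() in place of the kernel helpers; names and values are illustrative.

#include <stdio.h>
#include <stdint.h>
#include <endian.h>	/* htole32/le32toh, glibc-specific */

#define VERSION_SIZE 3

int main(void)
{
	unsigned version[VERSION_SIZE] = { 1, 0, 0 };
	uint32_t on_disk[VERSION_SIZE];
	unsigned loaded[VERSION_SIZE];
	int i;

	for (i = 0; i < VERSION_SIZE; i++)	/* commit path: host to little-endian */
		on_disk[i] = htole32(version[i]);

	for (i = 0; i < VERSION_SIZE; i++)	/* open path: little-endian to host */
		loaded[i] = le32toh(on_disk[i]);

	/* compatibility is judged on the major number only */
	printf("major versions match: %d\n", loaded[0] == version[0]);
	return 0;
}
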
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h index 135864ea0eee..f45cef21f3d0 100644 --- a/drivers/md/dm-cache-metadata.h +++ b/drivers/md/dm-cache-metadata.h | |||
@@ -89,7 +89,7 @@ typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock, | |||
89 | dm_cblock_t cblock, bool dirty, | 89 | dm_cblock_t cblock, bool dirty, |
90 | uint32_t hint, bool hint_valid); | 90 | uint32_t hint, bool hint_valid); |
91 | int dm_cache_load_mappings(struct dm_cache_metadata *cmd, | 91 | int dm_cache_load_mappings(struct dm_cache_metadata *cmd, |
92 | const char *policy_name, | 92 | struct dm_cache_policy *policy, |
93 | load_mapping_fn fn, | 93 | load_mapping_fn fn, |
94 | void *context); | 94 | void *context); |
95 | 95 | ||
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c index cc05d70b3cb8..b04d1f904d07 100644 --- a/drivers/md/dm-cache-policy-cleaner.c +++ b/drivers/md/dm-cache-policy-cleaner.c | |||
@@ -17,7 +17,6 @@ | |||
17 | /*----------------------------------------------------------------*/ | 17 | /*----------------------------------------------------------------*/ |
18 | 18 | ||
19 | #define DM_MSG_PREFIX "cache cleaner" | 19 | #define DM_MSG_PREFIX "cache cleaner" |
20 | #define CLEANER_VERSION "1.0.0" | ||
21 | 20 | ||
22 | /* Cache entry struct. */ | 21 | /* Cache entry struct. */ |
23 | struct wb_cache_entry { | 22 | struct wb_cache_entry { |
@@ -434,6 +433,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size, | |||
434 | 433 | ||
435 | static struct dm_cache_policy_type wb_policy_type = { | 434 | static struct dm_cache_policy_type wb_policy_type = { |
436 | .name = "cleaner", | 435 | .name = "cleaner", |
436 | .version = {1, 0, 0}, | ||
437 | .hint_size = 0, | 437 | .hint_size = 0, |
438 | .owner = THIS_MODULE, | 438 | .owner = THIS_MODULE, |
439 | .create = wb_create | 439 | .create = wb_create |
@@ -446,7 +446,10 @@ static int __init wb_init(void) | |||
446 | if (r < 0) | 446 | if (r < 0) |
447 | DMERR("register failed %d", r); | 447 | DMERR("register failed %d", r); |
448 | else | 448 | else |
449 | DMINFO("version " CLEANER_VERSION " loaded"); | 449 | DMINFO("version %u.%u.%u loaded", |
450 | wb_policy_type.version[0], | ||
451 | wb_policy_type.version[1], | ||
452 | wb_policy_type.version[2]); | ||
450 | 453 | ||
451 | return r; | 454 | return r; |
452 | } | 455 | } |
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h index 52a75beeced5..0928abdc49f0 100644 --- a/drivers/md/dm-cache-policy-internal.h +++ b/drivers/md/dm-cache-policy-internal.h | |||
@@ -117,6 +117,8 @@ void dm_cache_policy_destroy(struct dm_cache_policy *p); | |||
117 | */ | 117 | */ |
118 | const char *dm_cache_policy_get_name(struct dm_cache_policy *p); | 118 | const char *dm_cache_policy_get_name(struct dm_cache_policy *p); |
119 | 119 | ||
120 | const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p); | ||
121 | |||
120 | size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p); | 122 | size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p); |
121 | 123 | ||
122 | /*----------------------------------------------------------------*/ | 124 | /*----------------------------------------------------------------*/ |
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 964153255076..dc112a7137fe 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/vmalloc.h> | 14 | #include <linux/vmalloc.h> |
15 | 15 | ||
16 | #define DM_MSG_PREFIX "cache-policy-mq" | 16 | #define DM_MSG_PREFIX "cache-policy-mq" |
17 | #define MQ_VERSION "1.0.0" | ||
18 | 17 | ||
19 | static struct kmem_cache *mq_entry_cache; | 18 | static struct kmem_cache *mq_entry_cache; |
20 | 19 | ||
@@ -1133,6 +1132,7 @@ bad_cache_alloc: | |||
1133 | 1132 | ||
1134 | static struct dm_cache_policy_type mq_policy_type = { | 1133 | static struct dm_cache_policy_type mq_policy_type = { |
1135 | .name = "mq", | 1134 | .name = "mq", |
1135 | .version = {1, 0, 0}, | ||
1136 | .hint_size = 4, | 1136 | .hint_size = 4, |
1137 | .owner = THIS_MODULE, | 1137 | .owner = THIS_MODULE, |
1138 | .create = mq_create | 1138 | .create = mq_create |
@@ -1140,6 +1140,7 @@ static struct dm_cache_policy_type mq_policy_type = { | |||
1140 | 1140 | ||
1141 | static struct dm_cache_policy_type default_policy_type = { | 1141 | static struct dm_cache_policy_type default_policy_type = { |
1142 | .name = "default", | 1142 | .name = "default", |
1143 | .version = {1, 0, 0}, | ||
1143 | .hint_size = 4, | 1144 | .hint_size = 4, |
1144 | .owner = THIS_MODULE, | 1145 | .owner = THIS_MODULE, |
1145 | .create = mq_create | 1146 | .create = mq_create |
@@ -1164,7 +1165,10 @@ static int __init mq_init(void) | |||
1164 | 1165 | ||
1165 | r = dm_cache_policy_register(&default_policy_type); | 1166 | r = dm_cache_policy_register(&default_policy_type); |
1166 | if (!r) { | 1167 | if (!r) { |
1167 | DMINFO("version " MQ_VERSION " loaded"); | 1168 | DMINFO("version %u.%u.%u loaded", |
1169 | mq_policy_type.version[0], | ||
1170 | mq_policy_type.version[1], | ||
1171 | mq_policy_type.version[2]); | ||
1168 | return 0; | 1172 | return 0; |
1169 | } | 1173 | } |
1170 | 1174 | ||
diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c index 2cbf5fdaac52..21c03c570c06 100644 --- a/drivers/md/dm-cache-policy.c +++ b/drivers/md/dm-cache-policy.c | |||
@@ -150,6 +150,14 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p) | |||
150 | } | 150 | } |
151 | EXPORT_SYMBOL_GPL(dm_cache_policy_get_name); | 151 | EXPORT_SYMBOL_GPL(dm_cache_policy_get_name); |
152 | 152 | ||
153 | const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p) | ||
154 | { | ||
155 | struct dm_cache_policy_type *t = p->private; | ||
156 | |||
157 | return t->version; | ||
158 | } | ||
159 | EXPORT_SYMBOL_GPL(dm_cache_policy_get_version); | ||
160 | |||
153 | size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p) | 161 | size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p) |
154 | { | 162 | { |
155 | struct dm_cache_policy_type *t = p->private; | 163 | struct dm_cache_policy_type *t = p->private; |
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h index f0f51b260544..558bdfdabf5f 100644 --- a/drivers/md/dm-cache-policy.h +++ b/drivers/md/dm-cache-policy.h | |||
@@ -196,6 +196,7 @@ struct dm_cache_policy { | |||
196 | * We maintain a little register of the different policy types. | 196 | * We maintain a little register of the different policy types. |
197 | */ | 197 | */ |
198 | #define CACHE_POLICY_NAME_SIZE 16 | 198 | #define CACHE_POLICY_NAME_SIZE 16 |
199 | #define CACHE_POLICY_VERSION_SIZE 3 | ||
199 | 200 | ||
200 | struct dm_cache_policy_type { | 201 | struct dm_cache_policy_type { |
201 | /* For use by the register code only. */ | 202 | /* For use by the register code only. */ |
@@ -206,6 +207,7 @@ struct dm_cache_policy_type { | |||
206 | * what gets passed on the target line to select your policy. | 207 | * what gets passed on the target line to select your policy. |
207 | */ | 208 | */ |
208 | char name[CACHE_POLICY_NAME_SIZE]; | 209 | char name[CACHE_POLICY_NAME_SIZE]; |
210 | unsigned version[CACHE_POLICY_VERSION_SIZE]; | ||
209 | 211 | ||
210 | /* | 212 | /* |
211 | * Policies may store a hint for each cache block. | 213 | * Policies may store a hint for each cache block. |
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 0f4e84b15c30..66120bd46d15 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -142,6 +142,7 @@ struct cache { | |||
142 | spinlock_t lock; | 142 | spinlock_t lock; |
143 | struct bio_list deferred_bios; | 143 | struct bio_list deferred_bios; |
144 | struct bio_list deferred_flush_bios; | 144 | struct bio_list deferred_flush_bios; |
145 | struct bio_list deferred_writethrough_bios; | ||
145 | struct list_head quiesced_migrations; | 146 | struct list_head quiesced_migrations; |
146 | struct list_head completed_migrations; | 147 | struct list_head completed_migrations; |
147 | struct list_head need_commit_migrations; | 148 | struct list_head need_commit_migrations; |
@@ -158,7 +159,7 @@ struct cache { | |||
158 | /* | 159 | /* |
159 | * origin_blocks entries, discarded if set. | 160 | * origin_blocks entries, discarded if set. |
160 | */ | 161 | */ |
161 | sector_t discard_block_size; /* a power of 2 times sectors per block */ | 162 | uint32_t discard_block_size; /* a power of 2 times sectors per block */ |
162 | dm_dblock_t discard_nr_blocks; | 163 | dm_dblock_t discard_nr_blocks; |
163 | unsigned long *discard_bitset; | 164 | unsigned long *discard_bitset; |
164 | 165 | ||
@@ -199,6 +200,11 @@ struct per_bio_data { | |||
199 | bool tick:1; | 200 | bool tick:1; |
200 | unsigned req_nr:2; | 201 | unsigned req_nr:2; |
201 | struct dm_deferred_entry *all_io_entry; | 202 | struct dm_deferred_entry *all_io_entry; |
203 | |||
204 | /* writethrough fields */ | ||
205 | struct cache *cache; | ||
206 | dm_cblock_t cblock; | ||
207 | bio_end_io_t *saved_bi_end_io; | ||
202 | }; | 208 | }; |
203 | 209 | ||
204 | struct dm_cache_migration { | 210 | struct dm_cache_migration { |
@@ -412,17 +418,24 @@ static bool block_size_is_power_of_two(struct cache *cache) | |||
412 | return cache->sectors_per_block_shift >= 0; | 418 | return cache->sectors_per_block_shift >= 0; |
413 | } | 419 | } |
414 | 420 | ||
421 | static dm_block_t block_div(dm_block_t b, uint32_t n) | ||
422 | { | ||
423 | do_div(b, n); | ||
424 | |||
425 | return b; | ||
426 | } | ||
427 | |||
415 | static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) | 428 | static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) |
416 | { | 429 | { |
417 | sector_t discard_blocks = cache->discard_block_size; | 430 | uint32_t discard_blocks = cache->discard_block_size; |
418 | dm_block_t b = from_oblock(oblock); | 431 | dm_block_t b = from_oblock(oblock); |
419 | 432 | ||
420 | if (!block_size_is_power_of_two(cache)) | 433 | if (!block_size_is_power_of_two(cache)) |
421 | (void) sector_div(discard_blocks, cache->sectors_per_block); | 434 | discard_blocks = discard_blocks / cache->sectors_per_block; |
422 | else | 435 | else |
423 | discard_blocks >>= cache->sectors_per_block_shift; | 436 | discard_blocks >>= cache->sectors_per_block_shift; |
424 | 437 | ||
425 | (void) sector_div(b, discard_blocks); | 438 | b = block_div(b, discard_blocks); |
426 | 439 | ||
427 | return to_dblock(b); | 440 | return to_dblock(b); |
428 | } | 441 | } |
@@ -609,6 +622,56 @@ static void issue(struct cache *cache, struct bio *bio) | |||
609 | spin_unlock_irqrestore(&cache->lock, flags); | 622 | spin_unlock_irqrestore(&cache->lock, flags); |
610 | } | 623 | } |
611 | 624 | ||
625 | static void defer_writethrough_bio(struct cache *cache, struct bio *bio) | ||
626 | { | ||
627 | unsigned long flags; | ||
628 | |||
629 | spin_lock_irqsave(&cache->lock, flags); | ||
630 | bio_list_add(&cache->deferred_writethrough_bios, bio); | ||
631 | spin_unlock_irqrestore(&cache->lock, flags); | ||
632 | |||
633 | wake_worker(cache); | ||
634 | } | ||
635 | |||
636 | static void writethrough_endio(struct bio *bio, int err) | ||
637 | { | ||
638 | struct per_bio_data *pb = get_per_bio_data(bio); | ||
639 | bio->bi_end_io = pb->saved_bi_end_io; | ||
640 | |||
641 | if (err) { | ||
642 | bio_endio(bio, err); | ||
643 | return; | ||
644 | } | ||
645 | |||
646 | remap_to_cache(pb->cache, bio, pb->cblock); | ||
647 | |||
648 | /* | ||
649 | * We can't issue this bio directly, since we're in interrupt | ||
650 | * context. So it gets put on a bio list for processing by the | ||
651 | * worker thread. | ||
652 | */ | ||
653 | defer_writethrough_bio(pb->cache, bio); | ||
654 | } | ||
655 | |||
656 | /* | ||
657 | * When running in writethrough mode we need to send writes to clean blocks | ||
658 | * to both the cache and origin devices. In future we'd like to clone the | ||
659 | * bio and send them in parallel, but for now we're doing them in | ||
660 | * series as this is easier. | ||
661 | */ | ||
662 | static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, | ||
663 | dm_oblock_t oblock, dm_cblock_t cblock) | ||
664 | { | ||
665 | struct per_bio_data *pb = get_per_bio_data(bio); | ||
666 | |||
667 | pb->cache = cache; | ||
668 | pb->cblock = cblock; | ||
669 | pb->saved_bi_end_io = bio->bi_end_io; | ||
670 | bio->bi_end_io = writethrough_endio; | ||
671 | |||
672 | remap_to_origin_clear_discard(pb->cache, bio, oblock); | ||
673 | } | ||
674 | |||
612 | /*---------------------------------------------------------------- | 675 | /*---------------------------------------------------------------- |
613 | * Migration processing | 676 | * Migration processing |
614 | * | 677 | * |
@@ -1002,7 +1065,7 @@ static void process_discard_bio(struct cache *cache, struct bio *bio) | |||
1002 | dm_block_t end_block = bio->bi_sector + bio_sectors(bio); | 1065 | dm_block_t end_block = bio->bi_sector + bio_sectors(bio); |
1003 | dm_block_t b; | 1066 | dm_block_t b; |
1004 | 1067 | ||
1005 | (void) sector_div(end_block, cache->discard_block_size); | 1068 | end_block = block_div(end_block, cache->discard_block_size); |
1006 | 1069 | ||
1007 | for (b = start_block; b < end_block; b++) | 1070 | for (b = start_block; b < end_block; b++) |
1008 | set_discard(cache, to_dblock(b)); | 1071 | set_discard(cache, to_dblock(b)); |
@@ -1070,14 +1133,9 @@ static void process_bio(struct cache *cache, struct prealloc *structs, | |||
1070 | inc_hit_counter(cache, bio); | 1133 | inc_hit_counter(cache, bio); |
1071 | pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); | 1134 | pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); |
1072 | 1135 | ||
1073 | if (is_writethrough_io(cache, bio, lookup_result.cblock)) { | 1136 | if (is_writethrough_io(cache, bio, lookup_result.cblock)) |
1074 | /* | 1137 | remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); |
1075 | * No need to mark anything dirty in write through mode. | 1138 | else |
1076 | */ | ||
1077 | pb->req_nr == 0 ? | ||
1078 | remap_to_cache(cache, bio, lookup_result.cblock) : | ||
1079 | remap_to_origin_clear_discard(cache, bio, block); | ||
1080 | } else | ||
1081 | remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); | 1139 | remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); |
1082 | 1140 | ||
1083 | issue(cache, bio); | 1141 | issue(cache, bio); |
@@ -1086,17 +1144,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs, | |||
1086 | case POLICY_MISS: | 1144 | case POLICY_MISS: |
1087 | inc_miss_counter(cache, bio); | 1145 | inc_miss_counter(cache, bio); |
1088 | pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); | 1146 | pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); |
1089 | 1147 | remap_to_origin_clear_discard(cache, bio, block); | |
1090 | if (pb->req_nr != 0) { | 1148 | issue(cache, bio); |
1091 | /* | ||
1092 | * This is a duplicate writethrough io that is no | ||
1093 | * longer needed because the block has been demoted. | ||
1094 | */ | ||
1095 | bio_endio(bio, 0); | ||
1096 | } else { | ||
1097 | remap_to_origin_clear_discard(cache, bio, block); | ||
1098 | issue(cache, bio); | ||
1099 | } | ||
1100 | break; | 1149 | break; |
1101 | 1150 | ||
1102 | case POLICY_NEW: | 1151 | case POLICY_NEW: |
@@ -1217,6 +1266,23 @@ static void process_deferred_flush_bios(struct cache *cache, bool submit_bios) | |||
1217 | submit_bios ? generic_make_request(bio) : bio_io_error(bio); | 1266 | submit_bios ? generic_make_request(bio) : bio_io_error(bio); |
1218 | } | 1267 | } |
1219 | 1268 | ||
1269 | static void process_deferred_writethrough_bios(struct cache *cache) | ||
1270 | { | ||
1271 | unsigned long flags; | ||
1272 | struct bio_list bios; | ||
1273 | struct bio *bio; | ||
1274 | |||
1275 | bio_list_init(&bios); | ||
1276 | |||
1277 | spin_lock_irqsave(&cache->lock, flags); | ||
1278 | bio_list_merge(&bios, &cache->deferred_writethrough_bios); | ||
1279 | bio_list_init(&cache->deferred_writethrough_bios); | ||
1280 | spin_unlock_irqrestore(&cache->lock, flags); | ||
1281 | |||
1282 | while ((bio = bio_list_pop(&bios))) | ||
1283 | generic_make_request(bio); | ||
1284 | } | ||
1285 | |||
1220 | static void writeback_some_dirty_blocks(struct cache *cache) | 1286 | static void writeback_some_dirty_blocks(struct cache *cache) |
1221 | { | 1287 | { |
1222 | int r = 0; | 1288 | int r = 0; |
@@ -1313,6 +1379,7 @@ static int more_work(struct cache *cache) | |||
1313 | else | 1379 | else |
1314 | return !bio_list_empty(&cache->deferred_bios) || | 1380 | return !bio_list_empty(&cache->deferred_bios) || |
1315 | !bio_list_empty(&cache->deferred_flush_bios) || | 1381 | !bio_list_empty(&cache->deferred_flush_bios) || |
1382 | !bio_list_empty(&cache->deferred_writethrough_bios) || | ||
1316 | !list_empty(&cache->quiesced_migrations) || | 1383 | !list_empty(&cache->quiesced_migrations) || |
1317 | !list_empty(&cache->completed_migrations) || | 1384 | !list_empty(&cache->completed_migrations) || |
1318 | !list_empty(&cache->need_commit_migrations); | 1385 | !list_empty(&cache->need_commit_migrations); |
@@ -1331,6 +1398,8 @@ static void do_worker(struct work_struct *ws) | |||
1331 | 1398 | ||
1332 | writeback_some_dirty_blocks(cache); | 1399 | writeback_some_dirty_blocks(cache); |
1333 | 1400 | ||
1401 | process_deferred_writethrough_bios(cache); | ||
1402 | |||
1334 | if (commit_if_needed(cache)) { | 1403 | if (commit_if_needed(cache)) { |
1335 | process_deferred_flush_bios(cache, false); | 1404 | process_deferred_flush_bios(cache, false); |
1336 | 1405 | ||
@@ -1756,8 +1825,11 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca, | |||
1756 | } | 1825 | } |
1757 | 1826 | ||
1758 | r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv); | 1827 | r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv); |
1759 | if (r) | 1828 | if (r) { |
1829 | *error = "Error setting cache policy's config values"; | ||
1760 | dm_cache_policy_destroy(cache->policy); | 1830 | dm_cache_policy_destroy(cache->policy); |
1831 | cache->policy = NULL; | ||
1832 | } | ||
1761 | 1833 | ||
1762 | return r; | 1834 | return r; |
1763 | } | 1835 | } |
@@ -1793,8 +1865,6 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size, | |||
1793 | 1865 | ||
1794 | #define DEFAULT_MIGRATION_THRESHOLD (2048 * 100) | 1866 | #define DEFAULT_MIGRATION_THRESHOLD (2048 * 100) |
1795 | 1867 | ||
1796 | static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio); | ||
1797 | |||
1798 | static int cache_create(struct cache_args *ca, struct cache **result) | 1868 | static int cache_create(struct cache_args *ca, struct cache **result) |
1799 | { | 1869 | { |
1800 | int r = 0; | 1870 | int r = 0; |
@@ -1821,9 +1891,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
1821 | 1891 | ||
1822 | memcpy(&cache->features, &ca->features, sizeof(cache->features)); | 1892 | memcpy(&cache->features, &ca->features, sizeof(cache->features)); |
1823 | 1893 | ||
1824 | if (cache->features.write_through) | ||
1825 | ti->num_write_bios = cache_num_write_bios; | ||
1826 | |||
1827 | cache->callbacks.congested_fn = cache_is_congested; | 1894 | cache->callbacks.congested_fn = cache_is_congested; |
1828 | dm_table_add_target_callbacks(ti->table, &cache->callbacks); | 1895 | dm_table_add_target_callbacks(ti->table, &cache->callbacks); |
1829 | 1896 | ||
@@ -1835,7 +1902,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
1835 | 1902 | ||
1836 | /* FIXME: factor out this whole section */ | 1903 | /* FIXME: factor out this whole section */ |
1837 | origin_blocks = cache->origin_sectors = ca->origin_sectors; | 1904 | origin_blocks = cache->origin_sectors = ca->origin_sectors; |
1838 | (void) sector_div(origin_blocks, ca->block_size); | 1905 | origin_blocks = block_div(origin_blocks, ca->block_size); |
1839 | cache->origin_blocks = to_oblock(origin_blocks); | 1906 | cache->origin_blocks = to_oblock(origin_blocks); |
1840 | 1907 | ||
1841 | cache->sectors_per_block = ca->block_size; | 1908 | cache->sectors_per_block = ca->block_size; |
@@ -1848,7 +1915,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
1848 | dm_block_t cache_size = ca->cache_sectors; | 1915 | dm_block_t cache_size = ca->cache_sectors; |
1849 | 1916 | ||
1850 | cache->sectors_per_block_shift = -1; | 1917 | cache->sectors_per_block_shift = -1; |
1851 | (void) sector_div(cache_size, ca->block_size); | 1918 | cache_size = block_div(cache_size, ca->block_size); |
1852 | cache->cache_size = to_cblock(cache_size); | 1919 | cache->cache_size = to_cblock(cache_size); |
1853 | } else { | 1920 | } else { |
1854 | cache->sectors_per_block_shift = __ffs(ca->block_size); | 1921 | cache->sectors_per_block_shift = __ffs(ca->block_size); |
@@ -1873,6 +1940,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
1873 | spin_lock_init(&cache->lock); | 1940 | spin_lock_init(&cache->lock); |
1874 | bio_list_init(&cache->deferred_bios); | 1941 | bio_list_init(&cache->deferred_bios); |
1875 | bio_list_init(&cache->deferred_flush_bios); | 1942 | bio_list_init(&cache->deferred_flush_bios); |
1943 | bio_list_init(&cache->deferred_writethrough_bios); | ||
1876 | INIT_LIST_HEAD(&cache->quiesced_migrations); | 1944 | INIT_LIST_HEAD(&cache->quiesced_migrations); |
1877 | INIT_LIST_HEAD(&cache->completed_migrations); | 1945 | INIT_LIST_HEAD(&cache->completed_migrations); |
1878 | INIT_LIST_HEAD(&cache->need_commit_migrations); | 1946 | INIT_LIST_HEAD(&cache->need_commit_migrations); |
@@ -2002,6 +2070,8 @@ static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
2002 | goto out; | 2070 | goto out; |
2003 | 2071 | ||
2004 | r = cache_create(ca, &cache); | 2072 | r = cache_create(ca, &cache); |
2073 | if (r) | ||
2074 | goto out; | ||
2005 | 2075 | ||
2006 | r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); | 2076 | r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); |
2007 | if (r) { | 2077 | if (r) { |
@@ -2016,20 +2086,6 @@ out: | |||
2016 | return r; | 2086 | return r; |
2017 | } | 2087 | } |
2018 | 2088 | ||
2019 | static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio) | ||
2020 | { | ||
2021 | int r; | ||
2022 | struct cache *cache = ti->private; | ||
2023 | dm_oblock_t block = get_bio_block(cache, bio); | ||
2024 | dm_cblock_t cblock; | ||
2025 | |||
2026 | r = policy_lookup(cache->policy, block, &cblock); | ||
2027 | if (r < 0) | ||
2028 | return 2; /* assume the worst */ | ||
2029 | |||
2030 | return (!r && !is_dirty(cache, cblock)) ? 2 : 1; | ||
2031 | } | ||
2032 | |||
2033 | static int cache_map(struct dm_target *ti, struct bio *bio) | 2089 | static int cache_map(struct dm_target *ti, struct bio *bio) |
2034 | { | 2090 | { |
2035 | struct cache *cache = ti->private; | 2091 | struct cache *cache = ti->private; |
@@ -2097,18 +2153,12 @@ static int cache_map(struct dm_target *ti, struct bio *bio) | |||
2097 | inc_hit_counter(cache, bio); | 2153 | inc_hit_counter(cache, bio); |
2098 | pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); | 2154 | pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); |
2099 | 2155 | ||
2100 | if (is_writethrough_io(cache, bio, lookup_result.cblock)) { | 2156 | if (is_writethrough_io(cache, bio, lookup_result.cblock)) |
2101 | /* | 2157 | remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); |
2102 | * No need to mark anything dirty in write through mode. | 2158 | else |
2103 | */ | ||
2104 | pb->req_nr == 0 ? | ||
2105 | remap_to_cache(cache, bio, lookup_result.cblock) : | ||
2106 | remap_to_origin_clear_discard(cache, bio, block); | ||
2107 | cell_defer(cache, cell, false); | ||
2108 | } else { | ||
2109 | remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); | 2159 | remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); |
2110 | cell_defer(cache, cell, false); | 2160 | |
2111 | } | 2161 | cell_defer(cache, cell, false); |
2112 | break; | 2162 | break; |
2113 | 2163 | ||
2114 | case POLICY_MISS: | 2164 | case POLICY_MISS: |
@@ -2319,8 +2369,7 @@ static int cache_preresume(struct dm_target *ti) | |||
2319 | } | 2369 | } |
2320 | 2370 | ||
2321 | if (!cache->loaded_mappings) { | 2371 | if (!cache->loaded_mappings) { |
2322 | r = dm_cache_load_mappings(cache->cmd, | 2372 | r = dm_cache_load_mappings(cache->cmd, cache->policy, |
2323 | dm_cache_policy_get_name(cache->policy), | ||
2324 | load_mapping, cache); | 2373 | load_mapping, cache); |
2325 | if (r) { | 2374 | if (r) { |
2326 | DMERR("could not load cache mappings"); | 2375 | DMERR("could not load cache mappings"); |
@@ -2535,7 +2584,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) | |||
2535 | 2584 | ||
2536 | static struct target_type cache_target = { | 2585 | static struct target_type cache_target = { |
2537 | .name = "cache", | 2586 | .name = "cache", |
2538 | .version = {1, 0, 0}, | 2587 | .version = {1, 1, 0}, |
2539 | .module = THIS_MODULE, | 2588 | .module = THIS_MODULE, |
2540 | .ctr = cache_ctr, | 2589 | .ctr = cache_ctr, |
2541 | .dtr = cache_dtr, | 2590 | .dtr = cache_dtr, |
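
Note: the dm-cache-target hunks drop the duplicate-bio writethrough scheme (cache_num_write_bios and the req_nr juggling) in favour of remapping the write to the origin, intercepting its completion, and re-issuing it to the cache via the worker thread, since the completion runs in interrupt context. A small userspace analogue of that save-and-restore completion hook; the request struct and all names below are illustrative only.

#include <stdio.h>

struct request {
	void (*done)(struct request *req, int err);	/* completion hook */
	void (*saved_done)(struct request *req, int err);
	const char *target;
};

static void original_done(struct request *req, int err)
{
	printf("request to %s finished with %d\n", req->target, err);
}

static void intercept_done(struct request *req, int err)
{
	req->done = req->saved_done;	/* restore the original hook */
	if (err) {			/* on error, complete immediately */
		req->done(req, err);
		return;
	}
	req->target = "cache";		/* redirect for the second leg */
	printf("first leg done, reissuing to %s\n", req->target);
	req->done(req, 0);		/* the driver would defer this to a worker */
}

int main(void)
{
	struct request req = { .done = original_done, .target = "origin" };

	/* arm the interceptor before issuing, as remap_to_origin_then_cache does */
	req.saved_done = req.done;
	req.done = intercept_done;

	req.done(&req, 0);		/* simulate completion of the origin write */
	return 0;
}
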
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 009339d62828..004ad1652b73 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -1577,6 +1577,11 @@ static bool data_dev_supports_discard(struct pool_c *pt) | |||
1577 | return q && blk_queue_discard(q); | 1577 | return q && blk_queue_discard(q); |
1578 | } | 1578 | } |
1579 | 1579 | ||
1580 | static bool is_factor(sector_t block_size, uint32_t n) | ||
1581 | { | ||
1582 | return !sector_div(block_size, n); | ||
1583 | } | ||
1584 | |||
1580 | /* | 1585 | /* |
1581 | * If discard_passdown was enabled verify that the data device | 1586 | * If discard_passdown was enabled verify that the data device |
1582 | * supports discards. Disable discard_passdown if not. | 1587 | * supports discards. Disable discard_passdown if not. |
@@ -1602,7 +1607,7 @@ static void disable_passdown_if_not_supported(struct pool_c *pt) | |||
1602 | else if (data_limits->discard_granularity > block_size) | 1607 | else if (data_limits->discard_granularity > block_size) |
1603 | reason = "discard granularity larger than a block"; | 1608 | reason = "discard granularity larger than a block"; |
1604 | 1609 | ||
1605 | else if (block_size & (data_limits->discard_granularity - 1)) | 1610 | else if (!is_factor(block_size, data_limits->discard_granularity)) |
1606 | reason = "discard granularity not a factor of block size"; | 1611 | reason = "discard granularity not a factor of block size"; |
1607 | 1612 | ||
1608 | if (reason) { | 1613 | if (reason) { |
@@ -2544,7 +2549,7 @@ static struct target_type pool_target = { | |||
2544 | .name = "thin-pool", | 2549 | .name = "thin-pool", |
2545 | .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | | 2550 | .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | |
2546 | DM_TARGET_IMMUTABLE, | 2551 | DM_TARGET_IMMUTABLE, |
2547 | .version = {1, 6, 1}, | 2552 | .version = {1, 7, 0}, |
2548 | .module = THIS_MODULE, | 2553 | .module = THIS_MODULE, |
2549 | .ctr = pool_ctr, | 2554 | .ctr = pool_ctr, |
2550 | .dtr = pool_dtr, | 2555 | .dtr = pool_dtr, |
@@ -2831,7 +2836,7 @@ static int thin_iterate_devices(struct dm_target *ti, | |||
2831 | 2836 | ||
2832 | static struct target_type thin_target = { | 2837 | static struct target_type thin_target = { |
2833 | .name = "thin", | 2838 | .name = "thin", |
2834 | .version = {1, 7, 1}, | 2839 | .version = {1, 8, 0}, |
2835 | .module = THIS_MODULE, | 2840 | .module = THIS_MODULE, |
2836 | .ctr = thin_ctr, | 2841 | .ctr = thin_ctr, |
2837 | .dtr = thin_dtr, | 2842 | .dtr = thin_dtr, |
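
Note: the dm-thin hunk replaces the bitmask divisibility test with a real remainder check via sector_div(), because discard_granularity need not be a power of two. A short sketch of the false result the old test could give; the numbers are hypothetical and plain modulo stands in for sector_div().

#include <stdio.h>
#include <stdint.h>

static int is_factor(uint64_t block_size, uint32_t n)
{
	return (block_size % n) == 0;
}

int main(void)
{
	uint64_t block_size = 384;	/* sectors */
	uint32_t granularity = 192;	/* not a power of two */

	/* the bitmask test wrongly reports 192 as not being a factor of 384 */
	printf("bitmask test says factor: %d\n",
	       (block_size & (uint64_t)(granularity - 1)) == 0);
	printf("modulo  test says factor: %d\n", is_factor(block_size, granularity));
	return 0;
}
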
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 6ad538375c3c..a746f1d21c66 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c | |||
@@ -93,6 +93,13 @@ struct dm_verity_io { | |||
93 | */ | 93 | */ |
94 | }; | 94 | }; |
95 | 95 | ||
96 | struct dm_verity_prefetch_work { | ||
97 | struct work_struct work; | ||
98 | struct dm_verity *v; | ||
99 | sector_t block; | ||
100 | unsigned n_blocks; | ||
101 | }; | ||
102 | |||
96 | static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io) | 103 | static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io) |
97 | { | 104 | { |
98 | return (struct shash_desc *)(io + 1); | 105 | return (struct shash_desc *)(io + 1); |
@@ -424,15 +431,18 @@ static void verity_end_io(struct bio *bio, int error) | |||
424 | * The root buffer is not prefetched, it is assumed that it will be cached | 431 | * The root buffer is not prefetched, it is assumed that it will be cached |
425 | * all the time. | 432 | * all the time. |
426 | */ | 433 | */ |
427 | static void verity_prefetch_io(struct dm_verity *v, struct dm_verity_io *io) | 434 | static void verity_prefetch_io(struct work_struct *work) |
428 | { | 435 | { |
436 | struct dm_verity_prefetch_work *pw = | ||
437 | container_of(work, struct dm_verity_prefetch_work, work); | ||
438 | struct dm_verity *v = pw->v; | ||
429 | int i; | 439 | int i; |
430 | 440 | ||
431 | for (i = v->levels - 2; i >= 0; i--) { | 441 | for (i = v->levels - 2; i >= 0; i--) { |
432 | sector_t hash_block_start; | 442 | sector_t hash_block_start; |
433 | sector_t hash_block_end; | 443 | sector_t hash_block_end; |
434 | verity_hash_at_level(v, io->block, i, &hash_block_start, NULL); | 444 | verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL); |
435 | verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL); | 445 | verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL); |
436 | if (!i) { | 446 | if (!i) { |
437 | unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster); | 447 | unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster); |
438 | 448 | ||
@@ -452,6 +462,25 @@ no_prefetch_cluster: | |||
452 | dm_bufio_prefetch(v->bufio, hash_block_start, | 462 | dm_bufio_prefetch(v->bufio, hash_block_start, |
453 | hash_block_end - hash_block_start + 1); | 463 | hash_block_end - hash_block_start + 1); |
454 | } | 464 | } |
465 | |||
466 | kfree(pw); | ||
467 | } | ||
468 | |||
469 | static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io) | ||
470 | { | ||
471 | struct dm_verity_prefetch_work *pw; | ||
472 | |||
473 | pw = kmalloc(sizeof(struct dm_verity_prefetch_work), | ||
474 | GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); | ||
475 | |||
476 | if (!pw) | ||
477 | return; | ||
478 | |||
479 | INIT_WORK(&pw->work, verity_prefetch_io); | ||
480 | pw->v = v; | ||
481 | pw->block = io->block; | ||
482 | pw->n_blocks = io->n_blocks; | ||
483 | queue_work(v->verify_wq, &pw->work); | ||
455 | } | 484 | } |
456 | 485 | ||
457 | /* | 486 | /* |
@@ -498,7 +527,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio) | |||
498 | memcpy(io->io_vec, bio_iovec(bio), | 527 | memcpy(io->io_vec, bio_iovec(bio), |
499 | io->io_vec_size * sizeof(struct bio_vec)); | 528 | io->io_vec_size * sizeof(struct bio_vec)); |
500 | 529 | ||
501 | verity_prefetch_io(v, io); | 530 | verity_submit_prefetch(v, io); |
502 | 531 | ||
503 | generic_make_request(bio); | 532 | generic_make_request(bio); |
504 | 533 | ||
@@ -858,7 +887,7 @@ bad: | |||
858 | 887 | ||
859 | static struct target_type verity_target = { | 888 | static struct target_type verity_target = { |
860 | .name = "verity", | 889 | .name = "verity", |
861 | .version = {1, 1, 1}, | 890 | .version = {1, 2, 0}, |
862 | .module = THIS_MODULE, | 891 | .module = THIS_MODULE, |
863 | .ctr = verity_ctr, | 892 | .ctr = verity_ctr, |
864 | .dtr = verity_dtr, | 893 | .dtr = verity_dtr, |
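
Note: the verity hunks move hash prefetching out of the map path into a workqueue item that carries its own copy of block/n_blocks and frees itself when done. A userspace analogue of that hand-off, using a detached thread in place of the kernel workqueue; as in the hunk, allocation failure just skips the best-effort prefetch. All names are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct prefetch_work {
	unsigned long block;
	unsigned n_blocks;
};

static void *prefetch_worker(void *arg)
{
	struct prefetch_work *pw = arg;

	printf("prefetching %u blocks starting at %lu\n", pw->n_blocks, pw->block);
	free(pw);			/* the worker owns and releases the job */
	return NULL;
}

static void submit_prefetch(unsigned long block, unsigned n_blocks)
{
	struct prefetch_work *pw = malloc(sizeof(*pw));
	pthread_t tid;

	if (!pw)
		return;			/* prefetch is best-effort; just skip it */
	pw->block = block;
	pw->n_blocks = n_blocks;
	if (pthread_create(&tid, NULL, prefetch_worker, pw) != 0) {
		free(pw);
		return;
	}
	pthread_detach(tid);
}

int main(void)
{
	submit_prefetch(1024, 8);
	pthread_exit(NULL);		/* let the detached worker finish */
}
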
diff --git a/drivers/md/md.c b/drivers/md/md.c index fcb878f88796..aeceedfc530b 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -7663,10 +7663,8 @@ static int remove_and_add_spares(struct mddev *mddev) | |||
7663 | removed++; | 7663 | removed++; |
7664 | } | 7664 | } |
7665 | } | 7665 | } |
7666 | if (removed) | 7666 | if (removed && mddev->kobj.sd) |
7667 | sysfs_notify(&mddev->kobj, NULL, | 7667 | sysfs_notify(&mddev->kobj, NULL, "degraded"); |
7668 | "degraded"); | ||
7669 | |||
7670 | 7668 | ||
7671 | rdev_for_each(rdev, mddev) { | 7669 | rdev_for_each(rdev, mddev) { |
7672 | if (rdev->raid_disk >= 0 && | 7670 | if (rdev->raid_disk >= 0 && |
diff --git a/drivers/md/md.h b/drivers/md/md.h index eca59c3074ef..d90fb1a879e1 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -506,7 +506,7 @@ static inline char * mdname (struct mddev * mddev) | |||
506 | static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) | 506 | static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) |
507 | { | 507 | { |
508 | char nm[20]; | 508 | char nm[20]; |
509 | if (!test_bit(Replacement, &rdev->flags)) { | 509 | if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { |
510 | sprintf(nm, "rd%d", rdev->raid_disk); | 510 | sprintf(nm, "rd%d", rdev->raid_disk); |
511 | return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); | 511 | return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); |
512 | } else | 512 | } else |
@@ -516,7 +516,7 @@ static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) | |||
516 | static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) | 516 | static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) |
517 | { | 517 | { |
518 | char nm[20]; | 518 | char nm[20]; |
519 | if (!test_bit(Replacement, &rdev->flags)) { | 519 | if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { |
520 | sprintf(nm, "rd%d", rdev->raid_disk); | 520 | sprintf(nm, "rd%d", rdev->raid_disk); |
521 | sysfs_remove_link(&mddev->kobj, nm); | 521 | sysfs_remove_link(&mddev->kobj, nm); |
522 | } | 522 | } |
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index c4f28133ef82..b88757cd0d1d 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c | |||
@@ -139,15 +139,8 @@ struct child { | |||
139 | struct btree_node *n; | 139 | struct btree_node *n; |
140 | }; | 140 | }; |
141 | 141 | ||
142 | static struct dm_btree_value_type le64_type = { | 142 | static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt, |
143 | .context = NULL, | 143 | struct btree_node *parent, |
144 | .size = sizeof(__le64), | ||
145 | .inc = NULL, | ||
146 | .dec = NULL, | ||
147 | .equal = NULL | ||
148 | }; | ||
149 | |||
150 | static int init_child(struct dm_btree_info *info, struct btree_node *parent, | ||
151 | unsigned index, struct child *result) | 144 | unsigned index, struct child *result) |
152 | { | 145 | { |
153 | int r, inc; | 146 | int r, inc; |
@@ -164,7 +157,7 @@ static int init_child(struct dm_btree_info *info, struct btree_node *parent, | |||
164 | result->n = dm_block_data(result->block); | 157 | result->n = dm_block_data(result->block); |
165 | 158 | ||
166 | if (inc) | 159 | if (inc) |
167 | inc_children(info->tm, result->n, &le64_type); | 160 | inc_children(info->tm, result->n, vt); |
168 | 161 | ||
169 | *((__le64 *) value_ptr(parent, index)) = | 162 | *((__le64 *) value_ptr(parent, index)) = |
170 | cpu_to_le64(dm_block_location(result->block)); | 163 | cpu_to_le64(dm_block_location(result->block)); |
@@ -236,7 +229,7 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent, | |||
236 | } | 229 | } |
237 | 230 | ||
238 | static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info, | 231 | static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info, |
239 | unsigned left_index) | 232 | struct dm_btree_value_type *vt, unsigned left_index) |
240 | { | 233 | { |
241 | int r; | 234 | int r; |
242 | struct btree_node *parent; | 235 | struct btree_node *parent; |
@@ -244,11 +237,11 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info, | |||
244 | 237 | ||
245 | parent = dm_block_data(shadow_current(s)); | 238 | parent = dm_block_data(shadow_current(s)); |
246 | 239 | ||
247 | r = init_child(info, parent, left_index, &left); | 240 | r = init_child(info, vt, parent, left_index, &left); |
248 | if (r) | 241 | if (r) |
249 | return r; | 242 | return r; |
250 | 243 | ||
251 | r = init_child(info, parent, left_index + 1, &right); | 244 | r = init_child(info, vt, parent, left_index + 1, &right); |
252 | if (r) { | 245 | if (r) { |
253 | exit_child(info, &left); | 246 | exit_child(info, &left); |
254 | return r; | 247 | return r; |
@@ -368,7 +361,7 @@ static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent, | |||
368 | } | 361 | } |
369 | 362 | ||
370 | static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info, | 363 | static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info, |
371 | unsigned left_index) | 364 | struct dm_btree_value_type *vt, unsigned left_index) |
372 | { | 365 | { |
373 | int r; | 366 | int r; |
374 | struct btree_node *parent = dm_block_data(shadow_current(s)); | 367 | struct btree_node *parent = dm_block_data(shadow_current(s)); |
@@ -377,17 +370,17 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info, | |||
377 | /* | 370 | /* |
378 | * FIXME: fill out an array? | 371 | * FIXME: fill out an array? |
379 | */ | 372 | */ |
380 | r = init_child(info, parent, left_index, &left); | 373 | r = init_child(info, vt, parent, left_index, &left); |
381 | if (r) | 374 | if (r) |
382 | return r; | 375 | return r; |
383 | 376 | ||
384 | r = init_child(info, parent, left_index + 1, &center); | 377 | r = init_child(info, vt, parent, left_index + 1, &center); |
385 | if (r) { | 378 | if (r) { |
386 | exit_child(info, &left); | 379 | exit_child(info, &left); |
387 | return r; | 380 | return r; |
388 | } | 381 | } |
389 | 382 | ||
390 | r = init_child(info, parent, left_index + 2, &right); | 383 | r = init_child(info, vt, parent, left_index + 2, &right); |
391 | if (r) { | 384 | if (r) { |
392 | exit_child(info, &left); | 385 | exit_child(info, &left); |
393 | exit_child(info, &center); | 386 | exit_child(info, &center); |
@@ -434,7 +427,8 @@ static int get_nr_entries(struct dm_transaction_manager *tm, | |||
434 | } | 427 | } |
435 | 428 | ||
436 | static int rebalance_children(struct shadow_spine *s, | 429 | static int rebalance_children(struct shadow_spine *s, |
437 | struct dm_btree_info *info, uint64_t key) | 430 | struct dm_btree_info *info, |
431 | struct dm_btree_value_type *vt, uint64_t key) | ||
438 | { | 432 | { |
439 | int i, r, has_left_sibling, has_right_sibling; | 433 | int i, r, has_left_sibling, has_right_sibling; |
440 | uint32_t child_entries; | 434 | uint32_t child_entries; |
@@ -472,13 +466,13 @@ static int rebalance_children(struct shadow_spine *s, | |||
472 | has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1); | 466 | has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1); |
473 | 467 | ||
474 | if (!has_left_sibling) | 468 | if (!has_left_sibling) |
475 | r = rebalance2(s, info, i); | 469 | r = rebalance2(s, info, vt, i); |
476 | 470 | ||
477 | else if (!has_right_sibling) | 471 | else if (!has_right_sibling) |
478 | r = rebalance2(s, info, i - 1); | 472 | r = rebalance2(s, info, vt, i - 1); |
479 | 473 | ||
480 | else | 474 | else |
481 | r = rebalance3(s, info, i - 1); | 475 | r = rebalance3(s, info, vt, i - 1); |
482 | 476 | ||
483 | return r; | 477 | return r; |
484 | } | 478 | } |
@@ -529,7 +523,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info, | |||
529 | if (le32_to_cpu(n->header.flags) & LEAF_NODE) | 523 | if (le32_to_cpu(n->header.flags) & LEAF_NODE) |
530 | return do_leaf(n, key, index); | 524 | return do_leaf(n, key, index); |
531 | 525 | ||
532 | r = rebalance_children(s, info, key); | 526 | r = rebalance_children(s, info, vt, key); |
533 | if (r) | 527 | if (r) |
534 | break; | 528 | break; |
535 | 529 | ||
@@ -550,6 +544,14 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info, | |||
550 | return r; | 544 | return r; |
551 | } | 545 | } |
552 | 546 | ||
547 | static struct dm_btree_value_type le64_type = { | ||
548 | .context = NULL, | ||
549 | .size = sizeof(__le64), | ||
550 | .inc = NULL, | ||
551 | .dec = NULL, | ||
552 | .equal = NULL | ||
553 | }; | ||
554 | |||
553 | int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, | 555 | int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, |
554 | uint64_t *keys, dm_block_t *new_root) | 556 | uint64_t *keys, dm_block_t *new_root) |
555 | { | 557 | { |
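The dm-btree-remove.c hunks thread the caller's dm_btree_value_type down into init_child() so that, when a shadowed child block has to be re-counted, inc_children() runs the caller's inc callback instead of the bare le64 type. A small user-space sketch of the general idea, passing a value-type vtable through a helper; the names and types here are illustrative, not the kernel's:

#include <stdio.h>

struct value_type {
	size_t size;
	void (*inc)(void *context, const void *value);	/* may be NULL */
};

/* Bump every value using the caller-supplied callback, if one exists. */
static void inc_children(const struct value_type *vt, const void *values, unsigned n)
{
	if (!vt->inc)
		return;
	for (unsigned i = 0; i < n; i++)
		vt->inc(NULL, (const char *)values + i * vt->size);
}

static void inc_u64(void *context, const void *value)
{
	(void)context;
	printf("inc ref on block %llu\n", *(const unsigned long long *)value);
}

int main(void)
{
	unsigned long long blocks[] = { 7, 9 };
	struct value_type vt = { sizeof(blocks[0]), inc_u64 };

	inc_children(&vt, blocks, 2);	/* the caller's type, not a hard-coded one */
	return 0;
}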
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 3ee2912889e7..24909eb13fec 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -671,9 +671,11 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) | |||
671 | bi->bi_next = NULL; | 671 | bi->bi_next = NULL; |
672 | if (rrdev) | 672 | if (rrdev) |
673 | set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); | 673 | set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); |
674 | trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), | 674 | |
675 | bi, disk_devt(conf->mddev->gendisk), | 675 | if (conf->mddev->gendisk) |
676 | sh->dev[i].sector); | 676 | trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), |
677 | bi, disk_devt(conf->mddev->gendisk), | ||
678 | sh->dev[i].sector); | ||
677 | generic_make_request(bi); | 679 | generic_make_request(bi); |
678 | } | 680 | } |
679 | if (rrdev) { | 681 | if (rrdev) { |
@@ -701,9 +703,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) | |||
701 | rbi->bi_io_vec[0].bv_offset = 0; | 703 | rbi->bi_io_vec[0].bv_offset = 0; |
702 | rbi->bi_size = STRIPE_SIZE; | 704 | rbi->bi_size = STRIPE_SIZE; |
703 | rbi->bi_next = NULL; | 705 | rbi->bi_next = NULL; |
704 | trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), | 706 | if (conf->mddev->gendisk) |
705 | rbi, disk_devt(conf->mddev->gendisk), | 707 | trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), |
706 | sh->dev[i].sector); | 708 | rbi, disk_devt(conf->mddev->gendisk), |
709 | sh->dev[i].sector); | ||
707 | generic_make_request(rbi); | 710 | generic_make_request(rbi); |
708 | } | 711 | } |
709 | if (!rdev && !rrdev) { | 712 | if (!rdev && !rrdev) { |
@@ -2280,17 +2283,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, | |||
2280 | int level = conf->level; | 2283 | int level = conf->level; |
2281 | 2284 | ||
2282 | if (rcw) { | 2285 | if (rcw) { |
2283 | /* if we are not expanding this is a proper write request, and | ||
2284 | * there will be bios with new data to be drained into the | ||
2285 | * stripe cache | ||
2286 | */ | ||
2287 | if (!expand) { | ||
2288 | sh->reconstruct_state = reconstruct_state_drain_run; | ||
2289 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); | ||
2290 | } else | ||
2291 | sh->reconstruct_state = reconstruct_state_run; | ||
2292 | |||
2293 | set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); | ||
2294 | 2286 | ||
2295 | for (i = disks; i--; ) { | 2287 | for (i = disks; i--; ) { |
2296 | struct r5dev *dev = &sh->dev[i]; | 2288 | struct r5dev *dev = &sh->dev[i]; |
@@ -2303,6 +2295,21 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, | |||
2303 | s->locked++; | 2295 | s->locked++; |
2304 | } | 2296 | } |
2305 | } | 2297 | } |
2298 | /* if we are not expanding this is a proper write request, and | ||
2299 | * there will be bios with new data to be drained into the | ||
2300 | * stripe cache | ||
2301 | */ | ||
2302 | if (!expand) { | ||
2303 | if (!s->locked) | ||
2304 | /* False alarm, nothing to do */ | ||
2305 | return; | ||
2306 | sh->reconstruct_state = reconstruct_state_drain_run; | ||
2307 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); | ||
2308 | } else | ||
2309 | sh->reconstruct_state = reconstruct_state_run; | ||
2310 | |||
2311 | set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); | ||
2312 | |||
2306 | if (s->locked + conf->max_degraded == disks) | 2313 | if (s->locked + conf->max_degraded == disks) |
2307 | if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) | 2314 | if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) |
2308 | atomic_inc(&conf->pending_full_writes); | 2315 | atomic_inc(&conf->pending_full_writes); |
@@ -2311,11 +2318,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, | |||
2311 | BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || | 2318 | BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || |
2312 | test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); | 2319 | test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); |
2313 | 2320 | ||
2314 | sh->reconstruct_state = reconstruct_state_prexor_drain_run; | ||
2315 | set_bit(STRIPE_OP_PREXOR, &s->ops_request); | ||
2316 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); | ||
2317 | set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); | ||
2318 | |||
2319 | for (i = disks; i--; ) { | 2321 | for (i = disks; i--; ) { |
2320 | struct r5dev *dev = &sh->dev[i]; | 2322 | struct r5dev *dev = &sh->dev[i]; |
2321 | if (i == pd_idx) | 2323 | if (i == pd_idx) |
@@ -2330,6 +2332,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, | |||
2330 | s->locked++; | 2332 | s->locked++; |
2331 | } | 2333 | } |
2332 | } | 2334 | } |
2335 | if (!s->locked) | ||
2336 | /* False alarm - nothing to do */ | ||
2337 | return; | ||
2338 | sh->reconstruct_state = reconstruct_state_prexor_drain_run; | ||
2339 | set_bit(STRIPE_OP_PREXOR, &s->ops_request); | ||
2340 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); | ||
2341 | set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); | ||
2333 | } | 2342 | } |
2334 | 2343 | ||
2335 | /* keep the parity disk(s) locked while asynchronous operations | 2344 | /* keep the parity disk(s) locked while asynchronous operations |
@@ -2564,6 +2573,8 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, | |||
2564 | int i; | 2573 | int i; |
2565 | 2574 | ||
2566 | clear_bit(STRIPE_SYNCING, &sh->state); | 2575 | clear_bit(STRIPE_SYNCING, &sh->state); |
2576 | if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) | ||
2577 | wake_up(&conf->wait_for_overlap); | ||
2567 | s->syncing = 0; | 2578 | s->syncing = 0; |
2568 | s->replacing = 0; | 2579 | s->replacing = 0; |
2569 | /* There is nothing more to do for sync/check/repair. | 2580 | /* There is nothing more to do for sync/check/repair. |
@@ -2737,6 +2748,7 @@ static void handle_stripe_clean_event(struct r5conf *conf, | |||
2737 | { | 2748 | { |
2738 | int i; | 2749 | int i; |
2739 | struct r5dev *dev; | 2750 | struct r5dev *dev; |
2751 | int discard_pending = 0; | ||
2740 | 2752 | ||
2741 | for (i = disks; i--; ) | 2753 | for (i = disks; i--; ) |
2742 | if (sh->dev[i].written) { | 2754 | if (sh->dev[i].written) { |
@@ -2765,9 +2777,23 @@ static void handle_stripe_clean_event(struct r5conf *conf, | |||
2765 | STRIPE_SECTORS, | 2777 | STRIPE_SECTORS, |
2766 | !test_bit(STRIPE_DEGRADED, &sh->state), | 2778 | !test_bit(STRIPE_DEGRADED, &sh->state), |
2767 | 0); | 2779 | 0); |
2768 | } | 2780 | } else if (test_bit(R5_Discard, &dev->flags)) |
2769 | } else if (test_bit(R5_Discard, &sh->dev[i].flags)) | 2781 | discard_pending = 1; |
2770 | clear_bit(R5_Discard, &sh->dev[i].flags); | 2782 | } |
2783 | if (!discard_pending && | ||
2784 | test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { | ||
2785 | clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); | ||
2786 | clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); | ||
2787 | if (sh->qd_idx >= 0) { | ||
2788 | clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); | ||
2789 | clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); | ||
2790 | } | ||
2791 | /* now that discard is done we can proceed with any sync */ | ||
2792 | clear_bit(STRIPE_DISCARD, &sh->state); | ||
2793 | if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) | ||
2794 | set_bit(STRIPE_HANDLE, &sh->state); | ||
2795 | |||
2796 | } | ||
2771 | 2797 | ||
2772 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) | 2798 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) |
2773 | if (atomic_dec_and_test(&conf->pending_full_writes)) | 2799 | if (atomic_dec_and_test(&conf->pending_full_writes)) |
@@ -2826,8 +2852,10 @@ static void handle_stripe_dirtying(struct r5conf *conf, | |||
2826 | set_bit(STRIPE_HANDLE, &sh->state); | 2852 | set_bit(STRIPE_HANDLE, &sh->state); |
2827 | if (rmw < rcw && rmw > 0) { | 2853 | if (rmw < rcw && rmw > 0) { |
2828 | /* prefer read-modify-write, but need to get some data */ | 2854 | /* prefer read-modify-write, but need to get some data */ |
2829 | blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d", | 2855 | if (conf->mddev->queue) |
2830 | (unsigned long long)sh->sector, rmw); | 2856 | blk_add_trace_msg(conf->mddev->queue, |
2857 | "raid5 rmw %llu %d", | ||
2858 | (unsigned long long)sh->sector, rmw); | ||
2831 | for (i = disks; i--; ) { | 2859 | for (i = disks; i--; ) { |
2832 | struct r5dev *dev = &sh->dev[i]; | 2860 | struct r5dev *dev = &sh->dev[i]; |
2833 | if ((dev->towrite || i == sh->pd_idx) && | 2861 | if ((dev->towrite || i == sh->pd_idx) && |
@@ -2877,7 +2905,7 @@ static void handle_stripe_dirtying(struct r5conf *conf, | |||
2877 | } | 2905 | } |
2878 | } | 2906 | } |
2879 | } | 2907 | } |
2880 | if (rcw) | 2908 | if (rcw && conf->mddev->queue) |
2881 | blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", | 2909 | blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", |
2882 | (unsigned long long)sh->sector, | 2910 | (unsigned long long)sh->sector, |
2883 | rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); | 2911 | rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); |
@@ -3417,9 +3445,15 @@ static void handle_stripe(struct stripe_head *sh) | |||
3417 | return; | 3445 | return; |
3418 | } | 3446 | } |
3419 | 3447 | ||
3420 | if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { | 3448 | if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { |
3421 | set_bit(STRIPE_SYNCING, &sh->state); | 3449 | spin_lock(&sh->stripe_lock); |
3422 | clear_bit(STRIPE_INSYNC, &sh->state); | 3450 | /* Cannot process 'sync' concurrently with 'discard' */ |
3451 | if (!test_bit(STRIPE_DISCARD, &sh->state) && | ||
3452 | test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { | ||
3453 | set_bit(STRIPE_SYNCING, &sh->state); | ||
3454 | clear_bit(STRIPE_INSYNC, &sh->state); | ||
3455 | } | ||
3456 | spin_unlock(&sh->stripe_lock); | ||
3423 | } | 3457 | } |
3424 | clear_bit(STRIPE_DELAYED, &sh->state); | 3458 | clear_bit(STRIPE_DELAYED, &sh->state); |
3425 | 3459 | ||
@@ -3579,6 +3613,8 @@ static void handle_stripe(struct stripe_head *sh) | |||
3579 | test_bit(STRIPE_INSYNC, &sh->state)) { | 3613 | test_bit(STRIPE_INSYNC, &sh->state)) { |
3580 | md_done_sync(conf->mddev, STRIPE_SECTORS, 1); | 3614 | md_done_sync(conf->mddev, STRIPE_SECTORS, 1); |
3581 | clear_bit(STRIPE_SYNCING, &sh->state); | 3615 | clear_bit(STRIPE_SYNCING, &sh->state); |
3616 | if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) | ||
3617 | wake_up(&conf->wait_for_overlap); | ||
3582 | } | 3618 | } |
3583 | 3619 | ||
3584 | /* If the failed drives are just a ReadError, then we might need | 3620 | /* If the failed drives are just a ReadError, then we might need |
@@ -3982,9 +4018,10 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) | |||
3982 | atomic_inc(&conf->active_aligned_reads); | 4018 | atomic_inc(&conf->active_aligned_reads); |
3983 | spin_unlock_irq(&conf->device_lock); | 4019 | spin_unlock_irq(&conf->device_lock); |
3984 | 4020 | ||
3985 | trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), | 4021 | if (mddev->gendisk) |
3986 | align_bi, disk_devt(mddev->gendisk), | 4022 | trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), |
3987 | raid_bio->bi_sector); | 4023 | align_bi, disk_devt(mddev->gendisk), |
4024 | raid_bio->bi_sector); | ||
3988 | generic_make_request(align_bi); | 4025 | generic_make_request(align_bi); |
3989 | return 1; | 4026 | return 1; |
3990 | } else { | 4027 | } else { |
@@ -4078,7 +4115,8 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) | |||
4078 | } | 4115 | } |
4079 | spin_unlock_irq(&conf->device_lock); | 4116 | spin_unlock_irq(&conf->device_lock); |
4080 | } | 4117 | } |
4081 | trace_block_unplug(mddev->queue, cnt, !from_schedule); | 4118 | if (mddev->queue) |
4119 | trace_block_unplug(mddev->queue, cnt, !from_schedule); | ||
4082 | kfree(cb); | 4120 | kfree(cb); |
4083 | } | 4121 | } |
4084 | 4122 | ||
@@ -4141,6 +4179,13 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) | |||
4141 | sh = get_active_stripe(conf, logical_sector, 0, 0, 0); | 4179 | sh = get_active_stripe(conf, logical_sector, 0, 0, 0); |
4142 | prepare_to_wait(&conf->wait_for_overlap, &w, | 4180 | prepare_to_wait(&conf->wait_for_overlap, &w, |
4143 | TASK_UNINTERRUPTIBLE); | 4181 | TASK_UNINTERRUPTIBLE); |
4182 | set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); | ||
4183 | if (test_bit(STRIPE_SYNCING, &sh->state)) { | ||
4184 | release_stripe(sh); | ||
4185 | schedule(); | ||
4186 | goto again; | ||
4187 | } | ||
4188 | clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); | ||
4144 | spin_lock_irq(&sh->stripe_lock); | 4189 | spin_lock_irq(&sh->stripe_lock); |
4145 | for (d = 0; d < conf->raid_disks; d++) { | 4190 | for (d = 0; d < conf->raid_disks; d++) { |
4146 | if (d == sh->pd_idx || d == sh->qd_idx) | 4191 | if (d == sh->pd_idx || d == sh->qd_idx) |
@@ -4153,6 +4198,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) | |||
4153 | goto again; | 4198 | goto again; |
4154 | } | 4199 | } |
4155 | } | 4200 | } |
4201 | set_bit(STRIPE_DISCARD, &sh->state); | ||
4156 | finish_wait(&conf->wait_for_overlap, &w); | 4202 | finish_wait(&conf->wait_for_overlap, &w); |
4157 | for (d = 0; d < conf->raid_disks; d++) { | 4203 | for (d = 0; d < conf->raid_disks; d++) { |
4158 | if (d == sh->pd_idx || d == sh->qd_idx) | 4204 | if (d == sh->pd_idx || d == sh->qd_idx) |
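Two patterns recur in the raid5.c hunks above: block-trace calls are only issued when mddev->gendisk or mddev->queue actually exist (they do not when the array sits under dm-raid), and schedule_reconstruction() now counts what it locks before committing, returning early on a false alarm so the stripe state machine is never armed with nothing to do. A stand-alone sketch of the second pattern, with illustrative names rather than the driver's structures:

#include <stdbool.h>
#include <stdio.h>

enum recon_state { RECON_IDLE, RECON_DRAIN_RUN };

/* Count the work first; only flip the state machine if something was locked. */
static bool schedule_drain(const bool *needs_write, int ndisks, enum recon_state *state)
{
	int locked = 0;

	for (int i = 0; i < ndisks; i++)
		if (needs_write[i])
			locked++;

	if (!locked)
		return false;	/* false alarm - leave the state untouched */

	*state = RECON_DRAIN_RUN;
	return true;
}

int main(void)
{
	bool writes[4] = { false };
	enum recon_state state = RECON_IDLE;

	printf("scheduled=%d state=%d\n", schedule_drain(writes, 4, &state), state);
	return 0;
}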
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 18b2c4a8a1fd..b0b663b119a8 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -221,10 +221,6 @@ struct stripe_head { | |||
221 | struct stripe_operations { | 221 | struct stripe_operations { |
222 | int target, target2; | 222 | int target, target2; |
223 | enum sum_check_flags zero_sum_result; | 223 | enum sum_check_flags zero_sum_result; |
224 | #ifdef CONFIG_MULTICORE_RAID456 | ||
225 | unsigned long request; | ||
226 | wait_queue_head_t wait_for_ops; | ||
227 | #endif | ||
228 | } ops; | 224 | } ops; |
229 | struct r5dev { | 225 | struct r5dev { |
230 | /* rreq and rvec are used for the replacement device when | 226 | /* rreq and rvec are used for the replacement device when |
@@ -323,6 +319,7 @@ enum { | |||
323 | STRIPE_COMPUTE_RUN, | 319 | STRIPE_COMPUTE_RUN, |
324 | STRIPE_OPS_REQ_PENDING, | 320 | STRIPE_OPS_REQ_PENDING, |
325 | STRIPE_ON_UNPLUG_LIST, | 321 | STRIPE_ON_UNPLUG_LIST, |
322 | STRIPE_DISCARD, | ||
326 | }; | 323 | }; |
327 | 324 | ||
328 | /* | 325 | /* |
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c index 63feb75cc8e0..9279a9174f84 100644 --- a/drivers/mtd/bcm47xxpart.c +++ b/drivers/mtd/bcm47xxpart.c | |||
@@ -19,6 +19,12 @@ | |||
19 | /* 10 parts were found on sflash on Netgear WNDR4500 */ | 19 | /* 10 parts were found on sflash on Netgear WNDR4500 */ |
20 | #define BCM47XXPART_MAX_PARTS 12 | 20 | #define BCM47XXPART_MAX_PARTS 12 |
21 | 21 | ||
22 | /* | ||
23 | * Amount of bytes we read when analyzing each block of flash memory. | ||
24 | * Set it big enough to allow detecting partition and reading important data. | ||
25 | */ | ||
26 | #define BCM47XXPART_BYTES_TO_READ 0x404 | ||
27 | |||
22 | /* Magics */ | 28 | /* Magics */ |
23 | #define BOARD_DATA_MAGIC 0x5246504D /* MPFR */ | 29 | #define BOARD_DATA_MAGIC 0x5246504D /* MPFR */ |
24 | #define POT_MAGIC1 0x54544f50 /* POTT */ | 30 | #define POT_MAGIC1 0x54544f50 /* POTT */ |
@@ -57,17 +63,15 @@ static int bcm47xxpart_parse(struct mtd_info *master, | |||
57 | struct trx_header *trx; | 63 | struct trx_header *trx; |
58 | int trx_part = -1; | 64 | int trx_part = -1; |
59 | int last_trx_part = -1; | 65 | int last_trx_part = -1; |
60 | int max_bytes_to_read = 0x8004; | 66 | int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, }; |
61 | 67 | ||
62 | if (blocksize <= 0x10000) | 68 | if (blocksize <= 0x10000) |
63 | blocksize = 0x10000; | 69 | blocksize = 0x10000; |
64 | if (blocksize == 0x20000) | ||
65 | max_bytes_to_read = 0x18004; | ||
66 | 70 | ||
67 | /* Alloc */ | 71 | /* Alloc */ |
68 | parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS, | 72 | parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS, |
69 | GFP_KERNEL); | 73 | GFP_KERNEL); |
70 | buf = kzalloc(max_bytes_to_read, GFP_KERNEL); | 74 | buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL); |
71 | 75 | ||
72 | /* Parse block by block looking for magics */ | 76 | /* Parse block by block looking for magics */ |
73 | for (offset = 0; offset <= master->size - blocksize; | 77 | for (offset = 0; offset <= master->size - blocksize; |
@@ -82,7 +86,7 @@ static int bcm47xxpart_parse(struct mtd_info *master, | |||
82 | } | 86 | } |
83 | 87 | ||
84 | /* Read beginning of the block */ | 88 | /* Read beginning of the block */ |
85 | if (mtd_read(master, offset, max_bytes_to_read, | 89 | if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ, |
86 | &bytes_read, (uint8_t *)buf) < 0) { | 90 | &bytes_read, (uint8_t *)buf) < 0) { |
87 | pr_err("mtd_read error while parsing (offset: 0x%X)!\n", | 91 | pr_err("mtd_read error while parsing (offset: 0x%X)!\n", |
88 | offset); | 92 | offset); |
@@ -96,20 +100,6 @@ static int bcm47xxpart_parse(struct mtd_info *master, | |||
96 | continue; | 100 | continue; |
97 | } | 101 | } |
98 | 102 | ||
99 | /* Standard NVRAM */ | ||
100 | if (buf[0x000 / 4] == NVRAM_HEADER || | ||
101 | buf[0x1000 / 4] == NVRAM_HEADER || | ||
102 | buf[0x8000 / 4] == NVRAM_HEADER || | ||
103 | (blocksize == 0x20000 && ( | ||
104 | buf[0x10000 / 4] == NVRAM_HEADER || | ||
105 | buf[0x11000 / 4] == NVRAM_HEADER || | ||
106 | buf[0x18000 / 4] == NVRAM_HEADER))) { | ||
107 | bcm47xxpart_add_part(&parts[curr_part++], "nvram", | ||
108 | offset, 0); | ||
109 | offset = rounddown(offset, blocksize); | ||
110 | continue; | ||
111 | } | ||
112 | |||
113 | /* | 103 | /* |
114 | * board_data starts with board_id which differs across boards, | 104 | * board_data starts with board_id which differs across boards, |
115 | * but we can use 'MPFR' (hopefully) magic at 0x100 | 105 | * but we can use 'MPFR' (hopefully) magic at 0x100 |
@@ -178,6 +168,30 @@ static int bcm47xxpart_parse(struct mtd_info *master, | |||
178 | continue; | 168 | continue; |
179 | } | 169 | } |
180 | } | 170 | } |
171 | |||
172 | /* Look for NVRAM at the end of the last block. */ | ||
173 | for (i = 0; i < ARRAY_SIZE(possible_nvram_sizes); i++) { | ||
174 | if (curr_part > BCM47XXPART_MAX_PARTS) { | ||
175 | pr_warn("Reached maximum number of partitions, scanning stopped!\n"); | ||
176 | break; | ||
177 | } | ||
178 | |||
179 | offset = master->size - possible_nvram_sizes[i]; | ||
180 | if (mtd_read(master, offset, 0x4, &bytes_read, | ||
181 | (uint8_t *)buf) < 0) { | ||
182 | pr_err("mtd_read error while reading at offset 0x%X!\n", | ||
183 | offset); | ||
184 | continue; | ||
185 | } | ||
186 | |||
187 | /* Standard NVRAM */ | ||
188 | if (buf[0] == NVRAM_HEADER) { | ||
189 | bcm47xxpart_add_part(&parts[curr_part++], "nvram", | ||
190 | master->size - blocksize, 0); | ||
191 | break; | ||
192 | } | ||
193 | } | ||
194 | |||
181 | kfree(buf); | 195 | kfree(buf); |
182 | 196 | ||
183 | /* | 197 | /* |
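Rather than scanning every flash block for the NVRAM header, the bcm47xxpart parser now probes a short list of candidate sizes counted back from the end of the device and checks for the magic word there. A user-space sketch of that probe over an in-memory flash image; the candidate sizes come from the hunk above, and the expected magic is passed in rather than hard-coded:

#include <stdint.h>
#include <string.h>

/* Return the offset of the NVRAM header near the end of the image, or -1. */
static long find_nvram(const uint8_t *img, size_t img_size, uint32_t nvram_magic)
{
	static const size_t sizes[] = { 0x8000, 0xF000, 0x10000 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		uint32_t magic;

		if (sizes[i] > img_size)
			continue;
		memcpy(&magic, img + img_size - sizes[i], sizeof(magic));
		if (magic == nvram_magic)
			return (long)(img_size - sizes[i]);
	}
	return -1;
}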
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 43214151b882..42c63927609d 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -1523,6 +1523,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, | |||
1523 | oobreadlen -= toread; | 1523 | oobreadlen -= toread; |
1524 | } | 1524 | } |
1525 | } | 1525 | } |
1526 | |||
1527 | if (chip->options & NAND_NEED_READRDY) { | ||
1528 | /* Apply delay or wait for ready/busy pin */ | ||
1529 | if (!chip->dev_ready) | ||
1530 | udelay(chip->chip_delay); | ||
1531 | else | ||
1532 | nand_wait_ready(mtd); | ||
1533 | } | ||
1526 | } else { | 1534 | } else { |
1527 | memcpy(buf, chip->buffers->databuf + col, bytes); | 1535 | memcpy(buf, chip->buffers->databuf + col, bytes); |
1528 | buf += bytes; | 1536 | buf += bytes; |
@@ -1787,6 +1795,14 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, | |||
1787 | len = min(len, readlen); | 1795 | len = min(len, readlen); |
1788 | buf = nand_transfer_oob(chip, buf, ops, len); | 1796 | buf = nand_transfer_oob(chip, buf, ops, len); |
1789 | 1797 | ||
1798 | if (chip->options & NAND_NEED_READRDY) { | ||
1799 | /* Apply delay or wait for ready/busy pin */ | ||
1800 | if (!chip->dev_ready) | ||
1801 | udelay(chip->chip_delay); | ||
1802 | else | ||
1803 | nand_wait_ready(mtd); | ||
1804 | } | ||
1805 | |||
1790 | readlen -= len; | 1806 | readlen -= len; |
1791 | if (!readlen) | 1807 | if (!readlen) |
1792 | break; | 1808 | break; |
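The nand_base.c hunks reinstate a wait after each page and OOB read for chips flagged NAND_NEED_READRDY: with no dev_ready() hook the driver simply delays for chip_delay, otherwise it waits on the ready/busy pin. The decision in isolation, with user-space stand-ins for udelay() and nand_wait_ready():

#include <stdio.h>

struct chip {
	int (*dev_ready)(struct chip *chip);	/* NULL when no R/B# pin is wired */
	unsigned chip_delay_us;
};

static void delay_us(unsigned us)	{ printf("delaying %u us\n", us); }
static void wait_ready(struct chip *c)	{ while (c->dev_ready && !c->dev_ready(c)) ; }

/* Mirrors the guard added after every read in the hunk above. */
static void read_ready_wait(struct chip *chip)
{
	if (!chip->dev_ready)
		delay_us(chip->chip_delay_us);
	else
		wait_ready(chip);
}

int main(void)
{
	struct chip c = { 0, 20 };	/* no dev_ready hook, so a plain delay */

	read_ready_wait(&c);
	return 0;
}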
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index e3aa2748a6e7..9c612388e5de 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c | |||
@@ -22,49 +22,51 @@ | |||
22 | * 512 512 Byte page size | 22 | * 512 512 Byte page size |
23 | */ | 23 | */ |
24 | struct nand_flash_dev nand_flash_ids[] = { | 24 | struct nand_flash_dev nand_flash_ids[] = { |
25 | #define SP_OPTIONS NAND_NEED_READRDY | ||
26 | #define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16) | ||
25 | 27 | ||
26 | #ifdef CONFIG_MTD_NAND_MUSEUM_IDS | 28 | #ifdef CONFIG_MTD_NAND_MUSEUM_IDS |
27 | {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0}, | 29 | {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, SP_OPTIONS}, |
28 | {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0}, | 30 | {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, SP_OPTIONS}, |
29 | {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0}, | 31 | {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, SP_OPTIONS}, |
30 | {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0}, | 32 | {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, SP_OPTIONS}, |
31 | {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0}, | 33 | {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, SP_OPTIONS}, |
32 | {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0}, | 34 | {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, SP_OPTIONS}, |
33 | {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0}, | 35 | {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, SP_OPTIONS}, |
34 | {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0}, | 36 | {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, SP_OPTIONS}, |
35 | {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0}, | 37 | {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, SP_OPTIONS}, |
36 | {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0}, | 38 | {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, SP_OPTIONS}, |
37 | 39 | ||
38 | {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0}, | 40 | {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, SP_OPTIONS}, |
39 | {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0}, | 41 | {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, SP_OPTIONS}, |
40 | {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16}, | 42 | {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, SP_OPTIONS16}, |
41 | {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16}, | 43 | {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, SP_OPTIONS16}, |
42 | #endif | 44 | #endif |
43 | 45 | ||
44 | {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0}, | 46 | {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, SP_OPTIONS}, |
45 | {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0}, | 47 | {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, SP_OPTIONS}, |
46 | {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16}, | 48 | {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, SP_OPTIONS16}, |
47 | {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16}, | 49 | {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, SP_OPTIONS16}, |
48 | 50 | ||
49 | {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0}, | 51 | {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, SP_OPTIONS}, |
50 | {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0}, | 52 | {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, SP_OPTIONS}, |
51 | {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16}, | 53 | {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, SP_OPTIONS16}, |
52 | {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16}, | 54 | {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, SP_OPTIONS16}, |
53 | 55 | ||
54 | {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0}, | 56 | {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, SP_OPTIONS}, |
55 | {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0}, | 57 | {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, SP_OPTIONS}, |
56 | {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16}, | 58 | {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, SP_OPTIONS16}, |
57 | {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16}, | 59 | {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, SP_OPTIONS16}, |
58 | 60 | ||
59 | {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0}, | 61 | {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, SP_OPTIONS}, |
60 | {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, 0}, | 62 | {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, SP_OPTIONS}, |
61 | {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0}, | 63 | {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, SP_OPTIONS}, |
62 | {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16}, | 64 | {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, SP_OPTIONS16}, |
63 | {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16}, | 65 | {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, SP_OPTIONS16}, |
64 | {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16}, | 66 | {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, SP_OPTIONS16}, |
65 | {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16}, | 67 | {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, SP_OPTIONS16}, |
66 | 68 | ||
67 | {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0}, | 69 | {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, SP_OPTIONS}, |
68 | 70 | ||
69 | /* | 71 | /* |
70 | * These are the new chips with large page size. The pagesize and the | 72 | * These are the new chips with large page size. The pagesize and the |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 8b4e96e01d6c..6bbd90e1123c 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1746,6 +1746,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1746 | 1746 | ||
1747 | bond_compute_features(bond); | 1747 | bond_compute_features(bond); |
1748 | 1748 | ||
1749 | bond_update_speed_duplex(new_slave); | ||
1750 | |||
1749 | read_lock(&bond->lock); | 1751 | read_lock(&bond->lock); |
1750 | 1752 | ||
1751 | new_slave->last_arp_rx = jiffies - | 1753 | new_slave->last_arp_rx = jiffies - |
@@ -1798,8 +1800,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1798 | new_slave->link == BOND_LINK_DOWN ? "DOWN" : | 1800 | new_slave->link == BOND_LINK_DOWN ? "DOWN" : |
1799 | (new_slave->link == BOND_LINK_UP ? "UP" : "BACK")); | 1801 | (new_slave->link == BOND_LINK_UP ? "UP" : "BACK")); |
1800 | 1802 | ||
1801 | bond_update_speed_duplex(new_slave); | ||
1802 | |||
1803 | if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { | 1803 | if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { |
1804 | /* if there is a primary slave, remember it */ | 1804 | /* if there is a primary slave, remember it */ |
1805 | if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { | 1805 | if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { |
@@ -2374,8 +2374,6 @@ static void bond_miimon_commit(struct bonding *bond) | |||
2374 | bond_set_backup_slave(slave); | 2374 | bond_set_backup_slave(slave); |
2375 | } | 2375 | } |
2376 | 2376 | ||
2377 | bond_update_speed_duplex(slave); | ||
2378 | |||
2379 | pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", | 2377 | pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", |
2380 | bond->dev->name, slave->dev->name, | 2378 | bond->dev->name, slave->dev->name, |
2381 | slave->speed, slave->duplex ? "full" : "half"); | 2379 | slave->speed, slave->duplex ? "full" : "half"); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index a923bc4d5a1f..4046f97378c2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -2760,6 +2760,7 @@ load_error2: | |||
2760 | bp->port.pmf = 0; | 2760 | bp->port.pmf = 0; |
2761 | load_error1: | 2761 | load_error1: |
2762 | bnx2x_napi_disable(bp); | 2762 | bnx2x_napi_disable(bp); |
2763 | bnx2x_del_all_napi(bp); | ||
2763 | 2764 | ||
2764 | /* clear pf_load status, as it was already set */ | 2765 | /* clear pf_load status, as it was already set */ |
2765 | if (IS_PF(bp)) | 2766 | if (IS_PF(bp)) |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 364e37ecbc5c..198f6f1c9ad5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | |||
@@ -459,8 +459,9 @@ struct bnx2x_fw_port_stats_old { | |||
459 | 459 | ||
460 | #define UPDATE_QSTAT(s, t) \ | 460 | #define UPDATE_QSTAT(s, t) \ |
461 | do { \ | 461 | do { \ |
462 | qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \ | ||
463 | qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \ | 462 | qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \ |
463 | qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \ | ||
464 | + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \ | ||
464 | } while (0) | 465 | } while (0) |
465 | 466 | ||
466 | #define UPDATE_QSTAT_OLD(f) \ | 467 | #define UPDATE_QSTAT_OLD(f) \ |
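The UPDATE_QSTAT fix above updates the low 32-bit half first and then folds a carry into the high half whenever the new low word wrapped below the old one. The same trick as a plain C function, with illustrative types rather than the bnx2x statistics structures:

#include <stdint.h>
#include <stdio.h>

struct stat64 { uint32_t lo, hi; };

/* Add a 32-bit delta to a counter kept as two 32-bit halves, with carry. */
static void stat_add(struct stat64 *s, uint32_t delta)
{
	uint32_t old_lo = s->lo;

	s->lo = old_lo + delta;
	s->hi += (s->lo < old_lo) ? 1 : 0;	/* low half wrapped, so carry */
}

int main(void)
{
	struct stat64 s = { .lo = 0xFFFFFFF0u, .hi = 0 };

	stat_add(&s, 0x20);	/* wraps the low word */
	printf("hi=%u lo=0x%x\n", s.hi, s.lo);	/* prints hi=1 lo=0x10 */
	return 0;
}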
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 93729f942358..67d2663b3974 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -4130,6 +4130,14 @@ static void tg3_phy_copper_begin(struct tg3 *tp) | |||
4130 | tp->link_config.active_speed = tp->link_config.speed; | 4130 | tp->link_config.active_speed = tp->link_config.speed; |
4131 | tp->link_config.active_duplex = tp->link_config.duplex; | 4131 | tp->link_config.active_duplex = tp->link_config.duplex; |
4132 | 4132 | ||
4133 | if (tg3_asic_rev(tp) == ASIC_REV_5714) { | ||
4134 | /* With autoneg disabled, 5715 only links up when the | ||
4135 | * advertisement register has the configured speed | ||
4136 | * enabled. | ||
4137 | */ | ||
4138 | tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); | ||
4139 | } | ||
4140 | |||
4133 | bmcr = 0; | 4141 | bmcr = 0; |
4134 | switch (tp->link_config.speed) { | 4142 | switch (tp->link_config.speed) { |
4135 | default: | 4143 | default: |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 4ce62031f62f..8049268ce0f2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -497,8 +497,9 @@ int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len, | |||
497 | } | 497 | } |
498 | 498 | ||
499 | #define EEPROM_STAT_ADDR 0x7bfc | 499 | #define EEPROM_STAT_ADDR 0x7bfc |
500 | #define VPD_BASE 0 | ||
501 | #define VPD_LEN 512 | 500 | #define VPD_LEN 512 |
501 | #define VPD_BASE 0x400 | ||
502 | #define VPD_BASE_OLD 0 | ||
502 | 503 | ||
503 | /** | 504 | /** |
504 | * t4_seeprom_wp - enable/disable EEPROM write protection | 505 | * t4_seeprom_wp - enable/disable EEPROM write protection |
@@ -524,7 +525,7 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable) | |||
524 | int get_vpd_params(struct adapter *adapter, struct vpd_params *p) | 525 | int get_vpd_params(struct adapter *adapter, struct vpd_params *p) |
525 | { | 526 | { |
526 | u32 cclk_param, cclk_val; | 527 | u32 cclk_param, cclk_val; |
527 | int i, ret; | 528 | int i, ret, addr; |
528 | int ec, sn; | 529 | int ec, sn; |
529 | u8 *vpd, csum; | 530 | u8 *vpd, csum; |
530 | unsigned int vpdr_len, kw_offset, id_len; | 531 | unsigned int vpdr_len, kw_offset, id_len; |
@@ -533,7 +534,12 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) | |||
533 | if (!vpd) | 534 | if (!vpd) |
534 | return -ENOMEM; | 535 | return -ENOMEM; |
535 | 536 | ||
536 | ret = pci_read_vpd(adapter->pdev, VPD_BASE, VPD_LEN, vpd); | 537 | ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); |
538 | if (ret < 0) | ||
539 | goto out; | ||
540 | addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; | ||
541 | |||
542 | ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd); | ||
537 | if (ret < 0) | 543 | if (ret < 0) |
538 | goto out; | 544 | goto out; |
539 | 545 | ||
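The cxgb4 change probes where the VPD actually starts: it reads one dword at the new 0x400 base and, if the first byte is 0x82 (the PCI VPD large-resource Identifier String tag), keeps that base, otherwise it falls back to the old offset 0. A user-space sketch of the same probe against a raw VPD buffer, with illustrative names:

#include <stdint.h>
#include <stddef.h>

#define VPD_BASE	0x400
#define VPD_BASE_OLD	0x000
#define VPD_TAG_ID	0x82	/* large-resource tag: Identifier String */

/* Pick the VPD base by checking for the ID-string tag at the new offset. */
static size_t vpd_base(const uint8_t *eeprom, size_t len)
{
	if (len > VPD_BASE && eeprom[VPD_BASE] == VPD_TAG_ID)
		return VPD_BASE;
	return VPD_BASE_OLD;
}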
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig index 0c37fb2cc867..1df33c799c00 100644 --- a/drivers/net/ethernet/dec/tulip/Kconfig +++ b/drivers/net/ethernet/dec/tulip/Kconfig | |||
@@ -108,6 +108,7 @@ config TULIP_DM910X | |||
108 | config DE4X5 | 108 | config DE4X5 |
109 | tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" | 109 | tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" |
110 | depends on (PCI || EISA) | 110 | depends on (PCI || EISA) |
111 | depends on VIRT_TO_BUS || ALPHA || PPC || SPARC | ||
111 | select CRC32 | 112 | select CRC32 |
112 | ---help--- | 113 | ---help--- |
113 | This is support for the DIGITAL series of PCI/EISA Ethernet cards. | 114 | This is support for the DIGITAL series of PCI/EISA Ethernet cards. |
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c index 069a155d16ed..e3f39372ce25 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c | |||
@@ -934,24 +934,28 @@ static void fec_enet_adjust_link(struct net_device *ndev) | |||
934 | goto spin_unlock; | 934 | goto spin_unlock; |
935 | } | 935 | } |
936 | 936 | ||
937 | /* Duplex link change */ | ||
938 | if (phy_dev->link) { | 937 | if (phy_dev->link) { |
939 | if (fep->full_duplex != phy_dev->duplex) { | 938 | if (!fep->link) { |
940 | fec_restart(ndev, phy_dev->duplex); | ||
941 | /* prevent unnecessary second fec_restart() below */ | ||
942 | fep->link = phy_dev->link; | 939 | fep->link = phy_dev->link; |
943 | status_change = 1; | 940 | status_change = 1; |
944 | } | 941 | } |
945 | } | ||
946 | 942 | ||
947 | /* Link on or off change */ | 943 | if (fep->full_duplex != phy_dev->duplex) |
948 | if (phy_dev->link != fep->link) { | 944 | status_change = 1; |
949 | fep->link = phy_dev->link; | 945 | |
950 | if (phy_dev->link) | 946 | if (phy_dev->speed != fep->speed) { |
947 | fep->speed = phy_dev->speed; | ||
948 | status_change = 1; | ||
949 | } | ||
950 | |||
951 | /* if any of the above changed restart the FEC */ | ||
952 | if (status_change) | ||
951 | fec_restart(ndev, phy_dev->duplex); | 953 | fec_restart(ndev, phy_dev->duplex); |
952 | else | 954 | } else { |
955 | if (fep->link) { | ||
953 | fec_stop(ndev); | 956 | fec_stop(ndev); |
954 | status_change = 1; | 957 | status_change = 1; |
958 | } | ||
955 | } | 959 | } |
956 | 960 | ||
957 | spin_unlock: | 961 | spin_unlock: |
@@ -1437,6 +1441,7 @@ fec_enet_close(struct net_device *ndev) | |||
1437 | struct fec_enet_private *fep = netdev_priv(ndev); | 1441 | struct fec_enet_private *fep = netdev_priv(ndev); |
1438 | 1442 | ||
1439 | /* Don't know what to do yet. */ | 1443 | /* Don't know what to do yet. */ |
1444 | napi_disable(&fep->napi); | ||
1440 | fep->opened = 0; | 1445 | fep->opened = 0; |
1441 | netif_stop_queue(ndev); | 1446 | netif_stop_queue(ndev); |
1442 | fec_stop(ndev); | 1447 | fec_stop(ndev); |
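The fec_enet_adjust_link() rework collects the three possible changes (link came up, duplex changed, speed changed) into one status_change flag and restarts the controller at most once per event, stopping it only on link loss. A condensed sketch of that decision logic, with plain structs standing in for the driver and PHY state:

#include <stdbool.h>

struct link_state { bool link; int duplex; int speed; };

static void mac_restart(struct link_state *mac, const struct link_state *phy)
{
	mac->duplex = phy->duplex;
	mac->speed  = phy->speed;
}

static void mac_stop(struct link_state *mac)
{
	mac->link = false;
}

/* Fold link-up, duplex and speed changes into a single restart. */
static void adjust_link(struct link_state *mac, const struct link_state *phy)
{
	bool status_change = false;

	if (phy->link) {
		if (!mac->link) {
			mac->link = true;
			status_change = true;
		}
		if (mac->duplex != phy->duplex || mac->speed != phy->speed)
			status_change = true;
		if (status_change)
			mac_restart(mac, phy);	/* at most one restart per event */
	} else if (mac->link) {
		mac_stop(mac);
	}
}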
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index f5390071efd0..eb4372962839 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h | |||
@@ -240,6 +240,7 @@ struct fec_enet_private { | |||
240 | phy_interface_t phy_interface; | 240 | phy_interface_t phy_interface; |
241 | int link; | 241 | int link; |
242 | int full_duplex; | 242 | int full_duplex; |
243 | int speed; | ||
243 | struct completion mdio_done; | 244 | struct completion mdio_done; |
244 | int irq[FEC_IRQ_NUM]; | 245 | int irq[FEC_IRQ_NUM]; |
245 | int bufdesc_ex; | 246 | int bufdesc_ex; |
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 0ad790cc473c..eaa8e874a3cb 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c | |||
@@ -376,7 +376,8 @@ efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) | |||
376 | return false; | 376 | return false; |
377 | 377 | ||
378 | tx_queue->empty_read_count = 0; | 378 | tx_queue->empty_read_count = 0; |
379 | return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; | 379 | return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 |
380 | && tx_queue->write_count - write_count == 1; | ||
380 | } | 381 | } |
381 | 382 | ||
382 | /* For each entry inserted into the software descriptor ring, create a | 383 | /* For each entry inserted into the software descriptor ring, create a |
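The sfc change tightens efx_may_push_tx_desc(): a descriptor goes straight through the PIO push register only if the queue had gone empty at the old write count and exactly one descriptor has been written since, because the push register can carry a single descriptor. A reduced sketch of that check with plain counters; it ignores the EFX_EMPTY_COUNT_VALID flag handling from the real code:

#include <stdbool.h>

/* Push directly to hardware only if the queue was empty and exactly one
 * descriptor was added; otherwise fall back to ringing the doorbell. */
static bool may_push(unsigned empty_read_count, unsigned old_write_count,
		     unsigned new_write_count)
{
	return empty_read_count == old_write_count &&
	       new_write_count - old_write_count == 1;
}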
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 01ffbc486982..75c48558e6fd 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -905,7 +905,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, | |||
905 | /* If there is no more tx desc left free then we need to | 905 | /* If there is no more tx desc left free then we need to |
906 | * tell the kernel to stop sending us tx frames. | 906 | * tell the kernel to stop sending us tx frames. |
907 | */ | 907 | */ |
908 | if (unlikely(cpdma_check_free_tx_desc(priv->txch))) | 908 | if (unlikely(!cpdma_check_free_tx_desc(priv->txch))) |
909 | netif_stop_queue(ndev); | 909 | netif_stop_queue(ndev); |
910 | 910 | ||
911 | return NETDEV_TX_OK; | 911 | return NETDEV_TX_OK; |
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 52c05366599a..ae1b77aa199f 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
@@ -1102,7 +1102,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
1102 | /* If there is no more tx desc left free then we need to | 1102 | /* If there is no more tx desc left free then we need to |
1103 | * tell the kernel to stop sending us tx frames. | 1103 | * tell the kernel to stop sending us tx frames. |
1104 | */ | 1104 | */ |
1105 | if (unlikely(cpdma_check_free_tx_desc(priv->txchan))) | 1105 | if (unlikely(!cpdma_check_free_tx_desc(priv->txchan))) |
1106 | netif_stop_queue(ndev); | 1106 | netif_stop_queue(ndev); |
1107 | 1107 | ||
1108 | return NETDEV_TX_OK; | 1108 | return NETDEV_TX_OK; |
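Both the cpsw and davinci_emac hunks fix an inverted test: cpdma_check_free_tx_desc() reports whether free descriptors remain, so the queue must be stopped when it returns false. The intended logic stand-alone, with a stub predicate and illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* cpdma_check_free_tx_desc()-style predicate: true while descriptors remain. */
static bool have_free_tx_desc(int free_count)
{
	return free_count > 0;
}

int main(void)
{
	bool stop_queue = !have_free_tx_desc(0);	/* ring exhausted, so stop */

	printf("stop_queue=%d\n", stop_queue);
	return 0;
}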
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 37add21a3d7d..59ac143dec25 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c | |||
@@ -666,6 +666,7 @@ static int netconsole_netdev_event(struct notifier_block *this, | |||
666 | goto done; | 666 | goto done; |
667 | 667 | ||
668 | spin_lock_irqsave(&target_list_lock, flags); | 668 | spin_lock_irqsave(&target_list_lock, flags); |
669 | restart: | ||
669 | list_for_each_entry(nt, &target_list, list) { | 670 | list_for_each_entry(nt, &target_list, list) { |
670 | netconsole_target_get(nt); | 671 | netconsole_target_get(nt); |
671 | if (nt->np.dev == dev) { | 672 | if (nt->np.dev == dev) { |
@@ -678,15 +679,17 @@ static int netconsole_netdev_event(struct notifier_block *this, | |||
678 | case NETDEV_UNREGISTER: | 679 | case NETDEV_UNREGISTER: |
679 | /* | 680 | /* |
680 | * rtnl_lock already held | 681 | * rtnl_lock already held |
682 | * we might sleep in __netpoll_cleanup() | ||
681 | */ | 683 | */ |
682 | if (nt->np.dev) { | 684 | spin_unlock_irqrestore(&target_list_lock, flags); |
683 | __netpoll_cleanup(&nt->np); | 685 | __netpoll_cleanup(&nt->np); |
684 | dev_put(nt->np.dev); | 686 | spin_lock_irqsave(&target_list_lock, flags); |
685 | nt->np.dev = NULL; | 687 | dev_put(nt->np.dev); |
686 | } | 688 | nt->np.dev = NULL; |
687 | nt->enabled = 0; | 689 | nt->enabled = 0; |
688 | stopped = true; | 690 | stopped = true; |
689 | break; | 691 | netconsole_target_put(nt); |
692 | goto restart; | ||
690 | } | 693 | } |
691 | } | 694 | } |
692 | netconsole_target_put(nt); | 695 | netconsole_target_put(nt); |
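The netconsole fix drops target_list_lock around __netpoll_cleanup(), which may sleep, then re-takes the lock and restarts the list walk from the top because the list may have changed while the lock was released; the entry itself is pinned with netconsole_target_get() before the lock goes away. A user-space analogue of that drop-and-restart pattern with a pthread mutex; the structures are illustrative and the refcounting is elided:

#include <pthread.h>

struct target { struct target *next; int dead; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct target *targets;

static void slow_cleanup(struct target *t) { (void)t; /* may sleep */ }

static void cleanup_dead_targets(void)
{
	struct target *t;

	pthread_mutex_lock(&list_lock);
restart:
	for (t = targets; t; t = t->next) {
		if (!t->dead)
			continue;
		/* Cannot sleep under the lock: drop it, do the slow work,
		 * then re-take it and restart, since the list may have changed.
		 * Real code would hold a reference on t across the gap. */
		pthread_mutex_unlock(&list_lock);
		slow_cleanup(t);
		pthread_mutex_lock(&list_lock);
		t->dead = 0;
		goto restart;
	}
	pthread_mutex_unlock(&list_lock);
}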
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 3b6e9b83342d..7c769d8e25ad 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig | |||
@@ -268,7 +268,7 @@ config USB_NET_SMSC75XX | |||
268 | select CRC16 | 268 | select CRC16 |
269 | select CRC32 | 269 | select CRC32 |
270 | help | 270 | help |
271 | This option adds support for SMSC LAN95XX based USB 2.0 | 271 | This option adds support for SMSC LAN75XX based USB 2.0 |
272 | Gigabit Ethernet adapters. | 272 | Gigabit Ethernet adapters. |
273 | 273 | ||
274 | config USB_NET_SMSC95XX | 274 | config USB_NET_SMSC95XX |
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 248d2dc765a5..16c842997291 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c | |||
@@ -68,18 +68,9 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf) | |||
68 | struct cdc_ncm_ctx *ctx; | 68 | struct cdc_ncm_ctx *ctx; |
69 | struct usb_driver *subdriver = ERR_PTR(-ENODEV); | 69 | struct usb_driver *subdriver = ERR_PTR(-ENODEV); |
70 | int ret = -ENODEV; | 70 | int ret = -ENODEV; |
71 | u8 data_altsetting = CDC_NCM_DATA_ALTSETTING_NCM; | 71 | u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf); |
72 | struct cdc_mbim_state *info = (void *)&dev->data; | 72 | struct cdc_mbim_state *info = (void *)&dev->data; |
73 | 73 | ||
74 | /* see if interface supports MBIM alternate setting */ | ||
75 | if (intf->num_altsetting == 2) { | ||
76 | if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) | ||
77 | usb_set_interface(dev->udev, | ||
78 | intf->cur_altsetting->desc.bInterfaceNumber, | ||
79 | CDC_NCM_COMM_ALTSETTING_MBIM); | ||
80 | data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM; | ||
81 | } | ||
82 | |||
83 | /* Probably NCM, defer for cdc_ncm_bind */ | 74 | /* Probably NCM, defer for cdc_ncm_bind */ |
84 | if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) | 75 | if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) |
85 | goto err; | 76 | goto err; |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 61b74a2b89ac..4709fa3497cf 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -55,6 +55,14 @@ | |||
55 | 55 | ||
56 | #define DRIVER_VERSION "14-Mar-2012" | 56 | #define DRIVER_VERSION "14-Mar-2012" |
57 | 57 | ||
58 | #if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) | ||
59 | static bool prefer_mbim = true; | ||
60 | #else | ||
61 | static bool prefer_mbim; | ||
62 | #endif | ||
63 | module_param(prefer_mbim, bool, S_IRUGO | S_IWUSR); | ||
64 | MODULE_PARM_DESC(prefer_mbim, "Prefer MBIM setting on dual NCM/MBIM functions"); | ||
65 | |||
58 | static void cdc_ncm_txpath_bh(unsigned long param); | 66 | static void cdc_ncm_txpath_bh(unsigned long param); |
59 | static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); | 67 | static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); |
60 | static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); | 68 | static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); |
@@ -550,9 +558,12 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf) | |||
550 | } | 558 | } |
551 | EXPORT_SYMBOL_GPL(cdc_ncm_unbind); | 559 | EXPORT_SYMBOL_GPL(cdc_ncm_unbind); |
552 | 560 | ||
553 | static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) | 561 | /* Select the MBIM altsetting iff it is preferred and available, |
562 | * returning the number of the corresponding data interface altsetting | ||
563 | */ | ||
564 | u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf) | ||
554 | { | 565 | { |
555 | int ret; | 566 | struct usb_host_interface *alt; |
556 | 567 | ||
557 | /* The MBIM spec defines a NCM compatible default altsetting, | 568 | /* The MBIM spec defines a NCM compatible default altsetting, |
558 | * which we may have matched: | 569 | * which we may have matched: |
@@ -568,23 +579,27 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) | |||
568 | * endpoint descriptors, shall be constructed according to | 579 | * endpoint descriptors, shall be constructed according to |
569 | * the rules given in section 6 (USB Device Model) of this | 580 | * the rules given in section 6 (USB Device Model) of this |
570 | * specification." | 581 | * specification." |
571 | * | ||
572 | * Do not bind to such interfaces, allowing cdc_mbim to handle | ||
573 | * them | ||
574 | */ | 582 | */ |
575 | #if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) | 583 | if (prefer_mbim && intf->num_altsetting == 2) { |
576 | if ((intf->num_altsetting == 2) && | 584 | alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM); |
577 | !usb_set_interface(dev->udev, | 585 | if (alt && cdc_ncm_comm_intf_is_mbim(alt) && |
578 | intf->cur_altsetting->desc.bInterfaceNumber, | 586 | !usb_set_interface(dev->udev, |
579 | CDC_NCM_COMM_ALTSETTING_MBIM)) { | 587 | intf->cur_altsetting->desc.bInterfaceNumber, |
580 | if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) | 588 | CDC_NCM_COMM_ALTSETTING_MBIM)) |
581 | return -ENODEV; | 589 | return CDC_NCM_DATA_ALTSETTING_MBIM; |
582 | else | ||
583 | usb_set_interface(dev->udev, | ||
584 | intf->cur_altsetting->desc.bInterfaceNumber, | ||
585 | CDC_NCM_COMM_ALTSETTING_NCM); | ||
586 | } | 590 | } |
587 | #endif | 591 | return CDC_NCM_DATA_ALTSETTING_NCM; |
592 | } | ||
593 | EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting); | ||
594 | |||
595 | static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) | ||
596 | { | ||
597 | int ret; | ||
598 | |||
599 | /* MBIM backwards compatible function? */ | ||
600 | cdc_ncm_select_altsetting(dev, intf); | ||
601 | if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) | ||
602 | return -ENODEV; | ||
588 | 603 | ||
589 | /* NCM data altsetting is always 1 */ | 604 | /* NCM data altsetting is always 1 */ |
590 | ret = cdc_ncm_bind_common(dev, intf, 1); | 605 | ret = cdc_ncm_bind_common(dev, intf, 1); |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index efb5c7c33a28..968d5d50751d 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -139,16 +139,9 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) | |||
139 | 139 | ||
140 | BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); | 140 | BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); |
141 | 141 | ||
142 | /* control and data is shared? */ | 142 | /* set up initial state */ |
143 | if (intf->cur_altsetting->desc.bNumEndpoints == 3) { | 143 | info->control = intf; |
144 | info->control = intf; | 144 | info->data = intf; |
145 | info->data = intf; | ||
146 | goto shared; | ||
147 | } | ||
148 | |||
149 | /* else require a single interrupt status endpoint on control intf */ | ||
150 | if (intf->cur_altsetting->desc.bNumEndpoints != 1) | ||
151 | goto err; | ||
152 | 145 | ||
153 | /* and a number of CDC descriptors */ | 146 | /* and a number of CDC descriptors */ |
154 | while (len > 3) { | 147 | while (len > 3) { |
@@ -207,25 +200,14 @@ next_desc: | |||
207 | buf += h->bLength; | 200 | buf += h->bLength; |
208 | } | 201 | } |
209 | 202 | ||
210 | /* did we find all the required ones? */ | 203 | /* Use separate control and data interfaces if we found a CDC Union */ |
211 | if (!(found & (1 << USB_CDC_HEADER_TYPE)) || | 204 | if (cdc_union) { |
212 | !(found & (1 << USB_CDC_UNION_TYPE))) { | 205 | info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0); |
213 | dev_err(&intf->dev, "CDC functional descriptors missing\n"); | 206 | if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) { |
214 | goto err; | 207 | dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n", |
215 | } | 208 | cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0); |
216 | 209 | goto err; | |
217 | /* verify CDC Union */ | 210 | } |
218 | if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) { | ||
219 | dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0); | ||
220 | goto err; | ||
221 | } | ||
222 | |||
223 | /* need to save these for unbind */ | ||
224 | info->control = intf; | ||
225 | info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0); | ||
226 | if (!info->data) { | ||
227 | dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0); | ||
228 | goto err; | ||
229 | } | 211 | } |
230 | 212 | ||
231 | /* errors aren't fatal - we can live with the dynamic address */ | 213 | /* errors aren't fatal - we can live with the dynamic address */ |
@@ -235,11 +217,12 @@ next_desc: | |||
235 | } | 217 | } |
236 | 218 | ||
237 | /* claim data interface and set it up */ | 219 | /* claim data interface and set it up */ |
238 | status = usb_driver_claim_interface(driver, info->data, dev); | 220 | if (info->control != info->data) { |
239 | if (status < 0) | 221 | status = usb_driver_claim_interface(driver, info->data, dev); |
240 | goto err; | 222 | if (status < 0) |
223 | goto err; | ||
224 | } | ||
241 | 225 | ||
242 | shared: | ||
243 | status = qmi_wwan_register_subdriver(dev); | 226 | status = qmi_wwan_register_subdriver(dev); |
244 | if (status < 0 && info->control != info->data) { | 227 | if (status < 0 && info->control != info->data) { |
245 | usb_set_intfdata(info->data, NULL); | 228 | usb_set_intfdata(info->data, NULL); |
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 246aa62a4817..2fe0ceba4400 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c | |||
@@ -1117,10 +1117,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv, | |||
1117 | adhoc_join->bss_descriptor.bssid, | 1117 | adhoc_join->bss_descriptor.bssid, |
1118 | adhoc_join->bss_descriptor.ssid); | 1118 | adhoc_join->bss_descriptor.ssid); |
1119 | 1119 | ||
1120 | for (i = 0; bss_desc->supported_rates[i] && | 1120 | for (i = 0; i < MWIFIEX_SUPPORTED_RATES && |
1121 | i < MWIFIEX_SUPPORTED_RATES; | 1121 | bss_desc->supported_rates[i]; i++) |
1122 | i++) | 1122 | ; |
1123 | ; | ||
1124 | rates_size = i; | 1123 | rates_size = i; |
1125 | 1124 | ||
1126 | /* Copy Data Rates from the Rates recorded in scan response */ | 1125 | /* Copy Data Rates from the Rates recorded in scan response */ |
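The mwifiex change reorders the loop conditions so the index is bounds-checked before supported_rates[i] is read, avoiding a one-past-the-end access when every slot is populated. The same counting idiom stand-alone; MAX_RATES here is an illustrative bound, not the driver's constant:

#include <stdio.h>

#define MAX_RATES 14

/* Count leading non-zero entries; test the bound before the element. */
static int count_rates(const unsigned char rates[MAX_RATES])
{
	int i;

	for (i = 0; i < MAX_RATES && rates[i]; i++)
		;
	return i;
}

int main(void)
{
	unsigned char rates[MAX_RATES] = { 2, 4, 11, 22 };

	printf("%d rates\n", count_rates(rates));
	return 0;
}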
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index 44d6ead43341..2bf4efa33186 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig | |||
@@ -55,10 +55,10 @@ config RT61PCI | |||
55 | 55 | ||
56 | config RT2800PCI | 56 | config RT2800PCI |
57 | tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" | 57 | tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" |
58 | depends on PCI || RALINK_RT288X || RALINK_RT305X | 58 | depends on PCI || SOC_RT288X || SOC_RT305X |
59 | select RT2800_LIB | 59 | select RT2800_LIB |
60 | select RT2X00_LIB_PCI if PCI | 60 | select RT2X00_LIB_PCI if PCI |
61 | select RT2X00_LIB_SOC if RALINK_RT288X || RALINK_RT305X | 61 | select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X |
62 | select RT2X00_LIB_FIRMWARE | 62 | select RT2X00_LIB_FIRMWARE |
63 | select RT2X00_LIB_CRYPTO | 63 | select RT2X00_LIB_CRYPTO |
64 | select CRC_CCITT | 64 | select CRC_CCITT |
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 48a01aa21f1c..ded73da4de0b 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c | |||
@@ -89,7 +89,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token) | |||
89 | rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); | 89 | rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); |
90 | } | 90 | } |
91 | 91 | ||
92 | #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) | 92 | #if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) |
93 | static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) | 93 | static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) |
94 | { | 94 | { |
95 | void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE); | 95 | void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE); |
@@ -107,7 +107,7 @@ static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) | |||
107 | { | 107 | { |
108 | return -ENOMEM; | 108 | return -ENOMEM; |
109 | } | 109 | } |
110 | #endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ | 110 | #endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */ |
111 | 111 | ||
112 | #ifdef CONFIG_PCI | 112 | #ifdef CONFIG_PCI |
113 | static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) | 113 | static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) |
@@ -1177,7 +1177,7 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_device_table); | |||
1177 | #endif /* CONFIG_PCI */ | 1177 | #endif /* CONFIG_PCI */ |
1178 | MODULE_LICENSE("GPL"); | 1178 | MODULE_LICENSE("GPL"); |
1179 | 1179 | ||
1180 | #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) | 1180 | #if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) |
1181 | static int rt2800soc_probe(struct platform_device *pdev) | 1181 | static int rt2800soc_probe(struct platform_device *pdev) |
1182 | { | 1182 | { |
1183 | return rt2x00soc_probe(pdev, &rt2800pci_ops); | 1183 | return rt2x00soc_probe(pdev, &rt2800pci_ops); |
@@ -1194,7 +1194,7 @@ static struct platform_driver rt2800soc_driver = { | |||
1194 | .suspend = rt2x00soc_suspend, | 1194 | .suspend = rt2x00soc_suspend, |
1195 | .resume = rt2x00soc_resume, | 1195 | .resume = rt2x00soc_resume, |
1196 | }; | 1196 | }; |
1197 | #endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ | 1197 | #endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */ |
1198 | 1198 | ||
1199 | #ifdef CONFIG_PCI | 1199 | #ifdef CONFIG_PCI |
1200 | static int rt2800pci_probe(struct pci_dev *pci_dev, | 1200 | static int rt2800pci_probe(struct pci_dev *pci_dev, |
@@ -1217,7 +1217,7 @@ static int __init rt2800pci_init(void) | |||
1217 | { | 1217 | { |
1218 | int ret = 0; | 1218 | int ret = 0; |
1219 | 1219 | ||
1220 | #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) | 1220 | #if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) |
1221 | ret = platform_driver_register(&rt2800soc_driver); | 1221 | ret = platform_driver_register(&rt2800soc_driver); |
1222 | if (ret) | 1222 | if (ret) |
1223 | return ret; | 1223 | return ret; |
@@ -1225,7 +1225,7 @@ static int __init rt2800pci_init(void) | |||
1225 | #ifdef CONFIG_PCI | 1225 | #ifdef CONFIG_PCI |
1226 | ret = pci_register_driver(&rt2800pci_driver); | 1226 | ret = pci_register_driver(&rt2800pci_driver); |
1227 | if (ret) { | 1227 | if (ret) { |
1228 | #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) | 1228 | #if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) |
1229 | platform_driver_unregister(&rt2800soc_driver); | 1229 | platform_driver_unregister(&rt2800soc_driver); |
1230 | #endif | 1230 | #endif |
1231 | return ret; | 1231 | return ret; |
@@ -1240,7 +1240,7 @@ static void __exit rt2800pci_exit(void) | |||
1240 | #ifdef CONFIG_PCI | 1240 | #ifdef CONFIG_PCI |
1241 | pci_unregister_driver(&rt2800pci_driver); | 1241 | pci_unregister_driver(&rt2800pci_driver); |
1242 | #endif | 1242 | #endif |
1243 | #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) | 1243 | #if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) |
1244 | platform_driver_unregister(&rt2800soc_driver); | 1244 | platform_driver_unregister(&rt2800soc_driver); |
1245 | #endif | 1245 | #endif |
1246 | } | 1246 | } |
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index b1ccff474c79..c08d0f4c5f3d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | |||
@@ -1377,74 +1377,57 @@ void rtl92cu_card_disable(struct ieee80211_hw *hw) | |||
1377 | 1377 | ||
1378 | void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) | 1378 | void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) |
1379 | { | 1379 | { |
1380 | /* dummy routine needed for callback from rtl_op_configure_filter() */ | ||
1381 | } | ||
1382 | |||
1383 | /*========================================================================== */ | ||
1384 | |||
1385 | static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw, | ||
1386 | enum nl80211_iftype type) | ||
1387 | { | ||
1388 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 1380 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
1389 | u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR); | ||
1390 | struct rtl_hal *rtlhal = rtl_hal(rtlpriv); | 1381 | struct rtl_hal *rtlhal = rtl_hal(rtlpriv); |
1391 | struct rtl_phy *rtlphy = &(rtlpriv->phy); | 1382 | u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR); |
1392 | u8 filterout_non_associated_bssid = false; | ||
1393 | 1383 | ||
1394 | switch (type) { | 1384 | if (rtlpriv->psc.rfpwr_state != ERFON) |
1395 | case NL80211_IFTYPE_ADHOC: | 1385 | return; |
1396 | case NL80211_IFTYPE_STATION: | 1386 | |
1397 | filterout_non_associated_bssid = true; | 1387 | if (check_bssid) { |
1398 | break; | 1388 | u8 tmp; |
1399 | case NL80211_IFTYPE_UNSPECIFIED: | ||
1400 | case NL80211_IFTYPE_AP: | ||
1401 | default: | ||
1402 | break; | ||
1403 | } | ||
1404 | if (filterout_non_associated_bssid) { | ||
1405 | if (IS_NORMAL_CHIP(rtlhal->version)) { | 1389 | if (IS_NORMAL_CHIP(rtlhal->version)) { |
1406 | switch (rtlphy->current_io_type) { | 1390 | reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); |
1407 | case IO_CMD_RESUME_DM_BY_SCAN: | 1391 | tmp = BIT(4); |
1408 | reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); | ||
1409 | rtlpriv->cfg->ops->set_hw_reg(hw, | ||
1410 | HW_VAR_RCR, (u8 *)(&reg_rcr)); | ||
1411 | /* enable update TSF */ | ||
1412 | _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4)); | ||
1413 | break; | ||
1414 | case IO_CMD_PAUSE_DM_BY_SCAN: | ||
1415 | reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); | ||
1416 | rtlpriv->cfg->ops->set_hw_reg(hw, | ||
1417 | HW_VAR_RCR, (u8 *)(&reg_rcr)); | ||
1418 | /* disable update TSF */ | ||
1419 | _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); | ||
1420 | break; | ||
1421 | } | ||
1422 | } else { | 1392 | } else { |
1423 | reg_rcr |= (RCR_CBSSID); | 1393 | reg_rcr |= RCR_CBSSID; |
1424 | rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, | 1394 | tmp = BIT(4) | BIT(5); |
1425 | (u8 *)(&reg_rcr)); | ||
1426 | _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5))); | ||
1427 | } | 1395 | } |
1428 | } else if (filterout_non_associated_bssid == false) { | 1396 | rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, |
1397 | (u8 *) (&reg_rcr)); | ||
1398 | _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp); | ||
1399 | } else { | ||
1400 | u8 tmp; | ||
1429 | if (IS_NORMAL_CHIP(rtlhal->version)) { | 1401 | if (IS_NORMAL_CHIP(rtlhal->version)) { |
1430 | reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); | 1402 | reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); |
1431 | rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, | 1403 | tmp = BIT(4); |
1432 | (u8 *)(&reg_rcr)); | ||
1433 | _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); | ||
1434 | } else { | 1404 | } else { |
1435 | reg_rcr &= (~RCR_CBSSID); | 1405 | reg_rcr &= ~RCR_CBSSID; |
1436 | rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, | 1406 | tmp = BIT(4) | BIT(5); |
1437 | (u8 *)(&reg_rcr)); | ||
1438 | _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0); | ||
1439 | } | 1407 | } |
1408 | reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); | ||
1409 | rtlpriv->cfg->ops->set_hw_reg(hw, | ||
1410 | HW_VAR_RCR, (u8 *) (&reg_rcr)); | ||
1411 | _rtl92cu_set_bcn_ctrl_reg(hw, tmp, 0); | ||
1440 | } | 1412 | } |
1441 | } | 1413 | } |
1442 | 1414 | ||
1415 | /*========================================================================== */ | ||
1416 | |||
1443 | int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) | 1417 | int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) |
1444 | { | 1418 | { |
1419 | struct rtl_priv *rtlpriv = rtl_priv(hw); | ||
1420 | |||
1445 | if (_rtl92cu_set_media_status(hw, type)) | 1421 | if (_rtl92cu_set_media_status(hw, type)) |
1446 | return -EOPNOTSUPP; | 1422 | return -EOPNOTSUPP; |
1447 | _rtl92cu_set_check_bssid(hw, type); | 1423 | |
1424 | if (rtlpriv->mac80211.link_state == MAC80211_LINKED) { | ||
1425 | if (type != NL80211_IFTYPE_AP) | ||
1426 | rtl92cu_set_check_bssid(hw, true); | ||
1427 | } else { | ||
1428 | rtl92cu_set_check_bssid(hw, false); | ||
1429 | } | ||
1430 | |||
1448 | return 0; | 1431 | return 0; |
1449 | } | 1432 | } |
1450 | 1433 | ||
@@ -2058,8 +2041,6 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, | |||
2058 | (shortgi_rate << 4) | (shortgi_rate); | 2041 | (shortgi_rate << 4) | (shortgi_rate); |
2059 | } | 2042 | } |
2060 | rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); | 2043 | rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); |
2061 | RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n", | ||
2062 | rtl_read_dword(rtlpriv, REG_ARFR0)); | ||
2063 | } | 2044 | } |
2064 | 2045 | ||
2065 | void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) | 2046 | void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) |
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index ab886b7ee327..b41ac7756a4b 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c | |||
@@ -100,6 +100,27 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) | |||
100 | return min((size_t)(image - rom), size); | 100 | return min((size_t)(image - rom), size); |
101 | } | 101 | } |
102 | 102 | ||
103 | static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size) | ||
104 | { | ||
105 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; | ||
106 | loff_t start; | ||
107 | |||
108 | /* assign the ROM an address if it doesn't have one */ | ||
109 | if (res->parent == NULL && pci_assign_resource(pdev, PCI_ROM_RESOURCE)) | ||
110 | return 0; | ||
111 | start = pci_resource_start(pdev, PCI_ROM_RESOURCE); | ||
112 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); | ||
113 | |||
114 | if (*size == 0) | ||
115 | return 0; | ||
116 | |||
117 | /* Enable ROM space decodes */ | ||
118 | if (pci_enable_rom(pdev)) | ||
119 | return 0; | ||
120 | |||
121 | return start; | ||
122 | } | ||
123 | |||
103 | /** | 124 | /** |
104 | * pci_map_rom - map a PCI ROM to kernel space | 125 | * pci_map_rom - map a PCI ROM to kernel space |
105 | * @pdev: pointer to pci device struct | 126 | * @pdev: pointer to pci device struct |
@@ -114,21 +135,15 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) | |||
114 | void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) | 135 | void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) |
115 | { | 136 | { |
116 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; | 137 | struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; |
117 | loff_t start; | 138 | loff_t start = 0; |
118 | void __iomem *rom; | 139 | void __iomem *rom; |
119 | 140 | ||
120 | /* | 141 | /* |
121 | * Some devices may provide ROMs via a source other than the BAR | ||
122 | */ | ||
123 | if (pdev->rom && pdev->romlen) { | ||
124 | *size = pdev->romlen; | ||
125 | return phys_to_virt(pdev->rom); | ||
126 | /* | ||
127 | * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy | 142 | * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy |
128 | * memory map if the VGA enable bit of the Bridge Control register is | 143 | * memory map if the VGA enable bit of the Bridge Control register is |
129 | * set for embedded VGA. | 144 | * set for embedded VGA. |
130 | */ | 145 | */ |
131 | } else if (res->flags & IORESOURCE_ROM_SHADOW) { | 146 | if (res->flags & IORESOURCE_ROM_SHADOW) { |
132 | /* primary video rom always starts here */ | 147 | /* primary video rom always starts here */ |
133 | start = (loff_t)0xC0000; | 148 | start = (loff_t)0xC0000; |
134 | *size = 0x20000; /* cover C000:0 through E000:0 */ | 149 | *size = 0x20000; /* cover C000:0 through E000:0 */ |
@@ -139,21 +154,21 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) | |||
139 | return (void __iomem *)(unsigned long) | 154 | return (void __iomem *)(unsigned long) |
140 | pci_resource_start(pdev, PCI_ROM_RESOURCE); | 155 | pci_resource_start(pdev, PCI_ROM_RESOURCE); |
141 | } else { | 156 | } else { |
142 | /* assign the ROM an address if it doesn't have one */ | 157 | start = pci_find_rom(pdev, size); |
143 | if (res->parent == NULL && | ||
144 | pci_assign_resource(pdev,PCI_ROM_RESOURCE)) | ||
145 | return NULL; | ||
146 | start = pci_resource_start(pdev, PCI_ROM_RESOURCE); | ||
147 | *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); | ||
148 | if (*size == 0) | ||
149 | return NULL; | ||
150 | |||
151 | /* Enable ROM space decodes */ | ||
152 | if (pci_enable_rom(pdev)) | ||
153 | return NULL; | ||
154 | } | 158 | } |
155 | } | 159 | } |
156 | 160 | ||
161 | /* | ||
162 | * Some devices may provide ROMs via a source other than the BAR | ||
163 | */ | ||
164 | if (!start && pdev->rom && pdev->romlen) { | ||
165 | *size = pdev->romlen; | ||
166 | return phys_to_virt(pdev->rom); | ||
167 | } | ||
168 | |||
169 | if (!start) | ||
170 | return NULL; | ||
171 | |||
157 | rom = ioremap(start, *size); | 172 | rom = ioremap(start, *size); |
158 | if (!rom) { | 173 | if (!rom) { |
159 | /* restore enable if ioremap fails */ | 174 | /* restore enable if ioremap fails */ |
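The rom.c refactor above moves the BAR lookup into a `pci_find_rom()` helper that returns 0 when no usable ROM address can be produced, so the caller can fall back to a platform-provided copy and finally bail out when neither source yields anything. A compressed, hedged sketch of that control flow — the stub struct and field names are invented for illustration and are not the real `struct pci_dev`:

```c
/* Illustrative control-flow sketch only; pci_dev_stub and its fields are
 * placeholders, not the real PCI structures. */
#include <stdint.h>
#include <stdio.h>

struct pci_dev_stub {
	uint64_t bar_rom_start;   /* 0 means "BAR ROM unusable" */
	uint64_t bar_rom_len;
	uint64_t platform_rom;    /* firmware-provided copy, if any */
	uint64_t platform_romlen;
};

static uint64_t find_rom(const struct pci_dev_stub *pdev, uint64_t *size)
{
	if (!pdev->bar_rom_start || !pdev->bar_rom_len)
		return 0;             /* could not assign/enable the ROM BAR */
	*size = pdev->bar_rom_len;
	return pdev->bar_rom_start;
}

static uint64_t map_rom(const struct pci_dev_stub *pdev, uint64_t *size)
{
	uint64_t start = find_rom(pdev, size);

	/* Fall back to a ROM provided via another source */
	if (!start && pdev->platform_rom && pdev->platform_romlen) {
		*size = pdev->platform_romlen;
		return pdev->platform_rom;
	}
	return start;             /* 0 here means "no ROM at all" */
}

int main(void)
{
	struct pci_dev_stub dev = { 0, 0, 0xC0000, 0x10000 };
	uint64_t size = 0, start = map_rom(&dev, &size);

	printf("rom at %#llx, size %#llx\n",
	       (unsigned long long)start, (unsigned long long)size);
	return 0;
}
```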
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 75933a6aa828..efb7f10e902a 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c | |||
@@ -1277,21 +1277,80 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type) | |||
1277 | } | 1277 | } |
1278 | 1278 | ||
1279 | #ifdef CONFIG_PM | 1279 | #ifdef CONFIG_PM |
1280 | |||
1281 | static u32 wakeups[MAX_GPIO_BANKS]; | ||
1282 | static u32 backups[MAX_GPIO_BANKS]; | ||
1283 | |||
1280 | static int gpio_irq_set_wake(struct irq_data *d, unsigned state) | 1284 | static int gpio_irq_set_wake(struct irq_data *d, unsigned state) |
1281 | { | 1285 | { |
1282 | struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); | 1286 | struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); |
1283 | unsigned bank = at91_gpio->pioc_idx; | 1287 | unsigned bank = at91_gpio->pioc_idx; |
1288 | unsigned mask = 1 << d->hwirq; | ||
1284 | 1289 | ||
1285 | if (unlikely(bank >= MAX_GPIO_BANKS)) | 1290 | if (unlikely(bank >= MAX_GPIO_BANKS)) |
1286 | return -EINVAL; | 1291 | return -EINVAL; |
1287 | 1292 | ||
1293 | if (state) | ||
1294 | wakeups[bank] |= mask; | ||
1295 | else | ||
1296 | wakeups[bank] &= ~mask; | ||
1297 | |||
1288 | irq_set_irq_wake(at91_gpio->pioc_virq, state); | 1298 | irq_set_irq_wake(at91_gpio->pioc_virq, state); |
1289 | 1299 | ||
1290 | return 0; | 1300 | return 0; |
1291 | } | 1301 | } |
1302 | |||
1303 | void at91_pinctrl_gpio_suspend(void) | ||
1304 | { | ||
1305 | int i; | ||
1306 | |||
1307 | for (i = 0; i < gpio_banks; i++) { | ||
1308 | void __iomem *pio; | ||
1309 | |||
1310 | if (!gpio_chips[i]) | ||
1311 | continue; | ||
1312 | |||
1313 | pio = gpio_chips[i]->regbase; | ||
1314 | |||
1315 | backups[i] = __raw_readl(pio + PIO_IMR); | ||
1316 | __raw_writel(backups[i], pio + PIO_IDR); | ||
1317 | __raw_writel(wakeups[i], pio + PIO_IER); | ||
1318 | |||
1319 | if (!wakeups[i]) { | ||
1320 | clk_unprepare(gpio_chips[i]->clock); | ||
1321 | clk_disable(gpio_chips[i]->clock); | ||
1322 | } else { | ||
1323 | printk(KERN_DEBUG "GPIO-%c may wake for %08x\n", | ||
1324 | 'A'+i, wakeups[i]); | ||
1325 | } | ||
1326 | } | ||
1327 | } | ||
1328 | |||
1329 | void at91_pinctrl_gpio_resume(void) | ||
1330 | { | ||
1331 | int i; | ||
1332 | |||
1333 | for (i = 0; i < gpio_banks; i++) { | ||
1334 | void __iomem *pio; | ||
1335 | |||
1336 | if (!gpio_chips[i]) | ||
1337 | continue; | ||
1338 | |||
1339 | pio = gpio_chips[i]->regbase; | ||
1340 | |||
1341 | if (!wakeups[i]) { | ||
1342 | if (clk_prepare(gpio_chips[i]->clock) == 0) | ||
1343 | clk_enable(gpio_chips[i]->clock); | ||
1344 | } | ||
1345 | |||
1346 | __raw_writel(wakeups[i], pio + PIO_IDR); | ||
1347 | __raw_writel(backups[i], pio + PIO_IER); | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1292 | #else | 1351 | #else |
1293 | #define gpio_irq_set_wake NULL | 1352 | #define gpio_irq_set_wake NULL |
1294 | #endif | 1353 | #endif /* CONFIG_PM */ |
1295 | 1354 | ||
1296 | static struct irq_chip gpio_irqchip = { | 1355 | static struct irq_chip gpio_irqchip = { |
1297 | .name = "GPIO", | 1356 | .name = "GPIO", |
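The at91 pinctrl hunk adds per-bank bookkeeping: on suspend the current interrupt mask is saved, only the configured wake sources are left enabled, and banks with no wake source get their clock stopped; resume undoes this in reverse. A stripped-down, hedged model of that save/mask/restore pattern — plain arrays and booleans stand in for the PIO registers and clocks, which the real driver touches via `__raw_readl`/`__raw_writel` and `struct clk`:

```c
/* Hedged sketch: arrays emulate the PIO interrupt-mask registers, booleans
 * emulate the bank clocks. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NBANKS 3

static uint32_t imr[NBANKS];       /* "hardware" interrupt mask per bank */
static bool clk_on[NBANKS] = { true, true, true };

static uint32_t wakeups[NBANKS];   /* configured wake sources */
static uint32_t backups[NBANKS];   /* mask saved across suspend */

static void gpio_suspend(void)
{
	int i;

	for (i = 0; i < NBANKS; i++) {
		backups[i] = imr[i];        /* save current mask */
		imr[i] = wakeups[i];        /* leave only wake sources enabled */
		if (!wakeups[i])
			clk_on[i] = false;  /* nothing can wake us: gate the clock */
	}
}

static void gpio_resume(void)
{
	int i;

	for (i = 0; i < NBANKS; i++) {
		if (!wakeups[i])
			clk_on[i] = true;   /* re-enable the clock first */
		imr[i] = backups[i];        /* restore the pre-suspend mask */
	}
}

int main(void)
{
	imr[0] = 0xff;
	imr[1] = 0x0f;
	wakeups[0] = 0x01;                  /* bank A: one wake-capable pin */

	gpio_suspend();
	printf("suspended: imr0=%#x clk1=%d\n", imr[0], clk_on[1]);
	gpio_resume();
	printf("resumed:   imr0=%#x clk1=%d\n", imr[0], clk_on[1]);
	return 0;
}
```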
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index 434ebc3a99dc..0a9f27e094ea 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c | |||
@@ -44,6 +44,7 @@ static DECLARE_COMPLETION(at91_rtc_updated); | |||
44 | static unsigned int at91_alarm_year = AT91_RTC_EPOCH; | 44 | static unsigned int at91_alarm_year = AT91_RTC_EPOCH; |
45 | static void __iomem *at91_rtc_regs; | 45 | static void __iomem *at91_rtc_regs; |
46 | static int irq; | 46 | static int irq; |
47 | static u32 at91_rtc_imr; | ||
47 | 48 | ||
48 | /* | 49 | /* |
49 | * Decode time/date into rtc_time structure | 50 | * Decode time/date into rtc_time structure |
@@ -108,9 +109,11 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm) | |||
108 | cr = at91_rtc_read(AT91_RTC_CR); | 109 | cr = at91_rtc_read(AT91_RTC_CR); |
109 | at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); | 110 | at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); |
110 | 111 | ||
112 | at91_rtc_imr |= AT91_RTC_ACKUPD; | ||
111 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); | 113 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD); |
112 | wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ | 114 | wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ |
113 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); | 115 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); |
116 | at91_rtc_imr &= ~AT91_RTC_ACKUPD; | ||
114 | 117 | ||
115 | at91_rtc_write(AT91_RTC_TIMR, | 118 | at91_rtc_write(AT91_RTC_TIMR, |
116 | bin2bcd(tm->tm_sec) << 0 | 119 | bin2bcd(tm->tm_sec) << 0 |
@@ -142,7 +145,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
142 | tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); | 145 | tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); |
143 | tm->tm_year = at91_alarm_year - 1900; | 146 | tm->tm_year = at91_alarm_year - 1900; |
144 | 147 | ||
145 | alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM) | 148 | alrm->enabled = (at91_rtc_imr & AT91_RTC_ALARM) |
146 | ? 1 : 0; | 149 | ? 1 : 0; |
147 | 150 | ||
148 | dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, | 151 | dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, |
@@ -168,6 +171,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
168 | tm.tm_sec = alrm->time.tm_sec; | 171 | tm.tm_sec = alrm->time.tm_sec; |
169 | 172 | ||
170 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); | 173 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); |
174 | at91_rtc_imr &= ~AT91_RTC_ALARM; | ||
171 | at91_rtc_write(AT91_RTC_TIMALR, | 175 | at91_rtc_write(AT91_RTC_TIMALR, |
172 | bin2bcd(tm.tm_sec) << 0 | 176 | bin2bcd(tm.tm_sec) << 0 |
173 | | bin2bcd(tm.tm_min) << 8 | 177 | | bin2bcd(tm.tm_min) << 8 |
@@ -180,6 +184,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
180 | 184 | ||
181 | if (alrm->enabled) { | 185 | if (alrm->enabled) { |
182 | at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); | 186 | at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); |
187 | at91_rtc_imr |= AT91_RTC_ALARM; | ||
183 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); | 188 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); |
184 | } | 189 | } |
185 | 190 | ||
@@ -196,9 +201,12 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | |||
196 | 201 | ||
197 | if (enabled) { | 202 | if (enabled) { |
198 | at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); | 203 | at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM); |
204 | at91_rtc_imr |= AT91_RTC_ALARM; | ||
199 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); | 205 | at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM); |
200 | } else | 206 | } else { |
201 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); | 207 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM); |
208 | at91_rtc_imr &= ~AT91_RTC_ALARM; | ||
209 | } | ||
202 | 210 | ||
203 | return 0; | 211 | return 0; |
204 | } | 212 | } |
@@ -207,12 +215,10 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | |||
207 | */ | 215 | */ |
208 | static int at91_rtc_proc(struct device *dev, struct seq_file *seq) | 216 | static int at91_rtc_proc(struct device *dev, struct seq_file *seq) |
209 | { | 217 | { |
210 | unsigned long imr = at91_rtc_read(AT91_RTC_IMR); | ||
211 | |||
212 | seq_printf(seq, "update_IRQ\t: %s\n", | 218 | seq_printf(seq, "update_IRQ\t: %s\n", |
213 | (imr & AT91_RTC_ACKUPD) ? "yes" : "no"); | 219 | (at91_rtc_imr & AT91_RTC_ACKUPD) ? "yes" : "no"); |
214 | seq_printf(seq, "periodic_IRQ\t: %s\n", | 220 | seq_printf(seq, "periodic_IRQ\t: %s\n", |
215 | (imr & AT91_RTC_SECEV) ? "yes" : "no"); | 221 | (at91_rtc_imr & AT91_RTC_SECEV) ? "yes" : "no"); |
216 | 222 | ||
217 | return 0; | 223 | return 0; |
218 | } | 224 | } |
@@ -227,7 +233,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id) | |||
227 | unsigned int rtsr; | 233 | unsigned int rtsr; |
228 | unsigned long events = 0; | 234 | unsigned long events = 0; |
229 | 235 | ||
230 | rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR); | 236 | rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_imr; |
231 | if (rtsr) { /* this interrupt is shared! Is it ours? */ | 237 | if (rtsr) { /* this interrupt is shared! Is it ours? */ |
232 | if (rtsr & AT91_RTC_ALARM) | 238 | if (rtsr & AT91_RTC_ALARM) |
233 | events |= (RTC_AF | RTC_IRQF); | 239 | events |= (RTC_AF | RTC_IRQF); |
@@ -291,6 +297,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev) | |||
291 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | | 297 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | |
292 | AT91_RTC_SECEV | AT91_RTC_TIMEV | | 298 | AT91_RTC_SECEV | AT91_RTC_TIMEV | |
293 | AT91_RTC_CALEV); | 299 | AT91_RTC_CALEV); |
300 | at91_rtc_imr = 0; | ||
294 | 301 | ||
295 | ret = request_irq(irq, at91_rtc_interrupt, | 302 | ret = request_irq(irq, at91_rtc_interrupt, |
296 | IRQF_SHARED, | 303 | IRQF_SHARED, |
@@ -329,6 +336,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev) | |||
329 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | | 336 | at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM | |
330 | AT91_RTC_SECEV | AT91_RTC_TIMEV | | 337 | AT91_RTC_SECEV | AT91_RTC_TIMEV | |
331 | AT91_RTC_CALEV); | 338 | AT91_RTC_CALEV); |
339 | at91_rtc_imr = 0; | ||
332 | free_irq(irq, pdev); | 340 | free_irq(irq, pdev); |
333 | 341 | ||
334 | rtc_device_unregister(rtc); | 342 | rtc_device_unregister(rtc); |
@@ -341,31 +349,35 @@ static int __exit at91_rtc_remove(struct platform_device *pdev) | |||
341 | 349 | ||
342 | /* AT91RM9200 RTC Power management control */ | 350 | /* AT91RM9200 RTC Power management control */ |
343 | 351 | ||
344 | static u32 at91_rtc_imr; | 352 | static u32 at91_rtc_bkpimr; |
353 | |||
345 | 354 | ||
346 | static int at91_rtc_suspend(struct device *dev) | 355 | static int at91_rtc_suspend(struct device *dev) |
347 | { | 356 | { |
348 | /* this IRQ is shared with DBGU and other hardware which isn't | 357 | /* this IRQ is shared with DBGU and other hardware which isn't |
349 | * necessarily doing PM like we are... | 358 | * necessarily doing PM like we are... |
350 | */ | 359 | */ |
351 | at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR) | 360 | at91_rtc_bkpimr = at91_rtc_imr & (AT91_RTC_ALARM|AT91_RTC_SECEV); |
352 | & (AT91_RTC_ALARM|AT91_RTC_SECEV); | 361 | if (at91_rtc_bkpimr) { |
353 | if (at91_rtc_imr) { | 362 | if (device_may_wakeup(dev)) { |
354 | if (device_may_wakeup(dev)) | ||
355 | enable_irq_wake(irq); | 363 | enable_irq_wake(irq); |
356 | else | 364 | } else { |
357 | at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr); | 365 | at91_rtc_write(AT91_RTC_IDR, at91_rtc_bkpimr); |
358 | } | 366 | at91_rtc_imr &= ~at91_rtc_bkpimr; |
367 | } | ||
368 | } | ||
359 | return 0; | 369 | return 0; |
360 | } | 370 | } |
361 | 371 | ||
362 | static int at91_rtc_resume(struct device *dev) | 372 | static int at91_rtc_resume(struct device *dev) |
363 | { | 373 | { |
364 | if (at91_rtc_imr) { | 374 | if (at91_rtc_bkpimr) { |
365 | if (device_may_wakeup(dev)) | 375 | if (device_may_wakeup(dev)) { |
366 | disable_irq_wake(irq); | 376 | disable_irq_wake(irq); |
367 | else | 377 | } else { |
368 | at91_rtc_write(AT91_RTC_IER, at91_rtc_imr); | 378 | at91_rtc_imr |= at91_rtc_bkpimr; |
379 | at91_rtc_write(AT91_RTC_IER, at91_rtc_bkpimr); | ||
380 | } | ||
369 | } | 381 | } |
370 | return 0; | 382 | return 0; |
371 | } | 383 | } |
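Most of the rtc-at91rm9200 changes implement one idea: the driver stops reading the hardware AT91_RTC_IMR register (whose definition the header hunk below removes) and instead keeps a software shadow, `at91_rtc_imr`, updated next to every IER/IDR write and consulted wherever the mask is needed — presumably because the register cannot be read back on all supported chips, which is an assumption on my part. A hedged sketch of the shadow-mask discipline, with stub functions standing in for the write-only registers:

```c
/* Hedged sketch: hw_write_ier()/hw_write_idr() emulate the write-only
 * enable/disable registers; imr_shadow is the readable software copy. */
#include <stdint.h>
#include <stdio.h>

#define RTC_ACKUPD (1u << 0)
#define RTC_ALARM  (1u << 1)

static uint32_t imr_shadow;          /* software copy of the interrupt mask */

static void hw_write_ier(uint32_t bits) { (void)bits; /* would poke AT91_RTC_IER */ }
static void hw_write_idr(uint32_t bits) { (void)bits; /* would poke AT91_RTC_IDR */ }

static void rtc_irq_enable(uint32_t bits)
{
	imr_shadow |= bits;              /* update the shadow alongside the write */
	hw_write_ier(bits);
}

static void rtc_irq_disable(uint32_t bits)
{
	hw_write_idr(bits);
	imr_shadow &= ~bits;
}

int main(void)
{
	rtc_irq_enable(RTC_ALARM);
	printf("alarm enabled: %s\n", (imr_shadow & RTC_ALARM) ? "yes" : "no");
	rtc_irq_disable(RTC_ALARM);
	printf("alarm enabled: %s\n", (imr_shadow & RTC_ALARM) ? "yes" : "no");
	return 0;
}
```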
diff --git a/drivers/rtc/rtc-at91rm9200.h b/drivers/rtc/rtc-at91rm9200.h index da1945e5f714..5f940b6844cb 100644 --- a/drivers/rtc/rtc-at91rm9200.h +++ b/drivers/rtc/rtc-at91rm9200.h | |||
@@ -64,7 +64,6 @@ | |||
64 | #define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */ | 64 | #define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */ |
65 | #define AT91_RTC_IER 0x20 /* Interrupt Enable Register */ | 65 | #define AT91_RTC_IER 0x20 /* Interrupt Enable Register */ |
66 | #define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */ | 66 | #define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */ |
67 | #define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */ | ||
68 | 67 | ||
69 | #define AT91_RTC_VER 0x2c /* Valid Entry Register */ | 68 | #define AT91_RTC_VER 0x2c /* Valid Entry Register */ |
70 | #define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */ | 69 | #define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */ |
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c index 0dde688ca09b..969abbad7fe3 100644 --- a/drivers/rtc/rtc-da9052.c +++ b/drivers/rtc/rtc-da9052.c | |||
@@ -239,11 +239,9 @@ static int da9052_rtc_probe(struct platform_device *pdev) | |||
239 | 239 | ||
240 | rtc->da9052 = dev_get_drvdata(pdev->dev.parent); | 240 | rtc->da9052 = dev_get_drvdata(pdev->dev.parent); |
241 | platform_set_drvdata(pdev, rtc); | 241 | platform_set_drvdata(pdev, rtc); |
242 | rtc->irq = platform_get_irq_byname(pdev, "ALM"); | 242 | rtc->irq = DA9052_IRQ_ALARM; |
243 | ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL, | 243 | ret = da9052_request_irq(rtc->da9052, rtc->irq, "ALM", |
244 | da9052_rtc_irq, | 244 | da9052_rtc_irq, rtc); |
245 | IRQF_TRIGGER_LOW | IRQF_ONESHOT, | ||
246 | "ALM", rtc); | ||
247 | if (ret != 0) { | 245 | if (ret != 0) { |
248 | rtc_err(rtc->da9052, "irq registration failed: %d\n", ret); | 246 | rtc_err(rtc->da9052, "irq registration failed: %d\n", ret); |
249 | return ret; | 247 | return ret; |
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 9978ad4433cb..5ac9c935c151 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c | |||
@@ -135,6 +135,11 @@ static const struct block_device_operations scm_blk_devops = { | |||
135 | .release = scm_release, | 135 | .release = scm_release, |
136 | }; | 136 | }; |
137 | 137 | ||
138 | static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) | ||
139 | { | ||
140 | return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; | ||
141 | } | ||
142 | |||
138 | static void scm_request_prepare(struct scm_request *scmrq) | 143 | static void scm_request_prepare(struct scm_request *scmrq) |
139 | { | 144 | { |
140 | struct scm_blk_dev *bdev = scmrq->bdev; | 145 | struct scm_blk_dev *bdev = scmrq->bdev; |
@@ -195,14 +200,18 @@ void scm_request_requeue(struct scm_request *scmrq) | |||
195 | 200 | ||
196 | scm_release_cluster(scmrq); | 201 | scm_release_cluster(scmrq); |
197 | blk_requeue_request(bdev->rq, scmrq->request); | 202 | blk_requeue_request(bdev->rq, scmrq->request); |
203 | atomic_dec(&bdev->queued_reqs); | ||
198 | scm_request_done(scmrq); | 204 | scm_request_done(scmrq); |
199 | scm_ensure_queue_restart(bdev); | 205 | scm_ensure_queue_restart(bdev); |
200 | } | 206 | } |
201 | 207 | ||
202 | void scm_request_finish(struct scm_request *scmrq) | 208 | void scm_request_finish(struct scm_request *scmrq) |
203 | { | 209 | { |
210 | struct scm_blk_dev *bdev = scmrq->bdev; | ||
211 | |||
204 | scm_release_cluster(scmrq); | 212 | scm_release_cluster(scmrq); |
205 | blk_end_request_all(scmrq->request, scmrq->error); | 213 | blk_end_request_all(scmrq->request, scmrq->error); |
214 | atomic_dec(&bdev->queued_reqs); | ||
206 | scm_request_done(scmrq); | 215 | scm_request_done(scmrq); |
207 | } | 216 | } |
208 | 217 | ||
@@ -218,6 +227,10 @@ static void scm_blk_request(struct request_queue *rq) | |||
218 | if (req->cmd_type != REQ_TYPE_FS) | 227 | if (req->cmd_type != REQ_TYPE_FS) |
219 | continue; | 228 | continue; |
220 | 229 | ||
230 | if (!scm_permit_request(bdev, req)) { | ||
231 | scm_ensure_queue_restart(bdev); | ||
232 | return; | ||
233 | } | ||
221 | scmrq = scm_request_fetch(); | 234 | scmrq = scm_request_fetch(); |
222 | if (!scmrq) { | 235 | if (!scmrq) { |
223 | SCM_LOG(5, "no request"); | 236 | SCM_LOG(5, "no request"); |
@@ -231,11 +244,13 @@ static void scm_blk_request(struct request_queue *rq) | |||
231 | return; | 244 | return; |
232 | } | 245 | } |
233 | if (scm_need_cluster_request(scmrq)) { | 246 | if (scm_need_cluster_request(scmrq)) { |
247 | atomic_inc(&bdev->queued_reqs); | ||
234 | blk_start_request(req); | 248 | blk_start_request(req); |
235 | scm_initiate_cluster_request(scmrq); | 249 | scm_initiate_cluster_request(scmrq); |
236 | return; | 250 | return; |
237 | } | 251 | } |
238 | scm_request_prepare(scmrq); | 252 | scm_request_prepare(scmrq); |
253 | atomic_inc(&bdev->queued_reqs); | ||
239 | blk_start_request(req); | 254 | blk_start_request(req); |
240 | 255 | ||
241 | ret = scm_start_aob(scmrq->aob); | 256 | ret = scm_start_aob(scmrq->aob); |
@@ -244,7 +259,6 @@ static void scm_blk_request(struct request_queue *rq) | |||
244 | scm_request_requeue(scmrq); | 259 | scm_request_requeue(scmrq); |
245 | return; | 260 | return; |
246 | } | 261 | } |
247 | atomic_inc(&bdev->queued_reqs); | ||
248 | } | 262 | } |
249 | } | 263 | } |
250 | 264 | ||
@@ -280,6 +294,38 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, int error) | |||
280 | tasklet_hi_schedule(&bdev->tasklet); | 294 | tasklet_hi_schedule(&bdev->tasklet); |
281 | } | 295 | } |
282 | 296 | ||
297 | static void scm_blk_handle_error(struct scm_request *scmrq) | ||
298 | { | ||
299 | struct scm_blk_dev *bdev = scmrq->bdev; | ||
300 | unsigned long flags; | ||
301 | |||
302 | if (scmrq->error != -EIO) | ||
303 | goto restart; | ||
304 | |||
305 | /* For -EIO the response block is valid. */ | ||
306 | switch (scmrq->aob->response.eqc) { | ||
307 | case EQC_WR_PROHIBIT: | ||
308 | spin_lock_irqsave(&bdev->lock, flags); | ||
309 | if (bdev->state != SCM_WR_PROHIBIT) | ||
310 | pr_info("%lu: Write access to the SCM increment is suspended\n", | ||
311 | (unsigned long) bdev->scmdev->address); | ||
312 | bdev->state = SCM_WR_PROHIBIT; | ||
313 | spin_unlock_irqrestore(&bdev->lock, flags); | ||
314 | goto requeue; | ||
315 | default: | ||
316 | break; | ||
317 | } | ||
318 | |||
319 | restart: | ||
320 | if (!scm_start_aob(scmrq->aob)) | ||
321 | return; | ||
322 | |||
323 | requeue: | ||
324 | spin_lock_irqsave(&bdev->rq_lock, flags); | ||
325 | scm_request_requeue(scmrq); | ||
326 | spin_unlock_irqrestore(&bdev->rq_lock, flags); | ||
327 | } | ||
328 | |||
283 | static void scm_blk_tasklet(struct scm_blk_dev *bdev) | 329 | static void scm_blk_tasklet(struct scm_blk_dev *bdev) |
284 | { | 330 | { |
285 | struct scm_request *scmrq; | 331 | struct scm_request *scmrq; |
@@ -293,11 +339,8 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev) | |||
293 | spin_unlock_irqrestore(&bdev->lock, flags); | 339 | spin_unlock_irqrestore(&bdev->lock, flags); |
294 | 340 | ||
295 | if (scmrq->error && scmrq->retries-- > 0) { | 341 | if (scmrq->error && scmrq->retries-- > 0) { |
296 | if (scm_start_aob(scmrq->aob)) { | 342 | scm_blk_handle_error(scmrq); |
297 | spin_lock_irqsave(&bdev->rq_lock, flags); | 343 | |
298 | scm_request_requeue(scmrq); | ||
299 | spin_unlock_irqrestore(&bdev->rq_lock, flags); | ||
300 | } | ||
301 | /* Request restarted or requeued, handle next. */ | 344 | /* Request restarted or requeued, handle next. */ |
302 | spin_lock_irqsave(&bdev->lock, flags); | 345 | spin_lock_irqsave(&bdev->lock, flags); |
303 | continue; | 346 | continue; |
@@ -310,7 +353,6 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev) | |||
310 | } | 353 | } |
311 | 354 | ||
312 | scm_request_finish(scmrq); | 355 | scm_request_finish(scmrq); |
313 | atomic_dec(&bdev->queued_reqs); | ||
314 | spin_lock_irqsave(&bdev->lock, flags); | 356 | spin_lock_irqsave(&bdev->lock, flags); |
315 | } | 357 | } |
316 | spin_unlock_irqrestore(&bdev->lock, flags); | 358 | spin_unlock_irqrestore(&bdev->lock, flags); |
@@ -332,6 +374,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) | |||
332 | } | 374 | } |
333 | 375 | ||
334 | bdev->scmdev = scmdev; | 376 | bdev->scmdev = scmdev; |
377 | bdev->state = SCM_OPER; | ||
335 | spin_lock_init(&bdev->rq_lock); | 378 | spin_lock_init(&bdev->rq_lock); |
336 | spin_lock_init(&bdev->lock); | 379 | spin_lock_init(&bdev->lock); |
337 | INIT_LIST_HEAD(&bdev->finished_requests); | 380 | INIT_LIST_HEAD(&bdev->finished_requests); |
@@ -396,6 +439,18 @@ void scm_blk_dev_cleanup(struct scm_blk_dev *bdev) | |||
396 | put_disk(bdev->gendisk); | 439 | put_disk(bdev->gendisk); |
397 | } | 440 | } |
398 | 441 | ||
442 | void scm_blk_set_available(struct scm_blk_dev *bdev) | ||
443 | { | ||
444 | unsigned long flags; | ||
445 | |||
446 | spin_lock_irqsave(&bdev->lock, flags); | ||
447 | if (bdev->state == SCM_WR_PROHIBIT) | ||
448 | pr_info("%lu: Write access to the SCM increment is restored\n", | ||
449 | (unsigned long) bdev->scmdev->address); | ||
450 | bdev->state = SCM_OPER; | ||
451 | spin_unlock_irqrestore(&bdev->lock, flags); | ||
452 | } | ||
453 | |||
399 | static int __init scm_blk_init(void) | 454 | static int __init scm_blk_init(void) |
400 | { | 455 | { |
401 | int ret = -EINVAL; | 456 | int ret = -EINVAL; |
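The scm_blk changes above introduce a small per-device state machine (`SCM_OPER` / `SCM_WR_PROHIBIT`): while the increment is write-prohibited, write requests are held back via `scm_ensure_queue_restart()`, and `scm_blk_set_available()` flips the device back to operational when the availability notification arrives. A hedged sketch of the gating check, with simplified stand-in types:

```c
/* Hedged sketch of the write-gating idea; the enums and the device struct
 * are simplified stand-ins for the s390 scm_blk types. */
#include <stdbool.h>
#include <stdio.h>

enum dev_state { DEV_OPER, DEV_WR_PROHIBIT };
enum req_dir   { REQ_READ, REQ_WRITE };

struct blkdev { enum dev_state state; };

static bool permit_request(const struct blkdev *bdev, enum req_dir dir)
{
	/* Reads always pass; writes only while the device is operational. */
	return dir != REQ_WRITE || bdev->state != DEV_WR_PROHIBIT;
}

int main(void)
{
	struct blkdev bdev = { DEV_WR_PROHIBIT };

	printf("write allowed: %d\n", permit_request(&bdev, REQ_WRITE)); /* 0 */
	bdev.state = DEV_OPER;      /* e.g. after the "available" notification */
	printf("write allowed: %d\n", permit_request(&bdev, REQ_WRITE)); /* 1 */
	return 0;
}
```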
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h index 3c1ccf494647..8b387b32fd62 100644 --- a/drivers/s390/block/scm_blk.h +++ b/drivers/s390/block/scm_blk.h | |||
@@ -21,6 +21,7 @@ struct scm_blk_dev { | |||
21 | spinlock_t rq_lock; /* guard the request queue */ | 21 | spinlock_t rq_lock; /* guard the request queue */ |
22 | spinlock_t lock; /* guard the rest of the blockdev */ | 22 | spinlock_t lock; /* guard the rest of the blockdev */ |
23 | atomic_t queued_reqs; | 23 | atomic_t queued_reqs; |
24 | enum {SCM_OPER, SCM_WR_PROHIBIT} state; | ||
24 | struct list_head finished_requests; | 25 | struct list_head finished_requests; |
25 | #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE | 26 | #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE |
26 | struct list_head cluster_list; | 27 | struct list_head cluster_list; |
@@ -48,6 +49,7 @@ struct scm_request { | |||
48 | 49 | ||
49 | int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *); | 50 | int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *); |
50 | void scm_blk_dev_cleanup(struct scm_blk_dev *); | 51 | void scm_blk_dev_cleanup(struct scm_blk_dev *); |
52 | void scm_blk_set_available(struct scm_blk_dev *); | ||
51 | void scm_blk_irq(struct scm_device *, void *, int); | 53 | void scm_blk_irq(struct scm_device *, void *, int); |
52 | 54 | ||
53 | void scm_request_finish(struct scm_request *); | 55 | void scm_request_finish(struct scm_request *); |
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c index 9fa0a908607b..5f6180d6ff08 100644 --- a/drivers/s390/block/scm_drv.c +++ b/drivers/s390/block/scm_drv.c | |||
@@ -13,12 +13,23 @@ | |||
13 | #include <asm/eadm.h> | 13 | #include <asm/eadm.h> |
14 | #include "scm_blk.h" | 14 | #include "scm_blk.h" |
15 | 15 | ||
16 | static void notify(struct scm_device *scmdev) | 16 | static void scm_notify(struct scm_device *scmdev, enum scm_event event) |
17 | { | 17 | { |
18 | pr_info("%lu: The capabilities of the SCM increment changed\n", | 18 | struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); |
19 | (unsigned long) scmdev->address); | 19 | |
20 | SCM_LOG(2, "State changed"); | 20 | switch (event) { |
21 | SCM_LOG_STATE(2, scmdev); | 21 | case SCM_CHANGE: |
22 | pr_info("%lu: The capabilities of the SCM increment changed\n", | ||
23 | (unsigned long) scmdev->address); | ||
24 | SCM_LOG(2, "State changed"); | ||
25 | SCM_LOG_STATE(2, scmdev); | ||
26 | break; | ||
27 | case SCM_AVAIL: | ||
28 | SCM_LOG(2, "Increment available"); | ||
29 | SCM_LOG_STATE(2, scmdev); | ||
30 | scm_blk_set_available(bdev); | ||
31 | break; | ||
32 | } | ||
22 | } | 33 | } |
23 | 34 | ||
24 | static int scm_probe(struct scm_device *scmdev) | 35 | static int scm_probe(struct scm_device *scmdev) |
@@ -64,7 +75,7 @@ static struct scm_driver scm_drv = { | |||
64 | .name = "scm_block", | 75 | .name = "scm_block", |
65 | .owner = THIS_MODULE, | 76 | .owner = THIS_MODULE, |
66 | }, | 77 | }, |
67 | .notify = notify, | 78 | .notify = scm_notify, |
68 | .probe = scm_probe, | 79 | .probe = scm_probe, |
69 | .remove = scm_remove, | 80 | .remove = scm_remove, |
70 | .handler = scm_blk_irq, | 81 | .handler = scm_blk_irq, |
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 30a2255389e5..cd798386b622 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c | |||
@@ -627,6 +627,8 @@ static int __init sclp_detect_standby_memory(void) | |||
627 | struct read_storage_sccb *sccb; | 627 | struct read_storage_sccb *sccb; |
628 | int i, id, assigned, rc; | 628 | int i, id, assigned, rc; |
629 | 629 | ||
630 | if (OLDMEM_BASE) /* No standby memory in kdump mode */ | ||
631 | return 0; | ||
630 | if (!early_read_info_sccb_valid) | 632 | if (!early_read_info_sccb_valid) |
631 | return 0; | 633 | return 0; |
632 | if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) | 634 | if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) |
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 31ceef1beb8b..e16c553f6556 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -433,6 +433,20 @@ static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area) | |||
433 | " failed (rc=%d).\n", ret); | 433 | " failed (rc=%d).\n", ret); |
434 | } | 434 | } |
435 | 435 | ||
436 | static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area) | ||
437 | { | ||
438 | int ret; | ||
439 | |||
440 | CIO_CRW_EVENT(4, "chsc: scm available information\n"); | ||
441 | if (sei_area->rs != 7) | ||
442 | return; | ||
443 | |||
444 | ret = scm_process_availability_information(); | ||
445 | if (ret) | ||
446 | CIO_CRW_EVENT(0, "chsc: process availability information" | ||
447 | " failed (rc=%d).\n", ret); | ||
448 | } | ||
449 | |||
436 | static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) | 450 | static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) |
437 | { | 451 | { |
438 | switch (sei_area->cc) { | 452 | switch (sei_area->cc) { |
@@ -468,6 +482,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) | |||
468 | case 12: /* scm change notification */ | 482 | case 12: /* scm change notification */ |
469 | chsc_process_sei_scm_change(sei_area); | 483 | chsc_process_sei_scm_change(sei_area); |
470 | break; | 484 | break; |
485 | case 14: /* scm available notification */ | ||
486 | chsc_process_sei_scm_avail(sei_area); | ||
487 | break; | ||
471 | default: /* other stuff */ | 488 | default: /* other stuff */ |
472 | CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", | 489 | CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", |
473 | sei_area->cc); | 490 | sei_area->cc); |
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 227e05f674b3..349d5fc47196 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
@@ -156,8 +156,10 @@ int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token); | |||
156 | 156 | ||
157 | #ifdef CONFIG_SCM_BUS | 157 | #ifdef CONFIG_SCM_BUS |
158 | int scm_update_information(void); | 158 | int scm_update_information(void); |
159 | int scm_process_availability_information(void); | ||
159 | #else /* CONFIG_SCM_BUS */ | 160 | #else /* CONFIG_SCM_BUS */ |
160 | static inline int scm_update_information(void) { return 0; } | 161 | static inline int scm_update_information(void) { return 0; } |
162 | static inline int scm_process_availability_information(void) { return 0; } | ||
161 | #endif /* CONFIG_SCM_BUS */ | 163 | #endif /* CONFIG_SCM_BUS */ |
162 | 164 | ||
163 | 165 | ||
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c index bcf20f3aa51b..46ec25632e8b 100644 --- a/drivers/s390/cio/scm.c +++ b/drivers/s390/cio/scm.c | |||
@@ -211,7 +211,7 @@ static void scmdev_update(struct scm_device *scmdev, struct sale *sale) | |||
211 | goto out; | 211 | goto out; |
212 | scmdrv = to_scm_drv(scmdev->dev.driver); | 212 | scmdrv = to_scm_drv(scmdev->dev.driver); |
213 | if (changed && scmdrv->notify) | 213 | if (changed && scmdrv->notify) |
214 | scmdrv->notify(scmdev); | 214 | scmdrv->notify(scmdev, SCM_CHANGE); |
215 | out: | 215 | out: |
216 | device_unlock(&scmdev->dev); | 216 | device_unlock(&scmdev->dev); |
217 | if (changed) | 217 | if (changed) |
@@ -297,6 +297,22 @@ int scm_update_information(void) | |||
297 | return ret; | 297 | return ret; |
298 | } | 298 | } |
299 | 299 | ||
300 | static int scm_dev_avail(struct device *dev, void *unused) | ||
301 | { | ||
302 | struct scm_driver *scmdrv = to_scm_drv(dev->driver); | ||
303 | struct scm_device *scmdev = to_scm_dev(dev); | ||
304 | |||
305 | if (dev->driver && scmdrv->notify) | ||
306 | scmdrv->notify(scmdev, SCM_AVAIL); | ||
307 | |||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | int scm_process_availability_information(void) | ||
312 | { | ||
313 | return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail); | ||
314 | } | ||
315 | |||
300 | static int __init scm_init(void) | 316 | static int __init scm_init(void) |
301 | { | 317 | { |
302 | int ret; | 318 | int ret; |
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index d87961d4c0de..8c0622399fcd 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -916,6 +916,7 @@ int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, | |||
916 | void *reply_param); | 916 | void *reply_param); |
917 | int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); | 917 | int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); |
918 | int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); | 918 | int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); |
919 | int qeth_get_elements_for_frags(struct sk_buff *); | ||
919 | int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, | 920 | int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, |
920 | struct sk_buff *, struct qeth_hdr *, int, int, int); | 921 | struct sk_buff *, struct qeth_hdr *, int, int, int); |
921 | int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, | 922 | int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 0d8cdff81813..0d73a999983d 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -3679,6 +3679,25 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, | |||
3679 | } | 3679 | } |
3680 | EXPORT_SYMBOL_GPL(qeth_get_priority_queue); | 3680 | EXPORT_SYMBOL_GPL(qeth_get_priority_queue); |
3681 | 3681 | ||
3682 | int qeth_get_elements_for_frags(struct sk_buff *skb) | ||
3683 | { | ||
3684 | int cnt, length, e, elements = 0; | ||
3685 | struct skb_frag_struct *frag; | ||
3686 | char *data; | ||
3687 | |||
3688 | for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { | ||
3689 | frag = &skb_shinfo(skb)->frags[cnt]; | ||
3690 | data = (char *)page_to_phys(skb_frag_page(frag)) + | ||
3691 | frag->page_offset; | ||
3692 | length = frag->size; | ||
3693 | e = PFN_UP((unsigned long)data + length - 1) - | ||
3694 | PFN_DOWN((unsigned long)data); | ||
3695 | elements += e; | ||
3696 | } | ||
3697 | return elements; | ||
3698 | } | ||
3699 | EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); | ||
3700 | |||
3682 | int qeth_get_elements_no(struct qeth_card *card, void *hdr, | 3701 | int qeth_get_elements_no(struct qeth_card *card, void *hdr, |
3683 | struct sk_buff *skb, int elems) | 3702 | struct sk_buff *skb, int elems) |
3684 | { | 3703 | { |
@@ -3686,7 +3705,8 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr, | |||
3686 | int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - | 3705 | int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - |
3687 | PFN_DOWN((unsigned long)skb->data); | 3706 | PFN_DOWN((unsigned long)skb->data); |
3688 | 3707 | ||
3689 | elements_needed += skb_shinfo(skb)->nr_frags; | 3708 | elements_needed += qeth_get_elements_for_frags(skb); |
3709 | |||
3690 | if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { | 3710 | if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { |
3691 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " | 3711 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " |
3692 | "(Number=%d / Length=%d). Discarded.\n", | 3712 | "(Number=%d / Length=%d). Discarded.\n", |
@@ -3771,12 +3791,23 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, | |||
3771 | 3791 | ||
3772 | for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { | 3792 | for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { |
3773 | frag = &skb_shinfo(skb)->frags[cnt]; | 3793 | frag = &skb_shinfo(skb)->frags[cnt]; |
3774 | buffer->element[element].addr = (char *) | 3794 | data = (char *)page_to_phys(skb_frag_page(frag)) + |
3775 | page_to_phys(skb_frag_page(frag)) | 3795 | frag->page_offset; |
3776 | + frag->page_offset; | 3796 | length = frag->size; |
3777 | buffer->element[element].length = frag->size; | 3797 | while (length > 0) { |
3778 | buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG; | 3798 | length_here = PAGE_SIZE - |
3779 | element++; | 3799 | ((unsigned long) data % PAGE_SIZE); |
3800 | if (length < length_here) | ||
3801 | length_here = length; | ||
3802 | |||
3803 | buffer->element[element].addr = data; | ||
3804 | buffer->element[element].length = length_here; | ||
3805 | buffer->element[element].eflags = | ||
3806 | SBAL_EFLAGS_MIDDLE_FRAG; | ||
3807 | length -= length_here; | ||
3808 | data += length_here; | ||
3809 | element++; | ||
3810 | } | ||
3780 | } | 3811 | } |
3781 | 3812 | ||
3782 | if (buffer->element[element - 1].eflags) | 3813 | if (buffer->element[element - 1].eflags) |
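The qeth hunks above replace the assumption "one buffer element per skb fragment" with real page accounting: a fragment that crosses 4 KiB boundaries needs one element per page it touches, and `__qeth_fill_buffer()` now splits such fragments at each boundary. The element count comes from the PFN arithmetic `PFN_UP(addr + len - 1) - PFN_DOWN(addr)`, i.e. the number of pages the buffer spans. A hedged, standalone illustration of that arithmetic (PAGE_SIZE fixed at 4 KiB for the sketch):

```c
/* Hedged sketch of the page-span arithmetic used above: how many 4 KiB pages
 * does a buffer starting at 'addr' with 'len' bytes touch? */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned long pfn_down(unsigned long x) { return x >> PAGE_SHIFT; }
static unsigned long pfn_up(unsigned long x)   { return (x + PAGE_SIZE - 1) >> PAGE_SHIFT; }

static unsigned long pages_spanned(unsigned long addr, unsigned long len)
{
	return pfn_up(addr + len - 1) - pfn_down(addr);   /* == elements needed */
}

int main(void)
{
	/* 100 bytes entirely inside one page vs. 100 bytes straddling a boundary */
	printf("%lu\n", pages_spanned(0x1000, 100));   /* prints 1 */
	printf("%lu\n", pages_spanned(0x1fd0, 100));   /* prints 2 */
	return 0;
}
```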
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 091ca0efa1c5..8710337dab3e 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -623,7 +623,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, | |||
623 | return rc; | 623 | return rc; |
624 | } | 624 | } |
625 | 625 | ||
626 | static void qeth_l3_correct_routing_type(struct qeth_card *card, | 626 | static int qeth_l3_correct_routing_type(struct qeth_card *card, |
627 | enum qeth_routing_types *type, enum qeth_prot_versions prot) | 627 | enum qeth_routing_types *type, enum qeth_prot_versions prot) |
628 | { | 628 | { |
629 | if (card->info.type == QETH_CARD_TYPE_IQD) { | 629 | if (card->info.type == QETH_CARD_TYPE_IQD) { |
@@ -632,7 +632,7 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card, | |||
632 | case PRIMARY_CONNECTOR: | 632 | case PRIMARY_CONNECTOR: |
633 | case SECONDARY_CONNECTOR: | 633 | case SECONDARY_CONNECTOR: |
634 | case MULTICAST_ROUTER: | 634 | case MULTICAST_ROUTER: |
635 | return; | 635 | return 0; |
636 | default: | 636 | default: |
637 | goto out_inval; | 637 | goto out_inval; |
638 | } | 638 | } |
@@ -641,17 +641,18 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card, | |||
641 | case NO_ROUTER: | 641 | case NO_ROUTER: |
642 | case PRIMARY_ROUTER: | 642 | case PRIMARY_ROUTER: |
643 | case SECONDARY_ROUTER: | 643 | case SECONDARY_ROUTER: |
644 | return; | 644 | return 0; |
645 | case MULTICAST_ROUTER: | 645 | case MULTICAST_ROUTER: |
646 | if (qeth_is_ipafunc_supported(card, prot, | 646 | if (qeth_is_ipafunc_supported(card, prot, |
647 | IPA_OSA_MC_ROUTER)) | 647 | IPA_OSA_MC_ROUTER)) |
648 | return; | 648 | return 0; |
649 | default: | 649 | default: |
650 | goto out_inval; | 650 | goto out_inval; |
651 | } | 651 | } |
652 | } | 652 | } |
653 | out_inval: | 653 | out_inval: |
654 | *type = NO_ROUTER; | 654 | *type = NO_ROUTER; |
655 | return -EINVAL; | ||
655 | } | 656 | } |
656 | 657 | ||
657 | int qeth_l3_setrouting_v4(struct qeth_card *card) | 658 | int qeth_l3_setrouting_v4(struct qeth_card *card) |
@@ -660,8 +661,10 @@ int qeth_l3_setrouting_v4(struct qeth_card *card) | |||
660 | 661 | ||
661 | QETH_CARD_TEXT(card, 3, "setrtg4"); | 662 | QETH_CARD_TEXT(card, 3, "setrtg4"); |
662 | 663 | ||
663 | qeth_l3_correct_routing_type(card, &card->options.route4.type, | 664 | rc = qeth_l3_correct_routing_type(card, &card->options.route4.type, |
664 | QETH_PROT_IPV4); | 665 | QETH_PROT_IPV4); |
666 | if (rc) | ||
667 | return rc; | ||
665 | 668 | ||
666 | rc = qeth_l3_send_setrouting(card, card->options.route4.type, | 669 | rc = qeth_l3_send_setrouting(card, card->options.route4.type, |
667 | QETH_PROT_IPV4); | 670 | QETH_PROT_IPV4); |
@@ -683,8 +686,10 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) | |||
683 | 686 | ||
684 | if (!qeth_is_supported(card, IPA_IPV6)) | 687 | if (!qeth_is_supported(card, IPA_IPV6)) |
685 | return 0; | 688 | return 0; |
686 | qeth_l3_correct_routing_type(card, &card->options.route6.type, | 689 | rc = qeth_l3_correct_routing_type(card, &card->options.route6.type, |
687 | QETH_PROT_IPV6); | 690 | QETH_PROT_IPV6); |
691 | if (rc) | ||
692 | return rc; | ||
688 | 693 | ||
689 | rc = qeth_l3_send_setrouting(card, card->options.route6.type, | 694 | rc = qeth_l3_send_setrouting(card, card->options.route6.type, |
690 | QETH_PROT_IPV6); | 695 | QETH_PROT_IPV6); |
@@ -2898,7 +2903,9 @@ static inline int qeth_l3_tso_elements(struct sk_buff *skb) | |||
2898 | tcp_hdr(skb)->doff * 4; | 2903 | tcp_hdr(skb)->doff * 4; |
2899 | int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); | 2904 | int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); |
2900 | int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); | 2905 | int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); |
2901 | elements += skb_shinfo(skb)->nr_frags; | 2906 | |
2907 | elements += qeth_get_elements_for_frags(skb); | ||
2908 | |||
2902 | return elements; | 2909 | return elements; |
2903 | } | 2910 | } |
2904 | 2911 | ||
@@ -3348,7 +3355,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3348 | rc = -ENODEV; | 3355 | rc = -ENODEV; |
3349 | goto out_remove; | 3356 | goto out_remove; |
3350 | } | 3357 | } |
3351 | qeth_trace_features(card); | ||
3352 | 3358 | ||
3353 | if (!card->dev && qeth_l3_setup_netdev(card)) { | 3359 | if (!card->dev && qeth_l3_setup_netdev(card)) { |
3354 | rc = -ENODEV; | 3360 | rc = -ENODEV; |
@@ -3425,6 +3431,7 @@ contin: | |||
3425 | qeth_l3_set_multicast_list(card->dev); | 3431 | qeth_l3_set_multicast_list(card->dev); |
3426 | rtnl_unlock(); | 3432 | rtnl_unlock(); |
3427 | } | 3433 | } |
3434 | qeth_trace_features(card); | ||
3428 | /* let user_space know that device is online */ | 3435 | /* let user_space know that device is online */ |
3429 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); | 3436 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); |
3430 | mutex_unlock(&card->conf_mutex); | 3437 | mutex_unlock(&card->conf_mutex); |
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index ebc379486267..e70af2406ff9 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c | |||
@@ -87,6 +87,8 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card, | |||
87 | rc = qeth_l3_setrouting_v6(card); | 87 | rc = qeth_l3_setrouting_v6(card); |
88 | } | 88 | } |
89 | out: | 89 | out: |
90 | if (rc) | ||
91 | route->type = old_route_type; | ||
90 | mutex_unlock(&card->conf_mutex); | 92 | mutex_unlock(&card->conf_mutex); |
91 | return rc ? rc : count; | 93 | return rc ? rc : count; |
92 | } | 94 | } |
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index db0cf7c8adde..a0fc7b9eea65 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c | |||
@@ -166,6 +166,7 @@ static int chap_server_compute_md5( | |||
166 | { | 166 | { |
167 | char *endptr; | 167 | char *endptr; |
168 | unsigned long id; | 168 | unsigned long id; |
169 | unsigned char id_as_uchar; | ||
169 | unsigned char digest[MD5_SIGNATURE_SIZE]; | 170 | unsigned char digest[MD5_SIGNATURE_SIZE]; |
170 | unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2]; | 171 | unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2]; |
171 | unsigned char identifier[10], *challenge = NULL; | 172 | unsigned char identifier[10], *challenge = NULL; |
@@ -355,7 +356,9 @@ static int chap_server_compute_md5( | |||
355 | goto out; | 356 | goto out; |
356 | } | 357 | } |
357 | 358 | ||
358 | sg_init_one(&sg, &id, 1); | 359 | /* To handle both endiannesses */ |
360 | id_as_uchar = id; | ||
361 | sg_init_one(&sg, &id_as_uchar, 1); | ||
359 | ret = crypto_hash_update(&desc, &sg, 1); | 362 | ret = crypto_hash_update(&desc, &sg, 1); |
360 | if (ret < 0) { | 363 | if (ret < 0) { |
361 | pr_err("crypto_hash_update() failed for id\n"); | 364 | pr_err("crypto_hash_update() failed for id\n"); |
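In the CHAP hunk above, `id` is an `unsigned long` but only one octet of it belongs in the digest; hashing the first in-memory byte of the long gives different results on little- and big-endian hosts, so the value is first narrowed into an `unsigned char`. A hedged, standalone illustration of why the narrowing matters:

```c
/* Hedged illustration: taking the first in-memory byte of a long is
 * endian-dependent; narrowing to unsigned char first is not. */
#include <stdio.h>

int main(void)
{
	unsigned long id = 0x2a;                                /* CHAP identifier, fits in one octet */
	unsigned char first_mem_byte = *(unsigned char *)&id;   /* 0x2a on LE, 0x00 on BE */
	unsigned char id_as_uchar = (unsigned char)id;          /* 0x2a everywhere */

	printf("raw first byte: %#x, narrowed: %#x\n", first_mem_byte, id_as_uchar);
	return 0;
}
```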
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index bc02b018ae46..37ffc5bd2399 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h | |||
@@ -7,7 +7,7 @@ | |||
7 | #define FD_DEVICE_QUEUE_DEPTH 32 | 7 | #define FD_DEVICE_QUEUE_DEPTH 32 |
8 | #define FD_MAX_DEVICE_QUEUE_DEPTH 128 | 8 | #define FD_MAX_DEVICE_QUEUE_DEPTH 128 |
9 | #define FD_BLOCKSIZE 512 | 9 | #define FD_BLOCKSIZE 512 |
10 | #define FD_MAX_SECTORS 1024 | 10 | #define FD_MAX_SECTORS 2048 |
11 | 11 | ||
12 | #define RRF_EMULATE_CDB 0x01 | 12 | #define RRF_EMULATE_CDB 0x01 |
13 | #define RRF_GOT_LBA 0x02 | 13 | #define RRF_GOT_LBA 0x02 |
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 82e78d72fdb6..e992b27aa090 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -883,7 +883,14 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
883 | pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i, | 883 | pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i, |
884 | page, len, off); | 884 | page, len, off); |
885 | 885 | ||
886 | while (len > 0 && data_len > 0) { | 886 | /* |
887 | * We only have one page of data in each sg element, | ||
888 | * we can not cross a page boundary. | ||
889 | */ | ||
890 | if (off + len > PAGE_SIZE) | ||
891 | goto fail; | ||
892 | |||
893 | if (len > 0 && data_len > 0) { | ||
887 | bytes = min_t(unsigned int, len, PAGE_SIZE - off); | 894 | bytes = min_t(unsigned int, len, PAGE_SIZE - off); |
888 | bytes = min(bytes, data_len); | 895 | bytes = min(bytes, data_len); |
889 | 896 | ||
@@ -940,9 +947,7 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, | |||
940 | bio = NULL; | 947 | bio = NULL; |
941 | } | 948 | } |
942 | 949 | ||
943 | len -= bytes; | ||
944 | data_len -= bytes; | 950 | data_len -= bytes; |
945 | off = 0; | ||
946 | } | 951 | } |
947 | } | 952 | } |
948 | 953 | ||
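The pscsi_map_sg change relies on each scatterlist element covering at most one page: a segment whose offset plus length spills past PAGE_SIZE is now rejected outright, and the per-page while loop collapses to a single pass. A small sketch of the length calculation under that assumption (the page size constant is illustrative):

    #define EXAMPLE_PAGE_SIZE 4096u   /* illustrative; the real value is arch-dependent */

    /* Returns the byte count to map from one single-page sg segment, or 0 when
     * the segment would cross a page boundary and the command must fail. */
    static unsigned int map_single_page_segment(unsigned int off, unsigned int len,
                                                unsigned int data_len)
    {
        if (off + len > EXAMPLE_PAGE_SIZE)
            return 0;
        if (len == 0 || data_len == 0)
            return 0;
        return len < data_len ? len : data_len;
    }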
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 290230de2c53..60d4b5185f32 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
@@ -464,8 +464,11 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) | |||
464 | break; | 464 | break; |
465 | case SYNCHRONIZE_CACHE: | 465 | case SYNCHRONIZE_CACHE: |
466 | case SYNCHRONIZE_CACHE_16: | 466 | case SYNCHRONIZE_CACHE_16: |
467 | if (!ops->execute_sync_cache) | 467 | if (!ops->execute_sync_cache) { |
468 | return TCM_UNSUPPORTED_SCSI_OPCODE; | 468 | size = 0; |
469 | cmd->execute_cmd = sbc_emulate_noop; | ||
470 | break; | ||
471 | } | ||
469 | 472 | ||
470 | /* | 473 | /* |
471 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE | 474 | * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE |
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 9169d6a5d7e4..aac9d2727e3c 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
@@ -711,7 +711,8 @@ int core_tpg_register( | |||
711 | 711 | ||
712 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) { | 712 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) { |
713 | if (core_tpg_setup_virtual_lun0(se_tpg) < 0) { | 713 | if (core_tpg_setup_virtual_lun0(se_tpg) < 0) { |
714 | kfree(se_tpg); | 714 | array_free(se_tpg->tpg_lun_list, |
715 | TRANSPORT_MAX_LUNS_PER_TPG); | ||
715 | return -ENOMEM; | 716 | return -ENOMEM; |
716 | } | 717 | } |
717 | } | 718 | } |
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c index 7b0bfa0e7a9c..3078c403b42d 100644 --- a/drivers/thermal/dove_thermal.c +++ b/drivers/thermal/dove_thermal.c | |||
@@ -143,22 +143,18 @@ static int dove_thermal_probe(struct platform_device *pdev) | |||
143 | if (!priv) | 143 | if (!priv) |
144 | return -ENOMEM; | 144 | return -ENOMEM; |
145 | 145 | ||
146 | priv->sensor = devm_request_and_ioremap(&pdev->dev, res); | 146 | priv->sensor = devm_ioremap_resource(&pdev->dev, res); |
147 | if (!priv->sensor) { | 147 | if (IS_ERR(priv->sensor)) |
148 | dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); | 148 | return PTR_ERR(priv->sensor); |
149 | return -EADDRNOTAVAIL; | ||
150 | } | ||
151 | 149 | ||
152 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 150 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
153 | if (!res) { | 151 | if (!res) { |
154 | dev_err(&pdev->dev, "Failed to get platform resource\n"); | 152 | dev_err(&pdev->dev, "Failed to get platform resource\n"); |
155 | return -ENODEV; | 153 | return -ENODEV; |
156 | } | 154 | } |
157 | priv->control = devm_request_and_ioremap(&pdev->dev, res); | 155 | priv->control = devm_ioremap_resource(&pdev->dev, res); |
158 | if (!priv->control) { | 156 | if (IS_ERR(priv->control)) |
159 | dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); | 157 | return PTR_ERR(priv->control); |
160 | return -EADDRNOTAVAIL; | ||
161 | } | ||
162 | 158 | ||
163 | ret = dove_init_sensor(priv); | 159 | ret = dove_init_sensor(priv); |
164 | if (ret) { | 160 | if (ret) { |
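The dove_thermal conversion above, and the kirkwood and rcar ones that follow, all use the same pattern: devm_ioremap_resource() validates the resource, prints its own error message and returns an ERR_PTR, so the caller only needs an IS_ERR()/PTR_ERR() check. A sketch of the resulting probe shape, with placeholder names for the driver specifics:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int example_thermal_probe(struct platform_device *pdev)
    {
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
            return PTR_ERR(base);   /* the helper already printed the error */

        /* ... read the sensor through 'base' ... */
        return 0;
    }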
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c index e04ebd8671ac..46568c078dee 100644 --- a/drivers/thermal/exynos_thermal.c +++ b/drivers/thermal/exynos_thermal.c | |||
@@ -476,7 +476,7 @@ static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf) | |||
476 | 476 | ||
477 | if (IS_ERR(th_zone->therm_dev)) { | 477 | if (IS_ERR(th_zone->therm_dev)) { |
478 | pr_err("Failed to register thermal zone device\n"); | 478 | pr_err("Failed to register thermal zone device\n"); |
479 | ret = -EINVAL; | 479 | ret = PTR_ERR(th_zone->therm_dev); |
480 | goto err_unregister; | 480 | goto err_unregister; |
481 | } | 481 | } |
482 | th_zone->mode = THERMAL_DEVICE_ENABLED; | 482 | th_zone->mode = THERMAL_DEVICE_ENABLED; |
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c index 65cb4f09e8f6..e5500edb5285 100644 --- a/drivers/thermal/kirkwood_thermal.c +++ b/drivers/thermal/kirkwood_thermal.c | |||
@@ -85,11 +85,9 @@ static int kirkwood_thermal_probe(struct platform_device *pdev) | |||
85 | if (!priv) | 85 | if (!priv) |
86 | return -ENOMEM; | 86 | return -ENOMEM; |
87 | 87 | ||
88 | priv->sensor = devm_request_and_ioremap(&pdev->dev, res); | 88 | priv->sensor = devm_ioremap_resource(&pdev->dev, res); |
89 | if (!priv->sensor) { | 89 | if (IS_ERR(priv->sensor)) |
90 | dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); | 90 | return PTR_ERR(priv->sensor); |
91 | return -EADDRNOTAVAIL; | ||
92 | } | ||
93 | 91 | ||
94 | thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0, | 92 | thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0, |
95 | priv, &ops, NULL, 0, 0); | 93 | priv, &ops, NULL, 0, 0); |
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 28f091994013..2cc5b6115e3e 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c | |||
@@ -145,6 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) | |||
145 | struct device *dev = rcar_priv_to_dev(priv); | 145 | struct device *dev = rcar_priv_to_dev(priv); |
146 | int i; | 146 | int i; |
147 | int ctemp, old, new; | 147 | int ctemp, old, new; |
148 | int ret = -EINVAL; | ||
148 | 149 | ||
149 | mutex_lock(&priv->lock); | 150 | mutex_lock(&priv->lock); |
150 | 151 | ||
@@ -174,7 +175,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) | |||
174 | 175 | ||
175 | if (!ctemp) { | 176 | if (!ctemp) { |
176 | dev_err(dev, "thermal sensor was broken\n"); | 177 | dev_err(dev, "thermal sensor was broken\n"); |
177 | return -EINVAL; | 178 | goto err_out_unlock; |
178 | } | 179 | } |
179 | 180 | ||
180 | /* | 181 | /* |
@@ -192,10 +193,10 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) | |||
192 | dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp); | 193 | dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp); |
193 | 194 | ||
194 | priv->ctemp = ctemp; | 195 | priv->ctemp = ctemp; |
195 | 196 | ret = 0; | |
197 | err_out_unlock: | ||
196 | mutex_unlock(&priv->lock); | 198 | mutex_unlock(&priv->lock); |
197 | 199 | return ret; | |
198 | return 0; | ||
199 | } | 200 | } |
200 | 201 | ||
201 | static int rcar_thermal_get_temp(struct thermal_zone_device *zone, | 202 | static int rcar_thermal_get_temp(struct thermal_zone_device *zone, |
@@ -363,6 +364,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
363 | struct resource *res, *irq; | 364 | struct resource *res, *irq; |
364 | int mres = 0; | 365 | int mres = 0; |
365 | int i; | 366 | int i; |
367 | int ret = -ENODEV; | ||
366 | int idle = IDLE_INTERVAL; | 368 | int idle = IDLE_INTERVAL; |
367 | 369 | ||
368 | common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); | 370 | common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); |
@@ -399,11 +401,9 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
399 | /* | 401 | /* |
400 | * rcar_has_irq_support() will be enabled | 402 | * rcar_has_irq_support() will be enabled |
401 | */ | 403 | */ |
402 | common->base = devm_request_and_ioremap(dev, res); | 404 | common->base = devm_ioremap_resource(dev, res); |
403 | if (!common->base) { | 405 | if (IS_ERR(common->base)) |
404 | dev_err(dev, "Unable to ioremap thermal register\n"); | 406 | return PTR_ERR(common->base); |
405 | return -ENOMEM; | ||
406 | } | ||
407 | 407 | ||
408 | /* enable temperature comparation */ | 408 | /* enable temperature comparation */ |
409 | rcar_thermal_common_write(common, ENR, 0x00030303); | 409 | rcar_thermal_common_write(common, ENR, 0x00030303); |
@@ -422,11 +422,9 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
422 | return -ENOMEM; | 422 | return -ENOMEM; |
423 | } | 423 | } |
424 | 424 | ||
425 | priv->base = devm_request_and_ioremap(dev, res); | 425 | priv->base = devm_ioremap_resource(dev, res); |
426 | if (!priv->base) { | 426 | if (IS_ERR(priv->base)) |
427 | dev_err(dev, "Unable to ioremap priv register\n"); | 427 | return PTR_ERR(priv->base); |
428 | return -ENOMEM; | ||
429 | } | ||
430 | 428 | ||
431 | priv->common = common; | 429 | priv->common = common; |
432 | priv->id = i; | 430 | priv->id = i; |
@@ -441,6 +439,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
441 | idle); | 439 | idle); |
442 | if (IS_ERR(priv->zone)) { | 440 | if (IS_ERR(priv->zone)) { |
443 | dev_err(dev, "can't register thermal zone\n"); | 441 | dev_err(dev, "can't register thermal zone\n"); |
442 | ret = PTR_ERR(priv->zone); | ||
444 | goto error_unregister; | 443 | goto error_unregister; |
445 | } | 444 | } |
446 | 445 | ||
@@ -460,7 +459,7 @@ error_unregister: | |||
460 | rcar_thermal_for_each_priv(priv, common) | 459 | rcar_thermal_for_each_priv(priv, common) |
461 | thermal_zone_device_unregister(priv->zone); | 460 | thermal_zone_device_unregister(priv->zone); |
462 | 461 | ||
463 | return -ENODEV; | 462 | return ret; |
464 | } | 463 | } |
465 | 464 | ||
466 | static int rcar_thermal_remove(struct platform_device *pdev) | 465 | static int rcar_thermal_remove(struct platform_device *pdev) |
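Besides the ioremap conversion, the rcar_thermal update fixes a lock leak: the temperature update used to return early with priv->lock still held. The patched shape initialises the return code up front and leaves through a single unlock label; a minimal sketch with placeholder names:

    #include <linux/mutex.h>

    static int example_update_temp(struct mutex *lock, int sensor_ready)
    {
        int ret = -EINVAL;

        mutex_lock(lock);
        if (!sensor_ready)
            goto err_out_unlock;    /* previously an early return that leaked the lock */

        /* ... latch the new temperature ... */
        ret = 0;
    err_out_unlock:
        mutex_unlock(lock);
        return ret;
    }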
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index e343d6670854..451687cb9685 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c | |||
@@ -968,6 +968,7 @@ static struct uart_ops sunsu_pops = { | |||
968 | #define UART_NR 4 | 968 | #define UART_NR 4 |
969 | 969 | ||
970 | static struct uart_sunsu_port sunsu_ports[UART_NR]; | 970 | static struct uart_sunsu_port sunsu_ports[UART_NR]; |
971 | static int nr_inst; /* Number of already registered ports */ | ||
971 | 972 | ||
972 | #ifdef CONFIG_SERIO | 973 | #ifdef CONFIG_SERIO |
973 | 974 | ||
@@ -1337,13 +1338,8 @@ static int __init sunsu_console_setup(struct console *co, char *options) | |||
1337 | printk("Console: ttyS%d (SU)\n", | 1338 | printk("Console: ttyS%d (SU)\n", |
1338 | (sunsu_reg.minor - 64) + co->index); | 1339 | (sunsu_reg.minor - 64) + co->index); |
1339 | 1340 | ||
1340 | /* | 1341 | if (co->index > nr_inst) |
1341 | * Check whether an invalid uart number has been specified, and | 1342 | return -ENODEV; |
1342 | * if so, search for the first available port that does have | ||
1343 | * console support. | ||
1344 | */ | ||
1345 | if (co->index >= UART_NR) | ||
1346 | co->index = 0; | ||
1347 | port = &sunsu_ports[co->index].port; | 1343 | port = &sunsu_ports[co->index].port; |
1348 | 1344 | ||
1349 | /* | 1345 | /* |
@@ -1408,7 +1404,6 @@ static enum su_type su_get_type(struct device_node *dp) | |||
1408 | 1404 | ||
1409 | static int su_probe(struct platform_device *op) | 1405 | static int su_probe(struct platform_device *op) |
1410 | { | 1406 | { |
1411 | static int inst; | ||
1412 | struct device_node *dp = op->dev.of_node; | 1407 | struct device_node *dp = op->dev.of_node; |
1413 | struct uart_sunsu_port *up; | 1408 | struct uart_sunsu_port *up; |
1414 | struct resource *rp; | 1409 | struct resource *rp; |
@@ -1418,16 +1413,16 @@ static int su_probe(struct platform_device *op) | |||
1418 | 1413 | ||
1419 | type = su_get_type(dp); | 1414 | type = su_get_type(dp); |
1420 | if (type == SU_PORT_PORT) { | 1415 | if (type == SU_PORT_PORT) { |
1421 | if (inst >= UART_NR) | 1416 | if (nr_inst >= UART_NR) |
1422 | return -EINVAL; | 1417 | return -EINVAL; |
1423 | up = &sunsu_ports[inst]; | 1418 | up = &sunsu_ports[nr_inst]; |
1424 | } else { | 1419 | } else { |
1425 | up = kzalloc(sizeof(*up), GFP_KERNEL); | 1420 | up = kzalloc(sizeof(*up), GFP_KERNEL); |
1426 | if (!up) | 1421 | if (!up) |
1427 | return -ENOMEM; | 1422 | return -ENOMEM; |
1428 | } | 1423 | } |
1429 | 1424 | ||
1430 | up->port.line = inst; | 1425 | up->port.line = nr_inst; |
1431 | 1426 | ||
1432 | spin_lock_init(&up->port.lock); | 1427 | spin_lock_init(&up->port.lock); |
1433 | 1428 | ||
@@ -1461,6 +1456,8 @@ static int su_probe(struct platform_device *op) | |||
1461 | } | 1456 | } |
1462 | dev_set_drvdata(&op->dev, up); | 1457 | dev_set_drvdata(&op->dev, up); |
1463 | 1458 | ||
1459 | nr_inst++; | ||
1460 | |||
1464 | return 0; | 1461 | return 0; |
1465 | } | 1462 | } |
1466 | 1463 | ||
@@ -1488,7 +1485,7 @@ static int su_probe(struct platform_device *op) | |||
1488 | 1485 | ||
1489 | dev_set_drvdata(&op->dev, up); | 1486 | dev_set_drvdata(&op->dev, up); |
1490 | 1487 | ||
1491 | inst++; | 1488 | nr_inst++; |
1492 | 1489 | ||
1493 | return 0; | 1490 | return 0; |
1494 | 1491 | ||
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 8ac25adf31b4..387dc6c8ad25 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -593,7 +593,6 @@ static void acm_port_destruct(struct tty_port *port) | |||
593 | 593 | ||
594 | dev_dbg(&acm->control->dev, "%s\n", __func__); | 594 | dev_dbg(&acm->control->dev, "%s\n", __func__); |
595 | 595 | ||
596 | tty_unregister_device(acm_tty_driver, acm->minor); | ||
597 | acm_release_minor(acm); | 596 | acm_release_minor(acm); |
598 | usb_put_intf(acm->control); | 597 | usb_put_intf(acm->control); |
599 | kfree(acm->country_codes); | 598 | kfree(acm->country_codes); |
@@ -977,6 +976,8 @@ static int acm_probe(struct usb_interface *intf, | |||
977 | int num_rx_buf; | 976 | int num_rx_buf; |
978 | int i; | 977 | int i; |
979 | int combined_interfaces = 0; | 978 | int combined_interfaces = 0; |
979 | struct device *tty_dev; | ||
980 | int rv = -ENOMEM; | ||
980 | 981 | ||
981 | /* normal quirks */ | 982 | /* normal quirks */ |
982 | quirks = (unsigned long)id->driver_info; | 983 | quirks = (unsigned long)id->driver_info; |
@@ -1339,11 +1340,24 @@ skip_countries: | |||
1339 | usb_set_intfdata(data_interface, acm); | 1340 | usb_set_intfdata(data_interface, acm); |
1340 | 1341 | ||
1341 | usb_get_intf(control_interface); | 1342 | usb_get_intf(control_interface); |
1342 | tty_port_register_device(&acm->port, acm_tty_driver, minor, | 1343 | tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor, |
1343 | &control_interface->dev); | 1344 | &control_interface->dev); |
1345 | if (IS_ERR(tty_dev)) { | ||
1346 | rv = PTR_ERR(tty_dev); | ||
1347 | goto alloc_fail8; | ||
1348 | } | ||
1344 | 1349 | ||
1345 | return 0; | 1350 | return 0; |
1351 | alloc_fail8: | ||
1352 | if (acm->country_codes) { | ||
1353 | device_remove_file(&acm->control->dev, | ||
1354 | &dev_attr_wCountryCodes); | ||
1355 | device_remove_file(&acm->control->dev, | ||
1356 | &dev_attr_iCountryCodeRelDate); | ||
1357 | } | ||
1358 | device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities); | ||
1346 | alloc_fail7: | 1359 | alloc_fail7: |
1360 | usb_set_intfdata(intf, NULL); | ||
1347 | for (i = 0; i < ACM_NW; i++) | 1361 | for (i = 0; i < ACM_NW; i++) |
1348 | usb_free_urb(acm->wb[i].urb); | 1362 | usb_free_urb(acm->wb[i].urb); |
1349 | alloc_fail6: | 1363 | alloc_fail6: |
@@ -1359,7 +1373,7 @@ alloc_fail2: | |||
1359 | acm_release_minor(acm); | 1373 | acm_release_minor(acm); |
1360 | kfree(acm); | 1374 | kfree(acm); |
1361 | alloc_fail: | 1375 | alloc_fail: |
1362 | return -ENOMEM; | 1376 | return rv; |
1363 | } | 1377 | } |
1364 | 1378 | ||
1365 | static void stop_data_traffic(struct acm *acm) | 1379 | static void stop_data_traffic(struct acm *acm) |
@@ -1411,6 +1425,8 @@ static void acm_disconnect(struct usb_interface *intf) | |||
1411 | 1425 | ||
1412 | stop_data_traffic(acm); | 1426 | stop_data_traffic(acm); |
1413 | 1427 | ||
1428 | tty_unregister_device(acm_tty_driver, acm->minor); | ||
1429 | |||
1414 | usb_free_urb(acm->ctrlurb); | 1430 | usb_free_urb(acm->ctrlurb); |
1415 | for (i = 0; i < ACM_NW; i++) | 1431 | for (i = 0; i < ACM_NW; i++) |
1416 | usb_free_urb(acm->wb[i].urb); | 1432 | usb_free_urb(acm->wb[i].urb); |
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 622b4a48e732..2b487d4797bd 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c | |||
@@ -173,6 +173,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
173 | struct hc_driver *driver; | 173 | struct hc_driver *driver; |
174 | struct usb_hcd *hcd; | 174 | struct usb_hcd *hcd; |
175 | int retval; | 175 | int retval; |
176 | int hcd_irq = 0; | ||
176 | 177 | ||
177 | if (usb_disabled()) | 178 | if (usb_disabled()) |
178 | return -ENODEV; | 179 | return -ENODEV; |
@@ -187,15 +188,19 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
187 | return -ENODEV; | 188 | return -ENODEV; |
188 | dev->current_state = PCI_D0; | 189 | dev->current_state = PCI_D0; |
189 | 190 | ||
190 | /* The xHCI driver supports MSI and MSI-X, | 191 | /* |
191 | * so don't fail if the BIOS doesn't provide a legacy IRQ. | 192 | * The xHCI driver has its own irq management |
193 | * make sure irq setup is not touched for xhci in generic hcd code | ||
192 | */ | 194 | */ |
193 | if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) { | 195 | if ((driver->flags & HCD_MASK) != HCD_USB3) { |
194 | dev_err(&dev->dev, | 196 | if (!dev->irq) { |
195 | "Found HC with no IRQ. Check BIOS/PCI %s setup!\n", | 197 | dev_err(&dev->dev, |
196 | pci_name(dev)); | 198 | "Found HC with no IRQ. Check BIOS/PCI %s setup!\n", |
197 | retval = -ENODEV; | 199 | pci_name(dev)); |
198 | goto disable_pci; | 200 | retval = -ENODEV; |
201 | goto disable_pci; | ||
202 | } | ||
203 | hcd_irq = dev->irq; | ||
199 | } | 204 | } |
200 | 205 | ||
201 | hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev)); | 206 | hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev)); |
@@ -245,7 +250,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
245 | 250 | ||
246 | pci_set_master(dev); | 251 | pci_set_master(dev); |
247 | 252 | ||
248 | retval = usb_add_hcd(hcd, dev->irq, IRQF_SHARED); | 253 | retval = usb_add_hcd(hcd, hcd_irq, IRQF_SHARED); |
249 | if (retval != 0) | 254 | if (retval != 0) |
250 | goto unmap_registers; | 255 | goto unmap_registers; |
251 | set_hs_companion(dev, hcd); | 256 | set_hs_companion(dev, hcd); |
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 71beeb833558..cc9c49c57c80 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c | |||
@@ -447,14 +447,13 @@ static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req) | |||
447 | static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req) | 447 | static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req) |
448 | { | 448 | { |
449 | struct f_rndis *rndis = req->context; | 449 | struct f_rndis *rndis = req->context; |
450 | struct usb_composite_dev *cdev = rndis->port.func.config->cdev; | ||
451 | int status; | 450 | int status; |
452 | 451 | ||
453 | /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */ | 452 | /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */ |
454 | // spin_lock(&dev->lock); | 453 | // spin_lock(&dev->lock); |
455 | status = rndis_msg_parser(rndis->config, (u8 *) req->buf); | 454 | status = rndis_msg_parser(rndis->config, (u8 *) req->buf); |
456 | if (status < 0) | 455 | if (status < 0) |
457 | ERROR(cdev, "RNDIS command error %d, %d/%d\n", | 456 | pr_err("RNDIS command error %d, %d/%d\n", |
458 | status, req->actual, req->length); | 457 | status, req->actual, req->length); |
459 | // spin_unlock(&dev->lock); | 458 | // spin_unlock(&dev->lock); |
460 | } | 459 | } |
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c index 3953dd4d7186..3b343b23e4b0 100644 --- a/drivers/usb/gadget/g_ffs.c +++ b/drivers/usb/gadget/g_ffs.c | |||
@@ -357,7 +357,7 @@ static int gfs_bind(struct usb_composite_dev *cdev) | |||
357 | goto error; | 357 | goto error; |
358 | gfs_dev_desc.iProduct = gfs_strings[USB_GADGET_PRODUCT_IDX].id; | 358 | gfs_dev_desc.iProduct = gfs_strings[USB_GADGET_PRODUCT_IDX].id; |
359 | 359 | ||
360 | for (i = func_num; --i; ) { | 360 | for (i = func_num; i--; ) { |
361 | ret = functionfs_bind(ffs_tab[i].ffs_data, cdev); | 361 | ret = functionfs_bind(ffs_tab[i].ffs_data, cdev); |
362 | if (unlikely(ret < 0)) { | 362 | if (unlikely(ret < 0)) { |
363 | while (++i < func_num) | 363 | while (++i < func_num) |
@@ -413,7 +413,7 @@ static int gfs_unbind(struct usb_composite_dev *cdev) | |||
413 | gether_cleanup(); | 413 | gether_cleanup(); |
414 | gfs_ether_setup = false; | 414 | gfs_ether_setup = false; |
415 | 415 | ||
416 | for (i = func_num; --i; ) | 416 | for (i = func_num; i--; ) |
417 | if (ffs_tab[i].ffs_data) | 417 | if (ffs_tab[i].ffs_data) |
418 | functionfs_unbind(ffs_tab[i].ffs_data); | 418 | functionfs_unbind(ffs_tab[i].ffs_data); |
419 | 419 | ||
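The g_ffs loops above change the countdown condition from --i to i--: the pre-decrement form never runs the body for index 0, so the first function table entry was skipped during bind and unbind. A standalone illustration of the two iteration patterns:

    #include <stdio.h>

    int main(void)
    {
        unsigned int func_num = 3;
        unsigned int i;

        printf("old:");
        for (i = func_num; --i; )
            printf(" %u", i);       /* prints 2 1 -- index 0 is never visited */

        printf("\nnew:");
        for (i = func_num; i--; )
            printf(" %u", i);       /* prints 2 1 0 */
        printf("\n");
        return 0;
    }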
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c index d226058e3b88..32524b631959 100644 --- a/drivers/usb/gadget/net2272.c +++ b/drivers/usb/gadget/net2272.c | |||
@@ -59,7 +59,7 @@ static const char * const ep_name[] = { | |||
59 | }; | 59 | }; |
60 | 60 | ||
61 | #define DMA_ADDR_INVALID (~(dma_addr_t)0) | 61 | #define DMA_ADDR_INVALID (~(dma_addr_t)0) |
62 | #ifdef CONFIG_USB_GADGET_NET2272_DMA | 62 | #ifdef CONFIG_USB_NET2272_DMA |
63 | /* | 63 | /* |
64 | * use_dma: the NET2272 can use an external DMA controller. | 64 | * use_dma: the NET2272 can use an external DMA controller. |
65 | * Note that since there is no generic DMA api, some functions, | 65 | * Note that since there is no generic DMA api, some functions, |
@@ -1495,6 +1495,13 @@ stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver) | |||
1495 | for (i = 0; i < 4; ++i) | 1495 | for (i = 0; i < 4; ++i) |
1496 | net2272_dequeue_all(&dev->ep[i]); | 1496 | net2272_dequeue_all(&dev->ep[i]); |
1497 | 1497 | ||
1498 | /* report disconnect; the driver is already quiesced */ | ||
1499 | if (driver) { | ||
1500 | spin_unlock(&dev->lock); | ||
1501 | driver->disconnect(&dev->gadget); | ||
1502 | spin_lock(&dev->lock); | ||
1503 | } | ||
1504 | |||
1498 | net2272_usb_reinit(dev); | 1505 | net2272_usb_reinit(dev); |
1499 | } | 1506 | } |
1500 | 1507 | ||
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c index a1b650e11339..3bd0f992fb49 100644 --- a/drivers/usb/gadget/net2280.c +++ b/drivers/usb/gadget/net2280.c | |||
@@ -1924,7 +1924,6 @@ static int net2280_start(struct usb_gadget *_gadget, | |||
1924 | err_func: | 1924 | err_func: |
1925 | device_remove_file (&dev->pdev->dev, &dev_attr_function); | 1925 | device_remove_file (&dev->pdev->dev, &dev_attr_function); |
1926 | err_unbind: | 1926 | err_unbind: |
1927 | driver->unbind (&dev->gadget); | ||
1928 | dev->gadget.dev.driver = NULL; | 1927 | dev->gadget.dev.driver = NULL; |
1929 | dev->driver = NULL; | 1928 | dev->driver = NULL; |
1930 | return retval; | 1929 | return retval; |
@@ -1946,6 +1945,13 @@ stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver) | |||
1946 | for (i = 0; i < 7; i++) | 1945 | for (i = 0; i < 7; i++) |
1947 | nuke (&dev->ep [i]); | 1946 | nuke (&dev->ep [i]); |
1948 | 1947 | ||
1948 | /* report disconnect; the driver is already quiesced */ | ||
1949 | if (driver) { | ||
1950 | spin_unlock(&dev->lock); | ||
1951 | driver->disconnect(&dev->gadget); | ||
1952 | spin_lock(&dev->lock); | ||
1953 | } | ||
1954 | |||
1949 | usb_reinit (dev); | 1955 | usb_reinit (dev); |
1950 | } | 1956 | } |
1951 | 1957 | ||
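Both net2272 and net2280 now report a disconnect to the gadget driver from stop_activity(), and both drop the controller spinlock around the callback, since ->disconnect() is allowed to call back into the UDC (for example to disable endpoints) and would otherwise contend for the same lock. A sketch of that calling convention using the gadget API types:

    #include <linux/spinlock.h>
    #include <linux/usb/gadget.h>

    static void example_report_disconnect(spinlock_t *lock,
                                          struct usb_gadget *gadget,
                                          struct usb_gadget_driver *driver)
    {
        if (!driver)
            return;
        spin_unlock(lock);          /* the callback may re-enter the UDC */
        driver->disconnect(gadget);
        spin_lock(lock);
    }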
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c index c5034d9c946b..b369292d4b90 100644 --- a/drivers/usb/gadget/u_serial.c +++ b/drivers/usb/gadget/u_serial.c | |||
@@ -136,7 +136,7 @@ static struct portmaster { | |||
136 | pr_debug(fmt, ##arg) | 136 | pr_debug(fmt, ##arg) |
137 | #endif /* pr_vdebug */ | 137 | #endif /* pr_vdebug */ |
138 | #else | 138 | #else |
139 | #ifndef pr_vdebig | 139 | #ifndef pr_vdebug |
140 | #define pr_vdebug(fmt, arg...) \ | 140 | #define pr_vdebug(fmt, arg...) \ |
141 | ({ if (0) pr_debug(fmt, ##arg); }) | 141 | ({ if (0) pr_debug(fmt, ##arg); }) |
142 | #endif /* pr_vdebug */ | 142 | #endif /* pr_vdebug */ |
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c index 2a9cd369f71c..f8f62c3ed65e 100644 --- a/drivers/usb/gadget/udc-core.c +++ b/drivers/usb/gadget/udc-core.c | |||
@@ -216,7 +216,7 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) | |||
216 | usb_gadget_disconnect(udc->gadget); | 216 | usb_gadget_disconnect(udc->gadget); |
217 | udc->driver->disconnect(udc->gadget); | 217 | udc->driver->disconnect(udc->gadget); |
218 | udc->driver->unbind(udc->gadget); | 218 | udc->driver->unbind(udc->gadget); |
219 | usb_gadget_udc_stop(udc->gadget, udc->driver); | 219 | usb_gadget_udc_stop(udc->gadget, NULL); |
220 | 220 | ||
221 | udc->driver = NULL; | 221 | udc->driver = NULL; |
222 | udc->dev.driver = NULL; | 222 | udc->dev.driver = NULL; |
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 5726cb144abf..416a6dce5e11 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
@@ -302,6 +302,7 @@ static void ehci_quiesce (struct ehci_hcd *ehci) | |||
302 | 302 | ||
303 | static void end_unlink_async(struct ehci_hcd *ehci); | 303 | static void end_unlink_async(struct ehci_hcd *ehci); |
304 | static void unlink_empty_async(struct ehci_hcd *ehci); | 304 | static void unlink_empty_async(struct ehci_hcd *ehci); |
305 | static void unlink_empty_async_suspended(struct ehci_hcd *ehci); | ||
305 | static void ehci_work(struct ehci_hcd *ehci); | 306 | static void ehci_work(struct ehci_hcd *ehci); |
306 | static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); | 307 | static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); |
307 | static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); | 308 | static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); |
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 4d3b294f203e..7d06e77f6c4f 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
@@ -328,7 +328,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) | |||
328 | ehci->rh_state = EHCI_RH_SUSPENDED; | 328 | ehci->rh_state = EHCI_RH_SUSPENDED; |
329 | 329 | ||
330 | end_unlink_async(ehci); | 330 | end_unlink_async(ehci); |
331 | unlink_empty_async(ehci); | 331 | unlink_empty_async_suspended(ehci); |
332 | ehci_handle_intr_unlinks(ehci); | 332 | ehci_handle_intr_unlinks(ehci); |
333 | end_free_itds(ehci); | 333 | end_free_itds(ehci); |
334 | 334 | ||
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 5464665f0b6a..23d136904285 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c | |||
@@ -1316,6 +1316,19 @@ static void unlink_empty_async(struct ehci_hcd *ehci) | |||
1316 | } | 1316 | } |
1317 | } | 1317 | } |
1318 | 1318 | ||
1319 | /* The root hub is suspended; unlink all the async QHs */ | ||
1320 | static void unlink_empty_async_suspended(struct ehci_hcd *ehci) | ||
1321 | { | ||
1322 | struct ehci_qh *qh; | ||
1323 | |||
1324 | while (ehci->async->qh_next.qh) { | ||
1325 | qh = ehci->async->qh_next.qh; | ||
1326 | WARN_ON(!list_empty(&qh->qtd_list)); | ||
1327 | single_unlink_async(ehci, qh); | ||
1328 | } | ||
1329 | start_iaa_cycle(ehci, false); | ||
1330 | } | ||
1331 | |||
1319 | /* makes sure the async qh will become idle */ | 1332 | /* makes sure the async qh will become idle */ |
1320 | /* caller must own ehci->lock */ | 1333 | /* caller must own ehci->lock */ |
1321 | 1334 | ||
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c index 20dbdcbe9b0f..c3fa1305f830 100644 --- a/drivers/usb/host/ehci-timer.c +++ b/drivers/usb/host/ehci-timer.c | |||
@@ -304,7 +304,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci) | |||
304 | * (a) SMP races against real IAA firing and retriggering, and | 304 | * (a) SMP races against real IAA firing and retriggering, and |
305 | * (b) clean HC shutdown, when IAA watchdog was pending. | 305 | * (b) clean HC shutdown, when IAA watchdog was pending. |
306 | */ | 306 | */ |
307 | if (ehci->async_iaa) { | 307 | if (1) { |
308 | u32 cmd, status; | 308 | u32 cmd, status; |
309 | 309 | ||
310 | /* If we get here, IAA is *REALLY* late. It's barely | 310 | /* If we get here, IAA is *REALLY* late. It's barely |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index f1f01a834ba7..849470b18831 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -350,7 +350,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd) | |||
350 | * generate interrupts. Don't even try to enable MSI. | 350 | * generate interrupts. Don't even try to enable MSI. |
351 | */ | 351 | */ |
352 | if (xhci->quirks & XHCI_BROKEN_MSI) | 352 | if (xhci->quirks & XHCI_BROKEN_MSI) |
353 | return 0; | 353 | goto legacy_irq; |
354 | 354 | ||
355 | /* unregister the legacy interrupt */ | 355 | /* unregister the legacy interrupt */ |
356 | if (hcd->irq) | 356 | if (hcd->irq) |
@@ -371,6 +371,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd) | |||
371 | return -EINVAL; | 371 | return -EINVAL; |
372 | } | 372 | } |
373 | 373 | ||
374 | legacy_irq: | ||
374 | /* fall back to legacy interrupt*/ | 375 | /* fall back to legacy interrupt*/ |
375 | ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, | 376 | ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, |
376 | hcd->irq_descr, hcd); | 377 | hcd->irq_descr, hcd); |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index f791bd0aee6c..2c510e4a7d4c 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -206,8 +206,8 @@ struct xhci_op_regs { | |||
206 | /* bits 12:31 are reserved (and should be preserved on writes). */ | 206 | /* bits 12:31 are reserved (and should be preserved on writes). */ |
207 | 207 | ||
208 | /* IMAN - Interrupt Management Register */ | 208 | /* IMAN - Interrupt Management Register */ |
209 | #define IMAN_IP (1 << 1) | 209 | #define IMAN_IE (1 << 1) |
210 | #define IMAN_IE (1 << 0) | 210 | #define IMAN_IP (1 << 0) |
211 | 211 | ||
212 | /* USBSTS - USB status - status bitmasks */ | 212 | /* USBSTS - USB status - status bitmasks */ |
213 | /* HC not running - set to 1 when run/stop bit is cleared. */ | 213 | /* HC not running - set to 1 when run/stop bit is cleared. */ |
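The xhci.h change swaps the two IMAN bit definitions so that Interrupt Pending is bit 0 and Interrupt Enable is bit 1, matching the xHCI specification. IP is write-1-to-clear, so the usual acknowledge sequence ORs it back in while keeping IE set; a sketch with illustrative names:

    #define EXAMPLE_IMAN_IP  (1 << 0)   /* interrupt pending, write 1 to clear */
    #define EXAMPLE_IMAN_IE  (1 << 1)   /* interrupt enable */

    /* Value to write back to IMAN to acknowledge a pending interrupt while
     * leaving interrupt generation enabled. */
    static unsigned int example_iman_ack(unsigned int iman)
    {
        return iman | EXAMPLE_IMAN_IP | EXAMPLE_IMAN_IE;
    }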
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index 7c71769d71ff..41613a2b35e8 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c | |||
@@ -327,7 +327,7 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci) | |||
327 | u8 devctl = musb_readb(mregs, MUSB_DEVCTL); | 327 | u8 devctl = musb_readb(mregs, MUSB_DEVCTL); |
328 | int err; | 328 | int err; |
329 | 329 | ||
330 | err = musb->int_usb & USB_INTR_VBUSERROR; | 330 | err = musb->int_usb & MUSB_INTR_VBUSERROR; |
331 | if (err) { | 331 | if (err) { |
332 | /* | 332 | /* |
333 | * The Mentor core doesn't debounce VBUS as needed | 333 | * The Mentor core doesn't debounce VBUS as needed |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index be18537c5f14..83eddedcd9be 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -141,7 +141,9 @@ static inline void map_dma_buffer(struct musb_request *request, | |||
141 | static inline void unmap_dma_buffer(struct musb_request *request, | 141 | static inline void unmap_dma_buffer(struct musb_request *request, |
142 | struct musb *musb) | 142 | struct musb *musb) |
143 | { | 143 | { |
144 | if (!is_buffer_mapped(request)) | 144 | struct musb_ep *musb_ep = request->ep; |
145 | |||
146 | if (!is_buffer_mapped(request) || !musb_ep->dma) | ||
145 | return; | 147 | return; |
146 | 148 | ||
147 | if (request->request.dma == DMA_ADDR_INVALID) { | 149 | if (request->request.dma == DMA_ADDR_INVALID) { |
@@ -195,7 +197,10 @@ __acquires(ep->musb->lock) | |||
195 | 197 | ||
196 | ep->busy = 1; | 198 | ep->busy = 1; |
197 | spin_unlock(&musb->lock); | 199 | spin_unlock(&musb->lock); |
198 | unmap_dma_buffer(req, musb); | 200 | |
201 | if (!dma_mapping_error(&musb->g.dev, request->dma)) | ||
202 | unmap_dma_buffer(req, musb); | ||
203 | |||
199 | if (request->status == 0) | 204 | if (request->status == 0) |
200 | dev_dbg(musb->controller, "%s done request %p, %d/%d\n", | 205 | dev_dbg(musb->controller, "%s done request %p, %d/%d\n", |
201 | ep->end_point.name, request, | 206 | ep->end_point.name, request, |
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c index cbd904b8fba5..4775f8209e55 100644 --- a/drivers/usb/serial/ark3116.c +++ b/drivers/usb/serial/ark3116.c | |||
@@ -62,7 +62,6 @@ static int is_irda(struct usb_serial *serial) | |||
62 | } | 62 | } |
63 | 63 | ||
64 | struct ark3116_private { | 64 | struct ark3116_private { |
65 | wait_queue_head_t delta_msr_wait; | ||
66 | struct async_icount icount; | 65 | struct async_icount icount; |
67 | int irda; /* 1 for irda device */ | 66 | int irda; /* 1 for irda device */ |
68 | 67 | ||
@@ -146,7 +145,6 @@ static int ark3116_port_probe(struct usb_serial_port *port) | |||
146 | if (!priv) | 145 | if (!priv) |
147 | return -ENOMEM; | 146 | return -ENOMEM; |
148 | 147 | ||
149 | init_waitqueue_head(&priv->delta_msr_wait); | ||
150 | mutex_init(&priv->hw_lock); | 148 | mutex_init(&priv->hw_lock); |
151 | spin_lock_init(&priv->status_lock); | 149 | spin_lock_init(&priv->status_lock); |
152 | 150 | ||
@@ -456,10 +454,14 @@ static int ark3116_ioctl(struct tty_struct *tty, | |||
456 | case TIOCMIWAIT: | 454 | case TIOCMIWAIT: |
457 | for (;;) { | 455 | for (;;) { |
458 | struct async_icount prev = priv->icount; | 456 | struct async_icount prev = priv->icount; |
459 | interruptible_sleep_on(&priv->delta_msr_wait); | 457 | interruptible_sleep_on(&port->delta_msr_wait); |
460 | /* see if a signal did it */ | 458 | /* see if a signal did it */ |
461 | if (signal_pending(current)) | 459 | if (signal_pending(current)) |
462 | return -ERESTARTSYS; | 460 | return -ERESTARTSYS; |
461 | |||
462 | if (port->serial->disconnected) | ||
463 | return -EIO; | ||
464 | |||
463 | if ((prev.rng == priv->icount.rng) && | 465 | if ((prev.rng == priv->icount.rng) && |
464 | (prev.dsr == priv->icount.dsr) && | 466 | (prev.dsr == priv->icount.dsr) && |
465 | (prev.dcd == priv->icount.dcd) && | 467 | (prev.dcd == priv->icount.dcd) && |
@@ -580,7 +582,7 @@ static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr) | |||
580 | priv->icount.dcd++; | 582 | priv->icount.dcd++; |
581 | if (msr & UART_MSR_TERI) | 583 | if (msr & UART_MSR_TERI) |
582 | priv->icount.rng++; | 584 | priv->icount.rng++; |
583 | wake_up_interruptible(&priv->delta_msr_wait); | 585 | wake_up_interruptible(&port->delta_msr_wait); |
584 | } | 586 | } |
585 | } | 587 | } |
586 | 588 | ||
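The ark3116 change, and the ch341, cypress_m8, f81232, ftdi_sio, io_edgeport, io_ti and mct_u232 changes that follow, all move the modem-status wait onto the usb_serial_port's own delta_msr_wait queue and add a disconnect check, so a process blocked in TIOCMIWAIT is woken and gets -EIO when the adapter is unplugged instead of sleeping forever. The common loop shape, as a sketch built on the usb-serial core's field names:

    #include <linux/sched.h>
    #include <linux/usb/serial.h>

    static int example_wait_modem_change(struct usb_serial_port *port)
    {
        for (;;) {
            interruptible_sleep_on(&port->delta_msr_wait);
            if (signal_pending(current))
                return -ERESTARTSYS;    /* a signal woke us */
            if (port->serial->disconnected)
                return -EIO;            /* woken by device removal */
            /* ... compare saved and current modem-status counters and
             *     return 0 once a requested line has changed ... */
        }
    }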
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index d255f66e708e..07d4650a32ab 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c | |||
@@ -80,7 +80,6 @@ MODULE_DEVICE_TABLE(usb, id_table); | |||
80 | 80 | ||
81 | struct ch341_private { | 81 | struct ch341_private { |
82 | spinlock_t lock; /* access lock */ | 82 | spinlock_t lock; /* access lock */ |
83 | wait_queue_head_t delta_msr_wait; /* wait queue for modem status */ | ||
84 | unsigned baud_rate; /* set baud rate */ | 83 | unsigned baud_rate; /* set baud rate */ |
85 | u8 line_control; /* set line control value RTS/DTR */ | 84 | u8 line_control; /* set line control value RTS/DTR */ |
86 | u8 line_status; /* active status of modem control inputs */ | 85 | u8 line_status; /* active status of modem control inputs */ |
@@ -252,7 +251,6 @@ static int ch341_port_probe(struct usb_serial_port *port) | |||
252 | return -ENOMEM; | 251 | return -ENOMEM; |
253 | 252 | ||
254 | spin_lock_init(&priv->lock); | 253 | spin_lock_init(&priv->lock); |
255 | init_waitqueue_head(&priv->delta_msr_wait); | ||
256 | priv->baud_rate = DEFAULT_BAUD_RATE; | 254 | priv->baud_rate = DEFAULT_BAUD_RATE; |
257 | priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR; | 255 | priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR; |
258 | 256 | ||
@@ -298,7 +296,7 @@ static void ch341_dtr_rts(struct usb_serial_port *port, int on) | |||
298 | priv->line_control &= ~(CH341_BIT_RTS | CH341_BIT_DTR); | 296 | priv->line_control &= ~(CH341_BIT_RTS | CH341_BIT_DTR); |
299 | spin_unlock_irqrestore(&priv->lock, flags); | 297 | spin_unlock_irqrestore(&priv->lock, flags); |
300 | ch341_set_handshake(port->serial->dev, priv->line_control); | 298 | ch341_set_handshake(port->serial->dev, priv->line_control); |
301 | wake_up_interruptible(&priv->delta_msr_wait); | 299 | wake_up_interruptible(&port->delta_msr_wait); |
302 | } | 300 | } |
303 | 301 | ||
304 | static void ch341_close(struct usb_serial_port *port) | 302 | static void ch341_close(struct usb_serial_port *port) |
@@ -491,7 +489,7 @@ static void ch341_read_int_callback(struct urb *urb) | |||
491 | tty_kref_put(tty); | 489 | tty_kref_put(tty); |
492 | } | 490 | } |
493 | 491 | ||
494 | wake_up_interruptible(&priv->delta_msr_wait); | 492 | wake_up_interruptible(&port->delta_msr_wait); |
495 | } | 493 | } |
496 | 494 | ||
497 | exit: | 495 | exit: |
@@ -517,11 +515,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
517 | spin_unlock_irqrestore(&priv->lock, flags); | 515 | spin_unlock_irqrestore(&priv->lock, flags); |
518 | 516 | ||
519 | while (!multi_change) { | 517 | while (!multi_change) { |
520 | interruptible_sleep_on(&priv->delta_msr_wait); | 518 | interruptible_sleep_on(&port->delta_msr_wait); |
521 | /* see if a signal did it */ | 519 | /* see if a signal did it */ |
522 | if (signal_pending(current)) | 520 | if (signal_pending(current)) |
523 | return -ERESTARTSYS; | 521 | return -ERESTARTSYS; |
524 | 522 | ||
523 | if (port->serial->disconnected) | ||
524 | return -EIO; | ||
525 | |||
525 | spin_lock_irqsave(&priv->lock, flags); | 526 | spin_lock_irqsave(&priv->lock, flags); |
526 | status = priv->line_status; | 527 | status = priv->line_status; |
527 | multi_change = priv->multi_status_change; | 528 | multi_change = priv->multi_status_change; |
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index 8efa19d0e9fb..ba7352e4187e 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c | |||
@@ -111,7 +111,6 @@ struct cypress_private { | |||
111 | int baud_rate; /* stores current baud rate in | 111 | int baud_rate; /* stores current baud rate in |
112 | integer form */ | 112 | integer form */ |
113 | int isthrottled; /* if throttled, discard reads */ | 113 | int isthrottled; /* if throttled, discard reads */ |
114 | wait_queue_head_t delta_msr_wait; /* used for TIOCMIWAIT */ | ||
115 | char prev_status, diff_status; /* used for TIOCMIWAIT */ | 114 | char prev_status, diff_status; /* used for TIOCMIWAIT */ |
116 | /* we pass a pointer to this as the argument sent to | 115 | /* we pass a pointer to this as the argument sent to |
117 | cypress_set_termios old_termios */ | 116 | cypress_set_termios old_termios */ |
@@ -449,7 +448,6 @@ static int cypress_generic_port_probe(struct usb_serial_port *port) | |||
449 | kfree(priv); | 448 | kfree(priv); |
450 | return -ENOMEM; | 449 | return -ENOMEM; |
451 | } | 450 | } |
452 | init_waitqueue_head(&priv->delta_msr_wait); | ||
453 | 451 | ||
454 | usb_reset_configuration(serial->dev); | 452 | usb_reset_configuration(serial->dev); |
455 | 453 | ||
@@ -868,12 +866,16 @@ static int cypress_ioctl(struct tty_struct *tty, | |||
868 | switch (cmd) { | 866 | switch (cmd) { |
869 | /* This code comes from drivers/char/serial.c and ftdi_sio.c */ | 867 | /* This code comes from drivers/char/serial.c and ftdi_sio.c */ |
870 | case TIOCMIWAIT: | 868 | case TIOCMIWAIT: |
871 | while (priv != NULL) { | 869 | for (;;) { |
872 | interruptible_sleep_on(&priv->delta_msr_wait); | 870 | interruptible_sleep_on(&port->delta_msr_wait); |
873 | /* see if a signal did it */ | 871 | /* see if a signal did it */ |
874 | if (signal_pending(current)) | 872 | if (signal_pending(current)) |
875 | return -ERESTARTSYS; | 873 | return -ERESTARTSYS; |
876 | else { | 874 | |
875 | if (port->serial->disconnected) | ||
876 | return -EIO; | ||
877 | |||
878 | { | ||
877 | char diff = priv->diff_status; | 879 | char diff = priv->diff_status; |
878 | if (diff == 0) | 880 | if (diff == 0) |
879 | return -EIO; /* no change => error */ | 881 | return -EIO; /* no change => error */ |
@@ -1187,7 +1189,7 @@ static void cypress_read_int_callback(struct urb *urb) | |||
1187 | if (priv->current_status != priv->prev_status) { | 1189 | if (priv->current_status != priv->prev_status) { |
1188 | priv->diff_status |= priv->current_status ^ | 1190 | priv->diff_status |= priv->current_status ^ |
1189 | priv->prev_status; | 1191 | priv->prev_status; |
1190 | wake_up_interruptible(&priv->delta_msr_wait); | 1192 | wake_up_interruptible(&port->delta_msr_wait); |
1191 | priv->prev_status = priv->current_status; | 1193 | priv->prev_status = priv->current_status; |
1192 | } | 1194 | } |
1193 | spin_unlock_irqrestore(&priv->lock, flags); | 1195 | spin_unlock_irqrestore(&priv->lock, flags); |
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c index b1b2dc64b50b..a172ad5c5ce8 100644 --- a/drivers/usb/serial/f81232.c +++ b/drivers/usb/serial/f81232.c | |||
@@ -47,7 +47,6 @@ MODULE_DEVICE_TABLE(usb, id_table); | |||
47 | 47 | ||
48 | struct f81232_private { | 48 | struct f81232_private { |
49 | spinlock_t lock; | 49 | spinlock_t lock; |
50 | wait_queue_head_t delta_msr_wait; | ||
51 | u8 line_control; | 50 | u8 line_control; |
52 | u8 line_status; | 51 | u8 line_status; |
53 | }; | 52 | }; |
@@ -111,7 +110,7 @@ static void f81232_process_read_urb(struct urb *urb) | |||
111 | line_status = priv->line_status; | 110 | line_status = priv->line_status; |
112 | priv->line_status &= ~UART_STATE_TRANSIENT_MASK; | 111 | priv->line_status &= ~UART_STATE_TRANSIENT_MASK; |
113 | spin_unlock_irqrestore(&priv->lock, flags); | 112 | spin_unlock_irqrestore(&priv->lock, flags); |
114 | wake_up_interruptible(&priv->delta_msr_wait); | 113 | wake_up_interruptible(&port->delta_msr_wait); |
115 | 114 | ||
116 | if (!urb->actual_length) | 115 | if (!urb->actual_length) |
117 | return; | 116 | return; |
@@ -256,11 +255,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
256 | spin_unlock_irqrestore(&priv->lock, flags); | 255 | spin_unlock_irqrestore(&priv->lock, flags); |
257 | 256 | ||
258 | while (1) { | 257 | while (1) { |
259 | interruptible_sleep_on(&priv->delta_msr_wait); | 258 | interruptible_sleep_on(&port->delta_msr_wait); |
260 | /* see if a signal did it */ | 259 | /* see if a signal did it */ |
261 | if (signal_pending(current)) | 260 | if (signal_pending(current)) |
262 | return -ERESTARTSYS; | 261 | return -ERESTARTSYS; |
263 | 262 | ||
263 | if (port->serial->disconnected) | ||
264 | return -EIO; | ||
265 | |||
264 | spin_lock_irqsave(&priv->lock, flags); | 266 | spin_lock_irqsave(&priv->lock, flags); |
265 | status = priv->line_status; | 267 | status = priv->line_status; |
266 | spin_unlock_irqrestore(&priv->lock, flags); | 268 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -322,7 +324,6 @@ static int f81232_port_probe(struct usb_serial_port *port) | |||
322 | return -ENOMEM; | 324 | return -ENOMEM; |
323 | 325 | ||
324 | spin_lock_init(&priv->lock); | 326 | spin_lock_init(&priv->lock); |
325 | init_waitqueue_head(&priv->delta_msr_wait); | ||
326 | 327 | ||
327 | usb_set_serial_port_data(port, priv); | 328 | usb_set_serial_port_data(port, priv); |
328 | 329 | ||
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index edd162df49ca..d4809d551473 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -69,9 +69,7 @@ struct ftdi_private { | |||
69 | int flags; /* some ASYNC_xxxx flags are supported */ | 69 | int flags; /* some ASYNC_xxxx flags are supported */ |
70 | unsigned long last_dtr_rts; /* saved modem control outputs */ | 70 | unsigned long last_dtr_rts; /* saved modem control outputs */ |
71 | struct async_icount icount; | 71 | struct async_icount icount; |
72 | wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ | ||
73 | char prev_status; /* Used for TIOCMIWAIT */ | 72 | char prev_status; /* Used for TIOCMIWAIT */ |
74 | bool dev_gone; /* Used to abort TIOCMIWAIT */ | ||
75 | char transmit_empty; /* If transmitter is empty or not */ | 73 | char transmit_empty; /* If transmitter is empty or not */ |
76 | __u16 interface; /* FT2232C, FT2232H or FT4232H port interface | 74 | __u16 interface; /* FT2232C, FT2232H or FT4232H port interface |
77 | (0 for FT232/245) */ | 75 | (0 for FT232/245) */ |
@@ -1691,10 +1689,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port) | |||
1691 | 1689 | ||
1692 | kref_init(&priv->kref); | 1690 | kref_init(&priv->kref); |
1693 | mutex_init(&priv->cfg_lock); | 1691 | mutex_init(&priv->cfg_lock); |
1694 | init_waitqueue_head(&priv->delta_msr_wait); | ||
1695 | 1692 | ||
1696 | priv->flags = ASYNC_LOW_LATENCY; | 1693 | priv->flags = ASYNC_LOW_LATENCY; |
1697 | priv->dev_gone = false; | ||
1698 | 1694 | ||
1699 | if (quirk && quirk->port_probe) | 1695 | if (quirk && quirk->port_probe) |
1700 | quirk->port_probe(priv); | 1696 | quirk->port_probe(priv); |
@@ -1840,8 +1836,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port) | |||
1840 | { | 1836 | { |
1841 | struct ftdi_private *priv = usb_get_serial_port_data(port); | 1837 | struct ftdi_private *priv = usb_get_serial_port_data(port); |
1842 | 1838 | ||
1843 | priv->dev_gone = true; | 1839 | wake_up_interruptible(&port->delta_msr_wait); |
1844 | wake_up_interruptible_all(&priv->delta_msr_wait); | ||
1845 | 1840 | ||
1846 | remove_sysfs_attrs(port); | 1841 | remove_sysfs_attrs(port); |
1847 | 1842 | ||
@@ -1989,7 +1984,7 @@ static int ftdi_process_packet(struct usb_serial_port *port, | |||
1989 | if (diff_status & FTDI_RS0_RLSD) | 1984 | if (diff_status & FTDI_RS0_RLSD) |
1990 | priv->icount.dcd++; | 1985 | priv->icount.dcd++; |
1991 | 1986 | ||
1992 | wake_up_interruptible_all(&priv->delta_msr_wait); | 1987 | wake_up_interruptible(&port->delta_msr_wait); |
1993 | priv->prev_status = status; | 1988 | priv->prev_status = status; |
1994 | } | 1989 | } |
1995 | 1990 | ||
@@ -2440,11 +2435,15 @@ static int ftdi_ioctl(struct tty_struct *tty, | |||
2440 | */ | 2435 | */ |
2441 | case TIOCMIWAIT: | 2436 | case TIOCMIWAIT: |
2442 | cprev = priv->icount; | 2437 | cprev = priv->icount; |
2443 | while (!priv->dev_gone) { | 2438 | for (;;) { |
2444 | interruptible_sleep_on(&priv->delta_msr_wait); | 2439 | interruptible_sleep_on(&port->delta_msr_wait); |
2445 | /* see if a signal did it */ | 2440 | /* see if a signal did it */ |
2446 | if (signal_pending(current)) | 2441 | if (signal_pending(current)) |
2447 | return -ERESTARTSYS; | 2442 | return -ERESTARTSYS; |
2443 | |||
2444 | if (port->serial->disconnected) | ||
2445 | return -EIO; | ||
2446 | |||
2448 | cnow = priv->icount; | 2447 | cnow = priv->icount; |
2449 | if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || | 2448 | if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || |
2450 | ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || | 2449 | ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || |
@@ -2454,8 +2453,6 @@ static int ftdi_ioctl(struct tty_struct *tty, | |||
2454 | } | 2453 | } |
2455 | cprev = cnow; | 2454 | cprev = cnow; |
2456 | } | 2455 | } |
2457 | return -EIO; | ||
2458 | break; | ||
2459 | case TIOCSERGETLSR: | 2456 | case TIOCSERGETLSR: |
2460 | return get_lsr_info(port, (struct serial_struct __user *)arg); | 2457 | return get_lsr_info(port, (struct serial_struct __user *)arg); |
2461 | break; | 2458 | break; |
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 1a07b12ef341..81caf5623ee2 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c | |||
@@ -956,10 +956,7 @@ static void garmin_close(struct usb_serial_port *port) | |||
956 | if (!serial) | 956 | if (!serial) |
957 | return; | 957 | return; |
958 | 958 | ||
959 | mutex_lock(&port->serial->disc_mutex); | 959 | garmin_clear(garmin_data_p); |
960 | |||
961 | if (!port->serial->disconnected) | ||
962 | garmin_clear(garmin_data_p); | ||
963 | 960 | ||
964 | /* shutdown our urbs */ | 961 | /* shutdown our urbs */ |
965 | usb_kill_urb(port->read_urb); | 962 | usb_kill_urb(port->read_urb); |
@@ -968,8 +965,6 @@ static void garmin_close(struct usb_serial_port *port) | |||
968 | /* keep reset state so we know that we must start a new session */ | 965 | /* keep reset state so we know that we must start a new session */ |
969 | if (garmin_data_p->state != STATE_RESET) | 966 | if (garmin_data_p->state != STATE_RESET) |
970 | garmin_data_p->state = STATE_DISCONNECTED; | 967 | garmin_data_p->state = STATE_DISCONNECTED; |
971 | |||
972 | mutex_unlock(&port->serial->disc_mutex); | ||
973 | } | 968 | } |
974 | 969 | ||
975 | 970 | ||
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index b00e5cbf741f..efd8b978128c 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c | |||
@@ -110,7 +110,6 @@ struct edgeport_port { | |||
110 | wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ | 110 | wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ |
111 | wait_queue_head_t wait_open; /* for handling sleeping while waiting for open to finish */ | 111 | wait_queue_head_t wait_open; /* for handling sleeping while waiting for open to finish */ |
112 | wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */ | 112 | wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */ |
113 | wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ | ||
114 | 113 | ||
115 | struct async_icount icount; | 114 | struct async_icount icount; |
116 | struct usb_serial_port *port; /* loop back to the owner of this object */ | 115 | struct usb_serial_port *port; /* loop back to the owner of this object */ |
@@ -884,7 +883,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
884 | /* initialize our wait queues */ | 883 | /* initialize our wait queues */ |
885 | init_waitqueue_head(&edge_port->wait_open); | 884 | init_waitqueue_head(&edge_port->wait_open); |
886 | init_waitqueue_head(&edge_port->wait_chase); | 885 | init_waitqueue_head(&edge_port->wait_chase); |
887 | init_waitqueue_head(&edge_port->delta_msr_wait); | ||
888 | init_waitqueue_head(&edge_port->wait_command); | 886 | init_waitqueue_head(&edge_port->wait_command); |
889 | 887 | ||
890 | /* initialize our icount structure */ | 888 | /* initialize our icount structure */ |
@@ -1669,13 +1667,17 @@ static int edge_ioctl(struct tty_struct *tty, | |||
1669 | dev_dbg(&port->dev, "%s (%d) TIOCMIWAIT\n", __func__, port->number); | 1667 | dev_dbg(&port->dev, "%s (%d) TIOCMIWAIT\n", __func__, port->number); |
1670 | cprev = edge_port->icount; | 1668 | cprev = edge_port->icount; |
1671 | while (1) { | 1669 | while (1) { |
1672 | prepare_to_wait(&edge_port->delta_msr_wait, | 1670 | prepare_to_wait(&port->delta_msr_wait, |
1673 | &wait, TASK_INTERRUPTIBLE); | 1671 | &wait, TASK_INTERRUPTIBLE); |
1674 | schedule(); | 1672 | schedule(); |
1675 | finish_wait(&edge_port->delta_msr_wait, &wait); | 1673 | finish_wait(&port->delta_msr_wait, &wait); |
1676 | /* see if a signal did it */ | 1674 | /* see if a signal did it */ |
1677 | if (signal_pending(current)) | 1675 | if (signal_pending(current)) |
1678 | return -ERESTARTSYS; | 1676 | return -ERESTARTSYS; |
1677 | |||
1678 | if (port->serial->disconnected) | ||
1679 | return -EIO; | ||
1680 | |||
1679 | cnow = edge_port->icount; | 1681 | cnow = edge_port->icount; |
1680 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && | 1682 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && |
1681 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) | 1683 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) |
@@ -2051,7 +2053,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr) | |||
2051 | icount->dcd++; | 2053 | icount->dcd++; |
2052 | if (newMsr & EDGEPORT_MSR_DELTA_RI) | 2054 | if (newMsr & EDGEPORT_MSR_DELTA_RI) |
2053 | icount->rng++; | 2055 | icount->rng++; |
2054 | wake_up_interruptible(&edge_port->delta_msr_wait); | 2056 | wake_up_interruptible(&edge_port->port->delta_msr_wait); |
2055 | } | 2057 | } |
2056 | 2058 | ||
2057 | /* Save the new modem status */ | 2059 | /* Save the new modem status */ |
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index c23776679f70..7777172206de 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c | |||
@@ -87,9 +87,6 @@ struct edgeport_port { | |||
87 | int close_pending; | 87 | int close_pending; |
88 | int lsr_event; | 88 | int lsr_event; |
89 | struct async_icount icount; | 89 | struct async_icount icount; |
90 | wait_queue_head_t delta_msr_wait; /* for handling sleeping while | ||
91 | waiting for msr change to | ||
92 | happen */ | ||
93 | struct edgeport_serial *edge_serial; | 90 | struct edgeport_serial *edge_serial; |
94 | struct usb_serial_port *port; | 91 | struct usb_serial_port *port; |
95 | __u8 bUartMode; /* Port type, 0: RS232, etc. */ | 92 | __u8 bUartMode; /* Port type, 0: RS232, etc. */ |
@@ -1459,7 +1456,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 msr) | |||
1459 | icount->dcd++; | 1456 | icount->dcd++; |
1460 | if (msr & EDGEPORT_MSR_DELTA_RI) | 1457 | if (msr & EDGEPORT_MSR_DELTA_RI) |
1461 | icount->rng++; | 1458 | icount->rng++; |
1462 | wake_up_interruptible(&edge_port->delta_msr_wait); | 1459 | wake_up_interruptible(&edge_port->port->delta_msr_wait); |
1463 | } | 1460 | } |
1464 | 1461 | ||
1465 | /* Save the new modem status */ | 1462 | /* Save the new modem status */ |
@@ -1754,7 +1751,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
1754 | dev = port->serial->dev; | 1751 | dev = port->serial->dev; |
1755 | 1752 | ||
1756 | memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount)); | 1753 | memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount)); |
1757 | init_waitqueue_head(&edge_port->delta_msr_wait); | ||
1758 | 1754 | ||
1759 | /* turn off loopback */ | 1755 | /* turn off loopback */ |
1760 | status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0); | 1756 | status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0); |
@@ -2434,10 +2430,14 @@ static int edge_ioctl(struct tty_struct *tty, | |||
2434 | dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__); | 2430 | dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__); |
2435 | cprev = edge_port->icount; | 2431 | cprev = edge_port->icount; |
2436 | while (1) { | 2432 | while (1) { |
2437 | interruptible_sleep_on(&edge_port->delta_msr_wait); | 2433 | interruptible_sleep_on(&port->delta_msr_wait); |
2438 | /* see if a signal did it */ | 2434 | /* see if a signal did it */ |
2439 | if (signal_pending(current)) | 2435 | if (signal_pending(current)) |
2440 | return -ERESTARTSYS; | 2436 | return -ERESTARTSYS; |
2437 | |||
2438 | if (port->serial->disconnected) | ||
2439 | return -EIO; | ||
2440 | |||
2441 | cnow = edge_port->icount; | 2441 | cnow = edge_port->icount; |
2442 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && | 2442 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && |
2443 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) | 2443 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) |
@@ -2649,6 +2649,7 @@ static struct usb_serial_driver edgeport_2port_device = { | |||
2649 | .set_termios = edge_set_termios, | 2649 | .set_termios = edge_set_termios, |
2650 | .tiocmget = edge_tiocmget, | 2650 | .tiocmget = edge_tiocmget, |
2651 | .tiocmset = edge_tiocmset, | 2651 | .tiocmset = edge_tiocmset, |
2652 | .get_icount = edge_get_icount, | ||
2652 | .write = edge_write, | 2653 | .write = edge_write, |
2653 | .write_room = edge_write_room, | 2654 | .write_room = edge_write_room, |
2654 | .chars_in_buffer = edge_chars_in_buffer, | 2655 | .chars_in_buffer = edge_chars_in_buffer, |
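The io_ti.c hunks above show the pattern repeated across the USB serial drivers in this diff: the driver-private MSR wait queue is dropped in favour of the delta_msr_wait queue embedded in struct usb_serial_port, and the TIOCMIWAIT loop re-checks for disconnect after every wake-up so it fails with -EIO instead of sleeping forever on an unplugged device. A minimal sketch of the resulting loop, assuming the 3.9-era helpers used in these hunks (the icount bookkeeping stays driver-private; this is an illustration, not a drop-in):

#include <linux/sched.h>
#include <linux/serial.h>		/* struct async_icount */
#include <linux/usb/serial.h>		/* struct usb_serial_port */
#include <linux/wait.h>

static int wait_msr_change_sketch(struct usb_serial_port *port,
				  struct async_icount *icount)
{
	struct async_icount prev = *icount;

	while (1) {
		/* Sleep until the interrupt handler does
		 * wake_up_interruptible(&port->delta_msr_wait). */
		interruptible_sleep_on(&port->delta_msr_wait);

		if (signal_pending(current))
			return -ERESTARTSYS;	/* woken by a signal */
		if (port->serial->disconnected)
			return -EIO;		/* device unplugged while we slept */

		/* Success once any modem status line actually changed. */
		if (icount->rng != prev.rng || icount->dsr != prev.dsr ||
		    icount->dcd != prev.dcd || icount->cts != prev.cts)
			return 0;
	}
}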
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index a64d420f687b..06d5a60be2c4 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c | |||
@@ -114,8 +114,6 @@ struct mct_u232_private { | |||
114 | unsigned char last_msr; /* Modem Status Register */ | 114 | unsigned char last_msr; /* Modem Status Register */ |
115 | unsigned int rx_flags; /* Throttling flags */ | 115 | unsigned int rx_flags; /* Throttling flags */ |
116 | struct async_icount icount; | 116 | struct async_icount icount; |
117 | wait_queue_head_t msr_wait; /* for handling sleeping while waiting | ||
118 | for msr change to happen */ | ||
119 | }; | 117 | }; |
120 | 118 | ||
121 | #define THROTTLED 0x01 | 119 | #define THROTTLED 0x01 |
@@ -409,7 +407,6 @@ static int mct_u232_port_probe(struct usb_serial_port *port) | |||
409 | return -ENOMEM; | 407 | return -ENOMEM; |
410 | 408 | ||
411 | spin_lock_init(&priv->lock); | 409 | spin_lock_init(&priv->lock); |
412 | init_waitqueue_head(&priv->msr_wait); | ||
413 | 410 | ||
414 | usb_set_serial_port_data(port, priv); | 411 | usb_set_serial_port_data(port, priv); |
415 | 412 | ||
@@ -601,7 +598,7 @@ static void mct_u232_read_int_callback(struct urb *urb) | |||
601 | tty_kref_put(tty); | 598 | tty_kref_put(tty); |
602 | } | 599 | } |
603 | #endif | 600 | #endif |
604 | wake_up_interruptible(&priv->msr_wait); | 601 | wake_up_interruptible(&port->delta_msr_wait); |
605 | spin_unlock_irqrestore(&priv->lock, flags); | 602 | spin_unlock_irqrestore(&priv->lock, flags); |
606 | exit: | 603 | exit: |
607 | retval = usb_submit_urb(urb, GFP_ATOMIC); | 604 | retval = usb_submit_urb(urb, GFP_ATOMIC); |
@@ -810,13 +807,17 @@ static int mct_u232_ioctl(struct tty_struct *tty, | |||
810 | cprev = mct_u232_port->icount; | 807 | cprev = mct_u232_port->icount; |
811 | spin_unlock_irqrestore(&mct_u232_port->lock, flags); | 808 | spin_unlock_irqrestore(&mct_u232_port->lock, flags); |
812 | for ( ; ; ) { | 809 | for ( ; ; ) { |
813 | prepare_to_wait(&mct_u232_port->msr_wait, | 810 | prepare_to_wait(&port->delta_msr_wait, |
814 | &wait, TASK_INTERRUPTIBLE); | 811 | &wait, TASK_INTERRUPTIBLE); |
815 | schedule(); | 812 | schedule(); |
816 | finish_wait(&mct_u232_port->msr_wait, &wait); | 813 | finish_wait(&port->delta_msr_wait, &wait); |
817 | /* see if a signal did it */ | 814 | /* see if a signal did it */ |
818 | if (signal_pending(current)) | 815 | if (signal_pending(current)) |
819 | return -ERESTARTSYS; | 816 | return -ERESTARTSYS; |
817 | |||
818 | if (port->serial->disconnected) | ||
819 | return -EIO; | ||
820 | |||
820 | spin_lock_irqsave(&mct_u232_port->lock, flags); | 821 | spin_lock_irqsave(&mct_u232_port->lock, flags); |
821 | cnow = mct_u232_port->icount; | 822 | cnow = mct_u232_port->icount; |
822 | spin_unlock_irqrestore(&mct_u232_port->lock, flags); | 823 | spin_unlock_irqrestore(&mct_u232_port->lock, flags); |
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 809fb329eca5..b8051fa61911 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -219,7 +219,6 @@ struct moschip_port { | |||
219 | char open; | 219 | char open; |
220 | char open_ports; | 220 | char open_ports; |
221 | wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ | 221 | wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ |
222 | wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ | ||
223 | int delta_msr_cond; | 222 | int delta_msr_cond; |
224 | struct async_icount icount; | 223 | struct async_icount icount; |
225 | struct usb_serial_port *port; /* loop back to the owner of this object */ | 224 | struct usb_serial_port *port; /* loop back to the owner of this object */ |
@@ -423,6 +422,9 @@ static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr) | |||
423 | icount->rng++; | 422 | icount->rng++; |
424 | smp_wmb(); | 423 | smp_wmb(); |
425 | } | 424 | } |
425 | |||
426 | mos7840_port->delta_msr_cond = 1; | ||
427 | wake_up_interruptible(&port->port->delta_msr_wait); | ||
426 | } | 428 | } |
427 | } | 429 | } |
428 | 430 | ||
@@ -1127,7 +1129,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
1127 | 1129 | ||
1128 | /* initialize our wait queues */ | 1130 | /* initialize our wait queues */ |
1129 | init_waitqueue_head(&mos7840_port->wait_chase); | 1131 | init_waitqueue_head(&mos7840_port->wait_chase); |
1130 | init_waitqueue_head(&mos7840_port->delta_msr_wait); | ||
1131 | 1132 | ||
1132 | /* initialize our icount structure */ | 1133 | /* initialize our icount structure */ |
1133 | memset(&(mos7840_port->icount), 0x00, sizeof(mos7840_port->icount)); | 1134 | memset(&(mos7840_port->icount), 0x00, sizeof(mos7840_port->icount)); |
@@ -2017,8 +2018,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty, | |||
2017 | mos7840_port->read_urb_busy = false; | 2018 | mos7840_port->read_urb_busy = false; |
2018 | } | 2019 | } |
2019 | } | 2020 | } |
2020 | wake_up(&mos7840_port->delta_msr_wait); | ||
2021 | mos7840_port->delta_msr_cond = 1; | ||
2022 | dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is End %x\n", __func__, | 2021 | dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is End %x\n", __func__, |
2023 | mos7840_port->shadowLCR); | 2022 | mos7840_port->shadowLCR); |
2024 | } | 2023 | } |
@@ -2219,13 +2218,18 @@ static int mos7840_ioctl(struct tty_struct *tty, | |||
2219 | while (1) { | 2218 | while (1) { |
2220 | /* interruptible_sleep_on(&mos7840_port->delta_msr_wait); */ | 2219 | /* interruptible_sleep_on(&mos7840_port->delta_msr_wait); */ |
2221 | mos7840_port->delta_msr_cond = 0; | 2220 | mos7840_port->delta_msr_cond = 0; |
2222 | wait_event_interruptible(mos7840_port->delta_msr_wait, | 2221 | wait_event_interruptible(port->delta_msr_wait, |
2223 | (mos7840_port-> | 2222 | (port->serial->disconnected || |
2223 | mos7840_port-> | ||
2224 | delta_msr_cond == 1)); | 2224 | delta_msr_cond == 1)); |
2225 | 2225 | ||
2226 | /* see if a signal did it */ | 2226 | /* see if a signal did it */ |
2227 | if (signal_pending(current)) | 2227 | if (signal_pending(current)) |
2228 | return -ERESTARTSYS; | 2228 | return -ERESTARTSYS; |
2229 | |||
2230 | if (port->serial->disconnected) | ||
2231 | return -EIO; | ||
2232 | |||
2229 | cnow = mos7840_port->icount; | 2233 | cnow = mos7840_port->icount; |
2230 | smp_rmb(); | 2234 | smp_rmb(); |
2231 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && | 2235 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && |
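mos7840 waits with wait_event_interruptible() rather than a bare sleep, so the disconnect test also has to be part of the wait condition; otherwise the wake-up issued when the device goes away would not end the sleep. A condensed sketch of the waker/waiter pairing these hunks converge on (port is the struct usb_serial_port, delta_msr_cond the driver-private flag kept in struct moschip_port):

/* Waker side (MSR handler): record the change, then wake the queue. */
mos7840_port->delta_msr_cond = 1;
wake_up_interruptible(&port->delta_msr_wait);

/* Waiter side (TIOCMIWAIT): disconnect is part of the wait condition. */
mos7840_port->delta_msr_cond = 0;
wait_event_interruptible(port->delta_msr_wait,
			 port->serial->disconnected ||
			 mos7840_port->delta_msr_cond == 1);
if (signal_pending(current))
	return -ERESTARTSYS;
if (port->serial->disconnected)
	return -EIO;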
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c index a958fd41b5b3..87c71ccfee87 100644 --- a/drivers/usb/serial/oti6858.c +++ b/drivers/usb/serial/oti6858.c | |||
@@ -188,7 +188,6 @@ struct oti6858_private { | |||
188 | u8 setup_done; | 188 | u8 setup_done; |
189 | struct delayed_work delayed_setup_work; | 189 | struct delayed_work delayed_setup_work; |
190 | 190 | ||
191 | wait_queue_head_t intr_wait; | ||
192 | struct usb_serial_port *port; /* USB port with which associated */ | 191 | struct usb_serial_port *port; /* USB port with which associated */ |
193 | }; | 192 | }; |
194 | 193 | ||
@@ -339,7 +338,6 @@ static int oti6858_port_probe(struct usb_serial_port *port) | |||
339 | return -ENOMEM; | 338 | return -ENOMEM; |
340 | 339 | ||
341 | spin_lock_init(&priv->lock); | 340 | spin_lock_init(&priv->lock); |
342 | init_waitqueue_head(&priv->intr_wait); | ||
343 | priv->port = port; | 341 | priv->port = port; |
344 | INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line); | 342 | INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line); |
345 | INIT_DELAYED_WORK(&priv->delayed_write_work, send_data); | 343 | INIT_DELAYED_WORK(&priv->delayed_write_work, send_data); |
@@ -664,11 +662,15 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
664 | spin_unlock_irqrestore(&priv->lock, flags); | 662 | spin_unlock_irqrestore(&priv->lock, flags); |
665 | 663 | ||
666 | while (1) { | 664 | while (1) { |
667 | wait_event_interruptible(priv->intr_wait, | 665 | wait_event_interruptible(port->delta_msr_wait, |
666 | port->serial->disconnected || | ||
668 | priv->status.pin_state != prev); | 667 | priv->status.pin_state != prev); |
669 | if (signal_pending(current)) | 668 | if (signal_pending(current)) |
670 | return -ERESTARTSYS; | 669 | return -ERESTARTSYS; |
671 | 670 | ||
671 | if (port->serial->disconnected) | ||
672 | return -EIO; | ||
673 | |||
672 | spin_lock_irqsave(&priv->lock, flags); | 674 | spin_lock_irqsave(&priv->lock, flags); |
673 | status = priv->status.pin_state & PIN_MASK; | 675 | status = priv->status.pin_state & PIN_MASK; |
674 | spin_unlock_irqrestore(&priv->lock, flags); | 676 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -763,7 +765,7 @@ static void oti6858_read_int_callback(struct urb *urb) | |||
763 | 765 | ||
764 | if (!priv->transient) { | 766 | if (!priv->transient) { |
765 | if (xs->pin_state != priv->status.pin_state) | 767 | if (xs->pin_state != priv->status.pin_state) |
766 | wake_up_interruptible(&priv->intr_wait); | 768 | wake_up_interruptible(&port->delta_msr_wait); |
767 | memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE); | 769 | memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE); |
768 | } | 770 | } |
769 | 771 | ||
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 54adc9125e5c..3b10018d89a3 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
@@ -139,7 +139,6 @@ struct pl2303_serial_private { | |||
139 | 139 | ||
140 | struct pl2303_private { | 140 | struct pl2303_private { |
141 | spinlock_t lock; | 141 | spinlock_t lock; |
142 | wait_queue_head_t delta_msr_wait; | ||
143 | u8 line_control; | 142 | u8 line_control; |
144 | u8 line_status; | 143 | u8 line_status; |
145 | }; | 144 | }; |
@@ -233,7 +232,6 @@ static int pl2303_port_probe(struct usb_serial_port *port) | |||
233 | return -ENOMEM; | 232 | return -ENOMEM; |
234 | 233 | ||
235 | spin_lock_init(&priv->lock); | 234 | spin_lock_init(&priv->lock); |
236 | init_waitqueue_head(&priv->delta_msr_wait); | ||
237 | 235 | ||
238 | usb_set_serial_port_data(port, priv); | 236 | usb_set_serial_port_data(port, priv); |
239 | 237 | ||
@@ -607,11 +605,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
607 | spin_unlock_irqrestore(&priv->lock, flags); | 605 | spin_unlock_irqrestore(&priv->lock, flags); |
608 | 606 | ||
609 | while (1) { | 607 | while (1) { |
610 | interruptible_sleep_on(&priv->delta_msr_wait); | 608 | interruptible_sleep_on(&port->delta_msr_wait); |
611 | /* see if a signal did it */ | 609 | /* see if a signal did it */ |
612 | if (signal_pending(current)) | 610 | if (signal_pending(current)) |
613 | return -ERESTARTSYS; | 611 | return -ERESTARTSYS; |
614 | 612 | ||
613 | if (port->serial->disconnected) | ||
614 | return -EIO; | ||
615 | |||
615 | spin_lock_irqsave(&priv->lock, flags); | 616 | spin_lock_irqsave(&priv->lock, flags); |
616 | status = priv->line_status; | 617 | status = priv->line_status; |
617 | spin_unlock_irqrestore(&priv->lock, flags); | 618 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -719,7 +720,7 @@ static void pl2303_update_line_status(struct usb_serial_port *port, | |||
719 | spin_unlock_irqrestore(&priv->lock, flags); | 720 | spin_unlock_irqrestore(&priv->lock, flags); |
720 | if (priv->line_status & UART_BREAK_ERROR) | 721 | if (priv->line_status & UART_BREAK_ERROR) |
721 | usb_serial_handle_break(port); | 722 | usb_serial_handle_break(port); |
722 | wake_up_interruptible(&priv->delta_msr_wait); | 723 | wake_up_interruptible(&port->delta_msr_wait); |
723 | 724 | ||
724 | tty = tty_port_tty_get(&port->port); | 725 | tty = tty_port_tty_get(&port->port); |
725 | if (!tty) | 726 | if (!tty) |
@@ -783,7 +784,7 @@ static void pl2303_process_read_urb(struct urb *urb) | |||
783 | line_status = priv->line_status; | 784 | line_status = priv->line_status; |
784 | priv->line_status &= ~UART_STATE_TRANSIENT_MASK; | 785 | priv->line_status &= ~UART_STATE_TRANSIENT_MASK; |
785 | spin_unlock_irqrestore(&priv->lock, flags); | 786 | spin_unlock_irqrestore(&priv->lock, flags); |
786 | wake_up_interruptible(&priv->delta_msr_wait); | 787 | wake_up_interruptible(&port->delta_msr_wait); |
787 | 788 | ||
788 | if (!urb->actual_length) | 789 | if (!urb->actual_length) |
789 | return; | 790 | return; |
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c index d643a4d4d770..75f125ddb0c9 100644 --- a/drivers/usb/serial/quatech2.c +++ b/drivers/usb/serial/quatech2.c | |||
@@ -128,7 +128,6 @@ struct qt2_port_private { | |||
128 | u8 shadowLSR; | 128 | u8 shadowLSR; |
129 | u8 shadowMSR; | 129 | u8 shadowMSR; |
130 | 130 | ||
131 | wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ | ||
132 | struct async_icount icount; | 131 | struct async_icount icount; |
133 | 132 | ||
134 | struct usb_serial_port *port; | 133 | struct usb_serial_port *port; |
@@ -506,8 +505,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
506 | spin_unlock_irqrestore(&priv->lock, flags); | 505 | spin_unlock_irqrestore(&priv->lock, flags); |
507 | 506 | ||
508 | while (1) { | 507 | while (1) { |
509 | wait_event_interruptible(priv->delta_msr_wait, | 508 | wait_event_interruptible(port->delta_msr_wait, |
510 | ((priv->icount.rng != prev.rng) || | 509 | (port->serial->disconnected || |
510 | (priv->icount.rng != prev.rng) || | ||
511 | (priv->icount.dsr != prev.dsr) || | 511 | (priv->icount.dsr != prev.dsr) || |
512 | (priv->icount.dcd != prev.dcd) || | 512 | (priv->icount.dcd != prev.dcd) || |
513 | (priv->icount.cts != prev.cts))); | 513 | (priv->icount.cts != prev.cts))); |
@@ -515,6 +515,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
515 | if (signal_pending(current)) | 515 | if (signal_pending(current)) |
516 | return -ERESTARTSYS; | 516 | return -ERESTARTSYS; |
517 | 517 | ||
518 | if (port->serial->disconnected) | ||
519 | return -EIO; | ||
520 | |||
518 | spin_lock_irqsave(&priv->lock, flags); | 521 | spin_lock_irqsave(&priv->lock, flags); |
519 | cur = priv->icount; | 522 | cur = priv->icount; |
520 | spin_unlock_irqrestore(&priv->lock, flags); | 523 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -827,7 +830,6 @@ static int qt2_port_probe(struct usb_serial_port *port) | |||
827 | 830 | ||
828 | spin_lock_init(&port_priv->lock); | 831 | spin_lock_init(&port_priv->lock); |
829 | spin_lock_init(&port_priv->urb_lock); | 832 | spin_lock_init(&port_priv->urb_lock); |
830 | init_waitqueue_head(&port_priv->delta_msr_wait); | ||
831 | port_priv->port = port; | 833 | port_priv->port = port; |
832 | 834 | ||
833 | port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL); | 835 | port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL); |
@@ -970,7 +972,7 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch) | |||
970 | if (newMSR & UART_MSR_TERI) | 972 | if (newMSR & UART_MSR_TERI) |
971 | port_priv->icount.rng++; | 973 | port_priv->icount.rng++; |
972 | 974 | ||
973 | wake_up_interruptible(&port_priv->delta_msr_wait); | 975 | wake_up_interruptible(&port->delta_msr_wait); |
974 | } | 976 | } |
975 | } | 977 | } |
976 | 978 | ||
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c index 91ff8e3bddbd..549ef68ff5fa 100644 --- a/drivers/usb/serial/spcp8x5.c +++ b/drivers/usb/serial/spcp8x5.c | |||
@@ -149,7 +149,6 @@ enum spcp8x5_type { | |||
149 | struct spcp8x5_private { | 149 | struct spcp8x5_private { |
150 | spinlock_t lock; | 150 | spinlock_t lock; |
151 | enum spcp8x5_type type; | 151 | enum spcp8x5_type type; |
152 | wait_queue_head_t delta_msr_wait; | ||
153 | u8 line_control; | 152 | u8 line_control; |
154 | u8 line_status; | 153 | u8 line_status; |
155 | }; | 154 | }; |
@@ -179,7 +178,6 @@ static int spcp8x5_port_probe(struct usb_serial_port *port) | |||
179 | return -ENOMEM; | 178 | return -ENOMEM; |
180 | 179 | ||
181 | spin_lock_init(&priv->lock); | 180 | spin_lock_init(&priv->lock); |
182 | init_waitqueue_head(&priv->delta_msr_wait); | ||
183 | priv->type = type; | 181 | priv->type = type; |
184 | 182 | ||
185 | usb_set_serial_port_data(port , priv); | 183 | usb_set_serial_port_data(port , priv); |
@@ -475,7 +473,7 @@ static void spcp8x5_process_read_urb(struct urb *urb) | |||
475 | priv->line_status &= ~UART_STATE_TRANSIENT_MASK; | 473 | priv->line_status &= ~UART_STATE_TRANSIENT_MASK; |
476 | spin_unlock_irqrestore(&priv->lock, flags); | 474 | spin_unlock_irqrestore(&priv->lock, flags); |
477 | /* wake up the wait for termios */ | 475 | /* wake up the wait for termios */ |
478 | wake_up_interruptible(&priv->delta_msr_wait); | 476 | wake_up_interruptible(&port->delta_msr_wait); |
479 | 477 | ||
480 | if (!urb->actual_length) | 478 | if (!urb->actual_length) |
481 | return; | 479 | return; |
@@ -526,12 +524,15 @@ static int spcp8x5_wait_modem_info(struct usb_serial_port *port, | |||
526 | 524 | ||
527 | while (1) { | 525 | while (1) { |
528 | /* wake up in bulk read */ | 526 | /* wake up in bulk read */ |
529 | interruptible_sleep_on(&priv->delta_msr_wait); | 527 | interruptible_sleep_on(&port->delta_msr_wait); |
530 | 528 | ||
531 | /* see if a signal did it */ | 529 | /* see if a signal did it */ |
532 | if (signal_pending(current)) | 530 | if (signal_pending(current)) |
533 | return -ERESTARTSYS; | 531 | return -ERESTARTSYS; |
534 | 532 | ||
533 | if (port->serial->disconnected) | ||
534 | return -EIO; | ||
535 | |||
535 | spin_lock_irqsave(&priv->lock, flags); | 536 | spin_lock_irqsave(&priv->lock, flags); |
536 | status = priv->line_status; | 537 | status = priv->line_status; |
537 | spin_unlock_irqrestore(&priv->lock, flags); | 538 | spin_unlock_irqrestore(&priv->lock, flags); |
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c index b57cf841c5b6..4b2a19757b4d 100644 --- a/drivers/usb/serial/ssu100.c +++ b/drivers/usb/serial/ssu100.c | |||
@@ -61,7 +61,6 @@ struct ssu100_port_private { | |||
61 | spinlock_t status_lock; | 61 | spinlock_t status_lock; |
62 | u8 shadowLSR; | 62 | u8 shadowLSR; |
63 | u8 shadowMSR; | 63 | u8 shadowMSR; |
64 | wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ | ||
65 | struct async_icount icount; | 64 | struct async_icount icount; |
66 | }; | 65 | }; |
67 | 66 | ||
@@ -355,8 +354,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
355 | spin_unlock_irqrestore(&priv->status_lock, flags); | 354 | spin_unlock_irqrestore(&priv->status_lock, flags); |
356 | 355 | ||
357 | while (1) { | 356 | while (1) { |
358 | wait_event_interruptible(priv->delta_msr_wait, | 357 | wait_event_interruptible(port->delta_msr_wait, |
359 | ((priv->icount.rng != prev.rng) || | 358 | (port->serial->disconnected || |
359 | (priv->icount.rng != prev.rng) || | ||
360 | (priv->icount.dsr != prev.dsr) || | 360 | (priv->icount.dsr != prev.dsr) || |
361 | (priv->icount.dcd != prev.dcd) || | 361 | (priv->icount.dcd != prev.dcd) || |
362 | (priv->icount.cts != prev.cts))); | 362 | (priv->icount.cts != prev.cts))); |
@@ -364,6 +364,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
364 | if (signal_pending(current)) | 364 | if (signal_pending(current)) |
365 | return -ERESTARTSYS; | 365 | return -ERESTARTSYS; |
366 | 366 | ||
367 | if (port->serial->disconnected) | ||
368 | return -EIO; | ||
369 | |||
367 | spin_lock_irqsave(&priv->status_lock, flags); | 370 | spin_lock_irqsave(&priv->status_lock, flags); |
368 | cur = priv->icount; | 371 | cur = priv->icount; |
369 | spin_unlock_irqrestore(&priv->status_lock, flags); | 372 | spin_unlock_irqrestore(&priv->status_lock, flags); |
@@ -445,7 +448,6 @@ static int ssu100_port_probe(struct usb_serial_port *port) | |||
445 | return -ENOMEM; | 448 | return -ENOMEM; |
446 | 449 | ||
447 | spin_lock_init(&priv->status_lock); | 450 | spin_lock_init(&priv->status_lock); |
448 | init_waitqueue_head(&priv->delta_msr_wait); | ||
449 | 451 | ||
450 | usb_set_serial_port_data(port, priv); | 452 | usb_set_serial_port_data(port, priv); |
451 | 453 | ||
@@ -537,7 +539,7 @@ static void ssu100_update_msr(struct usb_serial_port *port, u8 msr) | |||
537 | priv->icount.dcd++; | 539 | priv->icount.dcd++; |
538 | if (msr & UART_MSR_TERI) | 540 | if (msr & UART_MSR_TERI) |
539 | priv->icount.rng++; | 541 | priv->icount.rng++; |
540 | wake_up_interruptible(&priv->delta_msr_wait); | 542 | wake_up_interruptible(&port->delta_msr_wait); |
541 | } | 543 | } |
542 | } | 544 | } |
543 | 545 | ||
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 39cb9b807c3c..73deb029fc05 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
@@ -74,7 +74,6 @@ struct ti_port { | |||
74 | int tp_flags; | 74 | int tp_flags; |
75 | int tp_closing_wait;/* in .01 secs */ | 75 | int tp_closing_wait;/* in .01 secs */ |
76 | struct async_icount tp_icount; | 76 | struct async_icount tp_icount; |
77 | wait_queue_head_t tp_msr_wait; /* wait for msr change */ | ||
78 | wait_queue_head_t tp_write_wait; | 77 | wait_queue_head_t tp_write_wait; |
79 | struct ti_device *tp_tdev; | 78 | struct ti_device *tp_tdev; |
80 | struct usb_serial_port *tp_port; | 79 | struct usb_serial_port *tp_port; |
@@ -432,7 +431,6 @@ static int ti_port_probe(struct usb_serial_port *port) | |||
432 | else | 431 | else |
433 | tport->tp_uart_base_addr = TI_UART2_BASE_ADDR; | 432 | tport->tp_uart_base_addr = TI_UART2_BASE_ADDR; |
434 | tport->tp_closing_wait = closing_wait; | 433 | tport->tp_closing_wait = closing_wait; |
435 | init_waitqueue_head(&tport->tp_msr_wait); | ||
436 | init_waitqueue_head(&tport->tp_write_wait); | 434 | init_waitqueue_head(&tport->tp_write_wait); |
437 | if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE, GFP_KERNEL)) { | 435 | if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE, GFP_KERNEL)) { |
438 | kfree(tport); | 436 | kfree(tport); |
@@ -784,9 +782,13 @@ static int ti_ioctl(struct tty_struct *tty, | |||
784 | dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__); | 782 | dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__); |
785 | cprev = tport->tp_icount; | 783 | cprev = tport->tp_icount; |
786 | while (1) { | 784 | while (1) { |
787 | interruptible_sleep_on(&tport->tp_msr_wait); | 785 | interruptible_sleep_on(&port->delta_msr_wait); |
788 | if (signal_pending(current)) | 786 | if (signal_pending(current)) |
789 | return -ERESTARTSYS; | 787 | return -ERESTARTSYS; |
788 | |||
789 | if (port->serial->disconnected) | ||
790 | return -EIO; | ||
791 | |||
790 | cnow = tport->tp_icount; | 792 | cnow = tport->tp_icount; |
791 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && | 793 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && |
792 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) | 794 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) |
@@ -1392,7 +1394,7 @@ static void ti_handle_new_msr(struct ti_port *tport, __u8 msr) | |||
1392 | icount->dcd++; | 1394 | icount->dcd++; |
1393 | if (msr & TI_MSR_DELTA_RI) | 1395 | if (msr & TI_MSR_DELTA_RI) |
1394 | icount->rng++; | 1396 | icount->rng++; |
1395 | wake_up_interruptible(&tport->tp_msr_wait); | 1397 | wake_up_interruptible(&tport->tp_port->delta_msr_wait); |
1396 | spin_unlock_irqrestore(&tport->tp_lock, flags); | 1398 | spin_unlock_irqrestore(&tport->tp_lock, flags); |
1397 | } | 1399 | } |
1398 | 1400 | ||
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index a19ed74d770d..2e70efa08b77 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -151,6 +151,7 @@ static void destroy_serial(struct kref *kref) | |||
151 | } | 151 | } |
152 | } | 152 | } |
153 | 153 | ||
154 | usb_put_intf(serial->interface); | ||
154 | usb_put_dev(serial->dev); | 155 | usb_put_dev(serial->dev); |
155 | kfree(serial); | 156 | kfree(serial); |
156 | } | 157 | } |
@@ -620,7 +621,7 @@ static struct usb_serial *create_serial(struct usb_device *dev, | |||
620 | } | 621 | } |
621 | serial->dev = usb_get_dev(dev); | 622 | serial->dev = usb_get_dev(dev); |
622 | serial->type = driver; | 623 | serial->type = driver; |
623 | serial->interface = interface; | 624 | serial->interface = usb_get_intf(interface); |
624 | kref_init(&serial->kref); | 625 | kref_init(&serial->kref); |
625 | mutex_init(&serial->disc_mutex); | 626 | mutex_init(&serial->disc_mutex); |
626 | serial->minor = SERIAL_TTY_NO_MINOR; | 627 | serial->minor = SERIAL_TTY_NO_MINOR; |
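The usb-serial core change pairs a usb_get_intf() at allocation time with the usb_put_intf() added to destroy_serial(), so the struct usb_interface stays pinned for as long as the kref on struct usb_serial is held. The pairing, condensed (error paths omitted):

/* create_serial(): take a reference on the interface we keep a pointer to */
serial->interface = usb_get_intf(interface);

/* destroy_serial(), run when the last kref is dropped: release it again */
usb_put_intf(serial->interface);
usb_put_dev(serial->dev);
kfree(serial);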
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index da04a074e790..1799335288bd 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -496,6 +496,13 @@ UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999, | |||
496 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 496 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
497 | US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG), | 497 | US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG), |
498 | 498 | ||
499 | /* Added by Dmitry Artamonow <mad_soft@inbox.ru> */ | ||
500 | UNUSUAL_DEV( 0x04e8, 0x5136, 0x0000, 0x9999, | ||
501 | "Samsung", | ||
502 | "YP-Z3", | ||
503 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
504 | US_FL_MAX_SECTORS_64), | ||
505 | |||
499 | /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>. | 506 | /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>. |
500 | * Device uses standards-violating 32-byte Bulk Command Block Wrappers and | 507 | * Device uses standards-violating 32-byte Bulk Command Block Wrappers and |
501 | * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011. | 508 | * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011. |
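For readers unfamiliar with unusual_devs.h, the new Samsung YP-Z3 entry follows the standard UNUSUAL_DEV() layout: vendor/product IDs, a bcdDevice revision range, display strings, subclass/protocol/init hooks, and the quirk flags. The same entry, annotated (comments are explanatory only):

UNUSUAL_DEV(0x04e8, 0x5136,			/* idVendor, idProduct */
		0x0000, 0x9999,			/* bcdDevice range: match every revision */
		"Samsung",			/* vendor string shown in logs */
		"YP-Z3",			/* product string */
		USB_SC_DEVICE, USB_PR_DEVICE, NULL,	/* use the device's own subclass/protocol, no init hook */
		US_FL_MAX_SECTORS_64),		/* cap transfers at 64 sectors per command */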
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 964ff22bf281..aeb00fc2d3be 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/pci.h> | 27 | #include <linux/pci.h> |
28 | #include <linux/uaccess.h> | 28 | #include <linux/uaccess.h> |
29 | #include <linux/vfio.h> | 29 | #include <linux/vfio.h> |
30 | #include <linux/slab.h> | ||
30 | 31 | ||
31 | #include "vfio_pci_private.h" | 32 | #include "vfio_pci_private.h" |
32 | 33 | ||
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 3639371fa697..a96509187deb 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/vfio.h> | 22 | #include <linux/vfio.h> |
23 | #include <linux/wait.h> | 23 | #include <linux/wait.h> |
24 | #include <linux/workqueue.h> | 24 | #include <linux/workqueue.h> |
25 | #include <linux/slab.h> | ||
25 | 26 | ||
26 | #include "vfio_pci_private.h" | 27 | #include "vfio_pci_private.h" |
27 | 28 | ||
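Both vfio hunks add linux/slab.h explicitly; these files allocate with kzalloc()/kfree(), and depending on the header being pulled in indirectly is fragile across configurations. The include simply makes the dependency explicit:

#include <linux/slab.h>	/* kzalloc(), kcalloc(), kfree() */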
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 959b1cd89e6a..ec6fb3fa59bb 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -339,7 +339,8 @@ static void handle_tx(struct vhost_net *net) | |||
339 | msg.msg_controllen = 0; | 339 | msg.msg_controllen = 0; |
340 | ubufs = NULL; | 340 | ubufs = NULL; |
341 | } else { | 341 | } else { |
342 | struct ubuf_info *ubuf = &vq->ubuf_info[head]; | 342 | struct ubuf_info *ubuf; |
343 | ubuf = vq->ubuf_info + vq->upend_idx; | ||
343 | 344 | ||
344 | vq->heads[vq->upend_idx].len = | 345 | vq->heads[vq->upend_idx].len = |
345 | VHOST_DMA_IN_PROGRESS; | 346 | VHOST_DMA_IN_PROGRESS; |
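The vhost-net fix changes which index selects the zerocopy ubuf_info slot: the slot is tracked by vq->upend_idx, the position in the ring of in-flight zerocopy buffers, not by the descriptor index head. The corrected pairing, condensed from the hunk above:

struct ubuf_info *ubuf;

ubuf = vq->ubuf_info + vq->upend_idx;		/* slot for this in-flight buffer */
vq->heads[vq->upend_idx].len = VHOST_DMA_IN_PROGRESS;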
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c index 9951297b2427..43fb11ee2e8d 100644 --- a/drivers/vhost/tcm_vhost.c +++ b/drivers/vhost/tcm_vhost.c | |||
@@ -850,7 +850,7 @@ static int vhost_scsi_clear_endpoint( | |||
850 | for (index = 0; index < vs->dev.nvqs; ++index) { | 850 | for (index = 0; index < vs->dev.nvqs; ++index) { |
851 | if (!vhost_vq_access_ok(&vs->vqs[index])) { | 851 | if (!vhost_vq_access_ok(&vs->vqs[index])) { |
852 | ret = -EFAULT; | 852 | ret = -EFAULT; |
853 | goto err; | 853 | goto err_dev; |
854 | } | 854 | } |
855 | } | 855 | } |
856 | for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { | 856 | for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) { |
@@ -860,10 +860,11 @@ static int vhost_scsi_clear_endpoint( | |||
860 | if (!tv_tpg) | 860 | if (!tv_tpg) |
861 | continue; | 861 | continue; |
862 | 862 | ||
863 | mutex_lock(&tv_tpg->tv_tpg_mutex); | ||
863 | tv_tport = tv_tpg->tport; | 864 | tv_tport = tv_tpg->tport; |
864 | if (!tv_tport) { | 865 | if (!tv_tport) { |
865 | ret = -ENODEV; | 866 | ret = -ENODEV; |
866 | goto err; | 867 | goto err_tpg; |
867 | } | 868 | } |
868 | 869 | ||
869 | if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) { | 870 | if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) { |
@@ -872,16 +873,19 @@ static int vhost_scsi_clear_endpoint( | |||
872 | tv_tport->tport_name, tv_tpg->tport_tpgt, | 873 | tv_tport->tport_name, tv_tpg->tport_tpgt, |
873 | t->vhost_wwpn, t->vhost_tpgt); | 874 | t->vhost_wwpn, t->vhost_tpgt); |
874 | ret = -EINVAL; | 875 | ret = -EINVAL; |
875 | goto err; | 876 | goto err_tpg; |
876 | } | 877 | } |
877 | tv_tpg->tv_tpg_vhost_count--; | 878 | tv_tpg->tv_tpg_vhost_count--; |
878 | vs->vs_tpg[target] = NULL; | 879 | vs->vs_tpg[target] = NULL; |
879 | vs->vs_endpoint = false; | 880 | vs->vs_endpoint = false; |
881 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | ||
880 | } | 882 | } |
881 | mutex_unlock(&vs->dev.mutex); | 883 | mutex_unlock(&vs->dev.mutex); |
882 | return 0; | 884 | return 0; |
883 | 885 | ||
884 | err: | 886 | err_tpg: |
887 | mutex_unlock(&tv_tpg->tv_tpg_mutex); | ||
888 | err_dev: | ||
885 | mutex_unlock(&vs->dev.mutex); | 889 | mutex_unlock(&vs->dev.mutex); |
886 | return ret; | 890 | return ret; |
887 | } | 891 | } |
@@ -937,6 +941,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs) | |||
937 | 941 | ||
938 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) | 942 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) |
939 | vhost_scsi_flush_vq(vs, i); | 943 | vhost_scsi_flush_vq(vs, i); |
944 | vhost_work_flush(&vs->dev, &vs->vs_completion_work); | ||
940 | } | 945 | } |
941 | 946 | ||
942 | static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) | 947 | static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) |
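Because vhost_scsi_clear_endpoint() now takes tv_tpg_mutex inside the per-target loop, the single err: label is split so that each failure path releases exactly the locks it holds. A self-contained sketch of that goto-unwind idiom (the locks and checks here are illustrative stand-ins, not the driver's own):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(dev_mutex);		/* illustrative stand-ins */
static DEFINE_MUTEX(tpg_mutex);

static int clear_endpoint_sketch(bool dev_ok, bool tpg_ok)
{
	int ret;

	mutex_lock(&dev_mutex);
	if (!dev_ok) {
		ret = -EFAULT;
		goto err_dev;		/* only dev_mutex held here */
	}

	mutex_lock(&tpg_mutex);
	if (!tpg_ok) {
		ret = -ENODEV;
		goto err_tpg;		/* both locks held here */
	}

	/* ... teardown work done under both locks ... */
	mutex_unlock(&tpg_mutex);
	mutex_unlock(&dev_mutex);
	return 0;

err_tpg:
	mutex_unlock(&tpg_mutex);
err_dev:
	mutex_unlock(&dev_mutex);
	return ret;
}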
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c index 12cf5f31ee8f..025428e04c33 100644 --- a/drivers/video/atmel_lcdfb.c +++ b/drivers/video/atmel_lcdfb.c | |||
@@ -422,17 +422,22 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var, | |||
422 | = var->bits_per_pixel; | 422 | = var->bits_per_pixel; |
423 | break; | 423 | break; |
424 | case 16: | 424 | case 16: |
425 | /* Older SOCs use IBGR:555 rather than BGR:565. */ | ||
426 | if (sinfo->have_intensity_bit) | ||
427 | var->green.length = 5; | ||
428 | else | ||
429 | var->green.length = 6; | ||
430 | |||
425 | if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) { | 431 | if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) { |
426 | /* RGB:565 mode */ | 432 | /* RGB:5X5 mode */ |
427 | var->red.offset = 11; | 433 | var->red.offset = var->green.length + 5; |
428 | var->blue.offset = 0; | 434 | var->blue.offset = 0; |
429 | } else { | 435 | } else { |
430 | /* BGR:565 mode */ | 436 | /* BGR:5X5 mode */ |
431 | var->red.offset = 0; | 437 | var->red.offset = 0; |
432 | var->blue.offset = 11; | 438 | var->blue.offset = var->green.length + 5; |
433 | } | 439 | } |
434 | var->green.offset = 5; | 440 | var->green.offset = 5; |
435 | var->green.length = 6; | ||
436 | var->red.length = var->blue.length = 5; | 441 | var->red.length = var->blue.length = 5; |
437 | break; | 442 | break; |
438 | case 32: | 443 | case 32: |
@@ -679,8 +684,7 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red, | |||
679 | 684 | ||
680 | case FB_VISUAL_PSEUDOCOLOR: | 685 | case FB_VISUAL_PSEUDOCOLOR: |
681 | if (regno < 256) { | 686 | if (regno < 256) { |
682 | if (cpu_is_at91sam9261() || cpu_is_at91sam9263() | 687 | if (sinfo->have_intensity_bit) { |
683 | || cpu_is_at91sam9rl()) { | ||
684 | /* old style I+BGR:555 */ | 688 | /* old style I+BGR:555 */ |
685 | val = ((red >> 11) & 0x001f); | 689 | val = ((red >> 11) & 0x001f); |
686 | val |= ((green >> 6) & 0x03e0); | 690 | val |= ((green >> 6) & 0x03e0); |
@@ -870,6 +874,10 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev) | |||
870 | } | 874 | } |
871 | sinfo->info = info; | 875 | sinfo->info = info; |
872 | sinfo->pdev = pdev; | 876 | sinfo->pdev = pdev; |
877 | if (cpu_is_at91sam9261() || cpu_is_at91sam9263() || | ||
878 | cpu_is_at91sam9rl()) { | ||
879 | sinfo->have_intensity_bit = true; | ||
880 | } | ||
873 | 881 | ||
874 | strcpy(info->fix.id, sinfo->pdev->name); | 882 | strcpy(info->fix.id, sinfo->pdev->name); |
875 | info->flags = ATMEL_LCDFB_FBINFO_DEFAULT; | 883 | info->flags = ATMEL_LCDFB_FBINFO_DEFAULT; |
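The atmel_lcdfb change derives the 16bpp layout from whether the SoC carries the extra intensity bit: green keeps 5 bits on the older parts (intensity plus 5:5:5) and 6 bits otherwise (plain 565), and the high colour component sits just above green. Worked out for the RGB wiring case shown above (BGR wiring mirrors red and blue):

/* have_intensity_bit: green.length = 5 -> red.offset = 5 + 5 = 10, I at bit 15
 * no intensity bit:   green.length = 6 -> red.offset = 5 + 6 = 11, plain RGB:565 */
var->green.offset = 5;
var->red.offset   = var->green.length + 5;
var->blue.offset  = 0;
var->red.length   = var->blue.length = 5;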
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c index 3f2519d30715..e06cd5d90c97 100644 --- a/drivers/video/ep93xx-fb.c +++ b/drivers/video/ep93xx-fb.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/clk.h> | 24 | #include <linux/clk.h> |
25 | #include <linux/fb.h> | 25 | #include <linux/fb.h> |
26 | #include <linux/io.h> | ||
26 | 27 | ||
27 | #include <linux/platform_data/video-ep93xx.h> | 28 | #include <linux/platform_data/video-ep93xx.h> |
28 | 29 | ||
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c index e3b8f757d2d3..0e9d8c479c35 100644 --- a/drivers/watchdog/sp5100_tco.c +++ b/drivers/watchdog/sp5100_tco.c | |||
@@ -40,13 +40,12 @@ | |||
40 | #include "sp5100_tco.h" | 40 | #include "sp5100_tco.h" |
41 | 41 | ||
42 | /* Module and version information */ | 42 | /* Module and version information */ |
43 | #define TCO_VERSION "0.03" | 43 | #define TCO_VERSION "0.05" |
44 | #define TCO_MODULE_NAME "SP5100 TCO timer" | 44 | #define TCO_MODULE_NAME "SP5100 TCO timer" |
45 | #define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION | 45 | #define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION |
46 | 46 | ||
47 | /* internal variables */ | 47 | /* internal variables */ |
48 | static u32 tcobase_phys; | 48 | static u32 tcobase_phys; |
49 | static u32 resbase_phys; | ||
50 | static u32 tco_wdt_fired; | 49 | static u32 tco_wdt_fired; |
51 | static void __iomem *tcobase; | 50 | static void __iomem *tcobase; |
52 | static unsigned int pm_iobase; | 51 | static unsigned int pm_iobase; |
@@ -54,10 +53,6 @@ static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */ | |||
54 | static unsigned long timer_alive; | 53 | static unsigned long timer_alive; |
55 | static char tco_expect_close; | 54 | static char tco_expect_close; |
56 | static struct pci_dev *sp5100_tco_pci; | 55 | static struct pci_dev *sp5100_tco_pci; |
57 | static struct resource wdt_res = { | ||
58 | .name = "Watchdog Timer", | ||
59 | .flags = IORESOURCE_MEM, | ||
60 | }; | ||
61 | 56 | ||
62 | /* the watchdog platform device */ | 57 | /* the watchdog platform device */ |
63 | static struct platform_device *sp5100_tco_platform_device; | 58 | static struct platform_device *sp5100_tco_platform_device; |
@@ -75,12 +70,6 @@ module_param(nowayout, bool, 0); | |||
75 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started." | 70 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started." |
76 | " (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); | 71 | " (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); |
77 | 72 | ||
78 | static unsigned int force_addr; | ||
79 | module_param(force_addr, uint, 0); | ||
80 | MODULE_PARM_DESC(force_addr, "Force the use of specified MMIO address." | ||
81 | " ONLY USE THIS PARAMETER IF YOU REALLY KNOW" | ||
82 | " WHAT YOU ARE DOING (default=none)"); | ||
83 | |||
84 | /* | 73 | /* |
85 | * Some TCO specific functions | 74 | * Some TCO specific functions |
86 | */ | 75 | */ |
@@ -176,39 +165,6 @@ static void tco_timer_enable(void) | |||
176 | } | 165 | } |
177 | } | 166 | } |
178 | 167 | ||
179 | static void tco_timer_disable(void) | ||
180 | { | ||
181 | int val; | ||
182 | |||
183 | if (sp5100_tco_pci->revision >= 0x40) { | ||
184 | /* For SB800 or later */ | ||
185 | /* Enable watchdog decode bit and Disable watchdog timer */ | ||
186 | outb(SB800_PM_WATCHDOG_CONTROL, SB800_IO_PM_INDEX_REG); | ||
187 | val = inb(SB800_IO_PM_DATA_REG); | ||
188 | val |= SB800_PCI_WATCHDOG_DECODE_EN; | ||
189 | val |= SB800_PM_WATCHDOG_DISABLE; | ||
190 | outb(val, SB800_IO_PM_DATA_REG); | ||
191 | } else { | ||
192 | /* For SP5100 or SB7x0 */ | ||
193 | /* Enable watchdog decode bit */ | ||
194 | pci_read_config_dword(sp5100_tco_pci, | ||
195 | SP5100_PCI_WATCHDOG_MISC_REG, | ||
196 | &val); | ||
197 | |||
198 | val |= SP5100_PCI_WATCHDOG_DECODE_EN; | ||
199 | |||
200 | pci_write_config_dword(sp5100_tco_pci, | ||
201 | SP5100_PCI_WATCHDOG_MISC_REG, | ||
202 | val); | ||
203 | |||
204 | /* Disable Watchdog timer */ | ||
205 | outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG); | ||
206 | val = inb(SP5100_IO_PM_DATA_REG); | ||
207 | val |= SP5100_PM_WATCHDOG_DISABLE; | ||
208 | outb(val, SP5100_IO_PM_DATA_REG); | ||
209 | } | ||
210 | } | ||
211 | |||
212 | /* | 168 | /* |
213 | * /dev/watchdog handling | 169 | * /dev/watchdog handling |
214 | */ | 170 | */ |
@@ -361,7 +317,7 @@ static unsigned char sp5100_tco_setupdevice(void) | |||
361 | { | 317 | { |
362 | struct pci_dev *dev = NULL; | 318 | struct pci_dev *dev = NULL; |
363 | const char *dev_name = NULL; | 319 | const char *dev_name = NULL; |
364 | u32 val, tmp_val; | 320 | u32 val; |
365 | u32 index_reg, data_reg, base_addr; | 321 | u32 index_reg, data_reg, base_addr; |
366 | 322 | ||
367 | /* Match the PCI device */ | 323 | /* Match the PCI device */ |
@@ -459,63 +415,8 @@ static unsigned char sp5100_tco_setupdevice(void) | |||
459 | } else | 415 | } else |
460 | pr_debug("SBResource_MMIO is disabled(0x%04x)\n", val); | 416 | pr_debug("SBResource_MMIO is disabled(0x%04x)\n", val); |
461 | 417 | ||
462 | /* | 418 | pr_notice("failed to find MMIO address, giving up.\n"); |
463 | * Lastly re-programming the watchdog timer MMIO address, | 419 | goto unreg_region; |
464 | * This method is a last resort... | ||
465 | * | ||
466 | * Before re-programming, to ensure that the watchdog timer | ||
467 | * is disabled, disable the watchdog timer. | ||
468 | */ | ||
469 | tco_timer_disable(); | ||
470 | |||
471 | if (force_addr) { | ||
472 | /* | ||
473 | * Force the use of watchdog timer MMIO address, and aligned to | ||
474 | * 8byte boundary. | ||
475 | */ | ||
476 | force_addr &= ~0x7; | ||
477 | val = force_addr; | ||
478 | |||
479 | pr_info("Force the use of 0x%04x as MMIO address\n", val); | ||
480 | } else { | ||
481 | /* | ||
482 | * Get empty slot into the resource tree for watchdog timer. | ||
483 | */ | ||
484 | if (allocate_resource(&iomem_resource, | ||
485 | &wdt_res, | ||
486 | SP5100_WDT_MEM_MAP_SIZE, | ||
487 | 0xf0000000, | ||
488 | 0xfffffff8, | ||
489 | 0x8, | ||
490 | NULL, | ||
491 | NULL)) { | ||
492 | pr_err("MMIO allocation failed\n"); | ||
493 | goto unreg_region; | ||
494 | } | ||
495 | |||
496 | val = resbase_phys = wdt_res.start; | ||
497 | pr_debug("Got 0x%04x from resource tree\n", val); | ||
498 | } | ||
499 | |||
500 | /* Restore to the low three bits */ | ||
501 | outb(base_addr+0, index_reg); | ||
502 | tmp_val = val | (inb(data_reg) & 0x7); | ||
503 | |||
504 | /* Re-programming the watchdog timer base address */ | ||
505 | outb(base_addr+0, index_reg); | ||
506 | outb((tmp_val >> 0) & 0xff, data_reg); | ||
507 | outb(base_addr+1, index_reg); | ||
508 | outb((tmp_val >> 8) & 0xff, data_reg); | ||
509 | outb(base_addr+2, index_reg); | ||
510 | outb((tmp_val >> 16) & 0xff, data_reg); | ||
511 | outb(base_addr+3, index_reg); | ||
512 | outb((tmp_val >> 24) & 0xff, data_reg); | ||
513 | |||
514 | if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE, | ||
515 | dev_name)) { | ||
516 | pr_err("MMIO address 0x%04x already in use\n", val); | ||
517 | goto unreg_resource; | ||
518 | } | ||
519 | 420 | ||
520 | setup_wdt: | 421 | setup_wdt: |
521 | tcobase_phys = val; | 422 | tcobase_phys = val; |
@@ -555,9 +456,6 @@ setup_wdt: | |||
555 | 456 | ||
556 | unreg_mem_region: | 457 | unreg_mem_region: |
557 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); | 458 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); |
558 | unreg_resource: | ||
559 | if (resbase_phys) | ||
560 | release_resource(&wdt_res); | ||
561 | unreg_region: | 459 | unreg_region: |
562 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); | 460 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); |
563 | exit: | 461 | exit: |
@@ -567,7 +465,6 @@ exit: | |||
567 | static int sp5100_tco_init(struct platform_device *dev) | 465 | static int sp5100_tco_init(struct platform_device *dev) |
568 | { | 466 | { |
569 | int ret; | 467 | int ret; |
570 | char addr_str[16]; | ||
571 | 468 | ||
572 | /* | 469 | /* |
573 | * Check whether or not the hardware watchdog is there. If found, then | 470 | * Check whether or not the hardware watchdog is there. If found, then |
@@ -599,23 +496,14 @@ static int sp5100_tco_init(struct platform_device *dev) | |||
599 | clear_bit(0, &timer_alive); | 496 | clear_bit(0, &timer_alive); |
600 | 497 | ||
601 | /* Show module parameters */ | 498 | /* Show module parameters */ |
602 | if (force_addr == tcobase_phys) | 499 | pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n", |
603 | /* The force_addr is vaild */ | 500 | tcobase, heartbeat, nowayout); |
604 | sprintf(addr_str, "0x%04x", force_addr); | ||
605 | else | ||
606 | strcpy(addr_str, "none"); | ||
607 | |||
608 | pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d, " | ||
609 | "force_addr=%s)\n", | ||
610 | tcobase, heartbeat, nowayout, addr_str); | ||
611 | 501 | ||
612 | return 0; | 502 | return 0; |
613 | 503 | ||
614 | exit: | 504 | exit: |
615 | iounmap(tcobase); | 505 | iounmap(tcobase); |
616 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); | 506 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); |
617 | if (resbase_phys) | ||
618 | release_resource(&wdt_res); | ||
619 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); | 507 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); |
620 | return ret; | 508 | return ret; |
621 | } | 509 | } |
@@ -630,8 +518,6 @@ static void sp5100_tco_cleanup(void) | |||
630 | misc_deregister(&sp5100_tco_miscdev); | 518 | misc_deregister(&sp5100_tco_miscdev); |
631 | iounmap(tcobase); | 519 | iounmap(tcobase); |
632 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); | 520 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); |
633 | if (resbase_phys) | ||
634 | release_resource(&wdt_res); | ||
635 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); | 521 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); |
636 | } | 522 | } |
637 | 523 | ||
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h index 71594a0c14b7..2b28c00da0df 100644 --- a/drivers/watchdog/sp5100_tco.h +++ b/drivers/watchdog/sp5100_tco.h | |||
@@ -57,7 +57,7 @@ | |||
57 | #define SB800_PM_WATCHDOG_DISABLE (1 << 2) | 57 | #define SB800_PM_WATCHDOG_DISABLE (1 << 2) |
58 | #define SB800_PM_WATCHDOG_SECOND_RES (3 << 0) | 58 | #define SB800_PM_WATCHDOG_SECOND_RES (3 << 0) |
59 | #define SB800_ACPI_MMIO_DECODE_EN (1 << 0) | 59 | #define SB800_ACPI_MMIO_DECODE_EN (1 << 0) |
60 | #define SB800_ACPI_MMIO_SEL (1 << 2) | 60 | #define SB800_ACPI_MMIO_SEL (1 << 1) |
61 | 61 | ||
62 | 62 | ||
63 | #define SB800_PM_WDT_MMIO_OFFSET 0xB00 | 63 | #define SB800_PM_WDT_MMIO_OFFSET 0xB00 |
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c index cfd1ce34e0bc..1d36db114772 100644 --- a/fs/cifs/asn1.c +++ b/fs/cifs/asn1.c | |||
@@ -614,53 +614,10 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
614 | } | 614 | } |
615 | } | 615 | } |
616 | 616 | ||
617 | /* mechlistMIC */ | 617 | /* |
618 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 618 | * We currently ignore anything at the end of the SPNEGO blob after |
619 | /* Check if we have reached the end of the blob, but with | 619 | * the mechTypes have been parsed, since none of that info is |
620 | no mechListMic (e.g. NTLMSSP instead of KRB5) */ | 620 | * used at the moment. |
621 | if (ctx.error == ASN1_ERR_DEC_EMPTY) | 621 | */ |
622 | goto decode_negtoken_exit; | ||
623 | cFYI(1, "Error decoding last part negTokenInit exit3"); | ||
624 | return 0; | ||
625 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { | ||
626 | /* tag = 3 indicating mechListMIC */ | ||
627 | cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)", | ||
628 | cls, con, tag, end, *end); | ||
629 | return 0; | ||
630 | } | ||
631 | |||
632 | /* sequence */ | ||
633 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | ||
634 | cFYI(1, "Error decoding last part negTokenInit exit5"); | ||
635 | return 0; | ||
636 | } else if ((cls != ASN1_UNI) || (con != ASN1_CON) | ||
637 | || (tag != ASN1_SEQ)) { | ||
638 | cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)", | ||
639 | cls, con, tag, end, *end); | ||
640 | } | ||
641 | |||
642 | /* sequence of */ | ||
643 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | ||
644 | cFYI(1, "Error decoding last part negTokenInit exit 7"); | ||
645 | return 0; | ||
646 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { | ||
647 | cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)", | ||
648 | cls, con, tag, end, *end); | ||
649 | return 0; | ||
650 | } | ||
651 | |||
652 | /* general string */ | ||
653 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | ||
654 | cFYI(1, "Error decoding last part negTokenInit exit9"); | ||
655 | return 0; | ||
656 | } else if ((cls != ASN1_UNI) || (con != ASN1_PRI) | ||
657 | || (tag != ASN1_GENSTR)) { | ||
658 | cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)", | ||
659 | cls, con, tag, end, *end); | ||
660 | return 0; | ||
661 | } | ||
662 | cFYI(1, "Need to call asn1_octets_decode() function for %s", | ||
663 | ctx.pointer); /* is this UTF-8 or ASCII? */ | ||
664 | decode_negtoken_exit: | ||
665 | return 1; | 622 | return 1; |
666 | } | 623 | } |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 3cf8a15af916..345fc89c4286 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -91,6 +91,30 @@ struct workqueue_struct *cifsiod_wq; | |||
91 | __u8 cifs_client_guid[SMB2_CLIENT_GUID_SIZE]; | 91 | __u8 cifs_client_guid[SMB2_CLIENT_GUID_SIZE]; |
92 | #endif | 92 | #endif |
93 | 93 | ||
94 | /* | ||
95 | * Bumps refcount for cifs super block. | ||
96 | * Note that it should only be called if a reference to the VFS super block is | ||
97 | * already held, e.g. in open-type syscalls context. Otherwise it can race with | ||
98 | * atomic_dec_and_test in deactivate_locked_super. | ||
99 | */ | ||
100 | void | ||
101 | cifs_sb_active(struct super_block *sb) | ||
102 | { | ||
103 | struct cifs_sb_info *server = CIFS_SB(sb); | ||
104 | |||
105 | if (atomic_inc_return(&server->active) == 1) | ||
106 | atomic_inc(&sb->s_active); | ||
107 | } | ||
108 | |||
109 | void | ||
110 | cifs_sb_deactive(struct super_block *sb) | ||
111 | { | ||
112 | struct cifs_sb_info *server = CIFS_SB(sb); | ||
113 | |||
114 | if (atomic_dec_and_test(&server->active)) | ||
115 | deactivate_super(sb); | ||
116 | } | ||
117 | |||
94 | static int | 118 | static int |
95 | cifs_read_super(struct super_block *sb) | 119 | cifs_read_super(struct super_block *sb) |
96 | { | 120 | { |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 7163419cecd9..0e32c3446ce9 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -41,6 +41,10 @@ extern struct file_system_type cifs_fs_type; | |||
41 | extern const struct address_space_operations cifs_addr_ops; | 41 | extern const struct address_space_operations cifs_addr_ops; |
42 | extern const struct address_space_operations cifs_addr_ops_smallbuf; | 42 | extern const struct address_space_operations cifs_addr_ops_smallbuf; |
43 | 43 | ||
44 | /* Functions related to super block operations */ | ||
45 | extern void cifs_sb_active(struct super_block *sb); | ||
46 | extern void cifs_sb_deactive(struct super_block *sb); | ||
47 | |||
44 | /* Functions related to inodes */ | 48 | /* Functions related to inodes */ |
45 | extern const struct inode_operations cifs_dir_inode_ops; | 49 | extern const struct inode_operations cifs_dir_inode_ops; |
46 | extern struct inode *cifs_root_iget(struct super_block *); | 50 | extern struct inode *cifs_root_iget(struct super_block *); |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 8c0d85577314..7a0dd99e4507 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -300,6 +300,8 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, | |||
300 | INIT_WORK(&cfile->oplock_break, cifs_oplock_break); | 300 | INIT_WORK(&cfile->oplock_break, cifs_oplock_break); |
301 | mutex_init(&cfile->fh_mutex); | 301 | mutex_init(&cfile->fh_mutex); |
302 | 302 | ||
303 | cifs_sb_active(inode->i_sb); | ||
304 | |||
303 | /* | 305 | /* |
304 | * If the server returned a read oplock and we have mandatory brlocks, | 306 | * If the server returned a read oplock and we have mandatory brlocks, |
305 | * set oplock level to None. | 307 | * set oplock level to None. |
@@ -349,7 +351,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) | |||
349 | struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); | 351 | struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); |
350 | struct TCP_Server_Info *server = tcon->ses->server; | 352 | struct TCP_Server_Info *server = tcon->ses->server; |
351 | struct cifsInodeInfo *cifsi = CIFS_I(inode); | 353 | struct cifsInodeInfo *cifsi = CIFS_I(inode); |
352 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 354 | struct super_block *sb = inode->i_sb; |
355 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | ||
353 | struct cifsLockInfo *li, *tmp; | 356 | struct cifsLockInfo *li, *tmp; |
354 | struct cifs_fid fid; | 357 | struct cifs_fid fid; |
355 | struct cifs_pending_open open; | 358 | struct cifs_pending_open open; |
@@ -414,6 +417,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) | |||
414 | 417 | ||
415 | cifs_put_tlink(cifs_file->tlink); | 418 | cifs_put_tlink(cifs_file->tlink); |
416 | dput(cifs_file->dentry); | 419 | dput(cifs_file->dentry); |
420 | cifs_sb_deactive(sb); | ||
417 | kfree(cifs_file); | 421 | kfree(cifs_file); |
418 | } | 422 | } |
419 | 423 | ||
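Together with the cifsfs.c hunk above, these file.c changes pin the CIFS superblock for the lifetime of every open file: cifs_sb_active() is called when the cifsFileInfo is created and cifs_sb_deactive() when its last reference is dropped, which is why the new helper's comment insists that a VFS super block reference is already held when it runs. The pairing, condensed from the hunks above:

/* cifs_new_fileinfo(): the sb must not go away while this file exists */
cifs_sb_active(inode->i_sb);

/* cifsFileInfo_put(): last reference dropped, release the pin again */
cifs_put_tlink(cifs_file->tlink);
dput(cifs_file->dentry);
cifs_sb_deactive(sb);
kfree(cifs_file);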
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 0079696305c9..20887bf63121 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -1043,7 +1043,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry, | |||
1043 | cifs_sb->mnt_cifs_flags & | 1043 | cifs_sb->mnt_cifs_flags & |
1044 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 1044 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
1045 | if (rc != 0) { | 1045 | if (rc != 0) { |
1046 | rc = -ETXTBSY; | 1046 | rc = -EBUSY; |
1047 | goto undo_setattr; | 1047 | goto undo_setattr; |
1048 | } | 1048 | } |
1049 | 1049 | ||
@@ -1062,7 +1062,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry, | |||
1062 | if (rc == -ENOENT) | 1062 | if (rc == -ENOENT) |
1063 | rc = 0; | 1063 | rc = 0; |
1064 | else if (rc != 0) { | 1064 | else if (rc != 0) { |
1065 | rc = -ETXTBSY; | 1065 | rc = -EBUSY; |
1066 | goto undo_rename; | 1066 | goto undo_rename; |
1067 | } | 1067 | } |
1068 | cifsInode->delete_pending = true; | 1068 | cifsInode->delete_pending = true; |
@@ -1169,15 +1169,13 @@ psx_del_no_retry: | |||
1169 | cifs_drop_nlink(inode); | 1169 | cifs_drop_nlink(inode); |
1170 | } else if (rc == -ENOENT) { | 1170 | } else if (rc == -ENOENT) { |
1171 | d_drop(dentry); | 1171 | d_drop(dentry); |
1172 | } else if (rc == -ETXTBSY) { | 1172 | } else if (rc == -EBUSY) { |
1173 | if (server->ops->rename_pending_delete) { | 1173 | if (server->ops->rename_pending_delete) { |
1174 | rc = server->ops->rename_pending_delete(full_path, | 1174 | rc = server->ops->rename_pending_delete(full_path, |
1175 | dentry, xid); | 1175 | dentry, xid); |
1176 | if (rc == 0) | 1176 | if (rc == 0) |
1177 | cifs_drop_nlink(inode); | 1177 | cifs_drop_nlink(inode); |
1178 | } | 1178 | } |
1179 | if (rc == -ETXTBSY) | ||
1180 | rc = -EBUSY; | ||
1181 | } else if ((rc == -EACCES) && (dosattr == 0) && inode) { | 1179 | } else if ((rc == -EACCES) && (dosattr == 0) && inode) { |
1182 | attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); | 1180 | attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); |
1183 | if (attrs == NULL) { | 1181 | if (attrs == NULL) { |
@@ -1518,7 +1516,7 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry, | |||
1518 | * source. Note that cross directory moves do not work with | 1516 | * source. Note that cross directory moves do not work with |
1519 | * rename by filehandle to various Windows servers. | 1517 | * rename by filehandle to various Windows servers. |
1520 | */ | 1518 | */ |
1521 | if (rc == 0 || rc != -ETXTBSY) | 1519 | if (rc == 0 || rc != -EBUSY) |
1522 | goto do_rename_exit; | 1520 | goto do_rename_exit; |
1523 | 1521 | ||
1524 | /* open-file renames don't work across directories */ | 1522 | /* open-file renames don't work across directories */ |
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index a82bc51fdc82..c0b25b28be6c 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c | |||
@@ -62,7 +62,7 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = { | |||
62 | {ERRdiffdevice, -EXDEV}, | 62 | {ERRdiffdevice, -EXDEV}, |
63 | {ERRnofiles, -ENOENT}, | 63 | {ERRnofiles, -ENOENT}, |
64 | {ERRwriteprot, -EROFS}, | 64 | {ERRwriteprot, -EROFS}, |
65 | {ERRbadshare, -ETXTBSY}, | 65 | {ERRbadshare, -EBUSY}, |
66 | {ERRlock, -EACCES}, | 66 | {ERRlock, -EACCES}, |
67 | {ERRunsup, -EINVAL}, | 67 | {ERRunsup, -EINVAL}, |
68 | {ERRnosuchshare, -ENXIO}, | 68 | {ERRnosuchshare, -ENXIO}, |
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 4a01ba315262..3b83cd604796 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -335,9 +335,9 @@ struct ext4_group_desc | |||
335 | */ | 335 | */ |
336 | 336 | ||
337 | struct flex_groups { | 337 | struct flex_groups { |
338 | atomic_t free_inodes; | 338 | atomic64_t free_clusters; |
339 | atomic_t free_clusters; | 339 | atomic_t free_inodes; |
340 | atomic_t used_dirs; | 340 | atomic_t used_dirs; |
341 | }; | 341 | }; |
342 | 342 | ||
343 | #define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */ | 343 | #define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */ |
@@ -2617,7 +2617,7 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp, | |||
2617 | extern int __init ext4_init_pageio(void); | 2617 | extern int __init ext4_init_pageio(void); |
2618 | extern void ext4_add_complete_io(ext4_io_end_t *io_end); | 2618 | extern void ext4_add_complete_io(ext4_io_end_t *io_end); |
2619 | extern void ext4_exit_pageio(void); | 2619 | extern void ext4_exit_pageio(void); |
2620 | extern void ext4_ioend_wait(struct inode *); | 2620 | extern void ext4_ioend_shutdown(struct inode *); |
2621 | extern void ext4_free_io_end(ext4_io_end_t *io); | 2621 | extern void ext4_free_io_end(ext4_io_end_t *io); |
2622 | extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); | 2622 | extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); |
2623 | extern void ext4_end_io_work(struct work_struct *work); | 2623 | extern void ext4_end_io_work(struct work_struct *work); |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 28dd8eeea6a9..56efcaadf848 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -1584,10 +1584,12 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, | |||
1584 | unsigned short ext1_ee_len, ext2_ee_len, max_len; | 1584 | unsigned short ext1_ee_len, ext2_ee_len, max_len; |
1585 | 1585 | ||
1586 | /* | 1586 | /* |
1587 | * Make sure that either both extents are uninitialized, or | 1587 | * Make sure that both extents are initialized. We don't merge |
1588 | * both are _not_. | 1588 | * uninitialized extents so that we can be sure that end_io code has |
1589 | * the extent that was written properly split out and conversion to | ||
1590 | * initialized is trivial. | ||
1589 | */ | 1591 | */ |
1590 | if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2)) | 1592 | if (ext4_ext_is_uninitialized(ex1) || ext4_ext_is_uninitialized(ex2)) |
1591 | return 0; | 1593 | return 0; |
1592 | 1594 | ||
1593 | if (ext4_ext_is_uninitialized(ex1)) | 1595 | if (ext4_ext_is_uninitialized(ex1)) |
@@ -2923,7 +2925,7 @@ static int ext4_split_extent_at(handle_t *handle, | |||
2923 | { | 2925 | { |
2924 | ext4_fsblk_t newblock; | 2926 | ext4_fsblk_t newblock; |
2925 | ext4_lblk_t ee_block; | 2927 | ext4_lblk_t ee_block; |
2926 | struct ext4_extent *ex, newex, orig_ex; | 2928 | struct ext4_extent *ex, newex, orig_ex, zero_ex; |
2927 | struct ext4_extent *ex2 = NULL; | 2929 | struct ext4_extent *ex2 = NULL; |
2928 | unsigned int ee_len, depth; | 2930 | unsigned int ee_len, depth; |
2929 | int err = 0; | 2931 | int err = 0; |
@@ -2943,6 +2945,10 @@ static int ext4_split_extent_at(handle_t *handle, | |||
2943 | newblock = split - ee_block + ext4_ext_pblock(ex); | 2945 | newblock = split - ee_block + ext4_ext_pblock(ex); |
2944 | 2946 | ||
2945 | BUG_ON(split < ee_block || split >= (ee_block + ee_len)); | 2947 | BUG_ON(split < ee_block || split >= (ee_block + ee_len)); |
2948 | BUG_ON(!ext4_ext_is_uninitialized(ex) && | ||
2949 | split_flag & (EXT4_EXT_MAY_ZEROOUT | | ||
2950 | EXT4_EXT_MARK_UNINIT1 | | ||
2951 | EXT4_EXT_MARK_UNINIT2)); | ||
2946 | 2952 | ||
2947 | err = ext4_ext_get_access(handle, inode, path + depth); | 2953 | err = ext4_ext_get_access(handle, inode, path + depth); |
2948 | if (err) | 2954 | if (err) |
@@ -2990,12 +2996,26 @@ static int ext4_split_extent_at(handle_t *handle, | |||
2990 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); | 2996 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); |
2991 | if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { | 2997 | if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { |
2992 | if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { | 2998 | if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { |
2993 | if (split_flag & EXT4_EXT_DATA_VALID1) | 2999 | if (split_flag & EXT4_EXT_DATA_VALID1) { |
2994 | err = ext4_ext_zeroout(inode, ex2); | 3000 | err = ext4_ext_zeroout(inode, ex2); |
2995 | else | 3001 | zero_ex.ee_block = ex2->ee_block; |
3002 | zero_ex.ee_len = ext4_ext_get_actual_len(ex2); | ||
3003 | ext4_ext_store_pblock(&zero_ex, | ||
3004 | ext4_ext_pblock(ex2)); | ||
3005 | } else { | ||
2996 | err = ext4_ext_zeroout(inode, ex); | 3006 | err = ext4_ext_zeroout(inode, ex); |
2997 | } else | 3007 | zero_ex.ee_block = ex->ee_block; |
3008 | zero_ex.ee_len = ext4_ext_get_actual_len(ex); | ||
3009 | ext4_ext_store_pblock(&zero_ex, | ||
3010 | ext4_ext_pblock(ex)); | ||
3011 | } | ||
3012 | } else { | ||
2998 | err = ext4_ext_zeroout(inode, &orig_ex); | 3013 | err = ext4_ext_zeroout(inode, &orig_ex); |
3014 | zero_ex.ee_block = orig_ex.ee_block; | ||
3015 | zero_ex.ee_len = ext4_ext_get_actual_len(&orig_ex); | ||
3016 | ext4_ext_store_pblock(&zero_ex, | ||
3017 | ext4_ext_pblock(&orig_ex)); | ||
3018 | } | ||
2999 | 3019 | ||
3000 | if (err) | 3020 | if (err) |
3001 | goto fix_extent_len; | 3021 | goto fix_extent_len; |
@@ -3003,6 +3023,12 @@ static int ext4_split_extent_at(handle_t *handle, | |||
3003 | ex->ee_len = cpu_to_le16(ee_len); | 3023 | ex->ee_len = cpu_to_le16(ee_len); |
3004 | ext4_ext_try_to_merge(handle, inode, path, ex); | 3024 | ext4_ext_try_to_merge(handle, inode, path, ex); |
3005 | err = ext4_ext_dirty(handle, inode, path + path->p_depth); | 3025 | err = ext4_ext_dirty(handle, inode, path + path->p_depth); |
3026 | if (err) | ||
3027 | goto fix_extent_len; | ||
3028 | |||
3029 | /* update extent status tree */ | ||
3030 | err = ext4_es_zeroout(inode, &zero_ex); | ||
3031 | |||
3006 | goto out; | 3032 | goto out; |
3007 | } else if (err) | 3033 | } else if (err) |
3008 | goto fix_extent_len; | 3034 | goto fix_extent_len; |
@@ -3041,6 +3067,7 @@ static int ext4_split_extent(handle_t *handle, | |||
3041 | int err = 0; | 3067 | int err = 0; |
3042 | int uninitialized; | 3068 | int uninitialized; |
3043 | int split_flag1, flags1; | 3069 | int split_flag1, flags1; |
3070 | int allocated = map->m_len; | ||
3044 | 3071 | ||
3045 | depth = ext_depth(inode); | 3072 | depth = ext_depth(inode); |
3046 | ex = path[depth].p_ext; | 3073 | ex = path[depth].p_ext; |
@@ -3060,20 +3087,29 @@ static int ext4_split_extent(handle_t *handle, | |||
3060 | map->m_lblk + map->m_len, split_flag1, flags1); | 3087 | map->m_lblk + map->m_len, split_flag1, flags1); |
3061 | if (err) | 3088 | if (err) |
3062 | goto out; | 3089 | goto out; |
3090 | } else { | ||
3091 | allocated = ee_len - (map->m_lblk - ee_block); | ||
3063 | } | 3092 | } |
3064 | 3093 | /* | |
3094 | * Update path is required because previous ext4_split_extent_at() may | ||
3095 | * result in split of original leaf or extent zeroout. | ||
3096 | */ | ||
3065 | ext4_ext_drop_refs(path); | 3097 | ext4_ext_drop_refs(path); |
3066 | path = ext4_ext_find_extent(inode, map->m_lblk, path); | 3098 | path = ext4_ext_find_extent(inode, map->m_lblk, path); |
3067 | if (IS_ERR(path)) | 3099 | if (IS_ERR(path)) |
3068 | return PTR_ERR(path); | 3100 | return PTR_ERR(path); |
3101 | depth = ext_depth(inode); | ||
3102 | ex = path[depth].p_ext; | ||
3103 | uninitialized = ext4_ext_is_uninitialized(ex); | ||
3104 | split_flag1 = 0; | ||
3069 | 3105 | ||
3070 | if (map->m_lblk >= ee_block) { | 3106 | if (map->m_lblk >= ee_block) { |
3071 | split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT | | 3107 | split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; |
3072 | EXT4_EXT_DATA_VALID2); | 3108 | if (uninitialized) { |
3073 | if (uninitialized) | ||
3074 | split_flag1 |= EXT4_EXT_MARK_UNINIT1; | 3109 | split_flag1 |= EXT4_EXT_MARK_UNINIT1; |
3075 | if (split_flag & EXT4_EXT_MARK_UNINIT2) | 3110 | split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | |
3076 | split_flag1 |= EXT4_EXT_MARK_UNINIT2; | 3111 | EXT4_EXT_MARK_UNINIT2); |
3112 | } | ||
3077 | err = ext4_split_extent_at(handle, inode, path, | 3113 | err = ext4_split_extent_at(handle, inode, path, |
3078 | map->m_lblk, split_flag1, flags); | 3114 | map->m_lblk, split_flag1, flags); |
3079 | if (err) | 3115 | if (err) |
@@ -3082,7 +3118,7 @@ static int ext4_split_extent(handle_t *handle, | |||
3082 | 3118 | ||
3083 | ext4_ext_show_leaf(inode, path); | 3119 | ext4_ext_show_leaf(inode, path); |
3084 | out: | 3120 | out: |
3085 | return err ? err : map->m_len; | 3121 | return err ? err : allocated; |
3086 | } | 3122 | } |
3087 | 3123 | ||
3088 | /* | 3124 | /* |
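With the ext4_split_extent() changes above, the function now reports how many blocks are actually covered from the requested position instead of always echoing map->m_len: when no split beyond the end of the map is needed, allocated is recomputed as ee_len - (map->m_lblk - ee_block). A small worked example of that arithmetic, with invented values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int ee_block = 100;  /* extent starts at logical block 100 */
        unsigned int ee_len   = 50;   /* extent covers blocks 100..149 */
        unsigned int m_lblk   = 120;  /* request starts at block 120 */
        unsigned int m_len    = 80;   /* request asks for 80 blocks */

        unsigned int allocated = m_len;

        /* the request runs past the end of the extent, so only the tail
         * of the extent (blocks 120..149) is covered by this call */
        if (m_lblk + m_len > ee_block + ee_len)
            allocated = ee_len - (m_lblk - ee_block);

        printf("allocated = %u blocks\n", allocated);  /* prints 30 */
        return 0;
    }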
@@ -3137,6 +3173,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
3137 | ee_block = le32_to_cpu(ex->ee_block); | 3173 | ee_block = le32_to_cpu(ex->ee_block); |
3138 | ee_len = ext4_ext_get_actual_len(ex); | 3174 | ee_len = ext4_ext_get_actual_len(ex); |
3139 | allocated = ee_len - (map->m_lblk - ee_block); | 3175 | allocated = ee_len - (map->m_lblk - ee_block); |
3176 | zero_ex.ee_len = 0; | ||
3140 | 3177 | ||
3141 | trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); | 3178 | trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); |
3142 | 3179 | ||
@@ -3227,13 +3264,16 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
3227 | 3264 | ||
3228 | if (EXT4_EXT_MAY_ZEROOUT & split_flag) | 3265 | if (EXT4_EXT_MAY_ZEROOUT & split_flag) |
3229 | max_zeroout = sbi->s_extent_max_zeroout_kb >> | 3266 | max_zeroout = sbi->s_extent_max_zeroout_kb >> |
3230 | inode->i_sb->s_blocksize_bits; | 3267 | (inode->i_sb->s_blocksize_bits - 10); |
3231 | 3268 | ||
3232 | /* If extent is less than s_max_zeroout_kb, zeroout directly */ | 3269 | /* If extent is less than s_max_zeroout_kb, zeroout directly */ |
3233 | if (max_zeroout && (ee_len <= max_zeroout)) { | 3270 | if (max_zeroout && (ee_len <= max_zeroout)) { |
3234 | err = ext4_ext_zeroout(inode, ex); | 3271 | err = ext4_ext_zeroout(inode, ex); |
3235 | if (err) | 3272 | if (err) |
3236 | goto out; | 3273 | goto out; |
3274 | zero_ex.ee_block = ex->ee_block; | ||
3275 | zero_ex.ee_len = ext4_ext_get_actual_len(ex); | ||
3276 | ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); | ||
3237 | 3277 | ||
3238 | err = ext4_ext_get_access(handle, inode, path + depth); | 3278 | err = ext4_ext_get_access(handle, inode, path + depth); |
3239 | if (err) | 3279 | if (err) |
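The one-line change above fixes a unit conversion: s_extent_max_zeroout_kb is kept in kibibytes, so turning it into a block count needs a shift by (blocksize_bits - 10), not by blocksize_bits. A quick check with typical numbers (a 32 KiB limit and 4 KiB blocks, chosen only for illustration):

    #include <stdio.h>

    int main(void)
    {
        unsigned int max_zeroout_kb = 32;  /* limit in KiB */
        unsigned int blocksize_bits = 12;  /* 4096-byte blocks */

        /* old (buggy) form: treats the KiB value as if it were bytes */
        unsigned int old_blocks = max_zeroout_kb >> blocksize_bits;

        /* fixed form: 32 KiB = (32 << 10) bytes = 8 four-KiB blocks */
        unsigned int new_blocks = max_zeroout_kb >> (blocksize_bits - 10);

        printf("old: %u blocks, new: %u blocks\n", old_blocks, new_blocks);
        return 0;
    }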
@@ -3292,6 +3332,9 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
3292 | err = allocated; | 3332 | err = allocated; |
3293 | 3333 | ||
3294 | out: | 3334 | out: |
3335 | /* If we have gotten a failure, don't zero out status tree */ | ||
3336 | if (!err) | ||
3337 | err = ext4_es_zeroout(inode, &zero_ex); | ||
3295 | return err ? err : allocated; | 3338 | return err ? err : allocated; |
3296 | } | 3339 | } |
3297 | 3340 | ||
@@ -3374,8 +3417,19 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle, | |||
3374 | "block %llu, max_blocks %u\n", inode->i_ino, | 3417 | "block %llu, max_blocks %u\n", inode->i_ino, |
3375 | (unsigned long long)ee_block, ee_len); | 3418 | (unsigned long long)ee_block, ee_len); |
3376 | 3419 | ||
3377 | /* If extent is larger than requested then split is required */ | 3420 | /* If extent is larger than requested it is a clear sign that we still |
3421 | * have some extent state machine issues left. So extent_split is still | ||
3422 | * required. | ||
3423 | * TODO: Once all related issues are fixed, this situation should be | ||
3424 | * illegal. | ||
3425 | */ | ||
3378 | if (ee_block != map->m_lblk || ee_len > map->m_len) { | 3426 | if (ee_block != map->m_lblk || ee_len > map->m_len) { |
3427 | #ifdef EXT4_DEBUG | ||
3428 | ext4_warning("Inode (%ld) finished: extent logical block %llu," | ||
3429 | " len %u; IO logical block %llu, len %u\n", | ||
3430 | inode->i_ino, (unsigned long long)ee_block, ee_len, | ||
3431 | (unsigned long long)map->m_lblk, map->m_len); | ||
3432 | #endif | ||
3379 | err = ext4_split_unwritten_extents(handle, inode, map, path, | 3433 | err = ext4_split_unwritten_extents(handle, inode, map, path, |
3380 | EXT4_GET_BLOCKS_CONVERT); | 3434 | EXT4_GET_BLOCKS_CONVERT); |
3381 | if (err < 0) | 3435 | if (err < 0) |
@@ -3626,6 +3680,10 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |||
3626 | path, map->m_len); | 3680 | path, map->m_len); |
3627 | } else | 3681 | } else |
3628 | err = ret; | 3682 | err = ret; |
3683 | map->m_flags |= EXT4_MAP_MAPPED; | ||
3684 | if (allocated > map->m_len) | ||
3685 | allocated = map->m_len; | ||
3686 | map->m_len = allocated; | ||
3629 | goto out2; | 3687 | goto out2; |
3630 | } | 3688 | } |
3631 | /* buffered IO case */ | 3689 | /* buffered IO case */ |
@@ -3675,6 +3733,7 @@ out: | |||
3675 | allocated - map->m_len); | 3733 | allocated - map->m_len); |
3676 | allocated = map->m_len; | 3734 | allocated = map->m_len; |
3677 | } | 3735 | } |
3736 | map->m_len = allocated; | ||
3678 | 3737 | ||
3679 | /* | 3738 | /* |
3680 | * If we have done fallocate with the offset that is already | 3739 | * If we have done fallocate with the offset that is already |
@@ -4106,9 +4165,6 @@ got_allocated_blocks: | |||
4106 | } | 4165 | } |
4107 | } else { | 4166 | } else { |
4108 | BUG_ON(allocated_clusters < reserved_clusters); | 4167 | BUG_ON(allocated_clusters < reserved_clusters); |
4109 | /* We will claim quota for all newly allocated blocks.*/ | ||
4110 | ext4_da_update_reserve_space(inode, allocated_clusters, | ||
4111 | 1); | ||
4112 | if (reserved_clusters < allocated_clusters) { | 4168 | if (reserved_clusters < allocated_clusters) { |
4113 | struct ext4_inode_info *ei = EXT4_I(inode); | 4169 | struct ext4_inode_info *ei = EXT4_I(inode); |
4114 | int reservation = allocated_clusters - | 4170 | int reservation = allocated_clusters - |
@@ -4159,6 +4215,15 @@ got_allocated_blocks: | |||
4159 | ei->i_reserved_data_blocks += reservation; | 4215 | ei->i_reserved_data_blocks += reservation; |
4160 | spin_unlock(&ei->i_block_reservation_lock); | 4216 | spin_unlock(&ei->i_block_reservation_lock); |
4161 | } | 4217 | } |
4218 | /* | ||
4219 | * We will claim quota for all newly allocated blocks. | ||
4220 | * We're updating the reserved space *after* the | ||
4221 | * correction above so we do not accidentally free | ||
4222 | * all the metadata reservation because we might | ||
4223 | * actually need it later on. | ||
4224 | */ | ||
4225 | ext4_da_update_reserve_space(inode, allocated_clusters, | ||
4226 | 1); | ||
4162 | } | 4227 | } |
4163 | } | 4228 | } |
4164 | 4229 | ||
@@ -4368,8 +4433,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) | |||
4368 | if (len <= EXT_UNINIT_MAX_LEN << blkbits) | 4433 | if (len <= EXT_UNINIT_MAX_LEN << blkbits) |
4369 | flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; | 4434 | flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; |
4370 | 4435 | ||
4371 | /* Prevent race condition between unwritten */ | ||
4372 | ext4_flush_unwritten_io(inode); | ||
4373 | retry: | 4436 | retry: |
4374 | while (ret >= 0 && ret < max_blocks) { | 4437 | while (ret >= 0 && ret < max_blocks) { |
4375 | map.m_lblk = map.m_lblk + ret; | 4438 | map.m_lblk = map.m_lblk + ret; |
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 95796a1b7522..fe3337a85ede 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c | |||
@@ -333,17 +333,27 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es) | |||
333 | static int ext4_es_can_be_merged(struct extent_status *es1, | 333 | static int ext4_es_can_be_merged(struct extent_status *es1, |
334 | struct extent_status *es2) | 334 | struct extent_status *es2) |
335 | { | 335 | { |
336 | if (es1->es_lblk + es1->es_len != es2->es_lblk) | 336 | if (ext4_es_status(es1) != ext4_es_status(es2)) |
337 | return 0; | 337 | return 0; |
338 | 338 | ||
339 | if (ext4_es_status(es1) != ext4_es_status(es2)) | 339 | if (((__u64) es1->es_len) + es2->es_len > 0xFFFFFFFFULL) |
340 | return 0; | 340 | return 0; |
341 | 341 | ||
342 | if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) && | 342 | if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk) |
343 | (ext4_es_pblock(es1) + es1->es_len != ext4_es_pblock(es2))) | ||
344 | return 0; | 343 | return 0; |
345 | 344 | ||
346 | return 1; | 345 | if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) && |
346 | (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2))) | ||
347 | return 1; | ||
348 | |||
349 | if (ext4_es_is_hole(es1)) | ||
350 | return 1; | ||
351 | |||
352 | /* we need to check that the delayed extent has no unwritten status */ | ||
353 | if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1)) | ||
354 | return 1; | ||
355 | |||
356 | return 0; | ||
347 | } | 357 | } |
348 | 358 | ||
349 | static struct extent_status * | 359 | static struct extent_status * |
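The rewritten ext4_es_can_be_merged() above also refuses merges whose combined length would overflow the 32-bit es_len field, and spells out which status combinations may merge at all. A compact standalone restatement of the new rules, with simplified types and flag values of my own choosing:

    #include <stdint.h>
    #include <stdio.h>

    #define WRITTEN    1u
    #define UNWRITTEN  2u
    #define DELAYED    4u
    #define HOLE       8u

    struct es {
        uint32_t lblk;    /* first logical block */
        uint32_t len;     /* number of blocks */
        uint64_t pblk;    /* physical block, meaningful for written/unwritten */
        unsigned status;
    };

    static int can_be_merged(const struct es *a, const struct es *b)
    {
        if (a->status != b->status)
            return 0;  /* must carry identical status bits */
        if ((uint64_t)a->len + b->len > 0xFFFFFFFFULL)
            return 0;  /* merged length must still fit in 32 bits */
        if ((uint64_t)a->lblk + a->len != b->lblk)
            return 0;  /* must be logically contiguous */

        /* written/unwritten extents must also be physically contiguous */
        if ((a->status & (WRITTEN | UNWRITTEN)) &&
            a->pblk + a->len == b->pblk)
            return 1;

        if (a->status & HOLE)
            return 1;  /* holes carry no physical blocks to compare */

        /* delayed extents merge only while they have no unwritten status */
        if ((a->status & DELAYED) && !(a->status & UNWRITTEN))
            return 1;

        return 0;
    }

    int main(void)
    {
        struct es a = { 0, 10, 1000, WRITTEN };
        struct es b = { 10, 5, 1010, WRITTEN };

        printf("mergeable: %d\n", can_be_merged(&a, &b));  /* prints 1 */
        return 0;
    }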
@@ -389,6 +399,179 @@ ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es) | |||
389 | return es; | 399 | return es; |
390 | } | 400 | } |
391 | 401 | ||
402 | #ifdef ES_AGGRESSIVE_TEST | ||
403 | static void ext4_es_insert_extent_ext_check(struct inode *inode, | ||
404 | struct extent_status *es) | ||
405 | { | ||
406 | struct ext4_ext_path *path = NULL; | ||
407 | struct ext4_extent *ex; | ||
408 | ext4_lblk_t ee_block; | ||
409 | ext4_fsblk_t ee_start; | ||
410 | unsigned short ee_len; | ||
411 | int depth, ee_status, es_status; | ||
412 | |||
413 | path = ext4_ext_find_extent(inode, es->es_lblk, NULL); | ||
414 | if (IS_ERR(path)) | ||
415 | return; | ||
416 | |||
417 | depth = ext_depth(inode); | ||
418 | ex = path[depth].p_ext; | ||
419 | |||
420 | if (ex) { | ||
421 | |||
422 | ee_block = le32_to_cpu(ex->ee_block); | ||
423 | ee_start = ext4_ext_pblock(ex); | ||
424 | ee_len = ext4_ext_get_actual_len(ex); | ||
425 | |||
426 | ee_status = ext4_ext_is_uninitialized(ex) ? 1 : 0; | ||
427 | es_status = ext4_es_is_unwritten(es) ? 1 : 0; | ||
428 | |||
429 | /* | ||
430 | * Make sure ex and es do not overlap when we try to insert | ||
431 | * a delayed/hole extent. | ||
432 | */ | ||
433 | if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) { | ||
434 | if (in_range(es->es_lblk, ee_block, ee_len)) { | ||
435 | pr_warn("ES insert assertation failed for " | ||
436 | "inode: %lu we can find an extent " | ||
437 | "at block [%d/%d/%llu/%c], but we " | ||
438 | "want to add an delayed/hole extent " | ||
439 | "[%d/%d/%llu/%llx]\n", | ||
440 | inode->i_ino, ee_block, ee_len, | ||
441 | ee_start, ee_status ? 'u' : 'w', | ||
442 | es->es_lblk, es->es_len, | ||
443 | ext4_es_pblock(es), ext4_es_status(es)); | ||
444 | } | ||
445 | goto out; | ||
446 | } | ||
447 | |||
448 | /* | ||
449 | * We don't check ee_block == es->es_lblk, etc. because es | ||
450 | * might be a part of the whole extent, or vice versa. | ||
451 | */ | ||
452 | if (es->es_lblk < ee_block || | ||
453 | ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) { | ||
454 | pr_warn("ES insert assertation failed for inode: %lu " | ||
455 | "ex_status [%d/%d/%llu/%c] != " | ||
456 | "es_status [%d/%d/%llu/%c]\n", inode->i_ino, | ||
457 | ee_block, ee_len, ee_start, | ||
458 | ee_status ? 'u' : 'w', es->es_lblk, es->es_len, | ||
459 | ext4_es_pblock(es), es_status ? 'u' : 'w'); | ||
460 | goto out; | ||
461 | } | ||
462 | |||
463 | if (ee_status ^ es_status) { | ||
464 | pr_warn("ES insert assertation failed for inode: %lu " | ||
465 | "ex_status [%d/%d/%llu/%c] != " | ||
466 | "es_status [%d/%d/%llu/%c]\n", inode->i_ino, | ||
467 | ee_block, ee_len, ee_start, | ||
468 | ee_status ? 'u' : 'w', es->es_lblk, es->es_len, | ||
469 | ext4_es_pblock(es), es_status ? 'u' : 'w'); | ||
470 | } | ||
471 | } else { | ||
472 | /* | ||
473 | * We can't find an extent on disk. So we need to make sure | ||
474 | * that we don't want to add a written/unwritten extent. | ||
475 | */ | ||
476 | if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) { | ||
477 | pr_warn("ES insert assertation failed for inode: %lu " | ||
478 | "can't find an extent at block %d but we want " | ||
479 | "to add an written/unwritten extent " | ||
480 | "[%d/%d/%llu/%llx]\n", inode->i_ino, | ||
481 | es->es_lblk, es->es_lblk, es->es_len, | ||
482 | ext4_es_pblock(es), ext4_es_status(es)); | ||
483 | } | ||
484 | } | ||
485 | out: | ||
486 | if (path) { | ||
487 | ext4_ext_drop_refs(path); | ||
488 | kfree(path); | ||
489 | } | ||
490 | } | ||
491 | |||
492 | static void ext4_es_insert_extent_ind_check(struct inode *inode, | ||
493 | struct extent_status *es) | ||
494 | { | ||
495 | struct ext4_map_blocks map; | ||
496 | int retval; | ||
497 | |||
498 | /* | ||
499 | * Here we call ext4_ind_map_blocks to look up a block mapping because | ||
500 | * the 'Indirect' structure is defined in indirect.c, so we cannot | ||
501 | * access the direct/indirect tree from outside it. It would be too | ||
502 | * messy to define this function in indirect.c itself. | ||
503 | */ | ||
504 | |||
505 | map.m_lblk = es->es_lblk; | ||
506 | map.m_len = es->es_len; | ||
507 | |||
508 | retval = ext4_ind_map_blocks(NULL, inode, &map, 0); | ||
509 | if (retval > 0) { | ||
510 | if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) { | ||
511 | /* | ||
512 | * We want to add a delayed/hole extent but this | ||
513 | * block has been allocated. | ||
514 | */ | ||
515 | pr_warn("ES insert assertation failed for inode: %lu " | ||
516 | "We can find blocks but we want to add a " | ||
517 | "delayed/hole extent [%d/%d/%llu/%llx]\n", | ||
518 | inode->i_ino, es->es_lblk, es->es_len, | ||
519 | ext4_es_pblock(es), ext4_es_status(es)); | ||
520 | return; | ||
521 | } else if (ext4_es_is_written(es)) { | ||
522 | if (retval != es->es_len) { | ||
523 | pr_warn("ES insert assertation failed for " | ||
524 | "inode: %lu retval %d != es_len %d\n", | ||
525 | inode->i_ino, retval, es->es_len); | ||
526 | return; | ||
527 | } | ||
528 | if (map.m_pblk != ext4_es_pblock(es)) { | ||
529 | pr_warn("ES insert assertation failed for " | ||
530 | "inode: %lu m_pblk %llu != " | ||
531 | "es_pblk %llu\n", | ||
532 | inode->i_ino, map.m_pblk, | ||
533 | ext4_es_pblock(es)); | ||
534 | return; | ||
535 | } | ||
536 | } else { | ||
537 | /* | ||
538 | * We don't need to check unwritten extent because | ||
539 | * indirect-based file doesn't have it. | ||
540 | */ | ||
541 | BUG_ON(1); | ||
542 | } | ||
543 | } else if (retval == 0) { | ||
544 | if (ext4_es_is_written(es)) { | ||
545 | pr_warn("ES insert assertation failed for inode: %lu " | ||
546 | "We can't find the block but we want to add " | ||
547 | "an written extent [%d/%d/%llu/%llx]\n", | ||
548 | inode->i_ino, es->es_lblk, es->es_len, | ||
549 | ext4_es_pblock(es), ext4_es_status(es)); | ||
550 | return; | ||
551 | } | ||
552 | } | ||
553 | } | ||
554 | |||
555 | static inline void ext4_es_insert_extent_check(struct inode *inode, | ||
556 | struct extent_status *es) | ||
557 | { | ||
558 | /* | ||
559 | * We don't need to worry about the race condition because | ||
560 | * caller takes i_data_sem locking. | ||
561 | */ | ||
562 | BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem)); | ||
563 | if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) | ||
564 | ext4_es_insert_extent_ext_check(inode, es); | ||
565 | else | ||
566 | ext4_es_insert_extent_ind_check(inode, es); | ||
567 | } | ||
568 | #else | ||
569 | static inline void ext4_es_insert_extent_check(struct inode *inode, | ||
570 | struct extent_status *es) | ||
571 | { | ||
572 | } | ||
573 | #endif | ||
574 | |||
392 | static int __es_insert_extent(struct inode *inode, struct extent_status *newes) | 575 | static int __es_insert_extent(struct inode *inode, struct extent_status *newes) |
393 | { | 576 | { |
394 | struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; | 577 | struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; |
@@ -471,6 +654,8 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, | |||
471 | ext4_es_store_status(&newes, status); | 654 | ext4_es_store_status(&newes, status); |
472 | trace_ext4_es_insert_extent(inode, &newes); | 655 | trace_ext4_es_insert_extent(inode, &newes); |
473 | 656 | ||
657 | ext4_es_insert_extent_check(inode, &newes); | ||
658 | |||
474 | write_lock(&EXT4_I(inode)->i_es_lock); | 659 | write_lock(&EXT4_I(inode)->i_es_lock); |
475 | err = __es_remove_extent(inode, lblk, end); | 660 | err = __es_remove_extent(inode, lblk, end); |
476 | if (err != 0) | 661 | if (err != 0) |
@@ -669,6 +854,23 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, | |||
669 | return err; | 854 | return err; |
670 | } | 855 | } |
671 | 856 | ||
857 | int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex) | ||
858 | { | ||
859 | ext4_lblk_t ee_block; | ||
860 | ext4_fsblk_t ee_pblock; | ||
861 | unsigned int ee_len; | ||
862 | |||
863 | ee_block = le32_to_cpu(ex->ee_block); | ||
864 | ee_len = ext4_ext_get_actual_len(ex); | ||
865 | ee_pblock = ext4_ext_pblock(ex); | ||
866 | |||
867 | if (ee_len == 0) | ||
868 | return 0; | ||
869 | |||
870 | return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock, | ||
871 | EXTENT_STATUS_WRITTEN); | ||
872 | } | ||
873 | |||
672 | static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) | 874 | static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) |
673 | { | 875 | { |
674 | struct ext4_sb_info *sbi = container_of(shrink, | 876 | struct ext4_sb_info *sbi = container_of(shrink, |
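The new ext4_es_zeroout() helper above records a zeroed-out range in the extent status tree as WRITTEN, since zeroing turns those blocks into valid data; the callers in extents.c stash the range in a scratch zero_ex and hand it over once the on-disk extent tree update has gone through, and a zero-length zero_ex is simply a no-op. A rough standalone sketch of that calling pattern, with stub types and functions used purely to show the ordering:

    #include <stdio.h>
    #include <string.h>

    struct extent { unsigned int lblk, len; unsigned long long pblk; };

    static int zeroout_blocks(const struct extent *ex)
    {
        printf("zeroing %u blocks at physical block %llu\n", ex->len, ex->pblk);
        return 0;  /* pretend writing the zeroes succeeded */
    }

    static int es_mark_written(const struct extent *ex)
    {
        if (ex->len == 0)
            return 0;  /* nothing was zeroed, nothing to record */
        printf("status tree: logical [%u..%u) is now WRITTEN\n",
               ex->lblk, ex->lblk + ex->len);
        return 0;
    }

    int main(void)
    {
        struct extent ex = { .lblk = 200, .len = 16, .pblk = 9000 };
        struct extent zero_ex;

        memset(&zero_ex, 0, sizeof(zero_ex));  /* len == 0 => later no-op */

        if (zeroout_blocks(&ex) == 0)
            zero_ex = ex;  /* remember what was actually zeroed */

        /* the status-tree update comes only after the extent tree is updated */
        return es_mark_written(&zero_ex);
    }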
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h index f190dfe969da..d8e2d4dc311e 100644 --- a/fs/ext4/extents_status.h +++ b/fs/ext4/extents_status.h | |||
@@ -21,6 +21,12 @@ | |||
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * With ES_AGGRESSIVE_TEST defined, the result of es caching will be | ||
25 | * checked against the result of the old map_blocks lookup. | ||
26 | */ | ||
27 | #define ES_AGGRESSIVE_TEST__ | ||
28 | |||
29 | /* | ||
24 | * These flags live in the high bits of extent_status.es_pblk | 30 | * These flags live in the high bits of extent_status.es_pblk |
25 | */ | 31 | */ |
26 | #define EXTENT_STATUS_WRITTEN (1ULL << 63) | 32 | #define EXTENT_STATUS_WRITTEN (1ULL << 63) |
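One detail worth noticing in the header hunk above: the macro actually defined is ES_AGGRESSIVE_TEST__, with trailing underscores, while the #ifdef blocks in extents_status.c and inode.c test ES_AGGRESSIVE_TEST. As merged, the cross-checking code is therefore compiled out. Enabling it appears to be just a matter of dropping the underscores, roughly:

    /* as committed: the self-test stays disabled */
    #define ES_AGGRESSIVE_TEST__

    /* to enable the extent-status cross-checks (extra warnings, slower): */
    #define ES_AGGRESSIVE_TEST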
@@ -33,6 +39,8 @@ | |||
33 | EXTENT_STATUS_DELAYED | \ | 39 | EXTENT_STATUS_DELAYED | \ |
34 | EXTENT_STATUS_HOLE) | 40 | EXTENT_STATUS_HOLE) |
35 | 41 | ||
42 | struct ext4_extent; | ||
43 | |||
36 | struct extent_status { | 44 | struct extent_status { |
37 | struct rb_node rb_node; | 45 | struct rb_node rb_node; |
38 | ext4_lblk_t es_lblk; /* first logical block extent covers */ | 46 | ext4_lblk_t es_lblk; /* first logical block extent covers */ |
@@ -58,6 +66,7 @@ extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk, | |||
58 | struct extent_status *es); | 66 | struct extent_status *es); |
59 | extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, | 67 | extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, |
60 | struct extent_status *es); | 68 | struct extent_status *es); |
69 | extern int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex); | ||
61 | 70 | ||
62 | static inline int ext4_es_is_written(struct extent_status *es) | 71 | static inline int ext4_es_is_written(struct extent_status *es) |
63 | { | 72 | { |
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 32fd2b9075dd..6c5bb8d993fe 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c | |||
@@ -324,8 +324,8 @@ error_return: | |||
324 | } | 324 | } |
325 | 325 | ||
326 | struct orlov_stats { | 326 | struct orlov_stats { |
327 | __u64 free_clusters; | ||
327 | __u32 free_inodes; | 328 | __u32 free_inodes; |
328 | __u32 free_clusters; | ||
329 | __u32 used_dirs; | 329 | __u32 used_dirs; |
330 | }; | 330 | }; |
331 | 331 | ||
@@ -342,7 +342,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g, | |||
342 | 342 | ||
343 | if (flex_size > 1) { | 343 | if (flex_size > 1) { |
344 | stats->free_inodes = atomic_read(&flex_group[g].free_inodes); | 344 | stats->free_inodes = atomic_read(&flex_group[g].free_inodes); |
345 | stats->free_clusters = atomic_read(&flex_group[g].free_clusters); | 345 | stats->free_clusters = atomic64_read(&flex_group[g].free_clusters); |
346 | stats->used_dirs = atomic_read(&flex_group[g].used_dirs); | 346 | stats->used_dirs = atomic_read(&flex_group[g].used_dirs); |
347 | return; | 347 | return; |
348 | } | 348 | } |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 9ea0cde3fa9e..b3a5213bc73e 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -185,8 +185,6 @@ void ext4_evict_inode(struct inode *inode) | |||
185 | 185 | ||
186 | trace_ext4_evict_inode(inode); | 186 | trace_ext4_evict_inode(inode); |
187 | 187 | ||
188 | ext4_ioend_wait(inode); | ||
189 | |||
190 | if (inode->i_nlink) { | 188 | if (inode->i_nlink) { |
191 | /* | 189 | /* |
192 | * When journalling data dirty buffers are tracked only in the | 190 | * When journalling data dirty buffers are tracked only in the |
@@ -207,7 +205,8 @@ void ext4_evict_inode(struct inode *inode) | |||
207 | * don't use page cache. | 205 | * don't use page cache. |
208 | */ | 206 | */ |
209 | if (ext4_should_journal_data(inode) && | 207 | if (ext4_should_journal_data(inode) && |
210 | (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) { | 208 | (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) && |
209 | inode->i_ino != EXT4_JOURNAL_INO) { | ||
211 | journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; | 210 | journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; |
212 | tid_t commit_tid = EXT4_I(inode)->i_datasync_tid; | 211 | tid_t commit_tid = EXT4_I(inode)->i_datasync_tid; |
213 | 212 | ||
@@ -216,6 +215,7 @@ void ext4_evict_inode(struct inode *inode) | |||
216 | filemap_write_and_wait(&inode->i_data); | 215 | filemap_write_and_wait(&inode->i_data); |
217 | } | 216 | } |
218 | truncate_inode_pages(&inode->i_data, 0); | 217 | truncate_inode_pages(&inode->i_data, 0); |
218 | ext4_ioend_shutdown(inode); | ||
219 | goto no_delete; | 219 | goto no_delete; |
220 | } | 220 | } |
221 | 221 | ||
@@ -225,6 +225,7 @@ void ext4_evict_inode(struct inode *inode) | |||
225 | if (ext4_should_order_data(inode)) | 225 | if (ext4_should_order_data(inode)) |
226 | ext4_begin_ordered_truncate(inode, 0); | 226 | ext4_begin_ordered_truncate(inode, 0); |
227 | truncate_inode_pages(&inode->i_data, 0); | 227 | truncate_inode_pages(&inode->i_data, 0); |
228 | ext4_ioend_shutdown(inode); | ||
228 | 229 | ||
229 | if (is_bad_inode(inode)) | 230 | if (is_bad_inode(inode)) |
230 | goto no_delete; | 231 | goto no_delete; |
@@ -482,6 +483,58 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx, | |||
482 | return num; | 483 | return num; |
483 | } | 484 | } |
484 | 485 | ||
486 | #ifdef ES_AGGRESSIVE_TEST | ||
487 | static void ext4_map_blocks_es_recheck(handle_t *handle, | ||
488 | struct inode *inode, | ||
489 | struct ext4_map_blocks *es_map, | ||
490 | struct ext4_map_blocks *map, | ||
491 | int flags) | ||
492 | { | ||
493 | int retval; | ||
494 | |||
495 | map->m_flags = 0; | ||
496 | /* | ||
497 | * There is a race window in which the two lookups can disagree, | ||
498 | * e.g. xfstests #223 when dioread_nolock is enabled. The reason | ||
499 | * is that we look up a block mapping in the extent status tree | ||
500 | * without taking i_data_sem, so in the meantime the unwritten | ||
501 | * extent could be converted. | ||
502 | */ | ||
503 | if (!(flags & EXT4_GET_BLOCKS_NO_LOCK)) | ||
504 | down_read((&EXT4_I(inode)->i_data_sem)); | ||
505 | if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { | ||
506 | retval = ext4_ext_map_blocks(handle, inode, map, flags & | ||
507 | EXT4_GET_BLOCKS_KEEP_SIZE); | ||
508 | } else { | ||
509 | retval = ext4_ind_map_blocks(handle, inode, map, flags & | ||
510 | EXT4_GET_BLOCKS_KEEP_SIZE); | ||
511 | } | ||
512 | if (!(flags & EXT4_GET_BLOCKS_NO_LOCK)) | ||
513 | up_read((&EXT4_I(inode)->i_data_sem)); | ||
514 | /* | ||
515 | * Clear EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flag | ||
516 | * because it shouldn't be marked in es_map->m_flags. | ||
517 | */ | ||
518 | map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY); | ||
519 | |||
520 | /* | ||
521 | * We don't check m_len because the extent will be collapsed in the | ||
522 | * status tree, so the lengths might not be equal. | ||
523 | */ | ||
524 | if (es_map->m_lblk != map->m_lblk || | ||
525 | es_map->m_flags != map->m_flags || | ||
526 | es_map->m_pblk != map->m_pblk) { | ||
527 | printk("ES cache assertation failed for inode: %lu " | ||
528 | "es_cached ex [%d/%d/%llu/%x] != " | ||
529 | "found ex [%d/%d/%llu/%x] retval %d flags %x\n", | ||
530 | inode->i_ino, es_map->m_lblk, es_map->m_len, | ||
531 | es_map->m_pblk, es_map->m_flags, map->m_lblk, | ||
532 | map->m_len, map->m_pblk, map->m_flags, | ||
533 | retval, flags); | ||
534 | } | ||
535 | } | ||
536 | #endif /* ES_AGGRESSIVE_TEST */ | ||
537 | |||
485 | /* | 538 | /* |
486 | * The ext4_map_blocks() function tries to look up the requested blocks, | 539 | * The ext4_map_blocks() function tries to look up the requested blocks, |
487 | * and returns if the blocks are already mapped. | 540 | * and returns if the blocks are already mapped. |
@@ -509,6 +562,11 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, | |||
509 | { | 562 | { |
510 | struct extent_status es; | 563 | struct extent_status es; |
511 | int retval; | 564 | int retval; |
565 | #ifdef ES_AGGRESSIVE_TEST | ||
566 | struct ext4_map_blocks orig_map; | ||
567 | |||
568 | memcpy(&orig_map, map, sizeof(*map)); | ||
569 | #endif | ||
512 | 570 | ||
513 | map->m_flags = 0; | 571 | map->m_flags = 0; |
514 | ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u," | 572 | ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u," |
@@ -531,6 +589,10 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, | |||
531 | } else { | 589 | } else { |
532 | BUG_ON(1); | 590 | BUG_ON(1); |
533 | } | 591 | } |
592 | #ifdef ES_AGGRESSIVE_TEST | ||
593 | ext4_map_blocks_es_recheck(handle, inode, map, | ||
594 | &orig_map, flags); | ||
595 | #endif | ||
534 | goto found; | 596 | goto found; |
535 | } | 597 | } |
536 | 598 | ||
@@ -551,6 +613,15 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, | |||
551 | int ret; | 613 | int ret; |
552 | unsigned long long status; | 614 | unsigned long long status; |
553 | 615 | ||
616 | #ifdef ES_AGGRESSIVE_TEST | ||
617 | if (retval != map->m_len) { | ||
618 | printk("ES len assertation failed for inode: %lu " | ||
619 | "retval %d != map->m_len %d " | ||
620 | "in %s (lookup)\n", inode->i_ino, retval, | ||
621 | map->m_len, __func__); | ||
622 | } | ||
623 | #endif | ||
624 | |||
554 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? | 625 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? |
555 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; | 626 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; |
556 | if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && | 627 | if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && |
@@ -643,6 +714,24 @@ found: | |||
643 | int ret; | 714 | int ret; |
644 | unsigned long long status; | 715 | unsigned long long status; |
645 | 716 | ||
717 | #ifdef ES_AGGRESSIVE_TEST | ||
718 | if (retval != map->m_len) { | ||
719 | printk("ES len assertation failed for inode: %lu " | ||
720 | "retval %d != map->m_len %d " | ||
721 | "in %s (allocation)\n", inode->i_ino, retval, | ||
722 | map->m_len, __func__); | ||
723 | } | ||
724 | #endif | ||
725 | |||
726 | /* | ||
727 | * If the extent has been zeroed out, we don't need to update | ||
728 | * extent status tree. | ||
729 | */ | ||
730 | if ((flags & EXT4_GET_BLOCKS_PRE_IO) && | ||
731 | ext4_es_lookup_extent(inode, map->m_lblk, &es)) { | ||
732 | if (ext4_es_is_written(&es)) | ||
733 | goto has_zeroout; | ||
734 | } | ||
646 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? | 735 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? |
647 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; | 736 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; |
648 | if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && | 737 | if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && |
@@ -655,6 +744,7 @@ found: | |||
655 | retval = ret; | 744 | retval = ret; |
656 | } | 745 | } |
657 | 746 | ||
747 | has_zeroout: | ||
658 | up_write((&EXT4_I(inode)->i_data_sem)); | 748 | up_write((&EXT4_I(inode)->i_data_sem)); |
659 | if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { | 749 | if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { |
660 | int ret = check_block_validity(inode, map); | 750 | int ret = check_block_validity(inode, map); |
@@ -1216,6 +1306,55 @@ static int ext4_journalled_write_end(struct file *file, | |||
1216 | } | 1306 | } |
1217 | 1307 | ||
1218 | /* | 1308 | /* |
1309 | * Reserve metadata for a single block located at lblock | ||
1310 | */ | ||
1311 | static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) | ||
1312 | { | ||
1313 | int retries = 0; | ||
1314 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | ||
1315 | struct ext4_inode_info *ei = EXT4_I(inode); | ||
1316 | unsigned int md_needed; | ||
1317 | ext4_lblk_t save_last_lblock; | ||
1318 | int save_len; | ||
1319 | |||
1320 | /* | ||
1321 | * recalculate the number of metadata blocks to reserve | ||
1322 | * in order to allocate nrblocks | ||
1323 | * worst case is one extent per block | ||
1324 | */ | ||
1325 | repeat: | ||
1326 | spin_lock(&ei->i_block_reservation_lock); | ||
1327 | /* | ||
1328 | * ext4_calc_metadata_amount() has side effects, which we have | ||
1329 | * to be prepared undo if we fail to claim space. | ||
1330 | */ | ||
1331 | save_len = ei->i_da_metadata_calc_len; | ||
1332 | save_last_lblock = ei->i_da_metadata_calc_last_lblock; | ||
1333 | md_needed = EXT4_NUM_B2C(sbi, | ||
1334 | ext4_calc_metadata_amount(inode, lblock)); | ||
1335 | trace_ext4_da_reserve_space(inode, md_needed); | ||
1336 | |||
1337 | /* | ||
1338 | * We do still charge estimated metadata to the sb though; | ||
1339 | * we cannot afford to run out of free blocks. | ||
1340 | */ | ||
1341 | if (ext4_claim_free_clusters(sbi, md_needed, 0)) { | ||
1342 | ei->i_da_metadata_calc_len = save_len; | ||
1343 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; | ||
1344 | spin_unlock(&ei->i_block_reservation_lock); | ||
1345 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | ||
1346 | cond_resched(); | ||
1347 | goto repeat; | ||
1348 | } | ||
1349 | return -ENOSPC; | ||
1350 | } | ||
1351 | ei->i_reserved_meta_blocks += md_needed; | ||
1352 | spin_unlock(&ei->i_block_reservation_lock); | ||
1353 | |||
1354 | return 0; /* success */ | ||
1355 | } | ||
1356 | |||
1357 | /* | ||
1219 | * Reserve a single cluster located at lblock | 1358 | * Reserve a single cluster located at lblock |
1220 | */ | 1359 | */ |
1221 | static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) | 1360 | static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) |
@@ -1263,7 +1402,7 @@ repeat: | |||
1263 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; | 1402 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; |
1264 | spin_unlock(&ei->i_block_reservation_lock); | 1403 | spin_unlock(&ei->i_block_reservation_lock); |
1265 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | 1404 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { |
1266 | yield(); | 1405 | cond_resched(); |
1267 | goto repeat; | 1406 | goto repeat; |
1268 | } | 1407 | } |
1269 | dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); | 1408 | dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); |
@@ -1768,6 +1907,11 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, | |||
1768 | struct extent_status es; | 1907 | struct extent_status es; |
1769 | int retval; | 1908 | int retval; |
1770 | sector_t invalid_block = ~((sector_t) 0xffff); | 1909 | sector_t invalid_block = ~((sector_t) 0xffff); |
1910 | #ifdef ES_AGGRESSIVE_TEST | ||
1911 | struct ext4_map_blocks orig_map; | ||
1912 | |||
1913 | memcpy(&orig_map, map, sizeof(*map)); | ||
1914 | #endif | ||
1771 | 1915 | ||
1772 | if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) | 1916 | if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) |
1773 | invalid_block = ~0; | 1917 | invalid_block = ~0; |
@@ -1809,6 +1953,9 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, | |||
1809 | else | 1953 | else |
1810 | BUG_ON(1); | 1954 | BUG_ON(1); |
1811 | 1955 | ||
1956 | #ifdef ES_AGGRESSIVE_TEST | ||
1957 | ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0); | ||
1958 | #endif | ||
1812 | return retval; | 1959 | return retval; |
1813 | } | 1960 | } |
1814 | 1961 | ||
@@ -1843,8 +1990,11 @@ add_delayed: | |||
1843 | * XXX: __block_prepare_write() unmaps passed block, | 1990 | * XXX: __block_prepare_write() unmaps passed block, |
1844 | * is it OK? | 1991 | * is it OK? |
1845 | */ | 1992 | */ |
1846 | /* If the block was allocated from previously allocated cluster, | 1993 | /* |
1847 | * then we dont need to reserve it again. */ | 1994 | * If the block was allocated from a previously allocated cluster, |
1995 | * then we don't need to reserve it again. However we still need | ||
1996 | * to reserve metadata for every block we're going to write. | ||
1997 | */ | ||
1848 | if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) { | 1998 | if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) { |
1849 | ret = ext4_da_reserve_space(inode, iblock); | 1999 | ret = ext4_da_reserve_space(inode, iblock); |
1850 | if (ret) { | 2000 | if (ret) { |
@@ -1852,6 +2002,13 @@ add_delayed: | |||
1852 | retval = ret; | 2002 | retval = ret; |
1853 | goto out_unlock; | 2003 | goto out_unlock; |
1854 | } | 2004 | } |
2005 | } else { | ||
2006 | ret = ext4_da_reserve_metadata(inode, iblock); | ||
2007 | if (ret) { | ||
2008 | /* not enough space to reserve */ | ||
2009 | retval = ret; | ||
2010 | goto out_unlock; | ||
2011 | } | ||
1855 | } | 2012 | } |
1856 | 2013 | ||
1857 | ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, | 2014 | ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, |
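With the hunk above, the delayed-allocation path distinguishes two cases: a block landing in a brand-new cluster still reserves the data cluster (plus metadata) as before, while a block that falls inside an already-allocated cluster now reserves only metadata via the new ext4_da_reserve_metadata(), instead of skipping reservation entirely. A condensed sketch of the branch, with stand-in functions (the real helpers also take i_block_reservation_lock and account quota):

    #include <stdio.h>

    static int reserve_cluster_and_metadata(void)
    {
        printf("reserved one data cluster plus its metadata estimate\n");
        return 0;
    }

    static int reserve_metadata_only(void)
    {
        printf("reserved metadata estimate only\n");
        return 0;
    }

    static int da_reserve(int block_from_allocated_cluster)
    {
        /*
         * New cluster: reserve the data cluster and its metadata.
         * Existing cluster: the data blocks are already covered, but
         * extent-tree/index blocks may still be needed, so metadata
         * is reserved anyway.
         */
        if (!block_from_allocated_cluster)
            return reserve_cluster_and_metadata();
        return reserve_metadata_only();
    }

    int main(void)
    {
        da_reserve(0);  /* EXT4_MAP_FROM_CLUSTER not set */
        da_reserve(1);  /* EXT4_MAP_FROM_CLUSTER set */
        return 0;
    }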
@@ -1873,6 +2030,15 @@ add_delayed: | |||
1873 | int ret; | 2030 | int ret; |
1874 | unsigned long long status; | 2031 | unsigned long long status; |
1875 | 2032 | ||
2033 | #ifdef ES_AGGRESSIVE_TEST | ||
2034 | if (retval != map->m_len) { | ||
2035 | printk("ES len assertation failed for inode: %lu " | ||
2036 | "retval %d != map->m_len %d " | ||
2037 | "in %s (lookup)\n", inode->i_ino, retval, | ||
2038 | map->m_len, __func__); | ||
2039 | } | ||
2040 | #endif | ||
2041 | |||
1876 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? | 2042 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? |
1877 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; | 2043 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; |
1878 | ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, | 2044 | ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, |
@@ -2908,8 +3074,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait) | |||
2908 | 3074 | ||
2909 | trace_ext4_releasepage(page); | 3075 | trace_ext4_releasepage(page); |
2910 | 3076 | ||
2911 | WARN_ON(PageChecked(page)); | 3077 | /* Page has dirty journalled data -> cannot release */ |
2912 | if (!page_has_buffers(page)) | 3078 | if (PageChecked(page)) |
2913 | return 0; | 3079 | return 0; |
2914 | if (journal) | 3080 | if (journal) |
2915 | return jbd2_journal_try_to_free_buffers(journal, page, wait); | 3081 | return jbd2_journal_try_to_free_buffers(journal, page, wait); |
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 7bb713a46fe4..ee6614bdb639 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -2804,8 +2804,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, | |||
2804 | if (sbi->s_log_groups_per_flex) { | 2804 | if (sbi->s_log_groups_per_flex) { |
2805 | ext4_group_t flex_group = ext4_flex_group(sbi, | 2805 | ext4_group_t flex_group = ext4_flex_group(sbi, |
2806 | ac->ac_b_ex.fe_group); | 2806 | ac->ac_b_ex.fe_group); |
2807 | atomic_sub(ac->ac_b_ex.fe_len, | 2807 | atomic64_sub(ac->ac_b_ex.fe_len, |
2808 | &sbi->s_flex_groups[flex_group].free_clusters); | 2808 | &sbi->s_flex_groups[flex_group].free_clusters); |
2809 | } | 2809 | } |
2810 | 2810 | ||
2811 | err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); | 2811 | err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); |
@@ -3692,11 +3692,7 @@ repeat: | |||
3692 | if (free < needed && busy) { | 3692 | if (free < needed && busy) { |
3693 | busy = 0; | 3693 | busy = 0; |
3694 | ext4_unlock_group(sb, group); | 3694 | ext4_unlock_group(sb, group); |
3695 | /* | 3695 | cond_resched(); |
3696 | * Yield the CPU here so that we don't get soft lockup | ||
3697 | * in non preempt case. | ||
3698 | */ | ||
3699 | yield(); | ||
3700 | goto repeat; | 3696 | goto repeat; |
3701 | } | 3697 | } |
3702 | 3698 | ||
@@ -4246,7 +4242,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, | |||
4246 | ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { | 4242 | ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { |
4247 | 4243 | ||
4248 | /* let others to free the space */ | 4244 | /* let others to free the space */ |
4249 | yield(); | 4245 | cond_resched(); |
4250 | ar->len = ar->len >> 1; | 4246 | ar->len = ar->len >> 1; |
4251 | } | 4247 | } |
4252 | if (!ar->len) { | 4248 | if (!ar->len) { |
@@ -4464,7 +4460,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, | |||
4464 | struct buffer_head *bitmap_bh = NULL; | 4460 | struct buffer_head *bitmap_bh = NULL; |
4465 | struct super_block *sb = inode->i_sb; | 4461 | struct super_block *sb = inode->i_sb; |
4466 | struct ext4_group_desc *gdp; | 4462 | struct ext4_group_desc *gdp; |
4467 | unsigned long freed = 0; | ||
4468 | unsigned int overflow; | 4463 | unsigned int overflow; |
4469 | ext4_grpblk_t bit; | 4464 | ext4_grpblk_t bit; |
4470 | struct buffer_head *gd_bh; | 4465 | struct buffer_head *gd_bh; |
@@ -4666,14 +4661,12 @@ do_more: | |||
4666 | 4661 | ||
4667 | if (sbi->s_log_groups_per_flex) { | 4662 | if (sbi->s_log_groups_per_flex) { |
4668 | ext4_group_t flex_group = ext4_flex_group(sbi, block_group); | 4663 | ext4_group_t flex_group = ext4_flex_group(sbi, block_group); |
4669 | atomic_add(count_clusters, | 4664 | atomic64_add(count_clusters, |
4670 | &sbi->s_flex_groups[flex_group].free_clusters); | 4665 | &sbi->s_flex_groups[flex_group].free_clusters); |
4671 | } | 4666 | } |
4672 | 4667 | ||
4673 | ext4_mb_unload_buddy(&e4b); | 4668 | ext4_mb_unload_buddy(&e4b); |
4674 | 4669 | ||
4675 | freed += count; | ||
4676 | |||
4677 | if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) | 4670 | if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) |
4678 | dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); | 4671 | dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); |
4679 | 4672 | ||
@@ -4811,8 +4804,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, | |||
4811 | 4804 | ||
4812 | if (sbi->s_log_groups_per_flex) { | 4805 | if (sbi->s_log_groups_per_flex) { |
4813 | ext4_group_t flex_group = ext4_flex_group(sbi, block_group); | 4806 | ext4_group_t flex_group = ext4_flex_group(sbi, block_group); |
4814 | atomic_add(EXT4_NUM_B2C(sbi, blocks_freed), | 4807 | atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed), |
4815 | &sbi->s_flex_groups[flex_group].free_clusters); | 4808 | &sbi->s_flex_groups[flex_group].free_clusters); |
4816 | } | 4809 | } |
4817 | 4810 | ||
4818 | ext4_mb_unload_buddy(&e4b); | 4811 | ext4_mb_unload_buddy(&e4b); |
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 4e81d47aa8cb..33e1c086858b 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c | |||
@@ -32,16 +32,18 @@ | |||
32 | */ | 32 | */ |
33 | static inline int | 33 | static inline int |
34 | get_ext_path(struct inode *inode, ext4_lblk_t lblock, | 34 | get_ext_path(struct inode *inode, ext4_lblk_t lblock, |
35 | struct ext4_ext_path **path) | 35 | struct ext4_ext_path **orig_path) |
36 | { | 36 | { |
37 | int ret = 0; | 37 | int ret = 0; |
38 | struct ext4_ext_path *path; | ||
38 | 39 | ||
39 | *path = ext4_ext_find_extent(inode, lblock, *path); | 40 | path = ext4_ext_find_extent(inode, lblock, *orig_path); |
40 | if (IS_ERR(*path)) { | 41 | if (IS_ERR(path)) |
41 | ret = PTR_ERR(*path); | 42 | ret = PTR_ERR(path); |
42 | *path = NULL; | 43 | else if (path[ext_depth(inode)].p_ext == NULL) |
43 | } else if ((*path)[ext_depth(inode)].p_ext == NULL) | ||
44 | ret = -ENODATA; | 44 | ret = -ENODATA; |
45 | else | ||
46 | *orig_path = path; | ||
45 | 47 | ||
46 | return ret; | 48 | return ret; |
47 | } | 49 | } |
@@ -611,24 +613,25 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count, | |||
611 | { | 613 | { |
612 | struct ext4_ext_path *path = NULL; | 614 | struct ext4_ext_path *path = NULL; |
613 | struct ext4_extent *ext; | 615 | struct ext4_extent *ext; |
616 | int ret = 0; | ||
614 | ext4_lblk_t last = from + count; | 617 | ext4_lblk_t last = from + count; |
615 | while (from < last) { | 618 | while (from < last) { |
616 | *err = get_ext_path(inode, from, &path); | 619 | *err = get_ext_path(inode, from, &path); |
617 | if (*err) | 620 | if (*err) |
618 | return 0; | 621 | goto out; |
619 | ext = path[ext_depth(inode)].p_ext; | 622 | ext = path[ext_depth(inode)].p_ext; |
620 | if (!ext) { | 623 | if (uninit != ext4_ext_is_uninitialized(ext)) |
621 | ext4_ext_drop_refs(path); | 624 | goto out; |
622 | return 0; | ||
623 | } | ||
624 | if (uninit != ext4_ext_is_uninitialized(ext)) { | ||
625 | ext4_ext_drop_refs(path); | ||
626 | return 0; | ||
627 | } | ||
628 | from += ext4_ext_get_actual_len(ext); | 625 | from += ext4_ext_get_actual_len(ext); |
629 | ext4_ext_drop_refs(path); | 626 | ext4_ext_drop_refs(path); |
630 | } | 627 | } |
631 | return 1; | 628 | ret = 1; |
629 | out: | ||
630 | if (path) { | ||
631 | ext4_ext_drop_refs(path); | ||
632 | kfree(path); | ||
633 | } | ||
634 | return ret; | ||
632 | } | 635 | } |
633 | 636 | ||
634 | /** | 637 | /** |
@@ -666,6 +669,14 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, | |||
666 | int replaced_count = 0; | 669 | int replaced_count = 0; |
667 | int dext_alen; | 670 | int dext_alen; |
668 | 671 | ||
672 | *err = ext4_es_remove_extent(orig_inode, from, count); | ||
673 | if (*err) | ||
674 | goto out; | ||
675 | |||
676 | *err = ext4_es_remove_extent(donor_inode, from, count); | ||
677 | if (*err) | ||
678 | goto out; | ||
679 | |||
669 | /* Get the original extent for the block "orig_off" */ | 680 | /* Get the original extent for the block "orig_off" */ |
670 | *err = get_ext_path(orig_inode, orig_off, &orig_path); | 681 | *err = get_ext_path(orig_inode, orig_off, &orig_path); |
671 | if (*err) | 682 | if (*err) |
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 809b31003ecc..047a6de04a0a 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c | |||
@@ -50,11 +50,21 @@ void ext4_exit_pageio(void) | |||
50 | kmem_cache_destroy(io_page_cachep); | 50 | kmem_cache_destroy(io_page_cachep); |
51 | } | 51 | } |
52 | 52 | ||
53 | void ext4_ioend_wait(struct inode *inode) | 53 | /* |
54 | * This function is called by ext4_evict_inode() to make sure there is | ||
55 | * no more pending I/O completion work left to do. | ||
56 | */ | ||
57 | void ext4_ioend_shutdown(struct inode *inode) | ||
54 | { | 58 | { |
55 | wait_queue_head_t *wq = ext4_ioend_wq(inode); | 59 | wait_queue_head_t *wq = ext4_ioend_wq(inode); |
56 | 60 | ||
57 | wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0)); | 61 | wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0)); |
62 | /* | ||
63 | * We need to make sure the work structure is finished being | ||
64 | * used before we let the inode get destroyed. | ||
65 | */ | ||
66 | if (work_pending(&EXT4_I(inode)->i_unwritten_work)) | ||
67 | cancel_work_sync(&EXT4_I(inode)->i_unwritten_work); | ||
58 | } | 68 | } |
59 | 69 | ||
60 | static void put_io_page(struct ext4_io_page *io_page) | 70 | static void put_io_page(struct ext4_io_page *io_page) |
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index b2c8ee56eb98..c169477a62c9 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c | |||
@@ -1360,8 +1360,8 @@ static void ext4_update_super(struct super_block *sb, | |||
1360 | sbi->s_log_groups_per_flex) { | 1360 | sbi->s_log_groups_per_flex) { |
1361 | ext4_group_t flex_group; | 1361 | ext4_group_t flex_group; |
1362 | flex_group = ext4_flex_group(sbi, group_data[0].group); | 1362 | flex_group = ext4_flex_group(sbi, group_data[0].group); |
1363 | atomic_add(EXT4_NUM_B2C(sbi, free_blocks), | 1363 | atomic64_add(EXT4_NUM_B2C(sbi, free_blocks), |
1364 | &sbi->s_flex_groups[flex_group].free_clusters); | 1364 | &sbi->s_flex_groups[flex_group].free_clusters); |
1365 | atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, | 1365 | atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, |
1366 | &sbi->s_flex_groups[flex_group].free_inodes); | 1366 | &sbi->s_flex_groups[flex_group].free_inodes); |
1367 | } | 1367 | } |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index b3818b48f418..5d6d53578124 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -1927,8 +1927,8 @@ static int ext4_fill_flex_info(struct super_block *sb) | |||
1927 | flex_group = ext4_flex_group(sbi, i); | 1927 | flex_group = ext4_flex_group(sbi, i); |
1928 | atomic_add(ext4_free_inodes_count(sb, gdp), | 1928 | atomic_add(ext4_free_inodes_count(sb, gdp), |
1929 | &sbi->s_flex_groups[flex_group].free_inodes); | 1929 | &sbi->s_flex_groups[flex_group].free_inodes); |
1930 | atomic_add(ext4_free_group_clusters(sb, gdp), | 1930 | atomic64_add(ext4_free_group_clusters(sb, gdp), |
1931 | &sbi->s_flex_groups[flex_group].free_clusters); | 1931 | &sbi->s_flex_groups[flex_group].free_clusters); |
1932 | atomic_add(ext4_used_dirs_count(sb, gdp), | 1932 | atomic_add(ext4_used_dirs_count(sb, gdp), |
1933 | &sbi->s_flex_groups[flex_group].used_dirs); | 1933 | &sbi->s_flex_groups[flex_group].used_dirs); |
1934 | } | 1934 | } |
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index d6ee5aed56b1..325bc019ed88 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c | |||
@@ -1065,9 +1065,12 @@ out: | |||
1065 | void jbd2_journal_set_triggers(struct buffer_head *bh, | 1065 | void jbd2_journal_set_triggers(struct buffer_head *bh, |
1066 | struct jbd2_buffer_trigger_type *type) | 1066 | struct jbd2_buffer_trigger_type *type) |
1067 | { | 1067 | { |
1068 | struct journal_head *jh = bh2jh(bh); | 1068 | struct journal_head *jh = jbd2_journal_grab_journal_head(bh); |
1069 | 1069 | ||
1070 | if (WARN_ON(!jh)) | ||
1071 | return; | ||
1070 | jh->b_triggers = type; | 1072 | jh->b_triggers = type; |
1073 | jbd2_journal_put_journal_head(jh); | ||
1071 | } | 1074 | } |
1072 | 1075 | ||
1073 | void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data, | 1076 | void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data, |
@@ -1119,17 +1122,18 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | |||
1119 | { | 1122 | { |
1120 | transaction_t *transaction = handle->h_transaction; | 1123 | transaction_t *transaction = handle->h_transaction; |
1121 | journal_t *journal = transaction->t_journal; | 1124 | journal_t *journal = transaction->t_journal; |
1122 | struct journal_head *jh = bh2jh(bh); | 1125 | struct journal_head *jh; |
1123 | int ret = 0; | 1126 | int ret = 0; |
1124 | 1127 | ||
1125 | jbd_debug(5, "journal_head %p\n", jh); | ||
1126 | JBUFFER_TRACE(jh, "entry"); | ||
1127 | if (is_handle_aborted(handle)) | 1128 | if (is_handle_aborted(handle)) |
1128 | goto out; | 1129 | goto out; |
1129 | if (!buffer_jbd(bh)) { | 1130 | jh = jbd2_journal_grab_journal_head(bh); |
1131 | if (!jh) { | ||
1130 | ret = -EUCLEAN; | 1132 | ret = -EUCLEAN; |
1131 | goto out; | 1133 | goto out; |
1132 | } | 1134 | } |
1135 | jbd_debug(5, "journal_head %p\n", jh); | ||
1136 | JBUFFER_TRACE(jh, "entry"); | ||
1133 | 1137 | ||
1134 | jbd_lock_bh_state(bh); | 1138 | jbd_lock_bh_state(bh); |
1135 | 1139 | ||
@@ -1220,6 +1224,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | |||
1220 | spin_unlock(&journal->j_list_lock); | 1224 | spin_unlock(&journal->j_list_lock); |
1221 | out_unlock_bh: | 1225 | out_unlock_bh: |
1222 | jbd_unlock_bh_state(bh); | 1226 | jbd_unlock_bh_state(bh); |
1227 | jbd2_journal_put_journal_head(jh); | ||
1223 | out: | 1228 | out: |
1224 | JBUFFER_TRACE(jh, "exit"); | 1229 | JBUFFER_TRACE(jh, "exit"); |
1225 | WARN_ON(ret); /* All errors are bugs, so dump the stack */ | 1230 | WARN_ON(ret); /* All errors are bugs, so dump the stack */ |
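Both jbd2 hunks above replace a bare bh2jh(bh) dereference with jbd2_journal_grab_journal_head(), which takes a reference and can return NULL when the buffer is not journaled, and pair it with jbd2_journal_put_journal_head() on the exit path. The shape of that lookup-with-reference pattern, reduced to a standalone sketch (stub structures and functions, not the jbd2 API):

    #include <stdio.h>

    struct buffer { struct journal_head *jh; };
    struct journal_head { int refcount; int triggers; };

    /* grab: return the journal head with an extra reference, or NULL */
    static struct journal_head *grab_journal_head(struct buffer *b)
    {
        if (!b->jh)
            return NULL;
        b->jh->refcount++;
        return b->jh;
    }

    static void put_journal_head(struct journal_head *jh)
    {
        jh->refcount--;
    }

    static void set_triggers(struct buffer *b, int type)
    {
        struct journal_head *jh = grab_journal_head(b);

        if (!jh)   /* buffer is not journaled: warn and bail out in jbd2 */
            return;
        jh->triggers = type;
        put_journal_head(jh);  /* drop the reference on the way out */
    }

    int main(void)
    {
        struct journal_head jh = { .refcount = 1 };
        struct buffer with = { .jh = &jh }, without = { .jh = NULL };

        set_triggers(&with, 42);
        set_triggers(&without, 42);  /* safely ignored instead of crashing */
        printf("refcount back to %d, triggers %d\n", jh.refcount, jh.triggers);
        return 0;
    }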
diff --git a/fs/proc/inode.c b/fs/proc/inode.c index a86aebc9ba7c..869116c2afbe 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c | |||
@@ -446,9 +446,10 @@ static const struct file_operations proc_reg_file_ops_no_compat = { | |||
446 | 446 | ||
447 | struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) | 447 | struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) |
448 | { | 448 | { |
449 | struct inode *inode = iget_locked(sb, de->low_ino); | 449 | struct inode *inode = new_inode_pseudo(sb); |
450 | 450 | ||
451 | if (inode && (inode->i_state & I_NEW)) { | 451 | if (inode) { |
452 | inode->i_ino = de->low_ino; | ||
452 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | 453 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
453 | PROC_I(inode)->pde = de; | 454 | PROC_I(inode)->pde = de; |
454 | 455 | ||
@@ -476,7 +477,6 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) | |||
476 | inode->i_fop = de->proc_fops; | 477 | inode->i_fop = de->proc_fops; |
477 | } | 478 | } |
478 | } | 479 | } |
479 | unlock_new_inode(inode); | ||
480 | } else | 480 | } else |
481 | pde_put(de); | 481 | pde_put(de); |
482 | return inode; | 482 | return inode; |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 4e8f0df82d02..8459b5d8cb71 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -1334,6 +1334,12 @@ _xfs_buf_ioapply( | |||
1334 | int size; | 1334 | int size; |
1335 | int i; | 1335 | int i; |
1336 | 1336 | ||
1337 | /* | ||
1338 | * Make sure we capture only current IO errors rather than stale errors | ||
1339 | * left over from previous use of the buffer (e.g. failed readahead). | ||
1340 | */ | ||
1341 | bp->b_error = 0; | ||
1342 | |||
1337 | if (bp->b_flags & XBF_WRITE) { | 1343 | if (bp->b_flags & XBF_WRITE) { |
1338 | if (bp->b_flags & XBF_SYNCIO) | 1344 | if (bp->b_flags & XBF_SYNCIO) |
1339 | rw = WRITE_SYNC; | 1345 | rw = WRITE_SYNC; |
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 912d83d8860a..5a30dd899d2b 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
@@ -325,7 +325,7 @@ xfs_iomap_eof_want_preallocate( | |||
325 | * rather than falling short due to things like stripe unit/width alignment of | 325 | * rather than falling short due to things like stripe unit/width alignment of |
326 | * real extents. | 326 | * real extents. |
327 | */ | 327 | */ |
328 | STATIC int | 328 | STATIC xfs_fsblock_t |
329 | xfs_iomap_eof_prealloc_initial_size( | 329 | xfs_iomap_eof_prealloc_initial_size( |
330 | struct xfs_mount *mp, | 330 | struct xfs_mount *mp, |
331 | struct xfs_inode *ip, | 331 | struct xfs_inode *ip, |
@@ -413,7 +413,7 @@ xfs_iomap_prealloc_size( | |||
413 | * have a large file on a small filesystem and the above | 413 | * have a large file on a small filesystem and the above |
414 | * lowspace thresholds are smaller than MAXEXTLEN. | 414 | * lowspace thresholds are smaller than MAXEXTLEN. |
415 | */ | 415 | */ |
416 | while (alloc_blocks >= freesp) | 416 | while (alloc_blocks && alloc_blocks >= freesp) |
417 | alloc_blocks >>= 4; | 417 | alloc_blocks >>= 4; |
418 | } | 418 | } |
419 | 419 | ||
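The added "alloc_blocks &&" test above is what guarantees termination: 0 >> 4 stays 0, so with freesp == 0 the old "alloc_blocks >= freesp" condition would hold forever. A minimal stand-alone sketch of the guarded loop (hypothetical helper, not kernel code):

	/* Shrink a preallocation until it drops below the free-space threshold.
	 * The leading "blocks &&" check ends the loop once the value reaches
	 * zero, even when freesp is also zero. */
	static unsigned long long shrink_prealloc(unsigned long long blocks,
						  unsigned long long freesp)
	{
		while (blocks && blocks >= freesp)
			blocks >>= 4;
		return blocks;
	}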
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index a386b0b654cc..918e8fe2f5e9 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
@@ -581,7 +581,11 @@ | |||
581 | {0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 581 | {0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
582 | {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 582 | {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
583 | {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 583 | {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
584 | {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 584 | {0x1002, 0x990B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
585 | {0x1002, 0x990C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
586 | {0x1002, 0x990D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
587 | {0x1002, 0x990E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
588 | {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
585 | {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 589 | {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
586 | {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 590 | {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
587 | {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 591 | {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
@@ -592,6 +596,13 @@ | |||
592 | {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 596 | {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
593 | {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 597 | {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
594 | {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 598 | {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
599 | {0x1002, 0x9995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
600 | {0x1002, 0x9996, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
601 | {0x1002, 0x9997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
602 | {0x1002, 0x9998, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
603 | {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
604 | {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
605 | {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
595 | {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 606 | {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
596 | {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 607 | {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
597 | {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 608 | {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
diff --git a/include/linux/edac.h b/include/linux/edac.h index 4fd4999ccb5b..0b763276f619 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h | |||
@@ -561,7 +561,6 @@ struct csrow_info { | |||
561 | 561 | ||
562 | u32 ue_count; /* Uncorrectable Errors for this csrow */ | 562 | u32 ue_count; /* Uncorrectable Errors for this csrow */ |
563 | u32 ce_count; /* Correctable Errors for this csrow */ | 563 | u32 ce_count; /* Correctable Errors for this csrow */ |
564 | u32 nr_pages; /* combined pages count of all channels */ | ||
565 | 564 | ||
566 | struct mem_ctl_info *mci; /* the parent */ | 565 | struct mem_ctl_info *mci; /* the parent */ |
567 | 566 | ||
@@ -676,11 +675,11 @@ struct mem_ctl_info { | |||
676 | * sees memory sticks ("dimms"), and the ones that sees memory ranks. | 675 | * sees memory sticks ("dimms"), and the ones that sees memory ranks. |
677 | * All old memory controllers enumerate memories per rank, but most | 676 | * All old memory controllers enumerate memories per rank, but most |
678 | * of the recent drivers enumerate memories per DIMM, instead. | 677 | * of the recent drivers enumerate memories per DIMM, instead. |
679 | * When the memory controller is per rank, mem_is_per_rank is true. | 678 | * When the memory controller is per rank, csbased is true. |
680 | */ | 679 | */ |
681 | unsigned n_layers; | 680 | unsigned n_layers; |
682 | struct edac_mc_layer *layers; | 681 | struct edac_mc_layer *layers; |
683 | bool mem_is_per_rank; | 682 | bool csbased; |
684 | 683 | ||
685 | /* | 684 | /* |
686 | * DIMM info. Will eventually remove the entire csrows_info some day | 685 | * DIMM info. Will eventually remove the entire csrows_info some day |
@@ -741,8 +740,6 @@ struct mem_ctl_info { | |||
741 | u32 fake_inject_ue; | 740 | u32 fake_inject_ue; |
742 | u16 fake_inject_count; | 741 | u16 fake_inject_count; |
743 | #endif | 742 | #endif |
744 | __u8 csbased : 1, /* csrow-based memory controller */ | ||
745 | __resv : 7; | ||
746 | }; | 743 | }; |
747 | 744 | ||
748 | #endif | 745 | #endif |
diff --git a/include/linux/hash.h b/include/linux/hash.h index 61c97ae22e01..f09a0ae4d858 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h | |||
@@ -15,6 +15,7 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <asm/types.h> | 17 | #include <asm/types.h> |
18 | #include <linux/compiler.h> | ||
18 | 19 | ||
19 | /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ | 20 | /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ |
20 | #define GOLDEN_RATIO_PRIME_32 0x9e370001UL | 21 | #define GOLDEN_RATIO_PRIME_32 0x9e370001UL |
@@ -31,7 +32,7 @@ | |||
31 | #error Wordsize not 32 or 64 | 32 | #error Wordsize not 32 or 64 |
32 | #endif | 33 | #endif |
33 | 34 | ||
34 | static inline u64 hash_64(u64 val, unsigned int bits) | 35 | static __always_inline u64 hash_64(u64 val, unsigned int bits) |
35 | { | 36 | { |
36 | u64 hash = val; | 37 | u64 hash = val; |
37 | 38 | ||
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index f5dbce50466e..66017028dcb3 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h | |||
@@ -37,7 +37,7 @@ void irq_work_sync(struct irq_work *work); | |||
37 | #ifdef CONFIG_IRQ_WORK | 37 | #ifdef CONFIG_IRQ_WORK |
38 | bool irq_work_needs_cpu(void); | 38 | bool irq_work_needs_cpu(void); |
39 | #else | 39 | #else |
40 | static bool irq_work_needs_cpu(void) { return false; } | 40 | static inline bool irq_work_needs_cpu(void) { return false; } |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | #endif /* _LINUX_IRQ_WORK_H */ | 43 | #endif /* _LINUX_IRQ_WORK_H */ |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 80d36874689b..79fdd80a42d4 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -390,7 +390,6 @@ extern struct pid *session_of_pgrp(struct pid *pgrp); | |||
390 | unsigned long int_sqrt(unsigned long); | 390 | unsigned long int_sqrt(unsigned long); |
391 | 391 | ||
392 | extern void bust_spinlocks(int yes); | 392 | extern void bust_spinlocks(int yes); |
393 | extern void wake_up_klogd(void); | ||
394 | extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ | 393 | extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ |
395 | extern int panic_timeout; | 394 | extern int panic_timeout; |
396 | extern int panic_on_oops; | 395 | extern int panic_on_oops; |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ede274957e05..c74092eebf5c 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -527,7 +527,7 @@ static inline int zone_is_oom_locked(const struct zone *zone) | |||
527 | return test_bit(ZONE_OOM_LOCKED, &zone->flags); | 527 | return test_bit(ZONE_OOM_LOCKED, &zone->flags); |
528 | } | 528 | } |
529 | 529 | ||
530 | static inline unsigned zone_end_pfn(const struct zone *zone) | 530 | static inline unsigned long zone_end_pfn(const struct zone *zone) |
531 | { | 531 | { |
532 | return zone->zone_start_pfn + zone->spanned_pages; | 532 | return zone->zone_start_pfn + zone->spanned_pages; |
533 | } | 533 | } |
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 7ccb3c59ed60..ef52d9c91459 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
@@ -187,6 +187,13 @@ typedef enum { | |||
187 | * This happens with the Renesas AG-AND chips, possibly others. | 187 | * This happens with the Renesas AG-AND chips, possibly others. |
188 | */ | 188 | */ |
189 | #define BBT_AUTO_REFRESH 0x00000080 | 189 | #define BBT_AUTO_REFRESH 0x00000080 |
190 | /* | ||
191 | * Chip requires ready check on read (for auto-incremented sequential read). | ||
192 | * True only for small page devices; large page devices do not support | ||
193 | * autoincrement. | ||
194 | */ | ||
195 | #define NAND_NEED_READRDY 0x00000100 | ||
196 | |||
190 | /* Chip does not allow subpage writes */ | 197 | /* Chip does not allow subpage writes */ |
191 | #define NAND_NO_SUBPAGE_WRITE 0x00000200 | 198 | #define NAND_NO_SUBPAGE_WRITE 0x00000200 |
192 | 199 | ||
diff --git a/include/linux/nvme.h b/include/linux/nvme.h index c25cccaa555a..4fa3b0b9b071 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h | |||
@@ -137,6 +137,34 @@ enum { | |||
137 | NVME_LBAF_RP_DEGRADED = 3, | 137 | NVME_LBAF_RP_DEGRADED = 3, |
138 | }; | 138 | }; |
139 | 139 | ||
140 | struct nvme_smart_log { | ||
141 | __u8 critical_warning; | ||
142 | __u8 temperature[2]; | ||
143 | __u8 avail_spare; | ||
144 | __u8 spare_thresh; | ||
145 | __u8 percent_used; | ||
146 | __u8 rsvd6[26]; | ||
147 | __u8 data_units_read[16]; | ||
148 | __u8 data_units_written[16]; | ||
149 | __u8 host_reads[16]; | ||
150 | __u8 host_writes[16]; | ||
151 | __u8 ctrl_busy_time[16]; | ||
152 | __u8 power_cycles[16]; | ||
153 | __u8 power_on_hours[16]; | ||
154 | __u8 unsafe_shutdowns[16]; | ||
155 | __u8 media_errors[16]; | ||
156 | __u8 num_err_log_entries[16]; | ||
157 | __u8 rsvd192[320]; | ||
158 | }; | ||
159 | |||
160 | enum { | ||
161 | NVME_SMART_CRIT_SPARE = 1 << 0, | ||
162 | NVME_SMART_CRIT_TEMPERATURE = 1 << 1, | ||
163 | NVME_SMART_CRIT_RELIABILITY = 1 << 2, | ||
164 | NVME_SMART_CRIT_MEDIA = 1 << 3, | ||
165 | NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4, | ||
166 | }; | ||
167 | |||
140 | struct nvme_lba_range_type { | 168 | struct nvme_lba_range_type { |
141 | __u8 type; | 169 | __u8 type; |
142 | __u8 attributes; | 170 | __u8 attributes; |
diff --git a/include/linux/printk.h b/include/linux/printk.h index 1249a54d17e0..822171fcb1c8 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h | |||
@@ -134,6 +134,8 @@ extern int printk_delay_msec; | |||
134 | extern int dmesg_restrict; | 134 | extern int dmesg_restrict; |
135 | extern int kptr_restrict; | 135 | extern int kptr_restrict; |
136 | 136 | ||
137 | extern void wake_up_klogd(void); | ||
138 | |||
137 | void log_buf_kexec_setup(void); | 139 | void log_buf_kexec_setup(void); |
138 | void __init setup_log_buf(int early); | 140 | void __init setup_log_buf(int early); |
139 | #else | 141 | #else |
@@ -162,6 +164,10 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, | |||
162 | return false; | 164 | return false; |
163 | } | 165 | } |
164 | 166 | ||
167 | static inline void wake_up_klogd(void) | ||
168 | { | ||
169 | } | ||
170 | |||
165 | static inline void log_buf_kexec_setup(void) | 171 | static inline void log_buf_kexec_setup(void) |
166 | { | 172 | { |
167 | } | 173 | } |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 821c7f45d2a7..441f5bfdab8e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -500,7 +500,7 @@ struct sk_buff { | |||
500 | union { | 500 | union { |
501 | __u32 mark; | 501 | __u32 mark; |
502 | __u32 dropcount; | 502 | __u32 dropcount; |
503 | __u32 avail_size; | 503 | __u32 reserved_tailroom; |
504 | }; | 504 | }; |
505 | 505 | ||
506 | sk_buff_data_t inner_transport_header; | 506 | sk_buff_data_t inner_transport_header; |
@@ -1288,11 +1288,13 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, | |||
1288 | * do not lose pfmemalloc information as the pages would not be | 1288 | * do not lose pfmemalloc information as the pages would not be |
1289 | * allocated using __GFP_MEMALLOC. | 1289 | * allocated using __GFP_MEMALLOC. |
1290 | */ | 1290 | */ |
1291 | if (page->pfmemalloc && !page->mapping) | ||
1292 | skb->pfmemalloc = true; | ||
1293 | frag->page.p = page; | 1291 | frag->page.p = page; |
1294 | frag->page_offset = off; | 1292 | frag->page_offset = off; |
1295 | skb_frag_size_set(frag, size); | 1293 | skb_frag_size_set(frag, size); |
1294 | |||
1295 | page = compound_head(page); | ||
1296 | if (page->pfmemalloc && !page->mapping) | ||
1297 | skb->pfmemalloc = true; | ||
1296 | } | 1298 | } |
1297 | 1299 | ||
1298 | /** | 1300 | /** |
@@ -1447,7 +1449,10 @@ static inline int skb_tailroom(const struct sk_buff *skb) | |||
1447 | */ | 1449 | */ |
1448 | static inline int skb_availroom(const struct sk_buff *skb) | 1450 | static inline int skb_availroom(const struct sk_buff *skb) |
1449 | { | 1451 | { |
1450 | return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len; | 1452 | if (skb_is_nonlinear(skb)) |
1453 | return 0; | ||
1454 | |||
1455 | return skb->end - skb->tail - skb->reserved_tailroom; | ||
1451 | } | 1456 | } |
1452 | 1457 | ||
1453 | /** | 1458 | /** |
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 3b8f9d4fc3fe..cc25b70af33c 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h | |||
@@ -127,6 +127,7 @@ struct cdc_ncm_ctx { | |||
127 | u16 connected; | 127 | u16 connected; |
128 | }; | 128 | }; |
129 | 129 | ||
130 | extern u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf); | ||
130 | extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); | 131 | extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); |
131 | extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); | 132 | extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); |
132 | extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign); | 133 | extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign); |
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index ef9be7e1e190..1819b59aab2a 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h | |||
@@ -66,6 +66,7 @@ | |||
66 | * port. | 66 | * port. |
67 | * @flags: usb serial port flags | 67 | * @flags: usb serial port flags |
68 | * @write_wait: a wait_queue_head_t used by the port. | 68 | * @write_wait: a wait_queue_head_t used by the port. |
69 | * @delta_msr_wait: modem-status-change wait queue | ||
69 | * @work: work queue entry for the line discipline waking up. | 70 | * @work: work queue entry for the line discipline waking up. |
70 | * @throttled: nonzero if the read urb is inactive to throttle the device | 71 | * @throttled: nonzero if the read urb is inactive to throttle the device |
71 | * @throttle_req: nonzero if the tty wants to throttle us | 72 | * @throttle_req: nonzero if the tty wants to throttle us |
@@ -112,6 +113,7 @@ struct usb_serial_port { | |||
112 | 113 | ||
113 | unsigned long flags; | 114 | unsigned long flags; |
114 | wait_queue_head_t write_wait; | 115 | wait_queue_head_t write_wait; |
116 | wait_queue_head_t delta_msr_wait; | ||
115 | struct work_struct work; | 117 | struct work_struct work; |
116 | char throttled; | 118 | char throttled; |
117 | char throttle_req; | 119 | char throttle_req; |
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h index 6f033a415ecb..5c295c26ad37 100644 --- a/include/linux/usb/ulpi.h +++ b/include/linux/usb/ulpi.h | |||
@@ -181,8 +181,16 @@ | |||
181 | 181 | ||
182 | /*-------------------------------------------------------------------------*/ | 182 | /*-------------------------------------------------------------------------*/ |
183 | 183 | ||
184 | #if IS_ENABLED(CONFIG_USB_ULPI) | ||
184 | struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, | 185 | struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, |
185 | unsigned int flags); | 186 | unsigned int flags); |
187 | #else | ||
188 | static inline struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, | ||
189 | unsigned int flags) | ||
190 | { | ||
191 | return NULL; | ||
192 | } | ||
193 | #endif | ||
186 | 194 | ||
187 | #ifdef CONFIG_USB_ULPI_VIEWPORT | 195 | #ifdef CONFIG_USB_ULPI_VIEWPORT |
188 | /* access ops for controllers with a viewport register */ | 196 | /* access ops for controllers with a viewport register */ |
diff --git a/include/net/dst.h b/include/net/dst.h index 853cda11e518..1f8fd109e225 100644 --- a/include/net/dst.h +++ b/include/net/dst.h | |||
@@ -413,13 +413,15 @@ static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, | |||
413 | 413 | ||
414 | static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) | 414 | static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) |
415 | { | 415 | { |
416 | return dst->ops->neigh_lookup(dst, NULL, daddr); | 416 | struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr); |
417 | return IS_ERR(n) ? NULL : n; | ||
417 | } | 418 | } |
418 | 419 | ||
419 | static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, | 420 | static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, |
420 | struct sk_buff *skb) | 421 | struct sk_buff *skb) |
421 | { | 422 | { |
422 | return dst->ops->neigh_lookup(dst, skb, NULL); | 423 | struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL); |
424 | return IS_ERR(n) ? NULL : n; | ||
423 | } | 425 | } |
424 | 426 | ||
425 | static inline void dst_link_failure(struct sk_buff *skb) | 427 | static inline void dst_link_failure(struct sk_buff *skb) |
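With the dst.h change above, dst_neigh_lookup() and dst_neigh_lookup_skb() translate ERR_PTR results from ->neigh_lookup into NULL, so callers only need a NULL check rather than IS_ERR() handling. A hedged sketch of a hypothetical caller (not taken from this patch):

	static int example_neigh_user(struct dst_entry *dst, const void *daddr)
	{
		struct neighbour *n = dst_neigh_lookup(dst, daddr);

		if (!n)			/* failed lookups now show up as NULL */
			return -EHOSTUNREACH;
		/* ... use the neighbour entry ... */
		neigh_release(n);	/* drop the reference taken by the lookup */
		return 0;
	}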
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 76c3fe5ecc2e..0a1dcc2fa2f5 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h | |||
@@ -43,6 +43,13 @@ struct inet_frag_queue { | |||
43 | 43 | ||
44 | #define INETFRAGS_HASHSZ 64 | 44 | #define INETFRAGS_HASHSZ 64 |
45 | 45 | ||
46 | /* averaged: | ||
47 | * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ / | ||
48 | * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or | ||
49 | * struct frag_queue)) | ||
50 | */ | ||
51 | #define INETFRAGS_MAXDEPTH 128 | ||
52 | |||
46 | struct inet_frags { | 53 | struct inet_frags { |
47 | struct hlist_head hash[INETFRAGS_HASHSZ]; | 54 | struct hlist_head hash[INETFRAGS_HASHSZ]; |
48 | /* This rwlock is a global lock (separate per IPv4, IPv6 and | 55 | /* This rwlock is a global lock (separate per IPv4, IPv6 and |
@@ -76,6 +83,8 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force); | |||
76 | struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, | 83 | struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, |
77 | struct inet_frags *f, void *key, unsigned int hash) | 84 | struct inet_frags *f, void *key, unsigned int hash) |
78 | __releases(&f->lock); | 85 | __releases(&f->lock); |
86 | void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, | ||
87 | const char *prefix); | ||
79 | 88 | ||
80 | static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) | 89 | static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) |
81 | { | 90 | { |
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 9497be1ad4c0..e49db91593a9 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h | |||
@@ -152,18 +152,16 @@ struct fib_result_nl { | |||
152 | }; | 152 | }; |
153 | 153 | ||
154 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 154 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
155 | |||
156 | #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel]) | 155 | #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel]) |
157 | |||
158 | #define FIB_TABLE_HASHSZ 2 | ||
159 | |||
160 | #else /* CONFIG_IP_ROUTE_MULTIPATH */ | 156 | #else /* CONFIG_IP_ROUTE_MULTIPATH */ |
161 | |||
162 | #define FIB_RES_NH(res) ((res).fi->fib_nh[0]) | 157 | #define FIB_RES_NH(res) ((res).fi->fib_nh[0]) |
158 | #endif /* CONFIG_IP_ROUTE_MULTIPATH */ | ||
163 | 159 | ||
160 | #ifdef CONFIG_IP_MULTIPLE_TABLES | ||
164 | #define FIB_TABLE_HASHSZ 256 | 161 | #define FIB_TABLE_HASHSZ 256 |
165 | 162 | #else | |
166 | #endif /* CONFIG_IP_ROUTE_MULTIPATH */ | 163 | #define FIB_TABLE_HASHSZ 2 |
164 | #endif | ||
167 | 165 | ||
168 | extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); | 166 | extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); |
169 | 167 | ||
diff --git a/include/sound/max98090.h b/include/sound/max98090.h index 95efb13f8478..95efb13f8478 100755..100644 --- a/include/sound/max98090.h +++ b/include/sound/max98090.h | |||
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index e1ef63d4a5c4..44a30b108683 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h | |||
@@ -488,6 +488,7 @@ struct snd_soc_dapm_path { | |||
488 | /* status */ | 488 | /* status */ |
489 | u32 connect:1; /* source and sink widgets are connected */ | 489 | u32 connect:1; /* source and sink widgets are connected */ |
490 | u32 walked:1; /* path has been walked */ | 490 | u32 walked:1; /* path has been walked */ |
491 | u32 walking:1; /* path is in the process of being walked */ | ||
491 | u32 weak:1; /* path ignored for power management */ | 492 | u32 weak:1; /* path ignored for power management */ |
492 | 493 | ||
493 | int (*connected)(struct snd_soc_dapm_widget *source, | 494 | int (*connected)(struct snd_soc_dapm_widget *source, |
diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h index 28447f1594fa..8deb22672ada 100644 --- a/include/video/atmel_lcdc.h +++ b/include/video/atmel_lcdc.h | |||
@@ -30,7 +30,6 @@ | |||
30 | */ | 30 | */ |
31 | #define ATMEL_LCDC_WIRING_BGR 0 | 31 | #define ATMEL_LCDC_WIRING_BGR 0 |
32 | #define ATMEL_LCDC_WIRING_RGB 1 | 32 | #define ATMEL_LCDC_WIRING_RGB 1 |
33 | #define ATMEL_LCDC_WIRING_RGB555 2 | ||
34 | 33 | ||
35 | 34 | ||
36 | /* LCD Controller info data structure, stored in device platform_data */ | 35 | /* LCD Controller info data structure, stored in device platform_data */ |
@@ -62,6 +61,7 @@ struct atmel_lcdfb_info { | |||
62 | void (*atmel_lcdfb_power_control)(int on); | 61 | void (*atmel_lcdfb_power_control)(int on); |
63 | struct fb_monspecs *default_monspecs; | 62 | struct fb_monspecs *default_monspecs; |
64 | u32 pseudo_palette[16]; | 63 | u32 pseudo_palette[16]; |
64 | bool have_intensity_bit; | ||
65 | }; | 65 | }; |
66 | 66 | ||
67 | #define ATMEL_LCDC_DMABADDR1 0x00 | 67 | #define ATMEL_LCDC_DMABADDR1 0x00 |
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index e5c4f609f22c..3953fda2e8bd 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c | |||
@@ -840,7 +840,8 @@ out_putfd: | |||
840 | fd = error; | 840 | fd = error; |
841 | } | 841 | } |
842 | mutex_unlock(&root->d_inode->i_mutex); | 842 | mutex_unlock(&root->d_inode->i_mutex); |
843 | mnt_drop_write(mnt); | 843 | if (!ro) |
844 | mnt_drop_write(mnt); | ||
844 | out_putname: | 845 | out_putname: |
845 | putname(name); | 846 | putname(name); |
846 | return fd; | 847 | return fd; |
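The ipc/mqueue.c hunk above only calls mnt_drop_write() when the earlier mnt_want_write() actually succeeded; on a read-only mount no write access was granted, so there is nothing to drop. A small hedged sketch of the pairing pattern (illustrative names only):

	static int example_write_to_mnt(struct vfsmount *mnt)
	{
		int ro = mnt_want_write(mnt);	/* e.g. -EROFS on a read-only mount */

		if (!ro) {
			/* ... write-side work may be done here ... */
		}
		if (!ro)			/* only undo a grant that was taken */
			mnt_drop_write(mnt);
		return ro;
	}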
diff --git a/kernel/events/core.c b/kernel/events/core.c index b0cd86501c30..59412d037eed 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -4434,12 +4434,15 @@ static void perf_event_task_event(struct perf_task_event *task_event) | |||
4434 | if (ctxn < 0) | 4434 | if (ctxn < 0) |
4435 | goto next; | 4435 | goto next; |
4436 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | 4436 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); |
4437 | if (ctx) | ||
4438 | perf_event_task_ctx(ctx, task_event); | ||
4437 | } | 4439 | } |
4438 | if (ctx) | ||
4439 | perf_event_task_ctx(ctx, task_event); | ||
4440 | next: | 4440 | next: |
4441 | put_cpu_ptr(pmu->pmu_cpu_context); | 4441 | put_cpu_ptr(pmu->pmu_cpu_context); |
4442 | } | 4442 | } |
4443 | if (task_event->task_ctx) | ||
4444 | perf_event_task_ctx(task_event->task_ctx, task_event); | ||
4445 | |||
4443 | rcu_read_unlock(); | 4446 | rcu_read_unlock(); |
4444 | } | 4447 | } |
4445 | 4448 | ||
@@ -5647,6 +5650,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) | |||
5647 | event->attr.sample_period = NSEC_PER_SEC / freq; | 5650 | event->attr.sample_period = NSEC_PER_SEC / freq; |
5648 | hwc->sample_period = event->attr.sample_period; | 5651 | hwc->sample_period = event->attr.sample_period; |
5649 | local64_set(&hwc->period_left, hwc->sample_period); | 5652 | local64_set(&hwc->period_left, hwc->sample_period); |
5653 | hwc->last_period = hwc->sample_period; | ||
5650 | event->attr.freq = 0; | 5654 | event->attr.freq = 0; |
5651 | } | 5655 | } |
5652 | } | 5656 | } |
diff --git a/kernel/printk.c b/kernel/printk.c index 0b31715f335a..abbdd9e2ac82 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -63,8 +63,6 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...) | |||
63 | #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ | 63 | #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ |
64 | #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */ | 64 | #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */ |
65 | 65 | ||
66 | DECLARE_WAIT_QUEUE_HEAD(log_wait); | ||
67 | |||
68 | int console_printk[4] = { | 66 | int console_printk[4] = { |
69 | DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ | 67 | DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ |
70 | DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ | 68 | DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ |
@@ -224,6 +222,7 @@ struct log { | |||
224 | static DEFINE_RAW_SPINLOCK(logbuf_lock); | 222 | static DEFINE_RAW_SPINLOCK(logbuf_lock); |
225 | 223 | ||
226 | #ifdef CONFIG_PRINTK | 224 | #ifdef CONFIG_PRINTK |
225 | DECLARE_WAIT_QUEUE_HEAD(log_wait); | ||
227 | /* the next printk record to read by syslog(READ) or /proc/kmsg */ | 226 | /* the next printk record to read by syslog(READ) or /proc/kmsg */ |
228 | static u64 syslog_seq; | 227 | static u64 syslog_seq; |
229 | static u32 syslog_idx; | 228 | static u32 syslog_idx; |
@@ -1957,45 +1956,6 @@ int is_console_locked(void) | |||
1957 | return console_locked; | 1956 | return console_locked; |
1958 | } | 1957 | } |
1959 | 1958 | ||
1960 | /* | ||
1961 | * Delayed printk version, for scheduler-internal messages: | ||
1962 | */ | ||
1963 | #define PRINTK_BUF_SIZE 512 | ||
1964 | |||
1965 | #define PRINTK_PENDING_WAKEUP 0x01 | ||
1966 | #define PRINTK_PENDING_SCHED 0x02 | ||
1967 | |||
1968 | static DEFINE_PER_CPU(int, printk_pending); | ||
1969 | static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf); | ||
1970 | |||
1971 | static void wake_up_klogd_work_func(struct irq_work *irq_work) | ||
1972 | { | ||
1973 | int pending = __this_cpu_xchg(printk_pending, 0); | ||
1974 | |||
1975 | if (pending & PRINTK_PENDING_SCHED) { | ||
1976 | char *buf = __get_cpu_var(printk_sched_buf); | ||
1977 | printk(KERN_WARNING "[sched_delayed] %s", buf); | ||
1978 | } | ||
1979 | |||
1980 | if (pending & PRINTK_PENDING_WAKEUP) | ||
1981 | wake_up_interruptible(&log_wait); | ||
1982 | } | ||
1983 | |||
1984 | static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = { | ||
1985 | .func = wake_up_klogd_work_func, | ||
1986 | .flags = IRQ_WORK_LAZY, | ||
1987 | }; | ||
1988 | |||
1989 | void wake_up_klogd(void) | ||
1990 | { | ||
1991 | preempt_disable(); | ||
1992 | if (waitqueue_active(&log_wait)) { | ||
1993 | this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); | ||
1994 | irq_work_queue(&__get_cpu_var(wake_up_klogd_work)); | ||
1995 | } | ||
1996 | preempt_enable(); | ||
1997 | } | ||
1998 | |||
1999 | static void console_cont_flush(char *text, size_t size) | 1959 | static void console_cont_flush(char *text, size_t size) |
2000 | { | 1960 | { |
2001 | unsigned long flags; | 1961 | unsigned long flags; |
@@ -2458,6 +2418,44 @@ static int __init printk_late_init(void) | |||
2458 | late_initcall(printk_late_init); | 2418 | late_initcall(printk_late_init); |
2459 | 2419 | ||
2460 | #if defined CONFIG_PRINTK | 2420 | #if defined CONFIG_PRINTK |
2421 | /* | ||
2422 | * Delayed printk version, for scheduler-internal messages: | ||
2423 | */ | ||
2424 | #define PRINTK_BUF_SIZE 512 | ||
2425 | |||
2426 | #define PRINTK_PENDING_WAKEUP 0x01 | ||
2427 | #define PRINTK_PENDING_SCHED 0x02 | ||
2428 | |||
2429 | static DEFINE_PER_CPU(int, printk_pending); | ||
2430 | static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf); | ||
2431 | |||
2432 | static void wake_up_klogd_work_func(struct irq_work *irq_work) | ||
2433 | { | ||
2434 | int pending = __this_cpu_xchg(printk_pending, 0); | ||
2435 | |||
2436 | if (pending & PRINTK_PENDING_SCHED) { | ||
2437 | char *buf = __get_cpu_var(printk_sched_buf); | ||
2438 | printk(KERN_WARNING "[sched_delayed] %s", buf); | ||
2439 | } | ||
2440 | |||
2441 | if (pending & PRINTK_PENDING_WAKEUP) | ||
2442 | wake_up_interruptible(&log_wait); | ||
2443 | } | ||
2444 | |||
2445 | static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = { | ||
2446 | .func = wake_up_klogd_work_func, | ||
2447 | .flags = IRQ_WORK_LAZY, | ||
2448 | }; | ||
2449 | |||
2450 | void wake_up_klogd(void) | ||
2451 | { | ||
2452 | preempt_disable(); | ||
2453 | if (waitqueue_active(&log_wait)) { | ||
2454 | this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); | ||
2455 | irq_work_queue(&__get_cpu_var(wake_up_klogd_work)); | ||
2456 | } | ||
2457 | preempt_enable(); | ||
2458 | } | ||
2461 | 2459 | ||
2462 | int printk_sched(const char *fmt, ...) | 2460 | int printk_sched(const char *fmt, ...) |
2463 | { | 2461 | { |
diff --git a/kernel/sys.c b/kernel/sys.c index 81f56445fba9..39c9c4a2949f 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -2185,9 +2185,8 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep, | |||
2185 | 2185 | ||
2186 | char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; | 2186 | char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; |
2187 | 2187 | ||
2188 | static int __orderly_poweroff(void) | 2188 | static int __orderly_poweroff(bool force) |
2189 | { | 2189 | { |
2190 | int argc; | ||
2191 | char **argv; | 2190 | char **argv; |
2192 | static char *envp[] = { | 2191 | static char *envp[] = { |
2193 | "HOME=/", | 2192 | "HOME=/", |
@@ -2196,20 +2195,40 @@ static int __orderly_poweroff(void) | |||
2196 | }; | 2195 | }; |
2197 | int ret; | 2196 | int ret; |
2198 | 2197 | ||
2199 | argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc); | 2198 | argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL); |
2200 | if (argv == NULL) { | 2199 | if (argv) { |
2200 | ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); | ||
2201 | argv_free(argv); | ||
2202 | } else { | ||
2201 | printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n", | 2203 | printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n", |
2202 | __func__, poweroff_cmd); | 2204 | __func__, poweroff_cmd); |
2203 | return -ENOMEM; | 2205 | ret = -ENOMEM; |
2204 | } | 2206 | } |
2205 | 2207 | ||
2206 | ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC, | 2208 | if (ret && force) { |
2207 | NULL, NULL, NULL); | 2209 | printk(KERN_WARNING "Failed to start orderly shutdown: " |
2208 | argv_free(argv); | 2210 | "forcing the issue\n"); |
2211 | /* | ||
2212 | * I guess this should try to kick off some daemon to sync and | ||
2213 | * poweroff asap. Or not even bother syncing if we're doing an | ||
2214 | * emergency shutdown? | ||
2215 | */ | ||
2216 | emergency_sync(); | ||
2217 | kernel_power_off(); | ||
2218 | } | ||
2209 | 2219 | ||
2210 | return ret; | 2220 | return ret; |
2211 | } | 2221 | } |
2212 | 2222 | ||
2223 | static bool poweroff_force; | ||
2224 | |||
2225 | static void poweroff_work_func(struct work_struct *work) | ||
2226 | { | ||
2227 | __orderly_poweroff(poweroff_force); | ||
2228 | } | ||
2229 | |||
2230 | static DECLARE_WORK(poweroff_work, poweroff_work_func); | ||
2231 | |||
2213 | /** | 2232 | /** |
2214 | * orderly_poweroff - Trigger an orderly system poweroff | 2233 | * orderly_poweroff - Trigger an orderly system poweroff |
2215 | * @force: force poweroff if command execution fails | 2234 | * @force: force poweroff if command execution fails |
@@ -2219,21 +2238,9 @@ static int __orderly_poweroff(void) | |||
2219 | */ | 2238 | */ |
2220 | int orderly_poweroff(bool force) | 2239 | int orderly_poweroff(bool force) |
2221 | { | 2240 | { |
2222 | int ret = __orderly_poweroff(); | 2241 | if (force) /* do not override the pending "true" */ |
2223 | 2242 | poweroff_force = true; | |
2224 | if (ret && force) { | 2243 | schedule_work(&poweroff_work); |
2225 | printk(KERN_WARNING "Failed to start orderly shutdown: " | 2244 | return 0; |
2226 | "forcing the issue\n"); | ||
2227 | |||
2228 | /* | ||
2229 | * I guess this should try to kick off some daemon to sync and | ||
2230 | * poweroff asap. Or not even bother syncing if we're doing an | ||
2231 | * emergency shutdown? | ||
2232 | */ | ||
2233 | emergency_sync(); | ||
2234 | kernel_power_off(); | ||
2235 | } | ||
2236 | |||
2237 | return ret; | ||
2238 | } | 2245 | } |
2239 | EXPORT_SYMBOL_GPL(orderly_poweroff); | 2246 | EXPORT_SYMBOL_GPL(orderly_poweroff); |
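The kernel/sys.c rework above defers the actual shutdown to a workqueue, so orderly_poweroff() itself no longer blocks and the argv split can use GFP_KERNEL, since the helper now runs in process context. A hedged sketch of the same defer-to-work pattern with hypothetical names:

	#include <linux/workqueue.h>

	static bool example_force;

	static void example_poweroff_work(struct work_struct *work)
	{
		/* runs in process context, where blocking calls are allowed */
	}

	static DECLARE_WORK(example_work, example_poweroff_work);

	void example_request_poweroff(bool force)
	{
		if (force)			/* never clear a pending "true" */
			example_force = true;
		schedule_work(&example_work);	/* safe even from atomic context */
	}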
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index ab25b88aae56..6893d5a2bf08 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -3104,8 +3104,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
3104 | continue; | 3104 | continue; |
3105 | } | 3105 | } |
3106 | 3106 | ||
3107 | hlist_del(&entry->node); | 3107 | hlist_del_rcu(&entry->node); |
3108 | call_rcu(&entry->rcu, ftrace_free_entry_rcu); | 3108 | call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu); |
3109 | } | 3109 | } |
3110 | } | 3110 | } |
3111 | __disable_ftrace_function_probe(); | 3111 | __disable_ftrace_function_probe(); |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1f835a83cb2c..4f1dade56981 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -704,7 +704,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
704 | void | 704 | void |
705 | update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | 705 | update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) |
706 | { | 706 | { |
707 | struct ring_buffer *buf = tr->buffer; | 707 | struct ring_buffer *buf; |
708 | 708 | ||
709 | if (trace_stop_count) | 709 | if (trace_stop_count) |
710 | return; | 710 | return; |
@@ -719,6 +719,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
719 | 719 | ||
720 | arch_spin_lock(&ftrace_max_lock); | 720 | arch_spin_lock(&ftrace_max_lock); |
721 | 721 | ||
722 | buf = tr->buffer; | ||
722 | tr->buffer = max_tr.buffer; | 723 | tr->buffer = max_tr.buffer; |
723 | max_tr.buffer = buf; | 724 | max_tr.buffer = buf; |
724 | 725 | ||
@@ -2880,11 +2881,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | |||
2880 | return -EINVAL; | 2881 | return -EINVAL; |
2881 | } | 2882 | } |
2882 | 2883 | ||
2883 | static void set_tracer_flags(unsigned int mask, int enabled) | 2884 | /* Some tracers require overwrite to stay enabled */ |
2885 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) | ||
2886 | { | ||
2887 | if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) | ||
2888 | return -1; | ||
2889 | |||
2890 | return 0; | ||
2891 | } | ||
2892 | |||
2893 | int set_tracer_flag(unsigned int mask, int enabled) | ||
2884 | { | 2894 | { |
2885 | /* do nothing if flag is already set */ | 2895 | /* do nothing if flag is already set */ |
2886 | if (!!(trace_flags & mask) == !!enabled) | 2896 | if (!!(trace_flags & mask) == !!enabled) |
2887 | return; | 2897 | return 0; |
2898 | |||
2899 | /* Give the tracer a chance to approve the change */ | ||
2900 | if (current_trace->flag_changed) | ||
2901 | if (current_trace->flag_changed(current_trace, mask, !!enabled)) | ||
2902 | return -EINVAL; | ||
2888 | 2903 | ||
2889 | if (enabled) | 2904 | if (enabled) |
2890 | trace_flags |= mask; | 2905 | trace_flags |= mask; |
@@ -2894,18 +2909,24 @@ static void set_tracer_flags(unsigned int mask, int enabled) | |||
2894 | if (mask == TRACE_ITER_RECORD_CMD) | 2909 | if (mask == TRACE_ITER_RECORD_CMD) |
2895 | trace_event_enable_cmd_record(enabled); | 2910 | trace_event_enable_cmd_record(enabled); |
2896 | 2911 | ||
2897 | if (mask == TRACE_ITER_OVERWRITE) | 2912 | if (mask == TRACE_ITER_OVERWRITE) { |
2898 | ring_buffer_change_overwrite(global_trace.buffer, enabled); | 2913 | ring_buffer_change_overwrite(global_trace.buffer, enabled); |
2914 | #ifdef CONFIG_TRACER_MAX_TRACE | ||
2915 | ring_buffer_change_overwrite(max_tr.buffer, enabled); | ||
2916 | #endif | ||
2917 | } | ||
2899 | 2918 | ||
2900 | if (mask == TRACE_ITER_PRINTK) | 2919 | if (mask == TRACE_ITER_PRINTK) |
2901 | trace_printk_start_stop_comm(enabled); | 2920 | trace_printk_start_stop_comm(enabled); |
2921 | |||
2922 | return 0; | ||
2902 | } | 2923 | } |
2903 | 2924 | ||
2904 | static int trace_set_options(char *option) | 2925 | static int trace_set_options(char *option) |
2905 | { | 2926 | { |
2906 | char *cmp; | 2927 | char *cmp; |
2907 | int neg = 0; | 2928 | int neg = 0; |
2908 | int ret = 0; | 2929 | int ret = -ENODEV; |
2909 | int i; | 2930 | int i; |
2910 | 2931 | ||
2911 | cmp = strstrip(option); | 2932 | cmp = strstrip(option); |
@@ -2915,19 +2936,20 @@ static int trace_set_options(char *option) | |||
2915 | cmp += 2; | 2936 | cmp += 2; |
2916 | } | 2937 | } |
2917 | 2938 | ||
2939 | mutex_lock(&trace_types_lock); | ||
2940 | |||
2918 | for (i = 0; trace_options[i]; i++) { | 2941 | for (i = 0; trace_options[i]; i++) { |
2919 | if (strcmp(cmp, trace_options[i]) == 0) { | 2942 | if (strcmp(cmp, trace_options[i]) == 0) { |
2920 | set_tracer_flags(1 << i, !neg); | 2943 | ret = set_tracer_flag(1 << i, !neg); |
2921 | break; | 2944 | break; |
2922 | } | 2945 | } |
2923 | } | 2946 | } |
2924 | 2947 | ||
2925 | /* If no option could be set, test the specific tracer options */ | 2948 | /* If no option could be set, test the specific tracer options */ |
2926 | if (!trace_options[i]) { | 2949 | if (!trace_options[i]) |
2927 | mutex_lock(&trace_types_lock); | ||
2928 | ret = set_tracer_option(current_trace, cmp, neg); | 2950 | ret = set_tracer_option(current_trace, cmp, neg); |
2929 | mutex_unlock(&trace_types_lock); | 2951 | |
2930 | } | 2952 | mutex_unlock(&trace_types_lock); |
2931 | 2953 | ||
2932 | return ret; | 2954 | return ret; |
2933 | } | 2955 | } |
@@ -2937,6 +2959,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
2937 | size_t cnt, loff_t *ppos) | 2959 | size_t cnt, loff_t *ppos) |
2938 | { | 2960 | { |
2939 | char buf[64]; | 2961 | char buf[64]; |
2962 | int ret; | ||
2940 | 2963 | ||
2941 | if (cnt >= sizeof(buf)) | 2964 | if (cnt >= sizeof(buf)) |
2942 | return -EINVAL; | 2965 | return -EINVAL; |
@@ -2946,7 +2969,9 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
2946 | 2969 | ||
2947 | buf[cnt] = 0; | 2970 | buf[cnt] = 0; |
2948 | 2971 | ||
2949 | trace_set_options(buf); | 2972 | ret = trace_set_options(buf); |
2973 | if (ret < 0) | ||
2974 | return ret; | ||
2950 | 2975 | ||
2951 | *ppos += cnt; | 2976 | *ppos += cnt; |
2952 | 2977 | ||
@@ -3250,6 +3275,9 @@ static int tracing_set_tracer(const char *buf) | |||
3250 | goto out; | 3275 | goto out; |
3251 | 3276 | ||
3252 | trace_branch_disable(); | 3277 | trace_branch_disable(); |
3278 | |||
3279 | current_trace->enabled = false; | ||
3280 | |||
3253 | if (current_trace->reset) | 3281 | if (current_trace->reset) |
3254 | current_trace->reset(tr); | 3282 | current_trace->reset(tr); |
3255 | 3283 | ||
@@ -3294,6 +3322,7 @@ static int tracing_set_tracer(const char *buf) | |||
3294 | } | 3322 | } |
3295 | 3323 | ||
3296 | current_trace = t; | 3324 | current_trace = t; |
3325 | current_trace->enabled = true; | ||
3297 | trace_branch_enable(tr); | 3326 | trace_branch_enable(tr); |
3298 | out: | 3327 | out: |
3299 | mutex_unlock(&trace_types_lock); | 3328 | mutex_unlock(&trace_types_lock); |
@@ -4780,7 +4809,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
4780 | 4809 | ||
4781 | if (val != 0 && val != 1) | 4810 | if (val != 0 && val != 1) |
4782 | return -EINVAL; | 4811 | return -EINVAL; |
4783 | set_tracer_flags(1 << index, val); | 4812 | |
4813 | mutex_lock(&trace_types_lock); | ||
4814 | ret = set_tracer_flag(1 << index, val); | ||
4815 | mutex_unlock(&trace_types_lock); | ||
4816 | |||
4817 | if (ret < 0) | ||
4818 | return ret; | ||
4784 | 4819 | ||
4785 | *ppos += cnt; | 4820 | *ppos += cnt; |
4786 | 4821 | ||
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 57d7e5397d56..2081971367ea 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -283,11 +283,15 @@ struct tracer { | |||
283 | enum print_line_t (*print_line)(struct trace_iterator *iter); | 283 | enum print_line_t (*print_line)(struct trace_iterator *iter); |
284 | /* If you handled the flag setting, return 0 */ | 284 | /* If you handled the flag setting, return 0 */ |
285 | int (*set_flag)(u32 old_flags, u32 bit, int set); | 285 | int (*set_flag)(u32 old_flags, u32 bit, int set); |
286 | /* Return 0 if OK with change, else return non-zero */ | ||
287 | int (*flag_changed)(struct tracer *tracer, | ||
288 | u32 mask, int set); | ||
286 | struct tracer *next; | 289 | struct tracer *next; |
287 | struct tracer_flags *flags; | 290 | struct tracer_flags *flags; |
288 | bool print_max; | 291 | bool print_max; |
289 | bool use_max_tr; | 292 | bool use_max_tr; |
290 | bool allocated_snapshot; | 293 | bool allocated_snapshot; |
294 | bool enabled; | ||
291 | }; | 295 | }; |
292 | 296 | ||
293 | 297 | ||
@@ -943,6 +947,8 @@ extern const char *__stop___trace_bprintk_fmt[]; | |||
943 | 947 | ||
944 | void trace_printk_init_buffers(void); | 948 | void trace_printk_init_buffers(void); |
945 | void trace_printk_start_comm(void); | 949 | void trace_printk_start_comm(void); |
950 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); | ||
951 | int set_tracer_flag(unsigned int mask, int enabled); | ||
946 | 952 | ||
947 | #undef FTRACE_ENTRY | 953 | #undef FTRACE_ENTRY |
948 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ | 954 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 713a2cac4881..443b25b43b4f 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -32,7 +32,7 @@ enum { | |||
32 | 32 | ||
33 | static int trace_type __read_mostly; | 33 | static int trace_type __read_mostly; |
34 | 34 | ||
35 | static int save_lat_flag; | 35 | static int save_flags; |
36 | 36 | ||
37 | static void stop_irqsoff_tracer(struct trace_array *tr, int graph); | 37 | static void stop_irqsoff_tracer(struct trace_array *tr, int graph); |
38 | static int start_irqsoff_tracer(struct trace_array *tr, int graph); | 38 | static int start_irqsoff_tracer(struct trace_array *tr, int graph); |
@@ -558,8 +558,11 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph) | |||
558 | 558 | ||
559 | static void __irqsoff_tracer_init(struct trace_array *tr) | 559 | static void __irqsoff_tracer_init(struct trace_array *tr) |
560 | { | 560 | { |
561 | save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; | 561 | save_flags = trace_flags; |
562 | trace_flags |= TRACE_ITER_LATENCY_FMT; | 562 | |
563 | /* non overwrite screws up the latency tracers */ | ||
564 | set_tracer_flag(TRACE_ITER_OVERWRITE, 1); | ||
565 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1); | ||
563 | 566 | ||
564 | tracing_max_latency = 0; | 567 | tracing_max_latency = 0; |
565 | irqsoff_trace = tr; | 568 | irqsoff_trace = tr; |
@@ -573,10 +576,13 @@ static void __irqsoff_tracer_init(struct trace_array *tr) | |||
573 | 576 | ||
574 | static void irqsoff_tracer_reset(struct trace_array *tr) | 577 | static void irqsoff_tracer_reset(struct trace_array *tr) |
575 | { | 578 | { |
579 | int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT; | ||
580 | int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE; | ||
581 | |||
576 | stop_irqsoff_tracer(tr, is_graph()); | 582 | stop_irqsoff_tracer(tr, is_graph()); |
577 | 583 | ||
578 | if (!save_lat_flag) | 584 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag); |
579 | trace_flags &= ~TRACE_ITER_LATENCY_FMT; | 585 | set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag); |
580 | } | 586 | } |
581 | 587 | ||
582 | static void irqsoff_tracer_start(struct trace_array *tr) | 588 | static void irqsoff_tracer_start(struct trace_array *tr) |
@@ -609,6 +615,7 @@ static struct tracer irqsoff_tracer __read_mostly = | |||
609 | .print_line = irqsoff_print_line, | 615 | .print_line = irqsoff_print_line, |
610 | .flags = &tracer_flags, | 616 | .flags = &tracer_flags, |
611 | .set_flag = irqsoff_set_flag, | 617 | .set_flag = irqsoff_set_flag, |
618 | .flag_changed = trace_keep_overwrite, | ||
612 | #ifdef CONFIG_FTRACE_SELFTEST | 619 | #ifdef CONFIG_FTRACE_SELFTEST |
613 | .selftest = trace_selftest_startup_irqsoff, | 620 | .selftest = trace_selftest_startup_irqsoff, |
614 | #endif | 621 | #endif |
@@ -642,6 +649,7 @@ static struct tracer preemptoff_tracer __read_mostly = | |||
642 | .print_line = irqsoff_print_line, | 649 | .print_line = irqsoff_print_line, |
643 | .flags = &tracer_flags, | 650 | .flags = &tracer_flags, |
644 | .set_flag = irqsoff_set_flag, | 651 | .set_flag = irqsoff_set_flag, |
652 | .flag_changed = trace_keep_overwrite, | ||
645 | #ifdef CONFIG_FTRACE_SELFTEST | 653 | #ifdef CONFIG_FTRACE_SELFTEST |
646 | .selftest = trace_selftest_startup_preemptoff, | 654 | .selftest = trace_selftest_startup_preemptoff, |
647 | #endif | 655 | #endif |
@@ -677,6 +685,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly = | |||
677 | .print_line = irqsoff_print_line, | 685 | .print_line = irqsoff_print_line, |
678 | .flags = &tracer_flags, | 686 | .flags = &tracer_flags, |
679 | .set_flag = irqsoff_set_flag, | 687 | .set_flag = irqsoff_set_flag, |
688 | .flag_changed = trace_keep_overwrite, | ||
680 | #ifdef CONFIG_FTRACE_SELFTEST | 689 | #ifdef CONFIG_FTRACE_SELFTEST |
681 | .selftest = trace_selftest_startup_preemptirqsoff, | 690 | .selftest = trace_selftest_startup_preemptirqsoff, |
682 | #endif | 691 | #endif |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 75aa97fbe1a1..fde652c9a511 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr); | |||
36 | static int wakeup_graph_entry(struct ftrace_graph_ent *trace); | 36 | static int wakeup_graph_entry(struct ftrace_graph_ent *trace); |
37 | static void wakeup_graph_return(struct ftrace_graph_ret *trace); | 37 | static void wakeup_graph_return(struct ftrace_graph_ret *trace); |
38 | 38 | ||
39 | static int save_lat_flag; | 39 | static int save_flags; |
40 | 40 | ||
41 | #define TRACE_DISPLAY_GRAPH 1 | 41 | #define TRACE_DISPLAY_GRAPH 1 |
42 | 42 | ||
@@ -540,8 +540,11 @@ static void stop_wakeup_tracer(struct trace_array *tr) | |||
540 | 540 | ||
541 | static int __wakeup_tracer_init(struct trace_array *tr) | 541 | static int __wakeup_tracer_init(struct trace_array *tr) |
542 | { | 542 | { |
543 | save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; | 543 | save_flags = trace_flags; |
544 | trace_flags |= TRACE_ITER_LATENCY_FMT; | 544 | |
545 | /* non overwrite screws up the latency tracers */ | ||
546 | set_tracer_flag(TRACE_ITER_OVERWRITE, 1); | ||
547 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1); | ||
545 | 548 | ||
546 | tracing_max_latency = 0; | 549 | tracing_max_latency = 0; |
547 | wakeup_trace = tr; | 550 | wakeup_trace = tr; |
@@ -563,12 +566,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr) | |||
563 | 566 | ||
564 | static void wakeup_tracer_reset(struct trace_array *tr) | 567 | static void wakeup_tracer_reset(struct trace_array *tr) |
565 | { | 568 | { |
569 | int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT; | ||
570 | int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE; | ||
571 | |||
566 | stop_wakeup_tracer(tr); | 572 | stop_wakeup_tracer(tr); |
567 | /* make sure we put back any tasks we are tracing */ | 573 | /* make sure we put back any tasks we are tracing */ |
568 | wakeup_reset(tr); | 574 | wakeup_reset(tr); |
569 | 575 | ||
570 | if (!save_lat_flag) | 576 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag); |
571 | trace_flags &= ~TRACE_ITER_LATENCY_FMT; | 577 | set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag); |
572 | } | 578 | } |
573 | 579 | ||
574 | static void wakeup_tracer_start(struct trace_array *tr) | 580 | static void wakeup_tracer_start(struct trace_array *tr) |
@@ -594,6 +600,7 @@ static struct tracer wakeup_tracer __read_mostly = | |||
594 | .print_line = wakeup_print_line, | 600 | .print_line = wakeup_print_line, |
595 | .flags = &tracer_flags, | 601 | .flags = &tracer_flags, |
596 | .set_flag = wakeup_set_flag, | 602 | .set_flag = wakeup_set_flag, |
603 | .flag_changed = trace_keep_overwrite, | ||
597 | #ifdef CONFIG_FTRACE_SELFTEST | 604 | #ifdef CONFIG_FTRACE_SELFTEST |
598 | .selftest = trace_selftest_startup_wakeup, | 605 | .selftest = trace_selftest_startup_wakeup, |
599 | #endif | 606 | #endif |
@@ -615,6 +622,7 @@ static struct tracer wakeup_rt_tracer __read_mostly = | |||
615 | .print_line = wakeup_print_line, | 622 | .print_line = wakeup_print_line, |
616 | .flags = &tracer_flags, | 623 | .flags = &tracer_flags, |
617 | .set_flag = wakeup_set_flag, | 624 | .set_flag = wakeup_set_flag, |
625 | .flag_changed = trace_keep_overwrite, | ||
618 | #ifdef CONFIG_FTRACE_SELFTEST | 626 | #ifdef CONFIG_FTRACE_SELFTEST |
619 | .selftest = trace_selftest_startup_wakeup, | 627 | .selftest = trace_selftest_startup_wakeup, |
620 | #endif | 628 | #endif |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 55fac5b991b7..b48cd597145d 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -3447,28 +3447,34 @@ static void wq_unbind_fn(struct work_struct *work) | |||
3447 | 3447 | ||
3448 | spin_unlock_irq(&pool->lock); | 3448 | spin_unlock_irq(&pool->lock); |
3449 | mutex_unlock(&pool->assoc_mutex); | 3449 | mutex_unlock(&pool->assoc_mutex); |
3450 | } | ||
3451 | 3450 | ||
3452 | /* | 3451 | /* |
3453 | * Call schedule() so that we cross rq->lock and thus can guarantee | 3452 | * Call schedule() so that we cross rq->lock and thus can |
3454 | * sched callbacks see the %WORKER_UNBOUND flag. This is necessary | 3453 | * guarantee sched callbacks see the %WORKER_UNBOUND flag. |
3455 | * as scheduler callbacks may be invoked from other cpus. | 3454 | * This is necessary as scheduler callbacks may be invoked |
3456 | */ | 3455 | * from other cpus. |
3457 | schedule(); | 3456 | */ |
3457 | schedule(); | ||
3458 | 3458 | ||
3459 | /* | 3459 | /* |
3460 | * Sched callbacks are disabled now. Zap nr_running. After this, | 3460 | * Sched callbacks are disabled now. Zap nr_running. |
3461 | * nr_running stays zero and need_more_worker() and keep_working() | 3461 | * After this, nr_running stays zero and need_more_worker() |
3462 | * are always true as long as the worklist is not empty. Pools on | 3462 | * and keep_working() are always true as long as the |
3463 | * @cpu now behave as unbound (in terms of concurrency management) | 3463 | * worklist is not empty. This pool now behaves as an |
3464 | * pools which are served by workers tied to the CPU. | 3464 | * unbound (in terms of concurrency management) pool which |
3465 | * | 3465 | * are served by workers tied to the pool. |
3466 | * On return from this function, the current worker would trigger | 3466 | */ |
3467 | * unbound chain execution of pending work items if other workers | ||
3468 | * didn't already. | ||
3469 | */ | ||
3470 | for_each_std_worker_pool(pool, cpu) | ||
3471 | atomic_set(&pool->nr_running, 0); | 3467 | atomic_set(&pool->nr_running, 0); |
3468 | |||
3469 | /* | ||
3470 | * With concurrency management just turned off, a busy | ||
3471 | * worker blocking could lead to lengthy stalls. Kick off | ||
3472 | * unbound chain execution of currently pending work items. | ||
3473 | */ | ||
3474 | spin_lock_irq(&pool->lock); | ||
3475 | wake_up_worker(pool); | ||
3476 | spin_unlock_irq(&pool->lock); | ||
3477 | } | ||
3472 | } | 3478 | } |
3473 | 3479 | ||
3474 | /* | 3480 | /* |
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c index 9681d54b95d1..f8e0e5367398 100644 --- a/lib/bust_spinlocks.c +++ b/lib/bust_spinlocks.c | |||
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/printk.h> | ||
11 | #include <linux/spinlock.h> | 12 | #include <linux/spinlock.h> |
12 | #include <linux/tty.h> | 13 | #include <linux/tty.h> |
13 | #include <linux/wait.h> | 14 | #include <linux/wait.h> |
@@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlocks(int yes) | |||
28 | wake_up_klogd(); | 29 | wake_up_klogd(); |
29 | } | 30 | } |
30 | } | 31 | } |
31 | |||
32 | |||
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 5e396accd3d0..d87a17a819d0 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
@@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug_entry *ref) | |||
862 | entry = bucket_find_exact(bucket, ref); | 862 | entry = bucket_find_exact(bucket, ref); |
863 | 863 | ||
864 | if (!entry) { | 864 | if (!entry) { |
865 | /* must drop lock before calling dma_mapping_error */ | ||
866 | put_hash_bucket(bucket, &flags); | ||
867 | |||
865 | if (dma_mapping_error(ref->dev, ref->dev_addr)) { | 868 | if (dma_mapping_error(ref->dev, ref->dev_addr)) { |
866 | err_printk(ref->dev, NULL, | 869 | err_printk(ref->dev, NULL, |
867 | "DMA-API: device driver tries " | 870 | "DMA-API: device driver tries to free an " |
868 | "to free an invalid DMA memory address\n"); | 871 | "invalid DMA memory address\n"); |
869 | return; | 872 | } else { |
873 | err_printk(ref->dev, NULL, | ||
874 | "DMA-API: device driver tries to free DMA " | ||
875 | "memory it has not allocated [device " | ||
876 | "address=0x%016llx] [size=%llu bytes]\n", | ||
877 | ref->dev_addr, ref->size); | ||
870 | } | 878 | } |
871 | err_printk(ref->dev, NULL, "DMA-API: device driver tries " | 879 | return; |
872 | "to free DMA memory it has not allocated " | ||
873 | "[device address=0x%016llx] [size=%llu bytes]\n", | ||
874 | ref->dev_addr, ref->size); | ||
875 | goto out; | ||
876 | } | 880 | } |
877 | 881 | ||
878 | if (ref->size != entry->size) { | 882 | if (ref->size != entry->size) { |
@@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug_entry *ref) | |||
936 | hash_bucket_del(entry); | 940 | hash_bucket_del(entry); |
937 | dma_entry_free(entry); | 941 | dma_entry_free(entry); |
938 | 942 | ||
939 | out: | ||
940 | put_hash_bucket(bucket, &flags); | 943 | put_hash_bucket(bucket, &flags); |
941 | } | 944 | } |
942 | 945 | ||
@@ -1082,13 +1085,27 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | |||
1082 | ref.dev = dev; | 1085 | ref.dev = dev; |
1083 | ref.dev_addr = dma_addr; | 1086 | ref.dev_addr = dma_addr; |
1084 | bucket = get_hash_bucket(&ref, &flags); | 1087 | bucket = get_hash_bucket(&ref, &flags); |
1085 | entry = bucket_find_exact(bucket, &ref); | ||
1086 | 1088 | ||
1087 | if (!entry) | 1089 | list_for_each_entry(entry, &bucket->list, list) { |
1088 | goto out; | 1090 | if (!exact_match(&ref, entry)) |
1091 | continue; | ||
1092 | |||
1093 | /* | ||
1094 | * The same physical address can be mapped multiple | ||
1095 | * times. Without a hardware IOMMU this results in the | ||
1096 | * same device addresses being put into the dma-debug | ||
1097 | * hash multiple times too. This can result in false | ||
1098 | * positives being reported. Therefore we implement a | ||
1099 | * best-fit algorithm here which updates the first entry | ||
1100 | * from the hash which fits the reference value and is | ||
1101 | * not currently listed as being checked. | ||
1102 | */ | ||
1103 | if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { | ||
1104 | entry->map_err_type = MAP_ERR_CHECKED; | ||
1105 | break; | ||
1106 | } | ||
1107 | } | ||
1089 | 1108 | ||
1090 | entry->map_err_type = MAP_ERR_CHECKED; | ||
1091 | out: | ||
1092 | put_hash_bucket(bucket, &flags); | 1109 | put_hash_bucket(bucket, &flags); |
1093 | } | 1110 | } |
1094 | EXPORT_SYMBOL(debug_dma_mapping_error); | 1111 | EXPORT_SYMBOL(debug_dma_mapping_error); |
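
Editor's note: the debug_dma_mapping_error() hunk above replaces the single exact lookup with a scan of the bucket list that marks only the first matching entry still in MAP_ERR_NOT_CHECKED state, so duplicate mappings of the same physical address no longer produce false positives. A minimal user-space sketch of that best-fit marking (list layout and field names are illustrative, not the kernel's):

    #include <stdio.h>

    enum map_err_type { MAP_ERR_NOT_CHECKED, MAP_ERR_CHECKED };

    struct entry {
        unsigned long long dev_addr;
        enum map_err_type map_err_type;
        struct entry *next;
    };

    /* Mark the first unchecked entry that matches dev_addr. */
    static void mark_mapping_error_checked(struct entry *head,
                                           unsigned long long dev_addr)
    {
        struct entry *e;

        for (e = head; e; e = e->next) {
            if (e->dev_addr != dev_addr)
                continue;
            if (e->map_err_type == MAP_ERR_NOT_CHECKED) {
                e->map_err_type = MAP_ERR_CHECKED;   /* best fit found */
                break;
            }
            /* already checked: keep looking for another duplicate */
        }
    }

    int main(void)
    {
        struct entry b = { 0x1000, MAP_ERR_NOT_CHECKED, NULL };
        struct entry a = { 0x1000, MAP_ERR_CHECKED, &b };

        mark_mapping_error_checked(&a, 0x1000);
        printf("second duplicate now %s\n",
               b.map_err_type == MAP_ERR_CHECKED ? "checked" : "unchecked");
        return 0;
    }
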
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0a0be33bb199..ca9a7c6d7e97 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -2124,8 +2124,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf) | |||
2124 | /* Return the number pages of memory we physically have, in PAGE_SIZE units. */ | 2124 | /* Return the number pages of memory we physically have, in PAGE_SIZE units. */ |
2125 | unsigned long hugetlb_total_pages(void) | 2125 | unsigned long hugetlb_total_pages(void) |
2126 | { | 2126 | { |
2127 | struct hstate *h = &default_hstate; | 2127 | struct hstate *h; |
2128 | return h->nr_huge_pages * pages_per_huge_page(h); | 2128 | unsigned long nr_total_pages = 0; |
2129 | |||
2130 | for_each_hstate(h) | ||
2131 | nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); | ||
2132 | return nr_total_pages; | ||
2129 | } | 2133 | } |
2130 | 2134 | ||
2131 | static int hugetlb_acct_memory(struct hstate *h, long delta) | 2135 | static int hugetlb_acct_memory(struct hstate *h, long delta) |
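
Editor's note: hugetlb_total_pages() now sums every registered huge page size instead of only the default hstate. A rough user-space model of that accounting (the hstate contents below are made up for the example):

    #include <stdio.h>

    struct hstate {
        unsigned int order;              /* huge page = 2^order base pages */
        unsigned long nr_huge_pages;
    };

    static unsigned long total_pages(const struct hstate *h, int n)
    {
        unsigned long total = 0;
        int i;

        for (i = 0; i < n; i++)          /* walk all sizes, not just the default */
            total += h[i].nr_huge_pages * (1UL << h[i].order);
        return total;
    }

    int main(void)
    {
        struct hstate hstates[] = {
            { 9, 10 },                   /* 10 x 2 MiB pages on a 4 KiB base */
            { 18, 1 },                   /* 1 x 1 GiB page */
        };

        printf("%lu base pages\n", total_pages(hstates, 2));
        return 0;
    }
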
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9597eec8239d..ee3765760818 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -1779,7 +1779,11 @@ void try_offline_node(int nid) | |||
1779 | for (i = 0; i < MAX_NR_ZONES; i++) { | 1779 | for (i = 0; i < MAX_NR_ZONES; i++) { |
1780 | struct zone *zone = pgdat->node_zones + i; | 1780 | struct zone *zone = pgdat->node_zones + i; |
1781 | 1781 | ||
1782 | if (zone->wait_table) | 1782 | /* |
1783 | * wait_table may be allocated from boot memory, | ||
1784 | * here only free if it's allocated by vmalloc. | ||
1785 | */ | ||
1786 | if (is_vmalloc_addr(zone->wait_table)) | ||
1783 | vfree(zone->wait_table); | 1787 | vfree(zone->wait_table); |
1784 | } | 1788 | } |
1785 | 1789 | ||
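
Editor's note: the hunk above frees zone->wait_table only when is_vmalloc_addr() says it came from vmalloc, since tables handed out by the boot allocator must not be passed to vfree(). The same "free only what this allocator owns" guard can be sketched in user space with an address-range check standing in for is_vmalloc_addr() (the static pool and helper names are purely illustrative, not the kernel mechanism):

    #include <stdio.h>
    #include <stdlib.h>

    static char boot_pool[4096];        /* stands in for boot-time memory */

    static int is_heap_addr(const void *p)
    {
        const char *c = p;
        /* outside the static pool => assume it came from malloc() */
        return c < boot_pool || c >= boot_pool + sizeof(boot_pool);
    }

    static void release_table(void *table)
    {
        if (is_heap_addr(table))
            free(table);                /* heap allocation: safe to free */
        /* static-pool allocation: leave it alone */
    }

    int main(void)
    {
        void *from_heap = malloc(128);
        void *from_boot = boot_pool;

        release_table(from_heap);
        release_table(from_boot);       /* no-op, as intended */
        printf("done\n");
        return 0;
    }
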
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index a0b253ecadaf..a5bb0a769eb9 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c | |||
@@ -1288,7 +1288,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb, | |||
1288 | batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff; | 1288 | batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff; |
1289 | 1289 | ||
1290 | /* unpack the aggregated packets and process them one by one */ | 1290 | /* unpack the aggregated packets and process them one by one */ |
1291 | do { | 1291 | while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, |
1292 | batadv_ogm_packet->tt_num_changes)) { | ||
1292 | tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN; | 1293 | tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN; |
1293 | 1294 | ||
1294 | batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff, | 1295 | batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff, |
@@ -1299,8 +1300,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb, | |||
1299 | 1300 | ||
1300 | packet_pos = packet_buff + buff_pos; | 1301 | packet_pos = packet_buff + buff_pos; |
1301 | batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; | 1302 | batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; |
1302 | } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, | 1303 | } |
1303 | batadv_ogm_packet->tt_num_changes)); | ||
1304 | 1304 | ||
1305 | kfree_skb(skb); | 1305 | kfree_skb(skb); |
1306 | return NET_RX_SUCCESS; | 1306 | return NET_RX_SUCCESS; |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 27aa3ee517ce..299fc5f40a26 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -29,6 +29,7 @@ static inline size_t br_port_info_size(void) | |||
29 | + nla_total_size(1) /* IFLA_BRPORT_MODE */ | 29 | + nla_total_size(1) /* IFLA_BRPORT_MODE */ |
30 | + nla_total_size(1) /* IFLA_BRPORT_GUARD */ | 30 | + nla_total_size(1) /* IFLA_BRPORT_GUARD */ |
31 | + nla_total_size(1) /* IFLA_BRPORT_PROTECT */ | 31 | + nla_total_size(1) /* IFLA_BRPORT_PROTECT */ |
32 | + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ | ||
32 | + 0; | 33 | + 0; |
33 | } | 34 | } |
34 | 35 | ||
@@ -329,6 +330,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) | |||
329 | br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); | 330 | br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); |
330 | br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); | 331 | br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); |
331 | br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); | 332 | br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); |
333 | br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); | ||
332 | 334 | ||
333 | if (tb[IFLA_BRPORT_COST]) { | 335 | if (tb[IFLA_BRPORT_COST]) { |
334 | err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); | 336 | err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); |
diff --git a/net/core/dev.c b/net/core/dev.c index dffbef70cd31..d540ced1f6c6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2219,9 +2219,9 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, | |||
2219 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); | 2219 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); |
2220 | struct packet_offload *ptype; | 2220 | struct packet_offload *ptype; |
2221 | __be16 type = skb->protocol; | 2221 | __be16 type = skb->protocol; |
2222 | int vlan_depth = ETH_HLEN; | ||
2222 | 2223 | ||
2223 | while (type == htons(ETH_P_8021Q)) { | 2224 | while (type == htons(ETH_P_8021Q)) { |
2224 | int vlan_depth = ETH_HLEN; | ||
2225 | struct vlan_hdr *vh; | 2225 | struct vlan_hdr *vh; |
2226 | 2226 | ||
2227 | if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) | 2227 | if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) |
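
Editor's note: hoisting vlan_depth out of the while loop lets the offset accumulate across stacked 802.1Q tags instead of being reset to ETH_HLEN on every pass. A stand-alone walk over nested VLAN headers showing the accumulating offset (the frame bytes are fabricated for the demo):

    #include <stdio.h>
    #include <stdint.h>

    #define ETH_HLEN  14
    #define VLAN_HLEN 4

    static uint16_t get_be16(const uint8_t *p)
    {
        return (uint16_t)(p[0] << 8 | p[1]);
    }

    int main(void)
    {
        /* dst(6) src(6) 0x8100 | tci 0x8100 | tci 0x0800 */
        uint8_t frame[] = {
            0,0,0,0,0,0,  0,0,0,0,0,0,  0x81,0x00,
            0x00,0x01, 0x81,0x00,
            0x00,0x02, 0x08,0x00,
        };
        int vlan_depth = ETH_HLEN;              /* declared once, outside the loop */
        uint16_t type = get_be16(frame + 12);

        while (type == 0x8100) {                /* ETH_P_8021Q */
            /* encapsulated ethertype sits 2 bytes into the VLAN header */
            type = get_be16(frame + vlan_depth + 2);
            vlan_depth += VLAN_HLEN;
        }
        printf("payload type 0x%04x at offset %d\n", type, vlan_depth);
        return 0;
    }
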
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a585d45cc9d9..5fb8d7e47294 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -2621,7 +2621,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
2621 | struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); | 2621 | struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); |
2622 | 2622 | ||
2623 | while (RTA_OK(attr, attrlen)) { | 2623 | while (RTA_OK(attr, attrlen)) { |
2624 | unsigned int flavor = attr->rta_type; | 2624 | unsigned int flavor = attr->rta_type & NLA_TYPE_MASK; |
2625 | if (flavor) { | 2625 | if (flavor) { |
2626 | if (flavor > rta_max[sz_idx]) | 2626 | if (flavor > rta_max[sz_idx]) |
2627 | return -EINVAL; | 2627 | return -EINVAL; |
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 245ae078a07f..f4fd23de9b13 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/rtnetlink.h> | 21 | #include <linux/rtnetlink.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | 23 | ||
24 | #include <net/sock.h> | ||
24 | #include <net/inet_frag.h> | 25 | #include <net/inet_frag.h> |
25 | 26 | ||
26 | static void inet_frag_secret_rebuild(unsigned long dummy) | 27 | static void inet_frag_secret_rebuild(unsigned long dummy) |
@@ -277,6 +278,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, | |||
277 | __releases(&f->lock) | 278 | __releases(&f->lock) |
278 | { | 279 | { |
279 | struct inet_frag_queue *q; | 280 | struct inet_frag_queue *q; |
281 | int depth = 0; | ||
280 | 282 | ||
281 | hlist_for_each_entry(q, &f->hash[hash], list) { | 283 | hlist_for_each_entry(q, &f->hash[hash], list) { |
282 | if (q->net == nf && f->match(q, key)) { | 284 | if (q->net == nf && f->match(q, key)) { |
@@ -284,9 +286,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, | |||
284 | read_unlock(&f->lock); | 286 | read_unlock(&f->lock); |
285 | return q; | 287 | return q; |
286 | } | 288 | } |
289 | depth++; | ||
287 | } | 290 | } |
288 | read_unlock(&f->lock); | 291 | read_unlock(&f->lock); |
289 | 292 | ||
290 | return inet_frag_create(nf, f, key); | 293 | if (depth <= INETFRAGS_MAXDEPTH) |
294 | return inet_frag_create(nf, f, key); | ||
295 | else | ||
296 | return ERR_PTR(-ENOBUFS); | ||
291 | } | 297 | } |
292 | EXPORT_SYMBOL(inet_frag_find); | 298 | EXPORT_SYMBOL(inet_frag_find); |
299 | |||
300 | void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, | ||
301 | const char *prefix) | ||
302 | { | ||
303 | static const char msg[] = "inet_frag_find: Fragment hash bucket" | ||
304 | " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH) | ||
305 | ". Dropping fragment.\n"; | ||
306 | |||
307 | if (PTR_ERR(q) == -ENOBUFS) | ||
308 | LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg); | ||
309 | } | ||
310 | EXPORT_SYMBOL(inet_frag_maybe_warn_overflow); | ||
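
Editor's note: inet_frag_find() now counts how long the hash chain already is and refuses to create a new queue once the walk exceeds INETFRAGS_MAXDEPTH, returning -ENOBUFS so the callers below can warn and drop the fragment instead of letting one bucket grow without bound. A compact user-space rendering of that depth check (list shape and limit are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAXDEPTH 128

    struct frag_queue {
        unsigned int key;
        struct frag_queue *next;
    };

    static struct frag_queue *frag_find(struct frag_queue **bucket, unsigned int key)
    {
        struct frag_queue *q;
        int depth = 0;

        for (q = *bucket; q; q = q->next) {
            if (q->key == key)
                return q;                    /* existing queue */
            depth++;
        }

        if (depth > MAXDEPTH)
            return NULL;                     /* chain too long: caller drops fragment */

        q = calloc(1, sizeof(*q));           /* otherwise create a new queue */
        if (!q)
            return NULL;
        q->key = key;
        q->next = *bucket;
        *bucket = q;
        return q;
    }

    int main(void)
    {
        struct frag_queue *bucket = NULL;

        printf("%s\n", frag_find(&bucket, 42) ? "created" : "dropped");
        return 0;
    }
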
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index b6d30acb600c..a6445b843ef4 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -292,14 +292,11 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user) | |||
292 | hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); | 292 | hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); |
293 | 293 | ||
294 | q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash); | 294 | q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash); |
295 | if (q == NULL) | 295 | if (IS_ERR_OR_NULL(q)) { |
296 | goto out_nomem; | 296 | inet_frag_maybe_warn_overflow(q, pr_fmt()); |
297 | 297 | return NULL; | |
298 | } | ||
298 | return container_of(q, struct ipq, q); | 299 | return container_of(q, struct ipq, q); |
299 | |||
300 | out_nomem: | ||
301 | LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n")); | ||
302 | return NULL; | ||
303 | } | 300 | } |
304 | 301 | ||
305 | /* Is the fragment too far ahead to be part of ipq? */ | 302 | /* Is the fragment too far ahead to be part of ipq? */ |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d0ef0e674ec5..91d66dbde9c0 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -798,10 +798,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
798 | 798 | ||
799 | if (dev->header_ops && dev->type == ARPHRD_IPGRE) { | 799 | if (dev->header_ops && dev->type == ARPHRD_IPGRE) { |
800 | gre_hlen = 0; | 800 | gre_hlen = 0; |
801 | if (skb->protocol == htons(ETH_P_IP)) | 801 | tiph = (const struct iphdr *)skb->data; |
802 | tiph = (const struct iphdr *)skb->data; | ||
803 | else | ||
804 | tiph = &tunnel->parms.iph; | ||
805 | } else { | 802 | } else { |
806 | gre_hlen = tunnel->hlen; | 803 | gre_hlen = tunnel->hlen; |
807 | tiph = &tunnel->parms.iph; | 804 | tiph = &tunnel->parms.iph; |
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 310a3647c83d..ec7264514a82 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
@@ -370,7 +370,6 @@ int ip_options_compile(struct net *net, | |||
370 | } | 370 | } |
371 | switch (optptr[3]&0xF) { | 371 | switch (optptr[3]&0xF) { |
372 | case IPOPT_TS_TSONLY: | 372 | case IPOPT_TS_TSONLY: |
373 | opt->ts = optptr - iph; | ||
374 | if (skb) | 373 | if (skb) |
375 | timeptr = &optptr[optptr[2]-1]; | 374 | timeptr = &optptr[optptr[2]-1]; |
376 | opt->ts_needtime = 1; | 375 | opt->ts_needtime = 1; |
@@ -381,7 +380,6 @@ int ip_options_compile(struct net *net, | |||
381 | pp_ptr = optptr + 2; | 380 | pp_ptr = optptr + 2; |
382 | goto error; | 381 | goto error; |
383 | } | 382 | } |
384 | opt->ts = optptr - iph; | ||
385 | if (rt) { | 383 | if (rt) { |
386 | spec_dst_fill(&spec_dst, skb); | 384 | spec_dst_fill(&spec_dst, skb); |
387 | memcpy(&optptr[optptr[2]-1], &spec_dst, 4); | 385 | memcpy(&optptr[optptr[2]-1], &spec_dst, 4); |
@@ -396,7 +394,6 @@ int ip_options_compile(struct net *net, | |||
396 | pp_ptr = optptr + 2; | 394 | pp_ptr = optptr + 2; |
397 | goto error; | 395 | goto error; |
398 | } | 396 | } |
399 | opt->ts = optptr - iph; | ||
400 | { | 397 | { |
401 | __be32 addr; | 398 | __be32 addr; |
402 | memcpy(&addr, &optptr[optptr[2]-1], 4); | 399 | memcpy(&addr, &optptr[optptr[2]-1], 4); |
@@ -429,12 +426,12 @@ int ip_options_compile(struct net *net, | |||
429 | pp_ptr = optptr + 3; | 426 | pp_ptr = optptr + 3; |
430 | goto error; | 427 | goto error; |
431 | } | 428 | } |
432 | opt->ts = optptr - iph; | ||
433 | if (skb) { | 429 | if (skb) { |
434 | optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4); | 430 | optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4); |
435 | opt->is_changed = 1; | 431 | opt->is_changed = 1; |
436 | } | 432 | } |
437 | } | 433 | } |
434 | opt->ts = optptr - iph; | ||
438 | break; | 435 | break; |
439 | case IPOPT_RA: | 436 | case IPOPT_RA: |
440 | if (optlen < 4) { | 437 | if (optlen < 4) { |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 47e854fcae24..e22020790709 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -775,7 +775,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) | |||
775 | * Make sure that we have exactly size bytes | 775 | * Make sure that we have exactly size bytes |
776 | * available to the caller, no more, no less. | 776 | * available to the caller, no more, no less. |
777 | */ | 777 | */ |
778 | skb->avail_size = size; | 778 | skb->reserved_tailroom = skb->end - skb->tail - size; |
779 | return skb; | 779 | return skb; |
780 | } | 780 | } |
781 | __kfree_skb(skb); | 781 | __kfree_skb(skb); |
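
Editor's note: the tcp.c change stops tracking a separate avail_size field and records the surplus tailroom directly; whatever lies between the requested size and the real end of the buffer becomes reserved_tailroom, so exactly size bytes stay available to the caller. The arithmetic in isolation (buffer numbers invented for the demo):

    #include <stdio.h>

    int main(void)
    {
        unsigned int end  = 2048;   /* offset of the end of the data area    */
        unsigned int tail = 256;    /* offset where writable space begins    */
        unsigned int size = 1200;   /* bytes the caller asked for            */

        unsigned int tailroom = end - tail;           /* 1792 writable bytes */
        unsigned int reserved = tailroom - size;      /* 592 bytes set aside */

        printf("usable = %u (requested %u), reserved_tailroom = %u\n",
               tailroom - reserved, size, reserved);
        return 0;
    }
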
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 4a8ec457310f..d09203c63264 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -274,13 +274,6 @@ static void tcp_v4_mtu_reduced(struct sock *sk) | |||
274 | struct inet_sock *inet = inet_sk(sk); | 274 | struct inet_sock *inet = inet_sk(sk); |
275 | u32 mtu = tcp_sk(sk)->mtu_info; | 275 | u32 mtu = tcp_sk(sk)->mtu_info; |
276 | 276 | ||
277 | /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs | ||
278 | * send out by Linux are always <576bytes so they should go through | ||
279 | * unfragmented). | ||
280 | */ | ||
281 | if (sk->sk_state == TCP_LISTEN) | ||
282 | return; | ||
283 | |||
284 | dst = inet_csk_update_pmtu(sk, mtu); | 277 | dst = inet_csk_update_pmtu(sk, mtu); |
285 | if (!dst) | 278 | if (!dst) |
286 | return; | 279 | return; |
@@ -408,6 +401,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
408 | goto out; | 401 | goto out; |
409 | 402 | ||
410 | if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ | 403 | if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ |
404 | /* We are not interested in TCP_LISTEN and open_requests | ||
405 | * (SYN-ACKs send out by Linux are always <576bytes so | ||
406 | * they should go through unfragmented). | ||
407 | */ | ||
408 | if (sk->sk_state == TCP_LISTEN) | ||
409 | goto out; | ||
410 | |||
411 | tp->mtu_info = info; | 411 | tp->mtu_info = info; |
412 | if (!sock_owned_by_user(sk)) { | 412 | if (!sock_owned_by_user(sk)) { |
413 | tcp_v4_mtu_reduced(sk); | 413 | tcp_v4_mtu_reduced(sk); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e2b4461074da..817fbb396bc8 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -1298,7 +1298,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) | |||
1298 | eat = min_t(int, len, skb_headlen(skb)); | 1298 | eat = min_t(int, len, skb_headlen(skb)); |
1299 | if (eat) { | 1299 | if (eat) { |
1300 | __skb_pull(skb, eat); | 1300 | __skb_pull(skb, eat); |
1301 | skb->avail_size -= eat; | ||
1302 | len -= eat; | 1301 | len -= eat; |
1303 | if (!len) | 1302 | if (!len) |
1304 | return; | 1303 | return; |
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 54087e96d7b8..6700069949dd 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -14,6 +14,8 @@ | |||
14 | * 2 of the License, or (at your option) any later version. | 14 | * 2 of the License, or (at your option) any later version. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #define pr_fmt(fmt) "IPv6-nf: " fmt | ||
18 | |||
17 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
18 | #include <linux/types.h> | 20 | #include <linux/types.h> |
19 | #include <linux/string.h> | 21 | #include <linux/string.h> |
@@ -180,13 +182,11 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id, | |||
180 | 182 | ||
181 | q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); | 183 | q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); |
182 | local_bh_enable(); | 184 | local_bh_enable(); |
183 | if (q == NULL) | 185 | if (IS_ERR_OR_NULL(q)) { |
184 | goto oom; | 186 | inet_frag_maybe_warn_overflow(q, pr_fmt()); |
185 | 187 | return NULL; | |
188 | } | ||
186 | return container_of(q, struct frag_queue, q); | 189 | return container_of(q, struct frag_queue, q); |
187 | |||
188 | oom: | ||
189 | return NULL; | ||
190 | } | 190 | } |
191 | 191 | ||
192 | 192 | ||
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 3c6a77290c6e..196ab9347ad1 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -26,6 +26,9 @@ | |||
26 | * YOSHIFUJI,H. @USAGI Always remove fragment header to | 26 | * YOSHIFUJI,H. @USAGI Always remove fragment header to |
27 | * calculate ICV correctly. | 27 | * calculate ICV correctly. |
28 | */ | 28 | */ |
29 | |||
30 | #define pr_fmt(fmt) "IPv6: " fmt | ||
31 | |||
29 | #include <linux/errno.h> | 32 | #include <linux/errno.h> |
30 | #include <linux/types.h> | 33 | #include <linux/types.h> |
31 | #include <linux/string.h> | 34 | #include <linux/string.h> |
@@ -185,9 +188,10 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6 | |||
185 | hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd); | 188 | hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd); |
186 | 189 | ||
187 | q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); | 190 | q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); |
188 | if (q == NULL) | 191 | if (IS_ERR_OR_NULL(q)) { |
192 | inet_frag_maybe_warn_overflow(q, pr_fmt()); | ||
189 | return NULL; | 193 | return NULL; |
190 | 194 | } | |
191 | return container_of(q, struct frag_queue, q); | 195 | return container_of(q, struct frag_queue, q); |
192 | } | 196 | } |
193 | 197 | ||
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 9b6460055df5..f6d629fd6aee 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -389,6 +389,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
389 | } | 389 | } |
390 | 390 | ||
391 | if (type == ICMPV6_PKT_TOOBIG) { | 391 | if (type == ICMPV6_PKT_TOOBIG) { |
392 | /* We are not interested in TCP_LISTEN and open_requests | ||
393 | * (SYN-ACKs send out by Linux are always <576bytes so | ||
394 | * they should go through unfragmented). | ||
395 | */ | ||
396 | if (sk->sk_state == TCP_LISTEN) | ||
397 | goto out; | ||
398 | |||
392 | tp->mtu_info = ntohl(info); | 399 | tp->mtu_info = ntohl(info); |
393 | if (!sock_owned_by_user(sk)) | 400 | if (!sock_owned_by_user(sk)) |
394 | tcp_v6_mtu_reduced(sk); | 401 | tcp_v6_mtu_reduced(sk); |
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index 7f8266dd14cb..b530afadd76c 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c | |||
@@ -68,7 +68,8 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock) | |||
68 | } | 68 | } |
69 | } | 69 | } |
70 | 70 | ||
71 | static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) | 71 | static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen, |
72 | int err) | ||
72 | { | 73 | { |
73 | struct sock *sk; | 74 | struct sock *sk; |
74 | struct hlist_node *tmp; | 75 | struct hlist_node *tmp; |
@@ -100,7 +101,10 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) | |||
100 | 101 | ||
101 | nfc_llcp_accept_unlink(accept_sk); | 102 | nfc_llcp_accept_unlink(accept_sk); |
102 | 103 | ||
104 | if (err) | ||
105 | accept_sk->sk_err = err; | ||
103 | accept_sk->sk_state = LLCP_CLOSED; | 106 | accept_sk->sk_state = LLCP_CLOSED; |
107 | accept_sk->sk_state_change(sk); | ||
104 | 108 | ||
105 | bh_unlock_sock(accept_sk); | 109 | bh_unlock_sock(accept_sk); |
106 | 110 | ||
@@ -123,7 +127,10 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) | |||
123 | continue; | 127 | continue; |
124 | } | 128 | } |
125 | 129 | ||
130 | if (err) | ||
131 | sk->sk_err = err; | ||
126 | sk->sk_state = LLCP_CLOSED; | 132 | sk->sk_state = LLCP_CLOSED; |
133 | sk->sk_state_change(sk); | ||
127 | 134 | ||
128 | bh_unlock_sock(sk); | 135 | bh_unlock_sock(sk); |
129 | 136 | ||
@@ -133,6 +140,36 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) | |||
133 | } | 140 | } |
134 | 141 | ||
135 | write_unlock(&local->sockets.lock); | 142 | write_unlock(&local->sockets.lock); |
143 | |||
144 | /* | ||
145 | * If we want to keep the listening sockets alive, | ||
146 | * we don't touch the RAW ones. | ||
147 | */ | ||
148 | if (listen == true) | ||
149 | return; | ||
150 | |||
151 | write_lock(&local->raw_sockets.lock); | ||
152 | |||
153 | sk_for_each_safe(sk, tmp, &local->raw_sockets.head) { | ||
154 | llcp_sock = nfc_llcp_sock(sk); | ||
155 | |||
156 | bh_lock_sock(sk); | ||
157 | |||
158 | nfc_llcp_socket_purge(llcp_sock); | ||
159 | |||
160 | if (err) | ||
161 | sk->sk_err = err; | ||
162 | sk->sk_state = LLCP_CLOSED; | ||
163 | sk->sk_state_change(sk); | ||
164 | |||
165 | bh_unlock_sock(sk); | ||
166 | |||
167 | sock_orphan(sk); | ||
168 | |||
169 | sk_del_node_init(sk); | ||
170 | } | ||
171 | |||
172 | write_unlock(&local->raw_sockets.lock); | ||
136 | } | 173 | } |
137 | 174 | ||
138 | struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) | 175 | struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) |
@@ -142,20 +179,25 @@ struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) | |||
142 | return local; | 179 | return local; |
143 | } | 180 | } |
144 | 181 | ||
145 | static void local_release(struct kref *ref) | 182 | static void local_cleanup(struct nfc_llcp_local *local, bool listen) |
146 | { | 183 | { |
147 | struct nfc_llcp_local *local; | 184 | nfc_llcp_socket_release(local, listen, ENXIO); |
148 | |||
149 | local = container_of(ref, struct nfc_llcp_local, ref); | ||
150 | |||
151 | list_del(&local->list); | ||
152 | nfc_llcp_socket_release(local, false); | ||
153 | del_timer_sync(&local->link_timer); | 185 | del_timer_sync(&local->link_timer); |
154 | skb_queue_purge(&local->tx_queue); | 186 | skb_queue_purge(&local->tx_queue); |
155 | cancel_work_sync(&local->tx_work); | 187 | cancel_work_sync(&local->tx_work); |
156 | cancel_work_sync(&local->rx_work); | 188 | cancel_work_sync(&local->rx_work); |
157 | cancel_work_sync(&local->timeout_work); | 189 | cancel_work_sync(&local->timeout_work); |
158 | kfree_skb(local->rx_pending); | 190 | kfree_skb(local->rx_pending); |
191 | } | ||
192 | |||
193 | static void local_release(struct kref *ref) | ||
194 | { | ||
195 | struct nfc_llcp_local *local; | ||
196 | |||
197 | local = container_of(ref, struct nfc_llcp_local, ref); | ||
198 | |||
199 | list_del(&local->list); | ||
200 | local_cleanup(local, false); | ||
159 | kfree(local); | 201 | kfree(local); |
160 | } | 202 | } |
161 | 203 | ||
@@ -1348,7 +1390,7 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev) | |||
1348 | return; | 1390 | return; |
1349 | 1391 | ||
1350 | /* Close and purge all existing sockets */ | 1392 | /* Close and purge all existing sockets */ |
1351 | nfc_llcp_socket_release(local, true); | 1393 | nfc_llcp_socket_release(local, true, 0); |
1352 | } | 1394 | } |
1353 | 1395 | ||
1354 | void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, | 1396 | void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, |
@@ -1427,6 +1469,8 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev) | |||
1427 | return; | 1469 | return; |
1428 | } | 1470 | } |
1429 | 1471 | ||
1472 | local_cleanup(local, false); | ||
1473 | |||
1430 | nfc_llcp_local_put(local); | 1474 | nfc_llcp_local_put(local); |
1431 | } | 1475 | } |
1432 | 1476 | ||
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c index 5332751943a9..5c7cdf3f2a83 100644 --- a/net/nfc/llcp/sock.c +++ b/net/nfc/llcp/sock.c | |||
@@ -278,6 +278,8 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent, | |||
278 | 278 | ||
279 | pr_debug("Returning sk state %d\n", sk->sk_state); | 279 | pr_debug("Returning sk state %d\n", sk->sk_state); |
280 | 280 | ||
281 | sk_acceptq_removed(parent); | ||
282 | |||
281 | return sk; | 283 | return sk; |
282 | } | 284 | } |
283 | 285 | ||
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index ac2defeeba83..d4d5363c7ba7 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
@@ -58,7 +58,7 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) | |||
58 | 58 | ||
59 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 59 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
60 | skb->csum = csum_sub(skb->csum, csum_partial(skb->data | 60 | skb->csum = csum_sub(skb->csum, csum_partial(skb->data |
61 | + ETH_HLEN, VLAN_HLEN, 0)); | 61 | + (2 * ETH_ALEN), VLAN_HLEN, 0)); |
62 | 62 | ||
63 | vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); | 63 | vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); |
64 | *current_tci = vhdr->h_vlan_TCI; | 64 | *current_tci = vhdr->h_vlan_TCI; |
@@ -115,7 +115,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla | |||
115 | 115 | ||
116 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 116 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
117 | skb->csum = csum_add(skb->csum, csum_partial(skb->data | 117 | skb->csum = csum_add(skb->csum, csum_partial(skb->data |
118 | + ETH_HLEN, VLAN_HLEN, 0)); | 118 | + (2 * ETH_ALEN), VLAN_HLEN, 0)); |
119 | 119 | ||
120 | } | 120 | } |
121 | __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); | 121 | __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); |
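
Editor's note: in both hunks the CHECKSUM_COMPLETE adjustment now covers the four bytes starting at 2 * ETH_ALEN (offset 12), where the 802.1Q tag actually sits; ETH_HLEN (14) already includes the 2-byte ethertype, so the old offset started two bytes too late. A tiny offset check makes the difference concrete:

    #include <stdio.h>

    #define ETH_ALEN  6
    #define ETH_HLEN  14   /* 2 MACs + 2-byte ethertype */
    #define VLAN_HLEN 4    /* TPID + TCI */

    int main(void)
    {
        int tag_start = 2 * ETH_ALEN;     /* tag begins right after the MACs */

        printf("802.1Q tag occupies bytes [%d, %d)\n",
               tag_start, tag_start + VLAN_HLEN);
        printf("old checksum range [%d, %d)  <- starts two bytes too far\n",
               ETH_HLEN, ETH_HLEN + VLAN_HLEN);
        printf("new checksum range [%d, %d)  <- exactly the inserted/removed tag\n",
               2 * ETH_ALEN, 2 * ETH_ALEN + VLAN_HLEN);
        return 0;
    }
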
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e87a26506dba..a4b724708a1a 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -394,6 +394,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex, | |||
394 | 394 | ||
395 | skb_copy_and_csum_dev(skb, nla_data(nla)); | 395 | skb_copy_and_csum_dev(skb, nla_data(nla)); |
396 | 396 | ||
397 | genlmsg_end(user_skb, upcall); | ||
397 | err = genlmsg_unicast(net, user_skb, upcall_info->portid); | 398 | err = genlmsg_unicast(net, user_skb, upcall_info->portid); |
398 | 399 | ||
399 | out: | 400 | out: |
@@ -1690,6 +1691,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
1690 | if (IS_ERR(vport)) | 1691 | if (IS_ERR(vport)) |
1691 | goto exit_unlock; | 1692 | goto exit_unlock; |
1692 | 1693 | ||
1694 | err = 0; | ||
1693 | reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, | 1695 | reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, |
1694 | OVS_VPORT_CMD_NEW); | 1696 | OVS_VPORT_CMD_NEW); |
1695 | if (IS_ERR(reply)) { | 1697 | if (IS_ERR(reply)) { |
@@ -1771,6 +1773,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info) | |||
1771 | if (IS_ERR(reply)) | 1773 | if (IS_ERR(reply)) |
1772 | goto exit_unlock; | 1774 | goto exit_unlock; |
1773 | 1775 | ||
1776 | err = 0; | ||
1774 | ovs_dp_detach_port(vport); | 1777 | ovs_dp_detach_port(vport); |
1775 | 1778 | ||
1776 | genl_notify(reply, genl_info_net(info), info->snd_portid, | 1779 | genl_notify(reply, genl_info_net(info), info->snd_portid, |
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 20605ecf100b..fe0e4215c73d 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
@@ -482,7 +482,11 @@ static __be16 parse_ethertype(struct sk_buff *skb) | |||
482 | return htons(ETH_P_802_2); | 482 | return htons(ETH_P_802_2); |
483 | 483 | ||
484 | __skb_pull(skb, sizeof(struct llc_snap_hdr)); | 484 | __skb_pull(skb, sizeof(struct llc_snap_hdr)); |
485 | return llc->ethertype; | 485 | |
486 | if (ntohs(llc->ethertype) >= 1536) | ||
487 | return llc->ethertype; | ||
488 | |||
489 | return htons(ETH_P_802_2); | ||
486 | } | 490 | } |
487 | 491 | ||
488 | static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, | 492 | static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, |
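
Editor's note: the parse_ethertype() fix applies the classic Ethernet rule: a value of 1536 (0x0600) or more in that field is an EtherType, anything smaller is an IEEE 802.3 length, so the frame keeps being treated as ETH_P_802_2. A stand-alone classifier for that boundary:

    #include <stdio.h>
    #include <stdint.h>

    #define ETH_P_802_2 0x0004   /* kernel's marker value for 802.2 frames */

    static uint16_t classify(uint16_t field)
    {
        /* >= 1536: genuine EtherType; < 1536: 802.3 length field */
        return field >= 1536 ? field : ETH_P_802_2;
    }

    int main(void)
    {
        printf("0x0800 -> 0x%04x (IPv4 EtherType kept)\n", classify(0x0800));
        printf("0x0100 -> 0x%04x (length, fall back to 802.2)\n", classify(0x0100));
        return 0;
    }
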
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 670cbc3518de..2130d61c384a 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c | |||
@@ -43,8 +43,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) | |||
43 | 43 | ||
44 | /* Make our own copy of the packet. Otherwise we will mangle the | 44 | /* Make our own copy of the packet. Otherwise we will mangle the |
45 | * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). | 45 | * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). |
46 | * (No one comes after us, since we tell handle_bridge() that we took | 46 | */ |
47 | * the packet.) */ | ||
48 | skb = skb_share_check(skb, GFP_ATOMIC); | 47 | skb = skb_share_check(skb, GFP_ATOMIC); |
49 | if (unlikely(!skb)) | 48 | if (unlikely(!skb)) |
50 | return; | 49 | return; |
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index ba717cc038b3..f6b8132ce4cb 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c | |||
@@ -325,8 +325,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb) | |||
325 | * @skb: skb that was received | 325 | * @skb: skb that was received |
326 | * | 326 | * |
327 | * Must be called with rcu_read_lock. The packet cannot be shared and | 327 | * Must be called with rcu_read_lock. The packet cannot be shared and |
328 | * skb->data should point to the Ethernet header. The caller must have already | 328 | * skb->data should point to the Ethernet header. |
329 | * called compute_ip_summed() to initialize the checksumming fields. | ||
330 | */ | 329 | */ |
331 | void ovs_vport_receive(struct vport *vport, struct sk_buff *skb) | 330 | void ovs_vport_receive(struct vport *vport, struct sk_buff *skb) |
332 | { | 331 | { |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 43cd0dd9149d..d2709e2b7be6 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -1079,7 +1079,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc, | |||
1079 | transports) { | 1079 | transports) { |
1080 | 1080 | ||
1081 | if (transport == active) | 1081 | if (transport == active) |
1082 | break; | 1082 | continue; |
1083 | list_for_each_entry(chunk, &transport->transmitted, | 1083 | list_for_each_entry(chunk, &transport->transmitted, |
1084 | transmitted_list) { | 1084 | transmitted_list) { |
1085 | if (key == chunk->subh.data_hdr->tsn) { | 1085 | if (key == chunk->subh.data_hdr->tsn) { |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 5131fcfedb03..de1a0138317f 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -2082,7 +2082,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net, | |||
2082 | } | 2082 | } |
2083 | 2083 | ||
2084 | /* Delete the tempory new association. */ | 2084 | /* Delete the tempory new association. */ |
2085 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); | 2085 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc)); |
2086 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | 2086 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); |
2087 | 2087 | ||
2088 | /* Restore association pointer to provide SCTP command interpeter | 2088 | /* Restore association pointer to provide SCTP command interpeter |
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index 48665ecd1197..8ab295154517 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c | |||
@@ -310,7 +310,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, | |||
310 | 310 | ||
311 | if (old_ctx) { | 311 | if (old_ctx) { |
312 | new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len, | 312 | new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len, |
313 | GFP_KERNEL); | 313 | GFP_ATOMIC); |
314 | if (!new_ctx) | 314 | if (!new_ctx) |
315 | return -ENOMEM; | 315 | return -ENOMEM; |
316 | 316 | ||
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index a9ebcf9e3710..ecdf30eb5879 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -3144,7 +3144,7 @@ static unsigned int convert_to_spdif_status(unsigned short val) | |||
3144 | if (val & AC_DIG1_PROFESSIONAL) | 3144 | if (val & AC_DIG1_PROFESSIONAL) |
3145 | sbits |= IEC958_AES0_PROFESSIONAL; | 3145 | sbits |= IEC958_AES0_PROFESSIONAL; |
3146 | if (sbits & IEC958_AES0_PROFESSIONAL) { | 3146 | if (sbits & IEC958_AES0_PROFESSIONAL) { |
3147 | if (sbits & AC_DIG1_EMPHASIS) | 3147 | if (val & AC_DIG1_EMPHASIS) |
3148 | sbits |= IEC958_AES0_PRO_EMPHASIS_5015; | 3148 | sbits |= IEC958_AES0_PRO_EMPHASIS_5015; |
3149 | } else { | 3149 | } else { |
3150 | if (val & AC_DIG1_EMPHASIS) | 3150 | if (val & AC_DIG1_EMPHASIS) |
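
Editor's note: the one-character fix above tests AC_DIG1_EMPHASIS in val, the codec's own control word, rather than in sbits, the IEC958 status byte being assembled; the two words use different bit layouts, so checking the wrong one can set the professional-emphasis bit from an unrelated flag. A toy illustration of why mixing the two flag domains misfires (the bit values are invented):

    #include <stdio.h>

    #define CTL_EMPHASIS   0x20   /* "emphasis" bit in the control word      */
    #define STATUS_PRO     0x01   /* "professional" bit in the status byte   */
    #define STATUS_EMPH    0x10   /* "5015 emphasis" bit in the status byte  */

    int main(void)
    {
        unsigned int val = CTL_EMPHASIS;      /* control word from the codec */
        unsigned int sbits = STATUS_PRO;      /* status byte built so far    */

        if (sbits & STATUS_PRO) {
            if (val & CTL_EMPHASIS)           /* correct: test the source word */
                sbits |= STATUS_EMPH;
        }
        printf("status = 0x%02x\n", sbits);
        return 0;
    }
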
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 78897d05d80f..43c2ea539561 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c | |||
@@ -995,6 +995,8 @@ enum { | |||
995 | BAD_NO_EXTRA_SURR_DAC = 0x101, | 995 | BAD_NO_EXTRA_SURR_DAC = 0x101, |
996 | /* Primary DAC shared with main surrounds */ | 996 | /* Primary DAC shared with main surrounds */ |
997 | BAD_SHARED_SURROUND = 0x100, | 997 | BAD_SHARED_SURROUND = 0x100, |
998 | /* No independent HP possible */ | ||
999 | BAD_NO_INDEP_HP = 0x40, | ||
998 | /* Primary DAC shared with main CLFE */ | 1000 | /* Primary DAC shared with main CLFE */ |
999 | BAD_SHARED_CLFE = 0x10, | 1001 | BAD_SHARED_CLFE = 0x10, |
1000 | /* Primary DAC shared with extra surrounds */ | 1002 | /* Primary DAC shared with extra surrounds */ |
@@ -1392,6 +1394,43 @@ static int check_aamix_out_path(struct hda_codec *codec, int path_idx) | |||
1392 | return snd_hda_get_path_idx(codec, path); | 1394 | return snd_hda_get_path_idx(codec, path); |
1393 | } | 1395 | } |
1394 | 1396 | ||
1397 | /* check whether the independent HP is available with the current config */ | ||
1398 | static bool indep_hp_possible(struct hda_codec *codec) | ||
1399 | { | ||
1400 | struct hda_gen_spec *spec = codec->spec; | ||
1401 | struct auto_pin_cfg *cfg = &spec->autocfg; | ||
1402 | struct nid_path *path; | ||
1403 | int i, idx; | ||
1404 | |||
1405 | if (cfg->line_out_type == AUTO_PIN_HP_OUT) | ||
1406 | idx = spec->out_paths[0]; | ||
1407 | else | ||
1408 | idx = spec->hp_paths[0]; | ||
1409 | path = snd_hda_get_path_from_idx(codec, idx); | ||
1410 | if (!path) | ||
1411 | return false; | ||
1412 | |||
1413 | /* assume no path conflicts unless aamix is involved */ | ||
1414 | if (!spec->mixer_nid || !is_nid_contained(path, spec->mixer_nid)) | ||
1415 | return true; | ||
1416 | |||
1417 | /* check whether output paths contain aamix */ | ||
1418 | for (i = 0; i < cfg->line_outs; i++) { | ||
1419 | if (spec->out_paths[i] == idx) | ||
1420 | break; | ||
1421 | path = snd_hda_get_path_from_idx(codec, spec->out_paths[i]); | ||
1422 | if (path && is_nid_contained(path, spec->mixer_nid)) | ||
1423 | return false; | ||
1424 | } | ||
1425 | for (i = 0; i < cfg->speaker_outs; i++) { | ||
1426 | path = snd_hda_get_path_from_idx(codec, spec->speaker_paths[i]); | ||
1427 | if (path && is_nid_contained(path, spec->mixer_nid)) | ||
1428 | return false; | ||
1429 | } | ||
1430 | |||
1431 | return true; | ||
1432 | } | ||
1433 | |||
1395 | /* fill the empty entries in the dac array for speaker/hp with the | 1434 | /* fill the empty entries in the dac array for speaker/hp with the |
1396 | * shared dac pointed by the paths | 1435 | * shared dac pointed by the paths |
1397 | */ | 1436 | */ |
@@ -1545,6 +1584,9 @@ static int fill_and_eval_dacs(struct hda_codec *codec, | |||
1545 | badness += BAD_MULTI_IO; | 1584 | badness += BAD_MULTI_IO; |
1546 | } | 1585 | } |
1547 | 1586 | ||
1587 | if (spec->indep_hp && !indep_hp_possible(codec)) | ||
1588 | badness += BAD_NO_INDEP_HP; | ||
1589 | |||
1548 | /* re-fill the shared DAC for speaker / headphone */ | 1590 | /* re-fill the shared DAC for speaker / headphone */ |
1549 | if (cfg->line_out_type != AUTO_PIN_HP_OUT) | 1591 | if (cfg->line_out_type != AUTO_PIN_HP_OUT) |
1550 | refill_shared_dacs(codec, cfg->hp_outs, | 1592 | refill_shared_dacs(codec, cfg->hp_outs, |
@@ -1758,6 +1800,10 @@ static int parse_output_paths(struct hda_codec *codec) | |||
1758 | cfg->speaker_pins, val); | 1800 | cfg->speaker_pins, val); |
1759 | } | 1801 | } |
1760 | 1802 | ||
1803 | /* clear indep_hp flag if not available */ | ||
1804 | if (spec->indep_hp && !indep_hp_possible(codec)) | ||
1805 | spec->indep_hp = 0; | ||
1806 | |||
1761 | kfree(best_cfg); | 1807 | kfree(best_cfg); |
1762 | return 0; | 1808 | return 0; |
1763 | } | 1809 | } |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 4cea6bb6fade..418bfc0eb0a3 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -415,6 +415,8 @@ struct azx_dev { | |||
415 | unsigned int opened :1; | 415 | unsigned int opened :1; |
416 | unsigned int running :1; | 416 | unsigned int running :1; |
417 | unsigned int irq_pending :1; | 417 | unsigned int irq_pending :1; |
418 | unsigned int prepared:1; | ||
419 | unsigned int locked:1; | ||
418 | /* | 420 | /* |
419 | * For VIA: | 421 | * For VIA: |
420 | * A flag to ensure DMA position is 0 | 422 | * A flag to ensure DMA position is 0 |
@@ -426,8 +428,25 @@ struct azx_dev { | |||
426 | 428 | ||
427 | struct timecounter azx_tc; | 429 | struct timecounter azx_tc; |
428 | struct cyclecounter azx_cc; | 430 | struct cyclecounter azx_cc; |
431 | |||
432 | #ifdef CONFIG_SND_HDA_DSP_LOADER | ||
433 | struct mutex dsp_mutex; | ||
434 | #endif | ||
429 | }; | 435 | }; |
430 | 436 | ||
437 | /* DSP lock helpers */ | ||
438 | #ifdef CONFIG_SND_HDA_DSP_LOADER | ||
439 | #define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex) | ||
440 | #define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex) | ||
441 | #define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex) | ||
442 | #define dsp_is_locked(dev) ((dev)->locked) | ||
443 | #else | ||
444 | #define dsp_lock_init(dev) do {} while (0) | ||
445 | #define dsp_lock(dev) do {} while (0) | ||
446 | #define dsp_unlock(dev) do {} while (0) | ||
447 | #define dsp_is_locked(dev) 0 | ||
448 | #endif | ||
449 | |||
431 | /* CORB/RIRB */ | 450 | /* CORB/RIRB */ |
432 | struct azx_rb { | 451 | struct azx_rb { |
433 | u32 *buf; /* CORB/RIRB buffer | 452 | u32 *buf; /* CORB/RIRB buffer |
@@ -527,6 +546,10 @@ struct azx { | |||
527 | 546 | ||
528 | /* card list (for power_save trigger) */ | 547 | /* card list (for power_save trigger) */ |
529 | struct list_head list; | 548 | struct list_head list; |
549 | |||
550 | #ifdef CONFIG_SND_HDA_DSP_LOADER | ||
551 | struct azx_dev saved_azx_dev; | ||
552 | #endif | ||
530 | }; | 553 | }; |
531 | 554 | ||
532 | #define CREATE_TRACE_POINTS | 555 | #define CREATE_TRACE_POINTS |
@@ -1793,15 +1816,25 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream) | |||
1793 | dev = chip->capture_index_offset; | 1816 | dev = chip->capture_index_offset; |
1794 | nums = chip->capture_streams; | 1817 | nums = chip->capture_streams; |
1795 | } | 1818 | } |
1796 | for (i = 0; i < nums; i++, dev++) | 1819 | for (i = 0; i < nums; i++, dev++) { |
1797 | if (!chip->azx_dev[dev].opened) { | 1820 | struct azx_dev *azx_dev = &chip->azx_dev[dev]; |
1798 | res = &chip->azx_dev[dev]; | 1821 | dsp_lock(azx_dev); |
1799 | if (res->assigned_key == key) | 1822 | if (!azx_dev->opened && !dsp_is_locked(azx_dev)) { |
1800 | break; | 1823 | res = azx_dev; |
1824 | if (res->assigned_key == key) { | ||
1825 | res->opened = 1; | ||
1826 | res->assigned_key = key; | ||
1827 | dsp_unlock(azx_dev); | ||
1828 | return azx_dev; | ||
1829 | } | ||
1801 | } | 1830 | } |
1831 | dsp_unlock(azx_dev); | ||
1832 | } | ||
1802 | if (res) { | 1833 | if (res) { |
1834 | dsp_lock(res); | ||
1803 | res->opened = 1; | 1835 | res->opened = 1; |
1804 | res->assigned_key = key; | 1836 | res->assigned_key = key; |
1837 | dsp_unlock(res); | ||
1805 | } | 1838 | } |
1806 | return res; | 1839 | return res; |
1807 | } | 1840 | } |
@@ -2009,6 +2042,12 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream, | |||
2009 | struct azx_dev *azx_dev = get_azx_dev(substream); | 2042 | struct azx_dev *azx_dev = get_azx_dev(substream); |
2010 | int ret; | 2043 | int ret; |
2011 | 2044 | ||
2045 | dsp_lock(azx_dev); | ||
2046 | if (dsp_is_locked(azx_dev)) { | ||
2047 | ret = -EBUSY; | ||
2048 | goto unlock; | ||
2049 | } | ||
2050 | |||
2012 | mark_runtime_wc(chip, azx_dev, substream, false); | 2051 | mark_runtime_wc(chip, azx_dev, substream, false); |
2013 | azx_dev->bufsize = 0; | 2052 | azx_dev->bufsize = 0; |
2014 | azx_dev->period_bytes = 0; | 2053 | azx_dev->period_bytes = 0; |
@@ -2016,8 +2055,10 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream, | |||
2016 | ret = snd_pcm_lib_malloc_pages(substream, | 2055 | ret = snd_pcm_lib_malloc_pages(substream, |
2017 | params_buffer_bytes(hw_params)); | 2056 | params_buffer_bytes(hw_params)); |
2018 | if (ret < 0) | 2057 | if (ret < 0) |
2019 | return ret; | 2058 | goto unlock; |
2020 | mark_runtime_wc(chip, azx_dev, substream, true); | 2059 | mark_runtime_wc(chip, azx_dev, substream, true); |
2060 | unlock: | ||
2061 | dsp_unlock(azx_dev); | ||
2021 | return ret; | 2062 | return ret; |
2022 | } | 2063 | } |
2023 | 2064 | ||
@@ -2029,16 +2070,21 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream) | |||
2029 | struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream]; | 2070 | struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream]; |
2030 | 2071 | ||
2031 | /* reset BDL address */ | 2072 | /* reset BDL address */ |
2032 | azx_sd_writel(azx_dev, SD_BDLPL, 0); | 2073 | dsp_lock(azx_dev); |
2033 | azx_sd_writel(azx_dev, SD_BDLPU, 0); | 2074 | if (!dsp_is_locked(azx_dev)) { |
2034 | azx_sd_writel(azx_dev, SD_CTL, 0); | 2075 | azx_sd_writel(azx_dev, SD_BDLPL, 0); |
2035 | azx_dev->bufsize = 0; | 2076 | azx_sd_writel(azx_dev, SD_BDLPU, 0); |
2036 | azx_dev->period_bytes = 0; | 2077 | azx_sd_writel(azx_dev, SD_CTL, 0); |
2037 | azx_dev->format_val = 0; | 2078 | azx_dev->bufsize = 0; |
2079 | azx_dev->period_bytes = 0; | ||
2080 | azx_dev->format_val = 0; | ||
2081 | } | ||
2038 | 2082 | ||
2039 | snd_hda_codec_cleanup(apcm->codec, hinfo, substream); | 2083 | snd_hda_codec_cleanup(apcm->codec, hinfo, substream); |
2040 | 2084 | ||
2041 | mark_runtime_wc(chip, azx_dev, substream, false); | 2085 | mark_runtime_wc(chip, azx_dev, substream, false); |
2086 | azx_dev->prepared = 0; | ||
2087 | dsp_unlock(azx_dev); | ||
2042 | return snd_pcm_lib_free_pages(substream); | 2088 | return snd_pcm_lib_free_pages(substream); |
2043 | } | 2089 | } |
2044 | 2090 | ||
@@ -2055,6 +2101,12 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) | |||
2055 | snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid); | 2101 | snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid); |
2056 | unsigned short ctls = spdif ? spdif->ctls : 0; | 2102 | unsigned short ctls = spdif ? spdif->ctls : 0; |
2057 | 2103 | ||
2104 | dsp_lock(azx_dev); | ||
2105 | if (dsp_is_locked(azx_dev)) { | ||
2106 | err = -EBUSY; | ||
2107 | goto unlock; | ||
2108 | } | ||
2109 | |||
2058 | azx_stream_reset(chip, azx_dev); | 2110 | azx_stream_reset(chip, azx_dev); |
2059 | format_val = snd_hda_calc_stream_format(runtime->rate, | 2111 | format_val = snd_hda_calc_stream_format(runtime->rate, |
2060 | runtime->channels, | 2112 | runtime->channels, |
@@ -2065,7 +2117,8 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) | |||
2065 | snd_printk(KERN_ERR SFX | 2117 | snd_printk(KERN_ERR SFX |
2066 | "%s: invalid format_val, rate=%d, ch=%d, format=%d\n", | 2118 | "%s: invalid format_val, rate=%d, ch=%d, format=%d\n", |
2067 | pci_name(chip->pci), runtime->rate, runtime->channels, runtime->format); | 2119 | pci_name(chip->pci), runtime->rate, runtime->channels, runtime->format); |
2068 | return -EINVAL; | 2120 | err = -EINVAL; |
2121 | goto unlock; | ||
2069 | } | 2122 | } |
2070 | 2123 | ||
2071 | bufsize = snd_pcm_lib_buffer_bytes(substream); | 2124 | bufsize = snd_pcm_lib_buffer_bytes(substream); |
@@ -2084,7 +2137,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) | |||
2084 | azx_dev->no_period_wakeup = runtime->no_period_wakeup; | 2137 | azx_dev->no_period_wakeup = runtime->no_period_wakeup; |
2085 | err = azx_setup_periods(chip, substream, azx_dev); | 2138 | err = azx_setup_periods(chip, substream, azx_dev); |
2086 | if (err < 0) | 2139 | if (err < 0) |
2087 | return err; | 2140 | goto unlock; |
2088 | } | 2141 | } |
2089 | 2142 | ||
2090 | /* wallclk has 24Mhz clock source */ | 2143 | /* wallclk has 24Mhz clock source */ |
@@ -2101,8 +2154,14 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) | |||
2101 | if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) && | 2154 | if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) && |
2102 | stream_tag > chip->capture_streams) | 2155 | stream_tag > chip->capture_streams) |
2103 | stream_tag -= chip->capture_streams; | 2156 | stream_tag -= chip->capture_streams; |
2104 | return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag, | 2157 | err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag, |
2105 | azx_dev->format_val, substream); | 2158 | azx_dev->format_val, substream); |
2159 | |||
2160 | unlock: | ||
2161 | if (!err) | ||
2162 | azx_dev->prepared = 1; | ||
2163 | dsp_unlock(azx_dev); | ||
2164 | return err; | ||
2106 | } | 2165 | } |
2107 | 2166 | ||
2108 | static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | 2167 | static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) |
@@ -2117,6 +2176,9 @@ static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | |||
2117 | azx_dev = get_azx_dev(substream); | 2176 | azx_dev = get_azx_dev(substream); |
2118 | trace_azx_pcm_trigger(chip, azx_dev, cmd); | 2177 | trace_azx_pcm_trigger(chip, azx_dev, cmd); |
2119 | 2178 | ||
2179 | if (dsp_is_locked(azx_dev) || !azx_dev->prepared) | ||
2180 | return -EPIPE; | ||
2181 | |||
2120 | switch (cmd) { | 2182 | switch (cmd) { |
2121 | case SNDRV_PCM_TRIGGER_START: | 2183 | case SNDRV_PCM_TRIGGER_START: |
2122 | rstart = 1; | 2184 | rstart = 1; |
@@ -2621,17 +2683,27 @@ static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format, | |||
2621 | struct azx_dev *azx_dev; | 2683 | struct azx_dev *azx_dev; |
2622 | int err; | 2684 | int err; |
2623 | 2685 | ||
2624 | if (snd_hda_lock_devices(bus)) | 2686 | azx_dev = azx_get_dsp_loader_dev(chip); |
2625 | return -EBUSY; | 2687 | |
2688 | dsp_lock(azx_dev); | ||
2689 | spin_lock_irq(&chip->reg_lock); | ||
2690 | if (azx_dev->running || azx_dev->locked) { | ||
2691 | spin_unlock_irq(&chip->reg_lock); | ||
2692 | err = -EBUSY; | ||
2693 | goto unlock; | ||
2694 | } | ||
2695 | azx_dev->prepared = 0; | ||
2696 | chip->saved_azx_dev = *azx_dev; | ||
2697 | azx_dev->locked = 1; | ||
2698 | spin_unlock_irq(&chip->reg_lock); | ||
2626 | 2699 | ||
2627 | err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, | 2700 | err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, |
2628 | snd_dma_pci_data(chip->pci), | 2701 | snd_dma_pci_data(chip->pci), |
2629 | byte_size, bufp); | 2702 | byte_size, bufp); |
2630 | if (err < 0) | 2703 | if (err < 0) |
2631 | goto unlock; | 2704 | goto err_alloc; |
2632 | 2705 | ||
2633 | mark_pages_wc(chip, bufp, true); | 2706 | mark_pages_wc(chip, bufp, true); |
2634 | azx_dev = azx_get_dsp_loader_dev(chip); | ||
2635 | azx_dev->bufsize = byte_size; | 2707 | azx_dev->bufsize = byte_size; |
2636 | azx_dev->period_bytes = byte_size; | 2708 | azx_dev->period_bytes = byte_size; |
2637 | azx_dev->format_val = format; | 2709 | azx_dev->format_val = format; |
@@ -2649,13 +2721,20 @@ static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format, | |||
2649 | goto error; | 2721 | goto error; |
2650 | 2722 | ||
2651 | azx_setup_controller(chip, azx_dev); | 2723 | azx_setup_controller(chip, azx_dev); |
2724 | dsp_unlock(azx_dev); | ||
2652 | return azx_dev->stream_tag; | 2725 | return azx_dev->stream_tag; |
2653 | 2726 | ||
2654 | error: | 2727 | error: |
2655 | mark_pages_wc(chip, bufp, false); | 2728 | mark_pages_wc(chip, bufp, false); |
2656 | snd_dma_free_pages(bufp); | 2729 | snd_dma_free_pages(bufp); |
2657 | unlock: | 2730 | err_alloc: |
2658 | snd_hda_unlock_devices(bus); | 2731 | spin_lock_irq(&chip->reg_lock); |
2732 | if (azx_dev->opened) | ||
2733 | *azx_dev = chip->saved_azx_dev; | ||
2734 | azx_dev->locked = 0; | ||
2735 | spin_unlock_irq(&chip->reg_lock); | ||
2736 | unlock: | ||
2737 | dsp_unlock(azx_dev); | ||
2659 | return err; | 2738 | return err; |
2660 | } | 2739 | } |
2661 | 2740 | ||
@@ -2677,9 +2756,10 @@ static void azx_load_dsp_cleanup(struct hda_bus *bus, | |||
2677 | struct azx *chip = bus->private_data; | 2756 | struct azx *chip = bus->private_data; |
2678 | struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip); | 2757 | struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip); |
2679 | 2758 | ||
2680 | if (!dmab->area) | 2759 | if (!dmab->area || !azx_dev->locked) |
2681 | return; | 2760 | return; |
2682 | 2761 | ||
2762 | dsp_lock(azx_dev); | ||
2683 | /* reset BDL address */ | 2763 | /* reset BDL address */ |
2684 | azx_sd_writel(azx_dev, SD_BDLPL, 0); | 2764 | azx_sd_writel(azx_dev, SD_BDLPL, 0); |
2685 | azx_sd_writel(azx_dev, SD_BDLPU, 0); | 2765 | azx_sd_writel(azx_dev, SD_BDLPU, 0); |
@@ -2692,7 +2772,12 @@ static void azx_load_dsp_cleanup(struct hda_bus *bus, | |||
2692 | snd_dma_free_pages(dmab); | 2772 | snd_dma_free_pages(dmab); |
2693 | dmab->area = NULL; | 2773 | dmab->area = NULL; |
2694 | 2774 | ||
2695 | snd_hda_unlock_devices(bus); | 2775 | spin_lock_irq(&chip->reg_lock); |
2776 | if (azx_dev->opened) | ||
2777 | *azx_dev = chip->saved_azx_dev; | ||
2778 | azx_dev->locked = 0; | ||
2779 | spin_unlock_irq(&chip->reg_lock); | ||
2780 | dsp_unlock(azx_dev); | ||
2696 | } | 2781 | } |
2697 | #endif /* CONFIG_SND_HDA_DSP_LOADER */ | 2782 | #endif /* CONFIG_SND_HDA_DSP_LOADER */ |
2698 | 2783 | ||
@@ -3481,6 +3566,7 @@ static int azx_first_init(struct azx *chip) | |||
3481 | } | 3566 | } |
3482 | 3567 | ||
3483 | for (i = 0; i < chip->num_streams; i++) { | 3568 | for (i = 0; i < chip->num_streams; i++) { |
3569 | dsp_lock_init(&chip->azx_dev[i]); | ||
3484 | /* allocate memory for the BDL for each stream */ | 3570 | /* allocate memory for the BDL for each stream */ |
3485 | err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, | 3571 | err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, |
3486 | snd_dma_pci_data(chip->pci), | 3572 | snd_dma_pci_data(chip->pci), |
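
Editor's note: the hda_intel changes add per-stream dsp_mutex/locked/prepared state so the DSP firmware loader and the normal PCM paths cannot race on the same azx_dev, and when CONFIG_SND_HDA_DSP_LOADER is off the dsp_lock*() helpers compile away to no-ops. The conditional no-op helper pattern on its own, outside the driver (the config symbol and struct here are placeholders):

    #include <stdio.h>
    #include <pthread.h>

    /* #define WITH_DSP_LOADER 1 */       /* flip to enable real locking */

    struct dev {
    #ifdef WITH_DSP_LOADER
        pthread_mutex_t dsp_mutex;
    #endif
        int locked;
    };

    #ifdef WITH_DSP_LOADER
    #define dsp_lock_init(d) pthread_mutex_init(&(d)->dsp_mutex, NULL)
    #define dsp_lock(d)      pthread_mutex_lock(&(d)->dsp_mutex)
    #define dsp_unlock(d)    pthread_mutex_unlock(&(d)->dsp_mutex)
    #define dsp_is_locked(d) ((d)->locked)
    #else
    #define dsp_lock_init(d) do {} while (0)
    #define dsp_lock(d)      do {} while (0)
    #define dsp_unlock(d)    do {} while (0)
    #define dsp_is_locked(d) 0
    #endif

    int main(void)
    {
        struct dev d = { .locked = 0 };

        dsp_lock_init(&d);
        dsp_lock(&d);
        printf("busy: %d (locked flag %d)\n", dsp_is_locked(&d), d.locked);
        dsp_unlock(&d);
        return 0;
    }
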
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 60d08f669f0c..0d9c58f13560 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c | |||
@@ -168,10 +168,10 @@ static void cs_automute(struct hda_codec *codec) | |||
168 | snd_hda_gen_update_outputs(codec); | 168 | snd_hda_gen_update_outputs(codec); |
169 | 169 | ||
170 | if (spec->gpio_eapd_hp) { | 170 | if (spec->gpio_eapd_hp) { |
171 | unsigned int gpio = spec->gen.hp_jack_present ? | 171 | spec->gpio_data = spec->gen.hp_jack_present ? |
172 | spec->gpio_eapd_hp : spec->gpio_eapd_speaker; | 172 | spec->gpio_eapd_hp : spec->gpio_eapd_speaker; |
173 | snd_hda_codec_write(codec, 0x01, 0, | 173 | snd_hda_codec_write(codec, 0x01, 0, |
174 | AC_VERB_SET_GPIO_DATA, gpio); | 174 | AC_VERB_SET_GPIO_DATA, spec->gpio_data); |
175 | } | 175 | } |
176 | } | 176 | } |
177 | 177 | ||
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 941bf6c766ec..2a89d1eefeb6 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -1142,7 +1142,7 @@ static int patch_cxt5045(struct hda_codec *codec) | |||
1142 | } | 1142 | } |
1143 | 1143 | ||
1144 | if (spec->beep_amp) | 1144 | if (spec->beep_amp) |
1145 | snd_hda_attach_beep_device(codec, spec->beep_amp); | 1145 | snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); |
1146 | 1146 | ||
1147 | return 0; | 1147 | return 0; |
1148 | } | 1148 | } |
@@ -1921,7 +1921,7 @@ static int patch_cxt5051(struct hda_codec *codec) | |||
1921 | } | 1921 | } |
1922 | 1922 | ||
1923 | if (spec->beep_amp) | 1923 | if (spec->beep_amp) |
1924 | snd_hda_attach_beep_device(codec, spec->beep_amp); | 1924 | snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); |
1925 | 1925 | ||
1926 | return 0; | 1926 | return 0; |
1927 | } | 1927 | } |
@@ -3099,7 +3099,7 @@ static int patch_cxt5066(struct hda_codec *codec) | |||
3099 | } | 3099 | } |
3100 | 3100 | ||
3101 | if (spec->beep_amp) | 3101 | if (spec->beep_amp) |
3102 | snd_hda_attach_beep_device(codec, spec->beep_amp); | 3102 | snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); |
3103 | 3103 | ||
3104 | return 0; | 3104 | return 0; |
3105 | } | 3105 | } |
@@ -3191,11 +3191,17 @@ static int cx_auto_build_controls(struct hda_codec *codec) | |||
3191 | return 0; | 3191 | return 0; |
3192 | } | 3192 | } |
3193 | 3193 | ||
3194 | static void cx_auto_free(struct hda_codec *codec) | ||
3195 | { | ||
3196 | snd_hda_detach_beep_device(codec); | ||
3197 | snd_hda_gen_free(codec); | ||
3198 | } | ||
3199 | |||
3194 | static const struct hda_codec_ops cx_auto_patch_ops = { | 3200 | static const struct hda_codec_ops cx_auto_patch_ops = { |
3195 | .build_controls = cx_auto_build_controls, | 3201 | .build_controls = cx_auto_build_controls, |
3196 | .build_pcms = snd_hda_gen_build_pcms, | 3202 | .build_pcms = snd_hda_gen_build_pcms, |
3197 | .init = snd_hda_gen_init, | 3203 | .init = snd_hda_gen_init, |
3198 | .free = snd_hda_gen_free, | 3204 | .free = cx_auto_free, |
3199 | .unsol_event = snd_hda_jack_unsol_event, | 3205 | .unsol_event = snd_hda_jack_unsol_event, |
3200 | #ifdef CONFIG_PM | 3206 | #ifdef CONFIG_PM |
3201 | .check_power_status = snd_hda_gen_check_power_status, | 3207 | .check_power_status = snd_hda_gen_check_power_status, |
@@ -3391,7 +3397,7 @@ static int patch_conexant_auto(struct hda_codec *codec) | |||
3391 | 3397 | ||
3392 | codec->patch_ops = cx_auto_patch_ops; | 3398 | codec->patch_ops = cx_auto_patch_ops; |
3393 | if (spec->beep_amp) | 3399 | if (spec->beep_amp) |
3394 | snd_hda_attach_beep_device(codec, spec->beep_amp); | 3400 | snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); |
3395 | 3401 | ||
3396 | /* Some laptops with Conexant chips show stalls in S3 resume, | 3402 | /* Some laptops with Conexant chips show stalls in S3 resume, |
3397 | * which falls into the single-cmd mode. | 3403 | * which falls into the single-cmd mode. |
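Two related points in the patch_conexant.c hunks above: the beep device is now attached with the NID extracted by get_amp_nid_() rather than the raw spec->beep_amp value (which appears to be a composed amp value carrying more than just the NID), and a dedicated cx_auto_free() detaches the beep device before the generic teardown. A compact sketch of the symmetric attach/detach pairing; only the snd_hda_* calls and get_amp_nid_() come from the hunks, the rest is illustrative:

	/* Sketch: whatever gets attached at patch time is detached in the
	 * codec's free callback, so the beep device never outlives the codec. */
	static int my_patch(struct hda_codec *codec, struct my_spec *spec)
	{
		if (spec->beep_amp)
			snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp));
		return 0;
	}

	static void my_free(struct hda_codec *codec)	/* plays the role of cx_auto_free() */
	{
		snd_hda_detach_beep_device(codec);	/* undo the attach first */
		snd_hda_gen_free(codec);		/* then the generic teardown */
	}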
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index fc176044994d..fc176044994d 100755..100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c | |||
diff --git a/sound/soc/codecs/max98090.h b/sound/soc/codecs/max98090.h index 7e103f249053..7e103f249053 100755..100644 --- a/sound/soc/codecs/max98090.h +++ b/sound/soc/codecs/max98090.h | |||
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c index f2d61a187830..566ea3256e2d 100644 --- a/sound/soc/codecs/si476x.c +++ b/sound/soc/codecs/si476x.c | |||
@@ -159,6 +159,7 @@ static int si476x_codec_hw_params(struct snd_pcm_substream *substream, | |||
159 | switch (params_format(params)) { | 159 | switch (params_format(params)) { |
160 | case SNDRV_PCM_FORMAT_S8: | 160 | case SNDRV_PCM_FORMAT_S8: |
161 | width = SI476X_PCM_FORMAT_S8; | 161 | width = SI476X_PCM_FORMAT_S8; |
162 | break; | ||
162 | case SNDRV_PCM_FORMAT_S16_LE: | 163 | case SNDRV_PCM_FORMAT_S16_LE: |
163 | width = SI476X_PCM_FORMAT_S16_LE; | 164 | width = SI476X_PCM_FORMAT_S16_LE; |
164 | break; | 165 | break; |
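The si476x change is a one-line fix for a missing break: without it the S8 case falls through and silently picks up the S16_LE width. A distilled, self-contained illustration of that failure mode (the names are generic, not the codec driver's):

	/* Sketch of the fallthrough bug fixed above: without the break an
	 * 8-bit request is silently treated as 16-bit. */
	enum fmt { FMT_S8, FMT_S16_LE };

	static int width_for(enum fmt f)
	{
		int width = -1;

		switch (f) {
		case FMT_S8:
			width = 8;
			break;		/* the missing statement; otherwise we fall into S16 */
		case FMT_S16_LE:
			width = 16;
			break;
		}
		return width;
	}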
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index f3f7e75f8628..9af1bddc4c62 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c | |||
@@ -828,7 +828,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp) | |||
828 | &buf_list); | 828 | &buf_list); |
829 | if (!buf) { | 829 | if (!buf) { |
830 | adsp_err(dsp, "Out of memory\n"); | 830 | adsp_err(dsp, "Out of memory\n"); |
831 | return -ENOMEM; | 831 | ret = -ENOMEM; |
832 | goto out_fw; | ||
832 | } | 833 | } |
833 | 834 | ||
834 | adsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n", | 835 | adsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n", |
@@ -865,7 +866,7 @@ out_fw: | |||
865 | wm_adsp_buf_free(&buf_list); | 866 | wm_adsp_buf_free(&buf_list); |
866 | out: | 867 | out: |
867 | kfree(file); | 868 | kfree(file); |
868 | return 0; | 869 | return ret; |
869 | } | 870 | } |
870 | 871 | ||
871 | int wm_adsp1_init(struct wm_adsp *adsp) | 872 | int wm_adsp1_init(struct wm_adsp *adsp) |
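In the wm_adsp_load_coeff() hunks above, the allocation-failure path now records -ENOMEM and jumps to the shared out_fw label instead of returning directly, and the function's final return hands back ret rather than a hard-coded 0, so both the cleanup and the real status reach the caller. A compact sketch of that single-exit style; the helper names below are invented, only the goto/ret shape mirrors the hunk:

	/* Sketch: error paths set ret and jump to the cleanup label instead of
	 * returning early, so buffers queued so far are always released. */
	static int my_load(void)
	{
		int ret = 0;
		void *blob, *buf;

		blob = get_blob();		/* hypothetical acquire step */
		if (!blob)
			return -ENOENT;

		buf = alloc_chunk();		/* hypothetical allocation */
		if (!buf) {
			ret = -ENOMEM;
			goto out;		/* don't return directly: cleanup must run */
		}
		ret = write_chunk(buf);
	out:
		release_all();			/* frees every chunk queued so far */
		put_blob(blob);
		return ret;			/* propagate the real status, not 0 */
	}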
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c index 55464a5b0706..810c7eeb7b03 100644 --- a/sound/soc/fsl/imx-ssi.c +++ b/sound/soc/fsl/imx-ssi.c | |||
@@ -496,6 +496,8 @@ static void imx_ssi_ac97_reset(struct snd_ac97 *ac97) | |||
496 | 496 | ||
497 | if (imx_ssi->ac97_reset) | 497 | if (imx_ssi->ac97_reset) |
498 | imx_ssi->ac97_reset(ac97); | 498 | imx_ssi->ac97_reset(ac97); |
499 | /* First read sometimes fails, do a dummy read */ | ||
500 | imx_ssi_ac97_read(ac97, 0); | ||
499 | } | 501 | } |
500 | 502 | ||
501 | static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97) | 503 | static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97) |
@@ -504,6 +506,9 @@ static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97) | |||
504 | 506 | ||
505 | if (imx_ssi->ac97_warm_reset) | 507 | if (imx_ssi->ac97_warm_reset) |
506 | imx_ssi->ac97_warm_reset(ac97); | 508 | imx_ssi->ac97_warm_reset(ac97); |
509 | |||
510 | /* First read sometimes fails, do a dummy read */ | ||
511 | imx_ssi_ac97_read(ac97, 0); | ||
507 | } | 512 | } |
508 | 513 | ||
509 | struct snd_ac97_bus_ops soc_ac97_ops = { | 514 | struct snd_ac97_bus_ops soc_ac97_ops = { |
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c index 8e52c1485df3..eb4373840bb6 100644 --- a/sound/soc/fsl/pcm030-audio-fabric.c +++ b/sound/soc/fsl/pcm030-audio-fabric.c | |||
@@ -51,7 +51,7 @@ static struct snd_soc_card pcm030_card = { | |||
51 | .num_links = ARRAY_SIZE(pcm030_fabric_dai), | 51 | .num_links = ARRAY_SIZE(pcm030_fabric_dai), |
52 | }; | 52 | }; |
53 | 53 | ||
54 | static int __init pcm030_fabric_probe(struct platform_device *op) | 54 | static int pcm030_fabric_probe(struct platform_device *op) |
55 | { | 55 | { |
56 | struct device_node *np = op->dev.of_node; | 56 | struct device_node *np = op->dev.of_node; |
57 | struct device_node *platform_np; | 57 | struct device_node *platform_np; |
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c index 19eff8fc4fdd..1a8b03e4b41b 100644 --- a/sound/soc/sh/dma-sh7760.c +++ b/sound/soc/sh/dma-sh7760.c | |||
@@ -342,8 +342,8 @@ static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd) | |||
342 | return 0; | 342 | return 0; |
343 | } | 343 | } |
344 | 344 | ||
345 | static struct snd_soc_platform sh7760_soc_platform = { | 345 | static struct snd_soc_platform_driver sh7760_soc_platform = { |
346 | .pcm_ops = &camelot_pcm_ops, | 346 | .ops = &camelot_pcm_ops, |
347 | .pcm_new = camelot_pcm_new, | 347 | .pcm_new = camelot_pcm_new, |
348 | .pcm_free = camelot_pcm_free, | 348 | .pcm_free = camelot_pcm_free, |
349 | }; | 349 | }; |
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index b7e84a7cd9ee..507d251916af 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -3140,7 +3140,7 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol, | |||
3140 | if (params->mask) { | 3140 | if (params->mask) { |
3141 | ret = regmap_read(codec->control_data, params->base, &val); | 3141 | ret = regmap_read(codec->control_data, params->base, &val); |
3142 | if (ret != 0) | 3142 | if (ret != 0) |
3143 | return ret; | 3143 | goto out; |
3144 | 3144 | ||
3145 | val &= params->mask; | 3145 | val &= params->mask; |
3146 | 3146 | ||
@@ -3158,13 +3158,15 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol, | |||
3158 | ((u32 *)data)[0] |= cpu_to_be32(val); | 3158 | ((u32 *)data)[0] |= cpu_to_be32(val); |
3159 | break; | 3159 | break; |
3160 | default: | 3160 | default: |
3161 | return -EINVAL; | 3161 | ret = -EINVAL; |
3162 | goto out; | ||
3162 | } | 3163 | } |
3163 | } | 3164 | } |
3164 | 3165 | ||
3165 | ret = regmap_raw_write(codec->control_data, params->base, | 3166 | ret = regmap_raw_write(codec->control_data, params->base, |
3166 | data, len); | 3167 | data, len); |
3167 | 3168 | ||
3169 | out: | ||
3168 | kfree(data); | 3170 | kfree(data); |
3169 | 3171 | ||
3170 | return ret; | 3172 | return ret; |
@@ -4197,7 +4199,6 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, | |||
4197 | dev_err(card->dev, | 4199 | dev_err(card->dev, |
4198 | "ASoC: Property '%s' index %d could not be read: %d\n", | 4200 | "ASoC: Property '%s' index %d could not be read: %d\n", |
4199 | propname, 2 * i, ret); | 4201 | propname, 2 * i, ret); |
4200 | kfree(routes); | ||
4201 | return -EINVAL; | 4202 | return -EINVAL; |
4202 | } | 4203 | } |
4203 | ret = of_property_read_string_index(np, propname, | 4204 | ret = of_property_read_string_index(np, propname, |
@@ -4206,7 +4207,6 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, | |||
4206 | dev_err(card->dev, | 4207 | dev_err(card->dev, |
4207 | "ASoC: Property '%s' index %d could not be read: %d\n", | 4208 | "ASoC: Property '%s' index %d could not be read: %d\n", |
4208 | propname, (2 * i) + 1, ret); | 4209 | propname, (2 * i) + 1, ret); |
4209 | kfree(routes); | ||
4210 | return -EINVAL; | 4210 | return -EINVAL; |
4211 | } | 4211 | } |
4212 | } | 4212 | } |
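The soc-core.c diff makes two error-handling changes: snd_soc_bytes_put() now jumps to an out: label so the data buffer allocated earlier is freed on every error path, and snd_soc_of_parse_audio_routing() stops calling kfree(routes) on error, presumably because the routes array is device-managed memory that would otherwise be freed twice. A sketch of the devm pitfall behind the removed kfree() calls; the allocation is not shown in the hunk, so treat the devm_kzalloc() below as an inferred assumption and the other names as placeholders:

	/* Sketch (assumption: routes is devm-managed): memory from
	 * devm_kzalloc() is released automatically when the device goes away,
	 * so an explicit kfree() on an error path frees it a second time. */
	static int my_parse(struct device *dev, int count)
	{
		struct my_route *routes;

		routes = devm_kzalloc(dev, count * sizeof(*routes), GFP_KERNEL);
		if (!routes)
			return -ENOMEM;

		if (read_property(routes) < 0)	/* hypothetical parse step */
			return -EINVAL;		/* no kfree(routes): devm owns it */

		return 0;
	}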
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 1d6a9b3ceb27..d6d9ba2e6916 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -831,6 +831,9 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget, | |||
831 | if (path->weak) | 831 | if (path->weak) |
832 | continue; | 832 | continue; |
833 | 833 | ||
834 | if (path->walking) | ||
835 | return 1; | ||
836 | |||
834 | if (path->walked) | 837 | if (path->walked) |
835 | continue; | 838 | continue; |
836 | 839 | ||
@@ -838,6 +841,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget, | |||
838 | 841 | ||
839 | if (path->sink && path->connect) { | 842 | if (path->sink && path->connect) { |
840 | path->walked = 1; | 843 | path->walked = 1; |
844 | path->walking = 1; | ||
841 | 845 | ||
842 | /* do we need to add this widget to the list ? */ | 846 | /* do we need to add this widget to the list ? */ |
843 | if (list) { | 847 | if (list) { |
@@ -847,11 +851,14 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget, | |||
847 | dev_err(widget->dapm->dev, | 851 | dev_err(widget->dapm->dev, |
848 | "ASoC: could not add widget %s\n", | 852 | "ASoC: could not add widget %s\n", |
849 | widget->name); | 853 | widget->name); |
854 | path->walking = 0; | ||
850 | return con; | 855 | return con; |
851 | } | 856 | } |
852 | } | 857 | } |
853 | 858 | ||
854 | con += is_connected_output_ep(path->sink, list); | 859 | con += is_connected_output_ep(path->sink, list); |
860 | |||
861 | path->walking = 0; | ||
855 | } | 862 | } |
856 | } | 863 | } |
857 | 864 | ||
@@ -931,6 +938,9 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, | |||
931 | if (path->weak) | 938 | if (path->weak) |
932 | continue; | 939 | continue; |
933 | 940 | ||
941 | if (path->walking) | ||
942 | return 1; | ||
943 | |||
934 | if (path->walked) | 944 | if (path->walked) |
935 | continue; | 945 | continue; |
936 | 946 | ||
@@ -938,6 +948,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, | |||
938 | 948 | ||
939 | if (path->source && path->connect) { | 949 | if (path->source && path->connect) { |
940 | path->walked = 1; | 950 | path->walked = 1; |
951 | path->walking = 1; | ||
941 | 952 | ||
942 | /* do we need to add this widget to the list ? */ | 953 | /* do we need to add this widget to the list ? */ |
943 | if (list) { | 954 | if (list) { |
@@ -947,11 +958,14 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, | |||
947 | dev_err(widget->dapm->dev, | 958 | dev_err(widget->dapm->dev, |
948 | "ASoC: could not add widget %s\n", | 959 | "ASoC: could not add widget %s\n", |
949 | widget->name); | 960 | widget->name); |
961 | path->walking = 0; | ||
950 | return con; | 962 | return con; |
951 | } | 963 | } |
952 | } | 964 | } |
953 | 965 | ||
954 | con += is_connected_input_ep(path->source, list); | 966 | con += is_connected_input_ep(path->source, list); |
967 | |||
968 | path->walking = 0; | ||
955 | } | 969 | } |
956 | } | 970 | } |
957 | 971 | ||
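The new walking flag in soc-dapm.c is an on-stack cycle guard: a path is marked while the traversal is descending through it and cleared on the way back, so a loop in the DAPM graph is detected and cut short instead of recursing forever. The existing walked flag only prevents re-visiting finished paths; it cannot catch a cycle that is still on the call stack. A generic sketch of the same idea, with invented node/edge types standing in for DAPM's:

	/* Sketch: "visiting" plays the role of path->walking, "visited" the
	 * role of path->walked.  Types and the iterator are illustrative. */
	struct edge {
		struct node *sink;
		int visited;	/* already fully explored in this walk */
		int visiting;	/* currently on the recursion stack */
	};

	static int count_outputs(struct node *n)
	{
		int con = 0;
		struct edge *e;

		for_each_edge(n, e) {		/* hypothetical iterator */
			if (e->visiting)
				return 1;	/* cycle: stop descending here */
			if (e->visited)
				continue;
			e->visited = 1;
			e->visiting = 1;
			con += count_outputs(e->sink);
			e->visiting = 0;	/* leaving this branch */
		}
		return con;
	}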
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 638e7f738018..ca4739c3f650 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c | |||
@@ -715,8 +715,9 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_ | |||
715 | case UAC2_CLOCK_SELECTOR: { | 715 | case UAC2_CLOCK_SELECTOR: { |
716 | struct uac_selector_unit_descriptor *d = p1; | 716 | struct uac_selector_unit_descriptor *d = p1; |
717 | /* call recursively to retrieve the channel info */ | 717 | /* call recursively to retrieve the channel info */ |
718 | if (check_input_term(state, d->baSourceID[0], term) < 0) | 718 | err = check_input_term(state, d->baSourceID[0], term); |
719 | return -ENODEV; | 719 | if (err < 0) |
720 | return err; | ||
720 | term->type = d->bDescriptorSubtype << 16; /* virtual type */ | 721 | term->type = d->bDescriptorSubtype << 16; /* virtual type */ |
721 | term->id = id; | 722 | term->id = id; |
722 | term->name = uac_selector_unit_iSelector(d); | 723 | term->name = uac_selector_unit_iSelector(d); |
@@ -725,7 +726,8 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_ | |||
725 | case UAC1_PROCESSING_UNIT: | 726 | case UAC1_PROCESSING_UNIT: |
726 | case UAC1_EXTENSION_UNIT: | 727 | case UAC1_EXTENSION_UNIT: |
727 | /* UAC2_PROCESSING_UNIT_V2 */ | 728 | /* UAC2_PROCESSING_UNIT_V2 */ |
728 | /* UAC2_EFFECT_UNIT */ { | 729 | /* UAC2_EFFECT_UNIT */ |
730 | case UAC2_EXTENSION_UNIT_V2: { | ||
729 | struct uac_processing_unit_descriptor *d = p1; | 731 | struct uac_processing_unit_descriptor *d = p1; |
730 | 732 | ||
731 | if (state->mixer->protocol == UAC_VERSION_2 && | 733 | if (state->mixer->protocol == UAC_VERSION_2 && |
@@ -1356,8 +1358,9 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void | |||
1356 | return err; | 1358 | return err; |
1357 | 1359 | ||
1358 | /* determine the input source type and name */ | 1360 | /* determine the input source type and name */ |
1359 | if (check_input_term(state, hdr->bSourceID, &iterm) < 0) | 1361 | err = check_input_term(state, hdr->bSourceID, &iterm); |
1360 | return -EINVAL; | 1362 | if (err < 0) |
1363 | return err; | ||
1361 | 1364 | ||
1362 | master_bits = snd_usb_combine_bytes(bmaControls, csize); | 1365 | master_bits = snd_usb_combine_bytes(bmaControls, csize); |
1363 | /* master configuration quirks */ | 1366 | /* master configuration quirks */ |
@@ -2052,6 +2055,8 @@ static int parse_audio_unit(struct mixer_build *state, int unitid) | |||
2052 | return parse_audio_extension_unit(state, unitid, p1); | 2055 | return parse_audio_extension_unit(state, unitid, p1); |
2053 | else /* UAC_VERSION_2 */ | 2056 | else /* UAC_VERSION_2 */ |
2054 | return parse_audio_processing_unit(state, unitid, p1); | 2057 | return parse_audio_processing_unit(state, unitid, p1); |
2058 | case UAC2_EXTENSION_UNIT_V2: | ||
2059 | return parse_audio_extension_unit(state, unitid, p1); | ||
2055 | default: | 2060 | default: |
2056 | snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]); | 2061 | snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]); |
2057 | return -EINVAL; | 2062 | return -EINVAL; |
@@ -2118,7 +2123,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) | |||
2118 | state.oterm.type = le16_to_cpu(desc->wTerminalType); | 2123 | state.oterm.type = le16_to_cpu(desc->wTerminalType); |
2119 | state.oterm.name = desc->iTerminal; | 2124 | state.oterm.name = desc->iTerminal; |
2120 | err = parse_audio_unit(&state, desc->bSourceID); | 2125 | err = parse_audio_unit(&state, desc->bSourceID); |
2121 | if (err < 0) | 2126 | if (err < 0 && err != -EINVAL) |
2122 | return err; | 2127 | return err; |
2123 | } else { /* UAC_VERSION_2 */ | 2128 | } else { /* UAC_VERSION_2 */ |
2124 | struct uac2_output_terminal_descriptor *desc = p; | 2129 | struct uac2_output_terminal_descriptor *desc = p; |
@@ -2130,12 +2135,12 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) | |||
2130 | state.oterm.type = le16_to_cpu(desc->wTerminalType); | 2135 | state.oterm.type = le16_to_cpu(desc->wTerminalType); |
2131 | state.oterm.name = desc->iTerminal; | 2136 | state.oterm.name = desc->iTerminal; |
2132 | err = parse_audio_unit(&state, desc->bSourceID); | 2137 | err = parse_audio_unit(&state, desc->bSourceID); |
2133 | if (err < 0) | 2138 | if (err < 0 && err != -EINVAL) |
2134 | return err; | 2139 | return err; |
2135 | 2140 | ||
2136 | /* for UAC2, use the same approach to also add the clock selectors */ | 2141 | /* for UAC2, use the same approach to also add the clock selectors */ |
2137 | err = parse_audio_unit(&state, desc->bCSourceID); | 2142 | err = parse_audio_unit(&state, desc->bCSourceID); |
2138 | if (err < 0) | 2143 | if (err < 0 && err != -EINVAL) |
2139 | return err; | 2144 | return err; |
2140 | } | 2145 | } |
2141 | } | 2146 | } |
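The sound/usb/mixer.c changes propagate the real error code from check_input_term() instead of collapsing it to -ENODEV/-EINVAL, handle UAC2_EXTENSION_UNIT_V2 descriptors, and let snd_usb_mixer_controls() tolerate -EINVAL from parse_audio_unit() so one unparseable unit no longer kills the whole mixer. A sketch of that "skip recoverable errors, abort on the rest" loop; the iterator and parse_one_unit() are stand-in names:

	/* Sketch of the tolerance added above: -EINVAL from one unit is
	 * treated as "skip this unit", anything else still aborts. */
	static int build_all_units(struct my_state *s)
	{
		int err;

		for_each_output_terminal(s) {		/* hypothetical iterator */
			err = parse_one_unit(s);
			if (err < 0 && err != -EINVAL)
				return err;		/* real failure: give up */
			/* -EINVAL: malformed or unsupported unit, carry on */
		}
		return 0;
	}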
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile index a20e32033431..0b0a90787db6 100644 --- a/tools/lib/traceevent/Makefile +++ b/tools/lib/traceevent/Makefile | |||
@@ -122,7 +122,7 @@ export Q VERBOSE | |||
122 | 122 | ||
123 | EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION) | 123 | EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION) |
124 | 124 | ||
125 | INCLUDES = -I. -I/usr/local/include $(CONFIG_INCLUDES) | 125 | INCLUDES = -I. $(CONFIG_INCLUDES) |
126 | 126 | ||
127 | # Set compile option CFLAGS if not set elsewhere | 127 | # Set compile option CFLAGS if not set elsewhere |
128 | CFLAGS ?= -g -Wall | 128 | CFLAGS ?= -g -Wall |
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index a2108ca1cc17..bb74c79cd16e 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
@@ -95,7 +95,7 @@ ifeq ("$(origin DEBUG)", "command line") | |||
95 | PERF_DEBUG = $(DEBUG) | 95 | PERF_DEBUG = $(DEBUG) |
96 | endif | 96 | endif |
97 | ifndef PERF_DEBUG | 97 | ifndef PERF_DEBUG |
98 | CFLAGS_OPTIMIZE = -O6 -D_FORTIFY_SOURCE=2 | 98 | CFLAGS_OPTIMIZE = -O6 |
99 | endif | 99 | endif |
100 | 100 | ||
101 | ifdef PARSER_DEBUG | 101 | ifdef PARSER_DEBUG |
@@ -180,6 +180,12 @@ ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -Wvolatile-register-var,-W | |||
180 | CFLAGS := $(CFLAGS) -Wvolatile-register-var | 180 | CFLAGS := $(CFLAGS) -Wvolatile-register-var |
181 | endif | 181 | endif |
182 | 182 | ||
183 | ifndef PERF_DEBUG | ||
184 | ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -D_FORTIFY_SOURCE=2,-D_FORTIFY_SOURCE=2),y) | ||
185 | CFLAGS := $(CFLAGS) -D_FORTIFY_SOURCE=2 | ||
186 | endif | ||
187 | endif | ||
188 | |||
183 | ### --- END CONFIGURATION SECTION --- | 189 | ### --- END CONFIGURATION SECTION --- |
184 | 190 | ||
185 | ifeq ($(srctree),) | 191 | ifeq ($(srctree),) |
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h index a5223e6a7b43..0fdc85269c4d 100644 --- a/tools/perf/bench/bench.h +++ b/tools/perf/bench/bench.h | |||
@@ -1,6 +1,30 @@ | |||
1 | #ifndef BENCH_H | 1 | #ifndef BENCH_H |
2 | #define BENCH_H | 2 | #define BENCH_H |
3 | 3 | ||
4 | /* | ||
5 | * The madvise transparent hugepage constants were added in glibc | ||
6 | * 2.13. For compatibility with older versions of glibc, define these | ||
7 | * tokens if they are not already defined. | ||
8 | * | ||
9 | * PA-RISC uses different madvise values from other architectures and | ||
10 | * needs to be special-cased. | ||
11 | */ | ||
12 | #ifdef __hppa__ | ||
13 | # ifndef MADV_HUGEPAGE | ||
14 | # define MADV_HUGEPAGE 67 | ||
15 | # endif | ||
16 | # ifndef MADV_NOHUGEPAGE | ||
17 | # define MADV_NOHUGEPAGE 68 | ||
18 | # endif | ||
19 | #else | ||
20 | # ifndef MADV_HUGEPAGE | ||
21 | # define MADV_HUGEPAGE 14 | ||
22 | # endif | ||
23 | # ifndef MADV_NOHUGEPAGE | ||
24 | # define MADV_NOHUGEPAGE 15 | ||
25 | # endif | ||
26 | #endif | ||
27 | |||
4 | extern int bench_numa(int argc, const char **argv, const char *prefix); | 28 | extern int bench_numa(int argc, const char **argv, const char *prefix); |
5 | extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); | 29 | extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); |
6 | extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); | 30 | extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); |
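The bench.h block above supplies fallback MADV_HUGEPAGE/MADV_NOHUGEPAGE definitions so the benchmarks still build against glibc headers older than 2.13, with PA-RISC's different numbering special-cased. For reference, these tokens end up in an ordinary madvise() call; a hedged usage sketch (the allocation helper is illustrative, only madvise() and the MADV_* tokens relate to the header change):

	/* Sketch: typical use of the fallback constants defined above. */
	#include <sys/mman.h>
	#include <stdlib.h>

	static void *alloc_thp(size_t len)
	{
		void *p = NULL;

		if (posix_memalign(&p, 2 * 1024 * 1024, len))	/* 2 MiB aligned */
			return NULL;
		madvise(p, len, MADV_HUGEPAGE);	/* hint: back with huge pages */
		return p;
	}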
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 774c90713a53..f1a939ebc19c 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -573,13 +573,15 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) | |||
573 | perf_event__synthesize_guest_os, tool); | 573 | perf_event__synthesize_guest_os, tool); |
574 | } | 574 | } |
575 | 575 | ||
576 | if (!opts->target.system_wide) | 576 | if (perf_target__has_task(&opts->target)) |
577 | err = perf_event__synthesize_thread_map(tool, evsel_list->threads, | 577 | err = perf_event__synthesize_thread_map(tool, evsel_list->threads, |
578 | process_synthesized_event, | 578 | process_synthesized_event, |
579 | machine); | 579 | machine); |
580 | else | 580 | else if (perf_target__has_cpu(&opts->target)) |
581 | err = perf_event__synthesize_threads(tool, process_synthesized_event, | 581 | err = perf_event__synthesize_threads(tool, process_synthesized_event, |
582 | machine); | 582 | machine); |
583 | else /* command specified */ | ||
584 | err = 0; | ||
583 | 585 | ||
584 | if (err != 0) | 586 | if (err != 0) |
585 | goto out_delete_session; | 587 | goto out_delete_session; |
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 38624686ee9a..226a4ae2f936 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h | |||
@@ -208,8 +208,9 @@ static inline int script_browse(const char *script_opt __maybe_unused) | |||
208 | return 0; | 208 | return 0; |
209 | } | 209 | } |
210 | 210 | ||
211 | #define K_LEFT -1 | 211 | #define K_LEFT -1000 |
212 | #define K_RIGHT -2 | 212 | #define K_RIGHT -2000 |
213 | #define K_SWITCH_INPUT_DATA -3000 | ||
213 | #endif | 214 | #endif |
214 | 215 | ||
215 | #ifdef GTK2_SUPPORT | 216 | #ifdef GTK2_SUPPORT |
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c index 55433aa42c8f..eabdce0a2daa 100644 --- a/tools/perf/util/strlist.c +++ b/tools/perf/util/strlist.c | |||
@@ -143,7 +143,7 @@ struct strlist *strlist__new(bool dupstr, const char *list) | |||
143 | slist->rblist.node_delete = strlist__node_delete; | 143 | slist->rblist.node_delete = strlist__node_delete; |
144 | 144 | ||
145 | slist->dupstr = dupstr; | 145 | slist->dupstr = dupstr; |
146 | if (slist && strlist__parse_list(slist, list) != 0) | 146 | if (list && strlist__parse_list(slist, list) != 0) |
147 | goto out_error; | 147 | goto out_error; |
148 | } | 148 | } |
149 | 149 | ||
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index ce82b9401958..5ba005c00e2f 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c | |||
@@ -74,9 +74,12 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, | |||
74 | u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; | 74 | u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; |
75 | u64 redir_content; | 75 | u64 redir_content; |
76 | 76 | ||
77 | ASSERT(redir_index < IOAPIC_NUM_PINS); | 77 | if (redir_index < IOAPIC_NUM_PINS) |
78 | redir_content = | ||
79 | ioapic->redirtbl[redir_index].bits; | ||
80 | else | ||
81 | redir_content = ~0ULL; | ||
78 | 82 | ||
79 | redir_content = ioapic->redirtbl[redir_index].bits; | ||
80 | result = (ioapic->ioregsel & 0x1) ? | 83 | result = (ioapic->ioregsel & 0x1) ? |
81 | (redir_content >> 32) & 0xffffffff : | 84 | (redir_content >> 32) & 0xffffffff : |
82 | redir_content & 0xffffffff; | 85 | redir_content & 0xffffffff; |
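The ioapic.c change replaces a debug-only ASSERT with a real range check: redir_index is derived from a guest-writable register, and on a production build the assertion compiles away, so an out-of-range index would read past the redirection table. Returning all-ones keeps the MMIO read well-defined. A minimal sketch of the "validate guest-controlled index, fall back to a benign value" pattern, with generic names (IOAPIC_NUM_PINS is 24 in the real code):

	/* Sketch: never index an array with a guest-controlled value without a
	 * bounds check; a debug-only assertion is not a check in production. */
	#define NUM_PINS 24

	static unsigned long read_redir(const unsigned long long *table,
					unsigned int index, int high_word)
	{
		unsigned long long entry;

		if (index < NUM_PINS)
			entry = table[index];
		else
			entry = ~0ULL;		/* benign value for bad indexes */

		return high_word ? (entry >> 32) & 0xffffffff
				 : entry & 0xffffffff;
	}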