866 files changed, 9888 insertions, 5331 deletions
@@ -1510,6 +1510,14 @@ D: Natsemi ethernet | |||
1510 | D: Cobalt Networks (x86) support | 1510 | D: Cobalt Networks (x86) support |
1511 | D: This-and-That | 1511 | D: This-and-That |
1512 | 1512 | ||
1513 | N: Mark M. Hoffman | ||
1514 | E: mhoffman@lightlink.com | ||
1515 | D: asb100, lm93 and smsc47b397 hardware monitoring drivers | ||
1516 | D: hwmon subsystem core | ||
1517 | D: hwmon subsystem maintainer | ||
1518 | D: i2c-sis96x and i2c-stub SMBus drivers | ||
1519 | S: USA | ||
1520 | |||
1513 | N: Dirk Hohndel | 1521 | N: Dirk Hohndel |
1514 | E: hohndel@suse.de | 1522 | E: hohndel@suse.de |
1515 | D: The XFree86[tm] Project | 1523 | D: The XFree86[tm] Project |
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl index 7514dbf0a679..c36892c072da 100644 --- a/Documentation/DocBook/device-drivers.tmpl +++ b/Documentation/DocBook/device-drivers.tmpl | |||
@@ -227,7 +227,7 @@ X!Isound/sound_firmware.c | |||
227 | <chapter id="uart16x50"> | 227 | <chapter id="uart16x50"> |
228 | <title>16x50 UART Driver</title> | 228 | <title>16x50 UART Driver</title> |
229 | !Edrivers/tty/serial/serial_core.c | 229 | !Edrivers/tty/serial/serial_core.c |
230 | !Edrivers/tty/serial/8250/8250.c | 230 | !Edrivers/tty/serial/8250/8250_core.c |
231 | </chapter> | 231 | </chapter> |
232 | 232 | ||
233 | <chapter id="fbdev"> | 233 | <chapter id="fbdev"> |
diff --git a/Documentation/hwmon/lm75 b/Documentation/hwmon/lm75 index c91a1d15fa28..69af1c7db6b7 100644 --- a/Documentation/hwmon/lm75 +++ b/Documentation/hwmon/lm75 | |||
@@ -23,7 +23,7 @@ Supported chips: | |||
23 | Datasheet: Publicly available at the Maxim website | 23 | Datasheet: Publicly available at the Maxim website |
24 | http://www.maxim-ic.com/ | 24 | http://www.maxim-ic.com/ |
25 | * Microchip (TelCom) TCN75 | 25 | * Microchip (TelCom) TCN75 |
26 | Prefix: 'lm75' | 26 | Prefix: 'tcn75' |
27 | Addresses scanned: none | 27 | Addresses scanned: none |
28 | Datasheet: Publicly available at the Microchip website | 28 | Datasheet: Publicly available at the Microchip website |
29 | http://www.microchip.com/ | 29 | http://www.microchip.com/ |
diff --git a/Documentation/i2c/busses/i2c-diolan-u2c b/Documentation/i2c/busses/i2c-diolan-u2c index 30fe4bb9a069..0d6018c316c7 100644 --- a/Documentation/i2c/busses/i2c-diolan-u2c +++ b/Documentation/i2c/busses/i2c-diolan-u2c | |||
@@ -5,7 +5,7 @@ Supported adapters: | |||
5 | Documentation: | 5 | Documentation: |
6 | http://www.diolan.com/i2c/u2c12.html | 6 | http://www.diolan.com/i2c/u2c12.html |
7 | 7 | ||
8 | Author: Guenter Roeck <guenter.roeck@ericsson.com> | 8 | Author: Guenter Roeck <linux@roeck-us.net> |
9 | 9 | ||
10 | Description | 10 | Description |
11 | ----------- | 11 | ----------- |
diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt index f2a2488f1bf3..9573d0c48c6e 100644 --- a/Documentation/networking/ipvs-sysctl.txt +++ b/Documentation/networking/ipvs-sysctl.txt | |||
@@ -15,6 +15,13 @@ amemthresh - INTEGER | |||
15 | enabled and the variable is automatically set to 2, otherwise | 15 | enabled and the variable is automatically set to 2, otherwise |
16 | the strategy is disabled and the variable is set to 1. | 16 | the strategy is disabled and the variable is set to 1. |
17 | 17 | ||
18 | backup_only - BOOLEAN | ||
19 | 0 - disabled (default) | ||
20 | not 0 - enabled | ||
21 | |||
22 | If set, disable the director function while the server is | ||
23 | in backup mode to avoid packet loops for DR/TUN methods. | ||
24 | |||
18 | conntrack - BOOLEAN | 25 | conntrack - BOOLEAN |
19 | 0 - disabled (default) | 26 | 0 - disabled (default) |
20 | not 0 - enabled | 27 | not 0 - enabled |
diff --git a/Documentation/scsi/LICENSE.qla2xxx b/Documentation/scsi/LICENSE.qla2xxx index 27a91cf43d6d..5020b7b5a244 100644 --- a/Documentation/scsi/LICENSE.qla2xxx +++ b/Documentation/scsi/LICENSE.qla2xxx | |||
@@ -1,4 +1,4 @@ | |||
1 | Copyright (c) 2003-2012 QLogic Corporation | 1 | Copyright (c) 2003-2013 QLogic Corporation |
2 | QLogic Linux FC-FCoE Driver | 2 | QLogic Linux FC-FCoE Driver |
3 | 3 | ||
4 | This program includes a device driver for Linux 3.x. | 4 | This program includes a device driver for Linux 3.x. |
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt index ce6581c8ca26..95731a08f257 100644 --- a/Documentation/sound/alsa/ALSA-Configuration.txt +++ b/Documentation/sound/alsa/ALSA-Configuration.txt | |||
@@ -890,9 +890,8 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. | |||
890 | enable_msi - Enable Message Signaled Interrupt (MSI) (default = off) | 890 | enable_msi - Enable Message Signaled Interrupt (MSI) (default = off) |
891 | power_save - Automatic power-saving timeout (in second, 0 = | 891 | power_save - Automatic power-saving timeout (in second, 0 = |
892 | disable) | 892 | disable) |
893 | power_save_controller - Support runtime D3 of HD-audio controller | 893 | power_save_controller - Reset HD-audio controller in power-saving mode |
894 | (-1 = on for supported chip (default), false = off, | 894 | (default = on) |
895 | true = force to on even for unsupported hardware) | ||
896 | align_buffer_size - Force rounding of buffer/period sizes to multiples | 895 | align_buffer_size - Force rounding of buffer/period sizes to multiples |
897 | of 128 bytes. This is more efficient in terms of memory | 896 | of 128 bytes. This is more efficient in terms of memory |
898 | access but isn't required by the HDA spec and prevents | 897 | access but isn't required by the HDA spec and prevents |
@@ -912,7 +911,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. | |||
912 | models depending on the codec chip. The list of available models | 911 | models depending on the codec chip. The list of available models |
913 | is found in HD-Audio-Models.txt | 912 | is found in HD-Audio-Models.txt |
914 | 913 | ||
915 | The model name "genric" is treated as a special case. When this | 914 | The model name "generic" is treated as a special case. When this |
916 | model is given, the driver uses the generic codec parser without | 915 | model is given, the driver uses the generic codec parser without |
917 | "codec-patch". It's sometimes good for testing and debugging. | 916 | "codec-patch". It's sometimes good for testing and debugging. |
918 | 917 | ||
diff --git a/Documentation/sound/alsa/seq_oss.html b/Documentation/sound/alsa/seq_oss.html index d9776cf60c07..9663b45f6fde 100644 --- a/Documentation/sound/alsa/seq_oss.html +++ b/Documentation/sound/alsa/seq_oss.html | |||
@@ -285,7 +285,7 @@ sample data. | |||
285 | <H4> | 285 | <H4> |
286 | 7.2.4 Close Callback</H4> | 286 | 7.2.4 Close Callback</H4> |
287 | The <TT>close</TT> callback is called when this device is closed by the | 287 | The <TT>close</TT> callback is called when this device is closed by the |
288 | applicaion. If any private data was allocated in open callback, it must | 288 | application. If any private data was allocated in open callback, it must |
289 | be released in the close callback. The deletion of ALSA port should be | 289 | be released in the close callback. The deletion of ALSA port should be |
290 | done here, too. This callback must not be NULL. | 290 | done here, too. This callback must not be NULL. |
291 | <H4> | 291 | <H4> |
diff --git a/MAINTAINERS b/MAINTAINERS index 50b4d735f961..8bdd7a7ef2f4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1338,12 +1338,6 @@ S: Maintained | |||
1338 | F: drivers/platform/x86/asus*.c | 1338 | F: drivers/platform/x86/asus*.c |
1339 | F: drivers/platform/x86/eeepc*.c | 1339 | F: drivers/platform/x86/eeepc*.c |
1340 | 1340 | ||
1341 | ASUS ASB100 HARDWARE MONITOR DRIVER | ||
1342 | M: "Mark M. Hoffman" <mhoffman@lightlink.com> | ||
1343 | L: lm-sensors@lm-sensors.org | ||
1344 | S: Maintained | ||
1345 | F: drivers/hwmon/asb100.c | ||
1346 | |||
1347 | ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API | 1341 | ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API |
1348 | M: Dan Williams <djbw@fb.com> | 1342 | M: Dan Williams <djbw@fb.com> |
1349 | W: http://sourceforge.net/projects/xscaleiop | 1343 | W: http://sourceforge.net/projects/xscaleiop |
@@ -1467,6 +1461,12 @@ F: drivers/dma/at_hdmac.c | |||
1467 | F: drivers/dma/at_hdmac_regs.h | 1461 | F: drivers/dma/at_hdmac_regs.h |
1468 | F: include/linux/platform_data/dma-atmel.h | 1462 | F: include/linux/platform_data/dma-atmel.h |
1469 | 1463 | ||
1464 | ATMEL I2C DRIVER | ||
1465 | M: Ludovic Desroches <ludovic.desroches@atmel.com> | ||
1466 | L: linux-i2c@vger.kernel.org | ||
1467 | S: Supported | ||
1468 | F: drivers/i2c/busses/i2c-at91.c | ||
1469 | |||
1470 | ATMEL ISI DRIVER | 1470 | ATMEL ISI DRIVER |
1471 | M: Josh Wu <josh.wu@atmel.com> | 1471 | M: Josh Wu <josh.wu@atmel.com> |
1472 | L: linux-media@vger.kernel.org | 1472 | L: linux-media@vger.kernel.org |
@@ -2629,7 +2629,7 @@ F: include/uapi/drm/ | |||
2629 | 2629 | ||
2630 | INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) | 2630 | INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets) |
2631 | M: Daniel Vetter <daniel.vetter@ffwll.ch> | 2631 | M: Daniel Vetter <daniel.vetter@ffwll.ch> |
2632 | L: intel-gfx@lists.freedesktop.org (subscribers-only) | 2632 | L: intel-gfx@lists.freedesktop.org |
2633 | L: dri-devel@lists.freedesktop.org | 2633 | L: dri-devel@lists.freedesktop.org |
2634 | T: git git://people.freedesktop.org/~danvet/drm-intel | 2634 | T: git git://people.freedesktop.org/~danvet/drm-intel |
2635 | S: Supported | 2635 | S: Supported |
@@ -3242,6 +3242,12 @@ F: Documentation/firmware_class/ | |||
3242 | F: drivers/base/firmware*.c | 3242 | F: drivers/base/firmware*.c |
3243 | F: include/linux/firmware.h | 3243 | F: include/linux/firmware.h |
3244 | 3244 | ||
3245 | FLASHSYSTEM DRIVER (IBM FlashSystem 70/80 PCI SSD Flash Card) | ||
3246 | M: Joshua Morris <josh.h.morris@us.ibm.com> | ||
3247 | M: Philip Kelleher <pjk1939@linux.vnet.ibm.com> | ||
3248 | S: Maintained | ||
3249 | F: drivers/block/rsxx/ | ||
3250 | |||
3245 | FLOPPY DRIVER | 3251 | FLOPPY DRIVER |
3246 | M: Jiri Kosina <jkosina@suse.cz> | 3252 | M: Jiri Kosina <jkosina@suse.cz> |
3247 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git | 3253 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git |
@@ -3851,7 +3857,7 @@ F: drivers/i2c/busses/i2c-ismt.c | |||
3851 | F: Documentation/i2c/busses/i2c-ismt | 3857 | F: Documentation/i2c/busses/i2c-ismt |
3852 | 3858 | ||
3853 | I2C/SMBUS STUB DRIVER | 3859 | I2C/SMBUS STUB DRIVER |
3854 | M: "Mark M. Hoffman" <mhoffman@lightlink.com> | 3860 | M: Jean Delvare <khali@linux-fr.org> |
3855 | L: linux-i2c@vger.kernel.org | 3861 | L: linux-i2c@vger.kernel.org |
3856 | S: Maintained | 3862 | S: Maintained |
3857 | F: drivers/i2c/i2c-stub.c | 3863 | F: drivers/i2c/i2c-stub.c |
@@ -4935,6 +4941,12 @@ W: logfs.org | |||
4935 | S: Maintained | 4941 | S: Maintained |
4936 | F: fs/logfs/ | 4942 | F: fs/logfs/ |
4937 | 4943 | ||
4944 | LPC32XX MACHINE SUPPORT | ||
4945 | M: Roland Stigge <stigge@antcom.de> | ||
4946 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
4947 | S: Maintained | ||
4948 | F: arch/arm/mach-lpc32xx/ | ||
4949 | |||
4938 | LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI) | 4950 | LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI) |
4939 | M: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com> | 4951 | M: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com> |
4940 | M: Sreekanth Reddy <Sreekanth.Reddy@lsi.com> | 4952 | M: Sreekanth Reddy <Sreekanth.Reddy@lsi.com> |
@@ -5059,9 +5071,8 @@ S: Maintained | |||
5059 | F: drivers/net/ethernet/marvell/sk* | 5071 | F: drivers/net/ethernet/marvell/sk* |
5060 | 5072 | ||
5061 | MARVELL LIBERTAS WIRELESS DRIVER | 5073 | MARVELL LIBERTAS WIRELESS DRIVER |
5062 | M: Dan Williams <dcbw@redhat.com> | ||
5063 | L: libertas-dev@lists.infradead.org | 5074 | L: libertas-dev@lists.infradead.org |
5064 | S: Maintained | 5075 | S: Orphan |
5065 | F: drivers/net/wireless/libertas/ | 5076 | F: drivers/net/wireless/libertas/ |
5066 | 5077 | ||
5067 | MARVELL MV643XX ETHERNET DRIVER | 5078 | MARVELL MV643XX ETHERNET DRIVER |
@@ -5563,6 +5574,7 @@ F: include/uapi/linux/if_* | |||
5563 | F: include/uapi/linux/netdevice.h | 5574 | F: include/uapi/linux/netdevice.h |
5564 | 5575 | ||
5565 | NETXEN (1/10) GbE SUPPORT | 5576 | NETXEN (1/10) GbE SUPPORT |
5577 | M: Manish Chopra <manish.chopra@qlogic.com> | ||
5566 | M: Sony Chacko <sony.chacko@qlogic.com> | 5578 | M: Sony Chacko <sony.chacko@qlogic.com> |
5567 | M: Rajesh Borundia <rajesh.borundia@qlogic.com> | 5579 | M: Rajesh Borundia <rajesh.borundia@qlogic.com> |
5568 | L: netdev@vger.kernel.org | 5580 | L: netdev@vger.kernel.org |
@@ -5647,6 +5659,14 @@ S: Maintained | |||
5647 | F: drivers/video/riva/ | 5659 | F: drivers/video/riva/ |
5648 | F: drivers/video/nvidia/ | 5660 | F: drivers/video/nvidia/ |
5649 | 5661 | ||
5662 | NVM EXPRESS DRIVER | ||
5663 | M: Matthew Wilcox <willy@linux.intel.com> | ||
5664 | L: linux-nvme@lists.infradead.org | ||
5665 | T: git git://git.infradead.org/users/willy/linux-nvme.git | ||
5666 | S: Supported | ||
5667 | F: drivers/block/nvme.c | ||
5668 | F: include/linux/nvme.h | ||
5669 | |||
5650 | OMAP SUPPORT | 5670 | OMAP SUPPORT |
5651 | M: Tony Lindgren <tony@atomide.com> | 5671 | M: Tony Lindgren <tony@atomide.com> |
5652 | L: linux-omap@vger.kernel.org | 5672 | L: linux-omap@vger.kernel.org |
@@ -5675,7 +5695,7 @@ S: Maintained | |||
5675 | F: arch/arm/*omap*/*clock* | 5695 | F: arch/arm/*omap*/*clock* |
5676 | 5696 | ||
5677 | OMAP POWER MANAGEMENT SUPPORT | 5697 | OMAP POWER MANAGEMENT SUPPORT |
5678 | M: Kevin Hilman <khilman@ti.com> | 5698 | M: Kevin Hilman <khilman@deeprootsystems.com> |
5679 | L: linux-omap@vger.kernel.org | 5699 | L: linux-omap@vger.kernel.org |
5680 | S: Maintained | 5700 | S: Maintained |
5681 | F: arch/arm/*omap*/*pm* | 5701 | F: arch/arm/*omap*/*pm* |
@@ -5769,7 +5789,7 @@ F: arch/arm/*omap*/usb* | |||
5769 | 5789 | ||
5770 | OMAP GPIO DRIVER | 5790 | OMAP GPIO DRIVER |
5771 | M: Santosh Shilimkar <santosh.shilimkar@ti.com> | 5791 | M: Santosh Shilimkar <santosh.shilimkar@ti.com> |
5772 | M: Kevin Hilman <khilman@ti.com> | 5792 | M: Kevin Hilman <khilman@deeprootsystems.com> |
5773 | L: linux-omap@vger.kernel.org | 5793 | L: linux-omap@vger.kernel.org |
5774 | S: Maintained | 5794 | S: Maintained |
5775 | F: drivers/gpio/gpio-omap.c | 5795 | F: drivers/gpio/gpio-omap.c |
@@ -6201,7 +6221,7 @@ F: include/linux/power_supply.h | |||
6201 | F: drivers/power/ | 6221 | F: drivers/power/ |
6202 | 6222 | ||
6203 | PNP SUPPORT | 6223 | PNP SUPPORT |
6204 | M: Adam Belay <abelay@mit.edu> | 6224 | M: Rafael J. Wysocki <rafael.j.wysocki@intel.com> |
6205 | M: Bjorn Helgaas <bhelgaas@google.com> | 6225 | M: Bjorn Helgaas <bhelgaas@google.com> |
6206 | S: Maintained | 6226 | S: Maintained |
6207 | F: drivers/pnp/ | 6227 | F: drivers/pnp/ |
@@ -6543,12 +6563,6 @@ S: Maintained | |||
6543 | F: Documentation/blockdev/ramdisk.txt | 6563 | F: Documentation/blockdev/ramdisk.txt |
6544 | F: drivers/block/brd.c | 6564 | F: drivers/block/brd.c |
6545 | 6565 | ||
6546 | RAMSAM DRIVER (IBM RamSan 70/80 PCI SSD Flash Card) | ||
6547 | M: Joshua Morris <josh.h.morris@us.ibm.com> | ||
6548 | M: Philip Kelleher <pjk1939@linux.vnet.ibm.com> | ||
6549 | S: Maintained | ||
6550 | F: drivers/block/rsxx/ | ||
6551 | |||
6552 | RANDOM NUMBER DRIVER | 6566 | RANDOM NUMBER DRIVER |
6553 | M: Theodore Ts'o" <tytso@mit.edu> | 6567 | M: Theodore Ts'o" <tytso@mit.edu> |
6554 | S: Maintained | 6568 | S: Maintained |
@@ -6617,7 +6631,7 @@ S: Supported | |||
6617 | F: fs/reiserfs/ | 6631 | F: fs/reiserfs/ |
6618 | 6632 | ||
6619 | REGISTER MAP ABSTRACTION | 6633 | REGISTER MAP ABSTRACTION |
6620 | M: Mark Brown <broonie@opensource.wolfsonmicro.com> | 6634 | M: Mark Brown <broonie@kernel.org> |
6621 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git | 6635 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git |
6622 | S: Supported | 6636 | S: Supported |
6623 | F: drivers/base/regmap/ | 6637 | F: drivers/base/regmap/ |
@@ -6943,7 +6957,6 @@ F: drivers/scsi/st* | |||
6943 | 6957 | ||
6944 | SCTP PROTOCOL | 6958 | SCTP PROTOCOL |
6945 | M: Vlad Yasevich <vyasevich@gmail.com> | 6959 | M: Vlad Yasevich <vyasevich@gmail.com> |
6946 | M: Sridhar Samudrala <sri@us.ibm.com> | ||
6947 | M: Neil Horman <nhorman@tuxdriver.com> | 6960 | M: Neil Horman <nhorman@tuxdriver.com> |
6948 | L: linux-sctp@vger.kernel.org | 6961 | L: linux-sctp@vger.kernel.org |
6949 | W: http://lksctp.sourceforge.net | 6962 | W: http://lksctp.sourceforge.net |
@@ -7165,7 +7178,7 @@ F: arch/arm/mach-s3c2410/bast-irq.c | |||
7165 | 7178 | ||
7166 | TI DAVINCI MACHINE SUPPORT | 7179 | TI DAVINCI MACHINE SUPPORT |
7167 | M: Sekhar Nori <nsekhar@ti.com> | 7180 | M: Sekhar Nori <nsekhar@ti.com> |
7168 | M: Kevin Hilman <khilman@ti.com> | 7181 | M: Kevin Hilman <khilman@deeprootsystems.com> |
7169 | L: davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers) | 7182 | L: davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers) |
7170 | T: git git://gitorious.org/linux-davinci/linux-davinci.git | 7183 | T: git git://gitorious.org/linux-davinci/linux-davinci.git |
7171 | Q: http://patchwork.kernel.org/project/linux-davinci/list/ | 7184 | Q: http://patchwork.kernel.org/project/linux-davinci/list/ |
@@ -7198,13 +7211,6 @@ L: netdev@vger.kernel.org | |||
7198 | S: Maintained | 7211 | S: Maintained |
7199 | F: drivers/net/ethernet/sis/sis900.* | 7212 | F: drivers/net/ethernet/sis/sis900.* |
7200 | 7213 | ||
7201 | SIS 96X I2C/SMBUS DRIVER | ||
7202 | M: "Mark M. Hoffman" <mhoffman@lightlink.com> | ||
7203 | L: linux-i2c@vger.kernel.org | ||
7204 | S: Maintained | ||
7205 | F: Documentation/i2c/busses/i2c-sis96x | ||
7206 | F: drivers/i2c/busses/i2c-sis96x.c | ||
7207 | |||
7208 | SIS FRAMEBUFFER DRIVER | 7214 | SIS FRAMEBUFFER DRIVER |
7209 | M: Thomas Winischhofer <thomas@winischhofer.net> | 7215 | M: Thomas Winischhofer <thomas@winischhofer.net> |
7210 | W: http://www.winischhofer.net/linuxsisvga.shtml | 7216 | W: http://www.winischhofer.net/linuxsisvga.shtml |
@@ -7282,7 +7288,7 @@ F: Documentation/hwmon/sch5627 | |||
7282 | F: drivers/hwmon/sch5627.c | 7288 | F: drivers/hwmon/sch5627.c |
7283 | 7289 | ||
7284 | SMSC47B397 HARDWARE MONITOR DRIVER | 7290 | SMSC47B397 HARDWARE MONITOR DRIVER |
7285 | M: "Mark M. Hoffman" <mhoffman@lightlink.com> | 7291 | M: Jean Delvare <khali@linux-fr.org> |
7286 | L: lm-sensors@lm-sensors.org | 7292 | L: lm-sensors@lm-sensors.org |
7287 | S: Maintained | 7293 | S: Maintained |
7288 | F: Documentation/hwmon/smsc47b397 | 7294 | F: Documentation/hwmon/smsc47b397 |
@@ -7373,7 +7379,7 @@ F: sound/ | |||
7373 | 7379 | ||
7374 | SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) | 7380 | SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) |
7375 | M: Liam Girdwood <lgirdwood@gmail.com> | 7381 | M: Liam Girdwood <lgirdwood@gmail.com> |
7376 | M: Mark Brown <broonie@opensource.wolfsonmicro.com> | 7382 | M: Mark Brown <broonie@kernel.org> |
7377 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git | 7383 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git |
7378 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 7384 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
7379 | W: http://alsa-project.org/main/index.php/ASoC | 7385 | W: http://alsa-project.org/main/index.php/ASoC |
@@ -7462,7 +7468,7 @@ F: drivers/clk/spear/ | |||
7462 | 7468 | ||
7463 | SPI SUBSYSTEM | 7469 | SPI SUBSYSTEM |
7464 | M: Grant Likely <grant.likely@secretlab.ca> | 7470 | M: Grant Likely <grant.likely@secretlab.ca> |
7465 | M: Mark Brown <broonie@opensource.wolfsonmicro.com> | 7471 | M: Mark Brown <broonie@kernel.org> |
7466 | L: spi-devel-general@lists.sourceforge.net | 7472 | L: spi-devel-general@lists.sourceforge.net |
7467 | Q: http://patchwork.kernel.org/project/spi-devel-general/list/ | 7473 | Q: http://patchwork.kernel.org/project/spi-devel-general/list/ |
7468 | T: git git://git.secretlab.ca/git/linux-2.6.git | 7474 | T: git git://git.secretlab.ca/git/linux-2.6.git |
@@ -7705,9 +7711,10 @@ F: include/linux/swiotlb.h | |||
7705 | 7711 | ||
7706 | SYNOPSYS ARC ARCHITECTURE | 7712 | SYNOPSYS ARC ARCHITECTURE |
7707 | M: Vineet Gupta <vgupta@synopsys.com> | 7713 | M: Vineet Gupta <vgupta@synopsys.com> |
7708 | L: linux-snps-arc@vger.kernel.org | ||
7709 | S: Supported | 7714 | S: Supported |
7710 | F: arch/arc/ | 7715 | F: arch/arc/ |
7716 | F: Documentation/devicetree/bindings/arc/ | ||
7717 | F: drivers/tty/serial/arc-uart.c | ||
7711 | 7718 | ||
7712 | SYSV FILESYSTEM | 7719 | SYSV FILESYSTEM |
7713 | M: Christoph Hellwig <hch@infradead.org> | 7720 | M: Christoph Hellwig <hch@infradead.org> |
@@ -8706,7 +8713,7 @@ F: drivers/scsi/vmw_pvscsi.h | |||
8706 | 8713 | ||
8707 | VOLTAGE AND CURRENT REGULATOR FRAMEWORK | 8714 | VOLTAGE AND CURRENT REGULATOR FRAMEWORK |
8708 | M: Liam Girdwood <lrg@ti.com> | 8715 | M: Liam Girdwood <lrg@ti.com> |
8709 | M: Mark Brown <broonie@opensource.wolfsonmicro.com> | 8716 | M: Mark Brown <broonie@kernel.org> |
8710 | W: http://opensource.wolfsonmicro.com/node/15 | 8717 | W: http://opensource.wolfsonmicro.com/node/15 |
8711 | W: http://www.slimlogic.co.uk/?p=48 | 8718 | W: http://www.slimlogic.co.uk/?p=48 |
8712 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/regulator.git | 8719 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/regulator.git |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 9 | 2 | PATCHLEVEL = 9 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc3 | 4 | EXTRAVERSION = -rc6 |
5 | NAME = Unicycling Gorilla | 5 | NAME = Unicycling Gorilla |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/alpha/Makefile b/arch/alpha/Makefile index 4759fe751aa1..2cc3cc519c54 100644 --- a/arch/alpha/Makefile +++ b/arch/alpha/Makefile | |||
@@ -12,7 +12,7 @@ NM := $(NM) -B | |||
12 | 12 | ||
13 | LDFLAGS_vmlinux := -static -N #-relax | 13 | LDFLAGS_vmlinux := -static -N #-relax |
14 | CHECKFLAGS += -D__alpha__ -m64 | 14 | CHECKFLAGS += -D__alpha__ -m64 |
15 | cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data | 15 | cflags-y := -pipe -mno-fp-regs -ffixed-8 |
16 | cflags-y += $(call cc-option, -fno-jump-tables) | 16 | cflags-y += $(call cc-option, -fno-jump-tables) |
17 | 17 | ||
18 | cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4 | 18 | cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4 |
diff --git a/arch/alpha/include/asm/floppy.h b/arch/alpha/include/asm/floppy.h index 46cefbd50e73..bae97eb19d26 100644 --- a/arch/alpha/include/asm/floppy.h +++ b/arch/alpha/include/asm/floppy.h | |||
@@ -26,7 +26,7 @@ | |||
26 | #define fd_disable_irq() disable_irq(FLOPPY_IRQ) | 26 | #define fd_disable_irq() disable_irq(FLOPPY_IRQ) |
27 | #define fd_cacheflush(addr,size) /* nothing */ | 27 | #define fd_cacheflush(addr,size) /* nothing */ |
28 | #define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\ | 28 | #define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\ |
29 | IRQF_DISABLED, "floppy", NULL) | 29 | 0, "floppy", NULL) |
30 | #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) | 30 | #define fd_free_irq() free_irq(FLOPPY_IRQ, NULL) |
31 | 31 | ||
32 | #ifdef CONFIG_PCI | 32 | #ifdef CONFIG_PCI |
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index 2872accd2215..7b2be251c30f 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c | |||
@@ -117,13 +117,6 @@ handle_irq(int irq) | |||
117 | return; | 117 | return; |
118 | } | 118 | } |
119 | 119 | ||
120 | /* | ||
121 | * From here we must proceed with IPL_MAX. Note that we do not | ||
122 | * explicitly enable interrupts afterwards - some MILO PALcode | ||
123 | * (namely LX164 one) seems to have severe problems with RTI | ||
124 | * at IPL 0. | ||
125 | */ | ||
126 | local_irq_disable(); | ||
127 | irq_enter(); | 120 | irq_enter(); |
128 | generic_handle_irq_desc(irq, desc); | 121 | generic_handle_irq_desc(irq, desc); |
129 | irq_exit(); | 122 | irq_exit(); |
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c index 772ddfdb71a8..f433fc11877a 100644 --- a/arch/alpha/kernel/irq_alpha.c +++ b/arch/alpha/kernel/irq_alpha.c | |||
@@ -45,6 +45,14 @@ do_entInt(unsigned long type, unsigned long vector, | |||
45 | unsigned long la_ptr, struct pt_regs *regs) | 45 | unsigned long la_ptr, struct pt_regs *regs) |
46 | { | 46 | { |
47 | struct pt_regs *old_regs; | 47 | struct pt_regs *old_regs; |
48 | |||
49 | /* | ||
50 | * Disable interrupts during IRQ handling. | ||
51 | * Note that there is no matching local_irq_enable() due to | ||
52 | * severe problems with RTI at IPL0 and some MILO PALcode | ||
53 | * (namely LX164). | ||
54 | */ | ||
55 | local_irq_disable(); | ||
48 | switch (type) { | 56 | switch (type) { |
49 | case 0: | 57 | case 0: |
50 | #ifdef CONFIG_SMP | 58 | #ifdef CONFIG_SMP |
@@ -62,7 +70,6 @@ do_entInt(unsigned long type, unsigned long vector, | |||
62 | { | 70 | { |
63 | long cpu; | 71 | long cpu; |
64 | 72 | ||
65 | local_irq_disable(); | ||
66 | smp_percpu_timer_interrupt(regs); | 73 | smp_percpu_timer_interrupt(regs); |
67 | cpu = smp_processor_id(); | 74 | cpu = smp_processor_id(); |
68 | if (cpu != boot_cpuid) { | 75 | if (cpu != boot_cpuid) { |
@@ -222,7 +229,6 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr, | |||
222 | 229 | ||
223 | struct irqaction timer_irqaction = { | 230 | struct irqaction timer_irqaction = { |
224 | .handler = timer_interrupt, | 231 | .handler = timer_interrupt, |
225 | .flags = IRQF_DISABLED, | ||
226 | .name = "timer", | 232 | .name = "timer", |
227 | }; | 233 | }; |
228 | 234 | ||
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 4d4c046f708d..1383f8601a93 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c | |||
@@ -188,6 +188,10 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr) | |||
188 | extern void free_reserved_mem(void *, void *); | 188 | extern void free_reserved_mem(void *, void *); |
189 | extern void pcibios_claim_one_bus(struct pci_bus *); | 189 | extern void pcibios_claim_one_bus(struct pci_bus *); |
190 | 190 | ||
191 | static struct resource irongate_io = { | ||
192 | .name = "Irongate PCI IO", | ||
193 | .flags = IORESOURCE_IO, | ||
194 | }; | ||
191 | static struct resource irongate_mem = { | 195 | static struct resource irongate_mem = { |
192 | .name = "Irongate PCI MEM", | 196 | .name = "Irongate PCI MEM", |
193 | .flags = IORESOURCE_MEM, | 197 | .flags = IORESOURCE_MEM, |
@@ -209,6 +213,7 @@ nautilus_init_pci(void) | |||
209 | 213 | ||
210 | irongate = pci_get_bus_and_slot(0, 0); | 214 | irongate = pci_get_bus_and_slot(0, 0); |
211 | bus->self = irongate; | 215 | bus->self = irongate; |
216 | bus->resource[0] = &irongate_io; | ||
212 | bus->resource[1] = &irongate_mem; | 217 | bus->resource[1] = &irongate_mem; |
213 | 218 | ||
214 | pci_bus_size_bridges(bus); | 219 | pci_bus_size_bridges(bus); |
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index 5cf4a481b8c5..a53cf03f49d5 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c | |||
@@ -280,15 +280,15 @@ titan_late_init(void) | |||
280 | * all reported to the kernel as machine checks, so the handler | 280 | * all reported to the kernel as machine checks, so the handler |
281 | * is a nop so it can be called to count the individual events. | 281 | * is a nop so it can be called to count the individual events. |
282 | */ | 282 | */ |
283 | titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED, | 283 | titan_request_irq(63+16, titan_intr_nop, 0, |
284 | "CChip Error", NULL); | 284 | "CChip Error", NULL); |
285 | titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED, | 285 | titan_request_irq(62+16, titan_intr_nop, 0, |
286 | "PChip 0 H_Error", NULL); | 286 | "PChip 0 H_Error", NULL); |
287 | titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED, | 287 | titan_request_irq(61+16, titan_intr_nop, 0, |
288 | "PChip 1 H_Error", NULL); | 288 | "PChip 1 H_Error", NULL); |
289 | titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED, | 289 | titan_request_irq(60+16, titan_intr_nop, 0, |
290 | "PChip 0 C_Error", NULL); | 290 | "PChip 0 C_Error", NULL); |
291 | titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED, | 291 | titan_request_irq(59+16, titan_intr_nop, 0, |
292 | "PChip 1 C_Error", NULL); | 292 | "PChip 1 C_Error", NULL); |
293 | 293 | ||
294 | /* | 294 | /* |
@@ -348,9 +348,9 @@ privateer_init_pci(void) | |||
348 | * Hook a couple of extra err interrupts that the | 348 | * Hook a couple of extra err interrupts that the |
349 | * common titan code won't. | 349 | * common titan code won't. |
350 | */ | 350 | */ |
351 | titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED, | 351 | titan_request_irq(53+16, titan_intr_nop, 0, |
352 | "NMI", NULL); | 352 | "NMI", NULL); |
353 | titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED, | 353 | titan_request_irq(50+16, titan_intr_nop, 0, |
354 | "Temperature Warning", NULL); | 354 | "Temperature Warning", NULL); |
355 | 355 | ||
356 | /* | 356 | /* |
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h index 31f77aec0823..45b8e0cea176 100644 --- a/arch/arc/include/asm/dma-mapping.h +++ b/arch/arc/include/asm/dma-mapping.h | |||
@@ -126,7 +126,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, | |||
126 | int i; | 126 | int i; |
127 | 127 | ||
128 | for_each_sg(sg, s, nents, i) | 128 | for_each_sg(sg, s, nents, i) |
129 | sg->dma_address = dma_map_page(dev, sg_page(s), s->offset, | 129 | s->dma_address = dma_map_page(dev, sg_page(s), s->offset, |
130 | s->length, dir); | 130 | s->length, dir); |
131 | 131 | ||
132 | return nents; | 132 | return nents; |
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h index f4c8d36ebecb..a26282857683 100644 --- a/arch/arc/include/asm/elf.h +++ b/arch/arc/include/asm/elf.h | |||
@@ -72,7 +72,4 @@ extern int elf_check_arch(const struct elf32_hdr *); | |||
72 | */ | 72 | */ |
73 | #define ELF_PLATFORM (NULL) | 73 | #define ELF_PLATFORM (NULL) |
74 | 74 | ||
75 | #define SET_PERSONALITY(ex) \ | ||
76 | set_personality(PER_LINUX | (current->personality & (~PER_MASK))) | ||
77 | |||
78 | #endif | 75 | #endif |
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h index 23daa326fc9b..eb2ae53187d9 100644 --- a/arch/arc/include/asm/entry.h +++ b/arch/arc/include/asm/entry.h | |||
@@ -415,7 +415,7 @@ | |||
415 | *-------------------------------------------------------------*/ | 415 | *-------------------------------------------------------------*/ |
416 | .macro SAVE_ALL_EXCEPTION marker | 416 | .macro SAVE_ALL_EXCEPTION marker |
417 | 417 | ||
418 | st \marker, [sp, 8] | 418 | st \marker, [sp, 8] /* orig_r8 */ |
419 | st r0, [sp, 4] /* orig_r0, needed only for sys calls */ | 419 | st r0, [sp, 4] /* orig_r0, needed only for sys calls */ |
420 | 420 | ||
421 | /* Restore r9 used to code the early prologue */ | 421 | /* Restore r9 used to code the early prologue */ |
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h index ccd84806b62f..eac071668201 100644 --- a/arch/arc/include/asm/irqflags.h +++ b/arch/arc/include/asm/irqflags.h | |||
@@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void) | |||
39 | " flag.nz %0 \n" | 39 | " flag.nz %0 \n" |
40 | : "=r"(temp), "=r"(flags) | 40 | : "=r"(temp), "=r"(flags) |
41 | : "n"((STATUS_E1_MASK | STATUS_E2_MASK)) | 41 | : "n"((STATUS_E1_MASK | STATUS_E2_MASK)) |
42 | : "cc"); | 42 | : "memory", "cc"); |
43 | 43 | ||
44 | return flags; | 44 | return flags; |
45 | } | 45 | } |
@@ -53,7 +53,8 @@ static inline void arch_local_irq_restore(unsigned long flags) | |||
53 | __asm__ __volatile__( | 53 | __asm__ __volatile__( |
54 | " flag %0 \n" | 54 | " flag %0 \n" |
55 | : | 55 | : |
56 | : "r"(flags)); | 56 | : "r"(flags) |
57 | : "memory"); | ||
57 | } | 58 | } |
58 | 59 | ||
59 | /* | 60 | /* |
@@ -73,7 +74,8 @@ static inline void arch_local_irq_disable(void) | |||
73 | " and %0, %0, %1 \n" | 74 | " and %0, %0, %1 \n" |
74 | " flag %0 \n" | 75 | " flag %0 \n" |
75 | : "=&r"(temp) | 76 | : "=&r"(temp) |
76 | : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))); | 77 | : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)) |
78 | : "memory"); | ||
77 | } | 79 | } |
78 | 80 | ||
79 | /* | 81 | /* |
@@ -85,7 +87,9 @@ static inline long arch_local_save_flags(void) | |||
85 | 87 | ||
86 | __asm__ __volatile__( | 88 | __asm__ __volatile__( |
87 | " lr %0, [status32] \n" | 89 | " lr %0, [status32] \n" |
88 | : "=&r"(temp)); | 90 | : "=&r"(temp) |
91 | : | ||
92 | : "memory"); | ||
89 | 93 | ||
90 | return temp; | 94 | return temp; |
91 | } | 95 | } |
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h index f3c4934f0ca9..4930957ca3d3 100644 --- a/arch/arc/include/asm/kgdb.h +++ b/arch/arc/include/asm/kgdb.h | |||
@@ -13,7 +13,7 @@ | |||
13 | 13 | ||
14 | #ifdef CONFIG_KGDB | 14 | #ifdef CONFIG_KGDB |
15 | 15 | ||
16 | #include <asm/user.h> | 16 | #include <asm/ptrace.h> |
17 | 17 | ||
18 | /* to ensure compatibility with Linux 2.6.35, we don't implement the get/set | 18 | /* to ensure compatibility with Linux 2.6.35, we don't implement the get/set |
19 | * register API yet */ | 19 | * register API yet */ |
@@ -53,9 +53,7 @@ enum arc700_linux_regnums { | |||
53 | }; | 53 | }; |
54 | 54 | ||
55 | #else | 55 | #else |
56 | static inline void kgdb_trap(struct pt_regs *regs, int param) | 56 | #define kgdb_trap(regs, param) |
57 | { | ||
58 | } | ||
59 | #endif | 57 | #endif |
60 | 58 | ||
61 | #endif /* __ARC_KGDB_H__ */ | 59 | #endif /* __ARC_KGDB_H__ */ |
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h index 8ae783d20a81..6179de7e07c2 100644 --- a/arch/arc/include/asm/ptrace.h +++ b/arch/arc/include/asm/ptrace.h | |||
@@ -123,7 +123,7 @@ static inline long regs_return_value(struct pt_regs *regs) | |||
123 | #define orig_r8_IS_SCALL 0x0001 | 123 | #define orig_r8_IS_SCALL 0x0001 |
124 | #define orig_r8_IS_SCALL_RESTARTED 0x0002 | 124 | #define orig_r8_IS_SCALL_RESTARTED 0x0002 |
125 | #define orig_r8_IS_BRKPT 0x0004 | 125 | #define orig_r8_IS_BRKPT 0x0004 |
126 | #define orig_r8_IS_EXCPN 0x0004 | 126 | #define orig_r8_IS_EXCPN 0x0008 |
127 | #define orig_r8_IS_IRQ1 0x0010 | 127 | #define orig_r8_IS_IRQ1 0x0010 |
128 | #define orig_r8_IS_IRQ2 0x0020 | 128 | #define orig_r8_IS_IRQ2 0x0020 |
129 | 129 | ||
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h index e53a5340ba4f..dd785befe7fd 100644 --- a/arch/arc/include/asm/syscalls.h +++ b/arch/arc/include/asm/syscalls.h | |||
@@ -16,8 +16,6 @@ | |||
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | 17 | ||
18 | int sys_clone_wrapper(int, int, int, int, int); | 18 | int sys_clone_wrapper(int, int, int, int, int); |
19 | int sys_fork_wrapper(void); | ||
20 | int sys_vfork_wrapper(void); | ||
21 | int sys_cacheflush(uint32_t, uint32_t uint32_t); | 19 | int sys_cacheflush(uint32_t, uint32_t uint32_t); |
22 | int sys_arc_settls(void *); | 20 | int sys_arc_settls(void *); |
23 | int sys_arc_gettls(void); | 21 | int sys_arc_gettls(void); |
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h index 6afa4f702075..30333cec0fef 100644 --- a/arch/arc/include/uapi/asm/ptrace.h +++ b/arch/arc/include/uapi/asm/ptrace.h | |||
@@ -28,14 +28,14 @@ | |||
28 | */ | 28 | */ |
29 | struct user_regs_struct { | 29 | struct user_regs_struct { |
30 | 30 | ||
31 | struct scratch { | 31 | struct { |
32 | long pad; | 32 | long pad; |
33 | long bta, lp_start, lp_end, lp_count; | 33 | long bta, lp_start, lp_end, lp_count; |
34 | long status32, ret, blink, fp, gp; | 34 | long status32, ret, blink, fp, gp; |
35 | long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; | 35 | long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; |
36 | long sp; | 36 | long sp; |
37 | } scratch; | 37 | } scratch; |
38 | struct callee { | 38 | struct { |
39 | long pad; | 39 | long pad; |
40 | long r25, r24, r23, r22, r21, r20; | 40 | long r25, r24, r23, r22, r21, r20; |
41 | long r19, r18, r17, r16, r15, r14, r13; | 41 | long r19, r18, r17, r16, r15, r14, r13; |
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index ef6800ba2f03..91eeab81f52d 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S | |||
@@ -452,7 +452,7 @@ tracesys: | |||
452 | ; using ERET won't work since next-PC has already committed | 452 | ; using ERET won't work since next-PC has already committed |
453 | lr r12, [efa] | 453 | lr r12, [efa] |
454 | GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11 | 454 | GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11 |
455 | st r12, [r11, THREAD_FAULT_ADDR] | 455 | st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address |
456 | 456 | ||
457 | ; PRE Sys Call Ptrace hook | 457 | ; PRE Sys Call Ptrace hook |
458 | mov r0, sp ; pt_regs needed | 458 | mov r0, sp ; pt_regs needed |
@@ -792,31 +792,6 @@ ARC_EXIT ret_from_fork | |||
792 | 792 | ||
793 | ;################### Special Sys Call Wrappers ########################## | 793 | ;################### Special Sys Call Wrappers ########################## |
794 | 794 | ||
795 | ; TBD: call do_fork directly from here | ||
796 | ARC_ENTRY sys_fork_wrapper | ||
797 | SAVE_CALLEE_SAVED_USER | ||
798 | bl @sys_fork | ||
799 | DISCARD_CALLEE_SAVED_USER | ||
800 | |||
801 | GET_CURR_THR_INFO_FLAGS r10 | ||
802 | btst r10, TIF_SYSCALL_TRACE | ||
803 | bnz tracesys_exit | ||
804 | |||
805 | b ret_from_system_call | ||
806 | ARC_EXIT sys_fork_wrapper | ||
807 | |||
808 | ARC_ENTRY sys_vfork_wrapper | ||
809 | SAVE_CALLEE_SAVED_USER | ||
810 | bl @sys_vfork | ||
811 | DISCARD_CALLEE_SAVED_USER | ||
812 | |||
813 | GET_CURR_THR_INFO_FLAGS r10 | ||
814 | btst r10, TIF_SYSCALL_TRACE | ||
815 | bnz tracesys_exit | ||
816 | |||
817 | b ret_from_system_call | ||
818 | ARC_EXIT sys_vfork_wrapper | ||
819 | |||
820 | ARC_ENTRY sys_clone_wrapper | 795 | ARC_ENTRY sys_clone_wrapper |
821 | SAVE_CALLEE_SAVED_USER | 796 | SAVE_CALLEE_SAVED_USER |
822 | bl @sys_clone | 797 | bl @sys_clone |
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c index 2888ba5be47e..52bdc83c1495 100644 --- a/arch/arc/kernel/kgdb.c +++ b/arch/arc/kernel/kgdb.c | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/kgdb.h> | 11 | #include <linux/kgdb.h> |
12 | #include <linux/sched.h> | ||
12 | #include <asm/disasm.h> | 13 | #include <asm/disasm.h> |
13 | #include <asm/cacheflush.h> | 14 | #include <asm/cacheflush.h> |
14 | 15 | ||
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index dc0f968dae0a..2d95ac07df7b 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c | |||
@@ -232,10 +232,8 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) | |||
232 | 232 | ||
233 | n += scnprintf(buf + n, len - n, "\n"); | 233 | n += scnprintf(buf + n, len - n, "\n"); |
234 | 234 | ||
235 | #ifdef _ASM_GENERIC_UNISTD_H | ||
236 | n += scnprintf(buf + n, len - n, | 235 | n += scnprintf(buf + n, len - n, |
237 | "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n"); | 236 | "OS ABI [v3]\t: no-legacy-syscalls\n"); |
238 | #endif | ||
239 | 237 | ||
240 | return buf; | 238 | return buf; |
241 | } | 239 | } |
diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c index f6bdd07583f3..9d6c1ca26af6 100644 --- a/arch/arc/kernel/sys.c +++ b/arch/arc/kernel/sys.c | |||
@@ -6,8 +6,6 @@ | |||
6 | #include <asm/syscalls.h> | 6 | #include <asm/syscalls.h> |
7 | 7 | ||
8 | #define sys_clone sys_clone_wrapper | 8 | #define sys_clone sys_clone_wrapper |
9 | #define sys_fork sys_fork_wrapper | ||
10 | #define sys_vfork sys_vfork_wrapper | ||
11 | 9 | ||
12 | #undef __SYSCALL | 10 | #undef __SYSCALL |
13 | #define __SYSCALL(nr, call) [nr] = (call), | 11 | #define __SYSCALL(nr, call) [nr] = (call), |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 2c3bdce15134..1cacda426a0e 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -49,7 +49,6 @@ config ARM | |||
49 | select HAVE_REGS_AND_STACK_ACCESS_API | 49 | select HAVE_REGS_AND_STACK_ACCESS_API |
50 | select HAVE_SYSCALL_TRACEPOINTS | 50 | select HAVE_SYSCALL_TRACEPOINTS |
51 | select HAVE_UID16 | 51 | select HAVE_UID16 |
52 | select VIRT_TO_BUS | ||
53 | select KTIME_SCALAR | 52 | select KTIME_SCALAR |
54 | select PERF_USE_VMALLOC | 53 | select PERF_USE_VMALLOC |
55 | select RTC_LIB | 54 | select RTC_LIB |
@@ -743,6 +742,7 @@ config ARCH_RPC | |||
743 | select NEED_MACH_IO_H | 742 | select NEED_MACH_IO_H |
744 | select NEED_MACH_MEMORY_H | 743 | select NEED_MACH_MEMORY_H |
745 | select NO_IOPORT | 744 | select NO_IOPORT |
745 | select VIRT_TO_BUS | ||
746 | help | 746 | help |
747 | On the Acorn Risc-PC, Linux can support the internal IDE disk and | 747 | On the Acorn Risc-PC, Linux can support the internal IDE disk and |
748 | CD-ROM interface, serial and parallel port, and the floppy drive. | 748 | CD-ROM interface, serial and parallel port, and the floppy drive. |
@@ -878,6 +878,7 @@ config ARCH_SHARK | |||
878 | select ISA_DMA | 878 | select ISA_DMA |
879 | select NEED_MACH_MEMORY_H | 879 | select NEED_MACH_MEMORY_H |
880 | select PCI | 880 | select PCI |
881 | select VIRT_TO_BUS | ||
881 | select ZONE_DMA | 882 | select ZONE_DMA |
882 | help | 883 | help |
883 | Support for the StrongARM based Digital DNARD machine, also known | 884 | Support for the StrongARM based Digital DNARD machine, also known |
@@ -1005,12 +1006,12 @@ config ARCH_MULTI_V4_V5 | |||
1005 | bool | 1006 | bool |
1006 | 1007 | ||
1007 | config ARCH_MULTI_V6 | 1008 | config ARCH_MULTI_V6 |
1008 | bool "ARMv6 based platforms (ARM11, Scorpion, ...)" | 1009 | bool "ARMv6 based platforms (ARM11)" |
1009 | select ARCH_MULTI_V6_V7 | 1010 | select ARCH_MULTI_V6_V7 |
1010 | select CPU_V6 | 1011 | select CPU_V6 |
1011 | 1012 | ||
1012 | config ARCH_MULTI_V7 | 1013 | config ARCH_MULTI_V7 |
1013 | bool "ARMv7 based platforms (Cortex-A, PJ4, Krait)" | 1014 | bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)" |
1014 | default y | 1015 | default y |
1015 | select ARCH_MULTI_V6_V7 | 1016 | select ARCH_MULTI_V6_V7 |
1016 | select ARCH_VEXPRESS | 1017 | select ARCH_VEXPRESS |
@@ -1182,9 +1183,9 @@ config ARM_NR_BANKS | |||
1182 | default 8 | 1183 | default 8 |
1183 | 1184 | ||
1184 | config IWMMXT | 1185 | config IWMMXT |
1185 | bool "Enable iWMMXt support" | 1186 | bool "Enable iWMMXt support" if !CPU_PJ4 |
1186 | depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 | 1187 | depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 |
1187 | default y if PXA27x || PXA3xx || ARCH_MMP | 1188 | default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4 |
1188 | help | 1189 | help |
1189 | Enable support for iWMMXt context switching at run time if | 1190 | Enable support for iWMMXt context switching at run time if |
1190 | running on a CPU that supports it. | 1191 | running on a CPU that supports it. |
@@ -1438,6 +1439,16 @@ config ARM_ERRATA_775420 | |||
1438 | to deadlock. This workaround puts DSB before executing ISB if | 1439 | to deadlock. This workaround puts DSB before executing ISB if |
1439 | an abort may occur on cache maintenance. | 1440 | an abort may occur on cache maintenance. |
1440 | 1441 | ||
1442 | config ARM_ERRATA_798181 | ||
1443 | bool "ARM errata: TLBI/DSB failure on Cortex-A15" | ||
1444 | depends on CPU_V7 && SMP | ||
1445 | help | ||
1446 | On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not | ||
1447 | adequately shooting down all use of the old entries. This | ||
1448 | option enables the Linux kernel workaround for this erratum | ||
1449 | which sends an IPI to the CPUs that are running the same ASID | ||
1450 | as the one being invalidated. | ||
1451 | |||
1441 | endmenu | 1452 | endmenu |
1442 | 1453 | ||
1443 | source "arch/arm/common/Kconfig" | 1454 | source "arch/arm/common/Kconfig" |
@@ -1461,10 +1472,6 @@ config ISA_DMA | |||
1461 | bool | 1472 | bool |
1462 | select ISA_DMA_API | 1473 | select ISA_DMA_API |
1463 | 1474 | ||
1464 | config ARCH_NO_VIRT_TO_BUS | ||
1465 | def_bool y | ||
1466 | depends on !ARCH_RPC && !ARCH_NETWINDER && !ARCH_SHARK | ||
1467 | |||
1468 | # Select ISA DMA interface | 1475 | # Select ISA DMA interface |
1469 | config ISA_DMA_API | 1476 | config ISA_DMA_API |
1470 | bool | 1477 | bool |
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index ecfcdba2d17c..9b31f4311ea2 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug | |||
@@ -495,6 +495,7 @@ config DEBUG_IMX_UART_PORT | |||
495 | DEBUG_IMX53_UART || \ | 495 | DEBUG_IMX53_UART || \ |
496 | DEBUG_IMX6Q_UART | 496 | DEBUG_IMX6Q_UART |
497 | default 1 | 497 | default 1 |
498 | depends on ARCH_MXC | ||
498 | help | 499 | help |
499 | Choose UART port on which kernel low-level debug messages | 500 | Choose UART port on which kernel low-level debug messages |
500 | should be output. | 501 | should be output. |
diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts index dd0c57dd9f30..3234875824dc 100644 --- a/arch/arm/boot/dts/armada-370-mirabox.dts +++ b/arch/arm/boot/dts/armada-370-mirabox.dts | |||
@@ -54,7 +54,7 @@ | |||
54 | }; | 54 | }; |
55 | 55 | ||
56 | mvsdio@d00d4000 { | 56 | mvsdio@d00d4000 { |
57 | pinctrl-0 = <&sdio_pins2>; | 57 | pinctrl-0 = <&sdio_pins3>; |
58 | pinctrl-names = "default"; | 58 | pinctrl-names = "default"; |
59 | status = "okay"; | 59 | status = "okay"; |
60 | /* | 60 | /* |
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi index 8188d138020e..a195debb67d3 100644 --- a/arch/arm/boot/dts/armada-370.dtsi +++ b/arch/arm/boot/dts/armada-370.dtsi | |||
@@ -59,6 +59,12 @@ | |||
59 | "mpp50", "mpp51", "mpp52"; | 59 | "mpp50", "mpp51", "mpp52"; |
60 | marvell,function = "sd0"; | 60 | marvell,function = "sd0"; |
61 | }; | 61 | }; |
62 | |||
63 | sdio_pins3: sdio-pins3 { | ||
64 | marvell,pins = "mpp48", "mpp49", "mpp50", | ||
65 | "mpp51", "mpp52", "mpp53"; | ||
66 | marvell,function = "sd0"; | ||
67 | }; | ||
62 | }; | 68 | }; |
63 | 69 | ||
64 | gpio0: gpio@d0018100 { | 70 | gpio0: gpio@d0018100 { |
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index aa98e641931f..a98c0d50fbbe 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/at91sam9x5.dtsi | |||
@@ -238,8 +238,32 @@ | |||
238 | nand { | 238 | nand { |
239 | pinctrl_nand: nand-0 { | 239 | pinctrl_nand: nand-0 { |
240 | atmel,pins = | 240 | atmel,pins = |
241 | <3 4 0x0 0x1 /* PD5 gpio RDY pin pull_up */ | 241 | <3 0 0x1 0x0 /* PD0 periph A Read Enable */ |
242 | 3 5 0x0 0x1>; /* PD4 gpio enable pin pull_up */ | 242 | 3 1 0x1 0x0 /* PD1 periph A Write Enable */ |
243 | 3 2 0x1 0x0 /* PD2 periph A Address Latch Enable */ | ||
244 | 3 3 0x1 0x0 /* PD3 periph A Command Latch Enable */ | ||
245 | 3 4 0x0 0x1 /* PD4 gpio Chip Enable pin pull_up */ | ||
246 | 3 5 0x0 0x1 /* PD5 gpio RDY/BUSY pin pull_up */ | ||
247 | 3 6 0x1 0x0 /* PD6 periph A Data bit 0 */ | ||
248 | 3 7 0x1 0x0 /* PD7 periph A Data bit 1 */ | ||
249 | 3 8 0x1 0x0 /* PD8 periph A Data bit 2 */ | ||
250 | 3 9 0x1 0x0 /* PD9 periph A Data bit 3 */ | ||
251 | 3 10 0x1 0x0 /* PD10 periph A Data bit 4 */ | ||
252 | 3 11 0x1 0x0 /* PD11 periph A Data bit 5 */ | ||
253 | 3 12 0x1 0x0 /* PD12 periph A Data bit 6 */ | ||
254 | 3 13 0x1 0x0>; /* PD13 periph A Data bit 7 */ | ||
255 | }; | ||
256 | |||
257 | pinctrl_nand_16bits: nand_16bits-0 { | ||
258 | atmel,pins = | ||
259 | <3 14 0x1 0x0 /* PD14 periph A Data bit 8 */ | ||
260 | 3 15 0x1 0x0 /* PD15 periph A Data bit 9 */ | ||
261 | 3 16 0x1 0x0 /* PD16 periph A Data bit 10 */ | ||
262 | 3 17 0x1 0x0 /* PD17 periph A Data bit 11 */ | ||
263 | 3 18 0x1 0x0 /* PD18 periph A Data bit 12 */ | ||
264 | 3 19 0x1 0x0 /* PD19 periph A Data bit 13 */ | ||
265 | 3 20 0x1 0x0 /* PD20 periph A Data bit 14 */ | ||
266 | 3 21 0x1 0x0>; /* PD21 periph A Data bit 15 */ | ||
243 | }; | 267 | }; |
244 | }; | 268 | }; |
245 | 269 | ||
diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi index 9de93096601a..aaa63d0a8096 100644 --- a/arch/arm/boot/dts/dbx5x0.dtsi +++ b/arch/arm/boot/dts/dbx5x0.dtsi | |||
@@ -191,8 +191,8 @@ | |||
191 | 191 | ||
192 | prcmu: prcmu@80157000 { | 192 | prcmu: prcmu@80157000 { |
193 | compatible = "stericsson,db8500-prcmu"; | 193 | compatible = "stericsson,db8500-prcmu"; |
194 | reg = <0x80157000 0x1000>; | 194 | reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>; |
195 | reg-names = "prcmu"; | 195 | reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm"; |
196 | interrupts = <0 47 0x4>; | 196 | interrupts = <0 47 0x4>; |
197 | #address-cells = <1>; | 197 | #address-cells = <1>; |
198 | #size-cells = <1>; | 198 | #size-cells = <1>; |
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi index e1347fceb5bc..1a62bcf18aa3 100644 --- a/arch/arm/boot/dts/exynos4.dtsi +++ b/arch/arm/boot/dts/exynos4.dtsi | |||
@@ -275,18 +275,27 @@ | |||
275 | compatible = "arm,pl330", "arm,primecell"; | 275 | compatible = "arm,pl330", "arm,primecell"; |
276 | reg = <0x12680000 0x1000>; | 276 | reg = <0x12680000 0x1000>; |
277 | interrupts = <0 35 0>; | 277 | interrupts = <0 35 0>; |
278 | #dma-cells = <1>; | ||
279 | #dma-channels = <8>; | ||
280 | #dma-requests = <32>; | ||
278 | }; | 281 | }; |
279 | 282 | ||
280 | pdma1: pdma@12690000 { | 283 | pdma1: pdma@12690000 { |
281 | compatible = "arm,pl330", "arm,primecell"; | 284 | compatible = "arm,pl330", "arm,primecell"; |
282 | reg = <0x12690000 0x1000>; | 285 | reg = <0x12690000 0x1000>; |
283 | interrupts = <0 36 0>; | 286 | interrupts = <0 36 0>; |
287 | #dma-cells = <1>; | ||
288 | #dma-channels = <8>; | ||
289 | #dma-requests = <32>; | ||
284 | }; | 290 | }; |
285 | 291 | ||
286 | mdma1: mdma@12850000 { | 292 | mdma1: mdma@12850000 { |
287 | compatible = "arm,pl330", "arm,primecell"; | 293 | compatible = "arm,pl330", "arm,primecell"; |
288 | reg = <0x12850000 0x1000>; | 294 | reg = <0x12850000 0x1000>; |
289 | interrupts = <0 34 0>; | 295 | interrupts = <0 34 0>; |
296 | #dma-cells = <1>; | ||
297 | #dma-channels = <8>; | ||
298 | #dma-requests = <1>; | ||
290 | }; | 299 | }; |
291 | }; | 300 | }; |
292 | }; | 301 | }; |
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi index 5f3562ad6746..9a99755920c0 100644 --- a/arch/arm/boot/dts/exynos5440.dtsi +++ b/arch/arm/boot/dts/exynos5440.dtsi | |||
@@ -142,12 +142,18 @@ | |||
142 | compatible = "arm,pl330", "arm,primecell"; | 142 | compatible = "arm,pl330", "arm,primecell"; |
143 | reg = <0x120000 0x1000>; | 143 | reg = <0x120000 0x1000>; |
144 | interrupts = <0 34 0>; | 144 | interrupts = <0 34 0>; |
145 | #dma-cells = <1>; | ||
146 | #dma-channels = <8>; | ||
147 | #dma-requests = <32>; | ||
145 | }; | 148 | }; |
146 | 149 | ||
147 | pdma1: pdma@121B0000 { | 150 | pdma1: pdma@121B0000 { |
148 | compatible = "arm,pl330", "arm,primecell"; | 151 | compatible = "arm,pl330", "arm,primecell"; |
149 | reg = <0x121000 0x1000>; | 152 | reg = <0x121000 0x1000>; |
150 | interrupts = <0 35 0>; | 153 | interrupts = <0 35 0>; |
154 | #dma-cells = <1>; | ||
155 | #dma-channels = <8>; | ||
156 | #dma-requests = <32>; | ||
151 | }; | 157 | }; |
152 | }; | 158 | }; |
153 | 159 | ||
diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts index 6ce3d17c3a29..fd36e1cca104 100644 --- a/arch/arm/boot/dts/imx28-m28evk.dts +++ b/arch/arm/boot/dts/imx28-m28evk.dts | |||
@@ -152,7 +152,6 @@ | |||
152 | i2c0: i2c@80058000 { | 152 | i2c0: i2c@80058000 { |
153 | pinctrl-names = "default"; | 153 | pinctrl-names = "default"; |
154 | pinctrl-0 = <&i2c0_pins_a>; | 154 | pinctrl-0 = <&i2c0_pins_a>; |
155 | clock-frequency = <400000>; | ||
156 | status = "okay"; | 155 | status = "okay"; |
157 | 156 | ||
158 | sgtl5000: codec@0a { | 157 | sgtl5000: codec@0a { |
diff --git a/arch/arm/boot/dts/imx28-sps1.dts b/arch/arm/boot/dts/imx28-sps1.dts index e6cde8aa7fff..6c6a5442800a 100644 --- a/arch/arm/boot/dts/imx28-sps1.dts +++ b/arch/arm/boot/dts/imx28-sps1.dts | |||
@@ -70,7 +70,6 @@ | |||
70 | i2c0: i2c@80058000 { | 70 | i2c0: i2c@80058000 { |
71 | pinctrl-names = "default"; | 71 | pinctrl-names = "default"; |
72 | pinctrl-0 = <&i2c0_pins_a>; | 72 | pinctrl-0 = <&i2c0_pins_a>; |
73 | clock-frequency = <400000>; | ||
74 | status = "okay"; | 73 | status = "okay"; |
75 | 74 | ||
76 | rtc: rtc@51 { | 75 | rtc: rtc@51 { |
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi index 06ec460b4581..281a223591ff 100644 --- a/arch/arm/boot/dts/imx6qdl.dtsi +++ b/arch/arm/boot/dts/imx6qdl.dtsi | |||
@@ -91,6 +91,7 @@ | |||
91 | compatible = "arm,cortex-a9-twd-timer"; | 91 | compatible = "arm,cortex-a9-twd-timer"; |
92 | reg = <0x00a00600 0x20>; | 92 | reg = <0x00a00600 0x20>; |
93 | interrupts = <1 13 0xf01>; | 93 | interrupts = <1 13 0xf01>; |
94 | clocks = <&clks 15>; | ||
94 | }; | 95 | }; |
95 | 96 | ||
96 | L2: l2-cache@00a02000 { | 97 | L2: l2-cache@00a02000 { |
diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts index bd83b8fc7c83..c3573be7b92c 100644 --- a/arch/arm/boot/dts/kirkwood-goflexnet.dts +++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts | |||
@@ -77,6 +77,7 @@ | |||
77 | }; | 77 | }; |
78 | 78 | ||
79 | nand@3000000 { | 79 | nand@3000000 { |
80 | chip-delay = <40>; | ||
80 | status = "okay"; | 81 | status = "okay"; |
81 | 82 | ||
82 | partition@0 { | 83 | partition@0 { |
diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts index 93c3afbef9ee..3694e94f6e99 100644 --- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts +++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts | |||
@@ -96,11 +96,11 @@ | |||
96 | marvell,function = "gpio"; | 96 | marvell,function = "gpio"; |
97 | }; | 97 | }; |
98 | pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 { | 98 | pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 { |
99 | marvell,pins = "mpp44"; | 99 | marvell,pins = "mpp46"; |
100 | marvell,function = "gpio"; | 100 | marvell,function = "gpio"; |
101 | }; | 101 | }; |
102 | pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 { | 102 | pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 { |
103 | marvell,pins = "mpp45"; | 103 | marvell,pins = "mpp47"; |
104 | marvell,function = "gpio"; | 104 | marvell,function = "gpio"; |
105 | }; | 105 | }; |
106 | 106 | ||
@@ -157,14 +157,14 @@ | |||
157 | gpios = <&gpio0 16 0>; | 157 | gpios = <&gpio0 16 0>; |
158 | linux,default-trigger = "default-on"; | 158 | linux,default-trigger = "default-on"; |
159 | }; | 159 | }; |
160 | health_led1 { | 160 | rebuild_led { |
161 | label = "status:white:rebuild_led"; | ||
162 | gpios = <&gpio1 4 0>; | ||
163 | }; | ||
164 | health_led { | ||
161 | label = "status:red:health_led"; | 165 | label = "status:red:health_led"; |
162 | gpios = <&gpio1 5 0>; | 166 | gpios = <&gpio1 5 0>; |
163 | }; | 167 | }; |
164 | health_led2 { | ||
165 | label = "status:white:health_led"; | ||
166 | gpios = <&gpio1 4 0>; | ||
167 | }; | ||
168 | backup_led { | 168 | backup_led { |
169 | label = "status:blue:backup_led"; | 169 | label = "status:blue:backup_led"; |
170 | gpios = <&gpio0 15 0>; | 170 | gpios = <&gpio0 15 0>; |
diff --git a/arch/arm/boot/dts/orion5x.dtsi b/arch/arm/boot/dts/orion5x.dtsi index 8aad00f81ed9..f7bec3b1ba32 100644 --- a/arch/arm/boot/dts/orion5x.dtsi +++ b/arch/arm/boot/dts/orion5x.dtsi | |||
@@ -13,6 +13,9 @@ | |||
13 | compatible = "marvell,orion5x"; | 13 | compatible = "marvell,orion5x"; |
14 | interrupt-parent = <&intc>; | 14 | interrupt-parent = <&intc>; |
15 | 15 | ||
16 | aliases { | ||
17 | gpio0 = &gpio0; | ||
18 | }; | ||
16 | intc: interrupt-controller { | 19 | intc: interrupt-controller { |
17 | compatible = "marvell,orion-intc", "marvell,intc"; | 20 | compatible = "marvell,orion-intc", "marvell,intc"; |
18 | interrupt-controller; | 21 | interrupt-controller; |
@@ -32,7 +35,9 @@ | |||
32 | #gpio-cells = <2>; | 35 | #gpio-cells = <2>; |
33 | gpio-controller; | 36 | gpio-controller; |
34 | reg = <0x10100 0x40>; | 37 | reg = <0x10100 0x40>; |
35 | ngpio = <32>; | 38 | ngpios = <32>; |
39 | interrupt-controller; | ||
40 | #interrupt-cells = <2>; | ||
36 | interrupts = <6>, <7>, <8>, <9>; | 41 | interrupts = <6>, <7>, <8>, <9>; |
37 | }; | 42 | }; |
38 | 43 | ||
@@ -91,7 +96,7 @@ | |||
91 | reg = <0x90000 0x10000>, | 96 | reg = <0x90000 0x10000>, |
92 | <0xf2200000 0x800>; | 97 | <0xf2200000 0x800>; |
93 | reg-names = "regs", "sram"; | 98 | reg-names = "regs", "sram"; |
94 | interrupts = <22>; | 99 | interrupts = <28>; |
95 | status = "okay"; | 100 | status = "okay"; |
96 | }; | 101 | }; |
97 | }; | 102 | }; |
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi index 48d00a099ce3..3d3f64d2111a 100644 --- a/arch/arm/boot/dts/tegra20.dtsi +++ b/arch/arm/boot/dts/tegra20.dtsi | |||
@@ -385,7 +385,7 @@ | |||
385 | 385 | ||
386 | spi@7000d800 { | 386 | spi@7000d800 { |
387 | compatible = "nvidia,tegra20-slink"; | 387 | compatible = "nvidia,tegra20-slink"; |
388 | reg = <0x7000d480 0x200>; | 388 | reg = <0x7000d800 0x200>; |
389 | interrupts = <0 83 0x04>; | 389 | interrupts = <0 83 0x04>; |
390 | nvidia,dma-request-selector = <&apbdma 17>; | 390 | nvidia,dma-request-selector = <&apbdma 17>; |
391 | #address-cells = <1>; | 391 | #address-cells = <1>; |
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi index 9d87a3ffe998..dbf46c272562 100644 --- a/arch/arm/boot/dts/tegra30.dtsi +++ b/arch/arm/boot/dts/tegra30.dtsi | |||
@@ -372,7 +372,7 @@ | |||
372 | 372 | ||
373 | spi@7000d800 { | 373 | spi@7000d800 { |
374 | compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink"; | 374 | compatible = "nvidia,tegra30-slink", "nvidia,tegra20-slink"; |
375 | reg = <0x7000d480 0x200>; | 375 | reg = <0x7000d800 0x200>; |
376 | interrupts = <0 83 0x04>; | 376 | interrupts = <0 83 0x04>; |
377 | nvidia,dma-request-selector = <&apbdma 17>; | 377 | nvidia,dma-request-selector = <&apbdma 17>; |
378 | #address-cells = <1>; | 378 | #address-cells = <1>; |
diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h index 720799fd3a81..dff714d886d5 100644 --- a/arch/arm/include/asm/delay.h +++ b/arch/arm/include/asm/delay.h | |||
@@ -24,7 +24,7 @@ extern struct arm_delay_ops { | |||
24 | void (*delay)(unsigned long); | 24 | void (*delay)(unsigned long); |
25 | void (*const_udelay)(unsigned long); | 25 | void (*const_udelay)(unsigned long); |
26 | void (*udelay)(unsigned long); | 26 | void (*udelay)(unsigned long); |
27 | bool const_clock; | 27 | unsigned long ticks_per_jiffy; |
28 | } arm_delay_ops; | 28 | } arm_delay_ops; |
29 | 29 | ||
30 | #define __delay(n) arm_delay_ops.delay(n) | 30 | #define __delay(n) arm_delay_ops.delay(n) |
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index 8c5e828f484d..91b99abe7a95 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h | |||
@@ -41,6 +41,13 @@ extern void kunmap_high(struct page *page); | |||
41 | #endif | 41 | #endif |
42 | #endif | 42 | #endif |
43 | 43 | ||
44 | /* | ||
45 | * Needed to be able to broadcast the TLB invalidation for kmap. | ||
46 | */ | ||
47 | #ifdef CONFIG_ARM_ERRATA_798181 | ||
48 | #undef ARCH_NEEDS_KMAP_HIGH_GET | ||
49 | #endif | ||
50 | |||
44 | #ifdef ARCH_NEEDS_KMAP_HIGH_GET | 51 | #ifdef ARCH_NEEDS_KMAP_HIGH_GET |
45 | extern void *kmap_high_get(struct page *page); | 52 | extern void *kmap_high_get(struct page *page); |
46 | #else | 53 | #else |
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 863a6611323c..a7b85e0d0cc1 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h | |||
@@ -27,6 +27,8 @@ void __check_vmalloc_seq(struct mm_struct *mm); | |||
27 | void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); | 27 | void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); |
28 | #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) | 28 | #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) |
29 | 29 | ||
30 | DECLARE_PER_CPU(atomic64_t, active_asids); | ||
31 | |||
30 | #else /* !CONFIG_CPU_HAS_ASID */ | 32 | #else /* !CONFIG_CPU_HAS_ASID */ |
31 | 33 | ||
32 | #ifdef CONFIG_MMU | 34 | #ifdef CONFIG_MMU |
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 4db8c8820f0d..9e9c041358ca 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h | |||
@@ -450,6 +450,21 @@ static inline void local_flush_bp_all(void) | |||
450 | isb(); | 450 | isb(); |
451 | } | 451 | } |
452 | 452 | ||
453 | #ifdef CONFIG_ARM_ERRATA_798181 | ||
454 | static inline void dummy_flush_tlb_a15_erratum(void) | ||
455 | { | ||
456 | /* | ||
457 | * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0. | ||
458 | */ | ||
459 | asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); | ||
460 | dsb(); | ||
461 | } | ||
462 | #else | ||
463 | static inline void dummy_flush_tlb_a15_erratum(void) | ||
464 | { | ||
465 | } | ||
466 | #endif | ||
467 | |||
453 | /* | 468 | /* |
454 | * flush_pmd_entry | 469 | * flush_pmd_entry |
455 | * | 470 | * |
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 3248cde504ed..fefd7f971437 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S | |||
@@ -276,7 +276,13 @@ ENDPROC(ftrace_graph_caller_old) | |||
276 | */ | 276 | */ |
277 | 277 | ||
278 | .macro mcount_enter | 278 | .macro mcount_enter |
279 | /* | ||
280 | * This pad compensates for the push {lr} at the call site. Note that we are | ||
281 | * unable to unwind through a function which does not otherwise save its lr. | ||
282 | */ | ||
283 | UNWIND(.pad #4) | ||
279 | stmdb sp!, {r0-r3, lr} | 284 | stmdb sp!, {r0-r3, lr} |
285 | UNWIND(.save {r0-r3, lr}) | ||
280 | .endm | 286 | .endm |
281 | 287 | ||
282 | .macro mcount_get_lr reg | 288 | .macro mcount_get_lr reg |
@@ -289,6 +295,7 @@ ENDPROC(ftrace_graph_caller_old) | |||
289 | .endm | 295 | .endm |
290 | 296 | ||
291 | ENTRY(__gnu_mcount_nc) | 297 | ENTRY(__gnu_mcount_nc) |
298 | UNWIND(.fnstart) | ||
292 | #ifdef CONFIG_DYNAMIC_FTRACE | 299 | #ifdef CONFIG_DYNAMIC_FTRACE |
293 | mov ip, lr | 300 | mov ip, lr |
294 | ldmia sp!, {lr} | 301 | ldmia sp!, {lr} |
@@ -296,17 +303,22 @@ ENTRY(__gnu_mcount_nc) | |||
296 | #else | 303 | #else |
297 | __mcount | 304 | __mcount |
298 | #endif | 305 | #endif |
306 | UNWIND(.fnend) | ||
299 | ENDPROC(__gnu_mcount_nc) | 307 | ENDPROC(__gnu_mcount_nc) |
300 | 308 | ||
301 | #ifdef CONFIG_DYNAMIC_FTRACE | 309 | #ifdef CONFIG_DYNAMIC_FTRACE |
302 | ENTRY(ftrace_caller) | 310 | ENTRY(ftrace_caller) |
311 | UNWIND(.fnstart) | ||
303 | __ftrace_caller | 312 | __ftrace_caller |
313 | UNWIND(.fnend) | ||
304 | ENDPROC(ftrace_caller) | 314 | ENDPROC(ftrace_caller) |
305 | #endif | 315 | #endif |
306 | 316 | ||
307 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 317 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
308 | ENTRY(ftrace_graph_caller) | 318 | ENTRY(ftrace_graph_caller) |
319 | UNWIND(.fnstart) | ||
309 | __ftrace_graph_caller | 320 | __ftrace_graph_caller |
321 | UNWIND(.fnend) | ||
310 | ENDPROC(ftrace_graph_caller) | 322 | ENDPROC(ftrace_graph_caller) |
311 | #endif | 323 | #endif |
312 | 324 | ||
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index e0eb9a1cae77..8bac553fe213 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S | |||
@@ -267,7 +267,7 @@ __create_page_tables: | |||
267 | addne r6, r6, #1 << SECTION_SHIFT | 267 | addne r6, r6, #1 << SECTION_SHIFT |
268 | strne r6, [r3] | 268 | strne r6, [r3] |
269 | 269 | ||
270 | #if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) | 270 | #if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) |
271 | sub r4, r4, #4 @ Fixup page table pointer | 271 | sub r4, r4, #4 @ Fixup page table pointer |
272 | @ for 64-bit descriptors | 272 | @ for 64-bit descriptors |
273 | #endif | 273 | #endif |
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 96093b75ab90..5dc1aa6f0f7d 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c | |||
@@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused) | |||
966 | } | 966 | } |
967 | 967 | ||
968 | if (err) { | 968 | if (err) { |
969 | pr_warning("CPU %d debug is powered down!\n", cpu); | 969 | pr_warn_once("CPU %d debug is powered down!\n", cpu); |
970 | cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); | 970 | cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); |
971 | return; | 971 | return; |
972 | } | 972 | } |
@@ -987,7 +987,7 @@ clear_vcr: | |||
987 | isb(); | 987 | isb(); |
988 | 988 | ||
989 | if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { | 989 | if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { |
990 | pr_warning("CPU %d failed to disable vector catch\n", cpu); | 990 | pr_warn_once("CPU %d failed to disable vector catch\n", cpu); |
991 | return; | 991 | return; |
992 | } | 992 | } |
993 | 993 | ||
@@ -1007,7 +1007,7 @@ clear_vcr: | |||
1007 | } | 1007 | } |
1008 | 1008 | ||
1009 | if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { | 1009 | if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) { |
1010 | pr_warning("CPU %d failed to clear debug register pairs\n", cpu); | 1010 | pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu); |
1011 | return; | 1011 | return; |
1012 | } | 1012 | } |
1013 | 1013 | ||
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 3f6cbb2e3eda..d343a6c3a6d1 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -353,6 +353,23 @@ void __init early_print(const char *str, ...) | |||
353 | printk("%s", buf); | 353 | printk("%s", buf); |
354 | } | 354 | } |
355 | 355 | ||
356 | static void __init cpuid_init_hwcaps(void) | ||
357 | { | ||
358 | unsigned int divide_instrs; | ||
359 | |||
360 | if (cpu_architecture() < CPU_ARCH_ARMv7) | ||
361 | return; | ||
362 | |||
363 | divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24; | ||
364 | |||
365 | switch (divide_instrs) { | ||
366 | case 2: | ||
367 | elf_hwcap |= HWCAP_IDIVA; | ||
368 | case 1: | ||
369 | elf_hwcap |= HWCAP_IDIVT; | ||
370 | } | ||
371 | } | ||
372 | |||
356 | static void __init feat_v6_fixup(void) | 373 | static void __init feat_v6_fixup(void) |
357 | { | 374 | { |
358 | int id = read_cpuid_id(); | 375 | int id = read_cpuid_id(); |
@@ -483,8 +500,11 @@ static void __init setup_processor(void) | |||
483 | snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c", | 500 | snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c", |
484 | list->elf_name, ENDIANNESS); | 501 | list->elf_name, ENDIANNESS); |
485 | elf_hwcap = list->elf_hwcap; | 502 | elf_hwcap = list->elf_hwcap; |
503 | |||
504 | cpuid_init_hwcaps(); | ||
505 | |||
486 | #ifndef CONFIG_ARM_THUMB | 506 | #ifndef CONFIG_ARM_THUMB |
487 | elf_hwcap &= ~HWCAP_THUMB; | 507 | elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT); |
488 | #endif | 508 | #endif |
489 | 509 | ||
490 | feat_v6_fixup(); | 510 | feat_v6_fixup(); |
@@ -524,7 +544,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size) | |||
524 | size -= start & ~PAGE_MASK; | 544 | size -= start & ~PAGE_MASK; |
525 | bank->start = PAGE_ALIGN(start); | 545 | bank->start = PAGE_ALIGN(start); |
526 | 546 | ||
527 | #ifndef CONFIG_LPAE | 547 | #ifndef CONFIG_ARM_LPAE |
528 | if (bank->start + size < bank->start) { | 548 | if (bank->start + size < bank->start) { |
529 | printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in " | 549 | printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in " |
530 | "32-bit physical address space\n", (long long)start); | 550 | "32-bit physical address space\n", (long long)start); |
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 31644f1978d5..1f2ccccaf009 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -480,7 +480,7 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt) | |||
480 | evt->features = CLOCK_EVT_FEAT_ONESHOT | | 480 | evt->features = CLOCK_EVT_FEAT_ONESHOT | |
481 | CLOCK_EVT_FEAT_PERIODIC | | 481 | CLOCK_EVT_FEAT_PERIODIC | |
482 | CLOCK_EVT_FEAT_DUMMY; | 482 | CLOCK_EVT_FEAT_DUMMY; |
483 | evt->rating = 400; | 483 | evt->rating = 100; |
484 | evt->mult = 1; | 484 | evt->mult = 1; |
485 | evt->set_mode = broadcast_timer_set_mode; | 485 | evt->set_mode = broadcast_timer_set_mode; |
486 | 486 | ||
@@ -673,9 +673,6 @@ static int cpufreq_callback(struct notifier_block *nb, | |||
673 | if (freq->flags & CPUFREQ_CONST_LOOPS) | 673 | if (freq->flags & CPUFREQ_CONST_LOOPS) |
674 | return NOTIFY_OK; | 674 | return NOTIFY_OK; |
675 | 675 | ||
676 | if (arm_delay_ops.const_clock) | ||
677 | return NOTIFY_OK; | ||
678 | |||
679 | if (!per_cpu(l_p_j_ref, cpu)) { | 676 | if (!per_cpu(l_p_j_ref, cpu)) { |
680 | per_cpu(l_p_j_ref, cpu) = | 677 | per_cpu(l_p_j_ref, cpu) = |
681 | per_cpu(cpu_data, cpu).loops_per_jiffy; | 678 | per_cpu(cpu_data, cpu).loops_per_jiffy; |
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index bd0300531399..e82e1d248772 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #include <asm/smp_plat.h> | 13 | #include <asm/smp_plat.h> |
14 | #include <asm/tlbflush.h> | 14 | #include <asm/tlbflush.h> |
15 | #include <asm/mmu_context.h> | ||
15 | 16 | ||
16 | /**********************************************************************/ | 17 | /**********************************************************************/ |
17 | 18 | ||
@@ -69,12 +70,72 @@ static inline void ipi_flush_bp_all(void *ignored) | |||
69 | local_flush_bp_all(); | 70 | local_flush_bp_all(); |
70 | } | 71 | } |
71 | 72 | ||
73 | #ifdef CONFIG_ARM_ERRATA_798181 | ||
74 | static int erratum_a15_798181(void) | ||
75 | { | ||
76 | unsigned int midr = read_cpuid_id(); | ||
77 | |||
78 | /* Cortex-A15 r0p0..r3p2 affected */ | ||
79 | if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2) | ||
80 | return 0; | ||
81 | return 1; | ||
82 | } | ||
83 | #else | ||
84 | static int erratum_a15_798181(void) | ||
85 | { | ||
86 | return 0; | ||
87 | } | ||
88 | #endif | ||
89 | |||
90 | static void ipi_flush_tlb_a15_erratum(void *arg) | ||
91 | { | ||
92 | dmb(); | ||
93 | } | ||
94 | |||
95 | static void broadcast_tlb_a15_erratum(void) | ||
96 | { | ||
97 | if (!erratum_a15_798181()) | ||
98 | return; | ||
99 | |||
100 | dummy_flush_tlb_a15_erratum(); | ||
101 | smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum, | ||
102 | NULL, 1); | ||
103 | } | ||
104 | |||
105 | static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) | ||
106 | { | ||
107 | int cpu; | ||
108 | cpumask_t mask = { CPU_BITS_NONE }; | ||
109 | |||
110 | if (!erratum_a15_798181()) | ||
111 | return; | ||
112 | |||
113 | dummy_flush_tlb_a15_erratum(); | ||
114 | for_each_online_cpu(cpu) { | ||
115 | if (cpu == smp_processor_id()) | ||
116 | continue; | ||
117 | /* | ||
118 | * We only need to send an IPI if the other CPUs are running | ||
119 | * the same ASID as the one being invalidated. There is no | ||
120 | * need for locking around the active_asids check since the | ||
121 | * switch_mm() function has at least one dmb() (as required by | ||
122 | * this workaround) in case a context switch happens on | ||
123 | * another CPU after the condition below. | ||
124 | */ | ||
125 | if (atomic64_read(&mm->context.id) == | ||
126 | atomic64_read(&per_cpu(active_asids, cpu))) | ||
127 | cpumask_set_cpu(cpu, &mask); | ||
128 | } | ||
129 | smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); | ||
130 | } | ||
131 | |||
72 | void flush_tlb_all(void) | 132 | void flush_tlb_all(void) |
73 | { | 133 | { |
74 | if (tlb_ops_need_broadcast()) | 134 | if (tlb_ops_need_broadcast()) |
75 | on_each_cpu(ipi_flush_tlb_all, NULL, 1); | 135 | on_each_cpu(ipi_flush_tlb_all, NULL, 1); |
76 | else | 136 | else |
77 | local_flush_tlb_all(); | 137 | local_flush_tlb_all(); |
138 | broadcast_tlb_a15_erratum(); | ||
78 | } | 139 | } |
79 | 140 | ||
80 | void flush_tlb_mm(struct mm_struct *mm) | 141 | void flush_tlb_mm(struct mm_struct *mm) |
@@ -83,6 +144,7 @@ void flush_tlb_mm(struct mm_struct *mm) | |||
83 | on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1); | 144 | on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1); |
84 | else | 145 | else |
85 | local_flush_tlb_mm(mm); | 146 | local_flush_tlb_mm(mm); |
147 | broadcast_tlb_mm_a15_erratum(mm); | ||
86 | } | 148 | } |
87 | 149 | ||
88 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) | 150 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) |
@@ -95,6 +157,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) | |||
95 | &ta, 1); | 157 | &ta, 1); |
96 | } else | 158 | } else |
97 | local_flush_tlb_page(vma, uaddr); | 159 | local_flush_tlb_page(vma, uaddr); |
160 | broadcast_tlb_mm_a15_erratum(vma->vm_mm); | ||
98 | } | 161 | } |
99 | 162 | ||
100 | void flush_tlb_kernel_page(unsigned long kaddr) | 163 | void flush_tlb_kernel_page(unsigned long kaddr) |
@@ -105,6 +168,7 @@ void flush_tlb_kernel_page(unsigned long kaddr) | |||
105 | on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); | 168 | on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); |
106 | } else | 169 | } else |
107 | local_flush_tlb_kernel_page(kaddr); | 170 | local_flush_tlb_kernel_page(kaddr); |
171 | broadcast_tlb_a15_erratum(); | ||
108 | } | 172 | } |
109 | 173 | ||
110 | void flush_tlb_range(struct vm_area_struct *vma, | 174 | void flush_tlb_range(struct vm_area_struct *vma, |
@@ -119,6 +183,7 @@ void flush_tlb_range(struct vm_area_struct *vma, | |||
119 | &ta, 1); | 183 | &ta, 1); |
120 | } else | 184 | } else |
121 | local_flush_tlb_range(vma, start, end); | 185 | local_flush_tlb_range(vma, start, end); |
186 | broadcast_tlb_mm_a15_erratum(vma->vm_mm); | ||
122 | } | 187 | } |
123 | 188 | ||
124 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) | 189 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) |
@@ -130,6 +195,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
130 | on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); | 195 | on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1); |
131 | } else | 196 | } else |
132 | local_flush_tlb_kernel_range(start, end); | 197 | local_flush_tlb_kernel_range(start, end); |
198 | broadcast_tlb_a15_erratum(); | ||
133 | } | 199 | } |
134 | 200 | ||
135 | void flush_bp_all(void) | 201 | void flush_bp_all(void) |
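The erratum_a15_798181() helper above identifies affected parts by masking the variant and revision fields out of the MIDR, matching the result against the Cortex-A15 base value 0x410fc0f0, and then bounding the raw MIDR at 0x413fc0f2 so only r0p0 through r3p2 are treated as affected. A stand-alone sketch of that revision-range test; the two constants come from the hunk above, while the sample MIDR values are purely illustrative:

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * MIDR layout: [31:24] implementer, [23:20] variant (major rev),
	 * [19:16] architecture, [15:4] part number, [3:0] revision (minor rev).
	 * 0x410fc0f0 is "ARM, Cortex-A15, r0p0"; masking with 0xff0ffff0 ignores
	 * the variant and revision fields so any A15 passes the first test.
	 */
	static bool a15_erratum_798181_affected(unsigned int midr)
	{
		if ((midr & 0xff0ffff0) != 0x410fc0f0)	/* not a Cortex-A15 */
			return false;
		return midr <= 0x413fc0f2;		/* r0p0 .. r3p2 */
	}

	int main(void)
	{
		/* Hypothetical samples: A15 r2p1, A15 r4p0, Cortex-A9 r3p0. */
		unsigned int samples[] = { 0x412fc0f1, 0x414fc0f0, 0x413fc090 };

		for (unsigned int i = 0; i < 3; i++)
			printf("%#x -> %s\n", samples[i],
			       a15_erratum_798181_affected(samples[i]) ? "affected" : "ok");
		return 0;
	}
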
diff --git a/arch/arm/kvm/vgic.c b/arch/arm/kvm/vgic.c index c9a17316e9fe..0e4cfe123b38 100644 --- a/arch/arm/kvm/vgic.c +++ b/arch/arm/kvm/vgic.c | |||
@@ -883,8 +883,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | |||
883 | lr, irq, vgic_cpu->vgic_lr[lr]); | 883 | lr, irq, vgic_cpu->vgic_lr[lr]); |
884 | BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); | 884 | BUG_ON(!test_bit(lr, vgic_cpu->lr_used)); |
885 | vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT; | 885 | vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT; |
886 | 886 | return true; | |
887 | goto out; | ||
888 | } | 887 | } |
889 | 888 | ||
890 | /* Try to use another LR for this interrupt */ | 889 | /* Try to use another LR for this interrupt */ |
@@ -898,7 +897,6 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq) | |||
898 | vgic_cpu->vgic_irq_lr_map[irq] = lr; | 897 | vgic_cpu->vgic_irq_lr_map[irq] = lr; |
899 | set_bit(lr, vgic_cpu->lr_used); | 898 | set_bit(lr, vgic_cpu->lr_used); |
900 | 899 | ||
901 | out: | ||
902 | if (!vgic_irq_is_edge(vcpu, irq)) | 900 | if (!vgic_irq_is_edge(vcpu, irq)) |
903 | vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI; | 901 | vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI; |
904 | 902 | ||
@@ -1018,21 +1016,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
1018 | 1016 | ||
1019 | kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr); | 1017 | kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr); |
1020 | 1018 | ||
1021 | /* | ||
1022 | * We do not need to take the distributor lock here, since the only | ||
1023 | * action we perform is clearing the irq_active_bit for an EOIed | ||
1024 | * level interrupt. There is a potential race with | ||
1025 | * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we | ||
1026 | * check if the interrupt is already active. Two possibilities: | ||
1027 | * | ||
1028 | * - The queuing is occurring on the same vcpu: cannot happen, | ||
1029 | * as we're already in the context of this vcpu, and | ||
1030 | * executing the handler | ||
1031 | * - The interrupt has been migrated to another vcpu, and we | ||
1032 | * ignore this interrupt for this run. Big deal. It is still | ||
1033 | * pending though, and will get considered when this vcpu | ||
1034 | * exits. | ||
1035 | */ | ||
1036 | if (vgic_cpu->vgic_misr & GICH_MISR_EOI) { | 1019 | if (vgic_cpu->vgic_misr & GICH_MISR_EOI) { |
1037 | /* | 1020 | /* |
1038 | * Some level interrupts have been EOIed. Clear their | 1021 | * Some level interrupts have been EOIed. Clear their |
@@ -1054,6 +1037,13 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
1054 | } else { | 1037 | } else { |
1055 | vgic_cpu_irq_clear(vcpu, irq); | 1038 | vgic_cpu_irq_clear(vcpu, irq); |
1056 | } | 1039 | } |
1040 | |||
1041 | /* | ||
1042 | * Despite being EOIed, the LR may not have | ||
1043 | * been marked as empty. | ||
1044 | */ | ||
1045 | set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr); | ||
1046 | vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT; | ||
1057 | } | 1047 | } |
1058 | } | 1048 | } |
1059 | 1049 | ||
@@ -1064,9 +1054,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu) | |||
1064 | } | 1054 | } |
1065 | 1055 | ||
1066 | /* | 1056 | /* |
1067 | * Sync back the VGIC state after a guest run. We do not really touch | 1057 | * Sync back the VGIC state after a guest run. The distributor lock is |
1068 | * the distributor here (the irq_pending_on_cpu bit is safe to set), | 1058 | * needed so we don't get preempted in the middle of the state processing. |
1069 | * so there is no need for taking its lock. | ||
1070 | */ | 1059 | */ |
1071 | static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) | 1060 | static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) |
1072 | { | 1061 | { |
@@ -1112,10 +1101,14 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |||
1112 | 1101 | ||
1113 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) | 1102 | void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) |
1114 | { | 1103 | { |
1104 | struct vgic_dist *dist = &vcpu->kvm->arch.vgic; | ||
1105 | |||
1115 | if (!irqchip_in_kernel(vcpu->kvm)) | 1106 | if (!irqchip_in_kernel(vcpu->kvm)) |
1116 | return; | 1107 | return; |
1117 | 1108 | ||
1109 | spin_lock(&dist->lock); | ||
1118 | __kvm_vgic_sync_hwstate(vcpu); | 1110 | __kvm_vgic_sync_hwstate(vcpu); |
1111 | spin_unlock(&dist->lock); | ||
1119 | } | 1112 | } |
1120 | 1113 | ||
1121 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | 1114 | int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) |
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c index 6b93f6a1a3c7..64dbfa57204a 100644 --- a/arch/arm/lib/delay.c +++ b/arch/arm/lib/delay.c | |||
@@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles) | |||
58 | static void __timer_const_udelay(unsigned long xloops) | 58 | static void __timer_const_udelay(unsigned long xloops) |
59 | { | 59 | { |
60 | unsigned long long loops = xloops; | 60 | unsigned long long loops = xloops; |
61 | loops *= loops_per_jiffy; | 61 | loops *= arm_delay_ops.ticks_per_jiffy; |
62 | __timer_delay(loops >> UDELAY_SHIFT); | 62 | __timer_delay(loops >> UDELAY_SHIFT); |
63 | } | 63 | } |
64 | 64 | ||
@@ -73,11 +73,13 @@ void __init register_current_timer_delay(const struct delay_timer *timer) | |||
73 | pr_info("Switching to timer-based delay loop\n"); | 73 | pr_info("Switching to timer-based delay loop\n"); |
74 | delay_timer = timer; | 74 | delay_timer = timer; |
75 | lpj_fine = timer->freq / HZ; | 75 | lpj_fine = timer->freq / HZ; |
76 | loops_per_jiffy = lpj_fine; | 76 | |
77 | /* cpufreq may scale loops_per_jiffy, so keep a private copy */ | ||
78 | arm_delay_ops.ticks_per_jiffy = lpj_fine; | ||
77 | arm_delay_ops.delay = __timer_delay; | 79 | arm_delay_ops.delay = __timer_delay; |
78 | arm_delay_ops.const_udelay = __timer_const_udelay; | 80 | arm_delay_ops.const_udelay = __timer_const_udelay; |
79 | arm_delay_ops.udelay = __timer_udelay; | 81 | arm_delay_ops.udelay = __timer_udelay; |
80 | arm_delay_ops.const_clock = true; | 82 | |
81 | delay_calibrated = true; | 83 | delay_calibrated = true; |
82 | } else { | 84 | } else { |
83 | pr_info("Ignoring duplicate/late registration of read_current_timer delay\n"); | 85 | pr_info("Ignoring duplicate/late registration of read_current_timer delay\n"); |
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S index d912e7397ecc..94b0650ea98f 100644 --- a/arch/arm/lib/memset.S +++ b/arch/arm/lib/memset.S | |||
@@ -14,31 +14,15 @@ | |||
14 | 14 | ||
15 | .text | 15 | .text |
16 | .align 5 | 16 | .align 5 |
17 | .word 0 | ||
18 | |||
19 | 1: subs r2, r2, #4 @ 1 do we have enough | ||
20 | blt 5f @ 1 bytes to align with? | ||
21 | cmp r3, #2 @ 1 | ||
22 | strltb r1, [ip], #1 @ 1 | ||
23 | strleb r1, [ip], #1 @ 1 | ||
24 | strb r1, [ip], #1 @ 1 | ||
25 | add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) | ||
26 | /* | ||
27 | * The pointer is now aligned and the length is adjusted. Try doing the | ||
28 | * memset again. | ||
29 | */ | ||
30 | 17 | ||
31 | ENTRY(memset) | 18 | ENTRY(memset) |
32 | /* | 19 | ands r3, r0, #3 @ 1 unaligned? |
33 | * Preserve the contents of r0 for the return value. | 20 | mov ip, r0 @ preserve r0 as return value |
34 | */ | 21 | bne 6f @ 1 |
35 | mov ip, r0 | ||
36 | ands r3, ip, #3 @ 1 unaligned? | ||
37 | bne 1b @ 1 | ||
38 | /* | 22 | /* |
39 | * we know that the pointer in ip is aligned to a word boundary. | 23 | * we know that the pointer in ip is aligned to a word boundary. |
40 | */ | 24 | */ |
41 | orr r1, r1, r1, lsl #8 | 25 | 1: orr r1, r1, r1, lsl #8 |
42 | orr r1, r1, r1, lsl #16 | 26 | orr r1, r1, r1, lsl #16 |
43 | mov r3, r1 | 27 | mov r3, r1 |
44 | cmp r2, #16 | 28 | cmp r2, #16 |
@@ -127,4 +111,13 @@ ENTRY(memset) | |||
127 | tst r2, #1 | 111 | tst r2, #1 |
128 | strneb r1, [ip], #1 | 112 | strneb r1, [ip], #1 |
129 | mov pc, lr | 113 | mov pc, lr |
114 | |||
115 | 6: subs r2, r2, #4 @ 1 do we have enough | ||
116 | blt 5b @ 1 bytes to align with? | ||
117 | cmp r3, #2 @ 1 | ||
118 | strltb r1, [ip], #1 @ 1 | ||
119 | strleb r1, [ip], #1 @ 1 | ||
120 | strb r1, [ip], #1 @ 1 | ||
121 | add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) | ||
122 | b 1b | ||
130 | ENDPROC(memset) | 123 | ENDPROC(memset) |
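The memset rework above moves the unaligned-head handling to the end of the routine (label 6) so the entry path no longer trashes r0, which the caller expects back as the return value; the head stores at most three bytes to reach word alignment and then branches back into the word-filling loop. A plain C sketch of that align-then-word-fill structure, kept deliberately simple (the real routine adds large unrolled store blocks):

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Align-then-word-fill memset: byte stores until 4-byte aligned, then words. */
	static void *memset_sketch(void *dst, int c, size_t n)
	{
		unsigned char *p = dst;			/* keep dst intact for return */
		uint32_t pattern = (unsigned char)c;

		pattern |= pattern << 8;
		pattern |= pattern << 16;

		/* Head: at most 3 byte stores to reach word alignment. */
		while (n && ((uintptr_t)p & 3)) {
			*p++ = (unsigned char)c;
			n--;
		}

		/* Body: aligned word stores. */
		while (n >= 4) {
			*(uint32_t *)p = pattern;
			p += 4;
			n -= 4;
		}

		/* Tail: remaining 0..3 bytes. */
		while (n--)
			*p++ = (unsigned char)c;

		return dst;
	}

	int main(void)
	{
		uint32_t words[8] = { 0 };
		unsigned char *buf = (unsigned char *)words;

		memset_sketch(buf + 1, 0xAB, 13);	/* deliberately unaligned start */
		assert(buf[0] == 0 && buf[1] == 0xAB && buf[13] == 0xAB && buf[14] == 0);
		printf("memset_sketch ok\n");
		return 0;
	}
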
diff --git a/arch/arm/mach-at91/include/mach/gpio.h b/arch/arm/mach-at91/include/mach/gpio.h index eed465ab0dd7..5fc23771c154 100644 --- a/arch/arm/mach-at91/include/mach/gpio.h +++ b/arch/arm/mach-at91/include/mach/gpio.h | |||
@@ -209,6 +209,14 @@ extern int at91_get_gpio_value(unsigned pin); | |||
209 | extern void at91_gpio_suspend(void); | 209 | extern void at91_gpio_suspend(void); |
210 | extern void at91_gpio_resume(void); | 210 | extern void at91_gpio_resume(void); |
211 | 211 | ||
212 | #ifdef CONFIG_PINCTRL_AT91 | ||
213 | extern void at91_pinctrl_gpio_suspend(void); | ||
214 | extern void at91_pinctrl_gpio_resume(void); | ||
215 | #else | ||
216 | static inline void at91_pinctrl_gpio_suspend(void) {} | ||
217 | static inline void at91_pinctrl_gpio_resume(void) {} | ||
218 | #endif | ||
219 | |||
212 | #endif /* __ASSEMBLY__ */ | 220 | #endif /* __ASSEMBLY__ */ |
213 | 221 | ||
214 | #endif | 222 | #endif |
diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c index 8e210262aeee..e0ca59171022 100644 --- a/arch/arm/mach-at91/irq.c +++ b/arch/arm/mach-at91/irq.c | |||
@@ -92,23 +92,21 @@ static int at91_aic_set_wake(struct irq_data *d, unsigned value) | |||
92 | 92 | ||
93 | void at91_irq_suspend(void) | 93 | void at91_irq_suspend(void) |
94 | { | 94 | { |
95 | int i = 0, bit; | 95 | int bit = -1; |
96 | 96 | ||
97 | if (has_aic5()) { | 97 | if (has_aic5()) { |
98 | /* disable enabled irqs */ | 98 | /* disable enabled irqs */ |
99 | while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) { | 99 | while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) { |
100 | at91_aic_write(AT91_AIC5_SSR, | 100 | at91_aic_write(AT91_AIC5_SSR, |
101 | bit & AT91_AIC5_INTSEL_MSK); | 101 | bit & AT91_AIC5_INTSEL_MSK); |
102 | at91_aic_write(AT91_AIC5_IDCR, 1); | 102 | at91_aic_write(AT91_AIC5_IDCR, 1); |
103 | i = bit; | ||
104 | } | 103 | } |
105 | /* enable wakeup irqs */ | 104 | /* enable wakeup irqs */ |
106 | i = 0; | 105 | bit = -1; |
107 | while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) { | 106 | while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) { |
108 | at91_aic_write(AT91_AIC5_SSR, | 107 | at91_aic_write(AT91_AIC5_SSR, |
109 | bit & AT91_AIC5_INTSEL_MSK); | 108 | bit & AT91_AIC5_INTSEL_MSK); |
110 | at91_aic_write(AT91_AIC5_IECR, 1); | 109 | at91_aic_write(AT91_AIC5_IECR, 1); |
111 | i = bit; | ||
112 | } | 110 | } |
113 | } else { | 111 | } else { |
114 | at91_aic_write(AT91_AIC_IDCR, *backups); | 112 | at91_aic_write(AT91_AIC_IDCR, *backups); |
@@ -118,23 +116,21 @@ void at91_irq_suspend(void) | |||
118 | 116 | ||
119 | void at91_irq_resume(void) | 117 | void at91_irq_resume(void) |
120 | { | 118 | { |
121 | int i = 0, bit; | 119 | int bit = -1; |
122 | 120 | ||
123 | if (has_aic5()) { | 121 | if (has_aic5()) { |
124 | /* disable wakeup irqs */ | 122 | /* disable wakeup irqs */ |
125 | while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) { | 123 | while ((bit = find_next_bit(wakeups, n_irqs, bit + 1)) < n_irqs) { |
126 | at91_aic_write(AT91_AIC5_SSR, | 124 | at91_aic_write(AT91_AIC5_SSR, |
127 | bit & AT91_AIC5_INTSEL_MSK); | 125 | bit & AT91_AIC5_INTSEL_MSK); |
128 | at91_aic_write(AT91_AIC5_IDCR, 1); | 126 | at91_aic_write(AT91_AIC5_IDCR, 1); |
129 | i = bit; | ||
130 | } | 127 | } |
131 | /* enable irqs disabled for suspend */ | 128 | /* enable irqs disabled for suspend */ |
132 | i = 0; | 129 | bit = -1; |
133 | while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) { | 130 | while ((bit = find_next_bit(backups, n_irqs, bit + 1)) < n_irqs) { |
134 | at91_aic_write(AT91_AIC5_SSR, | 131 | at91_aic_write(AT91_AIC5_SSR, |
135 | bit & AT91_AIC5_INTSEL_MSK); | 132 | bit & AT91_AIC5_INTSEL_MSK); |
136 | at91_aic_write(AT91_AIC5_IECR, 1); | 133 | at91_aic_write(AT91_AIC5_IECR, 1); |
137 | i = bit; | ||
138 | } | 134 | } |
139 | } else { | 135 | } else { |
140 | at91_aic_write(AT91_AIC_IDCR, *wakeups); | 136 | at91_aic_write(AT91_AIC_IDCR, *wakeups); |
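The at91 suspend/resume change above fixes the find_next_bit() walk: restarting the search at bit rather than bit + 1 would return the same set bit forever once one was found, so the loop now seeds the cursor with -1 and always advances past the previous hit. A small sketch of the corrected iteration pattern over an ordinary bitmask, with a hypothetical mask value (find_next_bit itself is kernel-only, so the sketch open-codes the scan):

	#include <stdio.h>

	#define N_IRQS 32

	/* Minimal stand-in for find_next_bit(): first set bit at or after 'start'. */
	static int next_bit(unsigned int mask, int start, int nbits)
	{
		for (int bit = start; bit < nbits; bit++)
			if (mask & (1u << bit))
				return bit;
		return nbits;			/* "not found", like the kernel helper */
	}

	int main(void)
	{
		unsigned int wakeups = (1u << 0) | (1u << 3) | (1u << 17);	/* sample */
		int bit = -1;			/* seed so the first search starts at 0 */

		/* Corrected pattern: always resume the search after the previous hit. */
		while ((bit = next_bit(wakeups, bit + 1, N_IRQS)) < N_IRQS)
			printf("enable wakeup irq %d\n", bit);

		return 0;
	}
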
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index adb6db888a1f..73f1f250403a 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c | |||
@@ -201,7 +201,10 @@ extern u32 at91_slow_clock_sz; | |||
201 | 201 | ||
202 | static int at91_pm_enter(suspend_state_t state) | 202 | static int at91_pm_enter(suspend_state_t state) |
203 | { | 203 | { |
204 | at91_gpio_suspend(); | 204 | if (of_have_populated_dt()) |
205 | at91_pinctrl_gpio_suspend(); | ||
206 | else | ||
207 | at91_gpio_suspend(); | ||
205 | at91_irq_suspend(); | 208 | at91_irq_suspend(); |
206 | 209 | ||
207 | pr_debug("AT91: PM - wake mask %08x, pm state %d\n", | 210 | pr_debug("AT91: PM - wake mask %08x, pm state %d\n", |
@@ -286,7 +289,10 @@ static int at91_pm_enter(suspend_state_t state) | |||
286 | error: | 289 | error: |
287 | target_state = PM_SUSPEND_ON; | 290 | target_state = PM_SUSPEND_ON; |
288 | at91_irq_resume(); | 291 | at91_irq_resume(); |
289 | at91_gpio_resume(); | 292 | if (of_have_populated_dt()) |
293 | at91_pinctrl_gpio_resume(); | ||
294 | else | ||
295 | at91_gpio_resume(); | ||
290 | return 0; | 296 | return 0; |
291 | } | 297 | } |
292 | 298 | ||
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c index e698f26cc0cb..52e4bb5cf12d 100644 --- a/arch/arm/mach-cns3xxx/core.c +++ b/arch/arm/mach-cns3xxx/core.c | |||
@@ -22,19 +22,9 @@ | |||
22 | 22 | ||
23 | static struct map_desc cns3xxx_io_desc[] __initdata = { | 23 | static struct map_desc cns3xxx_io_desc[] __initdata = { |
24 | { | 24 | { |
25 | .virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT, | 25 | .virtual = CNS3XXX_TC11MP_SCU_BASE_VIRT, |
26 | .pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE), | 26 | .pfn = __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE), |
27 | .length = SZ_4K, | 27 | .length = SZ_8K, |
28 | .type = MT_DEVICE, | ||
29 | }, { | ||
30 | .virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT, | ||
31 | .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE), | ||
32 | .length = SZ_4K, | ||
33 | .type = MT_DEVICE, | ||
34 | }, { | ||
35 | .virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT, | ||
36 | .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE), | ||
37 | .length = SZ_4K, | ||
38 | .type = MT_DEVICE, | 28 | .type = MT_DEVICE, |
39 | }, { | 29 | }, { |
40 | .virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT, | 30 | .virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT, |
diff --git a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h index 191c8e57f289..b1021aafa481 100644 --- a/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h +++ b/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h | |||
@@ -94,10 +94,10 @@ | |||
94 | #define RTC_INTR_STS_OFFSET 0x34 | 94 | #define RTC_INTR_STS_OFFSET 0x34 |
95 | 95 | ||
96 | #define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */ | 96 | #define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */ |
97 | #define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */ | 97 | #define CNS3XXX_MISC_BASE_VIRT 0xFB000000 /* Misc Control */ |
98 | 98 | ||
99 | #define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */ | 99 | #define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */ |
100 | #define CNS3XXX_PM_BASE_VIRT 0xFFF08000 | 100 | #define CNS3XXX_PM_BASE_VIRT 0xFB001000 |
101 | 101 | ||
102 | #define PM_CLK_GATE_OFFSET 0x00 | 102 | #define PM_CLK_GATE_OFFSET 0x00 |
103 | #define PM_SOFT_RST_OFFSET 0x04 | 103 | #define PM_SOFT_RST_OFFSET 0x04 |
@@ -109,7 +109,7 @@ | |||
109 | #define PM_PLL_HM_PD_OFFSET 0x1C | 109 | #define PM_PLL_HM_PD_OFFSET 0x1C |
110 | 110 | ||
111 | #define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */ | 111 | #define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */ |
112 | #define CNS3XXX_UART0_BASE_VIRT 0xFFF09000 | 112 | #define CNS3XXX_UART0_BASE_VIRT 0xFB002000 |
113 | 113 | ||
114 | #define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */ | 114 | #define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */ |
115 | #define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000 | 115 | #define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000 |
@@ -130,7 +130,7 @@ | |||
130 | #define CNS3XXX_I2S_BASE_VIRT 0xFFF10000 | 130 | #define CNS3XXX_I2S_BASE_VIRT 0xFFF10000 |
131 | 131 | ||
132 | #define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */ | 132 | #define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */ |
133 | #define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800 | 133 | #define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFB003000 |
134 | 134 | ||
135 | #define TIMER1_COUNTER_OFFSET 0x00 | 135 | #define TIMER1_COUNTER_OFFSET 0x00 |
136 | #define TIMER1_AUTO_RELOAD_OFFSET 0x04 | 136 | #define TIMER1_AUTO_RELOAD_OFFSET 0x04 |
@@ -227,16 +227,16 @@ | |||
227 | * Testchip peripheral and fpga gic regions | 227 | * Testchip peripheral and fpga gic regions |
228 | */ | 228 | */ |
229 | #define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */ | 229 | #define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */ |
230 | #define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000 | 230 | #define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFB004000 |
231 | 231 | ||
232 | #define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */ | 232 | #define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */ |
233 | #define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100 | 233 | #define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100) |
234 | 234 | ||
235 | #define CNS3XXX_TC11MP_TWD_BASE 0x90000600 | 235 | #define CNS3XXX_TC11MP_TWD_BASE 0x90000600 |
236 | #define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600 | 236 | #define CNS3XXX_TC11MP_TWD_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600) |
237 | 237 | ||
238 | #define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */ | 238 | #define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */ |
239 | #define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000 | 239 | #define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000) |
240 | 240 | ||
241 | #define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */ | 241 | #define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */ |
242 | #define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000 | 242 | #define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000 |
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c index a685e9706b7b..45b7c71d9cc1 100644 --- a/arch/arm/mach-davinci/dma.c +++ b/arch/arm/mach-davinci/dma.c | |||
@@ -743,6 +743,9 @@ EXPORT_SYMBOL(edma_free_channel); | |||
743 | */ | 743 | */ |
744 | int edma_alloc_slot(unsigned ctlr, int slot) | 744 | int edma_alloc_slot(unsigned ctlr, int slot) |
745 | { | 745 | { |
746 | if (!edma_cc[ctlr]) | ||
747 | return -EINVAL; | ||
748 | |||
746 | if (slot >= 0) | 749 | if (slot >= 0) |
747 | slot = EDMA_CHAN_SLOT(slot); | 750 | slot = EDMA_CHAN_SLOT(slot); |
748 | 751 | ||
diff --git a/arch/arm/mach-ep93xx/include/mach/uncompress.h b/arch/arm/mach-ep93xx/include/mach/uncompress.h index d2afb4dd82ab..b5cc77d2380b 100644 --- a/arch/arm/mach-ep93xx/include/mach/uncompress.h +++ b/arch/arm/mach-ep93xx/include/mach/uncompress.h | |||
@@ -47,9 +47,13 @@ static void __raw_writel(unsigned int value, unsigned int ptr) | |||
47 | 47 | ||
48 | static inline void putc(int c) | 48 | static inline void putc(int c) |
49 | { | 49 | { |
50 | /* Transmit fifo not full? */ | 50 | int i; |
51 | while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF) | 51 | |
52 | ; | 52 | for (i = 0; i < 10000; i++) { |
53 | /* Transmit fifo not full? */ | ||
54 | if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)) | ||
55 | break; | ||
56 | } | ||
53 | 57 | ||
54 | __raw_writeb(c, PHYS_UART_DATA); | 58 | __raw_writeb(c, PHYS_UART_DATA); |
55 | } | 59 | } |
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig index abda5a18a664..0f2111a11315 100644 --- a/arch/arm/mach-footbridge/Kconfig +++ b/arch/arm/mach-footbridge/Kconfig | |||
@@ -67,6 +67,7 @@ config ARCH_NETWINDER | |||
67 | select ISA | 67 | select ISA |
68 | select ISA_DMA | 68 | select ISA_DMA |
69 | select PCI | 69 | select PCI |
70 | select VIRT_TO_BUS | ||
70 | help | 71 | help |
71 | Say Y here if you intend to run this kernel on the Rebel.COM | 72 | Say Y here if you intend to run this kernel on the Rebel.COM |
72 | NetWinder. Information about this machine can be found at: | 73 | NetWinder. Information about this machine can be found at: |
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c index 74e3a34d78b8..2193c834f55c 100644 --- a/arch/arm/mach-imx/clk-imx35.c +++ b/arch/arm/mach-imx/clk-imx35.c | |||
@@ -257,6 +257,7 @@ int __init mx35_clocks_init(void) | |||
257 | clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); | 257 | clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); |
258 | clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); | 258 | clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); |
259 | clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); | 259 | clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); |
260 | clk_register_clkdev(clk[admux_gate], "audmux", NULL); | ||
260 | 261 | ||
261 | clk_prepare_enable(clk[spba_gate]); | 262 | clk_prepare_enable(clk[spba_gate]); |
262 | clk_prepare_enable(clk[gpio1_gate]); | 263 | clk_prepare_enable(clk[gpio1_gate]); |
@@ -264,6 +265,8 @@ int __init mx35_clocks_init(void) | |||
264 | clk_prepare_enable(clk[gpio3_gate]); | 265 | clk_prepare_enable(clk[gpio3_gate]); |
265 | clk_prepare_enable(clk[iim_gate]); | 266 | clk_prepare_enable(clk[iim_gate]); |
266 | clk_prepare_enable(clk[emi_gate]); | 267 | clk_prepare_enable(clk[emi_gate]); |
268 | clk_prepare_enable(clk[max_gate]); | ||
269 | clk_prepare_enable(clk[iomuxc_gate]); | ||
267 | 270 | ||
268 | /* | 271 | /* |
269 | * SCC is needed to boot via mmc after a watchdog reset. The clock code | 272 | * SCC is needed to boot via mmc after a watchdog reset. The clock code |
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index 2f9ff93a4e61..d38e54f5b6d7 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c | |||
@@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m" | |||
115 | static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; | 115 | static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; |
116 | static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", }; | 116 | static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", }; |
117 | static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; | 117 | static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; |
118 | static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", }; | 118 | static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", }; |
119 | static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; | 119 | static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; |
120 | static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; | 120 | static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; |
121 | static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; | 121 | static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; |
@@ -443,7 +443,6 @@ int __init mx6q_clocks_init(void) | |||
443 | 443 | ||
444 | clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0"); | 444 | clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0"); |
445 | clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); | 445 | clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0"); |
446 | clk_register_clkdev(clk[twd], NULL, "smp_twd"); | ||
447 | clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL); | 446 | clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL); |
448 | clk_register_clkdev(clk[ahb], "ahb", NULL); | 447 | clk_register_clkdev(clk[ahb], "ahb", NULL); |
449 | clk_register_clkdev(clk[cko1], "cko1", NULL); | 448 | clk_register_clkdev(clk[cko1], "cko1", NULL); |
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h index 5a800bfcec5b..5bf4a97ab241 100644 --- a/arch/arm/mach-imx/common.h +++ b/arch/arm/mach-imx/common.h | |||
@@ -110,6 +110,8 @@ void tzic_handle_irq(struct pt_regs *); | |||
110 | 110 | ||
111 | extern void imx_enable_cpu(int cpu, bool enable); | 111 | extern void imx_enable_cpu(int cpu, bool enable); |
112 | extern void imx_set_cpu_jump(int cpu, void *jump_addr); | 112 | extern void imx_set_cpu_jump(int cpu, void *jump_addr); |
113 | extern u32 imx_get_cpu_arg(int cpu); | ||
114 | extern void imx_set_cpu_arg(int cpu, u32 arg); | ||
113 | extern void v7_cpu_resume(void); | 115 | extern void v7_cpu_resume(void); |
114 | extern u32 *pl310_get_save_ptr(void); | 116 | extern u32 *pl310_get_save_ptr(void); |
115 | #ifdef CONFIG_SMP | 117 | #ifdef CONFIG_SMP |
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c index 7bc5fe15dda2..361a253e2b63 100644 --- a/arch/arm/mach-imx/hotplug.c +++ b/arch/arm/mach-imx/hotplug.c | |||
@@ -46,11 +46,23 @@ static inline void cpu_enter_lowpower(void) | |||
46 | void imx_cpu_die(unsigned int cpu) | 46 | void imx_cpu_die(unsigned int cpu) |
47 | { | 47 | { |
48 | cpu_enter_lowpower(); | 48 | cpu_enter_lowpower(); |
49 | /* | ||
50 | * We use the cpu jumping argument register to sync with | ||
51 | * imx_cpu_kill() which is running on cpu0 and waiting for | ||
52 | * the register being cleared to kill the cpu. | ||
53 | */ | ||
54 | imx_set_cpu_arg(cpu, ~0); | ||
49 | cpu_do_idle(); | 55 | cpu_do_idle(); |
50 | } | 56 | } |
51 | 57 | ||
52 | int imx_cpu_kill(unsigned int cpu) | 58 | int imx_cpu_kill(unsigned int cpu) |
53 | { | 59 | { |
60 | unsigned long timeout = jiffies + msecs_to_jiffies(50); | ||
61 | |||
62 | while (imx_get_cpu_arg(cpu) == 0) | ||
63 | if (time_after(jiffies, timeout)) | ||
64 | return 0; | ||
54 | imx_enable_cpu(cpu, false); | 65 | imx_enable_cpu(cpu, false); |
66 | imx_set_cpu_arg(cpu, 0); | ||
55 | return 1; | 67 | return 1; |
56 | } | 68 | } |
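The hotplug change above turns the jump-argument register into a handshake: the dying CPU writes ~0 there before idling, and imx_cpu_kill() on CPU0 busy-waits, bounded by a 50 ms jiffies timeout, for that marker before cutting power and clearing the register. A user-space sketch of the same bounded-polling handshake, using a shared flag and a monotonic-clock deadline in place of the SRC register and jiffies; all names here are illustrative:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	static atomic_uint cpu_arg;		/* stands in for the SRC GPR slot */

	static long long now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return ts.tv_sec * 1000000000LL + ts.tv_nsec;
	}

	/* Dying CPU: signal "safe to kill", then park. */
	static void *dying_cpu(void *unused)
	{
		(void)unused;
		usleep(10000);				/* pretend to finish cleanup */
		atomic_store(&cpu_arg, ~0u);		/* imx_set_cpu_arg(cpu, ~0) */
		pause();				/* cpu_do_idle() stand-in */
		return NULL;
	}

	/* CPU0: poll for the marker, but give up after ~50 ms. */
	static int cpu_kill(void)
	{
		long long deadline = now_ns() + 50 * 1000000LL;	/* 50 ms, like the patch */

		while (atomic_load(&cpu_arg) == 0)
			if (now_ns() > deadline)
				return 0;		/* timed out, refuse to kill */
		atomic_store(&cpu_arg, 0);		/* imx_set_cpu_arg(cpu, 0) */
		return 1;				/* safe to power the core down */
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, dying_cpu, NULL);
		printf("cpu_kill() -> %d\n", cpu_kill());
		return 0;
	}
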
diff --git a/arch/arm/mach-imx/imx25-dt.c b/arch/arm/mach-imx/imx25-dt.c index 03b65e5ea541..82348391582a 100644 --- a/arch/arm/mach-imx/imx25-dt.c +++ b/arch/arm/mach-imx/imx25-dt.c | |||
@@ -27,6 +27,11 @@ static const char * const imx25_dt_board_compat[] __initconst = { | |||
27 | NULL | 27 | NULL |
28 | }; | 28 | }; |
29 | 29 | ||
30 | static void __init imx25_timer_init(void) | ||
31 | { | ||
32 | mx25_clocks_init_dt(); | ||
33 | } | ||
34 | |||
30 | DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)") | 35 | DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)") |
31 | .map_io = mx25_map_io, | 36 | .map_io = mx25_map_io, |
32 | .init_early = imx25_init_early, | 37 | .init_early = imx25_init_early, |
diff --git a/arch/arm/mach-imx/src.c b/arch/arm/mach-imx/src.c index e15f1555c59b..09a742f8c7ab 100644 --- a/arch/arm/mach-imx/src.c +++ b/arch/arm/mach-imx/src.c | |||
@@ -43,6 +43,18 @@ void imx_set_cpu_jump(int cpu, void *jump_addr) | |||
43 | src_base + SRC_GPR1 + cpu * 8); | 43 | src_base + SRC_GPR1 + cpu * 8); |
44 | } | 44 | } |
45 | 45 | ||
46 | u32 imx_get_cpu_arg(int cpu) | ||
47 | { | ||
48 | cpu = cpu_logical_map(cpu); | ||
49 | return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4); | ||
50 | } | ||
51 | |||
52 | void imx_set_cpu_arg(int cpu, u32 arg) | ||
53 | { | ||
54 | cpu = cpu_logical_map(cpu); | ||
55 | writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4); | ||
56 | } | ||
57 | |||
46 | void imx_src_prepare_restart(void) | 58 | void imx_src_prepare_restart(void) |
47 | { | 59 | { |
48 | u32 val; | 60 | u32 val; |
diff --git a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c index f655b2637b0e..e5f70415905a 100644 --- a/arch/arm/mach-kirkwood/board-iomega_ix2_200.c +++ b/arch/arm/mach-kirkwood/board-iomega_ix2_200.c | |||
@@ -20,10 +20,15 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = { | |||
20 | .duplex = DUPLEX_FULL, | 20 | .duplex = DUPLEX_FULL, |
21 | }; | 21 | }; |
22 | 22 | ||
23 | static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = { | ||
24 | .phy_addr = MV643XX_ETH_PHY_ADDR(11), | ||
25 | }; | ||
26 | |||
23 | void __init iomega_ix2_200_init(void) | 27 | void __init iomega_ix2_200_init(void) |
24 | { | 28 | { |
25 | /* | 29 | /* |
26 | * Basic setup. Needs to be called early. | 30 | * Basic setup. Needs to be called early. |
27 | */ | 31 | */ |
28 | kirkwood_ge01_init(&iomega_ix2_200_ge00_data); | 32 | kirkwood_ge00_init(&iomega_ix2_200_ge00_data); |
33 | kirkwood_ge01_init(&iomega_ix2_200_ge01_data); | ||
29 | } | 34 | } |
diff --git a/arch/arm/mach-kirkwood/guruplug-setup.c b/arch/arm/mach-kirkwood/guruplug-setup.c index 1c6e736cbbf8..08dd739aa709 100644 --- a/arch/arm/mach-kirkwood/guruplug-setup.c +++ b/arch/arm/mach-kirkwood/guruplug-setup.c | |||
@@ -53,6 +53,8 @@ static struct mv_sata_platform_data guruplug_sata_data = { | |||
53 | 53 | ||
54 | static struct mvsdio_platform_data guruplug_mvsdio_data = { | 54 | static struct mvsdio_platform_data guruplug_mvsdio_data = { |
55 | /* unfortunately the CD signal has not been connected */ | 55 | /* unfortunately the CD signal has not been connected */ |
56 | .gpio_card_detect = -1, | ||
57 | .gpio_write_protect = -1, | ||
56 | }; | 58 | }; |
57 | 59 | ||
58 | static struct gpio_led guruplug_led_pins[] = { | 60 | static struct gpio_led guruplug_led_pins[] = { |
diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c index 8ddd69fdc937..6a6eb548307d 100644 --- a/arch/arm/mach-kirkwood/openrd-setup.c +++ b/arch/arm/mach-kirkwood/openrd-setup.c | |||
@@ -55,6 +55,7 @@ static struct mv_sata_platform_data openrd_sata_data = { | |||
55 | 55 | ||
56 | static struct mvsdio_platform_data openrd_mvsdio_data = { | 56 | static struct mvsdio_platform_data openrd_mvsdio_data = { |
57 | .gpio_card_detect = 29, /* MPP29 used as SD card detect */ | 57 | .gpio_card_detect = 29, /* MPP29 used as SD card detect */ |
58 | .gpio_write_protect = -1, | ||
58 | }; | 59 | }; |
59 | 60 | ||
60 | static unsigned int openrd_mpp_config[] __initdata = { | 61 | static unsigned int openrd_mpp_config[] __initdata = { |
diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c index c7d93b48926b..d24223166e06 100644 --- a/arch/arm/mach-kirkwood/rd88f6281-setup.c +++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c | |||
@@ -69,6 +69,7 @@ static struct mv_sata_platform_data rd88f6281_sata_data = { | |||
69 | 69 | ||
70 | static struct mvsdio_platform_data rd88f6281_mvsdio_data = { | 70 | static struct mvsdio_platform_data rd88f6281_mvsdio_data = { |
71 | .gpio_card_detect = 28, | 71 | .gpio_card_detect = 28, |
72 | .gpio_write_protect = -1, | ||
72 | }; | 73 | }; |
73 | 74 | ||
74 | static unsigned int rd88f6281_mpp_config[] __initdata = { | 75 | static unsigned int rd88f6281_mpp_config[] __initdata = { |
diff --git a/arch/arm/mach-mmp/gplugd.c b/arch/arm/mach-mmp/gplugd.c index d1e2d595e79c..f62b68d926f4 100644 --- a/arch/arm/mach-mmp/gplugd.c +++ b/arch/arm/mach-mmp/gplugd.c | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/platform_device.h> | ||
12 | #include <linux/gpio.h> | 13 | #include <linux/gpio.h> |
13 | 14 | ||
14 | #include <asm/mach/arch.h> | 15 | #include <asm/mach/arch.h> |
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 2969027f02fa..f9fd77e8f1f5 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c | |||
@@ -62,7 +62,10 @@ static int msm_timer_set_next_event(unsigned long cycles, | |||
62 | { | 62 | { |
63 | u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); | 63 | u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE); |
64 | 64 | ||
65 | writel_relaxed(0, event_base + TIMER_CLEAR); | 65 | ctrl &= ~TIMER_ENABLE_EN; |
66 | writel_relaxed(ctrl, event_base + TIMER_ENABLE); | ||
67 | |||
68 | writel_relaxed(ctrl, event_base + TIMER_CLEAR); | ||
66 | writel_relaxed(cycles, event_base + TIMER_MATCH_VAL); | 69 | writel_relaxed(cycles, event_base + TIMER_MATCH_VAL); |
67 | writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE); | 70 | writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE); |
68 | return 0; | 71 | return 0; |
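The msm timer fix above stops the timer (clearing TIMER_ENABLE_EN) before writing the clear and match registers and only re-enables it afterwards, so the running counter cannot fire against a half-programmed match value. A generic sketch of that disable-program-reenable sequence against a hypothetical memory-mapped timer block; the register layout here is invented for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical timer register block, modelled as plain memory for the demo. */
	struct fake_timer {
		uint32_t match_val;
		uint32_t clear;
		uint32_t enable;
	};

	#define TIMER_ENABLE_EN		(1u << 0)

	/* Program a one-shot event: disable, reprogram, then re-enable. */
	static void timer_set_next_event(volatile struct fake_timer *t, uint32_t cycles)
	{
		uint32_t ctrl = t->enable;

		ctrl &= ~TIMER_ENABLE_EN;		/* stop the timer first */
		t->enable = ctrl;

		t->clear = ctrl;			/* reset the counter while stopped */
		t->match_val = cycles;			/* safe: timer is not running */

		t->enable = ctrl | TIMER_ENABLE_EN;	/* restart with the new match */
	}

	int main(void)
	{
		struct fake_timer timer = { .enable = TIMER_ENABLE_EN };

		timer_set_next_event(&timer, 48000);	/* e.g. 1 ms at 48 MHz */
		printf("match=%u enable=%#x\n",
		       (unsigned)timer.match_val, (unsigned)timer.enable);
		return 0;
	}
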
diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c index 274ff58271de..d5970f5a1e8d 100644 --- a/arch/arm/mach-mvebu/irq-armada-370-xp.c +++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c | |||
@@ -44,6 +44,8 @@ | |||
44 | 44 | ||
45 | #define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) | 45 | #define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) |
46 | 46 | ||
47 | #define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5) | ||
48 | |||
47 | #define ACTIVE_DOORBELLS (8) | 49 | #define ACTIVE_DOORBELLS (8) |
48 | 50 | ||
49 | static DEFINE_RAW_SPINLOCK(irq_controller_lock); | 51 | static DEFINE_RAW_SPINLOCK(irq_controller_lock); |
@@ -59,36 +61,26 @@ static struct irq_domain *armada_370_xp_mpic_domain; | |||
59 | */ | 61 | */ |
60 | static void armada_370_xp_irq_mask(struct irq_data *d) | 62 | static void armada_370_xp_irq_mask(struct irq_data *d) |
61 | { | 63 | { |
62 | #ifdef CONFIG_SMP | ||
63 | irq_hw_number_t hwirq = irqd_to_hwirq(d); | 64 | irq_hw_number_t hwirq = irqd_to_hwirq(d); |
64 | 65 | ||
65 | if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) | 66 | if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) |
66 | writel(hwirq, main_int_base + | 67 | writel(hwirq, main_int_base + |
67 | ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); | 68 | ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); |
68 | else | 69 | else |
69 | writel(hwirq, per_cpu_int_base + | 70 | writel(hwirq, per_cpu_int_base + |
70 | ARMADA_370_XP_INT_SET_MASK_OFFS); | 71 | ARMADA_370_XP_INT_SET_MASK_OFFS); |
71 | #else | ||
72 | writel(irqd_to_hwirq(d), | ||
73 | per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS); | ||
74 | #endif | ||
75 | } | 72 | } |
76 | 73 | ||
77 | static void armada_370_xp_irq_unmask(struct irq_data *d) | 74 | static void armada_370_xp_irq_unmask(struct irq_data *d) |
78 | { | 75 | { |
79 | #ifdef CONFIG_SMP | ||
80 | irq_hw_number_t hwirq = irqd_to_hwirq(d); | 76 | irq_hw_number_t hwirq = irqd_to_hwirq(d); |
81 | 77 | ||
82 | if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS) | 78 | if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) |
83 | writel(hwirq, main_int_base + | 79 | writel(hwirq, main_int_base + |
84 | ARMADA_370_XP_INT_SET_ENABLE_OFFS); | 80 | ARMADA_370_XP_INT_SET_ENABLE_OFFS); |
85 | else | 81 | else |
86 | writel(hwirq, per_cpu_int_base + | 82 | writel(hwirq, per_cpu_int_base + |
87 | ARMADA_370_XP_INT_CLEAR_MASK_OFFS); | 83 | ARMADA_370_XP_INT_CLEAR_MASK_OFFS); |
88 | #else | ||
89 | writel(irqd_to_hwirq(d), | ||
90 | per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); | ||
91 | #endif | ||
92 | } | 84 | } |
93 | 85 | ||
94 | #ifdef CONFIG_SMP | 86 | #ifdef CONFIG_SMP |
@@ -144,10 +136,14 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h, | |||
144 | unsigned int virq, irq_hw_number_t hw) | 136 | unsigned int virq, irq_hw_number_t hw) |
145 | { | 137 | { |
146 | armada_370_xp_irq_mask(irq_get_irq_data(virq)); | 138 | armada_370_xp_irq_mask(irq_get_irq_data(virq)); |
147 | writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); | 139 | if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) |
140 | writel(hw, per_cpu_int_base + | ||
141 | ARMADA_370_XP_INT_CLEAR_MASK_OFFS); | ||
142 | else | ||
143 | writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); | ||
148 | irq_set_status_flags(virq, IRQ_LEVEL); | 144 | irq_set_status_flags(virq, IRQ_LEVEL); |
149 | 145 | ||
150 | if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) { | 146 | if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) { |
151 | irq_set_percpu_devid(virq); | 147 | irq_set_percpu_devid(virq); |
152 | irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, | 148 | irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, |
153 | handle_percpu_devid_irq); | 149 | handle_percpu_devid_irq); |
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c index 3218f1f2c0e0..e7b781d3788f 100644 --- a/arch/arm/mach-mxs/mach-mxs.c +++ b/arch/arm/mach-mxs/mach-mxs.c | |||
@@ -41,8 +41,6 @@ static struct fb_videomode mx23evk_video_modes[] = { | |||
41 | .lower_margin = 4, | 41 | .lower_margin = 4, |
42 | .hsync_len = 1, | 42 | .hsync_len = 1, |
43 | .vsync_len = 1, | 43 | .vsync_len = 1, |
44 | .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT | | ||
45 | FB_SYNC_DOTCLK_FAILING_ACT, | ||
46 | }, | 44 | }, |
47 | }; | 45 | }; |
48 | 46 | ||
@@ -59,8 +57,6 @@ static struct fb_videomode mx28evk_video_modes[] = { | |||
59 | .lower_margin = 10, | 57 | .lower_margin = 10, |
60 | .hsync_len = 10, | 58 | .hsync_len = 10, |
61 | .vsync_len = 10, | 59 | .vsync_len = 10, |
62 | .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT | | ||
63 | FB_SYNC_DOTCLK_FAILING_ACT, | ||
64 | }, | 60 | }, |
65 | }; | 61 | }; |
66 | 62 | ||
@@ -77,7 +73,6 @@ static struct fb_videomode m28evk_video_modes[] = { | |||
77 | .lower_margin = 45, | 73 | .lower_margin = 45, |
78 | .hsync_len = 1, | 74 | .hsync_len = 1, |
79 | .vsync_len = 1, | 75 | .vsync_len = 1, |
80 | .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT, | ||
81 | }, | 76 | }, |
82 | }; | 77 | }; |
83 | 78 | ||
@@ -94,9 +89,7 @@ static struct fb_videomode apx4devkit_video_modes[] = { | |||
94 | .lower_margin = 13, | 89 | .lower_margin = 13, |
95 | .hsync_len = 48, | 90 | .hsync_len = 48, |
96 | .vsync_len = 3, | 91 | .vsync_len = 3, |
97 | .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT | | 92 | .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, |
98 | FB_SYNC_DATA_ENABLE_HIGH_ACT | | ||
99 | FB_SYNC_DOTCLK_FAILING_ACT, | ||
100 | }, | 93 | }, |
101 | }; | 94 | }; |
102 | 95 | ||
@@ -113,9 +106,7 @@ static struct fb_videomode apf28dev_video_modes[] = { | |||
113 | .lower_margin = 0x15, | 106 | .lower_margin = 0x15, |
114 | .hsync_len = 64, | 107 | .hsync_len = 64, |
115 | .vsync_len = 4, | 108 | .vsync_len = 4, |
116 | .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT | | 109 | .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, |
117 | FB_SYNC_DATA_ENABLE_HIGH_ACT | | ||
118 | FB_SYNC_DOTCLK_FAILING_ACT, | ||
119 | }, | 110 | }, |
120 | }; | 111 | }; |
121 | 112 | ||
@@ -132,7 +123,6 @@ static struct fb_videomode cfa10049_video_modes[] = { | |||
132 | .lower_margin = 2, | 123 | .lower_margin = 2, |
133 | .hsync_len = 15, | 124 | .hsync_len = 15, |
134 | .vsync_len = 15, | 125 | .vsync_len = 15, |
135 | .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT | ||
136 | }, | 126 | }, |
137 | }; | 127 | }; |
138 | 128 | ||
@@ -259,6 +249,8 @@ static void __init imx23_evk_init(void) | |||
259 | mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes); | 249 | mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes); |
260 | mxsfb_pdata.default_bpp = 32; | 250 | mxsfb_pdata.default_bpp = 32; |
261 | mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; | 251 | mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; |
252 | mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | | ||
253 | MXSFB_SYNC_DOTCLK_FAILING_ACT; | ||
262 | } | 254 | } |
263 | 255 | ||
264 | static inline void enable_clk_enet_out(void) | 256 | static inline void enable_clk_enet_out(void) |
@@ -278,6 +270,8 @@ static void __init imx28_evk_init(void) | |||
278 | mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes); | 270 | mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes); |
279 | mxsfb_pdata.default_bpp = 32; | 271 | mxsfb_pdata.default_bpp = 32; |
280 | mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; | 272 | mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; |
273 | mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | | ||
274 | MXSFB_SYNC_DOTCLK_FAILING_ACT; | ||
281 | 275 | ||
282 | mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0); | 276 | mxs_saif_clkmux_select(MXS_DIGCTL_SAIF_CLKMUX_EXTMSTR0); |
283 | } | 277 | } |
@@ -297,6 +291,7 @@ static void __init m28evk_init(void) | |||
297 | mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes); | 291 | mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes); |
298 | mxsfb_pdata.default_bpp = 16; | 292 | mxsfb_pdata.default_bpp = 16; |
299 | mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; | 293 | mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; |
294 | mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT; | ||
300 | } | 295 | } |
301 | 296 | ||
302 | static void __init sc_sps1_init(void) | 297 | static void __init sc_sps1_init(void) |
@@ -322,6 +317,8 @@ static void __init apx4devkit_init(void) | |||
322 | mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes); | 317 | mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes); |
323 | mxsfb_pdata.default_bpp = 32; | 318 | mxsfb_pdata.default_bpp = 32; |
324 | mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; | 319 | mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT; |
320 | mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | | ||
321 | MXSFB_SYNC_DOTCLK_FAILING_ACT; | ||
325 | } | 322 | } |
326 | 323 | ||
327 | #define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0) | 324 | #define ENET0_MDC__GPIO_4_0 MXS_GPIO_NR(4, 0) |
@@ -407,6 +404,7 @@ static void __init cfa10049_init(void) | |||
407 | mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes); | 404 | mxsfb_pdata.mode_count = ARRAY_SIZE(cfa10049_video_modes); |
408 | mxsfb_pdata.default_bpp = 32; | 405 | mxsfb_pdata.default_bpp = 32; |
409 | mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; | 406 | mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT; |
407 | mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT; | ||
410 | } | 408 | } |
411 | 409 | ||
412 | static void __init cfa10037_init(void) | 410 | static void __init cfa10037_init(void) |
@@ -423,6 +421,8 @@ static void __init apf28_init(void) | |||
423 | mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes); | 421 | mxsfb_pdata.mode_count = ARRAY_SIZE(apf28dev_video_modes); |
424 | mxsfb_pdata.default_bpp = 16; | 422 | mxsfb_pdata.default_bpp = 16; |
425 | mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT; | 423 | mxsfb_pdata.ld_intf_width = STMLCDIF_16BIT; |
424 | mxsfb_pdata.sync = MXSFB_SYNC_DATA_ENABLE_HIGH_ACT | | ||
425 | MXSFB_SYNC_DOTCLK_FAILING_ACT; | ||
426 | } | 426 | } |
427 | 427 | ||
428 | static void __init mxs_machine_init(void) | 428 | static void __init mxs_machine_init(void) |
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c index cb7c6ae2e3fc..6c4f766365a2 100644 --- a/arch/arm/mach-omap1/clock_data.c +++ b/arch/arm/mach-omap1/clock_data.c | |||
@@ -543,15 +543,6 @@ static struct clk usb_dc_ck = { | |||
543 | /* Direct from ULPD, no parent */ | 543 | /* Direct from ULPD, no parent */ |
544 | .rate = 48000000, | 544 | .rate = 48000000, |
545 | .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), | 545 | .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), |
546 | .enable_bit = USB_REQ_EN_SHIFT, | ||
547 | }; | ||
548 | |||
549 | static struct clk usb_dc_ck7xx = { | ||
550 | .name = "usb_dc_ck", | ||
551 | .ops = &clkops_generic, | ||
552 | /* Direct from ULPD, no parent */ | ||
553 | .rate = 48000000, | ||
554 | .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG), | ||
555 | .enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT, | 546 | .enable_bit = SOFT_USB_OTG_DPLL_REQ_SHIFT, |
556 | }; | 547 | }; |
557 | 548 | ||
@@ -727,8 +718,7 @@ static struct omap_clk omap_clks[] = { | |||
727 | CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310), | 718 | CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310), |
728 | CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310), | 719 | CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310), |
729 | CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX), | 720 | CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX), |
730 | CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX), | 721 | CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX | CK_7XX), |
731 | CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX), | ||
732 | CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310), | 722 | CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310), |
733 | CLK(NULL, "mclk", &mclk_16xx, CK_16XX), | 723 | CLK(NULL, "mclk", &mclk_16xx, CK_16XX), |
734 | CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310), | 724 | CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310), |
diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c index 3d58f335f173..0c6834ae1fc4 100644 --- a/arch/arm/mach-omap2/cclock44xx_data.c +++ b/arch/arm/mach-omap2/cclock44xx_data.c | |||
@@ -52,6 +52,13 @@ | |||
52 | */ | 52 | */ |
53 | #define OMAP4_DPLL_ABE_DEFFREQ 98304000 | 53 | #define OMAP4_DPLL_ABE_DEFFREQ 98304000 |
54 | 54 | ||
55 | /* | ||
56 | * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section | ||
57 | * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred | ||
58 | * locked frequency for the USB DPLL is 960MHz. | ||
59 | */ | ||
60 | #define OMAP4_DPLL_USB_DEFFREQ 960000000 | ||
61 | |||
55 | /* Root clocks */ | 62 | /* Root clocks */ |
56 | 63 | ||
57 | DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0); | 64 | DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0); |
@@ -1011,6 +1018,10 @@ DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel, | |||
1011 | OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK, | 1018 | OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK, |
1012 | hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops); | 1019 | hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops); |
1013 | 1020 | ||
1021 | DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0, | ||
1022 | OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL, | ||
1023 | OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL); | ||
1024 | |||
1014 | DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0, | 1025 | DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0, |
1015 | OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL, | 1026 | OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL, |
1016 | OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); | 1027 | OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL); |
@@ -1538,6 +1549,7 @@ static struct omap_clk omap44xx_clks[] = { | |||
1538 | CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X), | 1549 | CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X), |
1539 | CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X), | 1550 | CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X), |
1540 | CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X), | 1551 | CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X), |
1552 | CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X), | ||
1541 | CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X), | 1553 | CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X), |
1542 | CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X), | 1554 | CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X), |
1543 | CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X), | 1555 | CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X), |
@@ -1705,5 +1717,13 @@ int __init omap4xxx_clk_init(void) | |||
1705 | if (rc) | 1717 | if (rc) |
1706 | pr_err("%s: failed to configure ABE DPLL!\n", __func__); | 1718 | pr_err("%s: failed to configure ABE DPLL!\n", __func__); |
1707 | 1719 | ||
1720 | /* | ||
1721 | * Lock USB DPLL on OMAP4 devices so that the L3INIT power | ||
1722 | * domain can transition to retention state when not in use. | ||
1723 | */ | ||
1724 | rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ); | ||
1725 | if (rc) | ||
1726 | pr_err("%s: failed to configure USB DPLL!\n", __func__); | ||
1727 | |||
1708 | return 0; | 1728 | return 0; |
1709 | } | 1729 | } |
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h index 40f4a03d728f..d6ba13e1c540 100644 --- a/arch/arm/mach-omap2/common.h +++ b/arch/arm/mach-omap2/common.h | |||
@@ -293,5 +293,8 @@ extern void omap_reserve(void); | |||
293 | struct omap_hwmod; | 293 | struct omap_hwmod; |
294 | extern int omap_dss_reset(struct omap_hwmod *); | 294 | extern int omap_dss_reset(struct omap_hwmod *); |
295 | 295 | ||
296 | /* SoC specific clock initializer */ | ||
297 | extern int (*omap_clk_init)(void); | ||
298 | |||
296 | #endif /* __ASSEMBLER__ */ | 299 | #endif /* __ASSEMBLER__ */ |
297 | #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */ | 300 | #endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */ |
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index 2c3fdd65387b..5c445ca1e271 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c | |||
@@ -55,6 +55,12 @@ | |||
55 | #include "prm44xx.h" | 55 | #include "prm44xx.h" |
56 | 56 | ||
57 | /* | 57 | /* |
58 | * omap_clk_init: points to a function that does the SoC-specific | ||
59 | * clock initializations | ||
60 | */ | ||
61 | int (*omap_clk_init)(void); | ||
62 | |||
63 | /* | ||
58 | * The machine specific code may provide the extra mapping besides the | 64 | * The machine specific code may provide the extra mapping besides the |
59 | * default mapping provided here. | 65 | * default mapping provided here. |
60 | */ | 66 | */ |
@@ -397,7 +403,7 @@ void __init omap2420_init_early(void) | |||
397 | omap242x_clockdomains_init(); | 403 | omap242x_clockdomains_init(); |
398 | omap2420_hwmod_init(); | 404 | omap2420_hwmod_init(); |
399 | omap_hwmod_init_postsetup(); | 405 | omap_hwmod_init_postsetup(); |
400 | omap2420_clk_init(); | 406 | omap_clk_init = omap2420_clk_init; |
401 | } | 407 | } |
402 | 408 | ||
403 | void __init omap2420_init_late(void) | 409 | void __init omap2420_init_late(void) |
@@ -427,7 +433,7 @@ void __init omap2430_init_early(void) | |||
427 | omap243x_clockdomains_init(); | 433 | omap243x_clockdomains_init(); |
428 | omap2430_hwmod_init(); | 434 | omap2430_hwmod_init(); |
429 | omap_hwmod_init_postsetup(); | 435 | omap_hwmod_init_postsetup(); |
430 | omap2430_clk_init(); | 436 | omap_clk_init = omap2430_clk_init; |
431 | } | 437 | } |
432 | 438 | ||
433 | void __init omap2430_init_late(void) | 439 | void __init omap2430_init_late(void) |
@@ -462,7 +468,7 @@ void __init omap3_init_early(void) | |||
462 | omap3xxx_clockdomains_init(); | 468 | omap3xxx_clockdomains_init(); |
463 | omap3xxx_hwmod_init(); | 469 | omap3xxx_hwmod_init(); |
464 | omap_hwmod_init_postsetup(); | 470 | omap_hwmod_init_postsetup(); |
465 | omap3xxx_clk_init(); | 471 | omap_clk_init = omap3xxx_clk_init; |
466 | } | 472 | } |
467 | 473 | ||
468 | void __init omap3430_init_early(void) | 474 | void __init omap3430_init_early(void) |
@@ -500,7 +506,7 @@ void __init ti81xx_init_early(void) | |||
500 | omap3xxx_clockdomains_init(); | 506 | omap3xxx_clockdomains_init(); |
501 | omap3xxx_hwmod_init(); | 507 | omap3xxx_hwmod_init(); |
502 | omap_hwmod_init_postsetup(); | 508 | omap_hwmod_init_postsetup(); |
503 | omap3xxx_clk_init(); | 509 | omap_clk_init = omap3xxx_clk_init; |
504 | } | 510 | } |
505 | 511 | ||
506 | void __init omap3_init_late(void) | 512 | void __init omap3_init_late(void) |
@@ -568,7 +574,7 @@ void __init am33xx_init_early(void) | |||
568 | am33xx_clockdomains_init(); | 574 | am33xx_clockdomains_init(); |
569 | am33xx_hwmod_init(); | 575 | am33xx_hwmod_init(); |
570 | omap_hwmod_init_postsetup(); | 576 | omap_hwmod_init_postsetup(); |
571 | am33xx_clk_init(); | 577 | omap_clk_init = am33xx_clk_init; |
572 | } | 578 | } |
573 | #endif | 579 | #endif |
574 | 580 | ||
@@ -593,7 +599,7 @@ void __init omap4430_init_early(void) | |||
593 | omap44xx_clockdomains_init(); | 599 | omap44xx_clockdomains_init(); |
594 | omap44xx_hwmod_init(); | 600 | omap44xx_hwmod_init(); |
595 | omap_hwmod_init_postsetup(); | 601 | omap_hwmod_init_postsetup(); |
596 | omap4xxx_clk_init(); | 602 | omap_clk_init = omap4xxx_clk_init; |
597 | } | 603 | } |
598 | 604 | ||
599 | void __init omap4430_init_late(void) | 605 | void __init omap4430_init_late(void) |
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index c2c798c08c2b..a202a4785104 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -1368,7 +1368,9 @@ static void _enable_sysc(struct omap_hwmod *oh) | |||
1368 | } | 1368 | } |
1369 | 1369 | ||
1370 | if (sf & SYSC_HAS_MIDLEMODE) { | 1370 | if (sf & SYSC_HAS_MIDLEMODE) { |
1371 | if (oh->flags & HWMOD_SWSUP_MSTANDBY) { | 1371 | if (oh->flags & HWMOD_FORCE_MSTANDBY) { |
1372 | idlemode = HWMOD_IDLEMODE_FORCE; | ||
1373 | } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) { | ||
1372 | idlemode = HWMOD_IDLEMODE_NO; | 1374 | idlemode = HWMOD_IDLEMODE_NO; |
1373 | } else { | 1375 | } else { |
1374 | if (sf & SYSC_HAS_ENAWAKEUP) | 1376 | if (sf & SYSC_HAS_ENAWAKEUP) |
@@ -1440,7 +1442,8 @@ static void _idle_sysc(struct omap_hwmod *oh) | |||
1440 | } | 1442 | } |
1441 | 1443 | ||
1442 | if (sf & SYSC_HAS_MIDLEMODE) { | 1444 | if (sf & SYSC_HAS_MIDLEMODE) { |
1443 | if (oh->flags & HWMOD_SWSUP_MSTANDBY) { | 1445 | if ((oh->flags & HWMOD_SWSUP_MSTANDBY) || |
1446 | (oh->flags & HWMOD_FORCE_MSTANDBY)) { | ||
1444 | idlemode = HWMOD_IDLEMODE_FORCE; | 1447 | idlemode = HWMOD_IDLEMODE_FORCE; |
1445 | } else { | 1448 | } else { |
1446 | if (sf & SYSC_HAS_ENAWAKEUP) | 1449 | if (sf & SYSC_HAS_ENAWAKEUP) |
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index d43d9b608eda..d5dc935f6060 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h | |||
@@ -427,8 +427,8 @@ struct omap_hwmod_omap4_prcm { | |||
427 | * | 427 | * |
428 | * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out | 428 | * HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out |
429 | * of idle, rather than relying on module smart-idle | 429 | * of idle, rather than relying on module smart-idle |
430 | * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out | 430 | * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and |
431 | * of standby, rather than relying on module smart-standby | 431 | * out of standby, rather than relying on module smart-standby |
432 | * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for | 432 | * HWMOD_INIT_NO_RESET: don't reset this module at boot - important for |
433 | * SDRAM controller, etc. XXX probably belongs outside the main hwmod file | 433 | * SDRAM controller, etc. XXX probably belongs outside the main hwmod file |
434 | * XXX Should be HWMOD_SETUP_NO_RESET | 434 | * XXX Should be HWMOD_SETUP_NO_RESET |
@@ -459,6 +459,10 @@ struct omap_hwmod_omap4_prcm { | |||
459 | * correctly, or this is being abused to deal with some PM latency | 459 | * correctly, or this is being abused to deal with some PM latency |
460 | * issues -- but we're currently suffering from a shortage of | 460 | * issues -- but we're currently suffering from a shortage of |
461 | * folks who are able to track these issues down properly. | 461 | * folks who are able to track these issues down properly. |
462 | * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that device | ||
463 | * is kept in force-standby mode. Failing to do so causes PM problems | ||
464 | * with musb on OMAP3630 at least. Note that musb has a dedicated register | ||
465 | * to control MSTANDBY signal when MIDLEMODE is set to force-standby. | ||
462 | */ | 466 | */ |
463 | #define HWMOD_SWSUP_SIDLE (1 << 0) | 467 | #define HWMOD_SWSUP_SIDLE (1 << 0) |
464 | #define HWMOD_SWSUP_MSTANDBY (1 << 1) | 468 | #define HWMOD_SWSUP_MSTANDBY (1 << 1) |
@@ -471,6 +475,7 @@ struct omap_hwmod_omap4_prcm { | |||
471 | #define HWMOD_16BIT_REG (1 << 8) | 475 | #define HWMOD_16BIT_REG (1 << 8) |
472 | #define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) | 476 | #define HWMOD_EXT_OPT_MAIN_CLK (1 << 9) |
473 | #define HWMOD_BLOCK_WFI (1 << 10) | 477 | #define HWMOD_BLOCK_WFI (1 << 10) |
478 | #define HWMOD_FORCE_MSTANDBY (1 << 11) | ||
474 | 479 | ||
475 | /* | 480 | /* |
476 | * omap_hwmod._int_flags definitions | 481 | * omap_hwmod._int_flags definitions |
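The flag documentation above says HWMOD_FORCE_MSTANDBY keeps MIDLEMODE in force-standby regardless of what the module would otherwise negotiate. A self-contained sketch of the resulting precedence (force > software-supervised > smart); the bit values are the ones defined in this header, everything else is illustrative and not the in-tree implementation:

	#define HWMOD_SWSUP_MSTANDBY	(1 << 1)
	#define HWMOD_FORCE_MSTANDBY	(1 << 11)

	enum midlemode { IDLEMODE_FORCE, IDLEMODE_NO, IDLEMODE_SMART };

	/* 'enabling' distinguishes the _enable_sysc() path from _idle_sysc(). */
	static enum midlemode pick_midlemode(unsigned long flags, int enabling)
	{
		if (flags & HWMOD_FORCE_MSTANDBY)	/* always force-standby */
			return IDLEMODE_FORCE;
		if (flags & HWMOD_SWSUP_MSTANDBY)	/* driver manages standby itself */
			return enabling ? IDLEMODE_NO : IDLEMODE_FORCE;
		return IDLEMODE_SMART;			/* otherwise rely on smart-standby */
	}
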
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index ac7e03ec952f..5112d04e7b79 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | |||
@@ -1707,9 +1707,14 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = { | |||
1707 | * Erratum ID: i479 idle_req / idle_ack mechanism potentially | 1707 | * Erratum ID: i479 idle_req / idle_ack mechanism potentially |
1708 | * broken when autoidle is enabled | 1708 | * broken when autoidle is enabled |
1709 | * workaround is to disable the autoidle bit at module level. | 1709 | * workaround is to disable the autoidle bit at module level. |
1710 | * | ||
1711 | * Enabling the device in any other MIDLEMODE setting but force-idle | ||
1712 | * causes core_pwrdm not enter idle states at least on OMAP3630. | ||
1713 | * Note that musb has OTG_FORCESTDBY register that controls MSTANDBY | ||
1714 | * signal when MIDLEMODE is set to force-idle. | ||
1710 | */ | 1715 | */ |
1711 | .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE | 1716 | .flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE |
1712 | | HWMOD_SWSUP_MSTANDBY, | 1717 | | HWMOD_FORCE_MSTANDBY, |
1713 | }; | 1718 | }; |
1714 | 1719 | ||
1715 | /* usb_otg_hs */ | 1720 | /* usb_otg_hs */ |
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 0e47d2e1687c..9e0576569e07 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c | |||
@@ -2714,6 +2714,10 @@ static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = { | |||
2714 | { } | 2714 | { } |
2715 | }; | 2715 | }; |
2716 | 2716 | ||
2717 | static struct omap_hwmod_opt_clk ocp2scp_usb_phy_opt_clks[] = { | ||
2718 | { .role = "48mhz", .clk = "ocp2scp_usb_phy_phy_48m" }, | ||
2719 | }; | ||
2720 | |||
2717 | /* ocp2scp_usb_phy */ | 2721 | /* ocp2scp_usb_phy */ |
2718 | static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { | 2722 | static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { |
2719 | .name = "ocp2scp_usb_phy", | 2723 | .name = "ocp2scp_usb_phy", |
@@ -2728,6 +2732,8 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = { | |||
2728 | }, | 2732 | }, |
2729 | }, | 2733 | }, |
2730 | .dev_attr = ocp2scp_dev_attr, | 2734 | .dev_attr = ocp2scp_dev_attr, |
2735 | .opt_clks = ocp2scp_usb_phy_opt_clks, | ||
2736 | .opt_clks_cnt = ARRAY_SIZE(ocp2scp_usb_phy_opt_clks), | ||
2731 | }; | 2737 | }; |
2732 | 2738 | ||
2733 | /* | 2739 | /* |
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c index 2bdd4cf17a8f..f62b509ed08d 100644 --- a/arch/arm/mach-omap2/timer.c +++ b/arch/arm/mach-omap2/timer.c | |||
@@ -547,6 +547,8 @@ static inline void __init realtime_counter_init(void) | |||
547 | clksrc_nr, clksrc_src) \ | 547 | clksrc_nr, clksrc_src) \ |
548 | void __init omap##name##_gptimer_timer_init(void) \ | 548 | void __init omap##name##_gptimer_timer_init(void) \ |
549 | { \ | 549 | { \ |
550 | if (omap_clk_init) \ | ||
551 | omap_clk_init(); \ | ||
550 | omap_dmtimer_init(); \ | 552 | omap_dmtimer_init(); \ |
551 | omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ | 553 | omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ |
552 | omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \ | 554 | omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \ |
@@ -556,6 +558,8 @@ void __init omap##name##_gptimer_timer_init(void) \ | |||
556 | clksrc_nr, clksrc_src) \ | 558 | clksrc_nr, clksrc_src) \ |
557 | void __init omap##name##_sync32k_timer_init(void) \ | 559 | void __init omap##name##_sync32k_timer_init(void) \ |
558 | { \ | 560 | { \ |
561 | if (omap_clk_init) \ | ||
562 | omap_clk_init(); \ | ||
559 | omap_dmtimer_init(); \ | 563 | omap_dmtimer_init(); \ |
560 | omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ | 564 | omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \ |
561 | /* Enable the use of clocksource="gp_timer" kernel parameter */ \ | 565 | /* Enable the use of clocksource="gp_timer" kernel parameter */ \ |
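The common.h, io.c and timer.c hunks above stop calling the SoC-specific clock init directly and instead record it in the omap_clk_init function pointer, which the timer init code invokes later. A minimal userspace sketch of that deferred-initialization pattern (names are illustrative, not the in-tree symbols):

	#include <stdio.h>

	/* Stand-in for a SoC-specific initializer such as omap4xxx_clk_init(). */
	static int soc_clk_init_stub(void)
	{
		puts("clocks initialized");
		return 0;
	}

	/* Mirrors omap_clk_init: chosen during early init, run only when needed. */
	static int (*clk_init_hook)(void);

	static void soc_init_early(void)
	{
		clk_init_hook = soc_clk_init_stub;	/* record it, do not run it yet */
	}

	static void timer_init(void)
	{
		if (clk_init_hook)	/* run it just before the timers need clocks */
			clk_init_hook();
	}

	int main(void)
	{
		soc_init_early();
		timer_init();
		return 0;
	}
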
diff --git a/arch/arm/mach-s3c24xx/include/mach/irqs.h b/arch/arm/mach-s3c24xx/include/mach/irqs.h index b7a9f4d469e8..1e73f5fa8659 100644 --- a/arch/arm/mach-s3c24xx/include/mach/irqs.h +++ b/arch/arm/mach-s3c24xx/include/mach/irqs.h | |||
@@ -188,10 +188,8 @@ | |||
188 | 188 | ||
189 | #if defined(CONFIG_CPU_S3C2416) | 189 | #if defined(CONFIG_CPU_S3C2416) |
190 | #define NR_IRQS (IRQ_S3C2416_I2S1 + 1) | 190 | #define NR_IRQS (IRQ_S3C2416_I2S1 + 1) |
191 | #elif defined(CONFIG_CPU_S3C2443) | ||
192 | #define NR_IRQS (IRQ_S3C2443_AC97+1) | ||
193 | #else | 191 | #else |
194 | #define NR_IRQS (IRQ_S3C2440_AC97+1) | 192 | #define NR_IRQS (IRQ_S3C2443_AC97 + 1) |
195 | #endif | 193 | #endif |
196 | 194 | ||
197 | /* compatibility define. */ | 195 | /* compatibility define. */ |
diff --git a/arch/arm/mach-s3c24xx/irq.c b/arch/arm/mach-s3c24xx/irq.c index cb9f5e011e73..d8ba9bee4c7e 100644 --- a/arch/arm/mach-s3c24xx/irq.c +++ b/arch/arm/mach-s3c24xx/irq.c | |||
@@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np, | |||
500 | base = (void *)0xfd000000; | 500 | base = (void *)0xfd000000; |
501 | 501 | ||
502 | intc->reg_mask = base + 0xa4; | 502 | intc->reg_mask = base + 0xa4; |
503 | intc->reg_pending = base + 0x08; | 503 | intc->reg_pending = base + 0xa8; |
504 | irq_num = 20; | 504 | irq_num = 20; |
505 | irq_start = S3C2410_IRQ(32); | 505 | irq_start = S3C2410_IRQ(32); |
506 | irq_offset = 4; | 506 | irq_offset = 4; |
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c index fcdf52dbcc49..f051f53e35b7 100644 --- a/arch/arm/mach-s5pv210/clock.c +++ b/arch/arm/mach-s5pv210/clock.c | |||
@@ -214,11 +214,6 @@ static struct clk clk_pcmcdclk2 = { | |||
214 | .name = "pcmcdclk", | 214 | .name = "pcmcdclk", |
215 | }; | 215 | }; |
216 | 216 | ||
217 | static struct clk dummy_apb_pclk = { | ||
218 | .name = "apb_pclk", | ||
219 | .id = -1, | ||
220 | }; | ||
221 | |||
222 | static struct clk *clkset_vpllsrc_list[] = { | 217 | static struct clk *clkset_vpllsrc_list[] = { |
223 | [0] = &clk_fin_vpll, | 218 | [0] = &clk_fin_vpll, |
224 | [1] = &clk_sclk_hdmi27m, | 219 | [1] = &clk_sclk_hdmi27m, |
@@ -305,18 +300,6 @@ static struct clk_ops clk_fout_apll_ops = { | |||
305 | 300 | ||
306 | static struct clk init_clocks_off[] = { | 301 | static struct clk init_clocks_off[] = { |
307 | { | 302 | { |
308 | .name = "dma", | ||
309 | .devname = "dma-pl330.0", | ||
310 | .parent = &clk_hclk_psys.clk, | ||
311 | .enable = s5pv210_clk_ip0_ctrl, | ||
312 | .ctrlbit = (1 << 3), | ||
313 | }, { | ||
314 | .name = "dma", | ||
315 | .devname = "dma-pl330.1", | ||
316 | .parent = &clk_hclk_psys.clk, | ||
317 | .enable = s5pv210_clk_ip0_ctrl, | ||
318 | .ctrlbit = (1 << 4), | ||
319 | }, { | ||
320 | .name = "rot", | 303 | .name = "rot", |
321 | .parent = &clk_hclk_dsys.clk, | 304 | .parent = &clk_hclk_dsys.clk, |
322 | .enable = s5pv210_clk_ip0_ctrl, | 305 | .enable = s5pv210_clk_ip0_ctrl, |
@@ -573,6 +556,20 @@ static struct clk clk_hsmmc3 = { | |||
573 | .ctrlbit = (1<<19), | 556 | .ctrlbit = (1<<19), |
574 | }; | 557 | }; |
575 | 558 | ||
559 | static struct clk clk_pdma0 = { | ||
560 | .name = "pdma0", | ||
561 | .parent = &clk_hclk_psys.clk, | ||
562 | .enable = s5pv210_clk_ip0_ctrl, | ||
563 | .ctrlbit = (1 << 3), | ||
564 | }; | ||
565 | |||
566 | static struct clk clk_pdma1 = { | ||
567 | .name = "pdma1", | ||
568 | .parent = &clk_hclk_psys.clk, | ||
569 | .enable = s5pv210_clk_ip0_ctrl, | ||
570 | .ctrlbit = (1 << 4), | ||
571 | }; | ||
572 | |||
576 | static struct clk *clkset_uart_list[] = { | 573 | static struct clk *clkset_uart_list[] = { |
577 | [6] = &clk_mout_mpll.clk, | 574 | [6] = &clk_mout_mpll.clk, |
578 | [7] = &clk_mout_epll.clk, | 575 | [7] = &clk_mout_epll.clk, |
@@ -1075,6 +1072,8 @@ static struct clk *clk_cdev[] = { | |||
1075 | &clk_hsmmc1, | 1072 | &clk_hsmmc1, |
1076 | &clk_hsmmc2, | 1073 | &clk_hsmmc2, |
1077 | &clk_hsmmc3, | 1074 | &clk_hsmmc3, |
1075 | &clk_pdma0, | ||
1076 | &clk_pdma1, | ||
1078 | }; | 1077 | }; |
1079 | 1078 | ||
1080 | /* Clock initialisation code */ | 1079 | /* Clock initialisation code */ |
@@ -1333,6 +1332,8 @@ static struct clk_lookup s5pv210_clk_lookup[] = { | |||
1333 | CLKDEV_INIT(NULL, "spi_busclk0", &clk_p), | 1332 | CLKDEV_INIT(NULL, "spi_busclk0", &clk_p), |
1334 | CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk), | 1333 | CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk), |
1335 | CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk), | 1334 | CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk), |
1335 | CLKDEV_INIT("dma-pl330.0", "apb_pclk", &clk_pdma0), | ||
1336 | CLKDEV_INIT("dma-pl330.1", "apb_pclk", &clk_pdma1), | ||
1336 | }; | 1337 | }; |
1337 | 1338 | ||
1338 | void __init s5pv210_register_clocks(void) | 1339 | void __init s5pv210_register_clocks(void) |
@@ -1361,6 +1362,5 @@ void __init s5pv210_register_clocks(void) | |||
1361 | for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++) | 1362 | for (ptr = 0; ptr < ARRAY_SIZE(clk_cdev); ptr++) |
1362 | s3c_disable_clocks(clk_cdev[ptr], 1); | 1363 | s3c_disable_clocks(clk_cdev[ptr], 1); |
1363 | 1364 | ||
1364 | s3c24xx_register_clock(&dummy_apb_pclk); | ||
1365 | s3c_pwmclk_init(); | 1365 | s3c_pwmclk_init(); |
1366 | } | 1366 | } |
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c index 3a38f7b34b94..e373de44a8b6 100644 --- a/arch/arm/mach-s5pv210/mach-goni.c +++ b/arch/arm/mach-s5pv210/mach-goni.c | |||
@@ -845,7 +845,7 @@ static struct fimc_source_info goni_camera_sensors[] = { | |||
845 | .mux_id = 0, | 845 | .mux_id = 0, |
846 | .flags = V4L2_MBUS_PCLK_SAMPLE_FALLING | | 846 | .flags = V4L2_MBUS_PCLK_SAMPLE_FALLING | |
847 | V4L2_MBUS_VSYNC_ACTIVE_LOW, | 847 | V4L2_MBUS_VSYNC_ACTIVE_LOW, |
848 | .bus_type = FIMC_BUS_TYPE_ITU_601, | 848 | .fimc_bus_type = FIMC_BUS_TYPE_ITU_601, |
849 | .board_info = &noon010pc30_board_info, | 849 | .board_info = &noon010pc30_board_info, |
850 | .i2c_bus_num = 0, | 850 | .i2c_bus_num = 0, |
851 | .clk_frequency = 16000000UL, | 851 | .clk_frequency = 16000000UL, |
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c index cdcb799e802f..fec49ebc359a 100644 --- a/arch/arm/mach-shmobile/board-marzen.c +++ b/arch/arm/mach-shmobile/board-marzen.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/smsc911x.h> | 32 | #include <linux/smsc911x.h> |
33 | #include <linux/spi/spi.h> | 33 | #include <linux/spi/spi.h> |
34 | #include <linux/spi/sh_hspi.h> | 34 | #include <linux/spi/sh_hspi.h> |
35 | #include <linux/mmc/host.h> | ||
35 | #include <linux/mmc/sh_mobile_sdhi.h> | 36 | #include <linux/mmc/sh_mobile_sdhi.h> |
36 | #include <linux/mfd/tmio.h> | 37 | #include <linux/mfd/tmio.h> |
37 | #include <linux/usb/otg.h> | 38 | #include <linux/usb/otg.h> |
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c index 051b62c27102..7f2cb6c5e2c1 100644 --- a/arch/arm/mach-ux500/board-mop500-sdi.c +++ b/arch/arm/mach-ux500/board-mop500-sdi.c | |||
@@ -81,7 +81,6 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = { | |||
81 | #endif | 81 | #endif |
82 | 82 | ||
83 | struct mmci_platform_data mop500_sdi0_data = { | 83 | struct mmci_platform_data mop500_sdi0_data = { |
84 | .ios_handler = mop500_sdi0_ios_handler, | ||
85 | .ocr_mask = MMC_VDD_29_30, | 84 | .ocr_mask = MMC_VDD_29_30, |
86 | .f_max = 50000000, | 85 | .f_max = 50000000, |
87 | .capabilities = MMC_CAP_4_BIT_DATA | | 86 | .capabilities = MMC_CAP_4_BIT_DATA | |
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index b03457881c4b..87d2d7b38ce9 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/clk.h> | ||
15 | #include <linux/io.h> | 16 | #include <linux/io.h> |
16 | #include <linux/i2c.h> | 17 | #include <linux/i2c.h> |
17 | #include <linux/platform_data/i2c-nomadik.h> | 18 | #include <linux/platform_data/i2c-nomadik.h> |
@@ -439,6 +440,15 @@ static void mop500_prox_deactivate(struct device *dev) | |||
439 | regulator_put(prox_regulator); | 440 | regulator_put(prox_regulator); |
440 | } | 441 | } |
441 | 442 | ||
443 | void mop500_snowball_ethernet_clock_enable(void) | ||
444 | { | ||
445 | struct clk *clk; | ||
446 | |||
447 | clk = clk_get_sys("fsmc", NULL); | ||
448 | if (!IS_ERR(clk)) | ||
449 | clk_prepare_enable(clk); | ||
450 | } | ||
451 | |||
442 | static struct cryp_platform_data u8500_cryp1_platform_data = { | 452 | static struct cryp_platform_data u8500_cryp1_platform_data = { |
443 | .mem_to_engine = { | 453 | .mem_to_engine = { |
444 | .dir = STEDMA40_MEM_TO_PERIPH, | 454 | .dir = STEDMA40_MEM_TO_PERIPH, |
@@ -683,6 +693,8 @@ static void __init snowball_init_machine(void) | |||
683 | mop500_audio_init(parent); | 693 | mop500_audio_init(parent); |
684 | mop500_uart_init(parent); | 694 | mop500_uart_init(parent); |
685 | 695 | ||
696 | mop500_snowball_ethernet_clock_enable(); | ||
697 | |||
686 | /* This board has full regulator constraints */ | 698 | /* This board has full regulator constraints */ |
687 | regulator_has_full_constraints(); | 699 | regulator_has_full_constraints(); |
688 | } | 700 | } |
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h index eaa605f5d90d..d38951be70df 100644 --- a/arch/arm/mach-ux500/board-mop500.h +++ b/arch/arm/mach-ux500/board-mop500.h | |||
@@ -104,6 +104,7 @@ void __init mop500_pinmaps_init(void); | |||
104 | void __init snowball_pinmaps_init(void); | 104 | void __init snowball_pinmaps_init(void); |
105 | void __init hrefv60_pinmaps_init(void); | 105 | void __init hrefv60_pinmaps_init(void); |
106 | void mop500_audio_init(struct device *parent); | 106 | void mop500_audio_init(struct device *parent); |
107 | void mop500_snowball_ethernet_clock_enable(void); | ||
107 | 108 | ||
108 | int __init mop500_uib_init(void); | 109 | int __init mop500_uib_init(void); |
109 | void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info, | 110 | void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info, |
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 19235cf7bbe3..f1a581844372 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c | |||
@@ -312,9 +312,10 @@ static void __init u8500_init_machine(void) | |||
312 | /* Pinmaps must be in place before devices register */ | 312 | /* Pinmaps must be in place before devices register */ |
313 | if (of_machine_is_compatible("st-ericsson,mop500")) | 313 | if (of_machine_is_compatible("st-ericsson,mop500")) |
314 | mop500_pinmaps_init(); | 314 | mop500_pinmaps_init(); |
315 | else if (of_machine_is_compatible("calaosystems,snowball-a9500")) | 315 | else if (of_machine_is_compatible("calaosystems,snowball-a9500")) { |
316 | snowball_pinmaps_init(); | 316 | snowball_pinmaps_init(); |
317 | else if (of_machine_is_compatible("st-ericsson,hrefv60+")) | 317 | mop500_snowball_ethernet_clock_enable(); |
318 | } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) | ||
318 | hrefv60_pinmaps_init(); | 319 | hrefv60_pinmaps_init(); |
319 | else if (of_machine_is_compatible("st-ericsson,ccu9540")) {} | 320 | else if (of_machine_is_compatible("st-ericsson,ccu9540")) {} |
320 | /* TODO: Add pinmaps for ccu9540 board. */ | 321 | /* TODO: Add pinmaps for ccu9540 board. */ |
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c2f37390308a..c465faca51b0 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id) | |||
299 | int lockregs; | 299 | int lockregs; |
300 | int i; | 300 | int i; |
301 | 301 | ||
302 | switch (cache_id) { | 302 | switch (cache_id & L2X0_CACHE_ID_PART_MASK) { |
303 | case L2X0_CACHE_ID_PART_L310: | 303 | case L2X0_CACHE_ID_PART_L310: |
304 | lockregs = 8; | 304 | lockregs = 8; |
305 | break; | 305 | break; |
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) | |||
333 | if (cache_id_part_number_from_dt) | 333 | if (cache_id_part_number_from_dt) |
334 | cache_id = cache_id_part_number_from_dt; | 334 | cache_id = cache_id_part_number_from_dt; |
335 | else | 335 | else |
336 | cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID) | 336 | cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); |
337 | & L2X0_CACHE_ID_PART_MASK; | ||
338 | aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); | 337 | aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); |
339 | 338 | ||
340 | aux &= aux_mask; | 339 | aux &= aux_mask; |
341 | aux |= aux_val; | 340 | aux |= aux_val; |
342 | 341 | ||
343 | /* Determine the number of ways */ | 342 | /* Determine the number of ways */ |
344 | switch (cache_id) { | 343 | switch (cache_id & L2X0_CACHE_ID_PART_MASK) { |
345 | case L2X0_CACHE_ID_PART_L310: | 344 | case L2X0_CACHE_ID_PART_L310: |
346 | if (aux & (1 << 16)) | 345 | if (aux & (1 << 16)) |
347 | ways = 16; | 346 | ways = 16; |
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = { | |||
725 | .flush_all = l2x0_flush_all, | 724 | .flush_all = l2x0_flush_all, |
726 | .inv_all = l2x0_inv_all, | 725 | .inv_all = l2x0_inv_all, |
727 | .disable = l2x0_disable, | 726 | .disable = l2x0_disable, |
728 | .set_debug = pl310_set_debug, | ||
729 | }, | 727 | }, |
730 | }; | 728 | }; |
731 | 729 | ||
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask) | |||
814 | data->save(); | 812 | data->save(); |
815 | 813 | ||
816 | of_init = true; | 814 | of_init = true; |
817 | l2x0_init(l2x0_base, aux_val, aux_mask); | ||
818 | |||
819 | memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); | 815 | memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); |
816 | l2x0_init(l2x0_base, aux_val, aux_mask); | ||
820 | 817 | ||
821 | return 0; | 818 | return 0; |
822 | } | 819 | } |
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index a5a4b2bc42ba..2ac37372ef52 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock); | |||
48 | static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); | 48 | static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); |
49 | static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); | 49 | static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); |
50 | 50 | ||
51 | static DEFINE_PER_CPU(atomic64_t, active_asids); | 51 | DEFINE_PER_CPU(atomic64_t, active_asids); |
52 | static DEFINE_PER_CPU(u64, reserved_asids); | 52 | static DEFINE_PER_CPU(u64, reserved_asids); |
53 | static cpumask_t tlb_flush_pending; | 53 | static cpumask_t tlb_flush_pending; |
54 | 54 | ||
@@ -215,6 +215,7 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) | |||
215 | if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { | 215 | if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { |
216 | local_flush_bp_all(); | 216 | local_flush_bp_all(); |
217 | local_flush_tlb_all(); | 217 | local_flush_tlb_all(); |
218 | dummy_flush_tlb_a15_erratum(); | ||
218 | } | 219 | } |
219 | 220 | ||
220 | atomic64_set(&per_cpu(active_asids, cpu), asid); | 221 | atomic64_set(&per_cpu(active_asids, cpu), asid); |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e95a996ab78f..78978945492a 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -598,39 +598,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, | |||
598 | } while (pte++, addr += PAGE_SIZE, addr != end); | 598 | } while (pte++, addr += PAGE_SIZE, addr != end); |
599 | } | 599 | } |
600 | 600 | ||
601 | static void __init alloc_init_section(pud_t *pud, unsigned long addr, | 601 | static void __init map_init_section(pmd_t *pmd, unsigned long addr, |
602 | unsigned long end, phys_addr_t phys, | 602 | unsigned long end, phys_addr_t phys, |
603 | const struct mem_type *type) | 603 | const struct mem_type *type) |
604 | { | 604 | { |
605 | pmd_t *pmd = pmd_offset(pud, addr); | 605 | #ifndef CONFIG_ARM_LPAE |
606 | |||
607 | /* | 606 | /* |
608 | * Try a section mapping - end, addr and phys must all be aligned | 607 | * In classic MMU format, puds and pmds are folded in to |
609 | * to a section boundary. Note that PMDs refer to the individual | 608 | * the pgds. pmd_offset gives the PGD entry. PGDs refer to a |
610 | * L1 entries, whereas PGDs refer to a group of L1 entries making | 609 | * group of L1 entries making up one logical pointer to |
611 | * up one logical pointer to an L2 table. | 610 | * an L2 table (2MB), where as PMDs refer to the individual |
611 | * L1 entries (1MB). Hence increment to get the correct | ||
612 | * offset for odd 1MB sections. | ||
613 | * (See arch/arm/include/asm/pgtable-2level.h) | ||
612 | */ | 614 | */ |
613 | if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) { | 615 | if (addr & SECTION_SIZE) |
614 | pmd_t *p = pmd; | 616 | pmd++; |
615 | |||
616 | #ifndef CONFIG_ARM_LPAE | ||
617 | if (addr & SECTION_SIZE) | ||
618 | pmd++; | ||
619 | #endif | 617 | #endif |
618 | do { | ||
619 | *pmd = __pmd(phys | type->prot_sect); | ||
620 | phys += SECTION_SIZE; | ||
621 | } while (pmd++, addr += SECTION_SIZE, addr != end); | ||
620 | 622 | ||
621 | do { | 623 | flush_pmd_entry(pmd); |
622 | *pmd = __pmd(phys | type->prot_sect); | 624 | } |
623 | phys += SECTION_SIZE; | ||
624 | } while (pmd++, addr += SECTION_SIZE, addr != end); | ||
625 | 625 | ||
626 | flush_pmd_entry(p); | 626 | static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, |
627 | } else { | 627 | unsigned long end, phys_addr_t phys, |
628 | const struct mem_type *type) | ||
629 | { | ||
630 | pmd_t *pmd = pmd_offset(pud, addr); | ||
631 | unsigned long next; | ||
632 | |||
633 | do { | ||
628 | /* | 634 | /* |
629 | * No need to loop; pte's aren't interested in the | 635 | * With LPAE, we must loop over to map |
630 | * individual L1 entries. | 636 | * all the pmds for the given range. |
631 | */ | 637 | */ |
632 | alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); | 638 | next = pmd_addr_end(addr, end); |
633 | } | 639 | |
640 | /* | ||
641 | * Try a section mapping - addr, next and phys must all be | ||
642 | * aligned to a section boundary. | ||
643 | */ | ||
644 | if (type->prot_sect && | ||
645 | ((addr | next | phys) & ~SECTION_MASK) == 0) { | ||
646 | map_init_section(pmd, addr, next, phys, type); | ||
647 | } else { | ||
648 | alloc_init_pte(pmd, addr, next, | ||
649 | __phys_to_pfn(phys), type); | ||
650 | } | ||
651 | |||
652 | phys += next - addr; | ||
653 | |||
654 | } while (pmd++, addr = next, addr != end); | ||
634 | } | 655 | } |
635 | 656 | ||
636 | static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, | 657 | static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, |
@@ -641,7 +662,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, | |||
641 | 662 | ||
642 | do { | 663 | do { |
643 | next = pud_addr_end(addr, end); | 664 | next = pud_addr_end(addr, end); |
644 | alloc_init_section(pud, addr, next, phys, type); | 665 | alloc_init_pmd(pud, addr, next, phys, type); |
645 | phys += next - addr; | 666 | phys += next - addr; |
646 | } while (pud++, addr = next, addr != end); | 667 | } while (pud++, addr = next, addr != end); |
647 | } | 668 | } |
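The rewritten alloc_init_pmd() above walks the range one pmd at a time and clamps every step to the requested end with pmd_addr_end(), so the classic 2-level and the LPAE 3-level layouts share the same loop. A small userspace sketch of that boundary-clamping walk (the 2 MiB step is chosen only for illustration, standing in for PMD_SIZE):

	#include <stdio.h>

	#define STEP		(2UL << 20)
	#define STEP_MASK	(~(STEP - 1))

	/* Next step boundary, clamped to 'end' -- the pmd_addr_end() idea. */
	static unsigned long step_addr_end(unsigned long addr, unsigned long end)
	{
		unsigned long next = (addr + STEP) & STEP_MASK;

		return (next - 1 < end - 1) ? next : end;
	}

	int main(void)
	{
		unsigned long addr = 0x100000, end = 0x900000, next;

		do {
			next = step_addr_end(addr, end);
			printf("map [%#lx, %#lx)\n", addr, next);
			addr = next;
		} while (addr != end);
		return 0;
	}
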
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 3a3c015f8d5c..f584d3f5b37c 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -420,7 +420,7 @@ __v7_pj4b_proc_info: | |||
420 | __v7_ca7mp_proc_info: | 420 | __v7_ca7mp_proc_info: |
421 | .long 0x410fc070 | 421 | .long 0x410fc070 |
422 | .long 0xff0ffff0 | 422 | .long 0xff0ffff0 |
423 | __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV | 423 | __v7_proc __v7_ca7mp_setup |
424 | .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info | 424 | .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info |
425 | 425 | ||
426 | /* | 426 | /* |
@@ -430,10 +430,25 @@ __v7_ca7mp_proc_info: | |||
430 | __v7_ca15mp_proc_info: | 430 | __v7_ca15mp_proc_info: |
431 | .long 0x410fc0f0 | 431 | .long 0x410fc0f0 |
432 | .long 0xff0ffff0 | 432 | .long 0xff0ffff0 |
433 | __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV | 433 | __v7_proc __v7_ca15mp_setup |
434 | .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info | 434 | .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info |
435 | 435 | ||
436 | /* | 436 | /* |
437 | * Qualcomm Inc. Krait processors. | ||
438 | */ | ||
439 | .type __krait_proc_info, #object | ||
440 | __krait_proc_info: | ||
441 | .long 0x510f0400 @ Required ID value | ||
442 | .long 0xff0ffc00 @ Mask for ID | ||
443 | /* | ||
444 | * Some Krait processors don't indicate support for SDIV and UDIV | ||
445 | * instructions in the ARM instruction set, even though they actually | ||
446 | * do support them. | ||
447 | */ | ||
448 | __v7_proc __v7_setup, hwcaps = HWCAP_IDIV | ||
449 | .size __krait_proc_info, . - __krait_proc_info | ||
450 | |||
451 | /* | ||
437 | * Match any ARMv7 processor core. | 452 | * Match any ARMv7 processor core. |
438 | */ | 453 | */ |
439 | .type __v7_proc_info, #object | 454 | .type __v7_proc_info, #object |
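The Krait entry above forces HWCAP_IDIV into the hwcaps even though the CPU's ID registers do not advertise the divide instructions. Userspace can observe the result through the ELF auxiliary vector; a small sketch, with the IDIVA/IDIVT bit positions taken from the ARM uapi hwcap header of this era (treat them as assumptions here):

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP_IDIVA
	#define HWCAP_IDIVA	(1UL << 17)	/* SDIV/UDIV available in ARM state */
	#endif
	#ifndef HWCAP_IDIVT
	#define HWCAP_IDIVT	(1UL << 18)	/* SDIV/UDIV available in Thumb state */
	#endif

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		printf("idiv: arm=%d thumb=%d\n",
		       !!(hwcap & HWCAP_IDIVA), !!(hwcap & HWCAP_IDIVT));
		return 0;
	}
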
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 6828ef6ce80e..a0bd8a755bdf 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c | |||
@@ -576,7 +576,7 @@ load_ind: | |||
576 | /* x = ((*(frame + k)) & 0xf) << 2; */ | 576 | /* x = ((*(frame + k)) & 0xf) << 2; */ |
577 | ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; | 577 | ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; |
578 | /* the interpreter should deal with the negative K */ | 578 | /* the interpreter should deal with the negative K */ |
579 | if (k < 0) | 579 | if ((int)k < 0) |
580 | return -1; | 580 | return -1; |
581 | /* offset in r1: we might have to take the slow path */ | 581 | /* offset in r1: we might have to take the slow path */ |
582 | emit_mov_i(r_off, k, ctx); | 582 | emit_mov_i(r_off, k, ctx); |
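The cast added above matters because k is an unsigned 32-bit value, so the original "k < 0" test could never be true and negative (ancillary) offsets slipped past the check. A minimal illustration of the pitfall:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t k = 0xfffff000u;	/* bit pattern of a negative classic-BPF offset */

		printf("k < 0       -> %d\n", k < 0);		/* 0: never true for unsigned */
		printf("(int)k < 0  -> %d\n", (int32_t)k < 0);	/* 1: the sign is honoured */
		return 0;
	}
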
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index fd70a68387eb..9b6d19f74078 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -9,7 +9,6 @@ config ARM64 | |||
9 | select CLONE_BACKWARDS | 9 | select CLONE_BACKWARDS |
10 | select COMMON_CLK | 10 | select COMMON_CLK |
11 | select GENERIC_CLOCKEVENTS | 11 | select GENERIC_CLOCKEVENTS |
12 | select GENERIC_HARDIRQS_NO_DEPRECATED | ||
13 | select GENERIC_IOMAP | 12 | select GENERIC_IOMAP |
14 | select GENERIC_IRQ_PROBE | 13 | select GENERIC_IRQ_PROBE |
15 | select GENERIC_IRQ_SHOW | 14 | select GENERIC_IRQ_SHOW |
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug index 51493430f142..1a6bfe954d49 100644 --- a/arch/arm64/Kconfig.debug +++ b/arch/arm64/Kconfig.debug | |||
@@ -6,17 +6,6 @@ config FRAME_POINTER | |||
6 | bool | 6 | bool |
7 | default y | 7 | default y |
8 | 8 | ||
9 | config DEBUG_ERRORS | ||
10 | bool "Verbose kernel error messages" | ||
11 | depends on DEBUG_KERNEL | ||
12 | help | ||
13 | This option controls verbose debugging information which can be | ||
14 | printed when the kernel detects an internal error. This debugging | ||
15 | information is useful to kernel hackers when tracking down problems, | ||
16 | but mostly meaningless to other people. It's safe to say Y unless | ||
17 | you are concerned with the code size or don't want to see these | ||
18 | messages. | ||
19 | |||
20 | config DEBUG_STACK_USAGE | 9 | config DEBUG_STACK_USAGE |
21 | bool "Enable stack utilization instrumentation" | 10 | bool "Enable stack utilization instrumentation" |
22 | depends on DEBUG_KERNEL | 11 | depends on DEBUG_KERNEL |
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 9212c7880da7..09bef29f3a09 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig | |||
@@ -82,4 +82,3 @@ CONFIG_DEBUG_KERNEL=y | |||
82 | CONFIG_DEBUG_INFO=y | 82 | CONFIG_DEBUG_INFO=y |
83 | # CONFIG_FTRACE is not set | 83 | # CONFIG_FTRACE is not set |
84 | CONFIG_ATOMIC64_SELFTEST=y | 84 | CONFIG_ATOMIC64_SELFTEST=y |
85 | CONFIG_DEBUG_ERRORS=y | ||
diff --git a/arch/arm64/include/asm/ucontext.h b/arch/arm64/include/asm/ucontext.h index bde960720892..42e04c877428 100644 --- a/arch/arm64/include/asm/ucontext.h +++ b/arch/arm64/include/asm/ucontext.h | |||
@@ -22,7 +22,7 @@ struct ucontext { | |||
22 | stack_t uc_stack; | 22 | stack_t uc_stack; |
23 | sigset_t uc_sigmask; | 23 | sigset_t uc_sigmask; |
24 | /* glibc uses a 1024-bit sigset_t */ | 24 | /* glibc uses a 1024-bit sigset_t */ |
25 | __u8 __unused[(1024 - sizeof(sigset_t)) / 8]; | 25 | __u8 __unused[1024 / 8 - sizeof(sigset_t)]; |
26 | /* last for future expansion */ | 26 | /* last for future expansion */ |
27 | struct sigcontext uc_mcontext; | 27 | struct sigcontext uc_mcontext; |
28 | }; | 28 | }; |
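The new padding expression reserves exactly what a 1024-bit glibc sigset_t needs; the old one subtracted a byte count from a bit count before dividing. With the kernel's 8-byte sigset_t (64 signal bits) the difference is easy to check:

	#include <assert.h>
	#include <stddef.h>

	int main(void)
	{
		size_t sigset_bytes = 8;			/* sizeof(sigset_t) on arm64 */

		size_t old_pad = (1024 - sigset_bytes) / 8;	/* 127: mixes bits and bytes */
		size_t new_pad = 1024 / 8 - sigset_bytes;	/* 120 */

		assert(sigset_bytes + old_pad == 135);	/* overshoots the 128-byte layout */
		assert(sigset_bytes + new_pad == 128);	/* exactly 1024 bits, as glibc expects */
		return 0;
	}
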
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index cef3925eaf60..aa3e948f7885 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c | |||
@@ -40,7 +40,9 @@ EXPORT_SYMBOL(__copy_to_user); | |||
40 | EXPORT_SYMBOL(__clear_user); | 40 | EXPORT_SYMBOL(__clear_user); |
41 | 41 | ||
42 | /* bitops */ | 42 | /* bitops */ |
43 | #ifdef CONFIG_SMP | ||
43 | EXPORT_SYMBOL(__atomic_hash); | 44 | EXPORT_SYMBOL(__atomic_hash); |
45 | #endif | ||
44 | 46 | ||
45 | /* physical memory */ | 47 | /* physical memory */ |
46 | EXPORT_SYMBOL(memstart_addr); | 48 | EXPORT_SYMBOL(memstart_addr); |
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 7f4f3673f2bc..e393174fe859 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c | |||
@@ -549,7 +549,6 @@ int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, | |||
549 | sigset_t *set, struct pt_regs *regs) | 549 | sigset_t *set, struct pt_regs *regs) |
550 | { | 550 | { |
551 | struct compat_rt_sigframe __user *frame; | 551 | struct compat_rt_sigframe __user *frame; |
552 | compat_stack_t stack; | ||
553 | int err = 0; | 552 | int err = 0; |
554 | 553 | ||
555 | frame = compat_get_sigframe(ka, regs, sizeof(*frame)); | 554 | frame = compat_get_sigframe(ka, regs, sizeof(*frame)); |
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 224b44ab534e..70b8cd4021c4 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -261,7 +261,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt, | |||
261 | void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt) | 261 | void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt) |
262 | { | 262 | { |
263 | unsigned long size, mask; | 263 | unsigned long size, mask; |
264 | bool page64k = IS_ENABLED(ARM64_64K_PAGES); | 264 | bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES); |
265 | pgd_t *pgd; | 265 | pgd_t *pgd; |
266 | pud_t *pud; | 266 | pud_t *pud; |
267 | pmd_t *pmd; | 267 | pmd_t *pmd; |
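The one-token fix above works because IS_ENABLED() only recognises options whose CONFIG_ macro is actually defined; handing it the bare symbol ARM64_64K_PAGES, which is never defined, silently evaluates to 0, so the 64K-pages path was never taken. A simplified userspace reimplementation of the trick (builtin case only), for illustration:

	#include <stdio.h>

	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define __is_defined(x)		___is_defined(x)
	#define ___is_defined(val)	____is_defined(__ARG_PLACEHOLDER_##val)
	#define ____is_defined(junk)	__take_second_arg(junk 1, 0)
	#define IS_ENABLED(option)	__is_defined(option)

	#define CONFIG_ARM64_64K_PAGES 1	/* what Kconfig emits when the option is set */

	int main(void)
	{
		printf("%d\n", IS_ENABLED(CONFIG_ARM64_64K_PAGES));	/* 1 */
		printf("%d\n", IS_ENABLED(ARM64_64K_PAGES));		/* 0: the typo disables it */
		return 0;
	}
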
diff --git a/arch/c6x/include/asm/irqflags.h b/arch/c6x/include/asm/irqflags.h index cf78e09e18c3..2c71d5634ec2 100644 --- a/arch/c6x/include/asm/irqflags.h +++ b/arch/c6x/include/asm/irqflags.h | |||
@@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void) | |||
27 | /* set interrupt enabled status */ | 27 | /* set interrupt enabled status */ |
28 | static inline void arch_local_irq_restore(unsigned long flags) | 28 | static inline void arch_local_irq_restore(unsigned long flags) |
29 | { | 29 | { |
30 | asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags)); | 30 | asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory"); |
31 | } | 31 | } |
32 | 32 | ||
33 | /* unconditionally enable interrupts */ | 33 | /* unconditionally enable interrupts */ |
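The "memory" clobber added above tells the compiler the asm may read or write memory, so values cached in registers must be flushed and memory accesses cannot be reordered across the interrupt-state change. The same idiom gives the kernel its plain compiler barrier; a minimal sketch:

	/* No instructions are emitted; the "memory" clobber alone stops the
	 * compiler from caching 'flag' in a register across iterations. */
	#define barrier()	asm volatile("" ::: "memory")

	static int flag;

	void wait_for_flag(void)
	{
		while (!flag)
			barrier();	/* without it the load may be hoisted and the loop spin forever */
	}
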
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index 77597e5ea60a..79521d5499f9 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c | |||
@@ -849,17 +849,6 @@ static palinfo_entry_t palinfo_entries[]={ | |||
849 | 849 | ||
850 | #define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries) | 850 | #define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries) |
851 | 851 | ||
852 | /* | ||
853 | * this array is used to keep track of the proc entries we create. This is | ||
854 | * required in the module mode when we need to remove all entries. The procfs code | ||
855 | * does not do recursion of deletion | ||
856 | * | ||
857 | * Notes: | ||
858 | * - +1 accounts for the cpuN directory entry in /proc/pal | ||
859 | */ | ||
860 | #define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1)) | ||
861 | |||
862 | static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES]; | ||
863 | static struct proc_dir_entry *palinfo_dir; | 852 | static struct proc_dir_entry *palinfo_dir; |
864 | 853 | ||
865 | /* | 854 | /* |
@@ -971,60 +960,32 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi | |||
971 | static void __cpuinit | 960 | static void __cpuinit |
972 | create_palinfo_proc_entries(unsigned int cpu) | 961 | create_palinfo_proc_entries(unsigned int cpu) |
973 | { | 962 | { |
974 | # define CPUSTR "cpu%d" | ||
975 | |||
976 | pal_func_cpu_u_t f; | 963 | pal_func_cpu_u_t f; |
977 | struct proc_dir_entry **pdir; | ||
978 | struct proc_dir_entry *cpu_dir; | 964 | struct proc_dir_entry *cpu_dir; |
979 | int j; | 965 | int j; |
980 | char cpustr[sizeof(CPUSTR)]; | 966 | char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ |
981 | 967 | sprintf(cpustr, "cpu%d", cpu); | |
982 | |||
983 | /* | ||
984 | * we keep track of created entries in a depth-first order for | ||
985 | * cleanup purposes. Each entry is stored into palinfo_proc_entries | ||
986 | */ | ||
987 | sprintf(cpustr,CPUSTR, cpu); | ||
988 | 968 | ||
989 | cpu_dir = proc_mkdir(cpustr, palinfo_dir); | 969 | cpu_dir = proc_mkdir(cpustr, palinfo_dir); |
970 | if (!cpu_dir) | ||
971 | return; | ||
990 | 972 | ||
991 | f.req_cpu = cpu; | 973 | f.req_cpu = cpu; |
992 | 974 | ||
993 | /* | ||
994 | * Compute the location to store per cpu entries | ||
995 | * We dont store the top level entry in this list, but | ||
996 | * remove it finally after removing all cpu entries. | ||
997 | */ | ||
998 | pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)]; | ||
999 | *pdir++ = cpu_dir; | ||
1000 | for (j=0; j < NR_PALINFO_ENTRIES; j++) { | 975 | for (j=0; j < NR_PALINFO_ENTRIES; j++) { |
1001 | f.func_id = j; | 976 | f.func_id = j; |
1002 | *pdir = create_proc_read_entry( | 977 | create_proc_read_entry( |
1003 | palinfo_entries[j].name, 0, cpu_dir, | 978 | palinfo_entries[j].name, 0, cpu_dir, |
1004 | palinfo_read_entry, (void *)f.value); | 979 | palinfo_read_entry, (void *)f.value); |
1005 | pdir++; | ||
1006 | } | 980 | } |
1007 | } | 981 | } |
1008 | 982 | ||
1009 | static void | 983 | static void |
1010 | remove_palinfo_proc_entries(unsigned int hcpu) | 984 | remove_palinfo_proc_entries(unsigned int hcpu) |
1011 | { | 985 | { |
1012 | int j; | 986 | char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */ |
1013 | struct proc_dir_entry *cpu_dir, **pdir; | 987 | sprintf(cpustr, "cpu%d", hcpu); |
1014 | 988 | remove_proc_subtree(cpustr, palinfo_dir); | |
1015 | pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)]; | ||
1016 | cpu_dir = *pdir; | ||
1017 | *pdir++=NULL; | ||
1018 | for (j=0; j < (NR_PALINFO_ENTRIES); j++) { | ||
1019 | if ((*pdir)) { | ||
1020 | remove_proc_entry ((*pdir)->name, cpu_dir); | ||
1021 | *pdir ++= NULL; | ||
1022 | } | ||
1023 | } | ||
1024 | |||
1025 | if (cpu_dir) { | ||
1026 | remove_proc_entry(cpu_dir->name, palinfo_dir); | ||
1027 | } | ||
1028 | } | 989 | } |
1029 | 990 | ||
1030 | static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, | 991 | static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, |
@@ -1058,6 +1019,8 @@ palinfo_init(void) | |||
1058 | 1019 | ||
1059 | printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION); | 1020 | printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION); |
1060 | palinfo_dir = proc_mkdir("pal", NULL); | 1021 | palinfo_dir = proc_mkdir("pal", NULL); |
1022 | if (!palinfo_dir) | ||
1023 | return -ENOMEM; | ||
1061 | 1024 | ||
1062 | /* Create palinfo dirs in /proc for all online cpus */ | 1025 | /* Create palinfo dirs in /proc for all online cpus */ |
1063 | for_each_online_cpu(i) { | 1026 | for_each_online_cpu(i) { |
@@ -1073,22 +1036,8 @@ palinfo_init(void) | |||
1073 | static void __exit | 1036 | static void __exit |
1074 | palinfo_exit(void) | 1037 | palinfo_exit(void) |
1075 | { | 1038 | { |
1076 | int i = 0; | ||
1077 | |||
1078 | /* remove all nodes: depth first pass. Could optimize this */ | ||
1079 | for_each_online_cpu(i) { | ||
1080 | remove_palinfo_proc_entries(i); | ||
1081 | } | ||
1082 | |||
1083 | /* | ||
1084 | * Remove the top level entry finally | ||
1085 | */ | ||
1086 | remove_proc_entry(palinfo_dir->name, NULL); | ||
1087 | |||
1088 | /* | ||
1089 | * Unregister from cpu notifier callbacks | ||
1090 | */ | ||
1091 | unregister_hotcpu_notifier(&palinfo_cpu_notifier); | 1039 | unregister_hotcpu_notifier(&palinfo_cpu_notifier); |
1040 | remove_proc_subtree("pal", NULL); | ||
1092 | } | 1041 | } |
1093 | 1042 | ||
1094 | module_init(palinfo_init); | 1043 | module_init(palinfo_init); |
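The palinfo conversion above can throw away its bookkeeping array because remove_proc_subtree() deletes a /proc directory together with everything beneath it. A minimal module-style sketch of the same create/teardown pairing (directory names are made up for illustration):

	#include <linux/module.h>
	#include <linux/proc_fs.h>

	static struct proc_dir_entry *demo_dir;

	static int __init demo_init(void)
	{
		demo_dir = proc_mkdir("demo", NULL);		/* /proc/demo */
		if (!demo_dir)
			return -ENOMEM;

		if (!proc_mkdir("cpu0", demo_dir)) {		/* /proc/demo/cpu0 */
			remove_proc_subtree("demo", NULL);
			return -ENOMEM;
		}
		return 0;
	}

	static void __exit demo_exit(void)
	{
		remove_proc_subtree("demo", NULL);	/* one call removes the whole tree */
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
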
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index e34f565f595a..6f7dc8b7b35c 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -291,7 +291,6 @@ cpu_idle (void) | |||
291 | } | 291 | } |
292 | 292 | ||
293 | if (!need_resched()) { | 293 | if (!need_resched()) { |
294 | void (*idle)(void); | ||
295 | #ifdef CONFIG_SMP | 294 | #ifdef CONFIG_SMP |
296 | min_xtp(); | 295 | min_xtp(); |
297 | #endif | 296 | #endif |
@@ -299,9 +298,7 @@ cpu_idle (void) | |||
299 | if (mark_idle) | 298 | if (mark_idle) |
300 | (*mark_idle)(1); | 299 | (*mark_idle)(1); |
301 | 300 | ||
302 | if (!idle) | 301 | default_idle(); |
303 | idle = default_idle; | ||
304 | (*idle)(); | ||
305 | if (mark_idle) | 302 | if (mark_idle) |
306 | (*mark_idle)(0); | 303 | (*mark_idle)(0); |
307 | #ifdef CONFIG_SMP | 304 | #ifdef CONFIG_SMP |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index cd2e21ff562a..51244bf97271 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -18,7 +18,7 @@ config MIPS | |||
18 | select HAVE_KRETPROBES | 18 | select HAVE_KRETPROBES |
19 | select HAVE_DEBUG_KMEMLEAK | 19 | select HAVE_DEBUG_KMEMLEAK |
20 | select ARCH_BINFMT_ELF_RANDOMIZE_PIE | 20 | select ARCH_BINFMT_ELF_RANDOMIZE_PIE |
21 | select HAVE_ARCH_TRANSPARENT_HUGEPAGE | 21 | select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT |
22 | select RTC_LIB if !MACH_LOONGSON | 22 | select RTC_LIB if !MACH_LOONGSON |
23 | select GENERIC_ATOMIC64 if !64BIT | 23 | select GENERIC_ATOMIC64 if !64BIT |
24 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | 24 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
@@ -657,7 +657,7 @@ config SNI_RM | |||
657 | bool "SNI RM200/300/400" | 657 | bool "SNI RM200/300/400" |
658 | select FW_ARC if CPU_LITTLE_ENDIAN | 658 | select FW_ARC if CPU_LITTLE_ENDIAN |
659 | select FW_ARC32 if CPU_LITTLE_ENDIAN | 659 | select FW_ARC32 if CPU_LITTLE_ENDIAN |
660 | select SNIPROM if CPU_BIG_ENDIAN | 660 | select FW_SNIPROM if CPU_BIG_ENDIAN |
661 | select ARCH_MAY_HAVE_PC_FDC | 661 | select ARCH_MAY_HAVE_PC_FDC |
662 | select BOOT_ELF32 | 662 | select BOOT_ELF32 |
663 | select CEVT_R4K | 663 | select CEVT_R4K |
@@ -1144,7 +1144,7 @@ config DEFAULT_SGI_PARTITION | |||
1144 | config FW_ARC32 | 1144 | config FW_ARC32 |
1145 | bool | 1145 | bool |
1146 | 1146 | ||
1147 | config SNIPROM | 1147 | config FW_SNIPROM |
1148 | bool | 1148 | bool |
1149 | 1149 | ||
1150 | config BOOT_ELF32 | 1150 | config BOOT_ELF32 |
@@ -1493,7 +1493,6 @@ config CPU_XLP | |||
1493 | select CPU_SUPPORTS_32BIT_KERNEL | 1493 | select CPU_SUPPORTS_32BIT_KERNEL |
1494 | select CPU_SUPPORTS_64BIT_KERNEL | 1494 | select CPU_SUPPORTS_64BIT_KERNEL |
1495 | select CPU_SUPPORTS_HIGHMEM | 1495 | select CPU_SUPPORTS_HIGHMEM |
1496 | select CPU_HAS_LLSC | ||
1497 | select WEAK_ORDERING | 1496 | select WEAK_ORDERING |
1498 | select WEAK_REORDERING_BEYOND_LLSC | 1497 | select WEAK_REORDERING_BEYOND_LLSC |
1499 | select CPU_HAS_PREFETCH | 1498 | select CPU_HAS_PREFETCH |
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c index ed1949c29508..9aa7d44898ed 100644 --- a/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c | |||
@@ -745,10 +745,7 @@ void __init board_prom_init(void) | |||
745 | strcpy(cfe_version, "unknown"); | 745 | strcpy(cfe_version, "unknown"); |
746 | printk(KERN_INFO PFX "CFE version: %s\n", cfe_version); | 746 | printk(KERN_INFO PFX "CFE version: %s\n", cfe_version); |
747 | 747 | ||
748 | if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) { | 748 | bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET); |
749 | printk(KERN_ERR PFX "invalid nvram checksum\n"); | ||
750 | return; | ||
751 | } | ||
752 | 749 | ||
753 | board_name = bcm63xx_nvram_get_name(); | 750 | board_name = bcm63xx_nvram_get_name(); |
754 | /* find board by name */ | 751 | /* find board by name */ |
diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c index 620611680839..a4b8864f9307 100644 --- a/arch/mips/bcm63xx/nvram.c +++ b/arch/mips/bcm63xx/nvram.c | |||
@@ -38,7 +38,7 @@ struct bcm963xx_nvram { | |||
38 | static struct bcm963xx_nvram nvram; | 38 | static struct bcm963xx_nvram nvram; |
39 | static int mac_addr_used; | 39 | static int mac_addr_used; |
40 | 40 | ||
41 | int __init bcm63xx_nvram_init(void *addr) | 41 | void __init bcm63xx_nvram_init(void *addr) |
42 | { | 42 | { |
43 | unsigned int check_len; | 43 | unsigned int check_len; |
44 | u32 crc, expected_crc; | 44 | u32 crc, expected_crc; |
@@ -60,9 +60,8 @@ int __init bcm63xx_nvram_init(void *addr) | |||
60 | crc = crc32_le(~0, (u8 *)&nvram, check_len); | 60 | crc = crc32_le(~0, (u8 *)&nvram, check_len); |
61 | 61 | ||
62 | if (crc != expected_crc) | 62 | if (crc != expected_crc) |
63 | return -EINVAL; | 63 | pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n", |
64 | 64 | expected_crc, crc); | |
65 | return 0; | ||
66 | } | 65 | } |
67 | 66 | ||
68 | u8 *bcm63xx_nvram_get_name(void) | 67 | u8 *bcm63xx_nvram_get_name(void) |
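
The nvram hunk above turns a hard -EINVAL on checksum mismatch into a warning so boot can continue with possibly stale contents. A runnable userspace sketch of the same warn-don't-fail pattern; the additive checksum stands in for the kernel's crc32_le() and the structure layout is purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical NVRAM image: payload plus a stored checksum. */
struct nvram_image {
	uint8_t  payload[16];
	uint32_t stored_sum;
};

static uint32_t checksum(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	while (len--)
		sum += *p++;
	return sum;
}

/* Warn-don't-fail: a mismatch is reported but the copy stays usable,
 * mirroring the bcm63xx_nvram_init() change from int to void above.
 */
static void nvram_init(const struct nvram_image *img)
{
	uint32_t sum = checksum(img->payload, sizeof(img->payload));

	if (sum != img->stored_sum)
		fprintf(stderr,
			"nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
			img->stored_sum, sum);
}

int main(void)
{
	struct nvram_image img = { .payload = "board-name", .stored_sum = 0 };

	nvram_init(&img);	/* prints the warning, then carries on */
	printf("board: %s\n", (const char *)img.payload);
	return 0;
}
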
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c index 314231be788c..35e18e98beb9 100644 --- a/arch/mips/bcm63xx/setup.c +++ b/arch/mips/bcm63xx/setup.c | |||
@@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void) | |||
157 | return board_register_devices(); | 157 | return board_register_devices(); |
158 | } | 158 | } |
159 | 159 | ||
160 | device_initcall(bcm63xx_register_devices); | 160 | arch_initcall(bcm63xx_register_devices); |
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index c594a3d4f743..b0baa299f899 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c | |||
@@ -174,7 +174,10 @@ static int octeon_kexec_prepare(struct kimage *image) | |||
174 | 174 | ||
175 | static void octeon_generic_shutdown(void) | 175 | static void octeon_generic_shutdown(void) |
176 | { | 176 | { |
177 | int cpu, i; | 177 | int i; |
178 | #ifdef CONFIG_SMP | ||
179 | int cpu; | ||
180 | #endif | ||
178 | struct cvmx_bootmem_desc *bootmem_desc; | 181 | struct cvmx_bootmem_desc *bootmem_desc; |
179 | void *named_block_array_ptr; | 182 | void *named_block_array_ptr; |
180 | 183 | ||
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h index 62d6a3b4d3b7..4e0b6bc1165e 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h | |||
@@ -9,10 +9,8 @@ | |||
9 | * | 9 | * |
10 | * Initialized the local nvram copy from the target address and checks | 10 | * Initialized the local nvram copy from the target address and checks |
11 | * its checksum. | 11 | * its checksum. |
12 | * | ||
13 | * Returns 0 on success. | ||
14 | */ | 12 | */ |
15 | int __init bcm63xx_nvram_init(void *nvram); | 13 | void bcm63xx_nvram_init(void *nvram); |
16 | 14 | ||
17 | /** | 15 | /** |
18 | * bcm63xx_nvram_get_name() - returns the board name according to nvram | 16 | * bcm63xx_nvram_get_name() - returns the board name according to nvram |
diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h index d9c828419037..193c0912d38e 100644 --- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h +++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h | |||
@@ -28,11 +28,7 @@ | |||
28 | /* #define cpu_has_prefetch ? */ | 28 | /* #define cpu_has_prefetch ? */ |
29 | #define cpu_has_mcheck 1 | 29 | #define cpu_has_mcheck 1 |
30 | /* #define cpu_has_ejtag ? */ | 30 | /* #define cpu_has_ejtag ? */ |
31 | #ifdef CONFIG_CPU_HAS_LLSC | ||
32 | #define cpu_has_llsc 1 | 31 | #define cpu_has_llsc 1 |
33 | #else | ||
34 | #define cpu_has_llsc 0 | ||
35 | #endif | ||
36 | /* #define cpu_has_vtag_icache ? */ | 32 | /* #define cpu_has_vtag_icache ? */ |
37 | /* #define cpu_has_dc_aliases ? */ | 33 | /* #define cpu_has_dc_aliases ? */ |
38 | /* #define cpu_has_ic_fills_f_dc ? */ | 34 | /* #define cpu_has_ic_fills_f_dc ? */ |
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 12b70c25906a..0da44d422f5b 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h | |||
@@ -1166,7 +1166,10 @@ do { \ | |||
1166 | unsigned int __dspctl; \ | 1166 | unsigned int __dspctl; \ |
1167 | \ | 1167 | \ |
1168 | __asm__ __volatile__( \ | 1168 | __asm__ __volatile__( \ |
1169 | " .set push \n" \ | ||
1170 | " .set dsp \n" \ | ||
1169 | " rddsp %0, %x1 \n" \ | 1171 | " rddsp %0, %x1 \n" \ |
1172 | " .set pop \n" \ | ||
1170 | : "=r" (__dspctl) \ | 1173 | : "=r" (__dspctl) \ |
1171 | : "i" (mask)); \ | 1174 | : "i" (mask)); \ |
1172 | __dspctl; \ | 1175 | __dspctl; \ |
@@ -1175,30 +1178,198 @@ do { \ | |||
1175 | #define wrdsp(val, mask) \ | 1178 | #define wrdsp(val, mask) \ |
1176 | do { \ | 1179 | do { \ |
1177 | __asm__ __volatile__( \ | 1180 | __asm__ __volatile__( \ |
1181 | " .set push \n" \ | ||
1182 | " .set dsp \n" \ | ||
1178 | " wrdsp %0, %x1 \n" \ | 1183 | " wrdsp %0, %x1 \n" \ |
1184 | " .set pop \n" \ | ||
1179 | : \ | 1185 | : \ |
1180 | : "r" (val), "i" (mask)); \ | 1186 | : "r" (val), "i" (mask)); \ |
1181 | } while (0) | 1187 | } while (0) |
1182 | 1188 | ||
1183 | #define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;}) | 1189 | #define mflo0() \ |
1184 | #define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;}) | 1190 | ({ \ |
1185 | #define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;}) | 1191 | long mflo0; \ |
1186 | #define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;}) | 1192 | __asm__( \ |
1187 | 1193 | " .set push \n" \ | |
1188 | #define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;}) | 1194 | " .set dsp \n" \ |
1189 | #define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;}) | 1195 | " mflo %0, $ac0 \n" \ |
1190 | #define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;}) | 1196 | " .set pop \n" \ |
1191 | #define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;}) | 1197 | : "=r" (mflo0)); \ |
1192 | 1198 | mflo0; \ | |
1193 | #define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x)) | 1199 | }) |
1194 | #define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x)) | 1200 | |
1195 | #define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x)) | 1201 | #define mflo1() \ |
1196 | #define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x)) | 1202 | ({ \ |
1197 | 1203 | long mflo1; \ | |
1198 | #define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x)) | 1204 | __asm__( \ |
1199 | #define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x)) | 1205 | " .set push \n" \ |
1200 | #define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x)) | 1206 | " .set dsp \n" \ |
1201 | #define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x)) | 1207 | " mflo %0, $ac1 \n" \ |
1208 | " .set pop \n" \ | ||
1209 | : "=r" (mflo1)); \ | ||
1210 | mflo1; \ | ||
1211 | }) | ||
1212 | |||
1213 | #define mflo2() \ | ||
1214 | ({ \ | ||
1215 | long mflo2; \ | ||
1216 | __asm__( \ | ||
1217 | " .set push \n" \ | ||
1218 | " .set dsp \n" \ | ||
1219 | " mflo %0, $ac2 \n" \ | ||
1220 | " .set pop \n" \ | ||
1221 | : "=r" (mflo2)); \ | ||
1222 | mflo2; \ | ||
1223 | }) | ||
1224 | |||
1225 | #define mflo3() \ | ||
1226 | ({ \ | ||
1227 | long mflo3; \ | ||
1228 | __asm__( \ | ||
1229 | " .set push \n" \ | ||
1230 | " .set dsp \n" \ | ||
1231 | " mflo %0, $ac3 \n" \ | ||
1232 | " .set pop \n" \ | ||
1233 | : "=r" (mflo3)); \ | ||
1234 | mflo3; \ | ||
1235 | }) | ||
1236 | |||
1237 | #define mfhi0() \ | ||
1238 | ({ \ | ||
1239 | long mfhi0; \ | ||
1240 | __asm__( \ | ||
1241 | " .set push \n" \ | ||
1242 | " .set dsp \n" \ | ||
1243 | " mfhi %0, $ac0 \n" \ | ||
1244 | " .set pop \n" \ | ||
1245 | : "=r" (mfhi0)); \ | ||
1246 | mfhi0; \ | ||
1247 | }) | ||
1248 | |||
1249 | #define mfhi1() \ | ||
1250 | ({ \ | ||
1251 | long mfhi1; \ | ||
1252 | __asm__( \ | ||
1253 | " .set push \n" \ | ||
1254 | " .set dsp \n" \ | ||
1255 | " mfhi %0, $ac1 \n" \ | ||
1256 | " .set pop \n" \ | ||
1257 | : "=r" (mfhi1)); \ | ||
1258 | mfhi1; \ | ||
1259 | }) | ||
1260 | |||
1261 | #define mfhi2() \ | ||
1262 | ({ \ | ||
1263 | long mfhi2; \ | ||
1264 | __asm__( \ | ||
1265 | " .set push \n" \ | ||
1266 | " .set dsp \n" \ | ||
1267 | " mfhi %0, $ac2 \n" \ | ||
1268 | " .set pop \n" \ | ||
1269 | : "=r" (mfhi2)); \ | ||
1270 | mfhi2; \ | ||
1271 | }) | ||
1272 | |||
1273 | #define mfhi3() \ | ||
1274 | ({ \ | ||
1275 | long mfhi3; \ | ||
1276 | __asm__( \ | ||
1277 | " .set push \n" \ | ||
1278 | " .set dsp \n" \ | ||
1279 | " mfhi %0, $ac3 \n" \ | ||
1280 | " .set pop \n" \ | ||
1281 | : "=r" (mfhi3)); \ | ||
1282 | mfhi3; \ | ||
1283 | }) | ||
1284 | |||
1285 | |||
1286 | #define mtlo0(x) \ | ||
1287 | ({ \ | ||
1288 | __asm__( \ | ||
1289 | " .set push \n" \ | ||
1290 | " .set dsp \n" \ | ||
1291 | " mtlo %0, $ac0 \n" \ | ||
1292 | " .set pop \n" \ | ||
1293 | : \ | ||
1294 | : "r" (x)); \ | ||
1295 | }) | ||
1296 | |||
1297 | #define mtlo1(x) \ | ||
1298 | ({ \ | ||
1299 | __asm__( \ | ||
1300 | " .set push \n" \ | ||
1301 | " .set dsp \n" \ | ||
1302 | " mtlo %0, $ac1 \n" \ | ||
1303 | " .set pop \n" \ | ||
1304 | : \ | ||
1305 | : "r" (x)); \ | ||
1306 | }) | ||
1307 | |||
1308 | #define mtlo2(x) \ | ||
1309 | ({ \ | ||
1310 | __asm__( \ | ||
1311 | " .set push \n" \ | ||
1312 | " .set dsp \n" \ | ||
1313 | " mtlo %0, $ac2 \n" \ | ||
1314 | " .set pop \n" \ | ||
1315 | : \ | ||
1316 | : "r" (x)); \ | ||
1317 | }) | ||
1318 | |||
1319 | #define mtlo3(x) \ | ||
1320 | ({ \ | ||
1321 | __asm__( \ | ||
1322 | " .set push \n" \ | ||
1323 | " .set dsp \n" \ | ||
1324 | " mtlo %0, $ac3 \n" \ | ||
1325 | " .set pop \n" \ | ||
1326 | : \ | ||
1327 | : "r" (x)); \ | ||
1328 | }) | ||
1329 | |||
1330 | #define mthi0(x) \ | ||
1331 | ({ \ | ||
1332 | __asm__( \ | ||
1333 | " .set push \n" \ | ||
1334 | " .set dsp \n" \ | ||
1335 | " mthi %0, $ac0 \n" \ | ||
1336 | " .set pop \n" \ | ||
1337 | : \ | ||
1338 | : "r" (x)); \ | ||
1339 | }) | ||
1340 | |||
1341 | #define mthi1(x) \ | ||
1342 | ({ \ | ||
1343 | __asm__( \ | ||
1344 | " .set push \n" \ | ||
1345 | " .set dsp \n" \ | ||
1346 | " mthi %0, $ac1 \n" \ | ||
1347 | " .set pop \n" \ | ||
1348 | : \ | ||
1349 | : "r" (x)); \ | ||
1350 | }) | ||
1351 | |||
1352 | #define mthi2(x) \ | ||
1353 | ({ \ | ||
1354 | __asm__( \ | ||
1355 | " .set push \n" \ | ||
1356 | " .set dsp \n" \ | ||
1357 | " mthi %0, $ac2 \n" \ | ||
1358 | " .set pop \n" \ | ||
1359 | : \ | ||
1360 | : "r" (x)); \ | ||
1361 | }) | ||
1362 | |||
1363 | #define mthi3(x) \ | ||
1364 | ({ \ | ||
1365 | __asm__( \ | ||
1366 | " .set push \n" \ | ||
1367 | " .set dsp \n" \ | ||
1368 | " mthi %0, $ac3 \n" \ | ||
1369 | " .set pop \n" \ | ||
1370 | : \ | ||
1371 | : "r" (x)); \ | ||
1372 | }) | ||
1202 | 1373 | ||
1203 | #else | 1374 | #else |
1204 | 1375 | ||
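
With every accessor above wrapped in .set push / .set dsp / .set pop, callers can use them without the whole translation unit being built for the DSP ASE. A hypothetical save/restore helper pair, purely illustrative (the 0x3f mask value is an assumption, not taken from the kernel):

/* Save and restore DSPControl via the rddsp()/wrdsp() accessors from the
 * hunk above; the mask selects which DSPControl fields to transfer.
 */
static inline unsigned int save_dspcontrol(void)
{
	return rddsp(0x3f);
}

static inline void restore_dspcontrol(unsigned int ctl)
{
	wrdsp(ctl, 0x3f);
}
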
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h index 197f6367c201..8efe5a9e2c3e 100644 --- a/arch/mips/include/asm/signal.h +++ b/arch/mips/include/asm/signal.h | |||
@@ -21,6 +21,6 @@ | |||
21 | #include <asm/sigcontext.h> | 21 | #include <asm/sigcontext.h> |
22 | #include <asm/siginfo.h> | 22 | #include <asm/siginfo.h> |
23 | 23 | ||
24 | #define __ARCH_HAS_ODD_SIGACTION | 24 | #define __ARCH_HAS_IRIX_SIGACTION |
25 | 25 | ||
26 | #endif /* _ASM_SIGNAL_H */ | 26 | #endif /* _ASM_SIGNAL_H */ |
diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h index d6b18b4d0f3a..addb9f556b71 100644 --- a/arch/mips/include/uapi/asm/signal.h +++ b/arch/mips/include/uapi/asm/signal.h | |||
@@ -72,6 +72,12 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ | |||
72 | * | 72 | * |
73 | * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single | 73 | * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single |
74 | * Unix names RESETHAND and NODEFER respectively. | 74 | * Unix names RESETHAND and NODEFER respectively. |
75 | * | ||
76 | * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever | ||
77 | * supported its use and no libc was using it, so the entire sa-restorer | ||
78 | * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48 | ||
79 | * retaining only the SA_RESTORER definition as a reminder to avoid | ||
80 | * accidental reuse of the mask bit. | ||
75 | */ | 81 | */ |
76 | #define SA_ONSTACK 0x08000000 | 82 | #define SA_ONSTACK 0x08000000 |
77 | #define SA_RESETHAND 0x80000000 | 83 | #define SA_RESETHAND 0x80000000 |
@@ -84,8 +90,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ | |||
84 | #define SA_NOMASK SA_NODEFER | 90 | #define SA_NOMASK SA_NODEFER |
85 | #define SA_ONESHOT SA_RESETHAND | 91 | #define SA_ONESHOT SA_RESETHAND |
86 | 92 | ||
87 | #define SA_RESTORER 0x04000000 /* Only for o32 */ | ||
88 | |||
89 | #define MINSIGSTKSZ 2048 | 93 | #define MINSIGSTKSZ 2048 |
90 | #define SIGSTKSZ 8192 | 94 | #define SIGSTKSZ 8192 |
91 | 95 | ||
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index f81d98f6184c..de75fb50562b 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile | |||
@@ -100,29 +100,16 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o | |||
100 | obj-$(CONFIG_JUMP_LABEL) += jump_label.o | 100 | obj-$(CONFIG_JUMP_LABEL) += jump_label.o |
101 | 101 | ||
102 | # | 102 | # |
103 | # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe | 103 | # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not |
104 | # to enable DSP assembler support here even if the MIPS Release 2 CPU we | 104 | # safe to unconditionally use the assembler -mdsp / -mdspr2 switches |
105 | # are targetting does not support DSP because all code-paths making use of | 105 | # here because the compiler may use DSP ASE instructions (such as lwx) in |
106 | # it properly check that the running CPU *actually does* support these | 106 | # code paths where we cannot check that the CPU we are running on supports it. |
107 | # instructions. | 107 | # Proper abstraction using HAVE_AS_DSP and macros is done in |
108 | # arch/mips/include/asm/mipsregs.h. | ||
108 | # | 109 | # |
109 | ifeq ($(CONFIG_CPU_MIPSR2), y) | 110 | ifeq ($(CONFIG_CPU_MIPSR2), y) |
110 | CFLAGS_DSP = -DHAVE_AS_DSP | 111 | CFLAGS_DSP = -DHAVE_AS_DSP |
111 | 112 | ||
112 | # | ||
113 | # Check if assembler supports DSP ASE | ||
114 | # | ||
115 | ifeq ($(call cc-option-yn,-mdsp), y) | ||
116 | CFLAGS_DSP += -mdsp | ||
117 | endif | ||
118 | |||
119 | # | ||
120 | # Check if assembler supports DSP ASE Rev2 | ||
121 | # | ||
122 | ifeq ($(call cc-option-yn,-mdspr2), y) | ||
123 | CFLAGS_DSP += -mdspr2 | ||
124 | endif | ||
125 | |||
126 | CFLAGS_signal.o = $(CFLAGS_DSP) | 113 | CFLAGS_signal.o = $(CFLAGS_DSP) |
127 | CFLAGS_signal32.o = $(CFLAGS_DSP) | 114 | CFLAGS_signal32.o = $(CFLAGS_DSP) |
128 | CFLAGS_process.o = $(CFLAGS_DSP) | 115 | CFLAGS_process.o = $(CFLAGS_DSP) |
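
The Makefile comment above is the rationale: the compiler must never be free to emit DSP instructions on its own, so only HAVE_AS_DSP is defined and the DSP opcodes stay confined to explicit inline assembly. A hypothetical sketch of how such a guard is typically consumed (illustrative names, not the actual mipsregs.h definitions):

#ifdef HAVE_AS_DSP
/* DSP ASE known to the assembler: emit the real instruction inside a
 * .set dsp window so the rest of the file needs no -mdsp flag.
 */
#define read_dsp_control()					\
({								\
	unsigned int __ctl;					\
	__asm__ __volatile__(					\
	"	.set	push		\n"			\
	"	.set	dsp		\n"			\
	"	rddsp	%0, 0x3f	\n"			\
	"	.set	pop		\n"			\
	: "=r" (__ctl));					\
	__ctl;							\
})
#else
/* Built without DSP support: callers must check cpu_has_dsp first. */
#define read_dsp_control()	0
#endif
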
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 6bfccc227a95..5fe66a0c3224 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c | |||
@@ -580,6 +580,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) | |||
580 | c->tlbsize = 48; | 580 | c->tlbsize = 48; |
581 | break; | 581 | break; |
582 | case PRID_IMP_VR41XX: | 582 | case PRID_IMP_VR41XX: |
583 | set_isa(c, MIPS_CPU_ISA_III); | ||
584 | c->options = R4K_OPTS; | ||
585 | c->tlbsize = 32; | ||
583 | switch (c->processor_id & 0xf0) { | 586 | switch (c->processor_id & 0xf0) { |
584 | case PRID_REV_VR4111: | 587 | case PRID_REV_VR4111: |
585 | c->cputype = CPU_VR4111; | 588 | c->cputype = CPU_VR4111; |
@@ -604,6 +607,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) | |||
604 | __cpu_name[cpu] = "NEC VR4131"; | 607 | __cpu_name[cpu] = "NEC VR4131"; |
605 | } else { | 608 | } else { |
606 | c->cputype = CPU_VR4133; | 609 | c->cputype = CPU_VR4133; |
610 | c->options |= MIPS_CPU_LLSC; | ||
607 | __cpu_name[cpu] = "NEC VR4133"; | 611 | __cpu_name[cpu] = "NEC VR4133"; |
608 | } | 612 | } |
609 | break; | 613 | break; |
@@ -613,9 +617,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) | |||
613 | __cpu_name[cpu] = "NEC Vr41xx"; | 617 | __cpu_name[cpu] = "NEC Vr41xx"; |
614 | break; | 618 | break; |
615 | } | 619 | } |
616 | set_isa(c, MIPS_CPU_ISA_III); | ||
617 | c->options = R4K_OPTS; | ||
618 | c->tlbsize = 32; | ||
619 | break; | 620 | break; |
620 | case PRID_IMP_R4300: | 621 | case PRID_IMP_R4300: |
621 | c->cputype = CPU_R4300; | 622 | c->cputype = CPU_R4300; |
@@ -1226,10 +1227,8 @@ __cpuinit void cpu_probe(void) | |||
1226 | if (c->options & MIPS_CPU_FPU) { | 1227 | if (c->options & MIPS_CPU_FPU) { |
1227 | c->fpu_id = cpu_get_fpu_id(); | 1228 | c->fpu_id = cpu_get_fpu_id(); |
1228 | 1229 | ||
1229 | if (c->isa_level == MIPS_CPU_ISA_M32R1 || | 1230 | if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | |
1230 | c->isa_level == MIPS_CPU_ISA_M32R2 || | 1231 | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { |
1231 | c->isa_level == MIPS_CPU_ISA_M64R1 || | ||
1232 | c->isa_level == MIPS_CPU_ISA_M64R2) { | ||
1233 | if (c->fpu_id & MIPS_FPIR_3D) | 1232 | if (c->fpu_id & MIPS_FPIR_3D) |
1234 | c->ases |= MIPS_ASE_MIPS3D; | 1233 | c->ases |= MIPS_ASE_MIPS3D; |
1235 | } | 1234 | } |
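
The cpu-probe.c change above (together with the matching c-r4k.c and sc-mips.c hunks further down) relies on the MIPS_CPU_ISA_* values being single bits, so several ISA levels can be tested with one mask instead of chained equality checks. A small runnable sketch of the idea with hypothetical bit values:

#include <stdio.h>

/* Hypothetical single-bit ISA level encodings; the kernel's real
 * MIPS_CPU_ISA_* constants follow the same one-bit-per-level scheme.
 */
#define ISA_M32R1 0x01
#define ISA_M32R2 0x02
#define ISA_M64R1 0x04
#define ISA_M64R2 0x08

static int is_mips32_or_mips64(unsigned int isa_level)
{
	/* One mask test replaces four == comparisons. */
	return (isa_level & (ISA_M32R1 | ISA_M32R2 |
			     ISA_M64R1 | ISA_M64R2)) != 0;
}

int main(void)
{
	printf("%d\n", is_mips32_or_mips64(ISA_M64R2));	/* 1 */
	printf("%d\n", is_mips32_or_mips64(0x10));	/* 0: some other level */
	return 0;
}
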
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 8eeee1c860c0..db9655f08892 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c | |||
@@ -171,7 +171,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third, | |||
171 | err = compat_sys_shmctl(first, second, compat_ptr(ptr)); | 171 | err = compat_sys_shmctl(first, second, compat_ptr(ptr)); |
172 | break; | 172 | break; |
173 | default: | 173 | default: |
174 | err = -EINVAL; | 174 | err = -ENOSYS; |
175 | break; | 175 | break; |
176 | } | 176 | } |
177 | 177 | ||
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 165867673357..33d067148e61 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S | |||
@@ -46,10 +46,9 @@ | |||
46 | PTR_L a5, PT_R9(sp) | 46 | PTR_L a5, PT_R9(sp) |
47 | PTR_L a6, PT_R10(sp) | 47 | PTR_L a6, PT_R10(sp) |
48 | PTR_L a7, PT_R11(sp) | 48 | PTR_L a7, PT_R11(sp) |
49 | #else | ||
50 | PTR_ADDIU sp, PT_SIZE | ||
51 | #endif | 49 | #endif |
52 | .endm | 50 | PTR_ADDIU sp, PT_SIZE |
51 | .endm | ||
53 | 52 | ||
54 | .macro RETURN_BACK | 53 | .macro RETURN_BACK |
55 | jr ra | 54 | jr ra |
@@ -68,7 +67,11 @@ NESTED(ftrace_caller, PT_SIZE, ra) | |||
68 | .globl _mcount | 67 | .globl _mcount |
69 | _mcount: | 68 | _mcount: |
70 | b ftrace_stub | 69 | b ftrace_stub |
71 | addiu sp,sp,8 | 70 | #ifdef CONFIG_32BIT |
71 | addiu sp,sp,8 | ||
72 | #else | ||
73 | nop | ||
74 | #endif | ||
72 | 75 | ||
73 | /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ | 76 | /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ |
74 | lw t1, function_trace_stop | 77 | lw t1, function_trace_stop |
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 135c4aadccbe..7a54f74b7818 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c | |||
@@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
67 | if (cpu_has_mips_r) { | 67 | if (cpu_has_mips_r) { |
68 | seq_printf(m, "isa\t\t\t:"); | 68 | seq_printf(m, "isa\t\t\t:"); |
69 | if (cpu_has_mips_1) | 69 | if (cpu_has_mips_1) |
70 | seq_printf(m, "%s", "mips1"); | 70 | seq_printf(m, "%s", " mips1"); |
71 | if (cpu_has_mips_2) | 71 | if (cpu_has_mips_2) |
72 | seq_printf(m, "%s", " mips2"); | 72 | seq_printf(m, "%s", " mips2"); |
73 | if (cpu_has_mips_3) | 73 | if (cpu_has_mips_3) |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index a200b5bdbb87..c3abb88170fc 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -1571,7 +1571,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) | |||
1571 | #ifdef CONFIG_64BIT | 1571 | #ifdef CONFIG_64BIT |
1572 | status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; | 1572 | status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; |
1573 | #endif | 1573 | #endif |
1574 | if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV) | 1574 | if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV) |
1575 | status_set |= ST0_XX; | 1575 | status_set |= ST0_XX; |
1576 | if (cpu_has_dsp) | 1576 | if (cpu_has_dsp) |
1577 | status_set |= ST0_MX; | 1577 | status_set |= ST0_MX; |
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c index 81f1dcfdcab8..a64daee740ee 100644 --- a/arch/mips/lib/bitops.c +++ b/arch/mips/lib/bitops.c | |||
@@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr, | |||
90 | unsigned bit = nr & SZLONG_MASK; | 90 | unsigned bit = nr & SZLONG_MASK; |
91 | unsigned long mask; | 91 | unsigned long mask; |
92 | unsigned long flags; | 92 | unsigned long flags; |
93 | unsigned long res; | 93 | int res; |
94 | 94 | ||
95 | a += nr >> SZLONG_LOG; | 95 | a += nr >> SZLONG_LOG; |
96 | mask = 1UL << bit; | 96 | mask = 1UL << bit; |
97 | raw_local_irq_save(flags); | 97 | raw_local_irq_save(flags); |
98 | res = (mask & *a); | 98 | res = (mask & *a) != 0; |
99 | *a |= mask; | 99 | *a |= mask; |
100 | raw_local_irq_restore(flags); | 100 | raw_local_irq_restore(flags); |
101 | return res; | 101 | return res; |
@@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr, | |||
116 | unsigned bit = nr & SZLONG_MASK; | 116 | unsigned bit = nr & SZLONG_MASK; |
117 | unsigned long mask; | 117 | unsigned long mask; |
118 | unsigned long flags; | 118 | unsigned long flags; |
119 | unsigned long res; | 119 | int res; |
120 | 120 | ||
121 | a += nr >> SZLONG_LOG; | 121 | a += nr >> SZLONG_LOG; |
122 | mask = 1UL << bit; | 122 | mask = 1UL << bit; |
123 | raw_local_irq_save(flags); | 123 | raw_local_irq_save(flags); |
124 | res = (mask & *a); | 124 | res = (mask & *a) != 0; |
125 | *a |= mask; | 125 | *a |= mask; |
126 | raw_local_irq_restore(flags); | 126 | raw_local_irq_restore(flags); |
127 | return res; | 127 | return res; |
@@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) | |||
141 | unsigned bit = nr & SZLONG_MASK; | 141 | unsigned bit = nr & SZLONG_MASK; |
142 | unsigned long mask; | 142 | unsigned long mask; |
143 | unsigned long flags; | 143 | unsigned long flags; |
144 | unsigned long res; | 144 | int res; |
145 | 145 | ||
146 | a += nr >> SZLONG_LOG; | 146 | a += nr >> SZLONG_LOG; |
147 | mask = 1UL << bit; | 147 | mask = 1UL << bit; |
148 | raw_local_irq_save(flags); | 148 | raw_local_irq_save(flags); |
149 | res = (mask & *a); | 149 | res = (mask & *a) != 0; |
150 | *a &= ~mask; | 150 | *a &= ~mask; |
151 | raw_local_irq_restore(flags); | 151 | raw_local_irq_restore(flags); |
152 | return res; | 152 | return res; |
@@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr) | |||
166 | unsigned bit = nr & SZLONG_MASK; | 166 | unsigned bit = nr & SZLONG_MASK; |
167 | unsigned long mask; | 167 | unsigned long mask; |
168 | unsigned long flags; | 168 | unsigned long flags; |
169 | unsigned long res; | 169 | int res; |
170 | 170 | ||
171 | a += nr >> SZLONG_LOG; | 171 | a += nr >> SZLONG_LOG; |
172 | mask = 1UL << bit; | 172 | mask = 1UL << bit; |
173 | raw_local_irq_save(flags); | 173 | raw_local_irq_save(flags); |
174 | res = (mask & *a); | 174 | res = (mask & *a) != 0; |
175 | *a ^= mask; | 175 | *a ^= mask; |
176 | raw_local_irq_restore(flags); | 176 | raw_local_irq_restore(flags); |
177 | return res; | 177 | return res; |
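
The bitops.c hunks above change the result from the raw masked word to a 0/1 int because these functions return int: for bit numbers 32 and up, the old `mask & *a` value does not fit in an int and is truncated when returned, typically to 0 on an LP64 target. A runnable sketch of the failure mode and the fix:

#include <stdio.h>

int main(void)
{
	unsigned long word = 1UL << 40;	/* bit 40 is set */
	unsigned long mask = 1UL << 40;

	/* Old style: the unsigned long result is squeezed into an int,
	 * which on LP64 typically keeps only the low 32 (all-zero) bits.
	 */
	int truncated = (int)(mask & word);

	/* New style: normalise to 0 or 1 before the narrowing. */
	int fixed = (mask & word) != 0;

	printf("truncated=%d fixed=%d\n", truncated, fixed);
	return 0;
}
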
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S index 507147aebd41..a6adffbb4e5f 100644 --- a/arch/mips/lib/csum_partial.S +++ b/arch/mips/lib/csum_partial.S | |||
@@ -270,7 +270,7 @@ LEAF(csum_partial) | |||
270 | #endif | 270 | #endif |
271 | 271 | ||
272 | /* odd buffer alignment? */ | 272 | /* odd buffer alignment? */ |
273 | #ifdef CPU_MIPSR2 | 273 | #ifdef CONFIG_CPU_MIPSR2 |
274 | wsbh v1, sum | 274 | wsbh v1, sum |
275 | movn sum, v1, t7 | 275 | movn sum, v1, t7 |
276 | #else | 276 | #else |
@@ -670,7 +670,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc) | |||
670 | addu sum, v1 | 670 | addu sum, v1 |
671 | #endif | 671 | #endif |
672 | 672 | ||
673 | #ifdef CPU_MIPSR2 | 673 | #ifdef CONFIG_CPU_MIPSR2 |
674 | wsbh v1, sum | 674 | wsbh v1, sum |
675 | movn sum, v1, odd | 675 | movn sum, v1, odd |
676 | #else | 676 | #else |
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index ecca559b8d7b..2078915eacb9 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c | |||
@@ -1247,10 +1247,8 @@ static void __cpuinit setup_scache(void) | |||
1247 | return; | 1247 | return; |
1248 | 1248 | ||
1249 | default: | 1249 | default: |
1250 | if (c->isa_level == MIPS_CPU_ISA_M32R1 || | 1250 | if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | |
1251 | c->isa_level == MIPS_CPU_ISA_M32R2 || | 1251 | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { |
1252 | c->isa_level == MIPS_CPU_ISA_M64R1 || | ||
1253 | c->isa_level == MIPS_CPU_ISA_M64R2) { | ||
1254 | #ifdef CONFIG_MIPS_CPU_SCACHE | 1252 | #ifdef CONFIG_MIPS_CPU_SCACHE |
1255 | if (mips_sc_init ()) { | 1253 | if (mips_sc_init ()) { |
1256 | scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; | 1254 | scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; |
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 93d937b4b1ba..df96da7e939b 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c | |||
@@ -98,10 +98,8 @@ static inline int __init mips_sc_probe(void) | |||
98 | c->scache.flags |= MIPS_CACHE_NOT_PRESENT; | 98 | c->scache.flags |= MIPS_CACHE_NOT_PRESENT; |
99 | 99 | ||
100 | /* Ignore anything but MIPSxx processors */ | 100 | /* Ignore anything but MIPSxx processors */ |
101 | if (c->isa_level != MIPS_CPU_ISA_M32R1 && | 101 | if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | |
102 | c->isa_level != MIPS_CPU_ISA_M32R2 && | 102 | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))) |
103 | c->isa_level != MIPS_CPU_ISA_M64R1 && | ||
104 | c->isa_level != MIPS_CPU_ISA_M64R2) | ||
105 | return 0; | 103 | return 0; |
106 | 104 | ||
107 | /* Does this MIPS32/MIPS64 CPU have a config2 register? */ | 105 | /* Does this MIPS32/MIPS64 CPU have a config2 register? */ |
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c index 38a80c83fd67..d1faece21b6a 100644 --- a/arch/mips/pci/pci-alchemy.c +++ b/arch/mips/pci/pci-alchemy.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <asm/mach-au1x00/au1000.h> | 19 | #include <asm/mach-au1x00/au1000.h> |
20 | #include <asm/tlbmisc.h> | 20 | #include <asm/tlbmisc.h> |
21 | 21 | ||
22 | #ifdef CONFIG_DEBUG_PCI | 22 | #ifdef CONFIG_PCI_DEBUG |
23 | #define DBG(x...) printk(KERN_DEBUG x) | 23 | #define DBG(x...) printk(KERN_DEBUG x) |
24 | #else | 24 | #else |
25 | #define DBG(x...) do {} while (0) | 25 | #define DBG(x...) do {} while (0) |
@@ -162,7 +162,7 @@ static int config_access(unsigned char access_type, struct pci_bus *bus, | |||
162 | if (status & (1 << 29)) { | 162 | if (status & (1 << 29)) { |
163 | *data = 0xffffffff; | 163 | *data = 0xffffffff; |
164 | error = -1; | 164 | error = -1; |
165 | DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d", | 165 | DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n", |
166 | access_type, bus->number, device); | 166 | access_type, bus->number, device); |
167 | } else if ((status >> 28) & 0xf) { | 167 | } else if ((status >> 28) & 0xf) { |
168 | DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n", | 168 | DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n", |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 80821512e9cc..ea5bb045983a 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -90,6 +90,7 @@ config GENERIC_GPIO | |||
90 | config PPC | 90 | config PPC |
91 | bool | 91 | bool |
92 | default y | 92 | default y |
93 | select BINFMT_ELF | ||
93 | select OF | 94 | select OF |
94 | select OF_EARLY_FLATTREE | 95 | select OF_EARLY_FLATTREE |
95 | select HAVE_FTRACE_MCOUNT_RECORD | 96 | select HAVE_FTRACE_MCOUNT_RECORD |
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index 2fdb47a19efd..b59e06f507ea 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h | |||
@@ -343,17 +343,16 @@ extern void slb_set_size(u16 size); | |||
343 | /* | 343 | /* |
344 | * VSID allocation (256MB segment) | 344 | * VSID allocation (256MB segment) |
345 | * | 345 | * |
346 | * We first generate a 38-bit "proto-VSID". For kernel addresses this | 346 | * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated |
347 | * is equal to the ESID | 1 << 37, for user addresses it is: | 347 | * from mmu context id and effective segment id of the address. |
348 | * (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1) | ||
349 | * | 348 | * |
350 | * This splits the proto-VSID into the below range | 349 | * For user processes max context id is limited to ((1ul << 19) - 5) |
351 | * 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range | 350 | * for kernel space, we use the top 4 context ids to map address as below |
352 | * 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range | 351 | * NOTE: each context only support 64TB now. |
353 | * | 352 | * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ] |
354 | * We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1 | 353 | * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ] |
355 | * That is, we assign half of the space to user processes and half | 354 | * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ] |
356 | * to the kernel. | 355 | * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ] |
357 | * | 356 | * |
358 | * The proto-VSIDs are then scrambled into real VSIDs with the | 357 | * The proto-VSIDs are then scrambled into real VSIDs with the |
359 | * multiplicative hash: | 358 | * multiplicative hash: |
@@ -363,41 +362,49 @@ extern void slb_set_size(u16 size); | |||
363 | * VSID_MULTIPLIER is prime, so in particular it is | 362 | * VSID_MULTIPLIER is prime, so in particular it is |
364 | * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. | 363 | * co-prime to VSID_MODULUS, making this a 1:1 scrambling function. |
365 | * Because the modulus is 2^n-1 we can compute it efficiently without | 364 | * Because the modulus is 2^n-1 we can compute it efficiently without |
366 | * a divide or extra multiply (see below). | 365 | * a divide or extra multiply (see below). The scramble function gives |
367 | * | 366 | * robust scattering in the hash table (at least based on some initial |
368 | * This scheme has several advantages over older methods: | 367 | * results). |
369 | * | ||
370 | * - We have VSIDs allocated for every kernel address | ||
371 | * (i.e. everything above 0xC000000000000000), except the very top | ||
372 | * segment, which simplifies several things. | ||
373 | * | 368 | * |
374 | * - We allow for USER_ESID_BITS significant bits of ESID and | 369 | * We also consider VSID 0 special. We use VSID 0 for slb entries mapping |
375 | * CONTEXT_BITS bits of context for user addresses. | 370 | * bad address. This enables us to consolidate bad address handling in |
376 | * i.e. 64T (46 bits) of address space for up to half a million contexts. | 371 | * hash_page. |
377 | * | 372 | * |
378 | * - The scramble function gives robust scattering in the hash | 373 | * We also need to avoid the last segment of the last context, because that |
379 | * table (at least based on some initial results). The previous | 374 | * would give a protovsid of 0x1fffffffff. That will result in a VSID 0 |
380 | * method was more susceptible to pathological cases giving excessive | 375 | * because of the modulo operation in vsid scramble. But the vmemmap |
381 | * hash collisions. | 376 | * (which is what uses region 0xf) will never be close to 64TB in size |
377 | * (it's 56 bytes per page of system memory). | ||
382 | */ | 378 | */ |
383 | 379 | ||
380 | #define CONTEXT_BITS 19 | ||
381 | #define ESID_BITS 18 | ||
382 | #define ESID_BITS_1T 6 | ||
383 | |||
384 | /* | ||
385 | * 256MB segment | ||
386 | * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments | ||
387 | * available for user + kernel mapping. The top 4 contexts are used for | ||
388 | * kernel mapping. Each segment contains 2^28 bytes. Each | ||
389 | * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts | ||
390 | * (19 == 37 + 28 - 46). | ||
391 | */ | ||
392 | #define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5) | ||
393 | |||
384 | /* | 394 | /* |
385 | * This should be computed such that protovosid * vsid_mulitplier | 395 | * This should be computed such that protovosid * vsid_mulitplier |
386 | * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus | 396 | * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus |
387 | */ | 397 | */ |
388 | #define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */ | 398 | #define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */ |
389 | #define VSID_BITS_256M 38 | 399 | #define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS) |
390 | #define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1) | 400 | #define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1) |
391 | 401 | ||
392 | #define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */ | 402 | #define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */ |
393 | #define VSID_BITS_1T 26 | 403 | #define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T) |
394 | #define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1) | 404 | #define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1) |
395 | 405 | ||
396 | #define CONTEXT_BITS 19 | ||
397 | #define USER_ESID_BITS 18 | ||
398 | #define USER_ESID_BITS_1T 6 | ||
399 | 406 | ||
400 | #define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT)) | 407 | #define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT)) |
401 | 408 | ||
402 | /* | 409 | /* |
403 | * This macro generates asm code to compute the VSID scramble | 410 | * This macro generates asm code to compute the VSID scramble |
@@ -421,7 +428,8 @@ extern void slb_set_size(u16 size); | |||
421 | srdi rx,rt,VSID_BITS_##size; \ | 428 | srdi rx,rt,VSID_BITS_##size; \ |
422 | clrldi rt,rt,(64-VSID_BITS_##size); \ | 429 | clrldi rt,rt,(64-VSID_BITS_##size); \ |
423 | add rt,rt,rx; /* add high and low bits */ \ | 430 | add rt,rt,rx; /* add high and low bits */ \ |
424 | /* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \ | 431 | /* NOTE: explanation based on VSID_BITS_##size = 36 \ |
432 | * Now, r3 == VSID (mod 2^36-1), and lies between 0 and \ | ||
425 | * 2^36-1+2^28-1. That in particular means that if r3 >= \ | 433 | * 2^36-1+2^28-1. That in particular means that if r3 >= \ |
426 | * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \ | 434 | * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \ |
427 | * the bit clear, r3 already has the answer we want, if it \ | 435 | * the bit clear, r3 already has the answer we want, if it \ |
@@ -513,34 +521,6 @@ typedef struct { | |||
513 | }) | 521 | }) |
514 | #endif /* 1 */ | 522 | #endif /* 1 */ |
515 | 523 | ||
516 | /* | ||
517 | * This is only valid for addresses >= PAGE_OFFSET | ||
518 | * The proto-VSID space is divided into two class | ||
519 | * User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1 | ||
520 | * kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1 | ||
521 | * | ||
522 | * With KERNEL_START at 0xc000000000000000, the proto vsid for | ||
523 | * the kernel ends up with 0xc00000000 (36 bits). With 64TB | ||
524 | * support we need to have kernel proto-VSID in the | ||
525 | * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS. | ||
526 | */ | ||
527 | static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) | ||
528 | { | ||
529 | unsigned long proto_vsid; | ||
530 | /* | ||
531 | * We need to make sure proto_vsid for the kernel is | ||
532 | * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T]) | ||
533 | */ | ||
534 | if (ssize == MMU_SEGSIZE_256M) { | ||
535 | proto_vsid = ea >> SID_SHIFT; | ||
536 | proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS)); | ||
537 | return vsid_scramble(proto_vsid, 256M); | ||
538 | } | ||
539 | proto_vsid = ea >> SID_SHIFT_1T; | ||
540 | proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T)); | ||
541 | return vsid_scramble(proto_vsid, 1T); | ||
542 | } | ||
543 | |||
544 | /* Returns the segment size indicator for a user address */ | 524 | /* Returns the segment size indicator for a user address */ |
545 | static inline int user_segment_size(unsigned long addr) | 525 | static inline int user_segment_size(unsigned long addr) |
546 | { | 526 | { |
@@ -550,17 +530,41 @@ static inline int user_segment_size(unsigned long addr) | |||
550 | return MMU_SEGSIZE_256M; | 530 | return MMU_SEGSIZE_256M; |
551 | } | 531 | } |
552 | 532 | ||
553 | /* This is only valid for user addresses (which are below 2^44) */ | ||
554 | static inline unsigned long get_vsid(unsigned long context, unsigned long ea, | 533 | static inline unsigned long get_vsid(unsigned long context, unsigned long ea, |
555 | int ssize) | 534 | int ssize) |
556 | { | 535 | { |
536 | /* | ||
537 | * Bad address. We return VSID 0 for that | ||
538 | */ | ||
539 | if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) | ||
540 | return 0; | ||
541 | |||
557 | if (ssize == MMU_SEGSIZE_256M) | 542 | if (ssize == MMU_SEGSIZE_256M) |
558 | return vsid_scramble((context << USER_ESID_BITS) | 543 | return vsid_scramble((context << ESID_BITS) |
559 | | (ea >> SID_SHIFT), 256M); | 544 | | (ea >> SID_SHIFT), 256M); |
560 | return vsid_scramble((context << USER_ESID_BITS_1T) | 545 | return vsid_scramble((context << ESID_BITS_1T) |
561 | | (ea >> SID_SHIFT_1T), 1T); | 546 | | (ea >> SID_SHIFT_1T), 1T); |
562 | } | 547 | } |
563 | 548 | ||
549 | /* | ||
550 | * This is only valid for addresses >= PAGE_OFFSET | ||
551 | * | ||
552 | * For kernel space, we use the top 4 context ids to map address as below | ||
553 | * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ] | ||
554 | * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ] | ||
555 | * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ] | ||
556 | * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ] | ||
557 | */ | ||
558 | static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize) | ||
559 | { | ||
560 | unsigned long context; | ||
561 | |||
562 | /* | ||
563 | * kernel take the top 4 context from the available range | ||
564 | */ | ||
565 | context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1; | ||
566 | return get_vsid(context, ea, ssize); | ||
567 | } | ||
564 | #endif /* __ASSEMBLY__ */ | 568 | #endif /* __ASSEMBLY__ */ |
565 | 569 | ||
566 | #endif /* _ASM_POWERPC_MMU_HASH64_H_ */ | 570 | #endif /* _ASM_POWERPC_MMU_HASH64_H_ */ |
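
To make the reworked VSID comment block above concrete, here is a runnable sketch of the proto-VSID construction and the top-4-context kernel mapping, using the constants from the hunk (CONTEXT_BITS=19, ESID_BITS=18, SID_SHIFT=28 for 256MB segments). The scramble step is a simplified stand-in for vsid_scramble(), not the exact asm macro, and the explicit ESID masking is an assumption for illustration:

#include <stdio.h>
#include <stdint.h>

#define CONTEXT_BITS	19
#define ESID_BITS	18
#define SID_SHIFT	28				/* 256MB segments */
#define MAX_USER_CONTEXT ((1ULL << CONTEXT_BITS) - 5)

#define VSID_MULTIPLIER	12538073ULL			/* 24-bit prime */
#define VSID_BITS	(CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS	((1ULL << VSID_BITS) - 1)

/* Simplified scramble: multiplicative hash reduced mod 2^VSID_BITS - 1
 * (the asm macro computes the same reduction without a divide).
 */
static uint64_t vsid_scramble(uint64_t proto_vsid)
{
	return (proto_vsid * VSID_MULTIPLIER) % VSID_MODULUS;
}

/* proto-VSID = context id concatenated with the effective segment id
 * within the 64TB range covered by that context.
 */
static uint64_t get_vsid(uint64_t context, uint64_t ea)
{
	uint64_t esid = (ea >> SID_SHIFT) & ((1ULL << ESID_BITS) - 1);

	return vsid_scramble((context << ESID_BITS) | esid);
}

/* Kernel space takes the top 4 context ids: regions 0xc..0xf map to
 * MAX_USER_CONTEXT+1 .. MAX_USER_CONTEXT+4, as in the comment above.
 */
static uint64_t get_kernel_vsid(uint64_t ea)
{
	uint64_t context = MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1;

	return get_vsid(context, ea);
}

int main(void)
{
	printf("kernel vsid for 0xc000000000000000: %#llx\n",
	       (unsigned long long)get_kernel_vsid(0xc000000000000000ULL));
	printf("user vsid for context 1, ea 0x10000000: %#llx\n",
	       (unsigned long long)get_vsid(1, 0x10000000ULL));
	return 0;
}
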
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 75a3d71b895d..19599ef352bc 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
@@ -275,7 +275,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
275 | .cpu_features = CPU_FTRS_PPC970, | 275 | .cpu_features = CPU_FTRS_PPC970, |
276 | .cpu_user_features = COMMON_USER_POWER4 | | 276 | .cpu_user_features = COMMON_USER_POWER4 | |
277 | PPC_FEATURE_HAS_ALTIVEC_COMP, | 277 | PPC_FEATURE_HAS_ALTIVEC_COMP, |
278 | .mmu_features = MMU_FTR_HPTE_TABLE, | 278 | .mmu_features = MMU_FTRS_PPC970, |
279 | .icache_bsize = 128, | 279 | .icache_bsize = 128, |
280 | .dcache_bsize = 128, | 280 | .dcache_bsize = 128, |
281 | .num_pmcs = 8, | 281 | .num_pmcs = 8, |
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c index f3eab8594d9f..d44a571e45a7 100644 --- a/arch/powerpc/kernel/epapr_paravirt.c +++ b/arch/powerpc/kernel/epapr_paravirt.c | |||
@@ -23,8 +23,10 @@ | |||
23 | #include <asm/code-patching.h> | 23 | #include <asm/code-patching.h> |
24 | #include <asm/machdep.h> | 24 | #include <asm/machdep.h> |
25 | 25 | ||
26 | #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) | ||
26 | extern void epapr_ev_idle(void); | 27 | extern void epapr_ev_idle(void); |
27 | extern u32 epapr_ev_idle_start[]; | 28 | extern u32 epapr_ev_idle_start[]; |
29 | #endif | ||
28 | 30 | ||
29 | bool epapr_paravirt_enabled; | 31 | bool epapr_paravirt_enabled; |
30 | 32 | ||
@@ -47,11 +49,15 @@ static int __init epapr_paravirt_init(void) | |||
47 | 49 | ||
48 | for (i = 0; i < (len / 4); i++) { | 50 | for (i = 0; i < (len / 4); i++) { |
49 | patch_instruction(epapr_hypercall_start + i, insts[i]); | 51 | patch_instruction(epapr_hypercall_start + i, insts[i]); |
52 | #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) | ||
50 | patch_instruction(epapr_ev_idle_start + i, insts[i]); | 53 | patch_instruction(epapr_ev_idle_start + i, insts[i]); |
54 | #endif | ||
51 | } | 55 | } |
52 | 56 | ||
57 | #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64) | ||
53 | if (of_get_property(hyper_node, "has-idle", NULL)) | 58 | if (of_get_property(hyper_node, "has-idle", NULL)) |
54 | ppc_md.power_save = epapr_ev_idle; | 59 | ppc_md.power_save = epapr_ev_idle; |
60 | #endif | ||
55 | 61 | ||
56 | epapr_paravirt_enabled = true; | 62 | epapr_paravirt_enabled = true; |
57 | 63 | ||
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 87ef8f5ee5bc..56bd92362ce1 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -1066,78 +1066,6 @@ unrecov_user_slb: | |||
1066 | #endif /* __DISABLED__ */ | 1066 | #endif /* __DISABLED__ */ |
1067 | 1067 | ||
1068 | 1068 | ||
1069 | /* | ||
1070 | * r13 points to the PACA, r9 contains the saved CR, | ||
1071 | * r12 contain the saved SRR1, SRR0 is still ready for return | ||
1072 | * r3 has the faulting address | ||
1073 | * r9 - r13 are saved in paca->exslb. | ||
1074 | * r3 is saved in paca->slb_r3 | ||
1075 | * We assume we aren't going to take any exceptions during this procedure. | ||
1076 | */ | ||
1077 | _GLOBAL(slb_miss_realmode) | ||
1078 | mflr r10 | ||
1079 | #ifdef CONFIG_RELOCATABLE | ||
1080 | mtctr r11 | ||
1081 | #endif | ||
1082 | |||
1083 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | ||
1084 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ | ||
1085 | |||
1086 | bl .slb_allocate_realmode | ||
1087 | |||
1088 | /* All done -- return from exception. */ | ||
1089 | |||
1090 | ld r10,PACA_EXSLB+EX_LR(r13) | ||
1091 | ld r3,PACA_EXSLB+EX_R3(r13) | ||
1092 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | ||
1093 | |||
1094 | mtlr r10 | ||
1095 | |||
1096 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | ||
1097 | beq- 2f | ||
1098 | |||
1099 | .machine push | ||
1100 | .machine "power4" | ||
1101 | mtcrf 0x80,r9 | ||
1102 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | ||
1103 | .machine pop | ||
1104 | |||
1105 | RESTORE_PPR_PACA(PACA_EXSLB, r9) | ||
1106 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
1107 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
1108 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
1109 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
1110 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
1111 | rfid | ||
1112 | b . /* prevent speculative execution */ | ||
1113 | |||
1114 | 2: mfspr r11,SPRN_SRR0 | ||
1115 | ld r10,PACAKBASE(r13) | ||
1116 | LOAD_HANDLER(r10,unrecov_slb) | ||
1117 | mtspr SPRN_SRR0,r10 | ||
1118 | ld r10,PACAKMSR(r13) | ||
1119 | mtspr SPRN_SRR1,r10 | ||
1120 | rfid | ||
1121 | b . | ||
1122 | |||
1123 | unrecov_slb: | ||
1124 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) | ||
1125 | DISABLE_INTS | ||
1126 | bl .save_nvgprs | ||
1127 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
1128 | bl .unrecoverable_exception | ||
1129 | b 1b | ||
1130 | |||
1131 | |||
1132 | #ifdef CONFIG_PPC_970_NAP | ||
1133 | power4_fixup_nap: | ||
1134 | andc r9,r9,r10 | ||
1135 | std r9,TI_LOCAL_FLAGS(r11) | ||
1136 | ld r10,_LINK(r1) /* make idle task do the */ | ||
1137 | std r10,_NIP(r1) /* equivalent of a blr */ | ||
1138 | blr | ||
1139 | #endif | ||
1140 | |||
1141 | .align 7 | 1069 | .align 7 |
1142 | .globl alignment_common | 1070 | .globl alignment_common |
1143 | alignment_common: | 1071 | alignment_common: |
@@ -1336,6 +1264,78 @@ _GLOBAL(opal_mc_secondary_handler) | |||
1336 | 1264 | ||
1337 | 1265 | ||
1338 | /* | 1266 | /* |
1267 | * r13 points to the PACA, r9 contains the saved CR, | ||
1268 | * r12 contain the saved SRR1, SRR0 is still ready for return | ||
1269 | * r3 has the faulting address | ||
1270 | * r9 - r13 are saved in paca->exslb. | ||
1271 | * r3 is saved in paca->slb_r3 | ||
1272 | * We assume we aren't going to take any exceptions during this procedure. | ||
1273 | */ | ||
1274 | _GLOBAL(slb_miss_realmode) | ||
1275 | mflr r10 | ||
1276 | #ifdef CONFIG_RELOCATABLE | ||
1277 | mtctr r11 | ||
1278 | #endif | ||
1279 | |||
1280 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | ||
1281 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ | ||
1282 | |||
1283 | bl .slb_allocate_realmode | ||
1284 | |||
1285 | /* All done -- return from exception. */ | ||
1286 | |||
1287 | ld r10,PACA_EXSLB+EX_LR(r13) | ||
1288 | ld r3,PACA_EXSLB+EX_R3(r13) | ||
1289 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | ||
1290 | |||
1291 | mtlr r10 | ||
1292 | |||
1293 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | ||
1294 | beq- 2f | ||
1295 | |||
1296 | .machine push | ||
1297 | .machine "power4" | ||
1298 | mtcrf 0x80,r9 | ||
1299 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | ||
1300 | .machine pop | ||
1301 | |||
1302 | RESTORE_PPR_PACA(PACA_EXSLB, r9) | ||
1303 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
1304 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
1305 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
1306 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
1307 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
1308 | rfid | ||
1309 | b . /* prevent speculative execution */ | ||
1310 | |||
1311 | 2: mfspr r11,SPRN_SRR0 | ||
1312 | ld r10,PACAKBASE(r13) | ||
1313 | LOAD_HANDLER(r10,unrecov_slb) | ||
1314 | mtspr SPRN_SRR0,r10 | ||
1315 | ld r10,PACAKMSR(r13) | ||
1316 | mtspr SPRN_SRR1,r10 | ||
1317 | rfid | ||
1318 | b . | ||
1319 | |||
1320 | unrecov_slb: | ||
1321 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) | ||
1322 | DISABLE_INTS | ||
1323 | bl .save_nvgprs | ||
1324 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
1325 | bl .unrecoverable_exception | ||
1326 | b 1b | ||
1327 | |||
1328 | |||
1329 | #ifdef CONFIG_PPC_970_NAP | ||
1330 | power4_fixup_nap: | ||
1331 | andc r9,r9,r10 | ||
1332 | std r9,TI_LOCAL_FLAGS(r11) | ||
1333 | ld r10,_LINK(r1) /* make idle task do the */ | ||
1334 | std r10,_NIP(r1) /* equivalent of a blr */ | ||
1335 | blr | ||
1336 | #endif | ||
1337 | |||
1338 | /* | ||
1339 | * Hash table stuff | 1339 | * Hash table stuff |
1340 | */ | 1340 | */ |
1341 | .align 7 | 1341 | .align 7 |
@@ -1452,20 +1452,36 @@ do_ste_alloc: | |||
1452 | _GLOBAL(do_stab_bolted) | 1452 | _GLOBAL(do_stab_bolted) |
1453 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | 1453 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ |
1454 | std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ | 1454 | std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ |
1455 | mfspr r11,SPRN_DAR /* ea */ | ||
1455 | 1456 | ||
1457 | /* | ||
1458 | * check for bad kernel/user address | ||
1459 | * (ea & ~REGION_MASK) >= PGTABLE_RANGE | ||
1460 | */ | ||
1461 | rldicr. r9,r11,4,(63 - 46 - 4) | ||
1462 | li r9,0 /* VSID = 0 for bad address */ | ||
1463 | bne- 0f | ||
1464 | |||
1465 | /* | ||
1466 | * Calculate VSID: | ||
1467 | * This is the kernel vsid, we take the top for context from | ||
1468 | * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 | ||
1469 | * Here we know that (ea >> 60) == 0xc | ||
1470 | */ | ||
1471 | lis r9,(MAX_USER_CONTEXT + 1)@ha | ||
1472 | addi r9,r9,(MAX_USER_CONTEXT + 1)@l | ||
1473 | |||
1474 | srdi r10,r11,SID_SHIFT | ||
1475 | rldimi r10,r9,ESID_BITS,0 /* proto vsid */ | ||
1476 | ASM_VSID_SCRAMBLE(r10, r9, 256M) | ||
1477 | rldic r9,r10,12,16 /* r9 = vsid << 12 */ | ||
1478 | |||
1479 | 0: | ||
1456 | /* Hash to the primary group */ | 1480 | /* Hash to the primary group */ |
1457 | ld r10,PACASTABVIRT(r13) | 1481 | ld r10,PACASTABVIRT(r13) |
1458 | mfspr r11,SPRN_DAR | 1482 | srdi r11,r11,SID_SHIFT |
1459 | srdi r11,r11,28 | ||
1460 | rldimi r10,r11,7,52 /* r10 = first ste of the group */ | 1483 | rldimi r10,r11,7,52 /* r10 = first ste of the group */ |
1461 | 1484 | ||
1462 | /* Calculate VSID */ | ||
1463 | /* This is a kernel address, so protovsid = ESID | 1 << 37 */ | ||
1464 | li r9,0x1 | ||
1465 | rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0 | ||
1466 | ASM_VSID_SCRAMBLE(r11, r9, 256M) | ||
1467 | rldic r9,r11,12,16 /* r9 = vsid << 12 */ | ||
1468 | |||
1469 | /* Search the primary group for a free entry */ | 1485 | /* Search the primary group for a free entry */ |
1470 | 1: ld r11,0(r10) /* Test valid bit of the current ste */ | 1486 | 1: ld r11,0(r10) /* Test valid bit of the current ste */ |
1471 | andi. r11,r11,0x80 | 1487 | andi. r11,r11,0x80 |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 7f7fb7fd991b..13f8d168b3f1 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -2832,11 +2832,13 @@ static void unreloc_toc(void) | |||
2832 | { | 2832 | { |
2833 | } | 2833 | } |
2834 | #else | 2834 | #else |
2835 | static void __reloc_toc(void *tocstart, unsigned long offset, | 2835 | static void __reloc_toc(unsigned long offset, unsigned long nr_entries) |
2836 | unsigned long nr_entries) | ||
2837 | { | 2836 | { |
2838 | unsigned long i; | 2837 | unsigned long i; |
2839 | unsigned long *toc_entry = (unsigned long *)tocstart; | 2838 | unsigned long *toc_entry; |
2839 | |||
2840 | /* Get the start of the TOC by using r2 directly. */ | ||
2841 | asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry)); | ||
2840 | 2842 | ||
2841 | for (i = 0; i < nr_entries; i++) { | 2843 | for (i = 0; i < nr_entries; i++) { |
2842 | *toc_entry = *toc_entry + offset; | 2844 | *toc_entry = *toc_entry + offset; |
@@ -2850,8 +2852,7 @@ static void reloc_toc(void) | |||
2850 | unsigned long nr_entries = | 2852 | unsigned long nr_entries = |
2851 | (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); | 2853 | (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); |
2852 | 2854 | ||
2853 | /* Need to add offset to get at __prom_init_toc_start */ | 2855 | __reloc_toc(offset, nr_entries); |
2854 | __reloc_toc(__prom_init_toc_start + offset, offset, nr_entries); | ||
2855 | 2856 | ||
2856 | mb(); | 2857 | mb(); |
2857 | } | 2858 | } |
@@ -2864,8 +2865,7 @@ static void unreloc_toc(void) | |||
2864 | 2865 | ||
2865 | mb(); | 2866 | mb(); |
2866 | 2867 | ||
2867 | /* __prom_init_toc_start has been relocated, no need to add offset */ | 2868 | __reloc_toc(-offset, nr_entries); |
2868 | __reloc_toc(__prom_init_toc_start, -offset, nr_entries); | ||
2869 | } | 2869 | } |
2870 | #endif | 2870 | #endif |
2871 | #endif | 2871 | #endif |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 245c1b6a0858..f9b30c68ba47 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -1428,6 +1428,7 @@ static long ppc_set_hwdebug(struct task_struct *child, | |||
1428 | 1428 | ||
1429 | brk.address = bp_info->addr & ~7UL; | 1429 | brk.address = bp_info->addr & ~7UL; |
1430 | brk.type = HW_BRK_TYPE_TRANSLATE; | 1430 | brk.type = HW_BRK_TYPE_TRANSLATE; |
1431 | brk.len = 8; | ||
1431 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) | 1432 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) |
1432 | brk.type |= HW_BRK_TYPE_READ; | 1433 | brk.type |= HW_BRK_TYPE_READ; |
1433 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) | 1434 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index ead58e317294..5d7d29a313eb 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c | |||
@@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu) | |||
326 | vcpu3s->context_id[0] = err; | 326 | vcpu3s->context_id[0] = err; |
327 | 327 | ||
328 | vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) | 328 | vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) |
329 | << USER_ESID_BITS) - 1; | 329 | << ESID_BITS) - 1; |
330 | vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; | 330 | vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS; |
331 | vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; | 331 | vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; |
332 | 332 | ||
333 | kvmppc_mmu_hpte_init(vcpu); | 333 | kvmppc_mmu_hpte_init(vcpu); |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 1b6e1271719f..f410c3e12c1e 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
@@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, | |||
195 | unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); | 195 | unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); |
196 | unsigned long tprot = prot; | 196 | unsigned long tprot = prot; |
197 | 197 | ||
198 | /* | ||
199 | * If we hit a bad address return error. | ||
200 | */ | ||
201 | if (!vsid) | ||
202 | return -1; | ||
198 | /* Make kernel text executable */ | 203 | /* Make kernel text executable */ |
199 | if (overlaps_kernel_text(vaddr, vaddr + step)) | 204 | if (overlaps_kernel_text(vaddr, vaddr + step)) |
200 | tprot &= ~HPTE_R_N; | 205 | tprot &= ~HPTE_R_N; |
@@ -759,6 +764,8 @@ void __init early_init_mmu(void) | |||
759 | /* Initialize stab / SLB management */ | 764 | /* Initialize stab / SLB management */ |
760 | if (mmu_has_feature(MMU_FTR_SLB)) | 765 | if (mmu_has_feature(MMU_FTR_SLB)) |
761 | slb_initialize(); | 766 | slb_initialize(); |
767 | else | ||
768 | stab_initialize(get_paca()->stab_real); | ||
762 | } | 769 | } |
763 | 770 | ||
764 | #ifdef CONFIG_SMP | 771 | #ifdef CONFIG_SMP |
@@ -922,11 +929,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
922 | DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", | 929 | DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", |
923 | ea, access, trap); | 930 | ea, access, trap); |
924 | 931 | ||
925 | if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) { | ||
926 | DBG_LOW(" out of pgtable range !\n"); | ||
927 | return 1; | ||
928 | } | ||
929 | |||
930 | /* Get region & vsid */ | 932 | /* Get region & vsid */ |
931 | switch (REGION_ID(ea)) { | 933 | switch (REGION_ID(ea)) { |
932 | case USER_REGION_ID: | 934 | case USER_REGION_ID: |
@@ -957,6 +959,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
957 | } | 959 | } |
958 | DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); | 960 | DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); |
959 | 961 | ||
962 | /* Bad address. */ | ||
963 | if (!vsid) { | ||
964 | DBG_LOW("Bad address!\n"); | ||
965 | return 1; | ||
966 | } | ||
960 | /* Get pgdir */ | 967 | /* Get pgdir */ |
961 | pgdir = mm->pgd; | 968 | pgdir = mm->pgd; |
962 | if (pgdir == NULL) | 969 | if (pgdir == NULL) |
@@ -1126,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, | |||
1126 | /* Get VSID */ | 1133 | /* Get VSID */ |
1127 | ssize = user_segment_size(ea); | 1134 | ssize = user_segment_size(ea); |
1128 | vsid = get_vsid(mm->context.id, ea, ssize); | 1135 | vsid = get_vsid(mm->context.id, ea, ssize); |
1136 | if (!vsid) | ||
1137 | return; | ||
1129 | 1138 | ||
1130 | /* Hash doesn't like irqs */ | 1139 | /* Hash doesn't like irqs */ |
1131 | local_irq_save(flags); | 1140 | local_irq_save(flags); |
@@ -1233,6 +1242,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) | |||
1233 | hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); | 1242 | hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); |
1234 | hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); | 1243 | hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); |
1235 | 1244 | ||
1245 | /* Don't create HPTE entries for bad address */ | ||
1246 | if (!vsid) | ||
1247 | return; | ||
1236 | ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr), | 1248 | ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr), |
1237 | mode, HPTE_V_BOLTED, | 1249 | mode, HPTE_V_BOLTED, |
1238 | mmu_linear_psize, mmu_kernel_ssize); | 1250 | mmu_linear_psize, mmu_kernel_ssize); |
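
A common thread in the hash_utils_64.c changes above: with the reworked VSID layout, a VSID of 0 serves as the bad-address sentinel, and htab_bolt_mapping(), hash_page(), hash_preload() and kernel_map_linear_page() all bail out early instead of acting on it. A self-contained sketch of that sentinel convention; the bound and the lookup body below are placeholders, not the kernel's get_vsid():

#include <stdio.h>

/* Placeholder bound standing in for the real VSID range check. */
#define MAX_VALID_EA 0x0000400000000000UL   /* assumption for illustration */

/* Returns a nonzero "vsid" for valid addresses, 0 as the bad-address sentinel. */
static unsigned long toy_get_vsid(unsigned long ea)
{
    if (ea >= MAX_VALID_EA)
        return 0;
    return (ea >> 28) + 1;     /* arbitrary nonzero mapping for the demo */
}

static int toy_hash_page(unsigned long ea)
{
    unsigned long vsid = toy_get_vsid(ea);

    if (!vsid) {               /* same early-bail pattern as the hunks above */
        printf("bad address %#lx\n", ea);
        return 1;
    }
    printf("ea %#lx -> vsid %#lx\n", ea, vsid);
    return 0;
}

int main(void)
{
    toy_hash_page(0x10000000UL);
    toy_hash_page(0xf000000000000000UL);
    return 0;
}
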
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c index 40bc5b0ace54..d1d1b92c5b99 100644 --- a/arch/powerpc/mm/mmu_context_hash64.c +++ b/arch/powerpc/mm/mmu_context_hash64.c | |||
@@ -29,15 +29,6 @@ | |||
29 | static DEFINE_SPINLOCK(mmu_context_lock); | 29 | static DEFINE_SPINLOCK(mmu_context_lock); |
30 | static DEFINE_IDA(mmu_context_ida); | 30 | static DEFINE_IDA(mmu_context_ida); |
31 | 31 | ||
32 | /* | ||
33 | * 256MB segment | ||
34 | * The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments | ||
35 | * available for user mappings. Each segment contains 2^28 bytes. Each | ||
36 | * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts | ||
37 | * (19 == 37 + 28 - 46). | ||
38 | */ | ||
39 | #define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1) | ||
40 | |||
41 | int __init_new_context(void) | 32 | int __init_new_context(void) |
42 | { | 33 | { |
43 | int index; | 34 | int index; |
@@ -56,7 +47,7 @@ again: | |||
56 | else if (err) | 47 | else if (err) |
57 | return err; | 48 | return err; |
58 | 49 | ||
59 | if (index > MAX_CONTEXT) { | 50 | if (index > MAX_USER_CONTEXT) { |
60 | spin_lock(&mmu_context_lock); | 51 | spin_lock(&mmu_context_lock); |
61 | ida_remove(&mmu_context_ida, index); | 52 | ida_remove(&mmu_context_ida, index); |
62 | spin_unlock(&mmu_context_lock); | 53 | spin_unlock(&mmu_context_lock); |
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index e212a271c7a4..654258f165ae 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c | |||
@@ -61,7 +61,7 @@ | |||
61 | #endif | 61 | #endif |
62 | 62 | ||
63 | #ifdef CONFIG_PPC_STD_MMU_64 | 63 | #ifdef CONFIG_PPC_STD_MMU_64 |
64 | #if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) | 64 | #if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT)) |
65 | #error TASK_SIZE_USER64 exceeds user VSID range | 65 | #error TASK_SIZE_USER64 exceeds user VSID range |
66 | #endif | 66 | #endif |
67 | #endif | 67 | #endif |
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index 1a16ca227757..17aa6dfceb34 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S | |||
@@ -31,10 +31,15 @@ | |||
31 | * No other registers are examined or changed. | 31 | * No other registers are examined or changed. |
32 | */ | 32 | */ |
33 | _GLOBAL(slb_allocate_realmode) | 33 | _GLOBAL(slb_allocate_realmode) |
34 | /* r3 = faulting address */ | 34 | /* |
35 | * check for bad kernel/user address | ||
36 | * (ea & ~REGION_MASK) >= PGTABLE_RANGE | ||
37 | */ | ||
38 | rldicr. r9,r3,4,(63 - 46 - 4) | ||
39 | bne- 8f | ||
35 | 40 | ||
36 | srdi r9,r3,60 /* get region */ | 41 | srdi r9,r3,60 /* get region */ |
37 | srdi r10,r3,28 /* get esid */ | 42 | srdi r10,r3,SID_SHIFT /* get esid */ |
38 | cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */ | 43 | cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */ |
39 | 44 | ||
40 | /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */ | 45 | /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */ |
@@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode) | |||
56 | */ | 61 | */ |
57 | _GLOBAL(slb_miss_kernel_load_linear) | 62 | _GLOBAL(slb_miss_kernel_load_linear) |
58 | li r11,0 | 63 | li r11,0 |
59 | li r9,0x1 | ||
60 | /* | 64 | /* |
61 | * for 1T we shift 12 bits more. slb_finish_load_1T will do | 65 | * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 |
62 | * the necessary adjustment | 66 | * r9 = region id. |
63 | */ | 67 | */ |
64 | rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 | 68 | addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha |
69 | addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l | ||
70 | |||
71 | |||
65 | BEGIN_FTR_SECTION | 72 | BEGIN_FTR_SECTION |
66 | b slb_finish_load | 73 | b slb_finish_load |
67 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) | 74 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) |
@@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap) | |||
91 | _GLOBAL(slb_miss_kernel_load_io) | 98 | _GLOBAL(slb_miss_kernel_load_io) |
92 | li r11,0 | 99 | li r11,0 |
93 | 6: | 100 | 6: |
94 | li r9,0x1 | ||
95 | /* | 101 | /* |
96 | * for 1T we shift 12 bits more. slb_finish_load_1T will do | 102 | * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1 |
97 | * the necessary adjustment | 103 | * r9 = region id. |
98 | */ | 104 | */ |
99 | rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 | 105 | addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha |
106 | addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l | ||
107 | |||
100 | BEGIN_FTR_SECTION | 108 | BEGIN_FTR_SECTION |
101 | b slb_finish_load | 109 | b slb_finish_load |
102 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) | 110 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) |
103 | b slb_finish_load_1T | 111 | b slb_finish_load_1T |
104 | 112 | ||
105 | 0: /* user address: proto-VSID = context << 15 | ESID. First check | 113 | 0: |
106 | * if the address is within the boundaries of the user region | ||
107 | */ | ||
108 | srdi. r9,r10,USER_ESID_BITS | ||
109 | bne- 8f /* invalid ea bits set */ | ||
110 | |||
111 | |||
112 | /* when using slices, we extract the psize off the slice bitmaps | 114 | /* when using slices, we extract the psize off the slice bitmaps |
113 | * and then we need to get the sllp encoding off the mmu_psize_defs | 115 | * and then we need to get the sllp encoding off the mmu_psize_defs |
114 | * array. | 116 | * array. |
@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) | |||
164 | ld r9,PACACONTEXTID(r13) | 166 | ld r9,PACACONTEXTID(r13) |
165 | BEGIN_FTR_SECTION | 167 | BEGIN_FTR_SECTION |
166 | cmpldi r10,0x1000 | 168 | cmpldi r10,0x1000 |
167 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | ||
168 | rldimi r10,r9,USER_ESID_BITS,0 | ||
169 | BEGIN_FTR_SECTION | ||
170 | bge slb_finish_load_1T | 169 | bge slb_finish_load_1T |
171 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) | 170 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) |
172 | b slb_finish_load | 171 | b slb_finish_load |
173 | 172 | ||
174 | 8: /* invalid EA */ | 173 | 8: /* invalid EA */ |
175 | li r10,0 /* BAD_VSID */ | 174 | li r10,0 /* BAD_VSID */ |
175 | li r9,0 /* BAD_VSID */ | ||
176 | li r11,SLB_VSID_USER /* flags don't much matter */ | 176 | li r11,SLB_VSID_USER /* flags don't much matter */ |
177 | b slb_finish_load | 177 | b slb_finish_load |
178 | 178 | ||
@@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user) | |||
221 | 221 | ||
222 | /* get context to calculate proto-VSID */ | 222 | /* get context to calculate proto-VSID */ |
223 | ld r9,PACACONTEXTID(r13) | 223 | ld r9,PACACONTEXTID(r13) |
224 | rldimi r10,r9,USER_ESID_BITS,0 | ||
225 | |||
226 | /* fall through slb_finish_load */ | 224 | /* fall through slb_finish_load */ |
227 | 225 | ||
228 | #endif /* __DISABLED__ */ | 226 | #endif /* __DISABLED__ */ |
@@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user) | |||
231 | /* | 229 | /* |
232 | * Finish loading of an SLB entry and return | 230 | * Finish loading of an SLB entry and return |
233 | * | 231 | * |
234 | * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET | 232 | * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET |
235 | */ | 233 | */ |
236 | slb_finish_load: | 234 | slb_finish_load: |
235 | rldimi r10,r9,ESID_BITS,0 | ||
237 | ASM_VSID_SCRAMBLE(r10,r9,256M) | 236 | ASM_VSID_SCRAMBLE(r10,r9,256M) |
238 | /* | 237 | /* |
239 | * bits above VSID_BITS_256M need to be ignored from r10 | 238 | * bits above VSID_BITS_256M need to be ignored from r10 |
@@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size) | |||
298 | /* | 297 | /* |
299 | * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return. | 298 | * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return. |
300 | * | 299 | * |
301 | * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9 | 300 | * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9 |
302 | */ | 301 | */ |
303 | slb_finish_load_1T: | 302 | slb_finish_load_1T: |
304 | srdi r10,r10,40-28 /* get 1T ESID */ | 303 | srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */ |
304 | rldimi r10,r9,ESID_BITS_1T,0 | ||
305 | ASM_VSID_SCRAMBLE(r10,r9,1T) | 305 | ASM_VSID_SCRAMBLE(r10,r9,1T) |
306 | /* | 306 | /* |
307 | * bits above VSID_BITS_1T need to be ignored from r10 | 307 | * bits above VSID_BITS_1T need to be ignored from r10 |
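
Reading the new slb_low.S flow: the context is now folded into the proto-VSID inside slb_finish_load/_1T via rldimi r10,r9,ESID_BITS,0, and for kernel regions the context is derived as MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1, exactly as the new comments state. A standalone C rendering of that composition; SID_SHIFT = 28 follows from the srdi replacement in the hunk, while the ESID_BITS and MAX_USER_CONTEXT values are placeholders (the real ones live in the mmu headers, which this hunk does not show):

#include <stdio.h>

#define SID_SHIFT        28          /* 256MB segments, per the srdi change above */
#define ESID_BITS        18          /* placeholder value for illustration */
#define MAX_USER_CONTEXT 0x7ffffUL   /* placeholder value for illustration */

int main(void)
{
    unsigned long ea = 0xc000000031234000UL;   /* kernel linear-map address */
    unsigned long region = ea >> 60;           /* 0xc for the linear map */
    unsigned long esid = (ea >> SID_SHIFT) & ((1UL << ESID_BITS) - 1);

    /* context = MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1, per the new comment */
    unsigned long context = MAX_USER_CONTEXT + (region - 0xc) + 1;

    /* rldimi r10,r9,ESID_BITS,0: place the context above the low ESID_BITS bits */
    unsigned long proto_vsid = (context << ESID_BITS) | esid;

    printf("region %#lx esid %#lx context %#lx proto-VSID %#lx\n",
           region, esid, context, proto_vsid);
    return 0;
}
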
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 0d82ef50dc3f..023ec8a13f38 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c | |||
@@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | |||
82 | if (!is_kernel_addr(addr)) { | 82 | if (!is_kernel_addr(addr)) { |
83 | ssize = user_segment_size(addr); | 83 | ssize = user_segment_size(addr); |
84 | vsid = get_vsid(mm->context.id, addr, ssize); | 84 | vsid = get_vsid(mm->context.id, addr, ssize); |
85 | WARN_ON(vsid == 0); | ||
86 | } else { | 85 | } else { |
87 | vsid = get_kernel_vsid(addr, mmu_kernel_ssize); | 86 | vsid = get_kernel_vsid(addr, mmu_kernel_ssize); |
88 | ssize = mmu_kernel_ssize; | 87 | ssize = mmu_kernel_ssize; |
89 | } | 88 | } |
89 | WARN_ON(vsid == 0); | ||
90 | vpn = hpt_vpn(addr, vsid, ssize); | 90 | vpn = hpt_vpn(addr, vsid, ssize); |
91 | rpte = __real_pte(__pte(pte), ptep); | 91 | rpte = __real_pte(__pte(pte), ptep); |
92 | 92 | ||
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index b554879bd31e..3c475d6267c7 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c | |||
@@ -420,7 +420,20 @@ static struct attribute_group power7_pmu_events_group = { | |||
420 | .attrs = power7_events_attr, | 420 | .attrs = power7_events_attr, |
421 | }; | 421 | }; |
422 | 422 | ||
423 | PMU_FORMAT_ATTR(event, "config:0-19"); | ||
424 | |||
425 | static struct attribute *power7_pmu_format_attr[] = { | ||
426 | &format_attr_event.attr, | ||
427 | NULL, | ||
428 | }; | ||
429 | |||
430 | struct attribute_group power7_pmu_format_group = { | ||
431 | .name = "format", | ||
432 | .attrs = power7_pmu_format_attr, | ||
433 | }; | ||
434 | |||
423 | static const struct attribute_group *power7_pmu_attr_groups[] = { | 435 | static const struct attribute_group *power7_pmu_attr_groups[] = { |
436 | &power7_pmu_format_group, | ||
424 | &power7_pmu_events_group, | 437 | &power7_pmu_events_group, |
425 | NULL, | 438 | NULL, |
426 | }; | 439 | }; |
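
PMU_FORMAT_ATTR(event, "config:0-19") in the hunk above exports, through the new "format" sysfs group, the fact that the raw POWER7 event code occupies bits 0-19 of perf_event_attr.config. A small standalone sketch of how a consumer of that format string would pull the field out (the sample config value is made up):

#include <stdio.h>
#include <stdint.h>

/* "config:0-19" => the event field is config bits 0..19 */
static uint64_t extract_field(uint64_t config, int lo, int hi)
{
    uint64_t width = (uint64_t)(hi - lo + 1);
    return (config >> lo) & ((1ULL << width) - 1);
}

int main(void)
{
    uint64_t config = 0x000000000012340fULL;   /* example raw config value */

    printf("event = %#llx\n",
           (unsigned long long)extract_field(config, 0, 19));
    return 0;
}
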
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c index 611e92f291c4..7179726ba5c5 100644 --- a/arch/powerpc/platforms/85xx/sgy_cts1000.c +++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c | |||
@@ -69,7 +69,7 @@ static irqreturn_t gpio_halt_irq(int irq, void *__data) | |||
69 | return IRQ_HANDLED; | 69 | return IRQ_HANDLED; |
70 | }; | 70 | }; |
71 | 71 | ||
72 | static int __devinit gpio_halt_probe(struct platform_device *pdev) | 72 | static int gpio_halt_probe(struct platform_device *pdev) |
73 | { | 73 | { |
74 | enum of_gpio_flags flags; | 74 | enum of_gpio_flags flags; |
75 | struct device_node *node = pdev->dev.of_node; | 75 | struct device_node *node = pdev->dev.of_node; |
@@ -128,7 +128,7 @@ static int __devinit gpio_halt_probe(struct platform_device *pdev) | |||
128 | return 0; | 128 | return 0; |
129 | } | 129 | } |
130 | 130 | ||
131 | static int __devexit gpio_halt_remove(struct platform_device *pdev) | 131 | static int gpio_halt_remove(struct platform_device *pdev) |
132 | { | 132 | { |
133 | if (halt_node) { | 133 | if (halt_node) { |
134 | int gpio = of_get_gpio(halt_node, 0); | 134 | int gpio = of_get_gpio(halt_node, 0); |
@@ -165,7 +165,7 @@ static struct platform_driver gpio_halt_driver = { | |||
165 | .of_match_table = gpio_halt_match, | 165 | .of_match_table = gpio_halt_match, |
166 | }, | 166 | }, |
167 | .probe = gpio_halt_probe, | 167 | .probe = gpio_halt_probe, |
168 | .remove = __devexit_p(gpio_halt_remove), | 168 | .remove = gpio_halt_remove, |
169 | }; | 169 | }; |
170 | 170 | ||
171 | module_platform_driver(gpio_halt_driver); | 171 | module_platform_driver(gpio_halt_driver); |
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index cea2f09c4241..18e3b76c78d7 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype | |||
@@ -124,9 +124,8 @@ config 6xx | |||
124 | select PPC_HAVE_PMU_SUPPORT | 124 | select PPC_HAVE_PMU_SUPPORT |
125 | 125 | ||
126 | config POWER3 | 126 | config POWER3 |
127 | bool | ||
128 | depends on PPC64 && PPC_BOOK3S | 127 | depends on PPC64 && PPC_BOOK3S |
129 | default y if !POWER4_ONLY | 128 | def_bool y |
130 | 129 | ||
131 | config POWER4 | 130 | config POWER4 |
132 | depends on PPC64 && PPC_BOOK3S | 131 | depends on PPC64 && PPC_BOOK3S |
@@ -145,8 +144,7 @@ config TUNE_CELL | |||
145 | but somewhat slower on other machines. This option only changes | 144 | but somewhat slower on other machines. This option only changes |
146 | the scheduling of instructions, not the selection of instructions | 145 | the scheduling of instructions, not the selection of instructions |
147 | itself, so the resulting kernel will keep running on all other | 146 | itself, so the resulting kernel will keep running on all other |
148 | machines. When building a kernel that is supposed to run only | 147 | machines. |
149 | on Cell, you should also select the POWER4_ONLY option. | ||
150 | 148 | ||
151 | # this is temp to handle compat with arch=ppc | 149 | # this is temp to handle compat with arch=ppc |
152 | config 8xx | 150 | config 8xx |
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 0da39fed355a..299731e9036b 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c | |||
@@ -186,7 +186,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group) | |||
186 | (0x1UL << 4), &dummy1, &dummy2); | 186 | (0x1UL << 4), &dummy1, &dummy2); |
187 | if (lpar_rc == H_SUCCESS) | 187 | if (lpar_rc == H_SUCCESS) |
188 | return i; | 188 | return i; |
189 | BUG_ON(lpar_rc != H_NOT_FOUND); | 189 | |
190 | /* | ||
191 | * The test for adjunct partition is performed before the | ||
192 | * ANDCOND test. H_RESOURCE may be returned, so we need to | ||
193 | * check for that as well. | ||
194 | */ | ||
195 | BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE); | ||
190 | 196 | ||
191 | slot_offset++; | 197 | slot_offset++; |
192 | slot_offset &= 0x7; | 198 | slot_offset &= 0x7; |
diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h index 8d4847191ecc..dc9200ca32ed 100644 --- a/arch/s390/include/asm/eadm.h +++ b/arch/s390/include/asm/eadm.h | |||
@@ -34,6 +34,8 @@ struct arsb { | |||
34 | u32 reserved[4]; | 34 | u32 reserved[4]; |
35 | } __packed; | 35 | } __packed; |
36 | 36 | ||
37 | #define EQC_WR_PROHIBIT 22 | ||
38 | |||
37 | struct msb { | 39 | struct msb { |
38 | u8 fmt:4; | 40 | u8 fmt:4; |
39 | u8 oc:4; | 41 | u8 oc:4; |
@@ -96,11 +98,13 @@ struct scm_device { | |||
96 | #define OP_STATE_TEMP_ERR 2 | 98 | #define OP_STATE_TEMP_ERR 2 |
97 | #define OP_STATE_PERM_ERR 3 | 99 | #define OP_STATE_PERM_ERR 3 |
98 | 100 | ||
101 | enum scm_event {SCM_CHANGE, SCM_AVAIL}; | ||
102 | |||
99 | struct scm_driver { | 103 | struct scm_driver { |
100 | struct device_driver drv; | 104 | struct device_driver drv; |
101 | int (*probe) (struct scm_device *scmdev); | 105 | int (*probe) (struct scm_device *scmdev); |
102 | int (*remove) (struct scm_device *scmdev); | 106 | int (*remove) (struct scm_device *scmdev); |
103 | void (*notify) (struct scm_device *scmdev); | 107 | void (*notify) (struct scm_device *scmdev, enum scm_event event); |
104 | void (*handler) (struct scm_device *scmdev, void *data, int error); | 108 | void (*handler) (struct scm_device *scmdev, void *data, int error); |
105 | }; | 109 | }; |
106 | 110 | ||
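
The eadm.h change above gives the notify() callback a second argument saying which event happened (SCM_CHANGE or SCM_AVAIL) rather than leaving the driver to infer it. A hedged sketch of a handler written against the new signature; scm_device is reduced to a stub here and the printed actions are purely illustrative:

#include <stdio.h>

/* Mirrors the enum added in the hunk above. */
enum scm_event { SCM_CHANGE, SCM_AVAIL };

struct scm_device { int id; };   /* stand-in for the real structure */

/* Illustrative notify() body matching the new callback signature. */
static void demo_notify(struct scm_device *scmdev, enum scm_event event)
{
    switch (event) {
    case SCM_CHANGE:
        printf("scm%d: state changed, re-read status\n", scmdev->id);
        break;
    case SCM_AVAIL:
        printf("scm%d: device available again\n", scmdev->id);
        break;
    }
}

int main(void)
{
    struct scm_device dev = { .id = 0 };

    demo_notify(&dev, SCM_CHANGE);
    demo_notify(&dev, SCM_AVAIL);
    return 0;
}
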
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 4a2930844d43..4a5443118cfb 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
@@ -344,6 +344,7 @@ extern unsigned long MODULES_END; | |||
344 | #define _REGION3_ENTRY_CO 0x100 /* change-recording override */ | 344 | #define _REGION3_ENTRY_CO 0x100 /* change-recording override */ |
345 | 345 | ||
346 | /* Bits in the segment table entry */ | 346 | /* Bits in the segment table entry */ |
347 | #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ | ||
347 | #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ | 348 | #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ |
348 | #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ | 349 | #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ |
349 | #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ | 350 | #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ |
@@ -1531,7 +1532,8 @@ extern int s390_enable_sie(void); | |||
1531 | /* | 1532 | /* |
1532 | * No page table caches to initialise | 1533 | * No page table caches to initialise |
1533 | */ | 1534 | */ |
1534 | #define pgtable_cache_init() do { } while (0) | 1535 | static inline void pgtable_cache_init(void) { } |
1536 | static inline void check_pgt_cache(void) { } | ||
1535 | 1537 | ||
1536 | #include <asm-generic/pgtable.h> | 1538 | #include <asm-generic/pgtable.h> |
1537 | 1539 | ||
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index 1d8fe2b17ef6..6b32af30878c 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h | |||
@@ -74,8 +74,6 @@ static inline void __tlb_flush_idte(unsigned long asce) | |||
74 | 74 | ||
75 | static inline void __tlb_flush_mm(struct mm_struct * mm) | 75 | static inline void __tlb_flush_mm(struct mm_struct * mm) |
76 | { | 76 | { |
77 | if (unlikely(cpumask_empty(mm_cpumask(mm)))) | ||
78 | return; | ||
79 | /* | 77 | /* |
80 | * If the machine has IDTE we prefer to do a per mm flush | 78 | * If the machine has IDTE we prefer to do a per mm flush |
81 | * on all cpus instead of doing a local flush if the mm | 79 | * on all cpus instead of doing a local flush if the mm |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 550228523267..94feff7d6132 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -636,7 +636,8 @@ ENTRY(mcck_int_handler) | |||
636 | UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER | 636 | UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER |
637 | mcck_skip: | 637 | mcck_skip: |
638 | SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT | 638 | SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT |
639 | mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA | 639 | stm %r0,%r7,__PT_R0(%r11) |
640 | mvc __PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32 | ||
640 | stm %r8,%r9,__PT_PSW(%r11) | 641 | stm %r8,%r9,__PT_PSW(%r11) |
641 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) | 642 | xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) |
642 | l %r1,BASED(.Ldo_machine_check) | 643 | l %r1,BASED(.Ldo_machine_check) |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 9c837c101297..2e6d60c55f90 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -678,8 +678,9 @@ ENTRY(mcck_int_handler) | |||
678 | UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER | 678 | UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER |
679 | LAST_BREAK %r14 | 679 | LAST_BREAK %r14 |
680 | mcck_skip: | 680 | mcck_skip: |
681 | lghi %r14,__LC_GPREGS_SAVE_AREA | 681 | lghi %r14,__LC_GPREGS_SAVE_AREA+64 |
682 | mvc __PT_R0(128,%r11),0(%r14) | 682 | stmg %r0,%r7,__PT_R0(%r11) |
683 | mvc __PT_R8(64,%r11),0(%r14) | ||
683 | stmg %r8,%r9,__PT_PSW(%r11) | 684 | stmg %r8,%r9,__PT_PSW(%r11) |
684 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 685 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
685 | lgr %r2,%r11 # pass pointer to pt_regs | 686 | lgr %r2,%r11 # pass pointer to pt_regs |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index a5360de85ec7..29268859d8ee 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -571,6 +571,8 @@ static void __init setup_memory_end(void) | |||
571 | 571 | ||
572 | /* Split remaining virtual space between 1:1 mapping & vmemmap array */ | 572 | /* Split remaining virtual space between 1:1 mapping & vmemmap array */ |
573 | tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); | 573 | tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); |
574 | /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */ | ||
575 | tmp = SECTION_ALIGN_UP(tmp); | ||
574 | tmp = VMALLOC_START - tmp * sizeof(struct page); | 576 | tmp = VMALLOC_START - tmp * sizeof(struct page); |
575 | tmp &= ~((vmax >> 11) - 1); /* align to page table level */ | 577 | tmp &= ~((vmax >> 11) - 1); /* align to page table level */ |
576 | tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS); | 578 | tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS); |
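
The setup_memory_end() arithmetic above picks the largest page count n such that n identity-mapped pages plus their n struct page entries fit below VMALLOC_START, and this patch additionally rounds n up to a whole memory section so the vmemmap always holds a multiple of PAGES_PER_SECTION struct pages. A standalone sketch of that split; apart from PAGE_SIZE, the sizes used here (sizeof(struct page), PAGES_PER_SECTION, VMALLOC_START) are assumptions for illustration:

#include <stdio.h>

#define PAGE_SIZE          4096UL
#define STRUCT_PAGE_SIZE   64UL          /* assumed sizeof(struct page) */
#define PAGES_PER_SECTION  (1UL << 18)   /* assumed section size */
#define VMALLOC_START      (1UL << 40)   /* assumed split point */

#define SECTION_ALIGN_UP(x) \
    (((x) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))

int main(void)
{
    /* n pages of 1:1 mapping plus n struct pages must fit below VMALLOC_START */
    unsigned long n = VMALLOC_START / (PAGE_SIZE + STRUCT_PAGE_SIZE);

    /* new in this patch: round up so vmemmap covers whole sections of pages */
    n = SECTION_ALIGN_UP(n);

    printf("pages: %lu, vmemmap array starts at %#lx\n",
           n, VMALLOC_START - n * STRUCT_PAGE_SIZE);
    return 0;
}
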
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index dff631d34b45..466fb3383960 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c | |||
@@ -77,42 +77,69 @@ static size_t copy_in_kernel(size_t count, void __user *to, | |||
77 | * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address | 77 | * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address |
78 | * contains the (negative) exception code. | 78 | * contains the (negative) exception code. |
79 | */ | 79 | */ |
80 | static __always_inline unsigned long follow_table(struct mm_struct *mm, | 80 | #ifdef CONFIG_64BIT |
81 | unsigned long addr, int write) | 81 | static unsigned long follow_table(struct mm_struct *mm, |
82 | unsigned long address, int write) | ||
82 | { | 83 | { |
83 | pgd_t *pgd; | 84 | unsigned long *table = (unsigned long *)__pa(mm->pgd); |
84 | pud_t *pud; | 85 | |
85 | pmd_t *pmd; | 86 | switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { |
86 | pte_t *ptep; | 87 | case _ASCE_TYPE_REGION1: |
88 | table = table + ((address >> 53) & 0x7ff); | ||
89 | if (unlikely(*table & _REGION_ENTRY_INV)) | ||
90 | return -0x39UL; | ||
91 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
92 | case _ASCE_TYPE_REGION2: | ||
93 | table = table + ((address >> 42) & 0x7ff); | ||
94 | if (unlikely(*table & _REGION_ENTRY_INV)) | ||
95 | return -0x3aUL; | ||
96 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
97 | case _ASCE_TYPE_REGION3: | ||
98 | table = table + ((address >> 31) & 0x7ff); | ||
99 | if (unlikely(*table & _REGION_ENTRY_INV)) | ||
100 | return -0x3bUL; | ||
101 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | ||
102 | case _ASCE_TYPE_SEGMENT: | ||
103 | table = table + ((address >> 20) & 0x7ff); | ||
104 | if (unlikely(*table & _SEGMENT_ENTRY_INV)) | ||
105 | return -0x10UL; | ||
106 | if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) { | ||
107 | if (write && (*table & _SEGMENT_ENTRY_RO)) | ||
108 | return -0x04UL; | ||
109 | return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) + | ||
110 | (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE); | ||
111 | } | ||
112 | table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); | ||
113 | } | ||
114 | table = table + ((address >> 12) & 0xff); | ||
115 | if (unlikely(*table & _PAGE_INVALID)) | ||
116 | return -0x11UL; | ||
117 | if (write && (*table & _PAGE_RO)) | ||
118 | return -0x04UL; | ||
119 | return (*table & PAGE_MASK) + (address & ~PAGE_MASK); | ||
120 | } | ||
87 | 121 | ||
88 | pgd = pgd_offset(mm, addr); | 122 | #else /* CONFIG_64BIT */ |
89 | if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) | ||
90 | return -0x3aUL; | ||
91 | 123 | ||
92 | pud = pud_offset(pgd, addr); | 124 | static unsigned long follow_table(struct mm_struct *mm, |
93 | if (pud_none(*pud) || unlikely(pud_bad(*pud))) | 125 | unsigned long address, int write) |
94 | return -0x3bUL; | 126 | { |
127 | unsigned long *table = (unsigned long *)__pa(mm->pgd); | ||
95 | 128 | ||
96 | pmd = pmd_offset(pud, addr); | 129 | table = table + ((address >> 20) & 0x7ff); |
97 | if (pmd_none(*pmd)) | 130 | if (unlikely(*table & _SEGMENT_ENTRY_INV)) |
98 | return -0x10UL; | 131 | return -0x10UL; |
99 | if (pmd_large(*pmd)) { | 132 | table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); |
100 | if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO)) | 133 | table = table + ((address >> 12) & 0xff); |
101 | return -0x04UL; | 134 | if (unlikely(*table & _PAGE_INVALID)) |
102 | return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK); | ||
103 | } | ||
104 | if (unlikely(pmd_bad(*pmd))) | ||
105 | return -0x10UL; | ||
106 | |||
107 | ptep = pte_offset_map(pmd, addr); | ||
108 | if (!pte_present(*ptep)) | ||
109 | return -0x11UL; | 135 | return -0x11UL; |
110 | if (write && (!pte_write(*ptep) || !pte_dirty(*ptep))) | 136 | if (write && (*table & _PAGE_RO)) |
111 | return -0x04UL; | 137 | return -0x04UL; |
112 | 138 | return (*table & PAGE_MASK) + (address & ~PAGE_MASK); | |
113 | return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK); | ||
114 | } | 139 | } |
115 | 140 | ||
141 | #endif /* CONFIG_64BIT */ | ||
142 | |||
116 | static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, | 143 | static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, |
117 | size_t n, int write_user) | 144 | size_t n, int write_user) |
118 | { | 145 | { |
@@ -197,7 +224,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from) | |||
197 | 224 | ||
198 | static size_t clear_user_pt(size_t n, void __user *to) | 225 | static size_t clear_user_pt(size_t n, void __user *to) |
199 | { | 226 | { |
200 | void *zpage = &empty_zero_page; | 227 | void *zpage = (void *) empty_zero_page; |
201 | long done, size, ret; | 228 | long done, size, ret; |
202 | 229 | ||
203 | done = 0; | 230 | done = 0; |
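
The rewritten 64-bit follow_table() above walks the page tables by hand: it enters the switch at the level named by the ASCE type and falls through, so each region-table lookup feeds the next level; a large (1 MB) segment returns early using the _SEGMENT_ENTRY_ORIGIN_LARGE mask added in the pgtable.h hunk, and otherwise the walk ends at the page table. The shift/mask pairs are plain index extraction, shown here as a self-contained sketch (no tables are actually walked):

#include <stdio.h>

int main(void)
{
    unsigned long address = 0x123456789abcUL;    /* example virtual address */

    /* Same index extraction as the 64-bit follow_table() above. */
    unsigned long r1 = (address >> 53) & 0x7ff;  /* region-first index  */
    unsigned long r2 = (address >> 42) & 0x7ff;  /* region-second index */
    unsigned long r3 = (address >> 31) & 0x7ff;  /* region-third index  */
    unsigned long sx = (address >> 20) & 0x7ff;  /* segment index       */
    unsigned long px = (address >> 12) & 0xff;   /* page index          */
    unsigned long offset = address & 0xfff;      /* byte within the page */

    printf("r1=%lu r2=%lu r3=%lu sx=%lu px=%lu offset=%#lx\n",
           r1, r2, r3, sx, px, offset);
    return 0;
}
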
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 289127d5241c..3d361f236308 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -84,12 +84,6 @@ config ARCH_DEFCONFIG | |||
84 | default "arch/sparc/configs/sparc32_defconfig" if SPARC32 | 84 | default "arch/sparc/configs/sparc32_defconfig" if SPARC32 |
85 | default "arch/sparc/configs/sparc64_defconfig" if SPARC64 | 85 | default "arch/sparc/configs/sparc64_defconfig" if SPARC64 |
86 | 86 | ||
87 | # CONFIG_BITS can be used at source level to get 32/64 bits | ||
88 | config BITS | ||
89 | int | ||
90 | default 32 if SPARC32 | ||
91 | default 64 if SPARC64 | ||
92 | |||
93 | config IOMMU_HELPER | 87 | config IOMMU_HELPER |
94 | bool | 88 | bool |
95 | default y if SPARC64 | 89 | default y if SPARC64 |
@@ -197,7 +191,7 @@ config RWSEM_XCHGADD_ALGORITHM | |||
197 | 191 | ||
198 | config GENERIC_HWEIGHT | 192 | config GENERIC_HWEIGHT |
199 | bool | 193 | bool |
200 | default y if !ULTRA_HAS_POPULATION_COUNT | 194 | default y |
201 | 195 | ||
202 | config GENERIC_CALIBRATE_DELAY | 196 | config GENERIC_CALIBRATE_DELAY |
203 | bool | 197 | bool |
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h index d06a26601753..6b67e50fb9b4 100644 --- a/arch/sparc/include/asm/spitfire.h +++ b/arch/sparc/include/asm/spitfire.h | |||
@@ -45,6 +45,7 @@ | |||
45 | #define SUN4V_CHIP_NIAGARA3 0x03 | 45 | #define SUN4V_CHIP_NIAGARA3 0x03 |
46 | #define SUN4V_CHIP_NIAGARA4 0x04 | 46 | #define SUN4V_CHIP_NIAGARA4 0x04 |
47 | #define SUN4V_CHIP_NIAGARA5 0x05 | 47 | #define SUN4V_CHIP_NIAGARA5 0x05 |
48 | #define SUN4V_CHIP_SPARC64X 0x8a | ||
48 | #define SUN4V_CHIP_UNKNOWN 0xff | 49 | #define SUN4V_CHIP_UNKNOWN 0xff |
49 | 50 | ||
50 | #ifndef __ASSEMBLY__ | 51 | #ifndef __ASSEMBLY__ |
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index a6c94a2bf9d4..5c5125895db8 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c | |||
@@ -493,6 +493,12 @@ static void __init sun4v_cpu_probe(void) | |||
493 | sparc_pmu_type = "niagara5"; | 493 | sparc_pmu_type = "niagara5"; |
494 | break; | 494 | break; |
495 | 495 | ||
496 | case SUN4V_CHIP_SPARC64X: | ||
497 | sparc_cpu_type = "SPARC64-X"; | ||
498 | sparc_fpu_type = "SPARC64-X integrated FPU"; | ||
499 | sparc_pmu_type = "sparc64-x"; | ||
500 | break; | ||
501 | |||
496 | default: | 502 | default: |
497 | printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", | 503 | printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", |
498 | prom_cpu_compatible); | 504 | prom_cpu_compatible); |
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 2feb15c35d9e..26b706a1867d 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S | |||
@@ -134,6 +134,8 @@ prom_niagara_prefix: | |||
134 | .asciz "SUNW,UltraSPARC-T" | 134 | .asciz "SUNW,UltraSPARC-T" |
135 | prom_sparc_prefix: | 135 | prom_sparc_prefix: |
136 | .asciz "SPARC-" | 136 | .asciz "SPARC-" |
137 | prom_sparc64x_prefix: | ||
138 | .asciz "SPARC64-X" | ||
137 | .align 4 | 139 | .align 4 |
138 | prom_root_compatible: | 140 | prom_root_compatible: |
139 | .skip 64 | 141 | .skip 64 |
@@ -412,7 +414,7 @@ sun4v_chip_type: | |||
412 | cmp %g2, 'T' | 414 | cmp %g2, 'T' |
413 | be,pt %xcc, 70f | 415 | be,pt %xcc, 70f |
414 | cmp %g2, 'M' | 416 | cmp %g2, 'M' |
415 | bne,pn %xcc, 4f | 417 | bne,pn %xcc, 49f |
416 | nop | 418 | nop |
417 | 419 | ||
418 | 70: ldub [%g1 + 7], %g2 | 420 | 70: ldub [%g1 + 7], %g2 |
@@ -425,7 +427,7 @@ sun4v_chip_type: | |||
425 | cmp %g2, '5' | 427 | cmp %g2, '5' |
426 | be,pt %xcc, 5f | 428 | be,pt %xcc, 5f |
427 | mov SUN4V_CHIP_NIAGARA5, %g4 | 429 | mov SUN4V_CHIP_NIAGARA5, %g4 |
428 | ba,pt %xcc, 4f | 430 | ba,pt %xcc, 49f |
429 | nop | 431 | nop |
430 | 432 | ||
431 | 91: sethi %hi(prom_cpu_compatible), %g1 | 433 | 91: sethi %hi(prom_cpu_compatible), %g1 |
@@ -439,6 +441,25 @@ sun4v_chip_type: | |||
439 | mov SUN4V_CHIP_NIAGARA2, %g4 | 441 | mov SUN4V_CHIP_NIAGARA2, %g4 |
440 | 442 | ||
441 | 4: | 443 | 4: |
444 | /* Athena */ | ||
445 | sethi %hi(prom_cpu_compatible), %g1 | ||
446 | or %g1, %lo(prom_cpu_compatible), %g1 | ||
447 | sethi %hi(prom_sparc64x_prefix), %g7 | ||
448 | or %g7, %lo(prom_sparc64x_prefix), %g7 | ||
449 | mov 9, %g3 | ||
450 | 41: ldub [%g7], %g2 | ||
451 | ldub [%g1], %g4 | ||
452 | cmp %g2, %g4 | ||
453 | bne,pn %icc, 49f | ||
454 | add %g7, 1, %g7 | ||
455 | subcc %g3, 1, %g3 | ||
456 | bne,pt %xcc, 41b | ||
457 | add %g1, 1, %g1 | ||
458 | mov SUN4V_CHIP_SPARC64X, %g4 | ||
459 | ba,pt %xcc, 5f | ||
460 | nop | ||
461 | |||
462 | 49: | ||
442 | mov SUN4V_CHIP_UNKNOWN, %g4 | 463 | mov SUN4V_CHIP_UNKNOWN, %g4 |
443 | 5: sethi %hi(sun4v_chip_type), %g2 | 464 | 5: sethi %hi(sun4v_chip_type), %g2 |
444 | or %g2, %lo(sun4v_chip_type), %g2 | 465 | or %g2, %lo(sun4v_chip_type), %g2 |
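
The new block at label 41: above compares the first nine bytes of prom_cpu_compatible against the "SPARC64-X" prefix, and 49: becomes the shared unknown-chip exit. The same prefix check in plain C for readability (the chip constants match the spitfire.h hunk; the rest of the program is illustrative):

#include <stdio.h>
#include <string.h>

#define SUN4V_CHIP_SPARC64X 0x8a
#define SUN4V_CHIP_UNKNOWN  0xff

/* C equivalent of the nine-byte prefix compare loop added at label 41: */
static int sun4v_chip_from_compatible(const char *compatible)
{
    if (strncmp(compatible, "SPARC64-X", 9) == 0)
        return SUN4V_CHIP_SPARC64X;
    return SUN4V_CHIP_UNKNOWN;
}

int main(void)
{
    printf("%#x\n", sun4v_chip_from_compatible("SPARC64-X"));
    printf("%#x\n", sun4v_chip_from_compatible("SUNW,UltraSPARC-T4"));
    return 0;
}
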
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c index fc4320886a3a..4d1487138d26 100644 --- a/arch/sparc/kernel/leon_pci_grpci2.c +++ b/arch/sparc/kernel/leon_pci_grpci2.c | |||
@@ -186,6 +186,8 @@ struct grpci2_cap_first { | |||
186 | #define CAP9_IOMAP_OFS 0x20 | 186 | #define CAP9_IOMAP_OFS 0x20 |
187 | #define CAP9_BARSIZE_OFS 0x24 | 187 | #define CAP9_BARSIZE_OFS 0x24 |
188 | 188 | ||
189 | #define TGT 256 | ||
190 | |||
189 | struct grpci2_priv { | 191 | struct grpci2_priv { |
190 | struct leon_pci_info info; /* must be on top of this structure */ | 192 | struct leon_pci_info info; /* must be on top of this structure */ |
191 | struct grpci2_regs *regs; | 193 | struct grpci2_regs *regs; |
@@ -237,8 +239,12 @@ static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus, | |||
237 | if (where & 0x3) | 239 | if (where & 0x3) |
238 | return -EINVAL; | 240 | return -EINVAL; |
239 | 241 | ||
240 | if (bus == 0 && PCI_SLOT(devfn) != 0) | 242 | if (bus == 0) { |
241 | devfn += (0x8 * 6); | 243 | devfn += (0x8 * 6); /* start at AD16=Device0 */ |
244 | } else if (bus == TGT) { | ||
245 | bus = 0; | ||
246 | devfn = 0; /* special case: bridge controller itself */ | ||
247 | } | ||
242 | 248 | ||
243 | /* Select bus */ | 249 | /* Select bus */ |
244 | spin_lock_irqsave(&grpci2_dev_lock, flags); | 250 | spin_lock_irqsave(&grpci2_dev_lock, flags); |
@@ -303,8 +309,12 @@ static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus, | |||
303 | if (where & 0x3) | 309 | if (where & 0x3) |
304 | return -EINVAL; | 310 | return -EINVAL; |
305 | 311 | ||
306 | if (bus == 0 && PCI_SLOT(devfn) != 0) | 312 | if (bus == 0) { |
307 | devfn += (0x8 * 6); | 313 | devfn += (0x8 * 6); /* start at AD16=Device0 */ |
314 | } else if (bus == TGT) { | ||
315 | bus = 0; | ||
316 | devfn = 0; /* special case: bridge controller itself */ | ||
317 | } | ||
308 | 318 | ||
309 | /* Select bus */ | 319 | /* Select bus */ |
310 | spin_lock_irqsave(&grpci2_dev_lock, flags); | 320 | spin_lock_irqsave(&grpci2_dev_lock, flags); |
@@ -368,7 +378,7 @@ static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn, | |||
368 | unsigned int busno = bus->number; | 378 | unsigned int busno = bus->number; |
369 | int ret; | 379 | int ret; |
370 | 380 | ||
371 | if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) { | 381 | if (PCI_SLOT(devfn) > 15 || busno > 255) { |
372 | *val = ~0; | 382 | *val = ~0; |
373 | return 0; | 383 | return 0; |
374 | } | 384 | } |
@@ -406,7 +416,7 @@ static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn, | |||
406 | struct grpci2_priv *priv = grpci2priv; | 416 | struct grpci2_priv *priv = grpci2priv; |
407 | unsigned int busno = bus->number; | 417 | unsigned int busno = bus->number; |
408 | 418 | ||
409 | if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) | 419 | if (PCI_SLOT(devfn) > 15 || busno > 255) |
410 | return 0; | 420 | return 0; |
411 | 421 | ||
412 | #ifdef GRPCI2_DEBUG_CFGACCESS | 422 | #ifdef GRPCI2_DEBUG_CFGACCESS |
@@ -578,15 +588,15 @@ void grpci2_hw_init(struct grpci2_priv *priv) | |||
578 | REGSTORE(regs->ahbmst_map[i], priv->pci_area); | 588 | REGSTORE(regs->ahbmst_map[i], priv->pci_area); |
579 | 589 | ||
580 | /* Get the GRPCI2 Host PCI ID */ | 590 | /* Get the GRPCI2 Host PCI ID */ |
581 | grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid); | 591 | grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid); |
582 | 592 | ||
583 | /* Get address to first (always defined) capability structure */ | 593 | /* Get address to first (always defined) capability structure */ |
584 | grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr); | 594 | grpci2_cfg_r8(priv, TGT, 0, PCI_CAPABILITY_LIST, &capptr); |
585 | 595 | ||
586 | /* Enable/Disable Byte twisting */ | 596 | /* Enable/Disable Byte twisting */ |
587 | grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map); | 597 | grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map); |
588 | io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); | 598 | io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); |
589 | grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map); | 599 | grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map); |
590 | 600 | ||
591 | /* Setup the Host's PCI Target BARs for other peripherals to access, | 601 | /* Setup the Host's PCI Target BARs for other peripherals to access, |
592 | * and do DMA to the host's memory. The target BARs can be sized and | 602 | * and do DMA to the host's memory. The target BARs can be sized and |
@@ -617,17 +627,18 @@ void grpci2_hw_init(struct grpci2_priv *priv) | |||
617 | pciadr = 0; | 627 | pciadr = 0; |
618 | } | 628 | } |
619 | } | 629 | } |
620 | grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz); | 630 | grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4, |
621 | grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); | 631 | bar_sz); |
622 | grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); | 632 | grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); |
633 | grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); | ||
623 | printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n", | 634 | printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n", |
624 | i, pciadr, ahbadr); | 635 | i, pciadr, ahbadr); |
625 | } | 636 | } |
626 | 637 | ||
627 | /* set as bus master and enable pci memory responses */ | 638 | /* set as bus master and enable pci memory responses */ |
628 | grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data); | 639 | grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data); |
629 | data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); | 640 | data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); |
630 | grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data); | 641 | grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data); |
631 | 642 | ||
632 | /* Enable Error respone (CPU-TRAP) on illegal memory access. */ | 643 | /* Enable Error respone (CPU-TRAP) on illegal memory access. */ |
633 | REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE); | 644 | REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE); |
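
Two conventions run through the grpci2 changes above: configuration accesses on bus 0 get their devfn shifted by 0x8 * 6 so that device 0 starts at AD16, and the out-of-range pseudo bus number TGT (256) is used internally to reach the host controller's own configuration registers, which is why grpci2_hw_init() now passes TGT everywhere. A small sketch of the devfn remapping (PCI_SLOT/PCI_DEVFN use the standard slot-in-bits-3..7 encoding):

#include <stdio.h>

#define PCI_DEVFN(slot, func)  ((((slot) & 0x1f) << 3) | ((func) & 0x07))
#define PCI_SLOT(devfn)        (((devfn) >> 3) & 0x1f)

#define TGT 256   /* pseudo bus number: the bridge controller itself */

int main(void)
{
    unsigned int slot;

    for (slot = 0; slot < 4; slot++) {
        unsigned int devfn = PCI_DEVFN(slot, 0);
        unsigned int remapped = devfn + 0x8 * 6;   /* bus 0: device 0 starts at AD16 */

        printf("bus 0 device %u -> remapped slot %u\n", slot, PCI_SLOT(remapped));
    }
    printf("bus %d -> host bridge itself (internally bus 0, devfn 0)\n", TGT);
    return 0;
}
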
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig index 8c5eff6d6df5..47684815e5c8 100644 --- a/arch/tile/configs/tilegx_defconfig +++ b/arch/tile/configs/tilegx_defconfig | |||
@@ -330,7 +330,6 @@ CONFIG_MD_RAID0=m | |||
330 | CONFIG_MD_RAID1=m | 330 | CONFIG_MD_RAID1=m |
331 | CONFIG_MD_RAID10=m | 331 | CONFIG_MD_RAID10=m |
332 | CONFIG_MD_RAID456=m | 332 | CONFIG_MD_RAID456=m |
333 | CONFIG_MULTICORE_RAID456=y | ||
334 | CONFIG_MD_FAULTY=m | 333 | CONFIG_MD_FAULTY=m |
335 | CONFIG_BLK_DEV_DM=m | 334 | CONFIG_BLK_DEV_DM=m |
336 | CONFIG_DM_DEBUG=y | 335 | CONFIG_DM_DEBUG=y |
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig index e7a3dfcbcda7..dd2b8f0c631f 100644 --- a/arch/tile/configs/tilepro_defconfig +++ b/arch/tile/configs/tilepro_defconfig | |||
@@ -324,7 +324,6 @@ CONFIG_MD_RAID0=m | |||
324 | CONFIG_MD_RAID1=m | 324 | CONFIG_MD_RAID1=m |
325 | CONFIG_MD_RAID10=m | 325 | CONFIG_MD_RAID10=m |
326 | CONFIG_MD_RAID456=m | 326 | CONFIG_MD_RAID456=m |
327 | CONFIG_MULTICORE_RAID456=y | ||
328 | CONFIG_MD_FAULTY=m | 327 | CONFIG_MD_FAULTY=m |
329 | CONFIG_BLK_DEV_DM=m | 328 | CONFIG_BLK_DEV_DM=m |
330 | CONFIG_DM_DEBUG=y | 329 | CONFIG_DM_DEBUG=y |
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h index 241c0bb60b12..c96f9bbb760d 100644 --- a/arch/tile/include/asm/irqflags.h +++ b/arch/tile/include/asm/irqflags.h | |||
@@ -40,7 +40,15 @@ | |||
40 | #include <asm/percpu.h> | 40 | #include <asm/percpu.h> |
41 | #include <arch/spr_def.h> | 41 | #include <arch/spr_def.h> |
42 | 42 | ||
43 | /* Set and clear kernel interrupt masks. */ | 43 | /* |
44 | * Set and clear kernel interrupt masks. | ||
45 | * | ||
46 | * NOTE: __insn_mtspr() is a compiler builtin marked as a memory | ||
47 | * clobber. We rely on it being equivalent to a compiler barrier in | ||
48 | * this code since arch_local_irq_save() and friends must act as | ||
49 | * compiler barriers. This compiler semantic is baked into enough | ||
50 | * places that the compiler will maintain it going forward. | ||
51 | */ | ||
44 | #if CHIP_HAS_SPLIT_INTR_MASK() | 52 | #if CHIP_HAS_SPLIT_INTR_MASK() |
45 | #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 | 53 | #if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32 |
46 | # error Fix assumptions about which word various interrupts are in | 54 | # error Fix assumptions about which word various interrupts are in |
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index d1e15f7b59c6..7a5aa1a7864e 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c | |||
@@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot) | |||
1004 | 1004 | ||
1005 | #ifdef CONFIG_BLK_DEV_INITRD | 1005 | #ifdef CONFIG_BLK_DEV_INITRD |
1006 | 1006 | ||
1007 | /* | ||
1008 | * Note that the kernel can potentially support other compression | ||
1009 | * techniques than gz, though we don't do so by default. If we ever | ||
1010 | * decide to do so we can either look for other filename extensions, | ||
1011 | * or just allow a file with this name to be compressed with an | ||
1012 | * arbitrary compressor (somewhat counterintuitively). | ||
1013 | */ | ||
1014 | static int __initdata set_initramfs_file; | 1007 | static int __initdata set_initramfs_file; |
1015 | static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; | 1008 | static char __initdata initramfs_file[128] = "initramfs"; |
1016 | 1009 | ||
1017 | static int __init setup_initramfs_file(char *str) | 1010 | static int __init setup_initramfs_file(char *str) |
1018 | { | 1011 | { |
@@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str) | |||
1026 | early_param("initramfs_file", setup_initramfs_file); | 1019 | early_param("initramfs_file", setup_initramfs_file); |
1027 | 1020 | ||
1028 | /* | 1021 | /* |
1029 | * We look for an "initramfs.cpio.gz" file in the hvfs. | 1022 | * We look for a file called "initramfs" in the hvfs. If there is one, we |
1030 | * If there is one, we allocate some memory for it and it will be | 1023 | * allocate some memory for it and it will be unpacked to the initramfs. |
1031 | * unpacked to the initramfs. | 1024 | * If it's compressed, the initd code will uncompress it first. |
1032 | */ | 1025 | */ |
1033 | static void __init load_hv_initrd(void) | 1026 | static void __init load_hv_initrd(void) |
1034 | { | 1027 | { |
@@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void) | |||
1038 | 1031 | ||
1039 | fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); | 1032 | fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); |
1040 | if (fd == HV_ENOENT) { | 1033 | if (fd == HV_ENOENT) { |
1041 | if (set_initramfs_file) | 1034 | if (set_initramfs_file) { |
1042 | pr_warning("No such hvfs initramfs file '%s'\n", | 1035 | pr_warning("No such hvfs initramfs file '%s'\n", |
1043 | initramfs_file); | 1036 | initramfs_file); |
1044 | return; | 1037 | return; |
1038 | } else { | ||
1039 | /* Try old backwards-compatible name. */ | ||
1040 | fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz"); | ||
1041 | if (fd == HV_ENOENT) | ||
1042 | return; | ||
1043 | } | ||
1045 | } | 1044 | } |
1046 | BUG_ON(fd < 0); | 1045 | BUG_ON(fd < 0); |
1047 | stat = hv_fs_fstat(fd); | 1046 | stat = hv_fs_fstat(fd); |
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 8a84501acb1b..5ef205c5f37b 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | # create a compressed vmlinux image from the original vmlinux | 4 | # create a compressed vmlinux image from the original vmlinux |
5 | # | 5 | # |
6 | 6 | ||
7 | targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o | 7 | targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo |
8 | 8 | ||
9 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 | 9 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 |
10 | KBUILD_CFLAGS += -fno-strict-aliasing -fPIC | 10 | KBUILD_CFLAGS += -fno-strict-aliasing -fPIC |
@@ -29,7 +29,6 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ | |||
29 | $(obj)/piggy.o | 29 | $(obj)/piggy.o |
30 | 30 | ||
31 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone | 31 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone |
32 | $(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone | ||
33 | 32 | ||
34 | ifeq ($(CONFIG_EFI_STUB), y) | 33 | ifeq ($(CONFIG_EFI_STUB), y) |
35 | VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o | 34 | VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o |
@@ -43,7 +42,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S | |||
43 | $(obj)/vmlinux.bin: vmlinux FORCE | 42 | $(obj)/vmlinux.bin: vmlinux FORCE |
44 | $(call if_changed,objcopy) | 43 | $(call if_changed,objcopy) |
45 | 44 | ||
46 | targets += vmlinux.bin.all vmlinux.relocs | 45 | targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs |
47 | 46 | ||
48 | CMD_RELOCS = arch/x86/tools/relocs | 47 | CMD_RELOCS = arch/x86/tools/relocs |
49 | quiet_cmd_relocs = RELOCS $@ | 48 | quiet_cmd_relocs = RELOCS $@ |
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index d3ddd17405d0..5a6d2873f80e 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h | |||
@@ -77,6 +77,7 @@ struct arch_specific_insn { | |||
77 | * a post_handler or break_handler). | 77 | * a post_handler or break_handler). |
78 | */ | 78 | */ |
79 | int boostable; | 79 | int boostable; |
80 | bool if_modifier; | ||
80 | }; | 81 | }; |
81 | 82 | ||
82 | struct arch_optimized_insn { | 83 | struct arch_optimized_insn { |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 635a74d22409..4979778cc7fb 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -414,8 +414,8 @@ struct kvm_vcpu_arch { | |||
414 | gpa_t time; | 414 | gpa_t time; |
415 | struct pvclock_vcpu_time_info hv_clock; | 415 | struct pvclock_vcpu_time_info hv_clock; |
416 | unsigned int hw_tsc_khz; | 416 | unsigned int hw_tsc_khz; |
417 | unsigned int time_offset; | 417 | struct gfn_to_hva_cache pv_time; |
418 | struct page *time_page; | 418 | bool pv_time_enabled; |
419 | /* set guest stopped flag in pvclock flags field */ | 419 | /* set guest stopped flag in pvclock flags field */ |
420 | bool pvclock_set_guest_stopped_request; | 420 | bool pvclock_set_guest_stopped_request; |
421 | 421 | ||
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h index 1ace47b62592..2e188d68397c 100644 --- a/arch/x86/include/asm/syscall.h +++ b/arch/x86/include/asm/syscall.h | |||
@@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[]; | |||
29 | */ | 29 | */ |
30 | static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) | 30 | static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs) |
31 | { | 31 | { |
32 | return regs->orig_ax & __SYSCALL_MASK; | 32 | return regs->orig_ax; |
33 | } | 33 | } |
34 | 34 | ||
35 | static inline void syscall_rollback(struct task_struct *task, | 35 | static inline void syscall_rollback(struct task_struct *task, |
36 | struct pt_regs *regs) | 36 | struct pt_regs *regs) |
37 | { | 37 | { |
38 | regs->ax = regs->orig_ax & __SYSCALL_MASK; | 38 | regs->ax = regs->orig_ax; |
39 | } | 39 | } |
40 | 40 | ||
41 | static inline long syscall_get_error(struct task_struct *task, | 41 | static inline long syscall_get_error(struct task_struct *task, |
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index c20d1ce62dc6..e709884d0ef9 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h | |||
@@ -382,14 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str) | |||
382 | return _hypercall3(int, console_io, cmd, count, str); | 382 | return _hypercall3(int, console_io, cmd, count, str); |
383 | } | 383 | } |
384 | 384 | ||
385 | extern int __must_check HYPERVISOR_physdev_op_compat(int, void *); | 385 | extern int __must_check xen_physdev_op_compat(int, void *); |
386 | 386 | ||
387 | static inline int | 387 | static inline int |
388 | HYPERVISOR_physdev_op(int cmd, void *arg) | 388 | HYPERVISOR_physdev_op(int cmd, void *arg) |
389 | { | 389 | { |
390 | int rc = _hypercall2(int, physdev_op, cmd, arg); | 390 | int rc = _hypercall2(int, physdev_op, cmd, arg); |
391 | if (unlikely(rc == -ENOSYS)) | 391 | if (unlikely(rc == -ENOSYS)) |
392 | rc = HYPERVISOR_physdev_op_compat(cmd, arg); | 392 | rc = xen_physdev_op_compat(cmd, arg); |
393 | return rc; | 393 | return rc; |
394 | } | 394 | } |
395 | 395 | ||
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index 892ce40a7470..7a060f4b411f 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h | |||
@@ -44,6 +44,7 @@ | |||
44 | #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) | 44 | #define SNB_C1_AUTO_UNDEMOTE (1UL << 27) |
45 | #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) | 45 | #define SNB_C3_AUTO_UNDEMOTE (1UL << 28) |
46 | 46 | ||
47 | #define MSR_PLATFORM_INFO 0x000000ce | ||
47 | #define MSR_MTRRcap 0x000000fe | 48 | #define MSR_MTRRcap 0x000000fe |
48 | #define MSR_IA32_BBL_CR_CTL 0x00000119 | 49 | #define MSR_IA32_BBL_CR_CTL 0x00000119 |
49 | #define MSR_IA32_BBL_CR_CTL3 0x0000011e | 50 | #define MSR_IA32_BBL_CR_CTL3 0x0000011e |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 529c8931fc02..dab7580c47ae 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -101,6 +101,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly = | |||
101 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | 101 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ |
102 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | 102 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ |
103 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ | 103 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ |
104 | INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */ | ||
105 | INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */ | ||
106 | INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ | ||
107 | INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ | ||
104 | INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ | 108 | INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */ |
105 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ | 109 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ |
106 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ | 110 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ |
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 3f06e6149981..7bfe318d3d8a 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
@@ -375,6 +375,9 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p) | |||
375 | else | 375 | else |
376 | p->ainsn.boostable = -1; | 376 | p->ainsn.boostable = -1; |
377 | 377 | ||
378 | /* Check whether the instruction modifies Interrupt Flag or not */ | ||
379 | p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn); | ||
380 | |||
378 | /* Also, displacement change doesn't affect the first byte */ | 381 | /* Also, displacement change doesn't affect the first byte */ |
379 | p->opcode = p->ainsn.insn[0]; | 382 | p->opcode = p->ainsn.insn[0]; |
380 | } | 383 | } |
@@ -434,7 +437,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | |||
434 | __this_cpu_write(current_kprobe, p); | 437 | __this_cpu_write(current_kprobe, p); |
435 | kcb->kprobe_saved_flags = kcb->kprobe_old_flags | 438 | kcb->kprobe_saved_flags = kcb->kprobe_old_flags |
436 | = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); | 439 | = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF)); |
437 | if (is_IF_modifier(p->ainsn.insn)) | 440 | if (p->ainsn.if_modifier) |
438 | kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; | 441 | kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; |
439 | } | 442 | } |
440 | 443 | ||
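
The kprobes change above moves the "does this instruction touch the interrupt flag?" question from the probe-hit path into arch_copy_kprobe(), caching the answer in the new ainsn.if_modifier field so set_current_kprobe() only tests a boolean. A generic, hedged sketch of that decode-once/cache pattern; the opcode test below is a deliberately simplified stand-in, not the kernel's is_IF_modifier():

#include <stdbool.h>
#include <stdio.h>

struct probe {
    unsigned char insn[16];
    bool if_modifier;          /* cached at setup time */
};

/* Stand-in for the instruction decode done once at copy time. */
static bool decode_is_if_modifier(const unsigned char *insn)
{
    return insn[0] == 0xfa || insn[0] == 0xfb;   /* cli / sti opcodes only */
}

static void probe_setup(struct probe *p)
{
    p->if_modifier = decode_is_if_modifier(p->insn);
}

static void probe_hit(const struct probe *p)
{
    /* hot path: just test the cached flag */
    if (p->if_modifier)
        printf("probed insn touches IF, adjust saved flags\n");
    else
        printf("probed insn leaves IF alone\n");
}

int main(void)
{
    struct probe p = { .insn = { 0xfa } };   /* cli */

    probe_setup(&p);
    probe_hit(&p);
    return 0;
}
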
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c index 7890bc838952..d893e8ed8ac9 100644 --- a/arch/x86/kernel/microcode_intel_early.c +++ b/arch/x86/kernel/microcode_intel_early.c | |||
@@ -90,13 +90,13 @@ microcode_phys(struct microcode_intel **mc_saved_tmp, | |||
90 | struct microcode_intel ***mc_saved; | 90 | struct microcode_intel ***mc_saved; |
91 | 91 | ||
92 | mc_saved = (struct microcode_intel ***) | 92 | mc_saved = (struct microcode_intel ***) |
93 | __pa_symbol(&mc_saved_data->mc_saved); | 93 | __pa_nodebug(&mc_saved_data->mc_saved); |
94 | for (i = 0; i < mc_saved_data->mc_saved_count; i++) { | 94 | for (i = 0; i < mc_saved_data->mc_saved_count; i++) { |
95 | struct microcode_intel *p; | 95 | struct microcode_intel *p; |
96 | 96 | ||
97 | p = *(struct microcode_intel **) | 97 | p = *(struct microcode_intel **) |
98 | __pa(mc_saved_data->mc_saved + i); | 98 | __pa_nodebug(mc_saved_data->mc_saved + i); |
99 | mc_saved_tmp[i] = (struct microcode_intel *)__pa(p); | 99 | mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p); |
100 | } | 100 | } |
101 | } | 101 | } |
102 | #endif | 102 | #endif |
@@ -562,7 +562,7 @@ scan_microcode(unsigned long start, unsigned long end, | |||
562 | struct cpio_data cd; | 562 | struct cpio_data cd; |
563 | long offset = 0; | 563 | long offset = 0; |
564 | #ifdef CONFIG_X86_32 | 564 | #ifdef CONFIG_X86_32 |
565 | char *p = (char *)__pa_symbol(ucode_name); | 565 | char *p = (char *)__pa_nodebug(ucode_name); |
566 | #else | 566 | #else |
567 | char *p = ucode_name; | 567 | char *p = ucode_name; |
568 | #endif | 568 | #endif |
@@ -630,8 +630,8 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci) | |||
630 | if (mc_intel == NULL) | 630 | if (mc_intel == NULL) |
631 | return; | 631 | return; |
632 | 632 | ||
633 | delay_ucode_info_p = (int *)__pa_symbol(&delay_ucode_info); | 633 | delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info); |
634 | current_mc_date_p = (int *)__pa_symbol(&current_mc_date); | 634 | current_mc_date_p = (int *)__pa_nodebug(&current_mc_date); |
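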
635 | 635 | ||
636 | *delay_ucode_info_p = 1; | 636 | *delay_ucode_info_p = 1; |
637 | *current_mc_date_p = mc_intel->hdr.date; | 637 | *current_mc_date_p = mc_intel->hdr.date; |
@@ -659,8 +659,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci) | |||
659 | } | 659 | } |
660 | #endif | 660 | #endif |
661 | 661 | ||
662 | static int apply_microcode_early(struct mc_saved_data *mc_saved_data, | 662 | static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data, |
663 | struct ucode_cpu_info *uci) | 663 | struct ucode_cpu_info *uci) |
664 | { | 664 | { |
665 | struct microcode_intel *mc_intel; | 665 | struct microcode_intel *mc_intel; |
666 | unsigned int val[2]; | 666 | unsigned int val[2]; |
@@ -741,15 +741,15 @@ load_ucode_intel_bsp(void) | |||
741 | #ifdef CONFIG_X86_32 | 741 | #ifdef CONFIG_X86_32 |
742 | struct boot_params *boot_params_p; | 742 | struct boot_params *boot_params_p; |
743 | 743 | ||
744 | boot_params_p = (struct boot_params *)__pa_symbol(&boot_params); | 744 | boot_params_p = (struct boot_params *)__pa_nodebug(&boot_params); |
745 | ramdisk_image = boot_params_p->hdr.ramdisk_image; | 745 | ramdisk_image = boot_params_p->hdr.ramdisk_image; |
746 | ramdisk_size = boot_params_p->hdr.ramdisk_size; | 746 | ramdisk_size = boot_params_p->hdr.ramdisk_size; |
747 | initrd_start_early = ramdisk_image; | 747 | initrd_start_early = ramdisk_image; |
748 | initrd_end_early = initrd_start_early + ramdisk_size; | 748 | initrd_end_early = initrd_start_early + ramdisk_size; |
749 | 749 | ||
750 | _load_ucode_intel_bsp( | 750 | _load_ucode_intel_bsp( |
751 | (struct mc_saved_data *)__pa_symbol(&mc_saved_data), | 751 | (struct mc_saved_data *)__pa_nodebug(&mc_saved_data), |
752 | (unsigned long *)__pa_symbol(&mc_saved_in_initrd), | 752 | (unsigned long *)__pa_nodebug(&mc_saved_in_initrd), |
753 | initrd_start_early, initrd_end_early, &uci); | 753 | initrd_start_early, initrd_end_early, &uci); |
754 | #else | 754 | #else |
755 | ramdisk_image = boot_params.hdr.ramdisk_image; | 755 | ramdisk_image = boot_params.hdr.ramdisk_image; |
@@ -772,10 +772,10 @@ void __cpuinit load_ucode_intel_ap(void) | |||
772 | unsigned long *initrd_start_p; | 772 | unsigned long *initrd_start_p; |
773 | 773 | ||
774 | mc_saved_in_initrd_p = | 774 | mc_saved_in_initrd_p = |
775 | (unsigned long *)__pa_symbol(mc_saved_in_initrd); | 775 | (unsigned long *)__pa_nodebug(mc_saved_in_initrd); |
776 | mc_saved_data_p = (struct mc_saved_data *)__pa_symbol(&mc_saved_data); | 776 | mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data); |
777 | initrd_start_p = (unsigned long *)__pa_symbol(&initrd_start); | 777 | initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start); |
778 | initrd_start_addr = (unsigned long)__pa_symbol(*initrd_start_p); | 778 | initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p); |
779 | #else | 779 | #else |
780 | mc_saved_data_p = &mc_saved_data; | 780 | mc_saved_data_p = &mc_saved_data; |
781 | mc_saved_in_initrd_p = mc_saved_in_initrd; | 781 | mc_saved_in_initrd_p = mc_saved_in_initrd; |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 02b51dd4e4ad..f77df1c5de6e 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data) | |||
1857 | if (!pv_eoi_enabled(vcpu)) | 1857 | if (!pv_eoi_enabled(vcpu)) |
1858 | return 0; | 1858 | return 0; |
1859 | return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, | 1859 | return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data, |
1860 | addr); | 1860 | addr, sizeof(u8)); |
1861 | } | 1861 | } |
1862 | 1862 | ||
1863 | void kvm_lapic_init(void) | 1863 | void kvm_lapic_init(void) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f71500af1f81..e1721324c271 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1406,25 +1406,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1406 | unsigned long flags, this_tsc_khz; | 1406 | unsigned long flags, this_tsc_khz; |
1407 | struct kvm_vcpu_arch *vcpu = &v->arch; | 1407 | struct kvm_vcpu_arch *vcpu = &v->arch; |
1408 | struct kvm_arch *ka = &v->kvm->arch; | 1408 | struct kvm_arch *ka = &v->kvm->arch; |
1409 | void *shared_kaddr; | ||
1410 | s64 kernel_ns, max_kernel_ns; | 1409 | s64 kernel_ns, max_kernel_ns; |
1411 | u64 tsc_timestamp, host_tsc; | 1410 | u64 tsc_timestamp, host_tsc; |
1412 | struct pvclock_vcpu_time_info *guest_hv_clock; | 1411 | struct pvclock_vcpu_time_info guest_hv_clock; |
1413 | u8 pvclock_flags; | 1412 | u8 pvclock_flags; |
1414 | bool use_master_clock; | 1413 | bool use_master_clock; |
1415 | 1414 | ||
1416 | kernel_ns = 0; | 1415 | kernel_ns = 0; |
1417 | host_tsc = 0; | 1416 | host_tsc = 0; |
1418 | 1417 | ||
1419 | /* Keep irq disabled to prevent changes to the clock */ | ||
1420 | local_irq_save(flags); | ||
1421 | this_tsc_khz = __get_cpu_var(cpu_tsc_khz); | ||
1422 | if (unlikely(this_tsc_khz == 0)) { | ||
1423 | local_irq_restore(flags); | ||
1424 | kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); | ||
1425 | return 1; | ||
1426 | } | ||
1427 | |||
1428 | /* | 1418 | /* |
1429 | * If the host uses TSC clock, then passthrough TSC as stable | 1419 | * If the host uses TSC clock, then passthrough TSC as stable |
1430 | * to the guest. | 1420 | * to the guest. |
@@ -1436,6 +1426,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1436 | kernel_ns = ka->master_kernel_ns; | 1426 | kernel_ns = ka->master_kernel_ns; |
1437 | } | 1427 | } |
1438 | spin_unlock(&ka->pvclock_gtod_sync_lock); | 1428 | spin_unlock(&ka->pvclock_gtod_sync_lock); |
1429 | |||
1430 | /* Keep irq disabled to prevent changes to the clock */ | ||
1431 | local_irq_save(flags); | ||
1432 | this_tsc_khz = __get_cpu_var(cpu_tsc_khz); | ||
1433 | if (unlikely(this_tsc_khz == 0)) { | ||
1434 | local_irq_restore(flags); | ||
1435 | kvm_make_request(KVM_REQ_CLOCK_UPDATE, v); | ||
1436 | return 1; | ||
1437 | } | ||
1439 | if (!use_master_clock) { | 1438 | if (!use_master_clock) { |
1440 | host_tsc = native_read_tsc(); | 1439 | host_tsc = native_read_tsc(); |
1441 | kernel_ns = get_kernel_ns(); | 1440 | kernel_ns = get_kernel_ns(); |
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1463 | 1462 | ||
1464 | local_irq_restore(flags); | 1463 | local_irq_restore(flags); |
1465 | 1464 | ||
1466 | if (!vcpu->time_page) | 1465 | if (!vcpu->pv_time_enabled) |
1467 | return 0; | 1466 | return 0; |
1468 | 1467 | ||
1469 | /* | 1468 | /* |
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1525 | */ | 1524 | */ |
1526 | vcpu->hv_clock.version += 2; | 1525 | vcpu->hv_clock.version += 2; |
1527 | 1526 | ||
1528 | shared_kaddr = kmap_atomic(vcpu->time_page); | 1527 | if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, |
1529 | 1528 | &guest_hv_clock, sizeof(guest_hv_clock)))) | |
1530 | guest_hv_clock = shared_kaddr + vcpu->time_offset; | 1529 | return 0; |
1531 | 1530 | ||
1532 | /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ | 1531 | /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */ |
1533 | pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); | 1532 | pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); |
1534 | 1533 | ||
1535 | if (vcpu->pvclock_set_guest_stopped_request) { | 1534 | if (vcpu->pvclock_set_guest_stopped_request) { |
1536 | pvclock_flags |= PVCLOCK_GUEST_STOPPED; | 1535 | pvclock_flags |= PVCLOCK_GUEST_STOPPED; |
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1543 | 1542 | ||
1544 | vcpu->hv_clock.flags = pvclock_flags; | 1543 | vcpu->hv_clock.flags = pvclock_flags; |
1545 | 1544 | ||
1546 | memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, | 1545 | kvm_write_guest_cached(v->kvm, &vcpu->pv_time, |
1547 | sizeof(vcpu->hv_clock)); | 1546 | &vcpu->hv_clock, |
1548 | 1547 | sizeof(vcpu->hv_clock)); | |
1549 | kunmap_atomic(shared_kaddr); | ||
1550 | |||
1551 | mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); | ||
1552 | return 0; | 1548 | return 0; |
1553 | } | 1549 | } |
1554 | 1550 | ||
@@ -1827,7 +1823,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) | |||
1827 | return 0; | 1823 | return 0; |
1828 | } | 1824 | } |
1829 | 1825 | ||
1830 | if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) | 1826 | if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, |
1827 | sizeof(u32))) | ||
1831 | return 1; | 1828 | return 1; |
1832 | 1829 | ||
1833 | vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); | 1830 | vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); |
@@ -1837,10 +1834,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) | |||
1837 | 1834 | ||
1838 | static void kvmclock_reset(struct kvm_vcpu *vcpu) | 1835 | static void kvmclock_reset(struct kvm_vcpu *vcpu) |
1839 | { | 1836 | { |
1840 | if (vcpu->arch.time_page) { | 1837 | vcpu->arch.pv_time_enabled = false; |
1841 | kvm_release_page_dirty(vcpu->arch.time_page); | ||
1842 | vcpu->arch.time_page = NULL; | ||
1843 | } | ||
1844 | } | 1838 | } |
1845 | 1839 | ||
1846 | static void accumulate_steal_time(struct kvm_vcpu *vcpu) | 1840 | static void accumulate_steal_time(struct kvm_vcpu *vcpu) |
@@ -1947,6 +1941,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
1947 | break; | 1941 | break; |
1948 | case MSR_KVM_SYSTEM_TIME_NEW: | 1942 | case MSR_KVM_SYSTEM_TIME_NEW: |
1949 | case MSR_KVM_SYSTEM_TIME: { | 1943 | case MSR_KVM_SYSTEM_TIME: { |
1944 | u64 gpa_offset; | ||
1950 | kvmclock_reset(vcpu); | 1945 | kvmclock_reset(vcpu); |
1951 | 1946 | ||
1952 | vcpu->arch.time = data; | 1947 | vcpu->arch.time = data; |
@@ -1956,14 +1951,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
1956 | if (!(data & 1)) | 1951 | if (!(data & 1)) |
1957 | break; | 1952 | break; |
1958 | 1953 | ||
1959 | /* ...but clean it before doing the actual write */ | 1954 | gpa_offset = data & ~(PAGE_MASK | 1); |
1960 | vcpu->arch.time_offset = data & ~(PAGE_MASK | 1); | ||
1961 | |||
1962 | vcpu->arch.time_page = | ||
1963 | gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); | ||
1964 | 1955 | ||
1965 | if (is_error_page(vcpu->arch.time_page)) | 1956 | if (kvm_gfn_to_hva_cache_init(vcpu->kvm, |
1966 | vcpu->arch.time_page = NULL; | 1957 | &vcpu->arch.pv_time, data & ~1ULL, |
1958 | sizeof(struct pvclock_vcpu_time_info))) | ||
1959 | vcpu->arch.pv_time_enabled = false; | ||
1960 | else | ||
1961 | vcpu->arch.pv_time_enabled = true; | ||
1967 | 1962 | ||
1968 | break; | 1963 | break; |
1969 | } | 1964 | } |
@@ -1980,7 +1975,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
1980 | return 1; | 1975 | return 1; |
1981 | 1976 | ||
1982 | if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, | 1977 | if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime, |
1983 | data & KVM_STEAL_VALID_BITS)) | 1978 | data & KVM_STEAL_VALID_BITS, |
1979 | sizeof(struct kvm_steal_time))) | ||
1984 | return 1; | 1980 | return 1; |
1985 | 1981 | ||
1986 | vcpu->arch.st.msr_val = data; | 1982 | vcpu->arch.st.msr_val = data; |
@@ -2967,7 +2963,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, | |||
2967 | */ | 2963 | */ |
2968 | static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) | 2964 | static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) |
2969 | { | 2965 | { |
2970 | if (!vcpu->arch.time_page) | 2966 | if (!vcpu->arch.pv_time_enabled) |
2971 | return -EINVAL; | 2967 | return -EINVAL; |
2972 | vcpu->arch.pvclock_set_guest_stopped_request = true; | 2968 | vcpu->arch.pvclock_set_guest_stopped_request = true; |
2973 | kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); | 2969 | kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); |
@@ -6718,6 +6714,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
6718 | goto fail_free_wbinvd_dirty_mask; | 6714 | goto fail_free_wbinvd_dirty_mask; |
6719 | 6715 | ||
6720 | vcpu->arch.ia32_tsc_adjust_msr = 0x0; | 6716 | vcpu->arch.ia32_tsc_adjust_msr = 0x0; |
6717 | vcpu->arch.pv_time_enabled = false; | ||
6721 | kvm_async_pf_hash_reset(vcpu); | 6718 | kvm_async_pf_hash_reset(vcpu); |
6722 | kvm_pmu_init(vcpu); | 6719 | kvm_pmu_init(vcpu); |
6723 | 6720 | ||
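
Note on the x86.c hunks above: the pinned time_page/kmap_atomic() path is dropped in favour of kvm_read_guest_cached()/kvm_write_guest_cached() through a gfn-to-hva cache that is initialized with an explicit size, and the MSR handler now gates later updates on pv_time_enabled. The sketch below only illustrates the general shape of that pattern in plain C: validate a destination window once (offset plus length), remember whether it is usable, and have later writers check the cached flag and bound. The structure and names are illustrative, not KVM's API.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define GUEST_MEM_SIZE 4096

static uint8_t guest_mem[GUEST_MEM_SIZE];   /* stands in for guest memory */

struct gpa_cache {
        size_t offset;
        size_t len;
        bool   enabled;
};

/* Validate once: the cached window must lie entirely inside guest memory. */
static int gpa_cache_init(struct gpa_cache *c, size_t offset, size_t len)
{
        if (offset > GUEST_MEM_SIZE || len > GUEST_MEM_SIZE - offset) {
                c->enabled = false;
                return -1;
        }
        c->offset = offset;
        c->len = len;
        c->enabled = true;
        return 0;
}

/* Fast path: no remapping or pinning, just a bounds-checked copy. */
static int gpa_cache_write(const struct gpa_cache *c, const void *src, size_t len)
{
        if (!c->enabled || len > c->len)
                return -1;
        memcpy(guest_mem + c->offset, src, len);
        return 0;
}

int main(void)
{
        struct gpa_cache clock_area;
        uint64_t fake_clock = 123456789;

        if (gpa_cache_init(&clock_area, 256, sizeof(fake_clock)) == 0)
                gpa_cache_write(&clock_area, &fake_clock, sizeof(fake_clock));
        printf("enabled=%d first byte=%u\n", clock_area.enabled, guest_mem[256]);
        return 0;
}
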
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 05928aae911e..906fea315791 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c | |||
@@ -74,10 +74,10 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest) | |||
74 | char c; | 74 | char c; |
75 | unsigned zero_len; | 75 | unsigned zero_len; |
76 | 76 | ||
77 | for (; len; --len) { | 77 | for (; len; --len, to++) { |
78 | if (__get_user_nocheck(c, from++, sizeof(char))) | 78 | if (__get_user_nocheck(c, from++, sizeof(char))) |
79 | break; | 79 | break; |
80 | if (__put_user_nocheck(c, to++, sizeof(char))) | 80 | if (__put_user_nocheck(c, to, sizeof(char))) |
81 | break; | 81 | break; |
82 | } | 82 | } |
83 | 83 | ||
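
Note on the usercopy_64.c hunk above: the destination increment moves into the loop's update clause, so 'to' only advances after a fully successful iteration, and the zeroing that follows the loop starts at the first byte that was not written. The sketch below mirrors that loop structure in user space; the simulated fault and helper names are illustrative, not the kernel's fault-handling primitives.

#include <stdio.h>
#include <string.h>

/* Simulated faulting read: fails once 'fail_at' bytes have been copied. */
static int get_byte(const char *from, char *c, unsigned copied, unsigned fail_at)
{
        if (copied >= fail_at)
                return -1;      /* pretend the source page faulted */
        *c = *from;
        return 0;
}

/*
 * Copy up to len bytes; on a fault, zero everything that was not written.
 * The destination advances in the loop header, i.e. only after a fully
 * successful iteration, as in the fix above.
 */
static unsigned copy_tail(char *to, const char *from, unsigned len, unsigned fail_at)
{
        unsigned copied = 0;
        char c;

        for (; len; --len, to++, copied++) {
                if (get_byte(from++, &c, copied, fail_at))
                        break;
                *to = c;
        }
        memset(to, 0, len);     /* starts exactly at the first unwritten byte */
        return len;             /* bytes not copied */
}

int main(void)
{
        char dst[8];
        unsigned left = copy_tail(dst, "ABCDEFGH", sizeof(dst), 3);

        printf("left=%u dst[3]=%d\n", left, dst[3]);    /* left=5, dst[3]=0 */
        return 0;
}
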
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index e8e34938c57d..a4ea92477e01 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -1467,8 +1467,6 @@ static void __init xen_write_cr3_init(unsigned long cr3) | |||
1467 | __xen_write_cr3(true, cr3); | 1467 | __xen_write_cr3(true, cr3); |
1468 | 1468 | ||
1469 | xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ | 1469 | xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ |
1470 | |||
1471 | pv_mmu_ops.write_cr3 = &xen_write_cr3; | ||
1472 | } | 1470 | } |
1473 | #endif | 1471 | #endif |
1474 | 1472 | ||
@@ -1750,14 +1748,18 @@ static void *m2v(phys_addr_t maddr) | |||
1750 | } | 1748 | } |
1751 | 1749 | ||
1752 | /* Set the page permissions on an identity-mapped pages */ | 1750 | /* Set the page permissions on an identity-mapped pages */ |
1753 | static void set_page_prot(void *addr, pgprot_t prot) | 1751 | static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags) |
1754 | { | 1752 | { |
1755 | unsigned long pfn = __pa(addr) >> PAGE_SHIFT; | 1753 | unsigned long pfn = __pa(addr) >> PAGE_SHIFT; |
1756 | pte_t pte = pfn_pte(pfn, prot); | 1754 | pte_t pte = pfn_pte(pfn, prot); |
1757 | 1755 | ||
1758 | if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0)) | 1756 | if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags)) |
1759 | BUG(); | 1757 | BUG(); |
1760 | } | 1758 | } |
1759 | static void set_page_prot(void *addr, pgprot_t prot) | ||
1760 | { | ||
1761 | return set_page_prot_flags(addr, prot, UVMF_NONE); | ||
1762 | } | ||
1761 | #ifdef CONFIG_X86_32 | 1763 | #ifdef CONFIG_X86_32 |
1762 | static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) | 1764 | static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) |
1763 | { | 1765 | { |
@@ -1841,12 +1843,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end, | |||
1841 | unsigned long addr) | 1843 | unsigned long addr) |
1842 | { | 1844 | { |
1843 | if (*pt_base == PFN_DOWN(__pa(addr))) { | 1845 | if (*pt_base == PFN_DOWN(__pa(addr))) { |
1844 | set_page_prot((void *)addr, PAGE_KERNEL); | 1846 | set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); |
1845 | clear_page((void *)addr); | 1847 | clear_page((void *)addr); |
1846 | (*pt_base)++; | 1848 | (*pt_base)++; |
1847 | } | 1849 | } |
1848 | if (*pt_end == PFN_DOWN(__pa(addr))) { | 1850 | if (*pt_end == PFN_DOWN(__pa(addr))) { |
1849 | set_page_prot((void *)addr, PAGE_KERNEL); | 1851 | set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); |
1850 | clear_page((void *)addr); | 1852 | clear_page((void *)addr); |
1851 | (*pt_end)--; | 1853 | (*pt_end)--; |
1852 | } | 1854 | } |
@@ -2122,6 +2124,7 @@ static void __init xen_post_allocator_init(void) | |||
2122 | #endif | 2124 | #endif |
2123 | 2125 | ||
2124 | #ifdef CONFIG_X86_64 | 2126 | #ifdef CONFIG_X86_64 |
2127 | pv_mmu_ops.write_cr3 = &xen_write_cr3; | ||
2125 | SetPagePinned(virt_to_page(level3_user_vsyscall)); | 2128 | SetPagePinned(virt_to_page(level3_user_vsyscall)); |
2126 | #endif | 2129 | #endif |
2127 | xen_mark_init_mm_pinned(); | 2130 | xen_mark_init_mm_pinned(); |
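
Note on the xen/mmu.c hunks above: set_page_prot() gains a flags-taking variant, set_page_prot_flags(), while the existing call sites stay untouched, and the switch of pv_mmu_ops.write_cr3 to xen_write_cr3 moves into xen_post_allocator_init(). The sketch below shows only the wrapper pattern: keep the old entry point as a thin wrapper around a new, more general helper so callers that need the extra argument (here an explicit flush policy) can pass it without touching everyone else. Names and types are illustrative.

#include <stdio.h>

enum flush { FLUSH_NONE, FLUSH_LOCAL };

/* New, more general helper: takes an explicit flush policy. */
static void set_prot_flags(void *addr, int prot, enum flush f)
{
        printf("addr=%p prot=%d flush=%s\n",
               addr, prot, f == FLUSH_LOCAL ? "local" : "none");
}

/* Old entry point preserved as a wrapper, so existing callers compile as-is. */
static void set_prot(void *addr, int prot)
{
        set_prot_flags(addr, prot, FLUSH_NONE);
}

int main(void)
{
        int page;

        set_prot(&page, 1);                     /* unchanged caller */
        set_prot_flags(&page, 1, FLUSH_LOCAL);  /* caller that needs the flush */
        return 0;
}
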
diff --git a/block/blk-flush.c b/block/blk-flush.c index db8f1b507857..cc2b827a853c 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c | |||
@@ -444,7 +444,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, | |||
444 | * copied from blk_rq_pos(rq). | 444 | * copied from blk_rq_pos(rq). |
445 | */ | 445 | */ |
446 | if (error_sector) | 446 | if (error_sector) |
447 | *error_sector = bio->bi_sector; | 447 | *error_sector = bio->bi_sector; |
448 | 448 | ||
449 | if (!bio_flagged(bio, BIO_UPTODATE)) | 449 | if (!bio_flagged(bio, BIO_UPTODATE)) |
450 | ret = -EIO; | 450 | ret = -EIO; |
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 6206a934eb8c..5efc5a647183 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -229,6 +229,8 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \ | |||
229 | unsigned long val; \ | 229 | unsigned long val; \ |
230 | ssize_t ret; \ | 230 | ssize_t ret; \ |
231 | ret = queue_var_store(&val, page, count); \ | 231 | ret = queue_var_store(&val, page, count); \ |
232 | if (ret < 0) \ | ||
233 | return ret; \ | ||
232 | if (neg) \ | 234 | if (neg) \ |
233 | val = !val; \ | 235 | val = !val; \ |
234 | \ | 236 | \ |
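
Note on the blk-sysfs.c hunk above: the store macro now bails out when queue_var_store() reports a parse error instead of going on to use the value. A minimal sketch of the same check-before-use pattern around a string-to-number parse; parse_ulong() is a stand-in, not the kernel helper.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for queue_var_store(): 0 on success, -EINVAL on bad input. */
static int parse_ulong(const char *page, unsigned long *val)
{
        char *end;

        errno = 0;
        *val = strtoul(page, &end, 10);
        if (errno || end == page || *end != '\0')
                return -EINVAL;
        return 0;
}

static long store_setting(const char *page, unsigned long *setting)
{
        unsigned long val;
        int ret = parse_ulong(page, &val);

        if (ret < 0)            /* the added check: don't touch 'val' on error */
                return ret;
        *setting = val;
        return 0;
}

int main(void)
{
        unsigned long nomerges = 0;

        printf("ok=%ld nomerges=%lu\n", store_setting("2", &nomerges), nomerges);
        printf("bad=%ld nomerges=%lu\n", store_setting("junk", &nomerges), nomerges);
        return 0;
}
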
diff --git a/crypto/gcm.c b/crypto/gcm.c index 137ad1ec5438..13ccbda34ff9 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c | |||
@@ -44,6 +44,7 @@ struct crypto_rfc4543_ctx { | |||
44 | 44 | ||
45 | struct crypto_rfc4543_req_ctx { | 45 | struct crypto_rfc4543_req_ctx { |
46 | u8 auth_tag[16]; | 46 | u8 auth_tag[16]; |
47 | u8 assocbuf[32]; | ||
47 | struct scatterlist cipher[1]; | 48 | struct scatterlist cipher[1]; |
48 | struct scatterlist payload[2]; | 49 | struct scatterlist payload[2]; |
49 | struct scatterlist assoc[2]; | 50 | struct scatterlist assoc[2]; |
@@ -1133,9 +1134,19 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, | |||
1133 | scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2); | 1134 | scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2); |
1134 | assoclen += 8 + req->cryptlen - (enc ? 0 : authsize); | 1135 | assoclen += 8 + req->cryptlen - (enc ? 0 : authsize); |
1135 | 1136 | ||
1136 | sg_init_table(assoc, 2); | 1137 | if (req->assoc->length == req->assoclen) { |
1137 | sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, | 1138 | sg_init_table(assoc, 2); |
1138 | req->assoc->offset); | 1139 | sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, |
1140 | req->assoc->offset); | ||
1141 | } else { | ||
1142 | BUG_ON(req->assoclen > sizeof(rctx->assocbuf)); | ||
1143 | |||
1144 | scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0, | ||
1145 | req->assoclen, 0); | ||
1146 | |||
1147 | sg_init_table(assoc, 2); | ||
1148 | sg_set_buf(assoc, rctx->assocbuf, req->assoclen); | ||
1149 | } | ||
1139 | scatterwalk_crypto_chain(assoc, payload, 0, 2); | 1150 | scatterwalk_crypto_chain(assoc, payload, 0, 2); |
1140 | 1151 | ||
1141 | aead_request_set_tfm(subreq, ctx->child); | 1152 | aead_request_set_tfm(subreq, ctx->child); |
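
Note on the gcm.c hunk above: the zero-copy path is kept when the first scatterlist segment already covers the whole associated-data length; otherwise the data is gathered into a small per-request bounce buffer (assocbuf, 32 bytes) and that buffer is used instead. The sketch below shows the same decision in plain C over ordinary buffers; the 32-byte bound comes from the hunk, everything else is illustrative.

#include <stdio.h>
#include <string.h>
#include <assert.h>

struct segment {
        const unsigned char *data;
        size_t len;             /* length of this contiguous piece */
};

/*
 * Return a pointer covering 'total' contiguous bytes of associated data:
 * use the first segment directly when it is big enough, otherwise gather
 * into the caller's bounce buffer (capped at 32 bytes, as in the hunk).
 */
static const unsigned char *assoc_contiguous(const struct segment *segs,
                                             size_t nsegs, size_t total,
                                             unsigned char bounce[32])
{
        size_t off = 0, i;

        if (nsegs && segs[0].len >= total)
                return segs[0].data;            /* zero-copy fast path */

        assert(total <= 32);                    /* mirrors the BUG_ON() above */
        for (i = 0; i < nsegs && off < total; i++) {
                size_t n = segs[i].len < total - off ? segs[i].len : total - off;

                memcpy(bounce + off, segs[i].data, n);
                off += n;
        }
        return bounce;
}

int main(void)
{
        unsigned char bounce[32];
        struct segment segs[2] = {
                { (const unsigned char *)"abcd", 4 },
                { (const unsigned char *)"efgh", 4 },
        };
        const unsigned char *p = assoc_contiguous(segs, 2, 8, bounce);

        printf("%.8s\n", p);
        return 0;
}
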
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 92ed9692c47e..4bf68c8d4797 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
@@ -396,7 +396,7 @@ config ACPI_CUSTOM_METHOD | |||
396 | 396 | ||
397 | config ACPI_BGRT | 397 | config ACPI_BGRT |
398 | bool "Boottime Graphics Resource Table support" | 398 | bool "Boottime Graphics Resource Table support" |
399 | depends on EFI | 399 | depends on EFI && X86 |
400 | help | 400 | help |
401 | This driver adds support for exposing the ACPI Boottime Graphics | 401 | This driver adds support for exposing the ACPI Boottime Graphics |
402 | Resource Table, which allows the operating system to obtain | 402 | Resource Table, which allows the operating system to obtain |
diff --git a/drivers/acpi/acpi_i2c.c b/drivers/acpi/acpi_i2c.c index 82045e3f5cac..a82c7626aa9b 100644 --- a/drivers/acpi/acpi_i2c.c +++ b/drivers/acpi/acpi_i2c.c | |||
@@ -90,7 +90,7 @@ void acpi_i2c_register_devices(struct i2c_adapter *adapter) | |||
90 | acpi_handle handle; | 90 | acpi_handle handle; |
91 | acpi_status status; | 91 | acpi_status status; |
92 | 92 | ||
93 | handle = ACPI_HANDLE(&adapter->dev); | 93 | handle = ACPI_HANDLE(adapter->dev.parent); |
94 | if (!handle) | 94 | if (!handle) |
95 | return; | 95 | return; |
96 | 96 | ||
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c index 1e5d8a40101e..fefc2ca7cc3e 100644 --- a/drivers/acpi/apei/cper.c +++ b/drivers/acpi/apei/cper.c | |||
@@ -405,7 +405,7 @@ int apei_estatus_check(const struct acpi_hest_generic_status *estatus) | |||
405 | return rc; | 405 | return rc; |
406 | data_len = estatus->data_length; | 406 | data_len = estatus->data_length; |
407 | gdata = (struct acpi_hest_generic_data *)(estatus + 1); | 407 | gdata = (struct acpi_hest_generic_data *)(estatus + 1); |
408 | while (data_len > sizeof(*gdata)) { | 408 | while (data_len >= sizeof(*gdata)) { |
409 | gedata_len = gdata->error_data_length; | 409 | gedata_len = gdata->error_data_length; |
410 | if (gedata_len > data_len - sizeof(*gdata)) | 410 | if (gedata_len > data_len - sizeof(*gdata)) |
411 | return -EINVAL; | 411 | return -EINVAL; |
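
Note on the apei/cper.c hunk above: the loop condition changes from '>' to '>=' so a trailing generic-data entry whose payload length is zero (leaving exactly sizeof(*gdata) bytes) is still consumed and validated rather than skipped. A standalone sketch of walking variable-length records with that boundary condition; the record layout is simplified.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct record_hdr {
        unsigned int payload_len;       /* bytes following this header */
};

/* Count valid records in 'buf'; returns -1 on a malformed length. */
static int walk_records(const unsigned char *buf, size_t data_len)
{
        int count = 0;

        /* '>=' so a final header with payload_len == 0 is still consumed. */
        while (data_len >= sizeof(struct record_hdr)) {
                struct record_hdr hdr;

                memcpy(&hdr, buf, sizeof(hdr));
                if (hdr.payload_len > data_len - sizeof(hdr))
                        return -1;      /* record claims more than remains */
                buf += sizeof(hdr) + hdr.payload_len;
                data_len -= sizeof(hdr) + hdr.payload_len;
                count++;
        }
        return count;
}

int main(void)
{
        /* Two records: one with a 4-byte payload, then a header-only record. */
        unsigned char buf[sizeof(struct record_hdr) * 2 + 4];
        struct record_hdr h1 = { 4 }, h2 = { 0 };

        memcpy(buf, &h1, sizeof(h1));
        memcpy(buf + sizeof(h1) + 4, &h2, sizeof(h2));
        printf("records=%d\n", walk_records(buf, sizeof(buf)));  /* 2 */
        return 0;
}
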
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 0ac546d5e53f..6ae5e440436e 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
@@ -415,7 +415,6 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
415 | struct acpi_pci_root *root; | 415 | struct acpi_pci_root *root; |
416 | struct acpi_pci_driver *driver; | 416 | struct acpi_pci_driver *driver; |
417 | u32 flags, base_flags; | 417 | u32 flags, base_flags; |
418 | bool is_osc_granted = false; | ||
419 | 418 | ||
420 | root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); | 419 | root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); |
421 | if (!root) | 420 | if (!root) |
@@ -476,6 +475,30 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
476 | flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; | 475 | flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT; |
477 | acpi_pci_osc_support(root, flags); | 476 | acpi_pci_osc_support(root, flags); |
478 | 477 | ||
478 | /* | ||
479 | * TBD: Need PCI interface for enumeration/configuration of roots. | ||
480 | */ | ||
481 | |||
482 | mutex_lock(&acpi_pci_root_lock); | ||
483 | list_add_tail(&root->node, &acpi_pci_roots); | ||
484 | mutex_unlock(&acpi_pci_root_lock); | ||
485 | |||
486 | /* | ||
487 | * Scan the Root Bridge | ||
488 | * -------------------- | ||
489 | * Must do this prior to any attempt to bind the root device, as the | ||
490 | * PCI namespace does not get created until this call is made (and | ||
491 | * thus the root bridge's pci_dev does not exist). | ||
492 | */ | ||
493 | root->bus = pci_acpi_scan_root(root); | ||
494 | if (!root->bus) { | ||
495 | printk(KERN_ERR PREFIX | ||
496 | "Bus %04x:%02x not present in PCI namespace\n", | ||
497 | root->segment, (unsigned int)root->secondary.start); | ||
498 | result = -ENODEV; | ||
499 | goto out_del_root; | ||
500 | } | ||
501 | |||
479 | /* Indicate support for various _OSC capabilities. */ | 502 | /* Indicate support for various _OSC capabilities. */ |
480 | if (pci_ext_cfg_avail()) | 503 | if (pci_ext_cfg_avail()) |
481 | flags |= OSC_EXT_PCI_CONFIG_SUPPORT; | 504 | flags |= OSC_EXT_PCI_CONFIG_SUPPORT; |
@@ -494,6 +517,7 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
494 | flags = base_flags; | 517 | flags = base_flags; |
495 | } | 518 | } |
496 | } | 519 | } |
520 | |||
497 | if (!pcie_ports_disabled | 521 | if (!pcie_ports_disabled |
498 | && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { | 522 | && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { |
499 | flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL | 523 | flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL |
@@ -514,54 +538,28 @@ static int acpi_pci_root_add(struct acpi_device *device, | |||
514 | status = acpi_pci_osc_control_set(device->handle, &flags, | 538 | status = acpi_pci_osc_control_set(device->handle, &flags, |
515 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); | 539 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL); |
516 | if (ACPI_SUCCESS(status)) { | 540 | if (ACPI_SUCCESS(status)) { |
517 | is_osc_granted = true; | ||
518 | dev_info(&device->dev, | 541 | dev_info(&device->dev, |
519 | "ACPI _OSC control (0x%02x) granted\n", flags); | 542 | "ACPI _OSC control (0x%02x) granted\n", flags); |
543 | if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { | ||
544 | /* | ||
545 | * We have ASPM control, but the FADT indicates | ||
546 | * that it's unsupported. Clear it. | ||
547 | */ | ||
548 | pcie_clear_aspm(root->bus); | ||
549 | } | ||
520 | } else { | 550 | } else { |
521 | is_osc_granted = false; | ||
522 | dev_info(&device->dev, | 551 | dev_info(&device->dev, |
523 | "ACPI _OSC request failed (%s), " | 552 | "ACPI _OSC request failed (%s), " |
524 | "returned control mask: 0x%02x\n", | 553 | "returned control mask: 0x%02x\n", |
525 | acpi_format_exception(status), flags); | 554 | acpi_format_exception(status), flags); |
555 | pr_info("ACPI _OSC control for PCIe not granted, " | ||
556 | "disabling ASPM\n"); | ||
557 | pcie_no_aspm(); | ||
526 | } | 558 | } |
527 | } else { | 559 | } else { |
528 | dev_info(&device->dev, | 560 | dev_info(&device->dev, |
529 | "Unable to request _OSC control " | 561 | "Unable to request _OSC control " |
530 | "(_OSC support mask: 0x%02x)\n", flags); | 562 | "(_OSC support mask: 0x%02x)\n", flags); |
531 | } | ||
532 | |||
533 | /* | ||
534 | * TBD: Need PCI interface for enumeration/configuration of roots. | ||
535 | */ | ||
536 | |||
537 | mutex_lock(&acpi_pci_root_lock); | ||
538 | list_add_tail(&root->node, &acpi_pci_roots); | ||
539 | mutex_unlock(&acpi_pci_root_lock); | ||
540 | |||
541 | /* | ||
542 | * Scan the Root Bridge | ||
543 | * -------------------- | ||
544 | * Must do this prior to any attempt to bind the root device, as the | ||
545 | * PCI namespace does not get created until this call is made (and | ||
546 | * thus the root bridge's pci_dev does not exist). | ||
547 | */ | ||
548 | root->bus = pci_acpi_scan_root(root); | ||
549 | if (!root->bus) { | ||
550 | printk(KERN_ERR PREFIX | ||
551 | "Bus %04x:%02x not present in PCI namespace\n", | ||
552 | root->segment, (unsigned int)root->secondary.start); | ||
553 | result = -ENODEV; | ||
554 | goto out_del_root; | ||
555 | } | ||
556 | |||
557 | /* ASPM setting */ | ||
558 | if (is_osc_granted) { | ||
559 | if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) | ||
560 | pcie_clear_aspm(root->bus); | ||
561 | } else { | ||
562 | pr_info("ACPI _OSC control for PCIe not granted, " | ||
563 | "disabling ASPM\n"); | ||
564 | pcie_no_aspm(); | ||
565 | } | 563 | } |
566 | 564 | ||
567 | pci_acpi_add_bus_pm_notifier(device, root->bus); | 565 | pci_acpi_add_bus_pm_notifier(device, root->bus); |
@@ -646,6 +644,7 @@ static void handle_root_bridge_insertion(acpi_handle handle) | |||
646 | 644 | ||
647 | static void handle_root_bridge_removal(struct acpi_device *device) | 645 | static void handle_root_bridge_removal(struct acpi_device *device) |
648 | { | 646 | { |
647 | acpi_status status; | ||
649 | struct acpi_eject_event *ej_event; | 648 | struct acpi_eject_event *ej_event; |
650 | 649 | ||
651 | ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL); | 650 | ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL); |
@@ -661,7 +660,9 @@ static void handle_root_bridge_removal(struct acpi_device *device) | |||
661 | ej_event->device = device; | 660 | ej_event->device = device; |
662 | ej_event->event = ACPI_NOTIFY_EJECT_REQUEST; | 661 | ej_event->event = ACPI_NOTIFY_EJECT_REQUEST; |
663 | 662 | ||
664 | acpi_bus_hot_remove_device(ej_event); | 663 | status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event); |
664 | if (ACPI_FAILURE(status)) | ||
665 | kfree(ej_event); | ||
665 | } | 666 | } |
666 | 667 | ||
667 | static void _handle_hotplug_event_root(struct work_struct *work) | 668 | static void _handle_hotplug_event_root(struct work_struct *work) |
@@ -676,8 +677,9 @@ static void _handle_hotplug_event_root(struct work_struct *work) | |||
676 | handle = hp_work->handle; | 677 | handle = hp_work->handle; |
677 | type = hp_work->type; | 678 | type = hp_work->type; |
678 | 679 | ||
679 | root = acpi_pci_find_root(handle); | 680 | acpi_scan_lock_acquire(); |
680 | 681 | ||
682 | root = acpi_pci_find_root(handle); | ||
681 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); | 683 | acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); |
682 | 684 | ||
683 | switch (type) { | 685 | switch (type) { |
@@ -711,6 +713,7 @@ static void _handle_hotplug_event_root(struct work_struct *work) | |||
711 | break; | 713 | break; |
712 | } | 714 | } |
713 | 715 | ||
716 | acpi_scan_lock_release(); | ||
714 | kfree(hp_work); /* allocated in handle_hotplug_event_bridge */ | 717 | kfree(hp_work); /* allocated in handle_hotplug_event_bridge */ |
715 | kfree(buffer.pointer); | 718 | kfree(buffer.pointer); |
716 | } | 719 | } |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index fc95308e9a11..ee255c60bdac 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -66,7 +66,8 @@ module_param(latency_factor, uint, 0644); | |||
66 | 66 | ||
67 | static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); | 67 | static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device); |
68 | 68 | ||
69 | static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX]; | 69 | static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], |
70 | acpi_cstate); | ||
70 | 71 | ||
71 | static int disabled_by_idle_boot_param(void) | 72 | static int disabled_by_idle_boot_param(void) |
72 | { | 73 | { |
@@ -722,7 +723,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, | |||
722 | struct cpuidle_driver *drv, int index) | 723 | struct cpuidle_driver *drv, int index) |
723 | { | 724 | { |
724 | struct acpi_processor *pr; | 725 | struct acpi_processor *pr; |
725 | struct acpi_processor_cx *cx = acpi_cstate[index]; | 726 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); |
726 | 727 | ||
727 | pr = __this_cpu_read(processors); | 728 | pr = __this_cpu_read(processors); |
728 | 729 | ||
@@ -745,7 +746,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, | |||
745 | */ | 746 | */ |
746 | static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) | 747 | static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) |
747 | { | 748 | { |
748 | struct acpi_processor_cx *cx = acpi_cstate[index]; | 749 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); |
749 | 750 | ||
750 | ACPI_FLUSH_CPU_CACHE(); | 751 | ACPI_FLUSH_CPU_CACHE(); |
751 | 752 | ||
@@ -775,7 +776,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, | |||
775 | struct cpuidle_driver *drv, int index) | 776 | struct cpuidle_driver *drv, int index) |
776 | { | 777 | { |
777 | struct acpi_processor *pr; | 778 | struct acpi_processor *pr; |
778 | struct acpi_processor_cx *cx = acpi_cstate[index]; | 779 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); |
779 | 780 | ||
780 | pr = __this_cpu_read(processors); | 781 | pr = __this_cpu_read(processors); |
781 | 782 | ||
@@ -833,7 +834,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, | |||
833 | struct cpuidle_driver *drv, int index) | 834 | struct cpuidle_driver *drv, int index) |
834 | { | 835 | { |
835 | struct acpi_processor *pr; | 836 | struct acpi_processor *pr; |
836 | struct acpi_processor_cx *cx = acpi_cstate[index]; | 837 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); |
837 | 838 | ||
838 | pr = __this_cpu_read(processors); | 839 | pr = __this_cpu_read(processors); |
839 | 840 | ||
@@ -960,7 +961,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr, | |||
960 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) | 961 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) |
961 | continue; | 962 | continue; |
962 | #endif | 963 | #endif |
963 | acpi_cstate[count] = cx; | 964 | per_cpu(acpi_cstate[count], dev->cpu) = cx; |
964 | 965 | ||
965 | count++; | 966 | count++; |
966 | if (count == CPUIDLE_STATE_MAX) | 967 | if (count == CPUIDLE_STATE_MAX) |
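
Note on the processor_idle.c hunks above: the single global acpi_cstate[] table becomes a per-CPU array, so each CPU's cpuidle state index resolves to that CPU's own C-state description. DEFINE_PER_CPU/per_cpu() are the kernel's per-CPU mechanism; the sketch below only shows the indexing change, using a plain two-dimensional array as a stand-in.

#include <stdio.h>

#define NR_CPUS   4
#define STATE_MAX 8

struct cstate {
        int type;       /* e.g. 1 = C1, 2 = C2, ... */
};

/* Before: one shared table, so the last CPU to register wins.
 * After:  one table per CPU, indexed as cstate_of[cpu][index]. */
static struct cstate *cstate_of[NR_CPUS][STATE_MAX];

static void register_state(int cpu, int index, struct cstate *cx)
{
        cstate_of[cpu][index] = cx;
}

static struct cstate *lookup_state(int cpu, int index)
{
        return cstate_of[cpu][index];
}

int main(void)
{
        static struct cstate c_cpu0 = { 1 }, c_cpu1 = { 2 };

        register_state(0, 0, &c_cpu0);
        register_state(1, 0, &c_cpu1);
        printf("cpu0 type=%d cpu1 type=%d\n",
               lookup_state(0, 0)->type, lookup_state(1, 0)->type);
        return 0;
}
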
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 24213033fbae..9c1a435d10e6 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -193,6 +193,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { | |||
193 | }, | 193 | }, |
194 | { | 194 | { |
195 | .callback = init_nvs_nosave, | 195 | .callback = init_nvs_nosave, |
196 | .ident = "Sony Vaio VGN-FW21M", | ||
197 | .matches = { | ||
198 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | ||
199 | DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"), | ||
200 | }, | ||
201 | }, | ||
202 | { | ||
203 | .callback = init_nvs_nosave, | ||
196 | .ident = "Sony Vaio VPCEB17FX", | 204 | .ident = "Sony Vaio VPCEB17FX", |
197 | .matches = { | 205 | .matches = { |
198 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), | 206 | DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), |
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c index 093c43554963..1f44e56cc65d 100644 --- a/drivers/amba/tegra-ahb.c +++ b/drivers/amba/tegra-ahb.c | |||
@@ -158,7 +158,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn) | |||
158 | EXPORT_SYMBOL(tegra_ahb_enable_smmu); | 158 | EXPORT_SYMBOL(tegra_ahb_enable_smmu); |
159 | #endif | 159 | #endif |
160 | 160 | ||
161 | #ifdef CONFIG_PM_SLEEP | 161 | #ifdef CONFIG_PM |
162 | static int tegra_ahb_suspend(struct device *dev) | 162 | static int tegra_ahb_suspend(struct device *dev) |
163 | { | 163 | { |
164 | int i; | 164 | int i; |
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 3e751b74615e..a5a3ebcbdd2c 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig | |||
@@ -59,15 +59,16 @@ config ATA_ACPI | |||
59 | option libata.noacpi=1 | 59 | option libata.noacpi=1 |
60 | 60 | ||
61 | config SATA_ZPODD | 61 | config SATA_ZPODD |
62 | bool "SATA Zero Power ODD Support" | 62 | bool "SATA Zero Power Optical Disc Drive (ZPODD) support" |
63 | depends on ATA_ACPI | 63 | depends on ATA_ACPI |
64 | default n | 64 | default n |
65 | help | 65 | help |
66 | This option adds support for SATA ZPODD. It requires both | 66 | This option adds support for SATA Zero Power Optical Disc |
67 | ODD and the platform support, and if enabled, will automatically | 67 | Drive (ZPODD). It requires both the ODD and the platform |
68 | power on/off the ODD when certain condition is satisfied. This | 68 | support, and if enabled, will automatically power on/off the |
69 | does not impact user's experience of the ODD, only power is saved | 69 | ODD when certain condition is satisfied. This does not impact |
70 | when ODD is not in use(i.e. no disc inside). | 70 | end user's experience of the ODD, only power is saved when |
71 | the ODD is not in use (i.e. no disc inside). | ||
71 | 72 | ||
72 | If unsure, say N. | 73 | If unsure, say N. |
73 | 74 | ||
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index a99112cfd8b1..6a67b07de494 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -281,6 +281,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
281 | { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */ | 281 | { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */ |
282 | { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */ | 282 | { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */ |
283 | { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */ | 283 | { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */ |
284 | { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */ | ||
285 | { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */ | ||
284 | { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ | 286 | { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ |
285 | { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */ | 287 | { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */ |
286 | { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */ | 288 | { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */ |
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index d2ba439cfe54..2f48123d74c4 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c | |||
@@ -150,6 +150,7 @@ enum piix_controller_ids { | |||
150 | tolapai_sata, | 150 | tolapai_sata, |
151 | piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ | 151 | piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */ |
152 | ich8_sata_snb, | 152 | ich8_sata_snb, |
153 | ich8_2port_sata_snb, | ||
153 | }; | 154 | }; |
154 | 155 | ||
155 | struct piix_map_db { | 156 | struct piix_map_db { |
@@ -304,7 +305,7 @@ static const struct pci_device_id piix_pci_tbl[] = { | |||
304 | /* SATA Controller IDE (Lynx Point) */ | 305 | /* SATA Controller IDE (Lynx Point) */ |
305 | { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, | 306 | { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, |
306 | /* SATA Controller IDE (Lynx Point) */ | 307 | /* SATA Controller IDE (Lynx Point) */ |
307 | { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | 308 | { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, |
308 | /* SATA Controller IDE (Lynx Point) */ | 309 | /* SATA Controller IDE (Lynx Point) */ |
309 | { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | 310 | { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, |
310 | /* SATA Controller IDE (Lynx Point-LP) */ | 311 | /* SATA Controller IDE (Lynx Point-LP) */ |
@@ -439,6 +440,7 @@ static const struct piix_map_db *piix_map_db_table[] = { | |||
439 | [ich8m_apple_sata] = &ich8m_apple_map_db, | 440 | [ich8m_apple_sata] = &ich8m_apple_map_db, |
440 | [tolapai_sata] = &tolapai_map_db, | 441 | [tolapai_sata] = &tolapai_map_db, |
441 | [ich8_sata_snb] = &ich8_map_db, | 442 | [ich8_sata_snb] = &ich8_map_db, |
443 | [ich8_2port_sata_snb] = &ich8_2port_map_db, | ||
442 | }; | 444 | }; |
443 | 445 | ||
444 | static struct pci_bits piix_enable_bits[] = { | 446 | static struct pci_bits piix_enable_bits[] = { |
@@ -1242,6 +1244,16 @@ static struct ata_port_info piix_port_info[] = { | |||
1242 | .udma_mask = ATA_UDMA6, | 1244 | .udma_mask = ATA_UDMA6, |
1243 | .port_ops = &piix_sata_ops, | 1245 | .port_ops = &piix_sata_ops, |
1244 | }, | 1246 | }, |
1247 | |||
1248 | [ich8_2port_sata_snb] = | ||
1249 | { | ||
1250 | .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | ||
1251 | | PIIX_FLAG_PIO16, | ||
1252 | .pio_mask = ATA_PIO4, | ||
1253 | .mwdma_mask = ATA_MWDMA2, | ||
1254 | .udma_mask = ATA_UDMA6, | ||
1255 | .port_ops = &piix_sata_ops, | ||
1256 | }, | ||
1245 | }; | 1257 | }; |
1246 | 1258 | ||
1247 | #define AHCI_PCI_BAR 5 | 1259 | #define AHCI_PCI_BAR 5 |
@@ -1547,6 +1559,10 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev) | |||
1547 | 1559 | ||
1548 | static int prefer_ms_hyperv = 1; | 1560 | static int prefer_ms_hyperv = 1; |
1549 | module_param(prefer_ms_hyperv, int, 0); | 1561 | module_param(prefer_ms_hyperv, int, 0); |
1562 | MODULE_PARM_DESC(prefer_ms_hyperv, | ||
1563 | "Prefer Hyper-V paravirtualization drivers instead of ATA, " | ||
1564 | "0 - Use ATA drivers, " | ||
1565 | "1 (Default) - Use the paravirtualization drivers."); | ||
1550 | 1566 | ||
1551 | static void piix_ignore_devices_quirk(struct ata_host *host) | 1567 | static void piix_ignore_devices_quirk(struct ata_host *host) |
1552 | { | 1568 | { |
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index beea3115577e..8a52dab412e2 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c | |||
@@ -1027,7 +1027,7 @@ static void ata_acpi_register_power_resource(struct ata_device *dev) | |||
1027 | 1027 | ||
1028 | handle = ata_dev_acpi_handle(dev); | 1028 | handle = ata_dev_acpi_handle(dev); |
1029 | if (handle) | 1029 | if (handle) |
1030 | acpi_dev_pm_remove_dependent(handle, &sdev->sdev_gendev); | 1030 | acpi_dev_pm_add_dependent(handle, &sdev->sdev_gendev); |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | static void ata_acpi_unregister_power_resource(struct ata_device *dev) | 1033 | static void ata_acpi_unregister_power_resource(struct ata_device *dev) |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 497adea1f0d6..63c743baf920 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -2329,7 +2329,7 @@ int ata_dev_configure(struct ata_device *dev) | |||
2329 | * from SATA Settings page of Identify Device Data Log. | 2329 | * from SATA Settings page of Identify Device Data Log. |
2330 | */ | 2330 | */ |
2331 | if (ata_id_has_devslp(dev->id)) { | 2331 | if (ata_id_has_devslp(dev->id)) { |
2332 | u8 sata_setting[ATA_SECT_SIZE]; | 2332 | u8 *sata_setting = ap->sector_buf; |
2333 | int i, j; | 2333 | int i, j; |
2334 | 2334 | ||
2335 | dev->flags |= ATA_DFLAG_DEVSLP; | 2335 | dev->flags |= ATA_DFLAG_DEVSLP; |
@@ -2439,6 +2439,9 @@ int ata_dev_configure(struct ata_device *dev) | |||
2439 | dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, | 2439 | dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, |
2440 | dev->max_sectors); | 2440 | dev->max_sectors); |
2441 | 2441 | ||
2442 | if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) | ||
2443 | dev->max_sectors = ATA_MAX_SECTORS_LBA48; | ||
2444 | |||
2442 | if (ap->ops->dev_config) | 2445 | if (ap->ops->dev_config) |
2443 | ap->ops->dev_config(dev); | 2446 | ap->ops->dev_config(dev); |
2444 | 2447 | ||
@@ -4100,6 +4103,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4100 | /* Weird ATAPI devices */ | 4103 | /* Weird ATAPI devices */ |
4101 | { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, | 4104 | { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, |
4102 | { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, | 4105 | { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, |
4106 | { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, | ||
4103 | 4107 | ||
4104 | /* Devices we expect to fail diagnostics */ | 4108 | /* Devices we expect to fail diagnostics */ |
4105 | 4109 | ||
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 318b41358187..ff44787e5a45 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -532,8 +532,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) | |||
532 | struct scsi_sense_hdr sshdr; | 532 | struct scsi_sense_hdr sshdr; |
533 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, | 533 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, |
534 | &sshdr); | 534 | &sshdr); |
535 | if (sshdr.sense_key == 0 && | 535 | if (sshdr.sense_key == RECOVERED_ERROR && |
536 | sshdr.asc == 0 && sshdr.ascq == 0) | 536 | sshdr.asc == 0 && sshdr.ascq == 0x1d) |
537 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; | 537 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; |
538 | } | 538 | } |
539 | 539 | ||
@@ -618,8 +618,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) | |||
618 | struct scsi_sense_hdr sshdr; | 618 | struct scsi_sense_hdr sshdr; |
619 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, | 619 | scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, |
620 | &sshdr); | 620 | &sshdr); |
621 | if (sshdr.sense_key == 0 && | 621 | if (sshdr.sense_key == RECOVERED_ERROR && |
622 | sshdr.asc == 0 && sshdr.ascq == 0) | 622 | sshdr.asc == 0 && sshdr.ascq == 0x1d) |
623 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; | 623 | cmd_result &= ~SAM_STAT_CHECK_CONDITION; |
624 | } | 624 | } |
625 | 625 | ||
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c index 70b0e01372b3..6ef27e98c508 100644 --- a/drivers/ata/pata_samsung_cf.c +++ b/drivers/ata/pata_samsung_cf.c | |||
@@ -661,18 +661,7 @@ static struct platform_driver pata_s3c_driver = { | |||
661 | }, | 661 | }, |
662 | }; | 662 | }; |
663 | 663 | ||
664 | static int __init pata_s3c_init(void) | 664 | module_platform_driver_probe(pata_s3c_driver, pata_s3c_probe); |
665 | { | ||
666 | return platform_driver_probe(&pata_s3c_driver, pata_s3c_probe); | ||
667 | } | ||
668 | |||
669 | static void __exit pata_s3c_exit(void) | ||
670 | { | ||
671 | platform_driver_unregister(&pata_s3c_driver); | ||
672 | } | ||
673 | |||
674 | module_init(pata_s3c_init); | ||
675 | module_exit(pata_s3c_exit); | ||
676 | 665 | ||
677 | MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>"); | 666 | MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>"); |
678 | MODULE_DESCRIPTION("low-level driver for Samsung PATA controller"); | 667 | MODULE_DESCRIPTION("low-level driver for Samsung PATA controller"); |
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 124b2c1d9c0b..608f82fed632 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c | |||
@@ -1511,8 +1511,7 @@ error_exit_with_cleanup: | |||
1511 | 1511 | ||
1512 | if (hcr_base) | 1512 | if (hcr_base) |
1513 | iounmap(hcr_base); | 1513 | iounmap(hcr_base); |
1514 | if (host_priv) | 1514 | kfree(host_priv); |
1515 | kfree(host_priv); | ||
1516 | 1515 | ||
1517 | return retval; | 1516 | return retval; |
1518 | } | 1517 | } |
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 5f74587ef258..71671c42ef45 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include "power.h" | 46 | #include "power.h" |
47 | 47 | ||
48 | static DEFINE_MUTEX(dev_pm_qos_mtx); | 48 | static DEFINE_MUTEX(dev_pm_qos_mtx); |
49 | static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx); | ||
49 | 50 | ||
50 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); | 51 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); |
51 | 52 | ||
@@ -216,12 +217,17 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
216 | struct pm_qos_constraints *c; | 217 | struct pm_qos_constraints *c; |
217 | struct pm_qos_flags *f; | 218 | struct pm_qos_flags *f; |
218 | 219 | ||
219 | mutex_lock(&dev_pm_qos_mtx); | 220 | mutex_lock(&dev_pm_qos_sysfs_mtx); |
220 | 221 | ||
221 | /* | 222 | /* |
222 | * If the device's PM QoS resume latency limit or PM QoS flags have been | 223 | * If the device's PM QoS resume latency limit or PM QoS flags have been |
223 | * exposed to user space, they have to be hidden at this point. | 224 | * exposed to user space, they have to be hidden at this point. |
224 | */ | 225 | */ |
226 | pm_qos_sysfs_remove_latency(dev); | ||
227 | pm_qos_sysfs_remove_flags(dev); | ||
228 | |||
229 | mutex_lock(&dev_pm_qos_mtx); | ||
230 | |||
225 | __dev_pm_qos_hide_latency_limit(dev); | 231 | __dev_pm_qos_hide_latency_limit(dev); |
226 | __dev_pm_qos_hide_flags(dev); | 232 | __dev_pm_qos_hide_flags(dev); |
227 | 233 | ||
@@ -254,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev) | |||
254 | 260 | ||
255 | out: | 261 | out: |
256 | mutex_unlock(&dev_pm_qos_mtx); | 262 | mutex_unlock(&dev_pm_qos_mtx); |
263 | |||
264 | mutex_unlock(&dev_pm_qos_sysfs_mtx); | ||
257 | } | 265 | } |
258 | 266 | ||
259 | /** | 267 | /** |
@@ -558,6 +566,14 @@ static void __dev_pm_qos_drop_user_request(struct device *dev, | |||
558 | kfree(req); | 566 | kfree(req); |
559 | } | 567 | } |
560 | 568 | ||
569 | static void dev_pm_qos_drop_user_request(struct device *dev, | ||
570 | enum dev_pm_qos_req_type type) | ||
571 | { | ||
572 | mutex_lock(&dev_pm_qos_mtx); | ||
573 | __dev_pm_qos_drop_user_request(dev, type); | ||
574 | mutex_unlock(&dev_pm_qos_mtx); | ||
575 | } | ||
576 | |||
561 | /** | 577 | /** |
562 | * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. | 578 | * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space. |
563 | * @dev: Device whose PM QoS latency limit is to be exposed to user space. | 579 | * @dev: Device whose PM QoS latency limit is to be exposed to user space. |
@@ -581,6 +597,8 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | |||
581 | return ret; | 597 | return ret; |
582 | } | 598 | } |
583 | 599 | ||
600 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
601 | |||
584 | mutex_lock(&dev_pm_qos_mtx); | 602 | mutex_lock(&dev_pm_qos_mtx); |
585 | 603 | ||
586 | if (IS_ERR_OR_NULL(dev->power.qos)) | 604 | if (IS_ERR_OR_NULL(dev->power.qos)) |
@@ -591,26 +609,27 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) | |||
591 | if (ret < 0) { | 609 | if (ret < 0) { |
592 | __dev_pm_qos_remove_request(req); | 610 | __dev_pm_qos_remove_request(req); |
593 | kfree(req); | 611 | kfree(req); |
612 | mutex_unlock(&dev_pm_qos_mtx); | ||
594 | goto out; | 613 | goto out; |
595 | } | 614 | } |
596 | |||
597 | dev->power.qos->latency_req = req; | 615 | dev->power.qos->latency_req = req; |
616 | |||
617 | mutex_unlock(&dev_pm_qos_mtx); | ||
618 | |||
598 | ret = pm_qos_sysfs_add_latency(dev); | 619 | ret = pm_qos_sysfs_add_latency(dev); |
599 | if (ret) | 620 | if (ret) |
600 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); | 621 | dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); |
601 | 622 | ||
602 | out: | 623 | out: |
603 | mutex_unlock(&dev_pm_qos_mtx); | 624 | mutex_unlock(&dev_pm_qos_sysfs_mtx); |
604 | return ret; | 625 | return ret; |
605 | } | 626 | } |
606 | EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); | 627 | EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); |
607 | 628 | ||
608 | static void __dev_pm_qos_hide_latency_limit(struct device *dev) | 629 | static void __dev_pm_qos_hide_latency_limit(struct device *dev) |
609 | { | 630 | { |
610 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) { | 631 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) |
611 | pm_qos_sysfs_remove_latency(dev); | ||
612 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); | 632 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); |
613 | } | ||
614 | } | 633 | } |
615 | 634 | ||
616 | /** | 635 | /** |
@@ -619,9 +638,15 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev) | |||
619 | */ | 638 | */ |
620 | void dev_pm_qos_hide_latency_limit(struct device *dev) | 639 | void dev_pm_qos_hide_latency_limit(struct device *dev) |
621 | { | 640 | { |
641 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
642 | |||
643 | pm_qos_sysfs_remove_latency(dev); | ||
644 | |||
622 | mutex_lock(&dev_pm_qos_mtx); | 645 | mutex_lock(&dev_pm_qos_mtx); |
623 | __dev_pm_qos_hide_latency_limit(dev); | 646 | __dev_pm_qos_hide_latency_limit(dev); |
624 | mutex_unlock(&dev_pm_qos_mtx); | 647 | mutex_unlock(&dev_pm_qos_mtx); |
648 | |||
649 | mutex_unlock(&dev_pm_qos_sysfs_mtx); | ||
625 | } | 650 | } |
626 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); | 651 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); |
627 | 652 | ||
@@ -649,6 +674,8 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) | |||
649 | } | 674 | } |
650 | 675 | ||
651 | pm_runtime_get_sync(dev); | 676 | pm_runtime_get_sync(dev); |
677 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
678 | |||
652 | mutex_lock(&dev_pm_qos_mtx); | 679 | mutex_lock(&dev_pm_qos_mtx); |
653 | 680 | ||
654 | if (IS_ERR_OR_NULL(dev->power.qos)) | 681 | if (IS_ERR_OR_NULL(dev->power.qos)) |
@@ -659,16 +686,19 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val) | |||
659 | if (ret < 0) { | 686 | if (ret < 0) { |
660 | __dev_pm_qos_remove_request(req); | 687 | __dev_pm_qos_remove_request(req); |
661 | kfree(req); | 688 | kfree(req); |
689 | mutex_unlock(&dev_pm_qos_mtx); | ||
662 | goto out; | 690 | goto out; |
663 | } | 691 | } |
664 | |||
665 | dev->power.qos->flags_req = req; | 692 | dev->power.qos->flags_req = req; |
693 | |||
694 | mutex_unlock(&dev_pm_qos_mtx); | ||
695 | |||
666 | ret = pm_qos_sysfs_add_flags(dev); | 696 | ret = pm_qos_sysfs_add_flags(dev); |
667 | if (ret) | 697 | if (ret) |
668 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); | 698 | dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); |
669 | 699 | ||
670 | out: | 700 | out: |
671 | mutex_unlock(&dev_pm_qos_mtx); | 701 | mutex_unlock(&dev_pm_qos_sysfs_mtx); |
672 | pm_runtime_put(dev); | 702 | pm_runtime_put(dev); |
673 | return ret; | 703 | return ret; |
674 | } | 704 | } |
@@ -676,10 +706,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); | |||
676 | 706 | ||
677 | static void __dev_pm_qos_hide_flags(struct device *dev) | 707 | static void __dev_pm_qos_hide_flags(struct device *dev) |
678 | { | 708 | { |
679 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) { | 709 | if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) |
680 | pm_qos_sysfs_remove_flags(dev); | ||
681 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); | 710 | __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); |
682 | } | ||
683 | } | 711 | } |
684 | 712 | ||
685 | /** | 713 | /** |
@@ -689,9 +717,15 @@ static void __dev_pm_qos_hide_flags(struct device *dev) | |||
689 | void dev_pm_qos_hide_flags(struct device *dev) | 717 | void dev_pm_qos_hide_flags(struct device *dev) |
690 | { | 718 | { |
691 | pm_runtime_get_sync(dev); | 719 | pm_runtime_get_sync(dev); |
720 | mutex_lock(&dev_pm_qos_sysfs_mtx); | ||
721 | |||
722 | pm_qos_sysfs_remove_flags(dev); | ||
723 | |||
692 | mutex_lock(&dev_pm_qos_mtx); | 724 | mutex_lock(&dev_pm_qos_mtx); |
693 | __dev_pm_qos_hide_flags(dev); | 725 | __dev_pm_qos_hide_flags(dev); |
694 | mutex_unlock(&dev_pm_qos_mtx); | 726 | mutex_unlock(&dev_pm_qos_mtx); |
727 | |||
728 | mutex_unlock(&dev_pm_qos_sysfs_mtx); | ||
695 | pm_runtime_put(dev); | 729 | pm_runtime_put(dev); |
696 | } | 730 | } |
697 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); | 731 | EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); |
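
Note on the power/qos.c hunks above: a second mutex, dev_pm_qos_sysfs_mtx, now serializes the expose/hide paths, and the sysfs attribute add/remove calls are moved outside the scope of dev_pm_qos_mtx while the constraint data is still updated under it. The sketch below shows that lock-scope split in a generic form with two pthread mutexes; it is a schematic of the locking shape, not of the PM QoS code itself.

#include <pthread.h>
#include <stdio.h>

/* Outer lock: serializes exposure/removal of the user-visible interface.
 * Inner lock: protects the constraint data itself.                      */
static pthread_mutex_t sysfs_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t data_mtx  = PTHREAD_MUTEX_INITIALIZER;

static int exposed;
static int constraint_value;

static void add_user_interface(void)    { exposed = 1; }   /* "sysfs add"    */
static void remove_user_interface(void) { exposed = 0; }   /* "sysfs remove" */

static void expose_limit(int value)
{
        pthread_mutex_lock(&sysfs_mtx);

        pthread_mutex_lock(&data_mtx);
        constraint_value = value;       /* data update under the inner lock */
        pthread_mutex_unlock(&data_mtx);

        add_user_interface();           /* done outside the inner lock */

        pthread_mutex_unlock(&sysfs_mtx);
}

static void hide_limit(void)
{
        pthread_mutex_lock(&sysfs_mtx);

        remove_user_interface();        /* interface removal first */

        pthread_mutex_lock(&data_mtx);
        constraint_value = 0;
        pthread_mutex_unlock(&data_mtx);

        pthread_mutex_unlock(&sysfs_mtx);
}

int main(void)
{
        expose_limit(100);
        printf("exposed=%d value=%d\n", exposed, constraint_value);
        hide_limit();
        printf("exposed=%d value=%d\n", exposed, constraint_value);
        return 0;
}
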
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index e6732cf7c06e..79f4fca9877a 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c | |||
@@ -398,7 +398,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min, | |||
398 | base = 0; | 398 | base = 0; |
399 | 399 | ||
400 | if (max < rbnode->base_reg + rbnode->blklen) | 400 | if (max < rbnode->base_reg + rbnode->blklen) |
401 | end = rbnode->base_reg + rbnode->blklen - max; | 401 | end = max - rbnode->base_reg + 1; |
402 | else | 402 | else |
403 | end = rbnode->blklen; | 403 | end = rbnode->blklen; |
404 | 404 | ||
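
Note on the regcache-rbtree.c fix above: when the requested maximum register falls inside a block, the end of the sync window is the block-relative index max - base_reg + 1, not base_reg + blklen - max. A tiny sketch of the clamping arithmetic, with illustrative helper names.

#include <stdio.h>

/*
 * Given a block of registers starting at base_reg with blklen entries,
 * return the index range [start, end) inside the block that overlaps the
 * requested register window [min, max].
 */
static void clamp_window(unsigned base_reg, unsigned blklen,
                         unsigned min, unsigned max,
                         unsigned *start, unsigned *end)
{
        *start = (min > base_reg) ? min - base_reg : 0;

        if (max < base_reg + blklen)
                *end = max - base_reg + 1;   /* fixed: block-relative, inclusive max */
        else
                *end = blklen;
}

int main(void)
{
        unsigned start, end;

        /* Block covers registers 16..23; sync registers 10..18. */
        clamp_window(16, 8, 10, 18, &start, &end);
        printf("start=%u end=%u\n", start, end);        /* start=0 end=3 */
        return 0;
}
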
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 3d2367501fd0..58cfb3232428 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c | |||
@@ -710,12 +710,12 @@ skip_format_initialization: | |||
710 | } | 710 | } |
711 | } | 711 | } |
712 | 712 | ||
713 | regmap_debugfs_init(map, config->name); | ||
714 | |||
713 | ret = regcache_init(map, config); | 715 | ret = regcache_init(map, config); |
714 | if (ret != 0) | 716 | if (ret != 0) |
715 | goto err_range; | 717 | goto err_range; |
716 | 718 | ||
717 | regmap_debugfs_init(map, config->name); | ||
718 | |||
719 | /* Add a devres resource for dev_get_regmap() */ | 719 | /* Add a devres resource for dev_get_regmap() */ |
720 | m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); | 720 | m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); |
721 | if (!m) { | 721 | if (!m) { |
@@ -1036,6 +1036,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg, | |||
1036 | kfree(async->work_buf); | 1036 | kfree(async->work_buf); |
1037 | kfree(async); | 1037 | kfree(async); |
1038 | } | 1038 | } |
1039 | |||
1040 | return ret; | ||
1039 | } | 1041 | } |
1040 | 1042 | ||
1041 | trace_regmap_hw_write_start(map->dev, reg, | 1043 | trace_regmap_hw_write_start(map->dev, reg, |
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 5dc0daed8fac..b81ddfea1da0 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig | |||
@@ -532,11 +532,11 @@ config BLK_DEV_RBD | |||
532 | If unsure, say N. | 532 | If unsure, say N. |
533 | 533 | ||
534 | config BLK_DEV_RSXX | 534 | config BLK_DEV_RSXX |
535 | tristate "RamSam PCIe Flash SSD Device Driver" | 535 | tristate "IBM FlashSystem 70/80 PCIe SSD Device Driver" |
536 | depends on PCI | 536 | depends on PCI |
537 | help | 537 | help |
538 | Device driver for IBM's high speed PCIe SSD | 538 | Device driver for IBM's high speed PCIe SSD |
539 | storage devices: RamSan-70 and RamSan-80. | 539 | storage devices: FlashSystem-70 and FlashSystem-80. |
540 | 540 | ||
541 | To compile this driver as a module, choose M here: the | 541 | To compile this driver as a module, choose M here: the |
542 | module will be called rsxx. | 542 | module will be called rsxx. |
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 25ef5c014fca..92b6d7c51e39 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c | |||
@@ -51,8 +51,9 @@ new_skb(ulong len) | |||
51 | { | 51 | { |
52 | struct sk_buff *skb; | 52 | struct sk_buff *skb; |
53 | 53 | ||
54 | skb = alloc_skb(len, GFP_ATOMIC); | 54 | skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC); |
55 | if (skb) { | 55 | if (skb) { |
56 | skb_reserve(skb, MAX_HEADER); | ||
56 | skb_reset_mac_header(skb); | 57 | skb_reset_mac_header(skb); |
57 | skb_reset_network_header(skb); | 58 | skb_reset_network_header(skb); |
58 | skb->protocol = __constant_htons(ETH_P_AOE); | 59 | skb->protocol = __constant_htons(ETH_P_AOE); |
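The aoe hunk allocates MAX_HEADER extra bytes and immediately reserves them, so lower layers can prepend link headers without reallocating the skb. A userspace sketch of the same headroom idea on a plain buffer (struct pktbuf, HEADROOM and the helpers are illustrative only, not the sk_buff API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define HEADROOM 64     /* stand-in for MAX_HEADER */

    struct pktbuf {
        unsigned char *head;    /* start of the allocation */
        unsigned char *data;    /* start of payload, after reserved space */
        size_t len;             /* current length */
    };

    /* Allocate payload_len bytes of payload plus HEADROOM bytes that a
     * caller can later claim for headers by moving data backwards. */
    static struct pktbuf *pktbuf_alloc(size_t payload_len)
    {
        struct pktbuf *pb = malloc(sizeof(*pb));

        if (!pb)
            return NULL;
        pb->head = malloc(payload_len + HEADROOM);
        if (!pb->head) {
            free(pb);
            return NULL;
        }
        pb->data = pb->head + HEADROOM;     /* the skb_reserve() step */
        pb->len = payload_len;
        return pb;
    }

    /* Prepend hdr_len bytes of header inside the reserved area. */
    static unsigned char *pktbuf_push(struct pktbuf *pb, size_t hdr_len)
    {
        pb->data -= hdr_len;
        pb->len += hdr_len;
        return pb->data;
    }

    int main(void)
    {
        struct pktbuf *pb = pktbuf_alloc(128);

        if (!pb)
            return 1;
        memset(pktbuf_push(pb, 14), 0, 14);     /* room for an Ethernet header */
        printf("payload+header length: %zu\n", pb->len);
        free(pb->head);
        free(pb);
        return 0;
    }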
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index ade58bc8f3c4..1c1b8e544aa2 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -4206,7 +4206,7 @@ static int cciss_find_cfgtables(ctlr_info_t *h) | |||
4206 | if (rc) | 4206 | if (rc) |
4207 | return rc; | 4207 | return rc; |
4208 | h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, | 4208 | h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, |
4209 | cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable)); | 4209 | cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); |
4210 | if (!h->cfgtable) | 4210 | if (!h->cfgtable) |
4211 | return -ENOMEM; | 4211 | return -ENOMEM; |
4212 | rc = write_driver_ver_to_cfgtable(h->cfgtable); | 4212 | rc = write_driver_ver_to_cfgtable(h->cfgtable); |
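The cciss change is the classic sizeof(pointer) versus sizeof(*pointer) bug: the old code mapped only sizeof(h->cfgtable) bytes, the size of the pointer itself, instead of the size of the config table it points to. A two-line demonstration of the difference (the struct layout here is illustrative, not the real controller table):

    #include <stdio.h>

    struct cfgtable {
        unsigned int signature;
        unsigned char reserved[252];    /* illustrative layout */
    };

    int main(void)
    {
        struct cfgtable *cfgtable = NULL;

        /* On a 64-bit build this prints something like 8 vs 256.
         * sizeof() never dereferences, so the NULL pointer is safe here. */
        printf("sizeof(cfgtable)  = %zu\n", sizeof(cfgtable));
        printf("sizeof(*cfgtable) = %zu\n", sizeof(*cfgtable));
        return 0;
    }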
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 747bb2af69dc..dfe758382eaf 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -922,6 +922,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, | |||
922 | lo->lo_flags |= LO_FLAGS_PARTSCAN; | 922 | lo->lo_flags |= LO_FLAGS_PARTSCAN; |
923 | if (lo->lo_flags & LO_FLAGS_PARTSCAN) | 923 | if (lo->lo_flags & LO_FLAGS_PARTSCAN) |
924 | ioctl_by_bdev(bdev, BLKRRPART, 0); | 924 | ioctl_by_bdev(bdev, BLKRRPART, 0); |
925 | |||
926 | /* Grab the block_device to prevent its destruction after we | ||
927 | * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev). | ||
928 | */ | ||
929 | bdgrab(bdev); | ||
925 | return 0; | 930 | return 0; |
926 | 931 | ||
927 | out_clr: | 932 | out_clr: |
@@ -1031,8 +1036,10 @@ static int loop_clr_fd(struct loop_device *lo) | |||
1031 | memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); | 1036 | memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE); |
1032 | memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); | 1037 | memset(lo->lo_crypt_name, 0, LO_NAME_SIZE); |
1033 | memset(lo->lo_file_name, 0, LO_NAME_SIZE); | 1038 | memset(lo->lo_file_name, 0, LO_NAME_SIZE); |
1034 | if (bdev) | 1039 | if (bdev) { |
1040 | bdput(bdev); | ||
1035 | invalidate_bdev(bdev); | 1041 | invalidate_bdev(bdev); |
1042 | } | ||
1036 | set_capacity(lo->lo_disk, 0); | 1043 | set_capacity(lo->lo_disk, 0); |
1037 | loop_sysfs_exit(lo); | 1044 | loop_sysfs_exit(lo); |
1038 | if (bdev) { | 1045 | if (bdev) { |
@@ -1623,6 +1630,7 @@ static int loop_add(struct loop_device **l, int i) | |||
1623 | goto out_free_dev; | 1630 | goto out_free_dev; |
1624 | i = err; | 1631 | i = err; |
1625 | 1632 | ||
1633 | err = -ENOMEM; | ||
1626 | lo->lo_queue = blk_alloc_queue(GFP_KERNEL); | 1634 | lo->lo_queue = blk_alloc_queue(GFP_KERNEL); |
1627 | if (!lo->lo_queue) | 1635 | if (!lo->lo_queue) |
1628 | goto out_free_dev; | 1636 | goto out_free_dev; |
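The loop.c hunks pin the block_device with bdgrab() for as long as the loop device uses it and drop that reference with bdput() in loop_clr_fd(), so the structure cannot be destroyed in between. A minimal refcount sketch of that hold/release pairing (struct blockdev and the helpers are illustrative, not the real bdgrab()/bdput()):

    #include <stdio.h>
    #include <stdlib.h>

    struct blockdev {
        int refcount;
    };

    static struct blockdev *bdev_get(struct blockdev *b)
    {
        b->refcount++;          /* like bdgrab(): caller now keeps it alive */
        return b;
    }

    static void bdev_put(struct blockdev *b)
    {
        if (--b->refcount == 0) {
            printf("last reference dropped, freeing\n");
            free(b);
        }
    }

    int main(void)
    {
        struct blockdev *b = malloc(sizeof(*b));

        if (!b)
            return 1;
        b->refcount = 1;        /* reference held by the "opener" */

        bdev_get(b);            /* loop_set_fd(): pin for the loop device */
        bdev_put(b);            /* the opener goes away; object survives */
        bdev_put(b);            /* loop_clr_fd(): final put, now it is freed */
        return 0;
    }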
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index 1788f491e0fb..076ae7f1b781 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c | |||
@@ -890,8 +890,10 @@ static int mg_probe(struct platform_device *plat_dev) | |||
890 | gpio_direction_output(host->rst, 1); | 890 | gpio_direction_output(host->rst, 1); |
891 | 891 | ||
892 | /* reset out pin */ | 892 | /* reset out pin */ |
893 | if (!(prv_data->dev_attr & MG_DEV_MASK)) | 893 | if (!(prv_data->dev_attr & MG_DEV_MASK)) { |
894 | err = -EINVAL; | ||
894 | goto probe_err_3a; | 895 | goto probe_err_3a; |
896 | } | ||
895 | 897 | ||
896 | if (prv_data->dev_attr != MG_BOOT_DEV) { | 898 | if (prv_data->dev_attr != MG_BOOT_DEV) { |
897 | rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO, | 899 | rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO, |
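Both the mg_disk and loop_add() fixes address the same pattern: jumping to a cleanup label while the error variable still holds 0 (or a stale value) makes probe "succeed" with a half-initialized device. A compact sketch of setting the error code before every goto (the -EINVAL/-ENOMEM values mirror the fixes above; the rest is illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    static int probe(int attr_ok)
    {
        int err = 0;
        void *queue = NULL;

        if (!attr_ok) {
            err = -EINVAL;      /* set the code, then jump */
            goto out_err;
        }

        err = -ENOMEM;          /* default for the allocation below */
        queue = malloc(4096);
        if (!queue)
            goto out_err;

        free(queue);
        return 0;

    out_err:
        printf("probe failed: %d\n", err);
        return err;
    }

    int main(void)
    {
        return probe(0) == -EINVAL ? 0 : 1;
    }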
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 11cc9522cdd4..32c678028e53 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c | |||
@@ -81,12 +81,17 @@ | |||
81 | /* Device instance number, incremented each time a device is probed. */ | 81 | /* Device instance number, incremented each time a device is probed. */ |
82 | static int instance; | 82 | static int instance; |
83 | 83 | ||
84 | struct list_head online_list; | ||
85 | struct list_head removing_list; | ||
86 | spinlock_t dev_lock; | ||
87 | |||
84 | /* | 88 | /* |
85 | * Global variable used to hold the major block device number | 89 | * Global variable used to hold the major block device number |
86 | * allocated in mtip_init(). | 90 | * allocated in mtip_init(). |
87 | */ | 91 | */ |
88 | static int mtip_major; | 92 | static int mtip_major; |
89 | static struct dentry *dfs_parent; | 93 | static struct dentry *dfs_parent; |
94 | static struct dentry *dfs_device_status; | ||
90 | 95 | ||
91 | static u32 cpu_use[NR_CPUS]; | 96 | static u32 cpu_use[NR_CPUS]; |
92 | 97 | ||
@@ -243,40 +248,31 @@ static inline void release_slot(struct mtip_port *port, int tag) | |||
243 | /* | 248 | /* |
244 | * Reset the HBA (without sleeping) | 249 | * Reset the HBA (without sleeping) |
245 | * | 250 | * |
246 | * Just like hba_reset, except does not call sleep, so can be | ||
247 | * run from interrupt/tasklet context. | ||
248 | * | ||
249 | * @dd Pointer to the driver data structure. | 251 | * @dd Pointer to the driver data structure. |
250 | * | 252 | * |
251 | * return value | 253 | * return value |
252 | * 0 The reset was successful. | 254 | * 0 The reset was successful. |
253 | * -1 The HBA Reset bit did not clear. | 255 | * -1 The HBA Reset bit did not clear. |
254 | */ | 256 | */ |
255 | static int hba_reset_nosleep(struct driver_data *dd) | 257 | static int mtip_hba_reset(struct driver_data *dd) |
256 | { | 258 | { |
257 | unsigned long timeout; | 259 | unsigned long timeout; |
258 | 260 | ||
259 | /* Chip quirk: quiesce any chip function */ | ||
260 | mdelay(10); | ||
261 | |||
262 | /* Set the reset bit */ | 261 | /* Set the reset bit */ |
263 | writel(HOST_RESET, dd->mmio + HOST_CTL); | 262 | writel(HOST_RESET, dd->mmio + HOST_CTL); |
264 | 263 | ||
265 | /* Flush */ | 264 | /* Flush */ |
266 | readl(dd->mmio + HOST_CTL); | 265 | readl(dd->mmio + HOST_CTL); |
267 | 266 | ||
268 | /* | 267 | /* Spin for up to 2 seconds, waiting for reset acknowledgement */ |
269 | * Wait 10ms then spin for up to 1 second | 268 | timeout = jiffies + msecs_to_jiffies(2000); |
270 | * waiting for reset acknowledgement | 269 | do { |
271 | */ | 270 | mdelay(10); |
272 | timeout = jiffies + msecs_to_jiffies(1000); | 271 | if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) |
273 | mdelay(10); | 272 | return -1; |
274 | while ((readl(dd->mmio + HOST_CTL) & HOST_RESET) | ||
275 | && time_before(jiffies, timeout)) | ||
276 | mdelay(1); | ||
277 | 273 | ||
278 | if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) | 274 | } while ((readl(dd->mmio + HOST_CTL) & HOST_RESET) |
279 | return -1; | 275 | && time_before(jiffies, timeout)); |
280 | 276 | ||
281 | if (readl(dd->mmio + HOST_CTL) & HOST_RESET) | 277 | if (readl(dd->mmio + HOST_CTL) & HOST_RESET) |
282 | return -1; | 278 | return -1; |
@@ -481,7 +477,7 @@ static void mtip_restart_port(struct mtip_port *port) | |||
481 | dev_warn(&port->dd->pdev->dev, | 477 | dev_warn(&port->dd->pdev->dev, |
482 | "PxCMD.CR not clear, escalating reset\n"); | 478 | "PxCMD.CR not clear, escalating reset\n"); |
483 | 479 | ||
484 | if (hba_reset_nosleep(port->dd)) | 480 | if (mtip_hba_reset(port->dd)) |
485 | dev_err(&port->dd->pdev->dev, | 481 | dev_err(&port->dd->pdev->dev, |
486 | "HBA reset escalation failed.\n"); | 482 | "HBA reset escalation failed.\n"); |
487 | 483 | ||
@@ -527,6 +523,26 @@ static void mtip_restart_port(struct mtip_port *port) | |||
527 | 523 | ||
528 | } | 524 | } |
529 | 525 | ||
526 | static int mtip_device_reset(struct driver_data *dd) | ||
527 | { | ||
528 | int rv = 0; | ||
529 | |||
530 | if (mtip_check_surprise_removal(dd->pdev)) | ||
531 | return 0; | ||
532 | |||
533 | if (mtip_hba_reset(dd) < 0) | ||
534 | rv = -EFAULT; | ||
535 | |||
536 | mdelay(1); | ||
537 | mtip_init_port(dd->port); | ||
538 | mtip_start_port(dd->port); | ||
539 | |||
540 | /* Enable interrupts on the HBA. */ | ||
541 | writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, | ||
542 | dd->mmio + HOST_CTL); | ||
543 | return rv; | ||
544 | } | ||
545 | |||
530 | /* | 546 | /* |
531 | * Helper function for tag logging | 547 | * Helper function for tag logging |
532 | */ | 548 | */ |
@@ -632,7 +648,7 @@ static void mtip_timeout_function(unsigned long int data) | |||
632 | if (cmdto_cnt) { | 648 | if (cmdto_cnt) { |
633 | print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); | 649 | print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); |
634 | if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { | 650 | if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { |
635 | mtip_restart_port(port); | 651 | mtip_device_reset(port->dd); |
636 | wake_up_interruptible(&port->svc_wait); | 652 | wake_up_interruptible(&port->svc_wait); |
637 | } | 653 | } |
638 | clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); | 654 | clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); |
@@ -1283,11 +1299,11 @@ static int mtip_exec_internal_command(struct mtip_port *port, | |||
1283 | int rv = 0, ready2go = 1; | 1299 | int rv = 0, ready2go = 1; |
1284 | struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL]; | 1300 | struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL]; |
1285 | unsigned long to; | 1301 | unsigned long to; |
1302 | struct driver_data *dd = port->dd; | ||
1286 | 1303 | ||
1287 | /* Make sure the buffer is 8 byte aligned. This is asic specific. */ | 1304 | /* Make sure the buffer is 8 byte aligned. This is asic specific. */ |
1288 | if (buffer & 0x00000007) { | 1305 | if (buffer & 0x00000007) { |
1289 | dev_err(&port->dd->pdev->dev, | 1306 | dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n"); |
1290 | "SG buffer is not 8 byte aligned\n"); | ||
1291 | return -EFAULT; | 1307 | return -EFAULT; |
1292 | } | 1308 | } |
1293 | 1309 | ||
@@ -1300,23 +1316,21 @@ static int mtip_exec_internal_command(struct mtip_port *port, | |||
1300 | mdelay(100); | 1316 | mdelay(100); |
1301 | } while (time_before(jiffies, to)); | 1317 | } while (time_before(jiffies, to)); |
1302 | if (!ready2go) { | 1318 | if (!ready2go) { |
1303 | dev_warn(&port->dd->pdev->dev, | 1319 | dev_warn(&dd->pdev->dev, |
1304 | "Internal cmd active. new cmd [%02X]\n", fis->command); | 1320 | "Internal cmd active. new cmd [%02X]\n", fis->command); |
1305 | return -EBUSY; | 1321 | return -EBUSY; |
1306 | } | 1322 | } |
1307 | set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); | 1323 | set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); |
1308 | port->ic_pause_timer = 0; | 1324 | port->ic_pause_timer = 0; |
1309 | 1325 | ||
1310 | if (fis->command == ATA_CMD_SEC_ERASE_UNIT) | 1326 | clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); |
1311 | clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); | 1327 | clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); |
1312 | else if (fis->command == ATA_CMD_DOWNLOAD_MICRO) | ||
1313 | clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); | ||
1314 | 1328 | ||
1315 | if (atomic == GFP_KERNEL) { | 1329 | if (atomic == GFP_KERNEL) { |
1316 | if (fis->command != ATA_CMD_STANDBYNOW1) { | 1330 | if (fis->command != ATA_CMD_STANDBYNOW1) { |
1317 | /* wait for io to complete if non atomic */ | 1331 | /* wait for io to complete if non atomic */ |
1318 | if (mtip_quiesce_io(port, 5000) < 0) { | 1332 | if (mtip_quiesce_io(port, 5000) < 0) { |
1319 | dev_warn(&port->dd->pdev->dev, | 1333 | dev_warn(&dd->pdev->dev, |
1320 | "Failed to quiesce IO\n"); | 1334 | "Failed to quiesce IO\n"); |
1321 | release_slot(port, MTIP_TAG_INTERNAL); | 1335 | release_slot(port, MTIP_TAG_INTERNAL); |
1322 | clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); | 1336 | clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); |
@@ -1361,58 +1375,84 @@ static int mtip_exec_internal_command(struct mtip_port *port, | |||
1361 | /* Issue the command to the hardware */ | 1375 | /* Issue the command to the hardware */ |
1362 | mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL); | 1376 | mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL); |
1363 | 1377 | ||
1364 | /* Poll if atomic, wait_for_completion otherwise */ | ||
1365 | if (atomic == GFP_KERNEL) { | 1378 | if (atomic == GFP_KERNEL) { |
1366 | /* Wait for the command to complete or timeout. */ | 1379 | /* Wait for the command to complete or timeout. */ |
1367 | if (wait_for_completion_timeout( | 1380 | if (wait_for_completion_interruptible_timeout( |
1368 | &wait, | 1381 | &wait, |
1369 | msecs_to_jiffies(timeout)) == 0) { | 1382 | msecs_to_jiffies(timeout)) <= 0) { |
1370 | dev_err(&port->dd->pdev->dev, | 1383 | if (rv == -ERESTARTSYS) { /* interrupted */ |
1371 | "Internal command did not complete [%d] " | 1384 | dev_err(&dd->pdev->dev, |
1372 | "within timeout of %lu ms\n", | 1385 | "Internal command [%02X] was interrupted after %lu ms\n", |
1373 | atomic, timeout); | 1386 | fis->command, timeout); |
1374 | if (mtip_check_surprise_removal(port->dd->pdev) || | 1387 | rv = -EINTR; |
1388 | goto exec_ic_exit; | ||
1389 | } else if (rv == 0) /* timeout */ | ||
1390 | dev_err(&dd->pdev->dev, | ||
1391 | "Internal command did not complete [%02X] within timeout of %lu ms\n", | ||
1392 | fis->command, timeout); | ||
1393 | else | ||
1394 | dev_err(&dd->pdev->dev, | ||
1395 | "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n", | ||
1396 | fis->command, rv, timeout); | ||
1397 | |||
1398 | if (mtip_check_surprise_removal(dd->pdev) || | ||
1375 | test_bit(MTIP_DDF_REMOVE_PENDING_BIT, | 1399 | test_bit(MTIP_DDF_REMOVE_PENDING_BIT, |
1376 | &port->dd->dd_flag)) { | 1400 | &dd->dd_flag)) { |
1401 | dev_err(&dd->pdev->dev, | ||
1402 | "Internal command [%02X] wait returned due to SR\n", | ||
1403 | fis->command); | ||
1377 | rv = -ENXIO; | 1404 | rv = -ENXIO; |
1378 | goto exec_ic_exit; | 1405 | goto exec_ic_exit; |
1379 | } | 1406 | } |
1407 | mtip_device_reset(dd); /* recover from timeout issue */ | ||
1380 | rv = -EAGAIN; | 1408 | rv = -EAGAIN; |
1409 | goto exec_ic_exit; | ||
1381 | } | 1410 | } |
1382 | } else { | 1411 | } else { |
1412 | u32 hba_stat, port_stat; | ||
1413 | |||
1383 | /* Spin for <timeout> checking if command still outstanding */ | 1414 | /* Spin for <timeout> checking if command still outstanding */ |
1384 | timeout = jiffies + msecs_to_jiffies(timeout); | 1415 | timeout = jiffies + msecs_to_jiffies(timeout); |
1385 | while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL]) | 1416 | while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL]) |
1386 | & (1 << MTIP_TAG_INTERNAL)) | 1417 | & (1 << MTIP_TAG_INTERNAL)) |
1387 | && time_before(jiffies, timeout)) { | 1418 | && time_before(jiffies, timeout)) { |
1388 | if (mtip_check_surprise_removal(port->dd->pdev)) { | 1419 | if (mtip_check_surprise_removal(dd->pdev)) { |
1389 | rv = -ENXIO; | 1420 | rv = -ENXIO; |
1390 | goto exec_ic_exit; | 1421 | goto exec_ic_exit; |
1391 | } | 1422 | } |
1392 | if ((fis->command != ATA_CMD_STANDBYNOW1) && | 1423 | if ((fis->command != ATA_CMD_STANDBYNOW1) && |
1393 | test_bit(MTIP_DDF_REMOVE_PENDING_BIT, | 1424 | test_bit(MTIP_DDF_REMOVE_PENDING_BIT, |
1394 | &port->dd->dd_flag)) { | 1425 | &dd->dd_flag)) { |
1395 | rv = -ENXIO; | 1426 | rv = -ENXIO; |
1396 | goto exec_ic_exit; | 1427 | goto exec_ic_exit; |
1397 | } | 1428 | } |
1398 | if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) { | 1429 | port_stat = readl(port->mmio + PORT_IRQ_STAT); |
1399 | atomic_inc(&int_cmd->active); /* error */ | 1430 | if (!port_stat) |
1400 | break; | 1431 | continue; |
1432 | |||
1433 | if (port_stat & PORT_IRQ_ERR) { | ||
1434 | dev_err(&dd->pdev->dev, | ||
1435 | "Internal command [%02X] failed\n", | ||
1436 | fis->command); | ||
1437 | mtip_device_reset(dd); | ||
1438 | rv = -EIO; | ||
1439 | goto exec_ic_exit; | ||
1440 | } else { | ||
1441 | writel(port_stat, port->mmio + PORT_IRQ_STAT); | ||
1442 | hba_stat = readl(dd->mmio + HOST_IRQ_STAT); | ||
1443 | if (hba_stat) | ||
1444 | writel(hba_stat, | ||
1445 | dd->mmio + HOST_IRQ_STAT); | ||
1401 | } | 1446 | } |
1447 | break; | ||
1402 | } | 1448 | } |
1403 | } | 1449 | } |
1404 | 1450 | ||
1405 | if (atomic_read(&int_cmd->active) > 1) { | ||
1406 | dev_err(&port->dd->pdev->dev, | ||
1407 | "Internal command [%02X] failed\n", fis->command); | ||
1408 | rv = -EIO; | ||
1409 | } | ||
1410 | if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) | 1451 | if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) |
1411 | & (1 << MTIP_TAG_INTERNAL)) { | 1452 | & (1 << MTIP_TAG_INTERNAL)) { |
1412 | rv = -ENXIO; | 1453 | rv = -ENXIO; |
1413 | if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, | 1454 | if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) { |
1414 | &port->dd->dd_flag)) { | 1455 | mtip_device_reset(dd); |
1415 | mtip_restart_port(port); | ||
1416 | rv = -EAGAIN; | 1456 | rv = -EAGAIN; |
1417 | } | 1457 | } |
1418 | } | 1458 | } |
@@ -1724,7 +1764,8 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id, | |||
1724 | * -EINVAL Invalid parameters passed in, trim not supported | 1764 | * -EINVAL Invalid parameters passed in, trim not supported |
1725 | * -EIO Error submitting trim request to hw | 1765 | * -EIO Error submitting trim request to hw |
1726 | */ | 1766 | */ |
1727 | static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len) | 1767 | static int mtip_send_trim(struct driver_data *dd, unsigned int lba, |
1768 | unsigned int len) | ||
1728 | { | 1769 | { |
1729 | int i, rv = 0; | 1770 | int i, rv = 0; |
1730 | u64 tlba, tlen, sect_left; | 1771 | u64 tlba, tlen, sect_left; |
@@ -1811,45 +1852,6 @@ static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors) | |||
1811 | } | 1852 | } |
1812 | 1853 | ||
1813 | /* | 1854 | /* |
1814 | * Reset the HBA. | ||
1815 | * | ||
1816 | * Resets the HBA by setting the HBA Reset bit in the Global | ||
1817 | * HBA Control register. After setting the HBA Reset bit the | ||
1818 | * function waits for 1 second before reading the HBA Reset | ||
1819 | * bit to make sure it has cleared. If HBA Reset is not clear | ||
1820 | * an error is returned. Cannot be used in non-blockable | ||
1821 | * context. | ||
1822 | * | ||
1823 | * @dd Pointer to the driver data structure. | ||
1824 | * | ||
1825 | * return value | ||
1826 | * 0 The reset was successful. | ||
1827 | * -1 The HBA Reset bit did not clear. | ||
1828 | */ | ||
1829 | static int mtip_hba_reset(struct driver_data *dd) | ||
1830 | { | ||
1831 | mtip_deinit_port(dd->port); | ||
1832 | |||
1833 | /* Set the reset bit */ | ||
1834 | writel(HOST_RESET, dd->mmio + HOST_CTL); | ||
1835 | |||
1836 | /* Flush */ | ||
1837 | readl(dd->mmio + HOST_CTL); | ||
1838 | |||
1839 | /* Wait for reset to clear */ | ||
1840 | ssleep(1); | ||
1841 | |||
1842 | /* Check the bit has cleared */ | ||
1843 | if (readl(dd->mmio + HOST_CTL) & HOST_RESET) { | ||
1844 | dev_err(&dd->pdev->dev, | ||
1845 | "Reset bit did not clear.\n"); | ||
1846 | return -1; | ||
1847 | } | ||
1848 | |||
1849 | return 0; | ||
1850 | } | ||
1851 | |||
1852 | /* | ||
1853 | * Display the identify command data. | 1855 | * Display the identify command data. |
1854 | * | 1856 | * |
1855 | * @port Pointer to the port data structure. | 1857 | * @port Pointer to the port data structure. |
@@ -2710,6 +2712,100 @@ static ssize_t mtip_hw_show_status(struct device *dev, | |||
2710 | 2712 | ||
2711 | static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); | 2713 | static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); |
2712 | 2714 | ||
2715 | /* debugsfs entries */ | ||
2716 | |||
2717 | static ssize_t show_device_status(struct device_driver *drv, char *buf) | ||
2718 | { | ||
2719 | int size = 0; | ||
2720 | struct driver_data *dd, *tmp; | ||
2721 | unsigned long flags; | ||
2722 | char id_buf[42]; | ||
2723 | u16 status = 0; | ||
2724 | |||
2725 | spin_lock_irqsave(&dev_lock, flags); | ||
2726 | size += sprintf(&buf[size], "Devices Present:\n"); | ||
2727 | list_for_each_entry_safe(dd, tmp, &online_list, online_list) { | ||
2728 | if (dd->pdev) { | ||
2729 | if (dd->port && | ||
2730 | dd->port->identify && | ||
2731 | dd->port->identify_valid) { | ||
2732 | strlcpy(id_buf, | ||
2733 | (char *) (dd->port->identify + 10), 21); | ||
2734 | status = *(dd->port->identify + 141); | ||
2735 | } else { | ||
2736 | memset(id_buf, 0, 42); | ||
2737 | status = 0; | ||
2738 | } | ||
2739 | |||
2740 | if (dd->port && | ||
2741 | test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { | ||
2742 | size += sprintf(&buf[size], | ||
2743 | " device %s %s (ftl rebuild %d %%)\n", | ||
2744 | dev_name(&dd->pdev->dev), | ||
2745 | id_buf, | ||
2746 | status); | ||
2747 | } else { | ||
2748 | size += sprintf(&buf[size], | ||
2749 | " device %s %s\n", | ||
2750 | dev_name(&dd->pdev->dev), | ||
2751 | id_buf); | ||
2752 | } | ||
2753 | } | ||
2754 | } | ||
2755 | |||
2756 | size += sprintf(&buf[size], "Devices Being Removed:\n"); | ||
2757 | list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) { | ||
2758 | if (dd->pdev) { | ||
2759 | if (dd->port && | ||
2760 | dd->port->identify && | ||
2761 | dd->port->identify_valid) { | ||
2762 | strlcpy(id_buf, | ||
2763 | (char *) (dd->port->identify+10), 21); | ||
2764 | status = *(dd->port->identify + 141); | ||
2765 | } else { | ||
2766 | memset(id_buf, 0, 42); | ||
2767 | status = 0; | ||
2768 | } | ||
2769 | |||
2770 | if (dd->port && | ||
2771 | test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) { | ||
2772 | size += sprintf(&buf[size], | ||
2773 | " device %s %s (ftl rebuild %d %%)\n", | ||
2774 | dev_name(&dd->pdev->dev), | ||
2775 | id_buf, | ||
2776 | status); | ||
2777 | } else { | ||
2778 | size += sprintf(&buf[size], | ||
2779 | " device %s %s\n", | ||
2780 | dev_name(&dd->pdev->dev), | ||
2781 | id_buf); | ||
2782 | } | ||
2783 | } | ||
2784 | } | ||
2785 | spin_unlock_irqrestore(&dev_lock, flags); | ||
2786 | |||
2787 | return size; | ||
2788 | } | ||
2789 | |||
2790 | static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf, | ||
2791 | size_t len, loff_t *offset) | ||
2792 | { | ||
2793 | int size = *offset; | ||
2794 | char buf[MTIP_DFS_MAX_BUF_SIZE]; | ||
2795 | |||
2796 | if (!len || *offset) | ||
2797 | return 0; | ||
2798 | |||
2799 | size += show_device_status(NULL, buf); | ||
2800 | |||
2801 | *offset = size <= len ? size : len; | ||
2802 | size = copy_to_user(ubuf, buf, *offset); | ||
2803 | if (size) | ||
2804 | return -EFAULT; | ||
2805 | |||
2806 | return *offset; | ||
2807 | } | ||
2808 | |||
2713 | static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, | 2809 | static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, |
2714 | size_t len, loff_t *offset) | 2810 | size_t len, loff_t *offset) |
2715 | { | 2811 | { |
@@ -2804,6 +2900,13 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, | |||
2804 | return *offset; | 2900 | return *offset; |
2805 | } | 2901 | } |
2806 | 2902 | ||
2903 | static const struct file_operations mtip_device_status_fops = { | ||
2904 | .owner = THIS_MODULE, | ||
2905 | .open = simple_open, | ||
2906 | .read = mtip_hw_read_device_status, | ||
2907 | .llseek = no_llseek, | ||
2908 | }; | ||
2909 | |||
2807 | static const struct file_operations mtip_regs_fops = { | 2910 | static const struct file_operations mtip_regs_fops = { |
2808 | .owner = THIS_MODULE, | 2911 | .owner = THIS_MODULE, |
2809 | .open = simple_open, | 2912 | .open = simple_open, |
@@ -4161,6 +4264,7 @@ static int mtip_pci_probe(struct pci_dev *pdev, | |||
4161 | const struct cpumask *node_mask; | 4264 | const struct cpumask *node_mask; |
4162 | int cpu, i = 0, j = 0; | 4265 | int cpu, i = 0, j = 0; |
4163 | int my_node = NUMA_NO_NODE; | 4266 | int my_node = NUMA_NO_NODE; |
4267 | unsigned long flags; | ||
4164 | 4268 | ||
4165 | /* Allocate memory for this devices private data. */ | 4269 | /* Allocate memory for this devices private data. */ |
4166 | my_node = pcibus_to_node(pdev->bus); | 4270 | my_node = pcibus_to_node(pdev->bus); |
@@ -4218,12 +4322,16 @@ static int mtip_pci_probe(struct pci_dev *pdev, | |||
4218 | dd->pdev = pdev; | 4322 | dd->pdev = pdev; |
4219 | dd->numa_node = my_node; | 4323 | dd->numa_node = my_node; |
4220 | 4324 | ||
4325 | INIT_LIST_HEAD(&dd->online_list); | ||
4326 | INIT_LIST_HEAD(&dd->remove_list); | ||
4327 | |||
4221 | memset(dd->workq_name, 0, 32); | 4328 | memset(dd->workq_name, 0, 32); |
4222 | snprintf(dd->workq_name, 31, "mtipq%d", dd->instance); | 4329 | snprintf(dd->workq_name, 31, "mtipq%d", dd->instance); |
4223 | 4330 | ||
4224 | dd->isr_workq = create_workqueue(dd->workq_name); | 4331 | dd->isr_workq = create_workqueue(dd->workq_name); |
4225 | if (!dd->isr_workq) { | 4332 | if (!dd->isr_workq) { |
4226 | dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance); | 4333 | dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance); |
4334 | rv = -ENOMEM; | ||
4227 | goto block_initialize_err; | 4335 | goto block_initialize_err; |
4228 | } | 4336 | } |
4229 | 4337 | ||
@@ -4282,7 +4390,8 @@ static int mtip_pci_probe(struct pci_dev *pdev, | |||
4282 | INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7); | 4390 | INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7); |
4283 | 4391 | ||
4284 | pci_set_master(pdev); | 4392 | pci_set_master(pdev); |
4285 | if (pci_enable_msi(pdev)) { | 4393 | rv = pci_enable_msi(pdev); |
4394 | if (rv) { | ||
4286 | dev_warn(&pdev->dev, | 4395 | dev_warn(&pdev->dev, |
4287 | "Unable to enable MSI interrupt.\n"); | 4396 | "Unable to enable MSI interrupt.\n"); |
4288 | goto block_initialize_err; | 4397 | goto block_initialize_err; |
@@ -4303,6 +4412,14 @@ static int mtip_pci_probe(struct pci_dev *pdev, | |||
4303 | instance++; | 4412 | instance++; |
4304 | if (rv != MTIP_FTL_REBUILD_MAGIC) | 4413 | if (rv != MTIP_FTL_REBUILD_MAGIC) |
4305 | set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); | 4414 | set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); |
4415 | else | ||
4416 | rv = 0; /* device in rebuild state, return 0 from probe */ | ||
4417 | |||
4418 | /* Add to online list even if in ftl rebuild */ | ||
4419 | spin_lock_irqsave(&dev_lock, flags); | ||
4420 | list_add(&dd->online_list, &online_list); | ||
4421 | spin_unlock_irqrestore(&dev_lock, flags); | ||
4422 | |||
4306 | goto done; | 4423 | goto done; |
4307 | 4424 | ||
4308 | block_initialize_err: | 4425 | block_initialize_err: |
@@ -4336,9 +4453,15 @@ static void mtip_pci_remove(struct pci_dev *pdev) | |||
4336 | { | 4453 | { |
4337 | struct driver_data *dd = pci_get_drvdata(pdev); | 4454 | struct driver_data *dd = pci_get_drvdata(pdev); |
4338 | int counter = 0; | 4455 | int counter = 0; |
4456 | unsigned long flags; | ||
4339 | 4457 | ||
4340 | set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); | 4458 | set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); |
4341 | 4459 | ||
4460 | spin_lock_irqsave(&dev_lock, flags); | ||
4461 | list_del_init(&dd->online_list); | ||
4462 | list_add(&dd->remove_list, &removing_list); | ||
4463 | spin_unlock_irqrestore(&dev_lock, flags); | ||
4464 | |||
4342 | if (mtip_check_surprise_removal(pdev)) { | 4465 | if (mtip_check_surprise_removal(pdev)) { |
4343 | while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { | 4466 | while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) { |
4344 | counter++; | 4467 | counter++; |
@@ -4364,6 +4487,10 @@ static void mtip_pci_remove(struct pci_dev *pdev) | |||
4364 | 4487 | ||
4365 | pci_disable_msi(pdev); | 4488 | pci_disable_msi(pdev); |
4366 | 4489 | ||
4490 | spin_lock_irqsave(&dev_lock, flags); | ||
4491 | list_del_init(&dd->remove_list); | ||
4492 | spin_unlock_irqrestore(&dev_lock, flags); | ||
4493 | |||
4367 | kfree(dd); | 4494 | kfree(dd); |
4368 | pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); | 4495 | pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); |
4369 | } | 4496 | } |
@@ -4511,6 +4638,11 @@ static int __init mtip_init(void) | |||
4511 | 4638 | ||
4512 | pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); | 4639 | pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); |
4513 | 4640 | ||
4641 | spin_lock_init(&dev_lock); | ||
4642 | |||
4643 | INIT_LIST_HEAD(&online_list); | ||
4644 | INIT_LIST_HEAD(&removing_list); | ||
4645 | |||
4514 | /* Allocate a major block device number to use with this driver. */ | 4646 | /* Allocate a major block device number to use with this driver. */ |
4515 | error = register_blkdev(0, MTIP_DRV_NAME); | 4647 | error = register_blkdev(0, MTIP_DRV_NAME); |
4516 | if (error <= 0) { | 4648 | if (error <= 0) { |
@@ -4520,11 +4652,18 @@ static int __init mtip_init(void) | |||
4520 | } | 4652 | } |
4521 | mtip_major = error; | 4653 | mtip_major = error; |
4522 | 4654 | ||
4523 | if (!dfs_parent) { | 4655 | dfs_parent = debugfs_create_dir("rssd", NULL); |
4524 | dfs_parent = debugfs_create_dir("rssd", NULL); | 4656 | if (IS_ERR_OR_NULL(dfs_parent)) { |
4525 | if (IS_ERR_OR_NULL(dfs_parent)) { | 4657 | pr_warn("Error creating debugfs parent\n"); |
4526 | pr_warn("Error creating debugfs parent\n"); | 4658 | dfs_parent = NULL; |
4527 | dfs_parent = NULL; | 4659 | } |
4660 | if (dfs_parent) { | ||
4661 | dfs_device_status = debugfs_create_file("device_status", | ||
4662 | S_IRUGO, dfs_parent, NULL, | ||
4663 | &mtip_device_status_fops); | ||
4664 | if (IS_ERR_OR_NULL(dfs_device_status)) { | ||
4665 | pr_err("Error creating device_status node\n"); | ||
4666 | dfs_device_status = NULL; | ||
4528 | } | 4667 | } |
4529 | } | 4668 | } |
4530 | 4669 | ||
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 3bffff5f670c..8e8334c9dd0f 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h | |||
@@ -129,9 +129,9 @@ enum { | |||
129 | MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */ | 129 | MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */ |
130 | MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */ | 130 | MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */ |
131 | MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */ | 131 | MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */ |
132 | MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \ | 132 | MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | |
133 | (1 << MTIP_PF_EH_ACTIVE_BIT) | \ | 133 | (1 << MTIP_PF_EH_ACTIVE_BIT) | |
134 | (1 << MTIP_PF_SE_ACTIVE_BIT) | \ | 134 | (1 << MTIP_PF_SE_ACTIVE_BIT) | |
135 | (1 << MTIP_PF_DM_ACTIVE_BIT)), | 135 | (1 << MTIP_PF_DM_ACTIVE_BIT)), |
136 | 136 | ||
137 | MTIP_PF_SVC_THD_ACTIVE_BIT = 4, | 137 | MTIP_PF_SVC_THD_ACTIVE_BIT = 4, |
@@ -144,9 +144,9 @@ enum { | |||
144 | MTIP_DDF_REMOVE_PENDING_BIT = 1, | 144 | MTIP_DDF_REMOVE_PENDING_BIT = 1, |
145 | MTIP_DDF_OVER_TEMP_BIT = 2, | 145 | MTIP_DDF_OVER_TEMP_BIT = 2, |
146 | MTIP_DDF_WRITE_PROTECT_BIT = 3, | 146 | MTIP_DDF_WRITE_PROTECT_BIT = 3, |
147 | MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \ | 147 | MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | |
148 | (1 << MTIP_DDF_SEC_LOCK_BIT) | \ | 148 | (1 << MTIP_DDF_SEC_LOCK_BIT) | |
149 | (1 << MTIP_DDF_OVER_TEMP_BIT) | \ | 149 | (1 << MTIP_DDF_OVER_TEMP_BIT) | |
150 | (1 << MTIP_DDF_WRITE_PROTECT_BIT)), | 150 | (1 << MTIP_DDF_WRITE_PROTECT_BIT)), |
151 | 151 | ||
152 | MTIP_DDF_CLEANUP_BIT = 5, | 152 | MTIP_DDF_CLEANUP_BIT = 5, |
@@ -180,7 +180,7 @@ struct mtip_work { | |||
180 | 180 | ||
181 | #define MTIP_TRIM_TIMEOUT_MS 240000 | 181 | #define MTIP_TRIM_TIMEOUT_MS 240000 |
182 | #define MTIP_MAX_TRIM_ENTRIES 8 | 182 | #define MTIP_MAX_TRIM_ENTRIES 8 |
183 | #define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8 | 183 | #define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8 |
184 | 184 | ||
185 | struct mtip_trim_entry { | 185 | struct mtip_trim_entry { |
186 | u32 lba; /* starting lba of region */ | 186 | u32 lba; /* starting lba of region */ |
@@ -501,6 +501,10 @@ struct driver_data { | |||
501 | atomic_t irq_workers_active; | 501 | atomic_t irq_workers_active; |
502 | 502 | ||
503 | int isr_binding; | 503 | int isr_binding; |
504 | |||
505 | struct list_head online_list; /* linkage for online list */ | ||
506 | |||
507 | struct list_head remove_list; /* linkage for removing list */ | ||
504 | }; | 508 | }; |
505 | 509 | ||
506 | #endif | 510 | #endif |
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c index 07fb2dfaae13..9dcefe40380b 100644 --- a/drivers/block/nvme.c +++ b/drivers/block/nvme.c | |||
@@ -135,6 +135,7 @@ static inline void _nvme_check_size(void) | |||
135 | BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); | 135 | BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); |
136 | BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); | 136 | BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); |
137 | BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); | 137 | BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); |
138 | BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); | ||
138 | } | 139 | } |
139 | 140 | ||
140 | typedef void (*nvme_completion_fn)(struct nvme_dev *, void *, | 141 | typedef void (*nvme_completion_fn)(struct nvme_dev *, void *, |
@@ -237,7 +238,8 @@ static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid, | |||
237 | *fn = special_completion; | 238 | *fn = special_completion; |
238 | return CMD_CTX_INVALID; | 239 | return CMD_CTX_INVALID; |
239 | } | 240 | } |
240 | *fn = info[cmdid].fn; | 241 | if (fn) |
242 | *fn = info[cmdid].fn; | ||
241 | ctx = info[cmdid].ctx; | 243 | ctx = info[cmdid].ctx; |
242 | info[cmdid].fn = special_completion; | 244 | info[cmdid].fn = special_completion; |
243 | info[cmdid].ctx = CMD_CTX_COMPLETED; | 245 | info[cmdid].ctx = CMD_CTX_COMPLETED; |
@@ -335,6 +337,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp) | |||
335 | iod->offset = offsetof(struct nvme_iod, sg[nseg]); | 337 | iod->offset = offsetof(struct nvme_iod, sg[nseg]); |
336 | iod->npages = -1; | 338 | iod->npages = -1; |
337 | iod->length = nbytes; | 339 | iod->length = nbytes; |
340 | iod->nents = 0; | ||
338 | } | 341 | } |
339 | 342 | ||
340 | return iod; | 343 | return iod; |
@@ -375,7 +378,8 @@ static void bio_completion(struct nvme_dev *dev, void *ctx, | |||
375 | struct bio *bio = iod->private; | 378 | struct bio *bio = iod->private; |
376 | u16 status = le16_to_cpup(&cqe->status) >> 1; | 379 | u16 status = le16_to_cpup(&cqe->status) >> 1; |
377 | 380 | ||
378 | dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, | 381 | if (iod->nents) |
382 | dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, | ||
379 | bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | 383 | bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); |
380 | nvme_free_iod(dev, iod); | 384 | nvme_free_iod(dev, iod); |
381 | if (status) { | 385 | if (status) { |
@@ -589,7 +593,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, | |||
589 | 593 | ||
590 | result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs); | 594 | result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs); |
591 | if (result < 0) | 595 | if (result < 0) |
592 | goto free_iod; | 596 | goto free_cmdid; |
593 | length = result; | 597 | length = result; |
594 | 598 | ||
595 | cmnd->rw.command_id = cmdid; | 599 | cmnd->rw.command_id = cmdid; |
@@ -609,6 +613,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, | |||
609 | 613 | ||
610 | return 0; | 614 | return 0; |
611 | 615 | ||
616 | free_cmdid: | ||
617 | free_cmdid(nvmeq, cmdid, NULL); | ||
612 | free_iod: | 618 | free_iod: |
613 | nvme_free_iod(nvmeq->dev, iod); | 619 | nvme_free_iod(nvmeq->dev, iod); |
614 | nomem: | 620 | nomem: |
@@ -835,8 +841,8 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns, | |||
835 | return nvme_submit_admin_cmd(dev, &c, NULL); | 841 | return nvme_submit_admin_cmd(dev, &c, NULL); |
836 | } | 842 | } |
837 | 843 | ||
838 | static int nvme_get_features(struct nvme_dev *dev, unsigned fid, | 844 | static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, |
839 | unsigned nsid, dma_addr_t dma_addr) | 845 | dma_addr_t dma_addr, u32 *result) |
840 | { | 846 | { |
841 | struct nvme_command c; | 847 | struct nvme_command c; |
842 | 848 | ||
@@ -846,7 +852,7 @@ static int nvme_get_features(struct nvme_dev *dev, unsigned fid, | |||
846 | c.features.prp1 = cpu_to_le64(dma_addr); | 852 | c.features.prp1 = cpu_to_le64(dma_addr); |
847 | c.features.fid = cpu_to_le32(fid); | 853 | c.features.fid = cpu_to_le32(fid); |
848 | 854 | ||
849 | return nvme_submit_admin_cmd(dev, &c, NULL); | 855 | return nvme_submit_admin_cmd(dev, &c, result); |
850 | } | 856 | } |
851 | 857 | ||
852 | static int nvme_set_features(struct nvme_dev *dev, unsigned fid, | 858 | static int nvme_set_features(struct nvme_dev *dev, unsigned fid, |
@@ -906,6 +912,10 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid) | |||
906 | 912 | ||
907 | spin_lock_irq(&nvmeq->q_lock); | 913 | spin_lock_irq(&nvmeq->q_lock); |
908 | nvme_cancel_ios(nvmeq, false); | 914 | nvme_cancel_ios(nvmeq, false); |
915 | while (bio_list_peek(&nvmeq->sq_cong)) { | ||
916 | struct bio *bio = bio_list_pop(&nvmeq->sq_cong); | ||
917 | bio_endio(bio, -EIO); | ||
918 | } | ||
909 | spin_unlock_irq(&nvmeq->q_lock); | 919 | spin_unlock_irq(&nvmeq->q_lock); |
910 | 920 | ||
911 | irq_set_affinity_hint(vector, NULL); | 921 | irq_set_affinity_hint(vector, NULL); |
@@ -1230,12 +1240,17 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev, | |||
1230 | if (length != cmd.data_len) | 1240 | if (length != cmd.data_len) |
1231 | status = -ENOMEM; | 1241 | status = -ENOMEM; |
1232 | else | 1242 | else |
1233 | status = nvme_submit_admin_cmd(dev, &c, NULL); | 1243 | status = nvme_submit_admin_cmd(dev, &c, &cmd.result); |
1234 | 1244 | ||
1235 | if (cmd.data_len) { | 1245 | if (cmd.data_len) { |
1236 | nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); | 1246 | nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); |
1237 | nvme_free_iod(dev, iod); | 1247 | nvme_free_iod(dev, iod); |
1238 | } | 1248 | } |
1249 | |||
1250 | if (!status && copy_to_user(&ucmd->result, &cmd.result, | ||
1251 | sizeof(cmd.result))) | ||
1252 | status = -EFAULT; | ||
1253 | |||
1239 | return status; | 1254 | return status; |
1240 | } | 1255 | } |
1241 | 1256 | ||
@@ -1523,9 +1538,9 @@ static int nvme_dev_add(struct nvme_dev *dev) | |||
1523 | continue; | 1538 | continue; |
1524 | 1539 | ||
1525 | res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i, | 1540 | res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i, |
1526 | dma_addr + 4096); | 1541 | dma_addr + 4096, NULL); |
1527 | if (res) | 1542 | if (res) |
1528 | continue; | 1543 | memset(mem + 4096, 0, 4096); |
1529 | 1544 | ||
1530 | ns = nvme_alloc_ns(dev, i, mem, mem + 4096); | 1545 | ns = nvme_alloc_ns(dev, i, mem, mem + 4096); |
1531 | if (ns) | 1546 | if (ns) |
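Two of the nvme changes follow the same convention: free_cmdid() now only writes the completion function back if the caller passed a pointer, and nvme_get_features() gained a u32 *result that callers may pass as NULL. A short sketch of that optional out-parameter style (get_feature() and its values are illustrative, not the NVMe command set):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Executes a query and, only when the caller asked for it, reports
     * the device's returned value through *result. */
    static int get_feature(unsigned int fid, uint32_t *result)
    {
        uint32_t hw_value = 0x1234 + fid;   /* pretend completion value */

        if (result)         /* callers that don't care pass NULL */
            *result = hw_value;
        return 0;           /* status code */
    }

    int main(void)
    {
        uint32_t value;

        get_feature(7, NULL);               /* status only */
        if (get_feature(7, &value) == 0)    /* status plus result */
            printf("feature 7 -> 0x%x\n", value);
        return 0;
    }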
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 6c81a4c040b9..f556f8a8b3f9 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -1264,6 +1264,32 @@ static bool obj_request_done_test(struct rbd_obj_request *obj_request) | |||
1264 | return atomic_read(&obj_request->done) != 0; | 1264 | return atomic_read(&obj_request->done) != 0; |
1265 | } | 1265 | } |
1266 | 1266 | ||
1267 | static void | ||
1268 | rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request) | ||
1269 | { | ||
1270 | dout("%s: obj %p img %p result %d %llu/%llu\n", __func__, | ||
1271 | obj_request, obj_request->img_request, obj_request->result, | ||
1272 | obj_request->xferred, obj_request->length); | ||
1273 | /* | ||
1274 | * ENOENT means a hole in the image. We zero-fill the | ||
1275 | * entire length of the request. A short read also implies | ||
1276 | * zero-fill to the end of the request. Either way we | ||
1277 | * update the xferred count to indicate the whole request | ||
1278 | * was satisfied. | ||
1279 | */ | ||
1280 | BUG_ON(obj_request->type != OBJ_REQUEST_BIO); | ||
1281 | if (obj_request->result == -ENOENT) { | ||
1282 | zero_bio_chain(obj_request->bio_list, 0); | ||
1283 | obj_request->result = 0; | ||
1284 | obj_request->xferred = obj_request->length; | ||
1285 | } else if (obj_request->xferred < obj_request->length && | ||
1286 | !obj_request->result) { | ||
1287 | zero_bio_chain(obj_request->bio_list, obj_request->xferred); | ||
1288 | obj_request->xferred = obj_request->length; | ||
1289 | } | ||
1290 | obj_request_done_set(obj_request); | ||
1291 | } | ||
1292 | |||
1267 | static void rbd_obj_request_complete(struct rbd_obj_request *obj_request) | 1293 | static void rbd_obj_request_complete(struct rbd_obj_request *obj_request) |
1268 | { | 1294 | { |
1269 | dout("%s: obj %p cb %p\n", __func__, obj_request, | 1295 | dout("%s: obj %p cb %p\n", __func__, obj_request, |
@@ -1284,23 +1310,10 @@ static void rbd_osd_read_callback(struct rbd_obj_request *obj_request) | |||
1284 | { | 1310 | { |
1285 | dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request, | 1311 | dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request, |
1286 | obj_request->result, obj_request->xferred, obj_request->length); | 1312 | obj_request->result, obj_request->xferred, obj_request->length); |
1287 | /* | 1313 | if (obj_request->img_request) |
1288 | * ENOENT means a hole in the object. We zero-fill the | 1314 | rbd_img_obj_request_read_callback(obj_request); |
1289 | * entire length of the request. A short read also implies | 1315 | else |
1290 | * zero-fill to the end of the request. Either way we | 1316 | obj_request_done_set(obj_request); |
1291 | * update the xferred count to indicate the whole request | ||
1292 | * was satisfied. | ||
1293 | */ | ||
1294 | if (obj_request->result == -ENOENT) { | ||
1295 | zero_bio_chain(obj_request->bio_list, 0); | ||
1296 | obj_request->result = 0; | ||
1297 | obj_request->xferred = obj_request->length; | ||
1298 | } else if (obj_request->xferred < obj_request->length && | ||
1299 | !obj_request->result) { | ||
1300 | zero_bio_chain(obj_request->bio_list, obj_request->xferred); | ||
1301 | obj_request->xferred = obj_request->length; | ||
1302 | } | ||
1303 | obj_request_done_set(obj_request); | ||
1304 | } | 1317 | } |
1305 | 1318 | ||
1306 | static void rbd_osd_write_callback(struct rbd_obj_request *obj_request) | 1319 | static void rbd_osd_write_callback(struct rbd_obj_request *obj_request) |
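The rbd refactor moves the ENOENT/short-read handling into rbd_img_obj_request_read_callback(): a missing object reads as a hole and a short read is padded with zeroes, so the caller always sees a fully satisfied request. A standalone sketch of that zero-fill rule applied to a plain buffer (the field names echo the callback; the rest is illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    struct read_req {
        unsigned char *buf;
        size_t length;      /* requested bytes */
        size_t xferred;     /* bytes the backend actually returned */
        int result;         /* 0, or -ENOENT for a hole */
    };

    static void finish_read(struct read_req *r)
    {
        if (r->result == -ENOENT) {
            /* Hole in the image: the whole request reads as zeroes. */
            memset(r->buf, 0, r->length);
            r->result = 0;
            r->xferred = r->length;
        } else if (r->xferred < r->length && !r->result) {
            /* Short read: pad the tail with zeroes. */
            memset(r->buf + r->xferred, 0, r->length - r->xferred);
            r->xferred = r->length;
        }
    }

    int main(void)
    {
        unsigned char data[8] = "ABCDEFG";
        struct read_req r = { data, sizeof(data), 4, 0 };   /* short read */

        finish_read(&r);
        printf("result=%d xferred=%zu tail=%d\n", r.result, r.xferred, data[6]);
        return 0;
    }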
diff --git a/drivers/block/rsxx/Makefile b/drivers/block/rsxx/Makefile index f35cd0b71f7b..b1c53c0aa450 100644 --- a/drivers/block/rsxx/Makefile +++ b/drivers/block/rsxx/Makefile | |||
@@ -1,2 +1,2 @@ | |||
1 | obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o | 1 | obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o |
2 | rsxx-y := config.o core.o cregs.o dev.o dma.o | 2 | rsxx-objs := config.o core.o cregs.o dev.o dma.o |
diff --git a/drivers/block/rsxx/config.c b/drivers/block/rsxx/config.c index a295e7e9ee41..10cd530d3e10 100644 --- a/drivers/block/rsxx/config.c +++ b/drivers/block/rsxx/config.c | |||
@@ -29,15 +29,13 @@ | |||
29 | #include "rsxx_priv.h" | 29 | #include "rsxx_priv.h" |
30 | #include "rsxx_cfg.h" | 30 | #include "rsxx_cfg.h" |
31 | 31 | ||
32 | static void initialize_config(void *config) | 32 | static void initialize_config(struct rsxx_card_cfg *cfg) |
33 | { | 33 | { |
34 | struct rsxx_card_cfg *cfg = config; | ||
35 | |||
36 | cfg->hdr.version = RSXX_CFG_VERSION; | 34 | cfg->hdr.version = RSXX_CFG_VERSION; |
37 | 35 | ||
38 | cfg->data.block_size = RSXX_HW_BLK_SIZE; | 36 | cfg->data.block_size = RSXX_HW_BLK_SIZE; |
39 | cfg->data.stripe_size = RSXX_HW_BLK_SIZE; | 37 | cfg->data.stripe_size = RSXX_HW_BLK_SIZE; |
40 | cfg->data.vendor_id = RSXX_VENDOR_ID_TMS_IBM; | 38 | cfg->data.vendor_id = RSXX_VENDOR_ID_IBM; |
41 | cfg->data.cache_order = (-1); | 39 | cfg->data.cache_order = (-1); |
42 | cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED; | 40 | cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED; |
43 | cfg->data.intr_coal.count = 0; | 41 | cfg->data.intr_coal.count = 0; |
@@ -181,7 +179,7 @@ int rsxx_load_config(struct rsxx_cardinfo *card) | |||
181 | } else { | 179 | } else { |
182 | dev_info(CARD_TO_DEV(card), | 180 | dev_info(CARD_TO_DEV(card), |
183 | "Initializing card configuration.\n"); | 181 | "Initializing card configuration.\n"); |
184 | initialize_config(card); | 182 | initialize_config(&card->config); |
185 | st = rsxx_save_config(card); | 183 | st = rsxx_save_config(card); |
186 | if (st) | 184 | if (st) |
187 | return st; | 185 | return st; |
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index e5162487686a..5af21f2db29c 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/reboot.h> | 30 | #include <linux/reboot.h> |
31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
32 | #include <linux/bitops.h> | 32 | #include <linux/bitops.h> |
33 | #include <linux/delay.h> | ||
33 | 34 | ||
34 | #include <linux/genhd.h> | 35 | #include <linux/genhd.h> |
35 | #include <linux/idr.h> | 36 | #include <linux/idr.h> |
@@ -39,8 +40,8 @@ | |||
39 | 40 | ||
40 | #define NO_LEGACY 0 | 41 | #define NO_LEGACY 0 |
41 | 42 | ||
42 | MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver"); | 43 | MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver"); |
43 | MODULE_AUTHOR("IBM <support@ramsan.com>"); | 44 | MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM"); |
44 | MODULE_LICENSE("GPL"); | 45 | MODULE_LICENSE("GPL"); |
45 | MODULE_VERSION(DRIVER_VERSION); | 46 | MODULE_VERSION(DRIVER_VERSION); |
46 | 47 | ||
@@ -52,6 +53,13 @@ static DEFINE_IDA(rsxx_disk_ida); | |||
52 | static DEFINE_SPINLOCK(rsxx_ida_lock); | 53 | static DEFINE_SPINLOCK(rsxx_ida_lock); |
53 | 54 | ||
54 | /*----------------- Interrupt Control & Handling -------------------*/ | 55 | /*----------------- Interrupt Control & Handling -------------------*/ |
56 | |||
57 | static void rsxx_mask_interrupts(struct rsxx_cardinfo *card) | ||
58 | { | ||
59 | card->isr_mask = 0; | ||
60 | card->ier_mask = 0; | ||
61 | } | ||
62 | |||
55 | static void __enable_intr(unsigned int *mask, unsigned int intr) | 63 | static void __enable_intr(unsigned int *mask, unsigned int intr) |
56 | { | 64 | { |
57 | *mask |= intr; | 65 | *mask |= intr; |
@@ -71,7 +79,8 @@ static void __disable_intr(unsigned int *mask, unsigned int intr) | |||
71 | */ | 79 | */ |
72 | void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr) | 80 | void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr) |
73 | { | 81 | { |
74 | if (unlikely(card->halt)) | 82 | if (unlikely(card->halt) || |
83 | unlikely(card->eeh_state)) | ||
75 | return; | 84 | return; |
76 | 85 | ||
77 | __enable_intr(&card->ier_mask, intr); | 86 | __enable_intr(&card->ier_mask, intr); |
@@ -80,6 +89,9 @@ void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr) | |||
80 | 89 | ||
81 | void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr) | 90 | void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr) |
82 | { | 91 | { |
92 | if (unlikely(card->eeh_state)) | ||
93 | return; | ||
94 | |||
83 | __disable_intr(&card->ier_mask, intr); | 95 | __disable_intr(&card->ier_mask, intr); |
84 | iowrite32(card->ier_mask, card->regmap + IER); | 96 | iowrite32(card->ier_mask, card->regmap + IER); |
85 | } | 97 | } |
@@ -87,7 +99,8 @@ void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr) | |||
87 | void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card, | 99 | void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card, |
88 | unsigned int intr) | 100 | unsigned int intr) |
89 | { | 101 | { |
90 | if (unlikely(card->halt)) | 102 | if (unlikely(card->halt) || |
103 | unlikely(card->eeh_state)) | ||
91 | return; | 104 | return; |
92 | 105 | ||
93 | __enable_intr(&card->isr_mask, intr); | 106 | __enable_intr(&card->isr_mask, intr); |
@@ -97,6 +110,9 @@ void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card, | |||
97 | void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card, | 110 | void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card, |
98 | unsigned int intr) | 111 | unsigned int intr) |
99 | { | 112 | { |
113 | if (unlikely(card->eeh_state)) | ||
114 | return; | ||
115 | |||
100 | __disable_intr(&card->isr_mask, intr); | 116 | __disable_intr(&card->isr_mask, intr); |
101 | __disable_intr(&card->ier_mask, intr); | 117 | __disable_intr(&card->ier_mask, intr); |
102 | iowrite32(card->ier_mask, card->regmap + IER); | 118 | iowrite32(card->ier_mask, card->regmap + IER); |
@@ -115,6 +131,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata) | |||
115 | do { | 131 | do { |
116 | reread_isr = 0; | 132 | reread_isr = 0; |
117 | 133 | ||
134 | if (unlikely(card->eeh_state)) | ||
135 | break; | ||
136 | |||
118 | isr = ioread32(card->regmap + ISR); | 137 | isr = ioread32(card->regmap + ISR); |
119 | if (isr == 0xffffffff) { | 138 | if (isr == 0xffffffff) { |
120 | /* | 139 | /* |
@@ -161,9 +180,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata) | |||
161 | } | 180 | } |
162 | 181 | ||
163 | /*----------------- Card Event Handler -------------------*/ | 182 | /*----------------- Card Event Handler -------------------*/ |
164 | static char *rsxx_card_state_to_str(unsigned int state) | 183 | static const char * const rsxx_card_state_to_str(unsigned int state) |
165 | { | 184 | { |
166 | static char *state_strings[] = { | 185 | static const char * const state_strings[] = { |
167 | "Unknown", "Shutdown", "Starting", "Formatting", | 186 | "Unknown", "Shutdown", "Starting", "Formatting", |
168 | "Uninitialized", "Good", "Shutting Down", | 187 | "Uninitialized", "Good", "Shutting Down", |
169 | "Fault", "Read Only Fault", "dStroying" | 188 | "Fault", "Read Only Fault", "dStroying" |
@@ -304,6 +323,192 @@ static int card_shutdown(struct rsxx_cardinfo *card) | |||
304 | return 0; | 323 | return 0; |
305 | } | 324 | } |
306 | 325 | ||
326 | static int rsxx_eeh_frozen(struct pci_dev *dev) | ||
327 | { | ||
328 | struct rsxx_cardinfo *card = pci_get_drvdata(dev); | ||
329 | int i; | ||
330 | int st; | ||
331 | |||
332 | dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n"); | ||
333 | |||
334 | card->eeh_state = 1; | ||
335 | rsxx_mask_interrupts(card); | ||
336 | |||
337 | /* | ||
338 | * We need to guarantee that the write for eeh_state and masking | ||
339 | * interrupts does not become reordered. This will prevent a possible | ||
340 | * race condition with the EEH code. | ||
341 | */ | ||
342 | wmb(); | ||
343 | |||
344 | pci_disable_device(dev); | ||
345 | |||
346 | st = rsxx_eeh_save_issued_dmas(card); | ||
347 | if (st) | ||
348 | return st; | ||
349 | |||
350 | rsxx_eeh_save_issued_creg(card); | ||
351 | |||
352 | for (i = 0; i < card->n_targets; i++) { | ||
353 | if (card->ctrl[i].status.buf) | ||
354 | pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8, | ||
355 | card->ctrl[i].status.buf, | ||
356 | card->ctrl[i].status.dma_addr); | ||
357 | if (card->ctrl[i].cmd.buf) | ||
358 | pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8, | ||
359 | card->ctrl[i].cmd.buf, | ||
360 | card->ctrl[i].cmd.dma_addr); | ||
361 | } | ||
362 | |||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | static void rsxx_eeh_failure(struct pci_dev *dev) | ||
367 | { | ||
368 | struct rsxx_cardinfo *card = pci_get_drvdata(dev); | ||
369 | int i; | ||
370 | |||
371 | dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n"); | ||
372 | |||
373 | card->eeh_state = 1; | ||
374 | |||
375 | for (i = 0; i < card->n_targets; i++) | ||
376 | del_timer_sync(&card->ctrl[i].activity_timer); | ||
377 | |||
378 | rsxx_eeh_cancel_dmas(card); | ||
379 | } | ||
380 | |||
381 | static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card) | ||
382 | { | ||
383 | unsigned int status; | ||
384 | int iter = 0; | ||
385 | |||
386 | /* We need to wait for the hardware to reset */ | ||
387 | while (iter++ < 10) { | ||
388 | status = ioread32(card->regmap + PCI_RECONFIG); | ||
389 | |||
390 | if (status & RSXX_FLUSH_BUSY) { | ||
391 | ssleep(1); | ||
392 | continue; | ||
393 | } | ||
394 | |||
395 | if (status & RSXX_FLUSH_TIMEOUT) | ||
396 | dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n"); | ||
397 | return 0; | ||
398 | } | ||
399 | |||
400 | /* Hardware failed resetting itself. */ | ||
401 | return -1; | ||
402 | } | ||
403 | |||
404 | static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev, | ||
405 | enum pci_channel_state error) | ||
406 | { | ||
407 | int st; | ||
408 | |||
409 | if (dev->revision < RSXX_EEH_SUPPORT) | ||
410 | return PCI_ERS_RESULT_NONE; | ||
411 | |||
412 | if (error == pci_channel_io_perm_failure) { | ||
413 | rsxx_eeh_failure(dev); | ||
414 | return PCI_ERS_RESULT_DISCONNECT; | ||
415 | } | ||
416 | |||
417 | st = rsxx_eeh_frozen(dev); | ||
418 | if (st) { | ||
419 | dev_err(&dev->dev, "Slot reset setup failed\n"); | ||
420 | rsxx_eeh_failure(dev); | ||
421 | return PCI_ERS_RESULT_DISCONNECT; | ||
422 | } | ||
423 | |||
424 | return PCI_ERS_RESULT_NEED_RESET; | ||
425 | } | ||
426 | |||
427 | static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev) | ||
428 | { | ||
429 | struct rsxx_cardinfo *card = pci_get_drvdata(dev); | ||
430 | unsigned long flags; | ||
431 | int i; | ||
432 | int st; | ||
433 | |||
434 | dev_warn(&dev->dev, | ||
435 | "IBM FlashSystem PCI: recovering from slot reset.\n"); | ||
436 | |||
437 | st = pci_enable_device(dev); | ||
438 | if (st) | ||
439 | goto failed_hw_setup; | ||
440 | |||
441 | pci_set_master(dev); | ||
442 | |||
443 | st = rsxx_eeh_fifo_flush_poll(card); | ||
444 | if (st) | ||
445 | goto failed_hw_setup; | ||
446 | |||
447 | rsxx_dma_queue_reset(card); | ||
448 | |||
449 | for (i = 0; i < card->n_targets; i++) { | ||
450 | st = rsxx_hw_buffers_init(dev, &card->ctrl[i]); | ||
451 | if (st) | ||
452 | goto failed_hw_buffers_init; | ||
453 | } | ||
454 | |||
455 | if (card->config_valid) | ||
456 | rsxx_dma_configure(card); | ||
457 | |||
458 | /* Clears the ISR register from spurious interrupts */ | ||
459 | st = ioread32(card->regmap + ISR); | ||
460 | |||
461 | card->eeh_state = 0; | ||
462 | |||
463 | st = rsxx_eeh_remap_dmas(card); | ||
464 | if (st) | ||
465 | goto failed_remap_dmas; | ||
466 | |||
467 | spin_lock_irqsave(&card->irq_lock, flags); | ||
468 | if (card->n_targets & RSXX_MAX_TARGETS) | ||
469 | rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G); | ||
470 | else | ||
471 | rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C); | ||
472 | spin_unlock_irqrestore(&card->irq_lock, flags); | ||
473 | |||
474 | rsxx_kick_creg_queue(card); | ||
475 | |||
476 | for (i = 0; i < card->n_targets; i++) { | ||
477 | spin_lock(&card->ctrl[i].queue_lock); | ||
478 | if (list_empty(&card->ctrl[i].queue)) { | ||
479 | spin_unlock(&card->ctrl[i].queue_lock); | ||
480 | continue; | ||
481 | } | ||
482 | spin_unlock(&card->ctrl[i].queue_lock); | ||
483 | |||
484 | queue_work(card->ctrl[i].issue_wq, | ||
485 | &card->ctrl[i].issue_dma_work); | ||
486 | } | ||
487 | |||
488 | dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n"); | ||
489 | |||
490 | return PCI_ERS_RESULT_RECOVERED; | ||
491 | |||
492 | failed_hw_buffers_init: | ||
493 | failed_remap_dmas: | ||
494 | for (i = 0; i < card->n_targets; i++) { | ||
495 | if (card->ctrl[i].status.buf) | ||
496 | pci_free_consistent(card->dev, | ||
497 | STATUS_BUFFER_SIZE8, | ||
498 | card->ctrl[i].status.buf, | ||
499 | card->ctrl[i].status.dma_addr); | ||
500 | if (card->ctrl[i].cmd.buf) | ||
501 | pci_free_consistent(card->dev, | ||
502 | COMMAND_BUFFER_SIZE8, | ||
503 | card->ctrl[i].cmd.buf, | ||
504 | card->ctrl[i].cmd.dma_addr); | ||
505 | } | ||
506 | failed_hw_setup: | ||
507 | rsxx_eeh_failure(dev); | ||
508 | return PCI_ERS_RESULT_DISCONNECT; | ||
509 | |||
510 | } | ||
511 | |||
307 | /*----------------- Driver Initialization & Setup -------------------*/ | 512 | /*----------------- Driver Initialization & Setup -------------------*/ |
308 | /* Returns: 0 if the driver is compatible with the device | 513 | /* Returns: 0 if the driver is compatible with the device |
309 | -1 if the driver is NOT compatible with the device */ | 514 | -1 if the driver is NOT compatible with the device */ |
@@ -383,6 +588,7 @@ static int rsxx_pci_probe(struct pci_dev *dev, | |||
383 | 588 | ||
384 | spin_lock_init(&card->irq_lock); | 589 | spin_lock_init(&card->irq_lock); |
385 | card->halt = 0; | 590 | card->halt = 0; |
591 | card->eeh_state = 0; | ||
386 | 592 | ||
387 | spin_lock_irq(&card->irq_lock); | 593 | spin_lock_irq(&card->irq_lock); |
388 | rsxx_disable_ier_and_isr(card, CR_INTR_ALL); | 594 | rsxx_disable_ier_and_isr(card, CR_INTR_ALL); |
@@ -538,9 +744,6 @@ static void rsxx_pci_remove(struct pci_dev *dev) | |||
538 | rsxx_disable_ier_and_isr(card, CR_INTR_EVENT); | 744 | rsxx_disable_ier_and_isr(card, CR_INTR_EVENT); |
539 | spin_unlock_irqrestore(&card->irq_lock, flags); | 745 | spin_unlock_irqrestore(&card->irq_lock, flags); |
540 | 746 | ||
541 | /* Prevent work_structs from re-queuing themselves. */ | ||
542 | card->halt = 1; | ||
543 | |||
544 | cancel_work_sync(&card->event_work); | 747 | cancel_work_sync(&card->event_work); |
545 | 748 | ||
546 | rsxx_destroy_dev(card); | 749 | rsxx_destroy_dev(card); |
@@ -549,6 +752,10 @@ static void rsxx_pci_remove(struct pci_dev *dev) | |||
549 | spin_lock_irqsave(&card->irq_lock, flags); | 752 | spin_lock_irqsave(&card->irq_lock, flags); |
550 | rsxx_disable_ier_and_isr(card, CR_INTR_ALL); | 753 | rsxx_disable_ier_and_isr(card, CR_INTR_ALL); |
551 | spin_unlock_irqrestore(&card->irq_lock, flags); | 754 | spin_unlock_irqrestore(&card->irq_lock, flags); |
755 | |||
756 | /* Prevent work_structs from re-queuing themselves. */ | ||
757 | card->halt = 1; | ||
758 | |||
552 | free_irq(dev->irq, card); | 759 | free_irq(dev->irq, card); |
553 | 760 | ||
554 | if (!force_legacy) | 761 | if (!force_legacy) |
@@ -592,11 +799,14 @@ static void rsxx_pci_shutdown(struct pci_dev *dev) | |||
592 | card_shutdown(card); | 799 | card_shutdown(card); |
593 | } | 800 | } |
594 | 801 | ||
802 | static const struct pci_error_handlers rsxx_err_handler = { | ||
803 | .error_detected = rsxx_error_detected, | ||
804 | .slot_reset = rsxx_slot_reset, | ||
805 | }; | ||
806 | |||
595 | static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = { | 807 | static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = { |
596 | {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)}, | 808 | {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)}, |
597 | {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)}, | 809 | {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)}, |
598 | {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)}, | ||
599 | {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)}, | ||
600 | {0,}, | 810 | {0,}, |
601 | }; | 811 | }; |
602 | 812 | ||
@@ -609,6 +819,7 @@ static struct pci_driver rsxx_pci_driver = { | |||
609 | .remove = rsxx_pci_remove, | 819 | .remove = rsxx_pci_remove, |
610 | .suspend = rsxx_pci_suspend, | 820 | .suspend = rsxx_pci_suspend, |
611 | .shutdown = rsxx_pci_shutdown, | 821 | .shutdown = rsxx_pci_shutdown, |
822 | .err_handler = &rsxx_err_handler, | ||
612 | }; | 823 | }; |
613 | 824 | ||
614 | static int __init rsxx_core_init(void) | 825 | static int __init rsxx_core_init(void) |
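The core.c changes above plug EEH recovery into the PCI core through a struct pci_error_handlers hooked up via the driver's .err_handler field. Below is a minimal sketch of that registration pattern; the demo_* names and the callback bodies are illustrative placeholders, not the rsxx implementations.

#include <linux/pci.h>

static pci_ers_result_t demo_error_detected(struct pci_dev *dev,
					    enum pci_channel_state state)
{
	/* A permanent failure means the device cannot be recovered. */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Otherwise quiesce the device and ask the core for a slot reset. */
	pci_disable_device(dev);
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t demo_slot_reset(struct pci_dev *dev)
{
	if (pci_enable_device(dev))
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(dev);
	/* Re-initialize rings, remap DMA and re-enable interrupts here. */
	return PCI_ERS_RESULT_RECOVERED;
}

static const struct pci_error_handlers demo_err_handler = {
	.error_detected	= demo_error_detected,
	.slot_reset	= demo_slot_reset,
};
/* Assigned to the driver's .err_handler field, as rsxx_pci_driver does above. */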
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c index 80bbe639fccd..4b5c020a0a65 100644 --- a/drivers/block/rsxx/cregs.c +++ b/drivers/block/rsxx/cregs.c | |||
@@ -58,7 +58,7 @@ static struct kmem_cache *creg_cmd_pool; | |||
58 | #error Unknown endianess!!! Aborting... | 58 | #error Unknown endianess!!! Aborting... |
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | static void copy_to_creg_data(struct rsxx_cardinfo *card, | 61 | static int copy_to_creg_data(struct rsxx_cardinfo *card, |
62 | int cnt8, | 62 | int cnt8, |
63 | void *buf, | 63 | void *buf, |
64 | unsigned int stream) | 64 | unsigned int stream) |
@@ -66,6 +66,9 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card, | |||
66 | int i = 0; | 66 | int i = 0; |
67 | u32 *data = buf; | 67 | u32 *data = buf; |
68 | 68 | ||
69 | if (unlikely(card->eeh_state)) | ||
70 | return -EIO; | ||
71 | |||
69 | for (i = 0; cnt8 > 0; i++, cnt8 -= 4) { | 72 | for (i = 0; cnt8 > 0; i++, cnt8 -= 4) { |
70 | /* | 73 | /* |
71 | * Firmware implementation makes it necessary to byte swap on | 74 | * Firmware implementation makes it necessary to byte swap on |
@@ -76,10 +79,12 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card, | |||
76 | else | 79 | else |
77 | iowrite32(data[i], card->regmap + CREG_DATA(i)); | 80 | iowrite32(data[i], card->regmap + CREG_DATA(i)); |
78 | } | 81 | } |
82 | |||
83 | return 0; | ||
79 | } | 84 | } |
80 | 85 | ||
81 | 86 | ||
82 | static void copy_from_creg_data(struct rsxx_cardinfo *card, | 87 | static int copy_from_creg_data(struct rsxx_cardinfo *card, |
83 | int cnt8, | 88 | int cnt8, |
84 | void *buf, | 89 | void *buf, |
85 | unsigned int stream) | 90 | unsigned int stream) |
@@ -87,6 +92,9 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card, | |||
87 | int i = 0; | 92 | int i = 0; |
88 | u32 *data = buf; | 93 | u32 *data = buf; |
89 | 94 | ||
95 | if (unlikely(card->eeh_state)) | ||
96 | return -EIO; | ||
97 | |||
90 | for (i = 0; cnt8 > 0; i++, cnt8 -= 4) { | 98 | for (i = 0; cnt8 > 0; i++, cnt8 -= 4) { |
91 | /* | 99 | /* |
92 | * Firmware implementation makes it necessary to byte swap on | 100 | * Firmware implementation makes it necessary to byte swap on |
@@ -97,41 +105,31 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card, | |||
97 | else | 105 | else |
98 | data[i] = ioread32(card->regmap + CREG_DATA(i)); | 106 | data[i] = ioread32(card->regmap + CREG_DATA(i)); |
99 | } | 107 | } |
100 | } | ||
101 | |||
102 | static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card) | ||
103 | { | ||
104 | struct creg_cmd *cmd; | ||
105 | 108 | ||
106 | /* | 109 | return 0; |
107 | * Spin lock is needed because this can be called in atomic/interrupt | ||
108 | * context. | ||
109 | */ | ||
110 | spin_lock_bh(&card->creg_ctrl.lock); | ||
111 | cmd = card->creg_ctrl.active_cmd; | ||
112 | card->creg_ctrl.active_cmd = NULL; | ||
113 | spin_unlock_bh(&card->creg_ctrl.lock); | ||
114 | |||
115 | return cmd; | ||
116 | } | 110 | } |
117 | 111 | ||
118 | static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd) | 112 | static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd) |
119 | { | 113 | { |
114 | int st; | ||
115 | |||
116 | if (unlikely(card->eeh_state)) | ||
117 | return; | ||
118 | |||
120 | iowrite32(cmd->addr, card->regmap + CREG_ADD); | 119 | iowrite32(cmd->addr, card->regmap + CREG_ADD); |
121 | iowrite32(cmd->cnt8, card->regmap + CREG_CNT); | 120 | iowrite32(cmd->cnt8, card->regmap + CREG_CNT); |
122 | 121 | ||
123 | if (cmd->op == CREG_OP_WRITE) { | 122 | if (cmd->op == CREG_OP_WRITE) { |
124 | if (cmd->buf) | 123 | if (cmd->buf) { |
125 | copy_to_creg_data(card, cmd->cnt8, | 124 | st = copy_to_creg_data(card, cmd->cnt8, |
126 | cmd->buf, cmd->stream); | 125 | cmd->buf, cmd->stream); |
126 | if (st) | ||
127 | return; | ||
128 | } | ||
127 | } | 129 | } |
128 | 130 | ||
129 | /* | 131 | if (unlikely(card->eeh_state)) |
130 | * Data copy must complete before initiating the command. This is | 132 | return; |
131 | * needed for weakly ordered processors (i.e. PowerPC), so that all | ||
132 | * neccessary registers are written before we kick the hardware. | ||
133 | */ | ||
134 | wmb(); | ||
135 | 133 | ||
136 | /* Setting the valid bit will kick off the command. */ | 134 | /* Setting the valid bit will kick off the command. */ |
137 | iowrite32(cmd->op, card->regmap + CREG_CMD); | 135 | iowrite32(cmd->op, card->regmap + CREG_CMD); |
@@ -196,11 +194,11 @@ static int creg_queue_cmd(struct rsxx_cardinfo *card, | |||
196 | cmd->cb_private = cb_private; | 194 | cmd->cb_private = cb_private; |
197 | cmd->status = 0; | 195 | cmd->status = 0; |
198 | 196 | ||
199 | spin_lock(&card->creg_ctrl.lock); | 197 | spin_lock_bh(&card->creg_ctrl.lock); |
200 | list_add_tail(&cmd->list, &card->creg_ctrl.queue); | 198 | list_add_tail(&cmd->list, &card->creg_ctrl.queue); |
201 | card->creg_ctrl.q_depth++; | 199 | card->creg_ctrl.q_depth++; |
202 | creg_kick_queue(card); | 200 | creg_kick_queue(card); |
203 | spin_unlock(&card->creg_ctrl.lock); | 201 | spin_unlock_bh(&card->creg_ctrl.lock); |
204 | 202 | ||
205 | return 0; | 203 | return 0; |
206 | } | 204 | } |
@@ -210,7 +208,11 @@ static void creg_cmd_timed_out(unsigned long data) | |||
210 | struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data; | 208 | struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data; |
211 | struct creg_cmd *cmd; | 209 | struct creg_cmd *cmd; |
212 | 210 | ||
213 | cmd = pop_active_cmd(card); | 211 | spin_lock(&card->creg_ctrl.lock); |
212 | cmd = card->creg_ctrl.active_cmd; | ||
213 | card->creg_ctrl.active_cmd = NULL; | ||
214 | spin_unlock(&card->creg_ctrl.lock); | ||
215 | |||
214 | if (cmd == NULL) { | 216 | if (cmd == NULL) { |
215 | card->creg_ctrl.creg_stats.creg_timeout++; | 217 | card->creg_ctrl.creg_stats.creg_timeout++; |
216 | dev_warn(CARD_TO_DEV(card), | 218 | dev_warn(CARD_TO_DEV(card), |
@@ -247,7 +249,11 @@ static void creg_cmd_done(struct work_struct *work) | |||
247 | if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0) | 249 | if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0) |
248 | card->creg_ctrl.creg_stats.failed_cancel_timer++; | 250 | card->creg_ctrl.creg_stats.failed_cancel_timer++; |
249 | 251 | ||
250 | cmd = pop_active_cmd(card); | 252 | spin_lock_bh(&card->creg_ctrl.lock); |
253 | cmd = card->creg_ctrl.active_cmd; | ||
254 | card->creg_ctrl.active_cmd = NULL; | ||
255 | spin_unlock_bh(&card->creg_ctrl.lock); | ||
256 | |||
251 | if (cmd == NULL) { | 257 | if (cmd == NULL) { |
252 | dev_err(CARD_TO_DEV(card), | 258 | dev_err(CARD_TO_DEV(card), |
253 | "Spurious creg interrupt!\n"); | 259 | "Spurious creg interrupt!\n"); |
@@ -287,7 +293,7 @@ static void creg_cmd_done(struct work_struct *work) | |||
287 | goto creg_done; | 293 | goto creg_done; |
288 | } | 294 | } |
289 | 295 | ||
290 | copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream); | 296 | st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream); |
291 | } | 297 | } |
292 | 298 | ||
293 | creg_done: | 299 | creg_done: |
@@ -296,10 +302,10 @@ creg_done: | |||
296 | 302 | ||
297 | kmem_cache_free(creg_cmd_pool, cmd); | 303 | kmem_cache_free(creg_cmd_pool, cmd); |
298 | 304 | ||
299 | spin_lock(&card->creg_ctrl.lock); | 305 | spin_lock_bh(&card->creg_ctrl.lock); |
300 | card->creg_ctrl.active = 0; | 306 | card->creg_ctrl.active = 0; |
301 | creg_kick_queue(card); | 307 | creg_kick_queue(card); |
302 | spin_unlock(&card->creg_ctrl.lock); | 308 | spin_unlock_bh(&card->creg_ctrl.lock); |
303 | } | 309 | } |
304 | 310 | ||
305 | static void creg_reset(struct rsxx_cardinfo *card) | 311 | static void creg_reset(struct rsxx_cardinfo *card) |
@@ -324,7 +330,7 @@ static void creg_reset(struct rsxx_cardinfo *card) | |||
324 | "Resetting creg interface for recovery\n"); | 330 | "Resetting creg interface for recovery\n"); |
325 | 331 | ||
326 | /* Cancel outstanding commands */ | 332 | /* Cancel outstanding commands */ |
327 | spin_lock(&card->creg_ctrl.lock); | 333 | spin_lock_bh(&card->creg_ctrl.lock); |
328 | list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) { | 334 | list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) { |
329 | list_del(&cmd->list); | 335 | list_del(&cmd->list); |
330 | card->creg_ctrl.q_depth--; | 336 | card->creg_ctrl.q_depth--; |
@@ -345,7 +351,7 @@ static void creg_reset(struct rsxx_cardinfo *card) | |||
345 | 351 | ||
346 | card->creg_ctrl.active = 0; | 352 | card->creg_ctrl.active = 0; |
347 | } | 353 | } |
348 | spin_unlock(&card->creg_ctrl.lock); | 354 | spin_unlock_bh(&card->creg_ctrl.lock); |
349 | 355 | ||
350 | card->creg_ctrl.reset = 0; | 356 | card->creg_ctrl.reset = 0; |
351 | spin_lock_irqsave(&card->irq_lock, flags); | 357 | spin_lock_irqsave(&card->irq_lock, flags); |
@@ -399,12 +405,12 @@ static int __issue_creg_rw(struct rsxx_cardinfo *card, | |||
399 | return st; | 405 | return st; |
400 | 406 | ||
401 | /* | 407 | /* |
402 | * This timeout is neccessary for unresponsive hardware. The additional | 408 | * This timeout is necessary for unresponsive hardware. The additional |
403 | * 20 seconds is used to guarantee that each cregs request has time to | 409 | * 20 seconds is used to guarantee that each cregs request has time to |
404 | * complete. | 410 | * complete. |
405 | */ | 411 | */ |
406 | timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC * | 412 | timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC * |
407 | card->creg_ctrl.q_depth) + 20000); | 413 | card->creg_ctrl.q_depth + 20000); |
408 | 414 | ||
409 | /* | 415 | /* |
410 | * The creg interface is guaranteed to complete. It has a timeout | 416 | * The creg interface is guaranteed to complete. It has a timeout |
@@ -690,6 +696,32 @@ int rsxx_reg_access(struct rsxx_cardinfo *card, | |||
690 | return 0; | 696 | return 0; |
691 | } | 697 | } |
692 | 698 | ||
699 | void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card) | ||
700 | { | ||
701 | struct creg_cmd *cmd = NULL; | ||
702 | |||
703 | cmd = card->creg_ctrl.active_cmd; | ||
704 | card->creg_ctrl.active_cmd = NULL; | ||
705 | |||
706 | if (cmd) { | ||
707 | del_timer_sync(&card->creg_ctrl.cmd_timer); | ||
708 | |||
709 | spin_lock_bh(&card->creg_ctrl.lock); | ||
710 | list_add(&cmd->list, &card->creg_ctrl.queue); | ||
711 | card->creg_ctrl.q_depth++; | ||
712 | card->creg_ctrl.active = 0; | ||
713 | spin_unlock_bh(&card->creg_ctrl.lock); | ||
714 | } | ||
715 | } | ||
716 | |||
717 | void rsxx_kick_creg_queue(struct rsxx_cardinfo *card) | ||
718 | { | ||
719 | spin_lock_bh(&card->creg_ctrl.lock); | ||
720 | if (!list_empty(&card->creg_ctrl.queue)) | ||
721 | creg_kick_queue(card); | ||
722 | spin_unlock_bh(&card->creg_ctrl.lock); | ||
723 | } | ||
724 | |||
693 | /*------------ Initialization & Setup --------------*/ | 725 | /*------------ Initialization & Setup --------------*/ |
694 | int rsxx_creg_setup(struct rsxx_cardinfo *card) | 726 | int rsxx_creg_setup(struct rsxx_cardinfo *card) |
695 | { | 727 | { |
@@ -712,7 +744,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card) | |||
712 | int cnt = 0; | 744 | int cnt = 0; |
713 | 745 | ||
714 | /* Cancel outstanding commands */ | 746 | /* Cancel outstanding commands */ |
715 | spin_lock(&card->creg_ctrl.lock); | 747 | spin_lock_bh(&card->creg_ctrl.lock); |
716 | list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) { | 748 | list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) { |
717 | list_del(&cmd->list); | 749 | list_del(&cmd->list); |
718 | if (cmd->cb) | 750 | if (cmd->cb) |
@@ -737,7 +769,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card) | |||
737 | "Canceled active creg command\n"); | 769 | "Canceled active creg command\n"); |
738 | kmem_cache_free(creg_cmd_pool, cmd); | 770 | kmem_cache_free(creg_cmd_pool, cmd); |
739 | } | 771 | } |
740 | spin_unlock(&card->creg_ctrl.lock); | 772 | spin_unlock_bh(&card->creg_ctrl.lock); |
741 | 773 | ||
742 | cancel_work_sync(&card->creg_ctrl.done_work); | 774 | cancel_work_sync(&card->creg_ctrl.done_work); |
743 | } | 775 | } |
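The cregs.c hunks convert the process-context users of creg_ctrl.lock from spin_lock() to spin_lock_bh(), because the command timeout timer takes the same lock from softirq context. A rough sketch of that locking pattern follows, with hypothetical demo_* names standing in for the driver's structures.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct demo_cmd {
	struct list_head list;
};

struct demo_ctrl {
	spinlock_t		lock;
	struct list_head	queue;
	struct timer_list	cmd_timer;
};

/* Process context: _bh disables softirqs so the timer below cannot run
 * on this CPU and spin forever on the lock we are already holding. */
static void demo_queue_cmd(struct demo_ctrl *ctrl, struct demo_cmd *cmd)
{
	spin_lock_bh(&ctrl->lock);
	list_add_tail(&cmd->list, &ctrl->queue);
	spin_unlock_bh(&ctrl->lock);
}

/* Timer callback: already runs in softirq context, so a plain
 * spin_lock() suffices (which is why creg_cmd_timed_out keeps it). */
static void demo_cmd_timed_out(unsigned long data)
{
	struct demo_ctrl *ctrl = (struct demo_ctrl *)data;

	spin_lock(&ctrl->lock);
	/* Pull and fail the active command. */
	spin_unlock(&ctrl->lock);
}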
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c index 63176e67662f..0607513cfb41 100644 --- a/drivers/block/rsxx/dma.c +++ b/drivers/block/rsxx/dma.c | |||
@@ -28,7 +28,7 @@ | |||
28 | struct rsxx_dma { | 28 | struct rsxx_dma { |
29 | struct list_head list; | 29 | struct list_head list; |
30 | u8 cmd; | 30 | u8 cmd; |
31 | unsigned int laddr; /* Logical address on the ramsan */ | 31 | unsigned int laddr; /* Logical address */ |
32 | struct { | 32 | struct { |
33 | u32 off; | 33 | u32 off; |
34 | u32 cnt; | 34 | u32 cnt; |
@@ -81,9 +81,6 @@ enum rsxx_hw_status { | |||
81 | HW_STATUS_FAULT = 0x08, | 81 | HW_STATUS_FAULT = 0x08, |
82 | }; | 82 | }; |
83 | 83 | ||
84 | #define STATUS_BUFFER_SIZE8 4096 | ||
85 | #define COMMAND_BUFFER_SIZE8 4096 | ||
86 | |||
87 | static struct kmem_cache *rsxx_dma_pool; | 84 | static struct kmem_cache *rsxx_dma_pool; |
88 | 85 | ||
89 | struct dma_tracker { | 86 | struct dma_tracker { |
@@ -122,7 +119,7 @@ static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8) | |||
122 | return tgt; | 119 | return tgt; |
123 | } | 120 | } |
124 | 121 | ||
125 | static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card) | 122 | void rsxx_dma_queue_reset(struct rsxx_cardinfo *card) |
126 | { | 123 | { |
127 | /* Reset all DMA Command/Status Queues */ | 124 | /* Reset all DMA Command/Status Queues */ |
128 | iowrite32(DMA_QUEUE_RESET, card->regmap + RESET); | 125 | iowrite32(DMA_QUEUE_RESET, card->regmap + RESET); |
@@ -210,7 +207,8 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card) | |||
210 | u32 q_depth = 0; | 207 | u32 q_depth = 0; |
211 | u32 intr_coal; | 208 | u32 intr_coal; |
212 | 209 | ||
213 | if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE) | 210 | if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE || |
211 | unlikely(card->eeh_state)) | ||
214 | return; | 212 | return; |
215 | 213 | ||
216 | for (i = 0; i < card->n_targets; i++) | 214 | for (i = 0; i < card->n_targets; i++) |
@@ -223,31 +221,26 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card) | |||
223 | } | 221 | } |
224 | 222 | ||
225 | /*----------------- RSXX DMA Handling -------------------*/ | 223 | /*----------------- RSXX DMA Handling -------------------*/ |
226 | static void rsxx_complete_dma(struct rsxx_cardinfo *card, | 224 | static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl, |
227 | struct rsxx_dma *dma, | 225 | struct rsxx_dma *dma, |
228 | unsigned int status) | 226 | unsigned int status) |
229 | { | 227 | { |
230 | if (status & DMA_SW_ERR) | 228 | if (status & DMA_SW_ERR) |
231 | printk_ratelimited(KERN_ERR | 229 | ctrl->stats.dma_sw_err++; |
232 | "SW Error in DMA(cmd x%02x, laddr x%08x)\n", | ||
233 | dma->cmd, dma->laddr); | ||
234 | if (status & DMA_HW_FAULT) | 230 | if (status & DMA_HW_FAULT) |
235 | printk_ratelimited(KERN_ERR | 231 | ctrl->stats.dma_hw_fault++; |
236 | "HW Fault in DMA(cmd x%02x, laddr x%08x)\n", | ||
237 | dma->cmd, dma->laddr); | ||
238 | if (status & DMA_CANCELLED) | 232 | if (status & DMA_CANCELLED) |
239 | printk_ratelimited(KERN_ERR | 233 | ctrl->stats.dma_cancelled++; |
240 | "DMA Cancelled(cmd x%02x, laddr x%08x)\n", | ||
241 | dma->cmd, dma->laddr); | ||
242 | 234 | ||
243 | if (dma->dma_addr) | 235 | if (dma->dma_addr) |
244 | pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma), | 236 | pci_unmap_page(ctrl->card->dev, dma->dma_addr, |
237 | get_dma_size(dma), | ||
245 | dma->cmd == HW_CMD_BLK_WRITE ? | 238 | dma->cmd == HW_CMD_BLK_WRITE ? |
246 | PCI_DMA_TODEVICE : | 239 | PCI_DMA_TODEVICE : |
247 | PCI_DMA_FROMDEVICE); | 240 | PCI_DMA_FROMDEVICE); |
248 | 241 | ||
249 | if (dma->cb) | 242 | if (dma->cb) |
250 | dma->cb(card, dma->cb_data, status ? 1 : 0); | 243 | dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0); |
251 | 244 | ||
252 | kmem_cache_free(rsxx_dma_pool, dma); | 245 | kmem_cache_free(rsxx_dma_pool, dma); |
253 | } | 246 | } |
@@ -330,14 +323,15 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl, | |||
330 | if (requeue_cmd) | 323 | if (requeue_cmd) |
331 | rsxx_requeue_dma(ctrl, dma); | 324 | rsxx_requeue_dma(ctrl, dma); |
332 | else | 325 | else |
333 | rsxx_complete_dma(ctrl->card, dma, status); | 326 | rsxx_complete_dma(ctrl, dma, status); |
334 | } | 327 | } |
335 | 328 | ||
336 | static void dma_engine_stalled(unsigned long data) | 329 | static void dma_engine_stalled(unsigned long data) |
337 | { | 330 | { |
338 | struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data; | 331 | struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data; |
339 | 332 | ||
340 | if (atomic_read(&ctrl->stats.hw_q_depth) == 0) | 333 | if (atomic_read(&ctrl->stats.hw_q_depth) == 0 || |
334 | unlikely(ctrl->card->eeh_state)) | ||
341 | return; | 335 | return; |
342 | 336 | ||
343 | if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) { | 337 | if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) { |
@@ -369,7 +363,8 @@ static void rsxx_issue_dmas(struct work_struct *work) | |||
369 | ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work); | 363 | ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work); |
370 | hw_cmd_buf = ctrl->cmd.buf; | 364 | hw_cmd_buf = ctrl->cmd.buf; |
371 | 365 | ||
372 | if (unlikely(ctrl->card->halt)) | 366 | if (unlikely(ctrl->card->halt) || |
367 | unlikely(ctrl->card->eeh_state)) | ||
373 | return; | 368 | return; |
374 | 369 | ||
375 | while (1) { | 370 | while (1) { |
@@ -397,7 +392,7 @@ static void rsxx_issue_dmas(struct work_struct *work) | |||
397 | */ | 392 | */ |
398 | if (unlikely(ctrl->card->dma_fault)) { | 393 | if (unlikely(ctrl->card->dma_fault)) { |
399 | push_tracker(ctrl->trackers, tag); | 394 | push_tracker(ctrl->trackers, tag); |
400 | rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED); | 395 | rsxx_complete_dma(ctrl, dma, DMA_CANCELLED); |
401 | continue; | 396 | continue; |
402 | } | 397 | } |
403 | 398 | ||
@@ -432,19 +427,15 @@ static void rsxx_issue_dmas(struct work_struct *work) | |||
432 | 427 | ||
433 | /* Let HW know we've queued commands. */ | 428 | /* Let HW know we've queued commands. */ |
434 | if (cmds_pending) { | 429 | if (cmds_pending) { |
435 | /* | ||
436 | * We must guarantee that the CPU writes to 'ctrl->cmd.buf' | ||
437 | * (which is in PCI-consistent system-memory) from the loop | ||
438 | * above make it into the coherency domain before the | ||
439 | * following PIO "trigger" updating the cmd.idx. A WMB is | ||
440 | * sufficient. We need not explicitly CPU cache-flush since | ||
441 | * the memory is a PCI-consistent (ie; coherent) mapping. | ||
442 | */ | ||
443 | wmb(); | ||
444 | |||
445 | atomic_add(cmds_pending, &ctrl->stats.hw_q_depth); | 430 | atomic_add(cmds_pending, &ctrl->stats.hw_q_depth); |
446 | mod_timer(&ctrl->activity_timer, | 431 | mod_timer(&ctrl->activity_timer, |
447 | jiffies + DMA_ACTIVITY_TIMEOUT); | 432 | jiffies + DMA_ACTIVITY_TIMEOUT); |
433 | |||
434 | if (unlikely(ctrl->card->eeh_state)) { | ||
435 | del_timer_sync(&ctrl->activity_timer); | ||
436 | return; | ||
437 | } | ||
438 | |||
448 | iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); | 439 | iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); |
449 | } | 440 | } |
450 | } | 441 | } |
@@ -463,7 +454,8 @@ static void rsxx_dma_done(struct work_struct *work) | |||
463 | hw_st_buf = ctrl->status.buf; | 454 | hw_st_buf = ctrl->status.buf; |
464 | 455 | ||
465 | if (unlikely(ctrl->card->halt) || | 456 | if (unlikely(ctrl->card->halt) || |
466 | unlikely(ctrl->card->dma_fault)) | 457 | unlikely(ctrl->card->dma_fault) || |
458 | unlikely(ctrl->card->eeh_state)) | ||
467 | return; | 459 | return; |
468 | 460 | ||
469 | count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count); | 461 | count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count); |
@@ -508,7 +500,7 @@ static void rsxx_dma_done(struct work_struct *work) | |||
508 | if (status) | 500 | if (status) |
509 | rsxx_handle_dma_error(ctrl, dma, status); | 501 | rsxx_handle_dma_error(ctrl, dma, status); |
510 | else | 502 | else |
511 | rsxx_complete_dma(ctrl->card, dma, 0); | 503 | rsxx_complete_dma(ctrl, dma, 0); |
512 | 504 | ||
513 | push_tracker(ctrl->trackers, tag); | 505 | push_tracker(ctrl->trackers, tag); |
514 | 506 | ||
@@ -727,20 +719,54 @@ bvec_err: | |||
727 | 719 | ||
728 | 720 | ||
729 | /*----------------- DMA Engine Initialization & Setup -------------------*/ | 721 | /*----------------- DMA Engine Initialization & Setup -------------------*/ |
722 | int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl) | ||
723 | { | ||
724 | ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8, | ||
725 | &ctrl->status.dma_addr); | ||
726 | ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8, | ||
727 | &ctrl->cmd.dma_addr); | ||
728 | if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL) | ||
729 | return -ENOMEM; | ||
730 | |||
731 | memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8); | ||
732 | iowrite32(lower_32_bits(ctrl->status.dma_addr), | ||
733 | ctrl->regmap + SB_ADD_LO); | ||
734 | iowrite32(upper_32_bits(ctrl->status.dma_addr), | ||
735 | ctrl->regmap + SB_ADD_HI); | ||
736 | |||
737 | memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8); | ||
738 | iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO); | ||
739 | iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI); | ||
740 | |||
741 | ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT); | ||
742 | if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) { | ||
743 | dev_crit(&dev->dev, "Failed reading status cnt x%x\n", | ||
744 | ctrl->status.idx); | ||
745 | return -EINVAL; | ||
746 | } | ||
747 | iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT); | ||
748 | iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT); | ||
749 | |||
750 | ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX); | ||
751 | if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) { | ||
752 | dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n", | ||
753 | ctrl->status.idx); | ||
754 | return -EINVAL; | ||
755 | } | ||
756 | iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX); | ||
757 | iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); | ||
758 | |||
759 | return 0; | ||
760 | } | ||
761 | |||
730 | static int rsxx_dma_ctrl_init(struct pci_dev *dev, | 762 | static int rsxx_dma_ctrl_init(struct pci_dev *dev, |
731 | struct rsxx_dma_ctrl *ctrl) | 763 | struct rsxx_dma_ctrl *ctrl) |
732 | { | 764 | { |
733 | int i; | 765 | int i; |
766 | int st; | ||
734 | 767 | ||
735 | memset(&ctrl->stats, 0, sizeof(ctrl->stats)); | 768 | memset(&ctrl->stats, 0, sizeof(ctrl->stats)); |
736 | 769 | ||
737 | ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8, | ||
738 | &ctrl->status.dma_addr); | ||
739 | ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8, | ||
740 | &ctrl->cmd.dma_addr); | ||
741 | if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL) | ||
742 | return -ENOMEM; | ||
743 | |||
744 | ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8); | 770 | ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8); |
745 | if (!ctrl->trackers) | 771 | if (!ctrl->trackers) |
746 | return -ENOMEM; | 772 | return -ENOMEM; |
@@ -770,35 +796,9 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev, | |||
770 | INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas); | 796 | INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas); |
771 | INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done); | 797 | INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done); |
772 | 798 | ||
773 | memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8); | 799 | st = rsxx_hw_buffers_init(dev, ctrl); |
774 | iowrite32(lower_32_bits(ctrl->status.dma_addr), | 800 | if (st) |
775 | ctrl->regmap + SB_ADD_LO); | 801 | return st; |
776 | iowrite32(upper_32_bits(ctrl->status.dma_addr), | ||
777 | ctrl->regmap + SB_ADD_HI); | ||
778 | |||
779 | memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8); | ||
780 | iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO); | ||
781 | iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI); | ||
782 | |||
783 | ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT); | ||
784 | if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) { | ||
785 | dev_crit(&dev->dev, "Failed reading status cnt x%x\n", | ||
786 | ctrl->status.idx); | ||
787 | return -EINVAL; | ||
788 | } | ||
789 | iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT); | ||
790 | iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT); | ||
791 | |||
792 | ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX); | ||
793 | if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) { | ||
794 | dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n", | ||
795 | ctrl->status.idx); | ||
796 | return -EINVAL; | ||
797 | } | ||
798 | iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX); | ||
799 | iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX); | ||
800 | |||
801 | wmb(); | ||
802 | 802 | ||
803 | return 0; | 803 | return 0; |
804 | } | 804 | } |
@@ -834,7 +834,7 @@ static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card, | |||
834 | return 0; | 834 | return 0; |
835 | } | 835 | } |
836 | 836 | ||
837 | static int rsxx_dma_configure(struct rsxx_cardinfo *card) | 837 | int rsxx_dma_configure(struct rsxx_cardinfo *card) |
838 | { | 838 | { |
839 | u32 intr_coal; | 839 | u32 intr_coal; |
840 | 840 | ||
@@ -980,6 +980,103 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card) | |||
980 | } | 980 | } |
981 | } | 981 | } |
982 | 982 | ||
983 | int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card) | ||
984 | { | ||
985 | int i; | ||
986 | int j; | ||
987 | int cnt; | ||
988 | struct rsxx_dma *dma; | ||
989 | struct list_head *issued_dmas; | ||
990 | |||
991 | issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets, | ||
992 | GFP_KERNEL); | ||
993 | if (!issued_dmas) | ||
994 | return -ENOMEM; | ||
995 | |||
996 | for (i = 0; i < card->n_targets; i++) { | ||
997 | INIT_LIST_HEAD(&issued_dmas[i]); | ||
998 | cnt = 0; | ||
999 | for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) { | ||
1000 | dma = get_tracker_dma(card->ctrl[i].trackers, j); | ||
1001 | if (dma == NULL) | ||
1002 | continue; | ||
1003 | |||
1004 | if (dma->cmd == HW_CMD_BLK_WRITE) | ||
1005 | card->ctrl[i].stats.writes_issued--; | ||
1006 | else if (dma->cmd == HW_CMD_BLK_DISCARD) | ||
1007 | card->ctrl[i].stats.discards_issued--; | ||
1008 | else | ||
1009 | card->ctrl[i].stats.reads_issued--; | ||
1010 | |||
1011 | list_add_tail(&dma->list, &issued_dmas[i]); | ||
1012 | push_tracker(card->ctrl[i].trackers, j); | ||
1013 | cnt++; | ||
1014 | } | ||
1015 | |||
1016 | spin_lock(&card->ctrl[i].queue_lock); | ||
1017 | list_splice(&issued_dmas[i], &card->ctrl[i].queue); | ||
1018 | |||
1019 | atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth); | ||
1020 | card->ctrl[i].stats.sw_q_depth += cnt; | ||
1021 | card->ctrl[i].e_cnt = 0; | ||
1022 | |||
1023 | list_for_each_entry(dma, &card->ctrl[i].queue, list) { | ||
1024 | if (dma->dma_addr) | ||
1025 | pci_unmap_page(card->dev, dma->dma_addr, | ||
1026 | get_dma_size(dma), | ||
1027 | dma->cmd == HW_CMD_BLK_WRITE ? | ||
1028 | PCI_DMA_TODEVICE : | ||
1029 | PCI_DMA_FROMDEVICE); | ||
1030 | } | ||
1031 | spin_unlock(&card->ctrl[i].queue_lock); | ||
1032 | } | ||
1033 | |||
1034 | kfree(issued_dmas); | ||
1035 | |||
1036 | return 0; | ||
1037 | } | ||
1038 | |||
1039 | void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card) | ||
1040 | { | ||
1041 | struct rsxx_dma *dma; | ||
1042 | struct rsxx_dma *tmp; | ||
1043 | int i; | ||
1044 | |||
1045 | for (i = 0; i < card->n_targets; i++) { | ||
1046 | spin_lock(&card->ctrl[i].queue_lock); | ||
1047 | list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) { | ||
1048 | list_del(&dma->list); | ||
1049 | |||
1050 | rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED); | ||
1051 | } | ||
1052 | spin_unlock(&card->ctrl[i].queue_lock); | ||
1053 | } | ||
1054 | } | ||
1055 | |||
1056 | int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card) | ||
1057 | { | ||
1058 | struct rsxx_dma *dma; | ||
1059 | int i; | ||
1060 | |||
1061 | for (i = 0; i < card->n_targets; i++) { | ||
1062 | spin_lock(&card->ctrl[i].queue_lock); | ||
1063 | list_for_each_entry(dma, &card->ctrl[i].queue, list) { | ||
1064 | dma->dma_addr = pci_map_page(card->dev, dma->page, | ||
1065 | dma->pg_off, get_dma_size(dma), | ||
1066 | dma->cmd == HW_CMD_BLK_WRITE ? | ||
1067 | PCI_DMA_TODEVICE : | ||
1068 | PCI_DMA_FROMDEVICE); | ||
1069 | if (!dma->dma_addr) { | ||
1070 | spin_unlock(&card->ctrl[i].queue_lock); | ||
1071 | kmem_cache_free(rsxx_dma_pool, dma); | ||
1072 | return -ENOMEM; | ||
1073 | } | ||
1074 | } | ||
1075 | spin_unlock(&card->ctrl[i].queue_lock); | ||
1076 | } | ||
1077 | |||
1078 | return 0; | ||
1079 | } | ||
983 | 1080 | ||
984 | int rsxx_dma_init(void) | 1081 | int rsxx_dma_init(void) |
985 | { | 1082 | { |
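Read together with the slot_reset handler earlier in the patch, the new dma.c helpers form a recovery sequence roughly like the condensed sketch below. The demo_eeh_recover() wrapper is illustrative only: in the driver the first step is presumably run from the error_detected path (not fully visible in this hunk) and the rest from rsxx_slot_reset(), with locking and most error handling elided.

#include "rsxx_priv.h"	/* rsxx_cardinfo and the helpers declared in this patch */

static int demo_eeh_recover(struct rsxx_cardinfo *card, struct pci_dev *dev)
{
	int i;
	int st;

	/* Pull in-flight DMAs off the hardware trackers and back onto the
	 * software queues, unmapping their pages while the slot is frozen. */
	st = rsxx_eeh_save_issued_dmas(card);
	if (st)
		return st;

	/* After the slot reset: reset the queues and rebuild the
	 * per-channel command/status rings. */
	rsxx_dma_queue_reset(card);
	for (i = 0; i < card->n_targets; i++) {
		st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
		if (st)
			return st;
	}

	if (card->config_valid)
		rsxx_dma_configure(card);

	/* Remap the queued DMA pages; the issue workers can then rerun. */
	return rsxx_eeh_remap_dmas(card);
}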
diff --git a/drivers/block/rsxx/rsxx.h b/drivers/block/rsxx/rsxx.h index 2e50b65902b7..24ba3642bd89 100644 --- a/drivers/block/rsxx/rsxx.h +++ b/drivers/block/rsxx/rsxx.h | |||
@@ -27,15 +27,17 @@ | |||
27 | 27 | ||
28 | /*----------------- IOCTL Definitions -------------------*/ | 28 | /*----------------- IOCTL Definitions -------------------*/ |
29 | 29 | ||
30 | #define RSXX_MAX_DATA 8 | ||
31 | |||
30 | struct rsxx_reg_access { | 32 | struct rsxx_reg_access { |
31 | __u32 addr; | 33 | __u32 addr; |
32 | __u32 cnt; | 34 | __u32 cnt; |
33 | __u32 stat; | 35 | __u32 stat; |
34 | __u32 stream; | 36 | __u32 stream; |
35 | __u32 data[8]; | 37 | __u32 data[RSXX_MAX_DATA]; |
36 | }; | 38 | }; |
37 | 39 | ||
38 | #define RSXX_MAX_REG_CNT (8 * (sizeof(__u32))) | 40 | #define RSXX_MAX_REG_CNT (RSXX_MAX_DATA * (sizeof(__u32))) |
39 | 41 | ||
40 | #define RSXX_IOC_MAGIC 'r' | 42 | #define RSXX_IOC_MAGIC 'r' |
41 | 43 | ||
diff --git a/drivers/block/rsxx/rsxx_cfg.h b/drivers/block/rsxx/rsxx_cfg.h index c025fe5fdb70..f384c943846d 100644 --- a/drivers/block/rsxx/rsxx_cfg.h +++ b/drivers/block/rsxx/rsxx_cfg.h | |||
@@ -58,7 +58,7 @@ struct rsxx_card_cfg { | |||
58 | }; | 58 | }; |
59 | 59 | ||
60 | /* Vendor ID Values */ | 60 | /* Vendor ID Values */ |
61 | #define RSXX_VENDOR_ID_TMS_IBM 0 | 61 | #define RSXX_VENDOR_ID_IBM 0 |
62 | #define RSXX_VENDOR_ID_DSI 1 | 62 | #define RSXX_VENDOR_ID_DSI 1 |
63 | #define RSXX_VENDOR_COUNT 2 | 63 | #define RSXX_VENDOR_COUNT 2 |
64 | 64 | ||
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h index a1ac907d8f4c..382e8bf5c03b 100644 --- a/drivers/block/rsxx/rsxx_priv.h +++ b/drivers/block/rsxx/rsxx_priv.h | |||
@@ -45,16 +45,13 @@ | |||
45 | 45 | ||
46 | struct proc_cmd; | 46 | struct proc_cmd; |
47 | 47 | ||
48 | #define PCI_VENDOR_ID_TMS_IBM 0x15B6 | 48 | #define PCI_DEVICE_ID_FS70_FLASH 0x04A9 |
49 | #define PCI_DEVICE_ID_RS70_FLASH 0x0019 | 49 | #define PCI_DEVICE_ID_FS80_FLASH 0x04AA |
50 | #define PCI_DEVICE_ID_RS70D_FLASH 0x001A | ||
51 | #define PCI_DEVICE_ID_RS80_FLASH 0x001C | ||
52 | #define PCI_DEVICE_ID_RS81_FLASH 0x001E | ||
53 | 50 | ||
54 | #define RS70_PCI_REV_SUPPORTED 4 | 51 | #define RS70_PCI_REV_SUPPORTED 4 |
55 | 52 | ||
56 | #define DRIVER_NAME "rsxx" | 53 | #define DRIVER_NAME "rsxx" |
57 | #define DRIVER_VERSION "3.7" | 54 | #define DRIVER_VERSION "4.0" |
58 | 55 | ||
59 | /* Block size is 4096 */ | 56 | /* Block size is 4096 */ |
60 | #define RSXX_HW_BLK_SHIFT 12 | 57 | #define RSXX_HW_BLK_SHIFT 12 |
@@ -67,6 +64,9 @@ struct proc_cmd; | |||
67 | #define RSXX_MAX_OUTSTANDING_CMDS 255 | 64 | #define RSXX_MAX_OUTSTANDING_CMDS 255 |
68 | #define RSXX_CS_IDX_MASK 0xff | 65 | #define RSXX_CS_IDX_MASK 0xff |
69 | 66 | ||
67 | #define STATUS_BUFFER_SIZE8 4096 | ||
68 | #define COMMAND_BUFFER_SIZE8 4096 | ||
69 | |||
70 | #define RSXX_MAX_TARGETS 8 | 70 | #define RSXX_MAX_TARGETS 8 |
71 | 71 | ||
72 | struct dma_tracker_list; | 72 | struct dma_tracker_list; |
@@ -91,6 +91,9 @@ struct rsxx_dma_stats { | |||
91 | u32 discards_failed; | 91 | u32 discards_failed; |
92 | u32 done_rescheduled; | 92 | u32 done_rescheduled; |
93 | u32 issue_rescheduled; | 93 | u32 issue_rescheduled; |
94 | u32 dma_sw_err; | ||
95 | u32 dma_hw_fault; | ||
96 | u32 dma_cancelled; | ||
94 | u32 sw_q_depth; /* Number of DMAs on the SW queue. */ | 97 | u32 sw_q_depth; /* Number of DMAs on the SW queue. */ |
95 | atomic_t hw_q_depth; /* Number of DMAs queued to HW. */ | 98 | atomic_t hw_q_depth; /* Number of DMAs queued to HW. */ |
96 | }; | 99 | }; |
@@ -116,6 +119,7 @@ struct rsxx_dma_ctrl { | |||
116 | struct rsxx_cardinfo { | 119 | struct rsxx_cardinfo { |
117 | struct pci_dev *dev; | 120 | struct pci_dev *dev; |
118 | unsigned int halt; | 121 | unsigned int halt; |
122 | unsigned int eeh_state; | ||
119 | 123 | ||
120 | void __iomem *regmap; | 124 | void __iomem *regmap; |
121 | spinlock_t irq_lock; | 125 | spinlock_t irq_lock; |
@@ -224,6 +228,7 @@ enum rsxx_pci_regmap { | |||
224 | PERF_RD512_HI = 0xac, | 228 | PERF_RD512_HI = 0xac, |
225 | PERF_WR512_LO = 0xb0, | 229 | PERF_WR512_LO = 0xb0, |
226 | PERF_WR512_HI = 0xb4, | 230 | PERF_WR512_HI = 0xb4, |
231 | PCI_RECONFIG = 0xb8, | ||
227 | }; | 232 | }; |
228 | 233 | ||
229 | enum rsxx_intr { | 234 | enum rsxx_intr { |
@@ -237,6 +242,8 @@ enum rsxx_intr { | |||
237 | CR_INTR_DMA5 = 0x00000080, | 242 | CR_INTR_DMA5 = 0x00000080, |
238 | CR_INTR_DMA6 = 0x00000100, | 243 | CR_INTR_DMA6 = 0x00000100, |
239 | CR_INTR_DMA7 = 0x00000200, | 244 | CR_INTR_DMA7 = 0x00000200, |
245 | CR_INTR_ALL_C = 0x0000003f, | ||
246 | CR_INTR_ALL_G = 0x000003ff, | ||
240 | CR_INTR_DMA_ALL = 0x000003f5, | 247 | CR_INTR_DMA_ALL = 0x000003f5, |
241 | CR_INTR_ALL = 0xffffffff, | 248 | CR_INTR_ALL = 0xffffffff, |
242 | }; | 249 | }; |
@@ -253,8 +260,14 @@ enum rsxx_pci_reset { | |||
253 | DMA_QUEUE_RESET = 0x00000001, | 260 | DMA_QUEUE_RESET = 0x00000001, |
254 | }; | 261 | }; |
255 | 262 | ||
263 | enum rsxx_hw_fifo_flush { | ||
264 | RSXX_FLUSH_BUSY = 0x00000002, | ||
265 | RSXX_FLUSH_TIMEOUT = 0x00000004, | ||
266 | }; | ||
267 | |||
256 | enum rsxx_pci_revision { | 268 | enum rsxx_pci_revision { |
257 | RSXX_DISCARD_SUPPORT = 2, | 269 | RSXX_DISCARD_SUPPORT = 2, |
270 | RSXX_EEH_SUPPORT = 3, | ||
258 | }; | 271 | }; |
259 | 272 | ||
260 | enum rsxx_creg_cmd { | 273 | enum rsxx_creg_cmd { |
@@ -360,11 +373,17 @@ int rsxx_dma_setup(struct rsxx_cardinfo *card); | |||
360 | void rsxx_dma_destroy(struct rsxx_cardinfo *card); | 373 | void rsxx_dma_destroy(struct rsxx_cardinfo *card); |
361 | int rsxx_dma_init(void); | 374 | int rsxx_dma_init(void); |
362 | void rsxx_dma_cleanup(void); | 375 | void rsxx_dma_cleanup(void); |
376 | void rsxx_dma_queue_reset(struct rsxx_cardinfo *card); | ||
377 | int rsxx_dma_configure(struct rsxx_cardinfo *card); | ||
363 | int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, | 378 | int rsxx_dma_queue_bio(struct rsxx_cardinfo *card, |
364 | struct bio *bio, | 379 | struct bio *bio, |
365 | atomic_t *n_dmas, | 380 | atomic_t *n_dmas, |
366 | rsxx_dma_cb cb, | 381 | rsxx_dma_cb cb, |
367 | void *cb_data); | 382 | void *cb_data); |
383 | int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl); | ||
384 | int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card); | ||
385 | void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card); | ||
386 | int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card); | ||
368 | 387 | ||
369 | /***** cregs.c *****/ | 388 | /***** cregs.c *****/ |
370 | int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr, | 389 | int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr, |
@@ -389,10 +408,11 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card); | |||
389 | void rsxx_creg_destroy(struct rsxx_cardinfo *card); | 408 | void rsxx_creg_destroy(struct rsxx_cardinfo *card); |
390 | int rsxx_creg_init(void); | 409 | int rsxx_creg_init(void); |
391 | void rsxx_creg_cleanup(void); | 410 | void rsxx_creg_cleanup(void); |
392 | |||
393 | int rsxx_reg_access(struct rsxx_cardinfo *card, | 411 | int rsxx_reg_access(struct rsxx_cardinfo *card, |
394 | struct rsxx_reg_access __user *ucmd, | 412 | struct rsxx_reg_access __user *ucmd, |
395 | int read); | 413 | int read); |
414 | void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card); | ||
415 | void rsxx_kick_creg_queue(struct rsxx_cardinfo *card); | ||
396 | 416 | ||
397 | 417 | ||
398 | 418 | ||
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index de1f319f7bd7..dd5b2fed97e9 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
@@ -164,7 +164,7 @@ static void make_response(struct xen_blkif *blkif, u64 id, | |||
164 | 164 | ||
165 | #define foreach_grant_safe(pos, n, rbtree, node) \ | 165 | #define foreach_grant_safe(pos, n, rbtree, node) \ |
166 | for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ | 166 | for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \ |
167 | (n) = rb_next(&(pos)->node); \ | 167 | (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \ |
168 | &(pos)->node != NULL; \ | 168 | &(pos)->node != NULL; \ |
169 | (pos) = container_of(n, typeof(*(pos)), node), \ | 169 | (pos) = container_of(n, typeof(*(pos)), node), \ |
170 | (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) | 170 | (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL) |
@@ -381,8 +381,8 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id) | |||
381 | 381 | ||
382 | static void print_stats(struct xen_blkif *blkif) | 382 | static void print_stats(struct xen_blkif *blkif) |
383 | { | 383 | { |
384 | pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d" | 384 | pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu" |
385 | " | ds %4d\n", | 385 | " | ds %4llu\n", |
386 | current->comm, blkif->st_oo_req, | 386 | current->comm, blkif->st_oo_req, |
387 | blkif->st_rd_req, blkif->st_wr_req, | 387 | blkif->st_rd_req, blkif->st_wr_req, |
388 | blkif->st_f_req, blkif->st_ds_req); | 388 | blkif->st_f_req, blkif->st_ds_req); |
@@ -442,7 +442,7 @@ int xen_blkif_schedule(void *arg) | |||
442 | } | 442 | } |
443 | 443 | ||
444 | struct seg_buf { | 444 | struct seg_buf { |
445 | unsigned long buf; | 445 | unsigned int offset; |
446 | unsigned int nsec; | 446 | unsigned int nsec; |
447 | }; | 447 | }; |
448 | /* | 448 | /* |
@@ -621,30 +621,21 @@ static int xen_blkbk_map(struct blkif_request *req, | |||
621 | * If this is a new persistent grant | 621 | * If this is a new persistent grant |
622 | * save the handler | 622 | * save the handler |
623 | */ | 623 | */ |
624 | persistent_gnts[i]->handle = map[j].handle; | 624 | persistent_gnts[i]->handle = map[j++].handle; |
625 | persistent_gnts[i]->dev_bus_addr = | ||
626 | map[j++].dev_bus_addr; | ||
627 | } | 625 | } |
628 | pending_handle(pending_req, i) = | 626 | pending_handle(pending_req, i) = |
629 | persistent_gnts[i]->handle; | 627 | persistent_gnts[i]->handle; |
630 | 628 | ||
631 | if (ret) | 629 | if (ret) |
632 | continue; | 630 | continue; |
633 | |||
634 | seg[i].buf = persistent_gnts[i]->dev_bus_addr | | ||
635 | (req->u.rw.seg[i].first_sect << 9); | ||
636 | } else { | 631 | } else { |
637 | pending_handle(pending_req, i) = map[j].handle; | 632 | pending_handle(pending_req, i) = map[j++].handle; |
638 | bitmap_set(pending_req->unmap_seg, i, 1); | 633 | bitmap_set(pending_req->unmap_seg, i, 1); |
639 | 634 | ||
640 | if (ret) { | 635 | if (ret) |
641 | j++; | ||
642 | continue; | 636 | continue; |
643 | } | ||
644 | |||
645 | seg[i].buf = map[j++].dev_bus_addr | | ||
646 | (req->u.rw.seg[i].first_sect << 9); | ||
647 | } | 637 | } |
638 | seg[i].offset = (req->u.rw.seg[i].first_sect << 9); | ||
648 | } | 639 | } |
649 | return ret; | 640 | return ret; |
650 | } | 641 | } |
@@ -679,6 +670,16 @@ static int dispatch_discard_io(struct xen_blkif *blkif, | |||
679 | return err; | 670 | return err; |
680 | } | 671 | } |
681 | 672 | ||
673 | static int dispatch_other_io(struct xen_blkif *blkif, | ||
674 | struct blkif_request *req, | ||
675 | struct pending_req *pending_req) | ||
676 | { | ||
677 | free_req(pending_req); | ||
678 | make_response(blkif, req->u.other.id, req->operation, | ||
679 | BLKIF_RSP_EOPNOTSUPP); | ||
680 | return -EIO; | ||
681 | } | ||
682 | |||
682 | static void xen_blk_drain_io(struct xen_blkif *blkif) | 683 | static void xen_blk_drain_io(struct xen_blkif *blkif) |
683 | { | 684 | { |
684 | atomic_set(&blkif->drain, 1); | 685 | atomic_set(&blkif->drain, 1); |
@@ -800,17 +801,30 @@ __do_block_io_op(struct xen_blkif *blkif) | |||
800 | 801 | ||
801 | /* Apply all sanity checks to /private copy/ of request. */ | 802 | /* Apply all sanity checks to /private copy/ of request. */ |
802 | barrier(); | 803 | barrier(); |
803 | if (unlikely(req.operation == BLKIF_OP_DISCARD)) { | 804 | |
805 | switch (req.operation) { | ||
806 | case BLKIF_OP_READ: | ||
807 | case BLKIF_OP_WRITE: | ||
808 | case BLKIF_OP_WRITE_BARRIER: | ||
809 | case BLKIF_OP_FLUSH_DISKCACHE: | ||
810 | if (dispatch_rw_block_io(blkif, &req, pending_req)) | ||
811 | goto done; | ||
812 | break; | ||
813 | case BLKIF_OP_DISCARD: | ||
804 | free_req(pending_req); | 814 | free_req(pending_req); |
805 | if (dispatch_discard_io(blkif, &req)) | 815 | if (dispatch_discard_io(blkif, &req)) |
806 | break; | 816 | goto done; |
807 | } else if (dispatch_rw_block_io(blkif, &req, pending_req)) | ||
808 | break; | 817 | break; |
818 | default: | ||
819 | if (dispatch_other_io(blkif, &req, pending_req)) | ||
820 | goto done; | ||
821 | break; | ||
822 | } | ||
809 | 823 | ||
810 | /* Yield point for this unbounded loop. */ | 824 | /* Yield point for this unbounded loop. */ |
811 | cond_resched(); | 825 | cond_resched(); |
812 | } | 826 | } |
813 | 827 | done: | |
814 | return more_to_do; | 828 | return more_to_do; |
815 | } | 829 | } |
816 | 830 | ||
@@ -904,7 +918,8 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, | |||
904 | pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", | 918 | pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", |
905 | operation == READ ? "read" : "write", | 919 | operation == READ ? "read" : "write", |
906 | preq.sector_number, | 920 | preq.sector_number, |
907 | preq.sector_number + preq.nr_sects, preq.dev); | 921 | preq.sector_number + preq.nr_sects, |
922 | blkif->vbd.pdevice); | ||
908 | goto fail_response; | 923 | goto fail_response; |
909 | } | 924 | } |
910 | 925 | ||
@@ -947,7 +962,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, | |||
947 | (bio_add_page(bio, | 962 | (bio_add_page(bio, |
948 | pages[i], | 963 | pages[i], |
949 | seg[i].nsec << 9, | 964 | seg[i].nsec << 9, |
950 | seg[i].buf & ~PAGE_MASK) == 0)) { | 965 | seg[i].offset) == 0)) { |
951 | 966 | ||
952 | bio = bio_alloc(GFP_KERNEL, nseg-i); | 967 | bio = bio_alloc(GFP_KERNEL, nseg-i); |
953 | if (unlikely(bio == NULL)) | 968 | if (unlikely(bio == NULL)) |
@@ -977,13 +992,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, | |||
977 | bio->bi_end_io = end_block_io_op; | 992 | bio->bi_end_io = end_block_io_op; |
978 | } | 993 | } |
979 | 994 | ||
980 | /* | ||
981 | * We set it one so that the last submit_bio does not have to call | ||
982 | * atomic_inc. | ||
983 | */ | ||
984 | atomic_set(&pending_req->pendcnt, nbio); | 995 | atomic_set(&pending_req->pendcnt, nbio); |
985 | |||
986 | /* Get a reference count for the disk queue and start sending I/O */ | ||
987 | blk_start_plug(&plug); | 996 | blk_start_plug(&plug); |
988 | 997 | ||
989 | for (i = 0; i < nbio; i++) | 998 | for (i = 0; i < nbio; i++) |
@@ -1011,6 +1020,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif, | |||
1011 | fail_put_bio: | 1020 | fail_put_bio: |
1012 | for (i = 0; i < nbio; i++) | 1021 | for (i = 0; i < nbio; i++) |
1013 | bio_put(biolist[i]); | 1022 | bio_put(biolist[i]); |
1023 | atomic_set(&pending_req->pendcnt, 1); | ||
1014 | __end_block_io_op(pending_req, -EINVAL); | 1024 | __end_block_io_op(pending_req, -EINVAL); |
1015 | msleep(1); /* back off a bit */ | 1025 | msleep(1); /* back off a bit */ |
1016 | return -EIO; | 1026 | return -EIO; |
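The blkback.c rework above replaces the old DISCARD-or-read/write test with an explicit switch, so operations the backend does not understand get a BLKIF_RSP_EOPNOTSUPP response instead of being mis-parsed. The self-contained sketch below shows that dispatch pattern; the demo_* types and helpers are local stand-ins, not the xen-blkback API.

#include <linux/types.h>

/* Simplified, illustrative request layout; names mirror the patch. */
#define DEMO_OP_READ		0
#define DEMO_OP_WRITE		1
#define DEMO_OP_DISCARD		5
#define DEMO_RSP_EOPNOTSUPP	(-2)

struct demo_request {
	u8 operation;
	union {
		struct { u64 id; u64 sector_number; } rw;
		struct { u64 id; u64 nr_sectors; } discard;
		struct { u64 id; } other;	/* only the id is trusted */
	} u;
};

static int demo_do_rw(struct demo_request *req) { return 0; }
static int demo_do_discard(struct demo_request *req) { return 0; }
static void demo_make_response(u64 id, u8 op, int status) { }

/* Validate the operation up front; unknown requests are answered with
 * EOPNOTSUPP using the id alone rather than being treated as a r/w. */
static void demo_dispatch(struct demo_request *req)
{
	switch (req->operation) {
	case DEMO_OP_READ:
	case DEMO_OP_WRITE:
		demo_do_rw(req);
		break;
	case DEMO_OP_DISCARD:
		demo_do_discard(req);
		break;
	default:
		demo_make_response(req->u.other.id, req->operation,
				   DEMO_RSP_EOPNOTSUPP);
		break;
	}
}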
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 6072390c7f57..60103e2517ba 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h | |||
@@ -77,11 +77,18 @@ struct blkif_x86_32_request_discard { | |||
77 | uint64_t nr_sectors; | 77 | uint64_t nr_sectors; |
78 | } __attribute__((__packed__)); | 78 | } __attribute__((__packed__)); |
79 | 79 | ||
80 | struct blkif_x86_32_request_other { | ||
81 | uint8_t _pad1; | ||
82 | blkif_vdev_t _pad2; | ||
83 | uint64_t id; /* private guest value, echoed in resp */ | ||
84 | } __attribute__((__packed__)); | ||
85 | |||
80 | struct blkif_x86_32_request { | 86 | struct blkif_x86_32_request { |
81 | uint8_t operation; /* BLKIF_OP_??? */ | 87 | uint8_t operation; /* BLKIF_OP_??? */ |
82 | union { | 88 | union { |
83 | struct blkif_x86_32_request_rw rw; | 89 | struct blkif_x86_32_request_rw rw; |
84 | struct blkif_x86_32_request_discard discard; | 90 | struct blkif_x86_32_request_discard discard; |
91 | struct blkif_x86_32_request_other other; | ||
85 | } u; | 92 | } u; |
86 | } __attribute__((__packed__)); | 93 | } __attribute__((__packed__)); |
87 | 94 | ||
@@ -113,11 +120,19 @@ struct blkif_x86_64_request_discard { | |||
113 | uint64_t nr_sectors; | 120 | uint64_t nr_sectors; |
114 | } __attribute__((__packed__)); | 121 | } __attribute__((__packed__)); |
115 | 122 | ||
123 | struct blkif_x86_64_request_other { | ||
124 | uint8_t _pad1; | ||
125 | blkif_vdev_t _pad2; | ||
126 | uint32_t _pad3; /* offsetof(blkif_..,u.discard.id)==8 */ | ||
127 | uint64_t id; /* private guest value, echoed in resp */ | ||
128 | } __attribute__((__packed__)); | ||
129 | |||
116 | struct blkif_x86_64_request { | 130 | struct blkif_x86_64_request { |
117 | uint8_t operation; /* BLKIF_OP_??? */ | 131 | uint8_t operation; /* BLKIF_OP_??? */ |
118 | union { | 132 | union { |
119 | struct blkif_x86_64_request_rw rw; | 133 | struct blkif_x86_64_request_rw rw; |
120 | struct blkif_x86_64_request_discard discard; | 134 | struct blkif_x86_64_request_discard discard; |
135 | struct blkif_x86_64_request_other other; | ||
121 | } u; | 136 | } u; |
122 | } __attribute__((__packed__)); | 137 | } __attribute__((__packed__)); |
123 | 138 | ||
@@ -172,7 +187,6 @@ struct persistent_gnt { | |||
172 | struct page *page; | 187 | struct page *page; |
173 | grant_ref_t gnt; | 188 | grant_ref_t gnt; |
174 | grant_handle_t handle; | 189 | grant_handle_t handle; |
175 | uint64_t dev_bus_addr; | ||
176 | struct rb_node node; | 190 | struct rb_node node; |
177 | }; | 191 | }; |
178 | 192 | ||
@@ -208,13 +222,13 @@ struct xen_blkif { | |||
208 | 222 | ||
209 | /* statistics */ | 223 | /* statistics */ |
210 | unsigned long st_print; | 224 | unsigned long st_print; |
211 | int st_rd_req; | 225 | unsigned long long st_rd_req; |
212 | int st_wr_req; | 226 | unsigned long long st_wr_req; |
213 | int st_oo_req; | 227 | unsigned long long st_oo_req; |
214 | int st_f_req; | 228 | unsigned long long st_f_req; |
215 | int st_ds_req; | 229 | unsigned long long st_ds_req; |
216 | int st_rd_sect; | 230 | unsigned long long st_rd_sect; |
217 | int st_wr_sect; | 231 | unsigned long long st_wr_sect; |
218 | 232 | ||
219 | wait_queue_head_t waiting_to_free; | 233 | wait_queue_head_t waiting_to_free; |
220 | }; | 234 | }; |
@@ -278,6 +292,11 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst, | |||
278 | dst->u.discard.nr_sectors = src->u.discard.nr_sectors; | 292 | dst->u.discard.nr_sectors = src->u.discard.nr_sectors; |
279 | break; | 293 | break; |
280 | default: | 294 | default: |
295 | /* | ||
296 | * Don't know how to translate this op. Only get the | ||
297 | * ID so failure can be reported to the frontend. | ||
298 | */ | ||
299 | dst->u.other.id = src->u.other.id; | ||
281 | break; | 300 | break; |
282 | } | 301 | } |
283 | } | 302 | } |
@@ -309,6 +328,11 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst, | |||
309 | dst->u.discard.nr_sectors = src->u.discard.nr_sectors; | 328 | dst->u.discard.nr_sectors = src->u.discard.nr_sectors; |
310 | break; | 329 | break; |
311 | default: | 330 | default: |
331 | /* | ||
332 | * Don't know how to translate this op. Only get the | ||
333 | * ID so failure can be reported to the frontend. | ||
334 | */ | ||
335 | dst->u.other.id = src->u.other.id; | ||
312 | break; | 336 | break; |
313 | } | 337 | } |
314 | } | 338 | } |
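Two things happen in the common.h hunk: the per-VBD statistics widen from int to unsigned long long (matched by the %llu format strings in the xenbus.c hunk that follows), and compat "other" request structs are added whose only meaningful field is the id, padded so it lands at the same offset as in the rw/discard variants. The sketch below illustrates that layout property with hypothetical demo_* structs; the BUILD_BUG_ON() makes the assumption explicit.

#include <linux/bug.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* Illustrative copy of the 64-bit compat layout; only the padding matters. */
struct demo_request_other {
	uint8_t  _pad1;
	uint16_t _pad2;			/* stands in for blkif_vdev_t */
	uint32_t _pad3;			/* keeps 'id' at request offset 8 */
	uint64_t id;			/* private guest value, echoed back */
} __attribute__((__packed__));

struct demo_request {
	uint8_t operation;
	union {
		struct demo_request_other other;
		/* rw/discard members elided; their 'id' also sits at 8 */
	} u;
} __attribute__((__packed__));

/* Because every variant keeps 'id' at the same offset, the default
 * translation case can copy u.other.id without knowing the opcode. */
static inline void demo_check_layout(void)
{
	BUILD_BUG_ON(offsetof(struct demo_request, u.other.id) != 8);
}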
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 5e237f630c47..8bfd1bcf95ec 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c | |||
@@ -230,13 +230,13 @@ int __init xen_blkif_interface_init(void) | |||
230 | } \ | 230 | } \ |
231 | static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) | 231 | static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) |
232 | 232 | ||
233 | VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req); | 233 | VBD_SHOW(oo_req, "%llu\n", be->blkif->st_oo_req); |
234 | VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req); | 234 | VBD_SHOW(rd_req, "%llu\n", be->blkif->st_rd_req); |
235 | VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req); | 235 | VBD_SHOW(wr_req, "%llu\n", be->blkif->st_wr_req); |
236 | VBD_SHOW(f_req, "%d\n", be->blkif->st_f_req); | 236 | VBD_SHOW(f_req, "%llu\n", be->blkif->st_f_req); |
237 | VBD_SHOW(ds_req, "%d\n", be->blkif->st_ds_req); | 237 | VBD_SHOW(ds_req, "%llu\n", be->blkif->st_ds_req); |
238 | VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect); | 238 | VBD_SHOW(rd_sect, "%llu\n", be->blkif->st_rd_sect); |
239 | VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect); | 239 | VBD_SHOW(wr_sect, "%llu\n", be->blkif->st_wr_sect); |
240 | 240 | ||
241 | static struct attribute *xen_vbdstat_attrs[] = { | 241 | static struct attribute *xen_vbdstat_attrs[] = { |
242 | &dev_attr_oo_req.attr, | 242 | &dev_attr_oo_req.attr, |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index c3dae2e0f290..a894f88762d8 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -44,7 +44,7 @@ | |||
44 | #include <linux/mutex.h> | 44 | #include <linux/mutex.h> |
45 | #include <linux/scatterlist.h> | 45 | #include <linux/scatterlist.h> |
46 | #include <linux/bitmap.h> | 46 | #include <linux/bitmap.h> |
47 | #include <linux/llist.h> | 47 | #include <linux/list.h> |
48 | 48 | ||
49 | #include <xen/xen.h> | 49 | #include <xen/xen.h> |
50 | #include <xen/xenbus.h> | 50 | #include <xen/xenbus.h> |
@@ -68,13 +68,12 @@ enum blkif_state { | |||
68 | struct grant { | 68 | struct grant { |
69 | grant_ref_t gref; | 69 | grant_ref_t gref; |
70 | unsigned long pfn; | 70 | unsigned long pfn; |
71 | struct llist_node node; | 71 | struct list_head node; |
72 | }; | 72 | }; |
73 | 73 | ||
74 | struct blk_shadow { | 74 | struct blk_shadow { |
75 | struct blkif_request req; | 75 | struct blkif_request req; |
76 | struct request *request; | 76 | struct request *request; |
77 | unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | ||
78 | struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 77 | struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
79 | }; | 78 | }; |
80 | 79 | ||
@@ -105,7 +104,7 @@ struct blkfront_info | |||
105 | struct work_struct work; | 104 | struct work_struct work; |
106 | struct gnttab_free_callback callback; | 105 | struct gnttab_free_callback callback; |
107 | struct blk_shadow shadow[BLK_RING_SIZE]; | 106 | struct blk_shadow shadow[BLK_RING_SIZE]; |
108 | struct llist_head persistent_gnts; | 107 | struct list_head persistent_gnts; |
109 | unsigned int persistent_gnts_c; | 108 | unsigned int persistent_gnts_c; |
110 | unsigned long shadow_free; | 109 | unsigned long shadow_free; |
111 | unsigned int feature_flush; | 110 | unsigned int feature_flush; |
@@ -165,6 +164,69 @@ static int add_id_to_freelist(struct blkfront_info *info, | |||
165 | return 0; | 164 | return 0; |
166 | } | 165 | } |
167 | 166 | ||
167 | static int fill_grant_buffer(struct blkfront_info *info, int num) | ||
168 | { | ||
169 | struct page *granted_page; | ||
170 | struct grant *gnt_list_entry, *n; | ||
171 | int i = 0; | ||
172 | |||
173 | while(i < num) { | ||
174 | gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO); | ||
175 | if (!gnt_list_entry) | ||
176 | goto out_of_memory; | ||
177 | |||
178 | granted_page = alloc_page(GFP_NOIO); | ||
179 | if (!granted_page) { | ||
180 | kfree(gnt_list_entry); | ||
181 | goto out_of_memory; | ||
182 | } | ||
183 | |||
184 | gnt_list_entry->pfn = page_to_pfn(granted_page); | ||
185 | gnt_list_entry->gref = GRANT_INVALID_REF; | ||
186 | list_add(&gnt_list_entry->node, &info->persistent_gnts); | ||
187 | i++; | ||
188 | } | ||
189 | |||
190 | return 0; | ||
191 | |||
192 | out_of_memory: | ||
193 | list_for_each_entry_safe(gnt_list_entry, n, | ||
194 | &info->persistent_gnts, node) { | ||
195 | list_del(&gnt_list_entry->node); | ||
196 | __free_page(pfn_to_page(gnt_list_entry->pfn)); | ||
197 | kfree(gnt_list_entry); | ||
198 | i--; | ||
199 | } | ||
200 | BUG_ON(i != 0); | ||
201 | return -ENOMEM; | ||
202 | } | ||
203 | |||
204 | static struct grant *get_grant(grant_ref_t *gref_head, | ||
205 | struct blkfront_info *info) | ||
206 | { | ||
207 | struct grant *gnt_list_entry; | ||
208 | unsigned long buffer_mfn; | ||
209 | |||
210 | BUG_ON(list_empty(&info->persistent_gnts)); | ||
211 | gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant, | ||
212 | node); | ||
213 | list_del(&gnt_list_entry->node); | ||
214 | |||
215 | if (gnt_list_entry->gref != GRANT_INVALID_REF) { | ||
216 | info->persistent_gnts_c--; | ||
217 | return gnt_list_entry; | ||
218 | } | ||
219 | |||
220 | /* Assign a gref to this page */ | ||
221 | gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head); | ||
222 | BUG_ON(gnt_list_entry->gref == -ENOSPC); | ||
223 | buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); | ||
224 | gnttab_grant_foreign_access_ref(gnt_list_entry->gref, | ||
225 | info->xbdev->otherend_id, | ||
226 | buffer_mfn, 0); | ||
227 | return gnt_list_entry; | ||
228 | } | ||
229 | |||
168 | static const char *op_name(int op) | 230 | static const char *op_name(int op) |
169 | { | 231 | { |
170 | static const char *const names[] = { | 232 | static const char *const names[] = { |
@@ -293,7 +355,6 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode, | |||
293 | static int blkif_queue_request(struct request *req) | 355 | static int blkif_queue_request(struct request *req) |
294 | { | 356 | { |
295 | struct blkfront_info *info = req->rq_disk->private_data; | 357 | struct blkfront_info *info = req->rq_disk->private_data; |
296 | unsigned long buffer_mfn; | ||
297 | struct blkif_request *ring_req; | 358 | struct blkif_request *ring_req; |
298 | unsigned long id; | 359 | unsigned long id; |
299 | unsigned int fsect, lsect; | 360 | unsigned int fsect, lsect; |
@@ -306,7 +367,6 @@ static int blkif_queue_request(struct request *req) | |||
306 | */ | 367 | */ |
307 | bool new_persistent_gnts; | 368 | bool new_persistent_gnts; |
308 | grant_ref_t gref_head; | 369 | grant_ref_t gref_head; |
309 | struct page *granted_page; | ||
310 | struct grant *gnt_list_entry = NULL; | 370 | struct grant *gnt_list_entry = NULL; |
311 | struct scatterlist *sg; | 371 | struct scatterlist *sg; |
312 | 372 | ||
@@ -370,41 +430,8 @@ static int blkif_queue_request(struct request *req) | |||
370 | fsect = sg->offset >> 9; | 430 | fsect = sg->offset >> 9; |
371 | lsect = fsect + (sg->length >> 9) - 1; | 431 | lsect = fsect + (sg->length >> 9) - 1; |
372 | 432 | ||
373 | if (info->persistent_gnts_c) { | 433 | gnt_list_entry = get_grant(&gref_head, info); |
374 | BUG_ON(llist_empty(&info->persistent_gnts)); | 434 | ref = gnt_list_entry->gref; |
375 | gnt_list_entry = llist_entry( | ||
376 | llist_del_first(&info->persistent_gnts), | ||
377 | struct grant, node); | ||
378 | |||
379 | ref = gnt_list_entry->gref; | ||
380 | buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); | ||
381 | info->persistent_gnts_c--; | ||
382 | } else { | ||
383 | ref = gnttab_claim_grant_reference(&gref_head); | ||
384 | BUG_ON(ref == -ENOSPC); | ||
385 | |||
386 | gnt_list_entry = | ||
387 | kmalloc(sizeof(struct grant), | ||
388 | GFP_ATOMIC); | ||
389 | if (!gnt_list_entry) | ||
390 | return -ENOMEM; | ||
391 | |||
392 | granted_page = alloc_page(GFP_ATOMIC); | ||
393 | if (!granted_page) { | ||
394 | kfree(gnt_list_entry); | ||
395 | return -ENOMEM; | ||
396 | } | ||
397 | |||
398 | gnt_list_entry->pfn = | ||
399 | page_to_pfn(granted_page); | ||
400 | gnt_list_entry->gref = ref; | ||
401 | |||
402 | buffer_mfn = pfn_to_mfn(page_to_pfn( | ||
403 | granted_page)); | ||
404 | gnttab_grant_foreign_access_ref(ref, | ||
405 | info->xbdev->otherend_id, | ||
406 | buffer_mfn, 0); | ||
407 | } | ||
408 | 435 | ||
409 | info->shadow[id].grants_used[i] = gnt_list_entry; | 436 | info->shadow[id].grants_used[i] = gnt_list_entry; |
410 | 437 | ||
@@ -435,7 +462,6 @@ static int blkif_queue_request(struct request *req) | |||
435 | kunmap_atomic(shared_data); | 462 | kunmap_atomic(shared_data); |
436 | } | 463 | } |
437 | 464 | ||
438 | info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); | ||
439 | ring_req->u.rw.seg[i] = | 465 | ring_req->u.rw.seg[i] = |
440 | (struct blkif_request_segment) { | 466 | (struct blkif_request_segment) { |
441 | .gref = ref, | 467 | .gref = ref, |
@@ -790,9 +816,8 @@ static void blkif_restart_queue(struct work_struct *work) | |||
790 | 816 | ||
791 | static void blkif_free(struct blkfront_info *info, int suspend) | 817 | static void blkif_free(struct blkfront_info *info, int suspend) |
792 | { | 818 | { |
793 | struct llist_node *all_gnts; | 819 | struct grant *persistent_gnt; |
794 | struct grant *persistent_gnt, *tmp; | 820 | struct grant *n; |
795 | struct llist_node *n; | ||
796 | 821 | ||
797 | /* Prevent new requests being issued until we fix things up. */ | 822 | /* Prevent new requests being issued until we fix things up. */ |
798 | spin_lock_irq(&info->io_lock); | 823 | spin_lock_irq(&info->io_lock); |
@@ -803,22 +828,20 @@ static void blkif_free(struct blkfront_info *info, int suspend) | |||
803 | blk_stop_queue(info->rq); | 828 | blk_stop_queue(info->rq); |
804 | 829 | ||
805 | /* Remove all persistent grants */ | 830 | /* Remove all persistent grants */ |
806 | if (info->persistent_gnts_c) { | 831 | if (!list_empty(&info->persistent_gnts)) { |
807 | all_gnts = llist_del_all(&info->persistent_gnts); | 832 | list_for_each_entry_safe(persistent_gnt, n, |
808 | persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node); | 833 | &info->persistent_gnts, node) { |
809 | while (persistent_gnt) { | 834 | list_del(&persistent_gnt->node); |
810 | gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL); | 835 | if (persistent_gnt->gref != GRANT_INVALID_REF) { |
836 | gnttab_end_foreign_access(persistent_gnt->gref, | ||
837 | 0, 0UL); | ||
838 | info->persistent_gnts_c--; | ||
839 | } | ||
811 | __free_page(pfn_to_page(persistent_gnt->pfn)); | 840 | __free_page(pfn_to_page(persistent_gnt->pfn)); |
812 | tmp = persistent_gnt; | 841 | kfree(persistent_gnt); |
813 | n = persistent_gnt->node.next; | ||
814 | if (n) | ||
815 | persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node); | ||
816 | else | ||
817 | persistent_gnt = NULL; | ||
818 | kfree(tmp); | ||
819 | } | 842 | } |
820 | info->persistent_gnts_c = 0; | ||
821 | } | 843 | } |
844 | BUG_ON(info->persistent_gnts_c != 0); | ||
822 | 845 | ||
823 | /* No more gnttab callback work. */ | 846 | /* No more gnttab callback work. */ |
824 | gnttab_cancel_free_callback(&info->callback); | 847 | gnttab_cancel_free_callback(&info->callback); |
@@ -875,7 +898,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info, | |||
875 | } | 898 | } |
876 | /* Add the persistent grant into the list of free grants */ | 899 | /* Add the persistent grant into the list of free grants */ |
877 | for (i = 0; i < s->req.u.rw.nr_segments; i++) { | 900 | for (i = 0; i < s->req.u.rw.nr_segments; i++) { |
878 | llist_add(&s->grants_used[i]->node, &info->persistent_gnts); | 901 | list_add(&s->grants_used[i]->node, &info->persistent_gnts); |
879 | info->persistent_gnts_c++; | 902 | info->persistent_gnts_c++; |
880 | } | 903 | } |
881 | } | 904 | } |
@@ -1013,6 +1036,12 @@ static int setup_blkring(struct xenbus_device *dev, | |||
1013 | 1036 | ||
1014 | sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); | 1037 | sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST); |
1015 | 1038 | ||
1039 | /* Allocate memory for grants */ | ||
1040 | err = fill_grant_buffer(info, BLK_RING_SIZE * | ||
1041 | BLKIF_MAX_SEGMENTS_PER_REQUEST); | ||
1042 | if (err) | ||
1043 | goto fail; | ||
1044 | |||
1016 | err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); | 1045 | err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); |
1017 | if (err < 0) { | 1046 | if (err < 0) { |
1018 | free_page((unsigned long)sring); | 1047 | free_page((unsigned long)sring); |
@@ -1171,7 +1200,7 @@ static int blkfront_probe(struct xenbus_device *dev, | |||
1171 | spin_lock_init(&info->io_lock); | 1200 | spin_lock_init(&info->io_lock); |
1172 | info->xbdev = dev; | 1201 | info->xbdev = dev; |
1173 | info->vdevice = vdevice; | 1202 | info->vdevice = vdevice; |
1174 | init_llist_head(&info->persistent_gnts); | 1203 | INIT_LIST_HEAD(&info->persistent_gnts); |
1175 | info->persistent_gnts_c = 0; | 1204 | info->persistent_gnts_c = 0; |
1176 | info->connected = BLKIF_STATE_DISCONNECTED; | 1205 | info->connected = BLKIF_STATE_DISCONNECTED; |
1177 | INIT_WORK(&info->work, blkif_restart_queue); | 1206 | INIT_WORK(&info->work, blkif_restart_queue); |
@@ -1203,11 +1232,10 @@ static int blkif_recover(struct blkfront_info *info) | |||
1203 | int j; | 1232 | int j; |
1204 | 1233 | ||
1205 | /* Stage 1: Make a safe copy of the shadow state. */ | 1234 | /* Stage 1: Make a safe copy of the shadow state. */ |
1206 | copy = kmalloc(sizeof(info->shadow), | 1235 | copy = kmemdup(info->shadow, sizeof(info->shadow), |
1207 | GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); | 1236 | GFP_NOIO | __GFP_REPEAT | __GFP_HIGH); |
1208 | if (!copy) | 1237 | if (!copy) |
1209 | return -ENOMEM; | 1238 | return -ENOMEM; |
1210 | memcpy(copy, info->shadow, sizeof(info->shadow)); | ||
1211 | 1239 | ||
1212 | /* Stage 2: Set up free list. */ | 1240 | /* Stage 2: Set up free list. */ |
1213 | memset(&info->shadow, 0, sizeof(info->shadow)); | 1241 | memset(&info->shadow, 0, sizeof(info->shadow)); |
@@ -1236,7 +1264,7 @@ static int blkif_recover(struct blkfront_info *info) | |||
1236 | gnttab_grant_foreign_access_ref( | 1264 | gnttab_grant_foreign_access_ref( |
1237 | req->u.rw.seg[j].gref, | 1265 | req->u.rw.seg[j].gref, |
1238 | info->xbdev->otherend_id, | 1266 | info->xbdev->otherend_id, |
1239 | pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]), | 1267 | pfn_to_mfn(copy[i].grants_used[j]->pfn), |
1240 | 0); | 1268 | 0); |
1241 | } | 1269 | } |
1242 | info->shadow[req->u.rw.id].req = *req; | 1270 | info->shadow[req->u.rw.id].req = *req; |
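Note: the xen-blkfront hunks above move the persistent-grant pool from a lock-free llist to an ordinary list_head, pre-allocate every grant/page pair in fill_grant_buffer() so the request path never touches the allocator, and let get_grant() hand out grant references lazily. A minimal userspace sketch of that pre-allocated free-pool pattern, with no Xen grant-table API involved (the struct and function names below are illustrative only):

#include <stdio.h>
#include <stdlib.h>

/* One pool entry; 'next'/'prev' play the role of struct list_head. */
struct pool_entry {
    struct pool_entry *next, *prev;
    void *page;                 /* stands in for the granted page  */
    int ref;                    /* -1 == "no grant reference yet"  */
};

static struct pool_entry pool = { &pool, &pool };   /* empty circular list */

static void pool_add(struct pool_entry *e)
{
    e->next = pool.next; e->prev = &pool;
    pool.next->prev = e; pool.next = e;
}

static struct pool_entry *pool_take(void)
{
    struct pool_entry *e = pool.next;
    if (e == &pool)
        return NULL;            /* caller sized the pool, so this is a bug */
    e->prev->next = e->next; e->next->prev = e->prev;
    return e;
}

/* Pre-allocate 'num' entries so the I/O path never hits the allocator. */
static int pool_fill(int num)
{
    while (num--) {
        struct pool_entry *e = calloc(1, sizeof(*e));
        if (!e || !(e->page = malloc(4096))) {
            free(e);
            return -1;          /* caller tears the pool down */
        }
        e->ref = -1;
        pool_add(e);
    }
    return 0;
}

/* Tear down with a "safe" walk: grab ->next before freeing the entry. */
static void pool_drain(void)
{
    struct pool_entry *e, *n;
    for (e = pool.next; e != &pool; e = n) {
        n = e->next;
        free(e->page);
        free(e);
    }
    pool.next = pool.prev = &pool;
}

int main(void)
{
    if (pool_fill(8)) {
        pool_drain();
        return 1;
    }
    struct pool_entry *e = pool_take();
    printf("took entry %p (ref=%d)\n", (void *)e, e->ref);
    pool_add(e);                /* recycle, mirroring blkif_completion() */
    pool_drain();
    return 0;
}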
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index a8a41e07a221..6aab00ef4379 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c | |||
@@ -73,9 +73,13 @@ static struct usb_device_id ath3k_table[] = { | |||
73 | { USB_DEVICE(0x03F0, 0x311D) }, | 73 | { USB_DEVICE(0x03F0, 0x311D) }, |
74 | 74 | ||
75 | /* Atheros AR3012 with sflash firmware*/ | 75 | /* Atheros AR3012 with sflash firmware*/ |
76 | { USB_DEVICE(0x0CF3, 0x0036) }, | ||
76 | { USB_DEVICE(0x0CF3, 0x3004) }, | 77 | { USB_DEVICE(0x0CF3, 0x3004) }, |
78 | { USB_DEVICE(0x0CF3, 0x3008) }, | ||
77 | { USB_DEVICE(0x0CF3, 0x311D) }, | 79 | { USB_DEVICE(0x0CF3, 0x311D) }, |
80 | { USB_DEVICE(0x0CF3, 0x817a) }, | ||
78 | { USB_DEVICE(0x13d3, 0x3375) }, | 81 | { USB_DEVICE(0x13d3, 0x3375) }, |
82 | { USB_DEVICE(0x04CA, 0x3004) }, | ||
79 | { USB_DEVICE(0x04CA, 0x3005) }, | 83 | { USB_DEVICE(0x04CA, 0x3005) }, |
80 | { USB_DEVICE(0x04CA, 0x3006) }, | 84 | { USB_DEVICE(0x04CA, 0x3006) }, |
81 | { USB_DEVICE(0x04CA, 0x3008) }, | 85 | { USB_DEVICE(0x04CA, 0x3008) }, |
@@ -105,9 +109,13 @@ MODULE_DEVICE_TABLE(usb, ath3k_table); | |||
105 | static struct usb_device_id ath3k_blist_tbl[] = { | 109 | static struct usb_device_id ath3k_blist_tbl[] = { |
106 | 110 | ||
107 | /* Atheros AR3012 with sflash firmware*/ | 111 | /* Atheros AR3012 with sflash firmware*/ |
112 | { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, | ||
108 | { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, | 113 | { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, |
114 | { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, | ||
109 | { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, | 115 | { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, |
116 | { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, | ||
110 | { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, | 117 | { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, |
118 | { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, | ||
111 | { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, | 119 | { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, |
112 | { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, | 120 | { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, |
113 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, | 121 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, |
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 7e351e345476..2cc5f774a29c 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -131,9 +131,13 @@ static struct usb_device_id blacklist_table[] = { | |||
131 | { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, | 131 | { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, |
132 | 132 | ||
133 | /* Atheros 3012 with sflash firmware */ | 133 | /* Atheros 3012 with sflash firmware */ |
134 | { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, | ||
134 | { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, | 135 | { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, |
136 | { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, | ||
135 | { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, | 137 | { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, |
138 | { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 }, | ||
136 | { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, | 139 | { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, |
140 | { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, | ||
137 | { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, | 141 | { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, |
138 | { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, | 142 | { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, |
139 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, | 143 | { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, |
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 69ae5972713c..a0f7724852eb 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c | |||
@@ -380,6 +380,15 @@ void hwrng_unregister(struct hwrng *rng) | |||
380 | } | 380 | } |
381 | EXPORT_SYMBOL_GPL(hwrng_unregister); | 381 | EXPORT_SYMBOL_GPL(hwrng_unregister); |
382 | 382 | ||
383 | static void __exit hwrng_exit(void) | ||
384 | { | ||
385 | mutex_lock(&rng_mutex); | ||
386 | BUG_ON(current_rng); | ||
387 | kfree(rng_buffer); | ||
388 | mutex_unlock(&rng_mutex); | ||
389 | } | ||
390 | |||
391 | module_exit(hwrng_exit); | ||
383 | 392 | ||
384 | MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); | 393 | MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); |
385 | MODULE_LICENSE("GPL"); | 394 | MODULE_LICENSE("GPL"); |
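The new hwrng_exit() above frees the lazily allocated rng_buffer under rng_mutex and insists that no RNG provider is still registered at unload time. A rough userspace analogue of the same teardown discipline (the names here are stand-ins, not the hw_random API):

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t rng_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *current_rng;          /* non-NULL while a provider is registered */
static unsigned char *rng_buffer;  /* allocated lazily on first read */

static void hwrng_exit_like(void)
{
    /* Same ordering as the hunk above: lock, check, free, unlock. */
    pthread_mutex_lock(&rng_mutex);
    assert(current_rng == NULL);   /* every provider must be unregistered */
    free(rng_buffer);
    rng_buffer = NULL;
    pthread_mutex_unlock(&rng_mutex);
}

int main(void)
{
    rng_buffer = malloc(32);       /* pretend a read path allocated it */
    atexit(hwrng_exit_like);       /* plays the role of module_exit() */
    return 0;
}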
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index e905d5f53051..ce5f3fc25d6d 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -149,7 +149,8 @@ struct ports_device { | |||
149 | spinlock_t ports_lock; | 149 | spinlock_t ports_lock; |
150 | 150 | ||
151 | /* To protect the vq operations for the control channel */ | 151 | /* To protect the vq operations for the control channel */ |
152 | spinlock_t cvq_lock; | 152 | spinlock_t c_ivq_lock; |
153 | spinlock_t c_ovq_lock; | ||
153 | 154 | ||
154 | /* The current config space is stored here */ | 155 | /* The current config space is stored here */ |
155 | struct virtio_console_config config; | 156 | struct virtio_console_config config; |
@@ -569,11 +570,14 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, | |||
569 | vq = portdev->c_ovq; | 570 | vq = portdev->c_ovq; |
570 | 571 | ||
571 | sg_init_one(sg, &cpkt, sizeof(cpkt)); | 572 | sg_init_one(sg, &cpkt, sizeof(cpkt)); |
573 | |||
574 | spin_lock(&portdev->c_ovq_lock); | ||
572 | if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) { | 575 | if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) { |
573 | virtqueue_kick(vq); | 576 | virtqueue_kick(vq); |
574 | while (!virtqueue_get_buf(vq, &len)) | 577 | while (!virtqueue_get_buf(vq, &len)) |
575 | cpu_relax(); | 578 | cpu_relax(); |
576 | } | 579 | } |
580 | spin_unlock(&portdev->c_ovq_lock); | ||
577 | return 0; | 581 | return 0; |
578 | } | 582 | } |
579 | 583 | ||
@@ -1436,7 +1440,7 @@ static int add_port(struct ports_device *portdev, u32 id) | |||
1436 | * rproc_serial does not want the console port, only | 1440 | * rproc_serial does not want the console port, only |
1437 | * the generic port implementation. | 1441 | * the generic port implementation. |
1438 | */ | 1442 | */ |
1439 | port->host_connected = port->guest_connected = true; | 1443 | port->host_connected = true; |
1440 | else if (!use_multiport(port->portdev)) { | 1444 | else if (!use_multiport(port->portdev)) { |
1441 | /* | 1445 | /* |
1442 | * If we're not using multiport support, | 1446 | * If we're not using multiport support, |
@@ -1709,23 +1713,23 @@ static void control_work_handler(struct work_struct *work) | |||
1709 | portdev = container_of(work, struct ports_device, control_work); | 1713 | portdev = container_of(work, struct ports_device, control_work); |
1710 | vq = portdev->c_ivq; | 1714 | vq = portdev->c_ivq; |
1711 | 1715 | ||
1712 | spin_lock(&portdev->cvq_lock); | 1716 | spin_lock(&portdev->c_ivq_lock); |
1713 | while ((buf = virtqueue_get_buf(vq, &len))) { | 1717 | while ((buf = virtqueue_get_buf(vq, &len))) { |
1714 | spin_unlock(&portdev->cvq_lock); | 1718 | spin_unlock(&portdev->c_ivq_lock); |
1715 | 1719 | ||
1716 | buf->len = len; | 1720 | buf->len = len; |
1717 | buf->offset = 0; | 1721 | buf->offset = 0; |
1718 | 1722 | ||
1719 | handle_control_message(portdev, buf); | 1723 | handle_control_message(portdev, buf); |
1720 | 1724 | ||
1721 | spin_lock(&portdev->cvq_lock); | 1725 | spin_lock(&portdev->c_ivq_lock); |
1722 | if (add_inbuf(portdev->c_ivq, buf) < 0) { | 1726 | if (add_inbuf(portdev->c_ivq, buf) < 0) { |
1723 | dev_warn(&portdev->vdev->dev, | 1727 | dev_warn(&portdev->vdev->dev, |
1724 | "Error adding buffer to queue\n"); | 1728 | "Error adding buffer to queue\n"); |
1725 | free_buf(buf, false); | 1729 | free_buf(buf, false); |
1726 | } | 1730 | } |
1727 | } | 1731 | } |
1728 | spin_unlock(&portdev->cvq_lock); | 1732 | spin_unlock(&portdev->c_ivq_lock); |
1729 | } | 1733 | } |
1730 | 1734 | ||
1731 | static void out_intr(struct virtqueue *vq) | 1735 | static void out_intr(struct virtqueue *vq) |
@@ -1752,13 +1756,23 @@ static void in_intr(struct virtqueue *vq) | |||
1752 | port->inbuf = get_inbuf(port); | 1756 | port->inbuf = get_inbuf(port); |
1753 | 1757 | ||
1754 | /* | 1758 | /* |
1755 | * Don't queue up data when port is closed. This condition | 1759 | * Normally the port should not accept data when the port is |
1760 | * closed. For generic serial ports, the host won't (shouldn't) | ||
1761 | * send data till the guest is connected. But this condition | ||
1756 | * can be reached when a console port is not yet connected (no | 1762 | * can be reached when a console port is not yet connected (no |
1757 | * tty is spawned) and the host sends out data to console | 1763 | * tty is spawned) and the other side sends out data over the |
1758 | * ports. For generic serial ports, the host won't | 1764 | * vring, or when a remote device starts sending data before |
1759 | * (shouldn't) send data till the guest is connected. | 1765 | * the ports are opened. |
1766 | * | ||
1767 | * A generic serial port will discard data if not connected, | ||
1768 | * while console ports and rproc-serial ports accepts data at | ||
1769 | * any time. rproc-serial is initiated with guest_connected to | ||
1770 | * false because port_fops_open expects this. Console ports are | ||
1771 | * hooked up with an HVC console and is initialized with | ||
1772 | * guest_connected to true. | ||
1760 | */ | 1773 | */ |
1761 | if (!port->guest_connected) | 1774 | |
1775 | if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev)) | ||
1762 | discard_port_data(port); | 1776 | discard_port_data(port); |
1763 | 1777 | ||
1764 | spin_unlock_irqrestore(&port->inbuf_lock, flags); | 1778 | spin_unlock_irqrestore(&port->inbuf_lock, flags); |
@@ -1986,10 +2000,12 @@ static int virtcons_probe(struct virtio_device *vdev) | |||
1986 | if (multiport) { | 2000 | if (multiport) { |
1987 | unsigned int nr_added_bufs; | 2001 | unsigned int nr_added_bufs; |
1988 | 2002 | ||
1989 | spin_lock_init(&portdev->cvq_lock); | 2003 | spin_lock_init(&portdev->c_ivq_lock); |
2004 | spin_lock_init(&portdev->c_ovq_lock); | ||
1990 | INIT_WORK(&portdev->control_work, &control_work_handler); | 2005 | INIT_WORK(&portdev->control_work, &control_work_handler); |
1991 | 2006 | ||
1992 | nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock); | 2007 | nr_added_bufs = fill_queue(portdev->c_ivq, |
2008 | &portdev->c_ivq_lock); | ||
1993 | if (!nr_added_bufs) { | 2009 | if (!nr_added_bufs) { |
1994 | dev_err(&vdev->dev, | 2010 | dev_err(&vdev->dev, |
1995 | "Error allocating buffers for control queue\n"); | 2011 | "Error allocating buffers for control queue\n"); |
@@ -2140,7 +2156,7 @@ static int virtcons_restore(struct virtio_device *vdev) | |||
2140 | return ret; | 2156 | return ret; |
2141 | 2157 | ||
2142 | if (use_multiport(portdev)) | 2158 | if (use_multiport(portdev)) |
2143 | fill_queue(portdev->c_ivq, &portdev->cvq_lock); | 2159 | fill_queue(portdev->c_ivq, &portdev->c_ivq_lock); |
2144 | 2160 | ||
2145 | list_for_each_entry(port, &portdev->ports, list) { | 2161 | list_for_each_entry(port, &portdev->ports, list) { |
2146 | port->in_vq = portdev->in_vqs[port->id]; | 2162 | port->in_vq = portdev->in_vqs[port->id]; |
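The virtio_console hunk splits the single cvq_lock into c_ivq_lock and c_ovq_lock, so control-message senders and the control work handler no longer serialize on one lock. A simplified pthread sketch of the one-lock-per-queue idea; nothing below is the virtio API:

#include <pthread.h>
#include <stdio.h>

/* One lock per control queue, mirroring c_ivq_lock / c_ovq_lock above. */
struct ctrl_queue {
    pthread_mutex_t lock;
    int pending;
};

static struct ctrl_queue c_ivq = { PTHREAD_MUTEX_INITIALIZER, 0 };
static struct ctrl_queue c_ovq = { PTHREAD_MUTEX_INITIALIZER, 0 };

/* Senders serialize only among themselves on the *out* queue... */
static void send_control_msg(void)
{
    pthread_mutex_lock(&c_ovq.lock);
    c_ovq.pending++;              /* add_buf + kick + wait would go here */
    c_ovq.pending--;
    pthread_mutex_unlock(&c_ovq.lock);
}

/* ...while the work handler only ever takes the *in* queue lock, so   */
/* control replies and control sends no longer contend with each other. */
static void control_work_handler(void)
{
    pthread_mutex_lock(&c_ivq.lock);
    while (c_ivq.pending) {
        pthread_mutex_unlock(&c_ivq.lock);
        /* handle_control_message() equivalent runs unlocked here */
        pthread_mutex_lock(&c_ivq.lock);
        c_ivq.pending--;
    }
    pthread_mutex_unlock(&c_ivq.lock);
}

int main(void)
{
    c_ivq.pending = 1;
    send_control_msg();
    control_work_handler();
    puts("done");
    return 0;
}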
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c index b5538bba7a10..09c63315e579 100644 --- a/drivers/clk/clk-vt8500.c +++ b/drivers/clk/clk-vt8500.c | |||
@@ -157,7 +157,7 @@ static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate, | |||
157 | divisor = parent_rate / rate; | 157 | divisor = parent_rate / rate; |
158 | 158 | ||
159 | /* If prate / rate would be decimal, incr the divisor */ | 159 | /* If prate / rate would be decimal, incr the divisor */ |
160 | if (rate * divisor < *prate) | 160 | if (rate * divisor < parent_rate) |
161 | divisor++; | 161 | divisor++; |
162 | 162 | ||
163 | if (divisor == cdev->div_mask + 1) | 163 | if (divisor == cdev->div_mask + 1) |
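The clk-vt8500 fix compares against the local parent_rate instead of the requested *prate, so the integer divisor is rounded up and the produced rate never overshoots the request. A standalone check that the conditional-increment form matches plain round-up division:

#include <stdio.h>

/* Round-up division, as the fixed code intends: never produce a rate  */
/* above the one that was asked for.                                   */
static unsigned long div_round_up(unsigned long a, unsigned long b)
{
    return (a + b - 1) / b;
}

static unsigned long pick_divisor(unsigned long parent_rate, unsigned long rate)
{
    unsigned long divisor = parent_rate / rate;

    /* If parent_rate / rate had a remainder, bump the divisor, exactly */
    /* like "if (rate * divisor < parent_rate) divisor++;" in the hunk. */
    if (rate * divisor < parent_rate)
        divisor++;
    return divisor;
}

int main(void)
{
    unsigned long parent = 25000000, rate = 7000000;

    printf("conditional: %lu, DIV_ROUND_UP: %lu\n",
           pick_divisor(parent, rate), div_round_up(parent, rate));
    return 0;  /* both print 4: 25 MHz / 4 = 6.25 MHz <= 7 MHz requested */
}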
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index 1e2de7305362..f873dcefe0de 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c | |||
@@ -703,7 +703,7 @@ static void tegra20_pll_init(void) | |||
703 | clks[pll_a_out0] = clk; | 703 | clks[pll_a_out0] = clk; |
704 | 704 | ||
705 | /* PLLE */ | 705 | /* PLLE */ |
706 | clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, NULL, | 706 | clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, pmc_base, |
707 | 0, 100000000, &pll_e_params, | 707 | 0, 100000000, &pll_e_params, |
708 | 0, pll_e_freq_table, NULL); | 708 | 0, pll_e_freq_table, NULL); |
709 | clk_register_clkdev(clk, "pll_e", NULL); | 709 | clk_register_clkdev(clk, "pll_e", NULL); |
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 937bc286591f..57a8774f0b4e 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c | |||
@@ -730,7 +730,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
730 | policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { | 730 | policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { |
731 | cpumask_copy(policy->cpus, perf->shared_cpu_map); | 731 | cpumask_copy(policy->cpus, perf->shared_cpu_map); |
732 | } | 732 | } |
733 | cpumask_copy(policy->related_cpus, perf->shared_cpu_map); | ||
734 | 733 | ||
735 | #ifdef CONFIG_SMP | 734 | #ifdef CONFIG_SMP |
736 | dmi_check_system(sw_any_bug_dmi_table); | 735 | dmi_check_system(sw_any_bug_dmi_table); |
@@ -742,7 +741,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
742 | if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { | 741 | if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { |
743 | cpumask_clear(policy->cpus); | 742 | cpumask_clear(policy->cpus); |
744 | cpumask_set_cpu(cpu, policy->cpus); | 743 | cpumask_set_cpu(cpu, policy->cpus); |
745 | cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu)); | ||
746 | policy->shared_type = CPUFREQ_SHARED_TYPE_HW; | 744 | policy->shared_type = CPUFREQ_SHARED_TYPE_HW; |
747 | pr_info_once(PFX "overriding BIOS provided _PSD data\n"); | 745 | pr_info_once(PFX "overriding BIOS provided _PSD data\n"); |
748 | } | 746 | } |
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 4e5b7fb8927c..37d23a0f8c56 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c | |||
@@ -178,10 +178,16 @@ static struct cpufreq_driver cpu0_cpufreq_driver = { | |||
178 | 178 | ||
179 | static int cpu0_cpufreq_probe(struct platform_device *pdev) | 179 | static int cpu0_cpufreq_probe(struct platform_device *pdev) |
180 | { | 180 | { |
181 | struct device_node *np; | 181 | struct device_node *np, *parent; |
182 | int ret; | 182 | int ret; |
183 | 183 | ||
184 | for_each_child_of_node(of_find_node_by_path("/cpus"), np) { | 184 | parent = of_find_node_by_path("/cpus"); |
185 | if (!parent) { | ||
186 | pr_err("failed to find OF /cpus\n"); | ||
187 | return -ENOENT; | ||
188 | } | ||
189 | |||
190 | for_each_child_of_node(parent, np) { | ||
185 | if (of_get_property(np, "operating-points", NULL)) | 191 | if (of_get_property(np, "operating-points", NULL)) |
186 | break; | 192 | break; |
187 | } | 193 | } |
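The cpufreq-cpu0 change guards against of_find_node_by_path() returning NULL before walking its children. A toy sketch of the same guard, using a made-up lookup table rather than the real OF API:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for a device-tree lookup: the names here are illustrative. */
struct node { const char *path; int nchildren; };

static struct node tree[] = { { "/cpus", 2 }, { "/memory", 0 } };

static struct node *find_node_by_path(const char *path)
{
    for (size_t i = 0; i < sizeof(tree) / sizeof(tree[0]); i++)
        if (!strcmp(tree[i].path, path))
            return &tree[i];
    return NULL;                       /* the case the fix guards against */
}

static int probe(void)
{
    struct node *parent = find_node_by_path("/cpus");

    if (!parent) {                     /* fail loudly instead of walking */
        fprintf(stderr, "failed to find /cpus\n");  /* a NULL parent     */
        return -ENOENT;
    }
    for (int i = 0; i < parent->nchildren; i++)
        ;                              /* per-CPU setup would go here */
    return 0;
}

int main(void) { return probe() ? 1 : 0; }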
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 46bde01eee62..cc4bd2f6838a 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h | |||
@@ -14,8 +14,8 @@ | |||
14 | * published by the Free Software Foundation. | 14 | * published by the Free Software Foundation. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #ifndef _CPUFREQ_GOVERNER_H | 17 | #ifndef _CPUFREQ_GOVERNOR_H |
18 | #define _CPUFREQ_GOVERNER_H | 18 | #define _CPUFREQ_GOVERNOR_H |
19 | 19 | ||
20 | #include <linux/cpufreq.h> | 20 | #include <linux/cpufreq.h> |
21 | #include <linux/kobject.h> | 21 | #include <linux/kobject.h> |
@@ -175,4 +175,4 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs, | |||
175 | unsigned int sampling_rate); | 175 | unsigned int sampling_rate); |
176 | int cpufreq_governor_dbs(struct dbs_data *dbs_data, | 176 | int cpufreq_governor_dbs(struct dbs_data *dbs_data, |
177 | struct cpufreq_policy *policy, unsigned int event); | 177 | struct cpufreq_policy *policy, unsigned int event); |
178 | #endif /* _CPUFREQ_GOVERNER_H */ | 178 | #endif /* _CPUFREQ_GOVERNOR_H */ |
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 2fd779eb1ed1..bfd6273fd873 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
@@ -180,15 +180,19 @@ static void cpufreq_stats_free_sysfs(unsigned int cpu) | |||
180 | { | 180 | { |
181 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | 181 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); |
182 | 182 | ||
183 | if (!cpufreq_frequency_get_table(cpu)) | 183 | if (!policy) |
184 | return; | 184 | return; |
185 | 185 | ||
186 | if (policy && !policy_is_shared(policy)) { | 186 | if (!cpufreq_frequency_get_table(cpu)) |
187 | goto put_ref; | ||
188 | |||
189 | if (!policy_is_shared(policy)) { | ||
187 | pr_debug("%s: Free sysfs stat\n", __func__); | 190 | pr_debug("%s: Free sysfs stat\n", __func__); |
188 | sysfs_remove_group(&policy->kobj, &stats_attr_group); | 191 | sysfs_remove_group(&policy->kobj, &stats_attr_group); |
189 | } | 192 | } |
190 | if (policy) | 193 | |
191 | cpufreq_cpu_put(policy); | 194 | put_ref: |
195 | cpufreq_cpu_put(policy); | ||
192 | } | 196 | } |
193 | 197 | ||
194 | static int cpufreq_stats_create_table(struct cpufreq_policy *policy, | 198 | static int cpufreq_stats_create_table(struct cpufreq_policy *policy, |
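The cpufreq_stats rework funnels every exit path through a single put_ref label so the reference taken by cpufreq_cpu_get() is dropped exactly once. A compact sketch of that get/put balancing (the policy struct here is invented for illustration):

#include <stdio.h>

struct policy { int refcount; int shared; int has_table; };

static struct policy *policy_get(struct policy *p)
{
    if (!p)
        return NULL;
    p->refcount++;
    return p;
}

static void policy_put(struct policy *p)
{
    p->refcount--;
}

static void free_sysfs(struct policy *p)
{
    if (!policy_get(p))
        return;                 /* nothing was taken, nothing to drop */

    if (!p->has_table)
        goto put_ref;           /* early exit still drops the reference */

    if (!p->shared)
        printf("removing per-policy stats group\n");

put_ref:
    policy_put(p);
}

int main(void)
{
    struct policy p = { 0, 0, 0 };

    free_sysfs(&p);
    printf("refcount after: %d (must be 0)\n", p.refcount);
    return p.refcount != 0;
}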
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index f6dd1e761129..6133ef5cf671 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -358,14 +358,14 @@ static void intel_pstate_sysfs_expose_params(void) | |||
358 | static int intel_pstate_min_pstate(void) | 358 | static int intel_pstate_min_pstate(void) |
359 | { | 359 | { |
360 | u64 value; | 360 | u64 value; |
361 | rdmsrl(0xCE, value); | 361 | rdmsrl(MSR_PLATFORM_INFO, value); |
362 | return (value >> 40) & 0xFF; | 362 | return (value >> 40) & 0xFF; |
363 | } | 363 | } |
364 | 364 | ||
365 | static int intel_pstate_max_pstate(void) | 365 | static int intel_pstate_max_pstate(void) |
366 | { | 366 | { |
367 | u64 value; | 367 | u64 value; |
368 | rdmsrl(0xCE, value); | 368 | rdmsrl(MSR_PLATFORM_INFO, value); |
369 | return (value >> 8) & 0xFF; | 369 | return (value >> 8) & 0xFF; |
370 | } | 370 | } |
371 | 371 | ||
@@ -373,7 +373,7 @@ static int intel_pstate_turbo_pstate(void) | |||
373 | { | 373 | { |
374 | u64 value; | 374 | u64 value; |
375 | int nont, ret; | 375 | int nont, ret; |
376 | rdmsrl(0x1AD, value); | 376 | rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); |
377 | nont = intel_pstate_max_pstate(); | 377 | nont = intel_pstate_max_pstate(); |
378 | ret = ((value) & 255); | 378 | ret = ((value) & 255); |
379 | if (ret <= nont) | 379 | if (ret <= nont) |
@@ -454,7 +454,7 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu, | |||
454 | sample->idletime_us * 100, | 454 | sample->idletime_us * 100, |
455 | sample->duration_us); | 455 | sample->duration_us); |
456 | core_pct = div64_u64(sample->aperf * 100, sample->mperf); | 456 | core_pct = div64_u64(sample->aperf * 100, sample->mperf); |
457 | sample->freq = cpu->pstate.turbo_pstate * core_pct * 1000; | 457 | sample->freq = cpu->pstate.max_pstate * core_pct * 1000; |
458 | 458 | ||
459 | sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct), | 459 | sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct), |
460 | 100); | 460 | 100); |
@@ -502,7 +502,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) | |||
502 | 502 | ||
503 | sample_time = cpu->pstate_policy->sample_rate_ms; | 503 | sample_time = cpu->pstate_policy->sample_rate_ms; |
504 | delay = msecs_to_jiffies(sample_time); | 504 | delay = msecs_to_jiffies(sample_time); |
505 | delay -= jiffies % delay; | ||
506 | mod_timer_pinned(&cpu->timer, jiffies + delay); | 505 | mod_timer_pinned(&cpu->timer, jiffies + delay); |
507 | } | 506 | } |
508 | 507 | ||
@@ -752,6 +751,29 @@ static struct cpufreq_driver intel_pstate_driver = { | |||
752 | 751 | ||
753 | static int __initdata no_load; | 752 | static int __initdata no_load; |
754 | 753 | ||
754 | static int intel_pstate_msrs_not_valid(void) | ||
755 | { | ||
756 | /* Check that all the MSRs we are using are valid. */ | ||
757 | u64 aperf, mperf, tmp; | ||
758 | |||
759 | rdmsrl(MSR_IA32_APERF, aperf); | ||
760 | rdmsrl(MSR_IA32_MPERF, mperf); | ||
761 | |||
762 | if (!intel_pstate_min_pstate() || | ||
763 | !intel_pstate_max_pstate() || | ||
764 | !intel_pstate_turbo_pstate()) | ||
765 | return -ENODEV; | ||
766 | |||
767 | rdmsrl(MSR_IA32_APERF, tmp); | ||
768 | if (!(tmp - aperf)) | ||
769 | return -ENODEV; | ||
770 | |||
771 | rdmsrl(MSR_IA32_MPERF, tmp); | ||
772 | if (!(tmp - mperf)) | ||
773 | return -ENODEV; | ||
774 | |||
775 | return 0; | ||
776 | } | ||
755 | static int __init intel_pstate_init(void) | 777 | static int __init intel_pstate_init(void) |
756 | { | 778 | { |
757 | int cpu, rc = 0; | 779 | int cpu, rc = 0; |
@@ -764,6 +786,9 @@ static int __init intel_pstate_init(void) | |||
764 | if (!id) | 786 | if (!id) |
765 | return -ENODEV; | 787 | return -ENODEV; |
766 | 788 | ||
789 | if (intel_pstate_msrs_not_valid()) | ||
790 | return -ENODEV; | ||
791 | |||
767 | pr_info("Intel P-state driver initializing.\n"); | 792 | pr_info("Intel P-state driver initializing.\n"); |
768 | 793 | ||
769 | all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus()); | 794 | all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus()); |
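intel_pstate_msrs_not_valid() above reads APERF/MPERF twice and refuses to load if the counters did not advance. A userspace analogue of the same read-twice sanity check, using CLOCK_MONOTONIC as a stand-in for the MSRs:

#include <stdio.h>
#include <time.h>

static unsigned long long read_counter(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);   /* stands in for rdmsrl(APERF) */
    return (unsigned long long)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static int counters_not_valid(void)
{
    unsigned long long first = read_counter(), second;

    /* burn a little time so a working counter is guaranteed to move */
    for (volatile int i = 0; i < 1000000; i++)
        ;
    second = read_counter();

    return (second - first) == 0;   /* no delta -> bail out, like -ENODEV */
}

int main(void)
{
    if (counters_not_valid()) {
        fprintf(stderr, "counter did not advance, not loading\n");
        return 1;
    }
    puts("counter advances, driver would continue probing");
    return 0;
}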
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index b2a0a0726a54..cf268b14ae9a 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -1650,11 +1650,7 @@ struct caam_alg_template { | |||
1650 | }; | 1650 | }; |
1651 | 1651 | ||
1652 | static struct caam_alg_template driver_algs[] = { | 1652 | static struct caam_alg_template driver_algs[] = { |
1653 | /* | 1653 | /* single-pass ipsec_esp descriptor */ |
1654 | * single-pass ipsec_esp descriptor | ||
1655 | * authencesn(*,*) is also registered, although not present | ||
1656 | * explicitly here. | ||
1657 | */ | ||
1658 | { | 1654 | { |
1659 | .name = "authenc(hmac(md5),cbc(aes))", | 1655 | .name = "authenc(hmac(md5),cbc(aes))", |
1660 | .driver_name = "authenc-hmac-md5-cbc-aes-caam", | 1656 | .driver_name = "authenc-hmac-md5-cbc-aes-caam", |
@@ -2217,9 +2213,7 @@ static int __init caam_algapi_init(void) | |||
2217 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | 2213 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { |
2218 | /* TODO: check if h/w supports alg */ | 2214 | /* TODO: check if h/w supports alg */ |
2219 | struct caam_crypto_alg *t_alg; | 2215 | struct caam_crypto_alg *t_alg; |
2220 | bool done = false; | ||
2221 | 2216 | ||
2222 | authencesn: | ||
2223 | t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); | 2217 | t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]); |
2224 | if (IS_ERR(t_alg)) { | 2218 | if (IS_ERR(t_alg)) { |
2225 | err = PTR_ERR(t_alg); | 2219 | err = PTR_ERR(t_alg); |
@@ -2233,25 +2227,8 @@ authencesn: | |||
2233 | dev_warn(ctrldev, "%s alg registration failed\n", | 2227 | dev_warn(ctrldev, "%s alg registration failed\n", |
2234 | t_alg->crypto_alg.cra_driver_name); | 2228 | t_alg->crypto_alg.cra_driver_name); |
2235 | kfree(t_alg); | 2229 | kfree(t_alg); |
2236 | } else { | 2230 | } else |
2237 | list_add_tail(&t_alg->entry, &priv->alg_list); | 2231 | list_add_tail(&t_alg->entry, &priv->alg_list); |
2238 | if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD && | ||
2239 | !memcmp(driver_algs[i].name, "authenc", 7) && | ||
2240 | !done) { | ||
2241 | char *name; | ||
2242 | |||
2243 | name = driver_algs[i].name; | ||
2244 | memmove(name + 10, name + 7, strlen(name) - 7); | ||
2245 | memcpy(name + 7, "esn", 3); | ||
2246 | |||
2247 | name = driver_algs[i].driver_name; | ||
2248 | memmove(name + 10, name + 7, strlen(name) - 7); | ||
2249 | memcpy(name + 7, "esn", 3); | ||
2250 | |||
2251 | done = true; | ||
2252 | goto authencesn; | ||
2253 | } | ||
2254 | } | ||
2255 | } | 2232 | } |
2256 | if (!list_empty(&priv->alg_list)) | 2233 | if (!list_empty(&priv->alg_list)) |
2257 | dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n", | 2234 | dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n", |
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h index cf15e7813801..762aeff626ac 100644 --- a/drivers/crypto/caam/compat.h +++ b/drivers/crypto/caam/compat.h | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/types.h> | 23 | #include <linux/types.h> |
24 | #include <linux/debugfs.h> | 24 | #include <linux/debugfs.h> |
25 | #include <linux/circ_buf.h> | 25 | #include <linux/circ_buf.h> |
26 | #include <linux/string.h> | ||
27 | #include <net/xfrm.h> | 26 | #include <net/xfrm.h> |
28 | 27 | ||
29 | #include <crypto/algapi.h> | 28 | #include <crypto/algapi.h> |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 09b184adf31b..5b2b5e61e4f9 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -38,7 +38,6 @@ | |||
38 | #include <linux/spinlock.h> | 38 | #include <linux/spinlock.h> |
39 | #include <linux/rtnetlink.h> | 39 | #include <linux/rtnetlink.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/string.h> | ||
42 | 41 | ||
43 | #include <crypto/algapi.h> | 42 | #include <crypto/algapi.h> |
44 | #include <crypto/aes.h> | 43 | #include <crypto/aes.h> |
@@ -1974,11 +1973,7 @@ struct talitos_alg_template { | |||
1974 | }; | 1973 | }; |
1975 | 1974 | ||
1976 | static struct talitos_alg_template driver_algs[] = { | 1975 | static struct talitos_alg_template driver_algs[] = { |
1977 | /* | 1976 | /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */ |
1978 | * AEAD algorithms. These use a single-pass ipsec_esp descriptor. | ||
1979 | * authencesn(*,*) is also registered, although not present | ||
1980 | * explicitly here. | ||
1981 | */ | ||
1982 | { .type = CRYPTO_ALG_TYPE_AEAD, | 1977 | { .type = CRYPTO_ALG_TYPE_AEAD, |
1983 | .alg.crypto = { | 1978 | .alg.crypto = { |
1984 | .cra_name = "authenc(hmac(sha1),cbc(aes))", | 1979 | .cra_name = "authenc(hmac(sha1),cbc(aes))", |
@@ -2820,9 +2815,7 @@ static int talitos_probe(struct platform_device *ofdev) | |||
2820 | if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { | 2815 | if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { |
2821 | struct talitos_crypto_alg *t_alg; | 2816 | struct talitos_crypto_alg *t_alg; |
2822 | char *name = NULL; | 2817 | char *name = NULL; |
2823 | bool authenc = false; | ||
2824 | 2818 | ||
2825 | authencesn: | ||
2826 | t_alg = talitos_alg_alloc(dev, &driver_algs[i]); | 2819 | t_alg = talitos_alg_alloc(dev, &driver_algs[i]); |
2827 | if (IS_ERR(t_alg)) { | 2820 | if (IS_ERR(t_alg)) { |
2828 | err = PTR_ERR(t_alg); | 2821 | err = PTR_ERR(t_alg); |
@@ -2837,8 +2830,6 @@ authencesn: | |||
2837 | err = crypto_register_alg( | 2830 | err = crypto_register_alg( |
2838 | &t_alg->algt.alg.crypto); | 2831 | &t_alg->algt.alg.crypto); |
2839 | name = t_alg->algt.alg.crypto.cra_driver_name; | 2832 | name = t_alg->algt.alg.crypto.cra_driver_name; |
2840 | authenc = authenc ? !authenc : | ||
2841 | !(bool)memcmp(name, "authenc", 7); | ||
2842 | break; | 2833 | break; |
2843 | case CRYPTO_ALG_TYPE_AHASH: | 2834 | case CRYPTO_ALG_TYPE_AHASH: |
2844 | err = crypto_register_ahash( | 2835 | err = crypto_register_ahash( |
@@ -2851,25 +2842,8 @@ authencesn: | |||
2851 | dev_err(dev, "%s alg registration failed\n", | 2842 | dev_err(dev, "%s alg registration failed\n", |
2852 | name); | 2843 | name); |
2853 | kfree(t_alg); | 2844 | kfree(t_alg); |
2854 | } else { | 2845 | } else |
2855 | list_add_tail(&t_alg->entry, &priv->alg_list); | 2846 | list_add_tail(&t_alg->entry, &priv->alg_list); |
2856 | if (authenc) { | ||
2857 | struct crypto_alg *alg = | ||
2858 | &driver_algs[i].alg.crypto; | ||
2859 | |||
2860 | name = alg->cra_name; | ||
2861 | memmove(name + 10, name + 7, | ||
2862 | strlen(name) - 7); | ||
2863 | memcpy(name + 7, "esn", 3); | ||
2864 | |||
2865 | name = alg->cra_driver_name; | ||
2866 | memmove(name + 10, name + 7, | ||
2867 | strlen(name) - 7); | ||
2868 | memcpy(name + 7, "esn", 3); | ||
2869 | |||
2870 | goto authencesn; | ||
2871 | } | ||
2872 | } | ||
2873 | } | 2847 | } |
2874 | } | 2848 | } |
2875 | if (!list_empty(&priv->alg_list)) | 2849 | if (!list_empty(&priv->alg_list)) |
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index 8bc5fef07e7a..22c9063e0120 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c | |||
@@ -1750,7 +1750,7 @@ static struct platform_driver cryp_driver = { | |||
1750 | .shutdown = ux500_cryp_shutdown, | 1750 | .shutdown = ux500_cryp_shutdown, |
1751 | .driver = { | 1751 | .driver = { |
1752 | .owner = THIS_MODULE, | 1752 | .owner = THIS_MODULE, |
1753 | .name = "cryp1" | 1753 | .name = "cryp1", |
1754 | .pm = &ux500_cryp_pm, | 1754 | .pm = &ux500_cryp_pm, |
1755 | } | 1755 | } |
1756 | }; | 1756 | }; |
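The ux500 cryp fix is the comma that was missing after .name = "cryp1"; without it the string literal runs straight into the .pm designator and the initializer no longer compiles. A small example of the corrected designated-initializer shape (the types here are invented):

#include <stdio.h>

struct pm_ops { int (*suspend)(void); };
struct drv    { const char *name; const struct pm_ops *pm; };

static int my_suspend(void) { return 0; }

static const struct pm_ops my_pm = {
    .suspend = my_suspend,
};

/* Every designated initializer ends with a comma; dropping the one     */
/* after .name (as the broken line did) is a compile error, not a       */
/* silently wrong driver.                                               */
static const struct drv my_drv = {
    .name = "cryp1",
    .pm   = &my_pm,
};

int main(void)
{
    printf("%s has pm ops: %s\n", my_drv.name, my_drv.pm ? "yes" : "no");
    return 0;
}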
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 80b69971cf28..aeaea32bcfda 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -83,6 +83,7 @@ config INTEL_IOP_ADMA | |||
83 | 83 | ||
84 | config DW_DMAC | 84 | config DW_DMAC |
85 | tristate "Synopsys DesignWare AHB DMA support" | 85 | tristate "Synopsys DesignWare AHB DMA support" |
86 | depends on GENERIC_HARDIRQS | ||
86 | select DMA_ENGINE | 87 | select DMA_ENGINE |
87 | default y if CPU_AT32AP7000 | 88 | default y if CPU_AT32AP7000 |
88 | help | 89 | help |
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index c599558faeda..43a5329d4483 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c | |||
@@ -1001,6 +1001,13 @@ static inline void convert_burst(u32 *maxburst) | |||
1001 | *maxburst = 0; | 1001 | *maxburst = 0; |
1002 | } | 1002 | } |
1003 | 1003 | ||
1004 | static inline void convert_slave_id(struct dw_dma_chan *dwc) | ||
1005 | { | ||
1006 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | ||
1007 | |||
1008 | dwc->dma_sconfig.slave_id -= dw->request_line_base; | ||
1009 | } | ||
1010 | |||
1004 | static int | 1011 | static int |
1005 | set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | 1012 | set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) |
1006 | { | 1013 | { |
@@ -1015,6 +1022,7 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) | |||
1015 | 1022 | ||
1016 | convert_burst(&dwc->dma_sconfig.src_maxburst); | 1023 | convert_burst(&dwc->dma_sconfig.src_maxburst); |
1017 | convert_burst(&dwc->dma_sconfig.dst_maxburst); | 1024 | convert_burst(&dwc->dma_sconfig.dst_maxburst); |
1025 | convert_slave_id(dwc); | ||
1018 | 1026 | ||
1019 | return 0; | 1027 | return 0; |
1020 | } | 1028 | } |
@@ -1276,9 +1284,9 @@ static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec, | |||
1276 | if (dma_spec->args_count != 3) | 1284 | if (dma_spec->args_count != 3) |
1277 | return NULL; | 1285 | return NULL; |
1278 | 1286 | ||
1279 | fargs.req = be32_to_cpup(dma_spec->args+0); | 1287 | fargs.req = dma_spec->args[0]; |
1280 | fargs.src = be32_to_cpup(dma_spec->args+1); | 1288 | fargs.src = dma_spec->args[1]; |
1281 | fargs.dst = be32_to_cpup(dma_spec->args+2); | 1289 | fargs.dst = dma_spec->args[2]; |
1282 | 1290 | ||
1283 | if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS || | 1291 | if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS || |
1284 | fargs.src >= dw->nr_masters || | 1292 | fargs.src >= dw->nr_masters || |
@@ -1628,6 +1636,7 @@ dw_dma_parse_dt(struct platform_device *pdev) | |||
1628 | 1636 | ||
1629 | static int dw_probe(struct platform_device *pdev) | 1637 | static int dw_probe(struct platform_device *pdev) |
1630 | { | 1638 | { |
1639 | const struct platform_device_id *match; | ||
1631 | struct dw_dma_platform_data *pdata; | 1640 | struct dw_dma_platform_data *pdata; |
1632 | struct resource *io; | 1641 | struct resource *io; |
1633 | struct dw_dma *dw; | 1642 | struct dw_dma *dw; |
@@ -1711,6 +1720,11 @@ static int dw_probe(struct platform_device *pdev) | |||
1711 | memcpy(dw->data_width, pdata->data_width, 4); | 1720 | memcpy(dw->data_width, pdata->data_width, 4); |
1712 | } | 1721 | } |
1713 | 1722 | ||
1723 | /* Get the base request line if set */ | ||
1724 | match = platform_get_device_id(pdev); | ||
1725 | if (match) | ||
1726 | dw->request_line_base = (unsigned int)match->driver_data; | ||
1727 | |||
1714 | /* Calculate all channel mask before DMA setup */ | 1728 | /* Calculate all channel mask before DMA setup */ |
1715 | dw->all_chan_mask = (1 << nr_channels) - 1; | 1729 | dw->all_chan_mask = (1 << nr_channels) - 1; |
1716 | 1730 | ||
@@ -1906,7 +1920,8 @@ MODULE_DEVICE_TABLE(of, dw_dma_id_table); | |||
1906 | #endif | 1920 | #endif |
1907 | 1921 | ||
1908 | static const struct platform_device_id dw_dma_ids[] = { | 1922 | static const struct platform_device_id dw_dma_ids[] = { |
1909 | { "INTL9C60", 0 }, | 1923 | /* Name, Request Line Base */ |
1924 | { "INTL9C60", (kernel_ulong_t)16 }, | ||
1910 | { } | 1925 | { } |
1911 | }; | 1926 | }; |
1912 | 1927 | ||
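The dw_dmac changes stash a per-device request-line base in the platform_device_id driver_data and rebase slave_id once, in convert_slave_id(). A sketch of the same table-plus-rebase idea in plain C; the structs below are illustrative, not the dmaengine API:

#include <stdio.h>
#include <string.h>

struct device_id { const char *name; unsigned long driver_data; };

static const struct device_id ids[] = {
    { "INTL9C60", 16 },        /* request lines start at 16 on this part */
    { NULL, 0 },
};

static unsigned long request_line_base(const char *name)
{
    for (const struct device_id *id = ids; id->name; id++)
        if (!strcmp(id->name, name))
            return id->driver_data;
    return 0;                   /* no entry: ids are already zero-based */
}

int main(void)
{
    unsigned int slave_id = 21;                 /* as written in firmware */
    unsigned long base = request_line_base("INTL9C60");

    slave_id -= base;                           /* convert_slave_id()     */
    printf("hardware request line: %u\n", slave_id);   /* prints 5        */
    return 0;
}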
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index cf0ce5c77d60..4d02c3669b75 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h | |||
@@ -247,6 +247,7 @@ struct dw_dma { | |||
247 | /* hardware configuration */ | 247 | /* hardware configuration */ |
248 | unsigned char nr_masters; | 248 | unsigned char nr_masters; |
249 | unsigned char data_width[4]; | 249 | unsigned char data_width[4]; |
250 | unsigned int request_line_base; | ||
250 | 251 | ||
251 | struct dw_dma_chan chan[0]; | 252 | struct dw_dma_chan chan[0]; |
252 | }; | 253 | }; |
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index c4b4fd2acc42..08b43bf37158 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c | |||
@@ -276,12 +276,20 @@ static void omap_dma_issue_pending(struct dma_chan *chan) | |||
276 | 276 | ||
277 | spin_lock_irqsave(&c->vc.lock, flags); | 277 | spin_lock_irqsave(&c->vc.lock, flags); |
278 | if (vchan_issue_pending(&c->vc) && !c->desc) { | 278 | if (vchan_issue_pending(&c->vc) && !c->desc) { |
279 | struct omap_dmadev *d = to_omap_dma_dev(chan->device); | 279 | /* |
280 | spin_lock(&d->lock); | 280 | * c->cyclic is used only by audio and in this case the DMA need |
281 | if (list_empty(&c->node)) | 281 | * to be started without delay. |
282 | list_add_tail(&c->node, &d->pending); | 282 | */ |
283 | spin_unlock(&d->lock); | 283 | if (!c->cyclic) { |
284 | tasklet_schedule(&d->task); | 284 | struct omap_dmadev *d = to_omap_dma_dev(chan->device); |
285 | spin_lock(&d->lock); | ||
286 | if (list_empty(&c->node)) | ||
287 | list_add_tail(&c->node, &d->pending); | ||
288 | spin_unlock(&d->lock); | ||
289 | tasklet_schedule(&d->task); | ||
290 | } else { | ||
291 | omap_dma_start_desc(c); | ||
292 | } | ||
285 | } | 293 | } |
286 | spin_unlock_irqrestore(&c->vc.lock, flags); | 294 | spin_unlock_irqrestore(&c->vc.lock, flags); |
287 | } | 295 | } |
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 718153122759..5dbc5946c4c3 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -2882,7 +2882,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2882 | { | 2882 | { |
2883 | struct dma_pl330_platdata *pdat; | 2883 | struct dma_pl330_platdata *pdat; |
2884 | struct dma_pl330_dmac *pdmac; | 2884 | struct dma_pl330_dmac *pdmac; |
2885 | struct dma_pl330_chan *pch; | 2885 | struct dma_pl330_chan *pch, *_p; |
2886 | struct pl330_info *pi; | 2886 | struct pl330_info *pi; |
2887 | struct dma_device *pd; | 2887 | struct dma_device *pd; |
2888 | struct resource *res; | 2888 | struct resource *res; |
@@ -2984,7 +2984,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2984 | ret = dma_async_device_register(pd); | 2984 | ret = dma_async_device_register(pd); |
2985 | if (ret) { | 2985 | if (ret) { |
2986 | dev_err(&adev->dev, "unable to register DMAC\n"); | 2986 | dev_err(&adev->dev, "unable to register DMAC\n"); |
2987 | goto probe_err2; | 2987 | goto probe_err3; |
2988 | } | ||
2989 | |||
2990 | if (adev->dev.of_node) { | ||
2991 | ret = of_dma_controller_register(adev->dev.of_node, | ||
2992 | of_dma_pl330_xlate, pdmac); | ||
2993 | if (ret) { | ||
2994 | dev_err(&adev->dev, | ||
2995 | "unable to register DMA to the generic DT DMA helpers\n"); | ||
2996 | } | ||
2988 | } | 2997 | } |
2989 | 2998 | ||
2990 | dev_info(&adev->dev, | 2999 | dev_info(&adev->dev, |
@@ -2995,16 +3004,21 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) | |||
2995 | pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, | 3004 | pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, |
2996 | pi->pcfg.num_peri, pi->pcfg.num_events); | 3005 | pi->pcfg.num_peri, pi->pcfg.num_events); |
2997 | 3006 | ||
2998 | ret = of_dma_controller_register(adev->dev.of_node, | ||
2999 | of_dma_pl330_xlate, pdmac); | ||
3000 | if (ret) { | ||
3001 | dev_err(&adev->dev, | ||
3002 | "unable to register DMA to the generic DT DMA helpers\n"); | ||
3003 | goto probe_err2; | ||
3004 | } | ||
3005 | |||
3006 | return 0; | 3007 | return 0; |
3008 | probe_err3: | ||
3009 | amba_set_drvdata(adev, NULL); | ||
3007 | 3010 | ||
3011 | /* Idle the DMAC */ | ||
3012 | list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, | ||
3013 | chan.device_node) { | ||
3014 | |||
3015 | /* Remove the channel */ | ||
3016 | list_del(&pch->chan.device_node); | ||
3017 | |||
3018 | /* Flush the channel */ | ||
3019 | pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); | ||
3020 | pl330_free_chan_resources(&pch->chan); | ||
3021 | } | ||
3008 | probe_err2: | 3022 | probe_err2: |
3009 | pl330_del(pi); | 3023 | pl330_del(pi); |
3010 | probe_err1: | 3024 | probe_err1: |
@@ -3023,8 +3037,10 @@ static int pl330_remove(struct amba_device *adev) | |||
3023 | if (!pdmac) | 3037 | if (!pdmac) |
3024 | return 0; | 3038 | return 0; |
3025 | 3039 | ||
3026 | of_dma_controller_free(adev->dev.of_node); | 3040 | if (adev->dev.of_node) |
3041 | of_dma_controller_free(adev->dev.of_node); | ||
3027 | 3042 | ||
3043 | dma_async_device_unregister(&pdmac->ddma); | ||
3028 | amba_set_drvdata(adev, NULL); | 3044 | amba_set_drvdata(adev, NULL); |
3029 | 3045 | ||
3030 | /* Idle the DMAC */ | 3046 | /* Idle the DMAC */ |
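The new probe_err3 label in pl330_probe() unwinds the already-registered channels before falling through to the earlier cleanup labels, i.e. teardown runs in the exact reverse order of setup. A minimal example of that goto-ladder pattern:

#include <stdio.h>
#include <stdlib.h>

static void *setup_a(void) { return malloc(1); }
static void *setup_b(void) { return malloc(1); }
static int   setup_c(void) { return -1; }      /* pretend this stage fails */

static int probe(void)
{
    void *a, *b;
    int err;

    a = setup_a();
    if (!a)
        return -1;

    b = setup_b();
    if (!b) {
        err = -1;
        goto err_a;
    }

    err = setup_c();
    if (err)
        goto err_b;             /* newest stage first... */

    return 0;

err_b:
    free(b);                    /* ...then the earlier ones, in order */
err_a:
    free(a);
    return err;
}

int main(void)
{
    printf("probe() = %d\n", probe());
    return 0;
}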
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 910b0116c128..e1d13c463c90 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c | |||
@@ -2048,12 +2048,18 @@ static int init_csrows(struct mem_ctl_info *mci) | |||
2048 | edac_dbg(1, "MC node: %d, csrow: %d\n", | 2048 | edac_dbg(1, "MC node: %d, csrow: %d\n", |
2049 | pvt->mc_node_id, i); | 2049 | pvt->mc_node_id, i); |
2050 | 2050 | ||
2051 | if (row_dct0) | 2051 | if (row_dct0) { |
2052 | nr_pages = amd64_csrow_nr_pages(pvt, 0, i); | 2052 | nr_pages = amd64_csrow_nr_pages(pvt, 0, i); |
2053 | csrow->channels[0]->dimm->nr_pages = nr_pages; | ||
2054 | } | ||
2053 | 2055 | ||
2054 | /* K8 has only one DCT */ | 2056 | /* K8 has only one DCT */ |
2055 | if (boot_cpu_data.x86 != 0xf && row_dct1) | 2057 | if (boot_cpu_data.x86 != 0xf && row_dct1) { |
2056 | nr_pages += amd64_csrow_nr_pages(pvt, 1, i); | 2058 | int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i); |
2059 | |||
2060 | csrow->channels[1]->dimm->nr_pages = row_dct1_pages; | ||
2061 | nr_pages += row_dct1_pages; | ||
2062 | } | ||
2057 | 2063 | ||
2058 | mtype = amd64_determine_memory_type(pvt, i); | 2064 | mtype = amd64_determine_memory_type(pvt, i); |
2059 | 2065 | ||
@@ -2072,9 +2078,7 @@ static int init_csrows(struct mem_ctl_info *mci) | |||
2072 | dimm = csrow->channels[j]->dimm; | 2078 | dimm = csrow->channels[j]->dimm; |
2073 | dimm->mtype = mtype; | 2079 | dimm->mtype = mtype; |
2074 | dimm->edac_mode = edac_mode; | 2080 | dimm->edac_mode = edac_mode; |
2075 | dimm->nr_pages = nr_pages; | ||
2076 | } | 2081 | } |
2077 | csrow->nr_pages = nr_pages; | ||
2078 | } | 2082 | } |
2079 | 2083 | ||
2080 | return empty; | 2084 | return empty; |
@@ -2419,7 +2423,6 @@ static int amd64_init_one_instance(struct pci_dev *F2) | |||
2419 | 2423 | ||
2420 | mci->pvt_info = pvt; | 2424 | mci->pvt_info = pvt; |
2421 | mci->pdev = &pvt->F2->dev; | 2425 | mci->pdev = &pvt->F2->dev; |
2422 | mci->csbased = 1; | ||
2423 | 2426 | ||
2424 | setup_mci_misc_attrs(mci, fam_type); | 2427 | setup_mci_misc_attrs(mci, fam_type); |
2425 | 2428 | ||
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index cdb81aa73ab7..27e86d938262 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -86,7 +86,7 @@ static void edac_mc_dump_dimm(struct dimm_info *dimm, int number) | |||
86 | edac_dimm_info_location(dimm, location, sizeof(location)); | 86 | edac_dimm_info_location(dimm, location, sizeof(location)); |
87 | 87 | ||
88 | edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n", | 88 | edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n", |
89 | dimm->mci->mem_is_per_rank ? "rank" : "dimm", | 89 | dimm->mci->csbased ? "rank" : "dimm", |
90 | number, location, dimm->csrow, dimm->cschannel); | 90 | number, location, dimm->csrow, dimm->cschannel); |
91 | edac_dbg(4, " dimm = %p\n", dimm); | 91 | edac_dbg(4, " dimm = %p\n", dimm); |
92 | edac_dbg(4, " dimm->label = '%s'\n", dimm->label); | 92 | edac_dbg(4, " dimm->label = '%s'\n", dimm->label); |
@@ -341,7 +341,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, | |||
341 | memcpy(mci->layers, layers, sizeof(*layer) * n_layers); | 341 | memcpy(mci->layers, layers, sizeof(*layer) * n_layers); |
342 | mci->nr_csrows = tot_csrows; | 342 | mci->nr_csrows = tot_csrows; |
343 | mci->num_cschannel = tot_channels; | 343 | mci->num_cschannel = tot_channels; |
344 | mci->mem_is_per_rank = per_rank; | 344 | mci->csbased = per_rank; |
345 | 345 | ||
346 | /* | 346 | /* |
347 | * Alocate and fill the csrow/channels structs | 347 | * Alocate and fill the csrow/channels structs |
@@ -1235,7 +1235,7 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type, | |||
1235 | * incrementing the compat API counters | 1235 | * incrementing the compat API counters |
1236 | */ | 1236 | */ |
1237 | edac_dbg(4, "%s csrows map: (%d,%d)\n", | 1237 | edac_dbg(4, "%s csrows map: (%d,%d)\n", |
1238 | mci->mem_is_per_rank ? "rank" : "dimm", | 1238 | mci->csbased ? "rank" : "dimm", |
1239 | dimm->csrow, dimm->cschannel); | 1239 | dimm->csrow, dimm->cschannel); |
1240 | if (row == -1) | 1240 | if (row == -1) |
1241 | row = dimm->csrow; | 1241 | row = dimm->csrow; |
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 4f4b6137d74e..5899a76eec3b 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c | |||
@@ -143,7 +143,7 @@ static const char *edac_caps[] = { | |||
143 | * and the per-dimm/per-rank one | 143 | * and the per-dimm/per-rank one |
144 | */ | 144 | */ |
145 | #define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \ | 145 | #define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \ |
146 | struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store) | 146 | static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store) |
147 | 147 | ||
148 | struct dev_ch_attribute { | 148 | struct dev_ch_attribute { |
149 | struct device_attribute attr; | 149 | struct device_attribute attr; |
@@ -180,9 +180,6 @@ static ssize_t csrow_size_show(struct device *dev, | |||
180 | int i; | 180 | int i; |
181 | u32 nr_pages = 0; | 181 | u32 nr_pages = 0; |
182 | 182 | ||
183 | if (csrow->mci->csbased) | ||
184 | return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages)); | ||
185 | |||
186 | for (i = 0; i < csrow->nr_channels; i++) | 183 | for (i = 0; i < csrow->nr_channels; i++) |
187 | nr_pages += csrow->channels[i]->dimm->nr_pages; | 184 | nr_pages += csrow->channels[i]->dimm->nr_pages; |
188 | return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); | 185 | return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages)); |
@@ -612,7 +609,7 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci, | |||
612 | device_initialize(&dimm->dev); | 609 | device_initialize(&dimm->dev); |
613 | 610 | ||
614 | dimm->dev.parent = &mci->dev; | 611 | dimm->dev.parent = &mci->dev; |
615 | if (mci->mem_is_per_rank) | 612 | if (mci->csbased) |
616 | dev_set_name(&dimm->dev, "rank%d", index); | 613 | dev_set_name(&dimm->dev, "rank%d", index); |
617 | else | 614 | else |
618 | dev_set_name(&dimm->dev, "dimm%d", index); | 615 | dev_set_name(&dimm->dev, "dimm%d", index); |
@@ -778,14 +775,10 @@ static ssize_t mci_size_mb_show(struct device *dev, | |||
778 | for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) { | 775 | for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) { |
779 | struct csrow_info *csrow = mci->csrows[csrow_idx]; | 776 | struct csrow_info *csrow = mci->csrows[csrow_idx]; |
780 | 777 | ||
781 | if (csrow->mci->csbased) { | 778 | for (j = 0; j < csrow->nr_channels; j++) { |
782 | total_pages += csrow->nr_pages; | 779 | struct dimm_info *dimm = csrow->channels[j]->dimm; |
783 | } else { | ||
784 | for (j = 0; j < csrow->nr_channels; j++) { | ||
785 | struct dimm_info *dimm = csrow->channels[j]->dimm; | ||
786 | 780 | ||
787 | total_pages += dimm->nr_pages; | 781 | total_pages += dimm->nr_pages; |
788 | } | ||
789 | } | 782 | } |
790 | } | 783 | } |
791 | 784 | ||
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c index cdae207028a7..6c3fca97d346 100644 --- a/drivers/eisa/pci_eisa.c +++ b/drivers/eisa/pci_eisa.c | |||
@@ -19,10 +19,10 @@ | |||
19 | /* There is only *one* pci_eisa device per machine, right ? */ | 19 | /* There is only *one* pci_eisa device per machine, right ? */ |
20 | static struct eisa_root_device pci_eisa_root; | 20 | static struct eisa_root_device pci_eisa_root; |
21 | 21 | ||
22 | static int __init pci_eisa_init(struct pci_dev *pdev, | 22 | static int __init pci_eisa_init(struct pci_dev *pdev) |
23 | const struct pci_device_id *ent) | ||
24 | { | 23 | { |
25 | int rc; | 24 | int rc, i; |
25 | struct resource *res, *bus_res = NULL; | ||
26 | 26 | ||
27 | if ((rc = pci_enable_device (pdev))) { | 27 | if ((rc = pci_enable_device (pdev))) { |
28 | printk (KERN_ERR "pci_eisa : Could not enable device %s\n", | 28 | printk (KERN_ERR "pci_eisa : Could not enable device %s\n", |
@@ -30,9 +30,30 @@ static int __init pci_eisa_init(struct pci_dev *pdev, | |||
30 | return rc; | 30 | return rc; |
31 | } | 31 | } |
32 | 32 | ||
33 | /* | ||
34 | * The Intel 82375 PCI-EISA bridge is a subtractive-decode PCI | ||
35 | * device, so the resources available on EISA are the same as those | ||
36 | * available on the 82375 bus. This works the same as a PCI-PCI | ||
37 | * bridge in subtractive-decode mode (see pci_read_bridge_bases()). | ||
38 | * We assume other PCI-EISA bridges are similar. | ||
39 | * | ||
40 | * eisa_root_register() can only deal with a single io port resource, | ||
41 | * so we use the first valid io port resource. | ||
42 | */ | ||
43 | pci_bus_for_each_resource(pdev->bus, res, i) | ||
44 | if (res && (res->flags & IORESOURCE_IO)) { | ||
45 | bus_res = res; | ||
46 | break; | ||
47 | } | ||
48 | |||
49 | if (!bus_res) { | ||
50 | dev_err(&pdev->dev, "No resources available\n"); | ||
51 | return -1; | ||
52 | } | ||
53 | |||
33 | pci_eisa_root.dev = &pdev->dev; | 54 | pci_eisa_root.dev = &pdev->dev; |
34 | pci_eisa_root.res = pdev->bus->resource[0]; | 55 | pci_eisa_root.res = bus_res; |
35 | pci_eisa_root.bus_base_addr = pdev->bus->resource[0]->start; | 56 | pci_eisa_root.bus_base_addr = bus_res->start; |
36 | pci_eisa_root.slots = EISA_MAX_SLOTS; | 57 | pci_eisa_root.slots = EISA_MAX_SLOTS; |
37 | pci_eisa_root.dma_mask = pdev->dma_mask; | 58 | pci_eisa_root.dma_mask = pdev->dma_mask; |
38 | dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root); | 59 | dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root); |
@@ -45,22 +66,26 @@ static int __init pci_eisa_init(struct pci_dev *pdev, | |||
45 | return 0; | 66 | return 0; |
46 | } | 67 | } |
47 | 68 | ||
48 | static struct pci_device_id pci_eisa_pci_tbl[] = { | 69 | /* |
49 | { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 70 | * We have to call pci_eisa_init_early() before pnpacpi_init()/isapnp_init(). |
50 | PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 }, | 71 | * Otherwise pnp resource will get enabled early and could prevent eisa |
51 | { 0, } | 72 | * to be initialized. |
52 | }; | 73 | * Also need to make sure pci_eisa_init_early() is called after |
74 | * x86/pci_subsys_init(). | ||
75 | * So need to use subsys_initcall_sync with it. | ||
76 | */ | ||
77 | static int __init pci_eisa_init_early(void) | ||
78 | { | ||
79 | struct pci_dev *dev = NULL; | ||
80 | int ret; | ||
53 | 81 | ||
54 | static struct pci_driver __refdata pci_eisa_driver = { | 82 | for_each_pci_dev(dev) |
55 | .name = "pci_eisa", | 83 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_EISA) { |
56 | .id_table = pci_eisa_pci_tbl, | 84 | ret = pci_eisa_init(dev); |
57 | .probe = pci_eisa_init, | 85 | if (ret) |
58 | }; | 86 | return ret; |
87 | } | ||
59 | 88 | ||
60 | static int __init pci_eisa_init_module (void) | 89 | return 0; |
61 | { | ||
62 | return pci_register_driver (&pci_eisa_driver); | ||
63 | } | 90 | } |
64 | 91 | subsys_initcall_sync(pci_eisa_init_early); | |
65 | device_initcall(pci_eisa_init_module); | ||
66 | MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl); | ||
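
With the hunks above, pci_eisa_init() no longer trusts bus->resource[0]; it walks the bridge's bus resources and uses the first I/O port window, and the PCI driver registration is replaced by a subsys_initcall_sync() that scans for a device of class PCI_CLASS_BRIDGE_EISA. The resource-selection step can be sketched standalone as below (struct resource and the IORESOURCE_IO value here are simplified stand-ins for illustration):

#include <stdio.h>
#include <stddef.h>

#define IORESOURCE_IO 0x00000100	/* assumed flag value for this sketch */

struct resource {
	unsigned long start;
	unsigned long flags;
};

/* Return the first I/O port window of a bus, or NULL if none exists. */
static struct resource *first_io_window(struct resource *res, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		if (res[i].flags & IORESOURCE_IO)
			return &res[i];
	return NULL;
}

int main(void)
{
	struct resource bus_res[] = {
		{ 0xfed00000, 0x00000200 },	/* a memory window: skipped */
		{ 0x0000c000, IORESOURCE_IO },	/* first I/O window: chosen */
	};
	struct resource *io = first_io_window(bus_res, 2);

	if (io)
		printf("EISA I/O base 0x%lx\n", io->start);
	else
		printf("no I/O window on this bus\n");
	return 0;
}
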
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c index b70e3815c459..8f3c947b0029 100644 --- a/drivers/extcon/extcon-max77693.c +++ b/drivers/extcon/extcon-max77693.c | |||
@@ -32,6 +32,38 @@ | |||
32 | #define DEV_NAME "max77693-muic" | 32 | #define DEV_NAME "max77693-muic" |
33 | #define DELAY_MS_DEFAULT 20000 /* unit: millisecond */ | 33 | #define DELAY_MS_DEFAULT 20000 /* unit: millisecond */ |
34 | 34 | ||
35 | /* | ||
36 | * Default value of MAX77693 register to bring up MUIC device. | ||
37 | * If the user doesn't set initial values for the MUIC device through platform data, ||
38 | * the extcon-max77693 driver uses 'default_init_data' to bring up basic operation ||
39 | * of the MAX77693 MUIC device. ||
40 | */ | ||
41 | struct max77693_reg_data default_init_data[] = { | ||
42 | { | ||
43 | /* STATUS2 - [3]ChgDetRun */ | ||
44 | .addr = MAX77693_MUIC_REG_STATUS2, | ||
45 | .data = STATUS2_CHGDETRUN_MASK, | ||
46 | }, { | ||
47 | /* INTMASK1 - Unmask [3]ADC1KM,[0]ADCM */ | ||
48 | .addr = MAX77693_MUIC_REG_INTMASK1, | ||
49 | .data = INTMASK1_ADC1K_MASK | ||
50 | | INTMASK1_ADC_MASK, | ||
51 | }, { | ||
52 | /* INTMASK2 - Unmask [0]ChgTypM */ | ||
53 | .addr = MAX77693_MUIC_REG_INTMASK2, | ||
54 | .data = INTMASK2_CHGTYP_MASK, | ||
55 | }, { | ||
56 | /* INTMASK3 - Mask all of interrupts */ | ||
57 | .addr = MAX77693_MUIC_REG_INTMASK3, | ||
58 | .data = 0x0, | ||
59 | }, { | ||
60 | /* CDETCTRL2 */ | ||
61 | .addr = MAX77693_MUIC_REG_CDETCTRL2, | ||
62 | .data = CDETCTRL2_VIDRMEN_MASK | ||
63 | | CDETCTRL2_DXOVPEN_MASK, | ||
64 | }, | ||
65 | }; | ||
66 | |||
35 | enum max77693_muic_adc_debounce_time { | 67 | enum max77693_muic_adc_debounce_time { |
36 | ADC_DEBOUNCE_TIME_5MS = 0, | 68 | ADC_DEBOUNCE_TIME_5MS = 0, |
37 | ADC_DEBOUNCE_TIME_10MS, | 69 | ADC_DEBOUNCE_TIME_10MS, |
@@ -1045,8 +1077,9 @@ static int max77693_muic_probe(struct platform_device *pdev) | |||
1045 | { | 1077 | { |
1046 | struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent); | 1078 | struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent); |
1047 | struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev); | 1079 | struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev); |
1048 | struct max77693_muic_platform_data *muic_pdata = pdata->muic_data; | ||
1049 | struct max77693_muic_info *info; | 1080 | struct max77693_muic_info *info; |
1081 | struct max77693_reg_data *init_data; | ||
1082 | int num_init_data; | ||
1050 | int delay_jiffies; | 1083 | int delay_jiffies; |
1051 | int ret; | 1084 | int ret; |
1052 | int i; | 1085 | int i; |
@@ -1145,15 +1178,25 @@ static int max77693_muic_probe(struct platform_device *pdev) | |||
1145 | goto err_irq; | 1178 | goto err_irq; |
1146 | } | 1179 | } |
1147 | 1180 | ||
1148 | /* Initialize MUIC register by using platform data */ | 1181 | |
1149 | for (i = 0 ; i < muic_pdata->num_init_data ; i++) { | 1182 | /* Initialize MUIC register by using platform data or default data */ |
1150 | enum max77693_irq_source irq_src = MAX77693_IRQ_GROUP_NR; | 1183 | if (pdata->muic_data) { |
1184 | init_data = pdata->muic_data->init_data; | ||
1185 | num_init_data = pdata->muic_data->num_init_data; | ||
1186 | } else { | ||
1187 | init_data = default_init_data; | ||
1188 | num_init_data = ARRAY_SIZE(default_init_data); | ||
1189 | } | ||
1190 | |||
1191 | for (i = 0 ; i < num_init_data ; i++) { | ||
1192 | enum max77693_irq_source irq_src | ||
1193 | = MAX77693_IRQ_GROUP_NR; | ||
1151 | 1194 | ||
1152 | max77693_write_reg(info->max77693->regmap_muic, | 1195 | max77693_write_reg(info->max77693->regmap_muic, |
1153 | muic_pdata->init_data[i].addr, | 1196 | init_data[i].addr, |
1154 | muic_pdata->init_data[i].data); | 1197 | init_data[i].data); |
1155 | 1198 | ||
1156 | switch (muic_pdata->init_data[i].addr) { | 1199 | switch (init_data[i].addr) { |
1157 | case MAX77693_MUIC_REG_INTMASK1: | 1200 | case MAX77693_MUIC_REG_INTMASK1: |
1158 | irq_src = MUIC_INT1; | 1201 | irq_src = MUIC_INT1; |
1159 | break; | 1202 | break; |
@@ -1167,22 +1210,40 @@ static int max77693_muic_probe(struct platform_device *pdev) | |||
1167 | 1210 | ||
1168 | if (irq_src < MAX77693_IRQ_GROUP_NR) | 1211 | if (irq_src < MAX77693_IRQ_GROUP_NR) |
1169 | info->max77693->irq_masks_cur[irq_src] | 1212 | info->max77693->irq_masks_cur[irq_src] |
1170 | = muic_pdata->init_data[i].data; | 1213 | = init_data[i].data; |
1171 | } | 1214 | } |
1172 | 1215 | ||
1173 | /* | 1216 | if (pdata->muic_data) { |
1174 | * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB | 1217 | struct max77693_muic_platform_data *muic_pdata = pdata->muic_data; |
1175 | * h/w path of COMP2/COMN1 on CONTROL1 register. | ||
1176 | */ | ||
1177 | if (muic_pdata->path_uart) | ||
1178 | info->path_uart = muic_pdata->path_uart; | ||
1179 | else | ||
1180 | info->path_uart = CONTROL1_SW_UART; | ||
1181 | 1218 | ||
1182 | if (muic_pdata->path_usb) | 1219 | /* |
1183 | info->path_usb = muic_pdata->path_usb; | 1220 | * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB |
1184 | else | 1221 | * h/w path of COMP2/COMN1 on CONTROL1 register. |
1222 | */ | ||
1223 | if (muic_pdata->path_uart) | ||
1224 | info->path_uart = muic_pdata->path_uart; | ||
1225 | else | ||
1226 | info->path_uart = CONTROL1_SW_UART; | ||
1227 | |||
1228 | if (muic_pdata->path_usb) | ||
1229 | info->path_usb = muic_pdata->path_usb; | ||
1230 | else | ||
1231 | info->path_usb = CONTROL1_SW_USB; | ||
1232 | |||
1233 | /* | ||
1234 | * Default delay time for detecting cable state | ||
1235 | * after certain time. | ||
1236 | */ | ||
1237 | if (muic_pdata->detcable_delay_ms) | ||
1238 | delay_jiffies = | ||
1239 | msecs_to_jiffies(muic_pdata->detcable_delay_ms); | ||
1240 | else | ||
1241 | delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); | ||
1242 | } else { | ||
1185 | info->path_usb = CONTROL1_SW_USB; | 1243 | info->path_usb = CONTROL1_SW_USB; |
1244 | info->path_uart = CONTROL1_SW_UART; | ||
1245 | delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); | ||
1246 | } | ||
1186 | 1247 | ||
1187 | /* Set initial path for UART */ | 1248 | /* Set initial path for UART */ |
1188 | max77693_muic_set_path(info, info->path_uart, true); | 1249 | max77693_muic_set_path(info, info->path_uart, true); |
@@ -1208,10 +1269,6 @@ static int max77693_muic_probe(struct platform_device *pdev) | |||
1208 | * driver should notify cable state to upper layer. | 1269 | * driver should notify cable state to upper layer. |
1209 | */ | 1270 | */ |
1210 | INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq); | 1271 | INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq); |
1211 | if (muic_pdata->detcable_delay_ms) | ||
1212 | delay_jiffies = msecs_to_jiffies(muic_pdata->detcable_delay_ms); | ||
1213 | else | ||
1214 | delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); | ||
1215 | schedule_delayed_work(&info->wq_detcable, delay_jiffies); | 1272 | schedule_delayed_work(&info->wq_detcable, delay_jiffies); |
1216 | 1273 | ||
1217 | return ret; | 1274 | return ret; |
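
The probe changes above make the driver usable without board platform data: register initialization falls back to the built-in default_init_data table, and the UART/USB paths and detection delay fall back to driver defaults. A standalone sketch of that select-table-then-program pattern (the types, register addresses and values below are made up for illustration):

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct reg_data { unsigned char addr, data; };

struct muic_pdata {
	struct reg_data *init_data;
	int num_init_data;
};

/* Built-in fallback used when the board supplies no platform data. */
static struct reg_data default_init_data[] = {
	{ 0x04, 0x08 },		/* hypothetical STATUS2 setting */
	{ 0x07, 0x09 },		/* hypothetical INTMASK1 setting */
};

static void muic_init_regs(const struct muic_pdata *pdata)
{
	struct reg_data *init_data;
	int num, i;

	if (pdata) {
		init_data = pdata->init_data;
		num = pdata->num_init_data;
	} else {
		init_data = default_init_data;
		num = ARRAY_SIZE(default_init_data);
	}

	for (i = 0; i < num; i++)
		printf("write reg 0x%02x = 0x%02x\n",
		       init_data[i].addr, init_data[i].data);
}

int main(void)
{
	muic_init_regs(NULL);	/* no platform data: the defaults are used */
	return 0;
}
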
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c index e636d950ad6c..69641bcae325 100644 --- a/drivers/extcon/extcon-max8997.c +++ b/drivers/extcon/extcon-max8997.c | |||
@@ -712,29 +712,45 @@ static int max8997_muic_probe(struct platform_device *pdev) | |||
712 | goto err_irq; | 712 | goto err_irq; |
713 | } | 713 | } |
714 | 714 | ||
715 | /* Initialize registers according to platform data */ | ||
716 | if (pdata->muic_pdata) { | 715 | if (pdata->muic_pdata) { |
717 | struct max8997_muic_platform_data *mdata = info->muic_pdata; | 716 | struct max8997_muic_platform_data *muic_pdata |
718 | 717 | = pdata->muic_pdata; | |
719 | for (i = 0; i < mdata->num_init_data; i++) { | 718 | |
720 | max8997_write_reg(info->muic, mdata->init_data[i].addr, | 719 | /* Initialize registers according to platform data */ |
721 | mdata->init_data[i].data); | 720 | for (i = 0; i < muic_pdata->num_init_data; i++) { |
721 | max8997_write_reg(info->muic, | ||
722 | muic_pdata->init_data[i].addr, | ||
723 | muic_pdata->init_data[i].data); | ||
722 | } | 724 | } |
723 | } | ||
724 | 725 | ||
725 | /* | 726 | /* |
726 | * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB | 727 | * Default usb/uart path whether UART/USB or AUX_UART/AUX_USB |
727 | * h/w path of COMP2/COMN1 on CONTROL1 register. | 728 | * h/w path of COMP2/COMN1 on CONTROL1 register. |
728 | */ | 729 | */ |
729 | if (pdata->muic_pdata->path_uart) | 730 | if (muic_pdata->path_uart) |
730 | info->path_uart = pdata->muic_pdata->path_uart; | 731 | info->path_uart = muic_pdata->path_uart; |
731 | else | 732 | else |
732 | info->path_uart = CONTROL1_SW_UART; | 733 | info->path_uart = CONTROL1_SW_UART; |
733 | 734 | ||
734 | if (pdata->muic_pdata->path_usb) | 735 | if (muic_pdata->path_usb) |
735 | info->path_usb = pdata->muic_pdata->path_usb; | 736 | info->path_usb = muic_pdata->path_usb; |
736 | else | 737 | else |
738 | info->path_usb = CONTROL1_SW_USB; | ||
739 | |||
740 | /* | ||
741 | * Default delay time for detecting cable state | ||
742 | * after certain time. | ||
743 | */ | ||
744 | if (muic_pdata->detcable_delay_ms) | ||
745 | delay_jiffies = | ||
746 | msecs_to_jiffies(muic_pdata->detcable_delay_ms); | ||
747 | else | ||
748 | delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); | ||
749 | } else { | ||
750 | info->path_uart = CONTROL1_SW_UART; | ||
737 | info->path_usb = CONTROL1_SW_USB; | 751 | info->path_usb = CONTROL1_SW_USB; |
752 | delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); | ||
753 | } | ||
738 | 754 | ||
739 | /* Set initial path for UART */ | 755 | /* Set initial path for UART */ |
740 | max8997_muic_set_path(info, info->path_uart, true); | 756 | max8997_muic_set_path(info, info->path_uart, true); |
@@ -751,10 +767,6 @@ static int max8997_muic_probe(struct platform_device *pdev) | |||
751 | * driver should notify cable state to upper layer. | 767 | * driver should notify cable state to upper layer. |
752 | */ | 768 | */ |
753 | INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq); | 769 | INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq); |
754 | if (pdata->muic_pdata->detcable_delay_ms) | ||
755 | delay_jiffies = msecs_to_jiffies(pdata->muic_pdata->detcable_delay_ms); | ||
756 | else | ||
757 | delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT); | ||
758 | schedule_delayed_work(&info->wq_detcable, delay_jiffies); | 770 | schedule_delayed_work(&info->wq_detcable, delay_jiffies); |
759 | 771 | ||
760 | return 0; | 772 | return 0; |
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 9b00072a020f..42c759a4d047 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig | |||
@@ -53,6 +53,24 @@ config EFI_VARS | |||
53 | Subsequent efibootmgr releases may be found at: | 53 | Subsequent efibootmgr releases may be found at: |
54 | <http://linux.dell.com/efibootmgr> | 54 | <http://linux.dell.com/efibootmgr> |
55 | 55 | ||
56 | config EFI_VARS_PSTORE | ||
57 | bool "Register efivars backend for pstore" | ||
58 | depends on EFI_VARS && PSTORE | ||
59 | default y | ||
60 | help | ||
61 | Say Y here to enable use efivars as a backend to pstore. This | ||
62 | will allow writing console messages, crash dumps, or anything | ||
63 | else supported by pstore to EFI variables. | ||
64 | |||
65 | config EFI_VARS_PSTORE_DEFAULT_DISABLE | ||
66 | bool "Disable using efivars as a pstore backend by default" | ||
67 | depends on EFI_VARS_PSTORE | ||
68 | default n | ||
69 | help | ||
70 | Saying Y here will disable the use of efivars as a storage | ||
71 | backend for pstore by default. This setting can be overridden | ||
72 | using the efivars module's pstore_disable parameter. | ||
73 | |||
56 | config EFI_PCDP | 74 | config EFI_PCDP |
57 | bool "Console device selection via EFI PCDP or HCDP table" | 75 | bool "Console device selection via EFI PCDP or HCDP table" |
58 | depends on ACPI && EFI && IA64 | 76 | depends on ACPI && EFI && IA64 |
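
The new EFI_VARS_PSTORE_DEFAULT_DISABLE option only takes effect through the efivars change that follows, where IS_ENABLED() turns the Kconfig symbol into the boot-time default of the pstore_disable module parameter. The fragment below is a simplified userspace reimplementation of the IS_ENABLED() preprocessor trick, good enough to show the mechanism (it ignores the =m case handled by the real include/linux/kconfig.h):

#include <stdio.h>

/*
 * Evaluates to 1 when the named symbol is #defined to 1, and to 0
 * when it is not defined at all.
 */
#define __ARG_PLACEHOLDER_1		0,
#define __take_second_arg(__ignored, val, ...)	val
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define IS_ENABLED(option)		___is_defined(option)

#define CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE 1	/* as if "default y" was chosen */
/* CONFIG_EFI_VARS_PSTORE_NEVER_SET is deliberately left undefined */

static int pstore_disable = IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE);

int main(void)
{
	printf("pstore_disable defaults to %d\n", pstore_disable);			/* 1 */
	printf("undefined option yields    %d\n",
	       IS_ENABLED(CONFIG_EFI_VARS_PSTORE_NEVER_SET));				/* 0 */
	return 0;
}
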
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index fe62aa392239..7acafb80fd4c 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c | |||
@@ -103,6 +103,11 @@ MODULE_VERSION(EFIVARS_VERSION); | |||
103 | */ | 103 | */ |
104 | #define GUID_LEN 36 | 104 | #define GUID_LEN 36 |
105 | 105 | ||
106 | static bool efivars_pstore_disable = | ||
107 | IS_ENABLED(CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE); | ||
108 | |||
109 | module_param_named(pstore_disable, efivars_pstore_disable, bool, 0644); | ||
110 | |||
106 | /* | 111 | /* |
107 | * The maximum size of VariableName + Data = 1024 | 112 | * The maximum size of VariableName + Data = 1024 |
108 | * Therefore, it's reasonable to save that much | 113 | * Therefore, it's reasonable to save that much |
@@ -165,6 +170,7 @@ efivar_create_sysfs_entry(struct efivars *efivars, | |||
165 | 170 | ||
166 | static void efivar_update_sysfs_entries(struct work_struct *); | 171 | static void efivar_update_sysfs_entries(struct work_struct *); |
167 | static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries); | 172 | static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries); |
173 | static bool efivar_wq_enabled = true; | ||
168 | 174 | ||
169 | /* Return the number of unicode characters in data */ | 175 | /* Return the number of unicode characters in data */ |
170 | static unsigned long | 176 | static unsigned long |
@@ -1309,9 +1315,7 @@ static const struct inode_operations efivarfs_dir_inode_operations = { | |||
1309 | .create = efivarfs_create, | 1315 | .create = efivarfs_create, |
1310 | }; | 1316 | }; |
1311 | 1317 | ||
1312 | static struct pstore_info efi_pstore_info; | 1318 | #ifdef CONFIG_EFI_VARS_PSTORE |
1313 | |||
1314 | #ifdef CONFIG_PSTORE | ||
1315 | 1319 | ||
1316 | static int efi_pstore_open(struct pstore_info *psi) | 1320 | static int efi_pstore_open(struct pstore_info *psi) |
1317 | { | 1321 | { |
@@ -1441,7 +1445,7 @@ static int efi_pstore_write(enum pstore_type_id type, | |||
1441 | 1445 | ||
1442 | spin_unlock_irqrestore(&efivars->lock, flags); | 1446 | spin_unlock_irqrestore(&efivars->lock, flags); |
1443 | 1447 | ||
1444 | if (reason == KMSG_DUMP_OOPS) | 1448 | if (reason == KMSG_DUMP_OOPS && efivar_wq_enabled) |
1445 | schedule_work(&efivar_work); | 1449 | schedule_work(&efivar_work); |
1446 | 1450 | ||
1447 | *id = part; | 1451 | *id = part; |
@@ -1514,38 +1518,6 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, | |||
1514 | 1518 | ||
1515 | return 0; | 1519 | return 0; |
1516 | } | 1520 | } |
1517 | #else | ||
1518 | static int efi_pstore_open(struct pstore_info *psi) | ||
1519 | { | ||
1520 | return 0; | ||
1521 | } | ||
1522 | |||
1523 | static int efi_pstore_close(struct pstore_info *psi) | ||
1524 | { | ||
1525 | return 0; | ||
1526 | } | ||
1527 | |||
1528 | static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type, int *count, | ||
1529 | struct timespec *timespec, | ||
1530 | char **buf, struct pstore_info *psi) | ||
1531 | { | ||
1532 | return -1; | ||
1533 | } | ||
1534 | |||
1535 | static int efi_pstore_write(enum pstore_type_id type, | ||
1536 | enum kmsg_dump_reason reason, u64 *id, | ||
1537 | unsigned int part, int count, size_t size, | ||
1538 | struct pstore_info *psi) | ||
1539 | { | ||
1540 | return 0; | ||
1541 | } | ||
1542 | |||
1543 | static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count, | ||
1544 | struct timespec time, struct pstore_info *psi) | ||
1545 | { | ||
1546 | return 0; | ||
1547 | } | ||
1548 | #endif | ||
1549 | 1521 | ||
1550 | static struct pstore_info efi_pstore_info = { | 1522 | static struct pstore_info efi_pstore_info = { |
1551 | .owner = THIS_MODULE, | 1523 | .owner = THIS_MODULE, |
@@ -1557,6 +1529,24 @@ static struct pstore_info efi_pstore_info = { | |||
1557 | .erase = efi_pstore_erase, | 1529 | .erase = efi_pstore_erase, |
1558 | }; | 1530 | }; |
1559 | 1531 | ||
1532 | static void efivar_pstore_register(struct efivars *efivars) | ||
1533 | { | ||
1534 | efivars->efi_pstore_info = efi_pstore_info; | ||
1535 | efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL); | ||
1536 | if (efivars->efi_pstore_info.buf) { | ||
1537 | efivars->efi_pstore_info.bufsize = 1024; | ||
1538 | efivars->efi_pstore_info.data = efivars; | ||
1539 | spin_lock_init(&efivars->efi_pstore_info.buf_lock); | ||
1540 | pstore_register(&efivars->efi_pstore_info); | ||
1541 | } | ||
1542 | } | ||
1543 | #else | ||
1544 | static void efivar_pstore_register(struct efivars *efivars) | ||
1545 | { | ||
1546 | return; | ||
1547 | } | ||
1548 | #endif | ||
1549 | |||
1560 | static ssize_t efivar_create(struct file *filp, struct kobject *kobj, | 1550 | static ssize_t efivar_create(struct file *filp, struct kobject *kobj, |
1561 | struct bin_attribute *bin_attr, | 1551 | struct bin_attribute *bin_attr, |
1562 | char *buf, loff_t pos, size_t count) | 1552 | char *buf, loff_t pos, size_t count) |
@@ -1716,6 +1706,31 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor) | |||
1716 | return found; | 1706 | return found; |
1717 | } | 1707 | } |
1718 | 1708 | ||
1709 | /* | ||
1710 | * Returns the size of variable_name, in bytes, including the | ||
1711 | * terminating NULL character, or variable_name_size if no NULL | ||
1712 | * character is found among the first variable_name_size bytes. | ||
1713 | */ | ||
1714 | static unsigned long var_name_strnsize(efi_char16_t *variable_name, | ||
1715 | unsigned long variable_name_size) | ||
1716 | { | ||
1717 | unsigned long len; | ||
1718 | efi_char16_t c; | ||
1719 | |||
1720 | /* | ||
1721 | * The variable name is, by definition, a NULL-terminated | ||
1722 | * string, so make absolutely sure that variable_name_size is | ||
1723 | * the value we expect it to be. If not, return the real size. | ||
1724 | */ | ||
1725 | for (len = 2; len <= variable_name_size; len += sizeof(c)) { | ||
1726 | c = variable_name[(len / sizeof(c)) - 1]; | ||
1727 | if (!c) | ||
1728 | break; | ||
1729 | } | ||
1730 | |||
1731 | return min(len, variable_name_size); | ||
1732 | } | ||
1733 | |||
1719 | static void efivar_update_sysfs_entries(struct work_struct *work) | 1734 | static void efivar_update_sysfs_entries(struct work_struct *work) |
1720 | { | 1735 | { |
1721 | struct efivars *efivars = &__efivars; | 1736 | struct efivars *efivars = &__efivars; |
@@ -1756,10 +1771,13 @@ static void efivar_update_sysfs_entries(struct work_struct *work) | |||
1756 | if (!found) { | 1771 | if (!found) { |
1757 | kfree(variable_name); | 1772 | kfree(variable_name); |
1758 | break; | 1773 | break; |
1759 | } else | 1774 | } else { |
1775 | variable_name_size = var_name_strnsize(variable_name, | ||
1776 | variable_name_size); | ||
1760 | efivar_create_sysfs_entry(efivars, | 1777 | efivar_create_sysfs_entry(efivars, |
1761 | variable_name_size, | 1778 | variable_name_size, |
1762 | variable_name, &vendor); | 1779 | variable_name, &vendor); |
1780 | } | ||
1763 | } | 1781 | } |
1764 | } | 1782 | } |
1765 | 1783 | ||
@@ -1958,6 +1976,35 @@ void unregister_efivars(struct efivars *efivars) | |||
1958 | } | 1976 | } |
1959 | EXPORT_SYMBOL_GPL(unregister_efivars); | 1977 | EXPORT_SYMBOL_GPL(unregister_efivars); |
1960 | 1978 | ||
1979 | /* | ||
1980 | * Print a warning when duplicate EFI variables are encountered and | ||
1981 | * disable the sysfs workqueue since the firmware is buggy. | ||
1982 | */ | ||
1983 | static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid, | ||
1984 | unsigned long len16) | ||
1985 | { | ||
1986 | size_t i, len8 = len16 / sizeof(efi_char16_t); | ||
1987 | char *s8; | ||
1988 | |||
1989 | /* | ||
1990 | * Disable the workqueue since the algorithm it uses for | ||
1991 | * detecting new variables won't work with this buggy | ||
1992 | * implementation of GetNextVariableName(). | ||
1993 | */ | ||
1994 | efivar_wq_enabled = false; | ||
1995 | |||
1996 | s8 = kzalloc(len8, GFP_KERNEL); | ||
1997 | if (!s8) | ||
1998 | return; | ||
1999 | |||
2000 | for (i = 0; i < len8; i++) | ||
2001 | s8[i] = s16[i]; | ||
2002 | |||
2003 | printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n", | ||
2004 | s8, vendor_guid); | ||
2005 | kfree(s8); | ||
2006 | } | ||
2007 | |||
1961 | int register_efivars(struct efivars *efivars, | 2008 | int register_efivars(struct efivars *efivars, |
1962 | const struct efivar_operations *ops, | 2009 | const struct efivar_operations *ops, |
1963 | struct kobject *parent_kobj) | 2010 | struct kobject *parent_kobj) |
@@ -2006,6 +2053,24 @@ int register_efivars(struct efivars *efivars, | |||
2006 | &vendor_guid); | 2053 | &vendor_guid); |
2007 | switch (status) { | 2054 | switch (status) { |
2008 | case EFI_SUCCESS: | 2055 | case EFI_SUCCESS: |
2056 | variable_name_size = var_name_strnsize(variable_name, | ||
2057 | variable_name_size); | ||
2058 | |||
2059 | /* | ||
2060 | * Some firmware implementations return the | ||
2061 | * same variable name on multiple calls to | ||
2062 | * get_next_variable(). Terminate the loop | ||
2063 | * immediately as there is no guarantee that | ||
2064 | * we'll ever see a different variable name, | ||
2065 | * and may end up looping here forever. | ||
2066 | */ | ||
2067 | if (variable_is_present(variable_name, &vendor_guid)) { | ||
2068 | dup_variable_bug(variable_name, &vendor_guid, | ||
2069 | variable_name_size); | ||
2070 | status = EFI_NOT_FOUND; | ||
2071 | break; | ||
2072 | } | ||
2073 | |||
2009 | efivar_create_sysfs_entry(efivars, | 2074 | efivar_create_sysfs_entry(efivars, |
2010 | variable_name_size, | 2075 | variable_name_size, |
2011 | variable_name, | 2076 | variable_name, |
@@ -2025,15 +2090,8 @@ int register_efivars(struct efivars *efivars, | |||
2025 | if (error) | 2090 | if (error) |
2026 | unregister_efivars(efivars); | 2091 | unregister_efivars(efivars); |
2027 | 2092 | ||
2028 | efivars->efi_pstore_info = efi_pstore_info; | 2093 | if (!efivars_pstore_disable) |
2029 | 2094 | efivar_pstore_register(efivars); | |
2030 | efivars->efi_pstore_info.buf = kmalloc(4096, GFP_KERNEL); | ||
2031 | if (efivars->efi_pstore_info.buf) { | ||
2032 | efivars->efi_pstore_info.bufsize = 1024; | ||
2033 | efivars->efi_pstore_info.data = efivars; | ||
2034 | spin_lock_init(&efivars->efi_pstore_info.buf_lock); | ||
2035 | pstore_register(&efivars->efi_pstore_info); | ||
2036 | } | ||
2037 | 2095 | ||
2038 | register_filesystem(&efivarfs_type); | 2096 | register_filesystem(&efivarfs_type); |
2039 | 2097 | ||
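
Most of the efivars churn above guards against firmware that reports bogus name sizes or keeps returning the same variable from GetNextVariableName(). The core of the first guard, var_name_strnsize(), is easy to exercise in isolation; the sketch below mirrors its logic with uint16_t standing in for efi_char16_t:

#include <stdio.h>
#include <stdint.h>

/*
 * Byte length of a NUL-terminated UTF-16 name, including the terminator,
 * clamped to the size the firmware reported.
 */
static unsigned long name_strnsize(const uint16_t *name, unsigned long size)
{
	unsigned long len;

	for (len = sizeof(*name); len <= size; len += sizeof(*name))
		if (!name[len / sizeof(*name) - 1])
			break;

	return len < size ? len : size;
}

int main(void)
{
	/* "Boot" plus terminator, but the reported size is a full 1024 bytes. */
	uint16_t name[512] = { 'B', 'o', 'o', 't', 0 };

	printf("trimmed size: %lu bytes\n", name_strnsize(name, sizeof(name)));	/* 10 */
	return 0;
}
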
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c index f9dbd503fc40..de3c317bd3e2 100644 --- a/drivers/gpio/gpio-ich.c +++ b/drivers/gpio/gpio-ich.c | |||
@@ -214,7 +214,7 @@ static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr) | |||
214 | * If it can't be trusted, assume that the pin can be used as a GPIO. | 214 | * If it can't be trusted, assume that the pin can be used as a GPIO. |
215 | */ | 215 | */ |
216 | if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f))) | 216 | if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f))) |
217 | return 1; | 217 | return 0; |
218 | 218 | ||
219 | return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV; | 219 | return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV; |
220 | } | 220 | } |
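
The gpio-ich fix matters because a request() hook must return 0 for a pin that may be used; returning 1 made gpiolib treat every pin on the use_sel_ignore list as a request failure. The one-bit-per-pin lookup itself is trivial to check standalone (the mask below is an arbitrary example, not a real chipset table):

#include <stdio.h>
#include <stdint.h>

/* One bit per GPIO, 32 GPIOs per word, as in use_sel_ignore[]. */
static int pin_is_ignored(const uint32_t *ignore, unsigned nr)
{
	return !!(ignore[nr / 32] & (1u << (nr & 0x1f)));
}

int main(void)
{
	uint32_t ignore[3] = { 0, 0x00010000, 0 };	/* example: bit 16 of word 1 = pin 48 */

	printf("pin 48: %s\n", pin_is_ignored(ignore, 48) ? "ignored" : "checked");
	printf("pin 10: %s\n", pin_is_ignored(ignore, 10) ? "ignored" : "checked");
	return 0;
}
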
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 24059462c87f..9391cf16e990 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c | |||
@@ -575,7 +575,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, | |||
575 | chip->gpio_chip.ngpio, | 575 | chip->gpio_chip.ngpio, |
576 | irq_base, | 576 | irq_base, |
577 | &pca953x_irq_simple_ops, | 577 | &pca953x_irq_simple_ops, |
578 | NULL); | 578 | chip); |
579 | if (!chip->domain) | 579 | if (!chip->domain) |
580 | return -ENODEV; | 580 | return -ENODEV; |
581 | 581 | ||
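
Passing the chip instead of NULL as the irq domain's host_data lets the domain's mapping callbacks recover the driver context later. A toy model of that context-pointer pattern, using made-up structure names rather than the real irqdomain API:

#include <stdio.h>

struct chip { const char *label; };

struct irq_domain {
	void *host_data;			/* driver context, as in the fix */
};

/* A mapping callback that needs the chip: NULL host_data would crash here. */
static void map_irq(struct irq_domain *d, unsigned int hwirq)
{
	struct chip *chip = d->host_data;

	printf("mapping hwirq %u on %s\n", hwirq, chip->label);
}

int main(void)
{
	struct chip pca = { "pca953x" };
	struct irq_domain domain = { .host_data = &pca };	/* was NULL before the fix */

	map_irq(&domain, 3);
	return 0;
}
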
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 770476a9da87..3ce5bc38ac31 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c | |||
@@ -307,11 +307,15 @@ static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = { | |||
307 | .xlate = irq_domain_xlate_twocell, | 307 | .xlate = irq_domain_xlate_twocell, |
308 | }; | 308 | }; |
309 | 309 | ||
310 | static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio) | 310 | static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio, |
311 | struct device_node *np) | ||
311 | { | 312 | { |
312 | int base = stmpe_gpio->irq_base; | 313 | int base = 0; |
313 | 314 | ||
314 | stmpe_gpio->domain = irq_domain_add_simple(NULL, | 315 | if (!np) |
316 | base = stmpe_gpio->irq_base; | ||
317 | |||
318 | stmpe_gpio->domain = irq_domain_add_simple(np, | ||
315 | stmpe_gpio->chip.ngpio, base, | 319 | stmpe_gpio->chip.ngpio, base, |
316 | &stmpe_gpio_irq_simple_ops, stmpe_gpio); | 320 | &stmpe_gpio_irq_simple_ops, stmpe_gpio); |
317 | if (!stmpe_gpio->domain) { | 321 | if (!stmpe_gpio->domain) { |
@@ -346,6 +350,9 @@ static int stmpe_gpio_probe(struct platform_device *pdev) | |||
346 | stmpe_gpio->chip = template_chip; | 350 | stmpe_gpio->chip = template_chip; |
347 | stmpe_gpio->chip.ngpio = stmpe->num_gpios; | 351 | stmpe_gpio->chip.ngpio = stmpe->num_gpios; |
348 | stmpe_gpio->chip.dev = &pdev->dev; | 352 | stmpe_gpio->chip.dev = &pdev->dev; |
353 | #ifdef CONFIG_OF | ||
354 | stmpe_gpio->chip.of_node = np; | ||
355 | #endif | ||
349 | stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1; | 356 | stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1; |
350 | 357 | ||
351 | if (pdata) | 358 | if (pdata) |
@@ -366,7 +373,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev) | |||
366 | goto out_free; | 373 | goto out_free; |
367 | 374 | ||
368 | if (irq >= 0) { | 375 | if (irq >= 0) { |
369 | ret = stmpe_gpio_irq_init(stmpe_gpio); | 376 | ret = stmpe_gpio_irq_init(stmpe_gpio, np); |
370 | if (ret) | 377 | if (ret) |
371 | goto out_disable; | 378 | goto out_disable; |
372 | 379 | ||
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index a71a54a3e3f7..5150df6cba08 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
@@ -193,7 +193,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip) | |||
193 | if (!np) | 193 | if (!np) |
194 | return; | 194 | return; |
195 | 195 | ||
196 | do { | 196 | for (;; index++) { |
197 | ret = of_parse_phandle_with_args(np, "gpio-ranges", | 197 | ret = of_parse_phandle_with_args(np, "gpio-ranges", |
198 | "#gpio-range-cells", index, &pinspec); | 198 | "#gpio-range-cells", index, &pinspec); |
199 | if (ret) | 199 | if (ret) |
@@ -222,8 +222,7 @@ static void of_gpiochip_add_pin_range(struct gpio_chip *chip) | |||
222 | 222 | ||
223 | if (ret) | 223 | if (ret) |
224 | break; | 224 | break; |
225 | 225 | } | |
226 | } while (index++); | ||
227 | } | 226 | } |
228 | 227 | ||
229 | #else | 228 | #else |
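
The gpiolib-of change fixes a classic loop bug: with index starting at 0, do { ... } while (index++) tests the post-increment result 0 and exits after a single pass, so only the first gpio-ranges entry was ever parsed. The replacement for (;; index++) relies solely on the explicit breaks. A standalone demonstration of the two loop shapes:

#include <stdio.h>

int main(void)
{
	int index;

	/* Old pattern: terminates after one pass because index++ yields 0. */
	index = 0;
	do {
		printf("do/while pass, index=%d\n", index);
	} while (index++);

	/* New pattern: runs until an explicit break. */
	for (index = 0; ; index++) {
		printf("for(;;) pass, index=%d\n", index);
		if (index == 2)
			break;	/* stands in for of_parse_phandle_with_args() failing */
	}
	return 0;
}
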
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 792c3e3795ca..dd64a06dc5b4 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -2326,7 +2326,6 @@ int drm_mode_addfb(struct drm_device *dev, | |||
2326 | fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); | 2326 | fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); |
2327 | if (IS_ERR(fb)) { | 2327 | if (IS_ERR(fb)) { |
2328 | DRM_DEBUG_KMS("could not create framebuffer\n"); | 2328 | DRM_DEBUG_KMS("could not create framebuffer\n"); |
2329 | drm_modeset_unlock_all(dev); | ||
2330 | return PTR_ERR(fb); | 2329 | return PTR_ERR(fb); |
2331 | } | 2330 | } |
2332 | 2331 | ||
@@ -2506,7 +2505,6 @@ int drm_mode_addfb2(struct drm_device *dev, | |||
2506 | fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); | 2505 | fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); |
2507 | if (IS_ERR(fb)) { | 2506 | if (IS_ERR(fb)) { |
2508 | DRM_DEBUG_KMS("could not create framebuffer\n"); | 2507 | DRM_DEBUG_KMS("could not create framebuffer\n"); |
2509 | drm_modeset_unlock_all(dev); | ||
2510 | return PTR_ERR(fb); | 2508 | return PTR_ERR(fb); |
2511 | } | 2509 | } |
2512 | 2510 | ||
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index c194f4e680ad..e2acfdbf7d3c 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -1634,7 +1634,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, | |||
1634 | unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; | 1634 | unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; |
1635 | unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; | 1635 | unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; |
1636 | unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; | 1636 | unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; |
1637 | unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4; | 1637 | unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4; |
1638 | unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); | 1638 | unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); |
1639 | 1639 | ||
1640 | /* ignore tiny modes */ | 1640 | /* ignore tiny modes */ |
@@ -1715,6 +1715,7 @@ set_size: | |||
1715 | } | 1715 | } |
1716 | 1716 | ||
1717 | mode->type = DRM_MODE_TYPE_DRIVER; | 1717 | mode->type = DRM_MODE_TYPE_DRIVER; |
1718 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
1718 | drm_mode_set_name(mode); | 1719 | drm_mode_set_name(mode); |
1719 | 1720 | ||
1720 | return mode; | 1721 | return mode; |
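
The one-character drm_edid fix moves the two high bits of the 6-bit vertical sync offset into place: they sit in bits 3:2 of the shared hi byte and belong at bits 5:4 of the result, above the low nibble taken from lo >> 4, so the expression must shift left, not right. A standalone decode with arbitrary sample byte values:

#include <stdio.h>

int main(void)
{
	unsigned hi = 0x2c;	/* bits 3:2 = vsync offset high, bits 1:0 = pulse width high */
	unsigned lo = 0x95;	/* high nibble = vsync offset low, low nibble = pulse width low */

	unsigned offset_buggy = (hi & 0xc) >> 2 | lo >> 4;	/* high bits collapse onto the low nibble */
	unsigned offset_fixed = (hi & 0xc) << 2 | lo >> 4;
	unsigned pulse_width  = (hi & 0x3) << 4 | (lo & 0xf);

	printf("buggy vsync offset = %u\n", offset_buggy);	/* 11 */
	printf("fixed vsync offset = %u\n", offset_fixed);	/* 57 */
	printf("vsync pulse width  = %u\n", pulse_width);	/* 5  */
	return 0;
}
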
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 13fdcd10a605..429e07d0b0f1 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
@@ -123,6 +123,7 @@ int drm_open(struct inode *inode, struct file *filp) | |||
123 | int retcode = 0; | 123 | int retcode = 0; |
124 | int need_setup = 0; | 124 | int need_setup = 0; |
125 | struct address_space *old_mapping; | 125 | struct address_space *old_mapping; |
126 | struct address_space *old_imapping; | ||
126 | 127 | ||
127 | minor = idr_find(&drm_minors_idr, minor_id); | 128 | minor = idr_find(&drm_minors_idr, minor_id); |
128 | if (!minor) | 129 | if (!minor) |
@@ -137,6 +138,7 @@ int drm_open(struct inode *inode, struct file *filp) | |||
137 | if (!dev->open_count++) | 138 | if (!dev->open_count++) |
138 | need_setup = 1; | 139 | need_setup = 1; |
139 | mutex_lock(&dev->struct_mutex); | 140 | mutex_lock(&dev->struct_mutex); |
141 | old_imapping = inode->i_mapping; | ||
140 | old_mapping = dev->dev_mapping; | 142 | old_mapping = dev->dev_mapping; |
141 | if (old_mapping == NULL) | 143 | if (old_mapping == NULL) |
142 | dev->dev_mapping = &inode->i_data; | 144 | dev->dev_mapping = &inode->i_data; |
@@ -159,8 +161,8 @@ int drm_open(struct inode *inode, struct file *filp) | |||
159 | 161 | ||
160 | err_undo: | 162 | err_undo: |
161 | mutex_lock(&dev->struct_mutex); | 163 | mutex_lock(&dev->struct_mutex); |
162 | filp->f_mapping = old_mapping; | 164 | filp->f_mapping = old_imapping; |
163 | inode->i_mapping = old_mapping; | 165 | inode->i_mapping = old_imapping; |
164 | iput(container_of(dev->dev_mapping, struct inode, i_data)); | 166 | iput(container_of(dev->dev_mapping, struct inode, i_data)); |
165 | dev->dev_mapping = old_mapping; | 167 | dev->dev_mapping = old_mapping; |
166 | mutex_unlock(&dev->struct_mutex); | 168 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 36493ce71f9a..98cc14725ba9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
@@ -38,11 +38,12 @@ | |||
38 | /* position control register for hardware window 0, 2 ~ 4.*/ | 38 | /* position control register for hardware window 0, 2 ~ 4.*/ |
39 | #define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16) | 39 | #define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16) |
40 | #define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16) | 40 | #define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16) |
41 | /* size control register for hardware window 0. */ | 41 | /* |
42 | #define VIDOSD_C_SIZE_W0 (VIDOSD_BASE + 0x08) | 42 | * size control register for hardware windows 0 and alpha control register |
43 | /* alpha control register for hardware window 1 ~ 4. */ | 43 | * for hardware windows 1 ~ 4 |
44 | #define VIDOSD_C(win) (VIDOSD_BASE + 0x18 + (win) * 16) | 44 | */ |
45 | /* size control register for hardware window 1 ~ 4. */ | 45 | #define VIDOSD_C(win) (VIDOSD_BASE + 0x08 + (win) * 16) |
46 | /* size control register for hardware windows 1 ~ 2. */ | ||
46 | #define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16) | 47 | #define VIDOSD_D(win) (VIDOSD_BASE + 0x0C + (win) * 16) |
47 | 48 | ||
48 | #define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8) | 49 | #define VIDWx_BUF_START(win, buf) (VIDW_BUF_START(buf) + (win) * 8) |
@@ -50,9 +51,9 @@ | |||
50 | #define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4) | 51 | #define VIDWx_BUF_SIZE(win, buf) (VIDW_BUF_SIZE(buf) + (win) * 4) |
51 | 52 | ||
52 | /* color key control register for hardware window 1 ~ 4. */ | 53 | /* color key control register for hardware window 1 ~ 4. */ |
53 | #define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + (x * 8)) | 54 | #define WKEYCON0_BASE(x) ((WKEYCON0 + 0x140) + ((x - 1) * 8)) |
54 | /* color key value register for hardware window 1 ~ 4. */ | 55 | /* color key value register for hardware window 1 ~ 4. */ |
55 | #define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + (x * 8)) | 56 | #define WKEYCON1_BASE(x) ((WKEYCON1 + 0x140) + ((x - 1) * 8)) |
56 | 57 | ||
57 | /* FIMD has totally five hardware windows. */ | 58 | /* FIMD has totally five hardware windows. */ |
58 | #define WINDOWS_NR 5 | 59 | #define WINDOWS_NR 5 |
@@ -109,9 +110,9 @@ struct fimd_context { | |||
109 | 110 | ||
110 | #ifdef CONFIG_OF | 111 | #ifdef CONFIG_OF |
111 | static const struct of_device_id fimd_driver_dt_match[] = { | 112 | static const struct of_device_id fimd_driver_dt_match[] = { |
112 | { .compatible = "samsung,exynos4-fimd", | 113 | { .compatible = "samsung,exynos4210-fimd", |
113 | .data = &exynos4_fimd_driver_data }, | 114 | .data = &exynos4_fimd_driver_data }, |
114 | { .compatible = "samsung,exynos5-fimd", | 115 | { .compatible = "samsung,exynos5250-fimd", |
115 | .data = &exynos5_fimd_driver_data }, | 116 | .data = &exynos5_fimd_driver_data }, |
116 | {}, | 117 | {}, |
117 | }; | 118 | }; |
@@ -581,7 +582,7 @@ static void fimd_win_commit(struct device *dev, int zpos) | |||
581 | if (win != 3 && win != 4) { | 582 | if (win != 3 && win != 4) { |
582 | u32 offset = VIDOSD_D(win); | 583 | u32 offset = VIDOSD_D(win); |
583 | if (win == 0) | 584 | if (win == 0) |
584 | offset = VIDOSD_C_SIZE_W0; | 585 | offset = VIDOSD_C(win); |
585 | val = win_data->ovl_width * win_data->ovl_height; | 586 | val = win_data->ovl_width * win_data->ovl_height; |
586 | writel(val, ctx->regs + offset); | 587 | writel(val, ctx->regs + offset); |
587 | 588 | ||
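
After the macro rework above, window 0's size register simply falls out of VIDOSD_C(win) at offset +0x08, and the colour-key macros index windows 1..4 through an (x - 1) term. Evaluating the new macros standalone makes the layout easy to eyeball; the base values below are placeholders chosen for this sketch, only the relative offsets matter:

#include <stdio.h>

#define VIDOSD_BASE	0x40		/* placeholder base for illustration */
#define WKEYCON0	0x140		/* placeholder */
#define WKEYCON1	0x144		/* placeholder */

/* New macros from the patch: window 0 shares the VIDOSD_C slot at +0x08. */
#define VIDOSD_A(win)		(VIDOSD_BASE + 0x00 + (win) * 16)
#define VIDOSD_B(win)		(VIDOSD_BASE + 0x04 + (win) * 16)
#define VIDOSD_C(win)		(VIDOSD_BASE + 0x08 + (win) * 16)
#define VIDOSD_D(win)		(VIDOSD_BASE + 0x0C + (win) * 16)

/* Color-key registers exist for windows 1..4 only, hence the (x - 1). */
#define WKEYCON0_BASE(x)	((WKEYCON0 + 0x140) + ((x - 1) * 8))
#define WKEYCON1_BASE(x)	((WKEYCON1 + 0x140) + ((x - 1) * 8))

int main(void)
{
	int win;

	for (win = 0; win < 5; win++)
		printf("win %d: VIDOSD_C=0x%03x VIDOSD_D=0x%03x\n",
		       win, VIDOSD_C(win), VIDOSD_D(win));
	for (win = 1; win < 5; win++)
		printf("win %d: WKEYCON0=0x%03x WKEYCON1=0x%03x\n",
		       win, WKEYCON0_BASE(win), WKEYCON1_BASE(win));
	return 0;
}
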
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 3b0da0378acf..47a493c8a71f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c | |||
@@ -48,8 +48,14 @@ | |||
48 | 48 | ||
49 | /* registers for base address */ | 49 | /* registers for base address */ |
50 | #define G2D_SRC_BASE_ADDR 0x0304 | 50 | #define G2D_SRC_BASE_ADDR 0x0304 |
51 | #define G2D_SRC_COLOR_MODE 0x030C | ||
52 | #define G2D_SRC_LEFT_TOP 0x0310 | ||
53 | #define G2D_SRC_RIGHT_BOTTOM 0x0314 | ||
51 | #define G2D_SRC_PLANE2_BASE_ADDR 0x0318 | 54 | #define G2D_SRC_PLANE2_BASE_ADDR 0x0318 |
52 | #define G2D_DST_BASE_ADDR 0x0404 | 55 | #define G2D_DST_BASE_ADDR 0x0404 |
56 | #define G2D_DST_COLOR_MODE 0x040C | ||
57 | #define G2D_DST_LEFT_TOP 0x0410 | ||
58 | #define G2D_DST_RIGHT_BOTTOM 0x0414 | ||
53 | #define G2D_DST_PLANE2_BASE_ADDR 0x0418 | 59 | #define G2D_DST_PLANE2_BASE_ADDR 0x0418 |
54 | #define G2D_PAT_BASE_ADDR 0x0500 | 60 | #define G2D_PAT_BASE_ADDR 0x0500 |
55 | #define G2D_MSK_BASE_ADDR 0x0520 | 61 | #define G2D_MSK_BASE_ADDR 0x0520 |
@@ -82,7 +88,7 @@ | |||
82 | #define G2D_DMA_LIST_DONE_COUNT_OFFSET 17 | 88 | #define G2D_DMA_LIST_DONE_COUNT_OFFSET 17 |
83 | 89 | ||
84 | /* G2D_DMA_HOLD_CMD */ | 90 | /* G2D_DMA_HOLD_CMD */ |
85 | #define G2D_USET_HOLD (1 << 2) | 91 | #define G2D_USER_HOLD (1 << 2) |
86 | #define G2D_LIST_HOLD (1 << 1) | 92 | #define G2D_LIST_HOLD (1 << 1) |
87 | #define G2D_BITBLT_HOLD (1 << 0) | 93 | #define G2D_BITBLT_HOLD (1 << 0) |
88 | 94 | ||
@@ -91,13 +97,27 @@ | |||
91 | #define G2D_START_NHOLT (1 << 1) | 97 | #define G2D_START_NHOLT (1 << 1) |
92 | #define G2D_START_BITBLT (1 << 0) | 98 | #define G2D_START_BITBLT (1 << 0) |
93 | 99 | ||
100 | /* buffer color format */ | ||
101 | #define G2D_FMT_XRGB8888 0 | ||
102 | #define G2D_FMT_ARGB8888 1 | ||
103 | #define G2D_FMT_RGB565 2 | ||
104 | #define G2D_FMT_XRGB1555 3 | ||
105 | #define G2D_FMT_ARGB1555 4 | ||
106 | #define G2D_FMT_XRGB4444 5 | ||
107 | #define G2D_FMT_ARGB4444 6 | ||
108 | #define G2D_FMT_PACKED_RGB888 7 | ||
109 | #define G2D_FMT_A8 11 | ||
110 | #define G2D_FMT_L8 12 | ||
111 | |||
112 | /* buffer valid length */ | ||
113 | #define G2D_LEN_MIN 1 | ||
114 | #define G2D_LEN_MAX 8000 | ||
115 | |||
94 | #define G2D_CMDLIST_SIZE (PAGE_SIZE / 4) | 116 | #define G2D_CMDLIST_SIZE (PAGE_SIZE / 4) |
95 | #define G2D_CMDLIST_NUM 64 | 117 | #define G2D_CMDLIST_NUM 64 |
96 | #define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM) | 118 | #define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM) |
97 | #define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2) | 119 | #define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2) |
98 | 120 | ||
99 | #define MAX_BUF_ADDR_NR 6 | ||
100 | |||
101 | /* maximum buffer pool size of userptr is 64MB as default */ | 121 | /* maximum buffer pool size of userptr is 64MB as default */ |
102 | #define MAX_POOL (64 * 1024 * 1024) | 122 | #define MAX_POOL (64 * 1024 * 1024) |
103 | 123 | ||
@@ -106,6 +126,17 @@ enum { | |||
106 | BUF_TYPE_USERPTR, | 126 | BUF_TYPE_USERPTR, |
107 | }; | 127 | }; |
108 | 128 | ||
129 | enum g2d_reg_type { | ||
130 | REG_TYPE_NONE = -1, | ||
131 | REG_TYPE_SRC, | ||
132 | REG_TYPE_SRC_PLANE2, | ||
133 | REG_TYPE_DST, | ||
134 | REG_TYPE_DST_PLANE2, | ||
135 | REG_TYPE_PAT, | ||
136 | REG_TYPE_MSK, | ||
137 | MAX_REG_TYPE_NR | ||
138 | }; | ||
139 | |||
109 | /* cmdlist data structure */ | 140 | /* cmdlist data structure */ |
110 | struct g2d_cmdlist { | 141 | struct g2d_cmdlist { |
111 | u32 head; | 142 | u32 head; |
@@ -113,6 +144,42 @@ struct g2d_cmdlist { | |||
113 | u32 last; /* last data offset */ | 144 | u32 last; /* last data offset */ |
114 | }; | 145 | }; |
115 | 146 | ||
147 | /* | ||
148 | * A structure of buffer description | ||
149 | * | ||
150 | * @format: color format | ||
151 | * @left_x: the x coordinates of left top corner | ||
152 | * @top_y: the y coordinates of left top corner | ||
153 | * @right_x: the x coordinates of right bottom corner | ||
154 | * @bottom_y: the y coordinates of right bottom corner | ||
155 | * | ||
156 | */ | ||
157 | struct g2d_buf_desc { | ||
158 | unsigned int format; | ||
159 | unsigned int left_x; | ||
160 | unsigned int top_y; | ||
161 | unsigned int right_x; | ||
162 | unsigned int bottom_y; | ||
163 | }; | ||
164 | |||
165 | /* | ||
166 | * A structure of buffer information | ||
167 | * | ||
168 | * @map_nr: manages the number of mapped buffers | ||
169 | * @reg_types: stores register type in the order of requested command |||
170 | * @handles: stores buffer handle in its reg_type position | ||
171 | * @types: stores buffer type in its reg_type position | ||
172 | * @descs: stores buffer description in its reg_type position | ||
173 | * | ||
174 | */ | ||
175 | struct g2d_buf_info { | ||
176 | unsigned int map_nr; | ||
177 | enum g2d_reg_type reg_types[MAX_REG_TYPE_NR]; | ||
178 | unsigned long handles[MAX_REG_TYPE_NR]; | ||
179 | unsigned int types[MAX_REG_TYPE_NR]; | ||
180 | struct g2d_buf_desc descs[MAX_REG_TYPE_NR]; | ||
181 | }; | ||
182 | |||
116 | struct drm_exynos_pending_g2d_event { | 183 | struct drm_exynos_pending_g2d_event { |
117 | struct drm_pending_event base; | 184 | struct drm_pending_event base; |
118 | struct drm_exynos_g2d_event event; | 185 | struct drm_exynos_g2d_event event; |
@@ -131,14 +198,11 @@ struct g2d_cmdlist_userptr { | |||
131 | bool in_pool; | 198 | bool in_pool; |
132 | bool out_of_list; | 199 | bool out_of_list; |
133 | }; | 200 | }; |
134 | |||
135 | struct g2d_cmdlist_node { | 201 | struct g2d_cmdlist_node { |
136 | struct list_head list; | 202 | struct list_head list; |
137 | struct g2d_cmdlist *cmdlist; | 203 | struct g2d_cmdlist *cmdlist; |
138 | unsigned int map_nr; | ||
139 | unsigned long handles[MAX_BUF_ADDR_NR]; | ||
140 | unsigned int obj_type[MAX_BUF_ADDR_NR]; | ||
141 | dma_addr_t dma_addr; | 204 | dma_addr_t dma_addr; |
205 | struct g2d_buf_info buf_info; | ||
142 | 206 | ||
143 | struct drm_exynos_pending_g2d_event *event; | 207 | struct drm_exynos_pending_g2d_event *event; |
144 | }; | 208 | }; |
@@ -188,6 +252,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) | |||
188 | struct exynos_drm_subdrv *subdrv = &g2d->subdrv; | 252 | struct exynos_drm_subdrv *subdrv = &g2d->subdrv; |
189 | int nr; | 253 | int nr; |
190 | int ret; | 254 | int ret; |
255 | struct g2d_buf_info *buf_info; | ||
191 | 256 | ||
192 | init_dma_attrs(&g2d->cmdlist_dma_attrs); | 257 | init_dma_attrs(&g2d->cmdlist_dma_attrs); |
193 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs); | 258 | dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs); |
@@ -209,11 +274,17 @@ static int g2d_init_cmdlist(struct g2d_data *g2d) | |||
209 | } | 274 | } |
210 | 275 | ||
211 | for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) { | 276 | for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) { |
277 | unsigned int i; | ||
278 | |||
212 | node[nr].cmdlist = | 279 | node[nr].cmdlist = |
213 | g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE; | 280 | g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE; |
214 | node[nr].dma_addr = | 281 | node[nr].dma_addr = |
215 | g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE; | 282 | g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE; |
216 | 283 | ||
284 | buf_info = &node[nr].buf_info; | ||
285 | for (i = 0; i < MAX_REG_TYPE_NR; i++) | ||
286 | buf_info->reg_types[i] = REG_TYPE_NONE; | ||
287 | |||
217 | list_add_tail(&node[nr].list, &g2d->free_cmdlist); | 288 | list_add_tail(&node[nr].list, &g2d->free_cmdlist); |
218 | } | 289 | } |
219 | 290 | ||
@@ -450,7 +521,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, | |||
450 | DMA_BIDIRECTIONAL); | 521 | DMA_BIDIRECTIONAL); |
451 | if (ret < 0) { | 522 | if (ret < 0) { |
452 | DRM_ERROR("failed to map sgt with dma region.\n"); | 523 | DRM_ERROR("failed to map sgt with dma region.\n"); |
453 | goto err_free_sgt; | 524 | goto err_sg_free_table; |
454 | } | 525 | } |
455 | 526 | ||
456 | g2d_userptr->dma_addr = sgt->sgl[0].dma_address; | 527 | g2d_userptr->dma_addr = sgt->sgl[0].dma_address; |
@@ -467,8 +538,10 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev, | |||
467 | 538 | ||
468 | return &g2d_userptr->dma_addr; | 539 | return &g2d_userptr->dma_addr; |
469 | 540 | ||
470 | err_free_sgt: | 541 | err_sg_free_table: |
471 | sg_free_table(sgt); | 542 | sg_free_table(sgt); |
543 | |||
544 | err_free_sgt: | ||
472 | kfree(sgt); | 545 | kfree(sgt); |
473 | sgt = NULL; | 546 | sgt = NULL; |
474 | 547 | ||
@@ -506,36 +579,172 @@ static void g2d_userptr_free_all(struct drm_device *drm_dev, | |||
506 | g2d->current_pool = 0; | 579 | g2d->current_pool = 0; |
507 | } | 580 | } |
508 | 581 | ||
582 | static enum g2d_reg_type g2d_get_reg_type(int reg_offset) | ||
583 | { | ||
584 | enum g2d_reg_type reg_type; | ||
585 | |||
586 | switch (reg_offset) { | ||
587 | case G2D_SRC_BASE_ADDR: | ||
588 | case G2D_SRC_COLOR_MODE: | ||
589 | case G2D_SRC_LEFT_TOP: | ||
590 | case G2D_SRC_RIGHT_BOTTOM: | ||
591 | reg_type = REG_TYPE_SRC; | ||
592 | break; | ||
593 | case G2D_SRC_PLANE2_BASE_ADDR: | ||
594 | reg_type = REG_TYPE_SRC_PLANE2; | ||
595 | break; | ||
596 | case G2D_DST_BASE_ADDR: | ||
597 | case G2D_DST_COLOR_MODE: | ||
598 | case G2D_DST_LEFT_TOP: | ||
599 | case G2D_DST_RIGHT_BOTTOM: | ||
600 | reg_type = REG_TYPE_DST; | ||
601 | break; | ||
602 | case G2D_DST_PLANE2_BASE_ADDR: | ||
603 | reg_type = REG_TYPE_DST_PLANE2; | ||
604 | break; | ||
605 | case G2D_PAT_BASE_ADDR: | ||
606 | reg_type = REG_TYPE_PAT; | ||
607 | break; | ||
608 | case G2D_MSK_BASE_ADDR: | ||
609 | reg_type = REG_TYPE_MSK; | ||
610 | break; | ||
611 | default: | ||
612 | reg_type = REG_TYPE_NONE; | ||
613 | DRM_ERROR("Unknown register offset![%d]\n", reg_offset); | ||
614 | break; | ||
615 | }; | ||
616 | |||
617 | return reg_type; | ||
618 | } | ||
619 | |||
620 | static unsigned long g2d_get_buf_bpp(unsigned int format) | ||
621 | { | ||
622 | unsigned long bpp; | ||
623 | |||
624 | switch (format) { | ||
625 | case G2D_FMT_XRGB8888: | ||
626 | case G2D_FMT_ARGB8888: | ||
627 | bpp = 4; | ||
628 | break; | ||
629 | case G2D_FMT_RGB565: | ||
630 | case G2D_FMT_XRGB1555: | ||
631 | case G2D_FMT_ARGB1555: | ||
632 | case G2D_FMT_XRGB4444: | ||
633 | case G2D_FMT_ARGB4444: | ||
634 | bpp = 2; | ||
635 | break; | ||
636 | case G2D_FMT_PACKED_RGB888: | ||
637 | bpp = 3; | ||
638 | break; | ||
639 | default: | ||
640 | bpp = 1; | ||
641 | break; | ||
642 | } | ||
643 | |||
644 | return bpp; | ||
645 | } | ||
646 | |||
647 | static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc, | ||
648 | enum g2d_reg_type reg_type, | ||
649 | unsigned long size) | ||
650 | { | ||
651 | unsigned int width, height; | ||
652 | unsigned long area; | ||
653 | |||
654 | /* | ||
655 | * check source and destination buffers only. | ||
656 | * so the others are always valid. | ||
657 | */ | ||
658 | if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST) | ||
659 | return true; | ||
660 | |||
661 | width = buf_desc->right_x - buf_desc->left_x; | ||
662 | if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) { | ||
663 | DRM_ERROR("width[%u] is out of range!\n", width); | ||
664 | return false; | ||
665 | } | ||
666 | |||
667 | height = buf_desc->bottom_y - buf_desc->top_y; | ||
668 | if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) { | ||
669 | DRM_ERROR("height[%u] is out of range!\n", height); | ||
670 | return false; | ||
671 | } | ||
672 | |||
673 | area = (unsigned long)width * (unsigned long)height * | ||
674 | g2d_get_buf_bpp(buf_desc->format); | ||
675 | if (area > size) { | ||
676 | DRM_ERROR("area[%lu] is out of range[%lu]!\n", area, size); | ||
677 | return false; | ||
678 | } | ||
679 | |||
680 | return true; | ||
681 | } | ||
682 | |||
509 | static int g2d_map_cmdlist_gem(struct g2d_data *g2d, | 683 | static int g2d_map_cmdlist_gem(struct g2d_data *g2d, |
510 | struct g2d_cmdlist_node *node, | 684 | struct g2d_cmdlist_node *node, |
511 | struct drm_device *drm_dev, | 685 | struct drm_device *drm_dev, |
512 | struct drm_file *file) | 686 | struct drm_file *file) |
513 | { | 687 | { |
514 | struct g2d_cmdlist *cmdlist = node->cmdlist; | 688 | struct g2d_cmdlist *cmdlist = node->cmdlist; |
689 | struct g2d_buf_info *buf_info = &node->buf_info; | ||
515 | int offset; | 690 | int offset; |
691 | int ret; | ||
516 | int i; | 692 | int i; |
517 | 693 | ||
518 | for (i = 0; i < node->map_nr; i++) { | 694 | for (i = 0; i < buf_info->map_nr; i++) { |
695 | struct g2d_buf_desc *buf_desc; | ||
696 | enum g2d_reg_type reg_type; | ||
697 | int reg_pos; | ||
519 | unsigned long handle; | 698 | unsigned long handle; |
520 | dma_addr_t *addr; | 699 | dma_addr_t *addr; |
521 | 700 | ||
522 | offset = cmdlist->last - (i * 2 + 1); | 701 | reg_pos = cmdlist->last - 2 * (i + 1); |
523 | handle = cmdlist->data[offset]; | 702 | |
703 | offset = cmdlist->data[reg_pos]; | ||
704 | handle = cmdlist->data[reg_pos + 1]; | ||
705 | |||
706 | reg_type = g2d_get_reg_type(offset); | ||
707 | if (reg_type == REG_TYPE_NONE) { | ||
708 | ret = -EFAULT; | ||
709 | goto err; | ||
710 | } | ||
711 | |||
712 | buf_desc = &buf_info->descs[reg_type]; | ||
713 | |||
714 | if (buf_info->types[reg_type] == BUF_TYPE_GEM) { | ||
715 | unsigned long size; | ||
716 | |||
717 | size = exynos_drm_gem_get_size(drm_dev, handle, file); | ||
718 | if (!size) { | ||
719 | ret = -EFAULT; | ||
720 | goto err; | ||
721 | } | ||
722 | |||
723 | if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type, | ||
724 | size)) { | ||
725 | ret = -EFAULT; | ||
726 | goto err; | ||
727 | } | ||
524 | 728 | ||
525 | if (node->obj_type[i] == BUF_TYPE_GEM) { | ||
526 | addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, | 729 | addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, |
527 | file); | 730 | file); |
528 | if (IS_ERR(addr)) { | 731 | if (IS_ERR(addr)) { |
529 | node->map_nr = i; | 732 | ret = -EFAULT; |
530 | return -EFAULT; | 733 | goto err; |
531 | } | 734 | } |
532 | } else { | 735 | } else { |
533 | struct drm_exynos_g2d_userptr g2d_userptr; | 736 | struct drm_exynos_g2d_userptr g2d_userptr; |
534 | 737 | ||
535 | if (copy_from_user(&g2d_userptr, (void __user *)handle, | 738 | if (copy_from_user(&g2d_userptr, (void __user *)handle, |
536 | sizeof(struct drm_exynos_g2d_userptr))) { | 739 | sizeof(struct drm_exynos_g2d_userptr))) { |
537 | node->map_nr = i; | 740 | ret = -EFAULT; |
538 | return -EFAULT; | 741 | goto err; |
742 | } | ||
743 | |||
744 | if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type, | ||
745 | g2d_userptr.size)) { | ||
746 | ret = -EFAULT; | ||
747 | goto err; | ||
539 | } | 748 | } |
540 | 749 | ||
541 | addr = g2d_userptr_get_dma_addr(drm_dev, | 750 | addr = g2d_userptr_get_dma_addr(drm_dev, |
@@ -544,16 +753,21 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d, | |||
544 | file, | 753 | file, |
545 | &handle); | 754 | &handle); |
546 | if (IS_ERR(addr)) { | 755 | if (IS_ERR(addr)) { |
547 | node->map_nr = i; | 756 | ret = -EFAULT; |
548 | return -EFAULT; | 757 | goto err; |
549 | } | 758 | } |
550 | } | 759 | } |
551 | 760 | ||
552 | cmdlist->data[offset] = *addr; | 761 | cmdlist->data[reg_pos + 1] = *addr; |
553 | node->handles[i] = handle; | 762 | buf_info->reg_types[i] = reg_type; |
763 | buf_info->handles[reg_type] = handle; | ||
554 | } | 764 | } |
555 | 765 | ||
556 | return 0; | 766 | return 0; |
767 | |||
768 | err: | ||
769 | buf_info->map_nr = i; | ||
770 | return ret; | ||
557 | } | 771 | } |
558 | 772 | ||
559 | static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d, | 773 | static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d, |
@@ -561,22 +775,33 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d, | |||
561 | struct drm_file *filp) | 775 | struct drm_file *filp) |
562 | { | 776 | { |
563 | struct exynos_drm_subdrv *subdrv = &g2d->subdrv; | 777 | struct exynos_drm_subdrv *subdrv = &g2d->subdrv; |
778 | struct g2d_buf_info *buf_info = &node->buf_info; | ||
564 | int i; | 779 | int i; |
565 | 780 | ||
566 | for (i = 0; i < node->map_nr; i++) { | 781 | for (i = 0; i < buf_info->map_nr; i++) { |
567 | unsigned long handle = node->handles[i]; | 782 | struct g2d_buf_desc *buf_desc; |
783 | enum g2d_reg_type reg_type; | ||
784 | unsigned long handle; | ||
785 | |||
786 | reg_type = buf_info->reg_types[i]; | ||
787 | |||
788 | buf_desc = &buf_info->descs[reg_type]; | ||
789 | handle = buf_info->handles[reg_type]; | ||
568 | 790 | ||
569 | if (node->obj_type[i] == BUF_TYPE_GEM) | 791 | if (buf_info->types[reg_type] == BUF_TYPE_GEM) |
570 | exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle, | 792 | exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle, |
571 | filp); | 793 | filp); |
572 | else | 794 | else |
573 | g2d_userptr_put_dma_addr(subdrv->drm_dev, handle, | 795 | g2d_userptr_put_dma_addr(subdrv->drm_dev, handle, |
574 | false); | 796 | false); |
575 | 797 | ||
576 | node->handles[i] = 0; | 798 | buf_info->reg_types[i] = REG_TYPE_NONE; |
799 | buf_info->handles[reg_type] = 0; | ||
800 | buf_info->types[reg_type] = 0; | ||
801 | memset(buf_desc, 0x00, sizeof(*buf_desc)); | ||
577 | } | 802 | } |
578 | 803 | ||
579 | node->map_nr = 0; | 804 | buf_info->map_nr = 0; |
580 | } | 805 | } |
581 | 806 | ||
582 | static void g2d_dma_start(struct g2d_data *g2d, | 807 | static void g2d_dma_start(struct g2d_data *g2d, |
@@ -589,10 +814,6 @@ static void g2d_dma_start(struct g2d_data *g2d, | |||
589 | pm_runtime_get_sync(g2d->dev); | 814 | pm_runtime_get_sync(g2d->dev); |
590 | clk_enable(g2d->gate_clk); | 815 | clk_enable(g2d->gate_clk); |
591 | 816 | ||
592 | /* interrupt enable */ | ||
593 | writel_relaxed(G2D_INTEN_ACF | G2D_INTEN_UCF | G2D_INTEN_GCF, | ||
594 | g2d->regs + G2D_INTEN); | ||
595 | |||
596 | writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); | 817 | writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR); |
597 | writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); | 818 | writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND); |
598 | } | 819 | } |
@@ -643,7 +864,6 @@ static void g2d_runqueue_worker(struct work_struct *work) | |||
643 | struct g2d_data *g2d = container_of(work, struct g2d_data, | 864 | struct g2d_data *g2d = container_of(work, struct g2d_data, |
644 | runqueue_work); | 865 | runqueue_work); |
645 | 866 | ||
646 | |||
647 | mutex_lock(&g2d->runqueue_mutex); | 867 | mutex_lock(&g2d->runqueue_mutex); |
648 | clk_disable(g2d->gate_clk); | 868 | clk_disable(g2d->gate_clk); |
649 | pm_runtime_put_sync(g2d->dev); | 869 | pm_runtime_put_sync(g2d->dev); |
@@ -724,20 +944,14 @@ static int g2d_check_reg_offset(struct device *dev, | |||
724 | int i; | 944 | int i; |
725 | 945 | ||
726 | for (i = 0; i < nr; i++) { | 946 | for (i = 0; i < nr; i++) { |
727 | index = cmdlist->last - 2 * (i + 1); | 947 | struct g2d_buf_info *buf_info = &node->buf_info; |
948 | struct g2d_buf_desc *buf_desc; | ||
949 | enum g2d_reg_type reg_type; | ||
950 | unsigned long value; | ||
728 | 951 | ||
729 | if (for_addr) { | 952 | index = cmdlist->last - 2 * (i + 1); |
730 | /* check userptr buffer type. */ | ||
731 | reg_offset = (cmdlist->data[index] & | ||
732 | ~0x7fffffff) >> 31; | ||
733 | if (reg_offset) { | ||
734 | node->obj_type[i] = BUF_TYPE_USERPTR; | ||
735 | cmdlist->data[index] &= ~G2D_BUF_USERPTR; | ||
736 | } | ||
737 | } | ||
738 | 953 | ||
739 | reg_offset = cmdlist->data[index] & ~0xfffff000; | 954 | reg_offset = cmdlist->data[index] & ~0xfffff000; |
740 | |||
741 | if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END) | 955 | if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END) |
742 | goto err; | 956 | goto err; |
743 | if (reg_offset % 4) | 957 | if (reg_offset % 4) |
@@ -753,8 +967,60 @@ static int g2d_check_reg_offset(struct device *dev, | |||
753 | if (!for_addr) | 967 | if (!for_addr) |
754 | goto err; | 968 | goto err; |
755 | 969 | ||
756 | if (node->obj_type[i] != BUF_TYPE_USERPTR) | 970 | reg_type = g2d_get_reg_type(reg_offset); |
757 | node->obj_type[i] = BUF_TYPE_GEM; | 971 | if (reg_type == REG_TYPE_NONE) |
972 | goto err; | ||
973 | |||
974 | /* check userptr buffer type. */ | ||
975 | if ((cmdlist->data[index] & ~0x7fffffff) >> 31) { | ||
976 | buf_info->types[reg_type] = BUF_TYPE_USERPTR; | ||
977 | cmdlist->data[index] &= ~G2D_BUF_USERPTR; | ||
978 | } else | ||
979 | buf_info->types[reg_type] = BUF_TYPE_GEM; | ||
980 | break; | ||
981 | case G2D_SRC_COLOR_MODE: | ||
982 | case G2D_DST_COLOR_MODE: | ||
983 | if (for_addr) | ||
984 | goto err; | ||
985 | |||
986 | reg_type = g2d_get_reg_type(reg_offset); | ||
987 | if (reg_type == REG_TYPE_NONE) | ||
988 | goto err; | ||
989 | |||
990 | buf_desc = &buf_info->descs[reg_type]; | ||
991 | value = cmdlist->data[index + 1]; | ||
992 | |||
993 | buf_desc->format = value & 0xf; | ||
994 | break; | ||
995 | case G2D_SRC_LEFT_TOP: | ||
996 | case G2D_DST_LEFT_TOP: | ||
997 | if (for_addr) | ||
998 | goto err; | ||
999 | |||
1000 | reg_type = g2d_get_reg_type(reg_offset); | ||
1001 | if (reg_type == REG_TYPE_NONE) | ||
1002 | goto err; | ||
1003 | |||
1004 | buf_desc = &buf_info->descs[reg_type]; | ||
1005 | value = cmdlist->data[index + 1]; | ||
1006 | |||
1007 | buf_desc->left_x = value & 0x1fff; | ||
1008 | buf_desc->top_y = (value & 0x1fff0000) >> 16; | ||
1009 | break; | ||
1010 | case G2D_SRC_RIGHT_BOTTOM: | ||
1011 | case G2D_DST_RIGHT_BOTTOM: | ||
1012 | if (for_addr) | ||
1013 | goto err; | ||
1014 | |||
1015 | reg_type = g2d_get_reg_type(reg_offset); | ||
1016 | if (reg_type == REG_TYPE_NONE) | ||
1017 | goto err; | ||
1018 | |||
1019 | buf_desc = &buf_info->descs[reg_type]; | ||
1020 | value = cmdlist->data[index + 1]; | ||
1021 | |||
1022 | buf_desc->right_x = value & 0x1fff; | ||
1023 | buf_desc->bottom_y = (value & 0x1fff0000) >> 16; | ||
758 | break; | 1024 | break; |
759 | default: | 1025 | default: |
760 | if (for_addr) | 1026 | if (for_addr) |
@@ -860,9 +1126,23 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, | |||
860 | cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR; | 1126 | cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR; |
861 | cmdlist->data[cmdlist->last++] = 0; | 1127 | cmdlist->data[cmdlist->last++] = 0; |
862 | 1128 | ||
1129 | /* | ||
1130 | * The 'LIST_HOLD' command should be written to DMA_HOLD_CMD_REG | ||
1131 | * and the GCF bit set in the INTEN register if the user wants a | ||
1132 | * G2D interrupt event once the current command list execution is | ||
1133 | * finished. | ||
1134 | * Otherwise only the ACF bit should be set in the INTEN register | ||
1135 | * so that a single interrupt occurs after all command lists | ||
1136 | * have been completed. | ||
1137 | */ | ||
863 | if (node->event) { | 1138 | if (node->event) { |
1139 | cmdlist->data[cmdlist->last++] = G2D_INTEN; | ||
1140 | cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF; | ||
864 | cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD; | 1141 | cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD; |
865 | cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD; | 1142 | cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD; |
1143 | } else { | ||
1144 | cmdlist->data[cmdlist->last++] = G2D_INTEN; | ||
1145 | cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF; | ||
866 | } | 1146 | } |
867 | 1147 | ||
868 | /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */ | 1148 | /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */ |
@@ -887,7 +1167,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data, | |||
887 | if (ret < 0) | 1167 | if (ret < 0) |
888 | goto err_free_event; | 1168 | goto err_free_event; |
889 | 1169 | ||
890 | node->map_nr = req->cmd_buf_nr; | 1170 | node->buf_info.map_nr = req->cmd_buf_nr; |
891 | if (req->cmd_buf_nr) { | 1171 | if (req->cmd_buf_nr) { |
892 | struct drm_exynos_g2d_cmd *cmd_buf; | 1172 | struct drm_exynos_g2d_cmd *cmd_buf; |
893 | 1173 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 67e17ce112b6..0e6fe000578c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
@@ -164,6 +164,27 @@ out: | |||
164 | exynos_gem_obj = NULL; | 164 | exynos_gem_obj = NULL; |
165 | } | 165 | } |
166 | 166 | ||
167 | unsigned long exynos_drm_gem_get_size(struct drm_device *dev, | ||
168 | unsigned int gem_handle, | ||
169 | struct drm_file *file_priv) | ||
170 | { | ||
171 | struct exynos_drm_gem_obj *exynos_gem_obj; | ||
172 | struct drm_gem_object *obj; | ||
173 | |||
174 | obj = drm_gem_object_lookup(dev, file_priv, gem_handle); | ||
175 | if (!obj) { | ||
176 | DRM_ERROR("failed to lookup gem object.\n"); | ||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | exynos_gem_obj = to_exynos_gem_obj(obj); | ||
181 | |||
182 | drm_gem_object_unreference_unlocked(obj); | ||
183 | |||
184 | return exynos_gem_obj->buffer->size; | ||
185 | } | ||
186 | |||
187 | |||
167 | struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, | 188 | struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev, |
168 | unsigned long size) | 189 | unsigned long size) |
169 | { | 190 | { |
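exynos_drm_gem_get_size() returns the size of the buffer behind a GEM handle (or 0 when the lookup fails), which gives a caller a way to bound-check a buffer descriptor before programming the hardware. The sketch below only illustrates the kind of arithmetic such a check involves; the descriptor fields (stride, cpp, left/top/right/bottom coordinates) and the fitting rule are assumptions for illustration, not the driver's exact policy.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct buf_desc {
	unsigned int left_x, top_y;     /* inclusive top-left corner */
	unsigned int right_x, bottom_y; /* exclusive bottom-right corner */
	unsigned int stride;            /* bytes per scanline */
	unsigned int cpp;               /* bytes per pixel */
};

/* true if the last addressed byte still fits inside a buffer of 'size' bytes */
static bool desc_fits(const struct buf_desc *d, uint64_t size)
{
	uint64_t last;

	if (d->right_x <= d->left_x || d->bottom_y <= d->top_y)
		return false;

	last = (uint64_t)(d->bottom_y - 1) * d->stride +
	       (uint64_t)d->right_x * d->cpp;
	return last <= size;
}

int main(void)
{
	struct buf_desc d = {
		.left_x = 0, .top_y = 0, .right_x = 1920, .bottom_y = 1080,
		.stride = 1920 * 4, .cpp = 4,
	};

	printf("fits in 8 MiB: %d\n", desc_fits(&d, 8u << 20)); /* 1 */
	printf("fits in 4 MiB: %d\n", desc_fits(&d, 4u << 20)); /* 0 */
	return 0;
}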
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 35ebac47dc2b..468766bee450 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h | |||
@@ -130,6 +130,11 @@ int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data, | |||
130 | int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, | 130 | int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, |
131 | struct drm_file *file_priv); | 131 | struct drm_file *file_priv); |
132 | 132 | ||
133 | /* get the buffer size for a given gem handle. */ | ||
134 | unsigned long exynos_drm_gem_get_size(struct drm_device *dev, | ||
135 | unsigned int gem_handle, | ||
136 | struct drm_file *file_priv); | ||
137 | |||
133 | /* initialize gem object. */ | 138 | /* initialize gem object. */ |
134 | int exynos_drm_gem_init_object(struct drm_gem_object *obj); | 139 | int exynos_drm_gem_init_object(struct drm_gem_object *obj); |
135 | 140 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 13ccbd4bcfaa..9504b0cd825a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c | |||
@@ -117,13 +117,12 @@ static struct edid *vidi_get_edid(struct device *dev, | |||
117 | } | 117 | } |
118 | 118 | ||
119 | edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; | 119 | edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; |
120 | edid = kzalloc(edid_len, GFP_KERNEL); | 120 | edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL); |
121 | if (!edid) { | 121 | if (!edid) { |
122 | DRM_DEBUG_KMS("failed to allocate edid\n"); | 122 | DRM_DEBUG_KMS("failed to allocate edid\n"); |
123 | return ERR_PTR(-ENOMEM); | 123 | return ERR_PTR(-ENOMEM); |
124 | } | 124 | } |
125 | 125 | ||
126 | memcpy(edid, ctx->raw_edid, edid_len); | ||
127 | return edid; | 126 | return edid; |
128 | } | 127 | } |
129 | 128 | ||
@@ -563,12 +562,11 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data, | |||
563 | return -EINVAL; | 562 | return -EINVAL; |
564 | } | 563 | } |
565 | edid_len = (1 + raw_edid->extensions) * EDID_LENGTH; | 564 | edid_len = (1 + raw_edid->extensions) * EDID_LENGTH; |
566 | ctx->raw_edid = kzalloc(edid_len, GFP_KERNEL); | 565 | ctx->raw_edid = kmemdup(raw_edid, edid_len, GFP_KERNEL); |
567 | if (!ctx->raw_edid) { | 566 | if (!ctx->raw_edid) { |
568 | DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); | 567 | DRM_DEBUG_KMS("failed to allocate raw_edid.\n"); |
569 | return -ENOMEM; | 568 | return -ENOMEM; |
570 | } | 569 | } |
571 | memcpy(ctx->raw_edid, raw_edid, edid_len); | ||
572 | } else { | 570 | } else { |
573 | /* | 571 | /* |
574 | * with connection = 0, free raw_edid | 572 | * with connection = 0, free raw_edid |
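The two vidi hunks replace a kzalloc() + memcpy() pair with kmemdup(), which allocates and copies in one call so the allocation length and the copy length can never drift apart. The user-space analogue looks like the sketch below, with malloc() standing in for the kernel allocator.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* user-space analogue of kmemdup(): allocate len bytes and copy src into them */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const unsigned char edid_header[8] = { 0x00, 0xff, 0xff, 0xff,
					       0xff, 0xff, 0xff, 0x00 };
	unsigned char *copy = memdup(edid_header, sizeof(edid_header));

	if (!copy)
		return 1;
	printf("copied %zu bytes, first byte 0x%02x\n",
	       sizeof(edid_header), copy[0]);
	free(copy);
	return 0;
}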
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index e919aba29b3d..2f4f72f07047 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c | |||
@@ -818,7 +818,7 @@ static void mixer_win_disable(void *ctx, int win) | |||
818 | mixer_ctx->win_data[win].enabled = false; | 818 | mixer_ctx->win_data[win].enabled = false; |
819 | } | 819 | } |
820 | 820 | ||
821 | int mixer_check_timing(void *ctx, struct fb_videomode *timing) | 821 | static int mixer_check_timing(void *ctx, struct fb_videomode *timing) |
822 | { | 822 | { |
823 | struct mixer_context *mixer_ctx = ctx; | 823 | struct mixer_context *mixer_ctx = ctx; |
824 | u32 w, h; | 824 | u32 w, h; |
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index aae31489c893..7299ea45dd03 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -103,7 +103,7 @@ static const char *cache_level_str(int type) | |||
103 | static void | 103 | static void |
104 | describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) | 104 | describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) |
105 | { | 105 | { |
106 | seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", | 106 | seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s", |
107 | &obj->base, | 107 | &obj->base, |
108 | get_pin_flag(obj), | 108 | get_pin_flag(obj), |
109 | get_tiling_flag(obj), | 109 | get_tiling_flag(obj), |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0a8eceb75902..e9b57893db2b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -125,6 +125,11 @@ MODULE_PARM_DESC(preliminary_hw_support, | |||
125 | "Enable Haswell and ValleyView Support. " | 125 | "Enable Haswell and ValleyView Support. " |
126 | "(default: false)"); | 126 | "(default: false)"); |
127 | 127 | ||
128 | int i915_disable_power_well __read_mostly = 0; | ||
129 | module_param_named(disable_power_well, i915_disable_power_well, int, 0600); | ||
130 | MODULE_PARM_DESC(disable_power_well, | ||
131 | "Disable the power well when possible (default: false)"); | ||
132 | |||
128 | static struct drm_driver driver; | 133 | static struct drm_driver driver; |
129 | extern int intel_agp_enabled; | 134 | extern int intel_agp_enabled; |
130 | 135 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e95337c97459..01769e2a9953 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -1398,6 +1398,7 @@ extern int i915_enable_fbc __read_mostly; | |||
1398 | extern bool i915_enable_hangcheck __read_mostly; | 1398 | extern bool i915_enable_hangcheck __read_mostly; |
1399 | extern int i915_enable_ppgtt __read_mostly; | 1399 | extern int i915_enable_ppgtt __read_mostly; |
1400 | extern unsigned int i915_preliminary_hw_support __read_mostly; | 1400 | extern unsigned int i915_preliminary_hw_support __read_mostly; |
1401 | extern int i915_disable_power_well __read_mostly; | ||
1401 | 1402 | ||
1402 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); | 1403 | extern int i915_suspend(struct drm_device *dev, pm_message_t state); |
1403 | extern int i915_resume(struct drm_device *dev); | 1404 | extern int i915_resume(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 2f2daebd0eef..9a48e1a2d417 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -57,7 +57,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args) | |||
57 | if (eb == NULL) { | 57 | if (eb == NULL) { |
58 | int size = args->buffer_count; | 58 | int size = args->buffer_count; |
59 | int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; | 59 | int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; |
60 | BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); | 60 | BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head)); |
61 | while (count > 2*size) | 61 | while (count > 2*size) |
62 | count >>= 1; | 62 | count >>= 1; |
63 | eb = kzalloc(count*sizeof(struct hlist_head) + | 63 | eb = kzalloc(count*sizeof(struct hlist_head) + |
@@ -732,6 +732,8 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, | |||
732 | int count) | 732 | int count) |
733 | { | 733 | { |
734 | int i; | 734 | int i; |
735 | int relocs_total = 0; | ||
736 | int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); | ||
735 | 737 | ||
736 | for (i = 0; i < count; i++) { | 738 | for (i = 0; i < count; i++) { |
737 | char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; | 739 | char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; |
@@ -740,10 +742,13 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, | |||
740 | if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) | 742 | if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) |
741 | return -EINVAL; | 743 | return -EINVAL; |
742 | 744 | ||
743 | /* First check for malicious input causing overflow */ | 745 | /* First check for malicious input causing overflow in |
744 | if (exec[i].relocation_count > | 746 | * the worst case where we need to allocate the entire |
745 | INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) | 747 | * relocation tree as a single array. |
748 | */ | ||
749 | if (exec[i].relocation_count > relocs_max - relocs_total) | ||
746 | return -EINVAL; | 750 | return -EINVAL; |
751 | relocs_total += exec[i].relocation_count; | ||
747 | 752 | ||
748 | length = exec[i].relocation_count * | 753 | length = exec[i].relocation_count * |
749 | sizeof(struct drm_i915_gem_relocation_entry); | 754 | sizeof(struct drm_i915_gem_relocation_entry); |
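The validate_exec_list() change guards a running total instead of each entry in isolation, and it phrases the test as relocation_count > relocs_max - relocs_total so the comparison itself cannot overflow; summing first and comparing afterwards could wrap. A standalone demonstration of the same pattern follows (ENTRY_SIZE is a stand-in value, not the real structure size).

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define ENTRY_SIZE 32	/* stand-in for sizeof(struct ... relocation_entry) */
static const int relocs_max = INT_MAX / ENTRY_SIZE;

/* overflow-safe accumulation: the sum never exceeds relocs_max, so it never wraps */
static bool accept(int *total, int count)
{
	if (count < 0 || count > relocs_max - *total)
		return false;
	*total += count;
	return true;
}

int main(void)
{
	int total = 0;

	printf("first batch:  %d\n", accept(&total, relocs_max - 10)); /* 1 */
	printf("second batch: %d\n", accept(&total, 100));             /* 0: would exceed the cap */
	printf("total so far: %d (cap %d)\n", total, relocs_max);
	return 0;
}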
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 32a3693905ec..1ce45a0a2d3e 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -45,6 +45,9 @@ | |||
45 | 45 | ||
46 | struct intel_crt { | 46 | struct intel_crt { |
47 | struct intel_encoder base; | 47 | struct intel_encoder base; |
48 | /* DPMS state is stored in the connector, which we need in the | ||
49 | * encoder's enable/disable callbacks */ | ||
50 | struct intel_connector *connector; | ||
48 | bool force_hotplug_required; | 51 | bool force_hotplug_required; |
49 | u32 adpa_reg; | 52 | u32 adpa_reg; |
50 | }; | 53 | }; |
@@ -81,29 +84,6 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder, | |||
81 | return true; | 84 | return true; |
82 | } | 85 | } |
83 | 86 | ||
84 | static void intel_disable_crt(struct intel_encoder *encoder) | ||
85 | { | ||
86 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | ||
87 | struct intel_crt *crt = intel_encoder_to_crt(encoder); | ||
88 | u32 temp; | ||
89 | |||
90 | temp = I915_READ(crt->adpa_reg); | ||
91 | temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE; | ||
92 | temp &= ~ADPA_DAC_ENABLE; | ||
93 | I915_WRITE(crt->adpa_reg, temp); | ||
94 | } | ||
95 | |||
96 | static void intel_enable_crt(struct intel_encoder *encoder) | ||
97 | { | ||
98 | struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; | ||
99 | struct intel_crt *crt = intel_encoder_to_crt(encoder); | ||
100 | u32 temp; | ||
101 | |||
102 | temp = I915_READ(crt->adpa_reg); | ||
103 | temp |= ADPA_DAC_ENABLE; | ||
104 | I915_WRITE(crt->adpa_reg, temp); | ||
105 | } | ||
106 | |||
107 | /* Note: The caller is required to filter out dpms modes not supported by the | 87 | /* Note: The caller is required to filter out dpms modes not supported by the |
108 | * platform. */ | 88 | * platform. */ |
109 | static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) | 89 | static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) |
@@ -135,6 +115,19 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) | |||
135 | I915_WRITE(crt->adpa_reg, temp); | 115 | I915_WRITE(crt->adpa_reg, temp); |
136 | } | 116 | } |
137 | 117 | ||
118 | static void intel_disable_crt(struct intel_encoder *encoder) | ||
119 | { | ||
120 | intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF); | ||
121 | } | ||
122 | |||
123 | static void intel_enable_crt(struct intel_encoder *encoder) | ||
124 | { | ||
125 | struct intel_crt *crt = intel_encoder_to_crt(encoder); | ||
126 | |||
127 | intel_crt_set_dpms(encoder, crt->connector->base.dpms); | ||
128 | } | ||
129 | |||
130 | |||
138 | static void intel_crt_dpms(struct drm_connector *connector, int mode) | 131 | static void intel_crt_dpms(struct drm_connector *connector, int mode) |
139 | { | 132 | { |
140 | struct drm_device *dev = connector->dev; | 133 | struct drm_device *dev = connector->dev; |
@@ -746,6 +739,7 @@ void intel_crt_init(struct drm_device *dev) | |||
746 | } | 739 | } |
747 | 740 | ||
748 | connector = &intel_connector->base; | 741 | connector = &intel_connector->base; |
742 | crt->connector = intel_connector; | ||
749 | drm_connector_init(dev, &intel_connector->base, | 743 | drm_connector_init(dev, &intel_connector->base, |
750 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); | 744 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); |
751 | 745 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 287b42c9d1a8..b20d50192fcc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -5771,6 +5771,11 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc, | |||
5771 | num_connectors++; | 5771 | num_connectors++; |
5772 | } | 5772 | } |
5773 | 5773 | ||
5774 | if (is_cpu_edp) | ||
5775 | intel_crtc->cpu_transcoder = TRANSCODER_EDP; | ||
5776 | else | ||
5777 | intel_crtc->cpu_transcoder = pipe; | ||
5778 | |||
5774 | /* We are not sure yet this won't happen. */ | 5779 | /* We are not sure yet this won't happen. */ |
5775 | WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", | 5780 | WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", |
5776 | INTEL_PCH_TYPE(dev)); | 5781 | INTEL_PCH_TYPE(dev)); |
@@ -5837,11 +5842,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
5837 | int pipe = intel_crtc->pipe; | 5842 | int pipe = intel_crtc->pipe; |
5838 | int ret; | 5843 | int ret; |
5839 | 5844 | ||
5840 | if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) | ||
5841 | intel_crtc->cpu_transcoder = TRANSCODER_EDP; | ||
5842 | else | ||
5843 | intel_crtc->cpu_transcoder = pipe; | ||
5844 | |||
5845 | drm_vblank_pre_modeset(dev, pipe); | 5845 | drm_vblank_pre_modeset(dev, pipe); |
5846 | 5846 | ||
5847 | ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, | 5847 | ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 6f728e5ee793..8fc93f90a7cd 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -820,6 +820,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
820 | struct intel_link_m_n m_n; | 820 | struct intel_link_m_n m_n; |
821 | int pipe = intel_crtc->pipe; | 821 | int pipe = intel_crtc->pipe; |
822 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; | 822 | enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
823 | int target_clock; | ||
823 | 824 | ||
824 | /* | 825 | /* |
825 | * Find the lane count in the intel_encoder private | 826 | * Find the lane count in the intel_encoder private |
@@ -835,13 +836,22 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
835 | } | 836 | } |
836 | } | 837 | } |
837 | 838 | ||
839 | target_clock = mode->clock; | ||
840 | for_each_encoder_on_crtc(dev, crtc, intel_encoder) { | ||
841 | if (intel_encoder->type == INTEL_OUTPUT_EDP) { | ||
842 | target_clock = intel_edp_target_clock(intel_encoder, | ||
843 | mode); | ||
844 | break; | ||
845 | } | ||
846 | } | ||
847 | |||
838 | /* | 848 | /* |
839 | * Compute the GMCH and Link ratios. The '3' here is | 849 | * Compute the GMCH and Link ratios. The '3' here is |
840 | * the number of bytes_per_pixel post-LUT, which we always | 850 | * the number of bytes_per_pixel post-LUT, which we always |
841 | * set up for 8-bits of R/G/B, or 3 bytes total. | 851 | * set up for 8-bits of R/G/B, or 3 bytes total. |
842 | */ | 852 | */ |
843 | intel_link_compute_m_n(intel_crtc->bpp, lane_count, | 853 | intel_link_compute_m_n(intel_crtc->bpp, lane_count, |
844 | mode->clock, adjusted_mode->clock, &m_n); | 854 | target_clock, adjusted_mode->clock, &m_n); |
845 | 855 | ||
846 | if (IS_HASWELL(dev)) { | 856 | if (IS_HASWELL(dev)) { |
847 | I915_WRITE(PIPE_DATA_M1(cpu_transcoder), | 857 | I915_WRITE(PIPE_DATA_M1(cpu_transcoder), |
@@ -1930,7 +1940,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1930 | for (i = 0; i < intel_dp->lane_count; i++) | 1940 | for (i = 0; i < intel_dp->lane_count; i++) |
1931 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | 1941 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
1932 | break; | 1942 | break; |
1933 | if (i == intel_dp->lane_count && voltage_tries == 5) { | 1943 | if (i == intel_dp->lane_count) { |
1934 | ++loop_tries; | 1944 | ++loop_tries; |
1935 | if (loop_tries == 5) { | 1945 | if (loop_tries == 5) { |
1936 | DRM_DEBUG_KMS("too many full retries, give up\n"); | 1946 | DRM_DEBUG_KMS("too many full retries, give up\n"); |
@@ -2549,12 +2559,15 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) | |||
2549 | { | 2559 | { |
2550 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); | 2560 | struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
2551 | struct intel_dp *intel_dp = &intel_dig_port->dp; | 2561 | struct intel_dp *intel_dp = &intel_dig_port->dp; |
2562 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | ||
2552 | 2563 | ||
2553 | i2c_del_adapter(&intel_dp->adapter); | 2564 | i2c_del_adapter(&intel_dp->adapter); |
2554 | drm_encoder_cleanup(encoder); | 2565 | drm_encoder_cleanup(encoder); |
2555 | if (is_edp(intel_dp)) { | 2566 | if (is_edp(intel_dp)) { |
2556 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); | 2567 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); |
2568 | mutex_lock(&dev->mode_config.mutex); | ||
2557 | ironlake_panel_vdd_off_sync(intel_dp); | 2569 | ironlake_panel_vdd_off_sync(intel_dp); |
2570 | mutex_unlock(&dev->mode_config.mutex); | ||
2558 | } | 2571 | } |
2559 | kfree(intel_dig_port); | 2572 | kfree(intel_dig_port); |
2560 | } | 2573 | } |
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index acf8aec9ada7..ef4744e1bf0b 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -203,7 +203,13 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin) | |||
203 | algo->data = bus; | 203 | algo->data = bus; |
204 | } | 204 | } |
205 | 205 | ||
206 | #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4) | 206 | /* |
207 | * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI | ||
208 | * mode. This results in spurious interrupt warnings if the legacy irq no. is | ||
209 | * shared with another device. The kernel then disables that interrupt source | ||
210 | * and so prevents the other device from working properly. | ||
211 | */ | ||
212 | #define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) | ||
207 | static int | 213 | static int |
208 | gmbus_wait_hw_status(struct drm_i915_private *dev_priv, | 214 | gmbus_wait_hw_status(struct drm_i915_private *dev_priv, |
209 | u32 gmbus2_status, | 215 | u32 gmbus2_status, |
@@ -214,6 +220,9 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv, | |||
214 | u32 gmbus2 = 0; | 220 | u32 gmbus2 = 0; |
215 | DEFINE_WAIT(wait); | 221 | DEFINE_WAIT(wait); |
216 | 222 | ||
223 | if (!HAS_GMBUS_IRQ(dev_priv->dev)) | ||
224 | gmbus4_irq_en = 0; | ||
225 | |||
217 | /* Important: The hw handles only the first bit, so set only one! Since | 226 | /* Important: The hw handles only the first bit, so set only one! Since |
218 | * we also need to check for NAKs besides the hw ready/idle signal, we | 227 | * we also need to check for NAKs besides the hw ready/idle signal, we |
219 | * need to wake up periodically and check that ourselves. */ | 228 | * need to wake up periodically and check that ourselves. */ |
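With HAS_GMBUS_IRQ() now requiring gen 5 or later, gmbus_wait_hw_status() zeroes the interrupt-enable bits on older hardware so the existing periodic wakeup path degrades to pure polling. Reduced to its decision, the pattern looks like the sketch below; the bit value is illustrative and does not reproduce the real GMBUS4 register layout.

#include <stdio.h>

/* illustrative enable bit; not the real GMBUS4 layout */
#define GMBUS_IDLE_EN (1 << 4)

static int has_gmbus_irq(int gen)
{
	/* gen4 can raise spurious legacy interrupts while in MSI mode */
	return gen >= 5;
}

/* decide which interrupt-enable bits the wait helper may program */
static unsigned int gmbus_irq_enable_mask(int gen, unsigned int requested)
{
	return has_gmbus_irq(gen) ? requested : 0;
}

int main(void)
{
	printf("gen4 mask: 0x%x\n", gmbus_irq_enable_mask(4, GMBUS_IDLE_EN)); /* 0x0  -> poll */
	printf("gen6 mask: 0x%x\n", gmbus_irq_enable_mask(6, GMBUS_IDLE_EN)); /* 0x10 -> irq  */
	return 0;
}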
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index a3730e0289e5..bee8cb6108a7 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -321,9 +321,6 @@ void intel_panel_enable_backlight(struct drm_device *dev, | |||
321 | if (dev_priv->backlight_level == 0) | 321 | if (dev_priv->backlight_level == 0) |
322 | dev_priv->backlight_level = intel_panel_get_max_backlight(dev); | 322 | dev_priv->backlight_level = intel_panel_get_max_backlight(dev); |
323 | 323 | ||
324 | dev_priv->backlight_enabled = true; | ||
325 | intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); | ||
326 | |||
327 | if (INTEL_INFO(dev)->gen >= 4) { | 324 | if (INTEL_INFO(dev)->gen >= 4) { |
328 | uint32_t reg, tmp; | 325 | uint32_t reg, tmp; |
329 | 326 | ||
@@ -359,12 +356,12 @@ void intel_panel_enable_backlight(struct drm_device *dev, | |||
359 | } | 356 | } |
360 | 357 | ||
361 | set_level: | 358 | set_level: |
362 | /* Check the current backlight level and try to set again if it's zero. | 359 | /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1. |
363 | * On some machines, BLC_PWM_CPU_CTL is cleared to zero automatically | 360 | * BLC_PWM_CPU_CTL may be cleared to zero automatically when these |
364 | * when BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1 are written. | 361 | * registers are set. |
365 | */ | 362 | */ |
366 | if (!intel_panel_get_backlight(dev)) | 363 | dev_priv->backlight_enabled = true; |
367 | intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); | 364 | intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); |
368 | } | 365 | } |
369 | 366 | ||
370 | static void intel_panel_init_backlight(struct drm_device *dev) | 367 | static void intel_panel_init_backlight(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index a1794c6df1bf..adca00783e61 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -4079,6 +4079,9 @@ void intel_set_power_well(struct drm_device *dev, bool enable) | |||
4079 | if (!IS_HASWELL(dev)) | 4079 | if (!IS_HASWELL(dev)) |
4080 | return; | 4080 | return; |
4081 | 4081 | ||
4082 | if (!i915_disable_power_well && !enable) | ||
4083 | return; | ||
4084 | |||
4082 | tmp = I915_READ(HSW_PWR_WELL_DRIVER); | 4085 | tmp = I915_READ(HSW_PWR_WELL_DRIVER); |
4083 | is_enabled = tmp & HSW_PWR_WELL_STATE; | 4086 | is_enabled = tmp & HSW_PWR_WELL_STATE; |
4084 | enable_requested = tmp & HSW_PWR_WELL_ENABLE; | 4087 | enable_requested = tmp & HSW_PWR_WELL_ENABLE; |
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index a274b9906ef8..78d8e919509f 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c | |||
@@ -382,19 +382,19 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) | |||
382 | m = n = p = 0; | 382 | m = n = p = 0; |
383 | vcomax = 800000; | 383 | vcomax = 800000; |
384 | vcomin = 400000; | 384 | vcomin = 400000; |
385 | pllreffreq = 3333; | 385 | pllreffreq = 33333; |
386 | 386 | ||
387 | delta = 0xffffffff; | 387 | delta = 0xffffffff; |
388 | permitteddelta = clock * 5 / 1000; | 388 | permitteddelta = clock * 5 / 1000; |
389 | 389 | ||
390 | for (testp = 16; testp > 0; testp--) { | 390 | for (testp = 16; testp > 0; testp >>= 1) { |
391 | if (clock * testp > vcomax) | 391 | if (clock * testp > vcomax) |
392 | continue; | 392 | continue; |
393 | if (clock * testp < vcomin) | 393 | if (clock * testp < vcomin) |
394 | continue; | 394 | continue; |
395 | 395 | ||
396 | for (testm = 1; testm < 33; testm++) { | 396 | for (testm = 1; testm < 33; testm++) { |
397 | for (testn = 1; testn < 257; testn++) { | 397 | for (testn = 17; testn < 257; testn++) { |
398 | computed = (pllreffreq * testn) / | 398 | computed = (pllreffreq * testn) / |
399 | (testm * testp); | 399 | (testm * testp); |
400 | if (computed > clock) | 400 | if (computed > clock) |
@@ -404,11 +404,11 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) | |||
404 | if (tmpdelta < delta) { | 404 | if (tmpdelta < delta) { |
405 | delta = tmpdelta; | 405 | delta = tmpdelta; |
406 | n = testn - 1; | 406 | n = testn - 1; |
407 | m = (testm - 1) | ((n >> 1) & 0x80); | 407 | m = (testm - 1); |
408 | p = testp - 1; | 408 | p = testp - 1; |
409 | } | 409 | } |
410 | if ((clock * testp) >= 600000) | 410 | if ((clock * testp) >= 600000) |
411 | p |= 80; | 411 | p |= 0x80; |
412 | } | 412 | } |
413 | } | 413 | } |
414 | } | 414 | } |
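The g200eh PLL fix corrects the reference frequency to 33333 and steps the post-divider through powers of two. Underneath, the routine is a brute-force search for (n, m, p) bringing ref * n / (m * p) close to the requested clock; the sketch below is a simplified version that only minimizes the absolute error and skips the register encoding the driver performs.

#include <stdio.h>
#include <stdlib.h>

/* brute-force PLL search: find n, m, p with ref*n/(m*p) closest to target (kHz) */
static long pll_search(long ref, long target, long vcomin, long vcomax,
		       int *best_n, int *best_m, int *best_p)
{
	long best_delta = -1;

	for (int p = 16; p > 0; p >>= 1) {	/* post-divider: powers of two */
		if (target * p > vcomax || target * p < vcomin)
			continue;
		for (int m = 1; m < 33; m++) {
			for (int n = 17; n < 257; n++) {
				long f = ref * n / (m * p);
				long delta = labs(f - target);

				if (best_delta < 0 || delta < best_delta) {
					best_delta = delta;
					*best_n = n;
					*best_m = m;
					*best_p = p;
				}
			}
		}
	}
	return best_delta;
}

int main(void)
{
	int n = 0, m = 0, p = 0;
	long delta = pll_search(33333, 65000, 400000, 800000, &n, &m, &p);

	if (delta < 0) {
		printf("no VCO setting in range\n");
		return 1;
	}
	printf("n=%d m=%d p=%d -> %ld kHz (delta %ld kHz)\n",
	       n, m, p, 33333L * n / (m * p), delta);
	return 0;
}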
@@ -751,8 +751,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
751 | int i; | 751 | int i; |
752 | unsigned char misc = 0; | 752 | unsigned char misc = 0; |
753 | unsigned char ext_vga[6]; | 753 | unsigned char ext_vga[6]; |
754 | unsigned char ext_vga_index24; | ||
755 | unsigned char dac_index90 = 0; | ||
756 | u8 bppshift; | 754 | u8 bppshift; |
757 | 755 | ||
758 | static unsigned char dacvalue[] = { | 756 | static unsigned char dacvalue[] = { |
@@ -803,7 +801,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
803 | option2 = 0x0000b000; | 801 | option2 = 0x0000b000; |
804 | break; | 802 | break; |
805 | case G200_ER: | 803 | case G200_ER: |
806 | dac_index90 = 0; | ||
807 | break; | 804 | break; |
808 | } | 805 | } |
809 | 806 | ||
@@ -852,10 +849,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
852 | WREG_DAC(i, dacvalue[i]); | 849 | WREG_DAC(i, dacvalue[i]); |
853 | } | 850 | } |
854 | 851 | ||
855 | if (mdev->type == G200_ER) { | 852 | if (mdev->type == G200_ER) |
856 | WREG_DAC(0x90, dac_index90); | 853 | WREG_DAC(0x90, 0); |
857 | } | ||
858 | |||
859 | 854 | ||
860 | if (option) | 855 | if (option) |
861 | pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option); | 856 | pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option); |
@@ -952,8 +947,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
952 | if (mdev->type == G200_WB) | 947 | if (mdev->type == G200_WB) |
953 | ext_vga[1] |= 0x88; | 948 | ext_vga[1] |= 0x88; |
954 | 949 | ||
955 | ext_vga_index24 = 0x05; | ||
956 | |||
957 | /* Set pixel clocks */ | 950 | /* Set pixel clocks */ |
958 | misc = 0x2d; | 951 | misc = 0x2d; |
959 | WREG8(MGA_MISC_OUT, misc); | 952 | WREG8(MGA_MISC_OUT, misc); |
@@ -965,7 +958,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, | |||
965 | } | 958 | } |
966 | 959 | ||
967 | if (mdev->type == G200_ER) | 960 | if (mdev->type == G200_ER) |
968 | WREG_ECRT(24, ext_vga_index24); | 961 | WREG_ECRT(0x24, 0x5); |
969 | 962 | ||
970 | if (mdev->type == G200_EV) { | 963 | if (mdev->type == G200_EV) { |
971 | WREG_ECRT(6, 0); | 964 | WREG_ECRT(6, 0); |
diff --git a/drivers/gpu/drm/nouveau/core/core/object.c b/drivers/gpu/drm/nouveau/core/core/object.c index 0daab62ea14c..3b2e7b6304d3 100644 --- a/drivers/gpu/drm/nouveau/core/core/object.c +++ b/drivers/gpu/drm/nouveau/core/core/object.c | |||
@@ -278,7 +278,6 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle) | |||
278 | struct nouveau_object *parent = NULL; | 278 | struct nouveau_object *parent = NULL; |
279 | struct nouveau_object *namedb = NULL; | 279 | struct nouveau_object *namedb = NULL; |
280 | struct nouveau_handle *handle = NULL; | 280 | struct nouveau_handle *handle = NULL; |
281 | int ret = -EINVAL; | ||
282 | 281 | ||
283 | parent = nouveau_handle_ref(client, _parent); | 282 | parent = nouveau_handle_ref(client, _parent); |
284 | if (!parent) | 283 | if (!parent) |
@@ -295,7 +294,7 @@ nouveau_object_del(struct nouveau_object *client, u32 _parent, u32 _handle) | |||
295 | } | 294 | } |
296 | 295 | ||
297 | nouveau_object_ref(NULL, &parent); | 296 | nouveau_object_ref(NULL, &parent); |
298 | return ret; | 297 | return handle ? 0 : -EINVAL; |
299 | } | 298 | } |
300 | 299 | ||
301 | int | 300 | int |
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h index 6b17b614629f..0b20fc0d19c1 100644 --- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h +++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <core/device.h> | 4 | #include <core/device.h> |
5 | #include <core/subdev.h> | 5 | #include <core/subdev.h> |
6 | 6 | ||
7 | enum nouveau_therm_mode { | 7 | enum nouveau_therm_fan_mode { |
8 | NOUVEAU_THERM_CTRL_NONE = 0, | 8 | NOUVEAU_THERM_CTRL_NONE = 0, |
9 | NOUVEAU_THERM_CTRL_MANUAL = 1, | 9 | NOUVEAU_THERM_CTRL_MANUAL = 1, |
10 | NOUVEAU_THERM_CTRL_AUTO = 2, | 10 | NOUVEAU_THERM_CTRL_AUTO = 2, |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index e816f06637a7..0e2c1a4f1659 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c | |||
@@ -248,6 +248,22 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios) | |||
248 | } | 248 | } |
249 | } | 249 | } |
250 | 250 | ||
251 | static void | ||
252 | nouveau_bios_shadow_platform(struct nouveau_bios *bios) | ||
253 | { | ||
254 | struct pci_dev *pdev = nv_device(bios)->pdev; | ||
255 | size_t size; | ||
256 | |||
257 | void __iomem *rom = pci_platform_rom(pdev, &size); | ||
258 | if (rom && size) { | ||
259 | bios->data = kmalloc(size, GFP_KERNEL); | ||
260 | if (bios->data) { | ||
261 | memcpy_fromio(bios->data, rom, size); | ||
262 | bios->size = size; | ||
263 | } | ||
264 | } | ||
265 | } | ||
266 | |||
251 | static int | 267 | static int |
252 | nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) | 268 | nouveau_bios_score(struct nouveau_bios *bios, const bool writeable) |
253 | { | 269 | { |
@@ -288,6 +304,7 @@ nouveau_bios_shadow(struct nouveau_bios *bios) | |||
288 | { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL }, | 304 | { "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL }, |
289 | { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL }, | 305 | { "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL }, |
290 | { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL }, | 306 | { "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL }, |
307 | { "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL }, | ||
291 | {} | 308 | {} |
292 | }; | 309 | }; |
293 | struct methods *mthd, *best; | 310 | struct methods *mthd, *best; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c index f794dc89a3b2..a00a5a76e2d6 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c | |||
@@ -134,7 +134,7 @@ nouveau_therm_alarm(struct nouveau_alarm *alarm) | |||
134 | } | 134 | } |
135 | 135 | ||
136 | int | 136 | int |
137 | nouveau_therm_mode(struct nouveau_therm *therm, int mode) | 137 | nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode) |
138 | { | 138 | { |
139 | struct nouveau_therm_priv *priv = (void *)therm; | 139 | struct nouveau_therm_priv *priv = (void *)therm; |
140 | struct nouveau_device *device = nv_device(therm); | 140 | struct nouveau_device *device = nv_device(therm); |
@@ -149,10 +149,15 @@ nouveau_therm_mode(struct nouveau_therm *therm, int mode) | |||
149 | (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0)) | 149 | (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0)) |
150 | return -EINVAL; | 150 | return -EINVAL; |
151 | 151 | ||
152 | /* do not allow automatic fan management if the thermal sensor is | ||
153 | * not available */ | ||
154 | if (priv->mode == 2 && therm->temp_get(therm) < 0) | ||
155 | return -EINVAL; | ||
156 | |||
152 | if (priv->mode == mode) | 157 | if (priv->mode == mode) |
153 | return 0; | 158 | return 0; |
154 | 159 | ||
155 | nv_info(therm, "Thermal management: %s\n", name[mode]); | 160 | nv_info(therm, "fan management: %s\n", name[mode]); |
156 | nouveau_therm_update(therm, mode); | 161 | nouveau_therm_update(therm, mode); |
157 | return 0; | 162 | return 0; |
158 | } | 163 | } |
@@ -213,7 +218,7 @@ nouveau_therm_attr_set(struct nouveau_therm *therm, | |||
213 | priv->fan->bios.max_duty = value; | 218 | priv->fan->bios.max_duty = value; |
214 | return 0; | 219 | return 0; |
215 | case NOUVEAU_THERM_ATTR_FAN_MODE: | 220 | case NOUVEAU_THERM_ATTR_FAN_MODE: |
216 | return nouveau_therm_mode(therm, value); | 221 | return nouveau_therm_fan_mode(therm, value); |
217 | case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST: | 222 | case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST: |
218 | priv->bios_sensor.thrs_fan_boost.temp = value; | 223 | priv->bios_sensor.thrs_fan_boost.temp = value; |
219 | priv->sensor.program_alarms(therm); | 224 | priv->sensor.program_alarms(therm); |
@@ -263,7 +268,7 @@ _nouveau_therm_init(struct nouveau_object *object) | |||
263 | return ret; | 268 | return ret; |
264 | 269 | ||
265 | if (priv->suspend >= 0) | 270 | if (priv->suspend >= 0) |
266 | nouveau_therm_mode(therm, priv->mode); | 271 | nouveau_therm_fan_mode(therm, priv->mode); |
267 | priv->sensor.program_alarms(therm); | 272 | priv->sensor.program_alarms(therm); |
268 | return 0; | 273 | return 0; |
269 | } | 274 | } |
@@ -313,11 +318,12 @@ nouveau_therm_create_(struct nouveau_object *parent, | |||
313 | int | 318 | int |
314 | nouveau_therm_preinit(struct nouveau_therm *therm) | 319 | nouveau_therm_preinit(struct nouveau_therm *therm) |
315 | { | 320 | { |
316 | nouveau_therm_ic_ctor(therm); | ||
317 | nouveau_therm_sensor_ctor(therm); | 321 | nouveau_therm_sensor_ctor(therm); |
322 | nouveau_therm_ic_ctor(therm); | ||
318 | nouveau_therm_fan_ctor(therm); | 323 | nouveau_therm_fan_ctor(therm); |
319 | 324 | ||
320 | nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_NONE); | 325 | nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_NONE); |
326 | nouveau_therm_sensor_preinit(therm); | ||
321 | return 0; | 327 | return 0; |
322 | } | 328 | } |
323 | 329 | ||
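Besides the rename to nouveau_therm_fan_mode(), the hunk above refuses automatic fan management when the temperature sensor reports an error (a negative reading). The sketch below reduces that guard to a standalone form and checks the requested mode, which is the intent the in-code comment states.

#include <stdio.h>

enum fan_mode { FAN_NONE, FAN_MANUAL, FAN_AUTO };

struct therm {
	enum fan_mode mode;
	int (*temp_get)(void);	/* negative return means "no usable sensor" */
};

static int set_fan_mode(struct therm *t, enum fan_mode mode)
{
	/* automatic management needs a working sensor */
	if (mode == FAN_AUTO && t->temp_get() < 0)
		return -1;
	if (t->mode == mode)
		return 0;
	t->mode = mode;
	return 0;
}

static int temp_ok(void)      { return 45; }
static int temp_missing(void) { return -1; }

int main(void)
{
	struct therm good = { FAN_NONE, temp_ok };
	struct therm bad  = { FAN_NONE, temp_missing };

	printf("good -> auto: %d\n", set_fan_mode(&good, FAN_AUTO)); /*  0 */
	printf("bad  -> auto: %d\n", set_fan_mode(&bad,  FAN_AUTO)); /* -1 */
	return 0;
}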
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c index e24090bac195..8b3adec5fbb1 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c | |||
@@ -32,6 +32,7 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c, | |||
32 | struct i2c_board_info *info) | 32 | struct i2c_board_info *info) |
33 | { | 33 | { |
34 | struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); | 34 | struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c); |
35 | struct nvbios_therm_sensor *sensor = &priv->bios_sensor; | ||
35 | struct i2c_client *client; | 36 | struct i2c_client *client; |
36 | 37 | ||
37 | request_module("%s%s", I2C_MODULE_PREFIX, info->type); | 38 | request_module("%s%s", I2C_MODULE_PREFIX, info->type); |
@@ -46,8 +47,9 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c, | |||
46 | } | 47 | } |
47 | 48 | ||
48 | nv_info(priv, | 49 | nv_info(priv, |
49 | "Found an %s at address 0x%x (controlled by lm_sensors)\n", | 50 | "Found an %s at address 0x%x (controlled by lm_sensors, " |
50 | info->type, info->addr); | 51 | "temp offset %+i C)\n", |
52 | info->type, info->addr, sensor->offset_constant); | ||
51 | priv->ic = client; | 53 | priv->ic = client; |
52 | 54 | ||
53 | return true; | 55 | return true; |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c index 0f5363edb964..a70d1b7e397b 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c | |||
@@ -29,54 +29,83 @@ struct nv40_therm_priv { | |||
29 | struct nouveau_therm_priv base; | 29 | struct nouveau_therm_priv base; |
30 | }; | 30 | }; |
31 | 31 | ||
32 | enum nv40_sensor_style { INVALID_STYLE = -1, OLD_STYLE = 0, NEW_STYLE = 1 }; | ||
33 | |||
34 | static enum nv40_sensor_style | ||
35 | nv40_sensor_style(struct nouveau_therm *therm) | ||
36 | { | ||
37 | struct nouveau_device *device = nv_device(therm); | ||
38 | |||
39 | switch (device->chipset) { | ||
40 | case 0x43: | ||
41 | case 0x44: | ||
42 | case 0x4a: | ||
43 | case 0x47: | ||
44 | return OLD_STYLE; | ||
45 | |||
46 | case 0x46: | ||
47 | case 0x49: | ||
48 | case 0x4b: | ||
49 | case 0x4e: | ||
50 | case 0x4c: | ||
51 | case 0x67: | ||
52 | case 0x68: | ||
53 | case 0x63: | ||
54 | return NEW_STYLE; | ||
55 | default: | ||
56 | return INVALID_STYLE; | ||
57 | } | ||
58 | } | ||
59 | |||
32 | static int | 60 | static int |
33 | nv40_sensor_setup(struct nouveau_therm *therm) | 61 | nv40_sensor_setup(struct nouveau_therm *therm) |
34 | { | 62 | { |
35 | struct nouveau_device *device = nv_device(therm); | 63 | enum nv40_sensor_style style = nv40_sensor_style(therm); |
36 | 64 | ||
37 | /* enable ADC readout and disable the ALARM threshold */ | 65 | /* enable ADC readout and disable the ALARM threshold */ |
38 | if (device->chipset >= 0x46) { | 66 | if (style == NEW_STYLE) { |
39 | nv_mask(therm, 0x15b8, 0x80000000, 0); | 67 | nv_mask(therm, 0x15b8, 0x80000000, 0); |
40 | nv_wr32(therm, 0x15b0, 0x80003fff); | 68 | nv_wr32(therm, 0x15b0, 0x80003fff); |
41 | mdelay(10); /* wait for the temperature to stabilize */ | 69 | mdelay(20); /* wait for the temperature to stabilize */ |
42 | return nv_rd32(therm, 0x15b4) & 0x3fff; | 70 | return nv_rd32(therm, 0x15b4) & 0x3fff; |
43 | } else { | 71 | } else if (style == OLD_STYLE) { |
44 | nv_wr32(therm, 0x15b0, 0xff); | 72 | nv_wr32(therm, 0x15b0, 0xff); |
73 | mdelay(20); /* wait for the temperature to stabilize */ | ||
45 | return nv_rd32(therm, 0x15b4) & 0xff; | 74 | return nv_rd32(therm, 0x15b4) & 0xff; |
46 | } | 75 | } else |
76 | return -ENODEV; | ||
47 | } | 77 | } |
48 | 78 | ||
49 | static int | 79 | static int |
50 | nv40_temp_get(struct nouveau_therm *therm) | 80 | nv40_temp_get(struct nouveau_therm *therm) |
51 | { | 81 | { |
52 | struct nouveau_therm_priv *priv = (void *)therm; | 82 | struct nouveau_therm_priv *priv = (void *)therm; |
53 | struct nouveau_device *device = nv_device(therm); | ||
54 | struct nvbios_therm_sensor *sensor = &priv->bios_sensor; | 83 | struct nvbios_therm_sensor *sensor = &priv->bios_sensor; |
84 | enum nv40_sensor_style style = nv40_sensor_style(therm); | ||
55 | int core_temp; | 85 | int core_temp; |
56 | 86 | ||
57 | if (device->chipset >= 0x46) { | 87 | if (style == NEW_STYLE) { |
58 | nv_wr32(therm, 0x15b0, 0x80003fff); | 88 | nv_wr32(therm, 0x15b0, 0x80003fff); |
59 | core_temp = nv_rd32(therm, 0x15b4) & 0x3fff; | 89 | core_temp = nv_rd32(therm, 0x15b4) & 0x3fff; |
60 | } else { | 90 | } else if (style == OLD_STYLE) { |
61 | nv_wr32(therm, 0x15b0, 0xff); | 91 | nv_wr32(therm, 0x15b0, 0xff); |
62 | core_temp = nv_rd32(therm, 0x15b4) & 0xff; | 92 | core_temp = nv_rd32(therm, 0x15b4) & 0xff; |
63 | } | 93 | } else |
64 | 94 | return -ENODEV; | |
65 | /* Setup the sensor if the temperature is 0 */ | ||
66 | if (core_temp == 0) | ||
67 | core_temp = nv40_sensor_setup(therm); | ||
68 | 95 | ||
69 | if (sensor->slope_div == 0) | 96 | /* if the slope or the offset is unset, do not use the sensor */ |
70 | sensor->slope_div = 1; | 97 | if (!sensor->slope_div || !sensor->slope_mult || |
71 | if (sensor->offset_den == 0) | 98 | !sensor->offset_num || !sensor->offset_den) |
72 | sensor->offset_den = 1; | 99 | return -ENODEV; |
73 | if (sensor->slope_mult < 1) | ||
74 | sensor->slope_mult = 1; | ||
75 | 100 | ||
76 | core_temp = core_temp * sensor->slope_mult / sensor->slope_div; | 101 | core_temp = core_temp * sensor->slope_mult / sensor->slope_div; |
77 | core_temp = core_temp + sensor->offset_num / sensor->offset_den; | 102 | core_temp = core_temp + sensor->offset_num / sensor->offset_den; |
78 | core_temp = core_temp + sensor->offset_constant - 8; | 103 | core_temp = core_temp + sensor->offset_constant - 8; |
79 | 104 | ||
105 | /* reserve negative temperatures for errors */ | ||
106 | if (core_temp < 0) | ||
107 | core_temp = 0; | ||
108 | |||
80 | return core_temp; | 109 | return core_temp; |
81 | } | 110 | } |
82 | 111 | ||
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h index 06b98706b3fc..438d9824b774 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h | |||
@@ -102,7 +102,7 @@ struct nouveau_therm_priv { | |||
102 | struct i2c_client *ic; | 102 | struct i2c_client *ic; |
103 | }; | 103 | }; |
104 | 104 | ||
105 | int nouveau_therm_mode(struct nouveau_therm *therm, int mode); | 105 | int nouveau_therm_fan_mode(struct nouveau_therm *therm, int mode); |
106 | int nouveau_therm_attr_get(struct nouveau_therm *therm, | 106 | int nouveau_therm_attr_get(struct nouveau_therm *therm, |
107 | enum nouveau_therm_attr_type type); | 107 | enum nouveau_therm_attr_type type); |
108 | int nouveau_therm_attr_set(struct nouveau_therm *therm, | 108 | int nouveau_therm_attr_set(struct nouveau_therm *therm, |
@@ -122,6 +122,7 @@ int nouveau_therm_fan_sense(struct nouveau_therm *therm); | |||
122 | 122 | ||
123 | int nouveau_therm_preinit(struct nouveau_therm *); | 123 | int nouveau_therm_preinit(struct nouveau_therm *); |
124 | 124 | ||
125 | void nouveau_therm_sensor_preinit(struct nouveau_therm *); | ||
125 | void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm, | 126 | void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm, |
126 | enum nouveau_therm_thrs thrs, | 127 | enum nouveau_therm_thrs thrs, |
127 | enum nouveau_therm_thrs_state st); | 128 | enum nouveau_therm_thrs_state st); |
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c index b37624af8297..470f6a47b656 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c | |||
@@ -34,10 +34,6 @@ nouveau_therm_temp_set_defaults(struct nouveau_therm *therm) | |||
34 | { | 34 | { |
35 | struct nouveau_therm_priv *priv = (void *)therm; | 35 | struct nouveau_therm_priv *priv = (void *)therm; |
36 | 36 | ||
37 | priv->bios_sensor.slope_mult = 1; | ||
38 | priv->bios_sensor.slope_div = 1; | ||
39 | priv->bios_sensor.offset_num = 0; | ||
40 | priv->bios_sensor.offset_den = 1; | ||
41 | priv->bios_sensor.offset_constant = 0; | 37 | priv->bios_sensor.offset_constant = 0; |
42 | 38 | ||
43 | priv->bios_sensor.thrs_fan_boost.temp = 90; | 39 | priv->bios_sensor.thrs_fan_boost.temp = 90; |
@@ -60,11 +56,6 @@ nouveau_therm_temp_safety_checks(struct nouveau_therm *therm) | |||
60 | struct nouveau_therm_priv *priv = (void *)therm; | 56 | struct nouveau_therm_priv *priv = (void *)therm; |
61 | struct nvbios_therm_sensor *s = &priv->bios_sensor; | 57 | struct nvbios_therm_sensor *s = &priv->bios_sensor; |
62 | 58 | ||
63 | if (!priv->bios_sensor.slope_div) | ||
64 | priv->bios_sensor.slope_div = 1; | ||
65 | if (!priv->bios_sensor.offset_den) | ||
66 | priv->bios_sensor.offset_den = 1; | ||
67 | |||
68 | /* enforce a minimum hysteresis on thresholds */ | 59 | /* enforce a minimum hysteresis on thresholds */ |
69 | s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2); | 60 | s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2); |
70 | s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2); | 61 | s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2); |
@@ -106,16 +97,16 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm, | |||
106 | const char *thresolds[] = { | 97 | const char *thresolds[] = { |
107 | "fanboost", "downclock", "critical", "shutdown" | 98 | "fanboost", "downclock", "critical", "shutdown" |
108 | }; | 99 | }; |
109 | uint8_t temperature = therm->temp_get(therm); | 100 | int temperature = therm->temp_get(therm); |
110 | 101 | ||
111 | if (thrs < 0 || thrs > 3) | 102 | if (thrs < 0 || thrs > 3) |
112 | return; | 103 | return; |
113 | 104 | ||
114 | if (dir == NOUVEAU_THERM_THRS_FALLING) | 105 | if (dir == NOUVEAU_THERM_THRS_FALLING) |
115 | nv_info(therm, "temperature (%u C) went below the '%s' threshold\n", | 106 | nv_info(therm, "temperature (%i C) went below the '%s' threshold\n", |
116 | temperature, thresolds[thrs]); | 107 | temperature, thresolds[thrs]); |
117 | else | 108 | else |
118 | nv_info(therm, "temperature (%u C) hit the '%s' threshold\n", | 109 | nv_info(therm, "temperature (%i C) hit the '%s' threshold\n", |
119 | temperature, thresolds[thrs]); | 110 | temperature, thresolds[thrs]); |
120 | 111 | ||
121 | active = (dir == NOUVEAU_THERM_THRS_RISING); | 112 | active = (dir == NOUVEAU_THERM_THRS_RISING); |
@@ -123,7 +114,7 @@ void nouveau_therm_sensor_event(struct nouveau_therm *therm, | |||
123 | case NOUVEAU_THERM_THRS_FANBOOST: | 114 | case NOUVEAU_THERM_THRS_FANBOOST: |
124 | if (active) { | 115 | if (active) { |
125 | nouveau_therm_fan_set(therm, true, 100); | 116 | nouveau_therm_fan_set(therm, true, 100); |
126 | nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_AUTO); | 117 | nouveau_therm_fan_mode(therm, NOUVEAU_THERM_CTRL_AUTO); |
127 | } | 118 | } |
128 | break; | 119 | break; |
129 | case NOUVEAU_THERM_THRS_DOWNCLOCK: | 120 | case NOUVEAU_THERM_THRS_DOWNCLOCK: |
@@ -202,7 +193,7 @@ alarm_timer_callback(struct nouveau_alarm *alarm) | |||
202 | NOUVEAU_THERM_THRS_SHUTDOWN); | 193 | NOUVEAU_THERM_THRS_SHUTDOWN); |
203 | 194 | ||
204 | /* schedule the next poll in one second */ | 195 | /* schedule the next poll in one second */ |
205 | if (list_empty(&alarm->head)) | 196 | if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head)) |
206 | ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm); | 197 | ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm); |
207 | 198 | ||
208 | spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); | 199 | spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags); |
@@ -225,6 +216,17 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm) | |||
225 | alarm_timer_callback(&priv->sensor.therm_poll_alarm); | 216 | alarm_timer_callback(&priv->sensor.therm_poll_alarm); |
226 | } | 217 | } |
227 | 218 | ||
219 | void | ||
220 | nouveau_therm_sensor_preinit(struct nouveau_therm *therm) | ||
221 | { | ||
222 | const char *sensor_avail = "yes"; | ||
223 | |||
224 | if (therm->temp_get(therm) < 0) | ||
225 | sensor_avail = "no"; | ||
226 | |||
227 | nv_info(therm, "internal sensor: %s\n", sensor_avail); | ||
228 | } | ||
229 | |||
228 | int | 230 | int |
229 | nouveau_therm_sensor_ctor(struct nouveau_therm *therm) | 231 | nouveau_therm_sensor_ctor(struct nouveau_therm *therm) |
230 | { | 232 | { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 3b6dc883e150..5eb3e0da7c6e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c | |||
@@ -391,7 +391,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) | |||
391 | struct nouveau_drm *drm = nouveau_drm(dev); | 391 | struct nouveau_drm *drm = nouveau_drm(dev); |
392 | struct nouveau_device *device = nv_device(drm->device); | 392 | struct nouveau_device *device = nv_device(drm->device); |
393 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); | 393 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); |
394 | struct nouveau_abi16_chan *chan, *temp; | 394 | struct nouveau_abi16_chan *chan = NULL, *temp; |
395 | struct nouveau_abi16_ntfy *ntfy; | 395 | struct nouveau_abi16_ntfy *ntfy; |
396 | struct nouveau_object *object; | 396 | struct nouveau_object *object; |
397 | struct nv_dma_class args = {}; | 397 | struct nv_dma_class args = {}; |
@@ -404,10 +404,11 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS) | |||
404 | if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) | 404 | if (unlikely(nv_device(abi16->device)->card_type >= NV_C0)) |
405 | return nouveau_abi16_put(abi16, -EINVAL); | 405 | return nouveau_abi16_put(abi16, -EINVAL); |
406 | 406 | ||
407 | list_for_each_entry_safe(chan, temp, &abi16->channels, head) { | 407 | list_for_each_entry(temp, &abi16->channels, head) { |
408 | if (chan->chan->handle == (NVDRM_CHAN | info->channel)) | 408 | if (temp->chan->handle == (NVDRM_CHAN | info->channel)) { |
409 | chan = temp; | ||
409 | break; | 410 | break; |
410 | chan = NULL; | 411 | } |
411 | } | 412 | } |
412 | 413 | ||
413 | if (!chan) | 414 | if (!chan) |
@@ -459,17 +460,18 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS) | |||
459 | { | 460 | { |
460 | struct drm_nouveau_gpuobj_free *fini = data; | 461 | struct drm_nouveau_gpuobj_free *fini = data; |
461 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); | 462 | struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); |
462 | struct nouveau_abi16_chan *chan, *temp; | 463 | struct nouveau_abi16_chan *chan = NULL, *temp; |
463 | struct nouveau_abi16_ntfy *ntfy; | 464 | struct nouveau_abi16_ntfy *ntfy; |
464 | int ret; | 465 | int ret; |
465 | 466 | ||
466 | if (unlikely(!abi16)) | 467 | if (unlikely(!abi16)) |
467 | return -ENOMEM; | 468 | return -ENOMEM; |
468 | 469 | ||
469 | list_for_each_entry_safe(chan, temp, &abi16->channels, head) { | 470 | list_for_each_entry(temp, &abi16->channels, head) { |
470 | if (chan->chan->handle == (NVDRM_CHAN | fini->channel)) | 471 | if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) { |
472 | chan = temp; | ||
471 | break; | 473 | break; |
472 | chan = NULL; | 474 | } |
473 | } | 475 | } |
474 | 476 | ||
475 | if (!chan) | 477 | if (!chan) |
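Both nouveau_abi16 hunks fix the same lookup bug: when list_for_each_entry_safe() runs off the end of the list, the cursor ends up pointing at the list head rather than NULL, so a missing channel was not detected. The corrected pattern keeps the result in a separate pointer initialized to NULL; here is a minimal rendition with a hand-rolled singly linked list.

#include <stdio.h>

struct chan {
	unsigned int handle;
	struct chan *next;
};

/* search by handle; returns NULL when nothing matches */
static struct chan *find_chan(struct chan *head, unsigned int handle)
{
	struct chan *result = NULL;

	for (struct chan *temp = head; temp; temp = temp->next) {
		if (temp->handle == handle) {
			result = temp;
			break;
		}
	}
	return result;
}

int main(void)
{
	struct chan c2 = { 0xbeef, NULL };
	struct chan c1 = { 0xcafe, &c2 };

	printf("0xbeef found: %s\n", find_chan(&c1, 0xbeef) ? "yes" : "no");
	printf("0x1234 found: %s\n", find_chan(&c1, 0x1234) ? "yes" : "no");
	return 0;
}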
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index d1099365bfc1..c95decf543e9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -72,11 +72,25 @@ module_param_named(modeset, nouveau_modeset, int, 0400); | |||
72 | static struct drm_driver driver; | 72 | static struct drm_driver driver; |
73 | 73 | ||
74 | static int | 74 | static int |
75 | nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) | ||
76 | { | ||
77 | struct nouveau_drm *drm = | ||
78 | container_of(event, struct nouveau_drm, vblank[head]); | ||
79 | drm_handle_vblank(drm->dev, head); | ||
80 | return NVKM_EVENT_KEEP; | ||
81 | } | ||
82 | |||
83 | static int | ||
75 | nouveau_drm_vblank_enable(struct drm_device *dev, int head) | 84 | nouveau_drm_vblank_enable(struct drm_device *dev, int head) |
76 | { | 85 | { |
77 | struct nouveau_drm *drm = nouveau_drm(dev); | 86 | struct nouveau_drm *drm = nouveau_drm(dev); |
78 | struct nouveau_disp *pdisp = nouveau_disp(drm->device); | 87 | struct nouveau_disp *pdisp = nouveau_disp(drm->device); |
79 | nouveau_event_get(pdisp->vblank, head, &drm->vblank); | 88 | |
89 | if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank))) | ||
90 | return -EIO; | ||
91 | WARN_ON_ONCE(drm->vblank[head].func); | ||
92 | drm->vblank[head].func = nouveau_drm_vblank_handler; | ||
93 | nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]); | ||
80 | return 0; | 94 | return 0; |
81 | } | 95 | } |
82 | 96 | ||
@@ -85,16 +99,11 @@ nouveau_drm_vblank_disable(struct drm_device *dev, int head) | |||
85 | { | 99 | { |
86 | struct nouveau_drm *drm = nouveau_drm(dev); | 100 | struct nouveau_drm *drm = nouveau_drm(dev); |
87 | struct nouveau_disp *pdisp = nouveau_disp(drm->device); | 101 | struct nouveau_disp *pdisp = nouveau_disp(drm->device); |
88 | nouveau_event_put(pdisp->vblank, head, &drm->vblank); | 102 | if (drm->vblank[head].func) |
89 | } | 103 | nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]); |
90 | 104 | else | |
91 | static int | 105 | WARN_ON_ONCE(1); |
92 | nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head) | 106 | drm->vblank[head].func = NULL; |
93 | { | ||
94 | struct nouveau_drm *drm = | ||
95 | container_of(event, struct nouveau_drm, vblank); | ||
96 | drm_handle_vblank(drm->dev, head); | ||
97 | return NVKM_EVENT_KEEP; | ||
98 | } | 107 | } |
99 | 108 | ||
100 | static u64 | 109 | static u64 |
@@ -292,7 +301,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) | |||
292 | 301 | ||
293 | dev->dev_private = drm; | 302 | dev->dev_private = drm; |
294 | drm->dev = dev; | 303 | drm->dev = dev; |
295 | drm->vblank.func = nouveau_drm_vblank_handler; | ||
296 | 304 | ||
297 | INIT_LIST_HEAD(&drm->clients); | 305 | INIT_LIST_HEAD(&drm->clients); |
298 | spin_lock_init(&drm->tile.lock); | 306 | spin_lock_init(&drm->tile.lock); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h index b25df374c901..9c39bafbef2c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h | |||
@@ -113,7 +113,7 @@ struct nouveau_drm { | |||
113 | struct nvbios vbios; | 113 | struct nvbios vbios; |
114 | struct nouveau_display *display; | 114 | struct nouveau_display *display; |
115 | struct backlight_device *backlight; | 115 | struct backlight_device *backlight; |
116 | struct nouveau_eventh vblank; | 116 | struct nouveau_eventh vblank[4]; |
117 | 117 | ||
118 | /* power management */ | 118 | /* power management */ |
119 | struct nouveau_pm *pm; | 119 | struct nouveau_pm *pm; |
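
The nouveau_drm.c and nouveau_drm.h changes above turn the single vblank event handle into a fixed per-head array and make the enable path validate the head index before using it. A generic sketch of that guard against a hypothetical fixed-size array (not the driver's actual structures):

#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/errno.h>

struct head_handler {
        int (*func)(int head);
};

static struct head_handler vblank[4];

static int vblank_enable(int head, int (*handler)(int))
{
        /* valid indices are 0 .. ARRAY_SIZE(vblank) - 1 */
        if (WARN_ON_ONCE(head < 0 || head >= ARRAY_SIZE(vblank)))
                return -EIO;

        WARN_ON_ONCE(vblank[head].func);        /* catch a double enable */
        vblank[head].func = handler;
        return 0;
}
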
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index bb54098c6d97..936b442a6ab7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c | |||
@@ -402,8 +402,12 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) | |||
402 | struct drm_device *dev = dev_get_drvdata(d); | 402 | struct drm_device *dev = dev_get_drvdata(d); |
403 | struct nouveau_drm *drm = nouveau_drm(dev); | 403 | struct nouveau_drm *drm = nouveau_drm(dev); |
404 | struct nouveau_therm *therm = nouveau_therm(drm->device); | 404 | struct nouveau_therm *therm = nouveau_therm(drm->device); |
405 | int temp = therm->temp_get(therm); | ||
405 | 406 | ||
406 | return snprintf(buf, PAGE_SIZE, "%d\n", therm->temp_get(therm) * 1000); | 407 | if (temp < 0) |
408 | return temp; | ||
409 | |||
410 | return snprintf(buf, PAGE_SIZE, "%d\n", temp * 1000); | ||
407 | } | 411 | } |
408 | static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, | 412 | static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp, |
409 | NULL, 0); | 413 | NULL, 0); |
@@ -871,7 +875,12 @@ static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR, | |||
871 | nouveau_hwmon_get_pwm1_max, | 875 | nouveau_hwmon_get_pwm1_max, |
872 | nouveau_hwmon_set_pwm1_max, 0); | 876 | nouveau_hwmon_set_pwm1_max, 0); |
873 | 877 | ||
874 | static struct attribute *hwmon_attributes[] = { | 878 | static struct attribute *hwmon_default_attributes[] = { |
879 | &sensor_dev_attr_name.dev_attr.attr, | ||
880 | &sensor_dev_attr_update_rate.dev_attr.attr, | ||
881 | NULL | ||
882 | }; | ||
883 | static struct attribute *hwmon_temp_attributes[] = { | ||
875 | &sensor_dev_attr_temp1_input.dev_attr.attr, | 884 | &sensor_dev_attr_temp1_input.dev_attr.attr, |
876 | &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr, | 885 | &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr, |
877 | &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr, | 886 | &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr, |
@@ -882,8 +891,6 @@ static struct attribute *hwmon_attributes[] = { | |||
882 | &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, | 891 | &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, |
883 | &sensor_dev_attr_temp1_emergency.dev_attr.attr, | 892 | &sensor_dev_attr_temp1_emergency.dev_attr.attr, |
884 | &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr, | 893 | &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr, |
885 | &sensor_dev_attr_name.dev_attr.attr, | ||
886 | &sensor_dev_attr_update_rate.dev_attr.attr, | ||
887 | NULL | 894 | NULL |
888 | }; | 895 | }; |
889 | static struct attribute *hwmon_fan_rpm_attributes[] = { | 896 | static struct attribute *hwmon_fan_rpm_attributes[] = { |
@@ -898,8 +905,11 @@ static struct attribute *hwmon_pwm_fan_attributes[] = { | |||
898 | NULL | 905 | NULL |
899 | }; | 906 | }; |
900 | 907 | ||
901 | static const struct attribute_group hwmon_attrgroup = { | 908 | static const struct attribute_group hwmon_default_attrgroup = { |
902 | .attrs = hwmon_attributes, | 909 | .attrs = hwmon_default_attributes, |
910 | }; | ||
911 | static const struct attribute_group hwmon_temp_attrgroup = { | ||
912 | .attrs = hwmon_temp_attributes, | ||
903 | }; | 913 | }; |
904 | static const struct attribute_group hwmon_fan_rpm_attrgroup = { | 914 | static const struct attribute_group hwmon_fan_rpm_attrgroup = { |
905 | .attrs = hwmon_fan_rpm_attributes, | 915 | .attrs = hwmon_fan_rpm_attributes, |
@@ -931,13 +941,22 @@ nouveau_hwmon_init(struct drm_device *dev) | |||
931 | } | 941 | } |
932 | dev_set_drvdata(hwmon_dev, dev); | 942 | dev_set_drvdata(hwmon_dev, dev); |
933 | 943 | ||
934 | /* default sysfs entries */ | 944 | /* set the default attributes */ |
935 | ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_attrgroup); | 945 | ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_default_attrgroup); |
936 | if (ret) { | 946 | if (ret) { |
937 | if (ret) | 947 | if (ret) |
938 | goto error; | 948 | goto error; |
939 | } | 949 | } |
940 | 950 | ||
951 | /* if the card has a working thermal sensor */ | ||
952 | if (therm->temp_get(therm) >= 0) { | ||
953 | ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_temp_attrgroup); | ||
954 | if (ret) { | ||
955 | if (ret) | ||
956 | goto error; | ||
957 | } | ||
958 | } | ||
959 | |||
941 | /* if the card has a pwm fan */ | 960 | /* if the card has a pwm fan */ |
942 | /*XXX: incorrect, need better detection for this, some boards have | 961 | /*XXX: incorrect, need better detection for this, some boards have |
943 | * the gpio entries for pwm fan control even when there's no | 962 | * the gpio entries for pwm fan control even when there's no |
@@ -979,11 +998,10 @@ nouveau_hwmon_fini(struct drm_device *dev) | |||
979 | struct nouveau_pm *pm = nouveau_pm(dev); | 998 | struct nouveau_pm *pm = nouveau_pm(dev); |
980 | 999 | ||
981 | if (pm->hwmon) { | 1000 | if (pm->hwmon) { |
982 | sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); | 1001 | sysfs_remove_group(&pm->hwmon->kobj, &hwmon_default_attrgroup); |
983 | sysfs_remove_group(&pm->hwmon->kobj, | 1002 | sysfs_remove_group(&pm->hwmon->kobj, &hwmon_temp_attrgroup); |
984 | &hwmon_pwm_fan_attrgroup); | 1003 | sysfs_remove_group(&pm->hwmon->kobj, &hwmon_pwm_fan_attrgroup); |
985 | sysfs_remove_group(&pm->hwmon->kobj, | 1004 | sysfs_remove_group(&pm->hwmon->kobj, &hwmon_fan_rpm_attrgroup); |
986 | &hwmon_fan_rpm_attrgroup); | ||
987 | 1005 | ||
988 | hwmon_device_unregister(pm->hwmon); | 1006 | hwmon_device_unregister(pm->hwmon); |
989 | } | 1007 | } |
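
The nouveau_pm.c changes split the flat hwmon attribute list into per-feature groups so the temperature files are only created when the thermal sensor actually reads back a sane value, and teardown removes every group it may have created. A reduced sketch of that conditional-group pattern, with the attribute definitions elided and sensor_ok standing in for the therm->temp_get() probe:

#include <linux/sysfs.h>
#include <linux/kobject.h>

static struct attribute *default_attrs[] = { /* name, update_rate, ... */ NULL };
static struct attribute *temp_attrs[] = { /* temp1_input, ... */ NULL };

static const struct attribute_group default_group = { .attrs = default_attrs };
static const struct attribute_group temp_group = { .attrs = temp_attrs };

static int hwmon_groups_create(struct kobject *kobj, bool sensor_ok)
{
        int ret = sysfs_create_group(kobj, &default_group);

        if (ret)
                return ret;

        /* only expose temperature files when the sensor works */
        if (sensor_ok) {
                ret = sysfs_create_group(kobj, &temp_group);
                if (ret)
                        sysfs_remove_group(kobj, &default_group);
        }
        return ret;
}
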
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 2db57990f65c..1ddc03e51bf4 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -479,7 +479,7 @@ nv50_display_flip_wait(void *data) | |||
479 | { | 479 | { |
480 | struct nv50_display_flip *flip = data; | 480 | struct nv50_display_flip *flip = data; |
481 | if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) == | 481 | if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) == |
482 | flip->chan->data); | 482 | flip->chan->data) |
483 | return true; | 483 | return true; |
484 | usleep_range(1, 2); | 484 | usleep_range(1, 2); |
485 | return false; | 485 | return false; |
@@ -524,6 +524,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
524 | swap_interval <<= 4; | 524 | swap_interval <<= 4; |
525 | if (swap_interval == 0) | 525 | if (swap_interval == 0) |
526 | swap_interval |= 0x100; | 526 | swap_interval |= 0x100; |
527 | if (chan == NULL) | ||
528 | evo_sync(crtc->dev); | ||
527 | 529 | ||
528 | push = evo_wait(sync, 128); | 530 | push = evo_wait(sync, 128); |
529 | if (unlikely(push == NULL)) | 531 | if (unlikely(push == NULL)) |
@@ -586,8 +588,6 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |||
586 | sync->addr ^= 0x10; | 588 | sync->addr ^= 0x10; |
587 | sync->data++; | 589 | sync->data++; |
588 | FIRE_RING (chan); | 590 | FIRE_RING (chan); |
589 | } else { | ||
590 | evo_sync(crtc->dev); | ||
591 | } | 591 | } |
592 | 592 | ||
593 | /* queue the flip */ | 593 | /* queue the flip */ |
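
The nv50_display_flip_wait() fix above is a one-character change: a stray semicolon after the if formed an empty statement, so the return true that follows ran unconditionally and the wait reported completion on the first poll. The pitfall in isolation, with flag as a placeholder condition:

#include <stdbool.h>

static bool flip_done_buggy(const int *flag)
{
        if (*flag != 0);        /* BUG: empty body; the next line always runs */
                return true;

        return false;           /* never reached */
}

static bool flip_done_fixed(const int *flag)
{
        if (*flag != 0)         /* return true only when the flag is set */
                return true;

        return false;
}
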
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index d4c633e12863..27769e724b6d 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -468,13 +468,19 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
468 | (rdev->pdev->device == 0x9907) || | 468 | (rdev->pdev->device == 0x9907) || |
469 | (rdev->pdev->device == 0x9908) || | 469 | (rdev->pdev->device == 0x9908) || |
470 | (rdev->pdev->device == 0x9909) || | 470 | (rdev->pdev->device == 0x9909) || |
471 | (rdev->pdev->device == 0x990B) || | ||
472 | (rdev->pdev->device == 0x990C) || | ||
473 | (rdev->pdev->device == 0x990F) || | ||
471 | (rdev->pdev->device == 0x9910) || | 474 | (rdev->pdev->device == 0x9910) || |
472 | (rdev->pdev->device == 0x9917)) { | 475 | (rdev->pdev->device == 0x9917) || |
476 | (rdev->pdev->device == 0x9999)) { | ||
473 | rdev->config.cayman.max_simds_per_se = 6; | 477 | rdev->config.cayman.max_simds_per_se = 6; |
474 | rdev->config.cayman.max_backends_per_se = 2; | 478 | rdev->config.cayman.max_backends_per_se = 2; |
475 | } else if ((rdev->pdev->device == 0x9903) || | 479 | } else if ((rdev->pdev->device == 0x9903) || |
476 | (rdev->pdev->device == 0x9904) || | 480 | (rdev->pdev->device == 0x9904) || |
477 | (rdev->pdev->device == 0x990A) || | 481 | (rdev->pdev->device == 0x990A) || |
482 | (rdev->pdev->device == 0x990D) || | ||
483 | (rdev->pdev->device == 0x990E) || | ||
478 | (rdev->pdev->device == 0x9913) || | 484 | (rdev->pdev->device == 0x9913) || |
479 | (rdev->pdev->device == 0x9918)) { | 485 | (rdev->pdev->device == 0x9918)) { |
480 | rdev->config.cayman.max_simds_per_se = 4; | 486 | rdev->config.cayman.max_simds_per_se = 4; |
@@ -483,6 +489,9 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
483 | (rdev->pdev->device == 0x9990) || | 489 | (rdev->pdev->device == 0x9990) || |
484 | (rdev->pdev->device == 0x9991) || | 490 | (rdev->pdev->device == 0x9991) || |
485 | (rdev->pdev->device == 0x9994) || | 491 | (rdev->pdev->device == 0x9994) || |
492 | (rdev->pdev->device == 0x9995) || | ||
493 | (rdev->pdev->device == 0x9996) || | ||
494 | (rdev->pdev->device == 0x999A) || | ||
486 | (rdev->pdev->device == 0x99A0)) { | 495 | (rdev->pdev->device == 0x99A0)) { |
487 | rdev->config.cayman.max_simds_per_se = 3; | 496 | rdev->config.cayman.max_simds_per_se = 3; |
488 | rdev->config.cayman.max_backends_per_se = 1; | 497 | rdev->config.cayman.max_backends_per_se = 1; |
@@ -616,11 +625,22 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
616 | WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); | 625 | WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); |
617 | WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); | 626 | WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); |
618 | 627 | ||
619 | tmp = gb_addr_config & NUM_PIPES_MASK; | 628 | if ((rdev->config.cayman.max_backends_per_se == 1) && |
620 | tmp = r6xx_remap_render_backend(rdev, tmp, | 629 | (rdev->flags & RADEON_IS_IGP)) { |
621 | rdev->config.cayman.max_backends_per_se * | 630 | if ((disabled_rb_mask & 3) == 1) { |
622 | rdev->config.cayman.max_shader_engines, | 631 | /* RB0 disabled, RB1 enabled */ |
623 | CAYMAN_MAX_BACKENDS, disabled_rb_mask); | 632 | tmp = 0x11111111; |
633 | } else { | ||
634 | /* RB1 disabled, RB0 enabled */ | ||
635 | tmp = 0x00000000; | ||
636 | } | ||
637 | } else { | ||
638 | tmp = gb_addr_config & NUM_PIPES_MASK; | ||
639 | tmp = r6xx_remap_render_backend(rdev, tmp, | ||
640 | rdev->config.cayman.max_backends_per_se * | ||
641 | rdev->config.cayman.max_shader_engines, | ||
642 | CAYMAN_MAX_BACKENDS, disabled_rb_mask); | ||
643 | } | ||
624 | WREG32(GB_BACKEND_MAP, tmp); | 644 | WREG32(GB_BACKEND_MAP, tmp); |
625 | 645 | ||
626 | cgts_tcc_disable = 0xffff0000; | 646 | cgts_tcc_disable = 0xffff0000; |
@@ -1771,6 +1791,7 @@ int cayman_resume(struct radeon_device *rdev) | |||
1771 | int cayman_suspend(struct radeon_device *rdev) | 1791 | int cayman_suspend(struct radeon_device *rdev) |
1772 | { | 1792 | { |
1773 | r600_audio_fini(rdev); | 1793 | r600_audio_fini(rdev); |
1794 | radeon_vm_manager_fini(rdev); | ||
1774 | cayman_cp_enable(rdev, false); | 1795 | cayman_cp_enable(rdev, false); |
1775 | cayman_dma_stop(rdev); | 1796 | cayman_dma_stop(rdev); |
1776 | evergreen_irq_suspend(rdev); | 1797 | evergreen_irq_suspend(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index bedda9caadd9..6e05a2e75a46 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -122,10 +122,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, | |||
122 | goto out_cleanup; | 122 | goto out_cleanup; |
123 | } | 123 | } |
124 | 124 | ||
125 | /* r100 doesn't have dma engine so skip the test */ | 125 | if (rdev->asic->copy.dma) { |
126 | /* also, VRAM-to-VRAM test doesn't make much sense for DMA */ | ||
127 | /* skip it as well if domains are the same */ | ||
128 | if ((rdev->asic->copy.dma) && (sdomain != ddomain)) { | ||
129 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, | 126 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, |
130 | RADEON_BENCHMARK_COPY_DMA, n); | 127 | RADEON_BENCHMARK_COPY_DMA, n); |
131 | if (time < 0) | 128 | if (time < 0) |
@@ -135,13 +132,15 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size, | |||
135 | sdomain, ddomain, "dma"); | 132 | sdomain, ddomain, "dma"); |
136 | } | 133 | } |
137 | 134 | ||
138 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, | 135 | if (rdev->asic->copy.blit) { |
139 | RADEON_BENCHMARK_COPY_BLIT, n); | 136 | time = radeon_benchmark_do_move(rdev, size, saddr, daddr, |
140 | if (time < 0) | 137 | RADEON_BENCHMARK_COPY_BLIT, n); |
141 | goto out_cleanup; | 138 | if (time < 0) |
142 | if (time > 0) | 139 | goto out_cleanup; |
143 | radeon_benchmark_log_results(n, size, time, | 140 | if (time > 0) |
144 | sdomain, ddomain, "blit"); | 141 | radeon_benchmark_log_results(n, size, time, |
142 | sdomain, ddomain, "blit"); | ||
143 | } | ||
145 | 144 | ||
146 | out_cleanup: | 145 | out_cleanup: |
147 | if (sobj) { | 146 | if (sobj) { |
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index b8015913d382..fa3c56fba294 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c | |||
@@ -99,6 +99,29 @@ static bool radeon_read_bios(struct radeon_device *rdev) | |||
99 | return true; | 99 | return true; |
100 | } | 100 | } |
101 | 101 | ||
102 | static bool radeon_read_platform_bios(struct radeon_device *rdev) | ||
103 | { | ||
104 | uint8_t __iomem *bios; | ||
105 | size_t size; | ||
106 | |||
107 | rdev->bios = NULL; | ||
108 | |||
109 | bios = pci_platform_rom(rdev->pdev, &size); | ||
110 | if (!bios) { | ||
111 | return false; | ||
112 | } | ||
113 | |||
114 | if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { | ||
115 | return false; | ||
116 | } | ||
117 | rdev->bios = kmemdup(bios, size, GFP_KERNEL); | ||
118 | if (rdev->bios == NULL) { | ||
119 | return false; | ||
120 | } | ||
121 | |||
122 | return true; | ||
123 | } | ||
124 | |||
102 | #ifdef CONFIG_ACPI | 125 | #ifdef CONFIG_ACPI |
103 | /* ATRM is used to get the BIOS on the discrete cards in | 126 | /* ATRM is used to get the BIOS on the discrete cards in |
104 | * dual-gpu systems. | 127 | * dual-gpu systems. |
@@ -620,6 +643,9 @@ bool radeon_get_bios(struct radeon_device *rdev) | |||
620 | if (r == false) { | 643 | if (r == false) { |
621 | r = radeon_read_disabled_bios(rdev); | 644 | r = radeon_read_disabled_bios(rdev); |
622 | } | 645 | } |
646 | if (r == false) { | ||
647 | r = radeon_read_platform_bios(rdev); | ||
648 | } | ||
623 | if (r == false || rdev->bios == NULL) { | 649 | if (r == false || rdev->bios == NULL) { |
624 | DRM_ERROR("Unable to locate a BIOS ROM\n"); | 650 | DRM_ERROR("Unable to locate a BIOS ROM\n"); |
625 | rdev->bios = NULL; | 651 | rdev->bios = NULL; |
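
radeon_read_platform_bios() is added above as a last-resort fallback: fetch the ROM image exposed by the platform firmware, reject it unless it begins with the 0x55 0xAA option-ROM signature, and keep a private copy with kmemdup(). The signature check itself is plain C and can be sketched standalone (the helper name is illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

/* A PC option ROM must start with the two-byte signature 0x55 0xAA. */
static bool option_rom_valid(const uint8_t *rom, size_t size)
{
        return rom != NULL && size >= 2 && rom[0] == 0x55 && rom[1] == 0xaa;
}
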
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 9128120da044..bafbe3216952 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -4469,6 +4469,7 @@ int si_resume(struct radeon_device *rdev) | |||
4469 | 4469 | ||
4470 | int si_suspend(struct radeon_device *rdev) | 4470 | int si_suspend(struct radeon_device *rdev) |
4471 | { | 4471 | { |
4472 | radeon_vm_manager_fini(rdev); | ||
4472 | si_cp_enable(rdev, false); | 4473 | si_cp_enable(rdev, false); |
4473 | cayman_dma_stop(rdev); | 4474 | cayman_dma_stop(rdev); |
4474 | si_irq_suspend(rdev); | 4475 | si_irq_suspend(rdev); |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 512b01c04ea7..aa341d135867 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -2077,7 +2077,6 @@ static const struct hid_device_id hid_ignore_list[] = { | |||
2077 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) }, | 2077 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) }, |
2078 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) }, | 2078 | { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) }, |
2079 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) }, | 2079 | { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) }, |
2080 | { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) }, | ||
2081 | { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) }, | 2080 | { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) }, |
2082 | { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) }, | 2081 | { HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) }, |
2083 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, | 2082 | { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) }, |
@@ -2244,6 +2243,18 @@ bool hid_ignore(struct hid_device *hdev) | |||
2244 | hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST)) | 2243 | hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST)) |
2245 | return true; | 2244 | return true; |
2246 | break; | 2245 | break; |
2246 | case USB_VENDOR_ID_ATMEL_V_USB: | ||
2247 | /* Masterkit MA901 usb radio based on Atmel tiny85 chip and | ||
2248 | * it has the same USB ID as many Atmel V-USB devices. This | ||
2249 | * usb radio is handled by radio-ma901.c driver so we want | ||
2250 | * ignore the hid. Check the name, bus, product and ignore | ||
2251 | * if we have MA901 usb radio. | ||
2252 | */ | ||
2253 | if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB && | ||
2254 | hdev->bus == BUS_USB && | ||
2255 | strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0) | ||
2256 | return true; | ||
2257 | break; | ||
2247 | } | 2258 | } |
2248 | 2259 | ||
2249 | if (hdev->type == HID_TYPE_USBMOUSE && | 2260 | if (hdev->type == HID_TYPE_USBMOUSE && |
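
Because the V-USB vendor/product pair 16c0:05df is shared by many unrelated gadgets, the new hid_ignore() case above has to qualify the match with the bus type and the reported device name before leaving the MA901 radio to radio-ma901.c. A compact sketch of that multi-field match; the struct below is a stand-in for the hid_device fields the real check consults:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define BUS_USB 0x03

struct dev_id {
        uint16_t vendor;
        uint16_t product;
        int bus;
        const char *name;
};

/* Ignore only the MA901 radio, not every device sharing the V-USB IDs. */
static bool is_ma901_radio(const struct dev_id *d)
{
        return d->vendor == 0x16c0 && d->product == 0x05df &&
               d->bus == BUS_USB &&
               strncmp(d->name, "www.masterkit.ru MA901", 22) == 0;
}
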
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 92e47e5c9564..5309fd5eb0eb 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -158,6 +158,8 @@ | |||
158 | #define USB_VENDOR_ID_ATMEL 0x03eb | 158 | #define USB_VENDOR_ID_ATMEL 0x03eb |
159 | #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c | 159 | #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c |
160 | #define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118 | 160 | #define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118 |
161 | #define USB_VENDOR_ID_ATMEL_V_USB 0x16c0 | ||
162 | #define USB_DEVICE_ID_ATMEL_V_USB 0x05df | ||
161 | 163 | ||
162 | #define USB_VENDOR_ID_AUREAL 0x0755 | 164 | #define USB_VENDOR_ID_AUREAL 0x0755 |
163 | #define USB_DEVICE_ID_AUREAL_W01RN 0x2626 | 165 | #define USB_DEVICE_ID_AUREAL_W01RN 0x2626 |
@@ -557,9 +559,6 @@ | |||
557 | #define USB_VENDOR_ID_MADCATZ 0x0738 | 559 | #define USB_VENDOR_ID_MADCATZ 0x0738 |
558 | #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 | 560 | #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 |
559 | 561 | ||
560 | #define USB_VENDOR_ID_MASTERKIT 0x16c0 | ||
561 | #define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df | ||
562 | |||
563 | #define USB_VENDOR_ID_MCC 0x09db | 562 | #define USB_VENDOR_ID_MCC 0x09db |
564 | #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 | 563 | #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 |
565 | #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a | 564 | #define USB_DEVICE_ID_MCC_PMD1208LS 0x007a |
@@ -590,6 +589,9 @@ | |||
590 | #define USB_VENDOR_ID_MONTEREY 0x0566 | 589 | #define USB_VENDOR_ID_MONTEREY 0x0566 |
591 | #define USB_DEVICE_ID_GENIUS_KB29E 0x3004 | 590 | #define USB_DEVICE_ID_GENIUS_KB29E 0x3004 |
592 | 591 | ||
592 | #define USB_VENDOR_ID_MSI 0x1770 | ||
593 | #define USB_DEVICE_ID_MSI_GX680R_LED_PANEL 0xff00 | ||
594 | |||
593 | #define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400 | 595 | #define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400 |
594 | #define USB_DEVICE_ID_N_S_HARMONY 0xc359 | 596 | #define USB_DEVICE_ID_N_S_HARMONY 0xc359 |
595 | 597 | ||
@@ -684,6 +686,9 @@ | |||
684 | #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001 | 686 | #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001 |
685 | #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008 | 687 | #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008 |
686 | 688 | ||
689 | #define USB_VENDOR_ID_REALTEK 0x0bda | ||
690 | #define USB_DEVICE_ID_REALTEK_READER 0x0152 | ||
691 | |||
687 | #define USB_VENDOR_ID_ROCCAT 0x1e7d | 692 | #define USB_VENDOR_ID_ROCCAT 0x1e7d |
688 | #define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4 | 693 | #define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4 |
689 | #define USB_DEVICE_ID_ROCCAT_ISKU 0x319c | 694 | #define USB_DEVICE_ID_ROCCAT_ISKU 0x319c |
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index f7f113ba083e..a8ce44296cfd 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c | |||
@@ -462,6 +462,21 @@ static int magicmouse_input_mapping(struct hid_device *hdev, | |||
462 | return 0; | 462 | return 0; |
463 | } | 463 | } |
464 | 464 | ||
465 | static void magicmouse_input_configured(struct hid_device *hdev, | ||
466 | struct hid_input *hi) | ||
467 | |||
468 | { | ||
469 | struct magicmouse_sc *msc = hid_get_drvdata(hdev); | ||
470 | |||
471 | int ret = magicmouse_setup_input(msc->input, hdev); | ||
472 | if (ret) { | ||
473 | hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); | ||
474 | /* clean msc->input to notify probe() of the failure */ | ||
475 | msc->input = NULL; | ||
476 | } | ||
477 | } | ||
478 | |||
479 | |||
465 | static int magicmouse_probe(struct hid_device *hdev, | 480 | static int magicmouse_probe(struct hid_device *hdev, |
466 | const struct hid_device_id *id) | 481 | const struct hid_device_id *id) |
467 | { | 482 | { |
@@ -493,15 +508,10 @@ static int magicmouse_probe(struct hid_device *hdev, | |||
493 | goto err_free; | 508 | goto err_free; |
494 | } | 509 | } |
495 | 510 | ||
496 | /* We do this after hid-input is done parsing reports so that | 511 | if (!msc->input) { |
497 | * hid-input uses the most natural button and axis IDs. | 512 | hid_err(hdev, "magicmouse input not registered\n"); |
498 | */ | 513 | ret = -ENOMEM; |
499 | if (msc->input) { | 514 | goto err_stop_hw; |
500 | ret = magicmouse_setup_input(msc->input, hdev); | ||
501 | if (ret) { | ||
502 | hid_err(hdev, "magicmouse setup input failed (%d)\n", ret); | ||
503 | goto err_stop_hw; | ||
504 | } | ||
505 | } | 515 | } |
506 | 516 | ||
507 | if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE) | 517 | if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE) |
@@ -568,6 +578,7 @@ static struct hid_driver magicmouse_driver = { | |||
568 | .remove = magicmouse_remove, | 578 | .remove = magicmouse_remove, |
569 | .raw_event = magicmouse_raw_event, | 579 | .raw_event = magicmouse_raw_event, |
570 | .input_mapping = magicmouse_input_mapping, | 580 | .input_mapping = magicmouse_input_mapping, |
581 | .input_configured = magicmouse_input_configured, | ||
571 | }; | 582 | }; |
572 | module_hid_driver(magicmouse_driver); | 583 | module_hid_driver(magicmouse_driver); |
573 | 584 | ||
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 7a1ebb867cf4..82e9211b3ca9 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c | |||
@@ -621,6 +621,7 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field, | |||
621 | { | 621 | { |
622 | struct mt_device *td = hid_get_drvdata(hid); | 622 | struct mt_device *td = hid_get_drvdata(hid); |
623 | __s32 quirks = td->mtclass.quirks; | 623 | __s32 quirks = td->mtclass.quirks; |
624 | struct input_dev *input = field->hidinput->input; | ||
624 | 625 | ||
625 | if (hid->claimed & HID_CLAIMED_INPUT) { | 626 | if (hid->claimed & HID_CLAIMED_INPUT) { |
626 | switch (usage->hid) { | 627 | switch (usage->hid) { |
@@ -670,13 +671,16 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field, | |||
670 | break; | 671 | break; |
671 | 672 | ||
672 | default: | 673 | default: |
674 | if (usage->type) | ||
675 | input_event(input, usage->type, usage->code, | ||
676 | value); | ||
673 | return; | 677 | return; |
674 | } | 678 | } |
675 | 679 | ||
676 | if (usage->usage_index + 1 == field->report_count) { | 680 | if (usage->usage_index + 1 == field->report_count) { |
677 | /* we only take into account the last report. */ | 681 | /* we only take into account the last report. */ |
678 | if (usage->hid == td->last_slot_field) | 682 | if (usage->hid == td->last_slot_field) |
679 | mt_complete_slot(td, field->hidinput->input); | 683 | mt_complete_slot(td, input); |
680 | 684 | ||
681 | if (field->index == td->last_field_index | 685 | if (field->index == td->last_field_index |
682 | && td->num_received >= td->num_expected) | 686 | && td->num_received >= td->num_expected) |
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index e0e6abf1cd3b..19b8360f2330 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -73,6 +73,7 @@ static const struct hid_blacklist { | |||
73 | { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, | 73 | { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, |
74 | { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, | 74 | { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, |
75 | { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, | 75 | { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, |
76 | { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, | ||
76 | { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, | 77 | { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, |
77 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, | 78 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, |
78 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, | 79 | { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, |
@@ -80,6 +81,7 @@ static const struct hid_blacklist { | |||
80 | { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET }, | 81 | { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET }, |
81 | { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET }, | 82 | { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET }, |
82 | { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET }, | 83 | { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET }, |
84 | { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS }, | ||
83 | { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET }, | 85 | { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET }, |
84 | { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET }, | 86 | { USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET }, |
85 | { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, | 87 | { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, |
diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h index 668ff4721323..5cde94e56f17 100644 --- a/drivers/hwmon/lm75.h +++ b/drivers/hwmon/lm75.h | |||
@@ -25,7 +25,7 @@ | |||
25 | which contains this code, we don't worry about the wasted space. | 25 | which contains this code, we don't worry about the wasted space. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/hwmon.h> | 28 | #include <linux/kernel.h> |
29 | 29 | ||
30 | /* straight from the datasheet */ | 30 | /* straight from the datasheet */ |
31 | #define LM75_TEMP_MIN (-55000) | 31 | #define LM75_TEMP_MIN (-55000) |
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c index db713c0dfba4..461a0d739d75 100644 --- a/drivers/hwspinlock/hwspinlock_core.c +++ b/drivers/hwspinlock/hwspinlock_core.c | |||
@@ -416,6 +416,8 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock) | |||
416 | ret = pm_runtime_get_sync(dev); | 416 | ret = pm_runtime_get_sync(dev); |
417 | if (ret < 0) { | 417 | if (ret < 0) { |
418 | dev_err(dev, "%s: can't power on device\n", __func__); | 418 | dev_err(dev, "%s: can't power on device\n", __func__); |
419 | pm_runtime_put_noidle(dev); | ||
420 | module_put(dev->driver->owner); | ||
419 | return ret; | 421 | return ret; |
420 | } | 422 | } |
421 | 423 | ||
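
The hwspinlock fix above addresses a common pm_runtime pitfall: pm_runtime_get_sync() increments the device usage count even when it fails, so the error path must drop that reference, and here the module reference taken earlier must be released as well. A hedged sketch of the same pattern outside the hwspinlock context:

#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/errno.h>

static int claim_device(struct device *dev)
{
        int ret;

        if (!try_module_get(dev->driver->owner))
                return -EINVAL;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                dev_err(dev, "%s: can't power on device\n", __func__);
                /* get_sync bumped the usage count even on failure */
                pm_runtime_put_noidle(dev);
                module_put(dev->driver->owner);
                return ret;
        }

        return 0;
}
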
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index 46cde098c11c..e380c6eef3af 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig | |||
@@ -4,7 +4,6 @@ | |||
4 | 4 | ||
5 | menuconfig I2C | 5 | menuconfig I2C |
6 | tristate "I2C support" | 6 | tristate "I2C support" |
7 | depends on !S390 | ||
8 | select RT_MUTEXES | 7 | select RT_MUTEXES |
9 | ---help--- | 8 | ---help--- |
10 | I2C (pronounce: I-squared-C) is a slow serial bus protocol used in | 9 | I2C (pronounce: I-squared-C) is a slow serial bus protocol used in |
@@ -76,6 +75,7 @@ config I2C_HELPER_AUTO | |||
76 | 75 | ||
77 | config I2C_SMBUS | 76 | config I2C_SMBUS |
78 | tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO | 77 | tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO |
78 | depends on GENERIC_HARDIRQS | ||
79 | help | 79 | help |
80 | Say Y here if you want support for SMBus extensions to the I2C | 80 | Say Y here if you want support for SMBus extensions to the I2C |
81 | specification. At the moment, the only supported extension is | 81 | specification. At the moment, the only supported extension is |
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index a3725de92384..adfee98486b1 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
@@ -114,7 +114,7 @@ config I2C_I801 | |||
114 | 114 | ||
115 | config I2C_ISCH | 115 | config I2C_ISCH |
116 | tristate "Intel SCH SMBus 1.0" | 116 | tristate "Intel SCH SMBus 1.0" |
117 | depends on PCI | 117 | depends on PCI && GENERIC_HARDIRQS |
118 | select LPC_SCH | 118 | select LPC_SCH |
119 | help | 119 | help |
120 | Say Y here if you want to use SMBus controller on the Intel SCH | 120 | Say Y here if you want to use SMBus controller on the Intel SCH |
@@ -543,6 +543,7 @@ config I2C_NUC900 | |||
543 | 543 | ||
544 | config I2C_OCORES | 544 | config I2C_OCORES |
545 | tristate "OpenCores I2C Controller" | 545 | tristate "OpenCores I2C Controller" |
546 | depends on GENERIC_HARDIRQS | ||
546 | help | 547 | help |
547 | If you say yes to this option, support will be included for the | 548 | If you say yes to this option, support will be included for the |
548 | OpenCores I2C controller. For details see | 549 | OpenCores I2C controller. For details see |
@@ -777,7 +778,7 @@ config I2C_DIOLAN_U2C | |||
777 | 778 | ||
778 | config I2C_PARPORT | 779 | config I2C_PARPORT |
779 | tristate "Parallel port adapter" | 780 | tristate "Parallel port adapter" |
780 | depends on PARPORT | 781 | depends on PARPORT && GENERIC_HARDIRQS |
781 | select I2C_ALGOBIT | 782 | select I2C_ALGOBIT |
782 | select I2C_SMBUS | 783 | select I2C_SMBUS |
783 | help | 784 | help |
@@ -802,6 +803,7 @@ config I2C_PARPORT | |||
802 | 803 | ||
803 | config I2C_PARPORT_LIGHT | 804 | config I2C_PARPORT_LIGHT |
804 | tristate "Parallel port adapter (light)" | 805 | tristate "Parallel port adapter (light)" |
806 | depends on GENERIC_HARDIRQS | ||
805 | select I2C_ALGOBIT | 807 | select I2C_ALGOBIT |
806 | select I2C_SMBUS | 808 | select I2C_SMBUS |
807 | help | 809 | help |
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 0ceb6e1b0f65..e3085c487ace 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
@@ -182,7 +182,6 @@ static int dw_i2c_probe(struct platform_device *pdev) | |||
182 | adap->algo = &i2c_dw_algo; | 182 | adap->algo = &i2c_dw_algo; |
183 | adap->dev.parent = &pdev->dev; | 183 | adap->dev.parent = &pdev->dev; |
184 | adap->dev.of_node = pdev->dev.of_node; | 184 | adap->dev.of_node = pdev->dev.of_node; |
185 | ACPI_HANDLE_SET(&adap->dev, ACPI_HANDLE(&pdev->dev)); | ||
186 | 185 | ||
187 | r = i2c_add_numbered_adapter(adap); | 186 | r = i2c_add_numbered_adapter(adap); |
188 | if (r) { | 187 | if (r) { |
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index e9205ee8cf94..130f02cc9d94 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c | |||
@@ -80,6 +80,7 @@ | |||
80 | /* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ | 80 | /* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */ |
81 | #define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 | 81 | #define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59 |
82 | #define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a | 82 | #define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a |
83 | #define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15 | ||
83 | 84 | ||
84 | #define ISMT_DESC_ENTRIES 32 /* number of descriptor entries */ | 85 | #define ISMT_DESC_ENTRIES 32 /* number of descriptor entries */ |
85 | #define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */ | 86 | #define ISMT_MAX_RETRIES 3 /* number of SMBus retries to attempt */ |
@@ -185,6 +186,7 @@ struct ismt_priv { | |||
185 | static const DEFINE_PCI_DEVICE_TABLE(ismt_ids) = { | 186 | static const DEFINE_PCI_DEVICE_TABLE(ismt_ids) = { |
186 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, | 187 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) }, |
187 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, | 188 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) }, |
189 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) }, | ||
188 | { 0, } | 190 | { 0, } |
189 | }; | 191 | }; |
190 | 192 | ||
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 36704e3ab3fa..b714776b6ddd 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c | |||
@@ -411,7 +411,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) | |||
411 | int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE; | 411 | int clk_multiplier = I2C_CLK_MULTIPLIER_STD_FAST_MODE; |
412 | u32 clk_divisor; | 412 | u32 clk_divisor; |
413 | 413 | ||
414 | tegra_i2c_clock_enable(i2c_dev); | 414 | err = tegra_i2c_clock_enable(i2c_dev); |
415 | if (err < 0) { | ||
416 | dev_err(i2c_dev->dev, "Clock enable failed %d\n", err); | ||
417 | return err; | ||
418 | } | ||
415 | 419 | ||
416 | tegra_periph_reset_assert(i2c_dev->div_clk); | 420 | tegra_periph_reset_assert(i2c_dev->div_clk); |
417 | udelay(2); | 421 | udelay(2); |
@@ -628,7 +632,12 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], | |||
628 | if (i2c_dev->is_suspended) | 632 | if (i2c_dev->is_suspended) |
629 | return -EBUSY; | 633 | return -EBUSY; |
630 | 634 | ||
631 | tegra_i2c_clock_enable(i2c_dev); | 635 | ret = tegra_i2c_clock_enable(i2c_dev); |
636 | if (ret < 0) { | ||
637 | dev_err(i2c_dev->dev, "Clock enable failed %d\n", ret); | ||
638 | return ret; | ||
639 | } | ||
640 | |||
632 | for (i = 0; i < num; i++) { | 641 | for (i = 0; i < num; i++) { |
633 | enum msg_end_type end_type = MSG_END_STOP; | 642 | enum msg_end_type end_type = MSG_END_STOP; |
634 | if (i < (num - 1)) { | 643 | if (i < (num - 1)) { |
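
Both i2c-tegra hunks stop ignoring the return value of the clock-enable helper and bail out with a message when the clocks cannot be switched on. A minimal sketch of that propagation using the generic clk API; the function and parameter names are illustrative:

#include <linux/clk.h>
#include <linux/device.h>

static int bus_clock_on(struct device *dev, struct clk *div_clk)
{
        int err = clk_prepare_enable(div_clk);

        if (err < 0) {
                dev_err(dev, "Clock enable failed %d\n", err);
                return err;
        }
        return 0;
}
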
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c index f3b8f9a6a89b..966a18a5d12d 100644 --- a/drivers/i2c/muxes/i2c-mux-pca9541.c +++ b/drivers/i2c/muxes/i2c-mux-pca9541.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (c) 2010 Ericsson AB. | 4 | * Copyright (c) 2010 Ericsson AB. |
5 | * | 5 | * |
6 | * Author: Guenter Roeck <guenter.roeck@ericsson.com> | 6 | * Author: Guenter Roeck <linux@roeck-us.net> |
7 | * | 7 | * |
8 | * Derived from: | 8 | * Derived from: |
9 | * pca954x.c | 9 | * pca954x.c |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 565bfb161c1a..a3fde52840ca 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -1575,6 +1575,12 @@ static int c4iw_reconnect(struct c4iw_ep *ep) | |||
1575 | 1575 | ||
1576 | neigh = dst_neigh_lookup(ep->dst, | 1576 | neigh = dst_neigh_lookup(ep->dst, |
1577 | &ep->com.cm_id->remote_addr.sin_addr.s_addr); | 1577 | &ep->com.cm_id->remote_addr.sin_addr.s_addr); |
1578 | if (!neigh) { | ||
1579 | pr_err("%s - cannot alloc neigh.\n", __func__); | ||
1580 | err = -ENOMEM; | ||
1581 | goto fail4; | ||
1582 | } | ||
1583 | |||
1578 | /* get a l2t entry */ | 1584 | /* get a l2t entry */ |
1579 | if (neigh->dev->flags & IFF_LOOPBACK) { | 1585 | if (neigh->dev->flags & IFF_LOOPBACK) { |
1580 | PDBG("%s LOOPBACK\n", __func__); | 1586 | PDBG("%s LOOPBACK\n", __func__); |
@@ -3053,6 +3059,12 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb) | |||
3053 | dst = &rt->dst; | 3059 | dst = &rt->dst; |
3054 | neigh = dst_neigh_lookup_skb(dst, skb); | 3060 | neigh = dst_neigh_lookup_skb(dst, skb); |
3055 | 3061 | ||
3062 | if (!neigh) { | ||
3063 | pr_err("%s - failed to allocate neigh!\n", | ||
3064 | __func__); | ||
3065 | goto free_dst; | ||
3066 | } | ||
3067 | |||
3056 | if (neigh->dev->flags & IFF_LOOPBACK) { | 3068 | if (neigh->dev->flags & IFF_LOOPBACK) { |
3057 | pdev = ip_dev_find(&init_net, iph->daddr); | 3069 | pdev = ip_dev_find(&init_net, iph->daddr); |
3058 | e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, | 3070 | e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, |
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 17ba4f8bc12d..70b1808a08f4 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
@@ -186,8 +186,10 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, | |||
186 | wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), | 186 | wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), |
187 | wq->rq.memsize, &(wq->rq.dma_addr), | 187 | wq->rq.memsize, &(wq->rq.dma_addr), |
188 | GFP_KERNEL); | 188 | GFP_KERNEL); |
189 | if (!wq->rq.queue) | 189 | if (!wq->rq.queue) { |
190 | ret = -ENOMEM; | ||
190 | goto free_sq; | 191 | goto free_sq; |
192 | } | ||
191 | PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n", | 193 | PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n", |
192 | __func__, wq->sq.queue, | 194 | __func__, wq->sq.queue, |
193 | (unsigned long long)virt_to_phys(wq->sq.queue), | 195 | (unsigned long long)virt_to_phys(wq->sq.queue), |
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 439c35d4a669..ea93870266eb 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c | |||
@@ -620,7 +620,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data, | |||
620 | goto bail; | 620 | goto bail; |
621 | } | 621 | } |
622 | 622 | ||
623 | opcode = be32_to_cpu(ohdr->bth[0]) >> 24; | 623 | opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f; |
624 | dev->opstats[opcode].n_bytes += tlen; | 624 | dev->opstats[opcode].n_bytes += tlen; |
625 | dev->opstats[opcode].n_packets++; | 625 | dev->opstats[opcode].n_packets++; |
626 | 626 | ||
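
The one-line ipath_verbs.c change masks the BTH opcode down to 7 bits before it is used to index the per-opcode statistics table, so a malformed packet cannot index past the end of the array (the 0x7f mask implies a 128-entry table). The arithmetic, standalone:

#include <stdint.h>

/* bth0 is the first BTH word after be32_to_cpu(); the opcode sits in the top byte */
static unsigned int bth_opcode(uint32_t bth0)
{
        return (bth0 >> 24) & 0x7f;     /* keep only the low 7 bits */
}
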
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig index 8349f9c5064c..1e603a375069 100644 --- a/drivers/infiniband/hw/qib/Kconfig +++ b/drivers/infiniband/hw/qib/Kconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | config INFINIBAND_QIB | 1 | config INFINIBAND_QIB |
2 | tristate "QLogic PCIe HCA support" | 2 | tristate "Intel PCIe HCA support" |
3 | depends on 64BIT | 3 | depends on 64BIT |
4 | ---help--- | 4 | ---help--- |
5 | This is a low-level driver for QLogic PCIe QLE InfiniBand host | 5 | This is a low-level driver for Intel PCIe QLE InfiniBand host |
6 | channel adapters. This driver does not support the QLogic | 6 | channel adapters. This driver does not support the Intel |
7 | HyperTransport card (model QHT7140). | 7 | HyperTransport card (model QHT7140). |
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index 5423edcab51f..216092477dfc 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c | |||
@@ -1,4 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013 Intel Corporation. All rights reserved. | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. | 3 | * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. |
3 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
4 | * | 5 | * |
@@ -63,8 +64,8 @@ MODULE_PARM_DESC(compat_ddr_negotiate, | |||
63 | "Attempt pre-IBTA 1.2 DDR speed negotiation"); | 64 | "Attempt pre-IBTA 1.2 DDR speed negotiation"); |
64 | 65 | ||
65 | MODULE_LICENSE("Dual BSD/GPL"); | 66 | MODULE_LICENSE("Dual BSD/GPL"); |
66 | MODULE_AUTHOR("QLogic <support@qlogic.com>"); | 67 | MODULE_AUTHOR("Intel <ibsupport@intel.com>"); |
67 | MODULE_DESCRIPTION("QLogic IB driver"); | 68 | MODULE_DESCRIPTION("Intel IB driver"); |
68 | MODULE_VERSION(QIB_DRIVER_VERSION); | 69 | MODULE_VERSION(QIB_DRIVER_VERSION); |
69 | 70 | ||
70 | /* | 71 | /* |
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index a099ac171e22..0232ae56b1fa 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c | |||
@@ -1,4 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2013 Intel Corporation. All rights reserved. | ||
2 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. | 3 | * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. |
3 | * All rights reserved. | 4 | * All rights reserved. |
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | 5 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
@@ -51,7 +52,7 @@ static u32 qib_6120_iblink_state(u64); | |||
51 | 52 | ||
52 | /* | 53 | /* |
53 | * This file contains all the chip-specific register information and | 54 | * This file contains all the chip-specific register information and |
54 | * access functions for the QLogic QLogic_IB PCI-Express chip. | 55 | * access functions for the Intel Intel_IB PCI-Express chip. |
55 | * | 56 | * |
56 | */ | 57 | */ |
57 | 58 | ||
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 50e33aa0b4e3..173f805790da 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. | 2 | * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved. |
3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. | 3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. |
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
5 | * | 5 | * |
@@ -1138,7 +1138,7 @@ void qib_disable_after_error(struct qib_devdata *dd) | |||
1138 | static void qib_remove_one(struct pci_dev *); | 1138 | static void qib_remove_one(struct pci_dev *); |
1139 | static int qib_init_one(struct pci_dev *, const struct pci_device_id *); | 1139 | static int qib_init_one(struct pci_dev *, const struct pci_device_id *); |
1140 | 1140 | ||
1141 | #define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: " | 1141 | #define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: " |
1142 | #define PFX QIB_DRV_NAME ": " | 1142 | #define PFX QIB_DRV_NAME ": " |
1143 | 1143 | ||
1144 | static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = { | 1144 | static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = { |
@@ -1355,7 +1355,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1355 | dd = qib_init_iba6120_funcs(pdev, ent); | 1355 | dd = qib_init_iba6120_funcs(pdev, ent); |
1356 | #else | 1356 | #else |
1357 | qib_early_err(&pdev->dev, | 1357 | qib_early_err(&pdev->dev, |
1358 | "QLogic PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n", | 1358 | "Intel PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n", |
1359 | ent->device); | 1359 | ent->device); |
1360 | dd = ERR_PTR(-ENODEV); | 1360 | dd = ERR_PTR(-ENODEV); |
1361 | #endif | 1361 | #endif |
@@ -1371,7 +1371,7 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1371 | 1371 | ||
1372 | default: | 1372 | default: |
1373 | qib_early_err(&pdev->dev, | 1373 | qib_early_err(&pdev->dev, |
1374 | "Failing on unknown QLogic deviceid 0x%x\n", | 1374 | "Failing on unknown Intel deviceid 0x%x\n", |
1375 | ent->device); | 1375 | ent->device); |
1376 | ret = -ENODEV; | 1376 | ret = -ENODEV; |
1377 | } | 1377 | } |
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index 50a8a0d4fe67..911205d3d5a0 100644 --- a/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. | 2 | * Copyright (c) 2013 Intel Corporation. All rights reserved. |
3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. | 3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. |
4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. |
5 | * | 5 | * |
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index ba51a4715a1d..7c0ab16a2fe2 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (c) 2012 Intel Corporation. All rights reserved. | 2 | * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved. |
3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. | 3 | * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. |
4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | 4 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. |
5 | * | 5 | * |
@@ -2224,7 +2224,7 @@ int qib_register_ib_device(struct qib_devdata *dd) | |||
2224 | ibdev->dma_ops = &qib_dma_mapping_ops; | 2224 | ibdev->dma_ops = &qib_dma_mapping_ops; |
2225 | 2225 | ||
2226 | snprintf(ibdev->node_desc, sizeof(ibdev->node_desc), | 2226 | snprintf(ibdev->node_desc, sizeof(ibdev->node_desc), |
2227 | "QLogic Infiniband HCA %s", init_utsname()->nodename); | 2227 | "Intel Infiniband HCA %s", init_utsname()->nodename); |
2228 | 2228 | ||
2229 | ret = ib_register_device(ibdev, qib_create_port_files); | 2229 | ret = ib_register_device(ibdev, qib_create_port_files); |
2230 | if (ret) | 2230 | if (ret) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 67b0c1d23678..1ef880de3a41 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -758,9 +758,13 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ | |||
758 | if (++priv->tx_outstanding == ipoib_sendq_size) { | 758 | if (++priv->tx_outstanding == ipoib_sendq_size) { |
759 | ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", | 759 | ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n", |
760 | tx->qp->qp_num); | 760 | tx->qp->qp_num); |
761 | if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) | ||
762 | ipoib_warn(priv, "request notify on send CQ failed\n"); | ||
763 | netif_stop_queue(dev); | 761 | netif_stop_queue(dev); |
762 | rc = ib_req_notify_cq(priv->send_cq, | ||
763 | IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS); | ||
764 | if (rc < 0) | ||
765 | ipoib_warn(priv, "request notify on send CQ failed\n"); | ||
766 | else if (rc) | ||
767 | ipoib_send_comp_handler(priv->send_cq, dev); | ||
764 | } | 768 | } |
765 | } | 769 | } |
766 | } | 770 | } |
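
The ipoib_cm_send() change reorders the ring-full handling: stop the net queue first, then re-arm the send CQ asking for missed-event reporting, and if completions already slipped in while the CQ was unarmed (a positive return), invoke the completion handler directly so the queue is not left stopped. A condensed sketch of that sequence; the completion callback and its argument are stand-ins for the driver's own:

#include <rdma/ib_verbs.h>
#include <linux/netdevice.h>

static void tx_ring_full(struct net_device *dev, struct ib_cq *send_cq,
                         void (*comp_handler)(struct ib_cq *cq, void *ctx))
{
        int rc;

        netif_stop_queue(dev);

        /* re-arm and ask whether completions were missed while unarmed */
        rc = ib_req_notify_cq(send_cq,
                              IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
        if (rc < 0)
                netdev_warn(dev, "request notify on send CQ failed\n");
        else if (rc)
                comp_handler(send_cq, dev);     /* drain what was missed */
}
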
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index 7cd74e29cbc8..9135606c8649 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c | |||
@@ -158,14 +158,10 @@ static unsigned int get_time_pit(void) | |||
158 | #define GET_TIME(x) rdtscl(x) | 158 | #define GET_TIME(x) rdtscl(x) |
159 | #define DELTA(x,y) ((y)-(x)) | 159 | #define DELTA(x,y) ((y)-(x)) |
160 | #define TIME_NAME "TSC" | 160 | #define TIME_NAME "TSC" |
161 | #elif defined(__alpha__) | 161 | #elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_TILE) |
162 | #define GET_TIME(x) do { x = get_cycles(); } while (0) | 162 | #define GET_TIME(x) do { x = get_cycles(); } while (0) |
163 | #define DELTA(x,y) ((y)-(x)) | 163 | #define DELTA(x,y) ((y)-(x)) |
164 | #define TIME_NAME "PCC" | 164 | #define TIME_NAME "get_cycles" |
165 | #elif defined(CONFIG_MN10300) || defined(CONFIG_TILE) | ||
166 | #define GET_TIME(x) do { x = get_cycles(); } while (0) | ||
167 | #define DELTA(x, y) ((x) - (y)) | ||
168 | #define TIME_NAME "TSC" | ||
169 | #else | 165 | #else |
170 | #define FAKE_TIME | 166 | #define FAKE_TIME |
171 | static unsigned long analog_faketime = 0; | 167 | static unsigned long analog_faketime = 0; |
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 5c514d0711d1..c332fb98480d 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -130,7 +130,7 @@ config IRQ_REMAP | |||
130 | # OMAP IOMMU support | 130 | # OMAP IOMMU support |
131 | config OMAP_IOMMU | 131 | config OMAP_IOMMU |
132 | bool "OMAP IOMMU Support" | 132 | bool "OMAP IOMMU Support" |
133 | depends on ARCH_OMAP | 133 | depends on ARCH_OMAP2PLUS |
134 | select IOMMU_API | 134 | select IOMMU_API |
135 | 135 | ||
136 | config OMAP_IOVMM | 136 | config OMAP_IOVMM |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 98f555dafb55..b287ca33833d 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -2466,18 +2466,16 @@ static int device_change_notifier(struct notifier_block *nb, | |||
2466 | 2466 | ||
2467 | /* allocate a protection domain if a device is added */ | 2467 | /* allocate a protection domain if a device is added */ |
2468 | dma_domain = find_protection_domain(devid); | 2468 | dma_domain = find_protection_domain(devid); |
2469 | if (dma_domain) | 2469 | if (!dma_domain) { |
2470 | goto out; | 2470 | dma_domain = dma_ops_domain_alloc(); |
2471 | dma_domain = dma_ops_domain_alloc(); | 2471 | if (!dma_domain) |
2472 | if (!dma_domain) | 2472 | goto out; |
2473 | goto out; | 2473 | dma_domain->target_dev = devid; |
2474 | dma_domain->target_dev = devid; | 2474 | |
2475 | 2475 | spin_lock_irqsave(&iommu_pd_list_lock, flags); | |
2476 | spin_lock_irqsave(&iommu_pd_list_lock, flags); | 2476 | list_add_tail(&dma_domain->list, &iommu_pd_list); |
2477 | list_add_tail(&dma_domain->list, &iommu_pd_list); | 2477 | spin_unlock_irqrestore(&iommu_pd_list_lock, flags); |
2478 | spin_unlock_irqrestore(&iommu_pd_list_lock, flags); | 2478 | } |
2479 | |||
2480 | dev_data = get_dev_data(dev); | ||
2481 | 2479 | ||
2482 | dev->archdata.dma_ops = &amd_iommu_dma_ops; | 2480 | dev->archdata.dma_ops = &amd_iommu_dma_ops; |
2483 | 2481 | ||
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index b6ecddb63cd0..e3c2d74b7684 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
@@ -980,7 +980,7 @@ static void __init free_iommu_all(void) | |||
980 | * BIOS should disable L2B micellaneous clock gating by setting | 980 | * BIOS should disable L2B micellaneous clock gating by setting |
981 | * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b | 981 | * L2_L2B_CK_GATE_CONTROL[CKGateL2BMiscDisable](D0F2xF4_x90[2]) = 1b |
982 | */ | 982 | */ |
983 | static void __init amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) | 983 | static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu) |
984 | { | 984 | { |
985 | u32 value; | 985 | u32 value; |
986 | 986 | ||
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c index d56f8c17c5fe..7c11ff368d07 100644 --- a/drivers/iommu/irq_remapping.c +++ b/drivers/iommu/irq_remapping.c | |||
@@ -2,7 +2,6 @@ | |||
2 | #include <linux/cpumask.h> | 2 | #include <linux/cpumask.h> |
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/string.h> | 4 | #include <linux/string.h> |
5 | #include <linux/cpumask.h> | ||
6 | #include <linux/errno.h> | 5 | #include <linux/errno.h> |
7 | #include <linux/msi.h> | 6 | #include <linux/msi.h> |
8 | #include <linux/irq.h> | 7 | #include <linux/irq.h> |
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig index 5313c9ea44dc..d9edcc94c2a8 100644 --- a/drivers/isdn/hisax/Kconfig +++ b/drivers/isdn/hisax/Kconfig | |||
@@ -237,7 +237,8 @@ config HISAX_MIC | |||
237 | 237 | ||
238 | config HISAX_NETJET | 238 | config HISAX_NETJET |
239 | bool "NETjet card" | 239 | bool "NETjet card" |
240 | depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) | 240 | depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) |
241 | depends on VIRT_TO_BUS | ||
241 | help | 242 | help |
242 | This enables HiSax support for the NetJet from Traverse | 243 | This enables HiSax support for the NetJet from Traverse |
243 | Technologies. | 244 | Technologies. |
@@ -248,7 +249,8 @@ config HISAX_NETJET | |||
248 | 249 | ||
249 | config HISAX_NETJET_U | 250 | config HISAX_NETJET_U |
250 | bool "NETspider U card" | 251 | bool "NETspider U card" |
251 | depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) | 252 | depends on PCI && (BROKEN || !(PPC || PARISC || M68K || (MIPS && !CPU_LITTLE_ENDIAN) || FRV || (XTENSA && !CPU_LITTLE_ENDIAN))) |
253 | depends on VIRT_TO_BUS | ||
252 | help | 254 | help |
253 | This enables HiSax support for the Netspider U interface ISDN card | 255 | This enables HiSax support for the Netspider U interface ISDN card |
254 | from Traverse Technologies. | 256 | from Traverse Technologies. |
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 3c955e10a618..c6083132c4b8 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -1025,6 +1025,8 @@ void dm_bufio_prefetch(struct dm_bufio_client *c, | |||
1025 | { | 1025 | { |
1026 | struct blk_plug plug; | 1026 | struct blk_plug plug; |
1027 | 1027 | ||
1028 | BUG_ON(dm_bufio_in_request()); | ||
1029 | |||
1028 | blk_start_plug(&plug); | 1030 | blk_start_plug(&plug); |
1029 | dm_bufio_lock(c); | 1031 | dm_bufio_lock(c); |
1030 | 1032 | ||
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index fbd3625f2748..83e995fece88 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c | |||
@@ -83,6 +83,8 @@ struct cache_disk_superblock { | |||
83 | __le32 read_misses; | 83 | __le32 read_misses; |
84 | __le32 write_hits; | 84 | __le32 write_hits; |
85 | __le32 write_misses; | 85 | __le32 write_misses; |
86 | |||
87 | __le32 policy_version[CACHE_POLICY_VERSION_SIZE]; | ||
86 | } __packed; | 88 | } __packed; |
87 | 89 | ||
88 | struct dm_cache_metadata { | 90 | struct dm_cache_metadata { |
@@ -109,6 +111,7 @@ struct dm_cache_metadata { | |||
109 | bool clean_when_opened:1; | 111 | bool clean_when_opened:1; |
110 | 112 | ||
111 | char policy_name[CACHE_POLICY_NAME_SIZE]; | 113 | char policy_name[CACHE_POLICY_NAME_SIZE]; |
114 | unsigned policy_version[CACHE_POLICY_VERSION_SIZE]; | ||
112 | size_t policy_hint_size; | 115 | size_t policy_hint_size; |
113 | struct dm_cache_statistics stats; | 116 | struct dm_cache_statistics stats; |
114 | }; | 117 | }; |
@@ -268,7 +271,8 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) | |||
268 | memset(disk_super->uuid, 0, sizeof(disk_super->uuid)); | 271 | memset(disk_super->uuid, 0, sizeof(disk_super->uuid)); |
269 | disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC); | 272 | disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC); |
270 | disk_super->version = cpu_to_le32(CACHE_VERSION); | 273 | disk_super->version = cpu_to_le32(CACHE_VERSION); |
271 | memset(disk_super->policy_name, 0, CACHE_POLICY_NAME_SIZE); | 274 | memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name)); |
275 | memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version)); | ||
272 | disk_super->policy_hint_size = 0; | 276 | disk_super->policy_hint_size = 0; |
273 | 277 | ||
274 | r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, | 278 | r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root, |
@@ -284,7 +288,6 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd) | |||
284 | disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); | 288 | disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); |
285 | disk_super->data_block_size = cpu_to_le32(cmd->data_block_size); | 289 | disk_super->data_block_size = cpu_to_le32(cmd->data_block_size); |
286 | disk_super->cache_blocks = cpu_to_le32(0); | 290 | disk_super->cache_blocks = cpu_to_le32(0); |
287 | memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name)); | ||
288 | 291 | ||
289 | disk_super->read_hits = cpu_to_le32(0); | 292 | disk_super->read_hits = cpu_to_le32(0); |
290 | disk_super->read_misses = cpu_to_le32(0); | 293 | disk_super->read_misses = cpu_to_le32(0); |
@@ -478,6 +481,9 @@ static void read_superblock_fields(struct dm_cache_metadata *cmd, | |||
478 | cmd->data_block_size = le32_to_cpu(disk_super->data_block_size); | 481 | cmd->data_block_size = le32_to_cpu(disk_super->data_block_size); |
479 | cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks)); | 482 | cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks)); |
480 | strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name)); | 483 | strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name)); |
484 | cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]); | ||
485 | cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]); | ||
486 | cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]); | ||
481 | cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size); | 487 | cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size); |
482 | 488 | ||
483 | cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits); | 489 | cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits); |
@@ -572,6 +578,9 @@ static int __commit_transaction(struct dm_cache_metadata *cmd, | |||
572 | disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); | 578 | disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks)); |
573 | disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks)); | 579 | disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks)); |
574 | strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name)); | 580 | strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name)); |
581 | disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]); | ||
582 | disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]); | ||
583 | disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]); | ||
575 | 584 | ||
576 | disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits); | 585 | disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits); |
577 | disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses); | 586 | disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses); |
@@ -854,18 +863,43 @@ struct thunk { | |||
854 | bool hints_valid; | 863 | bool hints_valid; |
855 | }; | 864 | }; |
856 | 865 | ||
866 | static bool policy_unchanged(struct dm_cache_metadata *cmd, | ||
867 | struct dm_cache_policy *policy) | ||
868 | { | ||
869 | const char *policy_name = dm_cache_policy_get_name(policy); | ||
870 | const unsigned *policy_version = dm_cache_policy_get_version(policy); | ||
871 | size_t policy_hint_size = dm_cache_policy_get_hint_size(policy); | ||
872 | |||
873 | /* | ||
874 | * Ensure policy names match. | ||
875 | */ | ||
876 | if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name))) | ||
877 | return false; | ||
878 | |||
879 | /* | ||
880 | * Ensure policy major versions match. | ||
881 | */ | ||
882 | if (cmd->policy_version[0] != policy_version[0]) | ||
883 | return false; | ||
884 | |||
885 | /* | ||
886 | * Ensure policy hint sizes match. | ||
887 | */ | ||
888 | if (cmd->policy_hint_size != policy_hint_size) | ||
889 | return false; | ||
890 | |||
891 | return true; | ||
892 | } | ||
893 | |||
857 | static bool hints_array_initialized(struct dm_cache_metadata *cmd) | 894 | static bool hints_array_initialized(struct dm_cache_metadata *cmd) |
858 | { | 895 | { |
859 | return cmd->hint_root && cmd->policy_hint_size; | 896 | return cmd->hint_root && cmd->policy_hint_size; |
860 | } | 897 | } |
861 | 898 | ||
862 | static bool hints_array_available(struct dm_cache_metadata *cmd, | 899 | static bool hints_array_available(struct dm_cache_metadata *cmd, |
863 | const char *policy_name) | 900 | struct dm_cache_policy *policy) |
864 | { | 901 | { |
865 | bool policy_names_match = !strncmp(cmd->policy_name, policy_name, | 902 | return cmd->clean_when_opened && policy_unchanged(cmd, policy) && |
866 | sizeof(cmd->policy_name)); | ||
867 | |||
868 | return cmd->clean_when_opened && policy_names_match && | ||
869 | hints_array_initialized(cmd); | 903 | hints_array_initialized(cmd); |
870 | } | 904 | } |
871 | 905 | ||
@@ -899,7 +933,8 @@ static int __load_mapping(void *context, uint64_t cblock, void *leaf) | |||
899 | return r; | 933 | return r; |
900 | } | 934 | } |
901 | 935 | ||
902 | static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_name, | 936 | static int __load_mappings(struct dm_cache_metadata *cmd, |
937 | struct dm_cache_policy *policy, | ||
903 | load_mapping_fn fn, void *context) | 938 | load_mapping_fn fn, void *context) |
904 | { | 939 | { |
905 | struct thunk thunk; | 940 | struct thunk thunk; |
@@ -909,18 +944,19 @@ static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_nam | |||
909 | 944 | ||
910 | thunk.cmd = cmd; | 945 | thunk.cmd = cmd; |
911 | thunk.respect_dirty_flags = cmd->clean_when_opened; | 946 | thunk.respect_dirty_flags = cmd->clean_when_opened; |
912 | thunk.hints_valid = hints_array_available(cmd, policy_name); | 947 | thunk.hints_valid = hints_array_available(cmd, policy); |
913 | 948 | ||
914 | return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk); | 949 | return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk); |
915 | } | 950 | } |
916 | 951 | ||
917 | int dm_cache_load_mappings(struct dm_cache_metadata *cmd, const char *policy_name, | 952 | int dm_cache_load_mappings(struct dm_cache_metadata *cmd, |
953 | struct dm_cache_policy *policy, | ||
918 | load_mapping_fn fn, void *context) | 954 | load_mapping_fn fn, void *context) |
919 | { | 955 | { |
920 | int r; | 956 | int r; |
921 | 957 | ||
922 | down_read(&cmd->root_lock); | 958 | down_read(&cmd->root_lock); |
923 | r = __load_mappings(cmd, policy_name, fn, context); | 959 | r = __load_mappings(cmd, policy, fn, context); |
924 | up_read(&cmd->root_lock); | 960 | up_read(&cmd->root_lock); |
925 | 961 | ||
926 | return r; | 962 | return r; |
@@ -979,7 +1015,7 @@ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty | |||
979 | /* nothing to be done */ | 1015 | /* nothing to be done */ |
980 | return 0; | 1016 | return 0; |
981 | 1017 | ||
982 | value = pack_value(oblock, flags | (dirty ? M_DIRTY : 0)); | 1018 | value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0)); |
983 | __dm_bless_for_disk(&value); | 1019 | __dm_bless_for_disk(&value); |
984 | 1020 | ||
985 | r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock), | 1021 | r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock), |
@@ -1070,13 +1106,15 @@ static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po | |||
1070 | __le32 value; | 1106 | __le32 value; |
1071 | size_t hint_size; | 1107 | size_t hint_size; |
1072 | const char *policy_name = dm_cache_policy_get_name(policy); | 1108 | const char *policy_name = dm_cache_policy_get_name(policy); |
1109 | const unsigned *policy_version = dm_cache_policy_get_version(policy); | ||
1073 | 1110 | ||
1074 | if (!policy_name[0] || | 1111 | if (!policy_name[0] || |
1075 | (strlen(policy_name) > sizeof(cmd->policy_name) - 1)) | 1112 | (strlen(policy_name) > sizeof(cmd->policy_name) - 1)) |
1076 | return -EINVAL; | 1113 | return -EINVAL; |
1077 | 1114 | ||
1078 | if (strcmp(cmd->policy_name, policy_name)) { | 1115 | if (!policy_unchanged(cmd, policy)) { |
1079 | strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name)); | 1116 | strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name)); |
1117 | memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version)); | ||
1080 | 1118 | ||
1081 | hint_size = dm_cache_policy_get_hint_size(policy); | 1119 | hint_size = dm_cache_policy_get_hint_size(policy); |
1082 | if (!hint_size) | 1120 | if (!hint_size) |
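The metadata now records a three-element policy version next to the policy name, both in the superblock and in the in-core struct, and policy_unchanged() only accepts previously stored hints when the name, the major version (element 0) and the hint size all match. A small sketch of the on-disk round-trip, assuming the little-endian triple layout introduced above:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    #define CACHE_POLICY_VERSION_SIZE 3

    /* Core copy is host-order unsigned[3]; the superblock keeps __le32[3]. */
    static void policy_version_to_disk(__le32 *disk, const unsigned *core)
    {
        unsigned i;

        for (i = 0; i < CACHE_POLICY_VERSION_SIZE; i++)
            disk[i] = cpu_to_le32(core[i]);
    }

    static void policy_version_from_disk(unsigned *core, const __le32 *disk)
    {
        unsigned i;

        for (i = 0; i < CACHE_POLICY_VERSION_SIZE; i++)
            core[i] = le32_to_cpu(disk[i]);
    }

Only the major number takes part in the compatibility check; the minor and patch levels are stored but may differ without invalidating the hint array.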
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h index 135864ea0eee..f45cef21f3d0 100644 --- a/drivers/md/dm-cache-metadata.h +++ b/drivers/md/dm-cache-metadata.h | |||
@@ -89,7 +89,7 @@ typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock, | |||
89 | dm_cblock_t cblock, bool dirty, | 89 | dm_cblock_t cblock, bool dirty, |
90 | uint32_t hint, bool hint_valid); | 90 | uint32_t hint, bool hint_valid); |
91 | int dm_cache_load_mappings(struct dm_cache_metadata *cmd, | 91 | int dm_cache_load_mappings(struct dm_cache_metadata *cmd, |
92 | const char *policy_name, | 92 | struct dm_cache_policy *policy, |
93 | load_mapping_fn fn, | 93 | load_mapping_fn fn, |
94 | void *context); | 94 | void *context); |
95 | 95 | ||
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c index cc05d70b3cb8..b04d1f904d07 100644 --- a/drivers/md/dm-cache-policy-cleaner.c +++ b/drivers/md/dm-cache-policy-cleaner.c | |||
@@ -17,7 +17,6 @@ | |||
17 | /*----------------------------------------------------------------*/ | 17 | /*----------------------------------------------------------------*/ |
18 | 18 | ||
19 | #define DM_MSG_PREFIX "cache cleaner" | 19 | #define DM_MSG_PREFIX "cache cleaner" |
20 | #define CLEANER_VERSION "1.0.0" | ||
21 | 20 | ||
22 | /* Cache entry struct. */ | 21 | /* Cache entry struct. */ |
23 | struct wb_cache_entry { | 22 | struct wb_cache_entry { |
@@ -434,6 +433,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size, | |||
434 | 433 | ||
435 | static struct dm_cache_policy_type wb_policy_type = { | 434 | static struct dm_cache_policy_type wb_policy_type = { |
436 | .name = "cleaner", | 435 | .name = "cleaner", |
436 | .version = {1, 0, 0}, | ||
437 | .hint_size = 0, | 437 | .hint_size = 0, |
438 | .owner = THIS_MODULE, | 438 | .owner = THIS_MODULE, |
439 | .create = wb_create | 439 | .create = wb_create |
@@ -446,7 +446,10 @@ static int __init wb_init(void) | |||
446 | if (r < 0) | 446 | if (r < 0) |
447 | DMERR("register failed %d", r); | 447 | DMERR("register failed %d", r); |
448 | else | 448 | else |
449 | DMINFO("version " CLEANER_VERSION " loaded"); | 449 | DMINFO("version %u.%u.%u loaded", |
450 | wb_policy_type.version[0], | ||
451 | wb_policy_type.version[1], | ||
452 | wb_policy_type.version[2]); | ||
450 | 453 | ||
451 | return r; | 454 | return r; |
452 | } | 455 | } |
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h index 52a75beeced5..0928abdc49f0 100644 --- a/drivers/md/dm-cache-policy-internal.h +++ b/drivers/md/dm-cache-policy-internal.h | |||
@@ -117,6 +117,8 @@ void dm_cache_policy_destroy(struct dm_cache_policy *p); | |||
117 | */ | 117 | */ |
118 | const char *dm_cache_policy_get_name(struct dm_cache_policy *p); | 118 | const char *dm_cache_policy_get_name(struct dm_cache_policy *p); |
119 | 119 | ||
120 | const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p); | ||
121 | |||
120 | size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p); | 122 | size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p); |
121 | 123 | ||
122 | /*----------------------------------------------------------------*/ | 124 | /*----------------------------------------------------------------*/ |
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 964153255076..dc112a7137fe 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/vmalloc.h> | 14 | #include <linux/vmalloc.h> |
15 | 15 | ||
16 | #define DM_MSG_PREFIX "cache-policy-mq" | 16 | #define DM_MSG_PREFIX "cache-policy-mq" |
17 | #define MQ_VERSION "1.0.0" | ||
18 | 17 | ||
19 | static struct kmem_cache *mq_entry_cache; | 18 | static struct kmem_cache *mq_entry_cache; |
20 | 19 | ||
@@ -1133,6 +1132,7 @@ bad_cache_alloc: | |||
1133 | 1132 | ||
1134 | static struct dm_cache_policy_type mq_policy_type = { | 1133 | static struct dm_cache_policy_type mq_policy_type = { |
1135 | .name = "mq", | 1134 | .name = "mq", |
1135 | .version = {1, 0, 0}, | ||
1136 | .hint_size = 4, | 1136 | .hint_size = 4, |
1137 | .owner = THIS_MODULE, | 1137 | .owner = THIS_MODULE, |
1138 | .create = mq_create | 1138 | .create = mq_create |
@@ -1140,6 +1140,7 @@ static struct dm_cache_policy_type mq_policy_type = { | |||
1140 | 1140 | ||
1141 | static struct dm_cache_policy_type default_policy_type = { | 1141 | static struct dm_cache_policy_type default_policy_type = { |
1142 | .name = "default", | 1142 | .name = "default", |
1143 | .version = {1, 0, 0}, | ||
1143 | .hint_size = 4, | 1144 | .hint_size = 4, |
1144 | .owner = THIS_MODULE, | 1145 | .owner = THIS_MODULE, |
1145 | .create = mq_create | 1146 | .create = mq_create |
@@ -1164,7 +1165,10 @@ static int __init mq_init(void) | |||
1164 | 1165 | ||
1165 | r = dm_cache_policy_register(&default_policy_type); | 1166 | r = dm_cache_policy_register(&default_policy_type); |
1166 | if (!r) { | 1167 | if (!r) { |
1167 | DMINFO("version " MQ_VERSION " loaded"); | 1168 | DMINFO("version %u.%u.%u loaded", |
1169 | mq_policy_type.version[0], | ||
1170 | mq_policy_type.version[1], | ||
1171 | mq_policy_type.version[2]); | ||
1168 | return 0; | 1172 | return 0; |
1169 | } | 1173 | } |
1170 | 1174 | ||
diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c index 2cbf5fdaac52..21c03c570c06 100644 --- a/drivers/md/dm-cache-policy.c +++ b/drivers/md/dm-cache-policy.c | |||
@@ -150,6 +150,14 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p) | |||
150 | } | 150 | } |
151 | EXPORT_SYMBOL_GPL(dm_cache_policy_get_name); | 151 | EXPORT_SYMBOL_GPL(dm_cache_policy_get_name); |
152 | 152 | ||
153 | const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p) | ||
154 | { | ||
155 | struct dm_cache_policy_type *t = p->private; | ||
156 | |||
157 | return t->version; | ||
158 | } | ||
159 | EXPORT_SYMBOL_GPL(dm_cache_policy_get_version); | ||
160 | |||
153 | size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p) | 161 | size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p) |
154 | { | 162 | { |
155 | struct dm_cache_policy_type *t = p->private; | 163 | struct dm_cache_policy_type *t = p->private; |
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h index f0f51b260544..558bdfdabf5f 100644 --- a/drivers/md/dm-cache-policy.h +++ b/drivers/md/dm-cache-policy.h | |||
@@ -196,6 +196,7 @@ struct dm_cache_policy { | |||
196 | * We maintain a little register of the different policy types. | 196 | * We maintain a little register of the different policy types. |
197 | */ | 197 | */ |
198 | #define CACHE_POLICY_NAME_SIZE 16 | 198 | #define CACHE_POLICY_NAME_SIZE 16 |
199 | #define CACHE_POLICY_VERSION_SIZE 3 | ||
199 | 200 | ||
200 | struct dm_cache_policy_type { | 201 | struct dm_cache_policy_type { |
201 | /* For use by the register code only. */ | 202 | /* For use by the register code only. */ |
@@ -206,6 +207,7 @@ struct dm_cache_policy_type { | |||
206 | * what gets passed on the target line to select your policy. | 207 | * what gets passed on the target line to select your policy. |
207 | */ | 208 | */ |
208 | char name[CACHE_POLICY_NAME_SIZE]; | 209 | char name[CACHE_POLICY_NAME_SIZE]; |
210 | unsigned version[CACHE_POLICY_VERSION_SIZE]; | ||
209 | 211 | ||
210 | /* | 212 | /* |
211 | * Policies may store a hint for each each cache block. | 213 | * Policies may store a hint for each each cache block. |
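With the version triple promoted into struct dm_cache_policy_type, each policy module declares its version once, the cleaner and mq modules above print it at load time, and the metadata code can fetch it through dm_cache_policy_get_version(). A sketch of what a policy module's registration might look like under this layout; the policy name, hint size and the create callback signature here are assumptions for illustration:

    #include <linux/module.h>
    #include <linux/device-mapper.h>
    #include "dm-cache-policy.h"

    #define DM_MSG_PREFIX "cache-policy-example"

    /* Signature assumed from dm-cache-policy.h. */
    static struct dm_cache_policy *example_create(dm_cblock_t cache_size,
                                                  sector_t origin_size,
                                                  sector_t block_size);

    static struct dm_cache_policy_type example_policy_type = {
        .name = "example",          /* placeholder name */
        .version = {1, 0, 0},       /* major, minor, patch */
        .hint_size = 4,             /* bytes of per-block hint kept in metadata */
        .owner = THIS_MODULE,
        .create = example_create,
    };

    static int __init example_init(void)
    {
        int r = dm_cache_policy_register(&example_policy_type);

        if (!r)
            DMINFO("version %u.%u.%u loaded",
                   example_policy_type.version[0],
                   example_policy_type.version[1],
                   example_policy_type.version[2]);

        return r;
    }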
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 0f4e84b15c30..10744091e6ca 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | #include "dm.h" | 7 | #include "dm.h" |
8 | #include "dm-bio-prison.h" | 8 | #include "dm-bio-prison.h" |
9 | #include "dm-bio-record.h" | ||
9 | #include "dm-cache-metadata.h" | 10 | #include "dm-cache-metadata.h" |
10 | 11 | ||
11 | #include <linux/dm-io.h> | 12 | #include <linux/dm-io.h> |
@@ -142,6 +143,7 @@ struct cache { | |||
142 | spinlock_t lock; | 143 | spinlock_t lock; |
143 | struct bio_list deferred_bios; | 144 | struct bio_list deferred_bios; |
144 | struct bio_list deferred_flush_bios; | 145 | struct bio_list deferred_flush_bios; |
146 | struct bio_list deferred_writethrough_bios; | ||
145 | struct list_head quiesced_migrations; | 147 | struct list_head quiesced_migrations; |
146 | struct list_head completed_migrations; | 148 | struct list_head completed_migrations; |
147 | struct list_head need_commit_migrations; | 149 | struct list_head need_commit_migrations; |
@@ -158,7 +160,7 @@ struct cache { | |||
158 | /* | 160 | /* |
159 | * origin_blocks entries, discarded if set. | 161 | * origin_blocks entries, discarded if set. |
160 | */ | 162 | */ |
161 | sector_t discard_block_size; /* a power of 2 times sectors per block */ | 163 | uint32_t discard_block_size; /* a power of 2 times sectors per block */ |
162 | dm_dblock_t discard_nr_blocks; | 164 | dm_dblock_t discard_nr_blocks; |
163 | unsigned long *discard_bitset; | 165 | unsigned long *discard_bitset; |
164 | 166 | ||
@@ -199,6 +201,16 @@ struct per_bio_data { | |||
199 | bool tick:1; | 201 | bool tick:1; |
200 | unsigned req_nr:2; | 202 | unsigned req_nr:2; |
201 | struct dm_deferred_entry *all_io_entry; | 203 | struct dm_deferred_entry *all_io_entry; |
204 | |||
205 | /* | ||
206 | * writethrough fields. These MUST remain at the end of this | ||
207 | * structure and the 'cache' member must be the first as it | ||
208 | * is used to determine the offsetof the writethrough fields. | ||
209 | */ | ||
210 | struct cache *cache; | ||
211 | dm_cblock_t cblock; | ||
212 | bio_end_io_t *saved_bi_end_io; | ||
213 | struct dm_bio_details bio_details; | ||
202 | }; | 214 | }; |
203 | 215 | ||
204 | struct dm_cache_migration { | 216 | struct dm_cache_migration { |
@@ -412,17 +424,24 @@ static bool block_size_is_power_of_two(struct cache *cache) | |||
412 | return cache->sectors_per_block_shift >= 0; | 424 | return cache->sectors_per_block_shift >= 0; |
413 | } | 425 | } |
414 | 426 | ||
427 | static dm_block_t block_div(dm_block_t b, uint32_t n) | ||
428 | { | ||
429 | do_div(b, n); | ||
430 | |||
431 | return b; | ||
432 | } | ||
433 | |||
415 | static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) | 434 | static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) |
416 | { | 435 | { |
417 | sector_t discard_blocks = cache->discard_block_size; | 436 | uint32_t discard_blocks = cache->discard_block_size; |
418 | dm_block_t b = from_oblock(oblock); | 437 | dm_block_t b = from_oblock(oblock); |
419 | 438 | ||
420 | if (!block_size_is_power_of_two(cache)) | 439 | if (!block_size_is_power_of_two(cache)) |
421 | (void) sector_div(discard_blocks, cache->sectors_per_block); | 440 | discard_blocks = discard_blocks / cache->sectors_per_block; |
422 | else | 441 | else |
423 | discard_blocks >>= cache->sectors_per_block_shift; | 442 | discard_blocks >>= cache->sectors_per_block_shift; |
424 | 443 | ||
425 | (void) sector_div(b, discard_blocks); | 444 | b = block_div(b, discard_blocks); |
426 | 445 | ||
427 | return to_dblock(b); | 446 | return to_dblock(b); |
428 | } | 447 | } |
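block_div() wraps do_div(), which divides its 64-bit first argument in place (leaving the quotient there) and returns the remainder; the wrapper hands the quotient back, so callers such as oblock_to_dblock() no longer need sector_div() casts now that discard_block_size is a plain uint32_t. A minimal standalone sketch of those semantics:

    #include <linux/types.h>
    #include <asm/div64.h>

    /* Quotient-returning wrapper, as introduced above. */
    static u64 block_div(u64 b, u32 n)
    {
        do_div(b, n);       /* b now holds the quotient; the remainder is discarded */

        return b;
    }

    /* do_div() itself evaluates to the remainder. */
    static bool divides_evenly(u64 b, u32 n)
    {
        return do_div(b, n) == 0;
    }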
@@ -500,16 +519,28 @@ static void save_stats(struct cache *cache) | |||
500 | /*---------------------------------------------------------------- | 519 | /*---------------------------------------------------------------- |
501 | * Per bio data | 520 | * Per bio data |
502 | *--------------------------------------------------------------*/ | 521 | *--------------------------------------------------------------*/ |
503 | static struct per_bio_data *get_per_bio_data(struct bio *bio) | 522 | |
523 | /* | ||
524 | * If using writeback, leave out struct per_bio_data's writethrough fields. | ||
525 | */ | ||
526 | #define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache)) | ||
527 | #define PB_DATA_SIZE_WT (sizeof(struct per_bio_data)) | ||
528 | |||
529 | static size_t get_per_bio_data_size(struct cache *cache) | ||
504 | { | 530 | { |
505 | struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data)); | 531 | return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB; |
532 | } | ||
533 | |||
534 | static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size) | ||
535 | { | ||
536 | struct per_bio_data *pb = dm_per_bio_data(bio, data_size); | ||
506 | BUG_ON(!pb); | 537 | BUG_ON(!pb); |
507 | return pb; | 538 | return pb; |
508 | } | 539 | } |
509 | 540 | ||
510 | static struct per_bio_data *init_per_bio_data(struct bio *bio) | 541 | static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size) |
511 | { | 542 | { |
512 | struct per_bio_data *pb = get_per_bio_data(bio); | 543 | struct per_bio_data *pb = get_per_bio_data(bio, data_size); |
513 | 544 | ||
514 | pb->tick = false; | 545 | pb->tick = false; |
515 | pb->req_nr = dm_bio_get_target_bio_nr(bio); | 546 | pb->req_nr = dm_bio_get_target_bio_nr(bio); |
@@ -543,7 +574,8 @@ static void remap_to_cache(struct cache *cache, struct bio *bio, | |||
543 | static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) | 574 | static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) |
544 | { | 575 | { |
545 | unsigned long flags; | 576 | unsigned long flags; |
546 | struct per_bio_data *pb = get_per_bio_data(bio); | 577 | size_t pb_data_size = get_per_bio_data_size(cache); |
578 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | ||
547 | 579 | ||
548 | spin_lock_irqsave(&cache->lock, flags); | 580 | spin_lock_irqsave(&cache->lock, flags); |
549 | if (cache->need_tick_bio && | 581 | if (cache->need_tick_bio && |
@@ -609,6 +641,58 @@ static void issue(struct cache *cache, struct bio *bio) | |||
609 | spin_unlock_irqrestore(&cache->lock, flags); | 641 | spin_unlock_irqrestore(&cache->lock, flags); |
610 | } | 642 | } |
611 | 643 | ||
644 | static void defer_writethrough_bio(struct cache *cache, struct bio *bio) | ||
645 | { | ||
646 | unsigned long flags; | ||
647 | |||
648 | spin_lock_irqsave(&cache->lock, flags); | ||
649 | bio_list_add(&cache->deferred_writethrough_bios, bio); | ||
650 | spin_unlock_irqrestore(&cache->lock, flags); | ||
651 | |||
652 | wake_worker(cache); | ||
653 | } | ||
654 | |||
655 | static void writethrough_endio(struct bio *bio, int err) | ||
656 | { | ||
657 | struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); | ||
658 | bio->bi_end_io = pb->saved_bi_end_io; | ||
659 | |||
660 | if (err) { | ||
661 | bio_endio(bio, err); | ||
662 | return; | ||
663 | } | ||
664 | |||
665 | dm_bio_restore(&pb->bio_details, bio); | ||
666 | remap_to_cache(pb->cache, bio, pb->cblock); | ||
667 | |||
668 | /* | ||
669 | * We can't issue this bio directly, since we're in interrupt | ||
670 | * context. So it get's put on a bio list for processing by the | ||
671 | * worker thread. | ||
672 | */ | ||
673 | defer_writethrough_bio(pb->cache, bio); | ||
674 | } | ||
675 | |||
676 | /* | ||
677 | * When running in writethrough mode we need to send writes to clean blocks | ||
678 | * to both the cache and origin devices. In future we'd like to clone the | ||
679 | * bio and send them in parallel, but for now we're doing them in | ||
680 | * series as this is easier. | ||
681 | */ | ||
682 | static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio, | ||
683 | dm_oblock_t oblock, dm_cblock_t cblock) | ||
684 | { | ||
685 | struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT); | ||
686 | |||
687 | pb->cache = cache; | ||
688 | pb->cblock = cblock; | ||
689 | pb->saved_bi_end_io = bio->bi_end_io; | ||
690 | dm_bio_record(&pb->bio_details, bio); | ||
691 | bio->bi_end_io = writethrough_endio; | ||
692 | |||
693 | remap_to_origin_clear_discard(pb->cache, bio, oblock); | ||
694 | } | ||
695 | |||
612 | /*---------------------------------------------------------------- | 696 | /*---------------------------------------------------------------- |
613 | * Migration processing | 697 | * Migration processing |
614 | * | 698 | * |
@@ -972,7 +1056,8 @@ static void defer_bio(struct cache *cache, struct bio *bio) | |||
972 | 1056 | ||
973 | static void process_flush_bio(struct cache *cache, struct bio *bio) | 1057 | static void process_flush_bio(struct cache *cache, struct bio *bio) |
974 | { | 1058 | { |
975 | struct per_bio_data *pb = get_per_bio_data(bio); | 1059 | size_t pb_data_size = get_per_bio_data_size(cache); |
1060 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | ||
976 | 1061 | ||
977 | BUG_ON(bio->bi_size); | 1062 | BUG_ON(bio->bi_size); |
978 | if (!pb->req_nr) | 1063 | if (!pb->req_nr) |
@@ -1002,7 +1087,7 @@ static void process_discard_bio(struct cache *cache, struct bio *bio) | |||
1002 | dm_block_t end_block = bio->bi_sector + bio_sectors(bio); | 1087 | dm_block_t end_block = bio->bi_sector + bio_sectors(bio); |
1003 | dm_block_t b; | 1088 | dm_block_t b; |
1004 | 1089 | ||
1005 | (void) sector_div(end_block, cache->discard_block_size); | 1090 | end_block = block_div(end_block, cache->discard_block_size); |
1006 | 1091 | ||
1007 | for (b = start_block; b < end_block; b++) | 1092 | for (b = start_block; b < end_block; b++) |
1008 | set_discard(cache, to_dblock(b)); | 1093 | set_discard(cache, to_dblock(b)); |
@@ -1044,7 +1129,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs, | |||
1044 | dm_oblock_t block = get_bio_block(cache, bio); | 1129 | dm_oblock_t block = get_bio_block(cache, bio); |
1045 | struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; | 1130 | struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell; |
1046 | struct policy_result lookup_result; | 1131 | struct policy_result lookup_result; |
1047 | struct per_bio_data *pb = get_per_bio_data(bio); | 1132 | size_t pb_data_size = get_per_bio_data_size(cache); |
1133 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | ||
1048 | bool discarded_block = is_discarded_oblock(cache, block); | 1134 | bool discarded_block = is_discarded_oblock(cache, block); |
1049 | bool can_migrate = discarded_block || spare_migration_bandwidth(cache); | 1135 | bool can_migrate = discarded_block || spare_migration_bandwidth(cache); |
1050 | 1136 | ||
@@ -1070,14 +1156,9 @@ static void process_bio(struct cache *cache, struct prealloc *structs, | |||
1070 | inc_hit_counter(cache, bio); | 1156 | inc_hit_counter(cache, bio); |
1071 | pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); | 1157 | pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); |
1072 | 1158 | ||
1073 | if (is_writethrough_io(cache, bio, lookup_result.cblock)) { | 1159 | if (is_writethrough_io(cache, bio, lookup_result.cblock)) |
1074 | /* | 1160 | remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); |
1075 | * No need to mark anything dirty in write through mode. | 1161 | else |
1076 | */ | ||
1077 | pb->req_nr == 0 ? | ||
1078 | remap_to_cache(cache, bio, lookup_result.cblock) : | ||
1079 | remap_to_origin_clear_discard(cache, bio, block); | ||
1080 | } else | ||
1081 | remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); | 1162 | remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); |
1082 | 1163 | ||
1083 | issue(cache, bio); | 1164 | issue(cache, bio); |
@@ -1086,17 +1167,8 @@ static void process_bio(struct cache *cache, struct prealloc *structs, | |||
1086 | case POLICY_MISS: | 1167 | case POLICY_MISS: |
1087 | inc_miss_counter(cache, bio); | 1168 | inc_miss_counter(cache, bio); |
1088 | pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); | 1169 | pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); |
1089 | 1170 | remap_to_origin_clear_discard(cache, bio, block); | |
1090 | if (pb->req_nr != 0) { | 1171 | issue(cache, bio); |
1091 | /* | ||
1092 | * This is a duplicate writethrough io that is no | ||
1093 | * longer needed because the block has been demoted. | ||
1094 | */ | ||
1095 | bio_endio(bio, 0); | ||
1096 | } else { | ||
1097 | remap_to_origin_clear_discard(cache, bio, block); | ||
1098 | issue(cache, bio); | ||
1099 | } | ||
1100 | break; | 1172 | break; |
1101 | 1173 | ||
1102 | case POLICY_NEW: | 1174 | case POLICY_NEW: |
@@ -1217,6 +1289,23 @@ static void process_deferred_flush_bios(struct cache *cache, bool submit_bios) | |||
1217 | submit_bios ? generic_make_request(bio) : bio_io_error(bio); | 1289 | submit_bios ? generic_make_request(bio) : bio_io_error(bio); |
1218 | } | 1290 | } |
1219 | 1291 | ||
1292 | static void process_deferred_writethrough_bios(struct cache *cache) | ||
1293 | { | ||
1294 | unsigned long flags; | ||
1295 | struct bio_list bios; | ||
1296 | struct bio *bio; | ||
1297 | |||
1298 | bio_list_init(&bios); | ||
1299 | |||
1300 | spin_lock_irqsave(&cache->lock, flags); | ||
1301 | bio_list_merge(&bios, &cache->deferred_writethrough_bios); | ||
1302 | bio_list_init(&cache->deferred_writethrough_bios); | ||
1303 | spin_unlock_irqrestore(&cache->lock, flags); | ||
1304 | |||
1305 | while ((bio = bio_list_pop(&bios))) | ||
1306 | generic_make_request(bio); | ||
1307 | } | ||
1308 | |||
1220 | static void writeback_some_dirty_blocks(struct cache *cache) | 1309 | static void writeback_some_dirty_blocks(struct cache *cache) |
1221 | { | 1310 | { |
1222 | int r = 0; | 1311 | int r = 0; |
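writethrough_endio() runs in interrupt context, so the remapped bio cannot be resubmitted from there; it is parked on cache->deferred_writethrough_bios under the cache lock, and the worker drains that list with generic_make_request(), as the two hunks above show. A minimal sketch of that defer-and-drain pattern in isolation:

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/spinlock.h>

    struct deferred_bios {
        spinlock_t lock;
        struct bio_list list;
    };

    /* Called from end_io (interrupt) context: just queue the bio. */
    static void defer_bio_for_worker(struct deferred_bios *d, struct bio *bio)
    {
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);
        bio_list_add(&d->list, bio);
        spin_unlock_irqrestore(&d->lock, flags);
    }

    /* Called from the worker (process) context: steal the list and resubmit. */
    static void drain_deferred_bios(struct deferred_bios *d)
    {
        unsigned long flags;
        struct bio_list bios;
        struct bio *bio;

        bio_list_init(&bios);

        spin_lock_irqsave(&d->lock, flags);
        bio_list_merge(&bios, &d->list);
        bio_list_init(&d->list);
        spin_unlock_irqrestore(&d->lock, flags);

        while ((bio = bio_list_pop(&bios)))
            generic_make_request(bio);
    }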
@@ -1313,6 +1402,7 @@ static int more_work(struct cache *cache) | |||
1313 | else | 1402 | else |
1314 | return !bio_list_empty(&cache->deferred_bios) || | 1403 | return !bio_list_empty(&cache->deferred_bios) || |
1315 | !bio_list_empty(&cache->deferred_flush_bios) || | 1404 | !bio_list_empty(&cache->deferred_flush_bios) || |
1405 | !bio_list_empty(&cache->deferred_writethrough_bios) || | ||
1316 | !list_empty(&cache->quiesced_migrations) || | 1406 | !list_empty(&cache->quiesced_migrations) || |
1317 | !list_empty(&cache->completed_migrations) || | 1407 | !list_empty(&cache->completed_migrations) || |
1318 | !list_empty(&cache->need_commit_migrations); | 1408 | !list_empty(&cache->need_commit_migrations); |
@@ -1331,6 +1421,8 @@ static void do_worker(struct work_struct *ws) | |||
1331 | 1421 | ||
1332 | writeback_some_dirty_blocks(cache); | 1422 | writeback_some_dirty_blocks(cache); |
1333 | 1423 | ||
1424 | process_deferred_writethrough_bios(cache); | ||
1425 | |||
1334 | if (commit_if_needed(cache)) { | 1426 | if (commit_if_needed(cache)) { |
1335 | process_deferred_flush_bios(cache, false); | 1427 | process_deferred_flush_bios(cache, false); |
1336 | 1428 | ||
@@ -1756,8 +1848,11 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca, | |||
1756 | } | 1848 | } |
1757 | 1849 | ||
1758 | r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv); | 1850 | r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv); |
1759 | if (r) | 1851 | if (r) { |
1852 | *error = "Error setting cache policy's config values"; | ||
1760 | dm_cache_policy_destroy(cache->policy); | 1853 | dm_cache_policy_destroy(cache->policy); |
1854 | cache->policy = NULL; | ||
1855 | } | ||
1761 | 1856 | ||
1762 | return r; | 1857 | return r; |
1763 | } | 1858 | } |
@@ -1793,8 +1888,6 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size, | |||
1793 | 1888 | ||
1794 | #define DEFAULT_MIGRATION_THRESHOLD (2048 * 100) | 1889 | #define DEFAULT_MIGRATION_THRESHOLD (2048 * 100) |
1795 | 1890 | ||
1796 | static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio); | ||
1797 | |||
1798 | static int cache_create(struct cache_args *ca, struct cache **result) | 1891 | static int cache_create(struct cache_args *ca, struct cache **result) |
1799 | { | 1892 | { |
1800 | int r = 0; | 1893 | int r = 0; |
@@ -1811,7 +1904,6 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
1811 | 1904 | ||
1812 | cache->ti = ca->ti; | 1905 | cache->ti = ca->ti; |
1813 | ti->private = cache; | 1906 | ti->private = cache; |
1814 | ti->per_bio_data_size = sizeof(struct per_bio_data); | ||
1815 | ti->num_flush_bios = 2; | 1907 | ti->num_flush_bios = 2; |
1816 | ti->flush_supported = true; | 1908 | ti->flush_supported = true; |
1817 | 1909 | ||
@@ -1820,9 +1912,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
1820 | ti->discard_zeroes_data_unsupported = true; | 1912 | ti->discard_zeroes_data_unsupported = true; |
1821 | 1913 | ||
1822 | memcpy(&cache->features, &ca->features, sizeof(cache->features)); | 1914 | memcpy(&cache->features, &ca->features, sizeof(cache->features)); |
1823 | 1915 | ti->per_bio_data_size = get_per_bio_data_size(cache); | |
1824 | if (cache->features.write_through) | ||
1825 | ti->num_write_bios = cache_num_write_bios; | ||
1826 | 1916 | ||
1827 | cache->callbacks.congested_fn = cache_is_congested; | 1917 | cache->callbacks.congested_fn = cache_is_congested; |
1828 | dm_table_add_target_callbacks(ti->table, &cache->callbacks); | 1918 | dm_table_add_target_callbacks(ti->table, &cache->callbacks); |
@@ -1835,7 +1925,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
1835 | 1925 | ||
1836 | /* FIXME: factor out this whole section */ | 1926 | /* FIXME: factor out this whole section */ |
1837 | origin_blocks = cache->origin_sectors = ca->origin_sectors; | 1927 | origin_blocks = cache->origin_sectors = ca->origin_sectors; |
1838 | (void) sector_div(origin_blocks, ca->block_size); | 1928 | origin_blocks = block_div(origin_blocks, ca->block_size); |
1839 | cache->origin_blocks = to_oblock(origin_blocks); | 1929 | cache->origin_blocks = to_oblock(origin_blocks); |
1840 | 1930 | ||
1841 | cache->sectors_per_block = ca->block_size; | 1931 | cache->sectors_per_block = ca->block_size; |
@@ -1848,7 +1938,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
1848 | dm_block_t cache_size = ca->cache_sectors; | 1938 | dm_block_t cache_size = ca->cache_sectors; |
1849 | 1939 | ||
1850 | cache->sectors_per_block_shift = -1; | 1940 | cache->sectors_per_block_shift = -1; |
1851 | (void) sector_div(cache_size, ca->block_size); | 1941 | cache_size = block_div(cache_size, ca->block_size); |
1852 | cache->cache_size = to_cblock(cache_size); | 1942 | cache->cache_size = to_cblock(cache_size); |
1853 | } else { | 1943 | } else { |
1854 | cache->sectors_per_block_shift = __ffs(ca->block_size); | 1944 | cache->sectors_per_block_shift = __ffs(ca->block_size); |
@@ -1873,6 +1963,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) | |||
1873 | spin_lock_init(&cache->lock); | 1963 | spin_lock_init(&cache->lock); |
1874 | bio_list_init(&cache->deferred_bios); | 1964 | bio_list_init(&cache->deferred_bios); |
1875 | bio_list_init(&cache->deferred_flush_bios); | 1965 | bio_list_init(&cache->deferred_flush_bios); |
1966 | bio_list_init(&cache->deferred_writethrough_bios); | ||
1876 | INIT_LIST_HEAD(&cache->quiesced_migrations); | 1967 | INIT_LIST_HEAD(&cache->quiesced_migrations); |
1877 | INIT_LIST_HEAD(&cache->completed_migrations); | 1968 | INIT_LIST_HEAD(&cache->completed_migrations); |
1878 | INIT_LIST_HEAD(&cache->need_commit_migrations); | 1969 | INIT_LIST_HEAD(&cache->need_commit_migrations); |
@@ -2002,6 +2093,8 @@ static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv) | |||
2002 | goto out; | 2093 | goto out; |
2003 | 2094 | ||
2004 | r = cache_create(ca, &cache); | 2095 | r = cache_create(ca, &cache); |
2096 | if (r) | ||
2097 | goto out; | ||
2005 | 2098 | ||
2006 | r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); | 2099 | r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); |
2007 | if (r) { | 2100 | if (r) { |
@@ -2016,26 +2109,13 @@ out: | |||
2016 | return r; | 2109 | return r; |
2017 | } | 2110 | } |
2018 | 2111 | ||
2019 | static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio) | ||
2020 | { | ||
2021 | int r; | ||
2022 | struct cache *cache = ti->private; | ||
2023 | dm_oblock_t block = get_bio_block(cache, bio); | ||
2024 | dm_cblock_t cblock; | ||
2025 | |||
2026 | r = policy_lookup(cache->policy, block, &cblock); | ||
2027 | if (r < 0) | ||
2028 | return 2; /* assume the worst */ | ||
2029 | |||
2030 | return (!r && !is_dirty(cache, cblock)) ? 2 : 1; | ||
2031 | } | ||
2032 | |||
2033 | static int cache_map(struct dm_target *ti, struct bio *bio) | 2112 | static int cache_map(struct dm_target *ti, struct bio *bio) |
2034 | { | 2113 | { |
2035 | struct cache *cache = ti->private; | 2114 | struct cache *cache = ti->private; |
2036 | 2115 | ||
2037 | int r; | 2116 | int r; |
2038 | dm_oblock_t block = get_bio_block(cache, bio); | 2117 | dm_oblock_t block = get_bio_block(cache, bio); |
2118 | size_t pb_data_size = get_per_bio_data_size(cache); | ||
2039 | bool can_migrate = false; | 2119 | bool can_migrate = false; |
2040 | bool discarded_block; | 2120 | bool discarded_block; |
2041 | struct dm_bio_prison_cell *cell; | 2121 | struct dm_bio_prison_cell *cell; |
@@ -2052,7 +2132,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) | |||
2052 | return DM_MAPIO_REMAPPED; | 2132 | return DM_MAPIO_REMAPPED; |
2053 | } | 2133 | } |
2054 | 2134 | ||
2055 | pb = init_per_bio_data(bio); | 2135 | pb = init_per_bio_data(bio, pb_data_size); |
2056 | 2136 | ||
2057 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { | 2137 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { |
2058 | defer_bio(cache, bio); | 2138 | defer_bio(cache, bio); |
@@ -2097,18 +2177,12 @@ static int cache_map(struct dm_target *ti, struct bio *bio) | |||
2097 | inc_hit_counter(cache, bio); | 2177 | inc_hit_counter(cache, bio); |
2098 | pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); | 2178 | pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); |
2099 | 2179 | ||
2100 | if (is_writethrough_io(cache, bio, lookup_result.cblock)) { | 2180 | if (is_writethrough_io(cache, bio, lookup_result.cblock)) |
2101 | /* | 2181 | remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock); |
2102 | * No need to mark anything dirty in write through mode. | 2182 | else |
2103 | */ | ||
2104 | pb->req_nr == 0 ? | ||
2105 | remap_to_cache(cache, bio, lookup_result.cblock) : | ||
2106 | remap_to_origin_clear_discard(cache, bio, block); | ||
2107 | cell_defer(cache, cell, false); | ||
2108 | } else { | ||
2109 | remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); | 2183 | remap_to_cache_dirty(cache, bio, block, lookup_result.cblock); |
2110 | cell_defer(cache, cell, false); | 2184 | |
2111 | } | 2185 | cell_defer(cache, cell, false); |
2112 | break; | 2186 | break; |
2113 | 2187 | ||
2114 | case POLICY_MISS: | 2188 | case POLICY_MISS: |
@@ -2143,7 +2217,8 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error) | |||
2143 | { | 2217 | { |
2144 | struct cache *cache = ti->private; | 2218 | struct cache *cache = ti->private; |
2145 | unsigned long flags; | 2219 | unsigned long flags; |
2146 | struct per_bio_data *pb = get_per_bio_data(bio); | 2220 | size_t pb_data_size = get_per_bio_data_size(cache); |
2221 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | ||
2147 | 2222 | ||
2148 | if (pb->tick) { | 2223 | if (pb->tick) { |
2149 | policy_tick(cache->policy); | 2224 | policy_tick(cache->policy); |
@@ -2319,8 +2394,7 @@ static int cache_preresume(struct dm_target *ti) | |||
2319 | } | 2394 | } |
2320 | 2395 | ||
2321 | if (!cache->loaded_mappings) { | 2396 | if (!cache->loaded_mappings) { |
2322 | r = dm_cache_load_mappings(cache->cmd, | 2397 | r = dm_cache_load_mappings(cache->cmd, cache->policy, |
2323 | dm_cache_policy_get_name(cache->policy), | ||
2324 | load_mapping, cache); | 2398 | load_mapping, cache); |
2325 | if (r) { | 2399 | if (r) { |
2326 | DMERR("could not load cache mappings"); | 2400 | DMERR("could not load cache mappings"); |
@@ -2535,7 +2609,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) | |||
2535 | 2609 | ||
2536 | static struct target_type cache_target = { | 2610 | static struct target_type cache_target = { |
2537 | .name = "cache", | 2611 | .name = "cache", |
2538 | .version = {1, 0, 0}, | 2612 | .version = {1, 1, 0}, |
2539 | .module = THIS_MODULE, | 2613 | .module = THIS_MODULE, |
2540 | .ctr = cache_ctr, | 2614 | .ctr = cache_ctr, |
2541 | .dtr = cache_dtr, | 2615 | .dtr = cache_dtr, |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 009339d62828..004ad1652b73 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -1577,6 +1577,11 @@ static bool data_dev_supports_discard(struct pool_c *pt) | |||
1577 | return q && blk_queue_discard(q); | 1577 | return q && blk_queue_discard(q); |
1578 | } | 1578 | } |
1579 | 1579 | ||
1580 | static bool is_factor(sector_t block_size, uint32_t n) | ||
1581 | { | ||
1582 | return !sector_div(block_size, n); | ||
1583 | } | ||
1584 | |||
1580 | /* | 1585 | /* |
1581 | * If discard_passdown was enabled verify that the data device | 1586 | * If discard_passdown was enabled verify that the data device |
1582 | * supports discards. Disable discard_passdown if not. | 1587 | * supports discards. Disable discard_passdown if not. |
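sector_div() divides its sector_t argument in place and returns the remainder, so is_factor() is true exactly when n divides block_size. The test it replaces, block_size & (discard_granularity - 1), is only meaningful when the granularity is a power of two. A short sketch contrasting the two checks:

    #include <linux/types.h>
    #include <linux/blkdev.h>   /* sector_div() */

    /* New check: correct for any granularity. */
    static bool is_factor(sector_t block_size, uint32_t n)
    {
        return !sector_div(block_size, n);  /* remainder of block_size / n */
    }

    /* Old check: assumes 'granularity' is a power of two. */
    static bool is_aligned_pow2(sector_t block_size, uint32_t granularity)
    {
        return !(block_size & (granularity - 1));
    }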
@@ -1602,7 +1607,7 @@ static void disable_passdown_if_not_supported(struct pool_c *pt) | |||
1602 | else if (data_limits->discard_granularity > block_size) | 1607 | else if (data_limits->discard_granularity > block_size) |
1603 | reason = "discard granularity larger than a block"; | 1608 | reason = "discard granularity larger than a block"; |
1604 | 1609 | ||
1605 | else if (block_size & (data_limits->discard_granularity - 1)) | 1610 | else if (!is_factor(block_size, data_limits->discard_granularity)) |
1606 | reason = "discard granularity not a factor of block size"; | 1611 | reason = "discard granularity not a factor of block size"; |
1607 | 1612 | ||
1608 | if (reason) { | 1613 | if (reason) { |
@@ -2544,7 +2549,7 @@ static struct target_type pool_target = { | |||
2544 | .name = "thin-pool", | 2549 | .name = "thin-pool", |
2545 | .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | | 2550 | .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | |
2546 | DM_TARGET_IMMUTABLE, | 2551 | DM_TARGET_IMMUTABLE, |
2547 | .version = {1, 6, 1}, | 2552 | .version = {1, 7, 0}, |
2548 | .module = THIS_MODULE, | 2553 | .module = THIS_MODULE, |
2549 | .ctr = pool_ctr, | 2554 | .ctr = pool_ctr, |
2550 | .dtr = pool_dtr, | 2555 | .dtr = pool_dtr, |
@@ -2831,7 +2836,7 @@ static int thin_iterate_devices(struct dm_target *ti, | |||
2831 | 2836 | ||
2832 | static struct target_type thin_target = { | 2837 | static struct target_type thin_target = { |
2833 | .name = "thin", | 2838 | .name = "thin", |
2834 | .version = {1, 7, 1}, | 2839 | .version = {1, 8, 0}, |
2835 | .module = THIS_MODULE, | 2840 | .module = THIS_MODULE, |
2836 | .ctr = thin_ctr, | 2841 | .ctr = thin_ctr, |
2837 | .dtr = thin_dtr, | 2842 | .dtr = thin_dtr, |
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 6ad538375c3c..a746f1d21c66 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c | |||
@@ -93,6 +93,13 @@ struct dm_verity_io { | |||
93 | */ | 93 | */ |
94 | }; | 94 | }; |
95 | 95 | ||
96 | struct dm_verity_prefetch_work { | ||
97 | struct work_struct work; | ||
98 | struct dm_verity *v; | ||
99 | sector_t block; | ||
100 | unsigned n_blocks; | ||
101 | }; | ||
102 | |||
96 | static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io) | 103 | static struct shash_desc *io_hash_desc(struct dm_verity *v, struct dm_verity_io *io) |
97 | { | 104 | { |
98 | return (struct shash_desc *)(io + 1); | 105 | return (struct shash_desc *)(io + 1); |
@@ -424,15 +431,18 @@ static void verity_end_io(struct bio *bio, int error) | |||
424 | * The root buffer is not prefetched, it is assumed that it will be cached | 431 | * The root buffer is not prefetched, it is assumed that it will be cached |
425 | * all the time. | 432 | * all the time. |
426 | */ | 433 | */ |
427 | static void verity_prefetch_io(struct dm_verity *v, struct dm_verity_io *io) | 434 | static void verity_prefetch_io(struct work_struct *work) |
428 | { | 435 | { |
436 | struct dm_verity_prefetch_work *pw = | ||
437 | container_of(work, struct dm_verity_prefetch_work, work); | ||
438 | struct dm_verity *v = pw->v; | ||
429 | int i; | 439 | int i; |
430 | 440 | ||
431 | for (i = v->levels - 2; i >= 0; i--) { | 441 | for (i = v->levels - 2; i >= 0; i--) { |
432 | sector_t hash_block_start; | 442 | sector_t hash_block_start; |
433 | sector_t hash_block_end; | 443 | sector_t hash_block_end; |
434 | verity_hash_at_level(v, io->block, i, &hash_block_start, NULL); | 444 | verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL); |
435 | verity_hash_at_level(v, io->block + io->n_blocks - 1, i, &hash_block_end, NULL); | 445 | verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL); |
436 | if (!i) { | 446 | if (!i) { |
437 | unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster); | 447 | unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster); |
438 | 448 | ||
@@ -452,6 +462,25 @@ no_prefetch_cluster: | |||
452 | dm_bufio_prefetch(v->bufio, hash_block_start, | 462 | dm_bufio_prefetch(v->bufio, hash_block_start, |
453 | hash_block_end - hash_block_start + 1); | 463 | hash_block_end - hash_block_start + 1); |
454 | } | 464 | } |
465 | |||
466 | kfree(pw); | ||
467 | } | ||
468 | |||
469 | static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io) | ||
470 | { | ||
471 | struct dm_verity_prefetch_work *pw; | ||
472 | |||
473 | pw = kmalloc(sizeof(struct dm_verity_prefetch_work), | ||
474 | GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); | ||
475 | |||
476 | if (!pw) | ||
477 | return; | ||
478 | |||
479 | INIT_WORK(&pw->work, verity_prefetch_io); | ||
480 | pw->v = v; | ||
481 | pw->block = io->block; | ||
482 | pw->n_blocks = io->n_blocks; | ||
483 | queue_work(v->verify_wq, &pw->work); | ||
455 | } | 484 | } |
456 | 485 | ||
457 | /* | 486 | /* |
@@ -498,7 +527,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio) | |||
498 | memcpy(io->io_vec, bio_iovec(bio), | 527 | memcpy(io->io_vec, bio_iovec(bio), |
499 | io->io_vec_size * sizeof(struct bio_vec)); | 528 | io->io_vec_size * sizeof(struct bio_vec)); |
500 | 529 | ||
501 | verity_prefetch_io(v, io); | 530 | verity_submit_prefetch(v, io); |
502 | 531 | ||
503 | generic_make_request(bio); | 532 | generic_make_request(bio); |
504 | 533 | ||
@@ -858,7 +887,7 @@ bad: | |||
858 | 887 | ||
859 | static struct target_type verity_target = { | 888 | static struct target_type verity_target = { |
860 | .name = "verity", | 889 | .name = "verity", |
861 | .version = {1, 1, 1}, | 890 | .version = {1, 2, 0}, |
862 | .module = THIS_MODULE, | 891 | .module = THIS_MODULE, |
863 | .ctr = verity_ctr, | 892 | .ctr = verity_ctr, |
864 | .dtr = verity_dtr, | 893 | .dtr = verity_dtr, |
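verity_map() no longer walks the hash tree for read-ahead itself; it packages the block range into a dm_verity_prefetch_work item and queues it on the verify workqueue, and the GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN allocation is allowed to fail silently because prefetching is only an optimisation. A minimal sketch of that fire-and-forget work-item pattern; the names here are illustrative, not dm-verity's:

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct prefetch_work {
        struct work_struct work;
        sector_t block;
        unsigned n_blocks;
    };

    static void prefetch_fn(struct work_struct *work)
    {
        struct prefetch_work *pw =
            container_of(work, struct prefetch_work, work);

        /* ... issue read-ahead for pw->block .. pw->block + pw->n_blocks - 1 ... */

        kfree(pw);  /* the work item owns itself and is freed when done */
    }

    static void submit_prefetch(struct workqueue_struct *wq,
                                sector_t block, unsigned n_blocks)
    {
        struct prefetch_work *pw;

        /* Best effort: never block, retry or dip into reserves for read-ahead. */
        pw = kmalloc(sizeof(*pw),
                     GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
        if (!pw)
            return;

        INIT_WORK(&pw->work, prefetch_fn);
        pw->block = block;
        pw->n_blocks = n_blocks;
        queue_work(wq, &pw->work);
    }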
diff --git a/drivers/md/md.c b/drivers/md/md.c index fcb878f88796..aeceedfc530b 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -7663,10 +7663,8 @@ static int remove_and_add_spares(struct mddev *mddev) | |||
7663 | removed++; | 7663 | removed++; |
7664 | } | 7664 | } |
7665 | } | 7665 | } |
7666 | if (removed) | 7666 | if (removed && mddev->kobj.sd) |
7667 | sysfs_notify(&mddev->kobj, NULL, | 7667 | sysfs_notify(&mddev->kobj, NULL, "degraded"); |
7668 | "degraded"); | ||
7669 | |||
7670 | 7668 | ||
7671 | rdev_for_each(rdev, mddev) { | 7669 | rdev_for_each(rdev, mddev) { |
7672 | if (rdev->raid_disk >= 0 && | 7670 | if (rdev->raid_disk >= 0 && |
diff --git a/drivers/md/md.h b/drivers/md/md.h index eca59c3074ef..d90fb1a879e1 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -506,7 +506,7 @@ static inline char * mdname (struct mddev * mddev) | |||
506 | static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) | 506 | static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) |
507 | { | 507 | { |
508 | char nm[20]; | 508 | char nm[20]; |
509 | if (!test_bit(Replacement, &rdev->flags)) { | 509 | if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { |
510 | sprintf(nm, "rd%d", rdev->raid_disk); | 510 | sprintf(nm, "rd%d", rdev->raid_disk); |
511 | return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); | 511 | return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); |
512 | } else | 512 | } else |
@@ -516,7 +516,7 @@ static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) | |||
516 | static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) | 516 | static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) |
517 | { | 517 | { |
518 | char nm[20]; | 518 | char nm[20]; |
519 | if (!test_bit(Replacement, &rdev->flags)) { | 519 | if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { |
520 | sprintf(nm, "rd%d", rdev->raid_disk); | 520 | sprintf(nm, "rd%d", rdev->raid_disk); |
521 | sysfs_remove_link(&mddev->kobj, nm); | 521 | sysfs_remove_link(&mddev->kobj, nm); |
522 | } | 522 | } |
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c index c4f28133ef82..b88757cd0d1d 100644 --- a/drivers/md/persistent-data/dm-btree-remove.c +++ b/drivers/md/persistent-data/dm-btree-remove.c | |||
@@ -139,15 +139,8 @@ struct child { | |||
139 | struct btree_node *n; | 139 | struct btree_node *n; |
140 | }; | 140 | }; |
141 | 141 | ||
142 | static struct dm_btree_value_type le64_type = { | 142 | static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt, |
143 | .context = NULL, | 143 | struct btree_node *parent, |
144 | .size = sizeof(__le64), | ||
145 | .inc = NULL, | ||
146 | .dec = NULL, | ||
147 | .equal = NULL | ||
148 | }; | ||
149 | |||
150 | static int init_child(struct dm_btree_info *info, struct btree_node *parent, | ||
151 | unsigned index, struct child *result) | 144 | unsigned index, struct child *result) |
152 | { | 145 | { |
153 | int r, inc; | 146 | int r, inc; |
@@ -164,7 +157,7 @@ static int init_child(struct dm_btree_info *info, struct btree_node *parent, | |||
164 | result->n = dm_block_data(result->block); | 157 | result->n = dm_block_data(result->block); |
165 | 158 | ||
166 | if (inc) | 159 | if (inc) |
167 | inc_children(info->tm, result->n, &le64_type); | 160 | inc_children(info->tm, result->n, vt); |
168 | 161 | ||
169 | *((__le64 *) value_ptr(parent, index)) = | 162 | *((__le64 *) value_ptr(parent, index)) = |
170 | cpu_to_le64(dm_block_location(result->block)); | 163 | cpu_to_le64(dm_block_location(result->block)); |
@@ -236,7 +229,7 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent, | |||
236 | } | 229 | } |
237 | 230 | ||
238 | static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info, | 231 | static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info, |
239 | unsigned left_index) | 232 | struct dm_btree_value_type *vt, unsigned left_index) |
240 | { | 233 | { |
241 | int r; | 234 | int r; |
242 | struct btree_node *parent; | 235 | struct btree_node *parent; |
@@ -244,11 +237,11 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info, | |||
244 | 237 | ||
245 | parent = dm_block_data(shadow_current(s)); | 238 | parent = dm_block_data(shadow_current(s)); |
246 | 239 | ||
247 | r = init_child(info, parent, left_index, &left); | 240 | r = init_child(info, vt, parent, left_index, &left); |
248 | if (r) | 241 | if (r) |
249 | return r; | 242 | return r; |
250 | 243 | ||
251 | r = init_child(info, parent, left_index + 1, &right); | 244 | r = init_child(info, vt, parent, left_index + 1, &right); |
252 | if (r) { | 245 | if (r) { |
253 | exit_child(info, &left); | 246 | exit_child(info, &left); |
254 | return r; | 247 | return r; |
@@ -368,7 +361,7 @@ static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent, | |||
368 | } | 361 | } |
369 | 362 | ||
370 | static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info, | 363 | static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info, |
371 | unsigned left_index) | 364 | struct dm_btree_value_type *vt, unsigned left_index) |
372 | { | 365 | { |
373 | int r; | 366 | int r; |
374 | struct btree_node *parent = dm_block_data(shadow_current(s)); | 367 | struct btree_node *parent = dm_block_data(shadow_current(s)); |
@@ -377,17 +370,17 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info, | |||
377 | /* | 370 | /* |
378 | * FIXME: fill out an array? | 371 | * FIXME: fill out an array? |
379 | */ | 372 | */ |
380 | r = init_child(info, parent, left_index, &left); | 373 | r = init_child(info, vt, parent, left_index, &left); |
381 | if (r) | 374 | if (r) |
382 | return r; | 375 | return r; |
383 | 376 | ||
384 | r = init_child(info, parent, left_index + 1, ¢er); | 377 | r = init_child(info, vt, parent, left_index + 1, ¢er); |
385 | if (r) { | 378 | if (r) { |
386 | exit_child(info, &left); | 379 | exit_child(info, &left); |
387 | return r; | 380 | return r; |
388 | } | 381 | } |
389 | 382 | ||
390 | r = init_child(info, parent, left_index + 2, &right); | 383 | r = init_child(info, vt, parent, left_index + 2, &right); |
391 | if (r) { | 384 | if (r) { |
392 | exit_child(info, &left); | 385 | exit_child(info, &left); |
393 | exit_child(info, &center); | 386 | exit_child(info, &center); |
@@ -434,7 +427,8 @@ static int get_nr_entries(struct dm_transaction_manager *tm, | |||
434 | } | 427 | } |
435 | 428 | ||
436 | static int rebalance_children(struct shadow_spine *s, | 429 | static int rebalance_children(struct shadow_spine *s, |
437 | struct dm_btree_info *info, uint64_t key) | 430 | struct dm_btree_info *info, |
431 | struct dm_btree_value_type *vt, uint64_t key) | ||
438 | { | 432 | { |
439 | int i, r, has_left_sibling, has_right_sibling; | 433 | int i, r, has_left_sibling, has_right_sibling; |
440 | uint32_t child_entries; | 434 | uint32_t child_entries; |
@@ -472,13 +466,13 @@ static int rebalance_children(struct shadow_spine *s, | |||
472 | has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1); | 466 | has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1); |
473 | 467 | ||
474 | if (!has_left_sibling) | 468 | if (!has_left_sibling) |
475 | r = rebalance2(s, info, i); | 469 | r = rebalance2(s, info, vt, i); |
476 | 470 | ||
477 | else if (!has_right_sibling) | 471 | else if (!has_right_sibling) |
478 | r = rebalance2(s, info, i - 1); | 472 | r = rebalance2(s, info, vt, i - 1); |
479 | 473 | ||
480 | else | 474 | else |
481 | r = rebalance3(s, info, i - 1); | 475 | r = rebalance3(s, info, vt, i - 1); |
482 | 476 | ||
483 | return r; | 477 | return r; |
484 | } | 478 | } |
@@ -529,7 +523,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info, | |||
529 | if (le32_to_cpu(n->header.flags) & LEAF_NODE) | 523 | if (le32_to_cpu(n->header.flags) & LEAF_NODE) |
530 | return do_leaf(n, key, index); | 524 | return do_leaf(n, key, index); |
531 | 525 | ||
532 | r = rebalance_children(s, info, key); | 526 | r = rebalance_children(s, info, vt, key); |
533 | if (r) | 527 | if (r) |
534 | break; | 528 | break; |
535 | 529 | ||
@@ -550,6 +544,14 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info, | |||
550 | return r; | 544 | return r; |
551 | } | 545 | } |
552 | 546 | ||
547 | static struct dm_btree_value_type le64_type = { | ||
548 | .context = NULL, | ||
549 | .size = sizeof(__le64), | ||
550 | .inc = NULL, | ||
551 | .dec = NULL, | ||
552 | .equal = NULL | ||
553 | }; | ||
554 | |||
553 | int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, | 555 | int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, |
554 | uint64_t *keys, dm_block_t *new_root) | 556 | uint64_t *keys, dm_block_t *new_root) |
555 | { | 557 | { |
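
A note on the dm-btree-remove.c change above: init_child() and the rebalance helpers now receive the caller's dm_btree_value_type, so when a shared child node is shadowed, inc_children() runs that type's own reference-counting callbacks instead of the callback-free le64_type; le64_type itself moves down because only dm_btree_remove() still needs it for walking internal nodes. A minimal sketch of a value type that benefits from this, with the hook prototypes paraphrased from dm-btree.h of this era (treat the exact signatures as an assumption):

    /* Illustrative only: values that name other blocks, so every copy
     * made during copy-on-write must take an extra reference. */
    static void ref_inc(void *context, void *value)
    {
            /* look up the block *value refers to and bump its refcount */
    }

    static void ref_dec(void *context, void *value)
    {
            /* drop that reference when a copy of the value goes away */
    }

    static struct dm_btree_value_type block_ref_type = {
            .context = NULL,
            .size    = sizeof(__le64),
            .inc     = ref_inc,
            .dec     = ref_dec,
            .equal   = NULL,
    };
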
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 3ee2912889e7..24909eb13fec 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -671,9 +671,11 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) | |||
671 | bi->bi_next = NULL; | 671 | bi->bi_next = NULL; |
672 | if (rrdev) | 672 | if (rrdev) |
673 | set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); | 673 | set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); |
674 | trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), | 674 | |
675 | bi, disk_devt(conf->mddev->gendisk), | 675 | if (conf->mddev->gendisk) |
676 | sh->dev[i].sector); | 676 | trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), |
677 | bi, disk_devt(conf->mddev->gendisk), | ||
678 | sh->dev[i].sector); | ||
677 | generic_make_request(bi); | 679 | generic_make_request(bi); |
678 | } | 680 | } |
679 | if (rrdev) { | 681 | if (rrdev) { |
@@ -701,9 +703,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) | |||
701 | rbi->bi_io_vec[0].bv_offset = 0; | 703 | rbi->bi_io_vec[0].bv_offset = 0; |
702 | rbi->bi_size = STRIPE_SIZE; | 704 | rbi->bi_size = STRIPE_SIZE; |
703 | rbi->bi_next = NULL; | 705 | rbi->bi_next = NULL; |
704 | trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), | 706 | if (conf->mddev->gendisk) |
705 | rbi, disk_devt(conf->mddev->gendisk), | 707 | trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), |
706 | sh->dev[i].sector); | 708 | rbi, disk_devt(conf->mddev->gendisk), |
709 | sh->dev[i].sector); | ||
707 | generic_make_request(rbi); | 710 | generic_make_request(rbi); |
708 | } | 711 | } |
709 | if (!rdev && !rrdev) { | 712 | if (!rdev && !rrdev) { |
@@ -2280,17 +2283,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, | |||
2280 | int level = conf->level; | 2283 | int level = conf->level; |
2281 | 2284 | ||
2282 | if (rcw) { | 2285 | if (rcw) { |
2283 | /* if we are not expanding this is a proper write request, and | ||
2284 | * there will be bios with new data to be drained into the | ||
2285 | * stripe cache | ||
2286 | */ | ||
2287 | if (!expand) { | ||
2288 | sh->reconstruct_state = reconstruct_state_drain_run; | ||
2289 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); | ||
2290 | } else | ||
2291 | sh->reconstruct_state = reconstruct_state_run; | ||
2292 | |||
2293 | set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); | ||
2294 | 2286 | ||
2295 | for (i = disks; i--; ) { | 2287 | for (i = disks; i--; ) { |
2296 | struct r5dev *dev = &sh->dev[i]; | 2288 | struct r5dev *dev = &sh->dev[i]; |
@@ -2303,6 +2295,21 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, | |||
2303 | s->locked++; | 2295 | s->locked++; |
2304 | } | 2296 | } |
2305 | } | 2297 | } |
2298 | /* if we are not expanding this is a proper write request, and | ||
2299 | * there will be bios with new data to be drained into the | ||
2300 | * stripe cache | ||
2301 | */ | ||
2302 | if (!expand) { | ||
2303 | if (!s->locked) | ||
2304 | /* False alarm, nothing to do */ | ||
2305 | return; | ||
2306 | sh->reconstruct_state = reconstruct_state_drain_run; | ||
2307 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); | ||
2308 | } else | ||
2309 | sh->reconstruct_state = reconstruct_state_run; | ||
2310 | |||
2311 | set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); | ||
2312 | |||
2306 | if (s->locked + conf->max_degraded == disks) | 2313 | if (s->locked + conf->max_degraded == disks) |
2307 | if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) | 2314 | if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) |
2308 | atomic_inc(&conf->pending_full_writes); | 2315 | atomic_inc(&conf->pending_full_writes); |
@@ -2311,11 +2318,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, | |||
2311 | BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || | 2318 | BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || |
2312 | test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); | 2319 | test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); |
2313 | 2320 | ||
2314 | sh->reconstruct_state = reconstruct_state_prexor_drain_run; | ||
2315 | set_bit(STRIPE_OP_PREXOR, &s->ops_request); | ||
2316 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); | ||
2317 | set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); | ||
2318 | |||
2319 | for (i = disks; i--; ) { | 2321 | for (i = disks; i--; ) { |
2320 | struct r5dev *dev = &sh->dev[i]; | 2322 | struct r5dev *dev = &sh->dev[i]; |
2321 | if (i == pd_idx) | 2323 | if (i == pd_idx) |
@@ -2330,6 +2332,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, | |||
2330 | s->locked++; | 2332 | s->locked++; |
2331 | } | 2333 | } |
2332 | } | 2334 | } |
2335 | if (!s->locked) | ||
2336 | /* False alarm - nothing to do */ | ||
2337 | return; | ||
2338 | sh->reconstruct_state = reconstruct_state_prexor_drain_run; | ||
2339 | set_bit(STRIPE_OP_PREXOR, &s->ops_request); | ||
2340 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); | ||
2341 | set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); | ||
2333 | } | 2342 | } |
2334 | 2343 | ||
2335 | /* keep the parity disk(s) locked while asynchronous operations | 2344 | /* keep the parity disk(s) locked while asynchronous operations |
@@ -2564,6 +2573,8 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, | |||
2564 | int i; | 2573 | int i; |
2565 | 2574 | ||
2566 | clear_bit(STRIPE_SYNCING, &sh->state); | 2575 | clear_bit(STRIPE_SYNCING, &sh->state); |
2576 | if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) | ||
2577 | wake_up(&conf->wait_for_overlap); | ||
2567 | s->syncing = 0; | 2578 | s->syncing = 0; |
2568 | s->replacing = 0; | 2579 | s->replacing = 0; |
2569 | /* There is nothing more to do for sync/check/repair. | 2580 | /* There is nothing more to do for sync/check/repair. |
@@ -2737,6 +2748,7 @@ static void handle_stripe_clean_event(struct r5conf *conf, | |||
2737 | { | 2748 | { |
2738 | int i; | 2749 | int i; |
2739 | struct r5dev *dev; | 2750 | struct r5dev *dev; |
2751 | int discard_pending = 0; | ||
2740 | 2752 | ||
2741 | for (i = disks; i--; ) | 2753 | for (i = disks; i--; ) |
2742 | if (sh->dev[i].written) { | 2754 | if (sh->dev[i].written) { |
@@ -2765,9 +2777,23 @@ static void handle_stripe_clean_event(struct r5conf *conf, | |||
2765 | STRIPE_SECTORS, | 2777 | STRIPE_SECTORS, |
2766 | !test_bit(STRIPE_DEGRADED, &sh->state), | 2778 | !test_bit(STRIPE_DEGRADED, &sh->state), |
2767 | 0); | 2779 | 0); |
2768 | } | 2780 | } else if (test_bit(R5_Discard, &dev->flags)) |
2769 | } else if (test_bit(R5_Discard, &sh->dev[i].flags)) | 2781 | discard_pending = 1; |
2770 | clear_bit(R5_Discard, &sh->dev[i].flags); | 2782 | } |
2783 | if (!discard_pending && | ||
2784 | test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { | ||
2785 | clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); | ||
2786 | clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); | ||
2787 | if (sh->qd_idx >= 0) { | ||
2788 | clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); | ||
2789 | clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); | ||
2790 | } | ||
2791 | /* now that discard is done we can proceed with any sync */ | ||
2792 | clear_bit(STRIPE_DISCARD, &sh->state); | ||
2793 | if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) | ||
2794 | set_bit(STRIPE_HANDLE, &sh->state); | ||
2795 | |||
2796 | } | ||
2771 | 2797 | ||
2772 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) | 2798 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) |
2773 | if (atomic_dec_and_test(&conf->pending_full_writes)) | 2799 | if (atomic_dec_and_test(&conf->pending_full_writes)) |
@@ -2826,8 +2852,10 @@ static void handle_stripe_dirtying(struct r5conf *conf, | |||
2826 | set_bit(STRIPE_HANDLE, &sh->state); | 2852 | set_bit(STRIPE_HANDLE, &sh->state); |
2827 | if (rmw < rcw && rmw > 0) { | 2853 | if (rmw < rcw && rmw > 0) { |
2828 | /* prefer read-modify-write, but need to get some data */ | 2854 | /* prefer read-modify-write, but need to get some data */ |
2829 | blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d", | 2855 | if (conf->mddev->queue) |
2830 | (unsigned long long)sh->sector, rmw); | 2856 | blk_add_trace_msg(conf->mddev->queue, |
2857 | "raid5 rmw %llu %d", | ||
2858 | (unsigned long long)sh->sector, rmw); | ||
2831 | for (i = disks; i--; ) { | 2859 | for (i = disks; i--; ) { |
2832 | struct r5dev *dev = &sh->dev[i]; | 2860 | struct r5dev *dev = &sh->dev[i]; |
2833 | if ((dev->towrite || i == sh->pd_idx) && | 2861 | if ((dev->towrite || i == sh->pd_idx) && |
@@ -2877,7 +2905,7 @@ static void handle_stripe_dirtying(struct r5conf *conf, | |||
2877 | } | 2905 | } |
2878 | } | 2906 | } |
2879 | } | 2907 | } |
2880 | if (rcw) | 2908 | if (rcw && conf->mddev->queue) |
2881 | blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", | 2909 | blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", |
2882 | (unsigned long long)sh->sector, | 2910 | (unsigned long long)sh->sector, |
2883 | rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); | 2911 | rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); |
@@ -3417,9 +3445,15 @@ static void handle_stripe(struct stripe_head *sh) | |||
3417 | return; | 3445 | return; |
3418 | } | 3446 | } |
3419 | 3447 | ||
3420 | if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { | 3448 | if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { |
3421 | set_bit(STRIPE_SYNCING, &sh->state); | 3449 | spin_lock(&sh->stripe_lock); |
3422 | clear_bit(STRIPE_INSYNC, &sh->state); | 3450 | /* Cannot process 'sync' concurrently with 'discard' */ |
3451 | if (!test_bit(STRIPE_DISCARD, &sh->state) && | ||
3452 | test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { | ||
3453 | set_bit(STRIPE_SYNCING, &sh->state); | ||
3454 | clear_bit(STRIPE_INSYNC, &sh->state); | ||
3455 | } | ||
3456 | spin_unlock(&sh->stripe_lock); | ||
3423 | } | 3457 | } |
3424 | clear_bit(STRIPE_DELAYED, &sh->state); | 3458 | clear_bit(STRIPE_DELAYED, &sh->state); |
3425 | 3459 | ||
@@ -3579,6 +3613,8 @@ static void handle_stripe(struct stripe_head *sh) | |||
3579 | test_bit(STRIPE_INSYNC, &sh->state)) { | 3613 | test_bit(STRIPE_INSYNC, &sh->state)) { |
3580 | md_done_sync(conf->mddev, STRIPE_SECTORS, 1); | 3614 | md_done_sync(conf->mddev, STRIPE_SECTORS, 1); |
3581 | clear_bit(STRIPE_SYNCING, &sh->state); | 3615 | clear_bit(STRIPE_SYNCING, &sh->state); |
3616 | if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) | ||
3617 | wake_up(&conf->wait_for_overlap); | ||
3582 | } | 3618 | } |
3583 | 3619 | ||
3584 | /* If the failed drives are just a ReadError, then we might need | 3620 | /* If the failed drives are just a ReadError, then we might need |
@@ -3982,9 +4018,10 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) | |||
3982 | atomic_inc(&conf->active_aligned_reads); | 4018 | atomic_inc(&conf->active_aligned_reads); |
3983 | spin_unlock_irq(&conf->device_lock); | 4019 | spin_unlock_irq(&conf->device_lock); |
3984 | 4020 | ||
3985 | trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), | 4021 | if (mddev->gendisk) |
3986 | align_bi, disk_devt(mddev->gendisk), | 4022 | trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), |
3987 | raid_bio->bi_sector); | 4023 | align_bi, disk_devt(mddev->gendisk), |
4024 | raid_bio->bi_sector); | ||
3988 | generic_make_request(align_bi); | 4025 | generic_make_request(align_bi); |
3989 | return 1; | 4026 | return 1; |
3990 | } else { | 4027 | } else { |
@@ -4078,7 +4115,8 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) | |||
4078 | } | 4115 | } |
4079 | spin_unlock_irq(&conf->device_lock); | 4116 | spin_unlock_irq(&conf->device_lock); |
4080 | } | 4117 | } |
4081 | trace_block_unplug(mddev->queue, cnt, !from_schedule); | 4118 | if (mddev->queue) |
4119 | trace_block_unplug(mddev->queue, cnt, !from_schedule); | ||
4082 | kfree(cb); | 4120 | kfree(cb); |
4083 | } | 4121 | } |
4084 | 4122 | ||
@@ -4141,6 +4179,13 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) | |||
4141 | sh = get_active_stripe(conf, logical_sector, 0, 0, 0); | 4179 | sh = get_active_stripe(conf, logical_sector, 0, 0, 0); |
4142 | prepare_to_wait(&conf->wait_for_overlap, &w, | 4180 | prepare_to_wait(&conf->wait_for_overlap, &w, |
4143 | TASK_UNINTERRUPTIBLE); | 4181 | TASK_UNINTERRUPTIBLE); |
4182 | set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); | ||
4183 | if (test_bit(STRIPE_SYNCING, &sh->state)) { | ||
4184 | release_stripe(sh); | ||
4185 | schedule(); | ||
4186 | goto again; | ||
4187 | } | ||
4188 | clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); | ||
4144 | spin_lock_irq(&sh->stripe_lock); | 4189 | spin_lock_irq(&sh->stripe_lock); |
4145 | for (d = 0; d < conf->raid_disks; d++) { | 4190 | for (d = 0; d < conf->raid_disks; d++) { |
4146 | if (d == sh->pd_idx || d == sh->qd_idx) | 4191 | if (d == sh->pd_idx || d == sh->qd_idx) |
@@ -4153,6 +4198,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) | |||
4153 | goto again; | 4198 | goto again; |
4154 | } | 4199 | } |
4155 | } | 4200 | } |
4201 | set_bit(STRIPE_DISCARD, &sh->state); | ||
4156 | finish_wait(&conf->wait_for_overlap, &w); | 4202 | finish_wait(&conf->wait_for_overlap, &w); |
4157 | for (d = 0; d < conf->raid_disks; d++) { | 4203 | for (d = 0; d < conf->raid_disks; d++) { |
4158 | if (d == sh->pd_idx || d == sh->qd_idx) | 4204 | if (d == sh->pd_idx || d == sh->qd_idx) |
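
The raid5.c hunks above do three related things: they guard every blk_add_trace_msg()/trace_block_bio_remap()/trace_block_unplug() call behind a check for mddev->queue or mddev->gendisk (an md array driven as a dm-raid target has neither, so the unguarded calls could dereference NULL), they bail out of schedule_reconstruction() early when no device was actually locked (the "false alarm" comments), and they make stripe 'sync' and 'discard' mutually exclusive through the new STRIPE_DISCARD flag taken under stripe_lock. A minimal sketch of the tracing guard; the wrapper name is illustrative, not anything in the driver:

    /* Only emit block-trace messages when the array owns a request queue. */
    static inline void raid5_trace_note(struct mddev *mddev, const char *msg)
    {
            if (mddev->queue)
                    blk_add_trace_msg(mddev->queue, "%s", msg);
    }
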
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 18b2c4a8a1fd..b0b663b119a8 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -221,10 +221,6 @@ struct stripe_head { | |||
221 | struct stripe_operations { | 221 | struct stripe_operations { |
222 | int target, target2; | 222 | int target, target2; |
223 | enum sum_check_flags zero_sum_result; | 223 | enum sum_check_flags zero_sum_result; |
224 | #ifdef CONFIG_MULTICORE_RAID456 | ||
225 | unsigned long request; | ||
226 | wait_queue_head_t wait_for_ops; | ||
227 | #endif | ||
228 | } ops; | 224 | } ops; |
229 | struct r5dev { | 225 | struct r5dev { |
230 | /* rreq and rvec are used for the replacement device when | 226 | /* rreq and rvec are used for the replacement device when |
@@ -323,6 +319,7 @@ enum { | |||
323 | STRIPE_COMPUTE_RUN, | 319 | STRIPE_COMPUTE_RUN, |
324 | STRIPE_OPS_REQ_PENDING, | 320 | STRIPE_OPS_REQ_PENDING, |
325 | STRIPE_ON_UNPLUG_LIST, | 321 | STRIPE_ON_UNPLUG_LIST, |
322 | STRIPE_DISCARD, | ||
326 | }; | 323 | }; |
327 | 324 | ||
328 | /* | 325 | /* |
diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c index d4e7567b367c..0b899cb6cda1 100644 --- a/drivers/media/i2c/m5mols/m5mols_core.c +++ b/drivers/media/i2c/m5mols/m5mols_core.c | |||
@@ -724,7 +724,7 @@ static int m5mols_s_stream(struct v4l2_subdev *sd, int enable) | |||
724 | if (enable) { | 724 | if (enable) { |
725 | if (is_code(code, M5MOLS_RESTYPE_MONITOR)) | 725 | if (is_code(code, M5MOLS_RESTYPE_MONITOR)) |
726 | ret = m5mols_start_monitor(info); | 726 | ret = m5mols_start_monitor(info); |
727 | if (is_code(code, M5MOLS_RESTYPE_CAPTURE)) | 727 | else if (is_code(code, M5MOLS_RESTYPE_CAPTURE)) |
728 | ret = m5mols_start_capture(info); | 728 | ret = m5mols_start_capture(info); |
729 | else | 729 | else |
730 | ret = -EINVAL; | 730 | ret = -EINVAL; |
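
The m5mols_core.c fix above is a single keyword, but it changes the control flow completely: without the else, the trailing else branch pairs only with the capture test, so a successful monitor start is immediately overwritten by ret = -EINVAL. A reduced illustration with hypothetical helper names:

    /* Broken: the final else belongs to the second if, so taking the
     * monitor branch still falls into that if/else and ret ends up
     * as -EINVAL even though start_monitor() succeeded. */
    if (is_monitor(code))
            ret = start_monitor(info);
    if (is_capture(code))
            ret = start_capture(info);
    else
            ret = -EINVAL;

    /* Fixed: exactly one of the three outcomes is taken. */
    if (is_monitor(code))
            ret = start_monitor(info);
    else if (is_capture(code))
            ret = start_capture(info);
    else
            ret = -EINVAL;
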
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c index ccd18e4ee789..54579e4c740b 100644 --- a/drivers/media/pci/bt8xx/bttv-driver.c +++ b/drivers/media/pci/bt8xx/bttv-driver.c | |||
@@ -250,17 +250,19 @@ static u8 SRAM_Table[][60] = | |||
250 | vdelay start of active video in 2 * field lines relative to | 250 | vdelay start of active video in 2 * field lines relative to |
251 | trailing edge of /VRESET pulse (VDELAY register). | 251 | trailing edge of /VRESET pulse (VDELAY register). |
252 | sheight height of active video in 2 * field lines. | 252 | sheight height of active video in 2 * field lines. |
253 | extraheight Added to sheight for cropcap.bounds.height only | ||
253 | videostart0 ITU-R frame line number of the line corresponding | 254 | videostart0 ITU-R frame line number of the line corresponding |
254 | to vdelay in the first field. */ | 255 | to vdelay in the first field. */ |
255 | #define CROPCAP(minhdelayx1, hdelayx1, swidth, totalwidth, sqwidth, \ | 256 | #define CROPCAP(minhdelayx1, hdelayx1, swidth, totalwidth, sqwidth, \ |
256 | vdelay, sheight, videostart0) \ | 257 | vdelay, sheight, extraheight, videostart0) \ |
257 | .cropcap.bounds.left = minhdelayx1, \ | 258 | .cropcap.bounds.left = minhdelayx1, \ |
258 | /* * 2 because vertically we count field lines times two, */ \ | 259 | /* * 2 because vertically we count field lines times two, */ \ |
259 | /* e.g. 23 * 2 to 23 * 2 + 576 in PAL-BGHI defrect. */ \ | 260 | /* e.g. 23 * 2 to 23 * 2 + 576 in PAL-BGHI defrect. */ \ |
260 | .cropcap.bounds.top = (videostart0) * 2 - (vdelay) + MIN_VDELAY, \ | 261 | .cropcap.bounds.top = (videostart0) * 2 - (vdelay) + MIN_VDELAY, \ |
261 | /* 4 is a safety margin at the end of the line. */ \ | 262 | /* 4 is a safety margin at the end of the line. */ \ |
262 | .cropcap.bounds.width = (totalwidth) - (minhdelayx1) - 4, \ | 263 | .cropcap.bounds.width = (totalwidth) - (minhdelayx1) - 4, \ |
263 | .cropcap.bounds.height = (sheight) + (vdelay) - MIN_VDELAY, \ | 264 | .cropcap.bounds.height = (sheight) + (extraheight) + (vdelay) - \ |
265 | MIN_VDELAY, \ | ||
264 | .cropcap.defrect.left = hdelayx1, \ | 266 | .cropcap.defrect.left = hdelayx1, \ |
265 | .cropcap.defrect.top = (videostart0) * 2, \ | 267 | .cropcap.defrect.top = (videostart0) * 2, \ |
266 | .cropcap.defrect.width = swidth, \ | 268 | .cropcap.defrect.width = swidth, \ |
@@ -301,9 +303,10 @@ const struct bttv_tvnorm bttv_tvnorms[] = { | |||
301 | /* totalwidth */ 1135, | 303 | /* totalwidth */ 1135, |
302 | /* sqwidth */ 944, | 304 | /* sqwidth */ 944, |
303 | /* vdelay */ 0x20, | 305 | /* vdelay */ 0x20, |
304 | /* bt878 (and bt848?) can capture another | 306 | /* sheight */ 576, |
305 | line below active video. */ | 307 | /* bt878 (and bt848?) can capture another |
306 | /* sheight */ (576 + 2) + 0x20 - 2, | 308 | line below active video. */ |
309 | /* extraheight */ 2, | ||
307 | /* videostart0 */ 23) | 310 | /* videostart0 */ 23) |
308 | },{ | 311 | },{ |
309 | .v4l2_id = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR, | 312 | .v4l2_id = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR, |
@@ -330,6 +333,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = { | |||
330 | /* sqwidth */ 780, | 333 | /* sqwidth */ 780, |
331 | /* vdelay */ 0x1a, | 334 | /* vdelay */ 0x1a, |
332 | /* sheight */ 480, | 335 | /* sheight */ 480, |
336 | /* extraheight */ 0, | ||
333 | /* videostart0 */ 23) | 337 | /* videostart0 */ 23) |
334 | },{ | 338 | },{ |
335 | .v4l2_id = V4L2_STD_SECAM, | 339 | .v4l2_id = V4L2_STD_SECAM, |
@@ -355,6 +359,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = { | |||
355 | /* sqwidth */ 944, | 359 | /* sqwidth */ 944, |
356 | /* vdelay */ 0x20, | 360 | /* vdelay */ 0x20, |
357 | /* sheight */ 576, | 361 | /* sheight */ 576, |
362 | /* extraheight */ 0, | ||
358 | /* videostart0 */ 23) | 363 | /* videostart0 */ 23) |
359 | },{ | 364 | },{ |
360 | .v4l2_id = V4L2_STD_PAL_Nc, | 365 | .v4l2_id = V4L2_STD_PAL_Nc, |
@@ -380,6 +385,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = { | |||
380 | /* sqwidth */ 780, | 385 | /* sqwidth */ 780, |
381 | /* vdelay */ 0x1a, | 386 | /* vdelay */ 0x1a, |
382 | /* sheight */ 576, | 387 | /* sheight */ 576, |
388 | /* extraheight */ 0, | ||
383 | /* videostart0 */ 23) | 389 | /* videostart0 */ 23) |
384 | },{ | 390 | },{ |
385 | .v4l2_id = V4L2_STD_PAL_M, | 391 | .v4l2_id = V4L2_STD_PAL_M, |
@@ -405,6 +411,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = { | |||
405 | /* sqwidth */ 780, | 411 | /* sqwidth */ 780, |
406 | /* vdelay */ 0x1a, | 412 | /* vdelay */ 0x1a, |
407 | /* sheight */ 480, | 413 | /* sheight */ 480, |
414 | /* extraheight */ 0, | ||
408 | /* videostart0 */ 23) | 415 | /* videostart0 */ 23) |
409 | },{ | 416 | },{ |
410 | .v4l2_id = V4L2_STD_PAL_N, | 417 | .v4l2_id = V4L2_STD_PAL_N, |
@@ -430,6 +437,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = { | |||
430 | /* sqwidth */ 944, | 437 | /* sqwidth */ 944, |
431 | /* vdelay */ 0x20, | 438 | /* vdelay */ 0x20, |
432 | /* sheight */ 576, | 439 | /* sheight */ 576, |
440 | /* extraheight */ 0, | ||
433 | /* videostart0 */ 23) | 441 | /* videostart0 */ 23) |
434 | },{ | 442 | },{ |
435 | .v4l2_id = V4L2_STD_NTSC_M_JP, | 443 | .v4l2_id = V4L2_STD_NTSC_M_JP, |
@@ -455,6 +463,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = { | |||
455 | /* sqwidth */ 780, | 463 | /* sqwidth */ 780, |
456 | /* vdelay */ 0x16, | 464 | /* vdelay */ 0x16, |
457 | /* sheight */ 480, | 465 | /* sheight */ 480, |
466 | /* extraheight */ 0, | ||
458 | /* videostart0 */ 23) | 467 | /* videostart0 */ 23) |
459 | },{ | 468 | },{ |
460 | /* that one hopefully works with the strange timing | 469 | /* that one hopefully works with the strange timing |
@@ -484,6 +493,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = { | |||
484 | /* sqwidth */ 944, | 493 | /* sqwidth */ 944, |
485 | /* vdelay */ 0x1a, | 494 | /* vdelay */ 0x1a, |
486 | /* sheight */ 480, | 495 | /* sheight */ 480, |
496 | /* extraheight */ 0, | ||
487 | /* videostart0 */ 23) | 497 | /* videostart0 */ 23) |
488 | } | 498 | } |
489 | }; | 499 | }; |
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 05d7b6333461..a0639e779973 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig | |||
@@ -204,7 +204,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC | |||
204 | 204 | ||
205 | config VIDEO_SH_VEU | 205 | config VIDEO_SH_VEU |
206 | tristate "SuperH VEU mem2mem video processing driver" | 206 | tristate "SuperH VEU mem2mem video processing driver" |
207 | depends on VIDEO_DEV && VIDEO_V4L2 | 207 | depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS |
208 | select VIDEOBUF2_DMA_CONTIG | 208 | select VIDEOBUF2_DMA_CONTIG |
209 | select V4L2_MEM2MEM_DEV | 209 | select V4L2_MEM2MEM_DEV |
210 | help | 210 | help |
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c index 82d9f6ac12f3..33b5ffc8d66d 100644 --- a/drivers/media/platform/exynos-gsc/gsc-core.c +++ b/drivers/media/platform/exynos-gsc/gsc-core.c | |||
@@ -1054,16 +1054,18 @@ static int gsc_m2m_suspend(struct gsc_dev *gsc) | |||
1054 | 1054 | ||
1055 | static int gsc_m2m_resume(struct gsc_dev *gsc) | 1055 | static int gsc_m2m_resume(struct gsc_dev *gsc) |
1056 | { | 1056 | { |
1057 | struct gsc_ctx *ctx; | ||
1057 | unsigned long flags; | 1058 | unsigned long flags; |
1058 | 1059 | ||
1059 | spin_lock_irqsave(&gsc->slock, flags); | 1060 | spin_lock_irqsave(&gsc->slock, flags); |
1060 | /* Clear for full H/W setup in first run after resume */ | 1061 | /* Clear for full H/W setup in first run after resume */ |
1062 | ctx = gsc->m2m.ctx; | ||
1061 | gsc->m2m.ctx = NULL; | 1063 | gsc->m2m.ctx = NULL; |
1062 | spin_unlock_irqrestore(&gsc->slock, flags); | 1064 | spin_unlock_irqrestore(&gsc->slock, flags); |
1063 | 1065 | ||
1064 | if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state)) | 1066 | if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state)) |
1065 | gsc_m2m_job_finish(gsc->m2m.ctx, | 1067 | gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR); |
1066 | VB2_BUF_STATE_ERROR); | 1068 | |
1067 | return 0; | 1069 | return 0; |
1068 | } | 1070 | } |
1069 | 1071 | ||
@@ -1204,7 +1206,7 @@ static int gsc_resume(struct device *dev) | |||
1204 | /* Do not resume if the device was idle before system suspend */ | 1206 | /* Do not resume if the device was idle before system suspend */ |
1205 | spin_lock_irqsave(&gsc->slock, flags); | 1207 | spin_lock_irqsave(&gsc->slock, flags); |
1206 | if (!test_and_clear_bit(ST_SUSPEND, &gsc->state) || | 1208 | if (!test_and_clear_bit(ST_SUSPEND, &gsc->state) || |
1207 | !gsc_m2m_active(gsc)) { | 1209 | !gsc_m2m_opened(gsc)) { |
1208 | spin_unlock_irqrestore(&gsc->slock, flags); | 1210 | spin_unlock_irqrestore(&gsc->slock, flags); |
1209 | return 0; | 1211 | return 0; |
1210 | } | 1212 | } |
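
The gsc_m2m_resume() fix above (the s5p-fimc hunk below applies the same fix) addresses use of a pointer that had just been cleared: m2m.ctx was nulled under the spinlock and then passed to gsc_m2m_job_finish(), which therefore always received NULL. Snapshotting the pointer into a local while the lock is still held keeps the old context available for the job-finish call. A reduced sketch of the pattern, with job_finish() and the state names standing in for the driver's own helpers:

    spin_lock_irqsave(&dev->slock, flags);
    ctx = dev->m2m.ctx;      /* snapshot before clearing */
    dev->m2m.ctx = NULL;     /* force full H/W setup on the next run */
    spin_unlock_irqrestore(&dev->slock, flags);

    if (test_and_clear_bit(ST_M2M_SUSPENDED, &dev->state))
            job_finish(ctx, BUF_STATE_ERROR);   /* uses the snapshot */
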
diff --git a/drivers/media/platform/s5p-fimc/fimc-core.c b/drivers/media/platform/s5p-fimc/fimc-core.c index e3916bde45cf..0f513dd19f86 100644 --- a/drivers/media/platform/s5p-fimc/fimc-core.c +++ b/drivers/media/platform/s5p-fimc/fimc-core.c | |||
@@ -850,16 +850,18 @@ static int fimc_m2m_suspend(struct fimc_dev *fimc) | |||
850 | 850 | ||
851 | static int fimc_m2m_resume(struct fimc_dev *fimc) | 851 | static int fimc_m2m_resume(struct fimc_dev *fimc) |
852 | { | 852 | { |
853 | struct fimc_ctx *ctx; | ||
853 | unsigned long flags; | 854 | unsigned long flags; |
854 | 855 | ||
855 | spin_lock_irqsave(&fimc->slock, flags); | 856 | spin_lock_irqsave(&fimc->slock, flags); |
856 | /* Clear for full H/W setup in first run after resume */ | 857 | /* Clear for full H/W setup in first run after resume */ |
858 | ctx = fimc->m2m.ctx; | ||
857 | fimc->m2m.ctx = NULL; | 859 | fimc->m2m.ctx = NULL; |
858 | spin_unlock_irqrestore(&fimc->slock, flags); | 860 | spin_unlock_irqrestore(&fimc->slock, flags); |
859 | 861 | ||
860 | if (test_and_clear_bit(ST_M2M_SUSPENDED, &fimc->state)) | 862 | if (test_and_clear_bit(ST_M2M_SUSPENDED, &fimc->state)) |
861 | fimc_m2m_job_finish(fimc->m2m.ctx, | 863 | fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR); |
862 | VB2_BUF_STATE_ERROR); | 864 | |
863 | return 0; | 865 | return 0; |
864 | } | 866 | } |
865 | 867 | ||
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite-reg.c b/drivers/media/platform/s5p-fimc/fimc-lite-reg.c index f0af0754a7b4..ac9663ce2a49 100644 --- a/drivers/media/platform/s5p-fimc/fimc-lite-reg.c +++ b/drivers/media/platform/s5p-fimc/fimc-lite-reg.c | |||
@@ -128,10 +128,10 @@ static const u32 src_pixfmt_map[8][3] = { | |||
128 | void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f) | 128 | void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f) |
129 | { | 129 | { |
130 | enum v4l2_mbus_pixelcode pixelcode = dev->fmt->mbus_code; | 130 | enum v4l2_mbus_pixelcode pixelcode = dev->fmt->mbus_code; |
131 | unsigned int i = ARRAY_SIZE(src_pixfmt_map); | 131 | int i = ARRAY_SIZE(src_pixfmt_map); |
132 | u32 cfg; | 132 | u32 cfg; |
133 | 133 | ||
134 | while (i-- >= 0) { | 134 | while (--i >= 0) { |
135 | if (src_pixfmt_map[i][0] == pixelcode) | 135 | if (src_pixfmt_map[i][0] == pixelcode) |
136 | break; | 136 | break; |
137 | } | 137 | } |
@@ -224,9 +224,9 @@ static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f) | |||
224 | { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY }, | 224 | { V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY }, |
225 | }; | 225 | }; |
226 | u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT); | 226 | u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT); |
227 | unsigned int i = ARRAY_SIZE(pixcode); | 227 | int i = ARRAY_SIZE(pixcode); |
228 | 228 | ||
229 | while (i-- >= 0) | 229 | while (--i >= 0) |
230 | if (pixcode[i][0] == dev->fmt->mbus_code) | 230 | if (pixcode[i][0] == dev->fmt->mbus_code) |
231 | break; | 231 | break; |
232 | cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK; | 232 | cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK; |
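
The two fimc-lite-reg.c loops above are an unsigned-wraparound bug: with unsigned int i, the test i-- >= 0 is always true, so when no table entry matches, the index wraps to UINT_MAX and the loop walks past the array. Making i signed and pre-decrementing terminates correctly and leaves i == -1 as a natural "not found" marker. A standalone illustration:

    #include <stdio.h>

    int main(void)
    {
            const int table[3] = { 10, 20, 30 };
            int i = 3;                  /* signed on purpose */

            while (--i >= 0)            /* visits 2, 1, 0, then stops */
                    if (table[i] == 25)
                            break;

            if (i < 0)
                    printf("no match\n");   /* i == -1 means not found */
            return 0;
    }
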
diff --git a/drivers/media/platform/s5p-fimc/fimc-lite.c b/drivers/media/platform/s5p-fimc/fimc-lite.c index bfc4206935c8..bbc35de7db27 100644 --- a/drivers/media/platform/s5p-fimc/fimc-lite.c +++ b/drivers/media/platform/s5p-fimc/fimc-lite.c | |||
@@ -1408,6 +1408,7 @@ static const struct v4l2_ctrl_config fimc_lite_ctrl = { | |||
1408 | .id = V4L2_CTRL_CLASS_USER | 0x1001, | 1408 | .id = V4L2_CTRL_CLASS_USER | 0x1001, |
1409 | .type = V4L2_CTRL_TYPE_BOOLEAN, | 1409 | .type = V4L2_CTRL_TYPE_BOOLEAN, |
1410 | .name = "Test Pattern 640x480", | 1410 | .name = "Test Pattern 640x480", |
1411 | .step = 1, | ||
1411 | }; | 1412 | }; |
1412 | 1413 | ||
1413 | static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc) | 1414 | static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc) |
diff --git a/drivers/media/platform/s5p-fimc/fimc-mdevice.c b/drivers/media/platform/s5p-fimc/fimc-mdevice.c index a17fcb2d5d41..cd38d708ab58 100644 --- a/drivers/media/platform/s5p-fimc/fimc-mdevice.c +++ b/drivers/media/platform/s5p-fimc/fimc-mdevice.c | |||
@@ -827,7 +827,7 @@ static int fimc_md_link_notify(struct media_pad *source, | |||
827 | struct fimc_pipeline *pipeline; | 827 | struct fimc_pipeline *pipeline; |
828 | struct v4l2_subdev *sd; | 828 | struct v4l2_subdev *sd; |
829 | struct mutex *lock; | 829 | struct mutex *lock; |
830 | int ret = 0; | 830 | int i, ret = 0; |
831 | int ref_count; | 831 | int ref_count; |
832 | 832 | ||
833 | if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV) | 833 | if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV) |
@@ -854,29 +854,28 @@ static int fimc_md_link_notify(struct media_pad *source, | |||
854 | return 0; | 854 | return 0; |
855 | } | 855 | } |
856 | 856 | ||
857 | mutex_lock(lock); | ||
858 | ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count; | ||
859 | |||
857 | if (!(flags & MEDIA_LNK_FL_ENABLED)) { | 860 | if (!(flags & MEDIA_LNK_FL_ENABLED)) { |
858 | int i; | 861 | if (ref_count > 0) { |
859 | mutex_lock(lock); | 862 | ret = __fimc_pipeline_close(pipeline); |
860 | ret = __fimc_pipeline_close(pipeline); | 863 | if (!ret && fimc) |
864 | fimc_ctrls_delete(fimc->vid_cap.ctx); | ||
865 | } | ||
861 | for (i = 0; i < IDX_MAX; i++) | 866 | for (i = 0; i < IDX_MAX; i++) |
862 | pipeline->subdevs[i] = NULL; | 867 | pipeline->subdevs[i] = NULL; |
863 | if (fimc) | 868 | } else if (ref_count > 0) { |
864 | fimc_ctrls_delete(fimc->vid_cap.ctx); | 869 | /* |
865 | mutex_unlock(lock); | 870 | * Link activation. Enable power of pipeline elements only if |
866 | return ret; | 871 | * the pipeline is already in use, i.e. its video node is open. |
872 | * Recreate the controls destroyed during the link deactivation. | ||
873 | */ | ||
874 | ret = __fimc_pipeline_open(pipeline, | ||
875 | source->entity, true); | ||
876 | if (!ret && fimc) | ||
877 | ret = fimc_capture_ctrls_create(fimc); | ||
867 | } | 878 | } |
868 | /* | ||
869 | * Link activation. Enable power of pipeline elements only if the | ||
870 | * pipeline is already in use, i.e. its video node is opened. | ||
871 | * Recreate the controls destroyed during the link deactivation. | ||
872 | */ | ||
873 | mutex_lock(lock); | ||
874 | |||
875 | ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count; | ||
876 | if (ref_count > 0) | ||
877 | ret = __fimc_pipeline_open(pipeline, source->entity, true); | ||
878 | if (!ret && fimc) | ||
879 | ret = fimc_capture_ctrls_create(fimc); | ||
880 | 879 | ||
881 | mutex_unlock(lock); | 880 | mutex_unlock(lock); |
882 | return ret ? -EPIPE : ret; | 881 | return ret ? -EPIPE : ret; |
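
The fimc_md_link_notify() rework above moves the refcount read under the mutex that serializes pipeline open/close and makes both directions conditional on the pipeline actually being in use (its video node open): deactivation only closes the pipeline and deletes controls when ref_count > 0, and activation only powers the pipeline up in that same case. A simplified sketch of the resulting shape, with the helper names generalized from the driver's __fimc_pipeline_open/close:

    mutex_lock(lock);
    ref_count = node_is_capture ? cap_refcnt : lite_refcnt;

    if (!link_enabled) {
            if (ref_count > 0)
                    ret = pipeline_close(pipeline);     /* only if in use */
            clear_pipeline_subdevs(pipeline);
    } else if (ref_count > 0) {
            /* power up only when someone already holds the video node */
            ret = pipeline_open(pipeline, source_entity, true);
    }
    mutex_unlock(lock);
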
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c index e84703c314ce..1cb6d57987c6 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c | |||
@@ -276,7 +276,7 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err) | |||
276 | unsigned int frame_type; | 276 | unsigned int frame_type; |
277 | 277 | ||
278 | dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev); | 278 | dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev); |
279 | frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev); | 279 | frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_disp_frame_type, ctx); |
280 | 280 | ||
281 | /* If frame is same as previous then skip and do not dequeue */ | 281 | /* If frame is same as previous then skip and do not dequeue */ |
282 | if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) { | 282 | if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) { |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 2356fd52a169..4f6b553c4b2d 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c | |||
@@ -232,6 +232,7 @@ static struct mfc_control controls[] = { | |||
232 | .minimum = 0, | 232 | .minimum = 0, |
233 | .maximum = 1, | 233 | .maximum = 1, |
234 | .default_value = 0, | 234 | .default_value = 0, |
235 | .step = 1, | ||
235 | .menu_skip_mask = 0, | 236 | .menu_skip_mask = 0, |
236 | }, | 237 | }, |
237 | { | 238 | { |
diff --git a/drivers/media/radio/radio-ma901.c b/drivers/media/radio/radio-ma901.c index c61f590029ad..348dafc0318a 100644 --- a/drivers/media/radio/radio-ma901.c +++ b/drivers/media/radio/radio-ma901.c | |||
@@ -347,9 +347,20 @@ static void usb_ma901radio_release(struct v4l2_device *v4l2_dev) | |||
347 | static int usb_ma901radio_probe(struct usb_interface *intf, | 347 | static int usb_ma901radio_probe(struct usb_interface *intf, |
348 | const struct usb_device_id *id) | 348 | const struct usb_device_id *id) |
349 | { | 349 | { |
350 | struct usb_device *dev = interface_to_usbdev(intf); | ||
350 | struct ma901radio_device *radio; | 351 | struct ma901radio_device *radio; |
351 | int retval = 0; | 352 | int retval = 0; |
352 | 353 | ||
354 | /* Masterkit MA901 usb radio has the same USB ID as many others | ||
355 | * Atmel V-USB devices. Let's make additional checks to be sure | ||
356 | * that this is our device. | ||
357 | */ | ||
358 | |||
359 | if (dev->product && dev->manufacturer && | ||
360 | (strncmp(dev->product, "MA901", 5) != 0 | ||
361 | || strncmp(dev->manufacturer, "www.masterkit.ru", 16) != 0)) | ||
362 | return -ENODEV; | ||
363 | |||
353 | radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL); | 364 | radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL); |
354 | if (!radio) { | 365 | if (!radio) { |
355 | dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n"); | 366 | dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n"); |
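
The radio-ma901 probe change above deals with a shared USB VID:PID: many Atmel V-USB based gadgets reuse the same ID, so the driver now also checks the product and manufacturer descriptor strings before binding, rejecting the device only when the strings are present and do not match. A standalone sketch of that check:

    #include <string.h>

    /* Return nonzero if the descriptor strings are consistent with an MA901.
     * Missing strings are accepted, matching the driver's fallback. */
    static int looks_like_ma901(const char *product, const char *manufacturer)
    {
            if (!product || !manufacturer)
                    return 1;
            return strncmp(product, "MA901", 5) == 0 &&
                   strncmp(manufacturer, "www.masterkit.ru", 16) == 0;
    }
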
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index 19f3563c61da..5a79c333d45e 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig | |||
@@ -291,7 +291,7 @@ config IR_TTUSBIR | |||
291 | 291 | ||
292 | config IR_RX51 | 292 | config IR_RX51 |
293 | tristate "Nokia N900 IR transmitter diode" | 293 | tristate "Nokia N900 IR transmitter diode" |
294 | depends on OMAP_DM_TIMER && LIRC && !ARCH_MULTIPLATFORM | 294 | depends on OMAP_DM_TIMER && ARCH_OMAP2PLUS && LIRC && !ARCH_MULTIPLATFORM |
295 | ---help--- | 295 | ---help--- |
296 | Say Y or M here if you want to enable support for the IR | 296 | Say Y or M here if you want to enable support for the IR |
297 | transmitter diode built in the Nokia N900 (RX51) device. | 297 | transmitter diode built in the Nokia N900 (RX51) device. |
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile index a9d355230e8e..768aaf62d5dc 100644 --- a/drivers/media/v4l2-core/Makefile +++ b/drivers/media/v4l2-core/Makefile | |||
@@ -10,7 +10,7 @@ ifeq ($(CONFIG_COMPAT),y) | |||
10 | videodev-objs += v4l2-compat-ioctl32.o | 10 | videodev-objs += v4l2-compat-ioctl32.o |
11 | endif | 11 | endif |
12 | 12 | ||
13 | obj-$(CONFIG_VIDEO_DEV) += videodev.o | 13 | obj-$(CONFIG_VIDEO_V4L2) += videodev.o |
14 | obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o | 14 | obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o |
15 | obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o | 15 | obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o |
16 | 16 | ||
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 45ea7185c003..642c6223fa6c 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c | |||
@@ -152,6 +152,20 @@ static void mei_me_intr_disable(struct mei_device *dev) | |||
152 | } | 152 | } |
153 | 153 | ||
154 | /** | 154 | /** |
155 | * mei_me_hw_reset_release - release device from the reset | ||
156 | * | ||
157 | * @dev: the device structure | ||
158 | */ | ||
159 | static void mei_me_hw_reset_release(struct mei_device *dev) | ||
160 | { | ||
161 | struct mei_me_hw *hw = to_me_hw(dev); | ||
162 | u32 hcsr = mei_hcsr_read(hw); | ||
163 | |||
164 | hcsr |= H_IG; | ||
165 | hcsr &= ~H_RST; | ||
166 | mei_hcsr_set(hw, hcsr); | ||
167 | } | ||
168 | /** | ||
155 | * mei_me_hw_reset - resets fw via mei csr register. | 169 | * mei_me_hw_reset - resets fw via mei csr register. |
156 | * | 170 | * |
157 | * @dev: the device structure | 171 | * @dev: the device structure |
@@ -169,18 +183,14 @@ static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable) | |||
169 | if (intr_enable) | 183 | if (intr_enable) |
170 | hcsr |= H_IE; | 184 | hcsr |= H_IE; |
171 | else | 185 | else |
172 | hcsr &= ~H_IE; | 186 | hcsr |= ~H_IE; |
173 | |||
174 | mei_hcsr_set(hw, hcsr); | ||
175 | |||
176 | hcsr = mei_hcsr_read(hw) | H_IG; | ||
177 | hcsr &= ~H_RST; | ||
178 | 187 | ||
179 | mei_hcsr_set(hw, hcsr); | 188 | mei_hcsr_set(hw, hcsr); |
180 | 189 | ||
181 | hcsr = mei_hcsr_read(hw); | 190 | if (dev->dev_state == MEI_DEV_POWER_DOWN) |
191 | mei_me_hw_reset_release(dev); | ||
182 | 192 | ||
183 | dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", hcsr); | 193 | dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", mei_hcsr_read(hw)); |
184 | } | 194 | } |
185 | 195 | ||
186 | /** | 196 | /** |
@@ -466,7 +476,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id) | |||
466 | mutex_unlock(&dev->device_lock); | 476 | mutex_unlock(&dev->device_lock); |
467 | return IRQ_HANDLED; | 477 | return IRQ_HANDLED; |
468 | } else { | 478 | } else { |
469 | dev_dbg(&dev->pdev->dev, "FW not ready.\n"); | 479 | dev_dbg(&dev->pdev->dev, "Reset Completed.\n"); |
480 | mei_me_hw_reset_release(dev); | ||
470 | mutex_unlock(&dev->device_lock); | 481 | mutex_unlock(&dev->device_lock); |
471 | return IRQ_HANDLED; | 482 | return IRQ_HANDLED; |
472 | } | 483 | } |
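
The hw-me.c rework above splits the reset handshake: mei_me_hw_reset() now leaves H_RST asserted, and the new mei_me_hw_reset_release() drops it, either immediately on the power-down path or from the interrupt thread once the firmware reports that the reset completed. The release step is just the two CSR bit operations from the new helper, annotated here; the bit meanings are stated as an assumption about the ME host CSR layout:

    u32 hcsr = mei_hcsr_read(hw);

    hcsr |= H_IG;    /* host interrupt generate: signal the firmware */
    hcsr &= ~H_RST;  /* de-assert host reset so the interface comes back up */
    mei_hcsr_set(hw, hcsr);
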
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index 6ec530168afb..356179991a2e 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c | |||
@@ -183,6 +183,24 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled) | |||
183 | mei_cl_all_write_clear(dev); | 183 | mei_cl_all_write_clear(dev); |
184 | } | 184 | } |
185 | 185 | ||
186 | void mei_stop(struct mei_device *dev) | ||
187 | { | ||
188 | dev_dbg(&dev->pdev->dev, "stopping the device.\n"); | ||
189 | |||
190 | mutex_lock(&dev->device_lock); | ||
191 | |||
192 | cancel_delayed_work(&dev->timer_work); | ||
193 | |||
194 | mei_wd_stop(dev); | ||
195 | |||
196 | dev->dev_state = MEI_DEV_POWER_DOWN; | ||
197 | mei_reset(dev, 0); | ||
198 | |||
199 | mutex_unlock(&dev->device_lock); | ||
200 | |||
201 | flush_scheduled_work(); | ||
202 | } | ||
203 | |||
186 | 204 | ||
187 | 205 | ||
188 | 206 | ||
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h index cb80166161f0..97873812e33b 100644 --- a/drivers/misc/mei/mei_dev.h +++ b/drivers/misc/mei/mei_dev.h | |||
@@ -381,6 +381,7 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec) | |||
381 | void mei_device_init(struct mei_device *dev); | 381 | void mei_device_init(struct mei_device *dev); |
382 | void mei_reset(struct mei_device *dev, int interrupts); | 382 | void mei_reset(struct mei_device *dev, int interrupts); |
383 | int mei_hw_init(struct mei_device *dev); | 383 | int mei_hw_init(struct mei_device *dev); |
384 | void mei_stop(struct mei_device *dev); | ||
384 | 385 | ||
385 | /* | 386 | /* |
386 | * MEI interrupt functions prototype | 387 | * MEI interrupt functions prototype |
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index b40ec0601ab0..b8b5c9c3ad03 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
@@ -247,44 +247,14 @@ static void mei_remove(struct pci_dev *pdev) | |||
247 | 247 | ||
248 | hw = to_me_hw(dev); | 248 | hw = to_me_hw(dev); |
249 | 249 | ||
250 | mutex_lock(&dev->device_lock); | ||
251 | |||
252 | cancel_delayed_work(&dev->timer_work); | ||
253 | 250 | ||
254 | mei_wd_stop(dev); | 251 | dev_err(&pdev->dev, "stop\n"); |
252 | mei_stop(dev); | ||
255 | 253 | ||
256 | mei_pdev = NULL; | 254 | mei_pdev = NULL; |
257 | 255 | ||
258 | if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) { | ||
259 | dev->iamthif_cl.state = MEI_FILE_DISCONNECTING; | ||
260 | mei_cl_disconnect(&dev->iamthif_cl); | ||
261 | } | ||
262 | if (dev->wd_cl.state == MEI_FILE_CONNECTED) { | ||
263 | dev->wd_cl.state = MEI_FILE_DISCONNECTING; | ||
264 | mei_cl_disconnect(&dev->wd_cl); | ||
265 | } | ||
266 | |||
267 | /* Unregistering watchdog device */ | ||
268 | mei_watchdog_unregister(dev); | 256 | mei_watchdog_unregister(dev); |
269 | 257 | ||
270 | /* remove entry if already in list */ | ||
271 | dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n"); | ||
272 | |||
273 | if (dev->open_handle_count > 0) | ||
274 | dev->open_handle_count--; | ||
275 | mei_cl_unlink(&dev->wd_cl); | ||
276 | |||
277 | if (dev->open_handle_count > 0) | ||
278 | dev->open_handle_count--; | ||
279 | mei_cl_unlink(&dev->iamthif_cl); | ||
280 | |||
281 | dev->iamthif_current_cb = NULL; | ||
282 | dev->me_clients_num = 0; | ||
283 | |||
284 | mutex_unlock(&dev->device_lock); | ||
285 | |||
286 | flush_scheduled_work(); | ||
287 | |||
288 | /* disable interrupts */ | 258 | /* disable interrupts */ |
289 | mei_disable_interrupts(dev); | 259 | mei_disable_interrupts(dev); |
290 | 260 | ||
@@ -308,28 +278,20 @@ static int mei_pci_suspend(struct device *device) | |||
308 | { | 278 | { |
309 | struct pci_dev *pdev = to_pci_dev(device); | 279 | struct pci_dev *pdev = to_pci_dev(device); |
310 | struct mei_device *dev = pci_get_drvdata(pdev); | 280 | struct mei_device *dev = pci_get_drvdata(pdev); |
311 | int err; | ||
312 | 281 | ||
313 | if (!dev) | 282 | if (!dev) |
314 | return -ENODEV; | 283 | return -ENODEV; |
315 | mutex_lock(&dev->device_lock); | ||
316 | 284 | ||
317 | cancel_delayed_work(&dev->timer_work); | 285 | dev_err(&pdev->dev, "suspend\n"); |
318 | 286 | ||
319 | /* Stop watchdog if exists */ | 287 | mei_stop(dev); |
320 | err = mei_wd_stop(dev); | 288 | |
321 | /* Set new mei state */ | 289 | mei_disable_interrupts(dev); |
322 | if (dev->dev_state == MEI_DEV_ENABLED || | ||
323 | dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) { | ||
324 | dev->dev_state = MEI_DEV_POWER_DOWN; | ||
325 | mei_reset(dev, 0); | ||
326 | } | ||
327 | mutex_unlock(&dev->device_lock); | ||
328 | 290 | ||
329 | free_irq(pdev->irq, dev); | 291 | free_irq(pdev->irq, dev); |
330 | pci_disable_msi(pdev); | 292 | pci_disable_msi(pdev); |
331 | 293 | ||
332 | return err; | 294 | return 0; |
333 | } | 295 | } |
334 | 296 | ||
335 | static int mei_pci_resume(struct device *device) | 297 | static int mei_pci_resume(struct device *device) |
diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig index 39c2ecadb273..ea98f7e9ccd1 100644 --- a/drivers/misc/vmw_vmci/Kconfig +++ b/drivers/misc/vmw_vmci/Kconfig | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | config VMWARE_VMCI | 5 | config VMWARE_VMCI |
6 | tristate "VMware VMCI Driver" | 6 | tristate "VMware VMCI Driver" |
7 | depends on X86 && PCI | 7 | depends on X86 && PCI && NET |
8 | help | 8 | help |
9 | This is VMware's Virtual Machine Communication Interface. It enables | 9 | This is VMware's Virtual Machine Communication Interface. It enables |
10 | high-speed communication between host and guest in a virtual | 10 | high-speed communication between host and guest in a virtual |
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c index ed5c433cd493..f3cdd904fe4d 100644 --- a/drivers/misc/vmw_vmci/vmci_datagram.c +++ b/drivers/misc/vmw_vmci/vmci_datagram.c | |||
@@ -42,9 +42,11 @@ struct datagram_entry { | |||
42 | 42 | ||
43 | struct delayed_datagram_info { | 43 | struct delayed_datagram_info { |
44 | struct datagram_entry *entry; | 44 | struct datagram_entry *entry; |
45 | struct vmci_datagram msg; | ||
46 | struct work_struct work; | 45 | struct work_struct work; |
47 | bool in_dg_host_queue; | 46 | bool in_dg_host_queue; |
47 | /* msg and msg_payload must be together. */ | ||
48 | struct vmci_datagram msg; | ||
49 | u8 msg_payload[]; | ||
48 | }; | 50 | }; |
49 | 51 | ||
50 | /* Number of in-flight host->host datagrams */ | 52 | /* Number of in-flight host->host datagrams */ |
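
The vmci_datagram.c reordering above follows from the C flexible array member rule: msg_payload[] must be the last member, and the payload bytes must sit directly after the datagram header, so struct vmci_datagram msg has to be the final fixed field. A reduced, userspace-style illustration of the layout and allocation pattern (names are illustrative):

    #include <stdlib.h>
    #include <string.h>

    struct dg_header { unsigned long long dst, src, size; };

    struct delayed_info {
            void *entry;
            int   in_host_queue;
            /* header and payload stay adjacent: the flexible array
             * starts exactly where the header ends */
            struct dg_header hdr;
            unsigned char    payload[];
    };

    static struct delayed_info *pack(const struct dg_header *h,
                                     const void *data, size_t len)
    {
            struct delayed_info *di = malloc(sizeof(*di) + len);

            if (!di)
                    return NULL;
            di->hdr = *h;
            memcpy(di->payload, data, len);  /* contiguous with hdr */
            return di;
    }
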
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c index 63feb75cc8e0..9279a9174f84 100644 --- a/drivers/mtd/bcm47xxpart.c +++ b/drivers/mtd/bcm47xxpart.c | |||
@@ -19,6 +19,12 @@ | |||
19 | /* 10 parts were found on sflash on Netgear WNDR4500 */ | 19 | /* 10 parts were found on sflash on Netgear WNDR4500 */ |
20 | #define BCM47XXPART_MAX_PARTS 12 | 20 | #define BCM47XXPART_MAX_PARTS 12 |
21 | 21 | ||
22 | /* | ||
23 | * Amount of bytes we read when analyzing each block of flash memory. | ||
24 | * Set it big enough to allow detecting partition and reading important data. | ||
25 | */ | ||
26 | #define BCM47XXPART_BYTES_TO_READ 0x404 | ||
27 | |||
22 | /* Magics */ | 28 | /* Magics */ |
23 | #define BOARD_DATA_MAGIC 0x5246504D /* MPFR */ | 29 | #define BOARD_DATA_MAGIC 0x5246504D /* MPFR */ |
24 | #define POT_MAGIC1 0x54544f50 /* POTT */ | 30 | #define POT_MAGIC1 0x54544f50 /* POTT */ |
@@ -57,17 +63,15 @@ static int bcm47xxpart_parse(struct mtd_info *master, | |||
57 | struct trx_header *trx; | 63 | struct trx_header *trx; |
58 | int trx_part = -1; | 64 | int trx_part = -1; |
59 | int last_trx_part = -1; | 65 | int last_trx_part = -1; |
60 | int max_bytes_to_read = 0x8004; | 66 | int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, }; |
61 | 67 | ||
62 | if (blocksize <= 0x10000) | 68 | if (blocksize <= 0x10000) |
63 | blocksize = 0x10000; | 69 | blocksize = 0x10000; |
64 | if (blocksize == 0x20000) | ||
65 | max_bytes_to_read = 0x18004; | ||
66 | 70 | ||
67 | /* Alloc */ | 71 | /* Alloc */ |
68 | parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS, | 72 | parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS, |
69 | GFP_KERNEL); | 73 | GFP_KERNEL); |
70 | buf = kzalloc(max_bytes_to_read, GFP_KERNEL); | 74 | buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL); |
71 | 75 | ||
72 | /* Parse block by block looking for magics */ | 76 | /* Parse block by block looking for magics */ |
73 | for (offset = 0; offset <= master->size - blocksize; | 77 | for (offset = 0; offset <= master->size - blocksize; |
@@ -82,7 +86,7 @@ static int bcm47xxpart_parse(struct mtd_info *master, | |||
82 | } | 86 | } |
83 | 87 | ||
84 | /* Read beginning of the block */ | 88 | /* Read beginning of the block */ |
85 | if (mtd_read(master, offset, max_bytes_to_read, | 89 | if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ, |
86 | &bytes_read, (uint8_t *)buf) < 0) { | 90 | &bytes_read, (uint8_t *)buf) < 0) { |
87 | pr_err("mtd_read error while parsing (offset: 0x%X)!\n", | 91 | pr_err("mtd_read error while parsing (offset: 0x%X)!\n", |
88 | offset); | 92 | offset); |
@@ -96,20 +100,6 @@ static int bcm47xxpart_parse(struct mtd_info *master, | |||
96 | continue; | 100 | continue; |
97 | } | 101 | } |
98 | 102 | ||
99 | /* Standard NVRAM */ | ||
100 | if (buf[0x000 / 4] == NVRAM_HEADER || | ||
101 | buf[0x1000 / 4] == NVRAM_HEADER || | ||
102 | buf[0x8000 / 4] == NVRAM_HEADER || | ||
103 | (blocksize == 0x20000 && ( | ||
104 | buf[0x10000 / 4] == NVRAM_HEADER || | ||
105 | buf[0x11000 / 4] == NVRAM_HEADER || | ||
106 | buf[0x18000 / 4] == NVRAM_HEADER))) { | ||
107 | bcm47xxpart_add_part(&parts[curr_part++], "nvram", | ||
108 | offset, 0); | ||
109 | offset = rounddown(offset, blocksize); | ||
110 | continue; | ||
111 | } | ||
112 | |||
113 | /* | 103 | /* |
114 | * board_data starts with board_id which differs across boards, | 104 | * board_data starts with board_id which differs across boards, |
115 | * but we can use 'MPFR' (hopefully) magic at 0x100 | 105 | * but we can use 'MPFR' (hopefully) magic at 0x100 |
@@ -178,6 +168,30 @@ static int bcm47xxpart_parse(struct mtd_info *master, | |||
178 | continue; | 168 | continue; |
179 | } | 169 | } |
180 | } | 170 | } |
171 | |||
172 | /* Look for NVRAM at the end of the last block. */ | ||
173 | for (i = 0; i < ARRAY_SIZE(possible_nvram_sizes); i++) { | ||
174 | if (curr_part > BCM47XXPART_MAX_PARTS) { | ||
175 | pr_warn("Reached maximum number of partitions, scanning stopped!\n"); | ||
176 | break; | ||
177 | } | ||
178 | |||
179 | offset = master->size - possible_nvram_sizes[i]; | ||
180 | if (mtd_read(master, offset, 0x4, &bytes_read, | ||
181 | (uint8_t *)buf) < 0) { | ||
182 | pr_err("mtd_read error while reading at offset 0x%X!\n", | ||
183 | offset); | ||
184 | continue; | ||
185 | } | ||
186 | |||
187 | /* Standard NVRAM */ | ||
188 | if (buf[0] == NVRAM_HEADER) { | ||
189 | bcm47xxpart_add_part(&parts[curr_part++], "nvram", | ||
190 | master->size - blocksize, 0); | ||
191 | break; | ||
192 | } | ||
193 | } | ||
194 | |||
181 | kfree(buf); | 195 | kfree(buf); |
182 | 196 | ||
183 | /* | 197 | /* |
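
The bcm47xxpart.c change above replaces the old per-block NVRAM magic probing (and the oversized 0x8004/0x18004 read buffer it required) with a targeted scan: only 0x404 bytes are read per block for the other partition types, and the NVRAM header is looked for separately at a few known sizes measured back from the end of the flash. A simplified sketch of that end-of-flash probe, with the read callback and the header magic passed in rather than taken from the driver:

    #include <stdint.h>

    /* Probe the known NVRAM sizes from the end of the flash and report the
     * offset whose first word carries the given header magic. */
    static int find_nvram(uint64_t flash_size, uint32_t header_magic,
                          int (*read_word)(uint64_t off, uint32_t *out),
                          uint64_t *nvram_off)
    {
            static const uint64_t sizes[] = { 0x8000, 0xF000, 0x10000 };
            uint32_t magic;
            unsigned int i;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                    uint64_t off = flash_size - sizes[i];

                    if (read_word(off, &magic))
                            continue;              /* read failed, try next */
                    if (magic == header_magic) {
                            *nvram_off = off;
                            return 0;
                    }
            }
            return -1;                             /* no NVRAM found */
    }
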
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 43214151b882..42c63927609d 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -1523,6 +1523,14 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, | |||
1523 | oobreadlen -= toread; | 1523 | oobreadlen -= toread; |
1524 | } | 1524 | } |
1525 | } | 1525 | } |
1526 | |||
1527 | if (chip->options & NAND_NEED_READRDY) { | ||
1528 | /* Apply delay or wait for ready/busy pin */ | ||
1529 | if (!chip->dev_ready) | ||
1530 | udelay(chip->chip_delay); | ||
1531 | else | ||
1532 | nand_wait_ready(mtd); | ||
1533 | } | ||
1526 | } else { | 1534 | } else { |
1527 | memcpy(buf, chip->buffers->databuf + col, bytes); | 1535 | memcpy(buf, chip->buffers->databuf + col, bytes); |
1528 | buf += bytes; | 1536 | buf += bytes; |
@@ -1787,6 +1795,14 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, | |||
1787 | len = min(len, readlen); | 1795 | len = min(len, readlen); |
1788 | buf = nand_transfer_oob(chip, buf, ops, len); | 1796 | buf = nand_transfer_oob(chip, buf, ops, len); |
1789 | 1797 | ||
1798 | if (chip->options & NAND_NEED_READRDY) { | ||
1799 | /* Apply delay or wait for ready/busy pin */ | ||
1800 | if (!chip->dev_ready) | ||
1801 | udelay(chip->chip_delay); | ||
1802 | else | ||
1803 | nand_wait_ready(mtd); | ||
1804 | } | ||
1805 | |||
1790 | readlen -= len; | 1806 | readlen -= len; |
1791 | if (!readlen) | 1807 | if (!readlen) |
1792 | break; | 1808 | break; |
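
The nand_base.c hunks above add a poll-or-delay step after each small-page read: chips flagged NAND_NEED_READRDY (see the nand_ids.c table below) must not be sent the next command until the ready/busy line reports ready, or, when no dev_ready callback exists, until the worst-case chip_delay has elapsed. A sketch of that idiom pulled out into a helper; the helper itself is illustrative, not a function in nand_base.c:

    /* Wait until the chip can accept the next command. */
    static void wait_chip_ready(struct mtd_info *mtd, struct nand_chip *chip)
    {
            if (chip->dev_ready)
                    nand_wait_ready(mtd);        /* poll the R/B line */
            else
                    udelay(chip->chip_delay);    /* fixed worst-case delay */
    }
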
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index e3aa2748a6e7..9c612388e5de 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c | |||
@@ -22,49 +22,51 @@ | |||
22 | * 512 512 Byte page size | 22 | * 512 512 Byte page size |
23 | */ | 23 | */ |
24 | struct nand_flash_dev nand_flash_ids[] = { | 24 | struct nand_flash_dev nand_flash_ids[] = { |
25 | #define SP_OPTIONS NAND_NEED_READRDY | ||
26 | #define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16) | ||
25 | 27 | ||
26 | #ifdef CONFIG_MTD_NAND_MUSEUM_IDS | 28 | #ifdef CONFIG_MTD_NAND_MUSEUM_IDS |
27 | {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0}, | 29 | {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, SP_OPTIONS}, |
28 | {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0}, | 30 | {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, SP_OPTIONS}, |
29 | {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0}, | 31 | {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, SP_OPTIONS}, |
30 | {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0}, | 32 | {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, SP_OPTIONS}, |
31 | {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0}, | 33 | {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, SP_OPTIONS}, |
32 | {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0}, | 34 | {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, SP_OPTIONS}, |
33 | {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0}, | 35 | {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, SP_OPTIONS}, |
34 | {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0}, | 36 | {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, SP_OPTIONS}, |
35 | {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0}, | 37 | {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, SP_OPTIONS}, |
36 | {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0}, | 38 | {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, SP_OPTIONS}, |
37 | 39 | ||
38 | {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0}, | 40 | {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, SP_OPTIONS}, |
39 | {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0}, | 41 | {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, SP_OPTIONS}, |
40 | {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16}, | 42 | {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, SP_OPTIONS16}, |
41 | {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16}, | 43 | {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, SP_OPTIONS16}, |
42 | #endif | 44 | #endif |
43 | 45 | ||
44 | {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0}, | 46 | {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, SP_OPTIONS}, |
45 | {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0}, | 47 | {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, SP_OPTIONS}, |
46 | {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16}, | 48 | {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, SP_OPTIONS16}, |
47 | {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16}, | 49 | {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, SP_OPTIONS16}, |
48 | 50 | ||
49 | {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0}, | 51 | {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, SP_OPTIONS}, |
50 | {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0}, | 52 | {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, SP_OPTIONS}, |
51 | {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16}, | 53 | {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, SP_OPTIONS16}, |
52 | {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16}, | 54 | {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, SP_OPTIONS16}, |
53 | 55 | ||
54 | {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0}, | 56 | {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, SP_OPTIONS}, |
55 | {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0}, | 57 | {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, SP_OPTIONS}, |
56 | {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16}, | 58 | {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, SP_OPTIONS16}, |
57 | {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16}, | 59 | {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, SP_OPTIONS16}, |
58 | 60 | ||
59 | {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0}, | 61 | {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, SP_OPTIONS}, |
60 | {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, 0}, | 62 | {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, SP_OPTIONS}, |
61 | {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0}, | 63 | {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, SP_OPTIONS}, |
62 | {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16}, | 64 | {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, SP_OPTIONS16}, |
63 | {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, NAND_BUSWIDTH_16}, | 65 | {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, SP_OPTIONS16}, |
64 | {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16}, | 66 | {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, SP_OPTIONS16}, |
65 | {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, NAND_BUSWIDTH_16}, | 67 | {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, SP_OPTIONS16}, |
66 | 68 | ||
67 | {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0}, | 69 | {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, SP_OPTIONS}, |
68 | 70 | ||
69 | /* | 71 | /* |
70 | * These are the new chips with large page size. The pagesize and the | 72 | * These are the new chips with large page size. The pagesize and the |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 8b4e96e01d6c..07401a3e256b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1746,6 +1746,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1746 | 1746 | ||
1747 | bond_compute_features(bond); | 1747 | bond_compute_features(bond); |
1748 | 1748 | ||
1749 | bond_update_speed_duplex(new_slave); | ||
1750 | |||
1749 | read_lock(&bond->lock); | 1751 | read_lock(&bond->lock); |
1750 | 1752 | ||
1751 | new_slave->last_arp_rx = jiffies - | 1753 | new_slave->last_arp_rx = jiffies - |
@@ -1798,8 +1800,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) | |||
1798 | new_slave->link == BOND_LINK_DOWN ? "DOWN" : | 1800 | new_slave->link == BOND_LINK_DOWN ? "DOWN" : |
1799 | (new_slave->link == BOND_LINK_UP ? "UP" : "BACK")); | 1801 | (new_slave->link == BOND_LINK_UP ? "UP" : "BACK")); |
1800 | 1802 | ||
1801 | bond_update_speed_duplex(new_slave); | ||
1802 | |||
1803 | if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { | 1803 | if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { |
1804 | /* if there is a primary slave, remember it */ | 1804 | /* if there is a primary slave, remember it */ |
1805 | if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { | 1805 | if (strcmp(bond->params.primary, new_slave->dev->name) == 0) { |
@@ -1976,12 +1976,11 @@ static int __bond_release_one(struct net_device *bond_dev, | |||
1976 | return -EINVAL; | 1976 | return -EINVAL; |
1977 | } | 1977 | } |
1978 | 1978 | ||
1979 | write_unlock_bh(&bond->lock); | ||
1979 | /* unregister rx_handler early so bond_handle_frame wouldn't be called | 1980 | /* unregister rx_handler early so bond_handle_frame wouldn't be called |
1980 | * for this slave anymore. | 1981 | * for this slave anymore. |
1981 | */ | 1982 | */ |
1982 | netdev_rx_handler_unregister(slave_dev); | 1983 | netdev_rx_handler_unregister(slave_dev); |
1983 | write_unlock_bh(&bond->lock); | ||
1984 | synchronize_net(); | ||
1985 | write_lock_bh(&bond->lock); | 1984 | write_lock_bh(&bond->lock); |
1986 | 1985 | ||
1987 | if (!all && !bond->params.fail_over_mac) { | 1986 | if (!all && !bond->params.fail_over_mac) { |
@@ -2374,8 +2373,6 @@ static void bond_miimon_commit(struct bonding *bond) | |||
2374 | bond_set_backup_slave(slave); | 2373 | bond_set_backup_slave(slave); |
2375 | } | 2374 | } |
2376 | 2375 | ||
2377 | bond_update_speed_duplex(slave); | ||
2378 | |||
2379 | pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", | 2376 | pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n", |
2380 | bond->dev->name, slave->dev->name, | 2377 | bond->dev->name, slave->dev->name, |
2381 | slave->speed, slave->duplex ? "full" : "half"); | 2378 | slave->speed, slave->duplex ? "full" : "half"); |
@@ -4849,9 +4846,18 @@ static int __net_init bond_net_init(struct net *net) | |||
4849 | static void __net_exit bond_net_exit(struct net *net) | 4846 | static void __net_exit bond_net_exit(struct net *net) |
4850 | { | 4847 | { |
4851 | struct bond_net *bn = net_generic(net, bond_net_id); | 4848 | struct bond_net *bn = net_generic(net, bond_net_id); |
4849 | struct bonding *bond, *tmp_bond; | ||
4850 | LIST_HEAD(list); | ||
4852 | 4851 | ||
4853 | bond_destroy_sysfs(bn); | 4852 | bond_destroy_sysfs(bn); |
4854 | bond_destroy_proc_dir(bn); | 4853 | bond_destroy_proc_dir(bn); |
4854 | |||
4855 | /* Kill off any bonds created after unregistering bond rtnl ops */ | ||
4856 | rtnl_lock(); | ||
4857 | list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list) | ||
4858 | unregister_netdevice_queue(bond->dev, &list); | ||
4859 | unregister_netdevice_many(&list); | ||
4860 | rtnl_unlock(); | ||
4855 | } | 4861 | } |
4856 | 4862 | ||
4857 | static struct pernet_operations bond_net_ops = { | 4863 | static struct pernet_operations bond_net_ops = { |
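The bond_net_exit() hunk above collects every bond still present in the namespace onto a local list and tears the whole batch down under a single rtnl_lock(). Below is a minimal userspace sketch of that collect-then-commit pattern; the struct and names are invented for illustration, and only the shape of the flow (queue under the lock, commit once) mirrors what unregister_netdevice_queue()/unregister_netdevice_many() do in the hunk.

    #include <stdio.h>

    /* Invented types; only the collect-then-commit shape is taken from
     * the bond_net_exit() change above. */
    struct fake_dev {
        const char *name;
        struct fake_dev *next;
    };

    /* analogue of unregister_netdevice_queue(): park the device on a list */
    static void queue_for_removal(struct fake_dev *d, struct fake_dev **list)
    {
        d->next = *list;
        *list = d;
    }

    /* analogue of unregister_netdevice_many(): commit the whole batch once */
    static void remove_many(struct fake_dev *list)
    {
        while (list) {
            printf("unregistering %s\n", list->name);
            list = list->next;
        }
    }

    int main(void)
    {
        struct fake_dev bond0 = { "bond0", NULL }, bond1 = { "bond1", NULL };
        struct fake_dev *kill_list = NULL;

        /* in the kernel both steps run under one rtnl_lock()/rtnl_unlock() */
        queue_for_removal(&bond0, &kill_list);
        queue_for_removal(&bond1, &kill_list);
        remove_many(kill_list);
        return 0;
    }
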
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 1c9e09fbdff8..ea7a388f4843 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c | |||
@@ -183,6 +183,11 @@ int bond_create_slave_symlinks(struct net_device *master, | |||
183 | sprintf(linkname, "slave_%s", slave->name); | 183 | sprintf(linkname, "slave_%s", slave->name); |
184 | ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj), | 184 | ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj), |
185 | linkname); | 185 | linkname); |
186 | |||
187 | /* free the master link created earlier in case of error */ | ||
188 | if (ret) | ||
189 | sysfs_remove_link(&(slave->dev.kobj), "master"); | ||
190 | |||
186 | return ret; | 191 | return ret; |
187 | 192 | ||
188 | } | 193 | } |
@@ -522,7 +527,7 @@ static ssize_t bonding_store_arp_interval(struct device *d, | |||
522 | goto out; | 527 | goto out; |
523 | } | 528 | } |
524 | if (new_value < 0) { | 529 | if (new_value < 0) { |
525 | pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n", | 530 | pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n", |
526 | bond->dev->name, new_value, INT_MAX); | 531 | bond->dev->name, new_value, INT_MAX); |
527 | ret = -EINVAL; | 532 | ret = -EINVAL; |
528 | goto out; | 533 | goto out; |
@@ -537,14 +542,15 @@ static ssize_t bonding_store_arp_interval(struct device *d, | |||
537 | pr_info("%s: Setting ARP monitoring interval to %d.\n", | 542 | pr_info("%s: Setting ARP monitoring interval to %d.\n", |
538 | bond->dev->name, new_value); | 543 | bond->dev->name, new_value); |
539 | bond->params.arp_interval = new_value; | 544 | bond->params.arp_interval = new_value; |
540 | if (bond->params.miimon) { | 545 | if (new_value) { |
541 | pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", | 546 | if (bond->params.miimon) { |
542 | bond->dev->name, bond->dev->name); | 547 | pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", |
543 | bond->params.miimon = 0; | 548 | bond->dev->name, bond->dev->name); |
544 | } | 549 | bond->params.miimon = 0; |
545 | if (!bond->params.arp_targets[0]) { | 550 | } |
546 | pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", | 551 | if (!bond->params.arp_targets[0]) |
547 | bond->dev->name); | 552 | pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", |
553 | bond->dev->name); | ||
548 | } | 554 | } |
549 | if (bond->dev->flags & IFF_UP) { | 555 | if (bond->dev->flags & IFF_UP) { |
550 | /* If the interface is up, we may need to fire off | 556 | /* If the interface is up, we may need to fire off |
@@ -552,10 +558,13 @@ static ssize_t bonding_store_arp_interval(struct device *d, | |||
552 | * timer will get fired off when the open function | 558 | * timer will get fired off when the open function |
553 | * is called. | 559 | * is called. |
554 | */ | 560 | */ |
555 | cancel_delayed_work_sync(&bond->mii_work); | 561 | if (!new_value) { |
556 | queue_delayed_work(bond->wq, &bond->arp_work, 0); | 562 | cancel_delayed_work_sync(&bond->arp_work); |
563 | } else { | ||
564 | cancel_delayed_work_sync(&bond->mii_work); | ||
565 | queue_delayed_work(bond->wq, &bond->arp_work, 0); | ||
566 | } | ||
557 | } | 567 | } |
558 | |||
559 | out: | 568 | out: |
560 | rtnl_unlock(); | 569 | rtnl_unlock(); |
561 | return ret; | 570 | return ret; |
@@ -697,7 +706,7 @@ static ssize_t bonding_store_downdelay(struct device *d, | |||
697 | } | 706 | } |
698 | if (new_value < 0) { | 707 | if (new_value < 0) { |
699 | pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", | 708 | pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", |
700 | bond->dev->name, new_value, 1, INT_MAX); | 709 | bond->dev->name, new_value, 0, INT_MAX); |
701 | ret = -EINVAL; | 710 | ret = -EINVAL; |
702 | goto out; | 711 | goto out; |
703 | } else { | 712 | } else { |
@@ -752,8 +761,8 @@ static ssize_t bonding_store_updelay(struct device *d, | |||
752 | goto out; | 761 | goto out; |
753 | } | 762 | } |
754 | if (new_value < 0) { | 763 | if (new_value < 0) { |
755 | pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", | 764 | pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n", |
756 | bond->dev->name, new_value, 1, INT_MAX); | 765 | bond->dev->name, new_value, 0, INT_MAX); |
757 | ret = -EINVAL; | 766 | ret = -EINVAL; |
758 | goto out; | 767 | goto out; |
759 | } else { | 768 | } else { |
@@ -963,37 +972,37 @@ static ssize_t bonding_store_miimon(struct device *d, | |||
963 | } | 972 | } |
964 | if (new_value < 0) { | 973 | if (new_value < 0) { |
965 | pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n", | 974 | pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n", |
966 | bond->dev->name, new_value, 1, INT_MAX); | 975 | bond->dev->name, new_value, 0, INT_MAX); |
967 | ret = -EINVAL; | 976 | ret = -EINVAL; |
968 | goto out; | 977 | goto out; |
969 | } else { | 978 | } |
970 | pr_info("%s: Setting MII monitoring interval to %d.\n", | 979 | pr_info("%s: Setting MII monitoring interval to %d.\n", |
971 | bond->dev->name, new_value); | 980 | bond->dev->name, new_value); |
972 | bond->params.miimon = new_value; | 981 | bond->params.miimon = new_value; |
973 | if (bond->params.updelay) | 982 | if (bond->params.updelay) |
974 | pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", | 983 | pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", |
975 | bond->dev->name, | 984 | bond->dev->name, |
976 | bond->params.updelay * bond->params.miimon); | 985 | bond->params.updelay * bond->params.miimon); |
977 | if (bond->params.downdelay) | 986 | if (bond->params.downdelay) |
978 | pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", | 987 | pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", |
979 | bond->dev->name, | 988 | bond->dev->name, |
980 | bond->params.downdelay * bond->params.miimon); | 989 | bond->params.downdelay * bond->params.miimon); |
981 | if (bond->params.arp_interval) { | 990 | if (new_value && bond->params.arp_interval) { |
982 | pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", | 991 | pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", |
983 | bond->dev->name); | 992 | bond->dev->name); |
984 | bond->params.arp_interval = 0; | 993 | bond->params.arp_interval = 0; |
985 | if (bond->params.arp_validate) { | 994 | if (bond->params.arp_validate) |
986 | bond->params.arp_validate = | 995 | bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; |
987 | BOND_ARP_VALIDATE_NONE; | 996 | } |
988 | } | 997 | if (bond->dev->flags & IFF_UP) { |
989 | } | 998 | /* If the interface is up, we may need to fire off |
990 | 999 | * the MII timer. If the interface is down, the | |
991 | if (bond->dev->flags & IFF_UP) { | 1000 | * timer will get fired off when the open function |
992 | /* If the interface is up, we may need to fire off | 1001 | * is called. |
993 | * the MII timer. If the interface is down, the | 1002 | */ |
994 | * timer will get fired off when the open function | 1003 | if (!new_value) { |
995 | * is called. | 1004 | cancel_delayed_work_sync(&bond->mii_work); |
996 | */ | 1005 | } else { |
997 | cancel_delayed_work_sync(&bond->arp_work); | 1006 | cancel_delayed_work_sync(&bond->arp_work); |
998 | queue_delayed_work(bond->wq, &bond->mii_work, 0); | 1007 | queue_delayed_work(bond->wq, &bond->mii_work, 0); |
999 | } | 1008 | } |
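The two bond_sysfs.c hunks above change what writing 0 means: a zero arp_interval (or miimon) now disables that monitor and cancels its delayed work, and the competing monitor is only switched off when a non-zero value is written. Here is a minimal sketch of that decision flow for the arp_interval side only; the bond_cfg struct stands in for struct bonding and the booleans stand in for the queued work items, none of this is kernel API.

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented stand-in for struct bonding. */
    struct bond_cfg {
        int arp_interval;
        int miimon;
        bool arp_work_queued;
        bool mii_work_queued;
    };

    /* Mirrors the decision flow of bonding_store_arp_interval() above:
     * 0 disables ARP monitoring, a non-zero value arms it and switches
     * the competing MII monitor off. */
    static void store_arp_interval(struct bond_cfg *b, int new_value, bool if_up)
    {
        b->arp_interval = new_value;

        if (new_value && b->miimon)
            b->miimon = 0;                    /* ARP and MII monitors are exclusive */

        if (if_up) {
            if (!new_value) {
                b->arp_work_queued = false;   /* cancel arp_work */
            } else {
                b->mii_work_queued = false;   /* cancel mii_work */
                b->arp_work_queued = true;    /* queue arp_work immediately */
            }
        }
    }

    int main(void)
    {
        struct bond_cfg b = { .miimon = 100, .mii_work_queued = true };

        store_arp_interval(&b, 1000, true);
        printf("arp=%d mii=%d arp_work=%d mii_work=%d\n", b.arp_interval,
               b.miimon, b.arp_work_queued, b.mii_work_queued);

        store_arp_interval(&b, 0, true);
        printf("arp=%d mii=%d arp_work=%d mii_work=%d\n", b.arp_interval,
               b.miimon, b.arp_work_queued, b.mii_work_queued);
        return 0;
    }
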
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index b39ca5b3ea7f..ff2ba86cd4a4 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig | |||
@@ -46,6 +46,7 @@ config CAN_EMS_PCI | |||
46 | config CAN_PEAK_PCMCIA | 46 | config CAN_PEAK_PCMCIA |
47 | tristate "PEAK PCAN-PC Card" | 47 | tristate "PEAK PCAN-PC Card" |
48 | depends on PCMCIA | 48 | depends on PCMCIA |
49 | depends on HAS_IOPORT | ||
49 | ---help--- | 50 | ---help--- |
50 | This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels) | 51 | This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels) |
51 | from PEAK-System (http://www.peak-system.com). To compile this | 52 | from PEAK-System (http://www.peak-system.com). To compile this |
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index a042cdc260dc..3c18d7d000ed 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c | |||
@@ -348,7 +348,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv) | |||
348 | */ | 348 | */ |
349 | if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) == | 349 | if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) == |
350 | REG_CR_BASICCAN_INITIAL && | 350 | REG_CR_BASICCAN_INITIAL && |
351 | (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) && | 351 | (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) && |
352 | (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL)) | 352 | (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL)) |
353 | flag = 1; | 353 | flag = 1; |
354 | 354 | ||
@@ -360,7 +360,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv) | |||
360 | * See states on p. 23 of the Datasheet. | 360 | * See states on p. 23 of the Datasheet. |
361 | */ | 361 | */ |
362 | if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL && | 362 | if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL && |
363 | priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL && | 363 | priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL && |
364 | priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL) | 364 | priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL) |
365 | return flag; | 365 | return flag; |
366 | 366 | ||
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index daf4013a8fc7..e4df307eaa90 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c | |||
@@ -92,7 +92,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val) | |||
92 | */ | 92 | */ |
93 | spin_lock_irqsave(&priv->cmdreg_lock, flags); | 93 | spin_lock_irqsave(&priv->cmdreg_lock, flags); |
94 | priv->write_reg(priv, REG_CMR, val); | 94 | priv->write_reg(priv, REG_CMR, val); |
95 | priv->read_reg(priv, REG_SR); | 95 | priv->read_reg(priv, SJA1000_REG_SR); |
96 | spin_unlock_irqrestore(&priv->cmdreg_lock, flags); | 96 | spin_unlock_irqrestore(&priv->cmdreg_lock, flags); |
97 | } | 97 | } |
98 | 98 | ||
@@ -502,7 +502,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) | |||
502 | 502 | ||
503 | while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) { | 503 | while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) { |
504 | n++; | 504 | n++; |
505 | status = priv->read_reg(priv, REG_SR); | 505 | status = priv->read_reg(priv, SJA1000_REG_SR); |
506 | /* check for absent controller due to hw unplug */ | 506 | /* check for absent controller due to hw unplug */ |
507 | if (status == 0xFF && sja1000_is_absent(priv)) | 507 | if (status == 0xFF && sja1000_is_absent(priv)) |
508 | return IRQ_NONE; | 508 | return IRQ_NONE; |
@@ -530,7 +530,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) | |||
530 | /* receive interrupt */ | 530 | /* receive interrupt */ |
531 | while (status & SR_RBS) { | 531 | while (status & SR_RBS) { |
532 | sja1000_rx(dev); | 532 | sja1000_rx(dev); |
533 | status = priv->read_reg(priv, REG_SR); | 533 | status = priv->read_reg(priv, SJA1000_REG_SR); |
534 | /* check for absent controller */ | 534 | /* check for absent controller */ |
535 | if (status == 0xFF && sja1000_is_absent(priv)) | 535 | if (status == 0xFF && sja1000_is_absent(priv)) |
536 | return IRQ_NONE; | 536 | return IRQ_NONE; |
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h index afa99847a510..aa48e053da27 100644 --- a/drivers/net/can/sja1000/sja1000.h +++ b/drivers/net/can/sja1000/sja1000.h | |||
@@ -56,7 +56,7 @@ | |||
56 | /* SJA1000 registers - manual section 6.4 (Pelican Mode) */ | 56 | /* SJA1000 registers - manual section 6.4 (Pelican Mode) */ |
57 | #define REG_MOD 0x00 | 57 | #define REG_MOD 0x00 |
58 | #define REG_CMR 0x01 | 58 | #define REG_CMR 0x01 |
59 | #define REG_SR 0x02 | 59 | #define SJA1000_REG_SR 0x02 |
60 | #define REG_IR 0x03 | 60 | #define REG_IR 0x03 |
61 | #define REG_IER 0x04 | 61 | #define REG_IER 0x04 |
62 | #define REG_ALC 0x0B | 62 | #define REG_ALC 0x0B |
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h index 829b5ad71d0d..b5fd934585e9 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e.h +++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h | |||
@@ -186,7 +186,7 @@ struct atl1e_tpd_desc { | |||
186 | /* how about 0x2000 */ | 186 | /* how about 0x2000 */ |
187 | #define MAX_TX_BUF_LEN 0x2000 | 187 | #define MAX_TX_BUF_LEN 0x2000 |
188 | #define MAX_TX_BUF_SHIFT 13 | 188 | #define MAX_TX_BUF_SHIFT 13 |
189 | /*#define MAX_TX_BUF_LEN 0x3000 */ | 189 | #define MAX_TSO_SEG_SIZE 0x3c00 |
190 | 190 | ||
191 | /* rrs word 1 bit 0:31 */ | 191 | /* rrs word 1 bit 0:31 */ |
192 | #define RRS_RX_CSUM_MASK 0xFFFF | 192 | #define RRS_RX_CSUM_MASK 0xFFFF |
@@ -438,7 +438,6 @@ struct atl1e_adapter { | |||
438 | struct atl1e_hw hw; | 438 | struct atl1e_hw hw; |
439 | struct atl1e_hw_stats hw_stats; | 439 | struct atl1e_hw_stats hw_stats; |
440 | 440 | ||
441 | bool have_msi; | ||
442 | u32 wol; | 441 | u32 wol; |
443 | u16 link_speed; | 442 | u16 link_speed; |
444 | u16 link_duplex; | 443 | u16 link_duplex; |
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 92f4734f860d..ac25f05ff68f 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c | |||
@@ -1849,34 +1849,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter) | |||
1849 | struct net_device *netdev = adapter->netdev; | 1849 | struct net_device *netdev = adapter->netdev; |
1850 | 1850 | ||
1851 | free_irq(adapter->pdev->irq, netdev); | 1851 | free_irq(adapter->pdev->irq, netdev); |
1852 | |||
1853 | if (adapter->have_msi) | ||
1854 | pci_disable_msi(adapter->pdev); | ||
1855 | } | 1852 | } |
1856 | 1853 | ||
1857 | static int atl1e_request_irq(struct atl1e_adapter *adapter) | 1854 | static int atl1e_request_irq(struct atl1e_adapter *adapter) |
1858 | { | 1855 | { |
1859 | struct pci_dev *pdev = adapter->pdev; | 1856 | struct pci_dev *pdev = adapter->pdev; |
1860 | struct net_device *netdev = adapter->netdev; | 1857 | struct net_device *netdev = adapter->netdev; |
1861 | int flags = 0; | ||
1862 | int err = 0; | 1858 | int err = 0; |
1863 | 1859 | ||
1864 | adapter->have_msi = true; | 1860 | err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name, |
1865 | err = pci_enable_msi(pdev); | 1861 | netdev); |
1866 | if (err) { | ||
1867 | netdev_dbg(netdev, | ||
1868 | "Unable to allocate MSI interrupt Error: %d\n", err); | ||
1869 | adapter->have_msi = false; | ||
1870 | } | ||
1871 | |||
1872 | if (!adapter->have_msi) | ||
1873 | flags |= IRQF_SHARED; | ||
1874 | err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev); | ||
1875 | if (err) { | 1862 | if (err) { |
1876 | netdev_dbg(adapter->netdev, | 1863 | netdev_dbg(adapter->netdev, |
1877 | "Unable to allocate interrupt Error: %d\n", err); | 1864 | "Unable to allocate interrupt Error: %d\n", err); |
1878 | if (adapter->have_msi) | ||
1879 | pci_disable_msi(pdev); | ||
1880 | return err; | 1865 | return err; |
1881 | } | 1866 | } |
1882 | netdev_dbg(netdev, "atl1e_request_irq OK\n"); | 1867 | netdev_dbg(netdev, "atl1e_request_irq OK\n"); |
@@ -2344,6 +2329,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2344 | 2329 | ||
2345 | INIT_WORK(&adapter->reset_task, atl1e_reset_task); | 2330 | INIT_WORK(&adapter->reset_task, atl1e_reset_task); |
2346 | INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); | 2331 | INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); |
2332 | netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE); | ||
2347 | err = register_netdev(netdev); | 2333 | err = register_netdev(netdev); |
2348 | if (err) { | 2334 | if (err) { |
2349 | netdev_err(netdev, "register netdevice failed\n"); | 2335 | netdev_err(netdev, "register netdevice failed\n"); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index a923bc4d5a1f..4046f97378c2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -2760,6 +2760,7 @@ load_error2: | |||
2760 | bp->port.pmf = 0; | 2760 | bp->port.pmf = 0; |
2761 | load_error1: | 2761 | load_error1: |
2762 | bnx2x_napi_disable(bp); | 2762 | bnx2x_napi_disable(bp); |
2763 | bnx2x_del_all_napi(bp); | ||
2763 | 2764 | ||
2764 | /* clear pf_load status, as it was already set */ | 2765 | /* clear pf_load status, as it was already set */ |
2765 | if (IS_PF(bp)) | 2766 | if (IS_PF(bp)) |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 568205436a15..91ecd6a00d05 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | |||
@@ -2139,12 +2139,12 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) | |||
2139 | break; | 2139 | break; |
2140 | default: | 2140 | default: |
2141 | BNX2X_ERR("Non valid capability ID\n"); | 2141 | BNX2X_ERR("Non valid capability ID\n"); |
2142 | rval = -EINVAL; | 2142 | rval = 1; |
2143 | break; | 2143 | break; |
2144 | } | 2144 | } |
2145 | } else { | 2145 | } else { |
2146 | DP(BNX2X_MSG_DCB, "DCB disabled\n"); | 2146 | DP(BNX2X_MSG_DCB, "DCB disabled\n"); |
2147 | rval = -EINVAL; | 2147 | rval = 1; |
2148 | } | 2148 | } |
2149 | 2149 | ||
2150 | DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap); | 2150 | DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap); |
@@ -2170,12 +2170,12 @@ static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num) | |||
2170 | break; | 2170 | break; |
2171 | default: | 2171 | default: |
2172 | BNX2X_ERR("Non valid TC-ID\n"); | 2172 | BNX2X_ERR("Non valid TC-ID\n"); |
2173 | rval = -EINVAL; | 2173 | rval = 1; |
2174 | break; | 2174 | break; |
2175 | } | 2175 | } |
2176 | } else { | 2176 | } else { |
2177 | DP(BNX2X_MSG_DCB, "DCB disabled\n"); | 2177 | DP(BNX2X_MSG_DCB, "DCB disabled\n"); |
2178 | rval = -EINVAL; | 2178 | rval = 1; |
2179 | } | 2179 | } |
2180 | 2180 | ||
2181 | return rval; | 2181 | return rval; |
@@ -2188,7 +2188,7 @@ static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num) | |||
2188 | return -EINVAL; | 2188 | return -EINVAL; |
2189 | } | 2189 | } |
2190 | 2190 | ||
2191 | static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) | 2191 | static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev) |
2192 | { | 2192 | { |
2193 | struct bnx2x *bp = netdev_priv(netdev); | 2193 | struct bnx2x *bp = netdev_priv(netdev); |
2194 | DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); | 2194 | DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled); |
@@ -2390,12 +2390,12 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid, | |||
2390 | break; | 2390 | break; |
2391 | default: | 2391 | default: |
2392 | BNX2X_ERR("Non valid featrue-ID\n"); | 2392 | BNX2X_ERR("Non valid featrue-ID\n"); |
2393 | rval = -EINVAL; | 2393 | rval = 1; |
2394 | break; | 2394 | break; |
2395 | } | 2395 | } |
2396 | } else { | 2396 | } else { |
2397 | DP(BNX2X_MSG_DCB, "DCB disabled\n"); | 2397 | DP(BNX2X_MSG_DCB, "DCB disabled\n"); |
2398 | rval = -EINVAL; | 2398 | rval = 1; |
2399 | } | 2399 | } |
2400 | 2400 | ||
2401 | return rval; | 2401 | return rval; |
@@ -2431,12 +2431,12 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid, | |||
2431 | break; | 2431 | break; |
2432 | default: | 2432 | default: |
2433 | BNX2X_ERR("Non valid featrue-ID\n"); | 2433 | BNX2X_ERR("Non valid featrue-ID\n"); |
2434 | rval = -EINVAL; | 2434 | rval = 1; |
2435 | break; | 2435 | break; |
2436 | } | 2436 | } |
2437 | } else { | 2437 | } else { |
2438 | DP(BNX2X_MSG_DCB, "dcbnl call not valid\n"); | 2438 | DP(BNX2X_MSG_DCB, "dcbnl call not valid\n"); |
2439 | rval = -EINVAL; | 2439 | rval = 1; |
2440 | } | 2440 | } |
2441 | 2441 | ||
2442 | return rval; | 2442 | return rval; |
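The switch from -EINVAL to 1 in these dcbnl callbacks matters because several of the hooks hand their result back through a u8 (bnx2x_dcbnl_get_cap() itself returns u8 in the hunk header above), so a negative errno is truncated to a large positive byte instead of reaching the caller as an error; 1 is the failure value the rest of this driver already uses. A standalone demonstration of the truncation, assuming only the standard Linux EINVAL value of 22:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A callback that (wrongly) reports failure with a negative errno even
     * though its return type is only eight bits wide. */
    static uint8_t broken_get_cap(void)
    {
        return -EINVAL;        /* truncated: -22 becomes 234 */
    }

    static uint8_t fixed_get_cap(void)
    {
        return 1;              /* non-zero failure value that survives a u8 */
    }

    int main(void)
    {
        printf("broken: %u  fixed: %u\n", broken_get_cap(), fixed_get_cap());
        return 0;
    }
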
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 77ebae0ac64a..0283f343b0d1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | |||
@@ -13437,13 +13437,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13437 | { | 13437 | { |
13438 | struct bnx2x *bp = params->bp; | 13438 | struct bnx2x *bp = params->bp; |
13439 | u16 base_page, next_page, not_kr2_device, lane; | 13439 | u16 base_page, next_page, not_kr2_device, lane; |
13440 | int sigdet = bnx2x_warpcore_get_sigdet(phy, params); | 13440 | int sigdet; |
13441 | |||
13442 | if (!sigdet) { | ||
13443 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) | ||
13444 | bnx2x_kr2_recovery(params, vars, phy); | ||
13445 | return; | ||
13446 | } | ||
13447 | 13441 | ||
13448 | /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery | 13442 | /* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery |
13449 | * since some switches tend to reinit the AN process and clear the | 13443 | * since some switches tend to reinit the AN process and clear the |
@@ -13454,6 +13448,16 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13454 | vars->check_kr2_recovery_cnt--; | 13448 | vars->check_kr2_recovery_cnt--; |
13455 | return; | 13449 | return; |
13456 | } | 13450 | } |
13451 | |||
13452 | sigdet = bnx2x_warpcore_get_sigdet(phy, params); | ||
13453 | if (!sigdet) { | ||
13454 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { | ||
13455 | bnx2x_kr2_recovery(params, vars, phy); | ||
13456 | DP(NETIF_MSG_LINK, "No sigdet\n"); | ||
13457 | } | ||
13458 | return; | ||
13459 | } | ||
13460 | |||
13457 | lane = bnx2x_get_warpcore_lane(phy, params); | 13461 | lane = bnx2x_get_warpcore_lane(phy, params); |
13458 | CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, | 13462 | CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, |
13459 | MDIO_AER_BLOCK_AER_REG, lane); | 13463 | MDIO_AER_BLOCK_AER_REG, lane); |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e81a747ea8ce..8e58da909f5c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -4947,7 +4947,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp) | |||
4947 | q); | 4947 | q); |
4948 | } | 4948 | } |
4949 | 4949 | ||
4950 | if (!NO_FCOE(bp)) { | 4950 | if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) { |
4951 | fp = &bp->fp[FCOE_IDX(bp)]; | 4951 | fp = &bp->fp[FCOE_IDX(bp)]; |
4952 | queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; | 4952 | queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; |
4953 | 4953 | ||
@@ -13354,6 +13354,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev) | |||
13354 | RCU_INIT_POINTER(bp->cnic_ops, NULL); | 13354 | RCU_INIT_POINTER(bp->cnic_ops, NULL); |
13355 | mutex_unlock(&bp->cnic_mutex); | 13355 | mutex_unlock(&bp->cnic_mutex); |
13356 | synchronize_rcu(); | 13356 | synchronize_rcu(); |
13357 | bp->cnic_enabled = false; | ||
13357 | kfree(bp->cnic_kwq); | 13358 | kfree(bp->cnic_kwq); |
13358 | bp->cnic_kwq = NULL; | 13359 | bp->cnic_kwq = NULL; |
13359 | 13360 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h index 364e37ecbc5c..198f6f1c9ad5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | |||
@@ -459,8 +459,9 @@ struct bnx2x_fw_port_stats_old { | |||
459 | 459 | ||
460 | #define UPDATE_QSTAT(s, t) \ | 460 | #define UPDATE_QSTAT(s, t) \ |
461 | do { \ | 461 | do { \ |
462 | qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \ | ||
463 | qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \ | 462 | qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \ |
463 | qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \ | ||
464 | + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \ | ||
464 | } while (0) | 465 | } while (0) |
465 | 466 | ||
466 | #define UPDATE_QSTAT_OLD(f) \ | 467 | #define UPDATE_QSTAT_OLD(f) \ |
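The reworked UPDATE_QSTAT adds the low 32-bit halves first and then folds the carry into the high word by checking for unsigned wraparound (the new low word being smaller than the old one). A self-contained sketch of the same split-64-bit accumulation, with invented names and a plain uint64_t as the reference:

    #include <stdint.h>
    #include <stdio.h>

    struct split64 { uint32_t lo, hi; };

    /* Add a (hi,lo) delta to a running (hi,lo) total, propagating the carry
     * out of the low word -- the same idea as the fixed UPDATE_QSTAT. */
    static void split64_add(struct split64 *acc, uint32_t d_hi, uint32_t d_lo)
    {
        uint32_t old_lo = acc->lo;

        acc->lo = old_lo + d_lo;
        /* unsigned wraparound: the sum is smaller than an operand iff it carried */
        acc->hi = acc->hi + d_hi + (acc->lo < old_lo ? 1 : 0);
    }

    int main(void)
    {
        struct split64 acc = { .lo = 0xfffffff0, .hi = 0 };
        uint64_t ref = ((uint64_t)acc.hi << 32) | acc.lo;

        split64_add(&acc, 0, 0x20);     /* forces a carry out of the low word */
        ref += 0x20;

        printf("split: 0x%08x%08x  ref: 0x%016llx\n",
               acc.hi, acc.lo, (unsigned long long)ref);
        return 0;
    }
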
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 93729f942358..17a972734ba7 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -4130,6 +4130,14 @@ static void tg3_phy_copper_begin(struct tg3 *tp) | |||
4130 | tp->link_config.active_speed = tp->link_config.speed; | 4130 | tp->link_config.active_speed = tp->link_config.speed; |
4131 | tp->link_config.active_duplex = tp->link_config.duplex; | 4131 | tp->link_config.active_duplex = tp->link_config.duplex; |
4132 | 4132 | ||
4133 | if (tg3_asic_rev(tp) == ASIC_REV_5714) { | ||
4134 | /* With autoneg disabled, 5715 only links up when the | ||
4135 | * advertisement register has the configured speed | ||
4136 | * enabled. | ||
4137 | */ | ||
4138 | tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL); | ||
4139 | } | ||
4140 | |||
4133 | bmcr = 0; | 4141 | bmcr = 0; |
4134 | switch (tp->link_config.speed) { | 4142 | switch (tp->link_config.speed) { |
4135 | default: | 4143 | default: |
@@ -14596,8 +14604,11 @@ static void tg3_read_vpd(struct tg3 *tp) | |||
14596 | if (j + len > block_end) | 14604 | if (j + len > block_end) |
14597 | goto partno; | 14605 | goto partno; |
14598 | 14606 | ||
14599 | memcpy(tp->fw_ver, &vpd_data[j], len); | 14607 | if (len >= sizeof(tp->fw_ver)) |
14600 | strncat(tp->fw_ver, " bc ", vpdlen - len - 1); | 14608 | len = sizeof(tp->fw_ver) - 1; |
14609 | memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); | ||
14610 | snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, | ||
14611 | &vpd_data[j]); | ||
14601 | } | 14612 | } |
14602 | 14613 | ||
14603 | partno: | 14614 | partno: |
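The tg3_read_vpd() fix above replaces an unbounded memcpy()/strncat() of VPD bytes (which are not NUL-terminated and may be longer than fw_ver) with a clamped length and a single bounded snprintf(). A minimal standalone sketch of that idiom; the buffer names and sizes here are made up, the point is only the "%.*s" plus snprintf() combination, which can neither overflow the target nor leave it unterminated:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* VPD-style data: not NUL terminated, length known separately */
        const char vpd[] = { 'v', '1', '.', '2', '3', '4', '5', '6', '7', '8' };
        char fw_ver[16];
        size_t len = sizeof(vpd);

        if (len >= sizeof(fw_ver))
            len = sizeof(fw_ver) - 1;

        /* "%.*s" copies at most len bytes; snprintf() always terminates */
        snprintf(fw_ver, sizeof(fw_ver), "%.*s bc ", (int)len, vpd);
        printf("'%s'\n", fw_ver);
        return 0;
    }
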
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c index a170065b5973..b0ebc9f6d55e 100644 --- a/drivers/net/ethernet/calxeda/xgmac.c +++ b/drivers/net/ethernet/calxeda/xgmac.c | |||
@@ -163,6 +163,7 @@ | |||
163 | #define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ | 163 | #define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */ |
164 | 164 | ||
165 | /* XGMAC_INT_STAT reg */ | 165 | /* XGMAC_INT_STAT reg */ |
166 | #define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */ | ||
166 | #define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */ | 167 | #define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */ |
167 | #define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */ | 168 | #define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */ |
168 | 169 | ||
@@ -960,6 +961,9 @@ static int xgmac_hw_init(struct net_device *dev) | |||
960 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); | 961 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS); |
961 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); | 962 | writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA); |
962 | 963 | ||
964 | /* Mask power mgt interrupt */ | ||
965 | writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT); | ||
966 | |||
963 | /* XGMAC requires AXI bus init. This is a 'magic number' for now */ | 967 | /* XGMAC requires AXI bus init. This is a 'magic number' for now */ |
964 | writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS); | 968 | writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS); |
965 | 969 | ||
@@ -1141,6 +1145,9 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit) | |||
1141 | struct sk_buff *skb; | 1145 | struct sk_buff *skb; |
1142 | int frame_len; | 1146 | int frame_len; |
1143 | 1147 | ||
1148 | if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ)) | ||
1149 | break; | ||
1150 | |||
1144 | entry = priv->rx_tail; | 1151 | entry = priv->rx_tail; |
1145 | p = priv->dma_rx + entry; | 1152 | p = priv->dma_rx + entry; |
1146 | if (desc_get_owner(p)) | 1153 | if (desc_get_owner(p)) |
@@ -1825,7 +1832,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode) | |||
1825 | unsigned int pmt = 0; | 1832 | unsigned int pmt = 0; |
1826 | 1833 | ||
1827 | if (mode & WAKE_MAGIC) | 1834 | if (mode & WAKE_MAGIC) |
1828 | pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT; | 1835 | pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN; |
1829 | if (mode & WAKE_UCAST) | 1836 | if (mode & WAKE_UCAST) |
1830 | pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; | 1837 | pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST; |
1831 | 1838 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 4ce62031f62f..8049268ce0f2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -497,8 +497,9 @@ int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len, | |||
497 | } | 497 | } |
498 | 498 | ||
499 | #define EEPROM_STAT_ADDR 0x7bfc | 499 | #define EEPROM_STAT_ADDR 0x7bfc |
500 | #define VPD_BASE 0 | ||
501 | #define VPD_LEN 512 | 500 | #define VPD_LEN 512 |
501 | #define VPD_BASE 0x400 | ||
502 | #define VPD_BASE_OLD 0 | ||
502 | 503 | ||
503 | /** | 504 | /** |
504 | * t4_seeprom_wp - enable/disable EEPROM write protection | 505 | * t4_seeprom_wp - enable/disable EEPROM write protection |
@@ -524,7 +525,7 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable) | |||
524 | int get_vpd_params(struct adapter *adapter, struct vpd_params *p) | 525 | int get_vpd_params(struct adapter *adapter, struct vpd_params *p) |
525 | { | 526 | { |
526 | u32 cclk_param, cclk_val; | 527 | u32 cclk_param, cclk_val; |
527 | int i, ret; | 528 | int i, ret, addr; |
528 | int ec, sn; | 529 | int ec, sn; |
529 | u8 *vpd, csum; | 530 | u8 *vpd, csum; |
530 | unsigned int vpdr_len, kw_offset, id_len; | 531 | unsigned int vpdr_len, kw_offset, id_len; |
@@ -533,7 +534,12 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) | |||
533 | if (!vpd) | 534 | if (!vpd) |
534 | return -ENOMEM; | 535 | return -ENOMEM; |
535 | 536 | ||
536 | ret = pci_read_vpd(adapter->pdev, VPD_BASE, VPD_LEN, vpd); | 537 | ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); |
538 | if (ret < 0) | ||
539 | goto out; | ||
540 | addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; | ||
541 | |||
542 | ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd); | ||
537 | if (ret < 0) | 543 | if (ret < 0) |
538 | goto out; | 544 | goto out; |
539 | 545 | ||
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 8cdf02503d13..9eada8e86078 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c | |||
@@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count) | |||
257 | tmp = readl(reg); | 257 | tmp = readl(reg); |
258 | } | 258 | } |
259 | 259 | ||
260 | /* | ||
261 | * Sleep, either by using msleep() or if we are suspending, then | ||
262 | * use mdelay() to sleep. | ||
263 | */ | ||
264 | static void dm9000_msleep(board_info_t *db, unsigned int ms) | ||
265 | { | ||
266 | if (db->in_suspend) | ||
267 | mdelay(ms); | ||
268 | else | ||
269 | msleep(ms); | ||
270 | } | ||
271 | |||
272 | /* Read a word from phyxcer */ | ||
273 | static int | ||
274 | dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) | ||
275 | { | ||
276 | board_info_t *db = netdev_priv(dev); | ||
277 | unsigned long flags; | ||
278 | unsigned int reg_save; | ||
279 | int ret; | ||
280 | |||
281 | mutex_lock(&db->addr_lock); | ||
282 | |||
283 | spin_lock_irqsave(&db->lock, flags); | ||
284 | |||
285 | /* Save previous register address */ | ||
286 | reg_save = readb(db->io_addr); | ||
287 | |||
288 | /* Fill the phyxcer register into REG_0C */ | ||
289 | iow(db, DM9000_EPAR, DM9000_PHY | reg); | ||
290 | |||
291 | /* Issue phyxcer read command */ | ||
292 | iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); | ||
293 | |||
294 | writeb(reg_save, db->io_addr); | ||
295 | spin_unlock_irqrestore(&db->lock, flags); | ||
296 | |||
297 | dm9000_msleep(db, 1); /* Wait read complete */ | ||
298 | |||
299 | spin_lock_irqsave(&db->lock, flags); | ||
300 | reg_save = readb(db->io_addr); | ||
301 | |||
302 | iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ | ||
303 | |||
304 | /* The read data keeps on REG_0D & REG_0E */ | ||
305 | ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); | ||
306 | |||
307 | /* restore the previous address */ | ||
308 | writeb(reg_save, db->io_addr); | ||
309 | spin_unlock_irqrestore(&db->lock, flags); | ||
310 | |||
311 | mutex_unlock(&db->addr_lock); | ||
312 | |||
313 | dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); | ||
314 | return ret; | ||
315 | } | ||
316 | |||
317 | /* Write a word to phyxcer */ | ||
318 | static void | ||
319 | dm9000_phy_write(struct net_device *dev, | ||
320 | int phyaddr_unused, int reg, int value) | ||
321 | { | ||
322 | board_info_t *db = netdev_priv(dev); | ||
323 | unsigned long flags; | ||
324 | unsigned long reg_save; | ||
325 | |||
326 | dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); | ||
327 | mutex_lock(&db->addr_lock); | ||
328 | |||
329 | spin_lock_irqsave(&db->lock, flags); | ||
330 | |||
331 | /* Save previous register address */ | ||
332 | reg_save = readb(db->io_addr); | ||
333 | |||
334 | /* Fill the phyxcer register into REG_0C */ | ||
335 | iow(db, DM9000_EPAR, DM9000_PHY | reg); | ||
336 | |||
337 | /* Fill the written data into REG_0D & REG_0E */ | ||
338 | iow(db, DM9000_EPDRL, value); | ||
339 | iow(db, DM9000_EPDRH, value >> 8); | ||
340 | |||
341 | /* Issue phyxcer write command */ | ||
342 | iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); | ||
343 | |||
344 | writeb(reg_save, db->io_addr); | ||
345 | spin_unlock_irqrestore(&db->lock, flags); | ||
346 | |||
347 | dm9000_msleep(db, 1); /* Wait write complete */ | ||
348 | |||
349 | spin_lock_irqsave(&db->lock, flags); | ||
350 | reg_save = readb(db->io_addr); | ||
351 | |||
352 | iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ | ||
353 | |||
354 | /* restore the previous address */ | ||
355 | writeb(reg_save, db->io_addr); | ||
356 | |||
357 | spin_unlock_irqrestore(&db->lock, flags); | ||
358 | mutex_unlock(&db->addr_lock); | ||
359 | } | ||
360 | |||
260 | /* dm9000_set_io | 361 | /* dm9000_set_io |
261 | * | 362 | * |
262 | * select the specified set of io routines to use with the | 363 | * select the specified set of io routines to use with the |
@@ -795,6 +896,9 @@ dm9000_init_dm9000(struct net_device *dev) | |||
795 | 896 | ||
796 | iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ | 897 | iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ |
797 | 898 | ||
899 | dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */ | ||
900 | dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */ | ||
901 | |||
798 | ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; | 902 | ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; |
799 | 903 | ||
800 | /* if wol is needed, then always set NCR_WAKEEN otherwise we end | 904 | /* if wol is needed, then always set NCR_WAKEEN otherwise we end |
@@ -1201,109 +1305,6 @@ dm9000_open(struct net_device *dev) | |||
1201 | return 0; | 1305 | return 0; |
1202 | } | 1306 | } |
1203 | 1307 | ||
1204 | /* | ||
1205 | * Sleep, either by using msleep() or if we are suspending, then | ||
1206 | * use mdelay() to sleep. | ||
1207 | */ | ||
1208 | static void dm9000_msleep(board_info_t *db, unsigned int ms) | ||
1209 | { | ||
1210 | if (db->in_suspend) | ||
1211 | mdelay(ms); | ||
1212 | else | ||
1213 | msleep(ms); | ||
1214 | } | ||
1215 | |||
1216 | /* | ||
1217 | * Read a word from phyxcer | ||
1218 | */ | ||
1219 | static int | ||
1220 | dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg) | ||
1221 | { | ||
1222 | board_info_t *db = netdev_priv(dev); | ||
1223 | unsigned long flags; | ||
1224 | unsigned int reg_save; | ||
1225 | int ret; | ||
1226 | |||
1227 | mutex_lock(&db->addr_lock); | ||
1228 | |||
1229 | spin_lock_irqsave(&db->lock,flags); | ||
1230 | |||
1231 | /* Save previous register address */ | ||
1232 | reg_save = readb(db->io_addr); | ||
1233 | |||
1234 | /* Fill the phyxcer register into REG_0C */ | ||
1235 | iow(db, DM9000_EPAR, DM9000_PHY | reg); | ||
1236 | |||
1237 | iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */ | ||
1238 | |||
1239 | writeb(reg_save, db->io_addr); | ||
1240 | spin_unlock_irqrestore(&db->lock,flags); | ||
1241 | |||
1242 | dm9000_msleep(db, 1); /* Wait read complete */ | ||
1243 | |||
1244 | spin_lock_irqsave(&db->lock,flags); | ||
1245 | reg_save = readb(db->io_addr); | ||
1246 | |||
1247 | iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ | ||
1248 | |||
1249 | /* The read data keeps on REG_0D & REG_0E */ | ||
1250 | ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL); | ||
1251 | |||
1252 | /* restore the previous address */ | ||
1253 | writeb(reg_save, db->io_addr); | ||
1254 | spin_unlock_irqrestore(&db->lock,flags); | ||
1255 | |||
1256 | mutex_unlock(&db->addr_lock); | ||
1257 | |||
1258 | dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret); | ||
1259 | return ret; | ||
1260 | } | ||
1261 | |||
1262 | /* | ||
1263 | * Write a word to phyxcer | ||
1264 | */ | ||
1265 | static void | ||
1266 | dm9000_phy_write(struct net_device *dev, | ||
1267 | int phyaddr_unused, int reg, int value) | ||
1268 | { | ||
1269 | board_info_t *db = netdev_priv(dev); | ||
1270 | unsigned long flags; | ||
1271 | unsigned long reg_save; | ||
1272 | |||
1273 | dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value); | ||
1274 | mutex_lock(&db->addr_lock); | ||
1275 | |||
1276 | spin_lock_irqsave(&db->lock,flags); | ||
1277 | |||
1278 | /* Save previous register address */ | ||
1279 | reg_save = readb(db->io_addr); | ||
1280 | |||
1281 | /* Fill the phyxcer register into REG_0C */ | ||
1282 | iow(db, DM9000_EPAR, DM9000_PHY | reg); | ||
1283 | |||
1284 | /* Fill the written data into REG_0D & REG_0E */ | ||
1285 | iow(db, DM9000_EPDRL, value); | ||
1286 | iow(db, DM9000_EPDRH, value >> 8); | ||
1287 | |||
1288 | iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */ | ||
1289 | |||
1290 | writeb(reg_save, db->io_addr); | ||
1291 | spin_unlock_irqrestore(&db->lock, flags); | ||
1292 | |||
1293 | dm9000_msleep(db, 1); /* Wait write complete */ | ||
1294 | |||
1295 | spin_lock_irqsave(&db->lock,flags); | ||
1296 | reg_save = readb(db->io_addr); | ||
1297 | |||
1298 | iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ | ||
1299 | |||
1300 | /* restore the previous address */ | ||
1301 | writeb(reg_save, db->io_addr); | ||
1302 | |||
1303 | spin_unlock_irqrestore(&db->lock, flags); | ||
1304 | mutex_unlock(&db->addr_lock); | ||
1305 | } | ||
1306 | |||
1307 | static void | 1308 | static void |
1308 | dm9000_shutdown(struct net_device *dev) | 1309 | dm9000_shutdown(struct net_device *dev) |
1309 | { | 1310 | { |
@@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev) | |||
1502 | db->flags |= DM9000_PLATF_SIMPLE_PHY; | 1503 | db->flags |= DM9000_PLATF_SIMPLE_PHY; |
1503 | #endif | 1504 | #endif |
1504 | 1505 | ||
1505 | dm9000_reset(db); | 1506 | /* Fixing bug on dm9000_probe, takeover dm9000_reset(db), |
1507 | * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo | ||
1508 | * while probe stage. | ||
1509 | */ | ||
1510 | |||
1511 | iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST); | ||
1506 | 1512 | ||
1507 | /* try multiple times, DM9000 sometimes gets the read wrong */ | 1513 | /* try multiple times, DM9000 sometimes gets the read wrong */ |
1508 | for (i = 0; i < 8; i++) { | 1514 | for (i = 0; i < 8; i++) { |
diff --git a/drivers/net/ethernet/davicom/dm9000.h b/drivers/net/ethernet/davicom/dm9000.h index 55688bd1a3ef..9ce058adabab 100644 --- a/drivers/net/ethernet/davicom/dm9000.h +++ b/drivers/net/ethernet/davicom/dm9000.h | |||
@@ -69,7 +69,9 @@ | |||
69 | #define NCR_WAKEEN (1<<6) | 69 | #define NCR_WAKEEN (1<<6) |
70 | #define NCR_FCOL (1<<4) | 70 | #define NCR_FCOL (1<<4) |
71 | #define NCR_FDX (1<<3) | 71 | #define NCR_FDX (1<<3) |
72 | #define NCR_LBK (3<<1) | 72 | |
73 | #define NCR_RESERVED (3<<1) | ||
74 | #define NCR_MAC_LBK (1<<1) | ||
73 | #define NCR_RST (1<<0) | 75 | #define NCR_RST (1<<0) |
74 | 76 | ||
75 | #define NSR_SPEED (1<<7) | 77 | #define NSR_SPEED (1<<7) |
@@ -167,5 +169,12 @@ | |||
167 | #define ISR_LNKCHNG (1<<5) | 169 | #define ISR_LNKCHNG (1<<5) |
168 | #define ISR_UNDERRUN (1<<4) | 170 | #define ISR_UNDERRUN (1<<4) |
169 | 171 | ||
172 | /* Davicom MII registers. | ||
173 | */ | ||
174 | |||
175 | #define MII_DM_DSPCR 0x1b /* DSP Control Register */ | ||
176 | |||
177 | #define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */ | ||
178 | |||
170 | #endif /* _DM9000X_H_ */ | 179 | #endif /* _DM9000X_H_ */ |
171 | 180 | ||
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig index 0c37fb2cc867..1df33c799c00 100644 --- a/drivers/net/ethernet/dec/tulip/Kconfig +++ b/drivers/net/ethernet/dec/tulip/Kconfig | |||
@@ -108,6 +108,7 @@ config TULIP_DM910X | |||
108 | config DE4X5 | 108 | config DE4X5 |
109 | tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" | 109 | tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" |
110 | depends on (PCI || EISA) | 110 | depends on (PCI || EISA) |
111 | depends on VIRT_TO_BUS || ALPHA || PPC || SPARC | ||
111 | select CRC32 | 112 | select CRC32 |
112 | ---help--- | 113 | ---help--- |
113 | This is support for the DIGITAL series of PCI/EISA Ethernet cards. | 114 | This is support for the DIGITAL series of PCI/EISA Ethernet cards. |
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c index 069a155d16ed..f292c3aa423f 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c | |||
@@ -345,6 +345,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
345 | return NETDEV_TX_OK; | 345 | return NETDEV_TX_OK; |
346 | } | 346 | } |
347 | 347 | ||
348 | /* Init RX & TX buffer descriptors | ||
349 | */ | ||
350 | static void fec_enet_bd_init(struct net_device *dev) | ||
351 | { | ||
352 | struct fec_enet_private *fep = netdev_priv(dev); | ||
353 | struct bufdesc *bdp; | ||
354 | unsigned int i; | ||
355 | |||
356 | /* Initialize the receive buffer descriptors. */ | ||
357 | bdp = fep->rx_bd_base; | ||
358 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
359 | |||
360 | /* Initialize the BD for every fragment in the page. */ | ||
361 | if (bdp->cbd_bufaddr) | ||
362 | bdp->cbd_sc = BD_ENET_RX_EMPTY; | ||
363 | else | ||
364 | bdp->cbd_sc = 0; | ||
365 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); | ||
366 | } | ||
367 | |||
368 | /* Set the last buffer to wrap */ | ||
369 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); | ||
370 | bdp->cbd_sc |= BD_SC_WRAP; | ||
371 | |||
372 | fep->cur_rx = fep->rx_bd_base; | ||
373 | |||
374 | /* ...and the same for transmit */ | ||
375 | bdp = fep->tx_bd_base; | ||
376 | fep->cur_tx = bdp; | ||
377 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
378 | |||
379 | /* Initialize the BD for every fragment in the page. */ | ||
380 | bdp->cbd_sc = 0; | ||
381 | if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) { | ||
382 | dev_kfree_skb_any(fep->tx_skbuff[i]); | ||
383 | fep->tx_skbuff[i] = NULL; | ||
384 | } | ||
385 | bdp->cbd_bufaddr = 0; | ||
386 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); | ||
387 | } | ||
388 | |||
389 | /* Set the last buffer to wrap */ | ||
390 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); | ||
391 | bdp->cbd_sc |= BD_SC_WRAP; | ||
392 | fep->dirty_tx = bdp; | ||
393 | } | ||
394 | |||
348 | /* This function is called to start or restart the FEC during a link | 395 | /* This function is called to start or restart the FEC during a link |
349 | * change. This only happens when switching between half and full | 396 | * change. This only happens when switching between half and full |
350 | * duplex. | 397 | * duplex. |
@@ -388,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex) | |||
388 | /* Set maximum receive buffer size. */ | 435 | /* Set maximum receive buffer size. */ |
389 | writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); | 436 | writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); |
390 | 437 | ||
438 | fec_enet_bd_init(ndev); | ||
439 | |||
391 | /* Set receive and transmit descriptor base. */ | 440 | /* Set receive and transmit descriptor base. */ |
392 | writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); | 441 | writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); |
393 | if (fep->bufdesc_ex) | 442 | if (fep->bufdesc_ex) |
@@ -397,7 +446,6 @@ fec_restart(struct net_device *ndev, int duplex) | |||
397 | writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) | 446 | writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) |
398 | * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); | 447 | * RX_RING_SIZE, fep->hwp + FEC_X_DES_START); |
399 | 448 | ||
400 | fep->cur_rx = fep->rx_bd_base; | ||
401 | 449 | ||
402 | for (i = 0; i <= TX_RING_MOD_MASK; i++) { | 450 | for (i = 0; i <= TX_RING_MOD_MASK; i++) { |
403 | if (fep->tx_skbuff[i]) { | 451 | if (fep->tx_skbuff[i]) { |
@@ -934,24 +982,28 @@ static void fec_enet_adjust_link(struct net_device *ndev) | |||
934 | goto spin_unlock; | 982 | goto spin_unlock; |
935 | } | 983 | } |
936 | 984 | ||
937 | /* Duplex link change */ | ||
938 | if (phy_dev->link) { | 985 | if (phy_dev->link) { |
939 | if (fep->full_duplex != phy_dev->duplex) { | 986 | if (!fep->link) { |
940 | fec_restart(ndev, phy_dev->duplex); | ||
941 | /* prevent unnecessary second fec_restart() below */ | ||
942 | fep->link = phy_dev->link; | 987 | fep->link = phy_dev->link; |
943 | status_change = 1; | 988 | status_change = 1; |
944 | } | 989 | } |
945 | } | ||
946 | 990 | ||
947 | /* Link on or off change */ | 991 | if (fep->full_duplex != phy_dev->duplex) |
948 | if (phy_dev->link != fep->link) { | 992 | status_change = 1; |
949 | fep->link = phy_dev->link; | 993 | |
950 | if (phy_dev->link) | 994 | if (phy_dev->speed != fep->speed) { |
995 | fep->speed = phy_dev->speed; | ||
996 | status_change = 1; | ||
997 | } | ||
998 | |||
999 | /* if any of the above changed restart the FEC */ | ||
1000 | if (status_change) | ||
951 | fec_restart(ndev, phy_dev->duplex); | 1001 | fec_restart(ndev, phy_dev->duplex); |
952 | else | 1002 | } else { |
1003 | if (fep->link) { | ||
953 | fec_stop(ndev); | 1004 | fec_stop(ndev); |
954 | status_change = 1; | 1005 | status_change = 1; |
1006 | } | ||
955 | } | 1007 | } |
956 | 1008 | ||
957 | spin_unlock: | 1009 | spin_unlock: |
@@ -1328,7 +1380,7 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) | |||
1328 | static void fec_enet_free_buffers(struct net_device *ndev) | 1380 | static void fec_enet_free_buffers(struct net_device *ndev) |
1329 | { | 1381 | { |
1330 | struct fec_enet_private *fep = netdev_priv(ndev); | 1382 | struct fec_enet_private *fep = netdev_priv(ndev); |
1331 | int i; | 1383 | unsigned int i; |
1332 | struct sk_buff *skb; | 1384 | struct sk_buff *skb; |
1333 | struct bufdesc *bdp; | 1385 | struct bufdesc *bdp; |
1334 | 1386 | ||
@@ -1352,7 +1404,7 @@ static void fec_enet_free_buffers(struct net_device *ndev) | |||
1352 | static int fec_enet_alloc_buffers(struct net_device *ndev) | 1404 | static int fec_enet_alloc_buffers(struct net_device *ndev) |
1353 | { | 1405 | { |
1354 | struct fec_enet_private *fep = netdev_priv(ndev); | 1406 | struct fec_enet_private *fep = netdev_priv(ndev); |
1355 | int i; | 1407 | unsigned int i; |
1356 | struct sk_buff *skb; | 1408 | struct sk_buff *skb; |
1357 | struct bufdesc *bdp; | 1409 | struct bufdesc *bdp; |
1358 | 1410 | ||
@@ -1437,6 +1489,7 @@ fec_enet_close(struct net_device *ndev) | |||
1437 | struct fec_enet_private *fep = netdev_priv(ndev); | 1489 | struct fec_enet_private *fep = netdev_priv(ndev); |
1438 | 1490 | ||
1439 | /* Don't know what to do yet. */ | 1491 | /* Don't know what to do yet. */ |
1492 | napi_disable(&fep->napi); | ||
1440 | fep->opened = 0; | 1493 | fep->opened = 0; |
1441 | netif_stop_queue(ndev); | 1494 | netif_stop_queue(ndev); |
1442 | fec_stop(ndev); | 1495 | fec_stop(ndev); |
@@ -1592,8 +1645,6 @@ static int fec_enet_init(struct net_device *ndev) | |||
1592 | { | 1645 | { |
1593 | struct fec_enet_private *fep = netdev_priv(ndev); | 1646 | struct fec_enet_private *fep = netdev_priv(ndev); |
1594 | struct bufdesc *cbd_base; | 1647 | struct bufdesc *cbd_base; |
1595 | struct bufdesc *bdp; | ||
1596 | int i; | ||
1597 | 1648 | ||
1598 | /* Allocate memory for buffer descriptors. */ | 1649 | /* Allocate memory for buffer descriptors. */ |
1599 | cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, | 1650 | cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, |
@@ -1603,6 +1654,7 @@ static int fec_enet_init(struct net_device *ndev) | |||
1603 | return -ENOMEM; | 1654 | return -ENOMEM; |
1604 | } | 1655 | } |
1605 | 1656 | ||
1657 | memset(cbd_base, 0, PAGE_SIZE); | ||
1606 | spin_lock_init(&fep->hw_lock); | 1658 | spin_lock_init(&fep->hw_lock); |
1607 | 1659 | ||
1608 | fep->netdev = ndev; | 1660 | fep->netdev = ndev; |
@@ -1626,35 +1678,6 @@ static int fec_enet_init(struct net_device *ndev) | |||
1626 | writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); | 1678 | writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); |
1627 | netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); | 1679 | netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); |
1628 | 1680 | ||
1629 | /* Initialize the receive buffer descriptors. */ | ||
1630 | bdp = fep->rx_bd_base; | ||
1631 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
1632 | |||
1633 | /* Initialize the BD for every fragment in the page. */ | ||
1634 | bdp->cbd_sc = 0; | ||
1635 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); | ||
1636 | } | ||
1637 | |||
1638 | /* Set the last buffer to wrap */ | ||
1639 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); | ||
1640 | bdp->cbd_sc |= BD_SC_WRAP; | ||
1641 | |||
1642 | /* ...and the same for transmit */ | ||
1643 | bdp = fep->tx_bd_base; | ||
1644 | fep->cur_tx = bdp; | ||
1645 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
1646 | |||
1647 | /* Initialize the BD for every fragment in the page. */ | ||
1648 | bdp->cbd_sc = 0; | ||
1649 | bdp->cbd_bufaddr = 0; | ||
1650 | bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); | ||
1651 | } | ||
1652 | |||
1653 | /* Set the last buffer to wrap */ | ||
1654 | bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex); | ||
1655 | bdp->cbd_sc |= BD_SC_WRAP; | ||
1656 | fep->dirty_tx = bdp; | ||
1657 | |||
1658 | fec_restart(ndev, 0); | 1681 | fec_restart(ndev, 0); |
1659 | 1682 | ||
1660 | return 0; | 1683 | return 0; |
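The fec hunks above move the descriptor-ring setup out of fec_enet_init(), zero the freshly allocated descriptor page instead, and disable NAPI before stopping the MAC on close. A minimal, hedged sketch of the allocate-and-zero step (the device pointer and names here are placeholders, not the driver's own fields):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/string.h>

/* Sketch only: allocate one page of buffer descriptors and clear it so the
 * MAC never sees stale ownership/wrap bits before the rings are programmed.
 */
static void *alloc_bd_page(struct device *dev, dma_addr_t *bd_dma)
{
	void *cbd_base = dma_alloc_coherent(dev, PAGE_SIZE, bd_dma, GFP_KERNEL);

	if (!cbd_base)
		return NULL;
	memset(cbd_base, 0, PAGE_SIZE);
	return cbd_base;
}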
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index f5390071efd0..eb4372962839 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h | |||
@@ -240,6 +240,7 @@ struct fec_enet_private { | |||
240 | phy_interface_t phy_interface; | 240 | phy_interface_t phy_interface; |
241 | int link; | 241 | int link; |
242 | int full_duplex; | 242 | int full_duplex; |
243 | int speed; | ||
243 | struct completion mdio_done; | 244 | struct completion mdio_done; |
244 | int irq[FEC_IRQ_NUM]; | 245 | int irq[FEC_IRQ_NUM]; |
245 | int bufdesc_ex; | 246 | int bufdesc_ex; |
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 1f17ca0f2201..0d8df400a479 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c | |||
@@ -128,6 +128,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev) | |||
128 | 128 | ||
129 | spin_unlock_irqrestore(&fep->tmreg_lock, flags); | 129 | spin_unlock_irqrestore(&fep->tmreg_lock, flags); |
130 | } | 130 | } |
131 | EXPORT_SYMBOL(fec_ptp_start_cyclecounter); | ||
131 | 132 | ||
132 | /** | 133 | /** |
133 | * fec_ptp_adjfreq - adjust ptp cycle frequency | 134 | * fec_ptp_adjfreq - adjust ptp cycle frequency |
@@ -318,6 +319,7 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) | |||
318 | return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? | 319 | return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? |
319 | -EFAULT : 0; | 320 | -EFAULT : 0; |
320 | } | 321 | } |
322 | EXPORT_SYMBOL(fec_ptp_ioctl); | ||
321 | 323 | ||
322 | /** | 324 | /** |
323 | * fec_time_keep - call timecounter_read every second to avoid timer overrun | 325 | * fec_time_keep - call timecounter_read every second to avoid timer overrun |
@@ -383,3 +385,4 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev) | |||
383 | pr_info("registered PHC device on %s\n", ndev->name); | 385 | pr_info("registered PHC device on %s\n", ndev->name); |
384 | } | 386 | } |
385 | } | 387 | } |
388 | EXPORT_SYMBOL(fec_ptp_init); | ||
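The EXPORT_SYMBOL() additions let the fec_ptp.c helpers be called from a separately linked object, which is needed once the MAC driver and its PTP support are no longer built into a single translation unit. A generic, hedged illustration of the mechanism (names are hypothetical):

#include <linux/module.h>

/* provider, built into one module */
int example_ptp_helper(int clk)
{
	return clk / 2;
}
EXPORT_SYMBOL(example_ptp_helper);

/* a consumer in another object simply declares it and calls it */
extern int example_ptp_helper(int clk);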
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index ec800b093e7e..d2bea3f07c73 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c | |||
@@ -870,7 +870,7 @@ err_unlock: | |||
870 | } | 870 | } |
871 | 871 | ||
872 | static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, | 872 | static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, |
873 | void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) | 873 | int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *)) |
874 | { | 874 | { |
875 | struct cb *cb; | 875 | struct cb *cb; |
876 | unsigned long flags; | 876 | unsigned long flags; |
@@ -888,10 +888,13 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb, | |||
888 | nic->cbs_avail--; | 888 | nic->cbs_avail--; |
889 | cb->skb = skb; | 889 | cb->skb = skb; |
890 | 890 | ||
891 | err = cb_prepare(nic, cb, skb); | ||
892 | if (err) | ||
893 | goto err_unlock; | ||
894 | |||
891 | if (unlikely(!nic->cbs_avail)) | 895 | if (unlikely(!nic->cbs_avail)) |
892 | err = -ENOSPC; | 896 | err = -ENOSPC; |
893 | 897 | ||
894 | cb_prepare(nic, cb, skb); | ||
895 | 898 | ||
896 | /* Order is important otherwise we'll be in a race with h/w: | 899 | /* Order is important otherwise we'll be in a race with h/w: |
897 | * set S-bit in current first, then clear S-bit in previous. */ | 900 | * set S-bit in current first, then clear S-bit in previous. */ |
@@ -1091,7 +1094,7 @@ static void e100_get_defaults(struct nic *nic) | |||
1091 | nic->mii.mdio_write = mdio_write; | 1094 | nic->mii.mdio_write = mdio_write; |
1092 | } | 1095 | } |
1093 | 1096 | ||
1094 | static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) | 1097 | static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) |
1095 | { | 1098 | { |
1096 | struct config *config = &cb->u.config; | 1099 | struct config *config = &cb->u.config; |
1097 | u8 *c = (u8 *)config; | 1100 | u8 *c = (u8 *)config; |
@@ -1181,6 +1184,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb) | |||
1181 | netif_printk(nic, hw, KERN_DEBUG, nic->netdev, | 1184 | netif_printk(nic, hw, KERN_DEBUG, nic->netdev, |
1182 | "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", | 1185 | "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", |
1183 | c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); | 1186 | c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); |
1187 | return 0; | ||
1184 | } | 1188 | } |
1185 | 1189 | ||
1186 | /************************************************************************* | 1190 | /************************************************************************* |
@@ -1331,7 +1335,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic) | |||
1331 | return fw; | 1335 | return fw; |
1332 | } | 1336 | } |
1333 | 1337 | ||
1334 | static void e100_setup_ucode(struct nic *nic, struct cb *cb, | 1338 | static int e100_setup_ucode(struct nic *nic, struct cb *cb, |
1335 | struct sk_buff *skb) | 1339 | struct sk_buff *skb) |
1336 | { | 1340 | { |
1337 | const struct firmware *fw = (void *)skb; | 1341 | const struct firmware *fw = (void *)skb; |
@@ -1358,6 +1362,7 @@ static void e100_setup_ucode(struct nic *nic, struct cb *cb, | |||
1358 | cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); | 1362 | cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80); |
1359 | 1363 | ||
1360 | cb->command = cpu_to_le16(cb_ucode | cb_el); | 1364 | cb->command = cpu_to_le16(cb_ucode | cb_el); |
1365 | return 0; | ||
1361 | } | 1366 | } |
1362 | 1367 | ||
1363 | static inline int e100_load_ucode_wait(struct nic *nic) | 1368 | static inline int e100_load_ucode_wait(struct nic *nic) |
@@ -1400,18 +1405,20 @@ static inline int e100_load_ucode_wait(struct nic *nic) | |||
1400 | return err; | 1405 | return err; |
1401 | } | 1406 | } |
1402 | 1407 | ||
1403 | static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, | 1408 | static int e100_setup_iaaddr(struct nic *nic, struct cb *cb, |
1404 | struct sk_buff *skb) | 1409 | struct sk_buff *skb) |
1405 | { | 1410 | { |
1406 | cb->command = cpu_to_le16(cb_iaaddr); | 1411 | cb->command = cpu_to_le16(cb_iaaddr); |
1407 | memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); | 1412 | memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN); |
1413 | return 0; | ||
1408 | } | 1414 | } |
1409 | 1415 | ||
1410 | static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) | 1416 | static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb) |
1411 | { | 1417 | { |
1412 | cb->command = cpu_to_le16(cb_dump); | 1418 | cb->command = cpu_to_le16(cb_dump); |
1413 | cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + | 1419 | cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr + |
1414 | offsetof(struct mem, dump_buf)); | 1420 | offsetof(struct mem, dump_buf)); |
1421 | return 0; | ||
1415 | } | 1422 | } |
1416 | 1423 | ||
1417 | static int e100_phy_check_without_mii(struct nic *nic) | 1424 | static int e100_phy_check_without_mii(struct nic *nic) |
@@ -1581,7 +1588,7 @@ static int e100_hw_init(struct nic *nic) | |||
1581 | return 0; | 1588 | return 0; |
1582 | } | 1589 | } |
1583 | 1590 | ||
1584 | static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) | 1591 | static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) |
1585 | { | 1592 | { |
1586 | struct net_device *netdev = nic->netdev; | 1593 | struct net_device *netdev = nic->netdev; |
1587 | struct netdev_hw_addr *ha; | 1594 | struct netdev_hw_addr *ha; |
@@ -1596,6 +1603,7 @@ static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) | |||
1596 | memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, | 1603 | memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr, |
1597 | ETH_ALEN); | 1604 | ETH_ALEN); |
1598 | } | 1605 | } |
1606 | return 0; | ||
1599 | } | 1607 | } |
1600 | 1608 | ||
1601 | static void e100_set_multicast_list(struct net_device *netdev) | 1609 | static void e100_set_multicast_list(struct net_device *netdev) |
@@ -1756,11 +1764,18 @@ static void e100_watchdog(unsigned long data) | |||
1756 | round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); | 1764 | round_jiffies(jiffies + E100_WATCHDOG_PERIOD)); |
1757 | } | 1765 | } |
1758 | 1766 | ||
1759 | static void e100_xmit_prepare(struct nic *nic, struct cb *cb, | 1767 | static int e100_xmit_prepare(struct nic *nic, struct cb *cb, |
1760 | struct sk_buff *skb) | 1768 | struct sk_buff *skb) |
1761 | { | 1769 | { |
1770 | dma_addr_t dma_addr; | ||
1762 | cb->command = nic->tx_command; | 1771 | cb->command = nic->tx_command; |
1763 | 1772 | ||
1773 | dma_addr = pci_map_single(nic->pdev, | ||
1774 | skb->data, skb->len, PCI_DMA_TODEVICE); | ||
1775 | /* If we can't map the skb, have the upper layer try later */ | ||
1776 | if (pci_dma_mapping_error(nic->pdev, dma_addr)) | ||
1777 | return -ENOMEM; | ||
1778 | |||
1764 | /* | 1779 | /* |
1765 | * Use the last 4 bytes of the SKB payload packet as the CRC, used for | 1780 | * Use the last 4 bytes of the SKB payload packet as the CRC, used for |
1766 | * testing, ie sending frames with bad CRC. | 1781 | * testing, ie sending frames with bad CRC. |
@@ -1777,11 +1792,10 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb, | |||
1777 | cb->u.tcb.tcb_byte_count = 0; | 1792 | cb->u.tcb.tcb_byte_count = 0; |
1778 | cb->u.tcb.threshold = nic->tx_threshold; | 1793 | cb->u.tcb.threshold = nic->tx_threshold; |
1779 | cb->u.tcb.tbd_count = 1; | 1794 | cb->u.tcb.tbd_count = 1; |
1780 | cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev, | 1795 | cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr); |
1781 | skb->data, skb->len, PCI_DMA_TODEVICE)); | ||
1782 | /* check for mapping failure? */ | ||
1783 | cb->u.tcb.tbd.size = cpu_to_le16(skb->len); | 1796 | cb->u.tcb.tbd.size = cpu_to_le16(skb->len); |
1784 | skb_tx_timestamp(skb); | 1797 | skb_tx_timestamp(skb); |
1798 | return 0; | ||
1785 | } | 1799 | } |
1786 | 1800 | ||
1787 | static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, | 1801 | static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, |
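With cb_prepare() returning int, e100_exec_cb() can abort before touching the hardware, and e100_xmit_prepare() now maps the skb up front and fails cleanly when the mapping cannot be created. A hedged sketch of that map-then-check step using the same legacy PCI DMA helpers (the function and parameter names are illustrative):

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Sketch only: map a TX skb and report failure instead of handing an
 * unmapped address to the controller.
 */
static int map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
		      dma_addr_t *dma_addr)
{
	*dma_addr = pci_map_single(pdev, skb->data, skb->len,
				   PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, *dma_addr))
		return -ENOMEM;	/* the upper layer will retry the frame later */
	return 0;
}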
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 43462d596a4e..ffd287196bf8 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c | |||
@@ -1053,6 +1053,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1053 | txdr->buffer_info[i].dma = | 1053 | txdr->buffer_info[i].dma = |
1054 | dma_map_single(&pdev->dev, skb->data, skb->len, | 1054 | dma_map_single(&pdev->dev, skb->data, skb->len, |
1055 | DMA_TO_DEVICE); | 1055 | DMA_TO_DEVICE); |
1056 | if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { | ||
1057 | ret_val = 4; | ||
1058 | goto err_nomem; | ||
1059 | } | ||
1056 | tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); | 1060 | tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); |
1057 | tx_desc->lower.data = cpu_to_le32(skb->len); | 1061 | tx_desc->lower.data = cpu_to_le32(skb->len); |
1058 | tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | | 1062 | tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | |
@@ -1069,7 +1073,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1069 | rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer), | 1073 | rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer), |
1070 | GFP_KERNEL); | 1074 | GFP_KERNEL); |
1071 | if (!rxdr->buffer_info) { | 1075 | if (!rxdr->buffer_info) { |
1072 | ret_val = 4; | 1076 | ret_val = 5; |
1073 | goto err_nomem; | 1077 | goto err_nomem; |
1074 | } | 1078 | } |
1075 | 1079 | ||
@@ -1077,7 +1081,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1077 | rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, | 1081 | rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, |
1078 | GFP_KERNEL); | 1082 | GFP_KERNEL); |
1079 | if (!rxdr->desc) { | 1083 | if (!rxdr->desc) { |
1080 | ret_val = 5; | 1084 | ret_val = 6; |
1081 | goto err_nomem; | 1085 | goto err_nomem; |
1082 | } | 1086 | } |
1083 | memset(rxdr->desc, 0, rxdr->size); | 1087 | memset(rxdr->desc, 0, rxdr->size); |
@@ -1101,7 +1105,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1101 | 1105 | ||
1102 | skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); | 1106 | skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); |
1103 | if (!skb) { | 1107 | if (!skb) { |
1104 | ret_val = 6; | 1108 | ret_val = 7; |
1105 | goto err_nomem; | 1109 | goto err_nomem; |
1106 | } | 1110 | } |
1107 | skb_reserve(skb, NET_IP_ALIGN); | 1111 | skb_reserve(skb, NET_IP_ALIGN); |
@@ -1110,6 +1114,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter) | |||
1110 | rxdr->buffer_info[i].dma = | 1114 | rxdr->buffer_info[i].dma = |
1111 | dma_map_single(&pdev->dev, skb->data, | 1115 | dma_map_single(&pdev->dev, skb->data, |
1112 | E1000_RXBUFFER_2048, DMA_FROM_DEVICE); | 1116 | E1000_RXBUFFER_2048, DMA_FROM_DEVICE); |
1117 | if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { | ||
1118 | ret_val = 8; | ||
1119 | goto err_nomem; | ||
1120 | } | ||
1113 | rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); | 1121 | rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); |
1114 | memset(skb->data, 0x00, skb->len); | 1122 | memset(skb->data, 0x00, skb->len); |
1115 | } | 1123 | } |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 948b86ffa4f0..7e615e2bf7e6 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -848,11 +848,16 @@ check_page: | |||
848 | } | 848 | } |
849 | } | 849 | } |
850 | 850 | ||
851 | if (!buffer_info->dma) | 851 | if (!buffer_info->dma) { |
852 | buffer_info->dma = dma_map_page(&pdev->dev, | 852 | buffer_info->dma = dma_map_page(&pdev->dev, |
853 | buffer_info->page, 0, | 853 | buffer_info->page, 0, |
854 | PAGE_SIZE, | 854 | PAGE_SIZE, |
855 | DMA_FROM_DEVICE); | 855 | DMA_FROM_DEVICE); |
856 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | ||
857 | adapter->alloc_rx_buff_failed++; | ||
858 | break; | ||
859 | } | ||
860 | } | ||
856 | 861 | ||
857 | rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); | 862 | rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); |
858 | rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); | 863 | rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); |
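The e1000e refill path gains the same defensive check with the generic DMA API: if dma_map_page() fails, count the failure and stop refilling rather than posting an unmapped address to the NIC. A hedged, generic sketch of that loop shape (the ring and buffer structures here are illustrative, not e1000e's):

#include <linux/dma-mapping.h>

struct demo_rx_buf {
	struct page *page;
	dma_addr_t dma;
};

/* Sketch only: bail out of the refill loop on a mapping failure and let a
 * later pass (or the watchdog) try again.
 */
static unsigned int refill_ring(struct device *dev, struct demo_rx_buf *bufs,
				unsigned int count, unsigned long *failed)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		bufs[i].dma = dma_map_page(dev, bufs[i].page, 0, PAGE_SIZE,
					   DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, bufs[i].dma)) {
			(*failed)++;	/* mirrored by alloc_rx_buff_failed above */
			break;
		}
		/* ...write bufs[i].dma into the hardware descriptor... */
	}
	return i;	/* number of buffers actually posted */
}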
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index b64542acfa34..12b1d8480808 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c | |||
@@ -1818,27 +1818,32 @@ out: | |||
1818 | **/ | 1818 | **/ |
1819 | void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) | 1819 | void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) |
1820 | { | 1820 | { |
1821 | u32 dtxswc; | 1821 | u32 reg_val, reg_offset; |
1822 | 1822 | ||
1823 | switch (hw->mac.type) { | 1823 | switch (hw->mac.type) { |
1824 | case e1000_82576: | 1824 | case e1000_82576: |
1825 | reg_offset = E1000_DTXSWC; | ||
1826 | break; | ||
1825 | case e1000_i350: | 1827 | case e1000_i350: |
1826 | dtxswc = rd32(E1000_DTXSWC); | 1828 | reg_offset = E1000_TXSWC; |
1827 | if (enable) { | ||
1828 | dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK | | ||
1829 | E1000_DTXSWC_VLAN_SPOOF_MASK); | ||
1830 | /* The PF can spoof - it has to in order to | ||
1831 | * support emulation mode NICs */ | ||
1832 | dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); | ||
1833 | } else { | ||
1834 | dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | | ||
1835 | E1000_DTXSWC_VLAN_SPOOF_MASK); | ||
1836 | } | ||
1837 | wr32(E1000_DTXSWC, dtxswc); | ||
1838 | break; | 1829 | break; |
1839 | default: | 1830 | default: |
1840 | break; | 1831 | return; |
1832 | } | ||
1833 | |||
1834 | reg_val = rd32(reg_offset); | ||
1835 | if (enable) { | ||
1836 | reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | | ||
1837 | E1000_DTXSWC_VLAN_SPOOF_MASK); | ||
1838 | /* The PF can spoof - it has to in order to | ||
1839 | * support emulation mode NICs | ||
1840 | */ | ||
1841 | reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); | ||
1842 | } else { | ||
1843 | reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | | ||
1844 | E1000_DTXSWC_VLAN_SPOOF_MASK); | ||
1841 | } | 1845 | } |
1846 | wr32(reg_offset, reg_val); | ||
1842 | } | 1847 | } |
1843 | 1848 | ||
1844 | /** | 1849 | /** |
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c index 4623502054d5..0478a1abe541 100644 --- a/drivers/net/ethernet/intel/igb/igb_hwmon.c +++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c | |||
@@ -39,7 +39,7 @@ | |||
39 | #include <linux/pci.h> | 39 | #include <linux/pci.h> |
40 | 40 | ||
41 | #ifdef CONFIG_IGB_HWMON | 41 | #ifdef CONFIG_IGB_HWMON |
42 | struct i2c_board_info i350_sensor_info = { | 42 | static struct i2c_board_info i350_sensor_info = { |
43 | I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), | 43 | I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)), |
44 | }; | 44 | }; |
45 | 45 | ||
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 4dbd62968c7a..8496adfc6a68 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -2542,8 +2542,8 @@ static void igb_probe_vfs(struct igb_adapter *adapter) | |||
2542 | if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) | 2542 | if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) |
2543 | return; | 2543 | return; |
2544 | 2544 | ||
2545 | igb_enable_sriov(pdev, max_vfs); | ||
2546 | pci_sriov_set_totalvfs(pdev, 7); | 2545 | pci_sriov_set_totalvfs(pdev, 7); |
2546 | igb_enable_sriov(pdev, max_vfs); | ||
2547 | 2547 | ||
2548 | #endif /* CONFIG_PCI_IOV */ | 2548 | #endif /* CONFIG_PCI_IOV */ |
2549 | } | 2549 | } |
@@ -2652,7 +2652,7 @@ static int igb_sw_init(struct igb_adapter *adapter) | |||
2652 | if (max_vfs > 7) { | 2652 | if (max_vfs > 7) { |
2653 | dev_warn(&pdev->dev, | 2653 | dev_warn(&pdev->dev, |
2654 | "Maximum of 7 VFs per PF, using max\n"); | 2654 | "Maximum of 7 VFs per PF, using max\n"); |
2655 | adapter->vfs_allocated_count = 7; | 2655 | max_vfs = adapter->vfs_allocated_count = 7; |
2656 | } else | 2656 | } else |
2657 | adapter->vfs_allocated_count = max_vfs; | 2657 | adapter->vfs_allocated_count = max_vfs; |
2658 | if (adapter->vfs_allocated_count) | 2658 | if (adapter->vfs_allocated_count) |
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 0987822359f0..0a237507ee85 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c | |||
@@ -740,7 +740,7 @@ void igb_ptp_init(struct igb_adapter *adapter) | |||
740 | case e1000_82576: | 740 | case e1000_82576: |
741 | snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); | 741 | snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); |
742 | adapter->ptp_caps.owner = THIS_MODULE; | 742 | adapter->ptp_caps.owner = THIS_MODULE; |
743 | adapter->ptp_caps.max_adj = 1000000000; | 743 | adapter->ptp_caps.max_adj = 999999881; |
744 | adapter->ptp_caps.n_ext_ts = 0; | 744 | adapter->ptp_caps.n_ext_ts = 0; |
745 | adapter->ptp_caps.pps = 0; | 745 | adapter->ptp_caps.pps = 0; |
746 | adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; | 746 | adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; |
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index ea4808373435..b5f94abe3cff 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c | |||
@@ -2159,6 +2159,10 @@ map_skb: | |||
2159 | skb->data, | 2159 | skb->data, |
2160 | adapter->rx_buffer_len, | 2160 | adapter->rx_buffer_len, |
2161 | DMA_FROM_DEVICE); | 2161 | DMA_FROM_DEVICE); |
2162 | if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { | ||
2163 | adapter->alloc_rx_buff_failed++; | ||
2164 | break; | ||
2165 | } | ||
2162 | 2166 | ||
2163 | rx_desc = IXGB_RX_DESC(*rx_ring, i); | 2167 | rx_desc = IXGB_RX_DESC(*rx_ring, i); |
2164 | rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); | 2168 | rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); |
@@ -2168,7 +2172,8 @@ map_skb: | |||
2168 | rx_desc->status = 0; | 2172 | rx_desc->status = 0; |
2169 | 2173 | ||
2170 | 2174 | ||
2171 | if (++i == rx_ring->count) i = 0; | 2175 | if (++i == rx_ring->count) |
2176 | i = 0; | ||
2172 | buffer_info = &rx_ring->buffer_info[i]; | 2177 | buffer_info = &rx_ring->buffer_info[i]; |
2173 | } | 2178 | } |
2174 | 2179 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index db5611ae407e..79f4a26ea6cc 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -7922,12 +7922,19 @@ static int __init ixgbe_init_module(void) | |||
7922 | ixgbe_dbg_init(); | 7922 | ixgbe_dbg_init(); |
7923 | #endif /* CONFIG_DEBUG_FS */ | 7923 | #endif /* CONFIG_DEBUG_FS */ |
7924 | 7924 | ||
7925 | ret = pci_register_driver(&ixgbe_driver); | ||
7926 | if (ret) { | ||
7927 | #ifdef CONFIG_DEBUG_FS | ||
7928 | ixgbe_dbg_exit(); | ||
7929 | #endif /* CONFIG_DEBUG_FS */ | ||
7930 | return ret; | ||
7931 | } | ||
7932 | |||
7925 | #ifdef CONFIG_IXGBE_DCA | 7933 | #ifdef CONFIG_IXGBE_DCA |
7926 | dca_register_notify(&dca_notifier); | 7934 | dca_register_notify(&dca_notifier); |
7927 | #endif | 7935 | #endif |
7928 | 7936 | ||
7929 | ret = pci_register_driver(&ixgbe_driver); | 7937 | return 0; |
7930 | return ret; | ||
7931 | } | 7938 | } |
7932 | 7939 | ||
7933 | module_init(ixgbe_init_module); | 7940 | module_init(ixgbe_init_module); |
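The ixgbe init-path change makes a pci_register_driver() failure actually propagate: debugfs is torn back down, the error is returned, and the DCA notifier is only registered once the driver is in. A hedged sketch of that ordering with placeholder names:

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver example_driver;	/* placeholder for the real driver */
static void example_dbg_init(void) { }		/* stand-ins for the debugfs hooks */
static void example_dbg_exit(void) { }

static int __init example_init_module(void)
{
	int ret;

	example_dbg_init();

	ret = pci_register_driver(&example_driver);
	if (ret) {
		example_dbg_exit();	/* unwind what was already set up */
		return ret;
	}

	/* optional side registrations (notifiers etc.) only after success */
	return 0;
}
module_init(example_init_module);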
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index c3db6cd69b68..2b6cb5ca48ee 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | |||
@@ -944,9 +944,17 @@ free_queue_irqs: | |||
944 | free_irq(adapter->msix_entries[vector].vector, | 944 | free_irq(adapter->msix_entries[vector].vector, |
945 | adapter->q_vector[vector]); | 945 | adapter->q_vector[vector]); |
946 | } | 946 | } |
947 | pci_disable_msix(adapter->pdev); | 947 | /* This failure is non-recoverable - it indicates the system is |
948 | kfree(adapter->msix_entries); | 948 | * out of MSIX vector resources and the VF driver cannot run |
949 | adapter->msix_entries = NULL; | 949 | * without them. Set the number of msix vectors to zero |
950 | * indicating that not enough can be allocated. The error | ||
951 | * will be returned to the user indicating device open failed. | ||
952 | * Any further attempts to force the driver to open will also | ||
953 | * fail. The only way to recover is to unload the driver and | ||
954 | * reload it again. If the system has recovered some MSIX | ||
955 | * vectors then it may succeed. | ||
956 | */ | ||
957 | adapter->num_msix_vectors = 0; | ||
950 | return err; | 958 | return err; |
951 | } | 959 | } |
952 | 960 | ||
@@ -2572,6 +2580,15 @@ static int ixgbevf_open(struct net_device *netdev) | |||
2572 | struct ixgbe_hw *hw = &adapter->hw; | 2580 | struct ixgbe_hw *hw = &adapter->hw; |
2573 | int err; | 2581 | int err; |
2574 | 2582 | ||
2583 | /* A previous failure to open the device because of a lack of | ||
2584 | * available MSIX vector resources may have reset the number | ||
2585 | * of msix vectors variable to zero. The only way to recover | ||
2586 | * is to unload/reload the driver and hope that the system has | ||
2587 | * been able to recover some MSIX vector resources. | ||
2588 | */ | ||
2589 | if (!adapter->num_msix_vectors) | ||
2590 | return -ENOMEM; | ||
2591 | |||
2575 | /* disallow open during test */ | 2592 | /* disallow open during test */ |
2576 | if (test_bit(__IXGBEVF_TESTING, &adapter->state)) | 2593 | if (test_bit(__IXGBEVF_TESTING, &adapter->state)) |
2577 | return -EBUSY; | 2594 | return -EBUSY; |
@@ -2628,7 +2645,6 @@ static int ixgbevf_open(struct net_device *netdev) | |||
2628 | 2645 | ||
2629 | err_req_irq: | 2646 | err_req_irq: |
2630 | ixgbevf_down(adapter); | 2647 | ixgbevf_down(adapter); |
2631 | ixgbevf_free_irq(adapter); | ||
2632 | err_setup_rx: | 2648 | err_setup_rx: |
2633 | ixgbevf_free_all_rx_resources(adapter); | 2649 | ixgbevf_free_all_rx_resources(adapter); |
2634 | err_setup_tx: | 2650 | err_setup_tx: |
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 6a2127489af7..bfdb06860397 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c | |||
@@ -769,7 +769,7 @@ ltq_etop_probe(struct platform_device *pdev) | |||
769 | return 0; | 769 | return 0; |
770 | 770 | ||
771 | err_free: | 771 | err_free: |
772 | kfree(dev); | 772 | free_netdev(dev); |
773 | err_out: | 773 | err_out: |
774 | return err; | 774 | return err; |
775 | } | 775 | } |
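The lantiq_etop one-liner is a classic lifetime fix: a net_device obtained from alloc_etherdev() must be released with free_netdev(), which also tears down netdev-internal state that a bare kfree() would leak. A hedged sketch of the probe error path (the setup helper is hypothetical):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static int example_setup(struct net_device *dev) { return 0; }	/* placeholder */

static struct net_device *example_alloc(void)
{
	struct net_device *dev = alloc_etherdev(sizeof(unsigned long));

	if (!dev)
		return NULL;
	if (example_setup(dev)) {
		free_netdev(dev);	/* never kfree(): pairs with alloc_etherdev() */
		return NULL;
	}
	return dev;
}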
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index cd345b8969bc..1e628ce57201 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -2771,16 +2771,17 @@ static int mvneta_probe(struct platform_device *pdev) | |||
2771 | 2771 | ||
2772 | netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); | 2772 | netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight); |
2773 | 2773 | ||
2774 | dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2775 | dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2776 | dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2777 | dev->priv_flags |= IFF_UNICAST_FLT; | ||
2778 | |||
2774 | err = register_netdev(dev); | 2779 | err = register_netdev(dev); |
2775 | if (err < 0) { | 2780 | if (err < 0) { |
2776 | dev_err(&pdev->dev, "failed to register\n"); | 2781 | dev_err(&pdev->dev, "failed to register\n"); |
2777 | goto err_deinit; | 2782 | goto err_deinit; |
2778 | } | 2783 | } |
2779 | 2784 | ||
2780 | dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2781 | dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM; | ||
2782 | dev->priv_flags |= IFF_UNICAST_FLT; | ||
2783 | |||
2784 | netdev_info(dev, "mac: %pM\n", dev->dev_addr); | 2785 | netdev_info(dev, "mac: %pM\n", dev->dev_addr); |
2785 | 2786 | ||
2786 | platform_set_drvdata(pdev, pp->dev); | 2787 | platform_set_drvdata(pdev, pp->dev); |
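The mvneta reorder also matters for timing: feature flags and priv_flags are populated before register_netdev(), so the device never appears to userspace half-configured, and the SG/IP-checksum features are additionally propagated to vlan_features. In outline, the probe path now reads:

/* Sketch of the probe-time ordering established above. */
dev->features       = NETIF_F_SG | NETIF_F_IP_CSUM;
dev->hw_features   |= NETIF_F_SG | NETIF_F_IP_CSUM;
dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
dev->priv_flags    |= IFF_UNICAST_FLT;

err = register_netdev(dev);	/* only now does the device go live */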
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index fc07ca35721b..6a0e671fcecd 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c | |||
@@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space) | |||
1067 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); | 1067 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp); |
1068 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); | 1068 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2); |
1069 | 1069 | ||
1070 | tp = space - 2048/8; | 1070 | tp = space - 8192/8; |
1071 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); | 1071 | sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); |
1072 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); | 1072 | sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); |
1073 | } else { | 1073 | } else { |
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index 615ac63ea860..ec6dcd80152b 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h | |||
@@ -2074,7 +2074,7 @@ enum { | |||
2074 | GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ | 2074 | GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ |
2075 | GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ | 2075 | GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ |
2076 | 2076 | ||
2077 | #define GMAC_DEF_MSK GM_IS_TX_FF_UR | 2077 | #define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR) |
2078 | }; | 2078 | }; |
2079 | 2079 | ||
2080 | /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ | 2080 | /* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 995d4b6d5c1e..30d78f806dc3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -411,8 +411,8 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
411 | 411 | ||
412 | static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) | 412 | static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac) |
413 | { | 413 | { |
414 | unsigned int i; | 414 | int i; |
415 | for (i = ETH_ALEN - 1; i; --i) { | 415 | for (i = ETH_ALEN - 1; i >= 0; --i) { |
416 | dst_mac[i] = src_mac & 0xff; | 416 | dst_mac[i] = src_mac & 0xff; |
417 | src_mac >>= 8; | 417 | src_mac >>= 8; |
418 | } | 418 | } |
@@ -1637,6 +1637,17 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) | |||
1637 | /* Flush multicast filter */ | 1637 | /* Flush multicast filter */ |
1638 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); | 1638 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); |
1639 | 1639 | ||
1640 | /* Remove flow steering rules for the port*/ | ||
1641 | if (mdev->dev->caps.steering_mode == | ||
1642 | MLX4_STEERING_MODE_DEVICE_MANAGED) { | ||
1643 | ASSERT_RTNL(); | ||
1644 | list_for_each_entry_safe(flow, tmp_flow, | ||
1645 | &priv->ethtool_list, list) { | ||
1646 | mlx4_flow_detach(mdev->dev, flow->id); | ||
1647 | list_del(&flow->list); | ||
1648 | } | ||
1649 | } | ||
1650 | |||
1640 | mlx4_en_destroy_drop_qp(priv); | 1651 | mlx4_en_destroy_drop_qp(priv); |
1641 | 1652 | ||
1642 | /* Free TX Rings */ | 1653 | /* Free TX Rings */ |
@@ -1657,17 +1668,6 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) | |||
1657 | if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN)) | 1668 | if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN)) |
1658 | mdev->mac_removed[priv->port] = 1; | 1669 | mdev->mac_removed[priv->port] = 1; |
1659 | 1670 | ||
1660 | /* Remove flow steering rules for the port*/ | ||
1661 | if (mdev->dev->caps.steering_mode == | ||
1662 | MLX4_STEERING_MODE_DEVICE_MANAGED) { | ||
1663 | ASSERT_RTNL(); | ||
1664 | list_for_each_entry_safe(flow, tmp_flow, | ||
1665 | &priv->ethtool_list, list) { | ||
1666 | mlx4_flow_detach(mdev->dev, flow->id); | ||
1667 | list_del(&flow->list); | ||
1668 | } | ||
1669 | } | ||
1670 | |||
1671 | /* Free RX Rings */ | 1671 | /* Free RX Rings */ |
1672 | for (i = 0; i < priv->rx_ring_num; i++) { | 1672 | for (i = 0; i < priv->rx_ring_num; i++) { |
1673 | mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); | 1673 | mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 251ae2f93116..8e3123a1df88 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c | |||
@@ -771,7 +771,7 @@ int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, | |||
771 | struct mlx4_slave_event_eq_info *event_eq = | 771 | struct mlx4_slave_event_eq_info *event_eq = |
772 | priv->mfunc.master.slave_state[slave].event_eq; | 772 | priv->mfunc.master.slave_state[slave].event_eq; |
773 | u32 in_modifier = vhcr->in_modifier; | 773 | u32 in_modifier = vhcr->in_modifier; |
774 | u32 eqn = in_modifier & 0x1FF; | 774 | u32 eqn = in_modifier & 0x3FF; |
775 | u64 in_param = vhcr->in_param; | 775 | u64 in_param = vhcr->in_param; |
776 | int err = 0; | 776 | int err = 0; |
777 | int i; | 777 | int i; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 2995687f1aee..1391b52f443a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
@@ -99,6 +99,7 @@ struct res_qp { | |||
99 | struct list_head mcg_list; | 99 | struct list_head mcg_list; |
100 | spinlock_t mcg_spl; | 100 | spinlock_t mcg_spl; |
101 | int local_qpn; | 101 | int local_qpn; |
102 | atomic_t ref_count; | ||
102 | }; | 103 | }; |
103 | 104 | ||
104 | enum res_mtt_states { | 105 | enum res_mtt_states { |
@@ -197,6 +198,7 @@ enum res_fs_rule_states { | |||
197 | 198 | ||
198 | struct res_fs_rule { | 199 | struct res_fs_rule { |
199 | struct res_common com; | 200 | struct res_common com; |
201 | int qpn; | ||
200 | }; | 202 | }; |
201 | 203 | ||
202 | static void *res_tracker_lookup(struct rb_root *root, u64 res_id) | 204 | static void *res_tracker_lookup(struct rb_root *root, u64 res_id) |
@@ -355,7 +357,7 @@ static int mpt_mask(struct mlx4_dev *dev) | |||
355 | return dev->caps.num_mpts - 1; | 357 | return dev->caps.num_mpts - 1; |
356 | } | 358 | } |
357 | 359 | ||
358 | static void *find_res(struct mlx4_dev *dev, int res_id, | 360 | static void *find_res(struct mlx4_dev *dev, u64 res_id, |
359 | enum mlx4_resource type) | 361 | enum mlx4_resource type) |
360 | { | 362 | { |
361 | struct mlx4_priv *priv = mlx4_priv(dev); | 363 | struct mlx4_priv *priv = mlx4_priv(dev); |
@@ -447,6 +449,7 @@ static struct res_common *alloc_qp_tr(int id) | |||
447 | ret->local_qpn = id; | 449 | ret->local_qpn = id; |
448 | INIT_LIST_HEAD(&ret->mcg_list); | 450 | INIT_LIST_HEAD(&ret->mcg_list); |
449 | spin_lock_init(&ret->mcg_spl); | 451 | spin_lock_init(&ret->mcg_spl); |
452 | atomic_set(&ret->ref_count, 0); | ||
450 | 453 | ||
451 | return &ret->com; | 454 | return &ret->com; |
452 | } | 455 | } |
@@ -554,7 +557,7 @@ static struct res_common *alloc_xrcdn_tr(int id) | |||
554 | return &ret->com; | 557 | return &ret->com; |
555 | } | 558 | } |
556 | 559 | ||
557 | static struct res_common *alloc_fs_rule_tr(u64 id) | 560 | static struct res_common *alloc_fs_rule_tr(u64 id, int qpn) |
558 | { | 561 | { |
559 | struct res_fs_rule *ret; | 562 | struct res_fs_rule *ret; |
560 | 563 | ||
@@ -564,7 +567,7 @@ static struct res_common *alloc_fs_rule_tr(u64 id) | |||
564 | 567 | ||
565 | ret->com.res_id = id; | 568 | ret->com.res_id = id; |
566 | ret->com.state = RES_FS_RULE_ALLOCATED; | 569 | ret->com.state = RES_FS_RULE_ALLOCATED; |
567 | 570 | ret->qpn = qpn; | |
568 | return &ret->com; | 571 | return &ret->com; |
569 | } | 572 | } |
570 | 573 | ||
@@ -602,7 +605,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave, | |||
602 | ret = alloc_xrcdn_tr(id); | 605 | ret = alloc_xrcdn_tr(id); |
603 | break; | 606 | break; |
604 | case RES_FS_RULE: | 607 | case RES_FS_RULE: |
605 | ret = alloc_fs_rule_tr(id); | 608 | ret = alloc_fs_rule_tr(id, extra); |
606 | break; | 609 | break; |
607 | default: | 610 | default: |
608 | return NULL; | 611 | return NULL; |
@@ -671,10 +674,14 @@ undo: | |||
671 | 674 | ||
672 | static int remove_qp_ok(struct res_qp *res) | 675 | static int remove_qp_ok(struct res_qp *res) |
673 | { | 676 | { |
674 | if (res->com.state == RES_QP_BUSY) | 677 | if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) || |
678 | !list_empty(&res->mcg_list)) { | ||
679 | pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n", | ||
680 | res->com.state, atomic_read(&res->ref_count)); | ||
675 | return -EBUSY; | 681 | return -EBUSY; |
676 | else if (res->com.state != RES_QP_RESERVED) | 682 | } else if (res->com.state != RES_QP_RESERVED) { |
677 | return -EPERM; | 683 | return -EPERM; |
684 | } | ||
678 | 685 | ||
679 | return 0; | 686 | return 0; |
680 | } | 687 | } |
@@ -3124,6 +3131,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, | |||
3124 | struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC]; | 3131 | struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC]; |
3125 | int err; | 3132 | int err; |
3126 | int qpn; | 3133 | int qpn; |
3134 | struct res_qp *rqp; | ||
3127 | struct mlx4_net_trans_rule_hw_ctrl *ctrl; | 3135 | struct mlx4_net_trans_rule_hw_ctrl *ctrl; |
3128 | struct _rule_hw *rule_header; | 3136 | struct _rule_hw *rule_header; |
3129 | int header_id; | 3137 | int header_id; |
@@ -3134,7 +3142,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, | |||
3134 | 3142 | ||
3135 | ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; | 3143 | ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; |
3136 | qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; | 3144 | qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; |
3137 | err = get_res(dev, slave, qpn, RES_QP, NULL); | 3145 | err = get_res(dev, slave, qpn, RES_QP, &rqp); |
3138 | if (err) { | 3146 | if (err) { |
3139 | pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); | 3147 | pr_err("Steering rule with qpn 0x%x rejected.\n", qpn); |
3140 | return err; | 3148 | return err; |
@@ -3175,14 +3183,16 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, | |||
3175 | if (err) | 3183 | if (err) |
3176 | goto err_put; | 3184 | goto err_put; |
3177 | 3185 | ||
3178 | err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0); | 3186 | err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn); |
3179 | if (err) { | 3187 | if (err) { |
3180 | mlx4_err(dev, "Fail to add flow steering resources.\n "); | 3188 | mlx4_err(dev, "Fail to add flow steering resources.\n "); |
3181 | /* detach rule*/ | 3189 | /* detach rule*/ |
3182 | mlx4_cmd(dev, vhcr->out_param, 0, 0, | 3190 | mlx4_cmd(dev, vhcr->out_param, 0, 0, |
3183 | MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, | 3191 | MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, |
3184 | MLX4_CMD_NATIVE); | 3192 | MLX4_CMD_NATIVE); |
3193 | goto err_put; | ||
3185 | } | 3194 | } |
3195 | atomic_inc(&rqp->ref_count); | ||
3186 | err_put: | 3196 | err_put: |
3187 | put_res(dev, slave, qpn, RES_QP); | 3197 | put_res(dev, slave, qpn, RES_QP); |
3188 | return err; | 3198 | return err; |
@@ -3195,20 +3205,35 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, | |||
3195 | struct mlx4_cmd_info *cmd) | 3205 | struct mlx4_cmd_info *cmd) |
3196 | { | 3206 | { |
3197 | int err; | 3207 | int err; |
3208 | struct res_qp *rqp; | ||
3209 | struct res_fs_rule *rrule; | ||
3198 | 3210 | ||
3199 | if (dev->caps.steering_mode != | 3211 | if (dev->caps.steering_mode != |
3200 | MLX4_STEERING_MODE_DEVICE_MANAGED) | 3212 | MLX4_STEERING_MODE_DEVICE_MANAGED) |
3201 | return -EOPNOTSUPP; | 3213 | return -EOPNOTSUPP; |
3202 | 3214 | ||
3215 | err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule); | ||
3216 | if (err) | ||
3217 | return err; | ||
3218 | /* Release the rule form busy state before removal */ | ||
3219 | put_res(dev, slave, vhcr->in_param, RES_FS_RULE); | ||
3220 | err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp); | ||
3221 | if (err) | ||
3222 | return err; | ||
3223 | |||
3203 | err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); | 3224 | err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); |
3204 | if (err) { | 3225 | if (err) { |
3205 | mlx4_err(dev, "Fail to remove flow steering resources.\n "); | 3226 | mlx4_err(dev, "Fail to remove flow steering resources.\n "); |
3206 | return err; | 3227 | goto out; |
3207 | } | 3228 | } |
3208 | 3229 | ||
3209 | err = mlx4_cmd(dev, vhcr->in_param, 0, 0, | 3230 | err = mlx4_cmd(dev, vhcr->in_param, 0, 0, |
3210 | MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, | 3231 | MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, |
3211 | MLX4_CMD_NATIVE); | 3232 | MLX4_CMD_NATIVE); |
3233 | if (!err) | ||
3234 | atomic_dec(&rqp->ref_count); | ||
3235 | out: | ||
3236 | put_res(dev, slave, rrule->qpn, RES_QP); | ||
3212 | return err; | 3237 | return err; |
3213 | } | 3238 | } |
3214 | 3239 | ||
@@ -3806,6 +3831,7 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) | |||
3806 | mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); | 3831 | mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); |
3807 | /*VLAN*/ | 3832 | /*VLAN*/ |
3808 | rem_slave_macs(dev, slave); | 3833 | rem_slave_macs(dev, slave); |
3834 | rem_slave_fs_rule(dev, slave); | ||
3809 | rem_slave_qps(dev, slave); | 3835 | rem_slave_qps(dev, slave); |
3810 | rem_slave_srqs(dev, slave); | 3836 | rem_slave_srqs(dev, slave); |
3811 | rem_slave_cqs(dev, slave); | 3837 | rem_slave_cqs(dev, slave); |
@@ -3814,6 +3840,5 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) | |||
3814 | rem_slave_mtts(dev, slave); | 3840 | rem_slave_mtts(dev, slave); |
3815 | rem_slave_counters(dev, slave); | 3841 | rem_slave_counters(dev, slave); |
3816 | rem_slave_xrcdns(dev, slave); | 3842 | rem_slave_xrcdns(dev, slave); |
3817 | rem_slave_fs_rule(dev, slave); | ||
3818 | mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); | 3843 | mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); |
3819 | } | 3844 | } |
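The resource-tracker changes tie flow-steering rules to the QP they target: each attach bumps a per-QP reference count, detach drops it, remove_qp_ok() refuses while the count (or the multicast list) is non-empty, and slave cleanup now removes rules before QPs. A generic, hedged sketch of that dependency counting (types and names are illustrative):

#include <linux/atomic.h>
#include <linux/errno.h>

struct demo_qp {
	atomic_t ref_count;	/* flow rules currently pointing at this QP */
};

static void demo_rule_attach(struct demo_qp *qp)
{
	atomic_inc(&qp->ref_count);
}

static void demo_rule_detach(struct demo_qp *qp)
{
	atomic_dec(&qp->ref_count);
}

static int demo_qp_remove_ok(struct demo_qp *qp)
{
	return atomic_read(&qp->ref_count) ? -EBUSY : 0;
}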
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 33bcb63d56a2..8fb481252e2c 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c | |||
@@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks) | |||
528 | for (; rxfc != 0; rxfc--) { | 528 | for (; rxfc != 0; rxfc--) { |
529 | rxh = ks8851_rdreg32(ks, KS_RXFHSR); | 529 | rxh = ks8851_rdreg32(ks, KS_RXFHSR); |
530 | rxstat = rxh & 0xffff; | 530 | rxstat = rxh & 0xffff; |
531 | rxlen = rxh >> 16; | 531 | rxlen = (rxh >> 16) & 0xfff; |
532 | 532 | ||
533 | netif_dbg(ks, rx_status, ks->netdev, | 533 | netif_dbg(ks, rx_status, ks->netdev, |
534 | "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); | 534 | "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen); |
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index c4122c86f829..efa29b712d5f 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c | |||
@@ -1472,7 +1472,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) | |||
1472 | } | 1472 | } |
1473 | platform_set_drvdata(pdev, ndev); | 1473 | platform_set_drvdata(pdev, ndev); |
1474 | 1474 | ||
1475 | if (lpc_mii_init(pldat) != 0) | 1475 | ret = lpc_mii_init(pldat); |
1476 | if (ret) | ||
1476 | goto err_out_unregister_netdev; | 1477 | goto err_out_unregister_netdev; |
1477 | 1478 | ||
1478 | netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", | 1479 | netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", |
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 39ab4d09faaa..73ce7dd6b954 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | |||
@@ -1726,9 +1726,9 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1726 | 1726 | ||
1727 | skb->protocol = eth_type_trans(skb, netdev); | 1727 | skb->protocol = eth_type_trans(skb, netdev); |
1728 | if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) | 1728 | if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) |
1729 | skb->ip_summed = CHECKSUM_NONE; | ||
1730 | else | ||
1731 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1729 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1730 | else | ||
1731 | skb->ip_summed = CHECKSUM_NONE; | ||
1732 | 1732 | ||
1733 | napi_gro_receive(&adapter->napi, skb); | 1733 | napi_gro_receive(&adapter->napi, skb); |
1734 | (*work_done)++; | 1734 | (*work_done)++; |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 28fb50a1e9c3..4ecbe64a758d 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -3818,6 +3818,30 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp) | |||
3818 | } | 3818 | } |
3819 | } | 3819 | } |
3820 | 3820 | ||
3821 | static void rtl_speed_down(struct rtl8169_private *tp) | ||
3822 | { | ||
3823 | u32 adv; | ||
3824 | int lpa; | ||
3825 | |||
3826 | rtl_writephy(tp, 0x1f, 0x0000); | ||
3827 | lpa = rtl_readphy(tp, MII_LPA); | ||
3828 | |||
3829 | if (lpa & (LPA_10HALF | LPA_10FULL)) | ||
3830 | adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; | ||
3831 | else if (lpa & (LPA_100HALF | LPA_100FULL)) | ||
3832 | adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | ||
3833 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; | ||
3834 | else | ||
3835 | adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | | ||
3836 | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | | ||
3837 | (tp->mii.supports_gmii ? | ||
3838 | ADVERTISED_1000baseT_Half | | ||
3839 | ADVERTISED_1000baseT_Full : 0); | ||
3840 | |||
3841 | rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, | ||
3842 | adv); | ||
3843 | } | ||
3844 | |||
3821 | static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) | 3845 | static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) |
3822 | { | 3846 | { |
3823 | void __iomem *ioaddr = tp->mmio_addr; | 3847 | void __iomem *ioaddr = tp->mmio_addr; |
@@ -3848,9 +3872,7 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) | |||
3848 | if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) | 3872 | if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) |
3849 | return false; | 3873 | return false; |
3850 | 3874 | ||
3851 | rtl_writephy(tp, 0x1f, 0x0000); | 3875 | rtl_speed_down(tp); |
3852 | rtl_writephy(tp, MII_BMCR, 0x0000); | ||
3853 | |||
3854 | rtl_wol_suspend_quirk(tp); | 3876 | rtl_wol_suspend_quirk(tp); |
3855 | 3877 | ||
3856 | return true; | 3878 | return true; |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 33e96176e4d8..6ed333fe5c04 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
@@ -1216,10 +1216,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) | |||
1216 | if (felic_stat & ECSR_LCHNG) { | 1216 | if (felic_stat & ECSR_LCHNG) { |
1217 | /* Link Changed */ | 1217 | /* Link Changed */ |
1218 | if (mdp->cd->no_psr || mdp->no_ether_link) { | 1218 | if (mdp->cd->no_psr || mdp->no_ether_link) { |
1219 | if (mdp->link == PHY_DOWN) | 1219 | goto ignore_link; |
1220 | link_stat = 0; | ||
1221 | else | ||
1222 | link_stat = PHY_ST_LINK; | ||
1223 | } else { | 1220 | } else { |
1224 | link_stat = (sh_eth_read(ndev, PSR)); | 1221 | link_stat = (sh_eth_read(ndev, PSR)); |
1225 | if (mdp->ether_link_active_low) | 1222 | if (mdp->ether_link_active_low) |
@@ -1242,6 +1239,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) | |||
1242 | } | 1239 | } |
1243 | } | 1240 | } |
1244 | 1241 | ||
1242 | ignore_link: | ||
1245 | if (intr_status & EESR_TWB) { | 1243 | if (intr_status & EESR_TWB) { |
1246 | /* Write buck end. unused write back interrupt */ | 1244 | /* Write buck end. unused write back interrupt */ |
1247 | if (intr_status & EESR_TABT) /* Transmit Abort int */ | 1245 | if (intr_status & EESR_TABT) /* Transmit Abort int */ |
@@ -1326,12 +1324,18 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) | |||
1326 | struct sh_eth_private *mdp = netdev_priv(ndev); | 1324 | struct sh_eth_private *mdp = netdev_priv(ndev); |
1327 | struct sh_eth_cpu_data *cd = mdp->cd; | 1325 | struct sh_eth_cpu_data *cd = mdp->cd; |
1328 | irqreturn_t ret = IRQ_NONE; | 1326 | irqreturn_t ret = IRQ_NONE; |
1329 | u32 intr_status = 0; | 1327 | unsigned long intr_status; |
1330 | 1328 | ||
1331 | spin_lock(&mdp->lock); | 1329 | spin_lock(&mdp->lock); |
1332 | 1330 | ||
1333 | /* Get interrpt stat */ | 1331 | /* Get interrupt status */ |
1334 | intr_status = sh_eth_read(ndev, EESR); | 1332 | intr_status = sh_eth_read(ndev, EESR); |
1333 | /* Mask it with the interrupt mask, forcing ECI interrupt to be always | ||
1334 | * enabled since it's the one that comes thru regardless of the mask, | ||
1335 | * and we need to fully handle it in sh_eth_error() in order to quench | ||
1336 | * it as it doesn't get cleared by just writing 1 to the ECI bit... | ||
1337 | */ | ||
1338 | intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI; | ||
1335 | /* Clear interrupt */ | 1339 | /* Clear interrupt */ |
1336 | if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | | 1340 | if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | |
1337 | EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | | 1341 | EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | |
@@ -1373,7 +1377,7 @@ static void sh_eth_adjust_link(struct net_device *ndev) | |||
1373 | struct phy_device *phydev = mdp->phydev; | 1377 | struct phy_device *phydev = mdp->phydev; |
1374 | int new_state = 0; | 1378 | int new_state = 0; |
1375 | 1379 | ||
1376 | if (phydev->link != PHY_DOWN) { | 1380 | if (phydev->link) { |
1377 | if (phydev->duplex != mdp->duplex) { | 1381 | if (phydev->duplex != mdp->duplex) { |
1378 | new_state = 1; | 1382 | new_state = 1; |
1379 | mdp->duplex = phydev->duplex; | 1383 | mdp->duplex = phydev->duplex; |
@@ -1387,17 +1391,21 @@ static void sh_eth_adjust_link(struct net_device *ndev) | |||
1387 | if (mdp->cd->set_rate) | 1391 | if (mdp->cd->set_rate) |
1388 | mdp->cd->set_rate(ndev); | 1392 | mdp->cd->set_rate(ndev); |
1389 | } | 1393 | } |
1390 | if (mdp->link == PHY_DOWN) { | 1394 | if (!mdp->link) { |
1391 | sh_eth_write(ndev, | 1395 | sh_eth_write(ndev, |
1392 | (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); | 1396 | (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR); |
1393 | new_state = 1; | 1397 | new_state = 1; |
1394 | mdp->link = phydev->link; | 1398 | mdp->link = phydev->link; |
1399 | if (mdp->cd->no_psr || mdp->no_ether_link) | ||
1400 | sh_eth_rcv_snd_enable(ndev); | ||
1395 | } | 1401 | } |
1396 | } else if (mdp->link) { | 1402 | } else if (mdp->link) { |
1397 | new_state = 1; | 1403 | new_state = 1; |
1398 | mdp->link = PHY_DOWN; | 1404 | mdp->link = 0; |
1399 | mdp->speed = 0; | 1405 | mdp->speed = 0; |
1400 | mdp->duplex = -1; | 1406 | mdp->duplex = -1; |
1407 | if (mdp->cd->no_psr || mdp->no_ether_link) | ||
1408 | sh_eth_rcv_snd_disable(ndev); | ||
1401 | } | 1409 | } |
1402 | 1410 | ||
1403 | if (new_state && netif_msg_link(mdp)) | 1411 | if (new_state && netif_msg_link(mdp)) |
@@ -1414,7 +1422,7 @@ static int sh_eth_phy_init(struct net_device *ndev) | |||
1414 | snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, | 1422 | snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, |
1415 | mdp->mii_bus->id , mdp->phy_id); | 1423 | mdp->mii_bus->id , mdp->phy_id); |
1416 | 1424 | ||
1417 | mdp->link = PHY_DOWN; | 1425 | mdp->link = 0; |
1418 | mdp->speed = 0; | 1426 | mdp->speed = 0; |
1419 | mdp->duplex = -1; | 1427 | mdp->duplex = -1; |
1420 | 1428 | ||
@@ -2220,6 +2228,7 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp) | |||
2220 | /* MDIO bus release function */ | 2228 | /* MDIO bus release function */ |
2221 | static int sh_mdio_release(struct net_device *ndev) | 2229 | static int sh_mdio_release(struct net_device *ndev) |
2222 | { | 2230 | { |
2231 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
2223 | struct mii_bus *bus = dev_get_drvdata(&ndev->dev); | 2232 | struct mii_bus *bus = dev_get_drvdata(&ndev->dev); |
2224 | 2233 | ||
2225 | /* unregister mdio bus */ | 2234 | /* unregister mdio bus */ |
@@ -2234,6 +2243,9 @@ static int sh_mdio_release(struct net_device *ndev) | |||
2234 | /* free bitbang info */ | 2243 | /* free bitbang info */ |
2235 | free_mdio_bitbang(bus); | 2244 | free_mdio_bitbang(bus); |
2236 | 2245 | ||
2246 | /* free bitbang memory */ | ||
2247 | kfree(mdp->bitbang); | ||
2248 | |||
2237 | return 0; | 2249 | return 0; |
2238 | } | 2250 | } |
2239 | 2251 | ||
@@ -2262,6 +2274,7 @@ static int sh_mdio_init(struct net_device *ndev, int id, | |||
2262 | bitbang->ctrl.ops = &bb_ops; | 2274 | bitbang->ctrl.ops = &bb_ops; |
2263 | 2275 | ||
2264 | /* MII controller setting */ | 2276 | /* MII controller setting */ |
2277 | mdp->bitbang = bitbang; | ||
2265 | mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); | 2278 | mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); |
2266 | if (!mdp->mii_bus) { | 2279 | if (!mdp->mii_bus) { |
2267 | ret = -ENOMEM; | 2280 | ret = -ENOMEM; |
@@ -2441,6 +2454,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
2441 | } | 2454 | } |
2442 | mdp->tsu_addr = ioremap(rtsu->start, | 2455 | mdp->tsu_addr = ioremap(rtsu->start, |
2443 | resource_size(rtsu)); | 2456 | resource_size(rtsu)); |
2457 | if (mdp->tsu_addr == NULL) { | ||
2458 | ret = -ENOMEM; | ||
2459 | dev_err(&pdev->dev, "TSU ioremap failed.\n"); | ||
2460 | goto out_release; | ||
2461 | } | ||
2444 | mdp->port = devno % 2; | 2462 | mdp->port = devno % 2; |
2445 | ndev->features = NETIF_F_HW_VLAN_FILTER; | 2463 | ndev->features = NETIF_F_HW_VLAN_FILTER; |
2446 | } | 2464 | } |
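The sh_eth interrupt fix masks the raw EESR status with the interrupts that are actually enabled, keeping ECI in the set because it is reported regardless of EESIPR and must still be handled (and quenched) in sh_eth_error(). In outline, the handler now does:

/* Sketch of the masking added above; the register helpers are the driver's own. */
unsigned long intr_status = sh_eth_read(ndev, EESR);

intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI;
if (!intr_status)
	return IRQ_NONE;	/* nothing we enabled is pending */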
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index bae84fd2e73a..828be4515008 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
@@ -705,6 +705,7 @@ struct sh_eth_private { | |||
705 | const u16 *reg_offset; | 705 | const u16 *reg_offset; |
706 | void __iomem *addr; | 706 | void __iomem *addr; |
707 | void __iomem *tsu_addr; | 707 | void __iomem *tsu_addr; |
708 | struct bb_info *bitbang; | ||
708 | u32 num_rx_ring; | 709 | u32 num_rx_ring; |
709 | u32 num_tx_ring; | 710 | u32 num_tx_ring; |
710 | dma_addr_t rx_desc_dma; | 711 | dma_addr_t rx_desc_dma; |
@@ -722,7 +723,7 @@ struct sh_eth_private { | |||
722 | u32 phy_id; /* PHY ID */ | 723 | u32 phy_id; /* PHY ID */ |
723 | struct mii_bus *mii_bus; /* MDIO bus control */ | 724 | struct mii_bus *mii_bus; /* MDIO bus control */ |
724 | struct phy_device *phydev; /* PHY device control */ | 725 | struct phy_device *phydev; /* PHY device control */ |
725 | enum phy_state link; | 726 | int link; |
726 | phy_interface_t phy_interface; | 727 | phy_interface_t phy_interface; |
727 | int msg_enable; | 728 | int msg_enable; |
728 | int speed; | 729 | int speed; |
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 0ad790cc473c..eaa8e874a3cb 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c | |||
@@ -376,7 +376,8 @@ efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) | |||
376 | return false; | 376 | return false; |
377 | 377 | ||
378 | tx_queue->empty_read_count = 0; | 378 | tx_queue->empty_read_count = 0; |
379 | return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; | 379 | return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0 |
380 | && tx_queue->write_count - write_count == 1; | ||
380 | } | 381 | } |
381 | 382 | ||
382 | /* For each entry inserted into the software descriptor ring, create a | 383 | /* For each entry inserted into the software descriptor ring, create a |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 01ffbc486982..80cad06e5eb2 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -436,7 +436,7 @@ void cpsw_tx_handler(void *token, int len, int status) | |||
436 | * queue is stopped then start the queue as we have free desc for tx | 436 | * queue is stopped then start the queue as we have free desc for tx |
437 | */ | 437 | */ |
438 | if (unlikely(netif_queue_stopped(ndev))) | 438 | if (unlikely(netif_queue_stopped(ndev))) |
439 | netif_start_queue(ndev); | 439 | netif_wake_queue(ndev); |
440 | cpts_tx_timestamp(priv->cpts, skb); | 440 | cpts_tx_timestamp(priv->cpts, skb); |
441 | priv->stats.tx_packets++; | 441 | priv->stats.tx_packets++; |
442 | priv->stats.tx_bytes += len; | 442 | priv->stats.tx_bytes += len; |
@@ -905,7 +905,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, | |||
905 | /* If there is no more tx desc left free then we need to | 905 | /* If there is no more tx desc left free then we need to |
906 | * tell the kernel to stop sending us tx frames. | 906 | * tell the kernel to stop sending us tx frames. |
907 | */ | 907 | */ |
908 | if (unlikely(cpdma_check_free_tx_desc(priv->txch))) | 908 | if (unlikely(!cpdma_check_free_tx_desc(priv->txch))) |
909 | netif_stop_queue(ndev); | 909 | netif_stop_queue(ndev); |
910 | 910 | ||
911 | return NETDEV_TX_OK; | 911 | return NETDEV_TX_OK; |
@@ -1364,7 +1364,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, | |||
1364 | struct platform_device *mdio; | 1364 | struct platform_device *mdio; |
1365 | 1365 | ||
1366 | parp = of_get_property(slave_node, "phy_id", &lenp); | 1366 | parp = of_get_property(slave_node, "phy_id", &lenp); |
1367 | if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) { | 1367 | if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { |
1368 | pr_err("Missing slave[%d] phy_id property\n", i); | 1368 | pr_err("Missing slave[%d] phy_id property\n", i); |
1369 | ret = -EINVAL; | 1369 | ret = -EINVAL; |
1370 | goto error_ret; | 1370 | goto error_ret; |
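The last cpsw hunk turns an '&&' into an '||': the phy_id property is only usable when the pointer is non-NULL and the length matches, so the error path must fire when either check fails. A stand-alone sketch of that validation (property names and sizes are illustrative):

	#include <stdio.h>
	#include <stddef.h>

	/* Validate a device-tree-style property: it must exist and have the
	 * expected length. Returns 0 on success, -1 otherwise. */
	static int check_property(const void *val, int len, int expected_len)
	{
		/* The old form, (val == NULL) && (len != expected_len), only failed
		 * when both conditions held; a present property with a bad length,
		 * or a missing one reported with the right length, slipped through. */
		if (val == NULL || len != expected_len) {
			fprintf(stderr, "missing or malformed property\n");
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		char prop[16] = "phy0";

		printf("%d\n", check_property(prop, sizeof(prop), sizeof(prop))); /* 0 */
		printf("%d\n", check_property(NULL, sizeof(prop), sizeof(prop))); /* -1 */
		printf("%d\n", check_property(prop, 8, sizeof(prop)));            /* -1 */
		return 0;
	}

The other two cpsw changes follow the same pairing used in davinci_emac below: stop the queue when no free TX descriptor remains, and wake (not merely start) it again from the completion handler.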
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 52c05366599a..72300bc9e378 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
@@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status) | |||
1053 | * queue is stopped then start the queue as we have free desc for tx | 1053 | * queue is stopped then start the queue as we have free desc for tx |
1054 | */ | 1054 | */ |
1055 | if (unlikely(netif_queue_stopped(ndev))) | 1055 | if (unlikely(netif_queue_stopped(ndev))) |
1056 | netif_start_queue(ndev); | 1056 | netif_wake_queue(ndev); |
1057 | ndev->stats.tx_packets++; | 1057 | ndev->stats.tx_packets++; |
1058 | ndev->stats.tx_bytes += len; | 1058 | ndev->stats.tx_bytes += len; |
1059 | dev_kfree_skb_any(skb); | 1059 | dev_kfree_skb_any(skb); |
@@ -1102,7 +1102,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
1102 | /* If there is no more tx desc left free then we need to | 1102 | /* If there is no more tx desc left free then we need to |
1103 | * tell the kernel to stop sending us tx frames. | 1103 | * tell the kernel to stop sending us tx frames. |
1104 | */ | 1104 | */ |
1105 | if (unlikely(cpdma_check_free_tx_desc(priv->txchan))) | 1105 | if (unlikely(!cpdma_check_free_tx_desc(priv->txchan))) |
1106 | netif_stop_queue(ndev); | 1106 | netif_stop_queue(ndev); |
1107 | 1107 | ||
1108 | return NETDEV_TX_OK; | 1108 | return NETDEV_TX_OK; |
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 1cd77483da50..f5f0f09e4cc5 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
@@ -470,8 +470,10 @@ static void netvsc_send_completion(struct hv_device *device, | |||
470 | packet->trans_id; | 470 | packet->trans_id; |
471 | 471 | ||
472 | /* Notify the layer above us */ | 472 | /* Notify the layer above us */ |
473 | nvsc_packet->completion.send.send_completion( | 473 | if (nvsc_packet) |
474 | nvsc_packet->completion.send.send_completion_ctx); | 474 | nvsc_packet->completion.send.send_completion( |
475 | nvsc_packet->completion.send. | ||
476 | send_completion_ctx); | ||
475 | 477 | ||
476 | num_outstanding_sends = | 478 | num_outstanding_sends = |
477 | atomic_dec_return(&net_device->num_outstanding_sends); | 479 | atomic_dec_return(&net_device->num_outstanding_sends); |
@@ -498,6 +500,7 @@ int netvsc_send(struct hv_device *device, | |||
498 | int ret = 0; | 500 | int ret = 0; |
499 | struct nvsp_message sendMessage; | 501 | struct nvsp_message sendMessage; |
500 | struct net_device *ndev; | 502 | struct net_device *ndev; |
503 | u64 req_id; | ||
501 | 504 | ||
502 | net_device = get_outbound_net_device(device); | 505 | net_device = get_outbound_net_device(device); |
503 | if (!net_device) | 506 | if (!net_device) |
@@ -518,20 +521,24 @@ int netvsc_send(struct hv_device *device, | |||
518 | 0xFFFFFFFF; | 521 | 0xFFFFFFFF; |
519 | sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; | 522 | sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; |
520 | 523 | ||
524 | if (packet->completion.send.send_completion) | ||
525 | req_id = (u64)packet; | ||
526 | else | ||
527 | req_id = 0; | ||
528 | |||
521 | if (packet->page_buf_cnt) { | 529 | if (packet->page_buf_cnt) { |
522 | ret = vmbus_sendpacket_pagebuffer(device->channel, | 530 | ret = vmbus_sendpacket_pagebuffer(device->channel, |
523 | packet->page_buf, | 531 | packet->page_buf, |
524 | packet->page_buf_cnt, | 532 | packet->page_buf_cnt, |
525 | &sendMessage, | 533 | &sendMessage, |
526 | sizeof(struct nvsp_message), | 534 | sizeof(struct nvsp_message), |
527 | (unsigned long)packet); | 535 | req_id); |
528 | } else { | 536 | } else { |
529 | ret = vmbus_sendpacket(device->channel, &sendMessage, | 537 | ret = vmbus_sendpacket(device->channel, &sendMessage, |
530 | sizeof(struct nvsp_message), | 538 | sizeof(struct nvsp_message), |
531 | (unsigned long)packet, | 539 | req_id, |
532 | VM_PKT_DATA_INBAND, | 540 | VM_PKT_DATA_INBAND, |
533 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | 541 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); |
534 | |||
535 | } | 542 | } |
536 | 543 | ||
537 | if (ret == 0) { | 544 | if (ret == 0) { |
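The netvsc change sends a non-zero VMBus request id only when a completion callback is attached to the packet, and the completion handler now tolerates a reply that carries no packet. A minimal sketch of that pairing with invented names (request_id()/on_complete() are not the driver's functions):

	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	struct packet {
		void (*complete)(void *ctx);	/* optional completion callback */
		void *ctx;
	};

	/* Pick the request id the way the hunk does: the packet pointer when a
	 * completion is wanted, 0 when the sender does not care about the reply. */
	static uint64_t request_id(struct packet *pkt)
	{
		return pkt->complete ? (uint64_t)(uintptr_t)pkt : 0;
	}

	/* Completion side: a zero id decodes to no packet, so only call back when
	 * a packet is actually attached to the reply. */
	static void on_complete(uint64_t req_id)
	{
		struct packet *pkt = (struct packet *)(uintptr_t)req_id;

		if (pkt)
			pkt->complete(pkt->ctx);
	}

	static void say_done(void *ctx) { printf("done: %s\n", (const char *)ctx); }

	int main(void)
	{
		struct packet with_cb = { .complete = say_done, .ctx = "with callback" };
		struct packet without_cb = { 0 };

		on_complete(request_id(&with_cb));	/* prints "done: ..." */
		on_complete(request_id(&without_cb));	/* silently ignored */
		return 0;
	}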
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 5f85205cd12b..8341b62e5521 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c | |||
@@ -241,13 +241,11 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, | |||
241 | 241 | ||
242 | if (status == 1) { | 242 | if (status == 1) { |
243 | netif_carrier_on(net); | 243 | netif_carrier_on(net); |
244 | netif_wake_queue(net); | ||
245 | ndev_ctx = netdev_priv(net); | 244 | ndev_ctx = netdev_priv(net); |
246 | schedule_delayed_work(&ndev_ctx->dwork, 0); | 245 | schedule_delayed_work(&ndev_ctx->dwork, 0); |
247 | schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); | 246 | schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20)); |
248 | } else { | 247 | } else { |
249 | netif_carrier_off(net); | 248 | netif_carrier_off(net); |
250 | netif_tx_disable(net); | ||
251 | } | 249 | } |
252 | } | 250 | } |
253 | 251 | ||
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 2b657d4d63a8..0775f0aefd1e 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c | |||
@@ -61,9 +61,6 @@ struct rndis_request { | |||
61 | 61 | ||
62 | static void rndis_filter_send_completion(void *ctx); | 62 | static void rndis_filter_send_completion(void *ctx); |
63 | 63 | ||
64 | static void rndis_filter_send_request_completion(void *ctx); | ||
65 | |||
66 | |||
67 | 64 | ||
68 | static struct rndis_device *get_rndis_device(void) | 65 | static struct rndis_device *get_rndis_device(void) |
69 | { | 66 | { |
@@ -241,10 +238,7 @@ static int rndis_filter_send_request(struct rndis_device *dev, | |||
241 | packet->page_buf[0].len; | 238 | packet->page_buf[0].len; |
242 | } | 239 | } |
243 | 240 | ||
244 | packet->completion.send.send_completion_ctx = req;/* packet; */ | 241 | packet->completion.send.send_completion = NULL; |
245 | packet->completion.send.send_completion = | ||
246 | rndis_filter_send_request_completion; | ||
247 | packet->completion.send.send_completion_tid = (unsigned long)dev; | ||
248 | 242 | ||
249 | ret = netvsc_send(dev->net_dev->dev, packet); | 243 | ret = netvsc_send(dev->net_dev->dev, packet); |
250 | return ret; | 244 | return ret; |
@@ -999,9 +993,3 @@ static void rndis_filter_send_completion(void *ctx) | |||
999 | /* Pass it back to the original handler */ | 993 | /* Pass it back to the original handler */ |
1000 | filter_pkt->completion(filter_pkt->completion_ctx); | 994 | filter_pkt->completion(filter_pkt->completion_ctx); |
1001 | } | 995 | } |
1002 | |||
1003 | |||
1004 | static void rndis_filter_send_request_completion(void *ctx) | ||
1005 | { | ||
1006 | /* Noop */ | ||
1007 | } | ||
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 37add21a3d7d..59ac143dec25 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c | |||
@@ -666,6 +666,7 @@ static int netconsole_netdev_event(struct notifier_block *this, | |||
666 | goto done; | 666 | goto done; |
667 | 667 | ||
668 | spin_lock_irqsave(&target_list_lock, flags); | 668 | spin_lock_irqsave(&target_list_lock, flags); |
669 | restart: | ||
669 | list_for_each_entry(nt, &target_list, list) { | 670 | list_for_each_entry(nt, &target_list, list) { |
670 | netconsole_target_get(nt); | 671 | netconsole_target_get(nt); |
671 | if (nt->np.dev == dev) { | 672 | if (nt->np.dev == dev) { |
@@ -678,15 +679,17 @@ static int netconsole_netdev_event(struct notifier_block *this, | |||
678 | case NETDEV_UNREGISTER: | 679 | case NETDEV_UNREGISTER: |
679 | /* | 680 | /* |
680 | * rtnl_lock already held | 681 | * rtnl_lock already held |
682 | * we might sleep in __netpoll_cleanup() | ||
681 | */ | 683 | */ |
682 | if (nt->np.dev) { | 684 | spin_unlock_irqrestore(&target_list_lock, flags); |
683 | __netpoll_cleanup(&nt->np); | 685 | __netpoll_cleanup(&nt->np); |
684 | dev_put(nt->np.dev); | 686 | spin_lock_irqsave(&target_list_lock, flags); |
685 | nt->np.dev = NULL; | 687 | dev_put(nt->np.dev); |
686 | } | 688 | nt->np.dev = NULL; |
687 | nt->enabled = 0; | 689 | nt->enabled = 0; |
688 | stopped = true; | 690 | stopped = true; |
689 | break; | 691 | netconsole_target_put(nt); |
692 | goto restart; | ||
690 | } | 693 | } |
691 | } | 694 | } |
692 | netconsole_target_put(nt); | 695 | netconsole_target_put(nt); |
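The netconsole hunk drops the spinlock around __netpoll_cleanup(), which may sleep, and then restarts the list walk because the list can change while the lock is released. A compact userspace analogue of that unlock-call-relock-restart pattern (the array stands in for the target list; build with -lpthread):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static int items[4] = { 1, 1, 0, 1 };	/* 1 = needs cleanup */
	static const int nitems = 4;

	/* Stand-in for __netpoll_cleanup(): work that may sleep and therefore
	 * must not run with the list lock held. */
	static void slow_cleanup(int i)
	{
		printf("cleaning item %d\n", i);
	}

	static void cleanup_all(void)
	{
		int i;

		pthread_mutex_lock(&list_lock);
	restart:
		for (i = 0; i < nitems; i++) {
			if (!items[i])
				continue;
			/* Drop the lock around the sleeping call, as the netconsole
			 * hunk does, then scan again from the top because the list
			 * may have changed while it was unlocked. */
			pthread_mutex_unlock(&list_lock);
			slow_cleanup(i);
			pthread_mutex_lock(&list_lock);
			items[i] = 0;
			goto restart;
		}
		pthread_mutex_unlock(&list_lock);
	}

	int main(void)
	{
		cleanup_all();
		return 0;
	}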
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 3b6e9b83342d..7c769d8e25ad 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig | |||
@@ -268,7 +268,7 @@ config USB_NET_SMSC75XX | |||
268 | select CRC16 | 268 | select CRC16 |
269 | select CRC32 | 269 | select CRC32 |
270 | help | 270 | help |
271 | This option adds support for SMSC LAN95XX based USB 2.0 | 271 | This option adds support for SMSC LAN75XX based USB 2.0 |
272 | Gigabit Ethernet adapters. | 272 | Gigabit Ethernet adapters. |
273 | 273 | ||
274 | config USB_NET_SMSC95XX | 274 | config USB_NET_SMSC95XX |
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 248d2dc765a5..16c842997291 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c | |||
@@ -68,18 +68,9 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf) | |||
68 | struct cdc_ncm_ctx *ctx; | 68 | struct cdc_ncm_ctx *ctx; |
69 | struct usb_driver *subdriver = ERR_PTR(-ENODEV); | 69 | struct usb_driver *subdriver = ERR_PTR(-ENODEV); |
70 | int ret = -ENODEV; | 70 | int ret = -ENODEV; |
71 | u8 data_altsetting = CDC_NCM_DATA_ALTSETTING_NCM; | 71 | u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf); |
72 | struct cdc_mbim_state *info = (void *)&dev->data; | 72 | struct cdc_mbim_state *info = (void *)&dev->data; |
73 | 73 | ||
74 | /* see if interface supports MBIM alternate setting */ | ||
75 | if (intf->num_altsetting == 2) { | ||
76 | if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) | ||
77 | usb_set_interface(dev->udev, | ||
78 | intf->cur_altsetting->desc.bInterfaceNumber, | ||
79 | CDC_NCM_COMM_ALTSETTING_MBIM); | ||
80 | data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM; | ||
81 | } | ||
82 | |||
83 | /* Probably NCM, defer for cdc_ncm_bind */ | 74 | /* Probably NCM, defer for cdc_ncm_bind */ |
84 | if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) | 75 | if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) |
85 | goto err; | 76 | goto err; |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 61b74a2b89ac..4709fa3497cf 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -55,6 +55,14 @@ | |||
55 | 55 | ||
56 | #define DRIVER_VERSION "14-Mar-2012" | 56 | #define DRIVER_VERSION "14-Mar-2012" |
57 | 57 | ||
58 | #if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) | ||
59 | static bool prefer_mbim = true; | ||
60 | #else | ||
61 | static bool prefer_mbim; | ||
62 | #endif | ||
63 | module_param(prefer_mbim, bool, S_IRUGO | S_IWUSR); | ||
64 | MODULE_PARM_DESC(prefer_mbim, "Prefer MBIM setting on dual NCM/MBIM functions"); | ||
65 | |||
58 | static void cdc_ncm_txpath_bh(unsigned long param); | 66 | static void cdc_ncm_txpath_bh(unsigned long param); |
59 | static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); | 67 | static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); |
60 | static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); | 68 | static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); |
@@ -550,9 +558,12 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf) | |||
550 | } | 558 | } |
551 | EXPORT_SYMBOL_GPL(cdc_ncm_unbind); | 559 | EXPORT_SYMBOL_GPL(cdc_ncm_unbind); |
552 | 560 | ||
553 | static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) | 561 | /* Select the MBIM altsetting iff it is preferred and available, |
562 | * returning the number of the corresponding data interface altsetting | ||
563 | */ | ||
564 | u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf) | ||
554 | { | 565 | { |
555 | int ret; | 566 | struct usb_host_interface *alt; |
556 | 567 | ||
557 | /* The MBIM spec defines a NCM compatible default altsetting, | 568 | /* The MBIM spec defines a NCM compatible default altsetting, |
558 | * which we may have matched: | 569 | * which we may have matched: |
@@ -568,23 +579,27 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) | |||
568 | * endpoint descriptors, shall be constructed according to | 579 | * endpoint descriptors, shall be constructed according to |
569 | * the rules given in section 6 (USB Device Model) of this | 580 | * the rules given in section 6 (USB Device Model) of this |
570 | * specification." | 581 | * specification." |
571 | * | ||
572 | * Do not bind to such interfaces, allowing cdc_mbim to handle | ||
573 | * them | ||
574 | */ | 582 | */ |
575 | #if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) | 583 | if (prefer_mbim && intf->num_altsetting == 2) { |
576 | if ((intf->num_altsetting == 2) && | 584 | alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM); |
577 | !usb_set_interface(dev->udev, | 585 | if (alt && cdc_ncm_comm_intf_is_mbim(alt) && |
578 | intf->cur_altsetting->desc.bInterfaceNumber, | 586 | !usb_set_interface(dev->udev, |
579 | CDC_NCM_COMM_ALTSETTING_MBIM)) { | 587 | intf->cur_altsetting->desc.bInterfaceNumber, |
580 | if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) | 588 | CDC_NCM_COMM_ALTSETTING_MBIM)) |
581 | return -ENODEV; | 589 | return CDC_NCM_DATA_ALTSETTING_MBIM; |
582 | else | ||
583 | usb_set_interface(dev->udev, | ||
584 | intf->cur_altsetting->desc.bInterfaceNumber, | ||
585 | CDC_NCM_COMM_ALTSETTING_NCM); | ||
586 | } | 590 | } |
587 | #endif | 591 | return CDC_NCM_DATA_ALTSETTING_NCM; |
592 | } | ||
593 | EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting); | ||
594 | |||
595 | static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) | ||
596 | { | ||
597 | int ret; | ||
598 | |||
599 | /* MBIM backwards compatible function? */ | ||
600 | cdc_ncm_select_altsetting(dev, intf); | ||
601 | if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) | ||
602 | return -ENODEV; | ||
588 | 603 | ||
589 | /* NCM data altsetting is always 1 */ | 604 | /* NCM data altsetting is always 1 */ |
590 | ret = cdc_ncm_bind_common(dev, intf, 1); | 605 | ret = cdc_ncm_bind_common(dev, intf, 1); |
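The cdc_ncm hunk introduces a prefer_mbim module parameter whose default depends on whether the MBIM driver is built, and moves the altsetting choice into a shared helper. A sketch of that compile-time default plus selection logic, under an illustrative HAVE_MBIM macro (not a kernel config symbol):

	#include <stdbool.h>
	#include <stdio.h>

	/* Default to MBIM only when the MBIM driver is part of the build,
	 * mirroring the prefer_mbim parameter's IS_ENABLED-based default. */
	#ifdef HAVE_MBIM
	static bool prefer_mbim = true;
	#else
	static bool prefer_mbim;	/* false */
	#endif

	enum altsetting { ALT_NCM = 1, ALT_MBIM = 2 };

	/* Pick MBIM only if it is preferred and the function actually exposes a
	 * second, MBIM-capable altsetting; otherwise fall back to NCM. */
	static enum altsetting select_altsetting(int num_altsettings, bool mbim_alt_ok)
	{
		if (prefer_mbim && num_altsettings == 2 && mbim_alt_ok)
			return ALT_MBIM;
		return ALT_NCM;
	}

	int main(void)
	{
		printf("prefer_mbim=%d -> altsetting %d\n",
		       prefer_mbim, select_altsetting(2, true));
		return 0;
	}

Compiling with or without -DHAVE_MBIM flips the default, which is the same effect the IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) guard has in the patch.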
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index efb5c7c33a28..968d5d50751d 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -139,16 +139,9 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) | |||
139 | 139 | ||
140 | BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); | 140 | BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); |
141 | 141 | ||
142 | /* control and data is shared? */ | 142 | /* set up initial state */ |
143 | if (intf->cur_altsetting->desc.bNumEndpoints == 3) { | 143 | info->control = intf; |
144 | info->control = intf; | 144 | info->data = intf; |
145 | info->data = intf; | ||
146 | goto shared; | ||
147 | } | ||
148 | |||
149 | /* else require a single interrupt status endpoint on control intf */ | ||
150 | if (intf->cur_altsetting->desc.bNumEndpoints != 1) | ||
151 | goto err; | ||
152 | 145 | ||
153 | /* and a number of CDC descriptors */ | 146 | /* and a number of CDC descriptors */ |
154 | while (len > 3) { | 147 | while (len > 3) { |
@@ -207,25 +200,14 @@ next_desc: | |||
207 | buf += h->bLength; | 200 | buf += h->bLength; |
208 | } | 201 | } |
209 | 202 | ||
210 | /* did we find all the required ones? */ | 203 | /* Use separate control and data interfaces if we found a CDC Union */ |
211 | if (!(found & (1 << USB_CDC_HEADER_TYPE)) || | 204 | if (cdc_union) { |
212 | !(found & (1 << USB_CDC_UNION_TYPE))) { | 205 | info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0); |
213 | dev_err(&intf->dev, "CDC functional descriptors missing\n"); | 206 | if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) { |
214 | goto err; | 207 | dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n", |
215 | } | 208 | cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0); |
216 | 209 | goto err; | |
217 | /* verify CDC Union */ | 210 | } |
218 | if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) { | ||
219 | dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0); | ||
220 | goto err; | ||
221 | } | ||
222 | |||
223 | /* need to save these for unbind */ | ||
224 | info->control = intf; | ||
225 | info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0); | ||
226 | if (!info->data) { | ||
227 | dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0); | ||
228 | goto err; | ||
229 | } | 211 | } |
230 | 212 | ||
231 | /* errors aren't fatal - we can live with the dynamic address */ | 213 | /* errors aren't fatal - we can live with the dynamic address */ |
@@ -235,11 +217,12 @@ next_desc: | |||
235 | } | 217 | } |
236 | 218 | ||
237 | /* claim data interface and set it up */ | 219 | /* claim data interface and set it up */ |
238 | status = usb_driver_claim_interface(driver, info->data, dev); | 220 | if (info->control != info->data) { |
239 | if (status < 0) | 221 | status = usb_driver_claim_interface(driver, info->data, dev); |
240 | goto err; | 222 | if (status < 0) |
223 | goto err; | ||
224 | } | ||
241 | 225 | ||
242 | shared: | ||
243 | status = qmi_wwan_register_subdriver(dev); | 226 | status = qmi_wwan_register_subdriver(dev); |
244 | if (status < 0 && info->control != info->data) { | 227 | if (status < 0 && info->control != info->data) { |
245 | usb_set_intfdata(info->data, NULL); | 228 | usb_set_intfdata(info->data, NULL); |
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 9abe51710f22..1a15ec14c386 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c | |||
@@ -914,8 +914,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size) | |||
914 | static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) | 914 | static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu) |
915 | { | 915 | { |
916 | struct usbnet *dev = netdev_priv(netdev); | 916 | struct usbnet *dev = netdev_priv(netdev); |
917 | int ret; | ||
918 | |||
919 | if (new_mtu > MAX_SINGLE_PACKET_SIZE) | ||
920 | return -EINVAL; | ||
917 | 921 | ||
918 | int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu); | 922 | ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); |
919 | if (ret < 0) { | 923 | if (ret < 0) { |
920 | netdev_warn(dev->net, "Failed to set mac rx frame length\n"); | 924 | netdev_warn(dev->net, "Failed to set mac rx frame length\n"); |
921 | return ret; | 925 | return ret; |
@@ -1324,7 +1328,7 @@ static int smsc75xx_reset(struct usbnet *dev) | |||
1324 | 1328 | ||
1325 | netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); | 1329 | netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf); |
1326 | 1330 | ||
1327 | ret = smsc75xx_set_rx_max_frame_length(dev, 1514); | 1331 | ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); |
1328 | if (ret < 0) { | 1332 | if (ret < 0) { |
1329 | netdev_warn(dev->net, "Failed to set max rx frame length\n"); | 1333 | netdev_warn(dev->net, "Failed to set max rx frame length\n"); |
1330 | return ret; | 1334 | return ret; |
@@ -2134,8 +2138,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) | |||
2134 | else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) | 2138 | else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT)) |
2135 | dev->net->stats.rx_frame_errors++; | 2139 | dev->net->stats.rx_frame_errors++; |
2136 | } else { | 2140 | } else { |
2137 | /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */ | 2141 | /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */ |
2138 | if (unlikely(size > (ETH_FRAME_LEN + 12))) { | 2142 | if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) { |
2139 | netif_dbg(dev, rx_err, dev->net, | 2143 | netif_dbg(dev, rx_err, dev->net, |
2140 | "size err rx_cmd_a=0x%08x\n", | 2144 | "size err rx_cmd_a=0x%08x\n", |
2141 | rx_cmd_a); | 2145 | rx_cmd_a); |
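The smsc75xx hunk validates the requested MTU against the hardware limit and programs the MAC with the full frame length (MTU plus the Ethernet header) rather than the bare MTU. A small sketch of that shape; the limit value here is illustrative, not the driver's actual constant:

	#include <stdio.h>

	#define ETH_HLEN		14	/* Ethernet header, as in <linux/if_ether.h> */
	#define MAX_SINGLE_PACKET_SIZE	9000	/* illustrative hardware limit */

	/* Reject MTUs the hardware cannot carry, then derive the RX max frame
	 * length from the MTU plus the Ethernet header. */
	static int change_mtu(int new_mtu, int *rx_max_frame_len)
	{
		if (new_mtu > MAX_SINGLE_PACKET_SIZE)
			return -1;

		*rx_max_frame_len = new_mtu + ETH_HLEN;
		return 0;
	}

	int main(void)
	{
		int frame_len;

		if (change_mtu(1500, &frame_len) == 0)
			printf("rx max frame length: %d\n", frame_len);	/* 1514 */
		if (change_mtu(65000, &frame_len) != 0)
			printf("oversized MTU rejected\n");
		return 0;
	}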
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 4cc13940c895..f76c3ca07a45 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c | |||
@@ -1023,6 +1023,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah, | |||
1023 | AR_PHY_AGC_CONTROL_FLTR_CAL | | 1023 | AR_PHY_AGC_CONTROL_FLTR_CAL | |
1024 | AR_PHY_AGC_CONTROL_PKDET_CAL; | 1024 | AR_PHY_AGC_CONTROL_PKDET_CAL; |
1025 | 1025 | ||
1026 | /* Use chip chainmask only for calibration */ | ||
1026 | ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask); | 1027 | ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask); |
1027 | 1028 | ||
1028 | if (rtt) { | 1029 | if (rtt) { |
@@ -1150,6 +1151,9 @@ skip_tx_iqcal: | |||
1150 | ar9003_hw_rtt_disable(ah); | 1151 | ar9003_hw_rtt_disable(ah); |
1151 | } | 1152 | } |
1152 | 1153 | ||
1154 | /* Revert chainmask to runtime parameters */ | ||
1155 | ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); | ||
1156 | |||
1153 | /* Initialize list pointers */ | 1157 | /* Initialize list pointers */ |
1154 | ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; | 1158 | ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; |
1155 | 1159 | ||
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c index ade3afb21f91..7fdac6c7b3ea 100644 --- a/drivers/net/wireless/ath/ath9k/link.c +++ b/drivers/net/wireless/ath/ath9k/link.c | |||
@@ -28,21 +28,21 @@ void ath_tx_complete_poll_work(struct work_struct *work) | |||
28 | int i; | 28 | int i; |
29 | bool needreset = false; | 29 | bool needreset = false; |
30 | 30 | ||
31 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) | 31 | for (i = 0; i < IEEE80211_NUM_ACS; i++) { |
32 | if (ATH_TXQ_SETUP(sc, i)) { | 32 | txq = sc->tx.txq_map[i]; |
33 | txq = &sc->tx.txq[i]; | 33 | |
34 | ath_txq_lock(sc, txq); | 34 | ath_txq_lock(sc, txq); |
35 | if (txq->axq_depth) { | 35 | if (txq->axq_depth) { |
36 | if (txq->axq_tx_inprogress) { | 36 | if (txq->axq_tx_inprogress) { |
37 | needreset = true; | 37 | needreset = true; |
38 | ath_txq_unlock(sc, txq); | 38 | ath_txq_unlock(sc, txq); |
39 | break; | 39 | break; |
40 | } else { | 40 | } else { |
41 | txq->axq_tx_inprogress = true; | 41 | txq->axq_tx_inprogress = true; |
42 | } | ||
43 | } | 42 | } |
44 | ath_txq_unlock_complete(sc, txq); | ||
45 | } | 43 | } |
44 | ath_txq_unlock_complete(sc, txq); | ||
45 | } | ||
46 | 46 | ||
47 | if (needreset) { | 47 | if (needreset) { |
48 | ath_dbg(ath9k_hw_common(sc->sc_ah), RESET, | 48 | ath_dbg(ath9k_hw_common(sc->sc_ah), RESET, |
@@ -170,7 +170,8 @@ void ath_rx_poll(unsigned long data) | |||
170 | { | 170 | { |
171 | struct ath_softc *sc = (struct ath_softc *)data; | 171 | struct ath_softc *sc = (struct ath_softc *)data; |
172 | 172 | ||
173 | ieee80211_queue_work(sc->hw, &sc->hw_check_work); | 173 | if (!test_bit(SC_OP_INVALID, &sc->sc_flags)) |
174 | ieee80211_queue_work(sc->hw, &sc->hw_check_work); | ||
174 | } | 175 | } |
175 | 176 | ||
176 | /* | 177 | /* |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 6e66f9c6782b..988372d218a4 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -280,6 +280,10 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan) | |||
280 | if (r) { | 280 | if (r) { |
281 | ath_err(common, | 281 | ath_err(common, |
282 | "Unable to reset channel, reset status %d\n", r); | 282 | "Unable to reset channel, reset status %d\n", r); |
283 | |||
284 | ath9k_hw_enable_interrupts(ah); | ||
285 | ath9k_queue_reset(sc, RESET_TYPE_BB_HANG); | ||
286 | |||
283 | goto out; | 287 | goto out; |
284 | } | 288 | } |
285 | 289 | ||
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 38bc5a7997ff..122146943bf2 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c | |||
@@ -1487,8 +1487,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1487 | const struct b43_dma_ops *ops; | 1487 | const struct b43_dma_ops *ops; |
1488 | struct b43_dmaring *ring; | 1488 | struct b43_dmaring *ring; |
1489 | struct b43_dmadesc_meta *meta; | 1489 | struct b43_dmadesc_meta *meta; |
1490 | static const struct b43_txstatus fake; /* filled with 0 */ | ||
1491 | const struct b43_txstatus *txstat; | ||
1490 | int slot, firstused; | 1492 | int slot, firstused; |
1491 | bool frame_succeed; | 1493 | bool frame_succeed; |
1494 | int skip; | ||
1495 | static u8 err_out1, err_out2; | ||
1492 | 1496 | ||
1493 | ring = parse_cookie(dev, status->cookie, &slot); | 1497 | ring = parse_cookie(dev, status->cookie, &slot); |
1494 | if (unlikely(!ring)) | 1498 | if (unlikely(!ring)) |
@@ -1501,13 +1505,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1501 | firstused = ring->current_slot - ring->used_slots + 1; | 1505 | firstused = ring->current_slot - ring->used_slots + 1; |
1502 | if (firstused < 0) | 1506 | if (firstused < 0) |
1503 | firstused = ring->nr_slots + firstused; | 1507 | firstused = ring->nr_slots + firstused; |
1508 | |||
1509 | skip = 0; | ||
1504 | if (unlikely(slot != firstused)) { | 1510 | if (unlikely(slot != firstused)) { |
1505 | /* This possibly is a firmware bug and will result in | 1511 | /* This possibly is a firmware bug and will result in |
1506 | * malfunction, memory leaks and/or stall of DMA functionality. */ | 1512 | * malfunction, memory leaks and/or stall of DMA functionality. |
1507 | b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. " | 1513 | */ |
1508 | "Expected %d, but got %d\n", | 1514 | if (slot == next_slot(ring, next_slot(ring, firstused))) { |
1509 | ring->index, firstused, slot); | 1515 | /* If a single header/data pair was missed, skip over |
1510 | return; | 1516 | * the first two slots in an attempt to recover. |
1517 | */ | ||
1518 | slot = firstused; | ||
1519 | skip = 2; | ||
1520 | if (!err_out1) { | ||
1521 | /* Report the error once. */ | ||
1522 | b43dbg(dev->wl, | ||
1523 | "Skip on DMA ring %d slot %d.\n", | ||
1524 | ring->index, slot); | ||
1525 | err_out1 = 1; | ||
1526 | } | ||
1527 | } else { | ||
1528 | /* More than a single header/data pair were missed. | ||
1529 | * Report this error once. | ||
1530 | */ | ||
1531 | if (!err_out2) | ||
1532 | b43dbg(dev->wl, | ||
1533 | "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n", | ||
1534 | ring->index, firstused, slot); | ||
1535 | err_out2 = 1; | ||
1536 | return; | ||
1537 | } | ||
1511 | } | 1538 | } |
1512 | 1539 | ||
1513 | ops = ring->ops; | 1540 | ops = ring->ops; |
@@ -1522,11 +1549,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1522 | slot, firstused, ring->index); | 1549 | slot, firstused, ring->index); |
1523 | break; | 1550 | break; |
1524 | } | 1551 | } |
1552 | |||
1525 | if (meta->skb) { | 1553 | if (meta->skb) { |
1526 | struct b43_private_tx_info *priv_info = | 1554 | struct b43_private_tx_info *priv_info = |
1527 | b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); | 1555 | b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb)); |
1528 | 1556 | ||
1529 | unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); | 1557 | unmap_descbuffer(ring, meta->dmaaddr, |
1558 | meta->skb->len, 1); | ||
1530 | kfree(priv_info->bouncebuffer); | 1559 | kfree(priv_info->bouncebuffer); |
1531 | priv_info->bouncebuffer = NULL; | 1560 | priv_info->bouncebuffer = NULL; |
1532 | } else { | 1561 | } else { |
@@ -1538,8 +1567,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1538 | struct ieee80211_tx_info *info; | 1567 | struct ieee80211_tx_info *info; |
1539 | 1568 | ||
1540 | if (unlikely(!meta->skb)) { | 1569 | if (unlikely(!meta->skb)) { |
1541 | /* This is a scatter-gather fragment of a frame, so | 1570 | /* This is a scatter-gather fragment of a frame, |
1542 | * the skb pointer must not be NULL. */ | 1571 | * so the skb pointer must not be NULL. |
1572 | */ | ||
1543 | b43dbg(dev->wl, "TX status unexpected NULL skb " | 1573 | b43dbg(dev->wl, "TX status unexpected NULL skb " |
1544 | "at slot %d (first=%d) on ring %d\n", | 1574 | "at slot %d (first=%d) on ring %d\n", |
1545 | slot, firstused, ring->index); | 1575 | slot, firstused, ring->index); |
@@ -1550,9 +1580,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1550 | 1580 | ||
1551 | /* | 1581 | /* |
1552 | * Call back to inform the ieee80211 subsystem about | 1582 | * Call back to inform the ieee80211 subsystem about |
1553 | * the status of the transmission. | 1583 | * the status of the transmission. When skipping over |
1584 | * a missed TX status report, use a status structure | ||
1585 | * filled with zeros to indicate that the frame was not | ||
1586 | * sent (frame_count 0) and not acknowledged | ||
1554 | */ | 1587 | */ |
1555 | frame_succeed = b43_fill_txstatus_report(dev, info, status); | 1588 | if (unlikely(skip)) |
1589 | txstat = &fake; | ||
1590 | else | ||
1591 | txstat = status; | ||
1592 | |||
1593 | frame_succeed = b43_fill_txstatus_report(dev, info, | ||
1594 | txstat); | ||
1556 | #ifdef CONFIG_B43_DEBUG | 1595 | #ifdef CONFIG_B43_DEBUG |
1557 | if (frame_succeed) | 1596 | if (frame_succeed) |
1558 | ring->nr_succeed_tx_packets++; | 1597 | ring->nr_succeed_tx_packets++; |
@@ -1580,12 +1619,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev, | |||
1580 | /* Everything unmapped and free'd. So it's not used anymore. */ | 1619 | /* Everything unmapped and free'd. So it's not used anymore. */ |
1581 | ring->used_slots--; | 1620 | ring->used_slots--; |
1582 | 1621 | ||
1583 | if (meta->is_last_fragment) { | 1622 | if (meta->is_last_fragment && !skip) { |
1584 | /* This is the last scatter-gather | 1623 | /* This is the last scatter-gather |
1585 | * fragment of the frame. We are done. */ | 1624 | * fragment of the frame. We are done. */ |
1586 | break; | 1625 | break; |
1587 | } | 1626 | } |
1588 | slot = next_slot(ring, slot); | 1627 | slot = next_slot(ring, slot); |
1628 | if (skip > 0) | ||
1629 | --skip; | ||
1589 | } | 1630 | } |
1590 | if (ring->stopped) { | 1631 | if (ring->stopped) { |
1591 | B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); | 1632 | B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME); |
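The b43 hunk recovers from a TX status report that does not start at the first used ring slot: if exactly one header/data pair (two slots) was missed it processes from the first used slot and skips two slots, otherwise it drops the report, logging each error class only once. A sketch of just the classification step, with the ring size and helper names invented for illustration:

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_SLOTS 16

	/* Ring index helper with the usual wraparound. */
	static int next_slot(int slot)
	{
		return (slot + 1) % NR_SLOTS;
	}

	/* Decide how to handle a status whose slot differs from the first used
	 * slot: skip two slots if exactly one pair was missed, otherwise give up
	 * on this report. Returns true when the report should be processed. */
	static bool classify_status(int slot, int firstused, int *skip)
	{
		*skip = 0;
		if (slot == firstused)
			return true;			/* in order */
		if (slot == next_slot(next_slot(firstused))) {
			*skip = 2;			/* recoverable: one pair missed */
			return true;
		}
		return false;				/* out of order beyond recovery */
	}

	int main(void)
	{
		int skip;

		printf("%d skip=%d\n", classify_status(5, 5, &skip), skip);	/* 1 skip=0 */
		printf("%d skip=%d\n", classify_status(7, 5, &skip), skip);	/* 1 skip=2 */
		printf("%d skip=%d\n", classify_status(9, 5, &skip), skip);	/* 0 skip=0 */
		return 0;
	}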
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index 3c35382ee6c2..e8486c1e091a 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c | |||
@@ -1564,7 +1564,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) | |||
1564 | u16 clip_off[2] = { 0xFFFF, 0xFFFF }; | 1564 | u16 clip_off[2] = { 0xFFFF, 0xFFFF }; |
1565 | 1565 | ||
1566 | u8 vcm_final = 0; | 1566 | u8 vcm_final = 0; |
1567 | s8 offset[4]; | 1567 | s32 offset[4]; |
1568 | s32 results[8][4] = { }; | 1568 | s32 results[8][4] = { }; |
1569 | s32 results_min[4] = { }; | 1569 | s32 results_min[4] = { }; |
1570 | s32 poll_results[4] = { }; | 1570 | s32 poll_results[4] = { }; |
@@ -1615,7 +1615,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) | |||
1615 | } | 1615 | } |
1616 | for (i = 0; i < 4; i += 2) { | 1616 | for (i = 0; i < 4; i += 2) { |
1617 | s32 curr; | 1617 | s32 curr; |
1618 | s32 mind = 40; | 1618 | s32 mind = 0x100000; |
1619 | s32 minpoll = 249; | 1619 | s32 minpoll = 249; |
1620 | u8 minvcm = 0; | 1620 | u8 minvcm = 0; |
1621 | if (2 * core != i) | 1621 | if (2 * core != i) |
@@ -1732,7 +1732,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) | |||
1732 | u8 regs_save_radio[2]; | 1732 | u8 regs_save_radio[2]; |
1733 | u16 regs_save_phy[2]; | 1733 | u16 regs_save_phy[2]; |
1734 | 1734 | ||
1735 | s8 offset[4]; | 1735 | s32 offset[4]; |
1736 | u8 core; | 1736 | u8 core; |
1737 | u8 rail; | 1737 | u8 rail; |
1738 | 1738 | ||
@@ -1799,7 +1799,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type) | |||
1799 | } | 1799 | } |
1800 | 1800 | ||
1801 | for (i = 0; i < 4; i++) { | 1801 | for (i = 0; i < 4; i++) { |
1802 | s32 mind = 40; | 1802 | s32 mind = 0x100000; |
1803 | u8 minvcm = 0; | 1803 | u8 minvcm = 0; |
1804 | s32 minpoll = 249; | 1804 | s32 minpoll = 249; |
1805 | s32 curr; | 1805 | s32 curr; |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 4469321c0eb3..35fc68be158d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | |||
@@ -3317,15 +3317,15 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus) | |||
3317 | goto err; | 3317 | goto err; |
3318 | } | 3318 | } |
3319 | 3319 | ||
3320 | /* External image takes precedence if specified */ | ||
3321 | if (brcmf_sdbrcm_download_code_file(bus)) { | 3320 | if (brcmf_sdbrcm_download_code_file(bus)) { |
3322 | brcmf_err("dongle image file download failed\n"); | 3321 | brcmf_err("dongle image file download failed\n"); |
3323 | goto err; | 3322 | goto err; |
3324 | } | 3323 | } |
3325 | 3324 | ||
3326 | /* External nvram takes precedence if specified */ | 3325 | if (brcmf_sdbrcm_download_nvram(bus)) { |
3327 | if (brcmf_sdbrcm_download_nvram(bus)) | ||
3328 | brcmf_err("dongle nvram file download failed\n"); | 3326 | brcmf_err("dongle nvram file download failed\n"); |
3327 | goto err; | ||
3328 | } | ||
3329 | 3329 | ||
3330 | /* Take arm out of reset */ | 3330 | /* Take arm out of reset */ |
3331 | if (brcmf_sdbrcm_download_state(bus, false)) { | 3331 | if (brcmf_sdbrcm_download_state(bus, false)) { |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 2af9c0f0798d..ec46ffff5409 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | |||
@@ -1891,8 +1891,10 @@ static s32 | |||
1891 | brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, | 1891 | brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, |
1892 | u8 key_idx, const u8 *mac_addr, struct key_params *params) | 1892 | u8 key_idx, const u8 *mac_addr, struct key_params *params) |
1893 | { | 1893 | { |
1894 | struct brcmf_if *ifp = netdev_priv(ndev); | ||
1894 | struct brcmf_wsec_key key; | 1895 | struct brcmf_wsec_key key; |
1895 | s32 err = 0; | 1896 | s32 err = 0; |
1897 | u8 keybuf[8]; | ||
1896 | 1898 | ||
1897 | memset(&key, 0, sizeof(key)); | 1899 | memset(&key, 0, sizeof(key)); |
1898 | key.index = (u32) key_idx; | 1900 | key.index = (u32) key_idx; |
@@ -1916,8 +1918,9 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, | |||
1916 | brcmf_dbg(CONN, "Setting the key index %d\n", key.index); | 1918 | brcmf_dbg(CONN, "Setting the key index %d\n", key.index); |
1917 | memcpy(key.data, params->key, key.len); | 1919 | memcpy(key.data, params->key, key.len); |
1918 | 1920 | ||
1919 | if (params->cipher == WLAN_CIPHER_SUITE_TKIP) { | 1921 | if ((ifp->vif->mode != WL_MODE_AP) && |
1920 | u8 keybuf[8]; | 1922 | (params->cipher == WLAN_CIPHER_SUITE_TKIP)) { |
1923 | brcmf_dbg(CONN, "Swapping RX/TX MIC key\n"); | ||
1921 | memcpy(keybuf, &key.data[24], sizeof(keybuf)); | 1924 | memcpy(keybuf, &key.data[24], sizeof(keybuf)); |
1922 | memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); | 1925 | memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); |
1923 | memcpy(&key.data[16], keybuf, sizeof(keybuf)); | 1926 | memcpy(&key.data[16], keybuf, sizeof(keybuf)); |
@@ -2013,7 +2016,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, | |||
2013 | break; | 2016 | break; |
2014 | case WLAN_CIPHER_SUITE_TKIP: | 2017 | case WLAN_CIPHER_SUITE_TKIP: |
2015 | if (ifp->vif->mode != WL_MODE_AP) { | 2018 | if (ifp->vif->mode != WL_MODE_AP) { |
2016 | brcmf_dbg(CONN, "Swapping key\n"); | 2019 | brcmf_dbg(CONN, "Swapping RX/TX MIC key\n"); |
2017 | memcpy(keybuf, &key.data[24], sizeof(keybuf)); | 2020 | memcpy(keybuf, &key.data[24], sizeof(keybuf)); |
2018 | memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); | 2021 | memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); |
2019 | memcpy(&key.data[16], keybuf, sizeof(keybuf)); | 2022 | memcpy(&key.data[16], keybuf, sizeof(keybuf)); |
@@ -2118,8 +2121,7 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, | |||
2118 | err = -EAGAIN; | 2121 | err = -EAGAIN; |
2119 | goto done; | 2122 | goto done; |
2120 | } | 2123 | } |
2121 | switch (wsec & ~SES_OW_ENABLED) { | 2124 | if (wsec & WEP_ENABLED) { |
2122 | case WEP_ENABLED: | ||
2123 | sec = &profile->sec; | 2125 | sec = &profile->sec; |
2124 | if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { | 2126 | if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) { |
2125 | params.cipher = WLAN_CIPHER_SUITE_WEP40; | 2127 | params.cipher = WLAN_CIPHER_SUITE_WEP40; |
@@ -2128,16 +2130,13 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, | |||
2128 | params.cipher = WLAN_CIPHER_SUITE_WEP104; | 2130 | params.cipher = WLAN_CIPHER_SUITE_WEP104; |
2129 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n"); | 2131 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n"); |
2130 | } | 2132 | } |
2131 | break; | 2133 | } else if (wsec & TKIP_ENABLED) { |
2132 | case TKIP_ENABLED: | ||
2133 | params.cipher = WLAN_CIPHER_SUITE_TKIP; | 2134 | params.cipher = WLAN_CIPHER_SUITE_TKIP; |
2134 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n"); | 2135 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n"); |
2135 | break; | 2136 | } else if (wsec & AES_ENABLED) { |
2136 | case AES_ENABLED: | ||
2137 | params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; | 2137 | params.cipher = WLAN_CIPHER_SUITE_AES_CMAC; |
2138 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n"); | 2138 | brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n"); |
2139 | break; | 2139 | } else { |
2140 | default: | ||
2141 | brcmf_err("Invalid algo (0x%x)\n", wsec); | 2140 | brcmf_err("Invalid algo (0x%x)\n", wsec); |
2142 | err = -EINVAL; | 2141 | err = -EINVAL; |
2143 | goto done; | 2142 | goto done; |
@@ -3824,8 +3823,9 @@ exit: | |||
3824 | static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) | 3823 | static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) |
3825 | { | 3824 | { |
3826 | struct brcmf_if *ifp = netdev_priv(ndev); | 3825 | struct brcmf_if *ifp = netdev_priv(ndev); |
3827 | s32 err = -EPERM; | 3826 | s32 err; |
3828 | struct brcmf_fil_bss_enable_le bss_enable; | 3827 | struct brcmf_fil_bss_enable_le bss_enable; |
3828 | struct brcmf_join_params join_params; | ||
3829 | 3829 | ||
3830 | brcmf_dbg(TRACE, "Enter\n"); | 3830 | brcmf_dbg(TRACE, "Enter\n"); |
3831 | 3831 | ||
@@ -3833,16 +3833,21 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) | |||
3833 | /* Due to most likely deauths outstanding we sleep */ | 3833 | /* Due to most likely deauths outstanding we sleep */ |
3834 | /* first to make sure they get processed by fw. */ | 3834 | /* first to make sure they get processed by fw. */ |
3835 | msleep(400); | 3835 | msleep(400); |
3836 | err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); | 3836 | |
3837 | if (err < 0) { | 3837 | memset(&join_params, 0, sizeof(join_params)); |
3838 | brcmf_err("setting AP mode failed %d\n", err); | 3838 | err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, |
3839 | goto exit; | 3839 | &join_params, sizeof(join_params)); |
3840 | } | 3840 | if (err < 0) |
3841 | brcmf_err("SET SSID error (%d)\n", err); | ||
3841 | err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0); | 3842 | err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0); |
3842 | if (err < 0) { | 3843 | if (err < 0) |
3843 | brcmf_err("BRCMF_C_UP error %d\n", err); | 3844 | brcmf_err("BRCMF_C_UP error %d\n", err); |
3844 | goto exit; | 3845 | err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0); |
3845 | } | 3846 | if (err < 0) |
3847 | brcmf_err("setting AP mode failed %d\n", err); | ||
3848 | err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0); | ||
3849 | if (err < 0) | ||
3850 | brcmf_err("setting INFRA mode failed %d\n", err); | ||
3846 | } else { | 3851 | } else { |
3847 | bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx); | 3852 | bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx); |
3848 | bss_enable.enable = cpu_to_le32(0); | 3853 | bss_enable.enable = cpu_to_le32(0); |
@@ -3855,7 +3860,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) | |||
3855 | set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); | 3860 | set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state); |
3856 | clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); | 3861 | clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); |
3857 | 3862 | ||
3858 | exit: | ||
3859 | return err; | 3863 | return err; |
3860 | } | 3864 | } |
3861 | 3865 | ||
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c index 21a824232478..18d37645e2cd 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c | |||
@@ -1137,9 +1137,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi, | |||
1137 | gain0_15 = ((biq1 & 0xf) << 12) | | 1137 | gain0_15 = ((biq1 & 0xf) << 12) | |
1138 | ((tia & 0xf) << 8) | | 1138 | ((tia & 0xf) << 8) | |
1139 | ((lna2 & 0x3) << 6) | | 1139 | ((lna2 & 0x3) << 6) | |
1140 | ((lna2 & 0x3) << 4) | | 1140 | ((lna2 & |
1141 | ((lna1 & 0x3) << 2) | | 1141 | 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0); |
1142 | ((lna1 & 0x3) << 0); | ||
1143 | 1142 | ||
1144 | mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0); | 1143 | mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0); |
1145 | mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0); | 1144 | mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0); |
@@ -1157,8 +1156,6 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi, | |||
1157 | } | 1156 | } |
1158 | 1157 | ||
1159 | mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0); | 1158 | mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0); |
1160 | mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11); | ||
1161 | mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3); | ||
1162 | 1159 | ||
1163 | } | 1160 | } |
1164 | 1161 | ||
@@ -1331,43 +1328,6 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples) | |||
1331 | return (iq_est.i_pwr + iq_est.q_pwr) / nsamples; | 1328 | return (iq_est.i_pwr + iq_est.q_pwr) / nsamples; |
1332 | } | 1329 | } |
1333 | 1330 | ||
1334 | static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain, | ||
1335 | u16 tia_gain, u16 lna2_gain) | ||
1336 | { | ||
1337 | u32 i_thresh_l, q_thresh_l; | ||
1338 | u32 i_thresh_h, q_thresh_h; | ||
1339 | struct lcnphy_iq_est iq_est_h, iq_est_l; | ||
1340 | |||
1341 | wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain, | ||
1342 | lna2_gain, 0); | ||
1343 | |||
1344 | wlc_lcnphy_rx_gain_override_enable(pi, true); | ||
1345 | wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0); | ||
1346 | udelay(500); | ||
1347 | write_radio_reg(pi, RADIO_2064_REG112, 0); | ||
1348 | if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l)) | ||
1349 | return false; | ||
1350 | |||
1351 | wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0); | ||
1352 | udelay(500); | ||
1353 | write_radio_reg(pi, RADIO_2064_REG112, 0); | ||
1354 | if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h)) | ||
1355 | return false; | ||
1356 | |||
1357 | i_thresh_l = (iq_est_l.i_pwr << 1); | ||
1358 | i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr; | ||
1359 | |||
1360 | q_thresh_l = (iq_est_l.q_pwr << 1); | ||
1361 | q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr; | ||
1362 | if ((iq_est_h.i_pwr > i_thresh_l) && | ||
1363 | (iq_est_h.i_pwr < i_thresh_h) && | ||
1364 | (iq_est_h.q_pwr > q_thresh_l) && | ||
1365 | (iq_est_h.q_pwr < q_thresh_h)) | ||
1366 | return true; | ||
1367 | |||
1368 | return false; | ||
1369 | } | ||
1370 | |||
1371 | static bool | 1331 | static bool |
1372 | wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, | 1332 | wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, |
1373 | const struct lcnphy_rx_iqcomp *iqcomp, | 1333 | const struct lcnphy_rx_iqcomp *iqcomp, |
@@ -1382,8 +1342,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, | |||
1382 | RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old, | 1342 | RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old, |
1383 | rfoverride3_old, rfoverride3val_old, rfoverride4_old, | 1343 | rfoverride3_old, rfoverride3val_old, rfoverride4_old, |
1384 | rfoverride4val_old, afectrlovr_old, afectrlovrval_old; | 1344 | rfoverride4val_old, afectrlovr_old, afectrlovrval_old; |
1385 | int tia_gain, lna2_gain, biq1_gain; | 1345 | int tia_gain; |
1386 | bool set_gain; | 1346 | u32 received_power, rx_pwr_threshold; |
1387 | u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl; | 1347 | u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl; |
1388 | u16 values_to_save[11]; | 1348 | u16 values_to_save[11]; |
1389 | s16 *ptr; | 1349 | s16 *ptr; |
@@ -1408,134 +1368,126 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi, | |||
1408 | goto cal_done; | 1368 | goto cal_done; |
1409 | } | 1369 | } |
1410 | 1370 | ||
1411 | WARN_ON(module != 1); | 1371 | if (module == 1) { |
1412 | tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); | ||
1413 | wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); | ||
1414 | |||
1415 | for (i = 0; i < 11; i++) | ||
1416 | values_to_save[i] = | ||
1417 | read_radio_reg(pi, rxiq_cal_rf_reg[i]); | ||
1418 | Core1TxControl_old = read_phy_reg(pi, 0x631); | ||
1419 | |||
1420 | or_phy_reg(pi, 0x631, 0x0015); | ||
1421 | |||
1422 | RFOverride0_old = read_phy_reg(pi, 0x44c); | ||
1423 | RFOverrideVal0_old = read_phy_reg(pi, 0x44d); | ||
1424 | rfoverride2_old = read_phy_reg(pi, 0x4b0); | ||
1425 | rfoverride2val_old = read_phy_reg(pi, 0x4b1); | ||
1426 | rfoverride3_old = read_phy_reg(pi, 0x4f9); | ||
1427 | rfoverride3val_old = read_phy_reg(pi, 0x4fa); | ||
1428 | rfoverride4_old = read_phy_reg(pi, 0x938); | ||
1429 | rfoverride4val_old = read_phy_reg(pi, 0x939); | ||
1430 | afectrlovr_old = read_phy_reg(pi, 0x43b); | ||
1431 | afectrlovrval_old = read_phy_reg(pi, 0x43c); | ||
1432 | old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); | ||
1433 | old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); | ||
1434 | |||
1435 | tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); | ||
1436 | if (tx_gain_override_old) { | ||
1437 | wlc_lcnphy_get_tx_gain(pi, &old_gains); | ||
1438 | tx_gain_index_old = pi_lcn->lcnphy_current_index; | ||
1439 | } | ||
1440 | |||
1441 | wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); | ||
1442 | 1372 | ||
1443 | mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); | 1373 | tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi); |
1444 | mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); | 1374 | wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF); |
1445 | 1375 | ||
1446 | mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); | 1376 | for (i = 0; i < 11; i++) |
1447 | mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); | 1377 | values_to_save[i] = |
1378 | read_radio_reg(pi, rxiq_cal_rf_reg[i]); | ||
1379 | Core1TxControl_old = read_phy_reg(pi, 0x631); | ||
1380 | |||
1381 | or_phy_reg(pi, 0x631, 0x0015); | ||
1382 | |||
1383 | RFOverride0_old = read_phy_reg(pi, 0x44c); | ||
1384 | RFOverrideVal0_old = read_phy_reg(pi, 0x44d); | ||
1385 | rfoverride2_old = read_phy_reg(pi, 0x4b0); | ||
1386 | rfoverride2val_old = read_phy_reg(pi, 0x4b1); | ||
1387 | rfoverride3_old = read_phy_reg(pi, 0x4f9); | ||
1388 | rfoverride3val_old = read_phy_reg(pi, 0x4fa); | ||
1389 | rfoverride4_old = read_phy_reg(pi, 0x938); | ||
1390 | rfoverride4val_old = read_phy_reg(pi, 0x939); | ||
1391 | afectrlovr_old = read_phy_reg(pi, 0x43b); | ||
1392 | afectrlovrval_old = read_phy_reg(pi, 0x43c); | ||
1393 | old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da); | ||
1394 | old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db); | ||
1395 | |||
1396 | tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi); | ||
1397 | if (tx_gain_override_old) { | ||
1398 | wlc_lcnphy_get_tx_gain(pi, &old_gains); | ||
1399 | tx_gain_index_old = pi_lcn->lcnphy_current_index; | ||
1400 | } | ||
1448 | 1401 | ||
1449 | write_radio_reg(pi, RADIO_2064_REG116, 0x06); | 1402 | wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx); |
1450 | write_radio_reg(pi, RADIO_2064_REG12C, 0x07); | ||
1451 | write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); | ||
1452 | write_radio_reg(pi, RADIO_2064_REG098, 0x03); | ||
1453 | write_radio_reg(pi, RADIO_2064_REG00B, 0x7); | ||
1454 | mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); | ||
1455 | write_radio_reg(pi, RADIO_2064_REG01D, 0x01); | ||
1456 | write_radio_reg(pi, RADIO_2064_REG114, 0x01); | ||
1457 | write_radio_reg(pi, RADIO_2064_REG02E, 0x10); | ||
1458 | write_radio_reg(pi, RADIO_2064_REG12A, 0x08); | ||
1459 | |||
1460 | mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); | ||
1461 | mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); | ||
1462 | mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); | ||
1463 | mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); | ||
1464 | mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); | ||
1465 | mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); | ||
1466 | mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); | ||
1467 | mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); | ||
1468 | mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); | ||
1469 | mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); | ||
1470 | 1403 | ||
1471 | mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); | 1404 | mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0); |
1472 | mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); | 1405 | mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0); |
1473 | 1406 | ||
1474 | write_phy_reg(pi, 0x6da, 0xffff); | 1407 | mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1); |
1475 | or_phy_reg(pi, 0x6db, 0x3); | 1408 | mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1); |
1476 | 1409 | ||
1477 | wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); | 1410 | write_radio_reg(pi, RADIO_2064_REG116, 0x06); |
1478 | set_gain = false; | 1411 | write_radio_reg(pi, RADIO_2064_REG12C, 0x07); |
1479 | 1412 | write_radio_reg(pi, RADIO_2064_REG06A, 0xd3); | |
1480 | lna2_gain = 3; | 1413 | write_radio_reg(pi, RADIO_2064_REG098, 0x03); |
1481 | while ((lna2_gain >= 0) && !set_gain) { | 1414 | write_radio_reg(pi, RADIO_2064_REG00B, 0x7); |
1482 | tia_gain = 4; | 1415 | mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4); |
1483 | 1416 | write_radio_reg(pi, RADIO_2064_REG01D, 0x01); | |
1484 | while ((tia_gain >= 0) && !set_gain) { | 1417 | write_radio_reg(pi, RADIO_2064_REG114, 0x01); |
1485 | biq1_gain = 6; | 1418 | write_radio_reg(pi, RADIO_2064_REG02E, 0x10); |
1486 | 1419 | write_radio_reg(pi, RADIO_2064_REG12A, 0x08); | |
1487 | while ((biq1_gain >= 0) && !set_gain) { | 1420 | |
1488 | set_gain = wlc_lcnphy_rx_iq_cal_gain(pi, | 1421 | mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0); |
1489 | (u16) | 1422 | mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0); |
1490 | biq1_gain, | 1423 | mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1); |
1491 | (u16) | 1424 | mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1); |
1492 | tia_gain, | 1425 | mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2); |
1493 | (u16) | 1426 | mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2); |
1494 | lna2_gain); | 1427 | mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3); |
1495 | biq1_gain -= 1; | 1428 | mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3); |
1496 | } | 1429 | mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5); |
1430 | mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5); | ||
1431 | |||
1432 | mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0); | ||
1433 | mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0); | ||
1434 | |||
1435 | wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0); | ||
1436 | write_phy_reg(pi, 0x6da, 0xffff); | ||
1437 | or_phy_reg(pi, 0x6db, 0x3); | ||
1438 | wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch); | ||
1439 | wlc_lcnphy_rx_gain_override_enable(pi, true); | ||
1440 | |||
1441 | tia_gain = 8; | ||
1442 | rx_pwr_threshold = 950; | ||
1443 | while (tia_gain > 0) { | ||
1497 | tia_gain -= 1; | 1444 | tia_gain -= 1; |
1445 | wlc_lcnphy_set_rx_gain_by_distribution(pi, | ||
1446 | 0, 0, 2, 2, | ||
1447 | (u16) | ||
1448 | tia_gain, 1, 0); | ||
1449 | udelay(500); | ||
1450 | |||
1451 | received_power = | ||
1452 | wlc_lcnphy_measure_digital_power(pi, 2000); | ||
1453 | if (received_power < rx_pwr_threshold) | ||
1454 | break; | ||
1498 | } | 1455 | } |
1499 | lna2_gain -= 1; | 1456 | result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff); |
1500 | } | ||
1501 | 1457 | ||
1502 | if (set_gain) | 1458 | wlc_lcnphy_stop_tx_tone(pi); |
1503 | result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024); | ||
1504 | else | ||
1505 | result = false; | ||
1506 | 1459 | ||
1507 | wlc_lcnphy_stop_tx_tone(pi); | 1460 | write_phy_reg(pi, 0x631, Core1TxControl_old); |
1508 | 1461 | ||
1509 | write_phy_reg(pi, 0x631, Core1TxControl_old); | 1462 | write_phy_reg(pi, 0x44c, RFOverrideVal0_old); |
1510 | 1463 | write_phy_reg(pi, 0x44d, RFOverrideVal0_old); | |
1511 | write_phy_reg(pi, 0x44c, RFOverrideVal0_old); | 1464 | write_phy_reg(pi, 0x4b0, rfoverride2_old); |
1512 | write_phy_reg(pi, 0x44d, RFOverrideVal0_old); | 1465 | write_phy_reg(pi, 0x4b1, rfoverride2val_old); |
1513 | write_phy_reg(pi, 0x4b0, rfoverride2_old); | 1466 | write_phy_reg(pi, 0x4f9, rfoverride3_old); |
1514 | write_phy_reg(pi, 0x4b1, rfoverride2val_old); | 1467 | write_phy_reg(pi, 0x4fa, rfoverride3val_old); |
1515 | write_phy_reg(pi, 0x4f9, rfoverride3_old); | 1468 | write_phy_reg(pi, 0x938, rfoverride4_old); |
1516 | write_phy_reg(pi, 0x4fa, rfoverride3val_old); | 1469 | write_phy_reg(pi, 0x939, rfoverride4val_old); |
1517 | write_phy_reg(pi, 0x938, rfoverride4_old); | 1470 | write_phy_reg(pi, 0x43b, afectrlovr_old); |
1518 | write_phy_reg(pi, 0x939, rfoverride4val_old); | 1471 | write_phy_reg(pi, 0x43c, afectrlovrval_old); |
1519 | write_phy_reg(pi, 0x43b, afectrlovr_old); | 1472 | write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); |
1520 | write_phy_reg(pi, 0x43c, afectrlovrval_old); | 1473 | write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); |
1521 | write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl); | ||
1522 | write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl); | ||
1523 | 1474 | ||
1524 | wlc_lcnphy_clear_trsw_override(pi); | 1475 | wlc_lcnphy_clear_trsw_override(pi); |
1525 | 1476 | ||
1526 | mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); | 1477 | mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2); |
1527 | 1478 | ||
1528 | for (i = 0; i < 11; i++) | 1479 | for (i = 0; i < 11; i++) |
1529 | write_radio_reg(pi, rxiq_cal_rf_reg[i], | 1480 | write_radio_reg(pi, rxiq_cal_rf_reg[i], |
1530 | values_to_save[i]); | 1481 | values_to_save[i]); |
1531 | 1482 | ||
1532 | if (tx_gain_override_old) | 1483 | if (tx_gain_override_old) |
1533 | wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); | 1484 | wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old); |
1534 | else | 1485 | else |
1535 | wlc_lcnphy_disable_tx_gain_override(pi); | 1486 | wlc_lcnphy_disable_tx_gain_override(pi); |
1536 | 1487 | ||
1537 | wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); | 1488 | wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl); |
1538 | wlc_lcnphy_rx_gain_override_enable(pi, false); | 1489 | wlc_lcnphy_rx_gain_override_enable(pi, false); |
1490 | } | ||
1539 | 1491 | ||
1540 | cal_done: | 1492 | cal_done: |
1541 | kfree(ptr); | 1493 | kfree(ptr); |
@@ -1829,17 +1781,6 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel) | |||
1829 | write_radio_reg(pi, RADIO_2064_REG038, 3); | 1781 | write_radio_reg(pi, RADIO_2064_REG038, 3); |
1830 | write_radio_reg(pi, RADIO_2064_REG091, 7); | 1782 | write_radio_reg(pi, RADIO_2064_REG091, 7); |
1831 | } | 1783 | } |
1832 | |||
1833 | if (!(pi->sh->boardflags & BFL_FEM)) { | ||
1834 | u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc, | ||
1835 | 0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0}; | ||
1836 | |||
1837 | write_radio_reg(pi, RADIO_2064_REG02A, 0xf); | ||
1838 | write_radio_reg(pi, RADIO_2064_REG091, 0x3); | ||
1839 | write_radio_reg(pi, RADIO_2064_REG038, 0x3); | ||
1840 | |||
1841 | write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]); | ||
1842 | } | ||
1843 | } | 1784 | } |
1844 | 1785 | ||
1845 | static int | 1786 | static int |
@@ -2034,16 +1975,6 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos) | |||
2034 | } else { | 1975 | } else { |
2035 | mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1); | 1976 | mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1); |
2036 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); | 1977 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); |
2037 | mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0); | ||
2038 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2); | ||
2039 | mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0); | ||
2040 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4); | ||
2041 | mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); | ||
2042 | mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77); | ||
2043 | mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1); | ||
2044 | mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7); | ||
2045 | mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1); | ||
2046 | mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4); | ||
2047 | } | 1978 | } |
2048 | } else { | 1979 | } else { |
2049 | mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); | 1980 | mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2); |
@@ -2130,14 +2061,12 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi) | |||
2130 | (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12)); | 2061 | (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12)); |
2131 | 2062 | ||
2132 | mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5)); | 2063 | mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5)); |
2133 | mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0)); | ||
2134 | } | 2064 | } |
2135 | 2065 | ||
2136 | static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) | 2066 | static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) |
2137 | { | 2067 | { |
2138 | struct phytbl_info tab; | 2068 | struct phytbl_info tab; |
2139 | u32 rfseq, ind; | 2069 | u32 rfseq, ind; |
2140 | u8 tssi_sel; | ||
2141 | 2070 | ||
2142 | tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; | 2071 | tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; |
2143 | tab.tbl_width = 32; | 2072 | tab.tbl_width = 32; |
@@ -2159,13 +2088,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) | |||
2159 | 2088 | ||
2160 | mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4); | 2089 | mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4); |
2161 | 2090 | ||
2162 | if (pi->sh->boardflags & BFL_FEM) { | 2091 | wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); |
2163 | tssi_sel = 0x1; | ||
2164 | wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT); | ||
2165 | } else { | ||
2166 | tssi_sel = 0xe; | ||
2167 | wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA); | ||
2168 | } | ||
2169 | mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14); | 2092 | mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14); |
2170 | 2093 | ||
2171 | mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15); | 2094 | mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15); |
@@ -2201,10 +2124,9 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) | |||
2201 | mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0); | 2124 | mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0); |
2202 | 2125 | ||
2203 | if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { | 2126 | if (LCNREV_IS(pi->pubpi.phy_rev, 2)) { |
2204 | mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel); | 2127 | mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe); |
2205 | mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4); | 2128 | mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4); |
2206 | } else { | 2129 | } else { |
2207 | mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1); | ||
2208 | mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1); | 2130 | mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1); |
2209 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3); | 2131 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3); |
2210 | } | 2132 | } |
@@ -2251,10 +2173,6 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi) | |||
2251 | 2173 | ||
2252 | mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8); | 2174 | mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8); |
2253 | 2175 | ||
2254 | mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0); | ||
2255 | mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0); | ||
2256 | mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8); | ||
2257 | |||
2258 | wlc_lcnphy_pwrctrl_rssiparams(pi); | 2176 | wlc_lcnphy_pwrctrl_rssiparams(pi); |
2259 | } | 2177 | } |
2260 | 2178 | ||
@@ -2873,8 +2791,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) | |||
2873 | read_radio_reg(pi, RADIO_2064_REG007) & 1; | 2791 | read_radio_reg(pi, RADIO_2064_REG007) & 1; |
2874 | u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; | 2792 | u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10; |
2875 | u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; | 2793 | u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4; |
2876 | u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi); | ||
2877 | |||
2878 | idleTssi = read_phy_reg(pi, 0x4ab); | 2794 | idleTssi = read_phy_reg(pi, 0x4ab); |
2879 | suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & | 2795 | suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & |
2880 | MCTL_EN_MAC)); | 2796 | MCTL_EN_MAC)); |
@@ -2892,12 +2808,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) | |||
2892 | mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4); | 2808 | mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4); |
2893 | mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2); | 2809 | mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2); |
2894 | wlc_lcnphy_tssi_setup(pi); | 2810 | wlc_lcnphy_tssi_setup(pi); |
2895 | |||
2896 | mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0)); | ||
2897 | mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6)); | ||
2898 | |||
2899 | wlc_lcnphy_set_bbmult(pi, 0x0); | ||
2900 | |||
2901 | wlc_phy_do_dummy_tx(pi, true, OFF); | 2811 | wlc_phy_do_dummy_tx(pi, true, OFF); |
2902 | idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0)) | 2812 | idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0)) |
2903 | >> 0); | 2813 | >> 0); |
@@ -2919,7 +2829,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi) | |||
2919 | 2829 | ||
2920 | mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12); | 2830 | mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12); |
2921 | 2831 | ||
2922 | wlc_lcnphy_set_bbmult(pi, SAVE_bbmult); | ||
2923 | wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old); | 2832 | wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old); |
2924 | wlc_lcnphy_set_tx_gain(pi, &old_gains); | 2833 | wlc_lcnphy_set_tx_gain(pi, &old_gains); |
2925 | wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl); | 2834 | wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl); |
@@ -3133,11 +3042,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi) | |||
3133 | wlc_lcnphy_write_table(pi, &tab); | 3042 | wlc_lcnphy_write_table(pi, &tab); |
3134 | tab.tbl_offset++; | 3043 | tab.tbl_offset++; |
3135 | } | 3044 | } |
3136 | mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0); | ||
3137 | mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0); | ||
3138 | mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8); | ||
3139 | mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4); | ||
3140 | mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2); | ||
3141 | 3045 | ||
3142 | mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7); | 3046 | mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7); |
3143 | 3047 | ||
@@ -3939,6 +3843,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi) | |||
3939 | target_gains.pad_gain = 21; | 3843 | target_gains.pad_gain = 21; |
3940 | target_gains.dac_gain = 0; | 3844 | target_gains.dac_gain = 0; |
3941 | wlc_lcnphy_set_tx_gain(pi, &target_gains); | 3845 | wlc_lcnphy_set_tx_gain(pi, &target_gains); |
3846 | wlc_lcnphy_set_tx_pwr_by_index(pi, 16); | ||
3942 | 3847 | ||
3943 | if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) { | 3848 | if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) { |
3944 | 3849 | ||
@@ -3949,7 +3854,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi) | |||
3949 | lcnphy_recal ? LCNPHY_CAL_RECAL : | 3854 | lcnphy_recal ? LCNPHY_CAL_RECAL : |
3950 | LCNPHY_CAL_FULL), false); | 3855 | LCNPHY_CAL_FULL), false); |
3951 | } else { | 3856 | } else { |
3952 | wlc_lcnphy_set_tx_pwr_by_index(pi, 16); | ||
3953 | wlc_lcnphy_tx_iqlo_soft_cal_full(pi); | 3857 | wlc_lcnphy_tx_iqlo_soft_cal_full(pi); |
3954 | } | 3858 | } |
3955 | 3859 | ||
@@ -4374,22 +4278,17 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi, | |||
4374 | if (CHSPEC_IS5G(pi->radio_chanspec)) | 4278 | if (CHSPEC_IS5G(pi->radio_chanspec)) |
4375 | pa_gain = 0x70; | 4279 | pa_gain = 0x70; |
4376 | else | 4280 | else |
4377 | pa_gain = 0x60; | 4281 | pa_gain = 0x70; |
4378 | 4282 | ||
4379 | if (pi->sh->boardflags & BFL_FEM) | 4283 | if (pi->sh->boardflags & BFL_FEM) |
4380 | pa_gain = 0x10; | 4284 | pa_gain = 0x10; |
4381 | |||
4382 | tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; | 4285 | tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL; |
4383 | tab.tbl_width = 32; | 4286 | tab.tbl_width = 32; |
4384 | tab.tbl_len = 1; | 4287 | tab.tbl_len = 1; |
4385 | tab.tbl_ptr = &val; | 4288 | tab.tbl_ptr = &val; |
4386 | 4289 | ||
4387 | for (j = 0; j < 128; j++) { | 4290 | for (j = 0; j < 128; j++) { |
4388 | if (pi->sh->boardflags & BFL_FEM) | 4291 | gm_gain = gain_table[j].gm; |
4389 | gm_gain = gain_table[j].gm; | ||
4390 | else | ||
4391 | gm_gain = 15; | ||
4392 | |||
4393 | val = (((u32) pa_gain << 24) | | 4292 | val = (((u32) pa_gain << 24) | |
4394 | (gain_table[j].pad << 16) | | 4293 | (gain_table[j].pad << 16) | |
4395 | (gain_table[j].pga << 8) | gm_gain); | 4294 | (gain_table[j].pga << 8) | gm_gain); |
@@ -4600,10 +4499,7 @@ static void wlc_radio_2064_init(struct brcms_phy *pi) | |||
4600 | 4499 | ||
4601 | write_phy_reg(pi, 0x4ea, 0x4688); | 4500 | write_phy_reg(pi, 0x4ea, 0x4688); |
4602 | 4501 | ||
4603 | if (pi->sh->boardflags & BFL_FEM) | 4502 | mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); |
4604 | mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0); | ||
4605 | else | ||
4606 | mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0); | ||
4607 | 4503 | ||
4608 | mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6); | 4504 | mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6); |
4609 | 4505 | ||
@@ -4614,13 +4510,6 @@ static void wlc_radio_2064_init(struct brcms_phy *pi) | |||
4614 | wlc_lcnphy_rcal(pi); | 4510 | wlc_lcnphy_rcal(pi); |
4615 | 4511 | ||
4616 | wlc_lcnphy_rc_cal(pi); | 4512 | wlc_lcnphy_rc_cal(pi); |
4617 | |||
4618 | if (!(pi->sh->boardflags & BFL_FEM)) { | ||
4619 | write_radio_reg(pi, RADIO_2064_REG032, 0x6f); | ||
4620 | write_radio_reg(pi, RADIO_2064_REG033, 0x19); | ||
4621 | write_radio_reg(pi, RADIO_2064_REG039, 0xe); | ||
4622 | } | ||
4623 | |||
4624 | } | 4513 | } |
4625 | 4514 | ||
4626 | static void wlc_lcnphy_radio_init(struct brcms_phy *pi) | 4515 | static void wlc_lcnphy_radio_init(struct brcms_phy *pi) |
@@ -4650,20 +4539,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi) | |||
4650 | wlc_lcnphy_write_table(pi, &tab); | 4539 | wlc_lcnphy_write_table(pi, &tab); |
4651 | } | 4540 | } |
4652 | 4541 | ||
4653 | if (!(pi->sh->boardflags & BFL_FEM)) { | 4542 | tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; |
4654 | tab.tbl_id = LCNPHY_TBL_ID_RFSEQ; | 4543 | tab.tbl_width = 16; |
4655 | tab.tbl_width = 16; | 4544 | tab.tbl_ptr = &val; |
4656 | tab.tbl_ptr = &val; | 4545 | tab.tbl_len = 1; |
4657 | tab.tbl_len = 1; | ||
4658 | 4546 | ||
4659 | val = 150; | 4547 | val = 114; |
4660 | tab.tbl_offset = 0; | 4548 | tab.tbl_offset = 0; |
4661 | wlc_lcnphy_write_table(pi, &tab); | 4549 | wlc_lcnphy_write_table(pi, &tab); |
4662 | 4550 | ||
4663 | val = 220; | 4551 | val = 130; |
4664 | tab.tbl_offset = 1; | 4552 | tab.tbl_offset = 1; |
4665 | wlc_lcnphy_write_table(pi, &tab); | 4553 | wlc_lcnphy_write_table(pi, &tab); |
4666 | } | 4554 | |
4555 | val = 6; | ||
4556 | tab.tbl_offset = 8; | ||
4557 | wlc_lcnphy_write_table(pi, &tab); | ||
4667 | 4558 | ||
4668 | if (CHSPEC_IS2G(pi->radio_chanspec)) { | 4559 | if (CHSPEC_IS2G(pi->radio_chanspec)) { |
4669 | if (pi->sh->boardflags & BFL_FEM) | 4560 | if (pi->sh->boardflags & BFL_FEM) |
@@ -5055,7 +4946,6 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec) | |||
5055 | wlc_lcnphy_load_tx_iir_filter(pi, true, 3); | 4946 | wlc_lcnphy_load_tx_iir_filter(pi, true, 3); |
5056 | 4947 | ||
5057 | mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3); | 4948 | mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3); |
5058 | wlc_lcnphy_tssi_setup(pi); | ||
5059 | } | 4949 | } |
5060 | 4950 | ||
5061 | void wlc_phy_detach_lcnphy(struct brcms_phy *pi) | 4951 | void wlc_phy_detach_lcnphy(struct brcms_phy *pi) |
@@ -5094,7 +4984,8 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi) | |||
5094 | if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) | 4984 | if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) |
5095 | return false; | 4985 | return false; |
5096 | 4986 | ||
5097 | if (LCNREV_IS(pi->pubpi.phy_rev, 1)) { | 4987 | if ((pi->sh->boardflags & BFL_FEM) && |
4988 | (LCNREV_IS(pi->pubpi.phy_rev, 1))) { | ||
5098 | if (pi_lcn->lcnphy_tempsense_option == 3) { | 4989 | if (pi_lcn->lcnphy_tempsense_option == 3) { |
5099 | pi->hwpwrctrl = true; | 4990 | pi->hwpwrctrl = true; |
5100 | pi->hwpwrctrl_capable = true; | 4991 | pi->hwpwrctrl_capable = true; |
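The phy_lcn.c hunks above consistently drop the internal-PA (!BFL_FEM) branches and, in the final hunk, additionally gate hardware power control on the FEM boardflag. A minimal sketch of that boardflag test, with hypothetical names and a hypothetical bit value, purely to illustrate the pattern being changed:

/* Sketch only: feature enable gated on a boardflag bit. BFL_FEM's value
 * and the struct layout here are illustrative, not the driver's. */
#include <stdbool.h>
#include <stdint.h>

#define BFL_FEM (1u << 11)      /* hypothetical: external front-end module */

struct phy {
        uint32_t boardflags;
        int phy_rev;
        bool hwpwrctrl;
};

static void attach_power_ctrl(struct phy *pi)
{
        /* only FEM boards on rev 1 get hardware power control */
        if ((pi->boardflags & BFL_FEM) && pi->phy_rev == 1)
                pi->hwpwrctrl = true;
}
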
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c index b7e95acc2084..622c01ca72c5 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c | |||
@@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = { | |||
1992 | }; | 1992 | }; |
1993 | 1993 | ||
1994 | static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = { | 1994 | static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = { |
1995 | 0x0009, | ||
1996 | 0x000a, | 1995 | 0x000a, |
1997 | 0x0005, | ||
1998 | 0x0006, | ||
1999 | 0x0009, | 1996 | 0x0009, |
2000 | 0x000a, | ||
2001 | 0x0005, | ||
2002 | 0x0006, | 1997 | 0x0006, |
2003 | 0x0009, | ||
2004 | 0x000a, | ||
2005 | 0x0005, | 1998 | 0x0005, |
2006 | 0x0006, | ||
2007 | 0x0009, | ||
2008 | 0x000a, | 1999 | 0x000a, |
2009 | 0x0005, | ||
2010 | 0x0006, | ||
2011 | 0x0009, | 2000 | 0x0009, |
2012 | 0x000a, | ||
2013 | 0x0005, | ||
2014 | 0x0006, | 2001 | 0x0006, |
2015 | 0x0009, | ||
2016 | 0x000a, | ||
2017 | 0x0005, | 2002 | 0x0005, |
2018 | 0x0006, | ||
2019 | 0x0009, | ||
2020 | 0x000a, | 2003 | 0x000a, |
2021 | 0x0005, | ||
2022 | 0x0006, | ||
2023 | 0x0009, | 2004 | 0x0009, |
2024 | 0x000a, | ||
2025 | 0x0005, | ||
2026 | 0x0006, | 2005 | 0x0006, |
2027 | 0x0009, | ||
2028 | 0x000a, | ||
2029 | 0x0005, | 2006 | 0x0005, |
2030 | 0x0006, | ||
2031 | 0x0009, | ||
2032 | 0x000a, | 2007 | 0x000a, |
2033 | 0x0005, | ||
2034 | 0x0006, | ||
2035 | 0x0009, | 2008 | 0x0009, |
2036 | 0x000a, | ||
2037 | 0x0005, | ||
2038 | 0x0006, | 2009 | 0x0006, |
2039 | 0x0009, | ||
2040 | 0x000a, | ||
2041 | 0x0005, | 2010 | 0x0005, |
2042 | 0x0006, | 2011 | 0x000a, |
2043 | 0x0009, | 2012 | 0x0009, |
2013 | 0x0006, | ||
2014 | 0x0005, | ||
2044 | 0x000a, | 2015 | 0x000a, |
2016 | 0x0009, | ||
2017 | 0x0006, | ||
2045 | 0x0005, | 2018 | 0x0005, |
2019 | 0x000a, | ||
2020 | 0x0009, | ||
2046 | 0x0006, | 2021 | 0x0006, |
2022 | 0x0005, | ||
2023 | 0x000a, | ||
2047 | 0x0009, | 2024 | 0x0009, |
2025 | 0x0006, | ||
2026 | 0x0005, | ||
2048 | 0x000a, | 2027 | 0x000a, |
2028 | 0x0009, | ||
2029 | 0x0006, | ||
2049 | 0x0005, | 2030 | 0x0005, |
2031 | 0x000a, | ||
2032 | 0x0009, | ||
2050 | 0x0006, | 2033 | 0x0006, |
2034 | 0x0005, | ||
2035 | 0x000a, | ||
2051 | 0x0009, | 2036 | 0x0009, |
2037 | 0x0006, | ||
2038 | 0x0005, | ||
2052 | 0x000a, | 2039 | 0x000a, |
2040 | 0x0009, | ||
2041 | 0x0006, | ||
2053 | 0x0005, | 2042 | 0x0005, |
2043 | 0x000a, | ||
2044 | 0x0009, | ||
2054 | 0x0006, | 2045 | 0x0006, |
2046 | 0x0005, | ||
2047 | 0x000a, | ||
2055 | 0x0009, | 2048 | 0x0009, |
2049 | 0x0006, | ||
2050 | 0x0005, | ||
2056 | 0x000a, | 2051 | 0x000a, |
2052 | 0x0009, | ||
2053 | 0x0006, | ||
2057 | 0x0005, | 2054 | 0x0005, |
2055 | 0x000a, | ||
2056 | 0x0009, | ||
2058 | 0x0006, | 2057 | 0x0006, |
2058 | 0x0005, | ||
2059 | }; | 2059 | }; |
2060 | 2060 | ||
2061 | static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = { | 2061 | static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = { |
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c index 3630a41df50d..c353b5f19c8c 100644 --- a/drivers/net/wireless/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/iwlegacy/3945-mac.c | |||
@@ -475,6 +475,7 @@ il3945_tx_skb(struct il_priv *il, | |||
475 | dma_addr_t txcmd_phys; | 475 | dma_addr_t txcmd_phys; |
476 | int txq_id = skb_get_queue_mapping(skb); | 476 | int txq_id = skb_get_queue_mapping(skb); |
477 | u16 len, idx, hdr_len; | 477 | u16 len, idx, hdr_len; |
478 | u16 firstlen, secondlen; | ||
478 | u8 id; | 479 | u8 id; |
479 | u8 unicast; | 480 | u8 unicast; |
480 | u8 sta_id; | 481 | u8 sta_id; |
@@ -589,21 +590,22 @@ il3945_tx_skb(struct il_priv *il, | |||
589 | len = | 590 | len = |
590 | sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) + | 591 | sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) + |
591 | hdr_len; | 592 | hdr_len; |
592 | len = (len + 3) & ~3; | 593 | firstlen = (len + 3) & ~3; |
593 | 594 | ||
594 | /* Physical address of this Tx command's header (not MAC header!), | 595 | /* Physical address of this Tx command's header (not MAC header!), |
595 | * within command buffer array. */ | 596 | * within command buffer array. */ |
596 | txcmd_phys = | 597 | txcmd_phys = |
597 | pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE); | 598 | pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen, |
599 | PCI_DMA_TODEVICE); | ||
598 | if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys))) | 600 | if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys))) |
599 | goto drop_unlock; | 601 | goto drop_unlock; |
600 | 602 | ||
601 | /* Set up TFD's 2nd entry to point directly to remainder of skb, | 603 | /* Set up TFD's 2nd entry to point directly to remainder of skb, |
602 | * if any (802.11 null frames have no payload). */ | 604 | * if any (802.11 null frames have no payload). */ |
603 | len = skb->len - hdr_len; | 605 | secondlen = skb->len - hdr_len; |
604 | if (len) { | 606 | if (secondlen > 0) { |
605 | phys_addr = | 607 | phys_addr = |
606 | pci_map_single(il->pci_dev, skb->data + hdr_len, len, | 608 | pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen, |
607 | PCI_DMA_TODEVICE); | 609 | PCI_DMA_TODEVICE); |
608 | if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) | 610 | if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) |
609 | goto drop_unlock; | 611 | goto drop_unlock; |
@@ -611,12 +613,12 @@ il3945_tx_skb(struct il_priv *il, | |||
611 | 613 | ||
612 | /* Add buffer containing Tx command and MAC(!) header to TFD's | 614 | /* Add buffer containing Tx command and MAC(!) header to TFD's |
613 | * first entry */ | 615 | * first entry */ |
614 | il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1, 0); | 616 | il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0); |
615 | dma_unmap_addr_set(out_meta, mapping, txcmd_phys); | 617 | dma_unmap_addr_set(out_meta, mapping, txcmd_phys); |
616 | dma_unmap_len_set(out_meta, len, len); | 618 | dma_unmap_len_set(out_meta, len, firstlen); |
617 | if (len) | 619 | if (secondlen > 0) |
618 | il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, len, 0, | 620 | il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0, |
619 | U32_PAD(len)); | 621 | U32_PAD(secondlen)); |
620 | 622 | ||
621 | if (!ieee80211_has_morefrags(hdr->frame_control)) { | 623 | if (!ieee80211_has_morefrags(hdr->frame_control)) { |
622 | txq->need_update = 1; | 624 | txq->need_update = 1; |
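The hunk above stops reusing one len variable across the two DMA mappings: firstlen records the aligned command-header size and secondlen the payload size, so the length later stored for unmapping the first segment is no longer clobbered by the second mapping. The same pattern in a standalone sketch; map_segment() and attach_segment() are hypothetical stand-ins for the driver's helpers:

/* Sketch: keep a distinct length per mapped segment so each one is
 * attached (and later unmapped) with its own size. */
#include <stddef.h>
#include <stdint.h>

struct seg_meta {
        uintptr_t addr;
        size_t len;                     /* reused at unmap time */
};

static uintptr_t map_segment(const void *buf, size_t len)
{
        (void)len;
        return (uintptr_t)buf;          /* pretend bus address */
}

static void attach_segment(struct seg_meta *m, uintptr_t addr, size_t len)
{
        m->addr = addr;
        m->len = len;
}

static void build_tfd(struct seg_meta meta[2],
                      const void *cmd_hdr, size_t hdr_len,
                      const void *payload, size_t payload_len)
{
        size_t firstlen = (hdr_len + 3) & ~(size_t)3;   /* 4-byte align */
        size_t secondlen = payload_len;                 /* may be zero */

        attach_segment(&meta[0], map_segment(cmd_hdr, firstlen), firstlen);
        if (secondlen > 0)
                attach_segment(&meta[1],
                               map_segment(payload, secondlen), secondlen);
}
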
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c index e8324b5e5bfe..6c7493c2d698 100644 --- a/drivers/net/wireless/iwlegacy/4965-rs.c +++ b/drivers/net/wireless/iwlegacy/4965-rs.c | |||
@@ -2152,7 +2152,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf, | |||
2152 | int rate_idx; | 2152 | int rate_idx; |
2153 | int i; | 2153 | int i; |
2154 | u32 rate; | 2154 | u32 rate; |
2155 | u8 use_green = il4965_rs_use_green(il, sta); | 2155 | u8 use_green; |
2156 | u8 active_tbl = 0; | 2156 | u8 active_tbl = 0; |
2157 | u8 valid_tx_ant; | 2157 | u8 valid_tx_ant; |
2158 | struct il_station_priv *sta_priv; | 2158 | struct il_station_priv *sta_priv; |
@@ -2160,6 +2160,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf, | |||
2160 | if (!sta || !lq_sta) | 2160 | if (!sta || !lq_sta) |
2161 | return; | 2161 | return; |
2162 | 2162 | ||
2163 | use_green = il4965_rs_use_green(il, sta); | ||
2163 | sta_priv = (void *)sta->drv_priv; | 2164 | sta_priv = (void *)sta->drv_priv; |
2164 | 2165 | ||
2165 | i = lq_sta->last_txrate_idx; | 2166 | i = lq_sta->last_txrate_idx; |
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c index 86ea5f4c3939..44ca0e57f9f7 100644 --- a/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c | |||
@@ -1262,6 +1262,15 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |||
1262 | } | 1262 | } |
1263 | 1263 | ||
1264 | /* | 1264 | /* |
1265 | * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag | ||
1266 | * in iwl_down but cancel the workers only later. | ||
1267 | */ | ||
1268 | if (!priv->ucode_loaded) { | ||
1269 | IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id); | ||
1270 | return -EIO; | ||
1271 | } | ||
1272 | |||
1273 | /* | ||
1265 | * Synchronous commands from this op-mode must hold | 1274 | * Synchronous commands from this op-mode must hold |
1266 | * the mutex, this ensures we don't try to send two | 1275 | * the mutex, this ensures we don't try to send two |
1267 | * (or more) synchronous commands at a time. | 1276 | * (or more) synchronous commands at a time. |
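The added block in iwl_dvm_send_cmd() drops host commands once the firmware is gone (the comment cites the window after a FW ASSERT, where iwl_down has cleared STATUS_FW_ERROR but the workers are cancelled only later). A hedged sketch of the early-return guard; the struct and helper names are illustrative:

/* Sketch: refuse to submit work when the backing firmware is not loaded. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct dev_priv {
        bool ucode_loaded;
};

static int send_cmd(struct dev_priv *priv, unsigned int cmd_id)
{
        if (!priv->ucode_loaded) {
                fprintf(stderr, "fw not loaded - dropping cmd %#x\n", cmd_id);
                return -EIO;
        }
        /* ... normal submission path ... */
        return 0;
}

The ucode.c hunk further down pairs with this guard: it sets priv->ucode_loaded before the calibration wait rather than after it, so commands sent while calibration completes are not rejected as "Fw not loaded".
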
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index 23be948cf162..a82b6b39d4ff 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c | |||
@@ -1419,6 +1419,14 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, | |||
1419 | 1419 | ||
1420 | mutex_lock(&priv->mutex); | 1420 | mutex_lock(&priv->mutex); |
1421 | 1421 | ||
1422 | if (changes & BSS_CHANGED_IDLE && bss_conf->idle) { | ||
1423 | /* | ||
1424 | * If we go idle, then clearly no "passive-no-rx" | ||
1425 | * workaround is needed any more, this is a reset. | ||
1426 | */ | ||
1427 | iwlagn_lift_passive_no_rx(priv); | ||
1428 | } | ||
1429 | |||
1422 | if (unlikely(!iwl_is_ready(priv))) { | 1430 | if (unlikely(!iwl_is_ready(priv))) { |
1423 | IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); | 1431 | IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); |
1424 | mutex_unlock(&priv->mutex); | 1432 | mutex_unlock(&priv->mutex); |
@@ -1450,16 +1458,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, | |||
1450 | priv->timestamp = bss_conf->sync_tsf; | 1458 | priv->timestamp = bss_conf->sync_tsf; |
1451 | ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; | 1459 | ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; |
1452 | } else { | 1460 | } else { |
1453 | /* | ||
1454 | * If we disassociate while there are pending | ||
1455 | * frames, just wake up the queues and let the | ||
1456 | * frames "escape" ... This shouldn't really | ||
1457 | * be happening to start with, but we should | ||
1458 | * not get stuck in this case either since it | ||
1459 | * can happen if userspace gets confused. | ||
1460 | */ | ||
1461 | iwlagn_lift_passive_no_rx(priv); | ||
1462 | |||
1463 | ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | 1461 | ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; |
1464 | 1462 | ||
1465 | if (ctx->ctxid == IWL_RXON_CTX_BSS) | 1463 | if (ctx->ctxid == IWL_RXON_CTX_BSS) |
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index 6aec2df3bb27..d1a670d7b10c 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c | |||
@@ -1192,7 +1192,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, | |||
1192 | memset(&info->status, 0, sizeof(info->status)); | 1192 | memset(&info->status, 0, sizeof(info->status)); |
1193 | 1193 | ||
1194 | if (status == TX_STATUS_FAIL_PASSIVE_NO_RX && | 1194 | if (status == TX_STATUS_FAIL_PASSIVE_NO_RX && |
1195 | iwl_is_associated_ctx(ctx) && ctx->vif && | 1195 | ctx->vif && |
1196 | ctx->vif->type == NL80211_IFTYPE_STATION) { | 1196 | ctx->vif->type == NL80211_IFTYPE_STATION) { |
1197 | /* block and stop all queues */ | 1197 | /* block and stop all queues */ |
1198 | priv->passive_no_rx = true; | 1198 | priv->passive_no_rx = true; |
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c index 736fe9bb140e..1a4ac9236a44 100644 --- a/drivers/net/wireless/iwlwifi/dvm/ucode.c +++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c | |||
@@ -367,6 +367,8 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, | |||
367 | return -EIO; | 367 | return -EIO; |
368 | } | 368 | } |
369 | 369 | ||
370 | priv->ucode_loaded = true; | ||
371 | |||
370 | if (ucode_type != IWL_UCODE_WOWLAN) { | 372 | if (ucode_type != IWL_UCODE_WOWLAN) { |
371 | /* delay a bit to give rfkill time to run */ | 373 | /* delay a bit to give rfkill time to run */ |
372 | msleep(5); | 374 | msleep(5); |
@@ -380,8 +382,6 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv, | |||
380 | return ret; | 382 | return ret; |
381 | } | 383 | } |
382 | 384 | ||
383 | priv->ucode_loaded = true; | ||
384 | |||
385 | return 0; | 385 | return 0; |
386 | } | 386 | } |
387 | 387 | ||
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 17bedc50e753..12c4f31ca8fb 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
@@ -475,6 +475,10 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, | |||
475 | 475 | ||
476 | /* If platform's RF_KILL switch is NOT set to KILL */ | 476 | /* If platform's RF_KILL switch is NOT set to KILL */ |
477 | hw_rfkill = iwl_is_rfkill_set(trans); | 477 | hw_rfkill = iwl_is_rfkill_set(trans); |
478 | if (hw_rfkill) | ||
479 | set_bit(STATUS_RFKILL, &trans_pcie->status); | ||
480 | else | ||
481 | clear_bit(STATUS_RFKILL, &trans_pcie->status); | ||
478 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | 482 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); |
479 | if (hw_rfkill && !run_in_rfkill) | 483 | if (hw_rfkill && !run_in_rfkill) |
480 | return -ERFKILL; | 484 | return -ERFKILL; |
@@ -641,6 +645,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, | |||
641 | 645 | ||
642 | static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) | 646 | static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) |
643 | { | 647 | { |
648 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
644 | bool hw_rfkill; | 649 | bool hw_rfkill; |
645 | int err; | 650 | int err; |
646 | 651 | ||
@@ -656,6 +661,10 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) | |||
656 | iwl_enable_rfkill_int(trans); | 661 | iwl_enable_rfkill_int(trans); |
657 | 662 | ||
658 | hw_rfkill = iwl_is_rfkill_set(trans); | 663 | hw_rfkill = iwl_is_rfkill_set(trans); |
664 | if (hw_rfkill) | ||
665 | set_bit(STATUS_RFKILL, &trans_pcie->status); | ||
666 | else | ||
667 | clear_bit(STATUS_RFKILL, &trans_pcie->status); | ||
659 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | 668 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); |
660 | 669 | ||
661 | return 0; | 670 | return 0; |
@@ -694,6 +703,10 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans, | |||
694 | * op_mode. | 703 | * op_mode. |
695 | */ | 704 | */ |
696 | hw_rfkill = iwl_is_rfkill_set(trans); | 705 | hw_rfkill = iwl_is_rfkill_set(trans); |
706 | if (hw_rfkill) | ||
707 | set_bit(STATUS_RFKILL, &trans_pcie->status); | ||
708 | else | ||
709 | clear_bit(STATUS_RFKILL, &trans_pcie->status); | ||
697 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); | 710 | iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); |
698 | } | 711 | } |
699 | } | 712 | } |
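All three trans.c hunks record the hardware rfkill state in trans_pcie->status right before reporting it to the op-mode, so later interrupt handling sees a cached bit that matches what was reported. The set-or-clear pattern in isolation, with standalone stand-ins for the kernel's set_bit()/clear_bit():

/* Sketch: keep a cached status bit in sync with the value being reported. */
#include <stdbool.h>

#define STATUS_RFKILL 0u

static void set_bit_u(unsigned int nr, unsigned long *addr)
{
        *addr |= 1UL << nr;
}

static void clear_bit_u(unsigned int nr, unsigned long *addr)
{
        *addr &= ~(1UL << nr);
}

static void report_rfkill(unsigned long *status, bool hw_rfkill)
{
        if (hw_rfkill)
                set_bit_u(STATUS_RFKILL, status);
        else
                clear_bit_u(STATUS_RFKILL, status);
        /* notify_op_mode(hw_rfkill);  cached bit now matches the report */
}
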
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c index 8595c16f74de..cb5c6792e3a8 100644 --- a/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c | |||
@@ -1264,7 +1264,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, | |||
1264 | for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { | 1264 | for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { |
1265 | int copy = 0; | 1265 | int copy = 0; |
1266 | 1266 | ||
1267 | if (!cmd->len) | 1267 | if (!cmd->len[i]) |
1268 | continue; | 1268 | continue; |
1269 | 1269 | ||
1270 | /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ | 1270 | /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ |
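The one-character fix above tests the per-fragment length cmd->len[i] rather than cmd->len; since len is an array member, the bare name decays to a non-null pointer, so the old test was never true and empty fragments were never skipped. A compact illustration with hypothetical sizes:

/* Sketch: testing the array name is always true; test the element. */
#include <stdio.h>

#define MAX_TBS 2

struct host_cmd {
        unsigned short len[MAX_TBS];
};

static void walk_fragments(const struct host_cmd *cmd)
{
        for (int i = 0; i < MAX_TBS; i++) {
                if (!cmd->len[i])       /* was: if (!cmd->len) -- never taken */
                        continue;
                printf("copy fragment %d, %u bytes\n",
                       i, (unsigned int)cmd->len[i]);
        }
}
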
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c index a44023a7bd57..8aaf56ade4d9 100644 --- a/drivers/net/wireless/mwifiex/cfg80211.c +++ b/drivers/net/wireless/mwifiex/cfg80211.c | |||
@@ -1892,7 +1892,8 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, | |||
1892 | } | 1892 | } |
1893 | } | 1893 | } |
1894 | 1894 | ||
1895 | for (i = 0; i < request->n_channels; i++) { | 1895 | for (i = 0; i < min_t(u32, request->n_channels, |
1896 | MWIFIEX_USER_SCAN_CHAN_MAX); i++) { | ||
1896 | chan = request->channels[i]; | 1897 | chan = request->channels[i]; |
1897 | priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; | 1898 | priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; |
1898 | priv->user_scan_cfg->chan_list[i].radio_type = chan->band; | 1899 | priv->user_scan_cfg->chan_list[i].radio_type = chan->band; |
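Bounding the loop with min_t keeps the copy inside chan_list even when cfg80211 hands over more channels than MWIFIEX_USER_SCAN_CHAN_MAX entries exist for. The same clamp sketched standalone; the capacity value is hypothetical:

/* Sketch: never let an externally supplied count walk past a fixed array. */
#include <stdio.h>

#define USER_SCAN_CHAN_MAX 50           /* hypothetical capacity */

struct chan_entry {
        unsigned char chan_number;
};

static void fill_scan_list(struct chan_entry dst[USER_SCAN_CHAN_MAX],
                           const unsigned char *requested, unsigned int n)
{
        unsigned int limit = n < USER_SCAN_CHAN_MAX ? n : USER_SCAN_CHAN_MAX;

        for (unsigned int i = 0; i < limit; i++)
                dst[i].chan_number = requested[i];

        if (n > limit)
                printf("truncated scan request from %u to %u channels\n",
                       n, limit);
}
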
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index 20a6c5555873..b5c8b962ce12 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c | |||
@@ -157,6 +157,20 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, | |||
157 | return -1; | 157 | return -1; |
158 | } | 158 | } |
159 | 159 | ||
160 | cmd_code = le16_to_cpu(host_cmd->command); | ||
161 | cmd_size = le16_to_cpu(host_cmd->size); | ||
162 | |||
163 | if (adapter->hw_status == MWIFIEX_HW_STATUS_RESET && | ||
164 | cmd_code != HostCmd_CMD_FUNC_SHUTDOWN && | ||
165 | cmd_code != HostCmd_CMD_FUNC_INIT) { | ||
166 | dev_err(adapter->dev, | ||
167 | "DNLD_CMD: FW in reset state, ignore cmd %#x\n", | ||
168 | cmd_code); | ||
169 | mwifiex_complete_cmd(adapter, cmd_node); | ||
170 | mwifiex_insert_cmd_to_free_q(adapter, cmd_node); | ||
171 | return -1; | ||
172 | } | ||
173 | |||
160 | /* Set command sequence number */ | 174 | /* Set command sequence number */ |
161 | adapter->seq_num++; | 175 | adapter->seq_num++; |
162 | host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO | 176 | host_cmd->seq_num = cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO |
@@ -168,9 +182,6 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, | |||
168 | adapter->curr_cmd = cmd_node; | 182 | adapter->curr_cmd = cmd_node; |
169 | spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); | 183 | spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); |
170 | 184 | ||
171 | cmd_code = le16_to_cpu(host_cmd->command); | ||
172 | cmd_size = le16_to_cpu(host_cmd->size); | ||
173 | |||
174 | /* Adjust skb length */ | 185 | /* Adjust skb length */ |
175 | if (cmd_node->cmd_skb->len > cmd_size) | 186 | if (cmd_node->cmd_skb->len > cmd_size) |
176 | /* | 187 | /* |
@@ -484,8 +495,6 @@ int mwifiex_send_cmd_sync(struct mwifiex_private *priv, uint16_t cmd_no, | |||
484 | 495 | ||
485 | ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid, | 496 | ret = mwifiex_send_cmd_async(priv, cmd_no, cmd_action, cmd_oid, |
486 | data_buf); | 497 | data_buf); |
487 | if (!ret) | ||
488 | ret = mwifiex_wait_queue_complete(adapter); | ||
489 | 498 | ||
490 | return ret; | 499 | return ret; |
491 | } | 500 | } |
@@ -588,9 +597,10 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no, | |||
588 | if (cmd_no == HostCmd_CMD_802_11_SCAN) { | 597 | if (cmd_no == HostCmd_CMD_802_11_SCAN) { |
589 | mwifiex_queue_scan_cmd(priv, cmd_node); | 598 | mwifiex_queue_scan_cmd(priv, cmd_node); |
590 | } else { | 599 | } else { |
591 | adapter->cmd_queued = cmd_node; | ||
592 | mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); | 600 | mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); |
593 | queue_work(adapter->workqueue, &adapter->main_work); | 601 | queue_work(adapter->workqueue, &adapter->main_work); |
602 | if (cmd_node->wait_q_enabled) | ||
603 | ret = mwifiex_wait_queue_complete(adapter, cmd_node); | ||
594 | } | 604 | } |
595 | 605 | ||
596 | return ret; | 606 | return ret; |
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index e38aa9b3663d..0ff4c37ab42a 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c | |||
@@ -709,6 +709,14 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) | |||
709 | return ret; | 709 | return ret; |
710 | } | 710 | } |
711 | 711 | ||
712 | /* cancel current command */ | ||
713 | if (adapter->curr_cmd) { | ||
714 | dev_warn(adapter->dev, "curr_cmd is still in processing\n"); | ||
715 | del_timer(&adapter->cmd_timer); | ||
716 | mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd); | ||
717 | adapter->curr_cmd = NULL; | ||
718 | } | ||
719 | |||
712 | /* shut down mwifiex */ | 720 | /* shut down mwifiex */ |
713 | dev_dbg(adapter->dev, "info: shutdown mwifiex...\n"); | 721 | dev_dbg(adapter->dev, "info: shutdown mwifiex...\n"); |
714 | 722 | ||
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c index 246aa62a4817..2fe0ceba4400 100644 --- a/drivers/net/wireless/mwifiex/join.c +++ b/drivers/net/wireless/mwifiex/join.c | |||
@@ -1117,10 +1117,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv, | |||
1117 | adhoc_join->bss_descriptor.bssid, | 1117 | adhoc_join->bss_descriptor.bssid, |
1118 | adhoc_join->bss_descriptor.ssid); | 1118 | adhoc_join->bss_descriptor.ssid); |
1119 | 1119 | ||
1120 | for (i = 0; bss_desc->supported_rates[i] && | 1120 | for (i = 0; i < MWIFIEX_SUPPORTED_RATES && |
1121 | i < MWIFIEX_SUPPORTED_RATES; | 1121 | bss_desc->supported_rates[i]; i++) |
1122 | i++) | 1122 | ; |
1123 | ; | ||
1124 | rates_size = i; | 1123 | rates_size = i; |
1125 | 1124 | ||
1126 | /* Copy Data Rates from the Rates recorded in scan response */ | 1125 | /* Copy Data Rates from the Rates recorded in scan response */ |
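Swapping the two loop conditions in mwifiex_cmd_802_11_ad_hoc_join() puts the bounds check before the array access, so supported_rates[i] is only read while i is still below MWIFIEX_SUPPORTED_RATES. The short-circuit ordering on its own:

/* Sketch: with &&, the left operand guards the right, so the index check
 * must come first. */
#include <stddef.h>

#define MAX_RATES 14

static size_t count_rates(const unsigned char rates[MAX_RATES])
{
        size_t i;

        for (i = 0; i < MAX_RATES && rates[i]; i++)
                ;       /* i < MAX_RATES is evaluated before rates[i] */
        return i;
}
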
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 553adfb0aa81..7035ade9af74 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h | |||
@@ -723,7 +723,6 @@ struct mwifiex_adapter { | |||
723 | u16 cmd_wait_q_required; | 723 | u16 cmd_wait_q_required; |
724 | struct mwifiex_wait_queue cmd_wait_q; | 724 | struct mwifiex_wait_queue cmd_wait_q; |
725 | u8 scan_wait_q_woken; | 725 | u8 scan_wait_q_woken; |
726 | struct cmd_ctrl_node *cmd_queued; | ||
727 | spinlock_t queue_lock; /* lock for tx queues */ | 726 | spinlock_t queue_lock; /* lock for tx queues */ |
728 | struct completion fw_load; | 727 | struct completion fw_load; |
729 | u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; | 728 | u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; |
@@ -1018,7 +1017,8 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, | |||
1018 | struct mwifiex_multicast_list *mcast_list); | 1017 | struct mwifiex_multicast_list *mcast_list); |
1019 | int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist, | 1018 | int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist, |
1020 | struct net_device *dev); | 1019 | struct net_device *dev); |
1021 | int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter); | 1020 | int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter, |
1021 | struct cmd_ctrl_node *cmd_queued); | ||
1022 | int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, | 1022 | int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, |
1023 | struct cfg80211_ssid *req_ssid); | 1023 | struct cfg80211_ssid *req_ssid); |
1024 | int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type); | 1024 | int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type); |
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 5c395e2e6a2b..feb204613397 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c | |||
@@ -1508,6 +1508,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) | |||
1508 | } | 1508 | } |
1509 | memcpy(adapter->upld_buf, skb->data, | 1509 | memcpy(adapter->upld_buf, skb->data, |
1510 | min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len)); | 1510 | min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len)); |
1511 | skb_push(skb, INTF_HEADER_LEN); | ||
1511 | if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, | 1512 | if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE, |
1512 | PCI_DMA_FROMDEVICE)) | 1513 | PCI_DMA_FROMDEVICE)) |
1513 | return -1; | 1514 | return -1; |
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index bb60c2754a97..e7f6deaf715e 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c | |||
@@ -1388,10 +1388,15 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, | |||
1388 | list_del(&cmd_node->list); | 1388 | list_del(&cmd_node->list); |
1389 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, | 1389 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, |
1390 | flags); | 1390 | flags); |
1391 | adapter->cmd_queued = cmd_node; | ||
1392 | mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, | 1391 | mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, |
1393 | true); | 1392 | true); |
1394 | queue_work(adapter->workqueue, &adapter->main_work); | 1393 | queue_work(adapter->workqueue, &adapter->main_work); |
1394 | |||
1395 | /* Perform internal scan synchronously */ | ||
1396 | if (!priv->scan_request) { | ||
1397 | dev_dbg(adapter->dev, "wait internal scan\n"); | ||
1398 | mwifiex_wait_queue_complete(adapter, cmd_node); | ||
1399 | } | ||
1395 | } else { | 1400 | } else { |
1396 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, | 1401 | spin_unlock_irqrestore(&adapter->scan_pending_q_lock, |
1397 | flags); | 1402 | flags); |
@@ -1790,7 +1795,12 @@ check_next_scan: | |||
1790 | /* Need to indicate IOCTL complete */ | 1795 | /* Need to indicate IOCTL complete */ |
1791 | if (adapter->curr_cmd->wait_q_enabled) { | 1796 | if (adapter->curr_cmd->wait_q_enabled) { |
1792 | adapter->cmd_wait_q.status = 0; | 1797 | adapter->cmd_wait_q.status = 0; |
1793 | mwifiex_complete_cmd(adapter, adapter->curr_cmd); | 1798 | if (!priv->scan_request) { |
1799 | dev_dbg(adapter->dev, | ||
1800 | "complete internal scan\n"); | ||
1801 | mwifiex_complete_cmd(adapter, | ||
1802 | adapter->curr_cmd); | ||
1803 | } | ||
1794 | } | 1804 | } |
1795 | if (priv->report_scan_result) | 1805 | if (priv->report_scan_result) |
1796 | priv->report_scan_result = false; | 1806 | priv->report_scan_result = false; |
@@ -1946,9 +1956,6 @@ int mwifiex_request_scan(struct mwifiex_private *priv, | |||
1946 | /* Normal scan */ | 1956 | /* Normal scan */ |
1947 | ret = mwifiex_scan_networks(priv, NULL); | 1957 | ret = mwifiex_scan_networks(priv, NULL); |
1948 | 1958 | ||
1949 | if (!ret) | ||
1950 | ret = mwifiex_wait_queue_complete(priv->adapter); | ||
1951 | |||
1952 | up(&priv->async_sem); | 1959 | up(&priv->async_sem); |
1953 | 1960 | ||
1954 | return ret; | 1961 | return ret; |
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index 9f33c92c90f5..13100f8de3db 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c | |||
@@ -54,16 +54,10 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist, | |||
54 | * This function waits on a cmd wait queue. It also cancels the pending | 54 | * This function waits on a cmd wait queue. It also cancels the pending |
55 | * request after waking up, in case of errors. | 55 | * request after waking up, in case of errors. |
56 | */ | 56 | */ |
57 | int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter) | 57 | int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter, |
58 | struct cmd_ctrl_node *cmd_queued) | ||
58 | { | 59 | { |
59 | int status; | 60 | int status; |
60 | struct cmd_ctrl_node *cmd_queued; | ||
61 | |||
62 | if (!adapter->cmd_queued) | ||
63 | return 0; | ||
64 | |||
65 | cmd_queued = adapter->cmd_queued; | ||
66 | adapter->cmd_queued = NULL; | ||
67 | 61 | ||
68 | dev_dbg(adapter->dev, "cmd pending\n"); | 62 | dev_dbg(adapter->dev, "cmd pending\n"); |
69 | atomic_inc(&adapter->cmd_pending); | 63 | atomic_inc(&adapter->cmd_pending); |
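Across cmdevt.c, scan.c, main.h and sta_ioctl.c the synchronous wait now takes the cmd_ctrl_node it should wait on instead of reading a shared adapter->cmd_queued pointer, which a concurrent caller could overwrite. A rough sketch of the shape of that interface change; the types and the busy-wait are stand-ins for the driver's completion machinery:

/* Sketch: wait on the node you queued, not on a shared adapter field. */
struct cmd_node {
        volatile int done;              /* completion stand-in */
        int status;
};

static int wait_queue_complete(struct cmd_node *cmd_queued)
{
        /* the driver uses wait_for_completion() here */
        while (!cmd_queued->done)
                ;
        return cmd_queued->status;
}

static int send_cmd_sync(struct cmd_node *node)
{
        /* queue_cmd(node); kick_main_work(); */
        return wait_queue_complete(node);       /* no adapter->cmd_queued */
}
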
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index 44d6ead43341..76cd47eb901e 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig | |||
@@ -20,6 +20,7 @@ if RT2X00 | |||
20 | config RT2400PCI | 20 | config RT2400PCI |
21 | tristate "Ralink rt2400 (PCI/PCMCIA) support" | 21 | tristate "Ralink rt2400 (PCI/PCMCIA) support" |
22 | depends on PCI | 22 | depends on PCI |
23 | select RT2X00_LIB_MMIO | ||
23 | select RT2X00_LIB_PCI | 24 | select RT2X00_LIB_PCI |
24 | select EEPROM_93CX6 | 25 | select EEPROM_93CX6 |
25 | ---help--- | 26 | ---help--- |
@@ -31,6 +32,7 @@ config RT2400PCI | |||
31 | config RT2500PCI | 32 | config RT2500PCI |
32 | tristate "Ralink rt2500 (PCI/PCMCIA) support" | 33 | tristate "Ralink rt2500 (PCI/PCMCIA) support" |
33 | depends on PCI | 34 | depends on PCI |
35 | select RT2X00_LIB_MMIO | ||
34 | select RT2X00_LIB_PCI | 36 | select RT2X00_LIB_PCI |
35 | select EEPROM_93CX6 | 37 | select EEPROM_93CX6 |
36 | ---help--- | 38 | ---help--- |
@@ -43,6 +45,7 @@ config RT61PCI | |||
43 | tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support" | 45 | tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support" |
44 | depends on PCI | 46 | depends on PCI |
45 | select RT2X00_LIB_PCI | 47 | select RT2X00_LIB_PCI |
48 | select RT2X00_LIB_MMIO | ||
46 | select RT2X00_LIB_FIRMWARE | 49 | select RT2X00_LIB_FIRMWARE |
47 | select RT2X00_LIB_CRYPTO | 50 | select RT2X00_LIB_CRYPTO |
48 | select CRC_ITU_T | 51 | select CRC_ITU_T |
@@ -55,10 +58,11 @@ config RT61PCI | |||
55 | 58 | ||
56 | config RT2800PCI | 59 | config RT2800PCI |
57 | tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" | 60 | tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" |
58 | depends on PCI || RALINK_RT288X || RALINK_RT305X | 61 | depends on PCI || SOC_RT288X || SOC_RT305X |
59 | select RT2800_LIB | 62 | select RT2800_LIB |
63 | select RT2X00_LIB_MMIO | ||
60 | select RT2X00_LIB_PCI if PCI | 64 | select RT2X00_LIB_PCI if PCI |
61 | select RT2X00_LIB_SOC if RALINK_RT288X || RALINK_RT305X | 65 | select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X |
62 | select RT2X00_LIB_FIRMWARE | 66 | select RT2X00_LIB_FIRMWARE |
63 | select RT2X00_LIB_CRYPTO | 67 | select RT2X00_LIB_CRYPTO |
64 | select CRC_CCITT | 68 | select CRC_CCITT |
@@ -185,6 +189,9 @@ endif | |||
185 | config RT2800_LIB | 189 | config RT2800_LIB |
186 | tristate | 190 | tristate |
187 | 191 | ||
192 | config RT2X00_LIB_MMIO | ||
193 | tristate | ||
194 | |||
188 | config RT2X00_LIB_PCI | 195 | config RT2X00_LIB_PCI |
189 | tristate | 196 | tristate |
190 | select RT2X00_LIB | 197 | select RT2X00_LIB |
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile index 349d5b8284a4..f069d8bc5b67 100644 --- a/drivers/net/wireless/rt2x00/Makefile +++ b/drivers/net/wireless/rt2x00/Makefile | |||
@@ -9,6 +9,7 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o | |||
9 | rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o | 9 | rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o |
10 | 10 | ||
11 | obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o | 11 | obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o |
12 | obj-$(CONFIG_RT2X00_LIB_MMIO) += rt2x00mmio.o | ||
12 | obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o | 13 | obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o |
13 | obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o | 14 | obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o |
14 | obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o | 15 | obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o |
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c index 221beaaa83f1..dcfb54e0c516 100644 --- a/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/rt2x00/rt2400pci.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | 35 | ||
36 | #include "rt2x00.h" | 36 | #include "rt2x00.h" |
37 | #include "rt2x00mmio.h" | ||
37 | #include "rt2x00pci.h" | 38 | #include "rt2x00pci.h" |
38 | #include "rt2400pci.h" | 39 | #include "rt2400pci.h" |
39 | 40 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c index 39edc59e8d03..e1d2dc9ed28a 100644 --- a/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/rt2x00/rt2500pci.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | 35 | ||
36 | #include "rt2x00.h" | 36 | #include "rt2x00.h" |
37 | #include "rt2x00mmio.h" | ||
37 | #include "rt2x00pci.h" | 38 | #include "rt2x00pci.h" |
38 | #include "rt2500pci.h" | 39 | #include "rt2500pci.h" |
39 | 40 | ||
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 48a01aa21f1c..ba5a05625aaa 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/eeprom_93cx6.h> | 41 | #include <linux/eeprom_93cx6.h> |
42 | 42 | ||
43 | #include "rt2x00.h" | 43 | #include "rt2x00.h" |
44 | #include "rt2x00mmio.h" | ||
44 | #include "rt2x00pci.h" | 45 | #include "rt2x00pci.h" |
45 | #include "rt2x00soc.h" | 46 | #include "rt2x00soc.h" |
46 | #include "rt2800lib.h" | 47 | #include "rt2800lib.h" |
@@ -89,7 +90,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token) | |||
89 | rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); | 90 | rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); |
90 | } | 91 | } |
91 | 92 | ||
92 | #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) | 93 | #if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) |
93 | static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) | 94 | static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) |
94 | { | 95 | { |
95 | void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE); | 96 | void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE); |
@@ -107,7 +108,7 @@ static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) | |||
107 | { | 108 | { |
108 | return -ENOMEM; | 109 | return -ENOMEM; |
109 | } | 110 | } |
110 | #endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ | 111 | #endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */ |
111 | 112 | ||
112 | #ifdef CONFIG_PCI | 113 | #ifdef CONFIG_PCI |
113 | static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) | 114 | static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) |
@@ -1177,7 +1178,7 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_device_table); | |||
1177 | #endif /* CONFIG_PCI */ | 1178 | #endif /* CONFIG_PCI */ |
1178 | MODULE_LICENSE("GPL"); | 1179 | MODULE_LICENSE("GPL"); |
1179 | 1180 | ||
1180 | #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) | 1181 | #if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) |
1181 | static int rt2800soc_probe(struct platform_device *pdev) | 1182 | static int rt2800soc_probe(struct platform_device *pdev) |
1182 | { | 1183 | { |
1183 | return rt2x00soc_probe(pdev, &rt2800pci_ops); | 1184 | return rt2x00soc_probe(pdev, &rt2800pci_ops); |
@@ -1194,7 +1195,7 @@ static struct platform_driver rt2800soc_driver = { | |||
1194 | .suspend = rt2x00soc_suspend, | 1195 | .suspend = rt2x00soc_suspend, |
1195 | .resume = rt2x00soc_resume, | 1196 | .resume = rt2x00soc_resume, |
1196 | }; | 1197 | }; |
1197 | #endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ | 1198 | #endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */ |
1198 | 1199 | ||
1199 | #ifdef CONFIG_PCI | 1200 | #ifdef CONFIG_PCI |
1200 | static int rt2800pci_probe(struct pci_dev *pci_dev, | 1201 | static int rt2800pci_probe(struct pci_dev *pci_dev, |
@@ -1217,7 +1218,7 @@ static int __init rt2800pci_init(void) | |||
1217 | { | 1218 | { |
1218 | int ret = 0; | 1219 | int ret = 0; |
1219 | 1220 | ||
1220 | #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) | 1221 | #if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) |
1221 | ret = platform_driver_register(&rt2800soc_driver); | 1222 | ret = platform_driver_register(&rt2800soc_driver); |
1222 | if (ret) | 1223 | if (ret) |
1223 | return ret; | 1224 | return ret; |
@@ -1225,7 +1226,7 @@ static int __init rt2800pci_init(void) | |||
1225 | #ifdef CONFIG_PCI | 1226 | #ifdef CONFIG_PCI |
1226 | ret = pci_register_driver(&rt2800pci_driver); | 1227 | ret = pci_register_driver(&rt2800pci_driver); |
1227 | if (ret) { | 1228 | if (ret) { |
1228 | #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) | 1229 | #if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) |
1229 | platform_driver_unregister(&rt2800soc_driver); | 1230 | platform_driver_unregister(&rt2800soc_driver); |
1230 | #endif | 1231 | #endif |
1231 | return ret; | 1232 | return ret; |
@@ -1240,7 +1241,7 @@ static void __exit rt2800pci_exit(void) | |||
1240 | #ifdef CONFIG_PCI | 1241 | #ifdef CONFIG_PCI |
1241 | pci_unregister_driver(&rt2800pci_driver); | 1242 | pci_unregister_driver(&rt2800pci_driver); |
1242 | #endif | 1243 | #endif |
1243 | #if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) | 1244 | #if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X) |
1244 | platform_driver_unregister(&rt2800soc_driver); | 1245 | platform_driver_unregister(&rt2800soc_driver); |
1245 | #endif | 1246 | #endif |
1246 | } | 1247 | } |
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.c b/drivers/net/wireless/rt2x00/rt2x00mmio.c new file mode 100644 index 000000000000..d84a680ba0c9 --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00mmio.c | |||
@@ -0,0 +1,216 @@ | |||
1 | /* | ||
2 | Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> | ||
3 | <http://rt2x00.serialmonkey.com> | ||
4 | |||
5 | This program is free software; you can redistribute it and/or modify | ||
6 | it under the terms of the GNU General Public License as published by | ||
7 | the Free Software Foundation; either version 2 of the License, or | ||
8 | (at your option) any later version. | ||
9 | |||
10 | This program is distributed in the hope that it will be useful, | ||
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | GNU General Public License for more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License | ||
16 | along with this program; if not, write to the | ||
17 | Free Software Foundation, Inc., | ||
18 | 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | Module: rt2x00mmio | ||
23 | Abstract: rt2x00 generic mmio device routines. | ||
24 | */ | ||
25 | |||
26 | #include <linux/dma-mapping.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/slab.h> | ||
30 | |||
31 | #include "rt2x00.h" | ||
32 | #include "rt2x00mmio.h" | ||
33 | |||
34 | /* | ||
35 | * Register access. | ||
36 | */ | ||
37 | int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, | ||
38 | const unsigned int offset, | ||
39 | const struct rt2x00_field32 field, | ||
40 | u32 *reg) | ||
41 | { | ||
42 | unsigned int i; | ||
43 | |||
44 | if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) | ||
45 | return 0; | ||
46 | |||
47 | for (i = 0; i < REGISTER_BUSY_COUNT; i++) { | ||
48 | rt2x00pci_register_read(rt2x00dev, offset, reg); | ||
49 | if (!rt2x00_get_field32(*reg, field)) | ||
50 | return 1; | ||
51 | udelay(REGISTER_BUSY_DELAY); | ||
52 | } | ||
53 | |||
54 | printk_once(KERN_ERR "%s() Indirect register access failed: " | ||
55 | "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg); | ||
56 | *reg = ~0; | ||
57 | |||
58 | return 0; | ||
59 | } | ||
60 | EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); | ||
61 | |||
62 | bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) | ||
63 | { | ||
64 | struct data_queue *queue = rt2x00dev->rx; | ||
65 | struct queue_entry *entry; | ||
66 | struct queue_entry_priv_pci *entry_priv; | ||
67 | struct skb_frame_desc *skbdesc; | ||
68 | int max_rx = 16; | ||
69 | |||
70 | while (--max_rx) { | ||
71 | entry = rt2x00queue_get_entry(queue, Q_INDEX); | ||
72 | entry_priv = entry->priv_data; | ||
73 | |||
74 | if (rt2x00dev->ops->lib->get_entry_state(entry)) | ||
75 | break; | ||
76 | |||
77 | /* | ||
78 | * Fill in desc fields of the skb descriptor | ||
79 | */ | ||
80 | skbdesc = get_skb_frame_desc(entry->skb); | ||
81 | skbdesc->desc = entry_priv->desc; | ||
82 | skbdesc->desc_len = entry->queue->desc_size; | ||
83 | |||
84 | /* | ||
85 | * DMA is already done, notify rt2x00lib that | ||
86 | * it finished successfully. | ||
87 | */ | ||
88 | rt2x00lib_dmastart(entry); | ||
89 | rt2x00lib_dmadone(entry); | ||
90 | |||
91 | /* | ||
92 | * Send the frame to rt2x00lib for further processing. | ||
93 | */ | ||
94 | rt2x00lib_rxdone(entry, GFP_ATOMIC); | ||
95 | } | ||
96 | |||
97 | return !max_rx; | ||
98 | } | ||
99 | EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); | ||
100 | |||
101 | void rt2x00pci_flush_queue(struct data_queue *queue, bool drop) | ||
102 | { | ||
103 | unsigned int i; | ||
104 | |||
105 | for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) | ||
106 | msleep(10); | ||
107 | } | ||
108 | EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue); | ||
109 | |||
110 | /* | ||
111 | * Device initialization handlers. | ||
112 | */ | ||
113 | static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, | ||
114 | struct data_queue *queue) | ||
115 | { | ||
116 | struct queue_entry_priv_pci *entry_priv; | ||
117 | void *addr; | ||
118 | dma_addr_t dma; | ||
119 | unsigned int i; | ||
120 | |||
121 | /* | ||
122 | * Allocate DMA memory for descriptor and buffer. | ||
123 | */ | ||
124 | addr = dma_alloc_coherent(rt2x00dev->dev, | ||
125 | queue->limit * queue->desc_size, | ||
126 | &dma, GFP_KERNEL); | ||
127 | if (!addr) | ||
128 | return -ENOMEM; | ||
129 | |||
130 | memset(addr, 0, queue->limit * queue->desc_size); | ||
131 | |||
132 | /* | ||
133 | * Initialize all queue entries to contain valid addresses. | ||
134 | */ | ||
135 | for (i = 0; i < queue->limit; i++) { | ||
136 | entry_priv = queue->entries[i].priv_data; | ||
137 | entry_priv->desc = addr + i * queue->desc_size; | ||
138 | entry_priv->desc_dma = dma + i * queue->desc_size; | ||
139 | } | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev, | ||
145 | struct data_queue *queue) | ||
146 | { | ||
147 | struct queue_entry_priv_pci *entry_priv = | ||
148 | queue->entries[0].priv_data; | ||
149 | |||
150 | if (entry_priv->desc) | ||
151 | dma_free_coherent(rt2x00dev->dev, | ||
152 | queue->limit * queue->desc_size, | ||
153 | entry_priv->desc, entry_priv->desc_dma); | ||
154 | entry_priv->desc = NULL; | ||
155 | } | ||
156 | |||
157 | int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) | ||
158 | { | ||
159 | struct data_queue *queue; | ||
160 | int status; | ||
161 | |||
162 | /* | ||
163 | * Allocate DMA | ||
164 | */ | ||
165 | queue_for_each(rt2x00dev, queue) { | ||
166 | status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue); | ||
167 | if (status) | ||
168 | goto exit; | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Register interrupt handler. | ||
173 | */ | ||
174 | status = request_irq(rt2x00dev->irq, | ||
175 | rt2x00dev->ops->lib->irq_handler, | ||
176 | IRQF_SHARED, rt2x00dev->name, rt2x00dev); | ||
177 | if (status) { | ||
178 | ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", | ||
179 | rt2x00dev->irq, status); | ||
180 | goto exit; | ||
181 | } | ||
182 | |||
183 | return 0; | ||
184 | |||
185 | exit: | ||
186 | queue_for_each(rt2x00dev, queue) | ||
187 | rt2x00pci_free_queue_dma(rt2x00dev, queue); | ||
188 | |||
189 | return status; | ||
190 | } | ||
191 | EXPORT_SYMBOL_GPL(rt2x00pci_initialize); | ||
192 | |||
193 | void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) | ||
194 | { | ||
195 | struct data_queue *queue; | ||
196 | |||
197 | /* | ||
198 | * Free irq line. | ||
199 | */ | ||
200 | free_irq(rt2x00dev->irq, rt2x00dev); | ||
201 | |||
202 | /* | ||
203 | * Free DMA | ||
204 | */ | ||
205 | queue_for_each(rt2x00dev, queue) | ||
206 | rt2x00pci_free_queue_dma(rt2x00dev, queue); | ||
207 | } | ||
208 | EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); | ||
209 | |||
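
For illustration only, not part of the patch: chip drivers typically hook these handlers into their rt2x00lib_ops table so rt2x00lib calls them at device initialization and teardown. The sketch below assumes the rt2x00lib_ops field names in this tree and an example_interrupt() handler that does not exist in the source.

/* Illustrative sketch: a partial ops table as a chip driver might provide it. */
static const struct rt2x00lib_ops example_rt2x00_ops = {
	.irq_handler	= example_interrupt,	  /* assumed driver IRQ handler */
	.initialize	= rt2x00pci_initialize,	  /* allocate DMA rings, request the IRQ */
	.uninitialize	= rt2x00pci_uninitialize, /* free the IRQ and DMA rings */
	/* remaining callbacks omitted */
};
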
210 | /* | ||
211 | * rt2x00mmio module information. | ||
212 | */ | ||
213 | MODULE_AUTHOR(DRV_PROJECT); | ||
214 | MODULE_VERSION(DRV_VERSION); | ||
215 | MODULE_DESCRIPTION("rt2x00 mmio library"); | ||
216 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/wireless/rt2x00/rt2x00mmio.h b/drivers/net/wireless/rt2x00/rt2x00mmio.h new file mode 100644 index 000000000000..4ecaf60175bf --- /dev/null +++ b/drivers/net/wireless/rt2x00/rt2x00mmio.h | |||
@@ -0,0 +1,119 @@ | |||
1 | /* | ||
2 | Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com> | ||
3 | <http://rt2x00.serialmonkey.com> | ||
4 | |||
5 | This program is free software; you can redistribute it and/or modify | ||
6 | it under the terms of the GNU General Public License as published by | ||
7 | the Free Software Foundation; either version 2 of the License, or | ||
8 | (at your option) any later version. | ||
9 | |||
10 | This program is distributed in the hope that it will be useful, | ||
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | GNU General Public License for more details. | ||
14 | |||
15 | You should have received a copy of the GNU General Public License | ||
16 | along with this program; if not, write to the | ||
17 | Free Software Foundation, Inc., | ||
18 | 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
19 | */ | ||
20 | |||
21 | /* | ||
22 | Module: rt2x00mmio | ||
23 | Abstract: Data structures for the rt2x00mmio module. | ||
24 | */ | ||
25 | |||
26 | #ifndef RT2X00MMIO_H | ||
27 | #define RT2X00MMIO_H | ||
28 | |||
29 | #include <linux/io.h> | ||
30 | |||
31 | /* | ||
32 | * Register access. | ||
33 | */ | ||
34 | static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, | ||
35 | const unsigned int offset, | ||
36 | u32 *value) | ||
37 | { | ||
38 | *value = readl(rt2x00dev->csr.base + offset); | ||
39 | } | ||
40 | |||
41 | static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, | ||
42 | const unsigned int offset, | ||
43 | void *value, const u32 length) | ||
44 | { | ||
45 | memcpy_fromio(value, rt2x00dev->csr.base + offset, length); | ||
46 | } | ||
47 | |||
48 | static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, | ||
49 | const unsigned int offset, | ||
50 | u32 value) | ||
51 | { | ||
52 | writel(value, rt2x00dev->csr.base + offset); | ||
53 | } | ||
54 | |||
55 | static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, | ||
56 | const unsigned int offset, | ||
57 | const void *value, | ||
58 | const u32 length) | ||
59 | { | ||
60 | __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2); | ||
61 | } | ||
62 | |||
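
For illustration only, not part of the patch: a minimal sketch of how a chip driver might use these accessors for a read-modify-write cycle. CSR_TEST and the bit value are assumptions, not names from the source.

/* Illustrative sketch: set an assumed enable bit in a control register. */
static void example_set_enable_bit(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	/* Read the current value through the MMIO window. */
	rt2x00pci_register_read(rt2x00dev, CSR_TEST, &reg);

	/* Set the assumed enable bit and write the value back. */
	reg |= 0x00000001;
	rt2x00pci_register_write(rt2x00dev, CSR_TEST, reg);
}
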
63 | /** | ||
64 | * rt2x00pci_regbusy_read - Read from register with busy check | ||
65 | * @rt2x00dev: Device pointer, see &struct rt2x00_dev. | ||
66 | * @offset: Register offset | ||
67 | * @field: Field to check if register is busy | ||
68 | * @reg: Pointer to where register contents should be stored | ||
69 | * | ||
70 | * This function reads the given register and checks whether the | ||
71 | * busy field is set. If it is, it waits a few microseconds before | ||
72 | * reading the register again. If the busy field has not cleared | ||
73 | * within a certain number of tries, this function returns 0 | ||
74 | * (false). | ||
75 | */ | ||
76 | int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, | ||
77 | const unsigned int offset, | ||
78 | const struct rt2x00_field32 field, | ||
79 | u32 *reg); | ||
80 | |||
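
For illustration only, not part of the patch: a hedged sketch of the usual calling pattern, where the caller bails out if the busy field never clears. BBPCSR and BBPCSR_BUSY are assumed names for a register offset and its busy-flag field definition.

/* Illustrative sketch: wait for an indirect register to become ready. */
static int example_wait_for_bbp(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	/* Poll until the busy field clears; give up after the built-in timeout. */
	if (!rt2x00pci_regbusy_read(rt2x00dev, BBPCSR, BBPCSR_BUSY, &reg))
		return -EBUSY;

	/* reg now holds the last value read from the register. */
	return 0;
}
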
81 | /** | ||
82 | * struct queue_entry_priv_pci: Per entry PCI specific information | ||
83 | * | ||
84 | * @desc: Pointer to device descriptor | ||
85 | * @desc_dma: DMA pointer to &desc. | ||
88 | */ | ||
89 | struct queue_entry_priv_pci { | ||
90 | __le32 *desc; | ||
91 | dma_addr_t desc_dma; | ||
92 | }; | ||
93 | |||
94 | /** | ||
95 | * rt2x00pci_rxdone - Handle RX done events | ||
96 | * @rt2x00dev: Device pointer, see &struct rt2x00_dev. | ||
97 | * | ||
98 | * Returns true if there are still rx frames pending and false if all | ||
99 | * pending rx frames were processed. | ||
100 | */ | ||
101 | bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); | ||
102 | |||
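
For illustration only, not part of the patch: because RX frames are handled in limited batches, callers usually reschedule their deferred work while this returns true. The rxdone_tasklet member and the tasklet-based design are assumptions about the chip driver, not something defined here.

/* Illustrative sketch: an RX tasklet that keeps running while frames remain. */
static void example_rxdone_tasklet(unsigned long data)
{
	struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;

	/* Reschedule ourselves while rt2x00pci_rxdone() reports pending frames. */
	if (rt2x00pci_rxdone(rt2x00dev))
		tasklet_schedule(&rt2x00dev->rxdone_tasklet);
}
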
103 | /** | ||
104 | * rt2x00pci_flush_queue - Flush data queue | ||
105 | * @queue: Data queue to flush | ||
106 | * @drop: True to drop all pending frames. | ||
107 | * | ||
108 | * This will wait for a maximum of 100 ms for the queue to | ||
109 | * become empty. | ||
110 | */ | ||
111 | void rt2x00pci_flush_queue(struct data_queue *queue, bool drop); | ||
112 | |||
113 | /* | ||
114 | * Device initialization handlers. | ||
115 | */ | ||
116 | int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); | ||
117 | void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); | ||
118 | |||
119 | #endif /* RT2X00MMIO_H */ | ||
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index a0c8caef3b0a..e87865e33113 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c | |||
@@ -33,182 +33,6 @@ | |||
33 | #include "rt2x00pci.h" | 33 | #include "rt2x00pci.h" |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Register access. | ||
37 | */ | ||
38 | int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, | ||
39 | const unsigned int offset, | ||
40 | const struct rt2x00_field32 field, | ||
41 | u32 *reg) | ||
42 | { | ||
43 | unsigned int i; | ||
44 | |||
45 | if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) | ||
46 | return 0; | ||
47 | |||
48 | for (i = 0; i < REGISTER_BUSY_COUNT; i++) { | ||
49 | rt2x00pci_register_read(rt2x00dev, offset, reg); | ||
50 | if (!rt2x00_get_field32(*reg, field)) | ||
51 | return 1; | ||
52 | udelay(REGISTER_BUSY_DELAY); | ||
53 | } | ||
54 | |||
55 | ERROR(rt2x00dev, "Indirect register access failed: " | ||
56 | "offset=0x%.08x, value=0x%.08x\n", offset, *reg); | ||
57 | *reg = ~0; | ||
58 | |||
59 | return 0; | ||
60 | } | ||
61 | EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read); | ||
62 | |||
63 | bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) | ||
64 | { | ||
65 | struct data_queue *queue = rt2x00dev->rx; | ||
66 | struct queue_entry *entry; | ||
67 | struct queue_entry_priv_pci *entry_priv; | ||
68 | struct skb_frame_desc *skbdesc; | ||
69 | int max_rx = 16; | ||
70 | |||
71 | while (--max_rx) { | ||
72 | entry = rt2x00queue_get_entry(queue, Q_INDEX); | ||
73 | entry_priv = entry->priv_data; | ||
74 | |||
75 | if (rt2x00dev->ops->lib->get_entry_state(entry)) | ||
76 | break; | ||
77 | |||
78 | /* | ||
79 | * Fill in desc fields of the skb descriptor | ||
80 | */ | ||
81 | skbdesc = get_skb_frame_desc(entry->skb); | ||
82 | skbdesc->desc = entry_priv->desc; | ||
83 | skbdesc->desc_len = entry->queue->desc_size; | ||
84 | |||
85 | /* | ||
86 | * DMA is already done, notify rt2x00lib that | ||
87 | * it finished successfully. | ||
88 | */ | ||
89 | rt2x00lib_dmastart(entry); | ||
90 | rt2x00lib_dmadone(entry); | ||
91 | |||
92 | /* | ||
93 | * Send the frame to rt2x00lib for further processing. | ||
94 | */ | ||
95 | rt2x00lib_rxdone(entry, GFP_ATOMIC); | ||
96 | } | ||
97 | |||
98 | return !max_rx; | ||
99 | } | ||
100 | EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); | ||
101 | |||
102 | void rt2x00pci_flush_queue(struct data_queue *queue, bool drop) | ||
103 | { | ||
104 | unsigned int i; | ||
105 | |||
106 | for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) | ||
107 | msleep(10); | ||
108 | } | ||
109 | EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue); | ||
110 | |||
111 | /* | ||
112 | * Device initialization handlers. | ||
113 | */ | ||
114 | static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, | ||
115 | struct data_queue *queue) | ||
116 | { | ||
117 | struct queue_entry_priv_pci *entry_priv; | ||
118 | void *addr; | ||
119 | dma_addr_t dma; | ||
120 | unsigned int i; | ||
121 | |||
122 | /* | ||
123 | * Allocate DMA memory for descriptor and buffer. | ||
124 | */ | ||
125 | addr = dma_alloc_coherent(rt2x00dev->dev, | ||
126 | queue->limit * queue->desc_size, | ||
127 | &dma, GFP_KERNEL); | ||
128 | if (!addr) | ||
129 | return -ENOMEM; | ||
130 | |||
131 | memset(addr, 0, queue->limit * queue->desc_size); | ||
132 | |||
133 | /* | ||
134 | * Initialize all queue entries to contain valid addresses. | ||
135 | */ | ||
136 | for (i = 0; i < queue->limit; i++) { | ||
137 | entry_priv = queue->entries[i].priv_data; | ||
138 | entry_priv->desc = addr + i * queue->desc_size; | ||
139 | entry_priv->desc_dma = dma + i * queue->desc_size; | ||
140 | } | ||
141 | |||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev, | ||
146 | struct data_queue *queue) | ||
147 | { | ||
148 | struct queue_entry_priv_pci *entry_priv = | ||
149 | queue->entries[0].priv_data; | ||
150 | |||
151 | if (entry_priv->desc) | ||
152 | dma_free_coherent(rt2x00dev->dev, | ||
153 | queue->limit * queue->desc_size, | ||
154 | entry_priv->desc, entry_priv->desc_dma); | ||
155 | entry_priv->desc = NULL; | ||
156 | } | ||
157 | |||
158 | int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) | ||
159 | { | ||
160 | struct data_queue *queue; | ||
161 | int status; | ||
162 | |||
163 | /* | ||
164 | * Allocate DMA | ||
165 | */ | ||
166 | queue_for_each(rt2x00dev, queue) { | ||
167 | status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue); | ||
168 | if (status) | ||
169 | goto exit; | ||
170 | } | ||
171 | |||
172 | /* | ||
173 | * Register interrupt handler. | ||
174 | */ | ||
175 | status = request_irq(rt2x00dev->irq, | ||
176 | rt2x00dev->ops->lib->irq_handler, | ||
177 | IRQF_SHARED, rt2x00dev->name, rt2x00dev); | ||
178 | if (status) { | ||
179 | ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n", | ||
180 | rt2x00dev->irq, status); | ||
181 | goto exit; | ||
182 | } | ||
183 | |||
184 | return 0; | ||
185 | |||
186 | exit: | ||
187 | queue_for_each(rt2x00dev, queue) | ||
188 | rt2x00pci_free_queue_dma(rt2x00dev, queue); | ||
189 | |||
190 | return status; | ||
191 | } | ||
192 | EXPORT_SYMBOL_GPL(rt2x00pci_initialize); | ||
193 | |||
194 | void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) | ||
195 | { | ||
196 | struct data_queue *queue; | ||
197 | |||
198 | /* | ||
199 | * Free irq line. | ||
200 | */ | ||
201 | free_irq(rt2x00dev->irq, rt2x00dev); | ||
202 | |||
203 | /* | ||
204 | * Free DMA | ||
205 | */ | ||
206 | queue_for_each(rt2x00dev, queue) | ||
207 | rt2x00pci_free_queue_dma(rt2x00dev, queue); | ||
208 | } | ||
209 | EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); | ||
210 | |||
211 | /* | ||
212 | * PCI driver handlers. | 36 | * PCI driver handlers. |
213 | */ | 37 | */ |
214 | static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev) | 38 | static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev) |
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h index e2c99f2b9a14..60d90b20f8b9 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.h +++ b/drivers/net/wireless/rt2x00/rt2x00pci.h | |||
@@ -36,94 +36,6 @@ | |||
36 | #define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops) | 36 | #define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops) |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * Register access. | ||
40 | */ | ||
41 | static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, | ||
42 | const unsigned int offset, | ||
43 | u32 *value) | ||
44 | { | ||
45 | *value = readl(rt2x00dev->csr.base + offset); | ||
46 | } | ||
47 | |||
48 | static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, | ||
49 | const unsigned int offset, | ||
50 | void *value, const u32 length) | ||
51 | { | ||
52 | memcpy_fromio(value, rt2x00dev->csr.base + offset, length); | ||
53 | } | ||
54 | |||
55 | static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, | ||
56 | const unsigned int offset, | ||
57 | u32 value) | ||
58 | { | ||
59 | writel(value, rt2x00dev->csr.base + offset); | ||
60 | } | ||
61 | |||
62 | static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, | ||
63 | const unsigned int offset, | ||
64 | const void *value, | ||
65 | const u32 length) | ||
66 | { | ||
67 | __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2); | ||
68 | } | ||
69 | |||
70 | /** | ||
71 | * rt2x00pci_regbusy_read - Read from register with busy check | ||
72 | * @rt2x00dev: Device pointer, see &struct rt2x00_dev. | ||
73 | * @offset: Register offset | ||
74 | * @field: Field to check if register is busy | ||
75 | * @reg: Pointer to where register contents should be stored | ||
76 | * | ||
77 | * This function will read the given register, and checks if the | ||
78 | * register is busy. If it is, it will sleep for a couple of | ||
79 | * microseconds before reading the register again. If the register | ||
80 | * is not read after a certain timeout, this function will return | ||
81 | * FALSE. | ||
82 | */ | ||
83 | int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, | ||
84 | const unsigned int offset, | ||
85 | const struct rt2x00_field32 field, | ||
86 | u32 *reg); | ||
87 | |||
88 | /** | ||
89 | * struct queue_entry_priv_pci: Per entry PCI specific information | ||
90 | * | ||
91 | * @desc: Pointer to device descriptor | ||
92 | * @desc_dma: DMA pointer to &desc. | ||
93 | * @data: Pointer to device's entry memory. | ||
94 | * @data_dma: DMA pointer to &data. | ||
95 | */ | ||
96 | struct queue_entry_priv_pci { | ||
97 | __le32 *desc; | ||
98 | dma_addr_t desc_dma; | ||
99 | }; | ||
100 | |||
101 | /** | ||
102 | * rt2x00pci_rxdone - Handle RX done events | ||
103 | * @rt2x00dev: Device pointer, see &struct rt2x00_dev. | ||
104 | * | ||
105 | * Returns true if there are still rx frames pending and false if all | ||
106 | * pending rx frames were processed. | ||
107 | */ | ||
108 | bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); | ||
109 | |||
110 | /** | ||
111 | * rt2x00pci_flush_queue - Flush data queue | ||
112 | * @queue: Data queue to stop | ||
113 | * @drop: True to drop all pending frames. | ||
114 | * | ||
115 | * This will wait for a maximum of 100ms, waiting for the queues | ||
116 | * to become empty. | ||
117 | */ | ||
118 | void rt2x00pci_flush_queue(struct data_queue *queue, bool drop); | ||
119 | |||
120 | /* | ||
121 | * Device initialization handlers. | ||
122 | */ | ||
123 | int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev); | ||
124 | void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev); | ||
125 | |||
126 | /* | ||
127 | * PCI driver handlers. | 39 | * PCI driver handlers. |
128 | */ | 40 | */ |
129 | int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops); | 41 | int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops); |
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index f95792cfcf89..9e3c8ff53e3f 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/eeprom_93cx6.h> | 35 | #include <linux/eeprom_93cx6.h> |
36 | 36 | ||
37 | #include "rt2x00.h" | 37 | #include "rt2x00.h" |
38 | #include "rt2x00mmio.h" | ||
38 | #include "rt2x00pci.h" | 39 | #include "rt2x00pci.h" |
39 | #include "rt61pci.h" | 40 | #include "rt61pci.h" |
40 | 41 | ||
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index b1ccff474c79..c08d0f4c5f3d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | |||
@@ -1377,74 +1377,57 @@ void rtl92cu_card_disable(struct ieee80211_hw *hw) | |||
1377 | 1377 | ||
1378 | void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) | 1378 | void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) |
1379 | { | 1379 | { |
1380 | /* dummy routine needed for callback from rtl_op_configure_filter() */ | ||
1381 | } | ||
1382 | |||
1383 | /*========================================================================== */ | ||
1384 | |||
1385 | static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw, | ||
1386 | enum nl80211_iftype type) | ||
1387 | { | ||
1388 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 1380 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
1389 | u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR); | ||
1390 | struct rtl_hal *rtlhal = rtl_hal(rtlpriv); | 1381 | struct rtl_hal *rtlhal = rtl_hal(rtlpriv); |
1391 | struct rtl_phy *rtlphy = &(rtlpriv->phy); | 1382 | u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR); |
1392 | u8 filterout_non_associated_bssid = false; | ||
1393 | 1383 | ||
1394 | switch (type) { | 1384 | if (rtlpriv->psc.rfpwr_state != ERFON) |
1395 | case NL80211_IFTYPE_ADHOC: | 1385 | return; |
1396 | case NL80211_IFTYPE_STATION: | 1386 | |
1397 | filterout_non_associated_bssid = true; | 1387 | if (check_bssid) { |
1398 | break; | 1388 | u8 tmp; |
1399 | case NL80211_IFTYPE_UNSPECIFIED: | ||
1400 | case NL80211_IFTYPE_AP: | ||
1401 | default: | ||
1402 | break; | ||
1403 | } | ||
1404 | if (filterout_non_associated_bssid) { | ||
1405 | if (IS_NORMAL_CHIP(rtlhal->version)) { | 1389 | if (IS_NORMAL_CHIP(rtlhal->version)) { |
1406 | switch (rtlphy->current_io_type) { | 1390 | reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); |
1407 | case IO_CMD_RESUME_DM_BY_SCAN: | 1391 | tmp = BIT(4); |
1408 | reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); | ||
1409 | rtlpriv->cfg->ops->set_hw_reg(hw, | ||
1410 | HW_VAR_RCR, (u8 *)(®_rcr)); | ||
1411 | /* enable update TSF */ | ||
1412 | _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4)); | ||
1413 | break; | ||
1414 | case IO_CMD_PAUSE_DM_BY_SCAN: | ||
1415 | reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); | ||
1416 | rtlpriv->cfg->ops->set_hw_reg(hw, | ||
1417 | HW_VAR_RCR, (u8 *)(®_rcr)); | ||
1418 | /* disable update TSF */ | ||
1419 | _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); | ||
1420 | break; | ||
1421 | } | ||
1422 | } else { | 1392 | } else { |
1423 | reg_rcr |= (RCR_CBSSID); | 1393 | reg_rcr |= RCR_CBSSID; |
1424 | rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, | 1394 | tmp = BIT(4) | BIT(5); |
1425 | (u8 *)(®_rcr)); | ||
1426 | _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5))); | ||
1427 | } | 1395 | } |
1428 | } else if (filterout_non_associated_bssid == false) { | 1396 | rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, |
1397 | (u8 *) (®_rcr)); | ||
1398 | _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp); | ||
1399 | } else { | ||
1400 | u8 tmp; | ||
1429 | if (IS_NORMAL_CHIP(rtlhal->version)) { | 1401 | if (IS_NORMAL_CHIP(rtlhal->version)) { |
1430 | reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); | 1402 | reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); |
1431 | rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, | 1403 | tmp = BIT(4); |
1432 | (u8 *)(®_rcr)); | ||
1433 | _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0); | ||
1434 | } else { | 1404 | } else { |
1435 | reg_rcr &= (~RCR_CBSSID); | 1405 | reg_rcr &= ~RCR_CBSSID; |
1436 | rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, | 1406 | tmp = BIT(4) | BIT(5); |
1437 | (u8 *)(®_rcr)); | ||
1438 | _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0); | ||
1439 | } | 1407 | } |
1408 | reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); | ||
1409 | rtlpriv->cfg->ops->set_hw_reg(hw, | ||
1410 | HW_VAR_RCR, (u8 *) (®_rcr)); | ||
1411 | _rtl92cu_set_bcn_ctrl_reg(hw, tmp, 0); | ||
1440 | } | 1412 | } |
1441 | } | 1413 | } |
1442 | 1414 | ||
1415 | /*========================================================================== */ | ||
1416 | |||
1443 | int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) | 1417 | int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) |
1444 | { | 1418 | { |
1419 | struct rtl_priv *rtlpriv = rtl_priv(hw); | ||
1420 | |||
1445 | if (_rtl92cu_set_media_status(hw, type)) | 1421 | if (_rtl92cu_set_media_status(hw, type)) |
1446 | return -EOPNOTSUPP; | 1422 | return -EOPNOTSUPP; |
1447 | _rtl92cu_set_check_bssid(hw, type); | 1423 | |
1424 | if (rtlpriv->mac80211.link_state == MAC80211_LINKED) { | ||
1425 | if (type != NL80211_IFTYPE_AP) | ||
1426 | rtl92cu_set_check_bssid(hw, true); | ||
1427 | } else { | ||
1428 | rtl92cu_set_check_bssid(hw, false); | ||
1429 | } | ||
1430 | |||
1448 | return 0; | 1431 | return 0; |
1449 | } | 1432 | } |
1450 | 1433 | ||
@@ -2058,8 +2041,6 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw, | |||
2058 | (shortgi_rate << 4) | (shortgi_rate); | 2041 | (shortgi_rate << 4) | (shortgi_rate); |
2059 | } | 2042 | } |
2060 | rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); | 2043 | rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); |
2061 | RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n", | ||
2062 | rtl_read_dword(rtlpriv, REG_ARFR0)); | ||
2063 | } | 2044 | } |
2064 | 2045 | ||
2065 | void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) | 2046 | void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) |
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 156b52732f3d..5847d6d0881e 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c | |||
@@ -851,6 +851,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
851 | if (unlikely(!_urb)) { | 851 | if (unlikely(!_urb)) { |
852 | RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, | 852 | RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, |
853 | "Can't allocate urb. Drop skb!\n"); | 853 | "Can't allocate urb. Drop skb!\n"); |
854 | kfree_skb(skb); | ||
854 | return; | 855 | return; |
855 | } | 856 | } |
856 | _rtl_submit_tx_urb(hw, _urb); | 857 | _rtl_submit_tx_urb(hw, _urb); |
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c index eef38cfd812e..ca33ae193935 100644 --- a/drivers/nfc/microread/mei.c +++ b/drivers/nfc/microread/mei.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/gpio.h> | 24 | #include <linux/gpio.h> |
25 | #include <linux/mei_bus.h> | 25 | #include <linux/mei_cl_bus.h> |
26 | 26 | ||
27 | #include <linux/nfc.h> | 27 | #include <linux/nfc.h> |
28 | #include <net/nfc/hci.h> | 28 | #include <net/nfc/hci.h> |
@@ -32,9 +32,6 @@ | |||
32 | 32 | ||
33 | #define MICROREAD_DRIVER_NAME "microread" | 33 | #define MICROREAD_DRIVER_NAME "microread" |
34 | 34 | ||
35 | #define MICROREAD_UUID UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, 0x94, \ | ||
36 | 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c) | ||
37 | |||
38 | struct mei_nfc_hdr { | 35 | struct mei_nfc_hdr { |
39 | u8 cmd; | 36 | u8 cmd; |
40 | u8 status; | 37 | u8 status; |
@@ -48,7 +45,7 @@ struct mei_nfc_hdr { | |||
48 | #define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD) | 45 | #define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD) |
49 | 46 | ||
50 | struct microread_mei_phy { | 47 | struct microread_mei_phy { |
51 | struct mei_device *mei_device; | 48 | struct mei_cl_device *device; |
52 | struct nfc_hci_dev *hdev; | 49 | struct nfc_hci_dev *hdev; |
53 | 50 | ||
54 | int powered; | 51 | int powered; |
@@ -105,14 +102,14 @@ static int microread_mei_write(void *phy_id, struct sk_buff *skb) | |||
105 | 102 | ||
106 | MEI_DUMP_SKB_OUT("mei frame sent", skb); | 103 | MEI_DUMP_SKB_OUT("mei frame sent", skb); |
107 | 104 | ||
108 | r = mei_send(phy->device, skb->data, skb->len); | 105 | r = mei_cl_send(phy->device, skb->data, skb->len); |
109 | if (r > 0) | 106 | if (r > 0) |
110 | r = 0; | 107 | r = 0; |
111 | 108 | ||
112 | return r; | 109 | return r; |
113 | } | 110 | } |
114 | 111 | ||
115 | static void microread_event_cb(struct mei_device *device, u32 events, | 112 | static void microread_event_cb(struct mei_cl_device *device, u32 events, |
116 | void *context) | 113 | void *context) |
117 | { | 114 | { |
118 | struct microread_mei_phy *phy = context; | 115 | struct microread_mei_phy *phy = context; |
@@ -120,7 +117,7 @@ static void microread_event_cb(struct mei_device *device, u32 events, | |||
120 | if (phy->hard_fault != 0) | 117 | if (phy->hard_fault != 0) |
121 | return; | 118 | return; |
122 | 119 | ||
123 | if (events & BIT(MEI_EVENT_RX)) { | 120 | if (events & BIT(MEI_CL_EVENT_RX)) { |
124 | struct sk_buff *skb; | 121 | struct sk_buff *skb; |
125 | int reply_size; | 122 | int reply_size; |
126 | 123 | ||
@@ -128,7 +125,7 @@ static void microread_event_cb(struct mei_device *device, u32 events, | |||
128 | if (!skb) | 125 | if (!skb) |
129 | return; | 126 | return; |
130 | 127 | ||
131 | reply_size = mei_recv(device, skb->data, MEI_NFC_MAX_READ); | 128 | reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ); |
132 | if (reply_size < MEI_NFC_HEADER_SIZE) { | 129 | if (reply_size < MEI_NFC_HEADER_SIZE) { |
133 | kfree(skb); | 130 | kfree(skb); |
134 | return; | 131 | return; |
@@ -149,8 +146,8 @@ static struct nfc_phy_ops mei_phy_ops = { | |||
149 | .disable = microread_mei_disable, | 146 | .disable = microread_mei_disable, |
150 | }; | 147 | }; |
151 | 148 | ||
152 | static int microread_mei_probe(struct mei_device *device, | 149 | static int microread_mei_probe(struct mei_cl_device *device, |
153 | const struct mei_id *id) | 150 | const struct mei_cl_device_id *id) |
154 | { | 151 | { |
155 | struct microread_mei_phy *phy; | 152 | struct microread_mei_phy *phy; |
156 | int r; | 153 | int r; |
@@ -164,9 +161,9 @@ static int microread_mei_probe(struct mei_device *device, | |||
164 | } | 161 | } |
165 | 162 | ||
166 | phy->device = device; | 163 | phy->device = device; |
167 | mei_set_clientdata(device, phy); | 164 | mei_cl_set_drvdata(device, phy); |
168 | 165 | ||
169 | r = mei_register_event_cb(device, microread_event_cb, phy); | 166 | r = mei_cl_register_event_cb(device, microread_event_cb, phy); |
170 | if (r) { | 167 | if (r) { |
171 | pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n"); | 168 | pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n"); |
172 | goto err_out; | 169 | goto err_out; |
@@ -186,9 +183,9 @@ err_out: | |||
186 | return r; | 183 | return r; |
187 | } | 184 | } |
188 | 185 | ||
189 | static int microread_mei_remove(struct mei_device *device) | 186 | static int microread_mei_remove(struct mei_cl_device *device) |
190 | { | 187 | { |
191 | struct microread_mei_phy *phy = mei_get_clientdata(device); | 188 | struct microread_mei_phy *phy = mei_cl_get_drvdata(device); |
192 | 189 | ||
193 | pr_info("Removing microread\n"); | 190 | pr_info("Removing microread\n"); |
194 | 191 | ||
@@ -202,16 +199,15 @@ static int microread_mei_remove(struct mei_device *device) | |||
202 | return 0; | 199 | return 0; |
203 | } | 200 | } |
204 | 201 | ||
205 | static struct mei_id microread_mei_tbl[] = { | 202 | static struct mei_cl_device_id microread_mei_tbl[] = { |
206 | { MICROREAD_DRIVER_NAME, MICROREAD_UUID }, | 203 | { MICROREAD_DRIVER_NAME }, |
207 | 204 | ||
208 | /* required last entry */ | 205 | /* required last entry */ |
209 | { } | 206 | { } |
210 | }; | 207 | }; |
211 | |||
212 | MODULE_DEVICE_TABLE(mei, microread_mei_tbl); | 208 | MODULE_DEVICE_TABLE(mei, microread_mei_tbl); |
213 | 209 | ||
214 | static struct mei_driver microread_driver = { | 210 | static struct mei_cl_driver microread_driver = { |
215 | .id_table = microread_mei_tbl, | 211 | .id_table = microread_mei_tbl, |
216 | .name = MICROREAD_DRIVER_NAME, | 212 | .name = MICROREAD_DRIVER_NAME, |
217 | 213 | ||
@@ -225,7 +221,7 @@ static int microread_mei_init(void) | |||
225 | 221 | ||
226 | pr_debug(DRIVER_DESC ": %s\n", __func__); | 222 | pr_debug(DRIVER_DESC ": %s\n", __func__); |
227 | 223 | ||
228 | r = mei_driver_register(µread_driver); | 224 | r = mei_cl_driver_register(µread_driver); |
229 | if (r) { | 225 | if (r) { |
230 | pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n"); | 226 | pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n"); |
231 | return r; | 227 | return r; |
@@ -236,7 +232,7 @@ static int microread_mei_init(void) | |||
236 | 232 | ||
237 | static void microread_mei_exit(void) | 233 | static void microread_mei_exit(void) |
238 | { | 234 | { |
239 | mei_driver_unregister(µread_driver); | 235 | mei_cl_driver_unregister(µread_driver); |
240 | } | 236 | } |
241 | 237 | ||
242 | module_init(microread_mei_init); | 238 | module_init(microread_mei_init); |
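
For illustration only, not part of the patch: the same registration skeleton on the new mei_cl_* bus API that the driver above now follows; every name prefixed with example_ is made up for the sketch.

static struct mei_cl_device_id example_mei_tbl[] = {
	{ "example" },
	{ }	/* required last entry */
};
MODULE_DEVICE_TABLE(mei, example_mei_tbl);

static struct mei_cl_driver example_mei_driver = {
	.id_table = example_mei_tbl,
	.name	  = "example",
	.probe	  = example_mei_probe,	/* int (*)(struct mei_cl_device *, const struct mei_cl_device_id *) */
	.remove	  = example_mei_remove,	/* int (*)(struct mei_cl_device *) */
};

static int __init example_mei_init(void)
{
	/* Register on the MEI client bus; devices are matched by name. */
	return mei_cl_driver_register(&example_mei_driver);
}

static void __exit example_mei_exit(void)
{
	mei_cl_driver_unregister(&example_mei_driver);
}

module_init(example_mei_init);
module_exit(example_mei_exit);
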
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index dee5dddaa292..5147c210df52 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -53,14 +53,15 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context) | |||
53 | return; | 53 | return; |
54 | } | 54 | } |
55 | 55 | ||
56 | if (!pci_dev->pm_cap || !pci_dev->pme_support | 56 | /* Clear PME Status if set. */ |
57 | || pci_check_pme_status(pci_dev)) { | 57 | if (pci_dev->pme_support) |
58 | if (pci_dev->pme_poll) | 58 | pci_check_pme_status(pci_dev); |
59 | pci_dev->pme_poll = false; | ||
60 | 59 | ||
61 | pci_wakeup_event(pci_dev); | 60 | if (pci_dev->pme_poll) |
62 | pm_runtime_resume(&pci_dev->dev); | 61 | pci_dev->pme_poll = false; |
63 | } | 62 | |
63 | pci_wakeup_event(pci_dev); | ||
64 | pm_runtime_resume(&pci_dev->dev); | ||
64 | 65 | ||
65 | if (pci_dev->subordinate) | 66 | if (pci_dev->subordinate) |
66 | pci_pme_wakeup_bus(pci_dev->subordinate); | 67 | pci_pme_wakeup_bus(pci_dev->subordinate); |
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 1fa1e482a999..79277fb36c6b 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -390,9 +390,10 @@ static void pci_device_shutdown(struct device *dev) | |||
390 | 390 | ||
391 | /* | 391 | /* |
392 | * Turn off Bus Master bit on the device to tell it to not | 392 | * Turn off Bus Master bit on the device to tell it to not |
393 | * continue to do DMA | 393 | * continue to do DMA. Don't touch devices in D3cold or unknown states. |
394 | */ | 394 | */ |
395 | pci_clear_master(pci_dev); | 395 | if (pci_dev->current_state <= PCI_D3hot) |
396 | pci_clear_master(pci_dev); | ||
396 | } | 397 | } |
397 | 398 | ||
398 | #ifdef CONFIG_PM | 399 | #ifdef CONFIG_PM |
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 08c243ab034e..ed4d09498337 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c | |||
@@ -185,14 +185,6 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { | |||
185 | #endif /* !PM */ | 185 | #endif /* !PM */ |
186 | 186 | ||
187 | /* | 187 | /* |
188 | * PCIe port runtime suspend is broken for some chipsets, so use a | ||
189 | * black list to disable runtime PM for these chipsets. | ||
190 | */ | ||
191 | static const struct pci_device_id port_runtime_pm_black_list[] = { | ||
192 | { /* end: all zeroes */ } | ||
193 | }; | ||
194 | |||
195 | /* | ||
196 | * pcie_portdrv_probe - Probe PCI-Express port devices | 188 | * pcie_portdrv_probe - Probe PCI-Express port devices |
197 | * @dev: PCI-Express port device being probed | 189 | * @dev: PCI-Express port device being probed |
198 | * | 190 | * |
@@ -225,16 +217,11 @@ static int pcie_portdrv_probe(struct pci_dev *dev, | |||
225 | * it by default. | 217 | * it by default. |
226 | */ | 218 | */ |
227 | dev->d3cold_allowed = false; | 219 | dev->d3cold_allowed = false; |
228 | if (!pci_match_id(port_runtime_pm_black_list, dev)) | ||
229 | pm_runtime_put_noidle(&dev->dev); | ||
230 | |||
231 | return 0; | 220 | return 0; |
232 | } | 221 | } |
233 | 222 | ||
234 | static void pcie_portdrv_remove(struct pci_dev *dev) | 223 | static void pcie_portdrv_remove(struct pci_dev *dev) |
235 | { | 224 | { |
236 | if (!pci_match_id(port_runtime_pm_black_list, dev)) | ||
237 | pm_runtime_get_noresume(&dev->dev); | ||
238 | pcie_port_device_remove(dev); | 225 | pcie_port_device_remove(dev); |
239 | pci_disable_device(dev); | 226 | pci_disable_device(dev); |
240 | } | 227 | } |
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index ab886b7ee327..c5d0a08a8747 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c | |||
@@ -118,17 +118,11 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) | |||
118 | void __iomem *rom; | 118 | void __iomem *rom; |
119 | 119 | ||
120 | /* | 120 | /* |
121 | * Some devices may provide ROMs via a source other than the BAR | ||
122 | */ | ||
123 | if (pdev->rom && pdev->romlen) { | ||
124 | *size = pdev->romlen; | ||
125 | return phys_to_virt(pdev->rom); | ||
126 | /* | ||
127 | * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy | 121 | * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy |
128 | * memory map if the VGA enable bit of the Bridge Control register is | 122 | * memory map if the VGA enable bit of the Bridge Control register is |
129 | * set for embedded VGA. | 123 | * set for embedded VGA. |
130 | */ | 124 | */ |
131 | } else if (res->flags & IORESOURCE_ROM_SHADOW) { | 125 | if (res->flags & IORESOURCE_ROM_SHADOW) { |
132 | /* primary video rom always starts here */ | 126 | /* primary video rom always starts here */ |
133 | start = (loff_t)0xC0000; | 127 | start = (loff_t)0xC0000; |
134 | *size = 0x20000; /* cover C000:0 through E000:0 */ | 128 | *size = 0x20000; /* cover C000:0 through E000:0 */ |
@@ -187,8 +181,7 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) | |||
187 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) | 181 | if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) |
188 | return; | 182 | return; |
189 | 183 | ||
190 | if (!pdev->rom || !pdev->romlen) | 184 | iounmap(rom); |
191 | iounmap(rom); | ||
192 | 185 | ||
193 | /* Disable again before continuing, leave enabled if pci=rom */ | 186 | /* Disable again before continuing, leave enabled if pci=rom */ |
194 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) | 187 | if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW))) |
@@ -212,7 +205,24 @@ void pci_cleanup_rom(struct pci_dev *pdev) | |||
212 | } | 205 | } |
213 | } | 206 | } |
214 | 207 | ||
208 | /** | ||
209 | * pci_platform_rom - return a pointer to any ROM image supplied by the | ||
210 | * platform | ||
211 | * @pdev: pointer to pci device struct | ||
212 | * @size: pointer to receive size of pci window over ROM | ||
213 | */ | ||
214 | void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size) | ||
215 | { | ||
216 | if (pdev->rom && pdev->romlen) { | ||
217 | *size = pdev->romlen; | ||
218 | return phys_to_virt((phys_addr_t)pdev->rom); | ||
219 | } | ||
220 | |||
221 | return NULL; | ||
222 | } | ||
223 | |||
215 | EXPORT_SYMBOL(pci_map_rom); | 224 | EXPORT_SYMBOL(pci_map_rom); |
216 | EXPORT_SYMBOL(pci_unmap_rom); | 225 | EXPORT_SYMBOL(pci_unmap_rom); |
217 | EXPORT_SYMBOL_GPL(pci_enable_rom); | 226 | EXPORT_SYMBOL_GPL(pci_enable_rom); |
218 | EXPORT_SYMBOL_GPL(pci_disable_rom); | 227 | EXPORT_SYMBOL_GPL(pci_disable_rom); |
228 | EXPORT_SYMBOL(pci_platform_rom); | ||
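
For illustration only, not part of the patch: with pci_platform_rom() split out, a caller that used to rely on pci_map_rom() handling the platform copy now has to try both paths itself. A minimal sketch, assuming the caller tracks which path succeeded so it only calls pci_unmap_rom() for the mapped case.

/* Illustrative sketch: prefer the platform-provided image, else map the ROM BAR. */
static void __iomem *example_get_rom(struct pci_dev *pdev, size_t *size,
				     bool *platform_rom)
{
	void __iomem *rom;

	rom = pci_platform_rom(pdev, size);
	*platform_rom = rom != NULL;
	if (rom)
		return rom;

	/* Fall back to the expansion ROM BAR; release with pci_unmap_rom(). */
	return pci_map_rom(pdev, size);
}
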
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c index c689c04a4f52..2d2f0a43d36b 100644 --- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c +++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c | |||
@@ -620,7 +620,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev) | |||
620 | 620 | ||
621 | /* special soc specific control */ | 621 | /* special soc specific control */ |
622 | if (ctrl->mpp_get || ctrl->mpp_set) { | 622 | if (ctrl->mpp_get || ctrl->mpp_set) { |
623 | if (!ctrl->name || !ctrl->mpp_set || !ctrl->mpp_set) { | 623 | if (!ctrl->name || !ctrl->mpp_get || !ctrl->mpp_set) { |
624 | dev_err(&pdev->dev, "wrong soc control info\n"); | 624 | dev_err(&pdev->dev, "wrong soc control info\n"); |
625 | return -EINVAL; | 625 | return -EINVAL; |
626 | } | 626 | } |
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c index ac8d382a79bb..d611ecfcbf70 100644 --- a/drivers/pinctrl/pinconf.c +++ b/drivers/pinctrl/pinconf.c | |||
@@ -622,7 +622,7 @@ static const struct file_operations pinconf_dbg_pinname_fops = { | |||
622 | static int pinconf_dbg_state_print(struct seq_file *s, void *d) | 622 | static int pinconf_dbg_state_print(struct seq_file *s, void *d) |
623 | { | 623 | { |
624 | if (strlen(dbg_state_name)) | 624 | if (strlen(dbg_state_name)) |
625 | seq_printf(s, "%s\n", dbg_pinname); | 625 | seq_printf(s, "%s\n", dbg_state_name); |
626 | else | 626 | else |
627 | seq_printf(s, "No pin state set\n"); | 627 | seq_printf(s, "No pin state set\n"); |
628 | return 0; | 628 | return 0; |
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h index e3ed8cb072a5..bfda73d64eed 100644 --- a/drivers/pinctrl/pinconf.h +++ b/drivers/pinctrl/pinconf.h | |||
@@ -90,7 +90,7 @@ static inline void pinconf_init_device_debugfs(struct dentry *devroot, | |||
90 | * pin config. | 90 | * pin config. |
91 | */ | 91 | */ |
92 | 92 | ||
93 | #ifdef CONFIG_GENERIC_PINCONF | 93 | #if defined(CONFIG_GENERIC_PINCONF) && defined(CONFIG_DEBUG_FS) |
94 | 94 | ||
95 | void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev, | 95 | void pinconf_generic_dump_pin(struct pinctrl_dev *pctldev, |
96 | struct seq_file *s, unsigned pin); | 96 | struct seq_file *s, unsigned pin); |
diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c index caecdd373061..c542a97c82f3 100644 --- a/drivers/pinctrl/pinctrl-abx500.c +++ b/drivers/pinctrl/pinctrl-abx500.c | |||
@@ -422,7 +422,7 @@ static u8 abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip, | |||
422 | } | 422 | } |
423 | 423 | ||
424 | /* check if pin use AlternateFunction register */ | 424 | /* check if pin use AlternateFunction register */ |
425 | if ((af.alt_bit1 == UNUSED) && (af.alt_bit1 == UNUSED)) | 425 | if ((af.alt_bit1 == UNUSED) && (af.alt_bit2 == UNUSED)) |
426 | return mode; | 426 | return mode; |
427 | /* | 427 | /* |
428 | * if pin GPIOSEL bit is set and pin supports alternate function, | 428 | * if pin GPIOSEL bit is set and pin supports alternate function, |
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 75933a6aa828..efb7f10e902a 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c | |||
@@ -1277,21 +1277,80 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type) | |||
1277 | } | 1277 | } |
1278 | 1278 | ||
1279 | #ifdef CONFIG_PM | 1279 | #ifdef CONFIG_PM |
1280 | |||
1281 | static u32 wakeups[MAX_GPIO_BANKS]; | ||
1282 | static u32 backups[MAX_GPIO_BANKS]; | ||
1283 | |||
1280 | static int gpio_irq_set_wake(struct irq_data *d, unsigned state) | 1284 | static int gpio_irq_set_wake(struct irq_data *d, unsigned state) |
1281 | { | 1285 | { |
1282 | struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); | 1286 | struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(d); |
1283 | unsigned bank = at91_gpio->pioc_idx; | 1287 | unsigned bank = at91_gpio->pioc_idx; |
1288 | unsigned mask = 1 << d->hwirq; | ||
1284 | 1289 | ||
1285 | if (unlikely(bank >= MAX_GPIO_BANKS)) | 1290 | if (unlikely(bank >= MAX_GPIO_BANKS)) |
1286 | return -EINVAL; | 1291 | return -EINVAL; |
1287 | 1292 | ||
1293 | if (state) | ||
1294 | wakeups[bank] |= mask; | ||
1295 | else | ||
1296 | wakeups[bank] &= ~mask; | ||
1297 | |||
1288 | irq_set_irq_wake(at91_gpio->pioc_virq, state); | 1298 | irq_set_irq_wake(at91_gpio->pioc_virq, state); |
1289 | 1299 | ||
1290 | return 0; | 1300 | return 0; |
1291 | } | 1301 | } |
1302 | |||
1303 | void at91_pinctrl_gpio_suspend(void) | ||
1304 | { | ||
1305 | int i; | ||
1306 | |||
1307 | for (i = 0; i < gpio_banks; i++) { | ||
1308 | void __iomem *pio; | ||
1309 | |||
1310 | if (!gpio_chips[i]) | ||
1311 | continue; | ||
1312 | |||
1313 | pio = gpio_chips[i]->regbase; | ||
1314 | |||
1315 | backups[i] = __raw_readl(pio + PIO_IMR); | ||
1316 | __raw_writel(backups[i], pio + PIO_IDR); | ||
1317 | __raw_writel(wakeups[i], pio + PIO_IER); | ||
1318 | |||
1319 | if (!wakeups[i]) { | ||
1320 | clk_unprepare(gpio_chips[i]->clock); | ||
1321 | clk_disable(gpio_chips[i]->clock); | ||
1322 | } else { | ||
1323 | printk(KERN_DEBUG "GPIO-%c may wake for %08x\n", | ||
1324 | 'A'+i, wakeups[i]); | ||
1325 | } | ||
1326 | } | ||
1327 | } | ||
1328 | |||
1329 | void at91_pinctrl_gpio_resume(void) | ||
1330 | { | ||
1331 | int i; | ||
1332 | |||
1333 | for (i = 0; i < gpio_banks; i++) { | ||
1334 | void __iomem *pio; | ||
1335 | |||
1336 | if (!gpio_chips[i]) | ||
1337 | continue; | ||
1338 | |||
1339 | pio = gpio_chips[i]->regbase; | ||
1340 | |||
1341 | if (!wakeups[i]) { | ||
1342 | if (clk_prepare(gpio_chips[i]->clock) == 0) | ||
1343 | clk_enable(gpio_chips[i]->clock); | ||
1344 | } | ||
1345 | |||
1346 | __raw_writel(wakeups[i], pio + PIO_IDR); | ||
1347 | __raw_writel(backups[i], pio + PIO_IER); | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1292 | #else | 1351 | #else |
1293 | #define gpio_irq_set_wake NULL | 1352 | #define gpio_irq_set_wake NULL |
1294 | #endif | 1353 | #endif /* CONFIG_PM */ |
1295 | 1354 | ||
1296 | static struct irq_chip gpio_irqchip = { | 1355 | static struct irq_chip gpio_irqchip = { |
1297 | .name = "GPIO", | 1356 | .name = "GPIO", |
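
For illustration only, not part of the patch: a hedged sketch of how a platform suspend path might bracket sleep entry with the two new helpers so that only wake-enabled GPIO interrupts stay armed. example_enter_sleep() and the surrounding hook are assumptions, not code from this tree.

/* Illustrative sketch: call the helpers around the SoC-specific sleep entry. */
static int example_at91_pm_enter(void)
{
	at91_pinctrl_gpio_suspend();	/* mask non-wake GPIO IRQs, arm wake sources */
	example_enter_sleep();		/* assumed SoC-specific low-power entry */
	at91_pinctrl_gpio_resume();	/* restore the saved interrupt masks */

	return 0;
}
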
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index 1a00658b3ea0..bd83c8b01cd1 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c | |||
@@ -194,6 +194,11 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin, | |||
194 | } | 194 | } |
195 | 195 | ||
196 | if (!gpio_range) { | 196 | if (!gpio_range) { |
197 | /* | ||
198 | * A pin should not be freed more times than allocated. | ||
199 | */ | ||
200 | if (WARN_ON(!desc->mux_usecount)) | ||
201 | return NULL; | ||
197 | desc->mux_usecount--; | 202 | desc->mux_usecount--; |
198 | if (desc->mux_usecount) | 203 | if (desc->mux_usecount) |
199 | return NULL; | 204 | return NULL; |
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index cc1f7bf53fd0..c6d77e20622c 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig | |||
@@ -4,7 +4,7 @@ menu "Remoteproc drivers" | |||
4 | config REMOTEPROC | 4 | config REMOTEPROC |
5 | tristate | 5 | tristate |
6 | depends on HAS_DMA | 6 | depends on HAS_DMA |
7 | select FW_CONFIG | 7 | select FW_LOADER |
8 | select VIRTIO | 8 | select VIRTIO |
9 | 9 | ||
10 | config OMAP_REMOTEPROC | 10 | config OMAP_REMOTEPROC |
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 29387df4bfc9..8edb4aed5d36 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c | |||
@@ -217,7 +217,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i) | |||
217 | * TODO: support predefined notifyids (via resource table) | 217 | * TODO: support predefined notifyids (via resource table) |
218 | */ | 218 | */ |
219 | ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL); | 219 | ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL); |
220 | if (ret) { | 220 | if (ret < 0) { |
221 | dev_err(dev, "idr_alloc failed: %d\n", ret); | 221 | dev_err(dev, "idr_alloc failed: %d\n", ret); |
222 | dma_free_coherent(dev->parent, size, va, dma); | 222 | dma_free_coherent(dev->parent, size, va, dma); |
223 | return ret; | 223 | return ret; |
@@ -366,10 +366,12 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, | |||
366 | /* it is now safe to add the virtio device */ | 366 | /* it is now safe to add the virtio device */ |
367 | ret = rproc_add_virtio_dev(rvdev, rsc->id); | 367 | ret = rproc_add_virtio_dev(rvdev, rsc->id); |
368 | if (ret) | 368 | if (ret) |
369 | goto free_rvdev; | 369 | goto remove_rvdev; |
370 | 370 | ||
371 | return 0; | 371 | return 0; |
372 | 372 | ||
373 | remove_rvdev: | ||
374 | list_del(&rvdev->node); | ||
373 | free_rvdev: | 375 | free_rvdev: |
374 | kfree(rvdev); | 376 | kfree(rvdev); |
375 | return ret; | 377 | return ret; |
diff --git a/drivers/remoteproc/ste_modem_rproc.c b/drivers/remoteproc/ste_modem_rproc.c index a7743c069339..fb95c4220052 100644 --- a/drivers/remoteproc/ste_modem_rproc.c +++ b/drivers/remoteproc/ste_modem_rproc.c | |||
@@ -240,6 +240,8 @@ static int sproc_drv_remove(struct platform_device *pdev) | |||
240 | 240 | ||
241 | /* Unregister as remoteproc device */ | 241 | /* Unregister as remoteproc device */ |
242 | rproc_del(sproc->rproc); | 242 | rproc_del(sproc->rproc); |
243 | dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE, | ||
244 | sproc->fw_addr, sproc->fw_dma_addr); | ||
243 | rproc_put(sproc->rproc); | 245 | rproc_put(sproc->rproc); |
244 | 246 | ||
245 | mdev->drv_data = NULL; | 247 | mdev->drv_data = NULL; |
@@ -297,10 +299,13 @@ static int sproc_probe(struct platform_device *pdev) | |||
297 | /* Register as a remoteproc device */ | 299 | /* Register as a remoteproc device */ |
298 | err = rproc_add(rproc); | 300 | err = rproc_add(rproc); |
299 | if (err) | 301 | if (err) |
300 | goto free_rproc; | 302 | goto free_mem; |
301 | 303 | ||
302 | return 0; | 304 | return 0; |
303 | 305 | ||
306 | free_mem: | ||
307 | dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE, | ||
308 | sproc->fw_addr, sproc->fw_dma_addr); | ||
304 | free_rproc: | 309 | free_rproc: |
305 | /* Reset device data upon error */ | 310 | /* Reset device data upon error */ |
306 | mdev->drv_data = NULL; | 311 | mdev->drv_data = NULL; |
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c index 0dde688ca09b..969abbad7fe3 100644 --- a/drivers/rtc/rtc-da9052.c +++ b/drivers/rtc/rtc-da9052.c | |||
@@ -239,11 +239,9 @@ static int da9052_rtc_probe(struct platform_device *pdev) | |||
239 | 239 | ||
240 | rtc->da9052 = dev_get_drvdata(pdev->dev.parent); | 240 | rtc->da9052 = dev_get_drvdata(pdev->dev.parent); |
241 | platform_set_drvdata(pdev, rtc); | 241 | platform_set_drvdata(pdev, rtc); |
242 | rtc->irq = platform_get_irq_byname(pdev, "ALM"); | 242 | rtc->irq = DA9052_IRQ_ALARM; |
243 | ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL, | 243 | ret = da9052_request_irq(rtc->da9052, rtc->irq, "ALM", |
244 | da9052_rtc_irq, | 244 | da9052_rtc_irq, rtc); |
245 | IRQF_TRIGGER_LOW | IRQF_ONESHOT, | ||
246 | "ALM", rtc); | ||
247 | if (ret != 0) { | 245 | if (ret != 0) { |
248 | rtc_err(rtc->da9052, "irq registration failed: %d\n", ret); | 246 | rtc_err(rtc->da9052, "irq registration failed: %d\n", ret); |
249 | return ret; | 247 | return ret; |
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 9978ad4433cb..e9b9c8392832 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c | |||
@@ -135,6 +135,11 @@ static const struct block_device_operations scm_blk_devops = { | |||
135 | .release = scm_release, | 135 | .release = scm_release, |
136 | }; | 136 | }; |
137 | 137 | ||
138 | static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) | ||
139 | { | ||
140 | return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; | ||
141 | } | ||
142 | |||
138 | static void scm_request_prepare(struct scm_request *scmrq) | 143 | static void scm_request_prepare(struct scm_request *scmrq) |
139 | { | 144 | { |
140 | struct scm_blk_dev *bdev = scmrq->bdev; | 145 | struct scm_blk_dev *bdev = scmrq->bdev; |
@@ -195,14 +200,18 @@ void scm_request_requeue(struct scm_request *scmrq) | |||
195 | 200 | ||
196 | scm_release_cluster(scmrq); | 201 | scm_release_cluster(scmrq); |
197 | blk_requeue_request(bdev->rq, scmrq->request); | 202 | blk_requeue_request(bdev->rq, scmrq->request); |
203 | atomic_dec(&bdev->queued_reqs); | ||
198 | scm_request_done(scmrq); | 204 | scm_request_done(scmrq); |
199 | scm_ensure_queue_restart(bdev); | 205 | scm_ensure_queue_restart(bdev); |
200 | } | 206 | } |
201 | 207 | ||
202 | void scm_request_finish(struct scm_request *scmrq) | 208 | void scm_request_finish(struct scm_request *scmrq) |
203 | { | 209 | { |
210 | struct scm_blk_dev *bdev = scmrq->bdev; | ||
211 | |||
204 | scm_release_cluster(scmrq); | 212 | scm_release_cluster(scmrq); |
205 | blk_end_request_all(scmrq->request, scmrq->error); | 213 | blk_end_request_all(scmrq->request, scmrq->error); |
214 | atomic_dec(&bdev->queued_reqs); | ||
206 | scm_request_done(scmrq); | 215 | scm_request_done(scmrq); |
207 | } | 216 | } |
208 | 217 | ||
@@ -218,6 +227,10 @@ static void scm_blk_request(struct request_queue *rq) | |||
218 | if (req->cmd_type != REQ_TYPE_FS) | 227 | if (req->cmd_type != REQ_TYPE_FS) |
219 | continue; | 228 | continue; |
220 | 229 | ||
230 | if (!scm_permit_request(bdev, req)) { | ||
231 | scm_ensure_queue_restart(bdev); | ||
232 | return; | ||
233 | } | ||
221 | scmrq = scm_request_fetch(); | 234 | scmrq = scm_request_fetch(); |
222 | if (!scmrq) { | 235 | if (!scmrq) { |
223 | SCM_LOG(5, "no request"); | 236 | SCM_LOG(5, "no request"); |
@@ -231,11 +244,13 @@ static void scm_blk_request(struct request_queue *rq) | |||
231 | return; | 244 | return; |
232 | } | 245 | } |
233 | if (scm_need_cluster_request(scmrq)) { | 246 | if (scm_need_cluster_request(scmrq)) { |
247 | atomic_inc(&bdev->queued_reqs); | ||
234 | blk_start_request(req); | 248 | blk_start_request(req); |
235 | scm_initiate_cluster_request(scmrq); | 249 | scm_initiate_cluster_request(scmrq); |
236 | return; | 250 | return; |
237 | } | 251 | } |
238 | scm_request_prepare(scmrq); | 252 | scm_request_prepare(scmrq); |
253 | atomic_inc(&bdev->queued_reqs); | ||
239 | blk_start_request(req); | 254 | blk_start_request(req); |
240 | 255 | ||
241 | ret = scm_start_aob(scmrq->aob); | 256 | ret = scm_start_aob(scmrq->aob); |
@@ -244,7 +259,6 @@ static void scm_blk_request(struct request_queue *rq) | |||
244 | scm_request_requeue(scmrq); | 259 | scm_request_requeue(scmrq); |
245 | return; | 260 | return; |
246 | } | 261 | } |
247 | atomic_inc(&bdev->queued_reqs); | ||
248 | } | 262 | } |
249 | } | 263 | } |
250 | 264 | ||
@@ -280,6 +294,38 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, int error) | |||
280 | tasklet_hi_schedule(&bdev->tasklet); | 294 | tasklet_hi_schedule(&bdev->tasklet); |
281 | } | 295 | } |
282 | 296 | ||
297 | static void scm_blk_handle_error(struct scm_request *scmrq) | ||
298 | { | ||
299 | struct scm_blk_dev *bdev = scmrq->bdev; | ||
300 | unsigned long flags; | ||
301 | |||
302 | if (scmrq->error != -EIO) | ||
303 | goto restart; | ||
304 | |||
305 | /* For -EIO the response block is valid. */ | ||
306 | switch (scmrq->aob->response.eqc) { | ||
307 | case EQC_WR_PROHIBIT: | ||
308 | spin_lock_irqsave(&bdev->lock, flags); | ||
309 | if (bdev->state != SCM_WR_PROHIBIT) | ||
310 | pr_info("%lx: Write access to the SCM increment is suspended\n", | ||
311 | (unsigned long) bdev->scmdev->address); | ||
312 | bdev->state = SCM_WR_PROHIBIT; | ||
313 | spin_unlock_irqrestore(&bdev->lock, flags); | ||
314 | goto requeue; | ||
315 | default: | ||
316 | break; | ||
317 | } | ||
318 | |||
319 | restart: | ||
320 | if (!scm_start_aob(scmrq->aob)) | ||
321 | return; | ||
322 | |||
323 | requeue: | ||
324 | spin_lock_irqsave(&bdev->rq_lock, flags); | ||
325 | scm_request_requeue(scmrq); | ||
326 | spin_unlock_irqrestore(&bdev->rq_lock, flags); | ||
327 | } | ||
328 | |||
283 | static void scm_blk_tasklet(struct scm_blk_dev *bdev) | 329 | static void scm_blk_tasklet(struct scm_blk_dev *bdev) |
284 | { | 330 | { |
285 | struct scm_request *scmrq; | 331 | struct scm_request *scmrq; |
@@ -293,11 +339,8 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev) | |||
293 | spin_unlock_irqrestore(&bdev->lock, flags); | 339 | spin_unlock_irqrestore(&bdev->lock, flags); |
294 | 340 | ||
295 | if (scmrq->error && scmrq->retries-- > 0) { | 341 | if (scmrq->error && scmrq->retries-- > 0) { |
296 | if (scm_start_aob(scmrq->aob)) { | 342 | scm_blk_handle_error(scmrq); |
297 | spin_lock_irqsave(&bdev->rq_lock, flags); | 343 | |
298 | scm_request_requeue(scmrq); | ||
299 | spin_unlock_irqrestore(&bdev->rq_lock, flags); | ||
300 | } | ||
301 | /* Request restarted or requeued, handle next. */ | 344 | /* Request restarted or requeued, handle next. */ |
302 | spin_lock_irqsave(&bdev->lock, flags); | 345 | spin_lock_irqsave(&bdev->lock, flags); |
303 | continue; | 346 | continue; |
@@ -310,7 +353,6 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev) | |||
310 | } | 353 | } |
311 | 354 | ||
312 | scm_request_finish(scmrq); | 355 | scm_request_finish(scmrq); |
313 | atomic_dec(&bdev->queued_reqs); | ||
314 | spin_lock_irqsave(&bdev->lock, flags); | 356 | spin_lock_irqsave(&bdev->lock, flags); |
315 | } | 357 | } |
316 | spin_unlock_irqrestore(&bdev->lock, flags); | 358 | spin_unlock_irqrestore(&bdev->lock, flags); |
@@ -332,6 +374,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) | |||
332 | } | 374 | } |
333 | 375 | ||
334 | bdev->scmdev = scmdev; | 376 | bdev->scmdev = scmdev; |
377 | bdev->state = SCM_OPER; | ||
335 | spin_lock_init(&bdev->rq_lock); | 378 | spin_lock_init(&bdev->rq_lock); |
336 | spin_lock_init(&bdev->lock); | 379 | spin_lock_init(&bdev->lock); |
337 | INIT_LIST_HEAD(&bdev->finished_requests); | 380 | INIT_LIST_HEAD(&bdev->finished_requests); |
@@ -396,6 +439,18 @@ void scm_blk_dev_cleanup(struct scm_blk_dev *bdev) | |||
396 | put_disk(bdev->gendisk); | 439 | put_disk(bdev->gendisk); |
397 | } | 440 | } |
398 | 441 | ||
442 | void scm_blk_set_available(struct scm_blk_dev *bdev) | ||
443 | { | ||
444 | unsigned long flags; | ||
445 | |||
446 | spin_lock_irqsave(&bdev->lock, flags); | ||
447 | if (bdev->state == SCM_WR_PROHIBIT) | ||
448 | pr_info("%lx: Write access to the SCM increment is restored\n", | ||
449 | (unsigned long) bdev->scmdev->address); | ||
450 | bdev->state = SCM_OPER; | ||
451 | spin_unlock_irqrestore(&bdev->lock, flags); | ||
452 | } | ||
453 | |||
399 | static int __init scm_blk_init(void) | 454 | static int __init scm_blk_init(void) |
400 | { | 455 | { |
401 | int ret = -EINVAL; | 456 | int ret = -EINVAL; |
@@ -408,12 +463,15 @@ static int __init scm_blk_init(void) | |||
408 | goto out; | 463 | goto out; |
409 | 464 | ||
410 | scm_major = ret; | 465 | scm_major = ret; |
411 | if (scm_alloc_rqs(nr_requests)) | 466 | ret = scm_alloc_rqs(nr_requests); |
467 | if (ret) | ||
412 | goto out_unreg; | 468 | goto out_unreg; |
413 | 469 | ||
414 | scm_debug = debug_register("scm_log", 16, 1, 16); | 470 | scm_debug = debug_register("scm_log", 16, 1, 16); |
415 | if (!scm_debug) | 471 | if (!scm_debug) { |
472 | ret = -ENOMEM; | ||
416 | goto out_free; | 473 | goto out_free; |
474 | } | ||
417 | 475 | ||
418 | debug_register_view(scm_debug, &debug_hex_ascii_view); | 476 | debug_register_view(scm_debug, &debug_hex_ascii_view); |
419 | debug_set_level(scm_debug, 2); | 477 | debug_set_level(scm_debug, 2); |
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h index 3c1ccf494647..8b387b32fd62 100644 --- a/drivers/s390/block/scm_blk.h +++ b/drivers/s390/block/scm_blk.h | |||
@@ -21,6 +21,7 @@ struct scm_blk_dev { | |||
21 | spinlock_t rq_lock; /* guard the request queue */ | 21 | spinlock_t rq_lock; /* guard the request queue */ |
22 | spinlock_t lock; /* guard the rest of the blockdev */ | 22 | spinlock_t lock; /* guard the rest of the blockdev */ |
23 | atomic_t queued_reqs; | 23 | atomic_t queued_reqs; |
24 | enum {SCM_OPER, SCM_WR_PROHIBIT} state; | ||
24 | struct list_head finished_requests; | 25 | struct list_head finished_requests; |
25 | #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE | 26 | #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE |
26 | struct list_head cluster_list; | 27 | struct list_head cluster_list; |
@@ -48,6 +49,7 @@ struct scm_request { | |||
48 | 49 | ||
49 | int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *); | 50 | int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *); |
50 | void scm_blk_dev_cleanup(struct scm_blk_dev *); | 51 | void scm_blk_dev_cleanup(struct scm_blk_dev *); |
52 | void scm_blk_set_available(struct scm_blk_dev *); | ||
51 | void scm_blk_irq(struct scm_device *, void *, int); | 53 | void scm_blk_irq(struct scm_device *, void *, int); |
52 | 54 | ||
53 | void scm_request_finish(struct scm_request *); | 55 | void scm_request_finish(struct scm_request *); |
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c index 9fa0a908607b..c98cf52d78d1 100644 --- a/drivers/s390/block/scm_drv.c +++ b/drivers/s390/block/scm_drv.c | |||
@@ -13,12 +13,23 @@ | |||
13 | #include <asm/eadm.h> | 13 | #include <asm/eadm.h> |
14 | #include "scm_blk.h" | 14 | #include "scm_blk.h" |
15 | 15 | ||
16 | static void notify(struct scm_device *scmdev) | 16 | static void scm_notify(struct scm_device *scmdev, enum scm_event event) |
17 | { | 17 | { |
18 | pr_info("%lu: The capabilities of the SCM increment changed\n", | 18 | struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); |
19 | (unsigned long) scmdev->address); | 19 | |
20 | SCM_LOG(2, "State changed"); | 20 | switch (event) { |
21 | SCM_LOG_STATE(2, scmdev); | 21 | case SCM_CHANGE: |
22 | pr_info("%lx: The capabilities of the SCM increment changed\n", | ||
23 | (unsigned long) scmdev->address); | ||
24 | SCM_LOG(2, "State changed"); | ||
25 | SCM_LOG_STATE(2, scmdev); | ||
26 | break; | ||
27 | case SCM_AVAIL: | ||
28 | SCM_LOG(2, "Increment available"); | ||
29 | SCM_LOG_STATE(2, scmdev); | ||
30 | scm_blk_set_available(bdev); | ||
31 | break; | ||
32 | } | ||
22 | } | 33 | } |
23 | 34 | ||
24 | static int scm_probe(struct scm_device *scmdev) | 35 | static int scm_probe(struct scm_device *scmdev) |
@@ -64,7 +75,7 @@ static struct scm_driver scm_drv = { | |||
64 | .name = "scm_block", | 75 | .name = "scm_block", |
65 | .owner = THIS_MODULE, | 76 | .owner = THIS_MODULE, |
66 | }, | 77 | }, |
67 | .notify = notify, | 78 | .notify = scm_notify, |
68 | .probe = scm_probe, | 79 | .probe = scm_probe, |
69 | .remove = scm_remove, | 80 | .remove = scm_remove, |
70 | .handler = scm_blk_irq, | 81 | .handler = scm_blk_irq, |
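The scm_blk/scm_drv hunks above turn the single-purpose notify() callback into an event dispatcher (SCM_CHANGE vs. SCM_AVAIL) and clear a per-device write-prohibit state under the block-device lock when the increment becomes available again. A minimal userspace sketch of that pattern only; the demo_* names and the pthread mutex standing in for the kernel spinlock are invented for illustration and are not the real API:

/*
 * Illustrative sketch of the notify/event pattern above. A pthread mutex
 * stands in for the spinlock; all demo_* names are made up for this example.
 */
#include <pthread.h>
#include <stdio.h>

enum demo_event { DEMO_CHANGE, DEMO_AVAIL };
enum demo_state { DEMO_OPER, DEMO_WR_PROHIBIT };

struct demo_bdev {
	pthread_mutex_t lock;
	enum demo_state state;
	unsigned long address;
};

/* Mirrors the shape of scm_blk_set_available(): reset the state under the
 * lock and log only when write access really was prohibited before. */
static void demo_set_available(struct demo_bdev *bdev)
{
	pthread_mutex_lock(&bdev->lock);
	if (bdev->state == DEMO_WR_PROHIBIT)
		printf("%lx: write access restored\n", bdev->address);
	bdev->state = DEMO_OPER;
	pthread_mutex_unlock(&bdev->lock);
}

static void demo_notify(struct demo_bdev *bdev, enum demo_event event)
{
	switch (event) {
	case DEMO_CHANGE:
		printf("%lx: capabilities changed\n", bdev->address);
		break;
	case DEMO_AVAIL:
		demo_set_available(bdev);
		break;
	}
}

int main(void)
{
	struct demo_bdev bdev = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.state = DEMO_WR_PROHIBIT,
		.address = 0x1000,
	};

	demo_notify(&bdev, DEMO_CHANGE);
	demo_notify(&bdev, DEMO_AVAIL);	/* prints the "restored" message */
	return 0;
}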
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 30a2255389e5..cd798386b622 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c | |||
@@ -627,6 +627,8 @@ static int __init sclp_detect_standby_memory(void) | |||
627 | struct read_storage_sccb *sccb; | 627 | struct read_storage_sccb *sccb; |
628 | int i, id, assigned, rc; | 628 | int i, id, assigned, rc; |
629 | 629 | ||
630 | if (OLDMEM_BASE) /* No standby memory in kdump mode */ | ||
631 | return 0; | ||
630 | if (!early_read_info_sccb_valid) | 632 | if (!early_read_info_sccb_valid) |
631 | return 0; | 633 | return 0; |
632 | if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) | 634 | if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) |
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index b907dba24025..cee69dac3e18 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c | |||
@@ -915,7 +915,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) | |||
915 | int i, rc; | 915 | int i, rc; |
916 | 916 | ||
917 | /* Check if the tty3270 is already there. */ | 917 | /* Check if the tty3270 is already there. */ |
918 | view = raw3270_find_view(&tty3270_fn, tty->index); | 918 | view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR); |
919 | if (!IS_ERR(view)) { | 919 | if (!IS_ERR(view)) { |
920 | tp = container_of(view, struct tty3270, view); | 920 | tp = container_of(view, struct tty3270, view); |
921 | tty->driver_data = tp; | 921 | tty->driver_data = tp; |
@@ -927,15 +927,16 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) | |||
927 | tp->inattr = TF_INPUT; | 927 | tp->inattr = TF_INPUT; |
928 | return tty_port_install(&tp->port, driver, tty); | 928 | return tty_port_install(&tp->port, driver, tty); |
929 | } | 929 | } |
930 | if (tty3270_max_index < tty->index) | 930 | if (tty3270_max_index < tty->index + 1) |
931 | tty3270_max_index = tty->index; | 931 | tty3270_max_index = tty->index + 1; |
932 | 932 | ||
933 | /* Allocate tty3270 structure on first open. */ | 933 | /* Allocate tty3270 structure on first open. */ |
934 | tp = tty3270_alloc_view(); | 934 | tp = tty3270_alloc_view(); |
935 | if (IS_ERR(tp)) | 935 | if (IS_ERR(tp)) |
936 | return PTR_ERR(tp); | 936 | return PTR_ERR(tp); |
937 | 937 | ||
938 | rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index); | 938 | rc = raw3270_add_view(&tp->view, &tty3270_fn, |
939 | tty->index + RAW3270_FIRSTMINOR); | ||
939 | if (rc) { | 940 | if (rc) { |
940 | tty3270_free_view(tp); | 941 | tty3270_free_view(tp); |
941 | return rc; | 942 | return rc; |
@@ -1846,12 +1847,12 @@ static const struct tty_operations tty3270_ops = { | |||
1846 | 1847 | ||
1847 | void tty3270_create_cb(int minor) | 1848 | void tty3270_create_cb(int minor) |
1848 | { | 1849 | { |
1849 | tty_register_device(tty3270_driver, minor, NULL); | 1850 | tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL); |
1850 | } | 1851 | } |
1851 | 1852 | ||
1852 | void tty3270_destroy_cb(int minor) | 1853 | void tty3270_destroy_cb(int minor) |
1853 | { | 1854 | { |
1854 | tty_unregister_device(tty3270_driver, minor); | 1855 | tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR); |
1855 | } | 1856 | } |
1856 | 1857 | ||
1857 | struct raw3270_notifier tty3270_notifier = | 1858 | struct raw3270_notifier tty3270_notifier = |
@@ -1884,7 +1885,8 @@ static int __init tty3270_init(void) | |||
1884 | driver->driver_name = "tty3270"; | 1885 | driver->driver_name = "tty3270"; |
1885 | driver->name = "3270/tty"; | 1886 | driver->name = "3270/tty"; |
1886 | driver->major = IBM_TTY3270_MAJOR; | 1887 | driver->major = IBM_TTY3270_MAJOR; |
1887 | driver->minor_start = 0; | 1888 | driver->minor_start = RAW3270_FIRSTMINOR; |
1889 | driver->name_base = RAW3270_FIRSTMINOR; | ||
1888 | driver->type = TTY_DRIVER_TYPE_SYSTEM; | 1890 | driver->type = TTY_DRIVER_TYPE_SYSTEM; |
1889 | driver->subtype = SYSTEM_TYPE_TTY; | 1891 | driver->subtype = SYSTEM_TYPE_TTY; |
1890 | driver->init_termios = tty_std_termios; | 1892 | driver->init_termios = tty_std_termios; |
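The tty3270 changes above shift the driver's minor numbers up by RAW3270_FIRSTMINOR, so view look-ups add the offset to the tty index while device registration and destruction subtract it again. A tiny sketch of that index/minor round trip, using a placeholder offset rather than the real RAW3270_FIRSTMINOR value:

/*
 * Sketch of the tty index <-> 3270 minor mapping used above. The offset
 * value is a placeholder, not the kernel's RAW3270_FIRSTMINOR constant.
 */
#include <assert.h>
#include <stdio.h>

#define DEMO_FIRSTMINOR 1

static int index_to_minor(int index) { return index + DEMO_FIRSTMINOR; }
static int minor_to_index(int minor) { return minor - DEMO_FIRSTMINOR; }

int main(void)
{
	for (int index = 0; index < 4; index++) {
		int minor = index_to_minor(index);

		assert(minor_to_index(minor) == index);	/* round trip */
		printf("tty index %d -> minor %d\n", index, minor);
	}
	return 0;
}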
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 31ceef1beb8b..e16c553f6556 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -433,6 +433,20 @@ static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area) | |||
433 | " failed (rc=%d).\n", ret); | 433 | " failed (rc=%d).\n", ret); |
434 | } | 434 | } |
435 | 435 | ||
436 | static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area) | ||
437 | { | ||
438 | int ret; | ||
439 | |||
440 | CIO_CRW_EVENT(4, "chsc: scm available information\n"); | ||
441 | if (sei_area->rs != 7) | ||
442 | return; | ||
443 | |||
444 | ret = scm_process_availability_information(); | ||
445 | if (ret) | ||
446 | CIO_CRW_EVENT(0, "chsc: process availability information" | ||
447 | " failed (rc=%d).\n", ret); | ||
448 | } | ||
449 | |||
436 | static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) | 450 | static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) |
437 | { | 451 | { |
438 | switch (sei_area->cc) { | 452 | switch (sei_area->cc) { |
@@ -468,6 +482,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) | |||
468 | case 12: /* scm change notification */ | 482 | case 12: /* scm change notification */ |
469 | chsc_process_sei_scm_change(sei_area); | 483 | chsc_process_sei_scm_change(sei_area); |
470 | break; | 484 | break; |
485 | case 14: /* scm available notification */ | ||
486 | chsc_process_sei_scm_avail(sei_area); | ||
487 | break; | ||
471 | default: /* other stuff */ | 488 | default: /* other stuff */ |
472 | CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", | 489 | CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", |
473 | sei_area->cc); | 490 | sei_area->cc); |
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 227e05f674b3..349d5fc47196 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
@@ -156,8 +156,10 @@ int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token); | |||
156 | 156 | ||
157 | #ifdef CONFIG_SCM_BUS | 157 | #ifdef CONFIG_SCM_BUS |
158 | int scm_update_information(void); | 158 | int scm_update_information(void); |
159 | int scm_process_availability_information(void); | ||
159 | #else /* CONFIG_SCM_BUS */ | 160 | #else /* CONFIG_SCM_BUS */ |
160 | static inline int scm_update_information(void) { return 0; } | 161 | static inline int scm_update_information(void) { return 0; } |
162 | static inline int scm_process_availability_information(void) { return 0; } | ||
161 | #endif /* CONFIG_SCM_BUS */ | 163 | #endif /* CONFIG_SCM_BUS */ |
162 | 164 | ||
163 | 165 | ||
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c index bcf20f3aa51b..46ec25632e8b 100644 --- a/drivers/s390/cio/scm.c +++ b/drivers/s390/cio/scm.c | |||
@@ -211,7 +211,7 @@ static void scmdev_update(struct scm_device *scmdev, struct sale *sale) | |||
211 | goto out; | 211 | goto out; |
212 | scmdrv = to_scm_drv(scmdev->dev.driver); | 212 | scmdrv = to_scm_drv(scmdev->dev.driver); |
213 | if (changed && scmdrv->notify) | 213 | if (changed && scmdrv->notify) |
214 | scmdrv->notify(scmdev); | 214 | scmdrv->notify(scmdev, SCM_CHANGE); |
215 | out: | 215 | out: |
216 | device_unlock(&scmdev->dev); | 216 | device_unlock(&scmdev->dev); |
217 | if (changed) | 217 | if (changed) |
@@ -297,6 +297,22 @@ int scm_update_information(void) | |||
297 | return ret; | 297 | return ret; |
298 | } | 298 | } |
299 | 299 | ||
300 | static int scm_dev_avail(struct device *dev, void *unused) | ||
301 | { | ||
302 | struct scm_driver *scmdrv = to_scm_drv(dev->driver); | ||
303 | struct scm_device *scmdev = to_scm_dev(dev); | ||
304 | |||
305 | if (dev->driver && scmdrv->notify) | ||
306 | scmdrv->notify(scmdev, SCM_AVAIL); | ||
307 | |||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | int scm_process_availability_information(void) | ||
312 | { | ||
313 | return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail); | ||
314 | } | ||
315 | |||
300 | static int __init scm_init(void) | 316 | static int __init scm_init(void) |
301 | { | 317 | { |
302 | int ret; | 318 | int ret; |
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index d87961d4c0de..6ccb7457746b 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -769,6 +769,7 @@ struct qeth_card { | |||
769 | unsigned long thread_start_mask; | 769 | unsigned long thread_start_mask; |
770 | unsigned long thread_allowed_mask; | 770 | unsigned long thread_allowed_mask; |
771 | unsigned long thread_running_mask; | 771 | unsigned long thread_running_mask; |
772 | struct task_struct *recovery_task; | ||
772 | spinlock_t ip_lock; | 773 | spinlock_t ip_lock; |
773 | struct list_head ip_list; | 774 | struct list_head ip_list; |
774 | struct list_head *ip_tbd_list; | 775 | struct list_head *ip_tbd_list; |
@@ -862,6 +863,8 @@ extern struct qeth_card_list_struct qeth_core_card_list; | |||
862 | extern struct kmem_cache *qeth_core_header_cache; | 863 | extern struct kmem_cache *qeth_core_header_cache; |
863 | extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; | 864 | extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; |
864 | 865 | ||
866 | void qeth_set_recovery_task(struct qeth_card *); | ||
867 | void qeth_clear_recovery_task(struct qeth_card *); | ||
865 | void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); | 868 | void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); |
866 | int qeth_threads_running(struct qeth_card *, unsigned long); | 869 | int qeth_threads_running(struct qeth_card *, unsigned long); |
867 | int qeth_wait_for_threads(struct qeth_card *, unsigned long); | 870 | int qeth_wait_for_threads(struct qeth_card *, unsigned long); |
@@ -916,6 +919,7 @@ int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, | |||
916 | void *reply_param); | 919 | void *reply_param); |
917 | int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); | 920 | int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); |
918 | int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); | 921 | int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); |
922 | int qeth_get_elements_for_frags(struct sk_buff *); | ||
919 | int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, | 923 | int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, |
920 | struct sk_buff *, struct qeth_hdr *, int, int, int); | 924 | struct sk_buff *, struct qeth_hdr *, int, int, int); |
921 | int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, | 925 | int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, |
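The new recovery_task field declared above (used by qeth_wait_for_threads() in the next hunk) lets the recovery thread skip waiting on the thread bits it holds itself, which would otherwise deadlock the recovery path. A rough pthread-based sketch of the idea, with invented names; pthread_self() stands in for the kernel's current:

/*
 * Sketch of the "recovery task skips waiting" idea using POSIX threads.
 * The kernel compares against `current`; pthread_self() stands in here.
 * All names are illustrative, not the qeth API.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_t recovery_thread;
static bool recovery_set;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void set_recovery_task(void)
{
	pthread_mutex_lock(&lock);
	recovery_thread = pthread_self();
	recovery_set = true;
	pthread_mutex_unlock(&lock);
}

static void clear_recovery_task(void)
{
	pthread_mutex_lock(&lock);
	recovery_set = false;
	pthread_mutex_unlock(&lock);
}

static bool is_recovery_task(void)
{
	bool ret;

	pthread_mutex_lock(&lock);
	ret = recovery_set && pthread_equal(recovery_thread, pthread_self());
	pthread_mutex_unlock(&lock);
	return ret;
}

static int wait_for_threads(void)
{
	if (is_recovery_task())
		return 0;	/* never block the recoverer on itself */
	/* ... otherwise block until the worker threads have finished ... */
	return 0;
}

int main(void)
{
	set_recovery_task();
	printf("recovery task waits? %d\n", wait_for_threads());
	clear_recovery_task();
	return 0;
}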
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 0d8cdff81813..451f92020599 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -177,6 +177,23 @@ const char *qeth_get_cardname_short(struct qeth_card *card) | |||
177 | return "n/a"; | 177 | return "n/a"; |
178 | } | 178 | } |
179 | 179 | ||
180 | void qeth_set_recovery_task(struct qeth_card *card) | ||
181 | { | ||
182 | card->recovery_task = current; | ||
183 | } | ||
184 | EXPORT_SYMBOL_GPL(qeth_set_recovery_task); | ||
185 | |||
186 | void qeth_clear_recovery_task(struct qeth_card *card) | ||
187 | { | ||
188 | card->recovery_task = NULL; | ||
189 | } | ||
190 | EXPORT_SYMBOL_GPL(qeth_clear_recovery_task); | ||
191 | |||
192 | static bool qeth_is_recovery_task(const struct qeth_card *card) | ||
193 | { | ||
194 | return card->recovery_task == current; | ||
195 | } | ||
196 | |||
180 | void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, | 197 | void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, |
181 | int clear_start_mask) | 198 | int clear_start_mask) |
182 | { | 199 | { |
@@ -205,6 +222,8 @@ EXPORT_SYMBOL_GPL(qeth_threads_running); | |||
205 | 222 | ||
206 | int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) | 223 | int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) |
207 | { | 224 | { |
225 | if (qeth_is_recovery_task(card)) | ||
226 | return 0; | ||
208 | return wait_event_interruptible(card->wait_q, | 227 | return wait_event_interruptible(card->wait_q, |
209 | qeth_threads_running(card, threads) == 0); | 228 | qeth_threads_running(card, threads) == 0); |
210 | } | 229 | } |
@@ -3679,6 +3698,25 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, | |||
3679 | } | 3698 | } |
3680 | EXPORT_SYMBOL_GPL(qeth_get_priority_queue); | 3699 | EXPORT_SYMBOL_GPL(qeth_get_priority_queue); |
3681 | 3700 | ||
3701 | int qeth_get_elements_for_frags(struct sk_buff *skb) | ||
3702 | { | ||
3703 | int cnt, length, e, elements = 0; | ||
3704 | struct skb_frag_struct *frag; | ||
3705 | char *data; | ||
3706 | |||
3707 | for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { | ||
3708 | frag = &skb_shinfo(skb)->frags[cnt]; | ||
3709 | data = (char *)page_to_phys(skb_frag_page(frag)) + | ||
3710 | frag->page_offset; | ||
3711 | length = frag->size; | ||
3712 | e = PFN_UP((unsigned long)data + length - 1) - | ||
3713 | PFN_DOWN((unsigned long)data); | ||
3714 | elements += e; | ||
3715 | } | ||
3716 | return elements; | ||
3717 | } | ||
3718 | EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); | ||
3719 | |||
3682 | int qeth_get_elements_no(struct qeth_card *card, void *hdr, | 3720 | int qeth_get_elements_no(struct qeth_card *card, void *hdr, |
3683 | struct sk_buff *skb, int elems) | 3721 | struct sk_buff *skb, int elems) |
3684 | { | 3722 | { |
@@ -3686,7 +3724,8 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr, | |||
3686 | int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - | 3724 | int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - |
3687 | PFN_DOWN((unsigned long)skb->data); | 3725 | PFN_DOWN((unsigned long)skb->data); |
3688 | 3726 | ||
3689 | elements_needed += skb_shinfo(skb)->nr_frags; | 3727 | elements_needed += qeth_get_elements_for_frags(skb); |
3728 | |||
3690 | if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { | 3729 | if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { |
3691 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " | 3730 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " |
3692 | "(Number=%d / Length=%d). Discarded.\n", | 3731 | "(Number=%d / Length=%d). Discarded.\n", |
@@ -3771,12 +3810,23 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, | |||
3771 | 3810 | ||
3772 | for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { | 3811 | for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { |
3773 | frag = &skb_shinfo(skb)->frags[cnt]; | 3812 | frag = &skb_shinfo(skb)->frags[cnt]; |
3774 | buffer->element[element].addr = (char *) | 3813 | data = (char *)page_to_phys(skb_frag_page(frag)) + |
3775 | page_to_phys(skb_frag_page(frag)) | 3814 | frag->page_offset; |
3776 | + frag->page_offset; | 3815 | length = frag->size; |
3777 | buffer->element[element].length = frag->size; | 3816 | while (length > 0) { |
3778 | buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG; | 3817 | length_here = PAGE_SIZE - |
3779 | element++; | 3818 | ((unsigned long) data % PAGE_SIZE); |
3819 | if (length < length_here) | ||
3820 | length_here = length; | ||
3821 | |||
3822 | buffer->element[element].addr = data; | ||
3823 | buffer->element[element].length = length_here; | ||
3824 | buffer->element[element].eflags = | ||
3825 | SBAL_EFLAGS_MIDDLE_FRAG; | ||
3826 | length -= length_here; | ||
3827 | data += length_here; | ||
3828 | element++; | ||
3829 | } | ||
3780 | } | 3830 | } |
3781 | 3831 | ||
3782 | if (buffer->element[element - 1].eflags) | 3832 | if (buffer->element[element - 1].eflags) |
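The qeth hunks above stop assuming one queue element per skb fragment: a fragment that crosses page boundaries needs one element per page it touches, so both the element count and the fill loop now walk each fragment in page-sized chunks. A self-contained sketch of the same arithmetic, with PAGE_SIZE, PFN_UP and PFN_DOWN modelled locally and a made-up frag_t in place of the kernel's skb fragment structure:

/*
 * Userspace sketch of the per-fragment element arithmetic above. PAGE_SIZE,
 * PFN_UP and PFN_DOWN are modelled locally; frag_t is a stand-in for the
 * kernel's skb fragment, not the real structure.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL
#define PFN_DOWN(x) ((x) / DEMO_PAGE_SIZE)
#define PFN_UP(x)   (((x) + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE)

typedef struct { unsigned long addr; unsigned long len; } frag_t;

/* Number of buffer elements one fragment needs: one per page it touches. */
static unsigned long elements_for_frag(const frag_t *frag)
{
	return PFN_UP(frag->addr + frag->len - 1) - PFN_DOWN(frag->addr);
}

/* Walk a fragment in page-sized chunks, as the fill loop above does. */
static void fill_elements(const frag_t *frag)
{
	unsigned long data = frag->addr, length = frag->len;

	while (length > 0) {
		unsigned long here = DEMO_PAGE_SIZE - (data % DEMO_PAGE_SIZE);

		if (length < here)
			here = length;
		printf("  element: addr=%#lx len=%lu\n", data, here);
		length -= here;
		data += here;
	}
}

int main(void)
{
	/* 6000 bytes starting 100 bytes into a page: touches two pages. */
	frag_t frag = { .addr = 0x10000 + 100, .len = 6000 };

	printf("elements needed: %lu\n", elements_for_frag(&frag));
	fill_elements(&frag);
	return 0;
}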
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d690166efeaf..155b101bd730 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -1143,6 +1143,7 @@ static int qeth_l2_recover(void *ptr) | |||
1143 | QETH_CARD_TEXT(card, 2, "recover2"); | 1143 | QETH_CARD_TEXT(card, 2, "recover2"); |
1144 | dev_warn(&card->gdev->dev, | 1144 | dev_warn(&card->gdev->dev, |
1145 | "A recovery process has been started for the device\n"); | 1145 | "A recovery process has been started for the device\n"); |
1146 | qeth_set_recovery_task(card); | ||
1146 | __qeth_l2_set_offline(card->gdev, 1); | 1147 | __qeth_l2_set_offline(card->gdev, 1); |
1147 | rc = __qeth_l2_set_online(card->gdev, 1); | 1148 | rc = __qeth_l2_set_online(card->gdev, 1); |
1148 | if (!rc) | 1149 | if (!rc) |
@@ -1153,6 +1154,7 @@ static int qeth_l2_recover(void *ptr) | |||
1153 | dev_warn(&card->gdev->dev, "The qeth device driver " | 1154 | dev_warn(&card->gdev->dev, "The qeth device driver " |
1154 | "failed to recover an error on the device\n"); | 1155 | "failed to recover an error on the device\n"); |
1155 | } | 1156 | } |
1157 | qeth_clear_recovery_task(card); | ||
1156 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | 1158 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); |
1157 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | 1159 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); |
1158 | return 0; | 1160 | return 0; |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 091ca0efa1c5..1f7edf1b26c3 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -623,7 +623,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, | |||
623 | return rc; | 623 | return rc; |
624 | } | 624 | } |
625 | 625 | ||
626 | static void qeth_l3_correct_routing_type(struct qeth_card *card, | 626 | static int qeth_l3_correct_routing_type(struct qeth_card *card, |
627 | enum qeth_routing_types *type, enum qeth_prot_versions prot) | 627 | enum qeth_routing_types *type, enum qeth_prot_versions prot) |
628 | { | 628 | { |
629 | if (card->info.type == QETH_CARD_TYPE_IQD) { | 629 | if (card->info.type == QETH_CARD_TYPE_IQD) { |
@@ -632,7 +632,7 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card, | |||
632 | case PRIMARY_CONNECTOR: | 632 | case PRIMARY_CONNECTOR: |
633 | case SECONDARY_CONNECTOR: | 633 | case SECONDARY_CONNECTOR: |
634 | case MULTICAST_ROUTER: | 634 | case MULTICAST_ROUTER: |
635 | return; | 635 | return 0; |
636 | default: | 636 | default: |
637 | goto out_inval; | 637 | goto out_inval; |
638 | } | 638 | } |
@@ -641,17 +641,18 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card, | |||
641 | case NO_ROUTER: | 641 | case NO_ROUTER: |
642 | case PRIMARY_ROUTER: | 642 | case PRIMARY_ROUTER: |
643 | case SECONDARY_ROUTER: | 643 | case SECONDARY_ROUTER: |
644 | return; | 644 | return 0; |
645 | case MULTICAST_ROUTER: | 645 | case MULTICAST_ROUTER: |
646 | if (qeth_is_ipafunc_supported(card, prot, | 646 | if (qeth_is_ipafunc_supported(card, prot, |
647 | IPA_OSA_MC_ROUTER)) | 647 | IPA_OSA_MC_ROUTER)) |
648 | return; | 648 | return 0; |
649 | default: | 649 | default: |
650 | goto out_inval; | 650 | goto out_inval; |
651 | } | 651 | } |
652 | } | 652 | } |
653 | out_inval: | 653 | out_inval: |
654 | *type = NO_ROUTER; | 654 | *type = NO_ROUTER; |
655 | return -EINVAL; | ||
655 | } | 656 | } |
656 | 657 | ||
657 | int qeth_l3_setrouting_v4(struct qeth_card *card) | 658 | int qeth_l3_setrouting_v4(struct qeth_card *card) |
@@ -660,8 +661,10 @@ int qeth_l3_setrouting_v4(struct qeth_card *card) | |||
660 | 661 | ||
661 | QETH_CARD_TEXT(card, 3, "setrtg4"); | 662 | QETH_CARD_TEXT(card, 3, "setrtg4"); |
662 | 663 | ||
663 | qeth_l3_correct_routing_type(card, &card->options.route4.type, | 664 | rc = qeth_l3_correct_routing_type(card, &card->options.route4.type, |
664 | QETH_PROT_IPV4); | 665 | QETH_PROT_IPV4); |
666 | if (rc) | ||
667 | return rc; | ||
665 | 668 | ||
666 | rc = qeth_l3_send_setrouting(card, card->options.route4.type, | 669 | rc = qeth_l3_send_setrouting(card, card->options.route4.type, |
667 | QETH_PROT_IPV4); | 670 | QETH_PROT_IPV4); |
@@ -683,8 +686,10 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) | |||
683 | 686 | ||
684 | if (!qeth_is_supported(card, IPA_IPV6)) | 687 | if (!qeth_is_supported(card, IPA_IPV6)) |
685 | return 0; | 688 | return 0; |
686 | qeth_l3_correct_routing_type(card, &card->options.route6.type, | 689 | rc = qeth_l3_correct_routing_type(card, &card->options.route6.type, |
687 | QETH_PROT_IPV6); | 690 | QETH_PROT_IPV6); |
691 | if (rc) | ||
692 | return rc; | ||
688 | 693 | ||
689 | rc = qeth_l3_send_setrouting(card, card->options.route6.type, | 694 | rc = qeth_l3_send_setrouting(card, card->options.route6.type, |
690 | QETH_PROT_IPV6); | 695 | QETH_PROT_IPV6); |
@@ -2898,7 +2903,9 @@ static inline int qeth_l3_tso_elements(struct sk_buff *skb) | |||
2898 | tcp_hdr(skb)->doff * 4; | 2903 | tcp_hdr(skb)->doff * 4; |
2899 | int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); | 2904 | int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); |
2900 | int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); | 2905 | int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); |
2901 | elements += skb_shinfo(skb)->nr_frags; | 2906 | |
2907 | elements += qeth_get_elements_for_frags(skb); | ||
2908 | |||
2902 | return elements; | 2909 | return elements; |
2903 | } | 2910 | } |
2904 | 2911 | ||
@@ -3348,7 +3355,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3348 | rc = -ENODEV; | 3355 | rc = -ENODEV; |
3349 | goto out_remove; | 3356 | goto out_remove; |
3350 | } | 3357 | } |
3351 | qeth_trace_features(card); | ||
3352 | 3358 | ||
3353 | if (!card->dev && qeth_l3_setup_netdev(card)) { | 3359 | if (!card->dev && qeth_l3_setup_netdev(card)) { |
3354 | rc = -ENODEV; | 3360 | rc = -ENODEV; |
@@ -3425,6 +3431,7 @@ contin: | |||
3425 | qeth_l3_set_multicast_list(card->dev); | 3431 | qeth_l3_set_multicast_list(card->dev); |
3426 | rtnl_unlock(); | 3432 | rtnl_unlock(); |
3427 | } | 3433 | } |
3434 | qeth_trace_features(card); | ||
3428 | /* let user_space know that device is online */ | 3435 | /* let user_space know that device is online */ |
3429 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); | 3436 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); |
3430 | mutex_unlock(&card->conf_mutex); | 3437 | mutex_unlock(&card->conf_mutex); |
@@ -3508,6 +3515,7 @@ static int qeth_l3_recover(void *ptr) | |||
3508 | QETH_CARD_TEXT(card, 2, "recover2"); | 3515 | QETH_CARD_TEXT(card, 2, "recover2"); |
3509 | dev_warn(&card->gdev->dev, | 3516 | dev_warn(&card->gdev->dev, |
3510 | "A recovery process has been started for the device\n"); | 3517 | "A recovery process has been started for the device\n"); |
3518 | qeth_set_recovery_task(card); | ||
3511 | __qeth_l3_set_offline(card->gdev, 1); | 3519 | __qeth_l3_set_offline(card->gdev, 1); |
3512 | rc = __qeth_l3_set_online(card->gdev, 1); | 3520 | rc = __qeth_l3_set_online(card->gdev, 1); |
3513 | if (!rc) | 3521 | if (!rc) |
@@ -3518,6 +3526,7 @@ static int qeth_l3_recover(void *ptr) | |||
3518 | dev_warn(&card->gdev->dev, "The qeth device driver " | 3526 | dev_warn(&card->gdev->dev, "The qeth device driver " |
3519 | "failed to recover an error on the device\n"); | 3527 | "failed to recover an error on the device\n"); |
3520 | } | 3528 | } |
3529 | qeth_clear_recovery_task(card); | ||
3521 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | 3530 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); |
3522 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | 3531 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); |
3523 | return 0; | 3532 | return 0; |
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index ebc379486267..e70af2406ff9 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c | |||
@@ -87,6 +87,8 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card, | |||
87 | rc = qeth_l3_setrouting_v6(card); | 87 | rc = qeth_l3_setrouting_v6(card); |
88 | } | 88 | } |
89 | out: | 89 | out: |
90 | if (rc) | ||
91 | route->type = old_route_type; | ||
90 | mutex_unlock(&card->conf_mutex); | 92 | mutex_unlock(&card->conf_mutex); |
91 | return rc ? rc : count; | 93 | return rc ? rc : count; |
92 | } | 94 | } |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 2daf4b0da434..90bc7bd00966 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | |||
@@ -940,6 +940,7 @@ static int bnx2fc_libfc_config(struct fc_lport *lport) | |||
940 | fc_exch_init(lport); | 940 | fc_exch_init(lport); |
941 | fc_rport_init(lport); | 941 | fc_rport_init(lport); |
942 | fc_disc_init(lport); | 942 | fc_disc_init(lport); |
943 | fc_disc_config(lport, lport); | ||
943 | return 0; | 944 | return 0; |
944 | } | 945 | } |
945 | 946 | ||
@@ -2133,6 +2134,7 @@ static int _bnx2fc_create(struct net_device *netdev, | |||
2133 | } | 2134 | } |
2134 | 2135 | ||
2135 | ctlr = bnx2fc_to_ctlr(interface); | 2136 | ctlr = bnx2fc_to_ctlr(interface); |
2137 | cdev = fcoe_ctlr_to_ctlr_dev(ctlr); | ||
2136 | interface->vlan_id = vlan_id; | 2138 | interface->vlan_id = vlan_id; |
2137 | 2139 | ||
2138 | interface->timer_work_queue = | 2140 | interface->timer_work_queue = |
@@ -2143,7 +2145,7 @@ static int _bnx2fc_create(struct net_device *netdev, | |||
2143 | goto ifput_err; | 2145 | goto ifput_err; |
2144 | } | 2146 | } |
2145 | 2147 | ||
2146 | lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0); | 2148 | lport = bnx2fc_if_create(interface, &cdev->dev, 0); |
2147 | if (!lport) { | 2149 | if (!lport) { |
2148 | printk(KERN_ERR PFX "Failed to create interface (%s)\n", | 2150 | printk(KERN_ERR PFX "Failed to create interface (%s)\n", |
2149 | netdev->name); | 2151 | netdev->name); |
@@ -2159,8 +2161,6 @@ static int _bnx2fc_create(struct net_device *netdev, | |||
2159 | /* Make this master N_port */ | 2161 | /* Make this master N_port */ |
2160 | ctlr->lp = lport; | 2162 | ctlr->lp = lport; |
2161 | 2163 | ||
2162 | cdev = fcoe_ctlr_to_ctlr_dev(ctlr); | ||
2163 | |||
2164 | if (link_state == BNX2FC_CREATE_LINK_UP) | 2164 | if (link_state == BNX2FC_CREATE_LINK_UP) |
2165 | cdev->enabled = FCOE_CTLR_ENABLED; | 2165 | cdev->enabled = FCOE_CTLR_ENABLED; |
2166 | else | 2166 | else |
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index b5d92fc93c70..9bfdc9a3f897 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c | |||
@@ -490,7 +490,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) | |||
490 | { | 490 | { |
491 | struct net_device *netdev = fcoe->netdev; | 491 | struct net_device *netdev = fcoe->netdev; |
492 | struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); | 492 | struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe); |
493 | struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip); | ||
494 | 493 | ||
495 | rtnl_lock(); | 494 | rtnl_lock(); |
496 | if (!fcoe->removed) | 495 | if (!fcoe->removed) |
@@ -501,7 +500,6 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) | |||
501 | /* tear-down the FCoE controller */ | 500 | /* tear-down the FCoE controller */ |
502 | fcoe_ctlr_destroy(fip); | 501 | fcoe_ctlr_destroy(fip); |
503 | scsi_host_put(fip->lp->host); | 502 | scsi_host_put(fip->lp->host); |
504 | fcoe_ctlr_device_delete(ctlr_dev); | ||
505 | dev_put(netdev); | 503 | dev_put(netdev); |
506 | module_put(THIS_MODULE); | 504 | module_put(THIS_MODULE); |
507 | } | 505 | } |
@@ -2194,6 +2192,8 @@ out_nodev: | |||
2194 | */ | 2192 | */ |
2195 | static void fcoe_destroy_work(struct work_struct *work) | 2193 | static void fcoe_destroy_work(struct work_struct *work) |
2196 | { | 2194 | { |
2195 | struct fcoe_ctlr_device *cdev; | ||
2196 | struct fcoe_ctlr *ctlr; | ||
2197 | struct fcoe_port *port; | 2197 | struct fcoe_port *port; |
2198 | struct fcoe_interface *fcoe; | 2198 | struct fcoe_interface *fcoe; |
2199 | struct Scsi_Host *shost; | 2199 | struct Scsi_Host *shost; |
@@ -2224,10 +2224,15 @@ static void fcoe_destroy_work(struct work_struct *work) | |||
2224 | mutex_lock(&fcoe_config_mutex); | 2224 | mutex_lock(&fcoe_config_mutex); |
2225 | 2225 | ||
2226 | fcoe = port->priv; | 2226 | fcoe = port->priv; |
2227 | ctlr = fcoe_to_ctlr(fcoe); | ||
2228 | cdev = fcoe_ctlr_to_ctlr_dev(ctlr); | ||
2229 | |||
2227 | fcoe_if_destroy(port->lport); | 2230 | fcoe_if_destroy(port->lport); |
2228 | fcoe_interface_cleanup(fcoe); | 2231 | fcoe_interface_cleanup(fcoe); |
2229 | 2232 | ||
2230 | mutex_unlock(&fcoe_config_mutex); | 2233 | mutex_unlock(&fcoe_config_mutex); |
2234 | |||
2235 | fcoe_ctlr_device_delete(cdev); | ||
2231 | } | 2236 | } |
2232 | 2237 | ||
2233 | /** | 2238 | /** |
@@ -2335,7 +2340,9 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, | |||
2335 | rc = -EIO; | 2340 | rc = -EIO; |
2336 | rtnl_unlock(); | 2341 | rtnl_unlock(); |
2337 | fcoe_interface_cleanup(fcoe); | 2342 | fcoe_interface_cleanup(fcoe); |
2338 | goto out_nortnl; | 2343 | mutex_unlock(&fcoe_config_mutex); |
2344 | fcoe_ctlr_device_delete(ctlr_dev); | ||
2345 | goto out; | ||
2339 | } | 2346 | } |
2340 | 2347 | ||
2341 | /* Make this the "master" N_Port */ | 2348 | /* Make this the "master" N_Port */ |
@@ -2375,8 +2382,8 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode, | |||
2375 | 2382 | ||
2376 | out_nodev: | 2383 | out_nodev: |
2377 | rtnl_unlock(); | 2384 | rtnl_unlock(); |
2378 | out_nortnl: | ||
2379 | mutex_unlock(&fcoe_config_mutex); | 2385 | mutex_unlock(&fcoe_config_mutex); |
2386 | out: | ||
2380 | return rc; | 2387 | return rc; |
2381 | } | 2388 | } |
2382 | 2389 | ||
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 08c3bc398da2..a76247201be5 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c | |||
@@ -2815,6 +2815,47 @@ unlock: | |||
2815 | } | 2815 | } |
2816 | 2816 | ||
2817 | /** | 2817 | /** |
2818 | * fcoe_ctlr_mode_set() - Set or reset the ctlr's mode | ||
2819 | * @lport: The local port to be (re)configured | ||
2820 | * @fip: The FCoE controller whose mode is changing | ||
2821 | * @fip_mode: The new fip mode | ||
2822 | * | ||
2823 | * Note that we shouldn't be changing the libfc discovery settings | ||
2824 | * (fc_disc_config) while an lport is going through the libfc state | ||
2825 | * machine. The mode can only be changed when a fcoe_ctlr device is | ||
2826 | * disabled, so that should ensure that this routine is only called | ||
2827 | * when nothing is happening. | ||
2828 | */ | ||
2829 | void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, | ||
2830 | enum fip_state fip_mode) | ||
2831 | { | ||
2832 | void *priv; | ||
2833 | |||
2834 | WARN_ON(lport->state != LPORT_ST_RESET && | ||
2835 | lport->state != LPORT_ST_DISABLED); | ||
2836 | |||
2837 | if (fip_mode == FIP_MODE_VN2VN) { | ||
2838 | lport->rport_priv_size = sizeof(struct fcoe_rport); | ||
2839 | lport->point_to_multipoint = 1; | ||
2840 | lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; | ||
2841 | lport->tt.disc_start = fcoe_ctlr_disc_start; | ||
2842 | lport->tt.disc_stop = fcoe_ctlr_disc_stop; | ||
2843 | lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; | ||
2844 | priv = fip; | ||
2845 | } else { | ||
2846 | lport->rport_priv_size = 0; | ||
2847 | lport->point_to_multipoint = 0; | ||
2848 | lport->tt.disc_recv_req = NULL; | ||
2849 | lport->tt.disc_start = NULL; | ||
2850 | lport->tt.disc_stop = NULL; | ||
2851 | lport->tt.disc_stop_final = NULL; | ||
2852 | priv = lport; | ||
2853 | } | ||
2854 | |||
2855 | fc_disc_config(lport, priv); | ||
2856 | } | ||
2857 | |||
2858 | /** | ||
2818 | * fcoe_libfc_config() - Sets up libfc related properties for local port | 2859 | * fcoe_libfc_config() - Sets up libfc related properties for local port |
2819 | * @lport: The local port to configure libfc for | 2860 | * @lport: The local port to configure libfc for |
2820 | * @fip: The FCoE controller in use by the local port | 2861 | * @fip: The FCoE controller in use by the local port |
@@ -2833,21 +2874,9 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip, | |||
2833 | fc_exch_init(lport); | 2874 | fc_exch_init(lport); |
2834 | fc_elsct_init(lport); | 2875 | fc_elsct_init(lport); |
2835 | fc_lport_init(lport); | 2876 | fc_lport_init(lport); |
2836 | if (fip->mode == FIP_MODE_VN2VN) | ||
2837 | lport->rport_priv_size = sizeof(struct fcoe_rport); | ||
2838 | fc_rport_init(lport); | 2877 | fc_rport_init(lport); |
2839 | if (fip->mode == FIP_MODE_VN2VN) { | 2878 | fc_disc_init(lport); |
2840 | lport->point_to_multipoint = 1; | 2879 | fcoe_ctlr_mode_set(lport, fip, fip->mode); |
2841 | lport->tt.disc_recv_req = fcoe_ctlr_disc_recv; | ||
2842 | lport->tt.disc_start = fcoe_ctlr_disc_start; | ||
2843 | lport->tt.disc_stop = fcoe_ctlr_disc_stop; | ||
2844 | lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final; | ||
2845 | mutex_init(&lport->disc.disc_mutex); | ||
2846 | INIT_LIST_HEAD(&lport->disc.rports); | ||
2847 | lport->disc.priv = fip; | ||
2848 | } else { | ||
2849 | fc_disc_init(lport); | ||
2850 | } | ||
2851 | return 0; | 2880 | return 0; |
2852 | } | 2881 | } |
2853 | EXPORT_SYMBOL_GPL(fcoe_libfc_config); | 2882 | EXPORT_SYMBOL_GPL(fcoe_libfc_config); |
@@ -2875,6 +2904,7 @@ EXPORT_SYMBOL(fcoe_fcf_get_selected); | |||
2875 | void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) | 2904 | void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) |
2876 | { | 2905 | { |
2877 | struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); | 2906 | struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); |
2907 | struct fc_lport *lport = ctlr->lp; | ||
2878 | 2908 | ||
2879 | mutex_lock(&ctlr->ctlr_mutex); | 2909 | mutex_lock(&ctlr->ctlr_mutex); |
2880 | switch (ctlr_dev->mode) { | 2910 | switch (ctlr_dev->mode) { |
@@ -2888,5 +2918,7 @@ void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev) | |||
2888 | } | 2918 | } |
2889 | 2919 | ||
2890 | mutex_unlock(&ctlr->ctlr_mutex); | 2920 | mutex_unlock(&ctlr->ctlr_mutex); |
2921 | |||
2922 | fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode); | ||
2891 | } | 2923 | } |
2892 | EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode); | 2924 | EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode); |
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index a044f593e8b9..d0fa4b6c551f 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -1899,8 +1899,8 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev) | |||
1899 | sdev->allow_restart = 1; | 1899 | sdev->allow_restart = 1; |
1900 | blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); | 1900 | blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); |
1901 | } | 1901 | } |
1902 | scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); | ||
1903 | spin_unlock_irqrestore(shost->host_lock, lock_flags); | 1902 | spin_unlock_irqrestore(shost->host_lock, lock_flags); |
1903 | scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun); | ||
1904 | return 0; | 1904 | return 0; |
1905 | } | 1905 | } |
1906 | 1906 | ||
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index f328089a1060..2197b57fb225 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -5148,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd) | |||
5148 | ipr_trace; | 5148 | ipr_trace; |
5149 | } | 5149 | } |
5150 | 5150 | ||
5151 | list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q); | 5151 | list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); |
5152 | if (!ipr_is_naca_model(res)) | 5152 | if (!ipr_is_naca_model(res)) |
5153 | res->needs_sync_complete = 1; | 5153 | res->needs_sync_complete = 1; |
5154 | 5154 | ||
@@ -9349,7 +9349,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) | |||
9349 | int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); | 9349 | int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg); |
9350 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 9350 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
9351 | 9351 | ||
9352 | rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); | 9352 | if (ioa_cfg->intr_flag == IPR_USE_MSIX) |
9353 | rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg); | ||
9354 | else | ||
9355 | rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg); | ||
9353 | if (rc) { | 9356 | if (rc) { |
9354 | dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); | 9357 | dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq); |
9355 | return rc; | 9358 | return rc; |
@@ -9371,7 +9374,10 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev) | |||
9371 | 9374 | ||
9372 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); | 9375 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); |
9373 | 9376 | ||
9374 | free_irq(pdev->irq, ioa_cfg); | 9377 | if (ioa_cfg->intr_flag == IPR_USE_MSIX) |
9378 | free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg); | ||
9379 | else | ||
9380 | free_irq(pdev->irq, ioa_cfg); | ||
9375 | 9381 | ||
9376 | LEAVE; | 9382 | LEAVE; |
9377 | 9383 | ||
@@ -9722,6 +9728,7 @@ static void __ipr_remove(struct pci_dev *pdev) | |||
9722 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); | 9728 | spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); |
9723 | wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); | 9729 | wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); |
9724 | flush_work(&ioa_cfg->work_q); | 9730 | flush_work(&ioa_cfg->work_q); |
9731 | INIT_LIST_HEAD(&ioa_cfg->used_res_q); | ||
9725 | spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); | 9732 | spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); |
9726 | 9733 | ||
9727 | spin_lock(&ipr_driver_lock); | 9734 | spin_lock(&ipr_driver_lock); |
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 8e561e6a557c..880a9068ca12 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c | |||
@@ -712,12 +712,13 @@ static void fc_disc_stop_final(struct fc_lport *lport) | |||
712 | } | 712 | } |
713 | 713 | ||
714 | /** | 714 | /** |
715 | * fc_disc_init() - Initialize the discovery layer for a local port | 715 | * fc_disc_config() - Configure the discovery layer for a local port |
716 | * @lport: The local port that needs the discovery layer to be initialized | 716 | * @lport: The local port that needs the discovery layer to be configured |
717 | * @priv: Private data structure for users of the discovery layer | ||
717 | */ | 718 | */ |
718 | int fc_disc_init(struct fc_lport *lport) | 719 | void fc_disc_config(struct fc_lport *lport, void *priv) |
719 | { | 720 | { |
720 | struct fc_disc *disc; | 721 | struct fc_disc *disc = &lport->disc; |
721 | 722 | ||
722 | if (!lport->tt.disc_start) | 723 | if (!lport->tt.disc_start) |
723 | lport->tt.disc_start = fc_disc_start; | 724 | lport->tt.disc_start = fc_disc_start; |
@@ -732,12 +733,21 @@ int fc_disc_init(struct fc_lport *lport) | |||
732 | lport->tt.disc_recv_req = fc_disc_recv_req; | 733 | lport->tt.disc_recv_req = fc_disc_recv_req; |
733 | 734 | ||
734 | disc = &lport->disc; | 735 | disc = &lport->disc; |
736 | |||
737 | disc->priv = priv; | ||
738 | } | ||
739 | EXPORT_SYMBOL(fc_disc_config); | ||
740 | |||
741 | /** | ||
742 | * fc_disc_init() - Initialize the discovery layer for a local port | ||
743 | * @lport: The local port that needs the discovery layer to be initialized | ||
744 | */ | ||
745 | void fc_disc_init(struct fc_lport *lport) | ||
746 | { | ||
747 | struct fc_disc *disc = &lport->disc; | ||
748 | |||
735 | INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); | 749 | INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout); |
736 | mutex_init(&disc->disc_mutex); | 750 | mutex_init(&disc->disc_mutex); |
737 | INIT_LIST_HEAD(&disc->rports); | 751 | INIT_LIST_HEAD(&disc->rports); |
738 | |||
739 | disc->priv = lport; | ||
740 | |||
741 | return 0; | ||
742 | } | 752 | } |
743 | EXPORT_SYMBOL(fc_disc_init); | 753 | EXPORT_SYMBOL(fc_disc_init); |
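The libfc change above splits one-time discovery setup (work item, mutex, rport list) in fc_disc_init() from the re-runnable fc_disc_config(), which only swaps handlers and the priv pointer; that split is what lets fcoe_ctlr_mode_set() flip between fabric and VN2VN modes without re-initializing locks. A minimal sketch of that init-once/configure-many shape, with all names invented for the example:

/*
 * Sketch of the init-once / configure-many split introduced above. The
 * struct and function names are invented; only the shape of the split
 * mirrors fc_disc_init()/fc_disc_config().
 */
#include <pthread.h>
#include <stdio.h>

struct demo_disc {
	pthread_mutex_t mutex;	/* set up exactly once */
	void *priv;		/* may be re-pointed on every reconfigure */
	void (*start)(struct demo_disc *);
};

static void fabric_start(struct demo_disc *d) { (void)d; printf("fabric start\n"); }
static void vn2vn_start(struct demo_disc *d)  { (void)d; printf("vn2vn start\n"); }

/* One-time setup: locks and lists only. */
static void demo_disc_init(struct demo_disc *d)
{
	pthread_mutex_init(&d->mutex, NULL);
}

/* Reconfiguration: swap handlers and the private pointer, nothing else. */
static void demo_disc_config(struct demo_disc *d, void *priv, int vn2vn)
{
	d->priv = priv;
	d->start = vn2vn ? vn2vn_start : fabric_start;
}

int main(void)
{
	struct demo_disc disc;
	int ctlr_ctx = 42;	/* stands in for the fcoe_ctlr */

	demo_disc_init(&disc);
	demo_disc_config(&disc, &disc, 0);	/* fabric mode */
	disc.start(&disc);
	demo_disc_config(&disc, &ctlr_ctx, 1);	/* switch to VN2VN */
	disc.start(&disc);
	return 0;
}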
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index aec2e0da5016..55cbd0180159 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c | |||
@@ -235,6 +235,17 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
235 | linkrate = phy->linkrate; | 235 | linkrate = phy->linkrate; |
236 | memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); | 236 | memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); |
237 | 237 | ||
238 | /* Handle vacant phy - rest of dr data is not valid so skip it */ | ||
239 | if (phy->phy_state == PHY_VACANT) { | ||
240 | memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); | ||
241 | phy->attached_dev_type = NO_DEVICE; | ||
242 | if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { | ||
243 | phy->phy_id = phy_id; | ||
244 | goto skip; | ||
245 | } else | ||
246 | goto out; | ||
247 | } | ||
248 | |||
238 | phy->attached_dev_type = to_dev_type(dr); | 249 | phy->attached_dev_type = to_dev_type(dr); |
239 | if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) | 250 | if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) |
240 | goto out; | 251 | goto out; |
@@ -272,6 +283,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) | |||
272 | phy->phy->maximum_linkrate = dr->pmax_linkrate; | 283 | phy->phy->maximum_linkrate = dr->pmax_linkrate; |
273 | phy->phy->negotiated_linkrate = phy->linkrate; | 284 | phy->phy->negotiated_linkrate = phy->linkrate; |
274 | 285 | ||
286 | skip: | ||
275 | if (new_phy) | 287 | if (new_phy) |
276 | if (sas_phy_add(phy->phy)) { | 288 | if (sas_phy_add(phy->phy)) { |
277 | sas_phy_free(phy->phy); | 289 | sas_phy_free(phy->phy); |
@@ -388,7 +400,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single) | |||
388 | if (!disc_req) | 400 | if (!disc_req) |
389 | return -ENOMEM; | 401 | return -ENOMEM; |
390 | 402 | ||
391 | disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE); | 403 | disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE); |
392 | if (!disc_resp) { | 404 | if (!disc_resp) { |
393 | kfree(disc_req); | 405 | kfree(disc_req); |
394 | return -ENOMEM; | 406 | return -ENOMEM; |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 74b67d98e952..d43faf34c1e2 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -438,11 +438,12 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, | |||
438 | struct lpfc_rqe *temp_hrqe; | 438 | struct lpfc_rqe *temp_hrqe; |
439 | struct lpfc_rqe *temp_drqe; | 439 | struct lpfc_rqe *temp_drqe; |
440 | struct lpfc_register doorbell; | 440 | struct lpfc_register doorbell; |
441 | int put_index = hq->host_index; | 441 | int put_index; |
442 | 442 | ||
443 | /* sanity check on queue memory */ | 443 | /* sanity check on queue memory */ |
444 | if (unlikely(!hq) || unlikely(!dq)) | 444 | if (unlikely(!hq) || unlikely(!dq)) |
445 | return -ENOMEM; | 445 | return -ENOMEM; |
446 | put_index = hq->host_index; | ||
446 | temp_hrqe = hq->qe[hq->host_index].rqe; | 447 | temp_hrqe = hq->qe[hq->host_index].rqe; |
447 | temp_drqe = dq->qe[dq->host_index].rqe; | 448 | temp_drqe = dq->qe[dq->host_index].rqe; |
448 | 449 | ||
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 1d82eef4e1eb..b3db9dcc2619 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -1938,11 +1938,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
1938 | "Timer for the VP[%d] has stopped\n", vha->vp_idx); | 1938 | "Timer for the VP[%d] has stopped\n", vha->vp_idx); |
1939 | } | 1939 | } |
1940 | 1940 | ||
1941 | /* No pending activities shall be there on the vha now */ | ||
1942 | if (ql2xextended_error_logging & ql_dbg_user) | ||
1943 | msleep(random32()%10); /* Just to see if something falls on | ||
1944 | * the net we have placed below */ | ||
1945 | |||
1946 | BUG_ON(atomic_read(&vha->vref_count)); | 1941 | BUG_ON(atomic_read(&vha->vref_count)); |
1947 | 1942 | ||
1948 | qla2x00_free_fcports(vha); | 1943 | qla2x00_free_fcports(vha); |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 1626de52e32a..fbc305f1c87c 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c | |||
@@ -15,6 +15,7 @@ | |||
15 | * | Mailbox commands | 0x115b | 0x111a-0x111b | | 15 | * | Mailbox commands | 0x115b | 0x111a-0x111b | |
16 | * | | | 0x112c-0x112e | | 16 | * | | | 0x112c-0x112e | |
17 | * | | | 0x113a | | 17 | * | | | 0x113a | |
18 | * | | | 0x1155-0x1158 | | ||
18 | * | Device Discovery | 0x2087 | 0x2020-0x2022, | | 19 | * | Device Discovery | 0x2087 | 0x2020-0x2022, | |
19 | * | | | 0x2016 | | 20 | * | | | 0x2016 | |
20 | * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b | | 21 | * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b | |
@@ -401,7 +402,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, | |||
401 | void *ring; | 402 | void *ring; |
402 | } aq, *aqp; | 403 | } aq, *aqp; |
403 | 404 | ||
404 | if (!ha->tgt.atio_q_length) | 405 | if (!ha->tgt.atio_ring) |
405 | return ptr; | 406 | return ptr; |
406 | 407 | ||
407 | num_queues = 1; | 408 | num_queues = 1; |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index c6509911772b..65c5ff75936b 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -863,7 +863,6 @@ typedef struct { | |||
863 | #define MBX_1 BIT_1 | 863 | #define MBX_1 BIT_1 |
864 | #define MBX_0 BIT_0 | 864 | #define MBX_0 BIT_0 |
865 | 865 | ||
866 | #define RNID_TYPE_SET_VERSION 0x9 | ||
867 | #define RNID_TYPE_ASIC_TEMP 0xC | 866 | #define RNID_TYPE_ASIC_TEMP 0xC |
868 | 867 | ||
869 | /* | 868 | /* |
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index eb3ca21a7f17..b310fa97b545 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
@@ -358,9 +358,6 @@ extern int | |||
358 | qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); | 358 | qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *); |
359 | 359 | ||
360 | extern int | 360 | extern int |
361 | qla2x00_set_driver_version(scsi_qla_host_t *, char *); | ||
362 | |||
363 | extern int | ||
364 | qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, | 361 | qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, |
365 | uint16_t, uint16_t, uint16_t, uint16_t); | 362 | uint16_t, uint16_t, uint16_t, uint16_t); |
366 | 363 | ||
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index edf4d14a1335..b59203393cb2 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -619,8 +619,6 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) | |||
619 | if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) | 619 | if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) |
620 | qla24xx_read_fcp_prio_cfg(vha); | 620 | qla24xx_read_fcp_prio_cfg(vha); |
621 | 621 | ||
622 | qla2x00_set_driver_version(vha, QLA2XXX_VERSION); | ||
623 | |||
624 | return (rval); | 622 | return (rval); |
625 | } | 623 | } |
626 | 624 | ||
@@ -1399,7 +1397,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) | |||
1399 | mq_size += ha->max_rsp_queues * | 1397 | mq_size += ha->max_rsp_queues * |
1400 | (rsp->length * sizeof(response_t)); | 1398 | (rsp->length * sizeof(response_t)); |
1401 | } | 1399 | } |
1402 | if (ha->tgt.atio_q_length) | 1400 | if (ha->tgt.atio_ring) |
1403 | mq_size += ha->tgt.atio_q_length * sizeof(request_t); | 1401 | mq_size += ha->tgt.atio_q_length * sizeof(request_t); |
1404 | /* Allocate memory for Fibre Channel Event Buffer. */ | 1402 | /* Allocate memory for Fibre Channel Event Buffer. */ |
1405 | if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) | 1403 | if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 186dd59ce4fa..43345af56431 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -3866,64 +3866,6 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) | |||
3866 | return rval; | 3866 | return rval; |
3867 | } | 3867 | } |
3868 | 3868 | ||
3869 | int | ||
3870 | qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version) | ||
3871 | { | ||
3872 | int rval; | ||
3873 | mbx_cmd_t mc; | ||
3874 | mbx_cmd_t *mcp = &mc; | ||
3875 | int len; | ||
3876 | uint16_t dwlen; | ||
3877 | uint8_t *str; | ||
3878 | dma_addr_t str_dma; | ||
3879 | struct qla_hw_data *ha = vha->hw; | ||
3880 | |||
3881 | if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha)) | ||
3882 | return QLA_FUNCTION_FAILED; | ||
3883 | |||
3884 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155, | ||
3885 | "Entered %s.\n", __func__); | ||
3886 | |||
3887 | str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma); | ||
3888 | if (!str) { | ||
3889 | ql_log(ql_log_warn, vha, 0x1156, | ||
3890 | "Failed to allocate driver version param.\n"); | ||
3891 | return QLA_MEMORY_ALLOC_FAILED; | ||
3892 | } | ||
3893 | |||
3894 | memcpy(str, "\x7\x3\x11\x0", 4); | ||
3895 | dwlen = str[0]; | ||
3896 | len = dwlen * sizeof(uint32_t) - 4; | ||
3897 | memset(str + 4, 0, len); | ||
3898 | if (len > strlen(version)) | ||
3899 | len = strlen(version); | ||
3900 | memcpy(str + 4, version, len); | ||
3901 | |||
3902 | mcp->mb[0] = MBC_SET_RNID_PARAMS; | ||
3903 | mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen; | ||
3904 | mcp->mb[2] = MSW(LSD(str_dma)); | ||
3905 | mcp->mb[3] = LSW(LSD(str_dma)); | ||
3906 | mcp->mb[6] = MSW(MSD(str_dma)); | ||
3907 | mcp->mb[7] = LSW(MSD(str_dma)); | ||
3908 | mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; | ||
3909 | mcp->in_mb = MBX_0; | ||
3910 | mcp->tov = MBX_TOV_SECONDS; | ||
3911 | mcp->flags = 0; | ||
3912 | rval = qla2x00_mailbox_command(vha, mcp); | ||
3913 | |||
3914 | if (rval != QLA_SUCCESS) { | ||
3915 | ql_dbg(ql_dbg_mbx, vha, 0x1157, | ||
3916 | "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); | ||
3917 | } else { | ||
3918 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158, | ||
3919 | "Done %s.\n", __func__); | ||
3920 | } | ||
3921 | |||
3922 | dma_pool_free(ha->s_dma_pool, str, str_dma); | ||
3923 | |||
3924 | return rval; | ||
3925 | } | ||
3926 | |||
3927 | static int | 3869 | static int |
3928 | qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) | 3870 | qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp) |
3929 | { | 3871 | { |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 2b6e478d9e33..ec54036d1e12 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.04.00.08-k" | 10 | #define QLA2XXX_VERSION "8.04.00.13-k" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 4 | 13 | #define QLA_DRIVER_MINOR_VER 4 |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 86974471af68..2a32036a9404 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -4112,6 +4112,10 @@ static int st_probe(struct device *dev) | |||
4112 | tpnt->disk = disk; | 4112 | tpnt->disk = disk; |
4113 | disk->private_data = &tpnt->driver; | 4113 | disk->private_data = &tpnt->driver; |
4114 | disk->queue = SDp->request_queue; | 4114 | disk->queue = SDp->request_queue; |
4115 | /* SCSI tape doesn't register this gendisk via add_disk(). Manually | ||
4116 | * take queue reference that release_disk() expects. */ | ||
4117 | if (!blk_get_queue(disk->queue)) | ||
4118 | goto out_put_disk; | ||
4115 | tpnt->driver = &st_template; | 4119 | tpnt->driver = &st_template; |
4116 | 4120 | ||
4117 | tpnt->device = SDp; | 4121 | tpnt->device = SDp; |
@@ -4185,7 +4189,7 @@ static int st_probe(struct device *dev) | |||
4185 | idr_preload_end(); | 4189 | idr_preload_end(); |
4186 | if (error < 0) { | 4190 | if (error < 0) { |
4187 | pr_warn("st: idr allocation failed: %d\n", error); | 4191 | pr_warn("st: idr allocation failed: %d\n", error); |
4188 | goto out_put_disk; | 4192 | goto out_put_queue; |
4189 | } | 4193 | } |
4190 | tpnt->index = error; | 4194 | tpnt->index = error; |
4191 | sprintf(disk->disk_name, "st%d", tpnt->index); | 4195 | sprintf(disk->disk_name, "st%d", tpnt->index); |
@@ -4211,6 +4215,8 @@ out_remove_devs: | |||
4211 | spin_lock(&st_index_lock); | 4215 | spin_lock(&st_index_lock); |
4212 | idr_remove(&st_index_idr, tpnt->index); | 4216 | idr_remove(&st_index_idr, tpnt->index); |
4213 | spin_unlock(&st_index_lock); | 4217 | spin_unlock(&st_index_lock); |
4218 | out_put_queue: | ||
4219 | blk_put_queue(disk->queue); | ||
4214 | out_put_disk: | 4220 | out_put_disk: |
4215 | put_disk(disk); | 4221 | put_disk(disk); |
4216 | kfree(tpnt); | 4222 | kfree(tpnt); |
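The st hunk above takes an explicit reference on the request queue because the tape gendisk is never registered via add_disk(), and the matching blk_put_queue() therefore needs its own label in the error-unwind ladder. A small sketch of pairing such a manual get with its own unwind label; the demo_* resources are invented for the example:

/*
 * Sketch of pairing a manual reference with its own unwind label, as the
 * st_probe() hunk does with blk_get_queue()/blk_put_queue(). The demo_*
 * names are not a real API.
 */
#include <stdio.h>

struct demo_queue { int refs; };

static int demo_get_queue(struct demo_queue *q)  { q->refs++; return 1; }
static void demo_put_queue(struct demo_queue *q) { q->refs--; }

static int demo_probe(struct demo_queue *q, int fail_late)
{
	if (!demo_get_queue(q))		/* reference we now own */
		goto out;
	if (fail_late)			/* e.g. the id allocation failing */
		goto out_put_queue;

	printf("probe ok, refs=%d\n", q->refs);
	return 0;			/* keep the ref; remove() drops it */

out_put_queue:
	demo_put_queue(q);		/* later failures must drop it again */
out:
	return -1;
}

int main(void)
{
	struct demo_queue q = { .refs = 1 };

	demo_probe(&q, 1);
	printf("after failed probe: refs=%d (balanced)\n", q.refs);
	demo_probe(&q, 0);
	return 0;
}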
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index f80eee74a311..2be0de920d67 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -55,6 +55,7 @@ comment "SPI Master Controller Drivers" | |||
55 | 55 | ||
56 | config SPI_ALTERA | 56 | config SPI_ALTERA |
57 | tristate "Altera SPI Controller" | 57 | tristate "Altera SPI Controller" |
58 | depends on GENERIC_HARDIRQS | ||
58 | select SPI_BITBANG | 59 | select SPI_BITBANG |
59 | help | 60 | help |
60 | This is the driver for the Altera SPI Controller. | 61 | This is the driver for the Altera SPI Controller. |
@@ -310,7 +311,7 @@ config SPI_PXA2XX_DMA | |||
310 | 311 | ||
311 | config SPI_PXA2XX | 312 | config SPI_PXA2XX |
312 | tristate "PXA2xx SSP SPI master" | 313 | tristate "PXA2xx SSP SPI master" |
313 | depends on ARCH_PXA || PCI || ACPI | 314 | depends on (ARCH_PXA || PCI || ACPI) && GENERIC_HARDIRQS |
314 | select PXA_SSP if ARCH_PXA | 315 | select PXA_SSP if ARCH_PXA |
315 | help | 316 | help |
316 | This enables using a PXA2xx or Sodaville SSP port as a SPI master | 317 | This enables using a PXA2xx or Sodaville SSP port as a SPI master |
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 9578af782a77..d7df435d962e 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c | |||
@@ -152,7 +152,6 @@ static void bcm63xx_spi_setup_transfer(struct spi_device *spi, | |||
152 | static int bcm63xx_spi_setup(struct spi_device *spi) | 152 | static int bcm63xx_spi_setup(struct spi_device *spi) |
153 | { | 153 | { |
154 | struct bcm63xx_spi *bs; | 154 | struct bcm63xx_spi *bs; |
155 | int ret; | ||
156 | 155 | ||
157 | bs = spi_master_get_devdata(spi->master); | 156 | bs = spi_master_get_devdata(spi->master); |
158 | 157 | ||
@@ -490,7 +489,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) | |||
490 | default: | 489 | default: |
491 | dev_err(dev, "unsupported MSG_CTL width: %d\n", | 490 | dev_err(dev, "unsupported MSG_CTL width: %d\n", |
492 | bs->msg_ctl_width); | 491 | bs->msg_ctl_width); |
493 | goto out_clk_disable; | 492 | goto out_err; |
494 | } | 493 | } |
495 | 494 | ||
496 | /* Initialize hardware */ | 495 | /* Initialize hardware */ |
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c index 89480b281d74..3e490ee7f275 100644 --- a/drivers/spi/spi-mpc512x-psc.c +++ b/drivers/spi/spi-mpc512x-psc.c | |||
@@ -164,7 +164,7 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi, | |||
164 | 164 | ||
165 | for (i = count; i > 0; i--) { | 165 | for (i = count; i > 0; i--) { |
166 | data = tx_buf ? *tx_buf++ : 0; | 166 | data = tx_buf ? *tx_buf++ : 0; |
167 | if (len == EOFBYTE) | 167 | if (len == EOFBYTE && t->cs_change) |
168 | setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); | 168 | setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF); |
169 | out_8(&fifo->txdata_8, data); | 169 | out_8(&fifo->txdata_8, data); |
170 | len--; | 170 | len--; |
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 90b27a3508a6..810413883c79 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
@@ -1168,7 +1168,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1168 | 1168 | ||
1169 | master->dev.parent = &pdev->dev; | 1169 | master->dev.parent = &pdev->dev; |
1170 | master->dev.of_node = pdev->dev.of_node; | 1170 | master->dev.of_node = pdev->dev.of_node; |
1171 | ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev)); | ||
1172 | /* the spi->mode bits understood by this driver: */ | 1171 | /* the spi->mode bits understood by this driver: */ |
1173 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; | 1172 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; |
1174 | 1173 | ||
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index e862ab8853aa..4188b2faac5c 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c | |||
@@ -994,25 +994,30 @@ static irqreturn_t s3c64xx_spi_irq(int irq, void *data) | |||
994 | { | 994 | { |
995 | struct s3c64xx_spi_driver_data *sdd = data; | 995 | struct s3c64xx_spi_driver_data *sdd = data; |
996 | struct spi_master *spi = sdd->master; | 996 | struct spi_master *spi = sdd->master; |
997 | unsigned int val; | 997 | unsigned int val, clr = 0; |
998 | 998 | ||
999 | val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR); | 999 | val = readl(sdd->regs + S3C64XX_SPI_STATUS); |
1000 | 1000 | ||
1001 | val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR | | 1001 | if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) { |
1002 | S3C64XX_SPI_PND_RX_UNDERRUN_CLR | | 1002 | clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR; |
1003 | S3C64XX_SPI_PND_TX_OVERRUN_CLR | | ||
1004 | S3C64XX_SPI_PND_TX_UNDERRUN_CLR; | ||
1005 | |||
1006 | writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR); | ||
1007 | |||
1008 | if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR) | ||
1009 | dev_err(&spi->dev, "RX overrun\n"); | 1003 | dev_err(&spi->dev, "RX overrun\n"); |
1010 | if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR) | 1004 | } |
1005 | if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) { | ||
1006 | clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR; | ||
1011 | dev_err(&spi->dev, "RX underrun\n"); | 1007 | dev_err(&spi->dev, "RX underrun\n"); |
1012 | if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR) | 1008 | } |
1009 | if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) { | ||
1010 | clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR; | ||
1013 | dev_err(&spi->dev, "TX overrun\n"); | 1011 | dev_err(&spi->dev, "TX overrun\n"); |
1014 | if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR) | 1012 | } |
1013 | if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) { | ||
1014 | clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR; | ||
1015 | dev_err(&spi->dev, "TX underrun\n"); | 1015 | dev_err(&spi->dev, "TX underrun\n"); |
1016 | } | ||
1017 | |||
1018 | /* Clear the pending irq by setting and then clearing it */ | ||
1019 | writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR); | ||
1020 | writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR); | ||
1016 | 1021 | ||
1017 | return IRQ_HANDLED; | 1022 | return IRQ_HANDLED; |
1018 | } | 1023 | } |
@@ -1036,9 +1041,13 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) | |||
1036 | writel(0, regs + S3C64XX_SPI_MODE_CFG); | 1041 | writel(0, regs + S3C64XX_SPI_MODE_CFG); |
1037 | writel(0, regs + S3C64XX_SPI_PACKET_CNT); | 1042 | writel(0, regs + S3C64XX_SPI_PACKET_CNT); |
1038 | 1043 | ||
1039 | /* Clear any irq pending bits */ | 1044 | /* Clear any irq pending bits, should set and clear the bits */ |
1040 | writel(readl(regs + S3C64XX_SPI_PENDING_CLR), | 1045 | val = S3C64XX_SPI_PND_RX_OVERRUN_CLR | |
1041 | regs + S3C64XX_SPI_PENDING_CLR); | 1046 | S3C64XX_SPI_PND_RX_UNDERRUN_CLR | |
1047 | S3C64XX_SPI_PND_TX_OVERRUN_CLR | | ||
1048 | S3C64XX_SPI_PND_TX_UNDERRUN_CLR; | ||
1049 | writel(val, regs + S3C64XX_SPI_PENDING_CLR); | ||
1050 | writel(0, regs + S3C64XX_SPI_PENDING_CLR); | ||
1042 | 1051 | ||
1043 | writel(0, regs + S3C64XX_SPI_SWAP_CFG); | 1052 | writel(0, regs + S3C64XX_SPI_SWAP_CFG); |
1044 | 1053 | ||
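
The s3c64xx interrupt rework above reads the STATUS register to decide which error conditions actually occurred, then acknowledges them by writing the collected bits to PENDING_CLR followed by a write of zero. A minimal stand-alone sketch of that set-then-clear acknowledge sequence; the register layout and bit values here are invented and only illustrate the shape of the code.

    #include <stdint.h>

    #define ST_RX_OVERRUN   (1u << 5)   /* invented bit positions, for illustration only */
    #define PND_RX_OVERRUN  (1u << 5)

    void ack_rx_overrun(volatile uint32_t *status, volatile uint32_t *pending_clr)
    {
        uint32_t clr = 0;

        if (*status & ST_RX_OVERRUN)    /* decide from STATUS what to acknowledge */
            clr |= PND_RX_OVERRUN;

        *pending_clr = clr;             /* set the clear bits ... */
        *pending_clr = 0;               /* ... then write zero, as the hunk's comment says */
    }
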
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index b8698b389ef3..a829563f4713 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c | |||
@@ -858,21 +858,6 @@ static int tegra_slink_setup(struct spi_device *spi) | |||
858 | return 0; | 858 | return 0; |
859 | } | 859 | } |
860 | 860 | ||
861 | static int tegra_slink_prepare_transfer(struct spi_master *master) | ||
862 | { | ||
863 | struct tegra_slink_data *tspi = spi_master_get_devdata(master); | ||
864 | |||
865 | return pm_runtime_get_sync(tspi->dev); | ||
866 | } | ||
867 | |||
868 | static int tegra_slink_unprepare_transfer(struct spi_master *master) | ||
869 | { | ||
870 | struct tegra_slink_data *tspi = spi_master_get_devdata(master); | ||
871 | |||
872 | pm_runtime_put(tspi->dev); | ||
873 | return 0; | ||
874 | } | ||
875 | |||
876 | static int tegra_slink_transfer_one_message(struct spi_master *master, | 861 | static int tegra_slink_transfer_one_message(struct spi_master *master, |
877 | struct spi_message *msg) | 862 | struct spi_message *msg) |
878 | { | 863 | { |
@@ -885,6 +870,12 @@ static int tegra_slink_transfer_one_message(struct spi_master *master, | |||
885 | 870 | ||
886 | msg->status = 0; | 871 | msg->status = 0; |
887 | msg->actual_length = 0; | 872 | msg->actual_length = 0; |
873 | ret = pm_runtime_get_sync(tspi->dev); | ||
874 | if (ret < 0) { | ||
875 | dev_err(tspi->dev, "runtime get failed: %d\n", ret); | ||
876 | goto done; | ||
877 | } | ||
878 | |||
888 | single_xfer = list_is_singular(&msg->transfers); | 879 | single_xfer = list_is_singular(&msg->transfers); |
889 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | 880 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
890 | INIT_COMPLETION(tspi->xfer_completion); | 881 | INIT_COMPLETION(tspi->xfer_completion); |
@@ -921,6 +912,8 @@ static int tegra_slink_transfer_one_message(struct spi_master *master, | |||
921 | exit: | 912 | exit: |
922 | tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); | 913 | tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND); |
923 | tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); | 914 | tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2); |
915 | pm_runtime_put(tspi->dev); | ||
916 | done: | ||
924 | msg->status = ret; | 917 | msg->status = ret; |
925 | spi_finalize_current_message(master); | 918 | spi_finalize_current_message(master); |
926 | return ret; | 919 | return ret; |
@@ -1148,9 +1141,7 @@ static int tegra_slink_probe(struct platform_device *pdev) | |||
1148 | /* the spi->mode bits understood by this driver: */ | 1141 | /* the spi->mode bits understood by this driver: */ |
1149 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | 1142 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; |
1150 | master->setup = tegra_slink_setup; | 1143 | master->setup = tegra_slink_setup; |
1151 | master->prepare_transfer_hardware = tegra_slink_prepare_transfer; | ||
1152 | master->transfer_one_message = tegra_slink_transfer_one_message; | 1144 | master->transfer_one_message = tegra_slink_transfer_one_message; |
1153 | master->unprepare_transfer_hardware = tegra_slink_unprepare_transfer; | ||
1154 | master->num_chipselect = MAX_CHIP_SELECT; | 1145 | master->num_chipselect = MAX_CHIP_SELECT; |
1155 | master->bus_num = -1; | 1146 | master->bus_num = -1; |
1156 | 1147 | ||
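
The tegra20-slink change above drops the prepare/unprepare_transfer_hardware callbacks and instead takes the runtime-PM reference inside transfer_one_message(), failing the message early if the get fails and releasing the reference only on the path where it succeeded. A plain-C sketch of that control flow, with runtime_get()/runtime_put() as invented stand-ins for pm_runtime_get_sync()/pm_runtime_put():

    #include <stdio.h>

    static int runtime_get(void) { return 0; }   /* stands in for pm_runtime_get_sync() */
    static void runtime_put(void) { }            /* stands in for pm_runtime_put() */

    int transfer_one_message_demo(void)
    {
        int ret;

        ret = runtime_get();
        if (ret < 0) {
            fprintf(stderr, "runtime get failed: %d\n", ret);
            goto done;
        }

        /* ... perform the transfers ... */
        ret = 0;

        runtime_put();                   /* released only when the get succeeded */
    done:
        return ret;
    }
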
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index f996c600eb8c..004b10f184d4 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -543,17 +543,16 @@ static void spi_pump_messages(struct kthread_work *work) | |||
543 | /* Lock queue and check for queue work */ | 543 | /* Lock queue and check for queue work */ |
544 | spin_lock_irqsave(&master->queue_lock, flags); | 544 | spin_lock_irqsave(&master->queue_lock, flags); |
545 | if (list_empty(&master->queue) || !master->running) { | 545 | if (list_empty(&master->queue) || !master->running) { |
546 | if (master->busy && master->unprepare_transfer_hardware) { | 546 | if (!master->busy) { |
547 | ret = master->unprepare_transfer_hardware(master); | 547 | spin_unlock_irqrestore(&master->queue_lock, flags); |
548 | if (ret) { | 548 | return; |
549 | spin_unlock_irqrestore(&master->queue_lock, flags); | ||
550 | dev_err(&master->dev, | ||
551 | "failed to unprepare transfer hardware\n"); | ||
552 | return; | ||
553 | } | ||
554 | } | 549 | } |
555 | master->busy = false; | 550 | master->busy = false; |
556 | spin_unlock_irqrestore(&master->queue_lock, flags); | 551 | spin_unlock_irqrestore(&master->queue_lock, flags); |
552 | if (master->unprepare_transfer_hardware && | ||
553 | master->unprepare_transfer_hardware(master)) | ||
554 | dev_err(&master->dev, | ||
555 | "failed to unprepare transfer hardware\n"); | ||
557 | return; | 556 | return; |
558 | } | 557 | } |
559 | 558 | ||
@@ -984,7 +983,7 @@ static void acpi_register_spi_devices(struct spi_master *master) | |||
984 | acpi_status status; | 983 | acpi_status status; |
985 | acpi_handle handle; | 984 | acpi_handle handle; |
986 | 985 | ||
987 | handle = ACPI_HANDLE(&master->dev); | 986 | handle = ACPI_HANDLE(master->dev.parent); |
988 | if (!handle) | 987 | if (!handle) |
989 | return; | 988 | return; |
990 | 989 | ||
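
In spi_pump_messages() above, the idle path now clears master->busy under the queue lock but calls unprepare_transfer_hardware() only after the lock has been released, since that callback may be slow or sleep. A stand-alone pthreads sketch of the same ordering; queue_lock, busy and unprepare_hw() are invented names, not the SPI core API.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool busy = true;

    static int unprepare_hw(void) { return 0; }   /* may sleep in a real driver */

    void pump_idle_path(void)
    {
        pthread_mutex_lock(&queue_lock);
        if (!busy) {
            pthread_mutex_unlock(&queue_lock);
            return;
        }
        busy = false;
        pthread_mutex_unlock(&queue_lock);

        /* the callback now runs with no lock held, matching the patched code */
        if (unprepare_hw())
            fprintf(stderr, "failed to unprepare transfer hardware\n");
    }
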
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c index 81a1fe661579..71a73ec5af8d 100644 --- a/drivers/staging/comedi/drivers/s626.c +++ b/drivers/staging/comedi/drivers/s626.c | |||
@@ -1483,7 +1483,7 @@ static int s626_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) | |||
1483 | case TRIG_NONE: | 1483 | case TRIG_NONE: |
1484 | /* continous acquisition */ | 1484 | /* continous acquisition */ |
1485 | devpriv->ai_continous = 1; | 1485 | devpriv->ai_continous = 1; |
1486 | devpriv->ai_sample_count = 0; | 1486 | devpriv->ai_sample_count = 1; |
1487 | break; | 1487 | break; |
1488 | } | 1488 | } |
1489 | 1489 | ||
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig index 73582705e8c5..5c3714530961 100644 --- a/drivers/staging/zcache/Kconfig +++ b/drivers/staging/zcache/Kconfig | |||
@@ -15,7 +15,7 @@ config RAMSTER | |||
15 | depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE=y | 15 | depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE=y |
16 | depends on NET | 16 | depends on NET |
17 | # must ensure struct page is 8-byte aligned | 17 | # must ensure struct page is 8-byte aligned |
18 | select HAVE_ALIGNED_STRUCT_PAGE if !64_BIT | 18 | select HAVE_ALIGNED_STRUCT_PAGE if !64BIT |
19 | default n | 19 | default n |
20 | help | 20 | help |
21 | RAMster allows RAM on other machines in a cluster to be utilized | 21 | RAMster allows RAM on other machines in a cluster to be utilized |
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c index 7b0bfa0e7a9c..3078c403b42d 100644 --- a/drivers/thermal/dove_thermal.c +++ b/drivers/thermal/dove_thermal.c | |||
@@ -143,22 +143,18 @@ static int dove_thermal_probe(struct platform_device *pdev) | |||
143 | if (!priv) | 143 | if (!priv) |
144 | return -ENOMEM; | 144 | return -ENOMEM; |
145 | 145 | ||
146 | priv->sensor = devm_request_and_ioremap(&pdev->dev, res); | 146 | priv->sensor = devm_ioremap_resource(&pdev->dev, res); |
147 | if (!priv->sensor) { | 147 | if (IS_ERR(priv->sensor)) |
148 | dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); | 148 | return PTR_ERR(priv->sensor); |
149 | return -EADDRNOTAVAIL; | ||
150 | } | ||
151 | 149 | ||
152 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 150 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
153 | if (!res) { | 151 | if (!res) { |
154 | dev_err(&pdev->dev, "Failed to get platform resource\n"); | 152 | dev_err(&pdev->dev, "Failed to get platform resource\n"); |
155 | return -ENODEV; | 153 | return -ENODEV; |
156 | } | 154 | } |
157 | priv->control = devm_request_and_ioremap(&pdev->dev, res); | 155 | priv->control = devm_ioremap_resource(&pdev->dev, res); |
158 | if (!priv->control) { | 156 | if (IS_ERR(priv->control)) |
159 | dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); | 157 | return PTR_ERR(priv->control); |
160 | return -EADDRNOTAVAIL; | ||
161 | } | ||
162 | 158 | ||
163 | ret = dove_init_sensor(priv); | 159 | ret = dove_init_sensor(priv); |
164 | if (ret) { | 160 | if (ret) { |
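
This and the following thermal hunks convert devm_request_and_ioremap(), which returned NULL on failure, to devm_ioremap_resource(), which logs its own error and hands back an ERR_PTR for the caller to propagate. A generic probe fragment showing the resulting idiom; demo_probe() is illustrative only and builds only as part of a kernel driver, not stand-alone.

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
            return PTR_ERR(base);   /* helper already printed an error message */

        /* ... use base ... */
        return 0;
    }
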
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c index e04ebd8671ac..46568c078dee 100644 --- a/drivers/thermal/exynos_thermal.c +++ b/drivers/thermal/exynos_thermal.c | |||
@@ -476,7 +476,7 @@ static int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf) | |||
476 | 476 | ||
477 | if (IS_ERR(th_zone->therm_dev)) { | 477 | if (IS_ERR(th_zone->therm_dev)) { |
478 | pr_err("Failed to register thermal zone device\n"); | 478 | pr_err("Failed to register thermal zone device\n"); |
479 | ret = -EINVAL; | 479 | ret = PTR_ERR(th_zone->therm_dev); |
480 | goto err_unregister; | 480 | goto err_unregister; |
481 | } | 481 | } |
482 | th_zone->mode = THERMAL_DEVICE_ENABLED; | 482 | th_zone->mode = THERMAL_DEVICE_ENABLED; |
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c index 65cb4f09e8f6..e5500edb5285 100644 --- a/drivers/thermal/kirkwood_thermal.c +++ b/drivers/thermal/kirkwood_thermal.c | |||
@@ -85,11 +85,9 @@ static int kirkwood_thermal_probe(struct platform_device *pdev) | |||
85 | if (!priv) | 85 | if (!priv) |
86 | return -ENOMEM; | 86 | return -ENOMEM; |
87 | 87 | ||
88 | priv->sensor = devm_request_and_ioremap(&pdev->dev, res); | 88 | priv->sensor = devm_ioremap_resource(&pdev->dev, res); |
89 | if (!priv->sensor) { | 89 | if (IS_ERR(priv->sensor)) |
90 | dev_err(&pdev->dev, "Failed to request_ioremap memory\n"); | 90 | return PTR_ERR(priv->sensor); |
91 | return -EADDRNOTAVAIL; | ||
92 | } | ||
93 | 91 | ||
94 | thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0, | 92 | thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0, |
95 | priv, &ops, NULL, 0, 0); | 93 | priv, &ops, NULL, 0, 0); |
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 28f091994013..2cc5b6115e3e 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c | |||
@@ -145,6 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) | |||
145 | struct device *dev = rcar_priv_to_dev(priv); | 145 | struct device *dev = rcar_priv_to_dev(priv); |
146 | int i; | 146 | int i; |
147 | int ctemp, old, new; | 147 | int ctemp, old, new; |
148 | int ret = -EINVAL; | ||
148 | 149 | ||
149 | mutex_lock(&priv->lock); | 150 | mutex_lock(&priv->lock); |
150 | 151 | ||
@@ -174,7 +175,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) | |||
174 | 175 | ||
175 | if (!ctemp) { | 176 | if (!ctemp) { |
176 | dev_err(dev, "thermal sensor was broken\n"); | 177 | dev_err(dev, "thermal sensor was broken\n"); |
177 | return -EINVAL; | 178 | goto err_out_unlock; |
178 | } | 179 | } |
179 | 180 | ||
180 | /* | 181 | /* |
@@ -192,10 +193,10 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) | |||
192 | dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp); | 193 | dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp); |
193 | 194 | ||
194 | priv->ctemp = ctemp; | 195 | priv->ctemp = ctemp; |
195 | 196 | ret = 0; | |
197 | err_out_unlock: | ||
196 | mutex_unlock(&priv->lock); | 198 | mutex_unlock(&priv->lock); |
197 | 199 | return ret; | |
198 | return 0; | ||
199 | } | 200 | } |
200 | 201 | ||
201 | static int rcar_thermal_get_temp(struct thermal_zone_device *zone, | 202 | static int rcar_thermal_get_temp(struct thermal_zone_device *zone, |
@@ -363,6 +364,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
363 | struct resource *res, *irq; | 364 | struct resource *res, *irq; |
364 | int mres = 0; | 365 | int mres = 0; |
365 | int i; | 366 | int i; |
367 | int ret = -ENODEV; | ||
366 | int idle = IDLE_INTERVAL; | 368 | int idle = IDLE_INTERVAL; |
367 | 369 | ||
368 | common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); | 370 | common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); |
@@ -399,11 +401,9 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
399 | /* | 401 | /* |
400 | * rcar_has_irq_support() will be enabled | 402 | * rcar_has_irq_support() will be enabled |
401 | */ | 403 | */ |
402 | common->base = devm_request_and_ioremap(dev, res); | 404 | common->base = devm_ioremap_resource(dev, res); |
403 | if (!common->base) { | 405 | if (IS_ERR(common->base)) |
404 | dev_err(dev, "Unable to ioremap thermal register\n"); | 406 | return PTR_ERR(common->base); |
405 | return -ENOMEM; | ||
406 | } | ||
407 | 407 | ||
408 | /* enable temperature comparation */ | 408 | /* enable temperature comparation */ |
409 | rcar_thermal_common_write(common, ENR, 0x00030303); | 409 | rcar_thermal_common_write(common, ENR, 0x00030303); |
@@ -422,11 +422,9 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
422 | return -ENOMEM; | 422 | return -ENOMEM; |
423 | } | 423 | } |
424 | 424 | ||
425 | priv->base = devm_request_and_ioremap(dev, res); | 425 | priv->base = devm_ioremap_resource(dev, res); |
426 | if (!priv->base) { | 426 | if (IS_ERR(priv->base)) |
427 | dev_err(dev, "Unable to ioremap priv register\n"); | 427 | return PTR_ERR(priv->base); |
428 | return -ENOMEM; | ||
429 | } | ||
430 | 428 | ||
431 | priv->common = common; | 429 | priv->common = common; |
432 | priv->id = i; | 430 | priv->id = i; |
@@ -441,6 +439,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) | |||
441 | idle); | 439 | idle); |
442 | if (IS_ERR(priv->zone)) { | 440 | if (IS_ERR(priv->zone)) { |
443 | dev_err(dev, "can't register thermal zone\n"); | 441 | dev_err(dev, "can't register thermal zone\n"); |
442 | ret = PTR_ERR(priv->zone); | ||
444 | goto error_unregister; | 443 | goto error_unregister; |
445 | } | 444 | } |
446 | 445 | ||
@@ -460,7 +459,7 @@ error_unregister: | |||
460 | rcar_thermal_for_each_priv(priv, common) | 459 | rcar_thermal_for_each_priv(priv, common) |
461 | thermal_zone_device_unregister(priv->zone); | 460 | thermal_zone_device_unregister(priv->zone); |
462 | 461 | ||
463 | return -ENODEV; | 462 | return ret; |
464 | } | 463 | } |
465 | 464 | ||
466 | static int rcar_thermal_remove(struct platform_device *pdev) | 465 | static int rcar_thermal_remove(struct platform_device *pdev) |
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c index 484b6a3c9b03..302909ccf183 100644 --- a/drivers/tty/mxser.c +++ b/drivers/tty/mxser.c | |||
@@ -2643,9 +2643,9 @@ static int mxser_probe(struct pci_dev *pdev, | |||
2643 | mxvar_sdriver, brd->idx + i, &pdev->dev); | 2643 | mxvar_sdriver, brd->idx + i, &pdev->dev); |
2644 | if (IS_ERR(tty_dev)) { | 2644 | if (IS_ERR(tty_dev)) { |
2645 | retval = PTR_ERR(tty_dev); | 2645 | retval = PTR_ERR(tty_dev); |
2646 | for (i--; i >= 0; i--) | 2646 | for (; i > 0; i--) |
2647 | tty_unregister_device(mxvar_sdriver, | 2647 | tty_unregister_device(mxvar_sdriver, |
2648 | brd->idx + i); | 2648 | brd->idx + i - 1); |
2649 | goto err_relbrd; | 2649 | goto err_relbrd; |
2650 | } | 2650 | } |
2651 | } | 2651 | } |
@@ -2751,9 +2751,9 @@ static int __init mxser_module_init(void) | |||
2751 | tty_dev = tty_port_register_device(&brd->ports[i].port, | 2751 | tty_dev = tty_port_register_device(&brd->ports[i].port, |
2752 | mxvar_sdriver, brd->idx + i, NULL); | 2752 | mxvar_sdriver, brd->idx + i, NULL); |
2753 | if (IS_ERR(tty_dev)) { | 2753 | if (IS_ERR(tty_dev)) { |
2754 | for (i--; i >= 0; i--) | 2754 | for (; i > 0; i--) |
2755 | tty_unregister_device(mxvar_sdriver, | 2755 | tty_unregister_device(mxvar_sdriver, |
2756 | brd->idx + i); | 2756 | brd->idx + i - 1); |
2757 | for (i = 0; i < brd->info->nports; i++) | 2757 | for (i = 0; i < brd->info->nports; i++) |
2758 | tty_port_destroy(&brd->ports[i].port); | 2758 | tty_port_destroy(&brd->ports[i].port); |
2759 | free_irq(brd->irq, brd); | 2759 | free_irq(brd->irq, brd); |
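
The mxser hunks above rewrite the registration-failure loop so it unwinds the devices at indices i-1 down to 0 and leaves i at 0 afterwards, instead of driving i to -1. A tiny stand-alone demo of that unwind idiom, with invented names:

    #include <stdio.h>

    int main(void)
    {
        int registered[5] = {0};
        int i;

        for (i = 0; i < 5; i++) {
            if (i == 3)              /* pretend registration of port 3 failed */
                break;
            registered[i] = 1;
        }

        for (; i > 0; i--)           /* unwind ports i-1 .. 0 */
            registered[i - 1] = 0;

        printf("i after unwind = %d\n", i);   /* prints 0, ready for reuse */
        return 0;
    }
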
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250_core.c index cf6a5383748a..35f9c96aada9 100644 --- a/drivers/tty/serial/8250/8250.c +++ b/drivers/tty/serial/8250/8250_core.c | |||
@@ -3418,6 +3418,7 @@ MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA"); | |||
3418 | #endif | 3418 | #endif |
3419 | MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR); | 3419 | MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR); |
3420 | 3420 | ||
3421 | #ifdef CONFIG_SERIAL_8250_DEPRECATED_OPTIONS | ||
3421 | #ifndef MODULE | 3422 | #ifndef MODULE |
3422 | /* This module was renamed to 8250_core in 3.7. Keep the old "8250" name | 3423 | /* This module was renamed to 8250_core in 3.7. Keep the old "8250" name |
3423 | * working as well for the module options so we don't break people. We | 3424 | * working as well for the module options so we don't break people. We |
@@ -3432,7 +3433,7 @@ MODULE_ALIAS_CHARDEV_MAJOR(TTY_MAJOR); | |||
3432 | static void __used s8250_options(void) | 3433 | static void __used s8250_options(void) |
3433 | { | 3434 | { |
3434 | #undef MODULE_PARAM_PREFIX | 3435 | #undef MODULE_PARAM_PREFIX |
3435 | #define MODULE_PARAM_PREFIX "8250." | 3436 | #define MODULE_PARAM_PREFIX "8250_core." |
3436 | 3437 | ||
3437 | module_param_cb(share_irqs, &param_ops_uint, &share_irqs, 0644); | 3438 | module_param_cb(share_irqs, &param_ops_uint, &share_irqs, 0644); |
3438 | module_param_cb(nr_uarts, &param_ops_uint, &nr_uarts, 0644); | 3439 | module_param_cb(nr_uarts, &param_ops_uint, &nr_uarts, 0644); |
@@ -3444,5 +3445,6 @@ static void __used s8250_options(void) | |||
3444 | #endif | 3445 | #endif |

3445 | } | 3446 | } |
3446 | #else | 3447 | #else |
3447 | MODULE_ALIAS("8250"); | 3448 | MODULE_ALIAS("8250_core"); |
3449 | #endif | ||
3448 | #endif | 3450 | #endif |
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index aa76825229dc..26e3a97ab157 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
@@ -1554,6 +1554,7 @@ pci_wch_ch353_setup(struct serial_private *priv, | |||
1554 | #define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001 | 1554 | #define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001 |
1555 | #define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d | 1555 | #define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d |
1556 | #define PCI_VENDOR_ID_WCH 0x4348 | 1556 | #define PCI_VENDOR_ID_WCH 0x4348 |
1557 | #define PCI_DEVICE_ID_WCH_CH352_2S 0x3253 | ||
1557 | #define PCI_DEVICE_ID_WCH_CH353_4S 0x3453 | 1558 | #define PCI_DEVICE_ID_WCH_CH353_4S 0x3453 |
1558 | #define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046 | 1559 | #define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046 |
1559 | #define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053 | 1560 | #define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053 |
@@ -2172,6 +2173,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { | |||
2172 | .subdevice = PCI_ANY_ID, | 2173 | .subdevice = PCI_ANY_ID, |
2173 | .setup = pci_wch_ch353_setup, | 2174 | .setup = pci_wch_ch353_setup, |
2174 | }, | 2175 | }, |
2176 | /* WCH CH352 2S card (16550 clone) */ | ||
2177 | { | ||
2178 | .vendor = PCI_VENDOR_ID_WCH, | ||
2179 | .device = PCI_DEVICE_ID_WCH_CH352_2S, | ||
2180 | .subvendor = PCI_ANY_ID, | ||
2181 | .subdevice = PCI_ANY_ID, | ||
2182 | .setup = pci_wch_ch353_setup, | ||
2183 | }, | ||
2175 | /* | 2184 | /* |
2176 | * ASIX devices with FIFO bug | 2185 | * ASIX devices with FIFO bug |
2177 | */ | 2186 | */ |
@@ -4870,6 +4879,10 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
4870 | PCI_ANY_ID, PCI_ANY_ID, | 4879 | PCI_ANY_ID, PCI_ANY_ID, |
4871 | 0, 0, pbn_b0_bt_2_115200 }, | 4880 | 0, 0, pbn_b0_bt_2_115200 }, |
4872 | 4881 | ||
4882 | { PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH352_2S, | ||
4883 | PCI_ANY_ID, PCI_ANY_ID, | ||
4884 | 0, 0, pbn_b0_bt_2_115200 }, | ||
4885 | |||
4873 | /* | 4886 | /* |
4874 | * Commtech, Inc. Fastcom adapters | 4887 | * Commtech, Inc. Fastcom adapters |
4875 | */ | 4888 | */ |
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c index b3455a970a1d..35d9ab95c5cb 100644 --- a/drivers/tty/serial/8250/8250_pnp.c +++ b/drivers/tty/serial/8250/8250_pnp.c | |||
@@ -429,7 +429,6 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) | |||
429 | { | 429 | { |
430 | struct uart_8250_port uart; | 430 | struct uart_8250_port uart; |
431 | int ret, line, flags = dev_id->driver_data; | 431 | int ret, line, flags = dev_id->driver_data; |
432 | struct resource *res = NULL; | ||
433 | 432 | ||
434 | if (flags & UNKNOWN_DEV) { | 433 | if (flags & UNKNOWN_DEV) { |
435 | ret = serial_pnp_guess_board(dev); | 434 | ret = serial_pnp_guess_board(dev); |
@@ -440,12 +439,11 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) | |||
440 | memset(&uart, 0, sizeof(uart)); | 439 | memset(&uart, 0, sizeof(uart)); |
441 | if (pnp_irq_valid(dev, 0)) | 440 | if (pnp_irq_valid(dev, 0)) |
442 | uart.port.irq = pnp_irq(dev, 0); | 441 | uart.port.irq = pnp_irq(dev, 0); |
443 | if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) | 442 | if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) { |
444 | res = pnp_get_resource(dev, IORESOURCE_IO, 2); | 443 | uart.port.iobase = pnp_port_start(dev, 2); |
445 | else if (pnp_port_valid(dev, 0)) | 444 | uart.port.iotype = UPIO_PORT; |
446 | res = pnp_get_resource(dev, IORESOURCE_IO, 0); | 445 | } else if (pnp_port_valid(dev, 0)) { |
447 | if (pnp_resource_enabled(res)) { | 446 | uart.port.iobase = pnp_port_start(dev, 0); |
448 | uart.port.iobase = res->start; | ||
449 | uart.port.iotype = UPIO_PORT; | 447 | uart.port.iotype = UPIO_PORT; |
450 | } else if (pnp_mem_valid(dev, 0)) { | 448 | } else if (pnp_mem_valid(dev, 0)) { |
451 | uart.port.mapbase = pnp_mem_start(dev, 0); | 449 | uart.port.mapbase = pnp_mem_start(dev, 0); |
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index 2ef9537bcb2c..80fe91e64a52 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig | |||
@@ -33,6 +33,23 @@ config SERIAL_8250 | |||
33 | Most people will say Y or M here, so that they can use serial mice, | 33 | Most people will say Y or M here, so that they can use serial mice, |
34 | modems and similar devices connecting to the standard serial ports. | 34 | modems and similar devices connecting to the standard serial ports. |
35 | 35 | ||
36 | config SERIAL_8250_DEPRECATED_OPTIONS | ||
37 | bool "Support 8250_core.* kernel options (DEPRECATED)" | ||
38 | depends on SERIAL_8250 | ||
39 | default y | ||
40 | ---help--- | ||
41 | In 3.7 we renamed 8250 to 8250_core by mistake, so now we have to | ||
42 | accept kernel parameters in both forms like 8250_core.nr_uarts=4 and | ||
43 | 8250.nr_uarts=4. We now renamed the module back to 8250, but if | ||
44 | anybody noticed in 3.7 and changed their userspace we still have to | ||
45 | keep the 8350_core.* options around until they revert the changes | ||
46 | they already did. | ||
47 | |||
48 | If 8250 is built as a module, this adds 8250_core alias instead. | ||
49 | |||
50 | If you did not notice yet and/or you have userspace from pre-3.7, it | ||
51 | is safe (and recommended) to say N here. | ||
52 | |||
36 | config SERIAL_8250_PNP | 53 | config SERIAL_8250_PNP |
37 | bool "8250/16550 PNP device support" if EXPERT | 54 | bool "8250/16550 PNP device support" if EXPERT |
38 | depends on SERIAL_8250 && PNP | 55 | depends on SERIAL_8250 && PNP |
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile index a23838a4d535..36d68d054307 100644 --- a/drivers/tty/serial/8250/Makefile +++ b/drivers/tty/serial/8250/Makefile | |||
@@ -2,10 +2,10 @@ | |||
2 | # Makefile for the 8250 serial device drivers. | 2 | # Makefile for the 8250 serial device drivers. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_SERIAL_8250) += 8250_core.o | 5 | obj-$(CONFIG_SERIAL_8250) += 8250.o |
6 | 8250_core-y := 8250.o | 6 | 8250-y := 8250_core.o |
7 | 8250_core-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o | 7 | 8250-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o |
8 | 8250_core-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o | 8 | 8250-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o |
9 | obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o | 9 | obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o |
10 | obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o | 10 | obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o |
11 | obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o | 11 | obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o |
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index d4a7c241b751..3467462869ce 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c | |||
@@ -158,7 +158,7 @@ struct atmel_uart_port { | |||
158 | }; | 158 | }; |
159 | 159 | ||
160 | static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART]; | 160 | static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART]; |
161 | static unsigned long atmel_ports_in_use; | 161 | static DECLARE_BITMAP(atmel_ports_in_use, ATMEL_MAX_UART); |
162 | 162 | ||
163 | #ifdef SUPPORT_SYSRQ | 163 | #ifdef SUPPORT_SYSRQ |
164 | static struct console atmel_console; | 164 | static struct console atmel_console; |
@@ -1769,15 +1769,14 @@ static int atmel_serial_probe(struct platform_device *pdev) | |||
1769 | if (ret < 0) | 1769 | if (ret < 0) |
1770 | /* port id not found in platform data nor device-tree aliases: | 1770 | /* port id not found in platform data nor device-tree aliases: |
1771 | * auto-enumerate it */ | 1771 | * auto-enumerate it */ |
1772 | ret = find_first_zero_bit(&atmel_ports_in_use, | 1772 | ret = find_first_zero_bit(atmel_ports_in_use, ATMEL_MAX_UART); |
1773 | sizeof(atmel_ports_in_use)); | ||
1774 | 1773 | ||
1775 | if (ret > ATMEL_MAX_UART) { | 1774 | if (ret >= ATMEL_MAX_UART) { |
1776 | ret = -ENODEV; | 1775 | ret = -ENODEV; |
1777 | goto err; | 1776 | goto err; |
1778 | } | 1777 | } |
1779 | 1778 | ||
1780 | if (test_and_set_bit(ret, &atmel_ports_in_use)) { | 1779 | if (test_and_set_bit(ret, atmel_ports_in_use)) { |
1781 | /* port already in use */ | 1780 | /* port already in use */ |
1782 | ret = -EBUSY; | 1781 | ret = -EBUSY; |
1783 | goto err; | 1782 | goto err; |
@@ -1857,7 +1856,7 @@ static int atmel_serial_remove(struct platform_device *pdev) | |||
1857 | 1856 | ||
1858 | /* "port" is allocated statically, so we shouldn't free it */ | 1857 | /* "port" is allocated statically, so we shouldn't free it */ |
1859 | 1858 | ||
1860 | clear_bit(port->line, &atmel_ports_in_use); | 1859 | clear_bit(port->line, atmel_ports_in_use); |
1861 | 1860 | ||
1862 | clk_put(atmel_port->clk); | 1861 | clk_put(atmel_port->clk); |
1863 | 1862 | ||
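
The atmel_serial change above replaces a bare unsigned long with DECLARE_BITMAP() and passes the number of bits (ATMEL_MAX_UART) rather than sizeof() bytes to find_first_zero_bit(), and tightens the bounds check to >=. A sketch of the same bitmap idiom with invented names; like the driver code, it only builds inside a kernel tree.

    #include <linux/bitmap.h>
    #include <linux/bitops.h>
    #include <linux/errno.h>

    #define DEMO_MAX_PORTS 8

    static DECLARE_BITMAP(demo_ports_in_use, DEMO_MAX_PORTS);

    static int demo_claim_free_port(void)
    {
        int id = find_first_zero_bit(demo_ports_in_use, DEMO_MAX_PORTS);

        if (id >= DEMO_MAX_PORTS)
            return -ENODEV;                       /* every port already claimed */
        if (test_and_set_bit(id, demo_ports_in_use))
            return -EBUSY;                        /* lost a race for this slot */
        return id;
    }
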
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 4dc41408ecb7..30d4f7a783cd 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c | |||
@@ -886,6 +886,17 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios, | |||
886 | serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR); | 886 | serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR); |
887 | /* FIFO ENABLE, DMA MODE */ | 887 | /* FIFO ENABLE, DMA MODE */ |
888 | 888 | ||
889 | up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK; | ||
890 | /* | ||
891 | * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK | ||
892 | * sets Enables the granularity of 1 for TRIGGER RX | ||
893 | * level. Along with setting RX FIFO trigger level | ||
894 | * to 1 (as noted below, 16 characters) and TLR[3:0] | ||
895 | * to zero this will result RX FIFO threshold level | ||
896 | * to 1 character, instead of 16 as noted in comment | ||
897 | * below. | ||
898 | */ | ||
899 | |||
889 | /* Set receive FIFO threshold to 16 characters and | 900 | /* Set receive FIFO threshold to 16 characters and |
890 | * transmit FIFO threshold to 16 spaces | 901 | * transmit FIFO threshold to 16 spaces |
891 | */ | 902 | */ |
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index e343d6670854..451687cb9685 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c | |||
@@ -968,6 +968,7 @@ static struct uart_ops sunsu_pops = { | |||
968 | #define UART_NR 4 | 968 | #define UART_NR 4 |
969 | 969 | ||
970 | static struct uart_sunsu_port sunsu_ports[UART_NR]; | 970 | static struct uart_sunsu_port sunsu_ports[UART_NR]; |
971 | static int nr_inst; /* Number of already registered ports */ | ||
971 | 972 | ||
972 | #ifdef CONFIG_SERIO | 973 | #ifdef CONFIG_SERIO |
973 | 974 | ||
@@ -1337,13 +1338,8 @@ static int __init sunsu_console_setup(struct console *co, char *options) | |||
1337 | printk("Console: ttyS%d (SU)\n", | 1338 | printk("Console: ttyS%d (SU)\n", |
1338 | (sunsu_reg.minor - 64) + co->index); | 1339 | (sunsu_reg.minor - 64) + co->index); |
1339 | 1340 | ||
1340 | /* | 1341 | if (co->index > nr_inst) |
1341 | * Check whether an invalid uart number has been specified, and | 1342 | return -ENODEV; |
1342 | * if so, search for the first available port that does have | ||
1343 | * console support. | ||
1344 | */ | ||
1345 | if (co->index >= UART_NR) | ||
1346 | co->index = 0; | ||
1347 | port = &sunsu_ports[co->index].port; | 1343 | port = &sunsu_ports[co->index].port; |
1348 | 1344 | ||
1349 | /* | 1345 | /* |
@@ -1408,7 +1404,6 @@ static enum su_type su_get_type(struct device_node *dp) | |||
1408 | 1404 | ||
1409 | static int su_probe(struct platform_device *op) | 1405 | static int su_probe(struct platform_device *op) |
1410 | { | 1406 | { |
1411 | static int inst; | ||
1412 | struct device_node *dp = op->dev.of_node; | 1407 | struct device_node *dp = op->dev.of_node; |
1413 | struct uart_sunsu_port *up; | 1408 | struct uart_sunsu_port *up; |
1414 | struct resource *rp; | 1409 | struct resource *rp; |
@@ -1418,16 +1413,16 @@ static int su_probe(struct platform_device *op) | |||
1418 | 1413 | ||
1419 | type = su_get_type(dp); | 1414 | type = su_get_type(dp); |
1420 | if (type == SU_PORT_PORT) { | 1415 | if (type == SU_PORT_PORT) { |
1421 | if (inst >= UART_NR) | 1416 | if (nr_inst >= UART_NR) |
1422 | return -EINVAL; | 1417 | return -EINVAL; |
1423 | up = &sunsu_ports[inst]; | 1418 | up = &sunsu_ports[nr_inst]; |
1424 | } else { | 1419 | } else { |
1425 | up = kzalloc(sizeof(*up), GFP_KERNEL); | 1420 | up = kzalloc(sizeof(*up), GFP_KERNEL); |
1426 | if (!up) | 1421 | if (!up) |
1427 | return -ENOMEM; | 1422 | return -ENOMEM; |
1428 | } | 1423 | } |
1429 | 1424 | ||
1430 | up->port.line = inst; | 1425 | up->port.line = nr_inst; |
1431 | 1426 | ||
1432 | spin_lock_init(&up->port.lock); | 1427 | spin_lock_init(&up->port.lock); |
1433 | 1428 | ||
@@ -1461,6 +1456,8 @@ static int su_probe(struct platform_device *op) | |||
1461 | } | 1456 | } |
1462 | dev_set_drvdata(&op->dev, up); | 1457 | dev_set_drvdata(&op->dev, up); |
1463 | 1458 | ||
1459 | nr_inst++; | ||
1460 | |||
1464 | return 0; | 1461 | return 0; |
1465 | } | 1462 | } |
1466 | 1463 | ||
@@ -1488,7 +1485,7 @@ static int su_probe(struct platform_device *op) | |||
1488 | 1485 | ||
1489 | dev_set_drvdata(&op->dev, up); | 1486 | dev_set_drvdata(&op->dev, up); |
1490 | 1487 | ||
1491 | inst++; | 1488 | nr_inst++; |
1492 | 1489 | ||
1493 | return 0; | 1490 | return 0; |
1494 | 1491 | ||
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index ba451c7209fc..f36bbba1ac8b 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c | |||
@@ -578,6 +578,8 @@ static int xuartps_startup(struct uart_port *port) | |||
578 | /* Receive Timeout register is enabled with value of 10 */ | 578 | /* Receive Timeout register is enabled with value of 10 */ |
579 | xuartps_writel(10, XUARTPS_RXTOUT_OFFSET); | 579 | xuartps_writel(10, XUARTPS_RXTOUT_OFFSET); |
580 | 580 | ||
581 | /* Clear out any pending interrupts before enabling them */ | ||
582 | xuartps_writel(xuartps_readl(XUARTPS_ISR_OFFSET), XUARTPS_ISR_OFFSET); | ||
581 | 583 | ||
582 | /* Set the Interrupt Registers with desired interrupts */ | 584 | /* Set the Interrupt Registers with desired interrupts */ |
583 | xuartps_writel(XUARTPS_IXR_TXEMPTY | XUARTPS_IXR_PARITY | | 585 | xuartps_writel(XUARTPS_IXR_TXEMPTY | XUARTPS_IXR_PARITY | |
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c index e4ca345873c3..d7799deacb21 100644 --- a/drivers/tty/vt/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c | |||
@@ -93,7 +93,7 @@ vcs_poll_data_free(struct vcs_poll_data *poll) | |||
93 | static struct vcs_poll_data * | 93 | static struct vcs_poll_data * |
94 | vcs_poll_data_get(struct file *file) | 94 | vcs_poll_data_get(struct file *file) |
95 | { | 95 | { |
96 | struct vcs_poll_data *poll = file->private_data; | 96 | struct vcs_poll_data *poll = file->private_data, *kill = NULL; |
97 | 97 | ||
98 | if (poll) | 98 | if (poll) |
99 | return poll; | 99 | return poll; |
@@ -122,10 +122,12 @@ vcs_poll_data_get(struct file *file) | |||
122 | file->private_data = poll; | 122 | file->private_data = poll; |
123 | } else { | 123 | } else { |
124 | /* someone else raced ahead of us */ | 124 | /* someone else raced ahead of us */ |
125 | vcs_poll_data_free(poll); | 125 | kill = poll; |
126 | poll = file->private_data; | 126 | poll = file->private_data; |
127 | } | 127 | } |
128 | spin_unlock(&file->f_lock); | 128 | spin_unlock(&file->f_lock); |
129 | if (kill) | ||
130 | vcs_poll_data_free(kill); | ||
129 | 131 | ||
130 | return poll; | 132 | return poll; |
131 | } | 133 | } |
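
The vc_screen fix above defers freeing the losing allocation until after file->f_lock is dropped, rather than calling vcs_poll_data_free() while still holding the spinlock. A stand-alone pthreads sketch of that race-loser pattern; struct obj, shared and get_or_install() are invented for illustration.

    #include <pthread.h>
    #include <stdlib.h>

    struct obj { int dummy; };

    static pthread_mutex_t f_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct obj *shared;            /* stands in for file->private_data */

    struct obj *get_or_install(struct obj *mine)
    {
        struct obj *kill = NULL, *ret;

        pthread_mutex_lock(&f_lock);
        if (!shared)
            shared = mine;                /* we won the race */
        else
            kill = mine;                  /* we lost: remember ours for later */
        ret = shared;
        pthread_mutex_unlock(&f_lock);

        free(kill);                       /* safe now, no lock analogue held */
        return ret;
    }
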
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 8ac25adf31b4..387dc6c8ad25 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -593,7 +593,6 @@ static void acm_port_destruct(struct tty_port *port) | |||
593 | 593 | ||
594 | dev_dbg(&acm->control->dev, "%s\n", __func__); | 594 | dev_dbg(&acm->control->dev, "%s\n", __func__); |
595 | 595 | ||
596 | tty_unregister_device(acm_tty_driver, acm->minor); | ||
597 | acm_release_minor(acm); | 596 | acm_release_minor(acm); |
598 | usb_put_intf(acm->control); | 597 | usb_put_intf(acm->control); |
599 | kfree(acm->country_codes); | 598 | kfree(acm->country_codes); |
@@ -977,6 +976,8 @@ static int acm_probe(struct usb_interface *intf, | |||
977 | int num_rx_buf; | 976 | int num_rx_buf; |
978 | int i; | 977 | int i; |
979 | int combined_interfaces = 0; | 978 | int combined_interfaces = 0; |
979 | struct device *tty_dev; | ||
980 | int rv = -ENOMEM; | ||
980 | 981 | ||
981 | /* normal quirks */ | 982 | /* normal quirks */ |
982 | quirks = (unsigned long)id->driver_info; | 983 | quirks = (unsigned long)id->driver_info; |
@@ -1339,11 +1340,24 @@ skip_countries: | |||
1339 | usb_set_intfdata(data_interface, acm); | 1340 | usb_set_intfdata(data_interface, acm); |
1340 | 1341 | ||
1341 | usb_get_intf(control_interface); | 1342 | usb_get_intf(control_interface); |
1342 | tty_port_register_device(&acm->port, acm_tty_driver, minor, | 1343 | tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor, |
1343 | &control_interface->dev); | 1344 | &control_interface->dev); |
1345 | if (IS_ERR(tty_dev)) { | ||
1346 | rv = PTR_ERR(tty_dev); | ||
1347 | goto alloc_fail8; | ||
1348 | } | ||
1344 | 1349 | ||
1345 | return 0; | 1350 | return 0; |
1351 | alloc_fail8: | ||
1352 | if (acm->country_codes) { | ||
1353 | device_remove_file(&acm->control->dev, | ||
1354 | &dev_attr_wCountryCodes); | ||
1355 | device_remove_file(&acm->control->dev, | ||
1356 | &dev_attr_iCountryCodeRelDate); | ||
1357 | } | ||
1358 | device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities); | ||
1346 | alloc_fail7: | 1359 | alloc_fail7: |
1360 | usb_set_intfdata(intf, NULL); | ||
1347 | for (i = 0; i < ACM_NW; i++) | 1361 | for (i = 0; i < ACM_NW; i++) |
1348 | usb_free_urb(acm->wb[i].urb); | 1362 | usb_free_urb(acm->wb[i].urb); |
1349 | alloc_fail6: | 1363 | alloc_fail6: |
@@ -1359,7 +1373,7 @@ alloc_fail2: | |||
1359 | acm_release_minor(acm); | 1373 | acm_release_minor(acm); |
1360 | kfree(acm); | 1374 | kfree(acm); |
1361 | alloc_fail: | 1375 | alloc_fail: |
1362 | return -ENOMEM; | 1376 | return rv; |
1363 | } | 1377 | } |
1364 | 1378 | ||
1365 | static void stop_data_traffic(struct acm *acm) | 1379 | static void stop_data_traffic(struct acm *acm) |
@@ -1411,6 +1425,8 @@ static void acm_disconnect(struct usb_interface *intf) | |||
1411 | 1425 | ||
1412 | stop_data_traffic(acm); | 1426 | stop_data_traffic(acm); |
1413 | 1427 | ||
1428 | tty_unregister_device(acm_tty_driver, acm->minor); | ||
1429 | |||
1414 | usb_free_urb(acm->ctrlurb); | 1430 | usb_free_urb(acm->ctrlurb); |
1415 | for (i = 0; i < ACM_NW; i++) | 1431 | for (i = 0; i < ACM_NW; i++) |
1416 | usb_free_urb(acm->wb[i].urb); | 1432 | usb_free_urb(acm->wb[i].urb); |
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 622b4a48e732..2b487d4797bd 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c | |||
@@ -173,6 +173,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
173 | struct hc_driver *driver; | 173 | struct hc_driver *driver; |
174 | struct usb_hcd *hcd; | 174 | struct usb_hcd *hcd; |
175 | int retval; | 175 | int retval; |
176 | int hcd_irq = 0; | ||
176 | 177 | ||
177 | if (usb_disabled()) | 178 | if (usb_disabled()) |
178 | return -ENODEV; | 179 | return -ENODEV; |
@@ -187,15 +188,19 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
187 | return -ENODEV; | 188 | return -ENODEV; |
188 | dev->current_state = PCI_D0; | 189 | dev->current_state = PCI_D0; |
189 | 190 | ||
190 | /* The xHCI driver supports MSI and MSI-X, | 191 | /* |
191 | * so don't fail if the BIOS doesn't provide a legacy IRQ. | 192 | * The xHCI driver has its own irq management |
193 | * make sure irq setup is not touched for xhci in generic hcd code | ||
192 | */ | 194 | */ |
193 | if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) { | 195 | if ((driver->flags & HCD_MASK) != HCD_USB3) { |
194 | dev_err(&dev->dev, | 196 | if (!dev->irq) { |
195 | "Found HC with no IRQ. Check BIOS/PCI %s setup!\n", | 197 | dev_err(&dev->dev, |
196 | pci_name(dev)); | 198 | "Found HC with no IRQ. Check BIOS/PCI %s setup!\n", |
197 | retval = -ENODEV; | 199 | pci_name(dev)); |
198 | goto disable_pci; | 200 | retval = -ENODEV; |
201 | goto disable_pci; | ||
202 | } | ||
203 | hcd_irq = dev->irq; | ||
199 | } | 204 | } |
200 | 205 | ||
201 | hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev)); | 206 | hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev)); |
@@ -245,7 +250,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
245 | 250 | ||
246 | pci_set_master(dev); | 251 | pci_set_master(dev); |
247 | 252 | ||
248 | retval = usb_add_hcd(hcd, dev->irq, IRQF_SHARED); | 253 | retval = usb_add_hcd(hcd, hcd_irq, IRQF_SHARED); |
249 | if (retval != 0) | 254 | if (retval != 0) |
250 | goto unmap_registers; | 255 | goto unmap_registers; |
251 | set_hs_companion(dev, hcd); | 256 | set_hs_companion(dev, hcd); |
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 99b34a30354f..f9ec44cbb82f 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -2412,6 +2412,14 @@ int usb_hcd_is_primary_hcd(struct usb_hcd *hcd) | |||
2412 | } | 2412 | } |
2413 | EXPORT_SYMBOL_GPL(usb_hcd_is_primary_hcd); | 2413 | EXPORT_SYMBOL_GPL(usb_hcd_is_primary_hcd); |
2414 | 2414 | ||
2415 | int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1) | ||
2416 | { | ||
2417 | if (!hcd->driver->find_raw_port_number) | ||
2418 | return port1; | ||
2419 | |||
2420 | return hcd->driver->find_raw_port_number(hcd, port1); | ||
2421 | } | ||
2422 | |||
2415 | static int usb_hcd_request_irqs(struct usb_hcd *hcd, | 2423 | static int usb_hcd_request_irqs(struct usb_hcd *hcd, |
2416 | unsigned int irqnum, unsigned long irqflags) | 2424 | unsigned int irqnum, unsigned long irqflags) |
2417 | { | 2425 | { |
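
usb_hcd_find_raw_port_number(), added above, follows the usual optional-hook shape: if the host controller driver supplies a find_raw_port_number op it is called, otherwise the port number is returned unchanged. A self-contained sketch of that shape, with invented demo_* types rather than the real usb_hcd structures:

    struct demo_driver {
        int (*find_raw_port_number)(void *ctx, int port1);   /* optional, may be NULL */
    };

    struct demo_hcd {
        const struct demo_driver *driver;
        void *ctx;
    };

    int demo_find_raw_port_number(struct demo_hcd *hcd, int port1)
    {
        if (!hcd->driver->find_raw_port_number)
            return port1;                     /* default: identity mapping */
        return hcd->driver->find_raw_port_number(hcd->ctx, port1);
    }
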
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index 797f9d514732..65d4e55552c6 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c | |||
@@ -67,7 +67,6 @@ static void usb_port_device_release(struct device *dev) | |||
67 | { | 67 | { |
68 | struct usb_port *port_dev = to_usb_port(dev); | 68 | struct usb_port *port_dev = to_usb_port(dev); |
69 | 69 | ||
70 | dev_pm_qos_hide_flags(dev); | ||
71 | kfree(port_dev); | 70 | kfree(port_dev); |
72 | } | 71 | } |
73 | 72 | ||
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index b6f4bad3f756..255c14464bf2 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/acpi.h> | 16 | #include <linux/acpi.h> |
17 | #include <linux/pci.h> | 17 | #include <linux/pci.h> |
18 | #include <linux/usb/hcd.h> | ||
18 | #include <acpi/acpi_bus.h> | 19 | #include <acpi/acpi_bus.h> |
19 | 20 | ||
20 | #include "usb.h" | 21 | #include "usb.h" |
@@ -188,8 +189,13 @@ static int usb_acpi_find_device(struct device *dev, acpi_handle *handle) | |||
188 | * connected to. | 189 | * connected to. |
189 | */ | 190 | */ |
190 | if (!udev->parent) { | 191 | if (!udev->parent) { |
191 | *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev), | 192 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); |
193 | int raw_port_num; | ||
194 | |||
195 | raw_port_num = usb_hcd_find_raw_port_number(hcd, | ||
192 | port_num); | 196 | port_num); |
197 | *handle = acpi_get_child(DEVICE_ACPI_HANDLE(&udev->dev), | ||
198 | raw_port_num); | ||
193 | if (!*handle) | 199 | if (!*handle) |
194 | return -ENODEV; | 200 | return -ENODEV; |
195 | } else { | 201 | } else { |
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 5a0c541daf89..c7525b1cad74 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig | |||
@@ -145,6 +145,7 @@ config USB_LPC32XX | |||
145 | tristate "LPC32XX USB Peripheral Controller" | 145 | tristate "LPC32XX USB Peripheral Controller" |
146 | depends on ARCH_LPC32XX | 146 | depends on ARCH_LPC32XX |
147 | select USB_ISP1301 | 147 | select USB_ISP1301 |
148 | select USB_OTG_UTILS | ||
148 | help | 149 | help |
149 | This option selects the USB device controller in the LPC32xx SoC. | 150 | This option selects the USB device controller in the LPC32xx SoC. |
150 | 151 | ||
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 71beeb833558..cc9c49c57c80 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c | |||
@@ -447,14 +447,13 @@ static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req) | |||
447 | static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req) | 447 | static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req) |
448 | { | 448 | { |
449 | struct f_rndis *rndis = req->context; | 449 | struct f_rndis *rndis = req->context; |
450 | struct usb_composite_dev *cdev = rndis->port.func.config->cdev; | ||
451 | int status; | 450 | int status; |
452 | 451 | ||
453 | /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */ | 452 | /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */ |
454 | // spin_lock(&dev->lock); | 453 | // spin_lock(&dev->lock); |
455 | status = rndis_msg_parser(rndis->config, (u8 *) req->buf); | 454 | status = rndis_msg_parser(rndis->config, (u8 *) req->buf); |
456 | if (status < 0) | 455 | if (status < 0) |
457 | ERROR(cdev, "RNDIS command error %d, %d/%d\n", | 456 | pr_err("RNDIS command error %d, %d/%d\n", |
458 | status, req->actual, req->length); | 457 | status, req->actual, req->length); |
459 | // spin_unlock(&dev->lock); | 458 | // spin_unlock(&dev->lock); |
460 | } | 459 | } |
diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c index 3953dd4d7186..3b343b23e4b0 100644 --- a/drivers/usb/gadget/g_ffs.c +++ b/drivers/usb/gadget/g_ffs.c | |||
@@ -357,7 +357,7 @@ static int gfs_bind(struct usb_composite_dev *cdev) | |||
357 | goto error; | 357 | goto error; |
358 | gfs_dev_desc.iProduct = gfs_strings[USB_GADGET_PRODUCT_IDX].id; | 358 | gfs_dev_desc.iProduct = gfs_strings[USB_GADGET_PRODUCT_IDX].id; |
359 | 359 | ||
360 | for (i = func_num; --i; ) { | 360 | for (i = func_num; i--; ) { |
361 | ret = functionfs_bind(ffs_tab[i].ffs_data, cdev); | 361 | ret = functionfs_bind(ffs_tab[i].ffs_data, cdev); |
362 | if (unlikely(ret < 0)) { | 362 | if (unlikely(ret < 0)) { |
363 | while (++i < func_num) | 363 | while (++i < func_num) |
@@ -413,7 +413,7 @@ static int gfs_unbind(struct usb_composite_dev *cdev) | |||
413 | gether_cleanup(); | 413 | gether_cleanup(); |
414 | gfs_ether_setup = false; | 414 | gfs_ether_setup = false; |
415 | 415 | ||
416 | for (i = func_num; --i; ) | 416 | for (i = func_num; i--; ) |
417 | if (ffs_tab[i].ffs_data) | 417 | if (ffs_tab[i].ffs_data) |
418 | functionfs_unbind(ffs_tab[i].ffs_data); | 418 | functionfs_unbind(ffs_tab[i].ffs_data); |
419 | 419 | ||
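
The g_ffs loops above change the countdown from "for (i = func_num; --i; )", which never visits index 0, to "for (i = func_num; i--; )", which walks n-1 down to 0. A quick stand-alone demo of the difference:

    #include <stdio.h>

    int main(void)
    {
        int n = 3, i;

        printf("--i:");
        for (i = n; --i; )
            printf(" %d", i);      /* prints 2 1: index 0 is never visited */

        printf("\ni--:");
        for (i = n; i--; )
            printf(" %d", i);      /* prints 2 1 0 */

        printf("\n");
        return 0;
    }
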
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c index d226058e3b88..32524b631959 100644 --- a/drivers/usb/gadget/net2272.c +++ b/drivers/usb/gadget/net2272.c | |||
@@ -59,7 +59,7 @@ static const char * const ep_name[] = { | |||
59 | }; | 59 | }; |
60 | 60 | ||
61 | #define DMA_ADDR_INVALID (~(dma_addr_t)0) | 61 | #define DMA_ADDR_INVALID (~(dma_addr_t)0) |
62 | #ifdef CONFIG_USB_GADGET_NET2272_DMA | 62 | #ifdef CONFIG_USB_NET2272_DMA |
63 | /* | 63 | /* |
64 | * use_dma: the NET2272 can use an external DMA controller. | 64 | * use_dma: the NET2272 can use an external DMA controller. |
65 | * Note that since there is no generic DMA api, some functions, | 65 | * Note that since there is no generic DMA api, some functions, |
@@ -1495,6 +1495,13 @@ stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver) | |||
1495 | for (i = 0; i < 4; ++i) | 1495 | for (i = 0; i < 4; ++i) |
1496 | net2272_dequeue_all(&dev->ep[i]); | 1496 | net2272_dequeue_all(&dev->ep[i]); |
1497 | 1497 | ||
1498 | /* report disconnect; the driver is already quiesced */ | ||
1499 | if (driver) { | ||
1500 | spin_unlock(&dev->lock); | ||
1501 | driver->disconnect(&dev->gadget); | ||
1502 | spin_lock(&dev->lock); | ||
1503 | } | ||
1504 | |||
1498 | net2272_usb_reinit(dev); | 1505 | net2272_usb_reinit(dev); |
1499 | } | 1506 | } |
1500 | 1507 | ||
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c index a1b650e11339..3bd0f992fb49 100644 --- a/drivers/usb/gadget/net2280.c +++ b/drivers/usb/gadget/net2280.c | |||
@@ -1924,7 +1924,6 @@ static int net2280_start(struct usb_gadget *_gadget, | |||
1924 | err_func: | 1924 | err_func: |
1925 | device_remove_file (&dev->pdev->dev, &dev_attr_function); | 1925 | device_remove_file (&dev->pdev->dev, &dev_attr_function); |
1926 | err_unbind: | 1926 | err_unbind: |
1927 | driver->unbind (&dev->gadget); | ||
1928 | dev->gadget.dev.driver = NULL; | 1927 | dev->gadget.dev.driver = NULL; |
1929 | dev->driver = NULL; | 1928 | dev->driver = NULL; |
1930 | return retval; | 1929 | return retval; |
@@ -1946,6 +1945,13 @@ stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver) | |||
1946 | for (i = 0; i < 7; i++) | 1945 | for (i = 0; i < 7; i++) |
1947 | nuke (&dev->ep [i]); | 1946 | nuke (&dev->ep [i]); |
1948 | 1947 | ||
1948 | /* report disconnect; the driver is already quiesced */ | ||
1949 | if (driver) { | ||
1950 | spin_unlock(&dev->lock); | ||
1951 | driver->disconnect(&dev->gadget); | ||
1952 | spin_lock(&dev->lock); | ||
1953 | } | ||
1954 | |||
1949 | usb_reinit (dev); | 1955 | usb_reinit (dev); |
1950 | } | 1956 | } |
1951 | 1957 | ||
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c index c5034d9c946b..b369292d4b90 100644 --- a/drivers/usb/gadget/u_serial.c +++ b/drivers/usb/gadget/u_serial.c | |||
@@ -136,7 +136,7 @@ static struct portmaster { | |||
136 | pr_debug(fmt, ##arg) | 136 | pr_debug(fmt, ##arg) |
137 | #endif /* pr_vdebug */ | 137 | #endif /* pr_vdebug */ |
138 | #else | 138 | #else |
139 | #ifndef pr_vdebig | 139 | #ifndef pr_vdebug |
140 | #define pr_vdebug(fmt, arg...) \ | 140 | #define pr_vdebug(fmt, arg...) \ |
141 | ({ if (0) pr_debug(fmt, ##arg); }) | 141 | ({ if (0) pr_debug(fmt, ##arg); }) |
142 | #endif /* pr_vdebug */ | 142 | #endif /* pr_vdebug */ |
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c index 2a9cd369f71c..f8f62c3ed65e 100644 --- a/drivers/usb/gadget/udc-core.c +++ b/drivers/usb/gadget/udc-core.c | |||
@@ -216,7 +216,7 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) | |||
216 | usb_gadget_disconnect(udc->gadget); | 216 | usb_gadget_disconnect(udc->gadget); |
217 | udc->driver->disconnect(udc->gadget); | 217 | udc->driver->disconnect(udc->gadget); |
218 | udc->driver->unbind(udc->gadget); | 218 | udc->driver->unbind(udc->gadget); |
219 | usb_gadget_udc_stop(udc->gadget, udc->driver); | 219 | usb_gadget_udc_stop(udc->gadget, NULL); |
220 | 220 | ||
221 | udc->driver = NULL; | 221 | udc->driver = NULL; |
222 | udc->dev.driver = NULL; | 222 | udc->dev.driver = NULL; |
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 5726cb144abf..416a6dce5e11 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c | |||
@@ -302,6 +302,7 @@ static void ehci_quiesce (struct ehci_hcd *ehci) | |||
302 | 302 | ||
303 | static void end_unlink_async(struct ehci_hcd *ehci); | 303 | static void end_unlink_async(struct ehci_hcd *ehci); |
304 | static void unlink_empty_async(struct ehci_hcd *ehci); | 304 | static void unlink_empty_async(struct ehci_hcd *ehci); |
305 | static void unlink_empty_async_suspended(struct ehci_hcd *ehci); | ||
305 | static void ehci_work(struct ehci_hcd *ehci); | 306 | static void ehci_work(struct ehci_hcd *ehci); |
306 | static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); | 307 | static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); |
307 | static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); | 308 | static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); |
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 4d3b294f203e..7d06e77f6c4f 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
@@ -328,7 +328,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) | |||
328 | ehci->rh_state = EHCI_RH_SUSPENDED; | 328 | ehci->rh_state = EHCI_RH_SUSPENDED; |
329 | 329 | ||
330 | end_unlink_async(ehci); | 330 | end_unlink_async(ehci); |
331 | unlink_empty_async(ehci); | 331 | unlink_empty_async_suspended(ehci); |
332 | ehci_handle_intr_unlinks(ehci); | 332 | ehci_handle_intr_unlinks(ehci); |
333 | end_free_itds(ehci); | 333 | end_free_itds(ehci); |
334 | 334 | ||
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 5464665f0b6a..23d136904285 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c | |||
@@ -1316,6 +1316,19 @@ static void unlink_empty_async(struct ehci_hcd *ehci) | |||
1316 | } | 1316 | } |
1317 | } | 1317 | } |
1318 | 1318 | ||
1319 | /* The root hub is suspended; unlink all the async QHs */ | ||
1320 | static void unlink_empty_async_suspended(struct ehci_hcd *ehci) | ||
1321 | { | ||
1322 | struct ehci_qh *qh; | ||
1323 | |||
1324 | while (ehci->async->qh_next.qh) { | ||
1325 | qh = ehci->async->qh_next.qh; | ||
1326 | WARN_ON(!list_empty(&qh->qtd_list)); | ||
1327 | single_unlink_async(ehci, qh); | ||
1328 | } | ||
1329 | start_iaa_cycle(ehci, false); | ||
1330 | } | ||
1331 | |||
1319 | /* makes sure the async qh will become idle */ | 1332 | /* makes sure the async qh will become idle */ |
1320 | /* caller must own ehci->lock */ | 1333 | /* caller must own ehci->lock */ |
1321 | 1334 | ||
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index b476daf49f6f..010f686d8881 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
@@ -1214,6 +1214,7 @@ itd_urb_transaction ( | |||
1214 | 1214 | ||
1215 | memset (itd, 0, sizeof *itd); | 1215 | memset (itd, 0, sizeof *itd); |
1216 | itd->itd_dma = itd_dma; | 1216 | itd->itd_dma = itd_dma; |
1217 | itd->frame = 9999; /* an invalid value */ | ||
1217 | list_add (&itd->itd_list, &sched->td_list); | 1218 | list_add (&itd->itd_list, &sched->td_list); |
1218 | } | 1219 | } |
1219 | spin_unlock_irqrestore (&ehci->lock, flags); | 1220 | spin_unlock_irqrestore (&ehci->lock, flags); |
@@ -1915,6 +1916,7 @@ sitd_urb_transaction ( | |||
1915 | 1916 | ||
1916 | memset (sitd, 0, sizeof *sitd); | 1917 | memset (sitd, 0, sizeof *sitd); |
1917 | sitd->sitd_dma = sitd_dma; | 1918 | sitd->sitd_dma = sitd_dma; |
1919 | sitd->frame = 9999; /* an invalid value */ | ||
1918 | list_add (&sitd->sitd_list, &iso_sched->td_list); | 1920 | list_add (&sitd->sitd_list, &iso_sched->td_list); |
1919 | } | 1921 | } |
1920 | 1922 | ||
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c index 20dbdcbe9b0f..c3fa1305f830 100644 --- a/drivers/usb/host/ehci-timer.c +++ b/drivers/usb/host/ehci-timer.c | |||
@@ -304,7 +304,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci) | |||
304 | * (a) SMP races against real IAA firing and retriggering, and | 304 | * (a) SMP races against real IAA firing and retriggering, and |
305 | * (b) clean HC shutdown, when IAA watchdog was pending. | 305 | * (b) clean HC shutdown, when IAA watchdog was pending. |
306 | */ | 306 | */ |
307 | if (ehci->async_iaa) { | 307 | if (1) { |
308 | u32 cmd, status; | 308 | u32 cmd, status; |
309 | 309 | ||
310 | /* If we get here, IAA is *REALLY* late. It's barely | 310 | /* If we get here, IAA is *REALLY* late. It's barely |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 35616ffbe3ae..6dc238c592bc 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -1022,44 +1022,24 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, | |||
1022 | * is attached to (or the roothub port its ancestor hub is attached to). All we | 1022 | * is attached to (or the roothub port its ancestor hub is attached to). All we |
1023 | * know is the index of that port under either the USB 2.0 or the USB 3.0 | 1023 | * know is the index of that port under either the USB 2.0 or the USB 3.0 |
1024 | * roothub, but that doesn't give us the real index into the HW port status | 1024 | * roothub, but that doesn't give us the real index into the HW port status |
1025 | * registers. Scan through the xHCI roothub port array, looking for the Nth | 1025 | * registers. Call xhci_find_raw_port_number() to get the real index. |
1026 | * entry of the correct port speed. Return the port number of that entry. | ||
1027 | */ | 1026 | */ |
1028 | static u32 xhci_find_real_port_number(struct xhci_hcd *xhci, | 1027 | static u32 xhci_find_real_port_number(struct xhci_hcd *xhci, |
1029 | struct usb_device *udev) | 1028 | struct usb_device *udev) |
1030 | { | 1029 | { |
1031 | struct usb_device *top_dev; | 1030 | struct usb_device *top_dev; |
1032 | unsigned int num_similar_speed_ports; | 1031 | struct usb_hcd *hcd; |
1033 | unsigned int faked_port_num; | 1032 | |
1034 | int i; | 1033 | if (udev->speed == USB_SPEED_SUPER) |
1034 | hcd = xhci->shared_hcd; | ||
1035 | else | ||
1036 | hcd = xhci->main_hcd; | ||
1035 | 1037 | ||
1036 | for (top_dev = udev; top_dev->parent && top_dev->parent->parent; | 1038 | for (top_dev = udev; top_dev->parent && top_dev->parent->parent; |
1037 | top_dev = top_dev->parent) | 1039 | top_dev = top_dev->parent) |
1038 | /* Found device below root hub */; | 1040 | /* Found device below root hub */; |
1039 | faked_port_num = top_dev->portnum; | ||
1040 | for (i = 0, num_similar_speed_ports = 0; | ||
1041 | i < HCS_MAX_PORTS(xhci->hcs_params1); i++) { | ||
1042 | u8 port_speed = xhci->port_array[i]; | ||
1043 | |||
1044 | /* | ||
1045 | * Skip ports that don't have known speeds, or have duplicate | ||
1046 | * Extended Capabilities port speed entries. | ||
1047 | */ | ||
1048 | if (port_speed == 0 || port_speed == DUPLICATE_ENTRY) | ||
1049 | continue; | ||
1050 | 1041 | ||
1051 | /* | 1042 | return xhci_find_raw_port_number(hcd, top_dev->portnum); |
1052 | * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and | ||
1053 | * 1.1 ports are under the USB 2.0 hub. If the port speed | ||
1054 | * matches the device speed, it's a similar speed port. | ||
1055 | */ | ||
1056 | if ((port_speed == 0x03) == (udev->speed == USB_SPEED_SUPER)) | ||
1057 | num_similar_speed_ports++; | ||
1058 | if (num_similar_speed_ports == faked_port_num) | ||
1059 | /* Roothub ports are numbered from 1 to N */ | ||
1060 | return i+1; | ||
1061 | } | ||
1062 | return 0; | ||
1063 | } | 1043 | } |
1064 | 1044 | ||
1065 | /* Setup an xHCI virtual device for a Set Address command */ | 1045 | /* Setup an xHCI virtual device for a Set Address command */ |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index af259e0ec172..1a30c380043c 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -313,6 +313,7 @@ static const struct hc_driver xhci_pci_hc_driver = { | |||
313 | .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm, | 313 | .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm, |
314 | .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout, | 314 | .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout, |
315 | .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout, | 315 | .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout, |
316 | .find_raw_port_number = xhci_find_raw_port_number, | ||
316 | }; | 317 | }; |
317 | 318 | ||
318 | /*-------------------------------------------------------------------------*/ | 319 | /*-------------------------------------------------------------------------*/ |
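
The new .find_raw_port_number operation gives usbcore a way to translate a roothub-relative port number into the raw index used by the xHCI port registers. A hedged sketch of how a core-side caller could dispatch it; the wrapper name here is illustrative and not part of this diff:

    #include <linux/usb/hcd.h>

    /* fall back to the logical number when an HCD has no translation op */
    static int example_find_raw_port_number(struct usb_hcd *hcd, int port1)
    {
        if (!hcd->driver->find_raw_port_number)
            return port1;
        return hcd->driver->find_raw_port_number(hcd, port1);
    }
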
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 882875465301..1969c001b3f9 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -1599,14 +1599,20 @@ static void handle_port_status(struct xhci_hcd *xhci, | |||
1599 | max_ports = HCS_MAX_PORTS(xhci->hcs_params1); | 1599 | max_ports = HCS_MAX_PORTS(xhci->hcs_params1); |
1600 | if ((port_id <= 0) || (port_id > max_ports)) { | 1600 | if ((port_id <= 0) || (port_id > max_ports)) { |
1601 | xhci_warn(xhci, "Invalid port id %d\n", port_id); | 1601 | xhci_warn(xhci, "Invalid port id %d\n", port_id); |
1602 | bogus_port_status = true; | 1602 | inc_deq(xhci, xhci->event_ring); |
1603 | goto cleanup; | 1603 | return; |
1604 | } | 1604 | } |
1605 | 1605 | ||
1606 | /* Figure out which usb_hcd this port is attached to: | 1606 | /* Figure out which usb_hcd this port is attached to: |
1607 | * is it a USB 3.0 port or a USB 2.0/1.1 port? | 1607 | * is it a USB 3.0 port or a USB 2.0/1.1 port? |
1608 | */ | 1608 | */ |
1609 | major_revision = xhci->port_array[port_id - 1]; | 1609 | major_revision = xhci->port_array[port_id - 1]; |
1610 | |||
1611 | /* Find the right roothub. */ | ||
1612 | hcd = xhci_to_hcd(xhci); | ||
1613 | if ((major_revision == 0x03) != (hcd->speed == HCD_USB3)) | ||
1614 | hcd = xhci->shared_hcd; | ||
1615 | |||
1610 | if (major_revision == 0) { | 1616 | if (major_revision == 0) { |
1611 | xhci_warn(xhci, "Event for port %u not in " | 1617 | xhci_warn(xhci, "Event for port %u not in " |
1612 | "Extended Capabilities, ignoring.\n", | 1618 | "Extended Capabilities, ignoring.\n", |
@@ -1629,10 +1635,6 @@ static void handle_port_status(struct xhci_hcd *xhci, | |||
1629 | * into the index into the ports on the correct split roothub, and the | 1635 | * into the index into the ports on the correct split roothub, and the |
1630 | * correct bus_state structure. | 1636 | * correct bus_state structure. |
1631 | */ | 1637 | */ |
1632 | /* Find the right roothub. */ | ||
1633 | hcd = xhci_to_hcd(xhci); | ||
1634 | if ((major_revision == 0x03) != (hcd->speed == HCD_USB3)) | ||
1635 | hcd = xhci->shared_hcd; | ||
1636 | bus_state = &xhci->bus_state[hcd_index(hcd)]; | 1638 | bus_state = &xhci->bus_state[hcd_index(hcd)]; |
1637 | if (hcd->speed == HCD_USB3) | 1639 | if (hcd->speed == HCD_USB3) |
1638 | port_array = xhci->usb3_ports; | 1640 | port_array = xhci->usb3_ports; |
@@ -2027,8 +2029,8 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
2027 | if (event_trb != ep_ring->dequeue && | 2029 | if (event_trb != ep_ring->dequeue && |
2028 | event_trb != td->last_trb) | 2030 | event_trb != td->last_trb) |
2029 | td->urb->actual_length = | 2031 | td->urb->actual_length = |
2030 | td->urb->transfer_buffer_length | 2032 | td->urb->transfer_buffer_length - |
2031 | - TRB_LEN(le32_to_cpu(event->transfer_len)); | 2033 | EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); |
2032 | else | 2034 | else |
2033 | td->urb->actual_length = 0; | 2035 | td->urb->actual_length = 0; |
2034 | 2036 | ||
@@ -2060,7 +2062,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
2060 | /* Maybe the event was for the data stage? */ | 2062 | /* Maybe the event was for the data stage? */ |
2061 | td->urb->actual_length = | 2063 | td->urb->actual_length = |
2062 | td->urb->transfer_buffer_length - | 2064 | td->urb->transfer_buffer_length - |
2063 | TRB_LEN(le32_to_cpu(event->transfer_len)); | 2065 | EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); |
2064 | xhci_dbg(xhci, "Waiting for status " | 2066 | xhci_dbg(xhci, "Waiting for status " |
2065 | "stage event\n"); | 2067 | "stage event\n"); |
2066 | return 0; | 2068 | return 0; |
@@ -2096,7 +2098,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
2096 | /* handle completion code */ | 2098 | /* handle completion code */ |
2097 | switch (trb_comp_code) { | 2099 | switch (trb_comp_code) { |
2098 | case COMP_SUCCESS: | 2100 | case COMP_SUCCESS: |
2099 | if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) { | 2101 | if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) { |
2100 | frame->status = 0; | 2102 | frame->status = 0; |
2101 | break; | 2103 | break; |
2102 | } | 2104 | } |
@@ -2141,7 +2143,7 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
2141 | len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); | 2143 | len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])); |
2142 | } | 2144 | } |
2143 | len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - | 2145 | len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - |
2144 | TRB_LEN(le32_to_cpu(event->transfer_len)); | 2146 | EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); |
2145 | 2147 | ||
2146 | if (trb_comp_code != COMP_STOP_INVAL) { | 2148 | if (trb_comp_code != COMP_STOP_INVAL) { |
2147 | frame->actual_length = len; | 2149 | frame->actual_length = len; |
@@ -2199,7 +2201,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
2199 | case COMP_SUCCESS: | 2201 | case COMP_SUCCESS: |
2200 | /* Double check that the HW transferred everything. */ | 2202 | /* Double check that the HW transferred everything. */ |
2201 | if (event_trb != td->last_trb || | 2203 | if (event_trb != td->last_trb || |
2202 | TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { | 2204 | EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { |
2203 | xhci_warn(xhci, "WARN Successful completion " | 2205 | xhci_warn(xhci, "WARN Successful completion " |
2204 | "on short TX\n"); | 2206 | "on short TX\n"); |
2205 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) | 2207 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) |
@@ -2227,18 +2229,18 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
2227 | "%d bytes untransferred\n", | 2229 | "%d bytes untransferred\n", |
2228 | td->urb->ep->desc.bEndpointAddress, | 2230 | td->urb->ep->desc.bEndpointAddress, |
2229 | td->urb->transfer_buffer_length, | 2231 | td->urb->transfer_buffer_length, |
2230 | TRB_LEN(le32_to_cpu(event->transfer_len))); | 2232 | EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); |
2231 | /* Fast path - was this the last TRB in the TD for this URB? */ | 2233 | /* Fast path - was this the last TRB in the TD for this URB? */ |
2232 | if (event_trb == td->last_trb) { | 2234 | if (event_trb == td->last_trb) { |
2233 | if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { | 2235 | if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { |
2234 | td->urb->actual_length = | 2236 | td->urb->actual_length = |
2235 | td->urb->transfer_buffer_length - | 2237 | td->urb->transfer_buffer_length - |
2236 | TRB_LEN(le32_to_cpu(event->transfer_len)); | 2238 | EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); |
2237 | if (td->urb->transfer_buffer_length < | 2239 | if (td->urb->transfer_buffer_length < |
2238 | td->urb->actual_length) { | 2240 | td->urb->actual_length) { |
2239 | xhci_warn(xhci, "HC gave bad length " | 2241 | xhci_warn(xhci, "HC gave bad length " |
2240 | "of %d bytes left\n", | 2242 | "of %d bytes left\n", |
2241 | TRB_LEN(le32_to_cpu(event->transfer_len))); | 2243 | EVENT_TRB_LEN(le32_to_cpu(event->transfer_len))); |
2242 | td->urb->actual_length = 0; | 2244 | td->urb->actual_length = 0; |
2243 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) | 2245 | if (td->urb->transfer_flags & URB_SHORT_NOT_OK) |
2244 | *status = -EREMOTEIO; | 2246 | *status = -EREMOTEIO; |
@@ -2280,7 +2282,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, | |||
2280 | if (trb_comp_code != COMP_STOP_INVAL) | 2282 | if (trb_comp_code != COMP_STOP_INVAL) |
2281 | td->urb->actual_length += | 2283 | td->urb->actual_length += |
2282 | TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - | 2284 | TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - |
2283 | TRB_LEN(le32_to_cpu(event->transfer_len)); | 2285 | EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); |
2284 | } | 2286 | } |
2285 | 2287 | ||
2286 | return finish_td(xhci, td, event_trb, event, ep, status, false); | 2288 | return finish_td(xhci, td, event_trb, event, ep, status, false); |
@@ -2368,7 +2370,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
2368 | * transfer type | 2370 | * transfer type |
2369 | */ | 2371 | */ |
2370 | case COMP_SUCCESS: | 2372 | case COMP_SUCCESS: |
2371 | if (TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) | 2373 | if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) |
2372 | break; | 2374 | break; |
2373 | if (xhci->quirks & XHCI_TRUST_TX_LENGTH) | 2375 | if (xhci->quirks & XHCI_TRUST_TX_LENGTH) |
2374 | trb_comp_code = COMP_SHORT_TX; | 2376 | trb_comp_code = COMP_SHORT_TX; |
@@ -2461,14 +2463,21 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
2461 | * TD list. | 2463 | * TD list. |
2462 | */ | 2464 | */ |
2463 | if (list_empty(&ep_ring->td_list)) { | 2465 | if (list_empty(&ep_ring->td_list)) { |
2464 | xhci_warn(xhci, "WARN Event TRB for slot %d ep %d " | 2466 | /* |
2465 | "with no TDs queued?\n", | 2467 | * A stopped endpoint may generate an extra completion |
2466 | TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), | 2468 | * event if the device was suspended. Don't print |
2467 | ep_index); | 2469 | * warnings. |
2468 | xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", | 2470 | */ |
2469 | (le32_to_cpu(event->flags) & | 2471 | if (!(trb_comp_code == COMP_STOP || |
2470 | TRB_TYPE_BITMASK)>>10); | 2472 | trb_comp_code == COMP_STOP_INVAL)) { |
2471 | xhci_print_trb_offsets(xhci, (union xhci_trb *) event); | 2473 | xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", |
2474 | TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), | ||
2475 | ep_index); | ||
2476 | xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", | ||
2477 | (le32_to_cpu(event->flags) & | ||
2478 | TRB_TYPE_BITMASK)>>10); | ||
2479 | xhci_print_trb_offsets(xhci, (union xhci_trb *) event); | ||
2480 | } | ||
2472 | if (ep->skip) { | 2481 | if (ep->skip) { |
2473 | ep->skip = false; | 2482 | ep->skip = false; |
2474 | xhci_dbg(xhci, "td_list is empty while skip " | 2483 | xhci_dbg(xhci, "td_list is empty while skip " |
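
The TRB_LEN to EVENT_TRB_LEN conversions matter because a transfer event TRB reports the untransferred byte count in bits 0:23 of its transfer_len field (see the EVENT_TRB_LEN definition added to xhci.h below), which is wider than the transfer-TRB length field that TRB_LEN masks. A small worked example of the residual-length arithmetic behind actual_length:

    /* same mask as the xhci.h addition: bits 0:23 of the event field */
    #define EVENT_TRB_LEN(p)   ((p) & 0xffffff)

    static unsigned int example_actual_length(unsigned int buffer_len,
                                              unsigned int event_transfer_len)
    {
        /* e.g. buffer_len = 512 and the event reports 128 bytes
         * untransferred -> actual_length = 512 - 128 = 384 */
        return buffer_len - EVENT_TRB_LEN(event_transfer_len);
    }
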
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index f1f01a834ba7..53b8f89a0b1c 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -350,7 +350,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd) | |||
350 | * generate interrupts. Don't even try to enable MSI. | 350 | * generate interrupts. Don't even try to enable MSI. |
351 | */ | 351 | */ |
352 | if (xhci->quirks & XHCI_BROKEN_MSI) | 352 | if (xhci->quirks & XHCI_BROKEN_MSI) |
353 | return 0; | 353 | goto legacy_irq; |
354 | 354 | ||
355 | /* unregister the legacy interrupt */ | 355 | /* unregister the legacy interrupt */ |
356 | if (hcd->irq) | 356 | if (hcd->irq) |
@@ -371,6 +371,7 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd) | |||
371 | return -EINVAL; | 371 | return -EINVAL; |
372 | } | 372 | } |
373 | 373 | ||
374 | legacy_irq: | ||
374 | /* fall back to legacy interrupt*/ | 375 | /* fall back to legacy interrupt*/ |
375 | ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, | 376 | ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, |
376 | hcd->irq_descr, hcd); | 377 | hcd->irq_descr, hcd); |
@@ -3778,6 +3779,28 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
3778 | return 0; | 3779 | return 0; |
3779 | } | 3780 | } |
3780 | 3781 | ||
3782 | /* | ||
3783 | * Transfer the port index into real index in the HW port status | ||
3784 | * registers. Caculate offset between the port's PORTSC register | ||
3785 | * and port status base. Divide the number of per port register | ||
3786 | * to get the real index. The raw port number bases 1. | ||
3787 | */ | ||
3788 | int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1) | ||
3789 | { | ||
3790 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
3791 | __le32 __iomem *base_addr = &xhci->op_regs->port_status_base; | ||
3792 | __le32 __iomem *addr; | ||
3793 | int raw_port; | ||
3794 | |||
3795 | if (hcd->speed != HCD_USB3) | ||
3796 | addr = xhci->usb2_ports[port1 - 1]; | ||
3797 | else | ||
3798 | addr = xhci->usb3_ports[port1 - 1]; | ||
3799 | |||
3800 | raw_port = (addr - base_addr)/NUM_PORT_REGS + 1; | ||
3801 | return raw_port; | ||
3802 | } | ||
3803 | |||
3781 | #ifdef CONFIG_USB_SUSPEND | 3804 | #ifdef CONFIG_USB_SUSPEND |
3782 | 3805 | ||
3783 | /* BESL to HIRD Encoding array for USB2 LPM */ | 3806 | /* BESL to HIRD Encoding array for USB2 LPM */ |
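
xhci_find_raw_port_number() relies on pointer arithmetic over the memory-mapped port register file: subtracting two __le32 __iomem pointers counts 32-bit registers, and each port owns NUM_PORT_REGS of them starting at its PORTSC. A numeric sketch, assuming NUM_PORT_REGS is 4:

    /* e.g. a PORTSC that sits 8 registers past port_status_base:
     * (portsc - base) = 8, 8 / NUM_PORT_REGS = 2, + 1 -> raw port 3 */
    static int example_raw_port(__le32 __iomem *base, __le32 __iomem *portsc)
    {
        return (portsc - base) / NUM_PORT_REGS + 1;     /* 1-based */
    }
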
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index f791bd0aee6c..63582719e0fb 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -206,8 +206,8 @@ struct xhci_op_regs { | |||
206 | /* bits 12:31 are reserved (and should be preserved on writes). */ | 206 | /* bits 12:31 are reserved (and should be preserved on writes). */ |
207 | 207 | ||
208 | /* IMAN - Interrupt Management Register */ | 208 | /* IMAN - Interrupt Management Register */ |
209 | #define IMAN_IP (1 << 1) | 209 | #define IMAN_IE (1 << 1) |
210 | #define IMAN_IE (1 << 0) | 210 | #define IMAN_IP (1 << 0) |
211 | 211 | ||
212 | /* USBSTS - USB status - status bitmasks */ | 212 | /* USBSTS - USB status - status bitmasks */ |
213 | /* HC not running - set to 1 when run/stop bit is cleared. */ | 213 | /* HC not running - set to 1 when run/stop bit is cleared. */ |
@@ -972,6 +972,10 @@ struct xhci_transfer_event { | |||
972 | __le32 flags; | 972 | __le32 flags; |
973 | }; | 973 | }; |
974 | 974 | ||
975 | /* Transfer event TRB length bit mask */ | ||
976 | /* bits 0:23 */ | ||
977 | #define EVENT_TRB_LEN(p) ((p) & 0xffffff) | ||
978 | |||
975 | /** Transfer Event bit fields **/ | 979 | /** Transfer Event bit fields **/ |
976 | #define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f) | 980 | #define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f) |
977 | 981 | ||
@@ -1829,6 +1833,7 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array, | |||
1829 | int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, | 1833 | int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, |
1830 | char *buf, u16 wLength); | 1834 | char *buf, u16 wLength); |
1831 | int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); | 1835 | int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); |
1836 | int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1); | ||
1832 | 1837 | ||
1833 | #ifdef CONFIG_PM | 1838 | #ifdef CONFIG_PM |
1834 | int xhci_bus_suspend(struct usb_hcd *hcd); | 1839 | int xhci_bus_suspend(struct usb_hcd *hcd); |
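
The IMAN bit swap above matters for interrupt handling: IMAN.IP (now bit 0) is write-1-to-clear while IMAN.IE (bit 1) enables the interrupter, so acking a pending interrupt must set IP without disturbing IE. A short sketch, assuming a plain readl()/writel() access style:

    static void example_ack_interrupter(void __iomem *iman_reg)
    {
        u32 iman = readl(iman_reg);

        /* writing 1 to IP clears it; IE is carried over unchanged */
        writel(iman | IMAN_IP, iman_reg);
    }
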
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index 7c71769d71ff..41613a2b35e8 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c | |||
@@ -327,7 +327,7 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci) | |||
327 | u8 devctl = musb_readb(mregs, MUSB_DEVCTL); | 327 | u8 devctl = musb_readb(mregs, MUSB_DEVCTL); |
328 | int err; | 328 | int err; |
329 | 329 | ||
330 | err = musb->int_usb & USB_INTR_VBUSERROR; | 330 | err = musb->int_usb & MUSB_INTR_VBUSERROR; |
331 | if (err) { | 331 | if (err) { |
332 | /* | 332 | /* |
333 | * The Mentor core doesn't debounce VBUS as needed | 333 | * The Mentor core doesn't debounce VBUS as needed |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index be18537c5f14..83eddedcd9be 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -141,7 +141,9 @@ static inline void map_dma_buffer(struct musb_request *request, | |||
141 | static inline void unmap_dma_buffer(struct musb_request *request, | 141 | static inline void unmap_dma_buffer(struct musb_request *request, |
142 | struct musb *musb) | 142 | struct musb *musb) |
143 | { | 143 | { |
144 | if (!is_buffer_mapped(request)) | 144 | struct musb_ep *musb_ep = request->ep; |
145 | |||
146 | if (!is_buffer_mapped(request) || !musb_ep->dma) | ||
145 | return; | 147 | return; |
146 | 148 | ||
147 | if (request->request.dma == DMA_ADDR_INVALID) { | 149 | if (request->request.dma == DMA_ADDR_INVALID) { |
@@ -195,7 +197,10 @@ __acquires(ep->musb->lock) | |||
195 | 197 | ||
196 | ep->busy = 1; | 198 | ep->busy = 1; |
197 | spin_unlock(&musb->lock); | 199 | spin_unlock(&musb->lock); |
198 | unmap_dma_buffer(req, musb); | 200 | |
201 | if (!dma_mapping_error(&musb->g.dev, request->dma)) | ||
202 | unmap_dma_buffer(req, musb); | ||
203 | |||
199 | if (request->status == 0) | 204 | if (request->status == 0) |
200 | dev_dbg(musb->controller, "%s done request %p, %d/%d\n", | 205 | dev_dbg(musb->controller, "%s done request %p, %d/%d\n", |
201 | ep->end_point.name, request, | 206 | ep->end_point.name, request, |
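
The unmap_dma_buffer() change follows the standard DMA-API rule that only mappings which passed dma_mapping_error() may be unmapped, and the extra !musb_ep->dma test skips requests that never used a DMA channel. A generic sketch of the map/check/unmap pairing, with illustrative function names:

    #include <linux/dma-mapping.h>

    static int example_map(struct device *dev, void *buf, size_t len,
                           dma_addr_t *dma)
    {
        *dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *dma))
            return -ENOMEM;                 /* never unmap this handle */
        return 0;
    }

    static void example_unmap(struct device *dev, dma_addr_t dma, size_t len)
    {
        if (!dma_mapping_error(dev, dma))   /* mirrors the check in the hunk */
            dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
    }
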
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index 65217a590068..90549382eba5 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig | |||
@@ -38,6 +38,7 @@ config USB_ISP1301 | |||
38 | tristate "NXP ISP1301 USB transceiver support" | 38 | tristate "NXP ISP1301 USB transceiver support" |
39 | depends on USB || USB_GADGET | 39 | depends on USB || USB_GADGET |
40 | depends on I2C | 40 | depends on I2C |
41 | select USB_OTG_UTILS | ||
41 | help | 42 | help |
42 | Say Y here to add support for the NXP ISP1301 USB transceiver driver. | 43 | Say Y here to add support for the NXP ISP1301 USB transceiver driver. |
43 | This chip is typically used as USB transceiver for USB host, gadget | 44 | This chip is typically used as USB transceiver for USB host, gadget |
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c index cbd904b8fba5..4775f8209e55 100644 --- a/drivers/usb/serial/ark3116.c +++ b/drivers/usb/serial/ark3116.c | |||
@@ -62,7 +62,6 @@ static int is_irda(struct usb_serial *serial) | |||
62 | } | 62 | } |
63 | 63 | ||
64 | struct ark3116_private { | 64 | struct ark3116_private { |
65 | wait_queue_head_t delta_msr_wait; | ||
66 | struct async_icount icount; | 65 | struct async_icount icount; |
67 | int irda; /* 1 for irda device */ | 66 | int irda; /* 1 for irda device */ |
68 | 67 | ||
@@ -146,7 +145,6 @@ static int ark3116_port_probe(struct usb_serial_port *port) | |||
146 | if (!priv) | 145 | if (!priv) |
147 | return -ENOMEM; | 146 | return -ENOMEM; |
148 | 147 | ||
149 | init_waitqueue_head(&priv->delta_msr_wait); | ||
150 | mutex_init(&priv->hw_lock); | 148 | mutex_init(&priv->hw_lock); |
151 | spin_lock_init(&priv->status_lock); | 149 | spin_lock_init(&priv->status_lock); |
152 | 150 | ||
@@ -456,10 +454,14 @@ static int ark3116_ioctl(struct tty_struct *tty, | |||
456 | case TIOCMIWAIT: | 454 | case TIOCMIWAIT: |
457 | for (;;) { | 455 | for (;;) { |
458 | struct async_icount prev = priv->icount; | 456 | struct async_icount prev = priv->icount; |
459 | interruptible_sleep_on(&priv->delta_msr_wait); | 457 | interruptible_sleep_on(&port->delta_msr_wait); |
460 | /* see if a signal did it */ | 458 | /* see if a signal did it */ |
461 | if (signal_pending(current)) | 459 | if (signal_pending(current)) |
462 | return -ERESTARTSYS; | 460 | return -ERESTARTSYS; |
461 | |||
462 | if (port->serial->disconnected) | ||
463 | return -EIO; | ||
464 | |||
463 | if ((prev.rng == priv->icount.rng) && | 465 | if ((prev.rng == priv->icount.rng) && |
464 | (prev.dsr == priv->icount.dsr) && | 466 | (prev.dsr == priv->icount.dsr) && |
465 | (prev.dcd == priv->icount.dcd) && | 467 | (prev.dcd == priv->icount.dcd) && |
@@ -580,7 +582,7 @@ static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr) | |||
580 | priv->icount.dcd++; | 582 | priv->icount.dcd++; |
581 | if (msr & UART_MSR_TERI) | 583 | if (msr & UART_MSR_TERI) |
582 | priv->icount.rng++; | 584 | priv->icount.rng++; |
583 | wake_up_interruptible(&priv->delta_msr_wait); | 585 | wake_up_interruptible(&port->delta_msr_wait); |
584 | } | 586 | } |
585 | } | 587 | } |
586 | 588 | ||
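
ark3116 is the first of several drivers in this series converted from a private waitqueue to the core-owned port->delta_msr_wait, with an explicit -EIO return once port->serial->disconnected is set so that TIOCMIWAIT sleepers cannot hang across an unplug. A hedged sketch of the disconnect-aware wait loop, using wait_event_interruptible() rather than the interruptible_sleep_on() still visible in some hunks; the icount handling is simplified:

    #include <linux/serial.h>
    #include <linux/string.h>
    #include <linux/tty.h>
    #include <linux/usb/serial.h>
    #include <linux/wait.h>

    static int example_tiocmiwait(struct usb_serial_port *port,
                                  struct async_icount *icount,
                                  unsigned long arg)
    {
        struct async_icount prev = *icount;

        for (;;) {
            int ret = wait_event_interruptible(port->delta_msr_wait,
                            port->serial->disconnected ||
                            memcmp(icount, &prev, sizeof(prev)));
            if (ret)
                return -ERESTARTSYS;        /* a signal woke us */
            if (port->serial->disconnected)
                return -EIO;                /* device is gone */
            if (((arg & TIOCM_RNG) && (icount->rng != prev.rng)) ||
                ((arg & TIOCM_DSR) && (icount->dsr != prev.dsr)) ||
                ((arg & TIOCM_CD)  && (icount->dcd != prev.dcd)) ||
                ((arg & TIOCM_CTS) && (icount->cts != prev.cts)))
                return 0;                   /* a requested signal changed */
            prev = *icount;
        }
    }
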
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index d255f66e708e..07d4650a32ab 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c | |||
@@ -80,7 +80,6 @@ MODULE_DEVICE_TABLE(usb, id_table); | |||
80 | 80 | ||
81 | struct ch341_private { | 81 | struct ch341_private { |
82 | spinlock_t lock; /* access lock */ | 82 | spinlock_t lock; /* access lock */ |
83 | wait_queue_head_t delta_msr_wait; /* wait queue for modem status */ | ||
84 | unsigned baud_rate; /* set baud rate */ | 83 | unsigned baud_rate; /* set baud rate */ |
85 | u8 line_control; /* set line control value RTS/DTR */ | 84 | u8 line_control; /* set line control value RTS/DTR */ |
86 | u8 line_status; /* active status of modem control inputs */ | 85 | u8 line_status; /* active status of modem control inputs */ |
@@ -252,7 +251,6 @@ static int ch341_port_probe(struct usb_serial_port *port) | |||
252 | return -ENOMEM; | 251 | return -ENOMEM; |
253 | 252 | ||
254 | spin_lock_init(&priv->lock); | 253 | spin_lock_init(&priv->lock); |
255 | init_waitqueue_head(&priv->delta_msr_wait); | ||
256 | priv->baud_rate = DEFAULT_BAUD_RATE; | 254 | priv->baud_rate = DEFAULT_BAUD_RATE; |
257 | priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR; | 255 | priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR; |
258 | 256 | ||
@@ -298,7 +296,7 @@ static void ch341_dtr_rts(struct usb_serial_port *port, int on) | |||
298 | priv->line_control &= ~(CH341_BIT_RTS | CH341_BIT_DTR); | 296 | priv->line_control &= ~(CH341_BIT_RTS | CH341_BIT_DTR); |
299 | spin_unlock_irqrestore(&priv->lock, flags); | 297 | spin_unlock_irqrestore(&priv->lock, flags); |
300 | ch341_set_handshake(port->serial->dev, priv->line_control); | 298 | ch341_set_handshake(port->serial->dev, priv->line_control); |
301 | wake_up_interruptible(&priv->delta_msr_wait); | 299 | wake_up_interruptible(&port->delta_msr_wait); |
302 | } | 300 | } |
303 | 301 | ||
304 | static void ch341_close(struct usb_serial_port *port) | 302 | static void ch341_close(struct usb_serial_port *port) |
@@ -491,7 +489,7 @@ static void ch341_read_int_callback(struct urb *urb) | |||
491 | tty_kref_put(tty); | 489 | tty_kref_put(tty); |
492 | } | 490 | } |
493 | 491 | ||
494 | wake_up_interruptible(&priv->delta_msr_wait); | 492 | wake_up_interruptible(&port->delta_msr_wait); |
495 | } | 493 | } |
496 | 494 | ||
497 | exit: | 495 | exit: |
@@ -517,11 +515,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
517 | spin_unlock_irqrestore(&priv->lock, flags); | 515 | spin_unlock_irqrestore(&priv->lock, flags); |
518 | 516 | ||
519 | while (!multi_change) { | 517 | while (!multi_change) { |
520 | interruptible_sleep_on(&priv->delta_msr_wait); | 518 | interruptible_sleep_on(&port->delta_msr_wait); |
521 | /* see if a signal did it */ | 519 | /* see if a signal did it */ |
522 | if (signal_pending(current)) | 520 | if (signal_pending(current)) |
523 | return -ERESTARTSYS; | 521 | return -ERESTARTSYS; |
524 | 522 | ||
523 | if (port->serial->disconnected) | ||
524 | return -EIO; | ||
525 | |||
525 | spin_lock_irqsave(&priv->lock, flags); | 526 | spin_lock_irqsave(&priv->lock, flags); |
526 | status = priv->line_status; | 527 | status = priv->line_status; |
527 | multi_change = priv->multi_status_change; | 528 | multi_change = priv->multi_status_change; |
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index 8efa19d0e9fb..ba7352e4187e 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c | |||
@@ -111,7 +111,6 @@ struct cypress_private { | |||
111 | int baud_rate; /* stores current baud rate in | 111 | int baud_rate; /* stores current baud rate in |
112 | integer form */ | 112 | integer form */ |
113 | int isthrottled; /* if throttled, discard reads */ | 113 | int isthrottled; /* if throttled, discard reads */ |
114 | wait_queue_head_t delta_msr_wait; /* used for TIOCMIWAIT */ | ||
115 | char prev_status, diff_status; /* used for TIOCMIWAIT */ | 114 | char prev_status, diff_status; /* used for TIOCMIWAIT */ |
116 | /* we pass a pointer to this as the argument sent to | 115 | /* we pass a pointer to this as the argument sent to |
117 | cypress_set_termios old_termios */ | 116 | cypress_set_termios old_termios */ |
@@ -449,7 +448,6 @@ static int cypress_generic_port_probe(struct usb_serial_port *port) | |||
449 | kfree(priv); | 448 | kfree(priv); |
450 | return -ENOMEM; | 449 | return -ENOMEM; |
451 | } | 450 | } |
452 | init_waitqueue_head(&priv->delta_msr_wait); | ||
453 | 451 | ||
454 | usb_reset_configuration(serial->dev); | 452 | usb_reset_configuration(serial->dev); |
455 | 453 | ||
@@ -868,12 +866,16 @@ static int cypress_ioctl(struct tty_struct *tty, | |||
868 | switch (cmd) { | 866 | switch (cmd) { |
869 | /* This code comes from drivers/char/serial.c and ftdi_sio.c */ | 867 | /* This code comes from drivers/char/serial.c and ftdi_sio.c */ |
870 | case TIOCMIWAIT: | 868 | case TIOCMIWAIT: |
871 | while (priv != NULL) { | 869 | for (;;) { |
872 | interruptible_sleep_on(&priv->delta_msr_wait); | 870 | interruptible_sleep_on(&port->delta_msr_wait); |
873 | /* see if a signal did it */ | 871 | /* see if a signal did it */ |
874 | if (signal_pending(current)) | 872 | if (signal_pending(current)) |
875 | return -ERESTARTSYS; | 873 | return -ERESTARTSYS; |
876 | else { | 874 | |
875 | if (port->serial->disconnected) | ||
876 | return -EIO; | ||
877 | |||
878 | { | ||
877 | char diff = priv->diff_status; | 879 | char diff = priv->diff_status; |
878 | if (diff == 0) | 880 | if (diff == 0) |
879 | return -EIO; /* no change => error */ | 881 | return -EIO; /* no change => error */ |
@@ -1187,7 +1189,7 @@ static void cypress_read_int_callback(struct urb *urb) | |||
1187 | if (priv->current_status != priv->prev_status) { | 1189 | if (priv->current_status != priv->prev_status) { |
1188 | priv->diff_status |= priv->current_status ^ | 1190 | priv->diff_status |= priv->current_status ^ |
1189 | priv->prev_status; | 1191 | priv->prev_status; |
1190 | wake_up_interruptible(&priv->delta_msr_wait); | 1192 | wake_up_interruptible(&port->delta_msr_wait); |
1191 | priv->prev_status = priv->current_status; | 1193 | priv->prev_status = priv->current_status; |
1192 | } | 1194 | } |
1193 | spin_unlock_irqrestore(&priv->lock, flags); | 1195 | spin_unlock_irqrestore(&priv->lock, flags); |
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c index b1b2dc64b50b..a172ad5c5ce8 100644 --- a/drivers/usb/serial/f81232.c +++ b/drivers/usb/serial/f81232.c | |||
@@ -47,7 +47,6 @@ MODULE_DEVICE_TABLE(usb, id_table); | |||
47 | 47 | ||
48 | struct f81232_private { | 48 | struct f81232_private { |
49 | spinlock_t lock; | 49 | spinlock_t lock; |
50 | wait_queue_head_t delta_msr_wait; | ||
51 | u8 line_control; | 50 | u8 line_control; |
52 | u8 line_status; | 51 | u8 line_status; |
53 | }; | 52 | }; |
@@ -111,7 +110,7 @@ static void f81232_process_read_urb(struct urb *urb) | |||
111 | line_status = priv->line_status; | 110 | line_status = priv->line_status; |
112 | priv->line_status &= ~UART_STATE_TRANSIENT_MASK; | 111 | priv->line_status &= ~UART_STATE_TRANSIENT_MASK; |
113 | spin_unlock_irqrestore(&priv->lock, flags); | 112 | spin_unlock_irqrestore(&priv->lock, flags); |
114 | wake_up_interruptible(&priv->delta_msr_wait); | 113 | wake_up_interruptible(&port->delta_msr_wait); |
115 | 114 | ||
116 | if (!urb->actual_length) | 115 | if (!urb->actual_length) |
117 | return; | 116 | return; |
@@ -256,11 +255,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
256 | spin_unlock_irqrestore(&priv->lock, flags); | 255 | spin_unlock_irqrestore(&priv->lock, flags); |
257 | 256 | ||
258 | while (1) { | 257 | while (1) { |
259 | interruptible_sleep_on(&priv->delta_msr_wait); | 258 | interruptible_sleep_on(&port->delta_msr_wait); |
260 | /* see if a signal did it */ | 259 | /* see if a signal did it */ |
261 | if (signal_pending(current)) | 260 | if (signal_pending(current)) |
262 | return -ERESTARTSYS; | 261 | return -ERESTARTSYS; |
263 | 262 | ||
263 | if (port->serial->disconnected) | ||
264 | return -EIO; | ||
265 | |||
264 | spin_lock_irqsave(&priv->lock, flags); | 266 | spin_lock_irqsave(&priv->lock, flags); |
265 | status = priv->line_status; | 267 | status = priv->line_status; |
266 | spin_unlock_irqrestore(&priv->lock, flags); | 268 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -322,7 +324,6 @@ static int f81232_port_probe(struct usb_serial_port *port) | |||
322 | return -ENOMEM; | 324 | return -ENOMEM; |
323 | 325 | ||
324 | spin_lock_init(&priv->lock); | 326 | spin_lock_init(&priv->lock); |
325 | init_waitqueue_head(&priv->delta_msr_wait); | ||
326 | 327 | ||
327 | usb_set_serial_port_data(port, priv); | 328 | usb_set_serial_port_data(port, priv); |
328 | 329 | ||
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index edd162df49ca..9886180e45f1 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -69,9 +69,7 @@ struct ftdi_private { | |||
69 | int flags; /* some ASYNC_xxxx flags are supported */ | 69 | int flags; /* some ASYNC_xxxx flags are supported */ |
70 | unsigned long last_dtr_rts; /* saved modem control outputs */ | 70 | unsigned long last_dtr_rts; /* saved modem control outputs */ |
71 | struct async_icount icount; | 71 | struct async_icount icount; |
72 | wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ | ||
73 | char prev_status; /* Used for TIOCMIWAIT */ | 72 | char prev_status; /* Used for TIOCMIWAIT */ |
74 | bool dev_gone; /* Used to abort TIOCMIWAIT */ | ||
75 | char transmit_empty; /* If transmitter is empty or not */ | 73 | char transmit_empty; /* If transmitter is empty or not */ |
76 | __u16 interface; /* FT2232C, FT2232H or FT4232H port interface | 74 | __u16 interface; /* FT2232C, FT2232H or FT4232H port interface |
77 | (0 for FT232/245) */ | 75 | (0 for FT232/245) */ |
@@ -642,6 +640,7 @@ static struct usb_device_id id_table_combined [] = { | |||
642 | { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, | 640 | { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, |
643 | { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) }, | 641 | { USB_DEVICE(ACTON_VID, ACTON_SPECTRAPRO_PID) }, |
644 | { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, | 642 | { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, |
643 | { USB_DEVICE(MITSUBISHI_VID, MITSUBISHI_FXUSB_PID) }, | ||
645 | { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, | 644 | { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, |
646 | { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, | 645 | { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, |
647 | { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, | 646 | { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, |
@@ -1691,10 +1690,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port) | |||
1691 | 1690 | ||
1692 | kref_init(&priv->kref); | 1691 | kref_init(&priv->kref); |
1693 | mutex_init(&priv->cfg_lock); | 1692 | mutex_init(&priv->cfg_lock); |
1694 | init_waitqueue_head(&priv->delta_msr_wait); | ||
1695 | 1693 | ||
1696 | priv->flags = ASYNC_LOW_LATENCY; | 1694 | priv->flags = ASYNC_LOW_LATENCY; |
1697 | priv->dev_gone = false; | ||
1698 | 1695 | ||
1699 | if (quirk && quirk->port_probe) | 1696 | if (quirk && quirk->port_probe) |
1700 | quirk->port_probe(priv); | 1697 | quirk->port_probe(priv); |
@@ -1840,8 +1837,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port) | |||
1840 | { | 1837 | { |
1841 | struct ftdi_private *priv = usb_get_serial_port_data(port); | 1838 | struct ftdi_private *priv = usb_get_serial_port_data(port); |
1842 | 1839 | ||
1843 | priv->dev_gone = true; | 1840 | wake_up_interruptible(&port->delta_msr_wait); |
1844 | wake_up_interruptible_all(&priv->delta_msr_wait); | ||
1845 | 1841 | ||
1846 | remove_sysfs_attrs(port); | 1842 | remove_sysfs_attrs(port); |
1847 | 1843 | ||
@@ -1989,7 +1985,7 @@ static int ftdi_process_packet(struct usb_serial_port *port, | |||
1989 | if (diff_status & FTDI_RS0_RLSD) | 1985 | if (diff_status & FTDI_RS0_RLSD) |
1990 | priv->icount.dcd++; | 1986 | priv->icount.dcd++; |
1991 | 1987 | ||
1992 | wake_up_interruptible_all(&priv->delta_msr_wait); | 1988 | wake_up_interruptible(&port->delta_msr_wait); |
1993 | priv->prev_status = status; | 1989 | priv->prev_status = status; |
1994 | } | 1990 | } |
1995 | 1991 | ||
@@ -2440,11 +2436,15 @@ static int ftdi_ioctl(struct tty_struct *tty, | |||
2440 | */ | 2436 | */ |
2441 | case TIOCMIWAIT: | 2437 | case TIOCMIWAIT: |
2442 | cprev = priv->icount; | 2438 | cprev = priv->icount; |
2443 | while (!priv->dev_gone) { | 2439 | for (;;) { |
2444 | interruptible_sleep_on(&priv->delta_msr_wait); | 2440 | interruptible_sleep_on(&port->delta_msr_wait); |
2445 | /* see if a signal did it */ | 2441 | /* see if a signal did it */ |
2446 | if (signal_pending(current)) | 2442 | if (signal_pending(current)) |
2447 | return -ERESTARTSYS; | 2443 | return -ERESTARTSYS; |
2444 | |||
2445 | if (port->serial->disconnected) | ||
2446 | return -EIO; | ||
2447 | |||
2448 | cnow = priv->icount; | 2448 | cnow = priv->icount; |
2449 | if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || | 2449 | if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) || |
2450 | ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || | 2450 | ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || |
@@ -2454,8 +2454,6 @@ static int ftdi_ioctl(struct tty_struct *tty, | |||
2454 | } | 2454 | } |
2455 | cprev = cnow; | 2455 | cprev = cnow; |
2456 | } | 2456 | } |
2457 | return -EIO; | ||
2458 | break; | ||
2459 | case TIOCSERGETLSR: | 2457 | case TIOCSERGETLSR: |
2460 | return get_lsr_info(port, (struct serial_struct __user *)arg); | 2458 | return get_lsr_info(port, (struct serial_struct __user *)arg); |
2461 | break; | 2459 | break; |
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 9d359e189a64..e79861eeed4c 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -584,6 +584,13 @@ | |||
584 | #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */ | 584 | #define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */ |
585 | 585 | ||
586 | /* | 586 | /* |
587 | * Mitsubishi Electric Corp. (http://www.meau.com) | ||
588 | * Submitted by Konstantin Holoborodko | ||
589 | */ | ||
590 | #define MITSUBISHI_VID 0x06D3 | ||
591 | #define MITSUBISHI_FXUSB_PID 0x0284 /* USB/RS422 converters: FX-USB-AW/-BD */ | ||
592 | |||
593 | /* | ||
587 | * Definitions for B&B Electronics products. | 594 | * Definitions for B&B Electronics products. |
588 | */ | 595 | */ |
589 | #define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */ | 596 | #define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */ |
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 1a07b12ef341..81caf5623ee2 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c | |||
@@ -956,10 +956,7 @@ static void garmin_close(struct usb_serial_port *port) | |||
956 | if (!serial) | 956 | if (!serial) |
957 | return; | 957 | return; |
958 | 958 | ||
959 | mutex_lock(&port->serial->disc_mutex); | 959 | garmin_clear(garmin_data_p); |
960 | |||
961 | if (!port->serial->disconnected) | ||
962 | garmin_clear(garmin_data_p); | ||
963 | 960 | ||
964 | /* shutdown our urbs */ | 961 | /* shutdown our urbs */ |
965 | usb_kill_urb(port->read_urb); | 962 | usb_kill_urb(port->read_urb); |
@@ -968,8 +965,6 @@ static void garmin_close(struct usb_serial_port *port) | |||
968 | /* keep reset state so we know that we must start a new session */ | 965 | /* keep reset state so we know that we must start a new session */ |
969 | if (garmin_data_p->state != STATE_RESET) | 966 | if (garmin_data_p->state != STATE_RESET) |
970 | garmin_data_p->state = STATE_DISCONNECTED; | 967 | garmin_data_p->state = STATE_DISCONNECTED; |
971 | |||
972 | mutex_unlock(&port->serial->disc_mutex); | ||
973 | } | 968 | } |
974 | 969 | ||
975 | 970 | ||
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index b00e5cbf741f..efd8b978128c 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c | |||
@@ -110,7 +110,6 @@ struct edgeport_port { | |||
110 | wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ | 110 | wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ |
111 | wait_queue_head_t wait_open; /* for handling sleeping while waiting for open to finish */ | 111 | wait_queue_head_t wait_open; /* for handling sleeping while waiting for open to finish */ |
112 | wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */ | 112 | wait_queue_head_t wait_command; /* for handling sleeping while waiting for command to finish */ |
113 | wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ | ||
114 | 113 | ||
115 | struct async_icount icount; | 114 | struct async_icount icount; |
116 | struct usb_serial_port *port; /* loop back to the owner of this object */ | 115 | struct usb_serial_port *port; /* loop back to the owner of this object */ |
@@ -884,7 +883,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
884 | /* initialize our wait queues */ | 883 | /* initialize our wait queues */ |
885 | init_waitqueue_head(&edge_port->wait_open); | 884 | init_waitqueue_head(&edge_port->wait_open); |
886 | init_waitqueue_head(&edge_port->wait_chase); | 885 | init_waitqueue_head(&edge_port->wait_chase); |
887 | init_waitqueue_head(&edge_port->delta_msr_wait); | ||
888 | init_waitqueue_head(&edge_port->wait_command); | 886 | init_waitqueue_head(&edge_port->wait_command); |
889 | 887 | ||
890 | /* initialize our icount structure */ | 888 | /* initialize our icount structure */ |
@@ -1669,13 +1667,17 @@ static int edge_ioctl(struct tty_struct *tty, | |||
1669 | dev_dbg(&port->dev, "%s (%d) TIOCMIWAIT\n", __func__, port->number); | 1667 | dev_dbg(&port->dev, "%s (%d) TIOCMIWAIT\n", __func__, port->number); |
1670 | cprev = edge_port->icount; | 1668 | cprev = edge_port->icount; |
1671 | while (1) { | 1669 | while (1) { |
1672 | prepare_to_wait(&edge_port->delta_msr_wait, | 1670 | prepare_to_wait(&port->delta_msr_wait, |
1673 | &wait, TASK_INTERRUPTIBLE); | 1671 | &wait, TASK_INTERRUPTIBLE); |
1674 | schedule(); | 1672 | schedule(); |
1675 | finish_wait(&edge_port->delta_msr_wait, &wait); | 1673 | finish_wait(&port->delta_msr_wait, &wait); |
1676 | /* see if a signal did it */ | 1674 | /* see if a signal did it */ |
1677 | if (signal_pending(current)) | 1675 | if (signal_pending(current)) |
1678 | return -ERESTARTSYS; | 1676 | return -ERESTARTSYS; |
1677 | |||
1678 | if (port->serial->disconnected) | ||
1679 | return -EIO; | ||
1680 | |||
1679 | cnow = edge_port->icount; | 1681 | cnow = edge_port->icount; |
1680 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && | 1682 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && |
1681 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) | 1683 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) |
@@ -2051,7 +2053,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 newMsr) | |||
2051 | icount->dcd++; | 2053 | icount->dcd++; |
2052 | if (newMsr & EDGEPORT_MSR_DELTA_RI) | 2054 | if (newMsr & EDGEPORT_MSR_DELTA_RI) |
2053 | icount->rng++; | 2055 | icount->rng++; |
2054 | wake_up_interruptible(&edge_port->delta_msr_wait); | 2056 | wake_up_interruptible(&edge_port->port->delta_msr_wait); |
2055 | } | 2057 | } |
2056 | 2058 | ||
2057 | /* Save the new modem status */ | 2059 | /* Save the new modem status */ |
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index c23776679f70..7777172206de 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c | |||
@@ -87,9 +87,6 @@ struct edgeport_port { | |||
87 | int close_pending; | 87 | int close_pending; |
88 | int lsr_event; | 88 | int lsr_event; |
89 | struct async_icount icount; | 89 | struct async_icount icount; |
90 | wait_queue_head_t delta_msr_wait; /* for handling sleeping while | ||
91 | waiting for msr change to | ||
92 | happen */ | ||
93 | struct edgeport_serial *edge_serial; | 90 | struct edgeport_serial *edge_serial; |
94 | struct usb_serial_port *port; | 91 | struct usb_serial_port *port; |
95 | __u8 bUartMode; /* Port type, 0: RS232, etc. */ | 92 | __u8 bUartMode; /* Port type, 0: RS232, etc. */ |
@@ -1459,7 +1456,7 @@ static void handle_new_msr(struct edgeport_port *edge_port, __u8 msr) | |||
1459 | icount->dcd++; | 1456 | icount->dcd++; |
1460 | if (msr & EDGEPORT_MSR_DELTA_RI) | 1457 | if (msr & EDGEPORT_MSR_DELTA_RI) |
1461 | icount->rng++; | 1458 | icount->rng++; |
1462 | wake_up_interruptible(&edge_port->delta_msr_wait); | 1459 | wake_up_interruptible(&edge_port->port->delta_msr_wait); |
1463 | } | 1460 | } |
1464 | 1461 | ||
1465 | /* Save the new modem status */ | 1462 | /* Save the new modem status */ |
@@ -1754,7 +1751,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
1754 | dev = port->serial->dev; | 1751 | dev = port->serial->dev; |
1755 | 1752 | ||
1756 | memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount)); | 1753 | memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount)); |
1757 | init_waitqueue_head(&edge_port->delta_msr_wait); | ||
1758 | 1754 | ||
1759 | /* turn off loopback */ | 1755 | /* turn off loopback */ |
1760 | status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0); | 1756 | status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0); |
@@ -2434,10 +2430,14 @@ static int edge_ioctl(struct tty_struct *tty, | |||
2434 | dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__); | 2430 | dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__); |
2435 | cprev = edge_port->icount; | 2431 | cprev = edge_port->icount; |
2436 | while (1) { | 2432 | while (1) { |
2437 | interruptible_sleep_on(&edge_port->delta_msr_wait); | 2433 | interruptible_sleep_on(&port->delta_msr_wait); |
2438 | /* see if a signal did it */ | 2434 | /* see if a signal did it */ |
2439 | if (signal_pending(current)) | 2435 | if (signal_pending(current)) |
2440 | return -ERESTARTSYS; | 2436 | return -ERESTARTSYS; |
2437 | |||
2438 | if (port->serial->disconnected) | ||
2439 | return -EIO; | ||
2440 | |||
2441 | cnow = edge_port->icount; | 2441 | cnow = edge_port->icount; |
2442 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && | 2442 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && |
2443 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) | 2443 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) |
@@ -2649,6 +2649,7 @@ static struct usb_serial_driver edgeport_2port_device = { | |||
2649 | .set_termios = edge_set_termios, | 2649 | .set_termios = edge_set_termios, |
2650 | .tiocmget = edge_tiocmget, | 2650 | .tiocmget = edge_tiocmget, |
2651 | .tiocmset = edge_tiocmset, | 2651 | .tiocmset = edge_tiocmset, |
2652 | .get_icount = edge_get_icount, | ||
2652 | .write = edge_write, | 2653 | .write = edge_write, |
2653 | .write_room = edge_write_room, | 2654 | .write_room = edge_write_room, |
2654 | .chars_in_buffer = edge_chars_in_buffer, | 2655 | .chars_in_buffer = edge_chars_in_buffer, |
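
Adding .get_icount to the two-port Edgeport device exposes the same modem-status counters that the TIOCMIWAIT loop above watches. A minimal userspace sketch reading them through TIOCGICOUNT; the device node name is an assumption:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/serial.h>

    int main(void)
    {
        struct serial_icounter_struct ic;
        int fd = open("/dev/ttyUSB0", O_RDONLY | O_NOCTTY);  /* assumed node */

        if (fd < 0 || ioctl(fd, TIOCGICOUNT, &ic) < 0) {
            perror("open/ioctl");
            return 1;
        }
        printf("cts=%d dsr=%d rng=%d dcd=%d\n", ic.cts, ic.dsr, ic.rng, ic.dcd);
        close(fd);
        return 0;
    }
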
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index a64d420f687b..06d5a60be2c4 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c | |||
@@ -114,8 +114,6 @@ struct mct_u232_private { | |||
114 | unsigned char last_msr; /* Modem Status Register */ | 114 | unsigned char last_msr; /* Modem Status Register */ |
115 | unsigned int rx_flags; /* Throttling flags */ | 115 | unsigned int rx_flags; /* Throttling flags */ |
116 | struct async_icount icount; | 116 | struct async_icount icount; |
117 | wait_queue_head_t msr_wait; /* for handling sleeping while waiting | ||
118 | for msr change to happen */ | ||
119 | }; | 117 | }; |
120 | 118 | ||
121 | #define THROTTLED 0x01 | 119 | #define THROTTLED 0x01 |
@@ -409,7 +407,6 @@ static int mct_u232_port_probe(struct usb_serial_port *port) | |||
409 | return -ENOMEM; | 407 | return -ENOMEM; |
410 | 408 | ||
411 | spin_lock_init(&priv->lock); | 409 | spin_lock_init(&priv->lock); |
412 | init_waitqueue_head(&priv->msr_wait); | ||
413 | 410 | ||
414 | usb_set_serial_port_data(port, priv); | 411 | usb_set_serial_port_data(port, priv); |
415 | 412 | ||
@@ -601,7 +598,7 @@ static void mct_u232_read_int_callback(struct urb *urb) | |||
601 | tty_kref_put(tty); | 598 | tty_kref_put(tty); |
602 | } | 599 | } |
603 | #endif | 600 | #endif |
604 | wake_up_interruptible(&priv->msr_wait); | 601 | wake_up_interruptible(&port->delta_msr_wait); |
605 | spin_unlock_irqrestore(&priv->lock, flags); | 602 | spin_unlock_irqrestore(&priv->lock, flags); |
606 | exit: | 603 | exit: |
607 | retval = usb_submit_urb(urb, GFP_ATOMIC); | 604 | retval = usb_submit_urb(urb, GFP_ATOMIC); |
@@ -810,13 +807,17 @@ static int mct_u232_ioctl(struct tty_struct *tty, | |||
810 | cprev = mct_u232_port->icount; | 807 | cprev = mct_u232_port->icount; |
811 | spin_unlock_irqrestore(&mct_u232_port->lock, flags); | 808 | spin_unlock_irqrestore(&mct_u232_port->lock, flags); |
812 | for ( ; ; ) { | 809 | for ( ; ; ) { |
813 | prepare_to_wait(&mct_u232_port->msr_wait, | 810 | prepare_to_wait(&port->delta_msr_wait, |
814 | &wait, TASK_INTERRUPTIBLE); | 811 | &wait, TASK_INTERRUPTIBLE); |
815 | schedule(); | 812 | schedule(); |
816 | finish_wait(&mct_u232_port->msr_wait, &wait); | 813 | finish_wait(&port->delta_msr_wait, &wait); |
817 | /* see if a signal did it */ | 814 | /* see if a signal did it */ |
818 | if (signal_pending(current)) | 815 | if (signal_pending(current)) |
819 | return -ERESTARTSYS; | 816 | return -ERESTARTSYS; |
817 | |||
818 | if (port->serial->disconnected) | ||
819 | return -EIO; | ||
820 | |||
820 | spin_lock_irqsave(&mct_u232_port->lock, flags); | 821 | spin_lock_irqsave(&mct_u232_port->lock, flags); |
821 | cnow = mct_u232_port->icount; | 822 | cnow = mct_u232_port->icount; |
822 | spin_unlock_irqrestore(&mct_u232_port->lock, flags); | 823 | spin_unlock_irqrestore(&mct_u232_port->lock, flags); |
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 809fb329eca5..b8051fa61911 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -219,7 +219,6 @@ struct moschip_port { | |||
219 | char open; | 219 | char open; |
220 | char open_ports; | 220 | char open_ports; |
221 | wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ | 221 | wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ |
222 | wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ | ||
223 | int delta_msr_cond; | 222 | int delta_msr_cond; |
224 | struct async_icount icount; | 223 | struct async_icount icount; |
225 | struct usb_serial_port *port; /* loop back to the owner of this object */ | 224 | struct usb_serial_port *port; /* loop back to the owner of this object */ |
@@ -423,6 +422,9 @@ static void mos7840_handle_new_msr(struct moschip_port *port, __u8 new_msr) | |||
423 | icount->rng++; | 422 | icount->rng++; |
424 | smp_wmb(); | 423 | smp_wmb(); |
425 | } | 424 | } |
425 | |||
426 | mos7840_port->delta_msr_cond = 1; | ||
427 | wake_up_interruptible(&port->port->delta_msr_wait); | ||
426 | } | 428 | } |
427 | } | 429 | } |
428 | 430 | ||
@@ -1127,7 +1129,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) | |||
1127 | 1129 | ||
1128 | /* initialize our wait queues */ | 1130 | /* initialize our wait queues */ |
1129 | init_waitqueue_head(&mos7840_port->wait_chase); | 1131 | init_waitqueue_head(&mos7840_port->wait_chase); |
1130 | init_waitqueue_head(&mos7840_port->delta_msr_wait); | ||
1131 | 1132 | ||
1132 | /* initialize our icount structure */ | 1133 | /* initialize our icount structure */ |
1133 | memset(&(mos7840_port->icount), 0x00, sizeof(mos7840_port->icount)); | 1134 | memset(&(mos7840_port->icount), 0x00, sizeof(mos7840_port->icount)); |
@@ -2017,8 +2018,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty, | |||
2017 | mos7840_port->read_urb_busy = false; | 2018 | mos7840_port->read_urb_busy = false; |
2018 | } | 2019 | } |
2019 | } | 2020 | } |
2020 | wake_up(&mos7840_port->delta_msr_wait); | ||
2021 | mos7840_port->delta_msr_cond = 1; | ||
2022 | dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is End %x\n", __func__, | 2021 | dev_dbg(&port->dev, "%s - mos7840_port->shadowLCR is End %x\n", __func__, |
2023 | mos7840_port->shadowLCR); | 2022 | mos7840_port->shadowLCR); |
2024 | } | 2023 | } |
@@ -2219,13 +2218,18 @@ static int mos7840_ioctl(struct tty_struct *tty, | |||
2219 | while (1) { | 2218 | while (1) { |
2220 | /* interruptible_sleep_on(&mos7840_port->delta_msr_wait); */ | 2219 | /* interruptible_sleep_on(&mos7840_port->delta_msr_wait); */ |
2221 | mos7840_port->delta_msr_cond = 0; | 2220 | mos7840_port->delta_msr_cond = 0; |
2222 | wait_event_interruptible(mos7840_port->delta_msr_wait, | 2221 | wait_event_interruptible(port->delta_msr_wait, |
2223 | (mos7840_port-> | 2222 | (port->serial->disconnected || |
2223 | mos7840_port-> | ||
2224 | delta_msr_cond == 1)); | 2224 | delta_msr_cond == 1)); |
2225 | 2225 | ||
2226 | /* see if a signal did it */ | 2226 | /* see if a signal did it */ |
2227 | if (signal_pending(current)) | 2227 | if (signal_pending(current)) |
2228 | return -ERESTARTSYS; | 2228 | return -ERESTARTSYS; |
2229 | |||
2230 | if (port->serial->disconnected) | ||
2231 | return -EIO; | ||
2232 | |||
2229 | cnow = mos7840_port->icount; | 2233 | cnow = mos7840_port->icount; |
2230 | smp_rmb(); | 2234 | smp_rmb(); |
2231 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && | 2235 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && |
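
The mos7840 hunk above is the first of a series (oti6858, pl2303, quatech2, spcp8x5, ssu100 and ti_usb_3410_5052 follow below) applying the same change: the driver-private MSR wait queue is dropped in favour of the delta_msr_wait embedded in struct usb_serial_port, and the TIOCMIWAIT loop now also wakes on disconnect and returns -EIO instead of sleeping forever on a gone device. A minimal sketch of the resulting wait pattern; struct my_port_private and its fields are stand-ins for each driver's own private data, and the headers assume a kernel of this era (signal_pending() via <linux/sched.h>).

#include <linux/sched.h>		/* signal_pending(), current */
#include <linux/serial.h>		/* struct async_icount */
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/usb/serial.h>		/* struct usb_serial_port, ->delta_msr_wait */

/* Hypothetical per-port private data; each driver uses its own struct. */
struct my_port_private {
	spinlock_t lock;
	struct async_icount icount;
	int delta_msr_cond;
};

static int my_wait_modem_info(struct usb_serial_port *port,
			      struct my_port_private *priv,
			      struct async_icount prev)
{
	struct async_icount cur;
	unsigned long flags;

	for (;;) {
		priv->delta_msr_cond = 0;
		/*
		 * Sleep on the port-wide queue so the core (or the driver's
		 * MSR handler) can wake every waiter, including on disconnect.
		 */
		wait_event_interruptible(port->delta_msr_wait,
					 port->serial->disconnected ||
					 priv->delta_msr_cond);
		if (signal_pending(current))
			return -ERESTARTSYS;
		/* Woken by disconnect rather than by a modem-status change. */
		if (port->serial->disconnected)
			return -EIO;

		spin_lock_irqsave(&priv->lock, flags);
		cur = priv->icount;
		spin_unlock_irqrestore(&priv->lock, flags);

		if (cur.rng != prev.rng || cur.dsr != prev.dsr ||
		    cur.dcd != prev.dcd || cur.cts != prev.cts)
			return 0;	/* status changed: TIOCMIWAIT succeeds */
		prev = cur;
	}
}

Note that the drivers still using interruptible_sleep_on() (pl2303, spcp8x5, ti_usb_3410_5052) only gain the disconnect check here; they keep the old sleep primitive.
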
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c index a958fd41b5b3..87c71ccfee87 100644 --- a/drivers/usb/serial/oti6858.c +++ b/drivers/usb/serial/oti6858.c | |||
@@ -188,7 +188,6 @@ struct oti6858_private { | |||
188 | u8 setup_done; | 188 | u8 setup_done; |
189 | struct delayed_work delayed_setup_work; | 189 | struct delayed_work delayed_setup_work; |
190 | 190 | ||
191 | wait_queue_head_t intr_wait; | ||
192 | struct usb_serial_port *port; /* USB port with which associated */ | 191 | struct usb_serial_port *port; /* USB port with which associated */ |
193 | }; | 192 | }; |
194 | 193 | ||
@@ -339,7 +338,6 @@ static int oti6858_port_probe(struct usb_serial_port *port) | |||
339 | return -ENOMEM; | 338 | return -ENOMEM; |
340 | 339 | ||
341 | spin_lock_init(&priv->lock); | 340 | spin_lock_init(&priv->lock); |
342 | init_waitqueue_head(&priv->intr_wait); | ||
343 | priv->port = port; | 341 | priv->port = port; |
344 | INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line); | 342 | INIT_DELAYED_WORK(&priv->delayed_setup_work, setup_line); |
345 | INIT_DELAYED_WORK(&priv->delayed_write_work, send_data); | 343 | INIT_DELAYED_WORK(&priv->delayed_write_work, send_data); |
@@ -664,11 +662,15 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
664 | spin_unlock_irqrestore(&priv->lock, flags); | 662 | spin_unlock_irqrestore(&priv->lock, flags); |
665 | 663 | ||
666 | while (1) { | 664 | while (1) { |
667 | wait_event_interruptible(priv->intr_wait, | 665 | wait_event_interruptible(port->delta_msr_wait, |
666 | port->serial->disconnected || | ||
668 | priv->status.pin_state != prev); | 667 | priv->status.pin_state != prev); |
669 | if (signal_pending(current)) | 668 | if (signal_pending(current)) |
670 | return -ERESTARTSYS; | 669 | return -ERESTARTSYS; |
671 | 670 | ||
671 | if (port->serial->disconnected) | ||
672 | return -EIO; | ||
673 | |||
672 | spin_lock_irqsave(&priv->lock, flags); | 674 | spin_lock_irqsave(&priv->lock, flags); |
673 | status = priv->status.pin_state & PIN_MASK; | 675 | status = priv->status.pin_state & PIN_MASK; |
674 | spin_unlock_irqrestore(&priv->lock, flags); | 676 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -763,7 +765,7 @@ static void oti6858_read_int_callback(struct urb *urb) | |||
763 | 765 | ||
764 | if (!priv->transient) { | 766 | if (!priv->transient) { |
765 | if (xs->pin_state != priv->status.pin_state) | 767 | if (xs->pin_state != priv->status.pin_state) |
766 | wake_up_interruptible(&priv->intr_wait); | 768 | wake_up_interruptible(&port->delta_msr_wait); |
767 | memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE); | 769 | memcpy(&priv->status, xs, OTI6858_CTRL_PKT_SIZE); |
768 | } | 770 | } |
769 | 771 | ||
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 54adc9125e5c..3b10018d89a3 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c | |||
@@ -139,7 +139,6 @@ struct pl2303_serial_private { | |||
139 | 139 | ||
140 | struct pl2303_private { | 140 | struct pl2303_private { |
141 | spinlock_t lock; | 141 | spinlock_t lock; |
142 | wait_queue_head_t delta_msr_wait; | ||
143 | u8 line_control; | 142 | u8 line_control; |
144 | u8 line_status; | 143 | u8 line_status; |
145 | }; | 144 | }; |
@@ -233,7 +232,6 @@ static int pl2303_port_probe(struct usb_serial_port *port) | |||
233 | return -ENOMEM; | 232 | return -ENOMEM; |
234 | 233 | ||
235 | spin_lock_init(&priv->lock); | 234 | spin_lock_init(&priv->lock); |
236 | init_waitqueue_head(&priv->delta_msr_wait); | ||
237 | 235 | ||
238 | usb_set_serial_port_data(port, priv); | 236 | usb_set_serial_port_data(port, priv); |
239 | 237 | ||
@@ -607,11 +605,14 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
607 | spin_unlock_irqrestore(&priv->lock, flags); | 605 | spin_unlock_irqrestore(&priv->lock, flags); |
608 | 606 | ||
609 | while (1) { | 607 | while (1) { |
610 | interruptible_sleep_on(&priv->delta_msr_wait); | 608 | interruptible_sleep_on(&port->delta_msr_wait); |
611 | /* see if a signal did it */ | 609 | /* see if a signal did it */ |
612 | if (signal_pending(current)) | 610 | if (signal_pending(current)) |
613 | return -ERESTARTSYS; | 611 | return -ERESTARTSYS; |
614 | 612 | ||
613 | if (port->serial->disconnected) | ||
614 | return -EIO; | ||
615 | |||
615 | spin_lock_irqsave(&priv->lock, flags); | 616 | spin_lock_irqsave(&priv->lock, flags); |
616 | status = priv->line_status; | 617 | status = priv->line_status; |
617 | spin_unlock_irqrestore(&priv->lock, flags); | 618 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -719,7 +720,7 @@ static void pl2303_update_line_status(struct usb_serial_port *port, | |||
719 | spin_unlock_irqrestore(&priv->lock, flags); | 720 | spin_unlock_irqrestore(&priv->lock, flags); |
720 | if (priv->line_status & UART_BREAK_ERROR) | 721 | if (priv->line_status & UART_BREAK_ERROR) |
721 | usb_serial_handle_break(port); | 722 | usb_serial_handle_break(port); |
722 | wake_up_interruptible(&priv->delta_msr_wait); | 723 | wake_up_interruptible(&port->delta_msr_wait); |
723 | 724 | ||
724 | tty = tty_port_tty_get(&port->port); | 725 | tty = tty_port_tty_get(&port->port); |
725 | if (!tty) | 726 | if (!tty) |
@@ -783,7 +784,7 @@ static void pl2303_process_read_urb(struct urb *urb) | |||
783 | line_status = priv->line_status; | 784 | line_status = priv->line_status; |
784 | priv->line_status &= ~UART_STATE_TRANSIENT_MASK; | 785 | priv->line_status &= ~UART_STATE_TRANSIENT_MASK; |
785 | spin_unlock_irqrestore(&priv->lock, flags); | 786 | spin_unlock_irqrestore(&priv->lock, flags); |
786 | wake_up_interruptible(&priv->delta_msr_wait); | 787 | wake_up_interruptible(&port->delta_msr_wait); |
787 | 788 | ||
788 | if (!urb->actual_length) | 789 | if (!urb->actual_length) |
789 | return; | 790 | return; |
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c index d643a4d4d770..75f125ddb0c9 100644 --- a/drivers/usb/serial/quatech2.c +++ b/drivers/usb/serial/quatech2.c | |||
@@ -128,7 +128,6 @@ struct qt2_port_private { | |||
128 | u8 shadowLSR; | 128 | u8 shadowLSR; |
129 | u8 shadowMSR; | 129 | u8 shadowMSR; |
130 | 130 | ||
131 | wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ | ||
132 | struct async_icount icount; | 131 | struct async_icount icount; |
133 | 132 | ||
134 | struct usb_serial_port *port; | 133 | struct usb_serial_port *port; |
@@ -506,8 +505,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
506 | spin_unlock_irqrestore(&priv->lock, flags); | 505 | spin_unlock_irqrestore(&priv->lock, flags); |
507 | 506 | ||
508 | while (1) { | 507 | while (1) { |
509 | wait_event_interruptible(priv->delta_msr_wait, | 508 | wait_event_interruptible(port->delta_msr_wait, |
510 | ((priv->icount.rng != prev.rng) || | 509 | (port->serial->disconnected || |
510 | (priv->icount.rng != prev.rng) || | ||
511 | (priv->icount.dsr != prev.dsr) || | 511 | (priv->icount.dsr != prev.dsr) || |
512 | (priv->icount.dcd != prev.dcd) || | 512 | (priv->icount.dcd != prev.dcd) || |
513 | (priv->icount.cts != prev.cts))); | 513 | (priv->icount.cts != prev.cts))); |
@@ -515,6 +515,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
515 | if (signal_pending(current)) | 515 | if (signal_pending(current)) |
516 | return -ERESTARTSYS; | 516 | return -ERESTARTSYS; |
517 | 517 | ||
518 | if (port->serial->disconnected) | ||
519 | return -EIO; | ||
520 | |||
518 | spin_lock_irqsave(&priv->lock, flags); | 521 | spin_lock_irqsave(&priv->lock, flags); |
519 | cur = priv->icount; | 522 | cur = priv->icount; |
520 | spin_unlock_irqrestore(&priv->lock, flags); | 523 | spin_unlock_irqrestore(&priv->lock, flags); |
@@ -827,7 +830,6 @@ static int qt2_port_probe(struct usb_serial_port *port) | |||
827 | 830 | ||
828 | spin_lock_init(&port_priv->lock); | 831 | spin_lock_init(&port_priv->lock); |
829 | spin_lock_init(&port_priv->urb_lock); | 832 | spin_lock_init(&port_priv->urb_lock); |
830 | init_waitqueue_head(&port_priv->delta_msr_wait); | ||
831 | port_priv->port = port; | 833 | port_priv->port = port; |
832 | 834 | ||
833 | port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL); | 835 | port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL); |
@@ -970,7 +972,7 @@ static void qt2_update_msr(struct usb_serial_port *port, unsigned char *ch) | |||
970 | if (newMSR & UART_MSR_TERI) | 972 | if (newMSR & UART_MSR_TERI) |
971 | port_priv->icount.rng++; | 973 | port_priv->icount.rng++; |
972 | 974 | ||
973 | wake_up_interruptible(&port_priv->delta_msr_wait); | 975 | wake_up_interruptible(&port->delta_msr_wait); |
974 | } | 976 | } |
975 | } | 977 | } |
976 | 978 | ||
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c index 91ff8e3bddbd..549ef68ff5fa 100644 --- a/drivers/usb/serial/spcp8x5.c +++ b/drivers/usb/serial/spcp8x5.c | |||
@@ -149,7 +149,6 @@ enum spcp8x5_type { | |||
149 | struct spcp8x5_private { | 149 | struct spcp8x5_private { |
150 | spinlock_t lock; | 150 | spinlock_t lock; |
151 | enum spcp8x5_type type; | 151 | enum spcp8x5_type type; |
152 | wait_queue_head_t delta_msr_wait; | ||
153 | u8 line_control; | 152 | u8 line_control; |
154 | u8 line_status; | 153 | u8 line_status; |
155 | }; | 154 | }; |
@@ -179,7 +178,6 @@ static int spcp8x5_port_probe(struct usb_serial_port *port) | |||
179 | return -ENOMEM; | 178 | return -ENOMEM; |
180 | 179 | ||
181 | spin_lock_init(&priv->lock); | 180 | spin_lock_init(&priv->lock); |
182 | init_waitqueue_head(&priv->delta_msr_wait); | ||
183 | priv->type = type; | 181 | priv->type = type; |
184 | 182 | ||
185 | usb_set_serial_port_data(port , priv); | 183 | usb_set_serial_port_data(port , priv); |
@@ -475,7 +473,7 @@ static void spcp8x5_process_read_urb(struct urb *urb) | |||
475 | priv->line_status &= ~UART_STATE_TRANSIENT_MASK; | 473 | priv->line_status &= ~UART_STATE_TRANSIENT_MASK; |
476 | spin_unlock_irqrestore(&priv->lock, flags); | 474 | spin_unlock_irqrestore(&priv->lock, flags); |
477 | /* wake up the wait for termios */ | 475 | /* wake up the wait for termios */ |
478 | wake_up_interruptible(&priv->delta_msr_wait); | 476 | wake_up_interruptible(&port->delta_msr_wait); |
479 | 477 | ||
480 | if (!urb->actual_length) | 478 | if (!urb->actual_length) |
481 | return; | 479 | return; |
@@ -526,12 +524,15 @@ static int spcp8x5_wait_modem_info(struct usb_serial_port *port, | |||
526 | 524 | ||
527 | while (1) { | 525 | while (1) { |
528 | /* wake up in bulk read */ | 526 | /* wake up in bulk read */ |
529 | interruptible_sleep_on(&priv->delta_msr_wait); | 527 | interruptible_sleep_on(&port->delta_msr_wait); |
530 | 528 | ||
531 | /* see if a signal did it */ | 529 | /* see if a signal did it */ |
532 | if (signal_pending(current)) | 530 | if (signal_pending(current)) |
533 | return -ERESTARTSYS; | 531 | return -ERESTARTSYS; |
534 | 532 | ||
533 | if (port->serial->disconnected) | ||
534 | return -EIO; | ||
535 | |||
535 | spin_lock_irqsave(&priv->lock, flags); | 536 | spin_lock_irqsave(&priv->lock, flags); |
536 | status = priv->line_status; | 537 | status = priv->line_status; |
537 | spin_unlock_irqrestore(&priv->lock, flags); | 538 | spin_unlock_irqrestore(&priv->lock, flags); |
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c index b57cf841c5b6..4b2a19757b4d 100644 --- a/drivers/usb/serial/ssu100.c +++ b/drivers/usb/serial/ssu100.c | |||
@@ -61,7 +61,6 @@ struct ssu100_port_private { | |||
61 | spinlock_t status_lock; | 61 | spinlock_t status_lock; |
62 | u8 shadowLSR; | 62 | u8 shadowLSR; |
63 | u8 shadowMSR; | 63 | u8 shadowMSR; |
64 | wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */ | ||
65 | struct async_icount icount; | 64 | struct async_icount icount; |
66 | }; | 65 | }; |
67 | 66 | ||
@@ -355,8 +354,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
355 | spin_unlock_irqrestore(&priv->status_lock, flags); | 354 | spin_unlock_irqrestore(&priv->status_lock, flags); |
356 | 355 | ||
357 | while (1) { | 356 | while (1) { |
358 | wait_event_interruptible(priv->delta_msr_wait, | 357 | wait_event_interruptible(port->delta_msr_wait, |
359 | ((priv->icount.rng != prev.rng) || | 358 | (port->serial->disconnected || |
359 | (priv->icount.rng != prev.rng) || | ||
360 | (priv->icount.dsr != prev.dsr) || | 360 | (priv->icount.dsr != prev.dsr) || |
361 | (priv->icount.dcd != prev.dcd) || | 361 | (priv->icount.dcd != prev.dcd) || |
362 | (priv->icount.cts != prev.cts))); | 362 | (priv->icount.cts != prev.cts))); |
@@ -364,6 +364,9 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) | |||
364 | if (signal_pending(current)) | 364 | if (signal_pending(current)) |
365 | return -ERESTARTSYS; | 365 | return -ERESTARTSYS; |
366 | 366 | ||
367 | if (port->serial->disconnected) | ||
368 | return -EIO; | ||
369 | |||
367 | spin_lock_irqsave(&priv->status_lock, flags); | 370 | spin_lock_irqsave(&priv->status_lock, flags); |
368 | cur = priv->icount; | 371 | cur = priv->icount; |
369 | spin_unlock_irqrestore(&priv->status_lock, flags); | 372 | spin_unlock_irqrestore(&priv->status_lock, flags); |
@@ -445,7 +448,6 @@ static int ssu100_port_probe(struct usb_serial_port *port) | |||
445 | return -ENOMEM; | 448 | return -ENOMEM; |
446 | 449 | ||
447 | spin_lock_init(&priv->status_lock); | 450 | spin_lock_init(&priv->status_lock); |
448 | init_waitqueue_head(&priv->delta_msr_wait); | ||
449 | 451 | ||
450 | usb_set_serial_port_data(port, priv); | 452 | usb_set_serial_port_data(port, priv); |
451 | 453 | ||
@@ -537,7 +539,7 @@ static void ssu100_update_msr(struct usb_serial_port *port, u8 msr) | |||
537 | priv->icount.dcd++; | 539 | priv->icount.dcd++; |
538 | if (msr & UART_MSR_TERI) | 540 | if (msr & UART_MSR_TERI) |
539 | priv->icount.rng++; | 541 | priv->icount.rng++; |
540 | wake_up_interruptible(&priv->delta_msr_wait); | 542 | wake_up_interruptible(&port->delta_msr_wait); |
541 | } | 543 | } |
542 | } | 544 | } |
543 | 545 | ||
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 39cb9b807c3c..73deb029fc05 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
@@ -74,7 +74,6 @@ struct ti_port { | |||
74 | int tp_flags; | 74 | int tp_flags; |
75 | int tp_closing_wait;/* in .01 secs */ | 75 | int tp_closing_wait;/* in .01 secs */ |
76 | struct async_icount tp_icount; | 76 | struct async_icount tp_icount; |
77 | wait_queue_head_t tp_msr_wait; /* wait for msr change */ | ||
78 | wait_queue_head_t tp_write_wait; | 77 | wait_queue_head_t tp_write_wait; |
79 | struct ti_device *tp_tdev; | 78 | struct ti_device *tp_tdev; |
80 | struct usb_serial_port *tp_port; | 79 | struct usb_serial_port *tp_port; |
@@ -432,7 +431,6 @@ static int ti_port_probe(struct usb_serial_port *port) | |||
432 | else | 431 | else |
433 | tport->tp_uart_base_addr = TI_UART2_BASE_ADDR; | 432 | tport->tp_uart_base_addr = TI_UART2_BASE_ADDR; |
434 | tport->tp_closing_wait = closing_wait; | 433 | tport->tp_closing_wait = closing_wait; |
435 | init_waitqueue_head(&tport->tp_msr_wait); | ||
436 | init_waitqueue_head(&tport->tp_write_wait); | 434 | init_waitqueue_head(&tport->tp_write_wait); |
437 | if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE, GFP_KERNEL)) { | 435 | if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE, GFP_KERNEL)) { |
438 | kfree(tport); | 436 | kfree(tport); |
@@ -784,9 +782,13 @@ static int ti_ioctl(struct tty_struct *tty, | |||
784 | dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__); | 782 | dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__); |
785 | cprev = tport->tp_icount; | 783 | cprev = tport->tp_icount; |
786 | while (1) { | 784 | while (1) { |
787 | interruptible_sleep_on(&tport->tp_msr_wait); | 785 | interruptible_sleep_on(&port->delta_msr_wait); |
788 | if (signal_pending(current)) | 786 | if (signal_pending(current)) |
789 | return -ERESTARTSYS; | 787 | return -ERESTARTSYS; |
788 | |||
789 | if (port->serial->disconnected) | ||
790 | return -EIO; | ||
791 | |||
790 | cnow = tport->tp_icount; | 792 | cnow = tport->tp_icount; |
791 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && | 793 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && |
792 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) | 794 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) |
@@ -1392,7 +1394,7 @@ static void ti_handle_new_msr(struct ti_port *tport, __u8 msr) | |||
1392 | icount->dcd++; | 1394 | icount->dcd++; |
1393 | if (msr & TI_MSR_DELTA_RI) | 1395 | if (msr & TI_MSR_DELTA_RI) |
1394 | icount->rng++; | 1396 | icount->rng++; |
1395 | wake_up_interruptible(&tport->tp_msr_wait); | 1397 | wake_up_interruptible(&tport->tp_port->delta_msr_wait); |
1396 | spin_unlock_irqrestore(&tport->tp_lock, flags); | 1398 | spin_unlock_irqrestore(&tport->tp_lock, flags); |
1397 | } | 1399 | } |
1398 | 1400 | ||
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index a19ed74d770d..5d9b178484fd 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
@@ -151,6 +151,7 @@ static void destroy_serial(struct kref *kref) | |||
151 | } | 151 | } |
152 | } | 152 | } |
153 | 153 | ||
154 | usb_put_intf(serial->interface); | ||
154 | usb_put_dev(serial->dev); | 155 | usb_put_dev(serial->dev); |
155 | kfree(serial); | 156 | kfree(serial); |
156 | } | 157 | } |
@@ -620,7 +621,7 @@ static struct usb_serial *create_serial(struct usb_device *dev, | |||
620 | } | 621 | } |
621 | serial->dev = usb_get_dev(dev); | 622 | serial->dev = usb_get_dev(dev); |
622 | serial->type = driver; | 623 | serial->type = driver; |
623 | serial->interface = interface; | 624 | serial->interface = usb_get_intf(interface); |
624 | kref_init(&serial->kref); | 625 | kref_init(&serial->kref); |
625 | mutex_init(&serial->disc_mutex); | 626 | mutex_init(&serial->disc_mutex); |
626 | serial->minor = SERIAL_TTY_NO_MINOR; | 627 | serial->minor = SERIAL_TTY_NO_MINOR; |
@@ -902,6 +903,7 @@ static int usb_serial_probe(struct usb_interface *interface, | |||
902 | port->port.ops = &serial_port_ops; | 903 | port->port.ops = &serial_port_ops; |
903 | port->serial = serial; | 904 | port->serial = serial; |
904 | spin_lock_init(&port->lock); | 905 | spin_lock_init(&port->lock); |
906 | init_waitqueue_head(&port->delta_msr_wait); | ||
905 | /* Keep this for private driver use for the moment but | 907 | /* Keep this for private driver use for the moment but |
906 | should probably go away */ | 908 | should probably go away */ |
907 | INIT_WORK(&port->work, usb_serial_port_work); | 909 | INIT_WORK(&port->work, usb_serial_port_work); |
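
The core side of the same series: usb_serial_probe() now initialises one delta_msr_wait per port, and the interface pointer is reference-counted so it stays valid until the last reference to the usb_serial object is dropped. A hedged sketch of the ownership rules these hunks establish; the function name and reduced argument list are illustrative only.

#include <linux/usb.h>
#include <linux/usb/serial.h>

/*
 * In create_serial(): pin the interface for the lifetime of the usb_serial
 * object; in usb_serial_probe(): give every port the shared MSR wait queue
 * that the drivers above now sleep on.
 */
static void usb_serial_core_setup_sketch(struct usb_serial *serial,
					 struct usb_interface *interface,
					 struct usb_serial_port *port)
{
	serial->interface = usb_get_intf(interface);	/* paired with usb_put_intf()
							 * in destroy_serial() */
	init_waitqueue_head(&port->delta_msr_wait);
}
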
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index da04a074e790..1799335288bd 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -496,6 +496,13 @@ UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999, | |||
496 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 496 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
497 | US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG), | 497 | US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG), |
498 | 498 | ||
499 | /* Added by Dmitry Artamonow <mad_soft@inbox.ru> */ | ||
500 | UNUSUAL_DEV( 0x04e8, 0x5136, 0x0000, 0x9999, | ||
501 | "Samsung", | ||
502 | "YP-Z3", | ||
503 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
504 | US_FL_MAX_SECTORS_64), | ||
505 | |||
499 | /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>. | 506 | /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>. |
500 | * Device uses standards-violating 32-byte Bulk Command Block Wrappers and | 507 | * Device uses standards-violating 32-byte Bulk Command Block Wrappers and |
501 | * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011. | 508 | * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011. |
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 8189cb6a86af..7abc5c81af2c 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c | |||
@@ -346,6 +346,7 @@ static long vfio_pci_ioctl(void *device_data, | |||
346 | 346 | ||
347 | if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { | 347 | if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { |
348 | size_t size; | 348 | size_t size; |
349 | int max = vfio_pci_get_irq_count(vdev, hdr.index); | ||
349 | 350 | ||
350 | if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) | 351 | if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) |
351 | size = sizeof(uint8_t); | 352 | size = sizeof(uint8_t); |
@@ -355,7 +356,7 @@ static long vfio_pci_ioctl(void *device_data, | |||
355 | return -EINVAL; | 356 | return -EINVAL; |
356 | 357 | ||
357 | if (hdr.argsz - minsz < hdr.count * size || | 358 | if (hdr.argsz - minsz < hdr.count * size || |
358 | hdr.count > vfio_pci_get_irq_count(vdev, hdr.index)) | 359 | hdr.start >= max || hdr.start + hdr.count > max) |
359 | return -EINVAL; | 360 | return -EINVAL; |
360 | 361 | ||
361 | data = memdup_user((void __user *)(arg + minsz), | 362 | data = memdup_user((void __user *)(arg + minsz), |
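
In the VFIO_DEVICE_SET_IRQS path above, the old test only compared hdr.count against the number of IRQs and never looked at hdr.start, so a window starting past the end could slip through; the new test bounds the whole [start, start + count) range. An equivalent (not literal) formulation of that check as a stand-alone helper:

#include <linux/types.h>

/*
 * Accept a request touching IRQ indices [start, start + count) only if the
 * whole window lies below max; writing the second comparison as
 * "count <= max - start" also avoids unsigned wrap-around.
 */
static bool irq_window_ok(unsigned int start, unsigned int count,
			  unsigned int max)
{
	return start < max && count <= max - start;
}

Hoisting vfio_pci_get_irq_count() into a local max also keeps the count from being re-evaluated between the two comparisons.
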
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 964ff22bf281..aeb00fc2d3be 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/pci.h> | 27 | #include <linux/pci.h> |
28 | #include <linux/uaccess.h> | 28 | #include <linux/uaccess.h> |
29 | #include <linux/vfio.h> | 29 | #include <linux/vfio.h> |
30 | #include <linux/slab.h> | ||
30 | 31 | ||
31 | #include "vfio_pci_private.h" | 32 | #include "vfio_pci_private.h" |
32 | 33 | ||
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 3639371fa697..a96509187deb 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/vfio.h> | 22 | #include <linux/vfio.h> |
23 | #include <linux/wait.h> | 23 | #include <linux/wait.h> |
24 | #include <linux/workqueue.h> | 24 | #include <linux/workqueue.h> |
25 | #include <linux/slab.h> | ||
25 | 26 | ||
26 | #include "vfio_pci_private.h" | 27 | #include "vfio_pci_private.h" |
27 | 28 | ||
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 959b1cd89e6a..ec6fb3fa59bb 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -339,7 +339,8 @@ static void handle_tx(struct vhost_net *net) | |||
339 | msg.msg_controllen = 0; | 339 | msg.msg_controllen = 0; |
340 | ubufs = NULL; | 340 | ubufs = NULL; |
341 | } else { | 341 | } else { |
342 | struct ubuf_info *ubuf = &vq->ubuf_info[head]; | 342 | struct ubuf_info *ubuf; |
343 | ubuf = vq->ubuf_info + vq->upend_idx; | ||
343 | 344 | ||
344 | vq->heads[vq->upend_idx].len = | 345 | vq->heads[vq->upend_idx].len = |
345 | VHOST_DMA_IN_PROGRESS; | 346 | VHOST_DMA_IN_PROGRESS; |
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c index 12cf5f31ee8f..025428e04c33 100644 --- a/drivers/video/atmel_lcdfb.c +++ b/drivers/video/atmel_lcdfb.c | |||
@@ -422,17 +422,22 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var, | |||
422 | = var->bits_per_pixel; | 422 | = var->bits_per_pixel; |
423 | break; | 423 | break; |
424 | case 16: | 424 | case 16: |
425 | /* Older SOCs use IBGR:555 rather than BGR:565. */ | ||
426 | if (sinfo->have_intensity_bit) | ||
427 | var->green.length = 5; | ||
428 | else | ||
429 | var->green.length = 6; | ||
430 | |||
425 | if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) { | 431 | if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) { |
426 | /* RGB:565 mode */ | 432 | /* RGB:5X5 mode */ |
427 | var->red.offset = 11; | 433 | var->red.offset = var->green.length + 5; |
428 | var->blue.offset = 0; | 434 | var->blue.offset = 0; |
429 | } else { | 435 | } else { |
430 | /* BGR:565 mode */ | 436 | /* BGR:5X5 mode */ |
431 | var->red.offset = 0; | 437 | var->red.offset = 0; |
432 | var->blue.offset = 11; | 438 | var->blue.offset = var->green.length + 5; |
433 | } | 439 | } |
434 | var->green.offset = 5; | 440 | var->green.offset = 5; |
435 | var->green.length = 6; | ||
436 | var->red.length = var->blue.length = 5; | 441 | var->red.length = var->blue.length = 5; |
437 | break; | 442 | break; |
438 | case 32: | 443 | case 32: |
@@ -679,8 +684,7 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red, | |||
679 | 684 | ||
680 | case FB_VISUAL_PSEUDOCOLOR: | 685 | case FB_VISUAL_PSEUDOCOLOR: |
681 | if (regno < 256) { | 686 | if (regno < 256) { |
682 | if (cpu_is_at91sam9261() || cpu_is_at91sam9263() | 687 | if (sinfo->have_intensity_bit) { |
683 | || cpu_is_at91sam9rl()) { | ||
684 | /* old style I+BGR:555 */ | 688 | /* old style I+BGR:555 */ |
685 | val = ((red >> 11) & 0x001f); | 689 | val = ((red >> 11) & 0x001f); |
686 | val |= ((green >> 6) & 0x03e0); | 690 | val |= ((green >> 6) & 0x03e0); |
@@ -870,6 +874,10 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev) | |||
870 | } | 874 | } |
871 | sinfo->info = info; | 875 | sinfo->info = info; |
872 | sinfo->pdev = pdev; | 876 | sinfo->pdev = pdev; |
877 | if (cpu_is_at91sam9261() || cpu_is_at91sam9263() || | ||
878 | cpu_is_at91sam9rl()) { | ||
879 | sinfo->have_intensity_bit = true; | ||
880 | } | ||
873 | 881 | ||
874 | strcpy(info->fix.id, sinfo->pdev->name); | 882 | strcpy(info->fix.id, sinfo->pdev->name); |
875 | info->flags = ATMEL_LCDFB_FBINFO_DEFAULT; | 883 | info->flags = ATMEL_LCDFB_FBINFO_DEFAULT; |
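
Older AT91 SoCs drive the panel in IBGR:555 (an intensity bit plus 5-bit green) instead of BGR:565, so the 16 bpp hunk above derives the red/blue offsets from green.length rather than hard-coding 11. A sketch of the resulting layouts; the helper name is illustrative and the have_intensity_bit / lcd_wiring_mode inputs are reduced to two booleans.

#include <linux/fb.h>
#include <linux/types.h>

/*
 * 16 bpp layouts produced by the hunk above (offset = bit position of LSB):
 *
 *   intensity bit  wiring  red.off  green.off  blue.off  green.len
 *        yes        RGB      10         5         0          5
 *        yes        BGR       0         5        10          5
 *        no         RGB      11         5         0          6
 *        no         BGR       0         5        11          6
 */
static void atmel_16bpp_layout_sketch(struct fb_var_screeninfo *var,
				      bool intensity, bool rgb_wiring)
{
	var->green.offset = 5;
	var->green.length = intensity ? 5 : 6;
	var->red.length = var->blue.length = 5;
	if (rgb_wiring) {
		var->red.offset = var->green.length + 5;
		var->blue.offset = 0;
	} else {
		var->red.offset = 0;
		var->blue.offset = var->green.length + 5;
	}
}
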
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c index 3f2519d30715..e06cd5d90c97 100644 --- a/drivers/video/ep93xx-fb.c +++ b/drivers/video/ep93xx-fb.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/clk.h> | 24 | #include <linux/clk.h> |
25 | #include <linux/fb.h> | 25 | #include <linux/fb.h> |
26 | #include <linux/io.h> | ||
26 | 27 | ||
27 | #include <linux/platform_data/video-ep93xx.h> | 28 | #include <linux/platform_data/video-ep93xx.h> |
28 | 29 | ||
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c index 94ad0f71383c..7f6709991a5c 100644 --- a/drivers/video/fbmon.c +++ b/drivers/video/fbmon.c | |||
@@ -1400,7 +1400,7 @@ int fb_videomode_from_videomode(const struct videomode *vm, | |||
1400 | fbmode->vmode = 0; | 1400 | fbmode->vmode = 0; |
1401 | if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) | 1401 | if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) |
1402 | fbmode->sync |= FB_SYNC_HOR_HIGH_ACT; | 1402 | fbmode->sync |= FB_SYNC_HOR_HIGH_ACT; |
1403 | if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) | 1403 | if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH) |
1404 | fbmode->sync |= FB_SYNC_VERT_HIGH_ACT; | 1404 | fbmode->sync |= FB_SYNC_VERT_HIGH_ACT; |
1405 | if (vm->data_flags & DISPLAY_FLAGS_INTERLACED) | 1405 | if (vm->data_flags & DISPLAY_FLAGS_INTERLACED) |
1406 | fbmode->vmode |= FB_VMODE_INTERLACED; | 1406 | fbmode->vmode |= FB_VMODE_INTERLACED; |
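
The fbmon hunk fixes a copy-and-paste bug: both polarity checks tested VESA_DMT_HSYNC_HIGH, so FB_SYNC_VERT_HIGH_ACT was set from the horizontal flag and a vsync-high-only mode was reported with the wrong polarity. The intended mapping, as a small sketch (VESA_DMT_* come from the videomode headers already used by fb_videomode_from_videomode()):

#include <linux/fb.h>

/* Each source polarity flag must feed its own destination sync bit. */
static u32 dmt_flags_to_fb_sync(u32 dmt_flags)
{
	u32 sync = 0;

	if (dmt_flags & VESA_DMT_HSYNC_HIGH)
		sync |= FB_SYNC_HOR_HIGH_ACT;
	if (dmt_flags & VESA_DMT_VSYNC_HIGH)	/* previously tested HSYNC_HIGH */
		sync |= FB_SYNC_VERT_HIGH_ACT;
	return sync;
}
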
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c index 755556ca5b2d..45169cbaba6e 100644 --- a/drivers/video/mxsfb.c +++ b/drivers/video/mxsfb.c | |||
@@ -169,6 +169,7 @@ struct mxsfb_info { | |||
169 | unsigned dotclk_delay; | 169 | unsigned dotclk_delay; |
170 | const struct mxsfb_devdata *devdata; | 170 | const struct mxsfb_devdata *devdata; |
171 | int mapped; | 171 | int mapped; |
172 | u32 sync; | ||
172 | }; | 173 | }; |
173 | 174 | ||
174 | #define mxsfb_is_v3(host) (host->devdata->ipversion == 3) | 175 | #define mxsfb_is_v3(host) (host->devdata->ipversion == 3) |
@@ -456,9 +457,9 @@ static int mxsfb_set_par(struct fb_info *fb_info) | |||
456 | vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH; | 457 | vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH; |
457 | if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT) | 458 | if (fb_info->var.sync & FB_SYNC_VERT_HIGH_ACT) |
458 | vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH; | 459 | vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH; |
459 | if (fb_info->var.sync & FB_SYNC_DATA_ENABLE_HIGH_ACT) | 460 | if (host->sync & MXSFB_SYNC_DATA_ENABLE_HIGH_ACT) |
460 | vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH; | 461 | vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH; |
461 | if (fb_info->var.sync & FB_SYNC_DOTCLK_FAILING_ACT) | 462 | if (host->sync & MXSFB_SYNC_DOTCLK_FAILING_ACT) |
462 | vdctrl0 |= VDCTRL0_DOTCLK_ACT_FAILING; | 463 | vdctrl0 |= VDCTRL0_DOTCLK_ACT_FAILING; |
463 | 464 | ||
464 | writel(vdctrl0, host->base + LCDC_VDCTRL0); | 465 | writel(vdctrl0, host->base + LCDC_VDCTRL0); |
@@ -861,6 +862,8 @@ static int mxsfb_probe(struct platform_device *pdev) | |||
861 | 862 | ||
862 | INIT_LIST_HEAD(&fb_info->modelist); | 863 | INIT_LIST_HEAD(&fb_info->modelist); |
863 | 864 | ||
865 | host->sync = pdata->sync; | ||
866 | |||
864 | ret = mxsfb_init_fbinfo(host); | 867 | ret = mxsfb_init_fbinfo(host); |
865 | if (ret != 0) | 868 | if (ret != 0) |
866 | goto error_init_fb; | 869 | goto error_init_fb; |
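
mxsfb previously overloaded fb_info->var.sync with controller-specific data-enable and dot-clock polarities; those now come from platform data and are cached in host->sync at probe time, while the standard H/V sync polarities stay in the fb var. A hedged sketch of the selection logic after this change; VDCTRL0_* and MXSFB_SYNC_* are the driver's and platform-data's own macros, exactly as used in the hunk.

#include <linux/fb.h>

/* Standard polarities from the fb var, controller-specific ones from the
 * platform data captured in host->sync at probe time. */
static u32 mxsfb_vdctrl0_polarity_sketch(const struct fb_var_screeninfo *var,
					 u32 pdata_sync)
{
	u32 vdctrl0 = 0;

	if (var->sync & FB_SYNC_HOR_HIGH_ACT)
		vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
	if (var->sync & FB_SYNC_VERT_HIGH_ACT)
		vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
	if (pdata_sync & MXSFB_SYNC_DATA_ENABLE_HIGH_ACT)
		vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
	if (pdata_sync & MXSFB_SYNC_DOTCLK_FAILING_ACT)	/* "FAILING" as spelled in the tree */
		vdctrl0 |= VDCTRL0_DOTCLK_ACT_FAILING;
	return vdctrl0;
}
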
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c index e31f5b33b501..d40612c31a98 100644 --- a/drivers/video/omap/omapfb_main.c +++ b/drivers/video/omap/omapfb_main.c | |||
@@ -32,6 +32,8 @@ | |||
32 | 32 | ||
33 | #include <linux/omap-dma.h> | 33 | #include <linux/omap-dma.h> |
34 | 34 | ||
35 | #include <mach/hardware.h> | ||
36 | |||
35 | #include "omapfb.h" | 37 | #include "omapfb.h" |
36 | #include "lcdc.h" | 38 | #include "lcdc.h" |
37 | 39 | ||
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c index 6b6643911d29..048c98381ef6 100644 --- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c | |||
@@ -63,6 +63,9 @@ struct tpo_td043_device { | |||
63 | u32 power_on_resume:1; | 63 | u32 power_on_resume:1; |
64 | }; | 64 | }; |
65 | 65 | ||
66 | /* used to pass spi_device from SPI to DSS portion of the driver */ | ||
67 | static struct tpo_td043_device *g_tpo_td043; | ||
68 | |||
66 | static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data) | 69 | static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data) |
67 | { | 70 | { |
68 | struct spi_message m; | 71 | struct spi_message m; |
@@ -403,7 +406,7 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev) | |||
403 | 406 | ||
404 | static int tpo_td043_probe(struct omap_dss_device *dssdev) | 407 | static int tpo_td043_probe(struct omap_dss_device *dssdev) |
405 | { | 408 | { |
406 | struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev); | 409 | struct tpo_td043_device *tpo_td043 = g_tpo_td043; |
407 | int nreset_gpio = dssdev->reset_gpio; | 410 | int nreset_gpio = dssdev->reset_gpio; |
408 | int ret = 0; | 411 | int ret = 0; |
409 | 412 | ||
@@ -440,6 +443,8 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev) | |||
440 | if (ret) | 443 | if (ret) |
441 | dev_warn(&dssdev->dev, "failed to create sysfs files\n"); | 444 | dev_warn(&dssdev->dev, "failed to create sysfs files\n"); |
442 | 445 | ||
446 | dev_set_drvdata(&dssdev->dev, tpo_td043); | ||
447 | |||
443 | return 0; | 448 | return 0; |
444 | 449 | ||
445 | fail_gpio_req: | 450 | fail_gpio_req: |
@@ -505,6 +510,9 @@ static int tpo_td043_spi_probe(struct spi_device *spi) | |||
505 | return -ENODEV; | 510 | return -ENODEV; |
506 | } | 511 | } |
507 | 512 | ||
513 | if (g_tpo_td043 != NULL) | ||
514 | return -EBUSY; | ||
515 | |||
508 | spi->bits_per_word = 16; | 516 | spi->bits_per_word = 16; |
509 | spi->mode = SPI_MODE_0; | 517 | spi->mode = SPI_MODE_0; |
510 | 518 | ||
@@ -521,7 +529,7 @@ static int tpo_td043_spi_probe(struct spi_device *spi) | |||
521 | tpo_td043->spi = spi; | 529 | tpo_td043->spi = spi; |
522 | tpo_td043->nreset_gpio = dssdev->reset_gpio; | 530 | tpo_td043->nreset_gpio = dssdev->reset_gpio; |
523 | dev_set_drvdata(&spi->dev, tpo_td043); | 531 | dev_set_drvdata(&spi->dev, tpo_td043); |
524 | dev_set_drvdata(&dssdev->dev, tpo_td043); | 532 | g_tpo_td043 = tpo_td043; |
525 | 533 | ||
526 | omap_dss_register_driver(&tpo_td043_driver); | 534 | omap_dss_register_driver(&tpo_td043_driver); |
527 | 535 | ||
@@ -534,6 +542,7 @@ static int tpo_td043_spi_remove(struct spi_device *spi) | |||
534 | 542 | ||
535 | omap_dss_unregister_driver(&tpo_td043_driver); | 543 | omap_dss_unregister_driver(&tpo_td043_driver); |
536 | kfree(tpo_td043); | 544 | kfree(tpo_td043); |
545 | g_tpo_td043 = NULL; | ||
537 | 546 | ||
538 | return 0; | 547 | return 0; |
539 | } | 548 | } |
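
The TPO TD043 panel is probed twice, once as an SPI device and once as an omapdss device, and the driver used to plant its private data in the dssdev's drvdata from the SPI probe. The hunks above route it through a single module-level pointer instead and only set the dssdev drvdata from the DSS probe, with -EBUSY enforcing the one-instance assumption. A reduced sketch of the handoff; struct tpo_td043_device and struct omap_dss_device are the driver's and omapdss's own types, and the NULL check on the DSS side is extra caution, not in the original.

#include <linux/device.h>
#include <linux/errno.h>

/* One panel per module: the SPI probe publishes the private data, the DSS
 * probe consumes it and only then attaches it to the dssdev. */
static struct tpo_td043_device *g_tpo_td043_sketch;

static int tpo_spi_side_probe_sketch(struct tpo_td043_device *tpo)
{
	if (g_tpo_td043_sketch)			/* a second panel is not supported */
		return -EBUSY;
	g_tpo_td043_sketch = tpo;
	return 0;
}

static int tpo_dss_side_probe_sketch(struct omap_dss_device *dssdev)
{
	struct tpo_td043_device *tpo = g_tpo_td043_sketch;

	if (!tpo)				/* defensive; not in the original */
		return -ENODEV;
	dev_set_drvdata(&dssdev->dev, tpo);
	return 0;
}
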
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c index d7d66ef5cb58..7f791aeda4d2 100644 --- a/drivers/video/omap2/dss/dss_features.c +++ b/drivers/video/omap2/dss/dss_features.c | |||
@@ -202,12 +202,10 @@ static const enum omap_dss_output_id omap3630_dss_supported_outputs[] = { | |||
202 | 202 | ||
203 | static const enum omap_dss_output_id omap4_dss_supported_outputs[] = { | 203 | static const enum omap_dss_output_id omap4_dss_supported_outputs[] = { |
204 | /* OMAP_DSS_CHANNEL_LCD */ | 204 | /* OMAP_DSS_CHANNEL_LCD */ |
205 | OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI | | 205 | OMAP_DSS_OUTPUT_DBI | OMAP_DSS_OUTPUT_DSI1, |
206 | OMAP_DSS_OUTPUT_DSI1, | ||
207 | 206 | ||
208 | /* OMAP_DSS_CHANNEL_DIGIT */ | 207 | /* OMAP_DSS_CHANNEL_DIGIT */ |
209 | OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI | | 208 | OMAP_DSS_OUTPUT_VENC | OMAP_DSS_OUTPUT_HDMI, |
210 | OMAP_DSS_OUTPUT_DPI, | ||
211 | 209 | ||
212 | /* OMAP_DSS_CHANNEL_LCD2 */ | 210 | /* OMAP_DSS_CHANNEL_LCD2 */ |
213 | OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI | | 211 | OMAP_DSS_OUTPUT_DPI | OMAP_DSS_OUTPUT_DBI | |
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 63203acef812..0264704a52be 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c | |||
@@ -858,6 +858,7 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch) | |||
858 | tmp = ((mode->xres & 7) << 24) | ((display_h_total & 7) << 16) | 858 | tmp = ((mode->xres & 7) << 24) | ((display_h_total & 7) << 16) |
859 | | ((mode->hsync_len & 7) << 8) | (hsync_pos & 7); | 859 | | ((mode->hsync_len & 7) << 8) | (hsync_pos & 7); |
860 | lcdc_write_chan(ch, LDHAJR, tmp); | 860 | lcdc_write_chan(ch, LDHAJR, tmp); |
861 | lcdc_write_chan_mirror(ch, LDHAJR, tmp); | ||
861 | } | 862 | } |
862 | 863 | ||
863 | static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl) | 864 | static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl) |
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index b75db0186488..d4284458377e 100644 --- a/drivers/video/uvesafb.c +++ b/drivers/video/uvesafb.c | |||
@@ -1973,7 +1973,8 @@ static int uvesafb_init(void) | |||
1973 | err = -ENOMEM; | 1973 | err = -ENOMEM; |
1974 | 1974 | ||
1975 | if (err) { | 1975 | if (err) { |
1976 | platform_device_put(uvesafb_device); | 1976 | if (uvesafb_device) |
1977 | platform_device_put(uvesafb_device); | ||
1977 | platform_driver_unregister(&uvesafb_driver); | 1978 | platform_driver_unregister(&uvesafb_driver); |
1978 | cn_del_callback(&uvesafb_cn_id); | 1979 | cn_del_callback(&uvesafb_cn_id); |
1979 | return err; | 1980 | return err; |
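
The uvesafb error path is shared between "platform_device_alloc() failed" and "a later step failed", so the put is now issued only when a device was actually allocated. A stripped-down sketch of that init error handling; the real uvesafb_init() also registers the connector callback and the platform driver, which are omitted here.

#include <linux/errno.h>
#include <linux/platform_device.h>

static int uvesafb_register_sketch(void)
{
	struct platform_device *pdev;
	int err;

	pdev = platform_device_alloc("uvesafb", 0);
	err = pdev ? platform_device_add(pdev) : -ENOMEM;
	if (err) {
		if (pdev)			/* nothing to drop if alloc failed */
			platform_device_put(pdev);
		return err;
	}
	return 0;
}
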
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c index e3b8f757d2d3..0e9d8c479c35 100644 --- a/drivers/watchdog/sp5100_tco.c +++ b/drivers/watchdog/sp5100_tco.c | |||
@@ -40,13 +40,12 @@ | |||
40 | #include "sp5100_tco.h" | 40 | #include "sp5100_tco.h" |
41 | 41 | ||
42 | /* Module and version information */ | 42 | /* Module and version information */ |
43 | #define TCO_VERSION "0.03" | 43 | #define TCO_VERSION "0.05" |
44 | #define TCO_MODULE_NAME "SP5100 TCO timer" | 44 | #define TCO_MODULE_NAME "SP5100 TCO timer" |
45 | #define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION | 45 | #define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION |
46 | 46 | ||
47 | /* internal variables */ | 47 | /* internal variables */ |
48 | static u32 tcobase_phys; | 48 | static u32 tcobase_phys; |
49 | static u32 resbase_phys; | ||
50 | static u32 tco_wdt_fired; | 49 | static u32 tco_wdt_fired; |
51 | static void __iomem *tcobase; | 50 | static void __iomem *tcobase; |
52 | static unsigned int pm_iobase; | 51 | static unsigned int pm_iobase; |
@@ -54,10 +53,6 @@ static DEFINE_SPINLOCK(tco_lock); /* Guards the hardware */ | |||
54 | static unsigned long timer_alive; | 53 | static unsigned long timer_alive; |
55 | static char tco_expect_close; | 54 | static char tco_expect_close; |
56 | static struct pci_dev *sp5100_tco_pci; | 55 | static struct pci_dev *sp5100_tco_pci; |
57 | static struct resource wdt_res = { | ||
58 | .name = "Watchdog Timer", | ||
59 | .flags = IORESOURCE_MEM, | ||
60 | }; | ||
61 | 56 | ||
62 | /* the watchdog platform device */ | 57 | /* the watchdog platform device */ |
63 | static struct platform_device *sp5100_tco_platform_device; | 58 | static struct platform_device *sp5100_tco_platform_device; |
@@ -75,12 +70,6 @@ module_param(nowayout, bool, 0); | |||
75 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started." | 70 | MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started." |
76 | " (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); | 71 | " (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); |
77 | 72 | ||
78 | static unsigned int force_addr; | ||
79 | module_param(force_addr, uint, 0); | ||
80 | MODULE_PARM_DESC(force_addr, "Force the use of specified MMIO address." | ||
81 | " ONLY USE THIS PARAMETER IF YOU REALLY KNOW" | ||
82 | " WHAT YOU ARE DOING (default=none)"); | ||
83 | |||
84 | /* | 73 | /* |
85 | * Some TCO specific functions | 74 | * Some TCO specific functions |
86 | */ | 75 | */ |
@@ -176,39 +165,6 @@ static void tco_timer_enable(void) | |||
176 | } | 165 | } |
177 | } | 166 | } |
178 | 167 | ||
179 | static void tco_timer_disable(void) | ||
180 | { | ||
181 | int val; | ||
182 | |||
183 | if (sp5100_tco_pci->revision >= 0x40) { | ||
184 | /* For SB800 or later */ | ||
185 | /* Enable watchdog decode bit and Disable watchdog timer */ | ||
186 | outb(SB800_PM_WATCHDOG_CONTROL, SB800_IO_PM_INDEX_REG); | ||
187 | val = inb(SB800_IO_PM_DATA_REG); | ||
188 | val |= SB800_PCI_WATCHDOG_DECODE_EN; | ||
189 | val |= SB800_PM_WATCHDOG_DISABLE; | ||
190 | outb(val, SB800_IO_PM_DATA_REG); | ||
191 | } else { | ||
192 | /* For SP5100 or SB7x0 */ | ||
193 | /* Enable watchdog decode bit */ | ||
194 | pci_read_config_dword(sp5100_tco_pci, | ||
195 | SP5100_PCI_WATCHDOG_MISC_REG, | ||
196 | &val); | ||
197 | |||
198 | val |= SP5100_PCI_WATCHDOG_DECODE_EN; | ||
199 | |||
200 | pci_write_config_dword(sp5100_tco_pci, | ||
201 | SP5100_PCI_WATCHDOG_MISC_REG, | ||
202 | val); | ||
203 | |||
204 | /* Disable Watchdog timer */ | ||
205 | outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG); | ||
206 | val = inb(SP5100_IO_PM_DATA_REG); | ||
207 | val |= SP5100_PM_WATCHDOG_DISABLE; | ||
208 | outb(val, SP5100_IO_PM_DATA_REG); | ||
209 | } | ||
210 | } | ||
211 | |||
212 | /* | 168 | /* |
213 | * /dev/watchdog handling | 169 | * /dev/watchdog handling |
214 | */ | 170 | */ |
@@ -361,7 +317,7 @@ static unsigned char sp5100_tco_setupdevice(void) | |||
361 | { | 317 | { |
362 | struct pci_dev *dev = NULL; | 318 | struct pci_dev *dev = NULL; |
363 | const char *dev_name = NULL; | 319 | const char *dev_name = NULL; |
364 | u32 val, tmp_val; | 320 | u32 val; |
365 | u32 index_reg, data_reg, base_addr; | 321 | u32 index_reg, data_reg, base_addr; |
366 | 322 | ||
367 | /* Match the PCI device */ | 323 | /* Match the PCI device */ |
@@ -459,63 +415,8 @@ static unsigned char sp5100_tco_setupdevice(void) | |||
459 | } else | 415 | } else |
460 | pr_debug("SBResource_MMIO is disabled(0x%04x)\n", val); | 416 | pr_debug("SBResource_MMIO is disabled(0x%04x)\n", val); |
461 | 417 | ||
462 | /* | 418 | pr_notice("failed to find MMIO address, giving up.\n"); |
463 | * Lastly re-programming the watchdog timer MMIO address, | 419 | goto unreg_region; |
464 | * This method is a last resort... | ||
465 | * | ||
466 | * Before re-programming, to ensure that the watchdog timer | ||
467 | * is disabled, disable the watchdog timer. | ||
468 | */ | ||
469 | tco_timer_disable(); | ||
470 | |||
471 | if (force_addr) { | ||
472 | /* | ||
473 | * Force the use of watchdog timer MMIO address, and aligned to | ||
474 | * 8byte boundary. | ||
475 | */ | ||
476 | force_addr &= ~0x7; | ||
477 | val = force_addr; | ||
478 | |||
479 | pr_info("Force the use of 0x%04x as MMIO address\n", val); | ||
480 | } else { | ||
481 | /* | ||
482 | * Get empty slot into the resource tree for watchdog timer. | ||
483 | */ | ||
484 | if (allocate_resource(&iomem_resource, | ||
485 | &wdt_res, | ||
486 | SP5100_WDT_MEM_MAP_SIZE, | ||
487 | 0xf0000000, | ||
488 | 0xfffffff8, | ||
489 | 0x8, | ||
490 | NULL, | ||
491 | NULL)) { | ||
492 | pr_err("MMIO allocation failed\n"); | ||
493 | goto unreg_region; | ||
494 | } | ||
495 | |||
496 | val = resbase_phys = wdt_res.start; | ||
497 | pr_debug("Got 0x%04x from resource tree\n", val); | ||
498 | } | ||
499 | |||
500 | /* Restore to the low three bits */ | ||
501 | outb(base_addr+0, index_reg); | ||
502 | tmp_val = val | (inb(data_reg) & 0x7); | ||
503 | |||
504 | /* Re-programming the watchdog timer base address */ | ||
505 | outb(base_addr+0, index_reg); | ||
506 | outb((tmp_val >> 0) & 0xff, data_reg); | ||
507 | outb(base_addr+1, index_reg); | ||
508 | outb((tmp_val >> 8) & 0xff, data_reg); | ||
509 | outb(base_addr+2, index_reg); | ||
510 | outb((tmp_val >> 16) & 0xff, data_reg); | ||
511 | outb(base_addr+3, index_reg); | ||
512 | outb((tmp_val >> 24) & 0xff, data_reg); | ||
513 | |||
514 | if (!request_mem_region_exclusive(val, SP5100_WDT_MEM_MAP_SIZE, | ||
515 | dev_name)) { | ||
516 | pr_err("MMIO address 0x%04x already in use\n", val); | ||
517 | goto unreg_resource; | ||
518 | } | ||
519 | 420 | ||
520 | setup_wdt: | 421 | setup_wdt: |
521 | tcobase_phys = val; | 422 | tcobase_phys = val; |
@@ -555,9 +456,6 @@ setup_wdt: | |||
555 | 456 | ||
556 | unreg_mem_region: | 457 | unreg_mem_region: |
557 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); | 458 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); |
558 | unreg_resource: | ||
559 | if (resbase_phys) | ||
560 | release_resource(&wdt_res); | ||
561 | unreg_region: | 459 | unreg_region: |
562 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); | 460 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); |
563 | exit: | 461 | exit: |
@@ -567,7 +465,6 @@ exit: | |||
567 | static int sp5100_tco_init(struct platform_device *dev) | 465 | static int sp5100_tco_init(struct platform_device *dev) |
568 | { | 466 | { |
569 | int ret; | 467 | int ret; |
570 | char addr_str[16]; | ||
571 | 468 | ||
572 | /* | 469 | /* |
573 | * Check whether or not the hardware watchdog is there. If found, then | 470 | * Check whether or not the hardware watchdog is there. If found, then |
@@ -599,23 +496,14 @@ static int sp5100_tco_init(struct platform_device *dev) | |||
599 | clear_bit(0, &timer_alive); | 496 | clear_bit(0, &timer_alive); |
600 | 497 | ||
601 | /* Show module parameters */ | 498 | /* Show module parameters */ |
602 | if (force_addr == tcobase_phys) | 499 | pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d)\n", |
603 | /* The force_addr is vaild */ | 500 | tcobase, heartbeat, nowayout); |
604 | sprintf(addr_str, "0x%04x", force_addr); | ||
605 | else | ||
606 | strcpy(addr_str, "none"); | ||
607 | |||
608 | pr_info("initialized (0x%p). heartbeat=%d sec (nowayout=%d, " | ||
609 | "force_addr=%s)\n", | ||
610 | tcobase, heartbeat, nowayout, addr_str); | ||
611 | 501 | ||
612 | return 0; | 502 | return 0; |
613 | 503 | ||
614 | exit: | 504 | exit: |
615 | iounmap(tcobase); | 505 | iounmap(tcobase); |
616 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); | 506 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); |
617 | if (resbase_phys) | ||
618 | release_resource(&wdt_res); | ||
619 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); | 507 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); |
620 | return ret; | 508 | return ret; |
621 | } | 509 | } |
@@ -630,8 +518,6 @@ static void sp5100_tco_cleanup(void) | |||
630 | misc_deregister(&sp5100_tco_miscdev); | 518 | misc_deregister(&sp5100_tco_miscdev); |
631 | iounmap(tcobase); | 519 | iounmap(tcobase); |
632 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); | 520 | release_mem_region(tcobase_phys, SP5100_WDT_MEM_MAP_SIZE); |
633 | if (resbase_phys) | ||
634 | release_resource(&wdt_res); | ||
635 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); | 521 | release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE); |
636 | } | 522 | } |
637 | 523 | ||
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h index 71594a0c14b7..2b28c00da0df 100644 --- a/drivers/watchdog/sp5100_tco.h +++ b/drivers/watchdog/sp5100_tco.h | |||
@@ -57,7 +57,7 @@ | |||
57 | #define SB800_PM_WATCHDOG_DISABLE (1 << 2) | 57 | #define SB800_PM_WATCHDOG_DISABLE (1 << 2) |
58 | #define SB800_PM_WATCHDOG_SECOND_RES (3 << 0) | 58 | #define SB800_PM_WATCHDOG_SECOND_RES (3 << 0) |
59 | #define SB800_ACPI_MMIO_DECODE_EN (1 << 0) | 59 | #define SB800_ACPI_MMIO_DECODE_EN (1 << 0) |
60 | #define SB800_ACPI_MMIO_SEL (1 << 2) | 60 | #define SB800_ACPI_MMIO_SEL (1 << 1) |
61 | 61 | ||
62 | 62 | ||
63 | #define SB800_PM_WDT_MMIO_OFFSET 0xB00 | 63 | #define SB800_PM_WDT_MMIO_OFFSET 0xB00 |
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 5a32232cf7c1..67af155cf602 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig | |||
@@ -182,7 +182,7 @@ config XEN_PRIVCMD | |||
182 | 182 | ||
183 | config XEN_STUB | 183 | config XEN_STUB |
184 | bool "Xen stub drivers" | 184 | bool "Xen stub drivers" |
185 | depends on XEN && X86_64 | 185 | depends on XEN && X86_64 && BROKEN |
186 | default n | 186 | default n |
187 | help | 187 | help |
188 | Allow kernel to install stub drivers, to reserve space for Xen drivers, | 188 | Allow kernel to install stub drivers, to reserve space for Xen drivers, |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index d17aa41a9041..2647ad8e1f19 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -403,11 +403,23 @@ static void unmask_evtchn(int port) | |||
403 | 403 | ||
404 | if (unlikely((cpu != cpu_from_evtchn(port)))) | 404 | if (unlikely((cpu != cpu_from_evtchn(port)))) |
405 | do_hypercall = 1; | 405 | do_hypercall = 1; |
406 | else | 406 | else { |
407 | /* | ||
408 | * Need to clear the mask before checking pending to | ||
409 | * avoid a race with an event becoming pending. | ||
410 | * | ||
411 | * EVTCHNOP_unmask will only trigger an upcall if the | ||
412 | * mask bit was set, so if a hypercall is needed | ||
413 | * remask the event. | ||
414 | */ | ||
415 | sync_clear_bit(port, BM(&s->evtchn_mask[0])); | ||
407 | evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0])); | 416 | evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0])); |
408 | 417 | ||
409 | if (unlikely(evtchn_pending && xen_hvm_domain())) | 418 | if (unlikely(evtchn_pending && xen_hvm_domain())) { |
410 | do_hypercall = 1; | 419 | sync_set_bit(port, BM(&s->evtchn_mask[0])); |
420 | do_hypercall = 1; | ||
421 | } | ||
422 | } | ||
411 | 423 | ||
412 | /* Slow path (hypercall) if this is a non-local port or if this is | 424 | /* Slow path (hypercall) if this is a non-local port or if this is |
413 | * an hvm domain and an event is pending (hvm domains don't have | 425 | * an hvm domain and an event is pending (hvm domains don't have |
@@ -418,8 +430,6 @@ static void unmask_evtchn(int port) | |||
418 | } else { | 430 | } else { |
419 | struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); | 431 | struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); |
420 | 432 | ||
421 | sync_clear_bit(port, BM(&s->evtchn_mask[0])); | ||
422 | |||
423 | /* | 433 | /* |
424 | * The following is basically the equivalent of | 434 | * The following is basically the equivalent of |
425 | * 'hw_resend_irq'. Just like a real IO-APIC we 'lose | 435 | * 'hw_resend_irq'. Just like a real IO-APIC we 'lose |
@@ -1306,7 +1316,7 @@ static void __xen_evtchn_do_upcall(void) | |||
1306 | { | 1316 | { |
1307 | int start_word_idx, start_bit_idx; | 1317 | int start_word_idx, start_bit_idx; |
1308 | int word_idx, bit_idx; | 1318 | int word_idx, bit_idx; |
1309 | int i; | 1319 | int i, irq; |
1310 | int cpu = get_cpu(); | 1320 | int cpu = get_cpu(); |
1311 | struct shared_info *s = HYPERVISOR_shared_info; | 1321 | struct shared_info *s = HYPERVISOR_shared_info; |
1312 | struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); | 1322 | struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); |
@@ -1314,6 +1324,8 @@ static void __xen_evtchn_do_upcall(void) | |||
1314 | 1324 | ||
1315 | do { | 1325 | do { |
1316 | xen_ulong_t pending_words; | 1326 | xen_ulong_t pending_words; |
1327 | xen_ulong_t pending_bits; | ||
1328 | struct irq_desc *desc; | ||
1317 | 1329 | ||
1318 | vcpu_info->evtchn_upcall_pending = 0; | 1330 | vcpu_info->evtchn_upcall_pending = 0; |
1319 | 1331 | ||
@@ -1325,6 +1337,17 @@ static void __xen_evtchn_do_upcall(void) | |||
1325 | * selector flag. xchg_xen_ulong must contain an | 1337 | * selector flag. xchg_xen_ulong must contain an |
1326 | * appropriate barrier. | 1338 | * appropriate barrier. |
1327 | */ | 1339 | */ |
1340 | if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) { | ||
1341 | int evtchn = evtchn_from_irq(irq); | ||
1342 | word_idx = evtchn / BITS_PER_LONG; | ||
1343 | pending_bits = evtchn % BITS_PER_LONG; | ||
1344 | if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) { | ||
1345 | desc = irq_to_desc(irq); | ||
1346 | if (desc) | ||
1347 | generic_handle_irq_desc(irq, desc); | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1328 | pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0); | 1351 | pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0); |
1329 | 1352 | ||
1330 | start_word_idx = __this_cpu_read(current_word_idx); | 1353 | start_word_idx = __this_cpu_read(current_word_idx); |
@@ -1333,7 +1356,6 @@ static void __xen_evtchn_do_upcall(void) | |||
1333 | word_idx = start_word_idx; | 1356 | word_idx = start_word_idx; |
1334 | 1357 | ||
1335 | for (i = 0; pending_words != 0; i++) { | 1358 | for (i = 0; pending_words != 0; i++) { |
1336 | xen_ulong_t pending_bits; | ||
1337 | xen_ulong_t words; | 1359 | xen_ulong_t words; |
1338 | 1360 | ||
1339 | words = MASK_LSBS(pending_words, word_idx); | 1361 | words = MASK_LSBS(pending_words, word_idx); |
@@ -1362,8 +1384,7 @@ static void __xen_evtchn_do_upcall(void) | |||
1362 | 1384 | ||
1363 | do { | 1385 | do { |
1364 | xen_ulong_t bits; | 1386 | xen_ulong_t bits; |
1365 | int port, irq; | 1387 | int port; |
1366 | struct irq_desc *desc; | ||
1367 | 1388 | ||
1368 | bits = MASK_LSBS(pending_bits, bit_idx); | 1389 | bits = MASK_LSBS(pending_bits, bit_idx); |
1369 | 1390 | ||
diff --git a/drivers/xen/fallback.c b/drivers/xen/fallback.c index 0ef7c4d40f86..b04fb64c5a91 100644 --- a/drivers/xen/fallback.c +++ b/drivers/xen/fallback.c | |||
@@ -44,7 +44,7 @@ int xen_event_channel_op_compat(int cmd, void *arg) | |||
44 | } | 44 | } |
45 | EXPORT_SYMBOL_GPL(xen_event_channel_op_compat); | 45 | EXPORT_SYMBOL_GPL(xen_event_channel_op_compat); |
46 | 46 | ||
47 | int HYPERVISOR_physdev_op_compat(int cmd, void *arg) | 47 | int xen_physdev_op_compat(int cmd, void *arg) |
48 | { | 48 | { |
49 | struct physdev_op op; | 49 | struct physdev_op op; |
50 | int rc; | 50 | int rc; |
@@ -78,3 +78,4 @@ int HYPERVISOR_physdev_op_compat(int cmd, void *arg) | |||
78 | 78 | ||
79 | return rc; | 79 | return rc; |
80 | } | 80 | } |
81 | EXPORT_SYMBOL_GPL(xen_physdev_op_compat); | ||
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c index f3278a6603ca..90e34ac7e522 100644 --- a/drivers/xen/xen-acpi-processor.c +++ b/drivers/xen/xen-acpi-processor.c | |||
@@ -505,6 +505,9 @@ static int __init xen_acpi_processor_init(void) | |||
505 | 505 | ||
506 | pr = per_cpu(processors, i); | 506 | pr = per_cpu(processors, i); |
507 | perf = per_cpu_ptr(acpi_perf_data, i); | 507 | perf = per_cpu_ptr(acpi_perf_data, i); |
508 | if (!pr) | ||
509 | continue; | ||
510 | |||
508 | pr->performance = perf; | 511 | pr->performance = perf; |
509 | rc = acpi_processor_get_performance_info(pr); | 512 | rc = acpi_processor_get_performance_info(pr); |
510 | if (rc) | 513 | if (rc) |
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 9204126f1560..a2278ba7fb27 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <xen/events.h> | 17 | #include <xen/events.h> |
18 | #include <asm/xen/pci.h> | 18 | #include <asm/xen/pci.h> |
19 | #include <asm/xen/hypervisor.h> | 19 | #include <asm/xen/hypervisor.h> |
20 | #include <xen/interface/physdev.h> | ||
20 | #include "pciback.h" | 21 | #include "pciback.h" |
21 | #include "conf_space.h" | 22 | #include "conf_space.h" |
22 | #include "conf_space_quirks.h" | 23 | #include "conf_space_quirks.h" |
@@ -85,37 +86,52 @@ static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev) | |||
85 | static void pcistub_device_release(struct kref *kref) | 86 | static void pcistub_device_release(struct kref *kref) |
86 | { | 87 | { |
87 | struct pcistub_device *psdev; | 88 | struct pcistub_device *psdev; |
89 | struct pci_dev *dev; | ||
88 | struct xen_pcibk_dev_data *dev_data; | 90 | struct xen_pcibk_dev_data *dev_data; |
89 | 91 | ||
90 | psdev = container_of(kref, struct pcistub_device, kref); | 92 | psdev = container_of(kref, struct pcistub_device, kref); |
91 | dev_data = pci_get_drvdata(psdev->dev); | 93 | dev = psdev->dev; |
94 | dev_data = pci_get_drvdata(dev); | ||
92 | 95 | ||
93 | dev_dbg(&psdev->dev->dev, "pcistub_device_release\n"); | 96 | dev_dbg(&dev->dev, "pcistub_device_release\n"); |
94 | 97 | ||
95 | xen_unregister_device_domain_owner(psdev->dev); | 98 | xen_unregister_device_domain_owner(dev); |
96 | 99 | ||
97 | /* Call the reset function which does not take lock as this | 100 | /* Call the reset function which does not take lock as this |
98 | * is called from "unbind" which takes a device_lock mutex. | 101 | * is called from "unbind" which takes a device_lock mutex. |
99 | */ | 102 | */ |
100 | __pci_reset_function_locked(psdev->dev); | 103 | __pci_reset_function_locked(dev); |
101 | if (pci_load_and_free_saved_state(psdev->dev, | 104 | if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state)) |
102 | &dev_data->pci_saved_state)) { | 105 | dev_dbg(&dev->dev, "Could not reload PCI state\n"); |
103 | dev_dbg(&psdev->dev->dev, "Could not reload PCI state\n"); | 106 | else |
104 | } else | 107 | pci_restore_state(dev); |
105 | pci_restore_state(psdev->dev); | 108 | |
109 | if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) { | ||
110 | struct physdev_pci_device ppdev = { | ||
111 | .seg = pci_domain_nr(dev->bus), | ||
112 | .bus = dev->bus->number, | ||
113 | .devfn = dev->devfn | ||
114 | }; | ||
115 | int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix, | ||
116 | &ppdev); | ||
117 | |||
118 | if (err) | ||
119 | dev_warn(&dev->dev, "MSI-X release failed (%d)\n", | ||
120 | err); | ||
121 | } | ||
106 | 122 | ||
107 | /* Disable the device */ | 123 | /* Disable the device */ |
108 | xen_pcibk_reset_device(psdev->dev); | 124 | xen_pcibk_reset_device(dev); |
109 | 125 | ||
110 | kfree(dev_data); | 126 | kfree(dev_data); |
111 | pci_set_drvdata(psdev->dev, NULL); | 127 | pci_set_drvdata(dev, NULL); |
112 | 128 | ||
113 | /* Clean-up the device */ | 129 | /* Clean-up the device */ |
114 | xen_pcibk_config_free_dyn_fields(psdev->dev); | 130 | xen_pcibk_config_free_dyn_fields(dev); |
115 | xen_pcibk_config_free_dev(psdev->dev); | 131 | xen_pcibk_config_free_dev(dev); |
116 | 132 | ||
117 | psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; | 133 | dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED; |
118 | pci_dev_put(psdev->dev); | 134 | pci_dev_put(dev); |
119 | 135 | ||
120 | kfree(psdev); | 136 | kfree(psdev); |
121 | } | 137 | } |
@@ -355,6 +371,19 @@ static int pcistub_init_device(struct pci_dev *dev) | |||
355 | if (err) | 371 | if (err) |
356 | goto config_release; | 372 | goto config_release; |
357 | 373 | ||
374 | if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) { | ||
375 | struct physdev_pci_device ppdev = { | ||
376 | .seg = pci_domain_nr(dev->bus), | ||
377 | .bus = dev->bus->number, | ||
378 | .devfn = dev->devfn | ||
379 | }; | ||
380 | |||
381 | err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev); | ||
382 | if (err) | ||
383 | dev_err(&dev->dev, "MSI-X preparation failed (%d)\n", | ||
384 | err); | ||
385 | } | ||
386 | |||
358 | /* We need the device active to save the state. */ | 387 | /* We need the device active to save the state. */ |
359 | dev_dbg(&dev->dev, "save state of device\n"); | 388 | dev_dbg(&dev->dev, "save state of device\n"); |
360 | pci_save_state(dev); | 389 | pci_save_state(dev); |
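Both pci_stub.c hunks above issue the same PHYSDEVOP hypercall shape, differing only in the operation (PHYSDEVOP_prepare_msix when the device is initialised, PHYSDEVOP_release_msix when the stub device is released). A minimal sketch of that shared pattern follows; the helper name is an assumption made here, not something the patch introduces.

#include <linux/pci.h>
#include <xen/interface/physdev.h>
#include <asm/xen/hypercall.h>

/* Hypothetical helper: issue an MSI-X prepare/release physdev op for a
 * device, but only when it actually exposes an MSI-X capability. */
static int pcistub_msix_physdev_op(struct pci_dev *dev, int op)
{
	struct physdev_pci_device ppdev = {
		.seg   = pci_domain_nr(dev->bus),
		.bus   = dev->bus->number,
		.devfn = dev->devfn
	};

	if (!pci_find_capability(dev, PCI_CAP_ID_MSIX))
		return 0;

	/* op is PHYSDEVOP_prepare_msix in pcistub_init_device() and
	 * PHYSDEVOP_release_msix in pcistub_device_release(). */
	return HYPERVISOR_physdev_op(op, &ppdev);
}

The real hunks keep the two call sites open-coded; the helper above only makes the common structure easier to see.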
diff --git a/fs/block_dev.c b/fs/block_dev.c index aea605c98ba6..aae187a7f94a 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -551,6 +551,7 @@ struct block_device *bdgrab(struct block_device *bdev) | |||
551 | ihold(bdev->bd_inode); | 551 | ihold(bdev->bd_inode); |
552 | return bdev; | 552 | return bdev; |
553 | } | 553 | } |
554 | EXPORT_SYMBOL(bdgrab); | ||
554 | 555 | ||
555 | long nr_blockdev_pages(void) | 556 | long nr_blockdev_pages(void) |
556 | { | 557 | { |
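Exporting bdgrab() lets modules pin a block_device they already reference across deferred work, pairing it with bdput() when done. The sketch below illustrates that pairing under stated assumptions: deferred_io and its helpers are hypothetical, not part of this patch.

#include <linux/fs.h>
#include <linux/blkdev.h>

/* Hypothetical context object that outlives the submitting call chain. */
struct deferred_io {
	struct block_device *bdev;
};

static void deferred_io_start(struct deferred_io *dio,
			      struct block_device *bdev)
{
	/* bdgrab() only bumps the inode refcount, so the caller must
	 * already hold a valid reference to bdev at this point. */
	dio->bdev = bdgrab(bdev);
}

static void deferred_io_finish(struct deferred_io *dio)
{
	bdput(dio->bdev);	/* balances the bdgrab() above */
	dio->bdev = NULL;
}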
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index ecd25a1b4e51..ca9d8f1a3bb6 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
@@ -651,6 +651,8 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info, | |||
651 | if (tree_mod_dont_log(fs_info, NULL)) | 651 | if (tree_mod_dont_log(fs_info, NULL)) |
652 | return 0; | 652 | return 0; |
653 | 653 | ||
654 | __tree_mod_log_free_eb(fs_info, old_root); | ||
655 | |||
654 | ret = tree_mod_alloc(fs_info, flags, &tm); | 656 | ret = tree_mod_alloc(fs_info, flags, &tm); |
655 | if (ret < 0) | 657 | if (ret < 0) |
656 | goto out; | 658 | goto out; |
@@ -736,7 +738,7 @@ tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq) | |||
736 | static noinline void | 738 | static noinline void |
737 | tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst, | 739 | tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst, |
738 | struct extent_buffer *src, unsigned long dst_offset, | 740 | struct extent_buffer *src, unsigned long dst_offset, |
739 | unsigned long src_offset, int nr_items) | 741 | unsigned long src_offset, int nr_items, int log_removal) |
740 | { | 742 | { |
741 | int ret; | 743 | int ret; |
742 | int i; | 744 | int i; |
@@ -750,10 +752,12 @@ tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst, | |||
750 | } | 752 | } |
751 | 753 | ||
752 | for (i = 0; i < nr_items; i++) { | 754 | for (i = 0; i < nr_items; i++) { |
753 | ret = tree_mod_log_insert_key_locked(fs_info, src, | 755 | if (log_removal) { |
754 | i + src_offset, | 756 | ret = tree_mod_log_insert_key_locked(fs_info, src, |
755 | MOD_LOG_KEY_REMOVE); | 757 | i + src_offset, |
756 | BUG_ON(ret < 0); | 758 | MOD_LOG_KEY_REMOVE); |
759 | BUG_ON(ret < 0); | ||
760 | } | ||
757 | ret = tree_mod_log_insert_key_locked(fs_info, dst, | 761 | ret = tree_mod_log_insert_key_locked(fs_info, dst, |
758 | i + dst_offset, | 762 | i + dst_offset, |
759 | MOD_LOG_KEY_ADD); | 763 | MOD_LOG_KEY_ADD); |
@@ -927,7 +931,6 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, | |||
927 | ret = btrfs_dec_ref(trans, root, buf, 1, 1); | 931 | ret = btrfs_dec_ref(trans, root, buf, 1, 1); |
928 | BUG_ON(ret); /* -ENOMEM */ | 932 | BUG_ON(ret); /* -ENOMEM */ |
929 | } | 933 | } |
930 | tree_mod_log_free_eb(root->fs_info, buf); | ||
931 | clean_tree_block(trans, root, buf); | 934 | clean_tree_block(trans, root, buf); |
932 | *last_ref = 1; | 935 | *last_ref = 1; |
933 | } | 936 | } |
@@ -1046,6 +1049,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, | |||
1046 | btrfs_set_node_ptr_generation(parent, parent_slot, | 1049 | btrfs_set_node_ptr_generation(parent, parent_slot, |
1047 | trans->transid); | 1050 | trans->transid); |
1048 | btrfs_mark_buffer_dirty(parent); | 1051 | btrfs_mark_buffer_dirty(parent); |
1052 | tree_mod_log_free_eb(root->fs_info, buf); | ||
1049 | btrfs_free_tree_block(trans, root, buf, parent_start, | 1053 | btrfs_free_tree_block(trans, root, buf, parent_start, |
1050 | last_ref); | 1054 | last_ref); |
1051 | } | 1055 | } |
@@ -1750,7 +1754,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, | |||
1750 | goto enospc; | 1754 | goto enospc; |
1751 | } | 1755 | } |
1752 | 1756 | ||
1753 | tree_mod_log_free_eb(root->fs_info, root->node); | ||
1754 | tree_mod_log_set_root_pointer(root, child); | 1757 | tree_mod_log_set_root_pointer(root, child); |
1755 | rcu_assign_pointer(root->node, child); | 1758 | rcu_assign_pointer(root->node, child); |
1756 | 1759 | ||
@@ -2995,7 +2998,7 @@ static int push_node_left(struct btrfs_trans_handle *trans, | |||
2995 | push_items = min(src_nritems - 8, push_items); | 2998 | push_items = min(src_nritems - 8, push_items); |
2996 | 2999 | ||
2997 | tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0, | 3000 | tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0, |
2998 | push_items); | 3001 | push_items, 1); |
2999 | copy_extent_buffer(dst, src, | 3002 | copy_extent_buffer(dst, src, |
3000 | btrfs_node_key_ptr_offset(dst_nritems), | 3003 | btrfs_node_key_ptr_offset(dst_nritems), |
3001 | btrfs_node_key_ptr_offset(0), | 3004 | btrfs_node_key_ptr_offset(0), |
@@ -3066,7 +3069,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans, | |||
3066 | sizeof(struct btrfs_key_ptr)); | 3069 | sizeof(struct btrfs_key_ptr)); |
3067 | 3070 | ||
3068 | tree_mod_log_eb_copy(root->fs_info, dst, src, 0, | 3071 | tree_mod_log_eb_copy(root->fs_info, dst, src, 0, |
3069 | src_nritems - push_items, push_items); | 3072 | src_nritems - push_items, push_items, 1); |
3070 | copy_extent_buffer(dst, src, | 3073 | copy_extent_buffer(dst, src, |
3071 | btrfs_node_key_ptr_offset(0), | 3074 | btrfs_node_key_ptr_offset(0), |
3072 | btrfs_node_key_ptr_offset(src_nritems - push_items), | 3075 | btrfs_node_key_ptr_offset(src_nritems - push_items), |
@@ -3218,12 +3221,18 @@ static noinline int split_node(struct btrfs_trans_handle *trans, | |||
3218 | int mid; | 3221 | int mid; |
3219 | int ret; | 3222 | int ret; |
3220 | u32 c_nritems; | 3223 | u32 c_nritems; |
3224 | int tree_mod_log_removal = 1; | ||
3221 | 3225 | ||
3222 | c = path->nodes[level]; | 3226 | c = path->nodes[level]; |
3223 | WARN_ON(btrfs_header_generation(c) != trans->transid); | 3227 | WARN_ON(btrfs_header_generation(c) != trans->transid); |
3224 | if (c == root->node) { | 3228 | if (c == root->node) { |
3225 | /* trying to split the root, lets make a new one */ | 3229 | /* trying to split the root, lets make a new one */ |
3226 | ret = insert_new_root(trans, root, path, level + 1); | 3230 | ret = insert_new_root(trans, root, path, level + 1); |
3231 | /* | ||
3232 | * removal of root nodes has been logged by | ||
3233 | * tree_mod_log_set_root_pointer due to locking | ||
3234 | */ | ||
3235 | tree_mod_log_removal = 0; | ||
3227 | if (ret) | 3236 | if (ret) |
3228 | return ret; | 3237 | return ret; |
3229 | } else { | 3238 | } else { |
@@ -3261,7 +3270,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans, | |||
3261 | (unsigned long)btrfs_header_chunk_tree_uuid(split), | 3270 | (unsigned long)btrfs_header_chunk_tree_uuid(split), |
3262 | BTRFS_UUID_SIZE); | 3271 | BTRFS_UUID_SIZE); |
3263 | 3272 | ||
3264 | tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid); | 3273 | tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid, |
3274 | tree_mod_log_removal); | ||
3265 | copy_extent_buffer(split, c, | 3275 | copy_extent_buffer(split, c, |
3266 | btrfs_node_key_ptr_offset(0), | 3276 | btrfs_node_key_ptr_offset(0), |
3267 | btrfs_node_key_ptr_offset(mid), | 3277 | btrfs_node_key_ptr_offset(mid), |
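The tree_mod_log_eb_copy() signature gains a log_removal flag, and every caller except the root-split path in split_node() passes 1. Purely for illustration (the wrapper below is not in the patch), the old calling convention could be preserved like this:

/* Illustrative wrapper only: default log_removal to 1, i.e. also log
 * the key removals from the source extent buffer. */
static inline void
tree_mod_log_eb_copy_compat(struct btrfs_fs_info *fs_info,
			    struct extent_buffer *dst,
			    struct extent_buffer *src,
			    unsigned long dst_offset,
			    unsigned long src_offset, int nr_items)
{
	tree_mod_log_eb_copy(fs_info, dst, src, dst_offset, src_offset,
			     nr_items, 1 /* log_removal */);
}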
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 7d84651e850b..6d19a0a554aa 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -1291,6 +1291,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans, | |||
1291 | 0, objectid, NULL, 0, 0, 0); | 1291 | 0, objectid, NULL, 0, 0, 0); |
1292 | if (IS_ERR(leaf)) { | 1292 | if (IS_ERR(leaf)) { |
1293 | ret = PTR_ERR(leaf); | 1293 | ret = PTR_ERR(leaf); |
1294 | leaf = NULL; | ||
1294 | goto fail; | 1295 | goto fail; |
1295 | } | 1296 | } |
1296 | 1297 | ||
@@ -1334,11 +1335,16 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans, | |||
1334 | 1335 | ||
1335 | btrfs_tree_unlock(leaf); | 1336 | btrfs_tree_unlock(leaf); |
1336 | 1337 | ||
1338 | return root; | ||
1339 | |||
1337 | fail: | 1340 | fail: |
1338 | if (ret) | 1341 | if (leaf) { |
1339 | return ERR_PTR(ret); | 1342 | btrfs_tree_unlock(leaf); |
1343 | free_extent_buffer(leaf); | ||
1344 | } | ||
1345 | kfree(root); | ||
1340 | 1346 | ||
1341 | return root; | 1347 | return ERR_PTR(ret); |
1342 | } | 1348 | } |
1343 | 1349 | ||
1344 | static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans, | 1350 | static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans, |
@@ -3253,7 +3259,7 @@ void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) | |||
3253 | if (btrfs_root_refs(&root->root_item) == 0) | 3259 | if (btrfs_root_refs(&root->root_item) == 0) |
3254 | synchronize_srcu(&fs_info->subvol_srcu); | 3260 | synchronize_srcu(&fs_info->subvol_srcu); |
3255 | 3261 | ||
3256 | if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { | 3262 | if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { |
3257 | btrfs_free_log(NULL, root); | 3263 | btrfs_free_log(NULL, root); |
3258 | btrfs_free_log_root_tree(NULL, fs_info); | 3264 | btrfs_free_log_root_tree(NULL, fs_info); |
3259 | } | 3265 | } |
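The btrfs_create_tree() change above is the usual single-exit cleanup idiom: initialise the resource pointer to NULL, return early on success, and let one fail label free only what was actually acquired. A generic, self-contained sketch of the idiom, where all demo_* names are made up for illustration:

#include <linux/slab.h>
#include <linux/err.h>

struct demo_leaf { int filled; };
struct demo_root { struct demo_leaf *node; };

static struct demo_leaf *demo_alloc_leaf(void)
{
	struct demo_leaf *leaf = kzalloc(sizeof(*leaf), GFP_KERNEL);

	return leaf ? leaf : ERR_PTR(-ENOMEM);
}

static struct demo_root *demo_create(void)
{
	struct demo_root *root;
	struct demo_leaf *leaf = NULL;	/* stays NULL until allocated */
	int ret;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	leaf = demo_alloc_leaf();
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;		/* so the fail path skips it */
		goto fail;
	}

	leaf->filled = 1;		/* "initialise" the leaf */
	root->node = leaf;

	return root;			/* success exits before cleanup */

fail:
	if (leaf)			/* free only what was acquired */
		kfree(leaf);
	kfree(root);
	return ERR_PTR(ret);
}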
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9ac2eca681eb..3d551231caba 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -257,7 +257,8 @@ static int exclude_super_stripes(struct btrfs_root *root, | |||
257 | cache->bytes_super += stripe_len; | 257 | cache->bytes_super += stripe_len; |
258 | ret = add_excluded_extent(root, cache->key.objectid, | 258 | ret = add_excluded_extent(root, cache->key.objectid, |
259 | stripe_len); | 259 | stripe_len); |
260 | BUG_ON(ret); /* -ENOMEM */ | 260 | if (ret) |
261 | return ret; | ||
261 | } | 262 | } |
262 | 263 | ||
263 | for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { | 264 | for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { |
@@ -265,13 +266,17 @@ static int exclude_super_stripes(struct btrfs_root *root, | |||
265 | ret = btrfs_rmap_block(&root->fs_info->mapping_tree, | 266 | ret = btrfs_rmap_block(&root->fs_info->mapping_tree, |
266 | cache->key.objectid, bytenr, | 267 | cache->key.objectid, bytenr, |
267 | 0, &logical, &nr, &stripe_len); | 268 | 0, &logical, &nr, &stripe_len); |
268 | BUG_ON(ret); /* -ENOMEM */ | 269 | if (ret) |
270 | return ret; | ||
269 | 271 | ||
270 | while (nr--) { | 272 | while (nr--) { |
271 | cache->bytes_super += stripe_len; | 273 | cache->bytes_super += stripe_len; |
272 | ret = add_excluded_extent(root, logical[nr], | 274 | ret = add_excluded_extent(root, logical[nr], |
273 | stripe_len); | 275 | stripe_len); |
274 | BUG_ON(ret); /* -ENOMEM */ | 276 | if (ret) { |
277 | kfree(logical); | ||
278 | return ret; | ||
279 | } | ||
275 | } | 280 | } |
276 | 281 | ||
277 | kfree(logical); | 282 | kfree(logical); |
@@ -4438,7 +4443,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) | |||
4438 | spin_lock(&sinfo->lock); | 4443 | spin_lock(&sinfo->lock); |
4439 | spin_lock(&block_rsv->lock); | 4444 | spin_lock(&block_rsv->lock); |
4440 | 4445 | ||
4441 | block_rsv->size = num_bytes; | 4446 | block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024); |
4442 | 4447 | ||
4443 | num_bytes = sinfo->bytes_used + sinfo->bytes_pinned + | 4448 | num_bytes = sinfo->bytes_used + sinfo->bytes_pinned + |
4444 | sinfo->bytes_reserved + sinfo->bytes_readonly + | 4449 | sinfo->bytes_reserved + sinfo->bytes_readonly + |
@@ -4793,14 +4798,49 @@ out_fail: | |||
4793 | * If the inodes csum_bytes is the same as the original | 4798 | * If the inodes csum_bytes is the same as the original |
4794 | * csum_bytes then we know we haven't raced with any free()ers | 4799 | * csum_bytes then we know we haven't raced with any free()ers |
4795 | * so we can just reduce our inodes csum bytes and carry on. | 4800 | * so we can just reduce our inodes csum bytes and carry on. |
4796 | * Otherwise we have to do the normal free thing to account for | ||
4797 | * the case that the free side didn't free up its reserve | ||
4798 | * because of this outstanding reservation. | ||
4799 | */ | 4801 | */ |
4800 | if (BTRFS_I(inode)->csum_bytes == csum_bytes) | 4802 | if (BTRFS_I(inode)->csum_bytes == csum_bytes) { |
4801 | calc_csum_metadata_size(inode, num_bytes, 0); | 4803 | calc_csum_metadata_size(inode, num_bytes, 0); |
4802 | else | 4804 | } else { |
4803 | to_free = calc_csum_metadata_size(inode, num_bytes, 0); | 4805 | u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes; |
4806 | u64 bytes; | ||
4807 | |||
4808 | /* | ||
4809 | * This is tricky, but first we need to figure out how much we | ||
4810 | * free'd from any free-ers that occurred during this | ||
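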
4811 | * reservation, so we reset ->csum_bytes to the csum_bytes | ||
4812 | * before we dropped our lock, and then call the free for the | ||
4813 | * number of bytes that were freed while we were trying our | ||
4814 | * reservation. | ||
4815 | */ | ||
4816 | bytes = csum_bytes - BTRFS_I(inode)->csum_bytes; | ||
4817 | BTRFS_I(inode)->csum_bytes = csum_bytes; | ||
4818 | to_free = calc_csum_metadata_size(inode, bytes, 0); | ||
4819 | |||
4820 | |||
4821 | /* | ||
4822 | * Now we need to see how much we would have freed had we not | ||
4823 | * been making this reservation and our ->csum_bytes were not | ||
4824 | * artificially inflated. | ||
4825 | */ | ||
4826 | BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes; | ||
4827 | bytes = csum_bytes - orig_csum_bytes; | ||
4828 | bytes = calc_csum_metadata_size(inode, bytes, 0); | ||
4829 | |||
4830 | /* | ||
4831 | * Now reset ->csum_bytes to what it should be. If bytes is | ||
4832 | * more than to_free then we would have free'd more space had we | ||
4833 | * not had an artificially high ->csum_bytes, so we need to free | ||
4834 | * the remainder. If bytes is the same or less then we don't | ||
4835 | * need to do anything, the other free-ers did the correct | ||
4836 | * thing. | ||
4837 | */ | ||
4838 | BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes; | ||
4839 | if (bytes > to_free) | ||
4840 | to_free = bytes - to_free; | ||
4841 | else | ||
4842 | to_free = 0; | ||
4843 | } | ||
4804 | spin_unlock(&BTRFS_I(inode)->lock); | 4844 | spin_unlock(&BTRFS_I(inode)->lock); |
4805 | if (dropped) | 4845 | if (dropped) |
4806 | to_free += btrfs_calc_trans_metadata_size(root, dropped); | 4846 | to_free += btrfs_calc_trans_metadata_size(root, dropped); |
@@ -7947,7 +7987,17 @@ int btrfs_read_block_groups(struct btrfs_root *root) | |||
7947 | * info has super bytes accounted for, otherwise we'll think | 7987 | * info has super bytes accounted for, otherwise we'll think |
7948 | * we have more space than we actually do. | 7988 | * we have more space than we actually do. |
7949 | */ | 7989 | */ |
7950 | exclude_super_stripes(root, cache); | 7990 | ret = exclude_super_stripes(root, cache); |
7991 | if (ret) { | ||
7992 | /* | ||
7993 | * We may have excluded something, so call this just in | ||
7994 | * case. | ||
7995 | */ | ||
7996 | free_excluded_extents(root, cache); | ||
7997 | kfree(cache->free_space_ctl); | ||
7998 | kfree(cache); | ||
7999 | goto error; | ||
8000 | } | ||
7951 | 8001 | ||
7952 | /* | 8002 | /* |
7953 | * check for two cases, either we are full, and therefore | 8003 | * check for two cases, either we are full, and therefore |
@@ -8089,7 +8139,17 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, | |||
8089 | 8139 | ||
8090 | cache->last_byte_to_unpin = (u64)-1; | 8140 | cache->last_byte_to_unpin = (u64)-1; |
8091 | cache->cached = BTRFS_CACHE_FINISHED; | 8141 | cache->cached = BTRFS_CACHE_FINISHED; |
8092 | exclude_super_stripes(root, cache); | 8142 | ret = exclude_super_stripes(root, cache); |
8143 | if (ret) { | ||
8144 | /* | ||
8145 | * We may have excluded something, so call this just in | ||
8146 | * case. | ||
8147 | */ | ||
8148 | free_excluded_extents(root, cache); | ||
8149 | kfree(cache->free_space_ctl); | ||
8150 | kfree(cache); | ||
8151 | return ret; | ||
8152 | } | ||
8093 | 8153 | ||
8094 | add_new_free_space(cache, root->fs_info, chunk_offset, | 8154 | add_new_free_space(cache, root->fs_info, chunk_offset, |
8095 | chunk_offset + size); | 8155 | chunk_offset + size); |
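One of the extent-tree.c hunks above also caps the global block reserve at 512 MiB with min_t(). A tiny illustration of that clamp; the constant and helper names are assumptions made for this sketch, not identifiers from the patch:

#include <linux/kernel.h>

#define DEMO_GLOBAL_RSV_CAP	(512ULL * 1024 * 1024)	/* 512 MiB */

static inline u64 demo_clamp_global_rsv(u64 wanted_bytes)
{
	/* min_t() casts both operands to u64 before comparing, so a
	 * signed/unsigned mismatch cannot silently pick the wrong one. */
	return min_t(u64, wanted_bytes, DEMO_GLOBAL_RSV_CAP);
}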
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f173c5af6461..cdee391fc7bf 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -1257,6 +1257,39 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end) | |||
1257 | GFP_NOFS); | 1257 | GFP_NOFS); |
1258 | } | 1258 | } |
1259 | 1259 | ||
1260 | int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) | ||
1261 | { | ||
1262 | unsigned long index = start >> PAGE_CACHE_SHIFT; | ||
1263 | unsigned long end_index = end >> PAGE_CACHE_SHIFT; | ||
1264 | struct page *page; | ||
1265 | |||
1266 | while (index <= end_index) { | ||
1267 | page = find_get_page(inode->i_mapping, index); | ||
1268 | BUG_ON(!page); /* Pages should be in the extent_io_tree */ | ||
1269 | clear_page_dirty_for_io(page); | ||
1270 | page_cache_release(page); | ||
1271 | index++; | ||
1272 | } | ||
1273 | return 0; | ||
1274 | } | ||
1275 | |||
1276 | int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) | ||
1277 | { | ||
1278 | unsigned long index = start >> PAGE_CACHE_SHIFT; | ||
1279 | unsigned long end_index = end >> PAGE_CACHE_SHIFT; | ||
1280 | struct page *page; | ||
1281 | |||
1282 | while (index <= end_index) { | ||
1283 | page = find_get_page(inode->i_mapping, index); | ||
1284 | BUG_ON(!page); /* Pages should be in the extent_io_tree */ | ||
1285 | account_page_redirty(page); | ||
1286 | __set_page_dirty_nobuffers(page); | ||
1287 | page_cache_release(page); | ||
1288 | index++; | ||
1289 | } | ||
1290 | return 0; | ||
1291 | } | ||
1292 | |||
1260 | /* | 1293 | /* |
1261 | * helper function to set both pages and extents in the tree writeback | 1294 | * helper function to set both pages and extents in the tree writeback |
1262 | */ | 1295 | */ |
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 6068a1985560..258c92156857 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h | |||
@@ -325,6 +325,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset, | |||
325 | unsigned long *map_len); | 325 | unsigned long *map_len); |
326 | int extent_range_uptodate(struct extent_io_tree *tree, | 326 | int extent_range_uptodate(struct extent_io_tree *tree, |
327 | u64 start, u64 end); | 327 | u64 start, u64 end); |
328 | int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end); | ||
329 | int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end); | ||
328 | int extent_clear_unlock_delalloc(struct inode *inode, | 330 | int extent_clear_unlock_delalloc(struct inode *inode, |
329 | struct extent_io_tree *tree, | 331 | struct extent_io_tree *tree, |
330 | u64 start, u64 end, struct page *locked_page, | 332 | u64 start, u64 end, struct page *locked_page, |
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index ec160202be3e..c4628a201cb3 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c | |||
@@ -118,9 +118,11 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, | |||
118 | csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]); | 118 | csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]); |
119 | csums_in_item /= csum_size; | 119 | csums_in_item /= csum_size; |
120 | 120 | ||
121 | if (csum_offset >= csums_in_item) { | 121 | if (csum_offset == csums_in_item) { |
122 | ret = -EFBIG; | 122 | ret = -EFBIG; |
123 | goto fail; | 123 | goto fail; |
124 | } else if (csum_offset > csums_in_item) { | ||
125 | goto fail; | ||
124 | } | 126 | } |
125 | } | 127 | } |
126 | item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); | 128 | item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item); |
@@ -728,7 +730,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, | |||
728 | return -ENOMEM; | 730 | return -ENOMEM; |
729 | 731 | ||
730 | sector_sum = sums->sums; | 732 | sector_sum = sums->sums; |
731 | trans->adding_csums = 1; | ||
732 | again: | 733 | again: |
733 | next_offset = (u64)-1; | 734 | next_offset = (u64)-1; |
734 | found_next = 0; | 735 | found_next = 0; |
@@ -899,7 +900,6 @@ next_sector: | |||
899 | goto again; | 900 | goto again; |
900 | } | 901 | } |
901 | out: | 902 | out: |
902 | trans->adding_csums = 0; | ||
903 | btrfs_free_path(path); | 903 | btrfs_free_path(path); |
904 | return ret; | 904 | return ret; |
905 | 905 | ||
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 5b4ea5f55b8f..ade03e6f7bd2 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -2142,6 +2142,7 @@ static long btrfs_fallocate(struct file *file, int mode, | |||
2142 | { | 2142 | { |
2143 | struct inode *inode = file_inode(file); | 2143 | struct inode *inode = file_inode(file); |
2144 | struct extent_state *cached_state = NULL; | 2144 | struct extent_state *cached_state = NULL; |
2145 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
2145 | u64 cur_offset; | 2146 | u64 cur_offset; |
2146 | u64 last_byte; | 2147 | u64 last_byte; |
2147 | u64 alloc_start; | 2148 | u64 alloc_start; |
@@ -2169,6 +2170,11 @@ static long btrfs_fallocate(struct file *file, int mode, | |||
2169 | ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start); | 2170 | ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start); |
2170 | if (ret) | 2171 | if (ret) |
2171 | return ret; | 2172 | return ret; |
2173 | if (root->fs_info->quota_enabled) { | ||
2174 | ret = btrfs_qgroup_reserve(root, alloc_end - alloc_start); | ||
2175 | if (ret) | ||
2176 | goto out_reserve_fail; | ||
2177 | } | ||
2172 | 2178 | ||
2173 | /* | 2179 | /* |
2174 | * wait for ordered IO before we have any locks. We'll loop again | 2180 | * wait for ordered IO before we have any locks. We'll loop again |
@@ -2272,6 +2278,9 @@ static long btrfs_fallocate(struct file *file, int mode, | |||
2272 | &cached_state, GFP_NOFS); | 2278 | &cached_state, GFP_NOFS); |
2273 | out: | 2279 | out: |
2274 | mutex_unlock(&inode->i_mutex); | 2280 | mutex_unlock(&inode->i_mutex); |
2281 | if (root->fs_info->quota_enabled) | ||
2282 | btrfs_qgroup_free(root, alloc_end - alloc_start); | ||
2283 | out_reserve_fail: | ||
2275 | /* Let go of our reservation. */ | 2284 | /* Let go of our reservation. */ |
2276 | btrfs_free_reserved_data_space(inode, alloc_end - alloc_start); | 2285 | btrfs_free_reserved_data_space(inode, alloc_end - alloc_start); |
2277 | return ret; | 2286 | return ret; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index ca1b767d51f7..09c58a35b429 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -353,6 +353,7 @@ static noinline int compress_file_range(struct inode *inode, | |||
353 | int i; | 353 | int i; |
354 | int will_compress; | 354 | int will_compress; |
355 | int compress_type = root->fs_info->compress_type; | 355 | int compress_type = root->fs_info->compress_type; |
356 | int redirty = 0; | ||
356 | 357 | ||
357 | /* if this is a small write inside eof, kick off a defrag */ | 358 | /* if this is a small write inside eof, kick off a defrag */ |
358 | if ((end - start + 1) < 16 * 1024 && | 359 | if ((end - start + 1) < 16 * 1024 && |
@@ -415,6 +416,17 @@ again: | |||
415 | if (BTRFS_I(inode)->force_compress) | 416 | if (BTRFS_I(inode)->force_compress) |
416 | compress_type = BTRFS_I(inode)->force_compress; | 417 | compress_type = BTRFS_I(inode)->force_compress; |
417 | 418 | ||
419 | /* | ||
420 | * we need to call clear_page_dirty_for_io on each | ||
421 | * page in the range. Otherwise applications with the file | ||
422 | * mmap'd can wander in and change the page contents while | ||
423 | * we are compressing them. | ||
424 | * | ||
425 | * If the compression fails for any reason, we set the pages | ||
426 | * dirty again later on. | ||
427 | */ | ||
428 | extent_range_clear_dirty_for_io(inode, start, end); | ||
429 | redirty = 1; | ||
418 | ret = btrfs_compress_pages(compress_type, | 430 | ret = btrfs_compress_pages(compress_type, |
419 | inode->i_mapping, start, | 431 | inode->i_mapping, start, |
420 | total_compressed, pages, | 432 | total_compressed, pages, |
@@ -554,6 +566,8 @@ cleanup_and_bail_uncompressed: | |||
554 | __set_page_dirty_nobuffers(locked_page); | 566 | __set_page_dirty_nobuffers(locked_page); |
555 | /* unlocked later on in the async handlers */ | 567 | /* unlocked later on in the async handlers */ |
556 | } | 568 | } |
569 | if (redirty) | ||
570 | extent_range_redirty_for_io(inode, start, end); | ||
557 | add_async_extent(async_cow, start, end - start + 1, | 571 | add_async_extent(async_cow, start, end - start + 1, |
558 | 0, NULL, 0, BTRFS_COMPRESS_NONE); | 572 | 0, NULL, 0, BTRFS_COMPRESS_NONE); |
559 | *num_added += 1; | 573 | *num_added += 1; |
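Together with the new extent_io.c helpers, the two compress_file_range() hunks implement a clean-then-maybe-redirty protocol: page dirty bits are cleared before compression so mmap writers cannot modify the data mid-stream, and the pages are redirtied if the range falls back to the uncompressed path. A condensed sketch of that flow, with try_compress() as a hypothetical stand-in for btrfs_compress_pages() and its surrounding checks:

static int try_compress(struct inode *inode, u64 start, u64 end); /* hypothetical */

/* Condensed, illustrative flow; error handling and the async extent
 * bookkeeping are elided. */
static void compress_range_sketch(struct inode *inode, u64 start, u64 end)
{
	int redirty = 0;

	/* Stop concurrent mmap stores from racing with the compressor. */
	extent_range_clear_dirty_for_io(inode, start, end);
	redirty = 1;

	if (try_compress(inode, start, end) == 0)
		return;		/* compressed extents are written back later */

	/* Compression failed or was not worthwhile: re-dirty (and
	 * re-account) the pages so ordinary writeback still sees them. */
	if (redirty)
		extent_range_redirty_for_io(inode, start, end);
}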
@@ -1743,8 +1757,10 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, | |||
1743 | struct btrfs_ordered_sum *sum; | 1757 | struct btrfs_ordered_sum *sum; |
1744 | 1758 | ||
1745 | list_for_each_entry(sum, list, list) { | 1759 | list_for_each_entry(sum, list, list) { |
1760 | trans->adding_csums = 1; | ||
1746 | btrfs_csum_file_blocks(trans, | 1761 | btrfs_csum_file_blocks(trans, |
1747 | BTRFS_I(inode)->root->fs_info->csum_root, sum); | 1762 | BTRFS_I(inode)->root->fs_info->csum_root, sum); |
1763 | trans->adding_csums = 0; | ||
1748 | } | 1764 | } |
1749 | return 0; | 1765 | return 0; |
1750 | } | 1766 | } |
@@ -3679,11 +3695,9 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, | |||
3679 | * 1 for the dir item | 3695 | * 1 for the dir item |
3680 | * 1 for the dir index | 3696 | * 1 for the dir index |
3681 | * 1 for the inode ref | 3697 | * 1 for the inode ref |
3682 | * 1 for the inode ref in the tree log | ||
3683 | * 2 for the dir entries in the log | ||
3684 | * 1 for the inode | 3698 | * 1 for the inode |
3685 | */ | 3699 | */ |
3686 | trans = btrfs_start_transaction(root, 8); | 3700 | trans = btrfs_start_transaction(root, 5); |
3687 | if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) | 3701 | if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) |
3688 | return trans; | 3702 | return trans; |
3689 | 3703 | ||
@@ -8127,7 +8141,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
8127 | * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items | 8141 | * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items |
8128 | * should cover the worst case number of items we'll modify. | 8142 | * should cover the worst case number of items we'll modify. |
8129 | */ | 8143 | */ |
8130 | trans = btrfs_start_transaction(root, 20); | 8144 | trans = btrfs_start_transaction(root, 11); |
8131 | if (IS_ERR(trans)) { | 8145 | if (IS_ERR(trans)) { |
8132 | ret = PTR_ERR(trans); | 8146 | ret = PTR_ERR(trans); |
8133 | goto out_notrans; | 8147 | goto out_notrans; |
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index dc08d77b717e..005c45db699e 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c | |||
@@ -557,6 +557,7 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput) | |||
557 | INIT_LIST_HEAD(&splice); | 557 | INIT_LIST_HEAD(&splice); |
558 | INIT_LIST_HEAD(&works); | 558 | INIT_LIST_HEAD(&works); |
559 | 559 | ||
560 | mutex_lock(&root->fs_info->ordered_operations_mutex); | ||
560 | spin_lock(&root->fs_info->ordered_extent_lock); | 561 | spin_lock(&root->fs_info->ordered_extent_lock); |
561 | list_splice_init(&root->fs_info->ordered_extents, &splice); | 562 | list_splice_init(&root->fs_info->ordered_extents, &splice); |
562 | while (!list_empty(&splice)) { | 563 | while (!list_empty(&splice)) { |
@@ -600,6 +601,7 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput) | |||
600 | 601 | ||
601 | cond_resched(); | 602 | cond_resched(); |
602 | } | 603 | } |
604 | mutex_unlock(&root->fs_info->ordered_operations_mutex); | ||
603 | } | 605 | } |
604 | 606 | ||
605 | /* | 607 | /* |
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 5471e47d6559..b44124dd2370 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c | |||
@@ -1153,7 +1153,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans, | |||
1153 | ret = btrfs_find_all_roots(trans, fs_info, node->bytenr, | 1153 | ret = btrfs_find_all_roots(trans, fs_info, node->bytenr, |
1154 | sgn > 0 ? node->seq - 1 : node->seq, &roots); | 1154 | sgn > 0 ? node->seq - 1 : node->seq, &roots); |
1155 | if (ret < 0) | 1155 | if (ret < 0) |
1156 | goto out; | 1156 | return ret; |
1157 | 1157 | ||
1158 | spin_lock(&fs_info->qgroup_lock); | 1158 | spin_lock(&fs_info->qgroup_lock); |
1159 | quota_root = fs_info->quota_root; | 1159 | quota_root = fs_info->quota_root; |
@@ -1275,7 +1275,6 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans, | |||
1275 | ret = 0; | 1275 | ret = 0; |
1276 | unlock: | 1276 | unlock: |
1277 | spin_unlock(&fs_info->qgroup_lock); | 1277 | spin_unlock(&fs_info->qgroup_lock); |
1278 | out: | ||
1279 | ulist_free(roots); | 1278 | ulist_free(roots); |
1280 | ulist_free(tmp); | 1279 | ulist_free(tmp); |
1281 | 1280 | ||
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 53c3501fa4ca..85e072b956d5 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c | |||
@@ -542,7 +542,6 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) | |||
542 | eb = path->nodes[0]; | 542 | eb = path->nodes[0]; |
543 | ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); | 543 | ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item); |
544 | item_size = btrfs_item_size_nr(eb, path->slots[0]); | 544 | item_size = btrfs_item_size_nr(eb, path->slots[0]); |
545 | btrfs_release_path(path); | ||
546 | 545 | ||
547 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { | 546 | if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { |
548 | do { | 547 | do { |
@@ -558,7 +557,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock) | |||
558 | ret < 0 ? -1 : ref_level, | 557 | ret < 0 ? -1 : ref_level, |
559 | ret < 0 ? -1 : ref_root); | 558 | ret < 0 ? -1 : ref_root); |
560 | } while (ret != 1); | 559 | } while (ret != 1); |
560 | btrfs_release_path(path); | ||
561 | } else { | 561 | } else { |
562 | btrfs_release_path(path); | ||
562 | swarn.path = path; | 563 | swarn.path = path; |
563 | swarn.dev = dev; | 564 | swarn.dev = dev; |
564 | iterate_extent_inodes(fs_info, found_key.objectid, | 565 | iterate_extent_inodes(fs_info, found_key.objectid, |
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index f7a8b861058b..c85e7c6b4598 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c | |||
@@ -3945,12 +3945,10 @@ static int is_extent_unchanged(struct send_ctx *sctx, | |||
3945 | found_key.type != key.type) { | 3945 | found_key.type != key.type) { |
3946 | key.offset += right_len; | 3946 | key.offset += right_len; |
3947 | break; | 3947 | break; |
3948 | } else { | 3948 | } |
3949 | if (found_key.offset != key.offset + right_len) { | 3949 | if (found_key.offset != key.offset + right_len) { |
3950 | /* Should really not happen */ | 3950 | ret = 0; |
3951 | ret = -EIO; | 3951 | goto out; |
3952 | goto out; | ||
3953 | } | ||
3954 | } | 3952 | } |
3955 | key = found_key; | 3953 | key = found_key; |
3956 | } | 3954 | } |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 5989a92236f7..2854c824ab64 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -4935,7 +4935,18 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, | |||
4935 | em = lookup_extent_mapping(em_tree, chunk_start, 1); | 4935 | em = lookup_extent_mapping(em_tree, chunk_start, 1); |
4936 | read_unlock(&em_tree->lock); | 4936 | read_unlock(&em_tree->lock); |
4937 | 4937 | ||
4938 | BUG_ON(!em || em->start != chunk_start); | 4938 | if (!em) { |
4939 | printk(KERN_ERR "btrfs: couldn't find em for chunk %Lu\n", | ||
4940 | chunk_start); | ||
4941 | return -EIO; | ||
4942 | } | ||
4943 | |||
4944 | if (em->start != chunk_start) { | ||
4945 | printk(KERN_ERR "btrfs: bad chunk start, em=%Lu, wanted=%Lu\n", | ||
4946 | em->start, chunk_start); | ||
4947 | free_extent_map(em); | ||
4948 | return -EIO; | ||
4949 | } | ||
4939 | map = (struct map_lookup *)em->bdev; | 4950 | map = (struct map_lookup *)em->bdev; |
4940 | 4951 | ||
4941 | length = em->len; | 4952 | length = em->len; |
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c index cfd1ce34e0bc..1d36db114772 100644 --- a/fs/cifs/asn1.c +++ b/fs/cifs/asn1.c | |||
@@ -614,53 +614,10 @@ decode_negTokenInit(unsigned char *security_blob, int length, | |||
614 | } | 614 | } |
615 | } | 615 | } |
616 | 616 | ||
617 | /* mechlistMIC */ | 617 | /* |
618 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | 618 | * We currently ignore anything at the end of the SPNEGO blob after |
619 | /* Check if we have reached the end of the blob, but with | 619 | * the mechTypes have been parsed, since none of that info is |
620 | no mechListMic (e.g. NTLMSSP instead of KRB5) */ | 620 | * used at the moment. |
621 | if (ctx.error == ASN1_ERR_DEC_EMPTY) | 621 | */ |
622 | goto decode_negtoken_exit; | ||
623 | cFYI(1, "Error decoding last part negTokenInit exit3"); | ||
624 | return 0; | ||
625 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { | ||
626 | /* tag = 3 indicating mechListMIC */ | ||
627 | cFYI(1, "Exit 4 cls = %d con = %d tag = %d end = %p (%d)", | ||
628 | cls, con, tag, end, *end); | ||
629 | return 0; | ||
630 | } | ||
631 | |||
632 | /* sequence */ | ||
633 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | ||
634 | cFYI(1, "Error decoding last part negTokenInit exit5"); | ||
635 | return 0; | ||
636 | } else if ((cls != ASN1_UNI) || (con != ASN1_CON) | ||
637 | || (tag != ASN1_SEQ)) { | ||
638 | cFYI(1, "cls = %d con = %d tag = %d end = %p (%d)", | ||
639 | cls, con, tag, end, *end); | ||
640 | } | ||
641 | |||
642 | /* sequence of */ | ||
643 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | ||
644 | cFYI(1, "Error decoding last part negTokenInit exit 7"); | ||
645 | return 0; | ||
646 | } else if ((cls != ASN1_CTX) || (con != ASN1_CON)) { | ||
647 | cFYI(1, "Exit 8 cls = %d con = %d tag = %d end = %p (%d)", | ||
648 | cls, con, tag, end, *end); | ||
649 | return 0; | ||
650 | } | ||
651 | |||
652 | /* general string */ | ||
653 | if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { | ||
654 | cFYI(1, "Error decoding last part negTokenInit exit9"); | ||
655 | return 0; | ||
656 | } else if ((cls != ASN1_UNI) || (con != ASN1_PRI) | ||
657 | || (tag != ASN1_GENSTR)) { | ||
658 | cFYI(1, "Exit10 cls = %d con = %d tag = %d end = %p (%d)", | ||
659 | cls, con, tag, end, *end); | ||
660 | return 0; | ||
661 | } | ||
662 | cFYI(1, "Need to call asn1_octets_decode() function for %s", | ||
663 | ctx.pointer); /* is this UTF-8 or ASCII? */ | ||
664 | decode_negtoken_exit: | ||
665 | return 1; | 622 | return 1; |
666 | } | 623 | } |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 3cf8a15af916..345fc89c4286 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -91,6 +91,30 @@ struct workqueue_struct *cifsiod_wq; | |||
91 | __u8 cifs_client_guid[SMB2_CLIENT_GUID_SIZE]; | 91 | __u8 cifs_client_guid[SMB2_CLIENT_GUID_SIZE]; |
92 | #endif | 92 | #endif |
93 | 93 | ||
94 | /* | ||
95 | * Bumps refcount for cifs super block. | ||
96 | * Note that it should only be called if a reference to the VFS super block is | ||
97 | * already held, e.g. in open-type syscalls context. Otherwise it can race with | ||
98 | * atomic_dec_and_test in deactivate_locked_super. | ||
99 | */ | ||
100 | void | ||
101 | cifs_sb_active(struct super_block *sb) | ||
102 | { | ||
103 | struct cifs_sb_info *server = CIFS_SB(sb); | ||
104 | |||
105 | if (atomic_inc_return(&server->active) == 1) | ||
106 | atomic_inc(&sb->s_active); | ||
107 | } | ||
108 | |||
109 | void | ||
110 | cifs_sb_deactive(struct super_block *sb) | ||
111 | { | ||
112 | struct cifs_sb_info *server = CIFS_SB(sb); | ||
113 | |||
114 | if (atomic_dec_and_test(&server->active)) | ||
115 | deactivate_super(sb); | ||
116 | } | ||
117 | |||
94 | static int | 118 | static int |
95 | cifs_read_super(struct super_block *sb) | 119 | cifs_read_super(struct super_block *sb) |
96 | { | 120 | { |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 7163419cecd9..0e32c3446ce9 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -41,6 +41,10 @@ extern struct file_system_type cifs_fs_type; | |||
41 | extern const struct address_space_operations cifs_addr_ops; | 41 | extern const struct address_space_operations cifs_addr_ops; |
42 | extern const struct address_space_operations cifs_addr_ops_smallbuf; | 42 | extern const struct address_space_operations cifs_addr_ops_smallbuf; |
43 | 43 | ||
44 | /* Functions related to super block operations */ | ||
45 | extern void cifs_sb_active(struct super_block *sb); | ||
46 | extern void cifs_sb_deactive(struct super_block *sb); | ||
47 | |||
44 | /* Functions related to inodes */ | 48 | /* Functions related to inodes */ |
45 | extern const struct inode_operations cifs_dir_inode_ops; | 49 | extern const struct inode_operations cifs_dir_inode_ops; |
46 | extern struct inode *cifs_root_iget(struct super_block *); | 50 | extern struct inode *cifs_root_iget(struct super_block *); |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 991c63c6bdd0..21b3a291c327 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -1575,14 +1575,24 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1575 | } | 1575 | } |
1576 | break; | 1576 | break; |
1577 | case Opt_blank_pass: | 1577 | case Opt_blank_pass: |
1578 | vol->password = NULL; | ||
1579 | break; | ||
1580 | case Opt_pass: | ||
1581 | /* passwords have to be handled differently | 1578 | /* passwords have to be handled differently |
1582 | * to allow the character used for the delimiter | 1579 | * to allow the character used for the delimiter |
1583 | * to be passed within them | 1580 | * to be passed within them |
1584 | */ | 1581 | */ |
1585 | 1582 | ||
1583 | /* | ||
1584 | * Check if this is a case where the password | ||
1585 | * starts with a delimiter | ||
1586 | */ | ||
1587 | tmp_end = strchr(data, '='); | ||
1588 | tmp_end++; | ||
1589 | if (!(tmp_end < end && tmp_end[1] == delim)) { | ||
1590 | /* No it is not. Set the password to NULL */ | ||
1591 | vol->password = NULL; | ||
1592 | break; | ||
1593 | } | ||
1594 | /* Yes it is. Drop down to Opt_pass below.*/ | ||
1595 | case Opt_pass: | ||
1586 | /* Obtain the value string */ | 1596 | /* Obtain the value string */ |
1587 | value = strchr(data, '='); | 1597 | value = strchr(data, '='); |
1588 | value++; | 1598 | value++; |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 8c0d85577314..7a0dd99e4507 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -300,6 +300,8 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, | |||
300 | INIT_WORK(&cfile->oplock_break, cifs_oplock_break); | 300 | INIT_WORK(&cfile->oplock_break, cifs_oplock_break); |
301 | mutex_init(&cfile->fh_mutex); | 301 | mutex_init(&cfile->fh_mutex); |
302 | 302 | ||
303 | cifs_sb_active(inode->i_sb); | ||
304 | |||
303 | /* | 305 | /* |
304 | * If the server returned a read oplock and we have mandatory brlocks, | 306 | * If the server returned a read oplock and we have mandatory brlocks, |
305 | * set oplock level to None. | 307 | * set oplock level to None. |
@@ -349,7 +351,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) | |||
349 | struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); | 351 | struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); |
350 | struct TCP_Server_Info *server = tcon->ses->server; | 352 | struct TCP_Server_Info *server = tcon->ses->server; |
351 | struct cifsInodeInfo *cifsi = CIFS_I(inode); | 353 | struct cifsInodeInfo *cifsi = CIFS_I(inode); |
352 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 354 | struct super_block *sb = inode->i_sb; |
355 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | ||
353 | struct cifsLockInfo *li, *tmp; | 356 | struct cifsLockInfo *li, *tmp; |
354 | struct cifs_fid fid; | 357 | struct cifs_fid fid; |
355 | struct cifs_pending_open open; | 358 | struct cifs_pending_open open; |
@@ -414,6 +417,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) | |||
414 | 417 | ||
415 | cifs_put_tlink(cifs_file->tlink); | 418 | cifs_put_tlink(cifs_file->tlink); |
416 | dput(cifs_file->dentry); | 419 | dput(cifs_file->dentry); |
420 | cifs_sb_deactive(sb); | ||
417 | kfree(cifs_file); | 421 | kfree(cifs_file); |
418 | } | 422 | } |
419 | 423 | ||
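The cifs_sb_active()/cifs_sb_deactive() helpers added earlier are paired here: the superblock is pinned when a cifsFileInfo is created and released when its last reference is dropped. A generic sketch of that lifetime pattern follows; my_handle and its helpers are hypothetical, not cifs code:

#include <linux/fs.h>
#include <linux/slab.h>
#include "cifsfs.h"	/* cifs_sb_active(), cifs_sb_deactive() */

/* Hypothetical object whose lifetime may outlast the file descriptor it
 * was created from, much like a cifsFileInfo. */
struct my_handle {
	struct super_block *sb;
};

static struct my_handle *my_handle_open(struct file *file)
{
	struct my_handle *h = kzalloc(sizeof(*h), GFP_KERNEL);

	if (!h)
		return NULL;

	h->sb = file_inode(file)->i_sb;
	/* Safe here: the open path already holds a VFS reference, so this
	 * cannot race with deactivate_locked_super(). */
	cifs_sb_active(h->sb);
	return h;
}

static void my_handle_put(struct my_handle *h)
{
	struct super_block *sb = h->sb;

	kfree(h);
	cifs_sb_deactive(sb);	/* may drop the final sb reference */
}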
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 0079696305c9..20887bf63121 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -1043,7 +1043,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry, | |||
1043 | cifs_sb->mnt_cifs_flags & | 1043 | cifs_sb->mnt_cifs_flags & |
1044 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 1044 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
1045 | if (rc != 0) { | 1045 | if (rc != 0) { |
1046 | rc = -ETXTBSY; | 1046 | rc = -EBUSY; |
1047 | goto undo_setattr; | 1047 | goto undo_setattr; |
1048 | } | 1048 | } |
1049 | 1049 | ||
@@ -1062,7 +1062,7 @@ cifs_rename_pending_delete(const char *full_path, struct dentry *dentry, | |||
1062 | if (rc == -ENOENT) | 1062 | if (rc == -ENOENT) |
1063 | rc = 0; | 1063 | rc = 0; |
1064 | else if (rc != 0) { | 1064 | else if (rc != 0) { |
1065 | rc = -ETXTBSY; | 1065 | rc = -EBUSY; |
1066 | goto undo_rename; | 1066 | goto undo_rename; |
1067 | } | 1067 | } |
1068 | cifsInode->delete_pending = true; | 1068 | cifsInode->delete_pending = true; |
@@ -1169,15 +1169,13 @@ psx_del_no_retry: | |||
1169 | cifs_drop_nlink(inode); | 1169 | cifs_drop_nlink(inode); |
1170 | } else if (rc == -ENOENT) { | 1170 | } else if (rc == -ENOENT) { |
1171 | d_drop(dentry); | 1171 | d_drop(dentry); |
1172 | } else if (rc == -ETXTBSY) { | 1172 | } else if (rc == -EBUSY) { |
1173 | if (server->ops->rename_pending_delete) { | 1173 | if (server->ops->rename_pending_delete) { |
1174 | rc = server->ops->rename_pending_delete(full_path, | 1174 | rc = server->ops->rename_pending_delete(full_path, |
1175 | dentry, xid); | 1175 | dentry, xid); |
1176 | if (rc == 0) | 1176 | if (rc == 0) |
1177 | cifs_drop_nlink(inode); | 1177 | cifs_drop_nlink(inode); |
1178 | } | 1178 | } |
1179 | if (rc == -ETXTBSY) | ||
1180 | rc = -EBUSY; | ||
1181 | } else if ((rc == -EACCES) && (dosattr == 0) && inode) { | 1179 | } else if ((rc == -EACCES) && (dosattr == 0) && inode) { |
1182 | attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); | 1180 | attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); |
1183 | if (attrs == NULL) { | 1181 | if (attrs == NULL) { |
@@ -1518,7 +1516,7 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry, | |||
1518 | * source. Note that cross directory moves do not work with | 1516 | * source. Note that cross directory moves do not work with |
1519 | * rename by filehandle to various Windows servers. | 1517 | * rename by filehandle to various Windows servers. |
1520 | */ | 1518 | */ |
1521 | if (rc == 0 || rc != -ETXTBSY) | 1519 | if (rc == 0 || rc != -EBUSY) |
1522 | goto do_rename_exit; | 1520 | goto do_rename_exit; |
1523 | 1521 | ||
1524 | /* open-file renames don't work across directories */ | 1522 | /* open-file renames don't work across directories */ |
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index a82bc51fdc82..c0b25b28be6c 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c | |||
@@ -62,7 +62,7 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = { | |||
62 | {ERRdiffdevice, -EXDEV}, | 62 | {ERRdiffdevice, -EXDEV}, |
63 | {ERRnofiles, -ENOENT}, | 63 | {ERRnofiles, -ENOENT}, |
64 | {ERRwriteprot, -EROFS}, | 64 | {ERRwriteprot, -EROFS}, |
65 | {ERRbadshare, -ETXTBSY}, | 65 | {ERRbadshare, -EBUSY}, |
66 | {ERRlock, -EACCES}, | 66 | {ERRlock, -EACCES}, |
67 | {ERRunsup, -EINVAL}, | 67 | {ERRunsup, -EINVAL}, |
68 | {ERRnosuchshare, -ENXIO}, | 68 | {ERRnosuchshare, -ENXIO}, |
diff --git a/fs/dcache.c b/fs/dcache.c index fbfae008ba44..e8bc3420d63e 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -2542,7 +2542,6 @@ static int prepend_path(const struct path *path, | |||
2542 | bool slash = false; | 2542 | bool slash = false; |
2543 | int error = 0; | 2543 | int error = 0; |
2544 | 2544 | ||
2545 | br_read_lock(&vfsmount_lock); | ||
2546 | while (dentry != root->dentry || vfsmnt != root->mnt) { | 2545 | while (dentry != root->dentry || vfsmnt != root->mnt) { |
2547 | struct dentry * parent; | 2546 | struct dentry * parent; |
2548 | 2547 | ||
@@ -2572,8 +2571,6 @@ static int prepend_path(const struct path *path, | |||
2572 | if (!error && !slash) | 2571 | if (!error && !slash) |
2573 | error = prepend(buffer, buflen, "/", 1); | 2572 | error = prepend(buffer, buflen, "/", 1); |
2574 | 2573 | ||
2575 | out: | ||
2576 | br_read_unlock(&vfsmount_lock); | ||
2577 | return error; | 2574 | return error; |
2578 | 2575 | ||
2579 | global_root: | 2576 | global_root: |
@@ -2590,7 +2587,7 @@ global_root: | |||
2590 | error = prepend(buffer, buflen, "/", 1); | 2587 | error = prepend(buffer, buflen, "/", 1); |
2591 | if (!error) | 2588 | if (!error) |
2592 | error = is_mounted(vfsmnt) ? 1 : 2; | 2589 | error = is_mounted(vfsmnt) ? 1 : 2; |
2593 | goto out; | 2590 | return error; |
2594 | } | 2591 | } |
2595 | 2592 | ||
2596 | /** | 2593 | /** |
@@ -2617,9 +2614,11 @@ char *__d_path(const struct path *path, | |||
2617 | int error; | 2614 | int error; |
2618 | 2615 | ||
2619 | prepend(&res, &buflen, "\0", 1); | 2616 | prepend(&res, &buflen, "\0", 1); |
2617 | br_read_lock(&vfsmount_lock); | ||
2620 | write_seqlock(&rename_lock); | 2618 | write_seqlock(&rename_lock); |
2621 | error = prepend_path(path, root, &res, &buflen); | 2619 | error = prepend_path(path, root, &res, &buflen); |
2622 | write_sequnlock(&rename_lock); | 2620 | write_sequnlock(&rename_lock); |
2621 | br_read_unlock(&vfsmount_lock); | ||
2623 | 2622 | ||
2624 | if (error < 0) | 2623 | if (error < 0) |
2625 | return ERR_PTR(error); | 2624 | return ERR_PTR(error); |
@@ -2636,9 +2635,11 @@ char *d_absolute_path(const struct path *path, | |||
2636 | int error; | 2635 | int error; |
2637 | 2636 | ||
2638 | prepend(&res, &buflen, "\0", 1); | 2637 | prepend(&res, &buflen, "\0", 1); |
2638 | br_read_lock(&vfsmount_lock); | ||
2639 | write_seqlock(&rename_lock); | 2639 | write_seqlock(&rename_lock); |
2640 | error = prepend_path(path, &root, &res, &buflen); | 2640 | error = prepend_path(path, &root, &res, &buflen); |
2641 | write_sequnlock(&rename_lock); | 2641 | write_sequnlock(&rename_lock); |
2642 | br_read_unlock(&vfsmount_lock); | ||
2642 | 2643 | ||
2643 | if (error > 1) | 2644 | if (error > 1) |
2644 | error = -EINVAL; | 2645 | error = -EINVAL; |
@@ -2702,11 +2703,13 @@ char *d_path(const struct path *path, char *buf, int buflen) | |||
2702 | return path->dentry->d_op->d_dname(path->dentry, buf, buflen); | 2703 | return path->dentry->d_op->d_dname(path->dentry, buf, buflen); |
2703 | 2704 | ||
2704 | get_fs_root(current->fs, &root); | 2705 | get_fs_root(current->fs, &root); |
2706 | br_read_lock(&vfsmount_lock); | ||
2705 | write_seqlock(&rename_lock); | 2707 | write_seqlock(&rename_lock); |
2706 | error = path_with_deleted(path, &root, &res, &buflen); | 2708 | error = path_with_deleted(path, &root, &res, &buflen); |
2709 | write_sequnlock(&rename_lock); | ||
2710 | br_read_unlock(&vfsmount_lock); | ||
2707 | if (error < 0) | 2711 | if (error < 0) |
2708 | res = ERR_PTR(error); | 2712 | res = ERR_PTR(error); |
2709 | write_sequnlock(&rename_lock); | ||
2710 | path_put(&root); | 2713 | path_put(&root); |
2711 | return res; | 2714 | return res; |
2712 | } | 2715 | } |
@@ -2830,6 +2833,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) | |||
2830 | get_fs_root_and_pwd(current->fs, &root, &pwd); | 2833 | get_fs_root_and_pwd(current->fs, &root, &pwd); |
2831 | 2834 | ||
2832 | error = -ENOENT; | 2835 | error = -ENOENT; |
2836 | br_read_lock(&vfsmount_lock); | ||
2833 | write_seqlock(&rename_lock); | 2837 | write_seqlock(&rename_lock); |
2834 | if (!d_unlinked(pwd.dentry)) { | 2838 | if (!d_unlinked(pwd.dentry)) { |
2835 | unsigned long len; | 2839 | unsigned long len; |
@@ -2839,6 +2843,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) | |||
2839 | prepend(&cwd, &buflen, "\0", 1); | 2843 | prepend(&cwd, &buflen, "\0", 1); |
2840 | error = prepend_path(&pwd, &root, &cwd, &buflen); | 2844 | error = prepend_path(&pwd, &root, &cwd, &buflen); |
2841 | write_sequnlock(&rename_lock); | 2845 | write_sequnlock(&rename_lock); |
2846 | br_read_unlock(&vfsmount_lock); | ||
2842 | 2847 | ||
2843 | if (error < 0) | 2848 | if (error < 0) |
2844 | goto out; | 2849 | goto out; |
@@ -2859,6 +2864,7 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) | |||
2859 | } | 2864 | } |
2860 | } else { | 2865 | } else { |
2861 | write_sequnlock(&rename_lock); | 2866 | write_sequnlock(&rename_lock); |
2867 | br_read_unlock(&vfsmount_lock); | ||
2862 | } | 2868 | } |
2863 | 2869 | ||
2864 | out: | 2870 | out: |
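All of the dcache.c hunks make one locking change: prepend_path() no longer takes vfsmount_lock itself, so each caller now brackets the rename_lock seqlock with br_read_lock()/br_read_unlock() in a fixed outer/inner order. The sketch below is essentially __d_path() reduced to its locking skeleton, shown only to make that ordering explicit:

/* Locking skeleton: mount topology lock outside, rename seqlock inside,
 * both dropped before the result buffer is examined. */
static char *path_string_sketch(const struct path *path,
				const struct path *root,
				char *buf, int buflen)
{
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);

	br_read_lock(&vfsmount_lock);	/* outer: mounts stay stable       */
	write_seqlock(&rename_lock);	/* inner: dentry names stay stable */
	error = prepend_path(path, root, &res, &buflen);
	write_sequnlock(&rename_lock);
	br_read_unlock(&vfsmount_lock);

	return error < 0 ? ERR_PTR(error) : res;
}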
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c index 412e6eda25f8..e4141f257495 100644 --- a/fs/ecryptfs/miscdev.c +++ b/fs/ecryptfs/miscdev.c | |||
@@ -80,13 +80,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) | |||
80 | int rc; | 80 | int rc; |
81 | 81 | ||
82 | mutex_lock(&ecryptfs_daemon_hash_mux); | 82 | mutex_lock(&ecryptfs_daemon_hash_mux); |
83 | rc = try_module_get(THIS_MODULE); | ||
84 | if (rc == 0) { | ||
85 | rc = -EIO; | ||
86 | printk(KERN_ERR "%s: Error attempting to increment module use " | ||
87 | "count; rc = [%d]\n", __func__, rc); | ||
88 | goto out_unlock_daemon_list; | ||
89 | } | ||
90 | rc = ecryptfs_find_daemon_by_euid(&daemon); | 83 | rc = ecryptfs_find_daemon_by_euid(&daemon); |
91 | if (!rc) { | 84 | if (!rc) { |
92 | rc = -EINVAL; | 85 | rc = -EINVAL; |
@@ -96,7 +89,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) | |||
96 | if (rc) { | 89 | if (rc) { |
97 | printk(KERN_ERR "%s: Error attempting to spawn daemon; " | 90 | printk(KERN_ERR "%s: Error attempting to spawn daemon; " |
98 | "rc = [%d]\n", __func__, rc); | 91 | "rc = [%d]\n", __func__, rc); |
99 | goto out_module_put_unlock_daemon_list; | 92 | goto out_unlock_daemon_list; |
100 | } | 93 | } |
101 | mutex_lock(&daemon->mux); | 94 | mutex_lock(&daemon->mux); |
102 | if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) { | 95 | if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) { |
@@ -108,9 +101,6 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file) | |||
108 | atomic_inc(&ecryptfs_num_miscdev_opens); | 101 | atomic_inc(&ecryptfs_num_miscdev_opens); |
109 | out_unlock_daemon: | 102 | out_unlock_daemon: |
110 | mutex_unlock(&daemon->mux); | 103 | mutex_unlock(&daemon->mux); |
111 | out_module_put_unlock_daemon_list: | ||
112 | if (rc) | ||
113 | module_put(THIS_MODULE); | ||
114 | out_unlock_daemon_list: | 104 | out_unlock_daemon_list: |
115 | mutex_unlock(&ecryptfs_daemon_hash_mux); | 105 | mutex_unlock(&ecryptfs_daemon_hash_mux); |
116 | return rc; | 106 | return rc; |
@@ -147,7 +137,6 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file) | |||
147 | "bug.\n", __func__, rc); | 137 | "bug.\n", __func__, rc); |
148 | BUG(); | 138 | BUG(); |
149 | } | 139 | } |
150 | module_put(THIS_MODULE); | ||
151 | return rc; | 140 | return rc; |
152 | } | 141 | } |
153 | 142 | ||
@@ -471,6 +460,7 @@ out_free: | |||
471 | 460 | ||
472 | 461 | ||
473 | static const struct file_operations ecryptfs_miscdev_fops = { | 462 | static const struct file_operations ecryptfs_miscdev_fops = { |
463 | .owner = THIS_MODULE, | ||
474 | .open = ecryptfs_miscdev_open, | 464 | .open = ecryptfs_miscdev_open, |
475 | .poll = ecryptfs_miscdev_poll, | 465 | .poll = ecryptfs_miscdev_poll, |
476 | .read = ecryptfs_miscdev_read, | 466 | .read = ecryptfs_miscdev_read, |
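Setting .owner in the file_operations is what allows the explicit try_module_get()/module_put() calls to be removed: the chardev core pins the module for as long as the file is open. A minimal, self-contained sketch of the same idiom for a hypothetical misc device (names are illustrative only):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

static int demo_open(struct inode *inode, struct file *file)
{
	return 0;	/* no manual module refcounting needed */
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,	/* core holds the module ref across open/release */
	.open  = demo_open,
};

static struct miscdevice demo_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "demo",
	.fops  = &demo_fops,
};

/* misc_register(&demo_miscdev) in module init, misc_deregister() on exit. */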
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 4a01ba315262..3b83cd604796 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -335,9 +335,9 @@ struct ext4_group_desc | |||
335 | */ | 335 | */ |
336 | 336 | ||
337 | struct flex_groups { | 337 | struct flex_groups { |
338 | atomic_t free_inodes; | 338 | atomic64_t free_clusters; |
339 | atomic_t free_clusters; | 339 | atomic_t free_inodes; |
340 | atomic_t used_dirs; | 340 | atomic_t used_dirs; |
341 | }; | 341 | }; |
342 | 342 | ||
343 | #define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */ | 343 | #define EXT4_BG_INODE_UNINIT 0x0001 /* Inode table/bitmap not in use */ |
@@ -2617,7 +2617,7 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp, | |||
2617 | extern int __init ext4_init_pageio(void); | 2617 | extern int __init ext4_init_pageio(void); |
2618 | extern void ext4_add_complete_io(ext4_io_end_t *io_end); | 2618 | extern void ext4_add_complete_io(ext4_io_end_t *io_end); |
2619 | extern void ext4_exit_pageio(void); | 2619 | extern void ext4_exit_pageio(void); |
2620 | extern void ext4_ioend_wait(struct inode *); | 2620 | extern void ext4_ioend_shutdown(struct inode *); |
2621 | extern void ext4_free_io_end(ext4_io_end_t *io); | 2621 | extern void ext4_free_io_end(ext4_io_end_t *io); |
2622 | extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); | 2622 | extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); |
2623 | extern void ext4_end_io_work(struct work_struct *work); | 2623 | extern void ext4_end_io_work(struct work_struct *work); |
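With free_clusters widened to atomic64_t and moved to the head of struct flex_groups, code touching it must use the 64-bit atomic accessors while the remaining counters keep the 32-bit ones. A hedged illustration; the helper and its delta parameter are assumptions, not ext4 code:

#include <linux/atomic.h>

static void demo_flex_account(struct flex_groups *fg, s64 cluster_delta)
{
	atomic64_add(cluster_delta, &fg->free_clusters);	/* 64-bit counter */
	atomic_dec(&fg->free_inodes);				/* still 32-bit   */
	atomic_inc(&fg->used_dirs);				/* still 32-bit   */
}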
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 28dd8eeea6a9..9c6d06dcef8b 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -1584,10 +1584,12 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, | |||
1584 | unsigned short ext1_ee_len, ext2_ee_len, max_len; | 1584 | unsigned short ext1_ee_len, ext2_ee_len, max_len; |
1585 | 1585 | ||
1586 | /* | 1586 | /* |
1587 | * Make sure that either both extents are uninitialized, or | 1587 | * Make sure that both extents are initialized. We don't merge |
1588 | * both are _not_. | 1588 | * uninitialized extents so that we can be sure that end_io code has |
1589 | * the extent that was written properly split out and conversion to | ||
1590 | * initialized is trivial. | ||
1589 | */ | 1591 | */ |
1590 | if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2)) | 1592 | if (ext4_ext_is_uninitialized(ex1) || ext4_ext_is_uninitialized(ex2)) |
1591 | return 0; | 1593 | return 0; |
1592 | 1594 | ||
1593 | if (ext4_ext_is_uninitialized(ex1)) | 1595 | if (ext4_ext_is_uninitialized(ex1)) |
@@ -2923,7 +2925,7 @@ static int ext4_split_extent_at(handle_t *handle, | |||
2923 | { | 2925 | { |
2924 | ext4_fsblk_t newblock; | 2926 | ext4_fsblk_t newblock; |
2925 | ext4_lblk_t ee_block; | 2927 | ext4_lblk_t ee_block; |
2926 | struct ext4_extent *ex, newex, orig_ex; | 2928 | struct ext4_extent *ex, newex, orig_ex, zero_ex; |
2927 | struct ext4_extent *ex2 = NULL; | 2929 | struct ext4_extent *ex2 = NULL; |
2928 | unsigned int ee_len, depth; | 2930 | unsigned int ee_len, depth; |
2929 | int err = 0; | 2931 | int err = 0; |
@@ -2943,6 +2945,10 @@ static int ext4_split_extent_at(handle_t *handle, | |||
2943 | newblock = split - ee_block + ext4_ext_pblock(ex); | 2945 | newblock = split - ee_block + ext4_ext_pblock(ex); |
2944 | 2946 | ||
2945 | BUG_ON(split < ee_block || split >= (ee_block + ee_len)); | 2947 | BUG_ON(split < ee_block || split >= (ee_block + ee_len)); |
2948 | BUG_ON(!ext4_ext_is_uninitialized(ex) && | ||
2949 | split_flag & (EXT4_EXT_MAY_ZEROOUT | | ||
2950 | EXT4_EXT_MARK_UNINIT1 | | ||
2951 | EXT4_EXT_MARK_UNINIT2)); | ||
2946 | 2952 | ||
2947 | err = ext4_ext_get_access(handle, inode, path + depth); | 2953 | err = ext4_ext_get_access(handle, inode, path + depth); |
2948 | if (err) | 2954 | if (err) |
@@ -2990,12 +2996,29 @@ static int ext4_split_extent_at(handle_t *handle, | |||
2990 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); | 2996 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); |
2991 | if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { | 2997 | if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { |
2992 | if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { | 2998 | if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { |
2993 | if (split_flag & EXT4_EXT_DATA_VALID1) | 2999 | if (split_flag & EXT4_EXT_DATA_VALID1) { |
2994 | err = ext4_ext_zeroout(inode, ex2); | 3000 | err = ext4_ext_zeroout(inode, ex2); |
2995 | else | 3001 | zero_ex.ee_block = ex2->ee_block; |
3002 | zero_ex.ee_len = cpu_to_le16( | ||
3003 | ext4_ext_get_actual_len(ex2)); | ||
3004 | ext4_ext_store_pblock(&zero_ex, | ||
3005 | ext4_ext_pblock(ex2)); | ||
3006 | } else { | ||
2996 | err = ext4_ext_zeroout(inode, ex); | 3007 | err = ext4_ext_zeroout(inode, ex); |
2997 | } else | 3008 | zero_ex.ee_block = ex->ee_block; |
3009 | zero_ex.ee_len = cpu_to_le16( | ||
3010 | ext4_ext_get_actual_len(ex)); | ||
3011 | ext4_ext_store_pblock(&zero_ex, | ||
3012 | ext4_ext_pblock(ex)); | ||
3013 | } | ||
3014 | } else { | ||
2998 | err = ext4_ext_zeroout(inode, &orig_ex); | 3015 | err = ext4_ext_zeroout(inode, &orig_ex); |
3016 | zero_ex.ee_block = orig_ex.ee_block; | ||
3017 | zero_ex.ee_len = cpu_to_le16( | ||
3018 | ext4_ext_get_actual_len(&orig_ex)); | ||
3019 | ext4_ext_store_pblock(&zero_ex, | ||
3020 | ext4_ext_pblock(&orig_ex)); | ||
3021 | } | ||
2999 | 3022 | ||
3000 | if (err) | 3023 | if (err) |
3001 | goto fix_extent_len; | 3024 | goto fix_extent_len; |
@@ -3003,6 +3026,12 @@ static int ext4_split_extent_at(handle_t *handle, | |||
3003 | ex->ee_len = cpu_to_le16(ee_len); | 3026 | ex->ee_len = cpu_to_le16(ee_len); |
3004 | ext4_ext_try_to_merge(handle, inode, path, ex); | 3027 | ext4_ext_try_to_merge(handle, inode, path, ex); |
3005 | err = ext4_ext_dirty(handle, inode, path + path->p_depth); | 3028 | err = ext4_ext_dirty(handle, inode, path + path->p_depth); |
3029 | if (err) | ||
3030 | goto fix_extent_len; | ||
3031 | |||
3032 | /* update extent status tree */ | ||
3033 | err = ext4_es_zeroout(inode, &zero_ex); | ||
3034 | |||
3006 | goto out; | 3035 | goto out; |
3007 | } else if (err) | 3036 | } else if (err) |
3008 | goto fix_extent_len; | 3037 | goto fix_extent_len; |
@@ -3041,6 +3070,7 @@ static int ext4_split_extent(handle_t *handle, | |||
3041 | int err = 0; | 3070 | int err = 0; |
3042 | int uninitialized; | 3071 | int uninitialized; |
3043 | int split_flag1, flags1; | 3072 | int split_flag1, flags1; |
3073 | int allocated = map->m_len; | ||
3044 | 3074 | ||
3045 | depth = ext_depth(inode); | 3075 | depth = ext_depth(inode); |
3046 | ex = path[depth].p_ext; | 3076 | ex = path[depth].p_ext; |
@@ -3060,20 +3090,29 @@ static int ext4_split_extent(handle_t *handle, | |||
3060 | map->m_lblk + map->m_len, split_flag1, flags1); | 3090 | map->m_lblk + map->m_len, split_flag1, flags1); |
3061 | if (err) | 3091 | if (err) |
3062 | goto out; | 3092 | goto out; |
3093 | } else { | ||
3094 | allocated = ee_len - (map->m_lblk - ee_block); | ||
3063 | } | 3095 | } |
3064 | 3096 | /* | |
3097 | * Update path is required because previous ext4_split_extent_at() may | ||
3098 | * result in split of original leaf or extent zeroout. | ||
3099 | */ | ||
3065 | ext4_ext_drop_refs(path); | 3100 | ext4_ext_drop_refs(path); |
3066 | path = ext4_ext_find_extent(inode, map->m_lblk, path); | 3101 | path = ext4_ext_find_extent(inode, map->m_lblk, path); |
3067 | if (IS_ERR(path)) | 3102 | if (IS_ERR(path)) |
3068 | return PTR_ERR(path); | 3103 | return PTR_ERR(path); |
3104 | depth = ext_depth(inode); | ||
3105 | ex = path[depth].p_ext; | ||
3106 | uninitialized = ext4_ext_is_uninitialized(ex); | ||
3107 | split_flag1 = 0; | ||
3069 | 3108 | ||
3070 | if (map->m_lblk >= ee_block) { | 3109 | if (map->m_lblk >= ee_block) { |
3071 | split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT | | 3110 | split_flag1 = split_flag & EXT4_EXT_DATA_VALID2; |
3072 | EXT4_EXT_DATA_VALID2); | 3111 | if (uninitialized) { |
3073 | if (uninitialized) | ||
3074 | split_flag1 |= EXT4_EXT_MARK_UNINIT1; | 3112 | split_flag1 |= EXT4_EXT_MARK_UNINIT1; |
3075 | if (split_flag & EXT4_EXT_MARK_UNINIT2) | 3113 | split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | |
3076 | split_flag1 |= EXT4_EXT_MARK_UNINIT2; | 3114 | EXT4_EXT_MARK_UNINIT2); |
3115 | } | ||
3077 | err = ext4_split_extent_at(handle, inode, path, | 3116 | err = ext4_split_extent_at(handle, inode, path, |
3078 | map->m_lblk, split_flag1, flags); | 3117 | map->m_lblk, split_flag1, flags); |
3079 | if (err) | 3118 | if (err) |
@@ -3082,7 +3121,7 @@ static int ext4_split_extent(handle_t *handle, | |||
3082 | 3121 | ||
3083 | ext4_ext_show_leaf(inode, path); | 3122 | ext4_ext_show_leaf(inode, path); |
3084 | out: | 3123 | out: |
3085 | return err ? err : map->m_len; | 3124 | return err ? err : allocated; |
3086 | } | 3125 | } |
3087 | 3126 | ||
3088 | /* | 3127 | /* |
@@ -3137,6 +3176,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
3137 | ee_block = le32_to_cpu(ex->ee_block); | 3176 | ee_block = le32_to_cpu(ex->ee_block); |
3138 | ee_len = ext4_ext_get_actual_len(ex); | 3177 | ee_len = ext4_ext_get_actual_len(ex); |
3139 | allocated = ee_len - (map->m_lblk - ee_block); | 3178 | allocated = ee_len - (map->m_lblk - ee_block); |
3179 | zero_ex.ee_len = 0; | ||
3140 | 3180 | ||
3141 | trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); | 3181 | trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); |
3142 | 3182 | ||
@@ -3227,13 +3267,16 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
3227 | 3267 | ||
3228 | if (EXT4_EXT_MAY_ZEROOUT & split_flag) | 3268 | if (EXT4_EXT_MAY_ZEROOUT & split_flag) |
3229 | max_zeroout = sbi->s_extent_max_zeroout_kb >> | 3269 | max_zeroout = sbi->s_extent_max_zeroout_kb >> |
3230 | inode->i_sb->s_blocksize_bits; | 3270 | (inode->i_sb->s_blocksize_bits - 10); |
3231 | 3271 | ||
3232 | /* If extent is less than s_max_zeroout_kb, zeroout directly */ | 3272 | /* If extent is less than s_max_zeroout_kb, zeroout directly */ |
3233 | if (max_zeroout && (ee_len <= max_zeroout)) { | 3273 | if (max_zeroout && (ee_len <= max_zeroout)) { |
3234 | err = ext4_ext_zeroout(inode, ex); | 3274 | err = ext4_ext_zeroout(inode, ex); |
3235 | if (err) | 3275 | if (err) |
3236 | goto out; | 3276 | goto out; |
3277 | zero_ex.ee_block = ex->ee_block; | ||
3278 | zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)); | ||
3279 | ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex)); | ||
3237 | 3280 | ||
3238 | err = ext4_ext_get_access(handle, inode, path + depth); | 3281 | err = ext4_ext_get_access(handle, inode, path + depth); |
3239 | if (err) | 3282 | if (err) |
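The unit fix in this hunk matters because s_extent_max_zeroout_kb is expressed in kibibytes, so converting it to a block count has to divide by the block size in KiB, not in bytes. A hedged arithmetic sketch (assuming 4 KiB blocks and a tunable value of 32):

        /* blocksize_bits == 12 for 4 KiB blocks, tunable assumed to be 32 (KiB) */
        max_zeroout = 32 >> (12 - 10);   /* new code: 32 KiB / 4 KiB = 8 blocks          */
        max_zeroout = 32 >> 12;          /* old code: 0 blocks, zeroout effectively off  */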
@@ -3292,6 +3335,9 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
3292 | err = allocated; | 3335 | err = allocated; |
3293 | 3336 | ||
3294 | out: | 3337 | out: |
3338 | /* If we have gotten a failure, don't zero out status tree */ | ||
3339 | if (!err) | ||
3340 | err = ext4_es_zeroout(inode, &zero_ex); | ||
3295 | return err ? err : allocated; | 3341 | return err ? err : allocated; |
3296 | } | 3342 | } |
3297 | 3343 | ||
@@ -3374,8 +3420,19 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle, | |||
3374 | "block %llu, max_blocks %u\n", inode->i_ino, | 3420 | "block %llu, max_blocks %u\n", inode->i_ino, |
3375 | (unsigned long long)ee_block, ee_len); | 3421 | (unsigned long long)ee_block, ee_len); |
3376 | 3422 | ||
3377 | /* If extent is larger than requested then split is required */ | 3423 | /* If extent is larger than requested it is a clear sign that we still |
3424 | * have some extent state machine issues left. So extent_split is still | ||
3425 | * required. | ||
3426 | * TODO: Once all related issues are fixed, this situation should be | ||
3426 | * TODO: Once all related issues are fixed, this situation should be | ||
3427 | * illegal. | ||
3428 | */ | ||
3378 | if (ee_block != map->m_lblk || ee_len > map->m_len) { | 3429 | if (ee_block != map->m_lblk || ee_len > map->m_len) { |
3430 | #ifdef EXT4_DEBUG | ||
3431 | ext4_warning("Inode (%ld) finished: extent logical block %llu," | ||
3432 | " len %u; IO logical block %llu, len %u\n", | ||
3433 | inode->i_ino, (unsigned long long)ee_block, ee_len, | ||
3434 | (unsigned long long)map->m_lblk, map->m_len); | ||
3435 | #endif | ||
3379 | err = ext4_split_unwritten_extents(handle, inode, map, path, | 3436 | err = ext4_split_unwritten_extents(handle, inode, map, path, |
3380 | EXT4_GET_BLOCKS_CONVERT); | 3437 | EXT4_GET_BLOCKS_CONVERT); |
3381 | if (err < 0) | 3438 | if (err < 0) |
@@ -3626,6 +3683,10 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode, | |||
3626 | path, map->m_len); | 3683 | path, map->m_len); |
3627 | } else | 3684 | } else |
3628 | err = ret; | 3685 | err = ret; |
3686 | map->m_flags |= EXT4_MAP_MAPPED; | ||
3687 | if (allocated > map->m_len) | ||
3688 | allocated = map->m_len; | ||
3689 | map->m_len = allocated; | ||
3629 | goto out2; | 3690 | goto out2; |
3630 | } | 3691 | } |
3631 | /* buffered IO case */ | 3692 | /* buffered IO case */ |
@@ -3675,6 +3736,7 @@ out: | |||
3675 | allocated - map->m_len); | 3736 | allocated - map->m_len); |
3676 | allocated = map->m_len; | 3737 | allocated = map->m_len; |
3677 | } | 3738 | } |
3739 | map->m_len = allocated; | ||
3678 | 3740 | ||
3679 | /* | 3741 | /* |
3680 | * If we have done fallocate with the offset that is already | 3742 | * If we have done fallocate with the offset that is already |
@@ -4106,9 +4168,6 @@ got_allocated_blocks: | |||
4106 | } | 4168 | } |
4107 | } else { | 4169 | } else { |
4108 | BUG_ON(allocated_clusters < reserved_clusters); | 4170 | BUG_ON(allocated_clusters < reserved_clusters); |
4109 | /* We will claim quota for all newly allocated blocks.*/ | ||
4110 | ext4_da_update_reserve_space(inode, allocated_clusters, | ||
4111 | 1); | ||
4112 | if (reserved_clusters < allocated_clusters) { | 4171 | if (reserved_clusters < allocated_clusters) { |
4113 | struct ext4_inode_info *ei = EXT4_I(inode); | 4172 | struct ext4_inode_info *ei = EXT4_I(inode); |
4114 | int reservation = allocated_clusters - | 4173 | int reservation = allocated_clusters - |
@@ -4159,6 +4218,15 @@ got_allocated_blocks: | |||
4159 | ei->i_reserved_data_blocks += reservation; | 4218 | ei->i_reserved_data_blocks += reservation; |
4160 | spin_unlock(&ei->i_block_reservation_lock); | 4219 | spin_unlock(&ei->i_block_reservation_lock); |
4161 | } | 4220 | } |
4221 | /* | ||
4222 | * We will claim quota for all newly allocated blocks. | ||
4223 | * We're updating the reserved space *after* the | ||
4224 | * correction above so we do not accidentally free | ||
4225 | * all the metadata reservation because we might | ||
4226 | * actually need it later on. | ||
4227 | */ | ||
4228 | ext4_da_update_reserve_space(inode, allocated_clusters, | ||
4229 | 1); | ||
4162 | } | 4230 | } |
4163 | } | 4231 | } |
4164 | 4232 | ||
@@ -4368,8 +4436,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) | |||
4368 | if (len <= EXT_UNINIT_MAX_LEN << blkbits) | 4436 | if (len <= EXT_UNINIT_MAX_LEN << blkbits) |
4369 | flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; | 4437 | flags |= EXT4_GET_BLOCKS_NO_NORMALIZE; |
4370 | 4438 | ||
4371 | /* Prevent race condition between unwritten */ | ||
4372 | ext4_flush_unwritten_io(inode); | ||
4373 | retry: | 4439 | retry: |
4374 | while (ret >= 0 && ret < max_blocks) { | 4440 | while (ret >= 0 && ret < max_blocks) { |
4375 | map.m_lblk = map.m_lblk + ret; | 4441 | map.m_lblk = map.m_lblk + ret; |
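Taken together, the extents.c hunks above follow one pattern: whenever a range is physically zeroed out, the affected extent is also recorded in a local zero_ex and pushed into the extent status tree as written. A condensed, hedged sketch of that pattern (error handling trimmed; ext4_es_zeroout() itself is added in the extents_status.c hunk below):

        err = ext4_ext_zeroout(inode, ex);
        if (!err) {
                zero_ex.ee_block = ex->ee_block;
                zero_ex.ee_len   = cpu_to_le16(ext4_ext_get_actual_len(ex));
                ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
                err = ext4_es_zeroout(inode, &zero_ex);  /* mark the range WRITTEN */
        }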
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c index 95796a1b7522..fe3337a85ede 100644 --- a/fs/ext4/extents_status.c +++ b/fs/ext4/extents_status.c | |||
@@ -333,17 +333,27 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es) | |||
333 | static int ext4_es_can_be_merged(struct extent_status *es1, | 333 | static int ext4_es_can_be_merged(struct extent_status *es1, |
334 | struct extent_status *es2) | 334 | struct extent_status *es2) |
335 | { | 335 | { |
336 | if (es1->es_lblk + es1->es_len != es2->es_lblk) | 336 | if (ext4_es_status(es1) != ext4_es_status(es2)) |
337 | return 0; | 337 | return 0; |
338 | 338 | ||
339 | if (ext4_es_status(es1) != ext4_es_status(es2)) | 339 | if (((__u64) es1->es_len) + es2->es_len > 0xFFFFFFFFULL) |
340 | return 0; | 340 | return 0; |
341 | 341 | ||
342 | if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) && | 342 | if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk) |
343 | (ext4_es_pblock(es1) + es1->es_len != ext4_es_pblock(es2))) | ||
344 | return 0; | 343 | return 0; |
345 | 344 | ||
346 | return 1; | 345 | if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) && |
346 | (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2))) | ||
347 | return 1; | ||
348 | |||
349 | if (ext4_es_is_hole(es1)) | ||
350 | return 1; | ||
351 | |||
352 | /* we need to check that the delayed extent is without unwritten status */ | ||
353 | if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1)) | ||
354 | return 1; | ||
355 | |||
356 | return 0; | ||
347 | } | 357 | } |
348 | 358 | ||
349 | static struct extent_status * | 359 | static struct extent_status * |
@@ -389,6 +399,179 @@ ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es) | |||
389 | return es; | 399 | return es; |
390 | } | 400 | } |
391 | 401 | ||
402 | #ifdef ES_AGGRESSIVE_TEST | ||
403 | static void ext4_es_insert_extent_ext_check(struct inode *inode, | ||
404 | struct extent_status *es) | ||
405 | { | ||
406 | struct ext4_ext_path *path = NULL; | ||
407 | struct ext4_extent *ex; | ||
408 | ext4_lblk_t ee_block; | ||
409 | ext4_fsblk_t ee_start; | ||
410 | unsigned short ee_len; | ||
411 | int depth, ee_status, es_status; | ||
412 | |||
413 | path = ext4_ext_find_extent(inode, es->es_lblk, NULL); | ||
414 | if (IS_ERR(path)) | ||
415 | return; | ||
416 | |||
417 | depth = ext_depth(inode); | ||
418 | ex = path[depth].p_ext; | ||
419 | |||
420 | if (ex) { | ||
421 | |||
422 | ee_block = le32_to_cpu(ex->ee_block); | ||
423 | ee_start = ext4_ext_pblock(ex); | ||
424 | ee_len = ext4_ext_get_actual_len(ex); | ||
425 | |||
426 | ee_status = ext4_ext_is_uninitialized(ex) ? 1 : 0; | ||
427 | es_status = ext4_es_is_unwritten(es) ? 1 : 0; | ||
428 | |||
429 | /* | ||
430 | * Make sure ex and es do not overlap when we try to insert | ||
431 | * a delayed/hole extent. | ||
432 | */ | ||
433 | if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) { | ||
434 | if (in_range(es->es_lblk, ee_block, ee_len)) { | ||
435 | pr_warn("ES insert assertation failed for " | ||
436 | "inode: %lu we can find an extent " | ||
437 | "at block [%d/%d/%llu/%c], but we " | ||
438 | "want to add an delayed/hole extent " | ||
439 | "[%d/%d/%llu/%llx]\n", | ||
440 | inode->i_ino, ee_block, ee_len, | ||
441 | ee_start, ee_status ? 'u' : 'w', | ||
442 | es->es_lblk, es->es_len, | ||
443 | ext4_es_pblock(es), ext4_es_status(es)); | ||
444 | } | ||
445 | goto out; | ||
446 | } | ||
447 | |||
448 | /* | ||
449 | * We don't check ee_block == es->es_lblk, etc. because es | ||
450 | * might be a part of the whole extent, and vice versa. | ||
451 | */ | ||
452 | if (es->es_lblk < ee_block || | ||
453 | ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) { | ||
454 | pr_warn("ES insert assertion failed for inode: %lu " | ||
455 | "ex_status [%d/%d/%llu/%c] != " | ||
456 | "es_status [%d/%d/%llu/%c]\n", inode->i_ino, | ||
457 | ee_block, ee_len, ee_start, | ||
458 | ee_status ? 'u' : 'w', es->es_lblk, es->es_len, | ||
459 | ext4_es_pblock(es), es_status ? 'u' : 'w'); | ||
460 | goto out; | ||
461 | } | ||
462 | |||
463 | if (ee_status ^ es_status) { | ||
464 | pr_warn("ES insert assertion failed for inode: %lu " | ||
465 | "ex_status [%d/%d/%llu/%c] != " | ||
466 | "es_status [%d/%d/%llu/%c]\n", inode->i_ino, | ||
467 | ee_block, ee_len, ee_start, | ||
468 | ee_status ? 'u' : 'w', es->es_lblk, es->es_len, | ||
469 | ext4_es_pblock(es), es_status ? 'u' : 'w'); | ||
470 | } | ||
471 | } else { | ||
472 | /* | ||
473 | * We can't find an extent on disk. So we need to make sure | ||
474 | * that we are not trying to add a written/unwritten extent. | ||
475 | */ | ||
476 | if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) { | ||
477 | pr_warn("ES insert assertion failed for inode: %lu " | ||
478 | "can't find an extent at block %d but we want " | ||
479 | "to add a written/unwritten extent " | ||
480 | "[%d/%d/%llu/%llx]\n", inode->i_ino, | ||
481 | es->es_lblk, es->es_lblk, es->es_len, | ||
482 | ext4_es_pblock(es), ext4_es_status(es)); | ||
483 | } | ||
484 | } | ||
485 | out: | ||
486 | if (path) { | ||
487 | ext4_ext_drop_refs(path); | ||
488 | kfree(path); | ||
489 | } | ||
490 | } | ||
491 | |||
492 | static void ext4_es_insert_extent_ind_check(struct inode *inode, | ||
493 | struct extent_status *es) | ||
494 | { | ||
495 | struct ext4_map_blocks map; | ||
496 | int retval; | ||
497 | |||
498 | /* | ||
499 | * Here we call ext4_ind_map_blocks to lookup a block mapping because | ||
500 | * 'Indirect' structure is defined in indirect.c. So we couldn't | ||
501 | * access direct/indirect tree from outside. It is too dirty to define | ||
502 | * this function in indirect.c file. | ||
503 | */ | ||
504 | |||
505 | map.m_lblk = es->es_lblk; | ||
506 | map.m_len = es->es_len; | ||
507 | |||
508 | retval = ext4_ind_map_blocks(NULL, inode, &map, 0); | ||
509 | if (retval > 0) { | ||
510 | if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) { | ||
511 | /* | ||
512 | * We want to add a delayed/hole extent but this | ||
513 | * block has been allocated. | ||
514 | */ | ||
515 | pr_warn("ES insert assertion failed for inode: %lu " | ||
516 | "We can find blocks but we want to add a " | ||
517 | "delayed/hole extent [%d/%d/%llu/%llx]\n", | ||
518 | inode->i_ino, es->es_lblk, es->es_len, | ||
519 | ext4_es_pblock(es), ext4_es_status(es)); | ||
520 | return; | ||
521 | } else if (ext4_es_is_written(es)) { | ||
522 | if (retval != es->es_len) { | ||
523 | pr_warn("ES insert assertion failed for " | ||
524 | "inode: %lu retval %d != es_len %d\n", | ||
525 | inode->i_ino, retval, es->es_len); | ||
526 | return; | ||
527 | } | ||
528 | if (map.m_pblk != ext4_es_pblock(es)) { | ||
529 | pr_warn("ES insert assertion failed for " | ||
530 | "inode: %lu m_pblk %llu != " | ||
531 | "es_pblk %llu\n", | ||
532 | inode->i_ino, map.m_pblk, | ||
533 | ext4_es_pblock(es)); | ||
534 | return; | ||
535 | } | ||
536 | } else { | ||
537 | /* | ||
538 | * We don't need to check unwritten extent because | ||
539 | * indirect-based file doesn't have it. | ||
540 | */ | ||
541 | BUG_ON(1); | ||
542 | } | ||
543 | } else if (retval == 0) { | ||
544 | if (ext4_es_is_written(es)) { | ||
545 | pr_warn("ES insert assertion failed for inode: %lu " | ||
546 | "We can't find the block but we want to add " | ||
547 | "a written extent [%d/%d/%llu/%llx]\n", | ||
548 | inode->i_ino, es->es_lblk, es->es_len, | ||
549 | ext4_es_pblock(es), ext4_es_status(es)); | ||
550 | return; | ||
551 | } | ||
552 | } | ||
553 | } | ||
554 | |||
555 | static inline void ext4_es_insert_extent_check(struct inode *inode, | ||
556 | struct extent_status *es) | ||
557 | { | ||
558 | /* | ||
559 | * We don't need to worry about the race condition because | ||
560 | * caller takes i_data_sem locking. | ||
561 | */ | ||
562 | BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem)); | ||
563 | if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) | ||
564 | ext4_es_insert_extent_ext_check(inode, es); | ||
565 | else | ||
566 | ext4_es_insert_extent_ind_check(inode, es); | ||
567 | } | ||
568 | #else | ||
569 | static inline void ext4_es_insert_extent_check(struct inode *inode, | ||
570 | struct extent_status *es) | ||
571 | { | ||
572 | } | ||
573 | #endif | ||
574 | |||
392 | static int __es_insert_extent(struct inode *inode, struct extent_status *newes) | 575 | static int __es_insert_extent(struct inode *inode, struct extent_status *newes) |
393 | { | 576 | { |
394 | struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; | 577 | struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; |
@@ -471,6 +654,8 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, | |||
471 | ext4_es_store_status(&newes, status); | 654 | ext4_es_store_status(&newes, status); |
472 | trace_ext4_es_insert_extent(inode, &newes); | 655 | trace_ext4_es_insert_extent(inode, &newes); |
473 | 656 | ||
657 | ext4_es_insert_extent_check(inode, &newes); | ||
658 | |||
474 | write_lock(&EXT4_I(inode)->i_es_lock); | 659 | write_lock(&EXT4_I(inode)->i_es_lock); |
475 | err = __es_remove_extent(inode, lblk, end); | 660 | err = __es_remove_extent(inode, lblk, end); |
476 | if (err != 0) | 661 | if (err != 0) |
@@ -669,6 +854,23 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, | |||
669 | return err; | 854 | return err; |
670 | } | 855 | } |
671 | 856 | ||
857 | int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex) | ||
858 | { | ||
859 | ext4_lblk_t ee_block; | ||
860 | ext4_fsblk_t ee_pblock; | ||
861 | unsigned int ee_len; | ||
862 | |||
863 | ee_block = le32_to_cpu(ex->ee_block); | ||
864 | ee_len = ext4_ext_get_actual_len(ex); | ||
865 | ee_pblock = ext4_ext_pblock(ex); | ||
866 | |||
867 | if (ee_len == 0) | ||
868 | return 0; | ||
869 | |||
870 | return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock, | ||
871 | EXTENT_STATUS_WRITTEN); | ||
872 | } | ||
873 | |||
672 | static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) | 874 | static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc) |
673 | { | 875 | { |
674 | struct ext4_sb_info *sbi = container_of(shrink, | 876 | struct ext4_sb_info *sbi = container_of(shrink, |
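One detail of the reworked ext4_es_can_be_merged() worth calling out is the widening cast: es_len is a 32-bit field, so the combined length of two candidate extents has to be computed in 64 bits before it is compared against the limit. A small stand-alone illustration (types simplified, values assumed):

        __u32 len1 = 0x80000000, len2 = 0x80000000;
        /* 64-bit sum is 0x100000000 > 0xFFFFFFFF, so the merge is refused */
        if (((__u64) len1) + len2 > 0xFFFFFFFFULL)
                return 0;
        /* without the cast the 32-bit sum wraps to 0 and the check would pass */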
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h index f190dfe969da..d8e2d4dc311e 100644 --- a/fs/ext4/extents_status.h +++ b/fs/ext4/extents_status.h | |||
@@ -21,6 +21,12 @@ | |||
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * With ES_AGGRESSIVE_TEST defined, the result of es caching will be | ||
25 | * checked against the old map_blocks result. | ||
26 | */ | ||
27 | #define ES_AGGRESSIVE_TEST__ | ||
28 | |||
29 | /* | ||
24 | * These flags live in the high bits of extent_status.es_pblk | 30 | * These flags live in the high bits of extent_status.es_pblk |
25 | */ | 31 | */ |
26 | #define EXTENT_STATUS_WRITTEN (1ULL << 63) | 32 | #define EXTENT_STATUS_WRITTEN (1ULL << 63) |
@@ -33,6 +39,8 @@ | |||
33 | EXTENT_STATUS_DELAYED | \ | 39 | EXTENT_STATUS_DELAYED | \ |
34 | EXTENT_STATUS_HOLE) | 40 | EXTENT_STATUS_HOLE) |
35 | 41 | ||
42 | struct ext4_extent; | ||
43 | |||
36 | struct extent_status { | 44 | struct extent_status { |
37 | struct rb_node rb_node; | 45 | struct rb_node rb_node; |
38 | ext4_lblk_t es_lblk; /* first logical block extent covers */ | 46 | ext4_lblk_t es_lblk; /* first logical block extent covers */ |
@@ -58,6 +66,7 @@ extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk, | |||
58 | struct extent_status *es); | 66 | struct extent_status *es); |
59 | extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, | 67 | extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, |
60 | struct extent_status *es); | 68 | struct extent_status *es); |
69 | extern int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex); | ||
61 | 70 | ||
62 | static inline int ext4_es_is_written(struct extent_status *es) | 71 | static inline int ext4_es_is_written(struct extent_status *es) |
63 | { | 72 | { |
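Note that the macro above is defined with a trailing "__", so the #ifdef ES_AGGRESSIVE_TEST blocks in extents_status.c and inode.c stay compiled out by default. To turn the self-checks on, a developer would presumably edit the define to the bare name:

        #define ES_AGGRESSIVE_TEST      /* enables the ES consistency checks */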
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index 32fd2b9075dd..6c5bb8d993fe 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c | |||
@@ -324,8 +324,8 @@ error_return: | |||
324 | } | 324 | } |
325 | 325 | ||
326 | struct orlov_stats { | 326 | struct orlov_stats { |
327 | __u64 free_clusters; | ||
327 | __u32 free_inodes; | 328 | __u32 free_inodes; |
328 | __u32 free_clusters; | ||
329 | __u32 used_dirs; | 329 | __u32 used_dirs; |
330 | }; | 330 | }; |
331 | 331 | ||
@@ -342,7 +342,7 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g, | |||
342 | 342 | ||
343 | if (flex_size > 1) { | 343 | if (flex_size > 1) { |
344 | stats->free_inodes = atomic_read(&flex_group[g].free_inodes); | 344 | stats->free_inodes = atomic_read(&flex_group[g].free_inodes); |
345 | stats->free_clusters = atomic_read(&flex_group[g].free_clusters); | 345 | stats->free_clusters = atomic64_read(&flex_group[g].free_clusters); |
346 | stats->used_dirs = atomic_read(&flex_group[g].used_dirs); | 346 | stats->used_dirs = atomic_read(&flex_group[g].used_dirs); |
347 | return; | 347 | return; |
348 | } | 348 | } |
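Widening orlov_stats.free_clusters to __u64 (and reading it with atomic64_read()) goes with the switch of the per-flex-group free_clusters counter to atomic64_t later in this series (see the mballoc.c, resize.c and super.c hunks below). The apparent motivation is overflow: with a large flex_bg factor a single flex group can hold more clusters than a signed 32-bit atomic_t can count. A back-of-the-envelope example (numbers picked purely for illustration):

        /* 2^16 groups per flex group, 32768 (2^15) clusters per group        */
        unsigned long long per_flex = (1ULL << 16) * (1ULL << 15);  /* 2^31   */
        /* 2^31 already exceeds the positive range of a 32-bit atomic_t       */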
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index b505a145a593..a04183127ef0 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c | |||
@@ -1539,9 +1539,9 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode, | |||
1539 | blk = *i_data; | 1539 | blk = *i_data; |
1540 | if (level > 0) { | 1540 | if (level > 0) { |
1541 | ext4_lblk_t first2; | 1541 | ext4_lblk_t first2; |
1542 | bh = sb_bread(inode->i_sb, blk); | 1542 | bh = sb_bread(inode->i_sb, le32_to_cpu(blk)); |
1543 | if (!bh) { | 1543 | if (!bh) { |
1544 | EXT4_ERROR_INODE_BLOCK(inode, blk, | 1544 | EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk), |
1545 | "Read failure"); | 1545 | "Read failure"); |
1546 | return -EIO; | 1546 | return -EIO; |
1547 | } | 1547 | } |
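The free_hole_blocks() fix above is an endianness one: indirect block pointers live on disk as little-endian 32-bit values (__le32), so they must go through le32_to_cpu() before being used as a block number. A minimal sketch of the conversion, which is a no-op on little-endian hosts and a byte swap on big-endian ones:

        __le32 raw = *i_data;                    /* as read from the disk block */
        ext4_fsblk_t blk = le32_to_cpu(raw);     /* CPU-order block number      */
        bh = sb_bread(inode->i_sb, blk);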
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 9ea0cde3fa9e..b3a5213bc73e 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -185,8 +185,6 @@ void ext4_evict_inode(struct inode *inode) | |||
185 | 185 | ||
186 | trace_ext4_evict_inode(inode); | 186 | trace_ext4_evict_inode(inode); |
187 | 187 | ||
188 | ext4_ioend_wait(inode); | ||
189 | |||
190 | if (inode->i_nlink) { | 188 | if (inode->i_nlink) { |
191 | /* | 189 | /* |
192 | * When journalling data dirty buffers are tracked only in the | 190 | * When journalling data dirty buffers are tracked only in the |
@@ -207,7 +205,8 @@ void ext4_evict_inode(struct inode *inode) | |||
207 | * don't use page cache. | 205 | * don't use page cache. |
208 | */ | 206 | */ |
209 | if (ext4_should_journal_data(inode) && | 207 | if (ext4_should_journal_data(inode) && |
210 | (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) { | 208 | (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) && |
209 | inode->i_ino != EXT4_JOURNAL_INO) { | ||
211 | journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; | 210 | journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; |
212 | tid_t commit_tid = EXT4_I(inode)->i_datasync_tid; | 211 | tid_t commit_tid = EXT4_I(inode)->i_datasync_tid; |
213 | 212 | ||
@@ -216,6 +215,7 @@ void ext4_evict_inode(struct inode *inode) | |||
216 | filemap_write_and_wait(&inode->i_data); | 215 | filemap_write_and_wait(&inode->i_data); |
217 | } | 216 | } |
218 | truncate_inode_pages(&inode->i_data, 0); | 217 | truncate_inode_pages(&inode->i_data, 0); |
218 | ext4_ioend_shutdown(inode); | ||
219 | goto no_delete; | 219 | goto no_delete; |
220 | } | 220 | } |
221 | 221 | ||
@@ -225,6 +225,7 @@ void ext4_evict_inode(struct inode *inode) | |||
225 | if (ext4_should_order_data(inode)) | 225 | if (ext4_should_order_data(inode)) |
226 | ext4_begin_ordered_truncate(inode, 0); | 226 | ext4_begin_ordered_truncate(inode, 0); |
227 | truncate_inode_pages(&inode->i_data, 0); | 227 | truncate_inode_pages(&inode->i_data, 0); |
228 | ext4_ioend_shutdown(inode); | ||
228 | 229 | ||
229 | if (is_bad_inode(inode)) | 230 | if (is_bad_inode(inode)) |
230 | goto no_delete; | 231 | goto no_delete; |
@@ -482,6 +483,58 @@ static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx, | |||
482 | return num; | 483 | return num; |
483 | } | 484 | } |
484 | 485 | ||
486 | #ifdef ES_AGGRESSIVE_TEST | ||
487 | static void ext4_map_blocks_es_recheck(handle_t *handle, | ||
488 | struct inode *inode, | ||
489 | struct ext4_map_blocks *es_map, | ||
490 | struct ext4_map_blocks *map, | ||
491 | int flags) | ||
492 | { | ||
493 | int retval; | ||
494 | |||
495 | map->m_flags = 0; | ||
496 | /* | ||
497 | * There is a race window in which the result is not the same, | ||
498 | * e.g. xfstests #223 when dioread_nolock is enabled. The reason | ||
499 | * is that we look up a block mapping in the extent status tree | ||
500 | * without taking i_data_sem. So at that time the unwritten extent | ||
501 | * could be converted. | ||
502 | */ | ||
503 | if (!(flags & EXT4_GET_BLOCKS_NO_LOCK)) | ||
504 | down_read((&EXT4_I(inode)->i_data_sem)); | ||
505 | if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { | ||
506 | retval = ext4_ext_map_blocks(handle, inode, map, flags & | ||
507 | EXT4_GET_BLOCKS_KEEP_SIZE); | ||
508 | } else { | ||
509 | retval = ext4_ind_map_blocks(handle, inode, map, flags & | ||
510 | EXT4_GET_BLOCKS_KEEP_SIZE); | ||
511 | } | ||
512 | if (!(flags & EXT4_GET_BLOCKS_NO_LOCK)) | ||
513 | up_read((&EXT4_I(inode)->i_data_sem)); | ||
514 | /* | ||
515 | * Clear EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flag | ||
516 | * because it shouldn't be marked in es_map->m_flags. | ||
517 | */ | ||
518 | map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY); | ||
519 | |||
520 | /* | ||
521 | * We don't check m_len because extent will be collpased in status | ||
522 | * tree. So the m_len might not equal. | ||
523 | */ | ||
524 | if (es_map->m_lblk != map->m_lblk || | ||
525 | es_map->m_flags != map->m_flags || | ||
526 | es_map->m_pblk != map->m_pblk) { | ||
527 | printk("ES cache assertion failed for inode: %lu " | ||
528 | "es_cached ex [%d/%d/%llu/%x] != " | ||
529 | "found ex [%d/%d/%llu/%x] retval %d flags %x\n", | ||
530 | inode->i_ino, es_map->m_lblk, es_map->m_len, | ||
531 | es_map->m_pblk, es_map->m_flags, map->m_lblk, | ||
532 | map->m_len, map->m_pblk, map->m_flags, | ||
533 | retval, flags); | ||
534 | } | ||
535 | } | ||
536 | #endif /* ES_AGGRESSIVE_TEST */ | ||
537 | |||
485 | /* | 538 | /* |
486 | * The ext4_map_blocks() function tries to look up the requested blocks, | 539 | * The ext4_map_blocks() function tries to look up the requested blocks, |
487 | * and returns if the blocks are already mapped. | 540 | * and returns if the blocks are already mapped. |
@@ -509,6 +562,11 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, | |||
509 | { | 562 | { |
510 | struct extent_status es; | 563 | struct extent_status es; |
511 | int retval; | 564 | int retval; |
565 | #ifdef ES_AGGRESSIVE_TEST | ||
566 | struct ext4_map_blocks orig_map; | ||
567 | |||
568 | memcpy(&orig_map, map, sizeof(*map)); | ||
569 | #endif | ||
512 | 570 | ||
513 | map->m_flags = 0; | 571 | map->m_flags = 0; |
514 | ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u," | 572 | ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u," |
@@ -531,6 +589,10 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, | |||
531 | } else { | 589 | } else { |
532 | BUG_ON(1); | 590 | BUG_ON(1); |
533 | } | 591 | } |
592 | #ifdef ES_AGGRESSIVE_TEST | ||
593 | ext4_map_blocks_es_recheck(handle, inode, map, | ||
594 | &orig_map, flags); | ||
595 | #endif | ||
534 | goto found; | 596 | goto found; |
535 | } | 597 | } |
536 | 598 | ||
@@ -551,6 +613,15 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, | |||
551 | int ret; | 613 | int ret; |
552 | unsigned long long status; | 614 | unsigned long long status; |
553 | 615 | ||
616 | #ifdef ES_AGGRESSIVE_TEST | ||
617 | if (retval != map->m_len) { | ||
618 | printk("ES len assertion failed for inode: %lu " | ||
619 | "retval %d != map->m_len %d " | ||
620 | "in %s (lookup)\n", inode->i_ino, retval, | ||
621 | map->m_len, __func__); | ||
622 | } | ||
623 | #endif | ||
624 | |||
554 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? | 625 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? |
555 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; | 626 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; |
556 | if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && | 627 | if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && |
@@ -643,6 +714,24 @@ found: | |||
643 | int ret; | 714 | int ret; |
644 | unsigned long long status; | 715 | unsigned long long status; |
645 | 716 | ||
717 | #ifdef ES_AGGRESSIVE_TEST | ||
718 | if (retval != map->m_len) { | ||
719 | printk("ES len assertion failed for inode: %lu " | ||
720 | "retval %d != map->m_len %d " | ||
721 | "in %s (allocation)\n", inode->i_ino, retval, | ||
722 | map->m_len, __func__); | ||
723 | } | ||
724 | #endif | ||
725 | |||
726 | /* | ||
727 | * If the extent has been zeroed out, we don't need to update | ||
728 | * extent status tree. | ||
729 | */ | ||
730 | if ((flags & EXT4_GET_BLOCKS_PRE_IO) && | ||
731 | ext4_es_lookup_extent(inode, map->m_lblk, &es)) { | ||
732 | if (ext4_es_is_written(&es)) | ||
733 | goto has_zeroout; | ||
734 | } | ||
646 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? | 735 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? |
647 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; | 736 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; |
648 | if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && | 737 | if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) && |
@@ -655,6 +744,7 @@ found: | |||
655 | retval = ret; | 744 | retval = ret; |
656 | } | 745 | } |
657 | 746 | ||
747 | has_zeroout: | ||
658 | up_write((&EXT4_I(inode)->i_data_sem)); | 748 | up_write((&EXT4_I(inode)->i_data_sem)); |
659 | if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { | 749 | if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { |
660 | int ret = check_block_validity(inode, map); | 750 | int ret = check_block_validity(inode, map); |
@@ -1216,6 +1306,55 @@ static int ext4_journalled_write_end(struct file *file, | |||
1216 | } | 1306 | } |
1217 | 1307 | ||
1218 | /* | 1308 | /* |
1309 | * Reserve metadata for a single block located at lblock | ||
1310 | */ | ||
1311 | static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock) | ||
1312 | { | ||
1313 | int retries = 0; | ||
1314 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | ||
1315 | struct ext4_inode_info *ei = EXT4_I(inode); | ||
1316 | unsigned int md_needed; | ||
1317 | ext4_lblk_t save_last_lblock; | ||
1318 | int save_len; | ||
1319 | |||
1320 | /* | ||
1321 | * recalculate the amount of metadata blocks to reserve | ||
1322 | * in order to allocate nrblocks | ||
1323 | * worse case is one extent per block | ||
1324 | */ | ||
1325 | repeat: | ||
1326 | spin_lock(&ei->i_block_reservation_lock); | ||
1327 | /* | ||
1328 | * ext4_calc_metadata_amount() has side effects, which we have | ||
1329 | * to be prepared to undo if we fail to claim space. | ||
1330 | */ | ||
1331 | save_len = ei->i_da_metadata_calc_len; | ||
1332 | save_last_lblock = ei->i_da_metadata_calc_last_lblock; | ||
1333 | md_needed = EXT4_NUM_B2C(sbi, | ||
1334 | ext4_calc_metadata_amount(inode, lblock)); | ||
1335 | trace_ext4_da_reserve_space(inode, md_needed); | ||
1336 | |||
1337 | /* | ||
1338 | * We do still charge estimated metadata to the sb though; | ||
1339 | * we cannot afford to run out of free blocks. | ||
1340 | */ | ||
1341 | if (ext4_claim_free_clusters(sbi, md_needed, 0)) { | ||
1342 | ei->i_da_metadata_calc_len = save_len; | ||
1343 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; | ||
1344 | spin_unlock(&ei->i_block_reservation_lock); | ||
1345 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | ||
1346 | cond_resched(); | ||
1347 | goto repeat; | ||
1348 | } | ||
1349 | return -ENOSPC; | ||
1350 | } | ||
1351 | ei->i_reserved_meta_blocks += md_needed; | ||
1352 | spin_unlock(&ei->i_block_reservation_lock); | ||
1353 | |||
1354 | return 0; /* success */ | ||
1355 | } | ||
1356 | |||
1357 | /* | ||
1219 | * Reserve a single cluster located at lblock | 1358 | * Reserve a single cluster located at lblock |
1220 | */ | 1359 | */ |
1221 | static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) | 1360 | static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock) |
@@ -1263,7 +1402,7 @@ repeat: | |||
1263 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; | 1402 | ei->i_da_metadata_calc_last_lblock = save_last_lblock; |
1264 | spin_unlock(&ei->i_block_reservation_lock); | 1403 | spin_unlock(&ei->i_block_reservation_lock); |
1265 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | 1404 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { |
1266 | yield(); | 1405 | cond_resched(); |
1267 | goto repeat; | 1406 | goto repeat; |
1268 | } | 1407 | } |
1269 | dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); | 1408 | dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1)); |
@@ -1768,6 +1907,11 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, | |||
1768 | struct extent_status es; | 1907 | struct extent_status es; |
1769 | int retval; | 1908 | int retval; |
1770 | sector_t invalid_block = ~((sector_t) 0xffff); | 1909 | sector_t invalid_block = ~((sector_t) 0xffff); |
1910 | #ifdef ES_AGGRESSIVE_TEST | ||
1911 | struct ext4_map_blocks orig_map; | ||
1912 | |||
1913 | memcpy(&orig_map, map, sizeof(*map)); | ||
1914 | #endif | ||
1771 | 1915 | ||
1772 | if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) | 1916 | if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es)) |
1773 | invalid_block = ~0; | 1917 | invalid_block = ~0; |
@@ -1809,6 +1953,9 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock, | |||
1809 | else | 1953 | else |
1810 | BUG_ON(1); | 1954 | BUG_ON(1); |
1811 | 1955 | ||
1956 | #ifdef ES_AGGRESSIVE_TEST | ||
1957 | ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0); | ||
1958 | #endif | ||
1812 | return retval; | 1959 | return retval; |
1813 | } | 1960 | } |
1814 | 1961 | ||
@@ -1843,8 +1990,11 @@ add_delayed: | |||
1843 | * XXX: __block_prepare_write() unmaps passed block, | 1990 | * XXX: __block_prepare_write() unmaps passed block, |
1844 | * is it OK? | 1991 | * is it OK? |
1845 | */ | 1992 | */ |
1846 | /* If the block was allocated from previously allocated cluster, | 1993 | /* |
1847 | * then we dont need to reserve it again. */ | 1994 | * If the block was allocated from a previously allocated cluster, |
1995 | * then we don't need to reserve it again. However, we still need | ||
1996 | * to reserve metadata for every block we're going to write. | ||
1997 | */ | ||
1848 | if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) { | 1998 | if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) { |
1849 | ret = ext4_da_reserve_space(inode, iblock); | 1999 | ret = ext4_da_reserve_space(inode, iblock); |
1850 | if (ret) { | 2000 | if (ret) { |
@@ -1852,6 +2002,13 @@ add_delayed: | |||
1852 | retval = ret; | 2002 | retval = ret; |
1853 | goto out_unlock; | 2003 | goto out_unlock; |
1854 | } | 2004 | } |
2005 | } else { | ||
2006 | ret = ext4_da_reserve_metadata(inode, iblock); | ||
2007 | if (ret) { | ||
2008 | /* not enough space to reserve */ | ||
2009 | retval = ret; | ||
2010 | goto out_unlock; | ||
2011 | } | ||
1855 | } | 2012 | } |
1856 | 2013 | ||
1857 | ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, | 2014 | ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, |
@@ -1873,6 +2030,15 @@ add_delayed: | |||
1873 | int ret; | 2030 | int ret; |
1874 | unsigned long long status; | 2031 | unsigned long long status; |
1875 | 2032 | ||
2033 | #ifdef ES_AGGRESSIVE_TEST | ||
2034 | if (retval != map->m_len) { | ||
2035 | printk("ES len assertion failed for inode: %lu " | ||
2036 | "retval %d != map->m_len %d " | ||
2037 | "in %s (lookup)\n", inode->i_ino, retval, | ||
2038 | map->m_len, __func__); | ||
2039 | } | ||
2040 | #endif | ||
2041 | |||
1876 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? | 2042 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? |
1877 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; | 2043 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; |
1878 | ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, | 2044 | ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, |
@@ -2908,8 +3074,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait) | |||
2908 | 3074 | ||
2909 | trace_ext4_releasepage(page); | 3075 | trace_ext4_releasepage(page); |
2910 | 3076 | ||
2911 | WARN_ON(PageChecked(page)); | 3077 | /* Page has dirty journalled data -> cannot release */ |
2912 | if (!page_has_buffers(page)) | 3078 | if (PageChecked(page)) |
2913 | return 0; | 3079 | return 0; |
2914 | if (journal) | 3080 | if (journal) |
2915 | return jbd2_journal_try_to_free_buffers(journal, page, wait); | 3081 | return jbd2_journal_try_to_free_buffers(journal, page, wait); |
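The delayed-allocation hunks earlier in inode.c boil down to one extra decision in ext4_da_map_blocks(): a block that falls inside an already-allocated cluster no longer reserves a data cluster, but it still reserves metadata through the new ext4_da_reserve_metadata() helper. A condensed sketch of the resulting branch (error handling trimmed):

        if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER))
                ret = ext4_da_reserve_space(inode, iblock);     /* cluster + metadata */
        else
                ret = ext4_da_reserve_metadata(inode, iblock);  /* metadata only      */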
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 7bb713a46fe4..ee6614bdb639 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -2804,8 +2804,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, | |||
2804 | if (sbi->s_log_groups_per_flex) { | 2804 | if (sbi->s_log_groups_per_flex) { |
2805 | ext4_group_t flex_group = ext4_flex_group(sbi, | 2805 | ext4_group_t flex_group = ext4_flex_group(sbi, |
2806 | ac->ac_b_ex.fe_group); | 2806 | ac->ac_b_ex.fe_group); |
2807 | atomic_sub(ac->ac_b_ex.fe_len, | 2807 | atomic64_sub(ac->ac_b_ex.fe_len, |
2808 | &sbi->s_flex_groups[flex_group].free_clusters); | 2808 | &sbi->s_flex_groups[flex_group].free_clusters); |
2809 | } | 2809 | } |
2810 | 2810 | ||
2811 | err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); | 2811 | err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); |
@@ -3692,11 +3692,7 @@ repeat: | |||
3692 | if (free < needed && busy) { | 3692 | if (free < needed && busy) { |
3693 | busy = 0; | 3693 | busy = 0; |
3694 | ext4_unlock_group(sb, group); | 3694 | ext4_unlock_group(sb, group); |
3695 | /* | 3695 | cond_resched(); |
3696 | * Yield the CPU here so that we don't get soft lockup | ||
3697 | * in non preempt case. | ||
3698 | */ | ||
3699 | yield(); | ||
3700 | goto repeat; | 3696 | goto repeat; |
3701 | } | 3697 | } |
3702 | 3698 | ||
@@ -4246,7 +4242,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, | |||
4246 | ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { | 4242 | ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { |
4247 | 4243 | ||
4248 | /* let others to free the space */ | 4244 | /* let others to free the space */ |
4249 | yield(); | 4245 | cond_resched(); |
4250 | ar->len = ar->len >> 1; | 4246 | ar->len = ar->len >> 1; |
4251 | } | 4247 | } |
4252 | if (!ar->len) { | 4248 | if (!ar->len) { |
@@ -4464,7 +4460,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, | |||
4464 | struct buffer_head *bitmap_bh = NULL; | 4460 | struct buffer_head *bitmap_bh = NULL; |
4465 | struct super_block *sb = inode->i_sb; | 4461 | struct super_block *sb = inode->i_sb; |
4466 | struct ext4_group_desc *gdp; | 4462 | struct ext4_group_desc *gdp; |
4467 | unsigned long freed = 0; | ||
4468 | unsigned int overflow; | 4463 | unsigned int overflow; |
4469 | ext4_grpblk_t bit; | 4464 | ext4_grpblk_t bit; |
4470 | struct buffer_head *gd_bh; | 4465 | struct buffer_head *gd_bh; |
@@ -4666,14 +4661,12 @@ do_more: | |||
4666 | 4661 | ||
4667 | if (sbi->s_log_groups_per_flex) { | 4662 | if (sbi->s_log_groups_per_flex) { |
4668 | ext4_group_t flex_group = ext4_flex_group(sbi, block_group); | 4663 | ext4_group_t flex_group = ext4_flex_group(sbi, block_group); |
4669 | atomic_add(count_clusters, | 4664 | atomic64_add(count_clusters, |
4670 | &sbi->s_flex_groups[flex_group].free_clusters); | 4665 | &sbi->s_flex_groups[flex_group].free_clusters); |
4671 | } | 4666 | } |
4672 | 4667 | ||
4673 | ext4_mb_unload_buddy(&e4b); | 4668 | ext4_mb_unload_buddy(&e4b); |
4674 | 4669 | ||
4675 | freed += count; | ||
4676 | |||
4677 | if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) | 4670 | if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) |
4678 | dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); | 4671 | dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); |
4679 | 4672 | ||
@@ -4811,8 +4804,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, | |||
4811 | 4804 | ||
4812 | if (sbi->s_log_groups_per_flex) { | 4805 | if (sbi->s_log_groups_per_flex) { |
4813 | ext4_group_t flex_group = ext4_flex_group(sbi, block_group); | 4806 | ext4_group_t flex_group = ext4_flex_group(sbi, block_group); |
4814 | atomic_add(EXT4_NUM_B2C(sbi, blocks_freed), | 4807 | atomic64_add(EXT4_NUM_B2C(sbi, blocks_freed), |
4815 | &sbi->s_flex_groups[flex_group].free_clusters); | 4808 | &sbi->s_flex_groups[flex_group].free_clusters); |
4816 | } | 4809 | } |
4817 | 4810 | ||
4818 | ext4_mb_unload_buddy(&e4b); | 4811 | ext4_mb_unload_buddy(&e4b); |
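Replacing yield() with cond_resched() in the retry loops above keeps the original intent (avoid soft lockups on non-preemptible kernels) while dropping yield()'s weakly defined behaviour for normal SCHED_OTHER tasks: cond_resched() only calls into the scheduler when a reschedule has already been flagged. The resulting retry idiom looks like this:

        ext4_unlock_group(sb, group);
        cond_resched();        /* reschedules only if TIF_NEED_RESCHED is set */
        goto repeat;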
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 4e81d47aa8cb..33e1c086858b 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c | |||
@@ -32,16 +32,18 @@ | |||
32 | */ | 32 | */ |
33 | static inline int | 33 | static inline int |
34 | get_ext_path(struct inode *inode, ext4_lblk_t lblock, | 34 | get_ext_path(struct inode *inode, ext4_lblk_t lblock, |
35 | struct ext4_ext_path **path) | 35 | struct ext4_ext_path **orig_path) |
36 | { | 36 | { |
37 | int ret = 0; | 37 | int ret = 0; |
38 | struct ext4_ext_path *path; | ||
38 | 39 | ||
39 | *path = ext4_ext_find_extent(inode, lblock, *path); | 40 | path = ext4_ext_find_extent(inode, lblock, *orig_path); |
40 | if (IS_ERR(*path)) { | 41 | if (IS_ERR(path)) |
41 | ret = PTR_ERR(*path); | 42 | ret = PTR_ERR(path); |
42 | *path = NULL; | 43 | else if (path[ext_depth(inode)].p_ext == NULL) |
43 | } else if ((*path)[ext_depth(inode)].p_ext == NULL) | ||
44 | ret = -ENODATA; | 44 | ret = -ENODATA; |
45 | else | ||
46 | *orig_path = path; | ||
45 | 47 | ||
46 | return ret; | 48 | return ret; |
47 | } | 49 | } |
@@ -611,24 +613,25 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count, | |||
611 | { | 613 | { |
612 | struct ext4_ext_path *path = NULL; | 614 | struct ext4_ext_path *path = NULL; |
613 | struct ext4_extent *ext; | 615 | struct ext4_extent *ext; |
616 | int ret = 0; | ||
614 | ext4_lblk_t last = from + count; | 617 | ext4_lblk_t last = from + count; |
615 | while (from < last) { | 618 | while (from < last) { |
616 | *err = get_ext_path(inode, from, &path); | 619 | *err = get_ext_path(inode, from, &path); |
617 | if (*err) | 620 | if (*err) |
618 | return 0; | 621 | goto out; |
619 | ext = path[ext_depth(inode)].p_ext; | 622 | ext = path[ext_depth(inode)].p_ext; |
620 | if (!ext) { | 623 | if (uninit != ext4_ext_is_uninitialized(ext)) |
621 | ext4_ext_drop_refs(path); | 624 | goto out; |
622 | return 0; | ||
623 | } | ||
624 | if (uninit != ext4_ext_is_uninitialized(ext)) { | ||
625 | ext4_ext_drop_refs(path); | ||
626 | return 0; | ||
627 | } | ||
628 | from += ext4_ext_get_actual_len(ext); | 625 | from += ext4_ext_get_actual_len(ext); |
629 | ext4_ext_drop_refs(path); | 626 | ext4_ext_drop_refs(path); |
630 | } | 627 | } |
631 | return 1; | 628 | ret = 1; |
629 | out: | ||
630 | if (path) { | ||
631 | ext4_ext_drop_refs(path); | ||
632 | kfree(path); | ||
633 | } | ||
634 | return ret; | ||
632 | } | 635 | } |
633 | 636 | ||
634 | /** | 637 | /** |
@@ -666,6 +669,14 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode, | |||
666 | int replaced_count = 0; | 669 | int replaced_count = 0; |
667 | int dext_alen; | 670 | int dext_alen; |
668 | 671 | ||
672 | *err = ext4_es_remove_extent(orig_inode, from, count); | ||
673 | if (*err) | ||
674 | goto out; | ||
675 | |||
676 | *err = ext4_es_remove_extent(donor_inode, from, count); | ||
677 | if (*err) | ||
678 | goto out; | ||
679 | |||
669 | /* Get the original extent for the block "orig_off" */ | 680 | /* Get the original extent for the block "orig_off" */ |
670 | *err = get_ext_path(orig_inode, orig_off, &orig_path); | 681 | *err = get_ext_path(orig_inode, orig_off, &orig_path); |
671 | if (*err) | 682 | if (*err) |
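The reworked get_ext_path() above follows a common error-handling pattern: build the result in a local variable and only publish it through the output parameter on success, so a failed lookup never leaves *orig_path pointing at an ERR_PTR() value that callers might later dereference or kfree(). A generic sketch of the pattern (lookup_thing is a hypothetical name, and the -ENODATA check is omitted):

        static int lookup_thing(struct inode *inode, ext4_lblk_t lblk,
                                struct ext4_ext_path **out)
        {
                struct ext4_ext_path *p = ext4_ext_find_extent(inode, lblk, *out);

                if (IS_ERR(p))
                        return PTR_ERR(p);      /* *out is left untouched */
                *out = p;
                return 0;
        }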
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index 809b31003ecc..047a6de04a0a 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c | |||
@@ -50,11 +50,21 @@ void ext4_exit_pageio(void) | |||
50 | kmem_cache_destroy(io_page_cachep); | 50 | kmem_cache_destroy(io_page_cachep); |
51 | } | 51 | } |
52 | 52 | ||
53 | void ext4_ioend_wait(struct inode *inode) | 53 | /* |
54 | * This function is called by ext4_evict_inode() to make sure there is | ||
55 | * no more pending I/O completion work left to do. | ||
56 | */ | ||
57 | void ext4_ioend_shutdown(struct inode *inode) | ||
54 | { | 58 | { |
55 | wait_queue_head_t *wq = ext4_ioend_wq(inode); | 59 | wait_queue_head_t *wq = ext4_ioend_wq(inode); |
56 | 60 | ||
57 | wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0)); | 61 | wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0)); |
62 | /* | ||
63 | * We need to make sure the work structure is finished being | ||
64 | * used before we let the inode get destroyed. | ||
65 | */ | ||
66 | if (work_pending(&EXT4_I(inode)->i_unwritten_work)) | ||
67 | cancel_work_sync(&EXT4_I(inode)->i_unwritten_work); | ||
58 | } | 68 | } |
59 | 69 | ||
60 | static void put_io_page(struct ext4_io_page *io_page) | 70 | static void put_io_page(struct ext4_io_page *io_page) |
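Renaming ext4_ioend_wait() to ext4_ioend_shutdown() also changes its contract: it now both drains the i_ioend_count references and makes sure any queued i_unwritten_work item has finished (or will never run) before the inode is torn down. Stripped of the ext4 specifics, the shutdown-before-free pattern it implements is:

        wait_event(*wq, atomic_read(&refcount) == 0);   /* no in-flight references   */
        if (work_pending(&work))
                cancel_work_sync(&work);                /* work has run or is cancelled */
        /* only now may the object embedding 'work' be freed */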
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index b2c8ee56eb98..c169477a62c9 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c | |||
@@ -1360,8 +1360,8 @@ static void ext4_update_super(struct super_block *sb, | |||
1360 | sbi->s_log_groups_per_flex) { | 1360 | sbi->s_log_groups_per_flex) { |
1361 | ext4_group_t flex_group; | 1361 | ext4_group_t flex_group; |
1362 | flex_group = ext4_flex_group(sbi, group_data[0].group); | 1362 | flex_group = ext4_flex_group(sbi, group_data[0].group); |
1363 | atomic_add(EXT4_NUM_B2C(sbi, free_blocks), | 1363 | atomic64_add(EXT4_NUM_B2C(sbi, free_blocks), |
1364 | &sbi->s_flex_groups[flex_group].free_clusters); | 1364 | &sbi->s_flex_groups[flex_group].free_clusters); |
1365 | atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, | 1365 | atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count, |
1366 | &sbi->s_flex_groups[flex_group].free_inodes); | 1366 | &sbi->s_flex_groups[flex_group].free_inodes); |
1367 | } | 1367 | } |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index b3818b48f418..5d6d53578124 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -1927,8 +1927,8 @@ static int ext4_fill_flex_info(struct super_block *sb) | |||
1927 | flex_group = ext4_flex_group(sbi, i); | 1927 | flex_group = ext4_flex_group(sbi, i); |
1928 | atomic_add(ext4_free_inodes_count(sb, gdp), | 1928 | atomic_add(ext4_free_inodes_count(sb, gdp), |
1929 | &sbi->s_flex_groups[flex_group].free_inodes); | 1929 | &sbi->s_flex_groups[flex_group].free_inodes); |
1930 | atomic_add(ext4_free_group_clusters(sb, gdp), | 1930 | atomic64_add(ext4_free_group_clusters(sb, gdp), |
1931 | &sbi->s_flex_groups[flex_group].free_clusters); | 1931 | &sbi->s_flex_groups[flex_group].free_clusters); |
1932 | atomic_add(ext4_used_dirs_count(sb, gdp), | 1932 | atomic_add(ext4_used_dirs_count(sb, gdp), |
1933 | &sbi->s_flex_groups[flex_group].used_dirs); | 1933 | &sbi->s_flex_groups[flex_group].used_dirs); |
1934 | } | 1934 | } |
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 019f45e45097..d79c2dadc536 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c | |||
@@ -923,8 +923,11 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl) | |||
923 | cmd = F_SETLK; | 923 | cmd = F_SETLK; |
924 | fl->fl_type = F_UNLCK; | 924 | fl->fl_type = F_UNLCK; |
925 | } | 925 | } |
926 | if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) | 926 | if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) { |
927 | if (fl->fl_type == F_UNLCK) | ||
928 | posix_lock_file_wait(file, fl); | ||
927 | return -EIO; | 929 | return -EIO; |
930 | } | ||
928 | if (IS_GETLK(cmd)) | 931 | if (IS_GETLK(cmd)) |
929 | return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl); | 932 | return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl); |
930 | else if (fl->fl_type == F_UNLCK) | 933 | else if (fl->fl_type == F_UNLCK) |
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h index 156e42ec84ea..5c29216e9cc1 100644 --- a/fs/gfs2/incore.h +++ b/fs/gfs2/incore.h | |||
@@ -588,6 +588,7 @@ struct lm_lockstruct { | |||
588 | struct dlm_lksb ls_control_lksb; /* control_lock */ | 588 | struct dlm_lksb ls_control_lksb; /* control_lock */ |
589 | char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */ | 589 | char ls_control_lvb[GDLM_LVB_SIZE]; /* control_lock lvb */ |
590 | struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */ | 590 | struct completion ls_sync_wait; /* {control,mounted}_{lock,unlock} */ |
591 | char *ls_lvb_bits; | ||
591 | 592 | ||
592 | spinlock_t ls_recover_spin; /* protects following fields */ | 593 | spinlock_t ls_recover_spin; /* protects following fields */ |
593 | unsigned long ls_recover_flags; /* DFL_ */ | 594 | unsigned long ls_recover_flags; /* DFL_ */ |
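The new ls_lvb_bits pointer replaces the GDLM_LVB_SIZE char arrays that lock_dlm.c used to keep on the stack (see the following hunks). Besides trimming stack usage, a heap buffer from kzalloc() is guaranteed to be suitably aligned for the word-based little-endian bitops applied to it, which a char array on the stack need not be; both are presumably part of the motivation. A hedged sketch of the allocate/use/free life cycle, as the later hunks wire it up:

        /* allocated lazily in set_recover_size() */
        if (!ls->ls_lvb_bits) {
                ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
                if (!ls->ls_lvb_bits)
                        return -ENOMEM;
        }

        /* used with the LE bitops, e.g. for journal id 'i' */
        __set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);

        /* released together with the rest of the recovery state */
        kfree(ls->ls_lvb_bits);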
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index 9802de0f85e6..c8423d6de6c3 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c | |||
@@ -483,12 +483,8 @@ static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen, | |||
483 | 483 | ||
484 | static int all_jid_bits_clear(char *lvb) | 484 | static int all_jid_bits_clear(char *lvb) |
485 | { | 485 | { |
486 | int i; | 486 | return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0, |
487 | for (i = JID_BITMAP_OFFSET; i < GDLM_LVB_SIZE; i++) { | 487 | GDLM_LVB_SIZE - JID_BITMAP_OFFSET); |
488 | if (lvb[i]) | ||
489 | return 0; | ||
490 | } | ||
491 | return 1; | ||
492 | } | 488 | } |
493 | 489 | ||
494 | static void sync_wait_cb(void *arg) | 490 | static void sync_wait_cb(void *arg) |
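The new all_jid_bits_clear() is a neat example of the memchr_inv() idiom: the helper returns NULL when every byte in the range equals the given value, so negating it yields an "all bytes are zero" test without an explicit loop. In general form:

        #include <linux/string.h>
        #include <linux/types.h>

        /* true iff every byte of buf[0..len) is zero */
        static bool all_zero(const void *buf, size_t len)
        {
                return !memchr_inv(buf, 0, len);
        }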
@@ -580,7 +576,6 @@ static void gfs2_control_func(struct work_struct *work) | |||
580 | { | 576 | { |
581 | struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work); | 577 | struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work); |
582 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; | 578 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; |
583 | char lvb_bits[GDLM_LVB_SIZE]; | ||
584 | uint32_t block_gen, start_gen, lvb_gen, flags; | 579 | uint32_t block_gen, start_gen, lvb_gen, flags; |
585 | int recover_set = 0; | 580 | int recover_set = 0; |
586 | int write_lvb = 0; | 581 | int write_lvb = 0; |
@@ -634,7 +629,7 @@ static void gfs2_control_func(struct work_struct *work) | |||
634 | return; | 629 | return; |
635 | } | 630 | } |
636 | 631 | ||
637 | control_lvb_read(ls, &lvb_gen, lvb_bits); | 632 | control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); |
638 | 633 | ||
639 | spin_lock(&ls->ls_recover_spin); | 634 | spin_lock(&ls->ls_recover_spin); |
640 | if (block_gen != ls->ls_recover_block || | 635 | if (block_gen != ls->ls_recover_block || |
@@ -664,10 +659,10 @@ static void gfs2_control_func(struct work_struct *work) | |||
664 | 659 | ||
665 | ls->ls_recover_result[i] = 0; | 660 | ls->ls_recover_result[i] = 0; |
666 | 661 | ||
667 | if (!test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) | 662 | if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) |
668 | continue; | 663 | continue; |
669 | 664 | ||
670 | __clear_bit_le(i, lvb_bits + JID_BITMAP_OFFSET); | 665 | __clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); |
671 | write_lvb = 1; | 666 | write_lvb = 1; |
672 | } | 667 | } |
673 | } | 668 | } |
@@ -691,7 +686,7 @@ static void gfs2_control_func(struct work_struct *work) | |||
691 | continue; | 686 | continue; |
692 | if (ls->ls_recover_submit[i] < start_gen) { | 687 | if (ls->ls_recover_submit[i] < start_gen) { |
693 | ls->ls_recover_submit[i] = 0; | 688 | ls->ls_recover_submit[i] = 0; |
694 | __set_bit_le(i, lvb_bits + JID_BITMAP_OFFSET); | 689 | __set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET); |
695 | } | 690 | } |
696 | } | 691 | } |
697 | /* even if there are no bits to set, we need to write the | 692 | /* even if there are no bits to set, we need to write the |
@@ -705,7 +700,7 @@ static void gfs2_control_func(struct work_struct *work) | |||
705 | spin_unlock(&ls->ls_recover_spin); | 700 | spin_unlock(&ls->ls_recover_spin); |
706 | 701 | ||
707 | if (write_lvb) { | 702 | if (write_lvb) { |
708 | control_lvb_write(ls, start_gen, lvb_bits); | 703 | control_lvb_write(ls, start_gen, ls->ls_lvb_bits); |
709 | flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK; | 704 | flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK; |
710 | } else { | 705 | } else { |
711 | flags = DLM_LKF_CONVERT; | 706 | flags = DLM_LKF_CONVERT; |
@@ -725,7 +720,7 @@ static void gfs2_control_func(struct work_struct *work) | |||
725 | */ | 720 | */ |
726 | 721 | ||
727 | for (i = 0; i < recover_size; i++) { | 722 | for (i = 0; i < recover_size; i++) { |
728 | if (test_bit_le(i, lvb_bits + JID_BITMAP_OFFSET)) { | 723 | if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) { |
729 | fs_info(sdp, "recover generation %u jid %d\n", | 724 | fs_info(sdp, "recover generation %u jid %d\n", |
730 | start_gen, i); | 725 | start_gen, i); |
731 | gfs2_recover_set(sdp, i); | 726 | gfs2_recover_set(sdp, i); |
@@ -758,7 +753,6 @@ static void gfs2_control_func(struct work_struct *work) | |||
758 | static int control_mount(struct gfs2_sbd *sdp) | 753 | static int control_mount(struct gfs2_sbd *sdp) |
759 | { | 754 | { |
760 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; | 755 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; |
761 | char lvb_bits[GDLM_LVB_SIZE]; | ||
762 | uint32_t start_gen, block_gen, mount_gen, lvb_gen; | 756 | uint32_t start_gen, block_gen, mount_gen, lvb_gen; |
763 | int mounted_mode; | 757 | int mounted_mode; |
764 | int retries = 0; | 758 | int retries = 0; |
@@ -857,7 +851,7 @@ locks_done: | |||
857 | * lvb_gen will be non-zero. | 851 | * lvb_gen will be non-zero. |
858 | */ | 852 | */ |
859 | 853 | ||
860 | control_lvb_read(ls, &lvb_gen, lvb_bits); | 854 | control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits); |
861 | 855 | ||
862 | if (lvb_gen == 0xFFFFFFFF) { | 856 | if (lvb_gen == 0xFFFFFFFF) { |
863 | /* special value to force mount attempts to fail */ | 857 | /* special value to force mount attempts to fail */ |
@@ -887,7 +881,7 @@ locks_done: | |||
887 | * and all lvb bits to be clear (no pending journal recoveries.) | 881 | * and all lvb bits to be clear (no pending journal recoveries.) |
888 | */ | 882 | */ |
889 | 883 | ||
890 | if (!all_jid_bits_clear(lvb_bits)) { | 884 | if (!all_jid_bits_clear(ls->ls_lvb_bits)) { |
891 | /* journals need recovery, wait until all are clear */ | 885 | /* journals need recovery, wait until all are clear */ |
892 | fs_info(sdp, "control_mount wait for journal recovery\n"); | 886 | fs_info(sdp, "control_mount wait for journal recovery\n"); |
893 | goto restart; | 887 | goto restart; |
@@ -949,7 +943,6 @@ static int dlm_recovery_wait(void *word) | |||
949 | static int control_first_done(struct gfs2_sbd *sdp) | 943 | static int control_first_done(struct gfs2_sbd *sdp) |
950 | { | 944 | { |
951 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; | 945 | struct lm_lockstruct *ls = &sdp->sd_lockstruct; |
952 | char lvb_bits[GDLM_LVB_SIZE]; | ||
953 | uint32_t start_gen, block_gen; | 946 | uint32_t start_gen, block_gen; |
954 | int error; | 947 | int error; |
955 | 948 | ||
@@ -991,8 +984,8 @@ restart: | |||
991 | memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); | 984 | memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t)); |
992 | spin_unlock(&ls->ls_recover_spin); | 985 | spin_unlock(&ls->ls_recover_spin); |
993 | 986 | ||
994 | memset(lvb_bits, 0, sizeof(lvb_bits)); | 987 | memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE); |
995 | control_lvb_write(ls, start_gen, lvb_bits); | 988 | control_lvb_write(ls, start_gen, ls->ls_lvb_bits); |
996 | 989 | ||
997 | error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT); | 990 | error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT); |
998 | if (error) | 991 | if (error) |
@@ -1022,6 +1015,12 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots, | |||
1022 | uint32_t old_size, new_size; | 1015 | uint32_t old_size, new_size; |
1023 | int i, max_jid; | 1016 | int i, max_jid; |
1024 | 1017 | ||
1018 | if (!ls->ls_lvb_bits) { | ||
1019 | ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS); | ||
1020 | if (!ls->ls_lvb_bits) | ||
1021 | return -ENOMEM; | ||
1022 | } | ||
1023 | |||
1025 | max_jid = 0; | 1024 | max_jid = 0; |
1026 | for (i = 0; i < num_slots; i++) { | 1025 | for (i = 0; i < num_slots; i++) { |
1027 | if (max_jid < slots[i].slot - 1) | 1026 | if (max_jid < slots[i].slot - 1) |
@@ -1057,6 +1056,7 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots, | |||
1057 | 1056 | ||
1058 | static void free_recover_size(struct lm_lockstruct *ls) | 1057 | static void free_recover_size(struct lm_lockstruct *ls) |
1059 | { | 1058 | { |
1059 | kfree(ls->ls_lvb_bits); | ||
1060 | kfree(ls->ls_recover_submit); | 1060 | kfree(ls->ls_recover_submit); |
1061 | kfree(ls->ls_recover_result); | 1061 | kfree(ls->ls_recover_result); |
1062 | ls->ls_recover_submit = NULL; | 1062 | ls->ls_recover_submit = NULL; |
@@ -1205,6 +1205,7 @@ static int gdlm_mount(struct gfs2_sbd *sdp, const char *table) | |||
1205 | ls->ls_recover_size = 0; | 1205 | ls->ls_recover_size = 0; |
1206 | ls->ls_recover_submit = NULL; | 1206 | ls->ls_recover_submit = NULL; |
1207 | ls->ls_recover_result = NULL; | 1207 | ls->ls_recover_result = NULL; |
1208 | ls->ls_lvb_bits = NULL; | ||
1208 | 1209 | ||
1209 | error = set_recover_size(sdp, NULL, 0); | 1210 | error = set_recover_size(sdp, NULL, 0); |
1210 | if (error) | 1211 | if (error) |
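
The lock_dlm hunks above drop the on-stack lvb_bits[GDLM_LVB_SIZE] arrays from gfs2_control_func(), control_mount() and control_first_done() in favour of a single ls->ls_lvb_bits buffer, allocated on first use in set_recover_size() and released in free_recover_size(). A minimal userspace sketch of that allocate-once, reuse-everywhere pattern follows; the struct, function names and LVB_SIZE value are invented for illustration and are not the kernel definitions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define LVB_SIZE 64                     /* stand-in for GDLM_LVB_SIZE */

struct lockstate {
        unsigned char *lvb_bits;        /* lazily allocated, reused across calls */
};

/* Allocate the buffer only on first use, as set_recover_size() now does. */
static int lockstate_prepare(struct lockstate *ls)
{
        if (!ls->lvb_bits) {
                ls->lvb_bits = calloc(1, LVB_SIZE);
                if (!ls->lvb_bits)
                        return -1;      /* -ENOMEM in the kernel version */
        }
        return 0;
}

/* Release the buffer together with the rest of the recovery state. */
static void lockstate_teardown(struct lockstate *ls)
{
        free(ls->lvb_bits);
        ls->lvb_bits = NULL;
}

int main(void)
{
        struct lockstate ls = { .lvb_bits = NULL };

        if (lockstate_prepare(&ls) == 0) {
                /* analogous to the memset before control_lvb_write() */
                memset(ls.lvb_bits, 0, LVB_SIZE);
                printf("buffer ready, %d bytes\n", LVB_SIZE);
        }
        lockstate_teardown(&ls);
        return 0;
}

The callers then hand ls->ls_lvb_bits to the same helpers that used to take the stack array, which is exactly what the edited control_lvb_read()/control_lvb_write() and bitmap calls above do.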
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index d1f51fd73f86..5a51265a4341 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c | |||
@@ -576,7 +576,7 @@ int gfs2_rs_alloc(struct gfs2_inode *ip) | |||
576 | RB_CLEAR_NODE(&ip->i_res->rs_node); | 576 | RB_CLEAR_NODE(&ip->i_res->rs_node); |
577 | out: | 577 | out: |
578 | up_write(&ip->i_rw_mutex); | 578 | up_write(&ip->i_rw_mutex); |
579 | return 0; | 579 | return error; |
580 | } | 580 | } |
581 | 581 | ||
582 | static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs) | 582 | static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs) |
@@ -1181,12 +1181,9 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, | |||
1181 | const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed) | 1181 | const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed) |
1182 | { | 1182 | { |
1183 | struct super_block *sb = sdp->sd_vfs; | 1183 | struct super_block *sb = sdp->sd_vfs; |
1184 | struct block_device *bdev = sb->s_bdev; | ||
1185 | const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize / | ||
1186 | bdev_logical_block_size(sb->s_bdev); | ||
1187 | u64 blk; | 1184 | u64 blk; |
1188 | sector_t start = 0; | 1185 | sector_t start = 0; |
1189 | sector_t nr_sects = 0; | 1186 | sector_t nr_blks = 0; |
1190 | int rv; | 1187 | int rv; |
1191 | unsigned int x; | 1188 | unsigned int x; |
1192 | u32 trimmed = 0; | 1189 | u32 trimmed = 0; |
@@ -1206,35 +1203,34 @@ int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, | |||
1206 | if (diff == 0) | 1203 | if (diff == 0) |
1207 | continue; | 1204 | continue; |
1208 | blk = offset + ((bi->bi_start + x) * GFS2_NBBY); | 1205 | blk = offset + ((bi->bi_start + x) * GFS2_NBBY); |
1209 | blk *= sects_per_blk; /* convert to sectors */ | ||
1210 | while(diff) { | 1206 | while(diff) { |
1211 | if (diff & 1) { | 1207 | if (diff & 1) { |
1212 | if (nr_sects == 0) | 1208 | if (nr_blks == 0) |
1213 | goto start_new_extent; | 1209 | goto start_new_extent; |
1214 | if ((start + nr_sects) != blk) { | 1210 | if ((start + nr_blks) != blk) { |
1215 | if (nr_sects >= minlen) { | 1211 | if (nr_blks >= minlen) { |
1216 | rv = blkdev_issue_discard(bdev, | 1212 | rv = sb_issue_discard(sb, |
1217 | start, nr_sects, | 1213 | start, nr_blks, |
1218 | GFP_NOFS, 0); | 1214 | GFP_NOFS, 0); |
1219 | if (rv) | 1215 | if (rv) |
1220 | goto fail; | 1216 | goto fail; |
1221 | trimmed += nr_sects; | 1217 | trimmed += nr_blks; |
1222 | } | 1218 | } |
1223 | nr_sects = 0; | 1219 | nr_blks = 0; |
1224 | start_new_extent: | 1220 | start_new_extent: |
1225 | start = blk; | 1221 | start = blk; |
1226 | } | 1222 | } |
1227 | nr_sects += sects_per_blk; | 1223 | nr_blks++; |
1228 | } | 1224 | } |
1229 | diff >>= 2; | 1225 | diff >>= 2; |
1230 | blk += sects_per_blk; | 1226 | blk++; |
1231 | } | 1227 | } |
1232 | } | 1228 | } |
1233 | if (nr_sects >= minlen) { | 1229 | if (nr_blks >= minlen) { |
1234 | rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0); | 1230 | rv = sb_issue_discard(sb, start, nr_blks, GFP_NOFS, 0); |
1235 | if (rv) | 1231 | if (rv) |
1236 | goto fail; | 1232 | goto fail; |
1237 | trimmed += nr_sects; | 1233 | trimmed += nr_blks; |
1238 | } | 1234 | } |
1239 | if (ptrimmed) | 1235 | if (ptrimmed) |
1240 | *ptrimmed = trimmed; | 1236 | *ptrimmed = trimmed; |
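
The fs/gfs2/rgrp.c hunks above convert gfs2_rgrp_send_discards() from hand-scaled sectors to filesystem blocks, letting sb_issue_discard() do the block-to-sector conversion that the removed sects_per_blk arithmetic used to perform. The shape of the loop is unchanged: scan the bitmap, coalesce consecutive free blocks into extents, and discard every extent of at least minlen blocks. Here is a self-contained userspace sketch of that coalescing loop; it uses one bit per block instead of GFS2's two-bit entries, and issue_discard() is a made-up stand-in that only prints the extent it would trim.

#include <stdio.h>

/* Stand-in for sb_issue_discard(): just report the extent. */
static int issue_discard(unsigned long long start, unsigned long long nr_blks)
{
        printf("discard: start=%llu len=%llu blocks\n", start, nr_blks);
        return 0;
}

/*
 * Coalesce runs of free blocks (one bit per block here, set = free;
 * GFS2 really uses two bits per block) into extents and discard each
 * extent that is at least minlen blocks long.
 */
static void trim_bitmap(const unsigned char *bitmap, unsigned int nbytes,
                        unsigned long long offset, unsigned long long minlen)
{
        unsigned long long start = 0, nr_blks = 0, trimmed = 0;

        for (unsigned int x = 0; x < nbytes; x++) {
                unsigned int diff = bitmap[x];
                unsigned long long blk = offset + (unsigned long long)x * 8;

                while (diff) {
                        if (diff & 1) {
                                if (nr_blks == 0)
                                        goto start_new_extent;
                                if (start + nr_blks != blk) {
                                        if (nr_blks >= minlen) {
                                                issue_discard(start, nr_blks);
                                                trimmed += nr_blks;
                                        }
                                        nr_blks = 0;
start_new_extent:
                                        start = blk;
                                }
                                nr_blks++;
                        }
                        diff >>= 1;
                        blk++;
                }
        }
        if (nr_blks >= minlen) {
                issue_discard(start, nr_blks);
                trimmed += nr_blks;
        }
        printf("trimmed %llu blocks total\n", trimmed);
}

int main(void)
{
        /* blocks 1-3 and 8-15 free */
        unsigned char bitmap[] = { 0x0e, 0xff };

        trim_bitmap(bitmap, sizeof(bitmap), 0, 2);
        return 0;
}

Running it on the sample bitmap reports the two extents (blocks 1-3 and 8-15) and a total of 11 trimmed blocks, mirroring how *ptrimmed is accumulated above.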
diff --git a/fs/internal.h b/fs/internal.h index 507141fceb99..4be78237d896 100644 --- a/fs/internal.h +++ b/fs/internal.h | |||
@@ -125,3 +125,8 @@ extern int invalidate_inodes(struct super_block *, bool); | |||
125 | * dcache.c | 125 | * dcache.c |
126 | */ | 126 | */ |
127 | extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); | 127 | extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); |
128 | |||
129 | /* | ||
130 | * read_write.c | ||
131 | */ | ||
132 | extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *); | ||
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index d6ee5aed56b1..325bc019ed88 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c | |||
@@ -1065,9 +1065,12 @@ out: | |||
1065 | void jbd2_journal_set_triggers(struct buffer_head *bh, | 1065 | void jbd2_journal_set_triggers(struct buffer_head *bh, |
1066 | struct jbd2_buffer_trigger_type *type) | 1066 | struct jbd2_buffer_trigger_type *type) |
1067 | { | 1067 | { |
1068 | struct journal_head *jh = bh2jh(bh); | 1068 | struct journal_head *jh = jbd2_journal_grab_journal_head(bh); |
1069 | 1069 | ||
1070 | if (WARN_ON(!jh)) | ||
1071 | return; | ||
1070 | jh->b_triggers = type; | 1072 | jh->b_triggers = type; |
1073 | jbd2_journal_put_journal_head(jh); | ||
1071 | } | 1074 | } |
1072 | 1075 | ||
1073 | void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data, | 1076 | void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data, |
@@ -1119,17 +1122,18 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | |||
1119 | { | 1122 | { |
1120 | transaction_t *transaction = handle->h_transaction; | 1123 | transaction_t *transaction = handle->h_transaction; |
1121 | journal_t *journal = transaction->t_journal; | 1124 | journal_t *journal = transaction->t_journal; |
1122 | struct journal_head *jh = bh2jh(bh); | 1125 | struct journal_head *jh; |
1123 | int ret = 0; | 1126 | int ret = 0; |
1124 | 1127 | ||
1125 | jbd_debug(5, "journal_head %p\n", jh); | ||
1126 | JBUFFER_TRACE(jh, "entry"); | ||
1127 | if (is_handle_aborted(handle)) | 1128 | if (is_handle_aborted(handle)) |
1128 | goto out; | 1129 | goto out; |
1129 | if (!buffer_jbd(bh)) { | 1130 | jh = jbd2_journal_grab_journal_head(bh); |
1131 | if (!jh) { | ||
1130 | ret = -EUCLEAN; | 1132 | ret = -EUCLEAN; |
1131 | goto out; | 1133 | goto out; |
1132 | } | 1134 | } |
1135 | jbd_debug(5, "journal_head %p\n", jh); | ||
1136 | JBUFFER_TRACE(jh, "entry"); | ||
1133 | 1137 | ||
1134 | jbd_lock_bh_state(bh); | 1138 | jbd_lock_bh_state(bh); |
1135 | 1139 | ||
@@ -1220,6 +1224,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) | |||
1220 | spin_unlock(&journal->j_list_lock); | 1224 | spin_unlock(&journal->j_list_lock); |
1221 | out_unlock_bh: | 1225 | out_unlock_bh: |
1222 | jbd_unlock_bh_state(bh); | 1226 | jbd_unlock_bh_state(bh); |
1227 | jbd2_journal_put_journal_head(jh); | ||
1223 | out: | 1228 | out: |
1224 | JBUFFER_TRACE(jh, "exit"); | 1229 | JBUFFER_TRACE(jh, "exit"); |
1225 | WARN_ON(ret); /* All errors are bugs, so dump the stack */ | 1230 | WARN_ON(ret); /* All errors are bugs, so dump the stack */ |
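
The jbd2 changes above stop dereferencing the raw bh2jh() pointer and instead take a counted reference with jbd2_journal_grab_journal_head(), bail out cleanly if it returns NULL (the buffer is no longer journaled), and drop the reference with jbd2_journal_put_journal_head() when finished. The toy below sketches that grab/put discipline in plain single-threaded C; the real code also relies on the buffer-head state lock and atomic reference counts, none of which is modelled here, and every name in the sketch is invented.

#include <stdio.h>
#include <stdlib.h>

/* Toy object with a reference count, standing in for a journal_head. */
struct jhead {
        int refcount;
        void *triggers;
};

/* Like jbd2_journal_grab_journal_head(): return the object with an
 * extra reference, or NULL if it no longer exists. */
static struct jhead *grab_jhead(struct jhead *jh)
{
        if (!jh)
                return NULL;
        jh->refcount++;
        return jh;
}

/* Like jbd2_journal_put_journal_head(): drop the reference and free
 * the object when the last one goes away. */
static void put_jhead(struct jhead *jh)
{
        if (--jh->refcount == 0)
                free(jh);
}

static void set_triggers(struct jhead *maybe_jh, void *type)
{
        struct jhead *jh = grab_jhead(maybe_jh);

        if (!jh)        /* nothing attached any more: nothing to do */
                return;
        jh->triggers = type;
        put_jhead(jh);
}

int main(void)
{
        struct jhead *jh = calloc(1, sizeof(*jh));

        jh->refcount = 1;
        set_triggers(jh, (void *)0x1);
        set_triggers(NULL, (void *)0x2);        /* safely ignored */
        put_jhead(jh);
        return 0;
}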
diff --git a/fs/namespace.c b/fs/namespace.c index 50ca17d3cb45..341d3f564082 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -798,6 +798,10 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root, | |||
798 | } | 798 | } |
799 | 799 | ||
800 | mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD; | 800 | mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD; |
801 | /* Don't allow unprivileged users to change mount flags */ | ||
802 | if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY)) | ||
803 | mnt->mnt.mnt_flags |= MNT_LOCK_READONLY; | ||
804 | |||
801 | atomic_inc(&sb->s_active); | 805 | atomic_inc(&sb->s_active); |
802 | mnt->mnt.mnt_sb = sb; | 806 | mnt->mnt.mnt_sb = sb; |
803 | mnt->mnt.mnt_root = dget(root); | 807 | mnt->mnt.mnt_root = dget(root); |
@@ -1686,7 +1690,7 @@ static int do_loopback(struct path *path, const char *old_name, | |||
1686 | 1690 | ||
1687 | if (IS_ERR(mnt)) { | 1691 | if (IS_ERR(mnt)) { |
1688 | err = PTR_ERR(mnt); | 1692 | err = PTR_ERR(mnt); |
1689 | goto out; | 1693 | goto out2; |
1690 | } | 1694 | } |
1691 | 1695 | ||
1692 | err = graft_tree(mnt, path); | 1696 | err = graft_tree(mnt, path); |
@@ -1713,6 +1717,9 @@ static int change_mount_flags(struct vfsmount *mnt, int ms_flags) | |||
1713 | if (readonly_request == __mnt_is_readonly(mnt)) | 1717 | if (readonly_request == __mnt_is_readonly(mnt)) |
1714 | return 0; | 1718 | return 0; |
1715 | 1719 | ||
1720 | if (mnt->mnt_flags & MNT_LOCK_READONLY) | ||
1721 | return -EPERM; | ||
1722 | |||
1716 | if (readonly_request) | 1723 | if (readonly_request) |
1717 | error = mnt_make_readonly(real_mount(mnt)); | 1724 | error = mnt_make_readonly(real_mount(mnt)); |
1718 | else | 1725 | else |
@@ -2339,7 +2346,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, | |||
2339 | /* First pass: copy the tree topology */ | 2346 | /* First pass: copy the tree topology */ |
2340 | copy_flags = CL_COPY_ALL | CL_EXPIRE; | 2347 | copy_flags = CL_COPY_ALL | CL_EXPIRE; |
2341 | if (user_ns != mnt_ns->user_ns) | 2348 | if (user_ns != mnt_ns->user_ns) |
2342 | copy_flags |= CL_SHARED_TO_SLAVE; | 2349 | copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED; |
2343 | new = copy_tree(old, old->mnt.mnt_root, copy_flags); | 2350 | new = copy_tree(old, old->mnt.mnt_root, copy_flags); |
2344 | if (IS_ERR(new)) { | 2351 | if (IS_ERR(new)) { |
2345 | up_write(&namespace_sem); | 2352 | up_write(&namespace_sem); |
@@ -2732,6 +2739,51 @@ bool our_mnt(struct vfsmount *mnt) | |||
2732 | return check_mnt(real_mount(mnt)); | 2739 | return check_mnt(real_mount(mnt)); |
2733 | } | 2740 | } |
2734 | 2741 | ||
2742 | bool current_chrooted(void) | ||
2743 | { | ||
2744 | /* Does the current process have a non-standard root */ | ||
2745 | struct path ns_root; | ||
2746 | struct path fs_root; | ||
2747 | bool chrooted; | ||
2748 | |||
2749 | /* Find the namespace root */ | ||
2750 | ns_root.mnt = ¤t->nsproxy->mnt_ns->root->mnt; | ||
2751 | ns_root.dentry = ns_root.mnt->mnt_root; | ||
2752 | path_get(&ns_root); | ||
2753 | while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root)) | ||
2754 | ; | ||
2755 | |||
2756 | get_fs_root(current->fs, &fs_root); | ||
2757 | |||
2758 | chrooted = !path_equal(&fs_root, &ns_root); | ||
2759 | |||
2760 | path_put(&fs_root); | ||
2761 | path_put(&ns_root); | ||
2762 | |||
2763 | return chrooted; | ||
2764 | } | ||
2765 | |||
2766 | void update_mnt_policy(struct user_namespace *userns) | ||
2767 | { | ||
2768 | struct mnt_namespace *ns = current->nsproxy->mnt_ns; | ||
2769 | struct mount *mnt; | ||
2770 | |||
2771 | down_read(&namespace_sem); | ||
2772 | list_for_each_entry(mnt, &ns->list, mnt_list) { | ||
2773 | switch (mnt->mnt.mnt_sb->s_magic) { | ||
2774 | case SYSFS_MAGIC: | ||
2775 | userns->may_mount_sysfs = true; | ||
2776 | break; | ||
2777 | case PROC_SUPER_MAGIC: | ||
2778 | userns->may_mount_proc = true; | ||
2779 | break; | ||
2780 | } | ||
2781 | if (userns->may_mount_sysfs && userns->may_mount_proc) | ||
2782 | break; | ||
2783 | } | ||
2784 | up_read(&namespace_sem); | ||
2785 | } | ||
2786 | |||
2735 | static void *mntns_get(struct task_struct *task) | 2787 | static void *mntns_get(struct task_struct *task) |
2736 | { | 2788 | { |
2737 | struct mnt_namespace *ns = NULL; | 2789 | struct mnt_namespace *ns = NULL; |
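
The fs/namespace.c hunks above add MNT_LOCK_READONLY handling: clone_mnt() marks a read-only mount as locked read-only when it is copied with CL_UNPRIVILEGED, and change_mount_flags() then refuses with -EPERM to clear the read-only bit on such a mount. A small sketch of that flag-locking idea follows; the bit values are placeholders, not the kernel's real MNT_* assignments.

#include <stdio.h>

#define MNT_READONLY       0x01
#define MNT_LOCK_READONLY  0x02         /* owner may not clear read-only */

/* Cloning a mount into a less privileged namespace: if it is read-only,
 * also mark it locked read-only, as clone_mnt() now does. */
static unsigned clone_flags(unsigned flags, int unprivileged)
{
        if (unprivileged && (flags & MNT_READONLY))
                flags |= MNT_LOCK_READONLY;
        return flags;
}

/* Remount read-write request: refused once the read-only state is
 * locked, mirroring the new check in change_mount_flags(). */
static int remount_rw(unsigned *flags)
{
        if (*flags & MNT_LOCK_READONLY)
                return -1;              /* -EPERM in the kernel */
        *flags &= ~MNT_READONLY;
        return 0;
}

int main(void)
{
        unsigned flags = clone_flags(MNT_READONLY, 1);

        printf("remount rw: %d\n", remount_rw(&flags));  /* -1, refused */
        return 0;
}

The net effect is that an unprivileged mount namespace can inherit a read-only mount but cannot later remount it read-write.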
diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c index 737d839bc17b..6fc7b5cae92b 100644 --- a/fs/nfs/blocklayout/blocklayoutdm.c +++ b/fs/nfs/blocklayout/blocklayoutdm.c | |||
@@ -55,7 +55,8 @@ static void dev_remove(struct net *net, dev_t dev) | |||
55 | 55 | ||
56 | bl_pipe_msg.bl_wq = &nn->bl_wq; | 56 | bl_pipe_msg.bl_wq = &nn->bl_wq; |
57 | memset(msg, 0, sizeof(*msg)); | 57 | memset(msg, 0, sizeof(*msg)); |
58 | msg->data = kzalloc(1 + sizeof(bl_umount_request), GFP_NOFS); | 58 | msg->len = sizeof(bl_msg) + bl_msg.totallen; |
59 | msg->data = kzalloc(msg->len, GFP_NOFS); | ||
59 | if (!msg->data) | 60 | if (!msg->data) |
60 | goto out; | 61 | goto out; |
61 | 62 | ||
@@ -66,7 +67,6 @@ static void dev_remove(struct net *net, dev_t dev) | |||
66 | memcpy(msg->data, &bl_msg, sizeof(bl_msg)); | 67 | memcpy(msg->data, &bl_msg, sizeof(bl_msg)); |
67 | dataptr = (uint8_t *) msg->data; | 68 | dataptr = (uint8_t *) msg->data; |
68 | memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request)); | 69 | memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request)); |
69 | msg->len = sizeof(bl_msg) + bl_msg.totallen; | ||
70 | 70 | ||
71 | add_wait_queue(&nn->bl_wq, &wq); | 71 | add_wait_queue(&nn->bl_wq, &wq); |
72 | if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) { | 72 | if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) { |
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index dc0f98dfa717..c516da5873fd 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c | |||
@@ -726,9 +726,9 @@ out1: | |||
726 | return ret; | 726 | return ret; |
727 | } | 727 | } |
728 | 728 | ||
729 | static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data) | 729 | static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data, size_t datalen) |
730 | { | 730 | { |
731 | return key_instantiate_and_link(key, data, strlen(data) + 1, | 731 | return key_instantiate_and_link(key, data, datalen, |
732 | id_resolver_cache->thread_keyring, | 732 | id_resolver_cache->thread_keyring, |
733 | authkey); | 733 | authkey); |
734 | } | 734 | } |
@@ -738,6 +738,7 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im, | |||
738 | struct key *key, struct key *authkey) | 738 | struct key *key, struct key *authkey) |
739 | { | 739 | { |
740 | char id_str[NFS_UINT_MAXLEN]; | 740 | char id_str[NFS_UINT_MAXLEN]; |
741 | size_t len; | ||
741 | int ret = -ENOKEY; | 742 | int ret = -ENOKEY; |
742 | 743 | ||
743 | /* ret = -ENOKEY */ | 744 | /* ret = -ENOKEY */ |
@@ -747,13 +748,15 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im, | |||
747 | case IDMAP_CONV_NAMETOID: | 748 | case IDMAP_CONV_NAMETOID: |
748 | if (strcmp(upcall->im_name, im->im_name) != 0) | 749 | if (strcmp(upcall->im_name, im->im_name) != 0) |
749 | break; | 750 | break; |
750 | sprintf(id_str, "%d", im->im_id); | 751 | /* Note: here we store the NUL terminator too */ |
751 | ret = nfs_idmap_instantiate(key, authkey, id_str); | 752 | len = sprintf(id_str, "%d", im->im_id) + 1; |
753 | ret = nfs_idmap_instantiate(key, authkey, id_str, len); | ||
752 | break; | 754 | break; |
753 | case IDMAP_CONV_IDTONAME: | 755 | case IDMAP_CONV_IDTONAME: |
754 | if (upcall->im_id != im->im_id) | 756 | if (upcall->im_id != im->im_id) |
755 | break; | 757 | break; |
756 | ret = nfs_idmap_instantiate(key, authkey, im->im_name); | 758 | len = strlen(im->im_name); |
759 | ret = nfs_idmap_instantiate(key, authkey, im->im_name, len); | ||
757 | break; | 760 | break; |
758 | default: | 761 | default: |
759 | ret = -EINVAL; | 762 | ret = -EINVAL; |
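
The fs/nfs/idmap.c change above gives nfs_idmap_instantiate() an explicit payload length: the name-to-id case stores the decimal id string including its NUL terminator (sprintf()'s return value plus one, as the added comment notes), while the id-to-name case stores strlen(im->im_name) bytes with no terminator. A trivial standalone illustration of the two length calculations; instantiate() is a made-up stand-in that only reports what would be stored.

#include <stdio.h>
#include <string.h>

/* Stand-in for nfs_idmap_instantiate(): show what would be stored. */
static void instantiate(const char *data, size_t datalen)
{
        printf("storing %zu bytes: \"%.*s\"\n", datalen, (int)datalen, data);
}

int main(void)
{
        char id_str[16];
        size_t len;

        /* Name -> id: store the decimal string plus its NUL terminator,
         * as the patched IDMAP_CONV_NAMETOID case does. */
        len = sprintf(id_str, "%d", 1000) + 1;
        instantiate(id_str, len);               /* 5 bytes: "1000" + NUL */

        /* Id -> name: store only the name bytes, without a NUL. */
        const char *name = "alice";
        instantiate(name, strlen(name));        /* 5 bytes: "alice" */
        return 0;
}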
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index ac4fc9a8fdbc..66b6664dcd4c 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c | |||
@@ -300,7 +300,7 @@ int nfs40_walk_client_list(struct nfs_client *new, | |||
300 | struct rpc_cred *cred) | 300 | struct rpc_cred *cred) |
301 | { | 301 | { |
302 | struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); | 302 | struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); |
303 | struct nfs_client *pos, *n, *prev = NULL; | 303 | struct nfs_client *pos, *prev = NULL; |
304 | struct nfs4_setclientid_res clid = { | 304 | struct nfs4_setclientid_res clid = { |
305 | .clientid = new->cl_clientid, | 305 | .clientid = new->cl_clientid, |
306 | .confirm = new->cl_confirm, | 306 | .confirm = new->cl_confirm, |
@@ -308,10 +308,23 @@ int nfs40_walk_client_list(struct nfs_client *new, | |||
308 | int status = -NFS4ERR_STALE_CLIENTID; | 308 | int status = -NFS4ERR_STALE_CLIENTID; |
309 | 309 | ||
310 | spin_lock(&nn->nfs_client_lock); | 310 | spin_lock(&nn->nfs_client_lock); |
311 | list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { | 311 | list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { |
312 | /* If "pos" isn't marked ready, we can't trust the | 312 | /* If "pos" isn't marked ready, we can't trust the |
313 | * remaining fields in "pos" */ | 313 | * remaining fields in "pos" */ |
314 | if (pos->cl_cons_state < NFS_CS_READY) | 314 | if (pos->cl_cons_state > NFS_CS_READY) { |
315 | atomic_inc(&pos->cl_count); | ||
316 | spin_unlock(&nn->nfs_client_lock); | ||
317 | |||
318 | if (prev) | ||
319 | nfs_put_client(prev); | ||
320 | prev = pos; | ||
321 | |||
322 | status = nfs_wait_client_init_complete(pos); | ||
323 | spin_lock(&nn->nfs_client_lock); | ||
324 | if (status < 0) | ||
325 | continue; | ||
326 | } | ||
327 | if (pos->cl_cons_state != NFS_CS_READY) | ||
315 | continue; | 328 | continue; |
316 | 329 | ||
317 | if (pos->rpc_ops != new->rpc_ops) | 330 | if (pos->rpc_ops != new->rpc_ops) |
@@ -423,16 +436,16 @@ int nfs41_walk_client_list(struct nfs_client *new, | |||
423 | struct rpc_cred *cred) | 436 | struct rpc_cred *cred) |
424 | { | 437 | { |
425 | struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); | 438 | struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id); |
426 | struct nfs_client *pos, *n, *prev = NULL; | 439 | struct nfs_client *pos, *prev = NULL; |
427 | int status = -NFS4ERR_STALE_CLIENTID; | 440 | int status = -NFS4ERR_STALE_CLIENTID; |
428 | 441 | ||
429 | spin_lock(&nn->nfs_client_lock); | 442 | spin_lock(&nn->nfs_client_lock); |
430 | list_for_each_entry_safe(pos, n, &nn->nfs_client_list, cl_share_link) { | 443 | list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { |
431 | /* If "pos" isn't marked ready, we can't trust the | 444 | /* If "pos" isn't marked ready, we can't trust the |
432 | * remaining fields in "pos", especially the client | 445 | * remaining fields in "pos", especially the client |
433 | * ID and serverowner fields. Wait for CREATE_SESSION | 446 | * ID and serverowner fields. Wait for CREATE_SESSION |
434 | * to finish. */ | 447 | * to finish. */ |
435 | if (pos->cl_cons_state < NFS_CS_READY) { | 448 | if (pos->cl_cons_state > NFS_CS_READY) { |
436 | atomic_inc(&pos->cl_count); | 449 | atomic_inc(&pos->cl_count); |
437 | spin_unlock(&nn->nfs_client_lock); | 450 | spin_unlock(&nn->nfs_client_lock); |
438 | 451 | ||
@@ -440,18 +453,17 @@ int nfs41_walk_client_list(struct nfs_client *new, | |||
440 | nfs_put_client(prev); | 453 | nfs_put_client(prev); |
441 | prev = pos; | 454 | prev = pos; |
442 | 455 | ||
443 | nfs4_schedule_lease_recovery(pos); | ||
444 | status = nfs_wait_client_init_complete(pos); | 456 | status = nfs_wait_client_init_complete(pos); |
445 | if (status < 0) { | 457 | if (status == 0) { |
446 | nfs_put_client(pos); | 458 | nfs4_schedule_lease_recovery(pos); |
447 | spin_lock(&nn->nfs_client_lock); | 459 | status = nfs4_wait_clnt_recover(pos); |
448 | continue; | ||
449 | } | 460 | } |
450 | status = pos->cl_cons_state; | ||
451 | spin_lock(&nn->nfs_client_lock); | 461 | spin_lock(&nn->nfs_client_lock); |
452 | if (status < 0) | 462 | if (status < 0) |
453 | continue; | 463 | continue; |
454 | } | 464 | } |
465 | if (pos->cl_cons_state != NFS_CS_READY) | ||
466 | continue; | ||
455 | 467 | ||
456 | if (pos->rpc_ops != new->rpc_ops) | 468 | if (pos->rpc_ops != new->rpc_ops) |
457 | continue; | 469 | continue; |
@@ -469,17 +481,18 @@ int nfs41_walk_client_list(struct nfs_client *new, | |||
469 | continue; | 481 | continue; |
470 | 482 | ||
471 | atomic_inc(&pos->cl_count); | 483 | atomic_inc(&pos->cl_count); |
472 | spin_unlock(&nn->nfs_client_lock); | 484 | *result = pos; |
485 | status = 0; | ||
473 | dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", | 486 | dprintk("NFS: <-- %s using nfs_client = %p ({%d})\n", |
474 | __func__, pos, atomic_read(&pos->cl_count)); | 487 | __func__, pos, atomic_read(&pos->cl_count)); |
475 | | 488 | break; |
476 | *result = pos; | ||
477 | return 0; | ||
478 | } | 489 | } |
479 | 490 | ||
480 | /* No matching nfs_client found. */ | 491 | /* No matching nfs_client found. */ |
481 | spin_unlock(&nn->nfs_client_lock); | 492 | spin_unlock(&nn->nfs_client_lock); |
482 | dprintk("NFS: <-- %s status = %d\n", __func__, status); | 493 | dprintk("NFS: <-- %s status = %d\n", __func__, status); |
494 | if (prev) | ||
495 | nfs_put_client(prev); | ||
483 | return status; | 496 | return status; |
484 | } | 497 | } |
485 | #endif /* CONFIG_NFS_V4_1 */ | 498 | #endif /* CONFIG_NFS_V4_1 */ |
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 49eeb044c109..4fb234d3aefb 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c | |||
@@ -129,7 +129,6 @@ static void filelayout_fenceme(struct inode *inode, struct pnfs_layout_hdr *lo) | |||
129 | { | 129 | { |
130 | if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) | 130 | if (!test_and_clear_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) |
131 | return; | 131 | return; |
132 | clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags); | ||
133 | pnfs_return_layout(inode); | 132 | pnfs_return_layout(inode); |
134 | } | 133 | } |
135 | 134 | ||
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index b2671cb0f901..0ad025eb523b 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -1046,6 +1046,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata) | |||
1046 | /* Save the delegation */ | 1046 | /* Save the delegation */ |
1047 | nfs4_stateid_copy(&stateid, &delegation->stateid); | 1047 | nfs4_stateid_copy(&stateid, &delegation->stateid); |
1048 | rcu_read_unlock(); | 1048 | rcu_read_unlock(); |
1049 | nfs_release_seqid(opendata->o_arg.seqid); | ||
1049 | ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); | 1050 | ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode); |
1050 | if (ret != 0) | 1051 | if (ret != 0) |
1051 | goto out; | 1052 | goto out; |
@@ -2632,7 +2633,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr, | |||
2632 | int status; | 2633 | int status; |
2633 | 2634 | ||
2634 | if (pnfs_ld_layoutret_on_setattr(inode)) | 2635 | if (pnfs_ld_layoutret_on_setattr(inode)) |
2635 | pnfs_return_layout(inode); | 2636 | pnfs_commit_and_return_layout(inode); |
2636 | 2637 | ||
2637 | nfs_fattr_init(fattr); | 2638 | nfs_fattr_init(fattr); |
2638 | 2639 | ||
@@ -6416,22 +6417,8 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) | |||
6416 | static void nfs4_layoutcommit_release(void *calldata) | 6417 | static void nfs4_layoutcommit_release(void *calldata) |
6417 | { | 6418 | { |
6418 | struct nfs4_layoutcommit_data *data = calldata; | 6419 | struct nfs4_layoutcommit_data *data = calldata; |
6419 | struct pnfs_layout_segment *lseg, *tmp; | ||
6420 | unsigned long *bitlock = &NFS_I(data->args.inode)->flags; | ||
6421 | 6420 | ||
6422 | pnfs_cleanup_layoutcommit(data); | 6421 | pnfs_cleanup_layoutcommit(data); |
6423 | /* Matched by references in pnfs_set_layoutcommit */ | ||
6424 | list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) { | ||
6425 | list_del_init(&lseg->pls_lc_list); | ||
6426 | if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, | ||
6427 | &lseg->pls_flags)) | ||
6428 | pnfs_put_lseg(lseg); | ||
6429 | } | ||
6430 | |||
6431 | clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock); | ||
6432 | smp_mb__after_clear_bit(); | ||
6433 | wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING); | ||
6434 | |||
6435 | put_rpccred(data->cred); | 6422 | put_rpccred(data->cred); |
6436 | kfree(data); | 6423 | kfree(data); |
6437 | } | 6424 | } |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 6ace365c6334..d41a3518509f 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -1886,7 +1886,13 @@ again: | |||
1886 | status = PTR_ERR(clnt); | 1886 | status = PTR_ERR(clnt); |
1887 | break; | 1887 | break; |
1888 | } | 1888 | } |
1889 | clp->cl_rpcclient = clnt; | 1889 | /* Note: this is safe because we haven't yet marked the |
1890 | * client as ready, so we are the only user of | ||
1891 | * clp->cl_rpcclient | ||
1892 | */ | ||
1893 | clnt = xchg(&clp->cl_rpcclient, clnt); | ||
1894 | rpc_shutdown_client(clnt); | ||
1895 | clnt = clp->cl_rpcclient; | ||
1890 | goto again; | 1896 | goto again; |
1891 | 1897 | ||
1892 | case -NFS4ERR_MINOR_VERS_MISMATCH: | 1898 | case -NFS4ERR_MINOR_VERS_MISMATCH: |
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 48ac5aad6258..4bdffe0ba025 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -417,6 +417,16 @@ should_free_lseg(struct pnfs_layout_range *lseg_range, | |||
417 | lo_seg_intersecting(lseg_range, recall_range); | 417 | lo_seg_intersecting(lseg_range, recall_range); |
418 | } | 418 | } |
419 | 419 | ||
420 | static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg, | ||
421 | struct list_head *tmp_list) | ||
422 | { | ||
423 | if (!atomic_dec_and_test(&lseg->pls_refcount)) | ||
424 | return false; | ||
425 | pnfs_layout_remove_lseg(lseg->pls_layout, lseg); | ||
426 | list_add(&lseg->pls_list, tmp_list); | ||
427 | return true; | ||
428 | } | ||
429 | |||
420 | /* Returns 1 if lseg is removed from list, 0 otherwise */ | 430 | /* Returns 1 if lseg is removed from list, 0 otherwise */ |
421 | static int mark_lseg_invalid(struct pnfs_layout_segment *lseg, | 431 | static int mark_lseg_invalid(struct pnfs_layout_segment *lseg, |
422 | struct list_head *tmp_list) | 432 | struct list_head *tmp_list) |
@@ -430,11 +440,8 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg, | |||
430 | */ | 440 | */ |
431 | dprintk("%s: lseg %p ref %d\n", __func__, lseg, | 441 | dprintk("%s: lseg %p ref %d\n", __func__, lseg, |
432 | atomic_read(&lseg->pls_refcount)); | 442 | atomic_read(&lseg->pls_refcount)); |
433 | if (atomic_dec_and_test(&lseg->pls_refcount)) { | 443 | if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list)) |
434 | pnfs_layout_remove_lseg(lseg->pls_layout, lseg); | ||
435 | list_add(&lseg->pls_list, tmp_list); | ||
436 | rv = 1; | 444 | rv = 1; |
437 | } | ||
438 | } | 445 | } |
439 | return rv; | 446 | return rv; |
440 | } | 447 | } |
@@ -777,6 +784,21 @@ send_layoutget(struct pnfs_layout_hdr *lo, | |||
777 | return lseg; | 784 | return lseg; |
778 | } | 785 | } |
779 | 786 | ||
787 | static void pnfs_clear_layoutcommit(struct inode *inode, | ||
788 | struct list_head *head) | ||
789 | { | ||
790 | struct nfs_inode *nfsi = NFS_I(inode); | ||
791 | struct pnfs_layout_segment *lseg, *tmp; | ||
792 | |||
793 | if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) | ||
794 | return; | ||
795 | list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) { | ||
796 | if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) | ||
797 | continue; | ||
798 | pnfs_lseg_dec_and_remove_zero(lseg, head); | ||
799 | } | ||
800 | } | ||
801 | |||
780 | /* | 802 | /* |
781 | * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr | 803 | * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr |
782 | * when the layout segment list is empty. | 804 | * when the layout segment list is empty. |
@@ -808,6 +830,7 @@ _pnfs_return_layout(struct inode *ino) | |||
808 | /* Reference matched in nfs4_layoutreturn_release */ | 830 | /* Reference matched in nfs4_layoutreturn_release */ |
809 | pnfs_get_layout_hdr(lo); | 831 | pnfs_get_layout_hdr(lo); |
810 | empty = list_empty(&lo->plh_segs); | 832 | empty = list_empty(&lo->plh_segs); |
833 | pnfs_clear_layoutcommit(ino, &tmp_list); | ||
811 | pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL); | 834 | pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL); |
812 | /* Don't send a LAYOUTRETURN if list was initially empty */ | 835 | /* Don't send a LAYOUTRETURN if list was initially empty */ |
813 | if (empty) { | 836 | if (empty) { |
@@ -820,8 +843,6 @@ _pnfs_return_layout(struct inode *ino) | |||
820 | spin_unlock(&ino->i_lock); | 843 | spin_unlock(&ino->i_lock); |
821 | pnfs_free_lseg_list(&tmp_list); | 844 | pnfs_free_lseg_list(&tmp_list); |
822 | 845 | ||
823 | WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)); | ||
824 | |||
825 | lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); | 846 | lrp = kzalloc(sizeof(*lrp), GFP_KERNEL); |
826 | if (unlikely(lrp == NULL)) { | 847 | if (unlikely(lrp == NULL)) { |
827 | status = -ENOMEM; | 848 | status = -ENOMEM; |
@@ -845,6 +866,33 @@ out: | |||
845 | } | 866 | } |
846 | EXPORT_SYMBOL_GPL(_pnfs_return_layout); | 867 | EXPORT_SYMBOL_GPL(_pnfs_return_layout); |
847 | 868 | ||
869 | int | ||
870 | pnfs_commit_and_return_layout(struct inode *inode) | ||
871 | { | ||
872 | struct pnfs_layout_hdr *lo; | ||
873 | int ret; | ||
874 | |||
875 | spin_lock(&inode->i_lock); | ||
876 | lo = NFS_I(inode)->layout; | ||
877 | if (lo == NULL) { | ||
878 | spin_unlock(&inode->i_lock); | ||
879 | return 0; | ||
880 | } | ||
881 | pnfs_get_layout_hdr(lo); | ||
882 | /* Block new layoutgets and read/write to ds */ | ||
883 | lo->plh_block_lgets++; | ||
884 | spin_unlock(&inode->i_lock); | ||
885 | filemap_fdatawait(inode->i_mapping); | ||
886 | ret = pnfs_layoutcommit_inode(inode, true); | ||
887 | if (ret == 0) | ||
888 | ret = _pnfs_return_layout(inode); | ||
889 | spin_lock(&inode->i_lock); | ||
890 | lo->plh_block_lgets--; | ||
891 | spin_unlock(&inode->i_lock); | ||
892 | pnfs_put_layout_hdr(lo); | ||
893 | return ret; | ||
894 | } | ||
895 | |||
848 | bool pnfs_roc(struct inode *ino) | 896 | bool pnfs_roc(struct inode *ino) |
849 | { | 897 | { |
850 | struct pnfs_layout_hdr *lo; | 898 | struct pnfs_layout_hdr *lo; |
@@ -1458,7 +1506,6 @@ static void pnfs_ld_handle_write_error(struct nfs_write_data *data) | |||
1458 | dprintk("pnfs write error = %d\n", hdr->pnfs_error); | 1506 | dprintk("pnfs write error = %d\n", hdr->pnfs_error); |
1459 | if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & | 1507 | if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & |
1460 | PNFS_LAYOUTRET_ON_ERROR) { | 1508 | PNFS_LAYOUTRET_ON_ERROR) { |
1461 | clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags); | ||
1462 | pnfs_return_layout(hdr->inode); | 1509 | pnfs_return_layout(hdr->inode); |
1463 | } | 1510 | } |
1464 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) | 1511 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) |
@@ -1613,7 +1660,6 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data) | |||
1613 | dprintk("pnfs read error = %d\n", hdr->pnfs_error); | 1660 | dprintk("pnfs read error = %d\n", hdr->pnfs_error); |
1614 | if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & | 1661 | if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags & |
1615 | PNFS_LAYOUTRET_ON_ERROR) { | 1662 | PNFS_LAYOUTRET_ON_ERROR) { |
1616 | clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags); | ||
1617 | pnfs_return_layout(hdr->inode); | 1663 | pnfs_return_layout(hdr->inode); |
1618 | } | 1664 | } |
1619 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) | 1665 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) |
@@ -1746,11 +1792,27 @@ static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp) | |||
1746 | 1792 | ||
1747 | list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) { | 1793 | list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) { |
1748 | if (lseg->pls_range.iomode == IOMODE_RW && | 1794 | if (lseg->pls_range.iomode == IOMODE_RW && |
1749 | test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) | 1795 | test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) |
1750 | list_add(&lseg->pls_lc_list, listp); | 1796 | list_add(&lseg->pls_lc_list, listp); |
1751 | } | 1797 | } |
1752 | } | 1798 | } |
1753 | 1799 | ||
1800 | static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp) | ||
1801 | { | ||
1802 | struct pnfs_layout_segment *lseg, *tmp; | ||
1803 | unsigned long *bitlock = &NFS_I(inode)->flags; | ||
1804 | |||
1805 | /* Matched by references in pnfs_set_layoutcommit */ | ||
1806 | list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) { | ||
1807 | list_del_init(&lseg->pls_lc_list); | ||
1808 | pnfs_put_lseg(lseg); | ||
1809 | } | ||
1810 | |||
1811 | clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock); | ||
1812 | smp_mb__after_clear_bit(); | ||
1813 | wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING); | ||
1814 | } | ||
1815 | |||
1754 | void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg) | 1816 | void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg) |
1755 | { | 1817 | { |
1756 | pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode); | 1818 | pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode); |
@@ -1795,6 +1857,7 @@ void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data) | |||
1795 | 1857 | ||
1796 | if (nfss->pnfs_curr_ld->cleanup_layoutcommit) | 1858 | if (nfss->pnfs_curr_ld->cleanup_layoutcommit) |
1797 | nfss->pnfs_curr_ld->cleanup_layoutcommit(data); | 1859 | nfss->pnfs_curr_ld->cleanup_layoutcommit(data); |
1860 | pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list); | ||
1798 | } | 1861 | } |
1799 | 1862 | ||
1800 | /* | 1863 | /* |
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 94ba80417748..f5f8a470a647 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h | |||
@@ -219,6 +219,7 @@ void pnfs_set_layoutcommit(struct nfs_write_data *wdata); | |||
219 | void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data); | 219 | void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data); |
220 | int pnfs_layoutcommit_inode(struct inode *inode, bool sync); | 220 | int pnfs_layoutcommit_inode(struct inode *inode, bool sync); |
221 | int _pnfs_return_layout(struct inode *); | 221 | int _pnfs_return_layout(struct inode *); |
222 | int pnfs_commit_and_return_layout(struct inode *); | ||
222 | void pnfs_ld_write_done(struct nfs_write_data *); | 223 | void pnfs_ld_write_done(struct nfs_write_data *); |
223 | void pnfs_ld_read_done(struct nfs_read_data *); | 224 | void pnfs_ld_read_done(struct nfs_read_data *); |
224 | struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino, | 225 | struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino, |
@@ -407,6 +408,11 @@ static inline int pnfs_return_layout(struct inode *ino) | |||
407 | return 0; | 408 | return 0; |
408 | } | 409 | } |
409 | 410 | ||
411 | static inline int pnfs_commit_and_return_layout(struct inode *inode) | ||
412 | { | ||
413 | return 0; | ||
414 | } | ||
415 | |||
410 | static inline bool | 416 | static inline bool |
411 | pnfs_ld_layoutret_on_setattr(struct inode *inode) | 417 | pnfs_ld_layoutret_on_setattr(struct inode *inode) |
412 | { | 418 | { |
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 01168865dd37..a2720071f282 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -264,7 +264,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, | |||
264 | iattr->ia_valid |= ATTR_SIZE; | 264 | iattr->ia_valid |= ATTR_SIZE; |
265 | } | 265 | } |
266 | if (bmval[0] & FATTR4_WORD0_ACL) { | 266 | if (bmval[0] & FATTR4_WORD0_ACL) { |
267 | int nace; | 267 | u32 nace; |
268 | struct nfs4_ace *ace; | 268 | struct nfs4_ace *ace; |
269 | 269 | ||
270 | READ_BUF(4); len += 4; | 270 | READ_BUF(4); len += 4; |
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index 62c1ee128aeb..ca05f6dc3544 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c | |||
@@ -102,7 +102,8 @@ nfsd_reply_cache_free_locked(struct svc_cacherep *rp) | |||
102 | { | 102 | { |
103 | if (rp->c_type == RC_REPLBUFF) | 103 | if (rp->c_type == RC_REPLBUFF) |
104 | kfree(rp->c_replvec.iov_base); | 104 | kfree(rp->c_replvec.iov_base); |
105 | hlist_del(&rp->c_hash); | 105 | if (!hlist_unhashed(&rp->c_hash)) |
106 | hlist_del(&rp->c_hash); | ||
106 | list_del(&rp->c_lru); | 107 | list_del(&rp->c_lru); |
107 | --num_drc_entries; | 108 | --num_drc_entries; |
108 | kmem_cache_free(drc_slab, rp); | 109 | kmem_cache_free(drc_slab, rp); |
@@ -118,6 +119,10 @@ nfsd_reply_cache_free(struct svc_cacherep *rp) | |||
118 | 119 | ||
119 | int nfsd_reply_cache_init(void) | 120 | int nfsd_reply_cache_init(void) |
120 | { | 121 | { |
122 | INIT_LIST_HEAD(&lru_head); | ||
123 | max_drc_entries = nfsd_cache_size_limit(); | ||
124 | num_drc_entries = 0; | ||
125 | |||
121 | register_shrinker(&nfsd_reply_cache_shrinker); | 126 | register_shrinker(&nfsd_reply_cache_shrinker); |
122 | drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep), | 127 | drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep), |
123 | 0, 0, NULL); | 128 | 0, 0, NULL); |
@@ -128,10 +133,6 @@ int nfsd_reply_cache_init(void) | |||
128 | if (!cache_hash) | 133 | if (!cache_hash) |
129 | goto out_nomem; | 134 | goto out_nomem; |
130 | 135 | ||
131 | INIT_LIST_HEAD(&lru_head); | ||
132 | max_drc_entries = nfsd_cache_size_limit(); | ||
133 | num_drc_entries = 0; | ||
134 | |||
135 | return 0; | 136 | return 0; |
136 | out_nomem: | 137 | out_nomem: |
137 | printk(KERN_ERR "nfsd: failed to allocate reply cache\n"); | 138 | printk(KERN_ERR "nfsd: failed to allocate reply cache\n"); |
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 2a7eb536de0b..2b2e2396a869 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
@@ -1013,6 +1013,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, | |||
1013 | int host_err; | 1013 | int host_err; |
1014 | int stable = *stablep; | 1014 | int stable = *stablep; |
1015 | int use_wgather; | 1015 | int use_wgather; |
1016 | loff_t pos = offset; | ||
1016 | 1017 | ||
1017 | dentry = file->f_path.dentry; | 1018 | dentry = file->f_path.dentry; |
1018 | inode = dentry->d_inode; | 1019 | inode = dentry->d_inode; |
@@ -1025,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, | |||
1025 | 1026 | ||
1026 | /* Write the data. */ | 1027 | /* Write the data. */ |
1027 | oldfs = get_fs(); set_fs(KERNEL_DS); | 1028 | oldfs = get_fs(); set_fs(KERNEL_DS); |
1028 | host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset); | 1029 | host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos); |
1029 | set_fs(oldfs); | 1030 | set_fs(oldfs); |
1030 | if (host_err < 0) | 1031 | if (host_err < 0) |
1031 | goto out_nfserr; | 1032 | goto out_nfserr; |
diff --git a/fs/pnode.c b/fs/pnode.c index 3e000a51ac0d..8b29d2164da6 100644 --- a/fs/pnode.c +++ b/fs/pnode.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/mnt_namespace.h> | 9 | #include <linux/mnt_namespace.h> |
10 | #include <linux/mount.h> | 10 | #include <linux/mount.h> |
11 | #include <linux/fs.h> | 11 | #include <linux/fs.h> |
12 | #include <linux/nsproxy.h> | ||
12 | #include "internal.h" | 13 | #include "internal.h" |
13 | #include "pnode.h" | 14 | #include "pnode.h" |
14 | 15 | ||
@@ -220,6 +221,7 @@ static struct mount *get_source(struct mount *dest, | |||
220 | int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry, | 221 | int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry, |
221 | struct mount *source_mnt, struct list_head *tree_list) | 222 | struct mount *source_mnt, struct list_head *tree_list) |
222 | { | 223 | { |
224 | struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; | ||
223 | struct mount *m, *child; | 225 | struct mount *m, *child; |
224 | int ret = 0; | 226 | int ret = 0; |
225 | struct mount *prev_dest_mnt = dest_mnt; | 227 | struct mount *prev_dest_mnt = dest_mnt; |
@@ -237,6 +239,10 @@ int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry, | |||
237 | 239 | ||
238 | source = get_source(m, prev_dest_mnt, prev_src_mnt, &type); | 240 | source = get_source(m, prev_dest_mnt, prev_src_mnt, &type); |
239 | 241 | ||
242 | /* Notice when we are propagating across user namespaces */ | ||
243 | if (m->mnt_ns->user_ns != user_ns) | ||
244 | type |= CL_UNPRIVILEGED; | ||
245 | |||
240 | child = copy_tree(source, source->mnt.mnt_root, type); | 246 | child = copy_tree(source, source->mnt.mnt_root, type); |
241 | if (IS_ERR(child)) { | 247 | if (IS_ERR(child)) { |
242 | ret = PTR_ERR(child); | 248 | ret = PTR_ERR(child); |
diff --git a/fs/pnode.h b/fs/pnode.h index 19b853a3445c..a0493d5ebfbf 100644 --- a/fs/pnode.h +++ b/fs/pnode.h | |||
@@ -23,6 +23,7 @@ | |||
23 | #define CL_MAKE_SHARED 0x08 | 23 | #define CL_MAKE_SHARED 0x08 |
24 | #define CL_PRIVATE 0x10 | 24 | #define CL_PRIVATE 0x10 |
25 | #define CL_SHARED_TO_SLAVE 0x20 | 25 | #define CL_SHARED_TO_SLAVE 0x20 |
26 | #define CL_UNPRIVILEGED 0x40 | ||
26 | 27 | ||
27 | static inline void set_mnt_shared(struct mount *mnt) | 28 | static inline void set_mnt_shared(struct mount *mnt) |
28 | { | 29 | { |
diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 4b3b3ffb52f1..21e1a8f1659d 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c | |||
@@ -755,37 +755,8 @@ void pde_put(struct proc_dir_entry *pde) | |||
755 | free_proc_entry(pde); | 755 | free_proc_entry(pde); |
756 | } | 756 | } |
757 | 757 | ||
758 | /* | 758 | static void entry_rundown(struct proc_dir_entry *de) |
759 | * Remove a /proc entry and free it if it's not currently in use. | ||
760 | */ | ||
761 | void remove_proc_entry(const char *name, struct proc_dir_entry *parent) | ||
762 | { | 759 | { |
763 | struct proc_dir_entry **p; | ||
764 | struct proc_dir_entry *de = NULL; | ||
765 | const char *fn = name; | ||
766 | unsigned int len; | ||
767 | |||
768 | spin_lock(&proc_subdir_lock); | ||
769 | if (__xlate_proc_name(name, &parent, &fn) != 0) { | ||
770 | spin_unlock(&proc_subdir_lock); | ||
771 | return; | ||
772 | } | ||
773 | len = strlen(fn); | ||
774 | |||
775 | for (p = &parent->subdir; *p; p=&(*p)->next ) { | ||
776 | if (proc_match(len, fn, *p)) { | ||
777 | de = *p; | ||
778 | *p = de->next; | ||
779 | de->next = NULL; | ||
780 | break; | ||
781 | } | ||
782 | } | ||
783 | spin_unlock(&proc_subdir_lock); | ||
784 | if (!de) { | ||
785 | WARN(1, "name '%s'\n", name); | ||
786 | return; | ||
787 | } | ||
788 | |||
789 | spin_lock(&de->pde_unload_lock); | 760 | spin_lock(&de->pde_unload_lock); |
790 | /* | 761 | /* |
791 | * Stop accepting new callers into module. If you're | 762 | * Stop accepting new callers into module. If you're |
@@ -817,6 +788,40 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) | |||
817 | spin_lock(&de->pde_unload_lock); | 788 | spin_lock(&de->pde_unload_lock); |
818 | } | 789 | } |
819 | spin_unlock(&de->pde_unload_lock); | 790 | spin_unlock(&de->pde_unload_lock); |
791 | } | ||
792 | |||
793 | /* | ||
794 | * Remove a /proc entry and free it if it's not currently in use. | ||
795 | */ | ||
796 | void remove_proc_entry(const char *name, struct proc_dir_entry *parent) | ||
797 | { | ||
798 | struct proc_dir_entry **p; | ||
799 | struct proc_dir_entry *de = NULL; | ||
800 | const char *fn = name; | ||
801 | unsigned int len; | ||
802 | |||
803 | spin_lock(&proc_subdir_lock); | ||
804 | if (__xlate_proc_name(name, &parent, &fn) != 0) { | ||
805 | spin_unlock(&proc_subdir_lock); | ||
806 | return; | ||
807 | } | ||
808 | len = strlen(fn); | ||
809 | |||
810 | for (p = &parent->subdir; *p; p=&(*p)->next ) { | ||
811 | if (proc_match(len, fn, *p)) { | ||
812 | de = *p; | ||
813 | *p = de->next; | ||
814 | de->next = NULL; | ||
815 | break; | ||
816 | } | ||
817 | } | ||
818 | spin_unlock(&proc_subdir_lock); | ||
819 | if (!de) { | ||
820 | WARN(1, "name '%s'\n", name); | ||
821 | return; | ||
822 | } | ||
823 | |||
824 | entry_rundown(de); | ||
820 | 825 | ||
821 | if (S_ISDIR(de->mode)) | 826 | if (S_ISDIR(de->mode)) |
822 | parent->nlink--; | 827 | parent->nlink--; |
@@ -827,3 +832,57 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) | |||
827 | pde_put(de); | 832 | pde_put(de); |
828 | } | 833 | } |
829 | EXPORT_SYMBOL(remove_proc_entry); | 834 | EXPORT_SYMBOL(remove_proc_entry); |
835 | |||
836 | int remove_proc_subtree(const char *name, struct proc_dir_entry *parent) | ||
837 | { | ||
838 | struct proc_dir_entry **p; | ||
839 | struct proc_dir_entry *root = NULL, *de, *next; | ||
840 | const char *fn = name; | ||
841 | unsigned int len; | ||
842 | |||
843 | spin_lock(&proc_subdir_lock); | ||
844 | if (__xlate_proc_name(name, &parent, &fn) != 0) { | ||
845 | spin_unlock(&proc_subdir_lock); | ||
846 | return -ENOENT; | ||
847 | } | ||
848 | len = strlen(fn); | ||
849 | |||
850 | for (p = &parent->subdir; *p; p=&(*p)->next ) { | ||
851 | if (proc_match(len, fn, *p)) { | ||
852 | root = *p; | ||
853 | *p = root->next; | ||
854 | root->next = NULL; | ||
855 | break; | ||
856 | } | ||
857 | } | ||
858 | if (!root) { | ||
859 | spin_unlock(&proc_subdir_lock); | ||
860 | return -ENOENT; | ||
861 | } | ||
862 | de = root; | ||
863 | while (1) { | ||
864 | next = de->subdir; | ||
865 | if (next) { | ||
866 | de->subdir = next->next; | ||
867 | next->next = NULL; | ||
868 | de = next; | ||
869 | continue; | ||
870 | } | ||
871 | spin_unlock(&proc_subdir_lock); | ||
872 | |||
873 | entry_rundown(de); | ||
874 | next = de->parent; | ||
875 | if (S_ISDIR(de->mode)) | ||
876 | next->nlink--; | ||
877 | de->nlink = 0; | ||
878 | if (de == root) | ||
879 | break; | ||
880 | pde_put(de); | ||
881 | |||
882 | spin_lock(&proc_subdir_lock); | ||
883 | de = next; | ||
884 | } | ||
885 | pde_put(root); | ||
886 | return 0; | ||
887 | } | ||
888 | EXPORT_SYMBOL(remove_proc_subtree); | ||
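
The new remove_proc_subtree() above tears down a /proc subtree without recursion: it keeps detaching the first child until it reaches a leaf, runs that entry down, drops it, and climbs back to the parent, finishing when it arrives back at the detached root. The sketch below reproduces that traversal over a toy tree in userspace; the node type and helper names are invented, and locking, nlink bookkeeping and entry_rundown() are left out.

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for struct proc_dir_entry's linkage. */
struct node {
        char name[16];
        struct node *parent;
        struct node *subdir;    /* first child  */
        struct node *next;      /* next sibling */
};

static struct node *add_child(struct node *parent, const char *name)
{
        struct node *n = calloc(1, sizeof(*n));

        snprintf(n->name, sizeof(n->name), "%s", name);
        n->parent = parent;
        if (parent) {
                n->next = parent->subdir;
                parent->subdir = n;
        }
        return n;
}

/*
 * Tear down a subtree without recursion, the way remove_proc_subtree()
 * does: keep detaching the first child until a node is a leaf, free it,
 * then continue from its parent.
 */
static void remove_subtree(struct node *root)
{
        struct node *de = root, *next;

        while (1) {
                next = de->subdir;
                if (next) {
                        de->subdir = next->next;        /* detach first child */
                        next->next = NULL;
                        de = next;                      /* descend */
                        continue;
                }
                printf("freeing %s\n", de->name);
                next = de->parent;
                if (de == root)
                        break;
                free(de);
                de = next;                              /* climb back up */
        }
        free(root);
}

int main(void)
{
        struct node *root = add_child(NULL, "root");
        struct node *a = add_child(root, "a");

        add_child(root, "b");
        add_child(a, "a1");
        add_child(a, "a2");
        remove_subtree(root);
        return 0;
}

Because each child is unlinked from its parent before the walk descends into it, no extra stack or visited set is needed; the parent pointer alone is enough to climb back out.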
diff --git a/fs/proc/inode.c b/fs/proc/inode.c index a86aebc9ba7c..869116c2afbe 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c | |||
@@ -446,9 +446,10 @@ static const struct file_operations proc_reg_file_ops_no_compat = { | |||
446 | 446 | ||
447 | struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) | 447 | struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) |
448 | { | 448 | { |
449 | struct inode *inode = iget_locked(sb, de->low_ino); | 449 | struct inode *inode = new_inode_pseudo(sb); |
450 | 450 | ||
451 | if (inode && (inode->i_state & I_NEW)) { | 451 | if (inode) { |
452 | inode->i_ino = de->low_ino; | ||
452 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | 453 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
453 | PROC_I(inode)->pde = de; | 454 | PROC_I(inode)->pde = de; |
454 | 455 | ||
@@ -476,7 +477,6 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) | |||
476 | inode->i_fop = de->proc_fops; | 477 | inode->i_fop = de->proc_fops; |
477 | } | 478 | } |
478 | } | 479 | } |
479 | unlock_new_inode(inode); | ||
480 | } else | 480 | } else |
481 | pde_put(de); | 481 | pde_put(de); |
482 | return inode; | 482 | return inode; |
diff --git a/fs/proc/root.c b/fs/proc/root.c index c6e9fac26bac..9c7fab1d23f0 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/bitops.h> | 18 | #include <linux/bitops.h> |
19 | #include <linux/user_namespace.h> | ||
19 | #include <linux/mount.h> | 20 | #include <linux/mount.h> |
20 | #include <linux/pid_namespace.h> | 21 | #include <linux/pid_namespace.h> |
21 | #include <linux/parser.h> | 22 | #include <linux/parser.h> |
@@ -108,6 +109,9 @@ static struct dentry *proc_mount(struct file_system_type *fs_type, | |||
108 | } else { | 109 | } else { |
109 | ns = task_active_pid_ns(current); | 110 | ns = task_active_pid_ns(current); |
110 | options = data; | 111 | options = data; |
112 | |||
113 | if (!current_user_ns()->may_mount_proc) | ||
114 | return ERR_PTR(-EPERM); | ||
111 | } | 115 | } |
112 | 116 | ||
113 | sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns); | 117 | sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns); |
diff --git a/fs/read_write.c b/fs/read_write.c index a698eff457fb..e6ddc8dceb96 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/splice.h> | 17 | #include <linux/splice.h> |
18 | #include <linux/compat.h> | 18 | #include <linux/compat.h> |
19 | #include "read_write.h" | 19 | #include "read_write.h" |
20 | #include "internal.h" | ||
20 | 21 | ||
21 | #include <asm/uaccess.h> | 22 | #include <asm/uaccess.h> |
22 | #include <asm/unistd.h> | 23 | #include <asm/unistd.h> |
@@ -417,6 +418,33 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof | |||
417 | 418 | ||
418 | EXPORT_SYMBOL(do_sync_write); | 419 | EXPORT_SYMBOL(do_sync_write); |
419 | 420 | ||
421 | ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t *pos) | ||
422 | { | ||
423 | mm_segment_t old_fs; | ||
424 | const char __user *p; | ||
425 | ssize_t ret; | ||
426 | |||
427 | if (!file->f_op || (!file->f_op->write && !file->f_op->aio_write)) | ||
428 | return -EINVAL; | ||
429 | |||
430 | old_fs = get_fs(); | ||
431 | set_fs(get_ds()); | ||
432 | p = (__force const char __user *)buf; | ||
433 | if (count > MAX_RW_COUNT) | ||
434 | count = MAX_RW_COUNT; | ||
435 | if (file->f_op->write) | ||
436 | ret = file->f_op->write(file, p, count, pos); | ||
437 | else | ||
438 | ret = do_sync_write(file, p, count, pos); | ||
439 | set_fs(old_fs); | ||
440 | if (ret > 0) { | ||
441 | fsnotify_modify(file); | ||
442 | add_wchar(current, ret); | ||
443 | } | ||
444 | inc_syscw(current); | ||
445 | return ret; | ||
446 | } | ||
447 | |||
420 | ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) | 448 | ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) |
421 | { | 449 | { |
422 | ssize_t ret; | 450 | ssize_t ret; |
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index c196369fe408..4cce1d9552fb 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c | |||
@@ -187,8 +187,8 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset, | |||
187 | if (dbuf->count == ARRAY_SIZE(dbuf->dentries)) | 187 | if (dbuf->count == ARRAY_SIZE(dbuf->dentries)) |
188 | return -ENOSPC; | 188 | return -ENOSPC; |
189 | 189 | ||
190 | if (name[0] == '.' && (name[1] == '\0' || | 190 | if (name[0] == '.' && (namelen < 2 || |
191 | (name[1] == '.' && name[2] == '\0'))) | 191 | (namelen == 2 && name[1] == '.'))) |
192 | return 0; | 192 | return 0; |
193 | 193 | ||
194 | dentry = lookup_one_len(name, dbuf->xadir, namelen); | 194 | dentry = lookup_one_len(name, dbuf->xadir, namelen); |
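
The fs/reiserfs/xattr.c fix above stops assuming that the name passed to the readdir callback is NUL-terminated and instead uses namelen when skipping the "." and ".." entries. A standalone version of the corrected check:

#include <stdio.h>

/* Names coming from a readdir callback are a (pointer, length) pair and
 * are not guaranteed to be NUL-terminated, so test the length, not the
 * bytes after it. */
static int is_dot_or_dotdot(const char *name, int namelen)
{
        return name[0] == '.' &&
               (namelen < 2 || (namelen == 2 && name[1] == '.'));
}

int main(void)
{
        const char buf[] = ".config";

        printf("%d %d %d\n",
               is_dot_or_dotdot(buf, 1),        /* "."       -> 1 */
               is_dot_or_dotdot("..", 2),       /* ".."      -> 1 */
               is_dot_or_dotdot(buf, 7));       /* ".config" -> 0 */
        return 0;
}

The old byte-based test inspected name[1] and name[2] regardless of namelen, so it could read past the length the caller vouched for; the length-based test never looks beyond namelen bytes.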
diff --git a/fs/splice.c b/fs/splice.c index 718bd0056384..29e394e49ddd 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/security.h> | 31 | #include <linux/security.h> |
32 | #include <linux/gfp.h> | 32 | #include <linux/gfp.h> |
33 | #include <linux/socket.h> | 33 | #include <linux/socket.h> |
34 | #include "internal.h" | ||
34 | 35 | ||
35 | /* | 36 | /* |
36 | * Attempt to steal a page from a pipe buffer. This should perhaps go into | 37 | * Attempt to steal a page from a pipe buffer. This should perhaps go into |
@@ -1048,9 +1049,10 @@ static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf, | |||
1048 | { | 1049 | { |
1049 | int ret; | 1050 | int ret; |
1050 | void *data; | 1051 | void *data; |
1052 | loff_t tmp = sd->pos; | ||
1051 | 1053 | ||
1052 | data = buf->ops->map(pipe, buf, 0); | 1054 | data = buf->ops->map(pipe, buf, 0); |
1053 | ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos); | 1055 | ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp); |
1054 | buf->ops->unmap(pipe, buf, data); | 1056 | buf->ops->unmap(pipe, buf, data); |
1055 | 1057 | ||
1056 | return ret; | 1058 | return ret; |
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 2fbdff6be25c..e14512678c9b 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c | |||
@@ -1020,6 +1020,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir) | |||
1020 | ino = parent_sd->s_ino; | 1020 | ino = parent_sd->s_ino; |
1021 | if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0) | 1021 | if (filldir(dirent, ".", 1, filp->f_pos, ino, DT_DIR) == 0) |
1022 | filp->f_pos++; | 1022 | filp->f_pos++; |
1023 | else | ||
1024 | return 0; | ||
1023 | } | 1025 | } |
1024 | if (filp->f_pos == 1) { | 1026 | if (filp->f_pos == 1) { |
1025 | if (parent_sd->s_parent) | 1027 | if (parent_sd->s_parent) |
@@ -1028,6 +1030,8 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir) | |||
1028 | ino = parent_sd->s_ino; | 1030 | ino = parent_sd->s_ino; |
1029 | if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0) | 1031 | if (filldir(dirent, "..", 2, filp->f_pos, ino, DT_DIR) == 0) |
1030 | filp->f_pos++; | 1032 | filp->f_pos++; |
1033 | else | ||
1034 | return 0; | ||
1031 | } | 1035 | } |
1032 | mutex_lock(&sysfs_mutex); | 1036 | mutex_lock(&sysfs_mutex); |
1033 | for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos); | 1037 | for (pos = sysfs_dir_pos(ns, parent_sd, filp->f_pos, pos); |
@@ -1058,10 +1062,21 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir) | |||
1058 | return 0; | 1062 | return 0; |
1059 | } | 1063 | } |
1060 | 1064 | ||
1065 | static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence) | ||
1066 | { | ||
1067 | struct inode *inode = file_inode(file); | ||
1068 | loff_t ret; | ||
1069 | |||
1070 | mutex_lock(&inode->i_mutex); | ||
1071 | ret = generic_file_llseek(file, offset, whence); | ||
1072 | mutex_unlock(&inode->i_mutex); | ||
1073 | |||
1074 | return ret; | ||
1075 | } | ||
1061 | 1076 | ||
1062 | const struct file_operations sysfs_dir_operations = { | 1077 | const struct file_operations sysfs_dir_operations = { |
1063 | .read = generic_read_dir, | 1078 | .read = generic_read_dir, |
1064 | .readdir = sysfs_readdir, | 1079 | .readdir = sysfs_readdir, |
1065 | .release = sysfs_dir_release, | 1080 | .release = sysfs_dir_release, |
1066 | .llseek = generic_file_llseek, | 1081 | .llseek = sysfs_dir_llseek, |
1067 | }; | 1082 | }; |
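The sysfs hunk adds a directory-specific llseek that takes inode->i_mutex around generic_file_llseek(), so seeks are serialized against the readdir code that updates f_pos. A userspace analogue of that locking shape (types and names are ours, and the real code validates the offset via generic_file_llseek() rather than assigning it directly):

    #include <pthread.h>
    #include <stdio.h>

    struct toy_dir {
            pthread_mutex_t lock;   /* plays the role of inode->i_mutex */
            long long pos;          /* plays the role of file->f_pos */
    };

    static long long toy_dir_llseek(struct toy_dir *d, long long offset)
    {
            long long ret;

            pthread_mutex_lock(&d->lock);
            d->pos = offset;        /* simplified: no validation here */
            ret = d->pos;
            pthread_mutex_unlock(&d->lock);
            return ret;
    }

    int main(void)
    {
            struct toy_dir d = { PTHREAD_MUTEX_INITIALIZER, 0 };

            printf("new pos = %lld\n", toy_dir_llseek(&d, 2));
            return 0;
    }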
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c index 8d924b5ec733..afd83273e6ce 100644 --- a/fs/sysfs/mount.c +++ b/fs/sysfs/mount.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/magic.h> | 20 | #include <linux/magic.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <linux/user_namespace.h> | ||
22 | 23 | ||
23 | #include "sysfs.h" | 24 | #include "sysfs.h" |
24 | 25 | ||
@@ -111,6 +112,9 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type, | |||
111 | struct super_block *sb; | 112 | struct super_block *sb; |
112 | int error; | 113 | int error; |
113 | 114 | ||
115 | if (!(flags & MS_KERNMOUNT) && !current_user_ns()->may_mount_sysfs) | ||
116 | return ERR_PTR(-EPERM); | ||
117 | |||
114 | info = kzalloc(sizeof(*info), GFP_KERNEL); | 118 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
115 | if (!info) | 119 | if (!info) |
116 | return ERR_PTR(-ENOMEM); | 120 | return ERR_PTR(-ENOMEM); |
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index ac838b844936..f21acf0ef01f 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
@@ -1568,6 +1568,12 @@ static int ubifs_remount_rw(struct ubifs_info *c) | |||
1568 | c->remounting_rw = 1; | 1568 | c->remounting_rw = 1; |
1569 | c->ro_mount = 0; | 1569 | c->ro_mount = 0; |
1570 | 1570 | ||
1571 | if (c->space_fixup) { | ||
1572 | err = ubifs_fixup_free_space(c); | ||
1573 | if (err) | ||
1574 | return err; | ||
1575 | } | ||
1576 | |||
1571 | err = check_free_space(c); | 1577 | err = check_free_space(c); |
1572 | if (err) | 1578 | if (err) |
1573 | goto out; | 1579 | goto out; |
@@ -1684,12 +1690,6 @@ static int ubifs_remount_rw(struct ubifs_info *c) | |||
1684 | err = dbg_check_space_info(c); | 1690 | err = dbg_check_space_info(c); |
1685 | } | 1691 | } |
1686 | 1692 | ||
1687 | if (c->space_fixup) { | ||
1688 | err = ubifs_fixup_free_space(c); | ||
1689 | if (err) | ||
1690 | goto out; | ||
1691 | } | ||
1692 | |||
1693 | mutex_unlock(&c->umount_mutex); | 1693 | mutex_unlock(&c->umount_mutex); |
1694 | return err; | 1694 | return err; |
1695 | 1695 | ||
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 4e8f0df82d02..8459b5d8cb71 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -1334,6 +1334,12 @@ _xfs_buf_ioapply( | |||
1334 | int size; | 1334 | int size; |
1335 | int i; | 1335 | int i; |
1336 | 1336 | ||
1337 | /* | ||
1338 | * Make sure we capture only current IO errors rather than stale errors | ||
1339 | * left over from previous use of the buffer (e.g. failed readahead). | ||
1340 | */ | ||
1341 | bp->b_error = 0; | ||
1342 | |||
1337 | if (bp->b_flags & XBF_WRITE) { | 1343 | if (bp->b_flags & XBF_WRITE) { |
1338 | if (bp->b_flags & XBF_SYNCIO) | 1344 | if (bp->b_flags & XBF_SYNCIO) |
1339 | rw = WRITE_SYNC; | 1345 | rw = WRITE_SYNC; |
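The xfs_buf hunk clears b_error at the top of I/O submission so a stale error (for example from a failed readahead) is not reported against a fresh request. The same reset-before-reuse pattern in isolation, with a toy buffer type of our own:

    #include <stdio.h>

    struct toy_buf {
            int b_error;    /* last I/O error recorded on the buffer */
    };

    static void toy_ioapply(struct toy_buf *bp)
    {
            /* Capture only errors from this submission, not leftovers. */
            bp->b_error = 0;
            /* ... build and submit the I/O here ... */
    }

    int main(void)
    {
            struct toy_buf bp = { .b_error = -5 };  /* stale -EIO from earlier use */

            toy_ioapply(&bp);
            printf("b_error after resubmission: %d\n", bp.b_error);
            return 0;
    }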
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 912d83d8860a..5a30dd899d2b 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
@@ -325,7 +325,7 @@ xfs_iomap_eof_want_preallocate( | |||
325 | * rather than falling short due to things like stripe unit/width alignment of | 325 | * rather than falling short due to things like stripe unit/width alignment of |
326 | * real extents. | 326 | * real extents. |
327 | */ | 327 | */ |
328 | STATIC int | 328 | STATIC xfs_fsblock_t |
329 | xfs_iomap_eof_prealloc_initial_size( | 329 | xfs_iomap_eof_prealloc_initial_size( |
330 | struct xfs_mount *mp, | 330 | struct xfs_mount *mp, |
331 | struct xfs_inode *ip, | 331 | struct xfs_inode *ip, |
@@ -413,7 +413,7 @@ xfs_iomap_prealloc_size( | |||
413 | * have a large file on a small filesystem and the above | 413 | * have a large file on a small filesystem and the above |
414 | * lowspace thresholds are smaller than MAXEXTLEN. | 414 | * lowspace thresholds are smaller than MAXEXTLEN. |
415 | */ | 415 | */ |
416 | while (alloc_blocks >= freesp) | 416 | while (alloc_blocks && alloc_blocks >= freesp) |
417 | alloc_blocks >>= 4; | 417 | alloc_blocks >>= 4; |
418 | } | 418 | } |
419 | 419 | ||
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index a386b0b654cc..918e8fe2f5e9 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h | |||
@@ -581,7 +581,11 @@ | |||
581 | {0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 581 | {0x1002, 0x9908, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
582 | {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 582 | {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
583 | {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 583 | {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
584 | {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 584 | {0x1002, 0x990B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
585 | {0x1002, 0x990C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
586 | {0x1002, 0x990D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
587 | {0x1002, 0x990E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
588 | {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
585 | {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 589 | {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
586 | {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 590 | {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
587 | {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 591 | {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
@@ -592,6 +596,13 @@ | |||
592 | {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 596 | {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
593 | {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 597 | {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
594 | {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 598 | {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
599 | {0x1002, 0x9995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
600 | {0x1002, 0x9996, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
601 | {0x1002, 0x9997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
602 | {0x1002, 0x9998, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
603 | {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
604 | {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
605 | {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | ||
595 | {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 606 | {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
596 | {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 607 | {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
597 | {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ | 608 | {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
diff --git a/include/linux/ata.h b/include/linux/ata.h index 8f7a3d68371a..ee0bd9524055 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h | |||
@@ -954,7 +954,7 @@ static inline int atapi_cdb_len(const u16 *dev_id) | |||
954 | } | 954 | } |
955 | } | 955 | } |
956 | 956 | ||
957 | static inline bool atapi_command_packet_set(const u16 *dev_id) | 957 | static inline int atapi_command_packet_set(const u16 *dev_id) |
958 | { | 958 | { |
959 | return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; | 959 | return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; |
960 | } | 960 | } |
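The ata.h change reverts atapi_command_packet_set() to returning int: the expression yields a 5-bit command packet set code, and a bool return would collapse every non-zero code to 1, breaking callers that compare against specific values. A standalone demonstration of the truncation (the IDENTIFY word below is made up):

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned short id_config = 0x8580;      /* made-up IDENTIFY data word */
            int  as_int  = (id_config >> 8) & 0x1f;
            bool as_bool = (id_config >> 8) & 0x1f;

            /* bool flattens the 5-bit code to 0/1, so tests against specific
             * command packet set values would silently stop matching. */
            printf("int=0x%02x bool=%d\n", as_int, as_bool);
            return 0;
    }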
diff --git a/include/linux/compat.h b/include/linux/compat.h index 76a87fb57ac2..377cd8c3395e 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h | |||
@@ -141,11 +141,11 @@ typedef struct { | |||
141 | } compat_sigset_t; | 141 | } compat_sigset_t; |
142 | 142 | ||
143 | struct compat_sigaction { | 143 | struct compat_sigaction { |
144 | #ifndef __ARCH_HAS_ODD_SIGACTION | 144 | #ifndef __ARCH_HAS_IRIX_SIGACTION |
145 | compat_uptr_t sa_handler; | 145 | compat_uptr_t sa_handler; |
146 | compat_ulong_t sa_flags; | 146 | compat_ulong_t sa_flags; |
147 | #else | 147 | #else |
148 | compat_ulong_t sa_flags; | 148 | compat_uint_t sa_flags; |
149 | compat_uptr_t sa_handler; | 149 | compat_uptr_t sa_handler; |
150 | #endif | 150 | #endif |
151 | #ifdef __ARCH_HAS_SA_RESTORER | 151 | #ifdef __ARCH_HAS_SA_RESTORER |
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index a975de1ff59f..3bd46f766751 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h | |||
@@ -51,7 +51,7 @@ struct task_struct; | |||
51 | extern void debug_show_all_locks(void); | 51 | extern void debug_show_all_locks(void); |
52 | extern void debug_show_held_locks(struct task_struct *task); | 52 | extern void debug_show_held_locks(struct task_struct *task); |
53 | extern void debug_check_no_locks_freed(const void *from, unsigned long len); | 53 | extern void debug_check_no_locks_freed(const void *from, unsigned long len); |
54 | extern void debug_check_no_locks_held(void); | 54 | extern void debug_check_no_locks_held(struct task_struct *task); |
55 | #else | 55 | #else |
56 | static inline void debug_show_all_locks(void) | 56 | static inline void debug_show_all_locks(void) |
57 | { | 57 | { |
@@ -67,7 +67,7 @@ debug_check_no_locks_freed(const void *from, unsigned long len) | |||
67 | } | 67 | } |
68 | 68 | ||
69 | static inline void | 69 | static inline void |
70 | debug_check_no_locks_held(void) | 70 | debug_check_no_locks_held(struct task_struct *task) |
71 | { | 71 | { |
72 | } | 72 | } |
73 | #endif | 73 | #endif |
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index e83ef39b3bea..fe8c4476f7e4 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h | |||
@@ -213,7 +213,7 @@ struct devfreq_simple_ondemand_data { | |||
213 | #endif | 213 | #endif |
214 | 214 | ||
215 | #else /* !CONFIG_PM_DEVFREQ */ | 215 | #else /* !CONFIG_PM_DEVFREQ */ |
216 | static struct devfreq *devfreq_add_device(struct device *dev, | 216 | static inline struct devfreq *devfreq_add_device(struct device *dev, |
217 | struct devfreq_dev_profile *profile, | 217 | struct devfreq_dev_profile *profile, |
218 | const char *governor_name, | 218 | const char *governor_name, |
219 | void *data) | 219 | void *data) |
@@ -221,34 +221,34 @@ static struct devfreq *devfreq_add_device(struct device *dev, | |||
221 | return NULL; | 221 | return NULL; |
222 | } | 222 | } |
223 | 223 | ||
224 | static int devfreq_remove_device(struct devfreq *devfreq) | 224 | static inline int devfreq_remove_device(struct devfreq *devfreq) |
225 | { | 225 | { |
226 | return 0; | 226 | return 0; |
227 | } | 227 | } |
228 | 228 | ||
229 | static int devfreq_suspend_device(struct devfreq *devfreq) | 229 | static inline int devfreq_suspend_device(struct devfreq *devfreq) |
230 | { | 230 | { |
231 | return 0; | 231 | return 0; |
232 | } | 232 | } |
233 | 233 | ||
234 | static int devfreq_resume_device(struct devfreq *devfreq) | 234 | static inline int devfreq_resume_device(struct devfreq *devfreq) |
235 | { | 235 | { |
236 | return 0; | 236 | return 0; |
237 | } | 237 | } |
238 | 238 | ||
239 | static struct opp *devfreq_recommended_opp(struct device *dev, | 239 | static inline struct opp *devfreq_recommended_opp(struct device *dev, |
240 | unsigned long *freq, u32 flags) | 240 | unsigned long *freq, u32 flags) |
241 | { | 241 | { |
242 | return -EINVAL; | 242 | return ERR_PTR(-EINVAL); |
243 | } | 243 | } |
244 | 244 | ||
245 | static int devfreq_register_opp_notifier(struct device *dev, | 245 | static inline int devfreq_register_opp_notifier(struct device *dev, |
246 | struct devfreq *devfreq) | 246 | struct devfreq *devfreq) |
247 | { | 247 | { |
248 | return -EINVAL; | 248 | return -EINVAL; |
249 | } | 249 | } |
250 | 250 | ||
251 | static int devfreq_unregister_opp_notifier(struct device *dev, | 251 | static inline int devfreq_unregister_opp_notifier(struct device *dev, |
252 | struct devfreq *devfreq) | 252 | struct devfreq *devfreq) |
253 | { | 253 | { |
254 | return -EINVAL; | 254 | return -EINVAL; |
diff --git a/include/linux/edac.h b/include/linux/edac.h index 4fd4999ccb5b..0b763276f619 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h | |||
@@ -561,7 +561,6 @@ struct csrow_info { | |||
561 | 561 | ||
562 | u32 ue_count; /* Uncorrectable Errors for this csrow */ | 562 | u32 ue_count; /* Uncorrectable Errors for this csrow */ |
563 | u32 ce_count; /* Correctable Errors for this csrow */ | 563 | u32 ce_count; /* Correctable Errors for this csrow */ |
564 | u32 nr_pages; /* combined pages count of all channels */ | ||
565 | 564 | ||
566 | struct mem_ctl_info *mci; /* the parent */ | 565 | struct mem_ctl_info *mci; /* the parent */ |
567 | 566 | ||
@@ -676,11 +675,11 @@ struct mem_ctl_info { | |||
676 | * sees memory sticks ("dimms"), and the ones that sees memory ranks. | 675 | * sees memory sticks ("dimms"), and the ones that sees memory ranks. |
677 | * All old memory controllers enumerate memories per rank, but most | 676 | * All old memory controllers enumerate memories per rank, but most |
678 | * of the recent drivers enumerate memories per DIMM, instead. | 677 | * of the recent drivers enumerate memories per DIMM, instead. |
679 | * When the memory controller is per rank, mem_is_per_rank is true. | 678 | * When the memory controller is per rank, csbased is true. |
680 | */ | 679 | */ |
681 | unsigned n_layers; | 680 | unsigned n_layers; |
682 | struct edac_mc_layer *layers; | 681 | struct edac_mc_layer *layers; |
683 | bool mem_is_per_rank; | 682 | bool csbased; |
684 | 683 | ||
685 | /* | 684 | /* |
686 | * DIMM info. Will eventually remove the entire csrows_info some day | 685 | * DIMM info. Will eventually remove the entire csrows_info some day |
@@ -741,8 +740,6 @@ struct mem_ctl_info { | |||
741 | u32 fake_inject_ue; | 740 | u32 fake_inject_ue; |
742 | u16 fake_inject_count; | 741 | u16 fake_inject_count; |
743 | #endif | 742 | #endif |
744 | __u8 csbased : 1, /* csrow-based memory controller */ | ||
745 | __resv : 7; | ||
746 | }; | 743 | }; |
747 | 744 | ||
748 | #endif | 745 | #endif |
diff --git a/include/linux/freezer.h b/include/linux/freezer.h index 043a5cf8b5ba..e70df40d84f6 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h | |||
@@ -3,7 +3,6 @@ | |||
3 | #ifndef FREEZER_H_INCLUDED | 3 | #ifndef FREEZER_H_INCLUDED |
4 | #define FREEZER_H_INCLUDED | 4 | #define FREEZER_H_INCLUDED |
5 | 5 | ||
6 | #include <linux/debug_locks.h> | ||
7 | #include <linux/sched.h> | 6 | #include <linux/sched.h> |
8 | #include <linux/wait.h> | 7 | #include <linux/wait.h> |
9 | #include <linux/atomic.h> | 8 | #include <linux/atomic.h> |
@@ -49,8 +48,6 @@ extern void thaw_kernel_threads(void); | |||
49 | 48 | ||
50 | static inline bool try_to_freeze(void) | 49 | static inline bool try_to_freeze(void) |
51 | { | 50 | { |
52 | if (!(current->flags & PF_NOFREEZE)) | ||
53 | debug_check_no_locks_held(); | ||
54 | might_sleep(); | 51 | might_sleep(); |
55 | if (likely(!freezing(current))) | 52 | if (likely(!freezing(current))) |
56 | return false; | 53 | return false; |
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index 729eded4b24f..2b93a9a5a1e6 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h | |||
@@ -50,4 +50,6 @@ static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root, | |||
50 | spin_unlock(&fs->lock); | 50 | spin_unlock(&fs->lock); |
51 | } | 51 | } |
52 | 52 | ||
53 | extern bool current_chrooted(void); | ||
54 | |||
53 | #endif /* _LINUX_FS_STRUCT_H */ | 55 | #endif /* _LINUX_FS_STRUCT_H */ |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index e5ca8ef50e9b..167abf907802 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -89,6 +89,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, | |||
89 | * that the call back has its own recursion protection. If it does | 89 | * that the call back has its own recursion protection. If it does |
90 | * not set this, then the ftrace infrastructure will add recursion | 90 | * not set this, then the ftrace infrastructure will add recursion |
91 | * protection for the caller. | 91 | * protection for the caller. |
92 | * STUB - The ftrace_ops is just a place holder. | ||
92 | */ | 93 | */ |
93 | enum { | 94 | enum { |
94 | FTRACE_OPS_FL_ENABLED = 1 << 0, | 95 | FTRACE_OPS_FL_ENABLED = 1 << 0, |
@@ -98,6 +99,7 @@ enum { | |||
98 | FTRACE_OPS_FL_SAVE_REGS = 1 << 4, | 99 | FTRACE_OPS_FL_SAVE_REGS = 1 << 4, |
99 | FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5, | 100 | FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5, |
100 | FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6, | 101 | FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6, |
102 | FTRACE_OPS_FL_STUB = 1 << 7, | ||
101 | }; | 103 | }; |
102 | 104 | ||
103 | struct ftrace_ops { | 105 | struct ftrace_ops { |
diff --git a/include/linux/hash.h b/include/linux/hash.h index 61c97ae22e01..f09a0ae4d858 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h | |||
@@ -15,6 +15,7 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <asm/types.h> | 17 | #include <asm/types.h> |
18 | #include <linux/compiler.h> | ||
18 | 19 | ||
19 | /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ | 20 | /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ |
20 | #define GOLDEN_RATIO_PRIME_32 0x9e370001UL | 21 | #define GOLDEN_RATIO_PRIME_32 0x9e370001UL |
@@ -31,7 +32,7 @@ | |||
31 | #error Wordsize not 32 or 64 | 32 | #error Wordsize not 32 or 64 |
32 | #endif | 33 | #endif |
33 | 34 | ||
34 | static inline u64 hash_64(u64 val, unsigned int bits) | 35 | static __always_inline u64 hash_64(u64 val, unsigned int bits) |
35 | { | 36 | { |
36 | u64 hash = val; | 37 | u64 hash = val; |
37 | 38 | ||
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index f5dbce50466e..66017028dcb3 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h | |||
@@ -37,7 +37,7 @@ void irq_work_sync(struct irq_work *work); | |||
37 | #ifdef CONFIG_IRQ_WORK | 37 | #ifdef CONFIG_IRQ_WORK |
38 | bool irq_work_needs_cpu(void); | 38 | bool irq_work_needs_cpu(void); |
39 | #else | 39 | #else |
40 | static bool irq_work_needs_cpu(void) { return false; } | 40 | static inline bool irq_work_needs_cpu(void) { return false; } |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | #endif /* _LINUX_IRQ_WORK_H */ | 43 | #endif /* _LINUX_IRQ_WORK_H */ |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 80d36874689b..79fdd80a42d4 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -390,7 +390,6 @@ extern struct pid *session_of_pgrp(struct pid *pgrp); | |||
390 | unsigned long int_sqrt(unsigned long); | 390 | unsigned long int_sqrt(unsigned long); |
391 | 391 | ||
392 | extern void bust_spinlocks(int yes); | 392 | extern void bust_spinlocks(int yes); |
393 | extern void wake_up_klogd(void); | ||
394 | extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ | 393 | extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */ |
395 | extern int panic_timeout; | 394 | extern int panic_timeout; |
396 | extern int panic_on_oops; | 395 | extern int panic_on_oops; |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index cad77fe09d77..c13958251927 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -518,7 +518,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, | |||
518 | int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, | 518 | int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
519 | void *data, unsigned long len); | 519 | void *data, unsigned long len); |
520 | int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, | 520 | int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
521 | gpa_t gpa); | 521 | gpa_t gpa, unsigned long len); |
522 | int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); | 522 | int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); |
523 | int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); | 523 | int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); |
524 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); | 524 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); |
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index fa7cc7244cbd..b0bcce0ddc95 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h | |||
@@ -71,6 +71,7 @@ struct gfn_to_hva_cache { | |||
71 | u64 generation; | 71 | u64 generation; |
72 | gpa_t gpa; | 72 | gpa_t gpa; |
73 | unsigned long hva; | 73 | unsigned long hva; |
74 | unsigned long len; | ||
74 | struct kvm_memory_slot *memslot; | 75 | struct kvm_memory_slot *memslot; |
75 | }; | 76 | }; |
76 | 77 | ||
diff --git a/include/linux/libata.h b/include/linux/libata.h index 91c9d109e5f1..eae7a053dc51 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -398,6 +398,7 @@ enum { | |||
398 | ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ | 398 | ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ |
399 | ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ | 399 | ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ |
400 | ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ | 400 | ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ |
401 | ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ | ||
401 | 402 | ||
402 | /* DMA mask for user DMA control: User visible values; DO NOT | 403 | /* DMA mask for user DMA control: User visible values; DO NOT |
403 | renumber */ | 404 | renumber */ |
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index 5b18ecde69b5..1aa4f13cdfa6 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h | |||
@@ -106,6 +106,29 @@ enum max77693_muic_reg { | |||
106 | MAX77693_MUIC_REG_END, | 106 | MAX77693_MUIC_REG_END, |
107 | }; | 107 | }; |
108 | 108 | ||
109 | /* MAX77693 INTMASK1~2 Register */ | ||
110 | #define INTMASK1_ADC1K_SHIFT 3 | ||
111 | #define INTMASK1_ADCERR_SHIFT 2 | ||
112 | #define INTMASK1_ADCLOW_SHIFT 1 | ||
113 | #define INTMASK1_ADC_SHIFT 0 | ||
114 | #define INTMASK1_ADC1K_MASK (1 << INTMASK1_ADC1K_SHIFT) | ||
115 | #define INTMASK1_ADCERR_MASK (1 << INTMASK1_ADCERR_SHIFT) | ||
116 | #define INTMASK1_ADCLOW_MASK (1 << INTMASK1_ADCLOW_SHIFT) | ||
117 | #define INTMASK1_ADC_MASK (1 << INTMASK1_ADC_SHIFT) | ||
118 | |||
119 | #define INTMASK2_VIDRM_SHIFT 5 | ||
120 | #define INTMASK2_VBVOLT_SHIFT 4 | ||
121 | #define INTMASK2_DXOVP_SHIFT 3 | ||
122 | #define INTMASK2_DCDTMR_SHIFT 2 | ||
123 | #define INTMASK2_CHGDETRUN_SHIFT 1 | ||
124 | #define INTMASK2_CHGTYP_SHIFT 0 | ||
125 | #define INTMASK2_VIDRM_MASK (1 << INTMASK2_VIDRM_SHIFT) | ||
126 | #define INTMASK2_VBVOLT_MASK (1 << INTMASK2_VBVOLT_SHIFT) | ||
127 | #define INTMASK2_DXOVP_MASK (1 << INTMASK2_DXOVP_SHIFT) | ||
128 | #define INTMASK2_DCDTMR_MASK (1 << INTMASK2_DCDTMR_SHIFT) | ||
129 | #define INTMASK2_CHGDETRUN_MASK (1 << INTMASK2_CHGDETRUN_SHIFT) | ||
130 | #define INTMASK2_CHGTYP_MASK (1 << INTMASK2_CHGTYP_SHIFT) | ||
131 | |||
109 | /* MAX77693 MUIC - STATUS1~3 Register */ | 132 | /* MAX77693 MUIC - STATUS1~3 Register */ |
110 | #define STATUS1_ADC_SHIFT (0) | 133 | #define STATUS1_ADC_SHIFT (0) |
111 | #define STATUS1_ADCLOW_SHIFT (5) | 134 | #define STATUS1_ADCLOW_SHIFT (5) |
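The new INTMASK1/INTMASK2 definitions follow the usual SHIFT/MASK pairing, so a driver can set, clear, and test a single interrupt-mask bit without open-coding shifts. A small usage sketch reusing one macro pair (whether a set bit masks or enables the interrupt is a datasheet detail we don't assert here):

    #include <stdio.h>

    #define INTMASK1_ADCLOW_SHIFT   1
    #define INTMASK1_ADCLOW_MASK    (1 << INTMASK1_ADCLOW_SHIFT)

    int main(void)
    {
            unsigned char reg = 0x00;       /* shadow of the INTMASK1 register */

            reg |= INTMASK1_ADCLOW_MASK;    /* set the ADCLOW field */
            printf("ADCLOW bit: %d\n", !!(reg & INTMASK1_ADCLOW_MASK));

            reg &= (unsigned char)~INTMASK1_ADCLOW_MASK;    /* clear it again */
            printf("ADCLOW bit: %d\n", !!(reg & INTMASK1_ADCLOW_MASK));
            return 0;
    }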
diff --git a/include/linux/mm.h b/include/linux/mm.h index 7acc9dc73c9f..e19ff30ad0a2 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -87,7 +87,6 @@ extern unsigned int kobjsize(const void *objp); | |||
87 | #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ | 87 | #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ |
88 | #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ | 88 | #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ |
89 | 89 | ||
90 | #define VM_POPULATE 0x00001000 | ||
91 | #define VM_LOCKED 0x00002000 | 90 | #define VM_LOCKED 0x00002000 |
92 | #define VM_IO 0x00004000 /* Memory mapped I/O or similar */ | 91 | #define VM_IO 0x00004000 /* Memory mapped I/O or similar */ |
93 | 92 | ||
diff --git a/include/linux/mman.h b/include/linux/mman.h index 61c7a87e5d2b..9aa863da287f 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h | |||
@@ -79,8 +79,6 @@ calc_vm_flag_bits(unsigned long flags) | |||
79 | { | 79 | { |
80 | return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | | 80 | return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | |
81 | _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | | 81 | _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | |
82 | ((flags & MAP_LOCKED) ? (VM_LOCKED | VM_POPULATE) : 0) | | 82 | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ); |
83 | (((flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE) ? | ||
84 | VM_POPULATE : 0); | ||
85 | } | 83 | } |
86 | #endif /* _LINUX_MMAN_H */ | 84 | #endif /* _LINUX_MMAN_H */ |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ede274957e05..c74092eebf5c 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -527,7 +527,7 @@ static inline int zone_is_oom_locked(const struct zone *zone) | |||
527 | return test_bit(ZONE_OOM_LOCKED, &zone->flags); | 527 | return test_bit(ZONE_OOM_LOCKED, &zone->flags); |
528 | } | 528 | } |
529 | 529 | ||
530 | static inline unsigned zone_end_pfn(const struct zone *zone) | 530 | static inline unsigned long zone_end_pfn(const struct zone *zone) |
531 | { | 531 | { |
532 | return zone->zone_start_pfn + zone->spanned_pages; | 532 | return zone->zone_start_pfn + zone->spanned_pages; |
533 | } | 533 | } |
diff --git a/include/linux/mount.h b/include/linux/mount.h index d7029f4a191a..73005f9957ea 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h | |||
@@ -47,6 +47,8 @@ struct mnt_namespace; | |||
47 | 47 | ||
48 | #define MNT_INTERNAL 0x4000 | 48 | #define MNT_INTERNAL 0x4000 |
49 | 49 | ||
50 | #define MNT_LOCK_READONLY 0x400000 | ||
51 | |||
50 | struct vfsmount { | 52 | struct vfsmount { |
51 | struct dentry *mnt_root; /* root of the mounted tree */ | 53 | struct dentry *mnt_root; /* root of the mounted tree */ |
52 | struct super_block *mnt_sb; /* pointer to superblock */ | 54 | struct super_block *mnt_sb; /* pointer to superblock */ |
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 7ccb3c59ed60..ef52d9c91459 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
@@ -187,6 +187,13 @@ typedef enum { | |||
187 | * This happens with the Renesas AG-AND chips, possibly others. | 187 | * This happens with the Renesas AG-AND chips, possibly others. |
188 | */ | 188 | */ |
189 | #define BBT_AUTO_REFRESH 0x00000080 | 189 | #define BBT_AUTO_REFRESH 0x00000080 |
190 | /* | ||
191 | * Chip requires ready check on read (for auto-incremented sequential read). | ||
192 | * True only for small page devices; large page devices do not support | ||
193 | * autoincrement. | ||
194 | */ | ||
195 | #define NAND_NEED_READRDY 0x00000100 | ||
196 | |||
190 | /* Chip does not allow subpage writes */ | 197 | /* Chip does not allow subpage writes */ |
191 | #define NAND_NO_SUBPAGE_WRITE 0x00000200 | 198 | #define NAND_NO_SUBPAGE_WRITE 0x00000200 |
192 | 199 | ||
diff --git a/include/linux/mxsfb.h b/include/linux/mxsfb.h index f14943d55315..f80af8674342 100644 --- a/include/linux/mxsfb.h +++ b/include/linux/mxsfb.h | |||
@@ -24,8 +24,8 @@ | |||
24 | #define STMLCDIF_18BIT 2 /** pixel data bus to the display is of 18 bit width */ | 24 | #define STMLCDIF_18BIT 2 /** pixel data bus to the display is of 18 bit width */ |
25 | #define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */ | 25 | #define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */ |
26 | 26 | ||
27 | #define FB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6) | 27 | #define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6) |
28 | #define FB_SYNC_DOTCLK_FAILING_ACT (1 << 7) /* failing/negtive edge sampling */ | 28 | #define MXSFB_SYNC_DOTCLK_FAILING_ACT (1 << 7) /* failing/negtive edge sampling */ |
29 | 29 | ||
30 | struct mxsfb_platform_data { | 30 | struct mxsfb_platform_data { |
31 | struct fb_videomode *mode_list; | 31 | struct fb_videomode *mode_list; |
@@ -44,6 +44,9 @@ struct mxsfb_platform_data { | |||
44 | * allocated. If specified,fb_size must also be specified. | 44 | * allocated. If specified,fb_size must also be specified. |
45 | * fb_phys must be unused by Linux. | 45 | * fb_phys must be unused by Linux. |
46 | */ | 46 | */ |
47 | u32 sync; /* sync mask, contains MXSFB specifics not | ||
48 | * carried in fb_info->var.sync | ||
49 | */ | ||
47 | }; | 50 | }; |
48 | 51 | ||
49 | #endif /* __LINUX_MXSFB_H */ | 52 | #endif /* __LINUX_MXSFB_H */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b3d00fa4b314..6151e903eef0 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -210,9 +210,9 @@ struct netdev_hw_addr { | |||
210 | #define NETDEV_HW_ADDR_T_SLAVE 3 | 210 | #define NETDEV_HW_ADDR_T_SLAVE 3 |
211 | #define NETDEV_HW_ADDR_T_UNICAST 4 | 211 | #define NETDEV_HW_ADDR_T_UNICAST 4 |
212 | #define NETDEV_HW_ADDR_T_MULTICAST 5 | 212 | #define NETDEV_HW_ADDR_T_MULTICAST 5 |
213 | bool synced; | ||
214 | bool global_use; | 213 | bool global_use; |
215 | int refcount; | 214 | int refcount; |
215 | int synced; | ||
216 | struct rcu_head rcu_head; | 216 | struct rcu_head rcu_head; |
217 | }; | 217 | }; |
218 | 218 | ||
@@ -895,7 +895,7 @@ struct netdev_fcoe_hbainfo { | |||
895 | * | 895 | * |
896 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) | 896 | * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) |
897 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, | 897 | * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, |
898 | * struct net_device *dev) | 898 | * struct net_device *dev, u32 filter_mask) |
899 | * | 899 | * |
900 | * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); | 900 | * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); |
901 | * Called to change device carrier. Soft-devices (like dummy, team, etc) | 901 | * Called to change device carrier. Soft-devices (like dummy, team, etc) |
diff --git a/include/linux/nvme.h b/include/linux/nvme.h index c25cccaa555a..4fa3b0b9b071 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h | |||
@@ -137,6 +137,34 @@ enum { | |||
137 | NVME_LBAF_RP_DEGRADED = 3, | 137 | NVME_LBAF_RP_DEGRADED = 3, |
138 | }; | 138 | }; |
139 | 139 | ||
140 | struct nvme_smart_log { | ||
141 | __u8 critical_warning; | ||
142 | __u8 temperature[2]; | ||
143 | __u8 avail_spare; | ||
144 | __u8 spare_thresh; | ||
145 | __u8 percent_used; | ||
146 | __u8 rsvd6[26]; | ||
147 | __u8 data_units_read[16]; | ||
148 | __u8 data_units_written[16]; | ||
149 | __u8 host_reads[16]; | ||
150 | __u8 host_writes[16]; | ||
151 | __u8 ctrl_busy_time[16]; | ||
152 | __u8 power_cycles[16]; | ||
153 | __u8 power_on_hours[16]; | ||
154 | __u8 unsafe_shutdowns[16]; | ||
155 | __u8 media_errors[16]; | ||
156 | __u8 num_err_log_entries[16]; | ||
157 | __u8 rsvd192[320]; | ||
158 | }; | ||
159 | |||
160 | enum { | ||
161 | NVME_SMART_CRIT_SPARE = 1 << 0, | ||
162 | NVME_SMART_CRIT_TEMPERATURE = 1 << 1, | ||
163 | NVME_SMART_CRIT_RELIABILITY = 1 << 2, | ||
164 | NVME_SMART_CRIT_MEDIA = 1 << 3, | ||
165 | NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4, | ||
166 | }; | ||
167 | |||
140 | struct nvme_lba_range_type { | 168 | struct nvme_lba_range_type { |
141 | __u8 type; | 169 | __u8 type; |
142 | __u8 attributes; | 170 | __u8 attributes; |
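The new nvme_smart_log layout stores its counters as 16-byte little-endian integers, and critical_warning decodes against the NVME_SMART_CRIT_* bits added just below it. For display the low 64 bits of a counter are usually enough; a small helper of our own (not part of the header) to pull them out:

    #include <stdint.h>
    #include <stdio.h>

    /* Assemble the low 64 bits of a 16-byte little-endian counter. */
    static uint64_t le128_lo64(const uint8_t b[16])
    {
            uint64_t v = 0;
            int i;

            for (i = 7; i >= 0; i--)
                    v = (v << 8) | b[i];
            return v;
    }

    int main(void)
    {
            uint8_t data_units_read[16] = { 0x39, 0x30 };   /* 0x3039 = 12345 */

            printf("data units read: %llu\n",
                   (unsigned long long)le128_lo64(data_units_read));
            return 0;
    }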
diff --git a/include/linux/pci.h b/include/linux/pci.h index 2461033a7987..710067f3618c 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -916,6 +916,7 @@ void pci_disable_rom(struct pci_dev *pdev); | |||
916 | void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); | 916 | void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); |
917 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); | 917 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); |
918 | size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size); | 918 | size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size); |
919 | void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size); | ||
919 | 920 | ||
920 | /* Power management related routines */ | 921 | /* Power management related routines */ |
921 | int pci_save_state(struct pci_dev *dev); | 922 | int pci_save_state(struct pci_dev *dev); |
diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 5a710b9c578e..87a03c746f17 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h | |||
@@ -93,14 +93,20 @@ do { \ | |||
93 | 93 | ||
94 | #else /* !CONFIG_PREEMPT_COUNT */ | 94 | #else /* !CONFIG_PREEMPT_COUNT */ |
95 | 95 | ||
96 | #define preempt_disable() do { } while (0) | 96 | /* |
97 | #define sched_preempt_enable_no_resched() do { } while (0) | 97 | * Even if we don't have any preemption, we need preempt disable/enable |
98 | #define preempt_enable_no_resched() do { } while (0) | 98 | * to be barriers, so that we don't have things like get_user/put_user |
99 | #define preempt_enable() do { } while (0) | 99 | * that can cause faults and scheduling migrate into our preempt-protected |
100 | 100 | * region. | |
101 | #define preempt_disable_notrace() do { } while (0) | 101 | */ |
102 | #define preempt_enable_no_resched_notrace() do { } while (0) | 102 | #define preempt_disable() barrier() |
103 | #define preempt_enable_notrace() do { } while (0) | 103 | #define sched_preempt_enable_no_resched() barrier() |
104 | #define preempt_enable_no_resched() barrier() | ||
105 | #define preempt_enable() barrier() | ||
106 | |||
107 | #define preempt_disable_notrace() barrier() | ||
108 | #define preempt_enable_no_resched_notrace() barrier() | ||
109 | #define preempt_enable_notrace() barrier() | ||
104 | 110 | ||
105 | #endif /* CONFIG_PREEMPT_COUNT */ | 111 | #endif /* CONFIG_PREEMPT_COUNT */ |
106 | 112 | ||
diff --git a/include/linux/printk.h b/include/linux/printk.h index 1249a54d17e0..822171fcb1c8 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h | |||
@@ -134,6 +134,8 @@ extern int printk_delay_msec; | |||
134 | extern int dmesg_restrict; | 134 | extern int dmesg_restrict; |
135 | extern int kptr_restrict; | 135 | extern int kptr_restrict; |
136 | 136 | ||
137 | extern void wake_up_klogd(void); | ||
138 | |||
137 | void log_buf_kexec_setup(void); | 139 | void log_buf_kexec_setup(void); |
138 | void __init setup_log_buf(int early); | 140 | void __init setup_log_buf(int early); |
139 | #else | 141 | #else |
@@ -162,6 +164,10 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, | |||
162 | return false; | 164 | return false; |
163 | } | 165 | } |
164 | 166 | ||
167 | static inline void wake_up_klogd(void) | ||
168 | { | ||
169 | } | ||
170 | |||
165 | static inline void log_buf_kexec_setup(void) | 171 | static inline void log_buf_kexec_setup(void) |
166 | { | 172 | { |
167 | } | 173 | } |
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 8307f2f94d86..94dfb2aa5533 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
@@ -117,6 +117,7 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, | |||
117 | const struct file_operations *proc_fops, | 117 | const struct file_operations *proc_fops, |
118 | void *data); | 118 | void *data); |
119 | extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); | 119 | extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); |
120 | extern int remove_proc_subtree(const char *name, struct proc_dir_entry *parent); | ||
120 | 121 | ||
121 | struct pid_namespace; | 122 | struct pid_namespace; |
122 | 123 | ||
@@ -202,6 +203,7 @@ static inline struct proc_dir_entry *proc_create_data(const char *name, | |||
202 | return NULL; | 203 | return NULL; |
203 | } | 204 | } |
204 | #define remove_proc_entry(name, parent) do {} while (0) | 205 | #define remove_proc_entry(name, parent) do {} while (0) |
206 | #define remove_proc_subtree(name, parent) do {} while (0) | ||
205 | 207 | ||
206 | static inline struct proc_dir_entry *proc_symlink(const char *name, | 208 | static inline struct proc_dir_entry *proc_symlink(const char *name, |
207 | struct proc_dir_entry *parent,const char *dest) {return NULL;} | 209 | struct proc_dir_entry *parent,const char *dest) {return NULL;} |
diff --git a/include/linux/security.h b/include/linux/security.h index eee7478cda70..032c366ef1c6 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -1012,6 +1012,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
1012 | * This hook can be used by the module to update any security state | 1012 | * This hook can be used by the module to update any security state |
1013 | * associated with the TUN device's security structure. | 1013 | * associated with the TUN device's security structure. |
1014 | * @security pointer to the TUN devices's security structure. | 1014 | * @security pointer to the TUN devices's security structure. |
1015 | * @skb_owned_by: | ||
1016 | * This hook sets the packet's owning sock. | ||
1017 | * @skb is the packet. | ||
1018 | * @sk the sock which owns the packet. | ||
1015 | * | 1019 | * |
1016 | * Security hooks for XFRM operations. | 1020 | * Security hooks for XFRM operations. |
1017 | * | 1021 | * |
@@ -1638,6 +1642,7 @@ struct security_operations { | |||
1638 | int (*tun_dev_attach_queue) (void *security); | 1642 | int (*tun_dev_attach_queue) (void *security); |
1639 | int (*tun_dev_attach) (struct sock *sk, void *security); | 1643 | int (*tun_dev_attach) (struct sock *sk, void *security); |
1640 | int (*tun_dev_open) (void *security); | 1644 | int (*tun_dev_open) (void *security); |
1645 | void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk); | ||
1641 | #endif /* CONFIG_SECURITY_NETWORK */ | 1646 | #endif /* CONFIG_SECURITY_NETWORK */ |
1642 | 1647 | ||
1643 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 1648 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
@@ -2588,6 +2593,8 @@ int security_tun_dev_attach_queue(void *security); | |||
2588 | int security_tun_dev_attach(struct sock *sk, void *security); | 2593 | int security_tun_dev_attach(struct sock *sk, void *security); |
2589 | int security_tun_dev_open(void *security); | 2594 | int security_tun_dev_open(void *security); |
2590 | 2595 | ||
2596 | void security_skb_owned_by(struct sk_buff *skb, struct sock *sk); | ||
2597 | |||
2591 | #else /* CONFIG_SECURITY_NETWORK */ | 2598 | #else /* CONFIG_SECURITY_NETWORK */ |
2592 | static inline int security_unix_stream_connect(struct sock *sock, | 2599 | static inline int security_unix_stream_connect(struct sock *sock, |
2593 | struct sock *other, | 2600 | struct sock *other, |
@@ -2779,6 +2786,11 @@ static inline int security_tun_dev_open(void *security) | |||
2779 | { | 2786 | { |
2780 | return 0; | 2787 | return 0; |
2781 | } | 2788 | } |
2789 | |||
2790 | static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) | ||
2791 | { | ||
2792 | } | ||
2793 | |||
2782 | #endif /* CONFIG_SECURITY_NETWORK */ | 2794 | #endif /* CONFIG_SECURITY_NETWORK */ |
2783 | 2795 | ||
2784 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 2796 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
diff --git a/include/linux/signal.h b/include/linux/signal.h index a2dcb94ea49d..9475c5cb28bc 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
@@ -250,11 +250,11 @@ extern int show_unhandled_signals; | |||
250 | extern int sigsuspend(sigset_t *); | 250 | extern int sigsuspend(sigset_t *); |
251 | 251 | ||
252 | struct sigaction { | 252 | struct sigaction { |
253 | #ifndef __ARCH_HAS_ODD_SIGACTION | 253 | #ifndef __ARCH_HAS_IRIX_SIGACTION |
254 | __sighandler_t sa_handler; | 254 | __sighandler_t sa_handler; |
255 | unsigned long sa_flags; | 255 | unsigned long sa_flags; |
256 | #else | 256 | #else |
257 | unsigned long sa_flags; | 257 | unsigned int sa_flags; |
258 | __sighandler_t sa_handler; | 258 | __sighandler_t sa_handler; |
259 | #endif | 259 | #endif |
260 | #ifdef __ARCH_HAS_SA_RESTORER | 260 | #ifdef __ARCH_HAS_SA_RESTORER |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 821c7f45d2a7..b8292d8cc9fa 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -500,7 +500,7 @@ struct sk_buff { | |||
500 | union { | 500 | union { |
501 | __u32 mark; | 501 | __u32 mark; |
502 | __u32 dropcount; | 502 | __u32 dropcount; |
503 | __u32 avail_size; | 503 | __u32 reserved_tailroom; |
504 | }; | 504 | }; |
505 | 505 | ||
506 | sk_buff_data_t inner_transport_header; | 506 | sk_buff_data_t inner_transport_header; |
@@ -1288,11 +1288,13 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, | |||
1288 | * do not lose pfmemalloc information as the pages would not be | 1288 | * do not lose pfmemalloc information as the pages would not be |
1289 | * allocated using __GFP_MEMALLOC. | 1289 | * allocated using __GFP_MEMALLOC. |
1290 | */ | 1290 | */ |
1291 | if (page->pfmemalloc && !page->mapping) | ||
1292 | skb->pfmemalloc = true; | ||
1293 | frag->page.p = page; | 1291 | frag->page.p = page; |
1294 | frag->page_offset = off; | 1292 | frag->page_offset = off; |
1295 | skb_frag_size_set(frag, size); | 1293 | skb_frag_size_set(frag, size); |
1294 | |||
1295 | page = compound_head(page); | ||
1296 | if (page->pfmemalloc && !page->mapping) | ||
1297 | skb->pfmemalloc = true; | ||
1296 | } | 1298 | } |
1297 | 1299 | ||
1298 | /** | 1300 | /** |
@@ -1447,7 +1449,10 @@ static inline int skb_tailroom(const struct sk_buff *skb) | |||
1447 | */ | 1449 | */ |
1448 | static inline int skb_availroom(const struct sk_buff *skb) | 1450 | static inline int skb_availroom(const struct sk_buff *skb) |
1449 | { | 1451 | { |
1450 | return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len; | 1452 | if (skb_is_nonlinear(skb)) |
1453 | return 0; | ||
1454 | |||
1455 | return skb->end - skb->tail - skb->reserved_tailroom; | ||
1451 | } | 1456 | } |
1452 | 1457 | ||
1453 | /** | 1458 | /** |
@@ -2638,6 +2643,13 @@ static inline void nf_reset(struct sk_buff *skb) | |||
2638 | #endif | 2643 | #endif |
2639 | } | 2644 | } |
2640 | 2645 | ||
2646 | static inline void nf_reset_trace(struct sk_buff *skb) | ||
2647 | { | ||
2648 | #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) | ||
2649 | skb->nf_trace = 0; | ||
2650 | #endif | ||
2651 | } | ||
2652 | |||
2641 | /* Note: This doesn't put any conntrack and bridge info in dst. */ | 2653 | /* Note: This doesn't put any conntrack and bridge info in dst. */ |
2642 | static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) | 2654 | static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) |
2643 | { | 2655 | { |
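In the skbuff hunk, skb_availroom() now reports end - tail - reserved_tailroom, i.e. the linear tailroom minus an explicitly reserved slice, instead of deriving it from the old avail_size field. The arithmetic by itself, with made-up buffer geometry:

    #include <stdio.h>

    int main(void)
    {
            /* Toy geometry: room callers may use = end - tail - reserved. */
            unsigned int end = 2048, tail = 1500, reserved_tailroom = 128;

            printf("availroom = %u\n", end - tail - reserved_tailroom);  /* 420 */
            return 0;
    }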
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index a26e2fb604e6..e2369c167dbd 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h | |||
@@ -16,7 +16,10 @@ | |||
16 | * In the debug case, 1 means unlocked, 0 means locked. (the values | 16 | * In the debug case, 1 means unlocked, 0 means locked. (the values |
17 | * are inverted, to catch initialization bugs) | 17 | * are inverted, to catch initialization bugs) |
18 | * | 18 | * |
19 | * No atomicity anywhere, we are on UP. | 19 | * No atomicity anywhere, we are on UP. However, we still need |
20 | * the compiler barriers, because we do not want the compiler to | ||
21 | * move potentially faulting instructions (notably user accesses) | ||
22 | * into the locked sequence, resulting in non-atomic execution. | ||
20 | */ | 23 | */ |
21 | 24 | ||
22 | #ifdef CONFIG_DEBUG_SPINLOCK | 25 | #ifdef CONFIG_DEBUG_SPINLOCK |
@@ -25,6 +28,7 @@ | |||
25 | static inline void arch_spin_lock(arch_spinlock_t *lock) | 28 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
26 | { | 29 | { |
27 | lock->slock = 0; | 30 | lock->slock = 0; |
31 | barrier(); | ||
28 | } | 32 | } |
29 | 33 | ||
30 | static inline void | 34 | static inline void |
@@ -32,6 +36,7 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) | |||
32 | { | 36 | { |
33 | local_irq_save(flags); | 37 | local_irq_save(flags); |
34 | lock->slock = 0; | 38 | lock->slock = 0; |
39 | barrier(); | ||
35 | } | 40 | } |
36 | 41 | ||
37 | static inline int arch_spin_trylock(arch_spinlock_t *lock) | 42 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
@@ -39,32 +44,34 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) | |||
39 | char oldval = lock->slock; | 44 | char oldval = lock->slock; |
40 | 45 | ||
41 | lock->slock = 0; | 46 | lock->slock = 0; |
47 | barrier(); | ||
42 | 48 | ||
43 | return oldval > 0; | 49 | return oldval > 0; |
44 | } | 50 | } |
45 | 51 | ||
46 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | 52 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
47 | { | 53 | { |
54 | barrier(); | ||
48 | lock->slock = 1; | 55 | lock->slock = 1; |
49 | } | 56 | } |
50 | 57 | ||
51 | /* | 58 | /* |
52 | * Read-write spinlocks. No debug version. | 59 | * Read-write spinlocks. No debug version. |
53 | */ | 60 | */ |
54 | #define arch_read_lock(lock) do { (void)(lock); } while (0) | 61 | #define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0) |
55 | #define arch_write_lock(lock) do { (void)(lock); } while (0) | 62 | #define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0) |
56 | #define arch_read_trylock(lock) ({ (void)(lock); 1; }) | 63 | #define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; }) |
57 | #define arch_write_trylock(lock) ({ (void)(lock); 1; }) | 64 | #define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; }) |
58 | #define arch_read_unlock(lock) do { (void)(lock); } while (0) | 65 | #define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0) |
59 | #define arch_write_unlock(lock) do { (void)(lock); } while (0) | 66 | #define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0) |
60 | 67 | ||
61 | #else /* DEBUG_SPINLOCK */ | 68 | #else /* DEBUG_SPINLOCK */ |
62 | #define arch_spin_is_locked(lock) ((void)(lock), 0) | 69 | #define arch_spin_is_locked(lock) ((void)(lock), 0) |
63 | /* for sched.c and kernel_lock.c: */ | 70 | /* for sched.c and kernel_lock.c: */ |
64 | # define arch_spin_lock(lock) do { (void)(lock); } while (0) | 71 | # define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0) |
65 | # define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) | 72 | # define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0) |
66 | # define arch_spin_unlock(lock) do { (void)(lock); } while (0) | 73 | # define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0) |
67 | # define arch_spin_trylock(lock) ({ (void)(lock); 1; }) | 74 | # define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; }) |
68 | #endif /* DEBUG_SPINLOCK */ | 75 | #endif /* DEBUG_SPINLOCK */ |
69 | 76 | ||
70 | #define arch_spin_is_contended(lock) (((void)(lock), 0)) | 77 | #define arch_spin_is_contended(lock) (((void)(lock), 0)) |
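The spinlock_up.h hunk inserts barrier() on both sides of each UP lock operation: there is still no CPU-level atomicity, but the compiler must not move protected accesses (user accesses in particular) outside the locked region. A userspace rendition of that shape, using a GCC-style compiler barrier; the toy lock names are ours:

    #include <stdio.h>

    /* Pure compiler barrier, same idea as the kernel's barrier(). */
    #define barrier()       __asm__ __volatile__("" ::: "memory")

    static volatile int slock = 1;
    static int shared;

    static void toy_spin_lock(void)   { slock = 0; barrier(); }
    static void toy_spin_unlock(void) { barrier(); slock = 1; }

    int main(void)
    {
            toy_spin_lock();
            shared++;               /* kept between the barriers by the compiler */
            toy_spin_unlock();
            printf("shared = %d\n", shared);
            return 0;
    }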
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index f0bd7f90a90d..e3c0ae9bb1fa 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
@@ -44,7 +44,7 @@ | |||
44 | /* Adding event notification support elements */ | 44 | /* Adding event notification support elements */ |
45 | #define THERMAL_GENL_FAMILY_NAME "thermal_event" | 45 | #define THERMAL_GENL_FAMILY_NAME "thermal_event" |
46 | #define THERMAL_GENL_VERSION 0x01 | 46 | #define THERMAL_GENL_VERSION 0x01 |
47 | #define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_group" | 47 | #define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp" |
48 | 48 | ||
49 | /* Default Thermal Governor */ | 49 | /* Default Thermal Governor */ |
50 | #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) | 50 | #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) |
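The thermal.h rename looks cosmetic but is presumably a length fix: generic netlink group names live in a fixed-size char[GENL_NAMSIZ] field (16 bytes in the kernel headers we're assuming here), and "thermal_mc_group" needs 17 bytes including the terminating NUL while "thermal_mc_grp" fits. A quick arithmetic check:

    #include <stdio.h>
    #include <string.h>

    #define GENL_NAMSIZ 16  /* assumed limit, including the terminating NUL */

    int main(void)
    {
            printf("old name needs %zu bytes, new name needs %zu, limit is %d\n",
                   strlen("thermal_mc_group") + 1,
                   strlen("thermal_mc_grp") + 1,
                   GENL_NAMSIZ);
            return 0;
    }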
diff --git a/include/linux/udp.h b/include/linux/udp.h index 9d81de123c90..42278bbf7a88 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h | |||
@@ -68,6 +68,7 @@ struct udp_sock { | |||
68 | * For encapsulation sockets. | 68 | * For encapsulation sockets. |
69 | */ | 69 | */ |
70 | int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); | 70 | int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); |
71 | void (*encap_destroy)(struct sock *sk); | ||
71 | }; | 72 | }; |
72 | 73 | ||
73 | static inline struct udp_sock *udp_sk(const struct sock *sk) | 74 | static inline struct udp_sock *udp_sk(const struct sock *sk) |
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index 3b8f9d4fc3fe..cc25b70af33c 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h | |||
@@ -127,6 +127,7 @@ struct cdc_ncm_ctx { | |||
127 | u16 connected; | 127 | u16 connected; |
128 | }; | 128 | }; |
129 | 129 | ||
130 | extern u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf); | ||
130 | extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); | 131 | extern int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting); |
131 | extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); | 132 | extern void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf); |
132 | extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign); | 133 | extern struct sk_buff *cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign); |
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 0a78df5f6cfd..59694b5e5e90 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h | |||
@@ -357,6 +357,7 @@ struct hc_driver { | |||
357 | */ | 357 | */ |
358 | int (*disable_usb3_lpm_timeout)(struct usb_hcd *, | 358 | int (*disable_usb3_lpm_timeout)(struct usb_hcd *, |
359 | struct usb_device *, enum usb3_link_state state); | 359 | struct usb_device *, enum usb3_link_state state); |
360 | int (*find_raw_port_number)(struct usb_hcd *, int); | ||
360 | }; | 361 | }; |
361 | 362 | ||
362 | extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); | 363 | extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); |
@@ -396,6 +397,7 @@ extern int usb_hcd_is_primary_hcd(struct usb_hcd *hcd); | |||
396 | extern int usb_add_hcd(struct usb_hcd *hcd, | 397 | extern int usb_add_hcd(struct usb_hcd *hcd, |
397 | unsigned int irqnum, unsigned long irqflags); | 398 | unsigned int irqnum, unsigned long irqflags); |
398 | extern void usb_remove_hcd(struct usb_hcd *hcd); | 399 | extern void usb_remove_hcd(struct usb_hcd *hcd); |
400 | extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1); | ||
399 | 401 | ||
400 | struct platform_device; | 402 | struct platform_device; |
401 | extern void usb_hcd_platform_shutdown(struct platform_device *dev); | 403 | extern void usb_hcd_platform_shutdown(struct platform_device *dev); |
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index ef9be7e1e190..1819b59aab2a 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h | |||
@@ -66,6 +66,7 @@ | |||
66 | * port. | 66 | * port. |
67 | * @flags: usb serial port flags | 67 | * @flags: usb serial port flags |
68 | * @write_wait: a wait_queue_head_t used by the port. | 68 | * @write_wait: a wait_queue_head_t used by the port. |
69 | * @delta_msr_wait: modem-status-change wait queue | ||
69 | * @work: work queue entry for the line discipline waking up. | 70 | * @work: work queue entry for the line discipline waking up. |
70 | * @throttled: nonzero if the read urb is inactive to throttle the device | 71 | * @throttled: nonzero if the read urb is inactive to throttle the device |
71 | * @throttle_req: nonzero if the tty wants to throttle us | 72 | * @throttle_req: nonzero if the tty wants to throttle us |
@@ -112,6 +113,7 @@ struct usb_serial_port { | |||
112 | 113 | ||
113 | unsigned long flags; | 114 | unsigned long flags; |
114 | wait_queue_head_t write_wait; | 115 | wait_queue_head_t write_wait; |
116 | wait_queue_head_t delta_msr_wait; | ||
115 | struct work_struct work; | 117 | struct work_struct work; |
116 | char throttled; | 118 | char throttled; |
117 | char throttle_req; | 119 | char throttle_req; |
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h index 6f033a415ecb..5c295c26ad37 100644 --- a/include/linux/usb/ulpi.h +++ b/include/linux/usb/ulpi.h | |||
@@ -181,8 +181,16 @@ | |||
181 | 181 | ||
182 | /*-------------------------------------------------------------------------*/ | 182 | /*-------------------------------------------------------------------------*/ |
183 | 183 | ||
184 | #if IS_ENABLED(CONFIG_USB_ULPI) | ||
184 | struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, | 185 | struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, |
185 | unsigned int flags); | 186 | unsigned int flags); |
187 | #else | ||
188 | static inline struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops, | ||
189 | unsigned int flags) | ||
190 | { | ||
191 | return NULL; | ||
192 | } | ||
193 | #endif | ||
186 | 194 | ||
187 | #ifdef CONFIG_USB_ULPI_VIEWPORT | 195 | #ifdef CONFIG_USB_ULPI_VIEWPORT |
188 | /* access ops for controllers with a viewport register */ | 196 | /* access ops for controllers with a viewport register */ |
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index 4ce009324933..b6b215f13b45 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h | |||
@@ -26,6 +26,8 @@ struct user_namespace { | |||
26 | kuid_t owner; | 26 | kuid_t owner; |
27 | kgid_t group; | 27 | kgid_t group; |
28 | unsigned int proc_inum; | 28 | unsigned int proc_inum; |
29 | bool may_mount_sysfs; | ||
30 | bool may_mount_proc; | ||
29 | }; | 31 | }; |
30 | 32 | ||
31 | extern struct user_namespace init_user_ns; | 33 | extern struct user_namespace init_user_ns; |
@@ -82,4 +84,6 @@ static inline void put_user_ns(struct user_namespace *ns) | |||
82 | 84 | ||
83 | #endif | 85 | #endif |
84 | 86 | ||
87 | void update_mnt_policy(struct user_namespace *userns); | ||
88 | |||
85 | #endif /* _LINUX_USER_H */ | 89 | #endif /* _LINUX_USER_H */ |
diff --git a/include/net/dst.h b/include/net/dst.h index 853cda11e518..1f8fd109e225 100644 --- a/include/net/dst.h +++ b/include/net/dst.h | |||
@@ -413,13 +413,15 @@ static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, | |||
413 | 413 | ||
414 | static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) | 414 | static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr) |
415 | { | 415 | { |
416 | return dst->ops->neigh_lookup(dst, NULL, daddr); | 416 | struct neighbour *n = dst->ops->neigh_lookup(dst, NULL, daddr); |
417 | return IS_ERR(n) ? NULL : n; | ||
417 | } | 418 | } |
418 | 419 | ||
419 | static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, | 420 | static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst, |
420 | struct sk_buff *skb) | 421 | struct sk_buff *skb) |
421 | { | 422 | { |
422 | return dst->ops->neigh_lookup(dst, skb, NULL); | 423 | struct neighbour *n = dst->ops->neigh_lookup(dst, skb, NULL); |
424 | return IS_ERR(n) ? NULL : n; | ||
423 | } | 425 | } |
424 | 426 | ||
425 | static inline void dst_link_failure(struct sk_buff *skb) | 427 | static inline void dst_link_failure(struct sk_buff *skb) |
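With the neigh_lookup() method now able to return ERR_PTR() values, the dst.h wrappers above fold errors into NULL so every caller keeps a single test. A hedged caller sketch — dst and daddr are assumed locals and the error code is illustrative:

	struct neighbour *n = dst_neigh_lookup(dst, daddr);

	if (!n)			/* covers both "not found" and ERR_PTR() cases */
		return -EHOSTUNREACH;
	/* ... use the neighbour ... */
	neigh_release(n);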
diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h index 80461c1ae9ef..bb8271d487b7 100644 --- a/include/net/flow_keys.h +++ b/include/net/flow_keys.h | |||
@@ -9,6 +9,7 @@ struct flow_keys { | |||
9 | __be32 ports; | 9 | __be32 ports; |
10 | __be16 port16[2]; | 10 | __be16 port16[2]; |
11 | }; | 11 | }; |
12 | u16 thoff; | ||
12 | u8 ip_proto; | 13 | u8 ip_proto; |
13 | }; | 14 | }; |
14 | 15 | ||
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 76c3fe5ecc2e..0a1dcc2fa2f5 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h | |||
@@ -43,6 +43,13 @@ struct inet_frag_queue { | |||
43 | 43 | ||
44 | #define INETFRAGS_HASHSZ 64 | 44 | #define INETFRAGS_HASHSZ 64 |
45 | 45 | ||
46 | /* averaged: | ||
47 | * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ / | ||
48 | * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or | ||
49 | * struct frag_queue)) | ||
50 | */ | ||
51 | #define INETFRAGS_MAXDEPTH 128 | ||
52 | |||
46 | struct inet_frags { | 53 | struct inet_frags { |
47 | struct hlist_head hash[INETFRAGS_HASHSZ]; | 54 | struct hlist_head hash[INETFRAGS_HASHSZ]; |
48 | /* This rwlock is a global lock (seperate per IPv4, IPv6 and | 55 | /* This rwlock is a global lock (seperate per IPv4, IPv6 and |
@@ -76,6 +83,8 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force); | |||
76 | struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, | 83 | struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, |
77 | struct inet_frags *f, void *key, unsigned int hash) | 84 | struct inet_frags *f, void *key, unsigned int hash) |
78 | __releases(&f->lock); | 85 | __releases(&f->lock); |
86 | void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, | ||
87 | const char *prefix); | ||
79 | 88 | ||
80 | static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) | 89 | static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) |
81 | { | 90 | { |
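The inet_frag changes cap each hash chain at INETFRAGS_MAXDEPTH and add a rate-limited warning helper for the overflow case. A hedged sketch of how an IPv4 lookup path might consume the helper; the ip4_frags table, the arg/hash locals and the NULL fallback are assumptions, not part of this hunk:

	struct inet_frag_queue *q;

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, "IPv4: ");
		return NULL;	/* caller treats this as "drop the fragment" */
	}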
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 9497be1ad4c0..e49db91593a9 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h | |||
@@ -152,18 +152,16 @@ struct fib_result_nl { | |||
152 | }; | 152 | }; |
153 | 153 | ||
154 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 154 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
155 | |||
156 | #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel]) | 155 | #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel]) |
157 | |||
158 | #define FIB_TABLE_HASHSZ 2 | ||
159 | |||
160 | #else /* CONFIG_IP_ROUTE_MULTIPATH */ | 156 | #else /* CONFIG_IP_ROUTE_MULTIPATH */ |
161 | |||
162 | #define FIB_RES_NH(res) ((res).fi->fib_nh[0]) | 157 | #define FIB_RES_NH(res) ((res).fi->fib_nh[0]) |
158 | #endif /* CONFIG_IP_ROUTE_MULTIPATH */ | ||
163 | 159 | ||
160 | #ifdef CONFIG_IP_MULTIPLE_TABLES | ||
164 | #define FIB_TABLE_HASHSZ 256 | 161 | #define FIB_TABLE_HASHSZ 256 |
165 | 162 | #else | |
166 | #endif /* CONFIG_IP_ROUTE_MULTIPATH */ | 163 | #define FIB_TABLE_HASHSZ 2 |
164 | #endif | ||
167 | 165 | ||
168 | extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); | 166 | extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); |
169 | 167 | ||
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 68c69d54d392..fce8e6b66d55 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
@@ -976,6 +976,7 @@ struct netns_ipvs { | |||
976 | int sysctl_sync_retries; | 976 | int sysctl_sync_retries; |
977 | int sysctl_nat_icmp_send; | 977 | int sysctl_nat_icmp_send; |
978 | int sysctl_pmtu_disc; | 978 | int sysctl_pmtu_disc; |
979 | int sysctl_backup_only; | ||
979 | 980 | ||
980 | /* ip_vs_lblc */ | 981 | /* ip_vs_lblc */ |
981 | int sysctl_lblc_expiration; | 982 | int sysctl_lblc_expiration; |
@@ -1067,6 +1068,12 @@ static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs) | |||
1067 | return ipvs->sysctl_pmtu_disc; | 1068 | return ipvs->sysctl_pmtu_disc; |
1068 | } | 1069 | } |
1069 | 1070 | ||
1071 | static inline int sysctl_backup_only(struct netns_ipvs *ipvs) | ||
1072 | { | ||
1073 | return ipvs->sync_state & IP_VS_STATE_BACKUP && | ||
1074 | ipvs->sysctl_backup_only; | ||
1075 | } | ||
1076 | |||
1070 | #else | 1077 | #else |
1071 | 1078 | ||
1072 | static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) | 1079 | static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) |
@@ -1114,6 +1121,11 @@ static inline int sysctl_pmtu_disc(struct netns_ipvs *ipvs) | |||
1114 | return 1; | 1121 | return 1; |
1115 | } | 1122 | } |
1116 | 1123 | ||
1124 | static inline int sysctl_backup_only(struct netns_ipvs *ipvs) | ||
1125 | { | ||
1126 | return 0; | ||
1127 | } | ||
1128 | |||
1117 | #endif | 1129 | #endif |
1118 | 1130 | ||
1119 | /* | 1131 | /* |
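sysctl_backup_only() follows this header's existing accessor convention: a real per-netns lookup when CONFIG_SYSCTL is enabled, a constant 0 otherwise, so call sites never need an #ifdef. A hedged sketch of a packet-path call site; the NF_ACCEPT fallback is an assumption about how a backup-only director behaves, not text from this hunk:

	if (sysctl_backup_only(ipvs))
		/* pure sync backup: do not schedule new connections here */
		return NF_ACCEPT;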
diff --git a/include/net/ipip.h b/include/net/ipip.h index fd19625ff99d..982141c15200 100644 --- a/include/net/ipip.h +++ b/include/net/ipip.h | |||
@@ -77,15 +77,11 @@ static inline void tunnel_ip_select_ident(struct sk_buff *skb, | |||
77 | { | 77 | { |
78 | struct iphdr *iph = ip_hdr(skb); | 78 | struct iphdr *iph = ip_hdr(skb); |
79 | 79 | ||
80 | if (iph->frag_off & htons(IP_DF)) | 80 | /* Use inner packet iph-id if possible. */ |
81 | iph->id = 0; | 81 | if (skb->protocol == htons(ETH_P_IP) && old_iph->id) |
82 | else { | 82 | iph->id = old_iph->id; |
83 | /* Use inner packet iph-id if possible. */ | 83 | else |
84 | if (skb->protocol == htons(ETH_P_IP) && old_iph->id) | 84 | __ip_select_ident(iph, dst, |
85 | iph->id = old_iph->id; | 85 | (skb_shinfo(skb)->gso_segs ?: 1) - 1); |
86 | else | ||
87 | __ip_select_ident(iph, dst, | ||
88 | (skb_shinfo(skb)->gso_segs ?: 1) - 1); | ||
89 | } | ||
90 | } | 86 | } |
91 | #endif | 87 | #endif |
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h index cc7c19732389..714cc9a54a4c 100644 --- a/include/net/iucv/af_iucv.h +++ b/include/net/iucv/af_iucv.h | |||
@@ -130,6 +130,14 @@ struct iucv_sock { | |||
130 | enum iucv_tx_notify n); | 130 | enum iucv_tx_notify n); |
131 | }; | 131 | }; |
132 | 132 | ||
133 | struct iucv_skb_cb { | ||
134 | u32 class; /* target class of message */ | ||
135 | u32 tag; /* tag associated with message */ | ||
136 | u32 offset; /* offset for skb receival */ | ||
137 | }; | ||
138 | |||
139 | #define IUCV_SKB_CB(__skb) ((struct iucv_skb_cb *)&((__skb)->cb[0])) | ||
140 | |||
133 | /* iucv socket options (SOL_IUCV) */ | 141 | /* iucv socket options (SOL_IUCV) */ |
134 | #define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */ | 142 | #define SO_IPRMDATA_MSG 0x0080 /* send/recv IPRM_DATA msgs */ |
135 | #define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */ | 143 | #define SO_MSGLIMIT 0x1000 /* get/set IUCV MSGLIMIT */ |
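IUCV_SKB_CB() is the standard control-block idiom: per-skb metadata lives in the skb->cb[] scratch area behind one casting macro instead of ad-hoc offsets. A minimal hedged sketch of writer-side use, where msg is an assumed struct iucv_message local:

	struct iucv_skb_cb *cb = IUCV_SKB_CB(skb);

	BUILD_BUG_ON(sizeof(struct iucv_skb_cb) > sizeof(skb->cb));
	cb->class  = msg->class;
	cb->tag    = msg->tag;
	cb->offset = 0;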
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 399162b50a8d..e1379b4e8faf 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h | |||
@@ -1074,7 +1074,8 @@ void fc_rport_terminate_io(struct fc_rport *); | |||
1074 | /* | 1074 | /* |
1075 | * DISCOVERY LAYER | 1075 | * DISCOVERY LAYER |
1076 | *****************************/ | 1076 | *****************************/ |
1077 | int fc_disc_init(struct fc_lport *); | 1077 | void fc_disc_init(struct fc_lport *); |
1078 | void fc_disc_config(struct fc_lport *, void *); | ||
1078 | 1079 | ||
1079 | static inline struct fc_lport *fc_disc_lport(struct fc_disc *disc) | 1080 | static inline struct fc_lport *fc_disc_lport(struct fc_disc *disc) |
1080 | { | 1081 | { |
diff --git a/include/sound/max98090.h b/include/sound/max98090.h index 95efb13f8478..95efb13f8478 100755..100644 --- a/include/sound/max98090.h +++ b/include/sound/max98090.h | |||
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index e1ef63d4a5c4..44a30b108683 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h | |||
@@ -488,6 +488,7 @@ struct snd_soc_dapm_path { | |||
488 | /* status */ | 488 | /* status */ |
489 | u32 connect:1; /* source and sink widgets are connected */ | 489 | u32 connect:1; /* source and sink widgets are connected */ |
490 | u32 walked:1; /* path has been walked */ | 490 | u32 walked:1; /* path has been walked */ |
491 | u32 walking:1; /* path is in the process of being walked */ | ||
491 | u32 weak:1; /* path ignored for power management */ | 492 | u32 weak:1; /* path ignored for power management */ |
492 | 493 | ||
493 | int (*connected)(struct snd_soc_dapm_widget *source, | 494 | int (*connected)(struct snd_soc_dapm_widget *source, |
diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h index 93f5fa94a431..afafd703ad92 100644 --- a/include/uapi/linux/packet_diag.h +++ b/include/uapi/linux/packet_diag.h | |||
@@ -33,9 +33,11 @@ enum { | |||
33 | PACKET_DIAG_TX_RING, | 33 | PACKET_DIAG_TX_RING, |
34 | PACKET_DIAG_FANOUT, | 34 | PACKET_DIAG_FANOUT, |
35 | 35 | ||
36 | PACKET_DIAG_MAX, | 36 | __PACKET_DIAG_MAX, |
37 | }; | 37 | }; |
38 | 38 | ||
39 | #define PACKET_DIAG_MAX (__PACKET_DIAG_MAX - 1) | ||
40 | |||
39 | struct packet_diag_info { | 41 | struct packet_diag_info { |
40 | __u32 pdi_index; | 42 | __u32 pdi_index; |
41 | __u32 pdi_version; | 43 | __u32 pdi_version; |
diff --git a/include/uapi/linux/unix_diag.h b/include/uapi/linux/unix_diag.h index b8a24941db21..b9e2a6a7446f 100644 --- a/include/uapi/linux/unix_diag.h +++ b/include/uapi/linux/unix_diag.h | |||
@@ -39,9 +39,11 @@ enum { | |||
39 | UNIX_DIAG_MEMINFO, | 39 | UNIX_DIAG_MEMINFO, |
40 | UNIX_DIAG_SHUTDOWN, | 40 | UNIX_DIAG_SHUTDOWN, |
41 | 41 | ||
42 | UNIX_DIAG_MAX, | 42 | __UNIX_DIAG_MAX, |
43 | }; | 43 | }; |
44 | 44 | ||
45 | #define UNIX_DIAG_MAX (__UNIX_DIAG_MAX - 1) | ||
46 | |||
45 | struct unix_diag_vfs { | 47 | struct unix_diag_vfs { |
46 | __u32 udiag_vfs_ino; | 48 | __u32 udiag_vfs_ino; |
47 | __u32 udiag_vfs_dev; | 49 | __u32 udiag_vfs_dev; |
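Both diag header fixes apply the same uapi rule: the enum keeps a private __*_MAX sentinel and the public *_MAX macro is sentinel minus one, so the "highest valid attribute" stays correct as new entries are appended. The pattern restated with made-up names:

enum {
	EXAMPLE_DIAG_UNSPEC,
	EXAMPLE_DIAG_INFO,
	EXAMPLE_DIAG_STATS,	/* new attributes go here, before the sentinel */

	__EXAMPLE_DIAG_MAX,
};

#define EXAMPLE_DIAG_MAX (__EXAMPLE_DIAG_MAX - 1)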
diff --git a/include/video/atmel_lcdc.h b/include/video/atmel_lcdc.h index 28447f1594fa..8deb22672ada 100644 --- a/include/video/atmel_lcdc.h +++ b/include/video/atmel_lcdc.h | |||
@@ -30,7 +30,6 @@ | |||
30 | */ | 30 | */ |
31 | #define ATMEL_LCDC_WIRING_BGR 0 | 31 | #define ATMEL_LCDC_WIRING_BGR 0 |
32 | #define ATMEL_LCDC_WIRING_RGB 1 | 32 | #define ATMEL_LCDC_WIRING_RGB 1 |
33 | #define ATMEL_LCDC_WIRING_RGB555 2 | ||
34 | 33 | ||
35 | 34 | ||
36 | /* LCD Controller info data structure, stored in device platform_data */ | 35 | /* LCD Controller info data structure, stored in device platform_data */ |
@@ -62,6 +61,7 @@ struct atmel_lcdfb_info { | |||
62 | void (*atmel_lcdfb_power_control)(int on); | 61 | void (*atmel_lcdfb_power_control)(int on); |
63 | struct fb_monspecs *default_monspecs; | 62 | struct fb_monspecs *default_monspecs; |
64 | u32 pseudo_palette[16]; | 63 | u32 pseudo_palette[16]; |
64 | bool have_intensity_bit; | ||
65 | }; | 65 | }; |
66 | 66 | ||
67 | #define ATMEL_LCDC_DMABADDR1 0x00 | 67 | #define ATMEL_LCDC_DMABADDR1 0x00 |
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h index 01c3d62436ef..ffd4652de91c 100644 --- a/include/xen/interface/io/blkif.h +++ b/include/xen/interface/io/blkif.h | |||
@@ -138,11 +138,21 @@ struct blkif_request_discard { | |||
138 | uint8_t _pad3; | 138 | uint8_t _pad3; |
139 | } __attribute__((__packed__)); | 139 | } __attribute__((__packed__)); |
140 | 140 | ||
141 | struct blkif_request_other { | ||
142 | uint8_t _pad1; | ||
143 | blkif_vdev_t _pad2; /* only for read/write requests */ | ||
144 | #ifdef CONFIG_X86_64 | ||
145 | uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/ | ||
146 | #endif | ||
147 | uint64_t id; /* private guest value, echoed in resp */ | ||
148 | } __attribute__((__packed__)); | ||
149 | |||
141 | struct blkif_request { | 150 | struct blkif_request { |
142 | uint8_t operation; /* BLKIF_OP_??? */ | 151 | uint8_t operation; /* BLKIF_OP_??? */ |
143 | union { | 152 | union { |
144 | struct blkif_request_rw rw; | 153 | struct blkif_request_rw rw; |
145 | struct blkif_request_discard discard; | 154 | struct blkif_request_discard discard; |
155 | struct blkif_request_other other; | ||
146 | } u; | 156 | } u; |
147 | } __attribute__((__packed__)); | 157 | } __attribute__((__packed__)); |
148 | 158 | ||
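blkif_request_other lets requests with unrecognised operation codes still carry an id that can be echoed back, and the CONFIG_X86_64 padding keeps that id at byte offset 8 in every union member so 32- and 64-bit guests agree on the layout. A hedged compile-time check a consumer could add, assuming the rw member also exposes an id field at the same offset as this header arranges:

static inline void blkif_check_request_abi(void)
{
	BUILD_BUG_ON(offsetof(struct blkif_request, u.other.id) !=
		     offsetof(struct blkif_request, u.rw.id));
}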
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h index 1844d31f4552..7000bb1f6e96 100644 --- a/include/xen/interface/physdev.h +++ b/include/xen/interface/physdev.h | |||
@@ -251,6 +251,12 @@ struct physdev_pci_device_add { | |||
251 | 251 | ||
252 | #define PHYSDEVOP_pci_device_remove 26 | 252 | #define PHYSDEVOP_pci_device_remove 26 |
253 | #define PHYSDEVOP_restore_msi_ext 27 | 253 | #define PHYSDEVOP_restore_msi_ext 27 |
254 | /* | ||
255 | * Dom0 should use these two to announce MMIO resources assigned to | ||
256 | * MSI-X capable devices won't (prepare) or may (release) change. | ||
257 | */ | ||
258 | #define PHYSDEVOP_prepare_msix 30 | ||
259 | #define PHYSDEVOP_release_msix 31 | ||
254 | struct physdev_pci_device { | 260 | struct physdev_pci_device { |
255 | /* IN */ | 261 | /* IN */ |
256 | uint16_t seg; | 262 | uint16_t seg; |
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index e5c4f609f22c..e4e47f647446 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c | |||
@@ -330,8 +330,16 @@ static struct dentry *mqueue_mount(struct file_system_type *fs_type, | |||
330 | int flags, const char *dev_name, | 330 | int flags, const char *dev_name, |
331 | void *data) | 331 | void *data) |
332 | { | 332 | { |
333 | if (!(flags & MS_KERNMOUNT)) | 333 | if (!(flags & MS_KERNMOUNT)) { |
334 | data = current->nsproxy->ipc_ns; | 334 | struct ipc_namespace *ns = current->nsproxy->ipc_ns; |
335 | /* Don't allow mounting unless the caller has CAP_SYS_ADMIN | ||
336 | * over the ipc namespace. | ||
337 | */ | ||
338 | if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) | ||
339 | return ERR_PTR(-EPERM); | ||
340 | |||
341 | data = ns; | ||
342 | } | ||
335 | return mount_ns(fs_type, flags, data, mqueue_fill_super); | 343 | return mount_ns(fs_type, flags, data, mqueue_fill_super); |
336 | } | 344 | } |
337 | 345 | ||
@@ -840,7 +848,8 @@ out_putfd: | |||
840 | fd = error; | 848 | fd = error; |
841 | } | 849 | } |
842 | mutex_unlock(&root->d_inode->i_mutex); | 850 | mutex_unlock(&root->d_inode->i_mutex); |
843 | mnt_drop_write(mnt); | 851 | if (!ro) |
852 | mnt_drop_write(mnt); | ||
844 | out_putname: | 853 | out_putname: |
845 | putname(name); | 854 | putname(name); |
846 | return fd; | 855 | return fd; |
@@ -872,6 +872,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, | |||
872 | goto out_unlock; | 872 | goto out_unlock; |
873 | break; | 873 | break; |
874 | } | 874 | } |
875 | msg = ERR_PTR(-EAGAIN); | ||
875 | } else | 876 | } else |
876 | break; | 877 | break; |
877 | msg_counter++; | 878 | msg_counter++; |
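The mqueue_mount() hunk is a namespace-aware permission gate: ns_capable(ns->user_ns, CAP_SYS_ADMIN) asks whether the caller is privileged over the user namespace that owns this ipc namespace, whereas a plain capable(CAP_SYS_ADMIN) would demand privilege in the initial namespace. A hedged restatement of the gate as a small helper; the name is illustrative:

static bool may_mount_mqueue(struct ipc_namespace *ns)
{
	/* privilege over the owning user namespace is sufficient */
	return ns_capable(ns->user_ns, CAP_SYS_ADMIN);
}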
diff --git a/kernel/events/core.c b/kernel/events/core.c index b0cd86501c30..59412d037eed 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -4434,12 +4434,15 @@ static void perf_event_task_event(struct perf_task_event *task_event) | |||
4434 | if (ctxn < 0) | 4434 | if (ctxn < 0) |
4435 | goto next; | 4435 | goto next; |
4436 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | 4436 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); |
4437 | if (ctx) | ||
4438 | perf_event_task_ctx(ctx, task_event); | ||
4437 | } | 4439 | } |
4438 | if (ctx) | ||
4439 | perf_event_task_ctx(ctx, task_event); | ||
4440 | next: | 4440 | next: |
4441 | put_cpu_ptr(pmu->pmu_cpu_context); | 4441 | put_cpu_ptr(pmu->pmu_cpu_context); |
4442 | } | 4442 | } |
4443 | if (task_event->task_ctx) | ||
4444 | perf_event_task_ctx(task_event->task_ctx, task_event); | ||
4445 | |||
4443 | rcu_read_unlock(); | 4446 | rcu_read_unlock(); |
4444 | } | 4447 | } |
4445 | 4448 | ||
@@ -5647,6 +5650,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) | |||
5647 | event->attr.sample_period = NSEC_PER_SEC / freq; | 5650 | event->attr.sample_period = NSEC_PER_SEC / freq; |
5648 | hwc->sample_period = event->attr.sample_period; | 5651 | hwc->sample_period = event->attr.sample_period; |
5649 | local64_set(&hwc->period_left, hwc->sample_period); | 5652 | local64_set(&hwc->period_left, hwc->sample_period); |
5653 | hwc->last_period = hwc->sample_period; | ||
5650 | event->attr.freq = 0; | 5654 | event->attr.freq = 0; |
5651 | } | 5655 | } |
5652 | } | 5656 | } |
diff --git a/kernel/exit.c b/kernel/exit.c index 51e485ca9935..60bc027c61c3 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -835,7 +835,7 @@ void do_exit(long code) | |||
835 | /* | 835 | /* |
836 | * Make sure we are holding no locks: | 836 | * Make sure we are holding no locks: |
837 | */ | 837 | */ |
838 | debug_check_no_locks_held(); | 838 | debug_check_no_locks_held(tsk); |
839 | /* | 839 | /* |
840 | * We can do this unlocked here. The futex code uses this flag | 840 | * We can do this unlocked here. The futex code uses this flag |
841 | * just to verify whether the pi state cleanup has been done | 841 | * just to verify whether the pi state cleanup has been done |
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 259db207b5d9..8a0efac4f99d 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -4088,7 +4088,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) | |||
4088 | } | 4088 | } |
4089 | EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); | 4089 | EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); |
4090 | 4090 | ||
4091 | static void print_held_locks_bug(void) | 4091 | static void print_held_locks_bug(struct task_struct *curr) |
4092 | { | 4092 | { |
4093 | if (!debug_locks_off()) | 4093 | if (!debug_locks_off()) |
4094 | return; | 4094 | return; |
@@ -4097,21 +4097,22 @@ static void print_held_locks_bug(void) | |||
4097 | 4097 | ||
4098 | printk("\n"); | 4098 | printk("\n"); |
4099 | printk("=====================================\n"); | 4099 | printk("=====================================\n"); |
4100 | printk("[ BUG: %s/%d still has locks held! ]\n", | 4100 | printk("[ BUG: lock held at task exit time! ]\n"); |
4101 | current->comm, task_pid_nr(current)); | ||
4102 | print_kernel_ident(); | 4101 | print_kernel_ident(); |
4103 | printk("-------------------------------------\n"); | 4102 | printk("-------------------------------------\n"); |
4104 | lockdep_print_held_locks(current); | 4103 | printk("%s/%d is exiting with locks still held!\n", |
4104 | curr->comm, task_pid_nr(curr)); | ||
4105 | lockdep_print_held_locks(curr); | ||
4106 | |||
4105 | printk("\nstack backtrace:\n"); | 4107 | printk("\nstack backtrace:\n"); |
4106 | dump_stack(); | 4108 | dump_stack(); |
4107 | } | 4109 | } |
4108 | 4110 | ||
4109 | void debug_check_no_locks_held(void) | 4111 | void debug_check_no_locks_held(struct task_struct *task) |
4110 | { | 4112 | { |
4111 | if (unlikely(current->lockdep_depth > 0)) | 4113 | if (unlikely(task->lockdep_depth > 0)) |
4112 | print_held_locks_bug(); | 4114 | print_held_locks_bug(task); |
4113 | } | 4115 | } |
4114 | EXPORT_SYMBOL_GPL(debug_check_no_locks_held); | ||
4115 | 4116 | ||
4116 | void debug_show_all_locks(void) | 4117 | void debug_show_all_locks(void) |
4117 | { | 4118 | { |
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index c1c3dc1c6023..bea15bdf82b0 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c | |||
@@ -181,6 +181,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) | |||
181 | int nr; | 181 | int nr; |
182 | int rc; | 182 | int rc; |
183 | struct task_struct *task, *me = current; | 183 | struct task_struct *task, *me = current; |
184 | int init_pids = thread_group_leader(me) ? 1 : 2; | ||
184 | 185 | ||
185 | /* Don't allow any more processes into the pid namespace */ | 186 | /* Don't allow any more processes into the pid namespace */ |
186 | disable_pid_allocation(pid_ns); | 187 | disable_pid_allocation(pid_ns); |
@@ -230,7 +231,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) | |||
230 | */ | 231 | */ |
231 | for (;;) { | 232 | for (;;) { |
232 | set_current_state(TASK_UNINTERRUPTIBLE); | 233 | set_current_state(TASK_UNINTERRUPTIBLE); |
233 | if (pid_ns->nr_hashed == 1) | 234 | if (pid_ns->nr_hashed == init_pids) |
234 | break; | 235 | break; |
235 | schedule(); | 236 | schedule(); |
236 | } | 237 | } |
diff --git a/kernel/printk.c b/kernel/printk.c index 0b31715f335a..abbdd9e2ac82 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -63,8 +63,6 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...) | |||
63 | #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ | 63 | #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ |
64 | #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */ | 64 | #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */ |
65 | 65 | ||
66 | DECLARE_WAIT_QUEUE_HEAD(log_wait); | ||
67 | |||
68 | int console_printk[4] = { | 66 | int console_printk[4] = { |
69 | DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ | 67 | DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ |
70 | DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ | 68 | DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ |
@@ -224,6 +222,7 @@ struct log { | |||
224 | static DEFINE_RAW_SPINLOCK(logbuf_lock); | 222 | static DEFINE_RAW_SPINLOCK(logbuf_lock); |
225 | 223 | ||
226 | #ifdef CONFIG_PRINTK | 224 | #ifdef CONFIG_PRINTK |
225 | DECLARE_WAIT_QUEUE_HEAD(log_wait); | ||
227 | /* the next printk record to read by syslog(READ) or /proc/kmsg */ | 226 | /* the next printk record to read by syslog(READ) or /proc/kmsg */ |
228 | static u64 syslog_seq; | 227 | static u64 syslog_seq; |
229 | static u32 syslog_idx; | 228 | static u32 syslog_idx; |
@@ -1957,45 +1956,6 @@ int is_console_locked(void) | |||
1957 | return console_locked; | 1956 | return console_locked; |
1958 | } | 1957 | } |
1959 | 1958 | ||
1960 | /* | ||
1961 | * Delayed printk version, for scheduler-internal messages: | ||
1962 | */ | ||
1963 | #define PRINTK_BUF_SIZE 512 | ||
1964 | |||
1965 | #define PRINTK_PENDING_WAKEUP 0x01 | ||
1966 | #define PRINTK_PENDING_SCHED 0x02 | ||
1967 | |||
1968 | static DEFINE_PER_CPU(int, printk_pending); | ||
1969 | static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf); | ||
1970 | |||
1971 | static void wake_up_klogd_work_func(struct irq_work *irq_work) | ||
1972 | { | ||
1973 | int pending = __this_cpu_xchg(printk_pending, 0); | ||
1974 | |||
1975 | if (pending & PRINTK_PENDING_SCHED) { | ||
1976 | char *buf = __get_cpu_var(printk_sched_buf); | ||
1977 | printk(KERN_WARNING "[sched_delayed] %s", buf); | ||
1978 | } | ||
1979 | |||
1980 | if (pending & PRINTK_PENDING_WAKEUP) | ||
1981 | wake_up_interruptible(&log_wait); | ||
1982 | } | ||
1983 | |||
1984 | static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = { | ||
1985 | .func = wake_up_klogd_work_func, | ||
1986 | .flags = IRQ_WORK_LAZY, | ||
1987 | }; | ||
1988 | |||
1989 | void wake_up_klogd(void) | ||
1990 | { | ||
1991 | preempt_disable(); | ||
1992 | if (waitqueue_active(&log_wait)) { | ||
1993 | this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); | ||
1994 | irq_work_queue(&__get_cpu_var(wake_up_klogd_work)); | ||
1995 | } | ||
1996 | preempt_enable(); | ||
1997 | } | ||
1998 | |||
1999 | static void console_cont_flush(char *text, size_t size) | 1959 | static void console_cont_flush(char *text, size_t size) |
2000 | { | 1960 | { |
2001 | unsigned long flags; | 1961 | unsigned long flags; |
@@ -2458,6 +2418,44 @@ static int __init printk_late_init(void) | |||
2458 | late_initcall(printk_late_init); | 2418 | late_initcall(printk_late_init); |
2459 | 2419 | ||
2460 | #if defined CONFIG_PRINTK | 2420 | #if defined CONFIG_PRINTK |
2421 | /* | ||
2422 | * Delayed printk version, for scheduler-internal messages: | ||
2423 | */ | ||
2424 | #define PRINTK_BUF_SIZE 512 | ||
2425 | |||
2426 | #define PRINTK_PENDING_WAKEUP 0x01 | ||
2427 | #define PRINTK_PENDING_SCHED 0x02 | ||
2428 | |||
2429 | static DEFINE_PER_CPU(int, printk_pending); | ||
2430 | static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf); | ||
2431 | |||
2432 | static void wake_up_klogd_work_func(struct irq_work *irq_work) | ||
2433 | { | ||
2434 | int pending = __this_cpu_xchg(printk_pending, 0); | ||
2435 | |||
2436 | if (pending & PRINTK_PENDING_SCHED) { | ||
2437 | char *buf = __get_cpu_var(printk_sched_buf); | ||
2438 | printk(KERN_WARNING "[sched_delayed] %s", buf); | ||
2439 | } | ||
2440 | |||
2441 | if (pending & PRINTK_PENDING_WAKEUP) | ||
2442 | wake_up_interruptible(&log_wait); | ||
2443 | } | ||
2444 | |||
2445 | static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = { | ||
2446 | .func = wake_up_klogd_work_func, | ||
2447 | .flags = IRQ_WORK_LAZY, | ||
2448 | }; | ||
2449 | |||
2450 | void wake_up_klogd(void) | ||
2451 | { | ||
2452 | preempt_disable(); | ||
2453 | if (waitqueue_active(&log_wait)) { | ||
2454 | this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); | ||
2455 | irq_work_queue(&__get_cpu_var(wake_up_klogd_work)); | ||
2456 | } | ||
2457 | preempt_enable(); | ||
2458 | } | ||
2461 | 2459 | ||
2462 | int printk_sched(const char *fmt, ...) | 2460 | int printk_sched(const char *fmt, ...) |
2463 | { | 2461 | { |
diff --git a/kernel/sys.c b/kernel/sys.c index 81f56445fba9..0da73cf73e60 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -324,7 +324,6 @@ void kernel_restart_prepare(char *cmd) | |||
324 | system_state = SYSTEM_RESTART; | 324 | system_state = SYSTEM_RESTART; |
325 | usermodehelper_disable(); | 325 | usermodehelper_disable(); |
326 | device_shutdown(); | 326 | device_shutdown(); |
327 | syscore_shutdown(); | ||
328 | } | 327 | } |
329 | 328 | ||
330 | /** | 329 | /** |
@@ -370,6 +369,7 @@ void kernel_restart(char *cmd) | |||
370 | { | 369 | { |
371 | kernel_restart_prepare(cmd); | 370 | kernel_restart_prepare(cmd); |
372 | disable_nonboot_cpus(); | 371 | disable_nonboot_cpus(); |
372 | syscore_shutdown(); | ||
373 | if (!cmd) | 373 | if (!cmd) |
374 | printk(KERN_EMERG "Restarting system.\n"); | 374 | printk(KERN_EMERG "Restarting system.\n"); |
375 | else | 375 | else |
@@ -395,6 +395,7 @@ static void kernel_shutdown_prepare(enum system_states state) | |||
395 | void kernel_halt(void) | 395 | void kernel_halt(void) |
396 | { | 396 | { |
397 | kernel_shutdown_prepare(SYSTEM_HALT); | 397 | kernel_shutdown_prepare(SYSTEM_HALT); |
398 | disable_nonboot_cpus(); | ||
398 | syscore_shutdown(); | 399 | syscore_shutdown(); |
399 | printk(KERN_EMERG "System halted.\n"); | 400 | printk(KERN_EMERG "System halted.\n"); |
400 | kmsg_dump(KMSG_DUMP_HALT); | 401 | kmsg_dump(KMSG_DUMP_HALT); |
@@ -2185,9 +2186,8 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep, | |||
2185 | 2186 | ||
2186 | char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; | 2187 | char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; |
2187 | 2188 | ||
2188 | static int __orderly_poweroff(void) | 2189 | static int __orderly_poweroff(bool force) |
2189 | { | 2190 | { |
2190 | int argc; | ||
2191 | char **argv; | 2191 | char **argv; |
2192 | static char *envp[] = { | 2192 | static char *envp[] = { |
2193 | "HOME=/", | 2193 | "HOME=/", |
@@ -2196,20 +2196,40 @@ static int __orderly_poweroff(void) | |||
2196 | }; | 2196 | }; |
2197 | int ret; | 2197 | int ret; |
2198 | 2198 | ||
2199 | argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc); | 2199 | argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL); |
2200 | if (argv == NULL) { | 2200 | if (argv) { |
2201 | ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); | ||
2202 | argv_free(argv); | ||
2203 | } else { | ||
2201 | printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n", | 2204 | printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n", |
2202 | __func__, poweroff_cmd); | 2205 | __func__, poweroff_cmd); |
2203 | return -ENOMEM; | 2206 | ret = -ENOMEM; |
2204 | } | 2207 | } |
2205 | 2208 | ||
2206 | ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC, | 2209 | if (ret && force) { |
2207 | NULL, NULL, NULL); | 2210 | printk(KERN_WARNING "Failed to start orderly shutdown: " |
2208 | argv_free(argv); | 2211 | "forcing the issue\n"); |
2212 | /* | ||
2213 | * I guess this should try to kick off some daemon to sync and | ||
2214 | * poweroff asap. Or not even bother syncing if we're doing an | ||
2215 | * emergency shutdown? | ||
2216 | */ | ||
2217 | emergency_sync(); | ||
2218 | kernel_power_off(); | ||
2219 | } | ||
2209 | 2220 | ||
2210 | return ret; | 2221 | return ret; |
2211 | } | 2222 | } |
2212 | 2223 | ||
2224 | static bool poweroff_force; | ||
2225 | |||
2226 | static void poweroff_work_func(struct work_struct *work) | ||
2227 | { | ||
2228 | __orderly_poweroff(poweroff_force); | ||
2229 | } | ||
2230 | |||
2231 | static DECLARE_WORK(poweroff_work, poweroff_work_func); | ||
2232 | |||
2213 | /** | 2233 | /** |
2214 | * orderly_poweroff - Trigger an orderly system poweroff | 2234 | * orderly_poweroff - Trigger an orderly system poweroff |
2215 | * @force: force poweroff if command execution fails | 2235 | * @force: force poweroff if command execution fails |
@@ -2219,21 +2239,9 @@ static int __orderly_poweroff(void) | |||
2219 | */ | 2239 | */ |
2220 | int orderly_poweroff(bool force) | 2240 | int orderly_poweroff(bool force) |
2221 | { | 2241 | { |
2222 | int ret = __orderly_poweroff(); | 2242 | if (force) /* do not override the pending "true" */ |
2223 | 2243 | poweroff_force = true; | |
2224 | if (ret && force) { | 2244 | schedule_work(&poweroff_work); |
2225 | printk(KERN_WARNING "Failed to start orderly shutdown: " | 2245 | return 0; |
2226 | "forcing the issue\n"); | ||
2227 | |||
2228 | /* | ||
2229 | * I guess this should try to kick off some daemon to sync and | ||
2230 | * poweroff asap. Or not even bother syncing if we're doing an | ||
2231 | * emergency shutdown? | ||
2232 | */ | ||
2233 | emergency_sync(); | ||
2234 | kernel_power_off(); | ||
2235 | } | ||
2236 | |||
2237 | return ret; | ||
2238 | } | 2246 | } |
2239 | EXPORT_SYMBOL_GPL(orderly_poweroff); | 2247 | EXPORT_SYMBOL_GPL(orderly_poweroff); |
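The sys.c rework moves the usermode-helper invocation out of the caller's context into a workqueue item, so orderly_poweroff() only records the force flag, queues the work and returns 0, while the heavy lifting runs later in process context where GFP_KERNEL allocations are fine. A stripped-down hedged sketch of the deferral pattern itself; the my_* names and do_the_poweroff() helper are illustrative:

static bool my_force;

static void my_poweroff_work_func(struct work_struct *work)
{
	/* process context: sleeping and GFP_KERNEL are allowed here */
	do_the_poweroff(my_force);
}

static DECLARE_WORK(my_poweroff_work, my_poweroff_work_func);

int my_trigger_poweroff(bool force)
{
	if (force)		/* never clear a pending "true" */
		my_force = true;
	schedule_work(&my_poweroff_work);
	return 0;
}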
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 2fb8cb88df8d..7f32fe0e52cd 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -67,7 +67,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc) | |||
67 | */ | 67 | */ |
68 | int tick_check_broadcast_device(struct clock_event_device *dev) | 68 | int tick_check_broadcast_device(struct clock_event_device *dev) |
69 | { | 69 | { |
70 | if ((tick_broadcast_device.evtdev && | 70 | if ((dev->features & CLOCK_EVT_FEAT_DUMMY) || |
71 | (tick_broadcast_device.evtdev && | ||
71 | tick_broadcast_device.evtdev->rating >= dev->rating) || | 72 | tick_broadcast_device.evtdev->rating >= dev->rating) || |
72 | (dev->features & CLOCK_EVT_FEAT_C3STOP)) | 73 | (dev->features & CLOCK_EVT_FEAT_C3STOP)) |
73 | return 0; | 74 | return 0; |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index ab25b88aae56..926ebfb74936 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -66,7 +66,7 @@ | |||
66 | 66 | ||
67 | static struct ftrace_ops ftrace_list_end __read_mostly = { | 67 | static struct ftrace_ops ftrace_list_end __read_mostly = { |
68 | .func = ftrace_stub, | 68 | .func = ftrace_stub, |
69 | .flags = FTRACE_OPS_FL_RECURSION_SAFE, | 69 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, |
70 | }; | 70 | }; |
71 | 71 | ||
72 | /* ftrace_enabled is a method to turn ftrace on or off */ | 72 | /* ftrace_enabled is a method to turn ftrace on or off */ |
@@ -694,7 +694,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) | |||
694 | free_page(tmp); | 694 | free_page(tmp); |
695 | } | 695 | } |
696 | 696 | ||
697 | free_page((unsigned long)stat->pages); | ||
698 | stat->pages = NULL; | 697 | stat->pages = NULL; |
699 | stat->start = NULL; | 698 | stat->start = NULL; |
700 | 699 | ||
@@ -3104,8 +3103,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
3104 | continue; | 3103 | continue; |
3105 | } | 3104 | } |
3106 | 3105 | ||
3107 | hlist_del(&entry->node); | 3106 | hlist_del_rcu(&entry->node); |
3108 | call_rcu(&entry->rcu, ftrace_free_entry_rcu); | 3107 | call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu); |
3109 | } | 3108 | } |
3110 | } | 3109 | } |
3111 | __disable_ftrace_function_probe(); | 3110 | __disable_ftrace_function_probe(); |
@@ -4131,7 +4130,8 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, | |||
4131 | preempt_disable_notrace(); | 4130 | preempt_disable_notrace(); |
4132 | trace_recursion_set(TRACE_CONTROL_BIT); | 4131 | trace_recursion_set(TRACE_CONTROL_BIT); |
4133 | do_for_each_ftrace_op(op, ftrace_control_list) { | 4132 | do_for_each_ftrace_op(op, ftrace_control_list) { |
4134 | if (!ftrace_function_local_disabled(op) && | 4133 | if (!(op->flags & FTRACE_OPS_FL_STUB) && |
4134 | !ftrace_function_local_disabled(op) && | ||
4135 | ftrace_ops_test(op, ip)) | 4135 | ftrace_ops_test(op, ip)) |
4136 | op->func(ip, parent_ip, op, regs); | 4136 | op->func(ip, parent_ip, op, regs); |
4137 | } while_for_each_ftrace_op(op); | 4137 | } while_for_each_ftrace_op(op); |
@@ -4555,12 +4555,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
4555 | ftrace_startup_sysctl(); | 4555 | ftrace_startup_sysctl(); |
4556 | 4556 | ||
4557 | /* we are starting ftrace again */ | 4557 | /* we are starting ftrace again */ |
4558 | if (ftrace_ops_list != &ftrace_list_end) { | 4558 | if (ftrace_ops_list != &ftrace_list_end) |
4559 | if (ftrace_ops_list->next == &ftrace_list_end) | 4559 | update_ftrace_function(); |
4560 | ftrace_trace_function = ftrace_ops_list->func; | ||
4561 | else | ||
4562 | ftrace_trace_function = ftrace_ops_list_func; | ||
4563 | } | ||
4564 | 4560 | ||
4565 | } else { | 4561 | } else { |
4566 | /* stopping ftrace calls (just send to ftrace_stub) */ | 4562 | /* stopping ftrace calls (just send to ftrace_stub) */ |
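The probe-removal hunk switches to hlist_del_rcu() plus call_rcu_sched(), matching readers that walk the hash with only preemption disabled: the unlink is immediately visible, but the memory is not freed until a sched-RCU grace period has passed. A generic hedged sketch of that removal pattern; struct item, its free callback and the it local are illustrative:

struct item {
	struct hlist_node node;
	struct rcu_head   rcu;
};

static void item_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct item, rcu));
}

	/* writer side, under the update-side lock */
	hlist_del_rcu(&it->node);
	call_rcu_sched(&it->rcu, item_free_rcu);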
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1f835a83cb2c..7ba7fc76f9eb 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -704,7 +704,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
704 | void | 704 | void |
705 | update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | 705 | update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) |
706 | { | 706 | { |
707 | struct ring_buffer *buf = tr->buffer; | 707 | struct ring_buffer *buf; |
708 | 708 | ||
709 | if (trace_stop_count) | 709 | if (trace_stop_count) |
710 | return; | 710 | return; |
@@ -719,6 +719,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
719 | 719 | ||
720 | arch_spin_lock(&ftrace_max_lock); | 720 | arch_spin_lock(&ftrace_max_lock); |
721 | 721 | ||
722 | buf = tr->buffer; | ||
722 | tr->buffer = max_tr.buffer; | 723 | tr->buffer = max_tr.buffer; |
723 | max_tr.buffer = buf; | 724 | max_tr.buffer = buf; |
724 | 725 | ||
@@ -743,8 +744,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
743 | return; | 744 | return; |
744 | 745 | ||
745 | WARN_ON_ONCE(!irqs_disabled()); | 746 | WARN_ON_ONCE(!irqs_disabled()); |
746 | if (WARN_ON_ONCE(!current_trace->allocated_snapshot)) | 747 | if (!current_trace->allocated_snapshot) { |
748 | /* Only the nop tracer should hit this when disabling */ | ||
749 | WARN_ON_ONCE(current_trace != &nop_trace); | ||
747 | return; | 750 | return; |
751 | } | ||
748 | 752 | ||
749 | arch_spin_lock(&ftrace_max_lock); | 753 | arch_spin_lock(&ftrace_max_lock); |
750 | 754 | ||
@@ -2880,11 +2884,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | |||
2880 | return -EINVAL; | 2884 | return -EINVAL; |
2881 | } | 2885 | } |
2882 | 2886 | ||
2883 | static void set_tracer_flags(unsigned int mask, int enabled) | 2887 | /* Some tracers require overwrite to stay enabled */ |
2888 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) | ||
2889 | { | ||
2890 | if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) | ||
2891 | return -1; | ||
2892 | |||
2893 | return 0; | ||
2894 | } | ||
2895 | |||
2896 | int set_tracer_flag(unsigned int mask, int enabled) | ||
2884 | { | 2897 | { |
2885 | /* do nothing if flag is already set */ | 2898 | /* do nothing if flag is already set */ |
2886 | if (!!(trace_flags & mask) == !!enabled) | 2899 | if (!!(trace_flags & mask) == !!enabled) |
2887 | return; | 2900 | return 0; |
2901 | |||
2902 | /* Give the tracer a chance to approve the change */ | ||
2903 | if (current_trace->flag_changed) | ||
2904 | if (current_trace->flag_changed(current_trace, mask, !!enabled)) | ||
2905 | return -EINVAL; | ||
2888 | 2906 | ||
2889 | if (enabled) | 2907 | if (enabled) |
2890 | trace_flags |= mask; | 2908 | trace_flags |= mask; |
@@ -2894,18 +2912,24 @@ static void set_tracer_flags(unsigned int mask, int enabled) | |||
2894 | if (mask == TRACE_ITER_RECORD_CMD) | 2912 | if (mask == TRACE_ITER_RECORD_CMD) |
2895 | trace_event_enable_cmd_record(enabled); | 2913 | trace_event_enable_cmd_record(enabled); |
2896 | 2914 | ||
2897 | if (mask == TRACE_ITER_OVERWRITE) | 2915 | if (mask == TRACE_ITER_OVERWRITE) { |
2898 | ring_buffer_change_overwrite(global_trace.buffer, enabled); | 2916 | ring_buffer_change_overwrite(global_trace.buffer, enabled); |
2917 | #ifdef CONFIG_TRACER_MAX_TRACE | ||
2918 | ring_buffer_change_overwrite(max_tr.buffer, enabled); | ||
2919 | #endif | ||
2920 | } | ||
2899 | 2921 | ||
2900 | if (mask == TRACE_ITER_PRINTK) | 2922 | if (mask == TRACE_ITER_PRINTK) |
2901 | trace_printk_start_stop_comm(enabled); | 2923 | trace_printk_start_stop_comm(enabled); |
2924 | |||
2925 | return 0; | ||
2902 | } | 2926 | } |
2903 | 2927 | ||
2904 | static int trace_set_options(char *option) | 2928 | static int trace_set_options(char *option) |
2905 | { | 2929 | { |
2906 | char *cmp; | 2930 | char *cmp; |
2907 | int neg = 0; | 2931 | int neg = 0; |
2908 | int ret = 0; | 2932 | int ret = -ENODEV; |
2909 | int i; | 2933 | int i; |
2910 | 2934 | ||
2911 | cmp = strstrip(option); | 2935 | cmp = strstrip(option); |
@@ -2915,19 +2939,20 @@ static int trace_set_options(char *option) | |||
2915 | cmp += 2; | 2939 | cmp += 2; |
2916 | } | 2940 | } |
2917 | 2941 | ||
2942 | mutex_lock(&trace_types_lock); | ||
2943 | |||
2918 | for (i = 0; trace_options[i]; i++) { | 2944 | for (i = 0; trace_options[i]; i++) { |
2919 | if (strcmp(cmp, trace_options[i]) == 0) { | 2945 | if (strcmp(cmp, trace_options[i]) == 0) { |
2920 | set_tracer_flags(1 << i, !neg); | 2946 | ret = set_tracer_flag(1 << i, !neg); |
2921 | break; | 2947 | break; |
2922 | } | 2948 | } |
2923 | } | 2949 | } |
2924 | 2950 | ||
2925 | /* If no option could be set, test the specific tracer options */ | 2951 | /* If no option could be set, test the specific tracer options */ |
2926 | if (!trace_options[i]) { | 2952 | if (!trace_options[i]) |
2927 | mutex_lock(&trace_types_lock); | ||
2928 | ret = set_tracer_option(current_trace, cmp, neg); | 2953 | ret = set_tracer_option(current_trace, cmp, neg); |
2929 | mutex_unlock(&trace_types_lock); | 2954 | |
2930 | } | 2955 | mutex_unlock(&trace_types_lock); |
2931 | 2956 | ||
2932 | return ret; | 2957 | return ret; |
2933 | } | 2958 | } |
@@ -2937,6 +2962,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
2937 | size_t cnt, loff_t *ppos) | 2962 | size_t cnt, loff_t *ppos) |
2938 | { | 2963 | { |
2939 | char buf[64]; | 2964 | char buf[64]; |
2965 | int ret; | ||
2940 | 2966 | ||
2941 | if (cnt >= sizeof(buf)) | 2967 | if (cnt >= sizeof(buf)) |
2942 | return -EINVAL; | 2968 | return -EINVAL; |
@@ -2946,7 +2972,9 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
2946 | 2972 | ||
2947 | buf[cnt] = 0; | 2973 | buf[cnt] = 0; |
2948 | 2974 | ||
2949 | trace_set_options(buf); | 2975 | ret = trace_set_options(buf); |
2976 | if (ret < 0) | ||
2977 | return ret; | ||
2950 | 2978 | ||
2951 | *ppos += cnt; | 2979 | *ppos += cnt; |
2952 | 2980 | ||
@@ -3250,6 +3278,9 @@ static int tracing_set_tracer(const char *buf) | |||
3250 | goto out; | 3278 | goto out; |
3251 | 3279 | ||
3252 | trace_branch_disable(); | 3280 | trace_branch_disable(); |
3281 | |||
3282 | current_trace->enabled = false; | ||
3283 | |||
3253 | if (current_trace->reset) | 3284 | if (current_trace->reset) |
3254 | current_trace->reset(tr); | 3285 | current_trace->reset(tr); |
3255 | 3286 | ||
@@ -3294,6 +3325,7 @@ static int tracing_set_tracer(const char *buf) | |||
3294 | } | 3325 | } |
3295 | 3326 | ||
3296 | current_trace = t; | 3327 | current_trace = t; |
3328 | current_trace->enabled = true; | ||
3297 | trace_branch_enable(tr); | 3329 | trace_branch_enable(tr); |
3298 | out: | 3330 | out: |
3299 | mutex_unlock(&trace_types_lock); | 3331 | mutex_unlock(&trace_types_lock); |
@@ -4780,7 +4812,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
4780 | 4812 | ||
4781 | if (val != 0 && val != 1) | 4813 | if (val != 0 && val != 1) |
4782 | return -EINVAL; | 4814 | return -EINVAL; |
4783 | set_tracer_flags(1 << index, val); | 4815 | |
4816 | mutex_lock(&trace_types_lock); | ||
4817 | ret = set_tracer_flag(1 << index, val); | ||
4818 | mutex_unlock(&trace_types_lock); | ||
4819 | |||
4820 | if (ret < 0) | ||
4821 | return ret; | ||
4784 | 4822 | ||
4785 | *ppos += cnt; | 4823 | *ppos += cnt; |
4786 | 4824 | ||
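set_tracer_flag() now gives the active tracer a veto through the new flag_changed callback, with trace_keep_overwrite() as the stock veto that refuses to clear TRACE_ITER_OVERWRITE while a latency tracer is enabled. A hedged sketch of what another tracer's hook could look like; the PRINTK constraint is invented purely for illustration:

static int mytracer_flag_changed(struct tracer *tracer, u32 mask, int set)
{
	/* refuse to clear the printk option while this tracer is enabled;
	 * set_tracer_flag() turns the non-zero return into -EINVAL */
	if (tracer->enabled && (mask & TRACE_ITER_PRINTK) && !set)
		return -1;

	return 0;
}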
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 57d7e5397d56..2081971367ea 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -283,11 +283,15 @@ struct tracer { | |||
283 | enum print_line_t (*print_line)(struct trace_iterator *iter); | 283 | enum print_line_t (*print_line)(struct trace_iterator *iter); |
284 | /* If you handled the flag setting, return 0 */ | 284 | /* If you handled the flag setting, return 0 */ |
285 | int (*set_flag)(u32 old_flags, u32 bit, int set); | 285 | int (*set_flag)(u32 old_flags, u32 bit, int set); |
286 | /* Return 0 if OK with change, else return non-zero */ | ||
287 | int (*flag_changed)(struct tracer *tracer, | ||
288 | u32 mask, int set); | ||
286 | struct tracer *next; | 289 | struct tracer *next; |
287 | struct tracer_flags *flags; | 290 | struct tracer_flags *flags; |
288 | bool print_max; | 291 | bool print_max; |
289 | bool use_max_tr; | 292 | bool use_max_tr; |
290 | bool allocated_snapshot; | 293 | bool allocated_snapshot; |
294 | bool enabled; | ||
291 | }; | 295 | }; |
292 | 296 | ||
293 | 297 | ||
@@ -943,6 +947,8 @@ extern const char *__stop___trace_bprintk_fmt[]; | |||
943 | 947 | ||
944 | void trace_printk_init_buffers(void); | 948 | void trace_printk_init_buffers(void); |
945 | void trace_printk_start_comm(void); | 949 | void trace_printk_start_comm(void); |
950 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); | ||
951 | int set_tracer_flag(unsigned int mask, int enabled); | ||
946 | 952 | ||
947 | #undef FTRACE_ENTRY | 953 | #undef FTRACE_ENTRY |
948 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ | 954 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 713a2cac4881..443b25b43b4f 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -32,7 +32,7 @@ enum { | |||
32 | 32 | ||
33 | static int trace_type __read_mostly; | 33 | static int trace_type __read_mostly; |
34 | 34 | ||
35 | static int save_lat_flag; | 35 | static int save_flags; |
36 | 36 | ||
37 | static void stop_irqsoff_tracer(struct trace_array *tr, int graph); | 37 | static void stop_irqsoff_tracer(struct trace_array *tr, int graph); |
38 | static int start_irqsoff_tracer(struct trace_array *tr, int graph); | 38 | static int start_irqsoff_tracer(struct trace_array *tr, int graph); |
@@ -558,8 +558,11 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph) | |||
558 | 558 | ||
559 | static void __irqsoff_tracer_init(struct trace_array *tr) | 559 | static void __irqsoff_tracer_init(struct trace_array *tr) |
560 | { | 560 | { |
561 | save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; | 561 | save_flags = trace_flags; |
562 | trace_flags |= TRACE_ITER_LATENCY_FMT; | 562 | |
563 | /* non overwrite screws up the latency tracers */ | ||
564 | set_tracer_flag(TRACE_ITER_OVERWRITE, 1); | ||
565 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1); | ||
563 | 566 | ||
564 | tracing_max_latency = 0; | 567 | tracing_max_latency = 0; |
565 | irqsoff_trace = tr; | 568 | irqsoff_trace = tr; |
@@ -573,10 +576,13 @@ static void __irqsoff_tracer_init(struct trace_array *tr) | |||
573 | 576 | ||
574 | static void irqsoff_tracer_reset(struct trace_array *tr) | 577 | static void irqsoff_tracer_reset(struct trace_array *tr) |
575 | { | 578 | { |
579 | int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT; | ||
580 | int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE; | ||
581 | |||
576 | stop_irqsoff_tracer(tr, is_graph()); | 582 | stop_irqsoff_tracer(tr, is_graph()); |
577 | 583 | ||
578 | if (!save_lat_flag) | 584 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag); |
579 | trace_flags &= ~TRACE_ITER_LATENCY_FMT; | 585 | set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag); |
580 | } | 586 | } |
581 | 587 | ||
582 | static void irqsoff_tracer_start(struct trace_array *tr) | 588 | static void irqsoff_tracer_start(struct trace_array *tr) |
@@ -609,6 +615,7 @@ static struct tracer irqsoff_tracer __read_mostly = | |||
609 | .print_line = irqsoff_print_line, | 615 | .print_line = irqsoff_print_line, |
610 | .flags = &tracer_flags, | 616 | .flags = &tracer_flags, |
611 | .set_flag = irqsoff_set_flag, | 617 | .set_flag = irqsoff_set_flag, |
618 | .flag_changed = trace_keep_overwrite, | ||
612 | #ifdef CONFIG_FTRACE_SELFTEST | 619 | #ifdef CONFIG_FTRACE_SELFTEST |
613 | .selftest = trace_selftest_startup_irqsoff, | 620 | .selftest = trace_selftest_startup_irqsoff, |
614 | #endif | 621 | #endif |
@@ -642,6 +649,7 @@ static struct tracer preemptoff_tracer __read_mostly = | |||
642 | .print_line = irqsoff_print_line, | 649 | .print_line = irqsoff_print_line, |
643 | .flags = &tracer_flags, | 650 | .flags = &tracer_flags, |
644 | .set_flag = irqsoff_set_flag, | 651 | .set_flag = irqsoff_set_flag, |
652 | .flag_changed = trace_keep_overwrite, | ||
645 | #ifdef CONFIG_FTRACE_SELFTEST | 653 | #ifdef CONFIG_FTRACE_SELFTEST |
646 | .selftest = trace_selftest_startup_preemptoff, | 654 | .selftest = trace_selftest_startup_preemptoff, |
647 | #endif | 655 | #endif |
@@ -677,6 +685,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly = | |||
677 | .print_line = irqsoff_print_line, | 685 | .print_line = irqsoff_print_line, |
678 | .flags = &tracer_flags, | 686 | .flags = &tracer_flags, |
679 | .set_flag = irqsoff_set_flag, | 687 | .set_flag = irqsoff_set_flag, |
688 | .flag_changed = trace_keep_overwrite, | ||
680 | #ifdef CONFIG_FTRACE_SELFTEST | 689 | #ifdef CONFIG_FTRACE_SELFTEST |
681 | .selftest = trace_selftest_startup_preemptirqsoff, | 690 | .selftest = trace_selftest_startup_preemptirqsoff, |
682 | #endif | 691 | #endif |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 75aa97fbe1a1..fde652c9a511 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr); | |||
36 | static int wakeup_graph_entry(struct ftrace_graph_ent *trace); | 36 | static int wakeup_graph_entry(struct ftrace_graph_ent *trace); |
37 | static void wakeup_graph_return(struct ftrace_graph_ret *trace); | 37 | static void wakeup_graph_return(struct ftrace_graph_ret *trace); |
38 | 38 | ||
39 | static int save_lat_flag; | 39 | static int save_flags; |
40 | 40 | ||
41 | #define TRACE_DISPLAY_GRAPH 1 | 41 | #define TRACE_DISPLAY_GRAPH 1 |
42 | 42 | ||
@@ -540,8 +540,11 @@ static void stop_wakeup_tracer(struct trace_array *tr) | |||
540 | 540 | ||
541 | static int __wakeup_tracer_init(struct trace_array *tr) | 541 | static int __wakeup_tracer_init(struct trace_array *tr) |
542 | { | 542 | { |
543 | save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; | 543 | save_flags = trace_flags; |
544 | trace_flags |= TRACE_ITER_LATENCY_FMT; | 544 | |
545 | /* non overwrite screws up the latency tracers */ | ||
546 | set_tracer_flag(TRACE_ITER_OVERWRITE, 1); | ||
547 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1); | ||
545 | 548 | ||
546 | tracing_max_latency = 0; | 549 | tracing_max_latency = 0; |
547 | wakeup_trace = tr; | 550 | wakeup_trace = tr; |
@@ -563,12 +566,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr) | |||
563 | 566 | ||
564 | static void wakeup_tracer_reset(struct trace_array *tr) | 567 | static void wakeup_tracer_reset(struct trace_array *tr) |
565 | { | 568 | { |
569 | int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT; | ||
570 | int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE; | ||
571 | |||
566 | stop_wakeup_tracer(tr); | 572 | stop_wakeup_tracer(tr); |
567 | /* make sure we put back any tasks we are tracing */ | 573 | /* make sure we put back any tasks we are tracing */ |
568 | wakeup_reset(tr); | 574 | wakeup_reset(tr); |
569 | 575 | ||
570 | if (!save_lat_flag) | 576 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag); |
571 | trace_flags &= ~TRACE_ITER_LATENCY_FMT; | 577 | set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag); |
572 | } | 578 | } |
573 | 579 | ||
574 | static void wakeup_tracer_start(struct trace_array *tr) | 580 | static void wakeup_tracer_start(struct trace_array *tr) |
@@ -594,6 +600,7 @@ static struct tracer wakeup_tracer __read_mostly = | |||
594 | .print_line = wakeup_print_line, | 600 | .print_line = wakeup_print_line, |
595 | .flags = &tracer_flags, | 601 | .flags = &tracer_flags, |
596 | .set_flag = wakeup_set_flag, | 602 | .set_flag = wakeup_set_flag, |
603 | .flag_changed = trace_keep_overwrite, | ||
597 | #ifdef CONFIG_FTRACE_SELFTEST | 604 | #ifdef CONFIG_FTRACE_SELFTEST |
598 | .selftest = trace_selftest_startup_wakeup, | 605 | .selftest = trace_selftest_startup_wakeup, |
599 | #endif | 606 | #endif |
@@ -615,6 +622,7 @@ static struct tracer wakeup_rt_tracer __read_mostly = | |||
615 | .print_line = wakeup_print_line, | 622 | .print_line = wakeup_print_line, |
616 | .flags = &tracer_flags, | 623 | .flags = &tracer_flags, |
617 | .set_flag = wakeup_set_flag, | 624 | .set_flag = wakeup_set_flag, |
625 | .flag_changed = trace_keep_overwrite, | ||
618 | #ifdef CONFIG_FTRACE_SELFTEST | 626 | #ifdef CONFIG_FTRACE_SELFTEST |
619 | .selftest = trace_selftest_startup_wakeup, | 627 | .selftest = trace_selftest_startup_wakeup, |
620 | #endif | 628 | #endif |
diff --git a/kernel/user.c b/kernel/user.c index e81978e8c03b..8e635a18ab52 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
@@ -51,6 +51,8 @@ struct user_namespace init_user_ns = { | |||
51 | .owner = GLOBAL_ROOT_UID, | 51 | .owner = GLOBAL_ROOT_UID, |
52 | .group = GLOBAL_ROOT_GID, | 52 | .group = GLOBAL_ROOT_GID, |
53 | .proc_inum = PROC_USER_INIT_INO, | 53 | .proc_inum = PROC_USER_INIT_INO, |
54 | .may_mount_sysfs = true, | ||
55 | .may_mount_proc = true, | ||
54 | }; | 56 | }; |
55 | EXPORT_SYMBOL_GPL(init_user_ns); | 57 | EXPORT_SYMBOL_GPL(init_user_ns); |
56 | 58 | ||
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index b14f4d342043..a54f26f82eb2 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
@@ -61,6 +61,15 @@ int create_user_ns(struct cred *new) | |||
61 | kgid_t group = new->egid; | 61 | kgid_t group = new->egid; |
62 | int ret; | 62 | int ret; |
63 | 63 | ||
64 | /* | ||
65 | * Verify that we can not violate the policy of which files | ||
66 | * may be accessed that is specified by the root directory, | ||
67 | * by verifing that the root directory is at the root of the | ||
68 | * mount namespace which allows all files to be accessed. | ||
69 | */ | ||
70 | if (current_chrooted()) | ||
71 | return -EPERM; | ||
72 | |||
64 | /* The creator needs a mapping in the parent user namespace | 73 | /* The creator needs a mapping in the parent user namespace |
65 | * or else we won't be able to reasonably tell userspace who | 74 | * or else we won't be able to reasonably tell userspace who |
66 | * created a user_namespace. | 75 | * created a user_namespace. |
@@ -87,6 +96,8 @@ int create_user_ns(struct cred *new) | |||
87 | 96 | ||
88 | set_cred_user_ns(new, ns); | 97 | set_cred_user_ns(new, ns); |
89 | 98 | ||
99 | update_mnt_policy(ns); | ||
100 | |||
90 | return 0; | 101 | return 0; |
91 | } | 102 | } |
92 | 103 | ||
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 55fac5b991b7..b48cd597145d 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -3447,28 +3447,34 @@ static void wq_unbind_fn(struct work_struct *work) | |||
3447 | 3447 | ||
3448 | spin_unlock_irq(&pool->lock); | 3448 | spin_unlock_irq(&pool->lock); |
3449 | mutex_unlock(&pool->assoc_mutex); | 3449 | mutex_unlock(&pool->assoc_mutex); |
3450 | } | ||
3451 | 3450 | ||
3452 | /* | 3451 | /* |
3453 | * Call schedule() so that we cross rq->lock and thus can guarantee | 3452 | * Call schedule() so that we cross rq->lock and thus can |
3454 | * sched callbacks see the %WORKER_UNBOUND flag. This is necessary | 3453 | * guarantee sched callbacks see the %WORKER_UNBOUND flag. |
3455 | * as scheduler callbacks may be invoked from other cpus. | 3454 | * This is necessary as scheduler callbacks may be invoked |
3456 | */ | 3455 | * from other cpus. |
3457 | schedule(); | 3456 | */ |
3457 | schedule(); | ||
3458 | 3458 | ||
3459 | /* | 3459 | /* |
3460 | * Sched callbacks are disabled now. Zap nr_running. After this, | 3460 | * Sched callbacks are disabled now. Zap nr_running. |
3461 | * nr_running stays zero and need_more_worker() and keep_working() | 3461 | * After this, nr_running stays zero and need_more_worker() |
3462 | * are always true as long as the worklist is not empty. Pools on | 3462 | * and keep_working() are always true as long as the |
3463 | * @cpu now behave as unbound (in terms of concurrency management) | 3463 | * worklist is not empty. This pool now behaves as an |
3464 | * pools which are served by workers tied to the CPU. | 3464 | * unbound (in terms of concurrency management) pool which |
3465 | * | 3465 | * are served by workers tied to the pool. |
3466 | * On return from this function, the current worker would trigger | 3466 | */ |
3467 | * unbound chain execution of pending work items if other workers | ||
3468 | * didn't already. | ||
3469 | */ | ||
3470 | for_each_std_worker_pool(pool, cpu) | ||
3471 | atomic_set(&pool->nr_running, 0); | 3467 | atomic_set(&pool->nr_running, 0); |
3468 | |||
3469 | /* | ||
3470 | * With concurrency management just turned off, a busy | ||
3471 | * worker blocking could lead to lengthy stalls. Kick off | ||
3472 | * unbound chain execution of currently pending work items. | ||
3473 | */ | ||
3474 | spin_lock_irq(&pool->lock); | ||
3475 | wake_up_worker(pool); | ||
3476 | spin_unlock_irq(&pool->lock); | ||
3477 | } | ||
3472 | } | 3478 | } |
3473 | 3479 | ||
3474 | /* | 3480 | /* |
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c index 9681d54b95d1..f8e0e5367398 100644 --- a/lib/bust_spinlocks.c +++ b/lib/bust_spinlocks.c | |||
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/printk.h> | ||
11 | #include <linux/spinlock.h> | 12 | #include <linux/spinlock.h> |
12 | #include <linux/tty.h> | 13 | #include <linux/tty.h> |
13 | #include <linux/wait.h> | 14 | #include <linux/wait.h> |
@@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlocks(int yes) | |||
28 | wake_up_klogd(); | 29 | wake_up_klogd(); |
29 | } | 30 | } |
30 | } | 31 | } |
31 | |||
32 | |||
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 5e396accd3d0..d87a17a819d0 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
@@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug_entry *ref) | |||
862 | entry = bucket_find_exact(bucket, ref); | 862 | entry = bucket_find_exact(bucket, ref); |
863 | 863 | ||
864 | if (!entry) { | 864 | if (!entry) { |
865 | /* must drop lock before calling dma_mapping_error */ | ||
866 | put_hash_bucket(bucket, &flags); | ||
867 | |||
865 | if (dma_mapping_error(ref->dev, ref->dev_addr)) { | 868 | if (dma_mapping_error(ref->dev, ref->dev_addr)) { |
866 | err_printk(ref->dev, NULL, | 869 | err_printk(ref->dev, NULL, |
867 | "DMA-API: device driver tries " | 870 | "DMA-API: device driver tries to free an " |
868 | "to free an invalid DMA memory address\n"); | 871 | "invalid DMA memory address\n"); |
869 | return; | 872 | } else { |
873 | err_printk(ref->dev, NULL, | ||
874 | "DMA-API: device driver tries to free DMA " | ||
875 | "memory it has not allocated [device " | ||
876 | "address=0x%016llx] [size=%llu bytes]\n", | ||
877 | ref->dev_addr, ref->size); | ||
870 | } | 878 | } |
871 | err_printk(ref->dev, NULL, "DMA-API: device driver tries " | 879 | return; |
872 | "to free DMA memory it has not allocated " | ||
873 | "[device address=0x%016llx] [size=%llu bytes]\n", | ||
874 | ref->dev_addr, ref->size); | ||
875 | goto out; | ||
876 | } | 880 | } |
877 | 881 | ||
878 | if (ref->size != entry->size) { | 882 | if (ref->size != entry->size) { |
@@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug_entry *ref) | |||
936 | hash_bucket_del(entry); | 940 | hash_bucket_del(entry); |
937 | dma_entry_free(entry); | 941 | dma_entry_free(entry); |
938 | 942 | ||
939 | out: | ||
940 | put_hash_bucket(bucket, &flags); | 943 | put_hash_bucket(bucket, &flags); |
941 | } | 944 | } |
942 | 945 | ||
@@ -1082,13 +1085,27 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | |||
1082 | ref.dev = dev; | 1085 | ref.dev = dev; |
1083 | ref.dev_addr = dma_addr; | 1086 | ref.dev_addr = dma_addr; |
1084 | bucket = get_hash_bucket(&ref, &flags); | 1087 | bucket = get_hash_bucket(&ref, &flags); |
1085 | entry = bucket_find_exact(bucket, &ref); | ||
1086 | 1088 | ||
1087 | if (!entry) | 1089 | list_for_each_entry(entry, &bucket->list, list) { |
1088 | goto out; | 1090 | if (!exact_match(&ref, entry)) |
1091 | continue; | ||
1092 | |||
1093 | /* | ||
1094 | * The same physical address can be mapped multiple | ||
1095 | * times. Without a hardware IOMMU this results in the | ||
1096 | * same device addresses being put into the dma-debug | ||
1097 | * hash multiple times too. This can result in false | ||
1098 | * positives being reported. Therefore we implement a | ||
1099 | * best-fit algorithm here which updates the first entry | ||
1100 | * from the hash which fits the reference value and is | ||
1101 | * not currently listed as being checked. | ||
1102 | */ | ||
1103 | if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { | ||
1104 | entry->map_err_type = MAP_ERR_CHECKED; | ||
1105 | break; | ||
1106 | } | ||
1107 | } | ||
1089 | 1108 | ||
1090 | entry->map_err_type = MAP_ERR_CHECKED; | ||
1091 | out: | ||
1092 | put_hash_bucket(bucket, &flags); | 1109 | put_hash_bucket(bucket, &flags); |
1093 | } | 1110 | } |
1094 | EXPORT_SYMBOL(debug_dma_mapping_error); | 1111 | EXPORT_SYMBOL(debug_dma_mapping_error); |
diff --git a/mm/fremap.c b/mm/fremap.c index 4723ac8d2fc2..87da3590c61e 100644 --- a/mm/fremap.c +++ b/mm/fremap.c | |||
@@ -204,10 +204,8 @@ get_write_lock: | |||
204 | unsigned long addr; | 204 | unsigned long addr; |
205 | struct file *file = get_file(vma->vm_file); | 205 | struct file *file = get_file(vma->vm_file); |
206 | 206 | ||
207 | vm_flags = vma->vm_flags; | 207 | addr = mmap_region(file, start, size, |
208 | if (!(flags & MAP_NONBLOCK)) | 208 | vma->vm_flags, pgoff); |
209 | vm_flags |= VM_POPULATE; | ||
210 | addr = mmap_region(file, start, size, vm_flags, pgoff); | ||
211 | fput(file); | 209 | fput(file); |
212 | if (IS_ERR_VALUE(addr)) { | 210 | if (IS_ERR_VALUE(addr)) { |
213 | err = addr; | 211 | err = addr; |
@@ -226,12 +224,6 @@ get_write_lock: | |||
226 | mutex_unlock(&mapping->i_mmap_mutex); | 224 | mutex_unlock(&mapping->i_mmap_mutex); |
227 | } | 225 | } |
228 | 226 | ||
229 | if (!(flags & MAP_NONBLOCK) && !(vma->vm_flags & VM_POPULATE)) { | ||
230 | if (!has_write_lock) | ||
231 | goto get_write_lock; | ||
232 | vma->vm_flags |= VM_POPULATE; | ||
233 | } | ||
234 | |||
235 | if (vma->vm_flags & VM_LOCKED) { | 227 | if (vma->vm_flags & VM_LOCKED) { |
236 | /* | 228 | /* |
237 | * drop PG_Mlocked flag for over-mapped range | 229 | * drop PG_Mlocked flag for over-mapped range |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 0a0be33bb199..ca9a7c6d7e97 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -2124,8 +2124,12 @@ int hugetlb_report_node_meminfo(int nid, char *buf) | |||
2124 | /* Return the number pages of memory we physically have, in PAGE_SIZE units. */ | 2124 | /* Return the number pages of memory we physically have, in PAGE_SIZE units. */ |
2125 | unsigned long hugetlb_total_pages(void) | 2125 | unsigned long hugetlb_total_pages(void) |
2126 | { | 2126 | { |
2127 | struct hstate *h = &default_hstate; | 2127 | struct hstate *h; |
2128 | return h->nr_huge_pages * pages_per_huge_page(h); | 2128 | unsigned long nr_total_pages = 0; |
2129 | |||
2130 | for_each_hstate(h) | ||
2131 | nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); | ||
2132 | return nr_total_pages; | ||
2129 | } | 2133 | } |
2130 | 2134 | ||
2131 | static int hugetlb_acct_memory(struct hstate *h, long delta) | 2135 | static int hugetlb_acct_memory(struct hstate *h, long delta) |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9597eec8239d..ee3765760818 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -1779,7 +1779,11 @@ void try_offline_node(int nid) | |||
1779 | for (i = 0; i < MAX_NR_ZONES; i++) { | 1779 | for (i = 0; i < MAX_NR_ZONES; i++) { |
1780 | struct zone *zone = pgdat->node_zones + i; | 1780 | struct zone *zone = pgdat->node_zones + i; |
1781 | 1781 | ||
1782 | if (zone->wait_table) | 1782 | /* |
1783 | * wait_table may be allocated from boot memory, | ||
1784 | * here we only free it if it was allocated by vmalloc. | ||
1785 | */ | ||
1786 | if (is_vmalloc_addr(zone->wait_table)) | ||
1783 | vfree(zone->wait_table); | 1787 | vfree(zone->wait_table); |
1784 | } | 1788 | } |
1785 | 1789 | ||
diff --git a/mm/mlock.c b/mm/mlock.c index 1c5e33fce639..79b7cf7d1bca 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -358,7 +358,7 @@ static int do_mlock(unsigned long start, size_t len, int on) | |||
358 | 358 | ||
359 | newflags = vma->vm_flags & ~VM_LOCKED; | 359 | newflags = vma->vm_flags & ~VM_LOCKED; |
360 | if (on) | 360 | if (on) |
361 | newflags |= VM_LOCKED | VM_POPULATE; | 361 | newflags |= VM_LOCKED; |
362 | 362 | ||
363 | tmp = vma->vm_end; | 363 | tmp = vma->vm_end; |
364 | if (tmp > end) | 364 | if (tmp > end) |
@@ -418,8 +418,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) | |||
418 | * range with the first VMA. Also, skip undesirable VMA types. | 418 | * range with the first VMA. Also, skip undesirable VMA types. |
419 | */ | 419 | */ |
420 | nend = min(end, vma->vm_end); | 420 | nend = min(end, vma->vm_end); |
421 | if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_POPULATE)) != | 421 | if (vma->vm_flags & (VM_IO | VM_PFNMAP)) |
422 | VM_POPULATE) | ||
423 | continue; | 422 | continue; |
424 | if (nstart < vma->vm_start) | 423 | if (nstart < vma->vm_start) |
425 | nstart = vma->vm_start; | 424 | nstart = vma->vm_start; |
@@ -492,9 +491,9 @@ static int do_mlockall(int flags) | |||
492 | struct vm_area_struct * vma, * prev = NULL; | 491 | struct vm_area_struct * vma, * prev = NULL; |
493 | 492 | ||
494 | if (flags & MCL_FUTURE) | 493 | if (flags & MCL_FUTURE) |
495 | current->mm->def_flags |= VM_LOCKED | VM_POPULATE; | 494 | current->mm->def_flags |= VM_LOCKED; |
496 | else | 495 | else |
497 | current->mm->def_flags &= ~(VM_LOCKED | VM_POPULATE); | 496 | current->mm->def_flags &= ~VM_LOCKED; |
498 | if (flags == MCL_FUTURE) | 497 | if (flags == MCL_FUTURE) |
499 | goto out; | 498 | goto out; |
500 | 499 | ||
@@ -503,7 +502,7 @@ static int do_mlockall(int flags) | |||
503 | 502 | ||
504 | newflags = vma->vm_flags & ~VM_LOCKED; | 503 | newflags = vma->vm_flags & ~VM_LOCKED; |
505 | if (flags & MCL_CURRENT) | 504 | if (flags & MCL_CURRENT) |
506 | newflags |= VM_LOCKED | VM_POPULATE; | 505 | newflags |= VM_LOCKED; |
507 | 506 | ||
508 | /* Ignore errors */ | 507 | /* Ignore errors */ |
509 | mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); | 508 | mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); |
@@ -1306,7 +1306,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, | |||
1306 | } | 1306 | } |
1307 | 1307 | ||
1308 | addr = mmap_region(file, addr, len, vm_flags, pgoff); | 1308 | addr = mmap_region(file, addr, len, vm_flags, pgoff); |
1309 | if (!IS_ERR_VALUE(addr) && (vm_flags & VM_POPULATE)) | 1309 | if (!IS_ERR_VALUE(addr) && |
1310 | ((vm_flags & VM_LOCKED) || | ||
1311 | (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE)) | ||
1310 | *populate = len; | 1312 | *populate = len; |
1311 | return addr; | 1313 | return addr; |
1312 | } | 1314 | } |
@@ -1938,7 +1940,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) | |||
1938 | 1940 | ||
1939 | /* Check the cache first. */ | 1941 | /* Check the cache first. */ |
1940 | /* (Cache hit rate is typically around 35%.) */ | 1942 | /* (Cache hit rate is typically around 35%.) */ |
1941 | vma = mm->mmap_cache; | 1943 | vma = ACCESS_ONCE(mm->mmap_cache); |
1942 | if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { | 1944 | if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { |
1943 | struct rb_node *rb_node; | 1945 | struct rb_node *rb_node; |
1944 | 1946 | ||
diff --git a/mm/nommu.c b/mm/nommu.c index e19328087534..2f3ea749c318 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -821,7 +821,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) | |||
821 | struct vm_area_struct *vma; | 821 | struct vm_area_struct *vma; |
822 | 822 | ||
823 | /* check the cache first */ | 823 | /* check the cache first */ |
824 | vma = mm->mmap_cache; | 824 | vma = ACCESS_ONCE(mm->mmap_cache); |
825 | if (vma && vma->vm_start <= addr && vma->vm_end > addr) | 825 | if (vma && vma->vm_start <= addr && vma->vm_end > addr) |
826 | return vma; | 826 | return vma; |
827 | 827 | ||
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index a18714469bf7..85addcd9372b 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -86,13 +86,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) | |||
86 | 86 | ||
87 | grp = &vlan_info->grp; | 87 | grp = &vlan_info->grp; |
88 | 88 | ||
89 | /* Take it out of our own structures, but be sure to interlock with | ||
90 | * HW accelerating devices or SW vlan input packet processing if | ||
91 | * VLAN is not 0 (leave it there for 802.1p). | ||
92 | */ | ||
93 | if (vlan_id) | ||
94 | vlan_vid_del(real_dev, vlan_id); | ||
95 | |||
96 | grp->nr_vlan_devs--; | 89 | grp->nr_vlan_devs--; |
97 | 90 | ||
98 | if (vlan->flags & VLAN_FLAG_MVRP) | 91 | if (vlan->flags & VLAN_FLAG_MVRP) |
@@ -114,6 +107,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) | |||
114 | vlan_gvrp_uninit_applicant(real_dev); | 107 | vlan_gvrp_uninit_applicant(real_dev); |
115 | } | 108 | } |
116 | 109 | ||
110 | /* Take it out of our own structures, but be sure to interlock with | ||
111 | * HW accelerating devices or SW vlan input packet processing if | ||
112 | * VLAN is not 0 (leave it there for 802.1p). | ||
113 | */ | ||
114 | if (vlan_id) | ||
115 | vlan_vid_del(real_dev, vlan_id); | ||
116 | |||
117 | /* Get rid of the vlan's reference to real_dev */ | 117 | /* Get rid of the vlan's reference to real_dev */ |
118 | dev_put(real_dev); | 118 | dev_put(real_dev); |
119 | } | 119 | } |
diff --git a/net/atm/common.c b/net/atm/common.c index 7b491006eaf4..737bef59ce89 100644 --- a/net/atm/common.c +++ b/net/atm/common.c | |||
@@ -531,6 +531,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, | |||
531 | struct sk_buff *skb; | 531 | struct sk_buff *skb; |
532 | int copied, error = -EINVAL; | 532 | int copied, error = -EINVAL; |
533 | 533 | ||
534 | msg->msg_namelen = 0; | ||
535 | |||
534 | if (sock->state != SS_CONNECTED) | 536 | if (sock->state != SS_CONNECTED) |
535 | return -ENOTCONN; | 537 | return -ENOTCONN; |
536 | 538 | ||
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 7b11f8bc5071..e277e38f736b 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c | |||
@@ -1642,6 +1642,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1642 | ax25_address src; | 1642 | ax25_address src; |
1643 | const unsigned char *mac = skb_mac_header(skb); | 1643 | const unsigned char *mac = skb_mac_header(skb); |
1644 | 1644 | ||
1645 | memset(sax, 0, sizeof(struct full_sockaddr_ax25)); | ||
1645 | ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, | 1646 | ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, |
1646 | &digi, NULL, NULL); | 1647 | &digi, NULL, NULL); |
1647 | sax->sax25_family = AF_AX25; | 1648 | sax->sax25_family = AF_AX25; |
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index a0b253ecadaf..a5bb0a769eb9 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c | |||
@@ -1288,7 +1288,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb, | |||
1288 | batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff; | 1288 | batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff; |
1289 | 1289 | ||
1290 | /* unpack the aggregated packets and process them one by one */ | 1290 | /* unpack the aggregated packets and process them one by one */ |
1291 | do { | 1291 | while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, |
1292 | batadv_ogm_packet->tt_num_changes)) { | ||
1292 | tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN; | 1293 | tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN; |
1293 | 1294 | ||
1294 | batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff, | 1295 | batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff, |
@@ -1299,8 +1300,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb, | |||
1299 | 1300 | ||
1300 | packet_pos = packet_buff + buff_pos; | 1301 | packet_pos = packet_buff + buff_pos; |
1301 | batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; | 1302 | batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; |
1302 | } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, | 1303 | } |
1303 | batadv_ogm_packet->tt_num_changes)); | ||
1304 | 1304 | ||
1305 | kfree_skb(skb); | 1305 | kfree_skb(skb); |
1306 | return NET_RX_SUCCESS; | 1306 | return NET_RX_SUCCESS; |
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index d3ee69b35a78..0d1b08cc76e1 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c | |||
@@ -230,6 +230,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
230 | if (flags & (MSG_OOB)) | 230 | if (flags & (MSG_OOB)) |
231 | return -EOPNOTSUPP; | 231 | return -EOPNOTSUPP; |
232 | 232 | ||
233 | msg->msg_namelen = 0; | ||
234 | |||
233 | skb = skb_recv_datagram(sk, flags, noblock, &err); | 235 | skb = skb_recv_datagram(sk, flags, noblock, &err); |
234 | if (!skb) { | 236 | if (!skb) { |
235 | if (sk->sk_shutdown & RCV_SHUTDOWN) | 237 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
@@ -237,8 +239,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
237 | return err; | 239 | return err; |
238 | } | 240 | } |
239 | 241 | ||
240 | msg->msg_namelen = 0; | ||
241 | |||
242 | copied = skb->len; | 242 | copied = skb->len; |
243 | if (len < copied) { | 243 | if (len < copied) { |
244 | msg->msg_flags |= MSG_TRUNC; | 244 | msg->msg_flags |= MSG_TRUNC; |
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index c23bae86263b..7c9224bcce17 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
@@ -608,6 +608,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
608 | 608 | ||
609 | if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { | 609 | if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { |
610 | rfcomm_dlc_accept(d); | 610 | rfcomm_dlc_accept(d); |
611 | msg->msg_namelen = 0; | ||
611 | return 0; | 612 | return 0; |
612 | } | 613 | } |
613 | 614 | ||
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 79d87d8d4f51..fb6192c9812e 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -359,6 +359,7 @@ static void __sco_sock_close(struct sock *sk) | |||
359 | sco_chan_del(sk, ECONNRESET); | 359 | sco_chan_del(sk, ECONNRESET); |
360 | break; | 360 | break; |
361 | 361 | ||
362 | case BT_CONNECT2: | ||
362 | case BT_CONNECT: | 363 | case BT_CONNECT: |
363 | case BT_DISCONN: | 364 | case BT_DISCONN: |
364 | sco_chan_del(sk, ECONNRESET); | 365 | sco_chan_del(sk, ECONNRESET); |
@@ -664,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
664 | test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { | 665 | test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { |
665 | hci_conn_accept(pi->conn->hcon, 0); | 666 | hci_conn_accept(pi->conn->hcon, 0); |
666 | sk->sk_state = BT_CONFIG; | 667 | sk->sk_state = BT_CONFIG; |
668 | msg->msg_namelen = 0; | ||
667 | 669 | ||
668 | release_sock(sk); | 670 | release_sock(sk); |
669 | return 0; | 671 | return 0; |
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index b0812c91c0f0..bab338e6270d 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c | |||
@@ -423,7 +423,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, | |||
423 | return 0; | 423 | return 0; |
424 | br_warn(br, "adding interface %s with same address " | 424 | br_warn(br, "adding interface %s with same address " |
425 | "as a received packet\n", | 425 | "as a received packet\n", |
426 | source->dev->name); | 426 | source ? source->dev->name : br->dev->name); |
427 | fdb_delete(br, fdb); | 427 | fdb_delete(br, fdb); |
428 | } | 428 | } |
429 | 429 | ||
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 27aa3ee517ce..299fc5f40a26 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -29,6 +29,7 @@ static inline size_t br_port_info_size(void) | |||
29 | + nla_total_size(1) /* IFLA_BRPORT_MODE */ | 29 | + nla_total_size(1) /* IFLA_BRPORT_MODE */ |
30 | + nla_total_size(1) /* IFLA_BRPORT_GUARD */ | 30 | + nla_total_size(1) /* IFLA_BRPORT_GUARD */ |
31 | + nla_total_size(1) /* IFLA_BRPORT_PROTECT */ | 31 | + nla_total_size(1) /* IFLA_BRPORT_PROTECT */ |
32 | + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ | ||
32 | + 0; | 33 | + 0; |
33 | } | 34 | } |
34 | 35 | ||
@@ -329,6 +330,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) | |||
329 | br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); | 330 | br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE); |
330 | br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); | 331 | br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD); |
331 | br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); | 332 | br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); |
333 | br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); | ||
332 | 334 | ||
333 | if (tb[IFLA_BRPORT_COST]) { | 335 | if (tb[IFLA_BRPORT_COST]) { |
334 | err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); | 336 | err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); |
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 095259f83902..ff2ff3ce6965 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c | |||
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
286 | if (m->msg_flags&MSG_OOB) | 286 | if (m->msg_flags&MSG_OOB) |
287 | goto read_error; | 287 | goto read_error; |
288 | 288 | ||
289 | m->msg_namelen = 0; | ||
290 | |||
289 | skb = skb_recv_datagram(sk, flags, 0 , &ret); | 291 | skb = skb_recv_datagram(sk, flags, 0 , &ret); |
290 | if (!skb) | 292 | if (!skb) |
291 | goto read_error; | 293 | goto read_error; |
diff --git a/net/can/gw.c b/net/can/gw.c index 2d117dc5ebea..117814a7e73c 100644 --- a/net/can/gw.c +++ b/net/can/gw.c | |||
@@ -466,7 +466,7 @@ static int cgw_notifier(struct notifier_block *nb, | |||
466 | if (gwj->src.dev == dev || gwj->dst.dev == dev) { | 466 | if (gwj->src.dev == dev || gwj->dst.dev == dev) { |
467 | hlist_del(&gwj->list); | 467 | hlist_del(&gwj->list); |
468 | cgw_unregister_filter(gwj); | 468 | cgw_unregister_filter(gwj); |
469 | kfree(gwj); | 469 | kmem_cache_free(cgw_cache, gwj); |
470 | } | 470 | } |
471 | } | 471 | } |
472 | } | 472 | } |
@@ -864,7 +864,7 @@ static void cgw_remove_all_jobs(void) | |||
864 | hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { | 864 | hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) { |
865 | hlist_del(&gwj->list); | 865 | hlist_del(&gwj->list); |
866 | cgw_unregister_filter(gwj); | 866 | cgw_unregister_filter(gwj); |
867 | kfree(gwj); | 867 | kmem_cache_free(cgw_cache, gwj); |
868 | } | 868 | } |
869 | } | 869 | } |
870 | 870 | ||
@@ -920,7 +920,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) | |||
920 | 920 | ||
921 | hlist_del(&gwj->list); | 921 | hlist_del(&gwj->list); |
922 | cgw_unregister_filter(gwj); | 922 | cgw_unregister_filter(gwj); |
923 | kfree(gwj); | 923 | kmem_cache_free(cgw_cache, gwj); |
924 | err = 0; | 924 | err = 0; |
925 | break; | 925 | break; |
926 | } | 926 | } |
diff --git a/net/core/dev.c b/net/core/dev.c index dffbef70cd31..e7d68ed8aafe 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1545,7 +1545,6 @@ void net_enable_timestamp(void) | |||
1545 | return; | 1545 | return; |
1546 | } | 1546 | } |
1547 | #endif | 1547 | #endif |
1548 | WARN_ON(in_interrupt()); | ||
1549 | static_key_slow_inc(&netstamp_needed); | 1548 | static_key_slow_inc(&netstamp_needed); |
1550 | } | 1549 | } |
1551 | EXPORT_SYMBOL(net_enable_timestamp); | 1550 | EXPORT_SYMBOL(net_enable_timestamp); |
@@ -1625,7 +1624,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) | |||
1625 | } | 1624 | } |
1626 | 1625 | ||
1627 | skb_orphan(skb); | 1626 | skb_orphan(skb); |
1628 | nf_reset(skb); | ||
1629 | 1627 | ||
1630 | if (unlikely(!is_skb_forwardable(dev, skb))) { | 1628 | if (unlikely(!is_skb_forwardable(dev, skb))) { |
1631 | atomic_long_inc(&dev->rx_dropped); | 1629 | atomic_long_inc(&dev->rx_dropped); |
@@ -1641,6 +1639,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) | |||
1641 | skb->mark = 0; | 1639 | skb->mark = 0; |
1642 | secpath_reset(skb); | 1640 | secpath_reset(skb); |
1643 | nf_reset(skb); | 1641 | nf_reset(skb); |
1642 | nf_reset_trace(skb); | ||
1644 | return netif_rx(skb); | 1643 | return netif_rx(skb); |
1645 | } | 1644 | } |
1646 | EXPORT_SYMBOL_GPL(dev_forward_skb); | 1645 | EXPORT_SYMBOL_GPL(dev_forward_skb); |
@@ -2219,9 +2218,9 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, | |||
2219 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); | 2218 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); |
2220 | struct packet_offload *ptype; | 2219 | struct packet_offload *ptype; |
2221 | __be16 type = skb->protocol; | 2220 | __be16 type = skb->protocol; |
2221 | int vlan_depth = ETH_HLEN; | ||
2222 | 2222 | ||
2223 | while (type == htons(ETH_P_8021Q)) { | 2223 | while (type == htons(ETH_P_8021Q)) { |
2224 | int vlan_depth = ETH_HLEN; | ||
2225 | struct vlan_hdr *vh; | 2224 | struct vlan_hdr *vh; |
2226 | 2225 | ||
2227 | if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) | 2226 | if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) |
@@ -3315,6 +3314,7 @@ int netdev_rx_handler_register(struct net_device *dev, | |||
3315 | if (dev->rx_handler) | 3314 | if (dev->rx_handler) |
3316 | return -EBUSY; | 3315 | return -EBUSY; |
3317 | 3316 | ||
3317 | /* Note: rx_handler_data must be set before rx_handler */ | ||
3318 | rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); | 3318 | rcu_assign_pointer(dev->rx_handler_data, rx_handler_data); |
3319 | rcu_assign_pointer(dev->rx_handler, rx_handler); | 3319 | rcu_assign_pointer(dev->rx_handler, rx_handler); |
3320 | 3320 | ||
@@ -3335,6 +3335,11 @@ void netdev_rx_handler_unregister(struct net_device *dev) | |||
3335 | 3335 | ||
3336 | ASSERT_RTNL(); | 3336 | ASSERT_RTNL(); |
3337 | RCU_INIT_POINTER(dev->rx_handler, NULL); | 3337 | RCU_INIT_POINTER(dev->rx_handler, NULL); |
3338 | /* a reader seeing a non NULL rx_handler in a rcu_read_lock() | ||
3339 | * section has a guarantee to see a non NULL rx_handler_data | ||
3340 | * as well. | ||
3341 | */ | ||
3342 | synchronize_net(); | ||
3338 | RCU_INIT_POINTER(dev->rx_handler_data, NULL); | 3343 | RCU_INIT_POINTER(dev->rx_handler_data, NULL); |
3339 | } | 3344 | } |
3340 | EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); | 3345 | EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); |
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index bd2eb9d3e369..abdc9e6ef33e 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c | |||
@@ -37,7 +37,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list, | |||
37 | ha->type = addr_type; | 37 | ha->type = addr_type; |
38 | ha->refcount = 1; | 38 | ha->refcount = 1; |
39 | ha->global_use = global; | 39 | ha->global_use = global; |
40 | ha->synced = false; | 40 | ha->synced = 0; |
41 | list_add_tail_rcu(&ha->list, &list->list); | 41 | list_add_tail_rcu(&ha->list, &list->list); |
42 | list->count++; | 42 | list->count++; |
43 | 43 | ||
@@ -165,7 +165,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list, | |||
165 | addr_len, ha->type); | 165 | addr_len, ha->type); |
166 | if (err) | 166 | if (err) |
167 | break; | 167 | break; |
168 | ha->synced = true; | 168 | ha->synced++; |
169 | ha->refcount++; | 169 | ha->refcount++; |
170 | } else if (ha->refcount == 1) { | 170 | } else if (ha->refcount == 1) { |
171 | __hw_addr_del(to_list, ha->addr, addr_len, ha->type); | 171 | __hw_addr_del(to_list, ha->addr, addr_len, ha->type); |
@@ -186,7 +186,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, | |||
186 | if (ha->synced) { | 186 | if (ha->synced) { |
187 | __hw_addr_del(to_list, ha->addr, | 187 | __hw_addr_del(to_list, ha->addr, |
188 | addr_len, ha->type); | 188 | addr_len, ha->type); |
189 | ha->synced = false; | 189 | ha->synced--; |
190 | __hw_addr_del(from_list, ha->addr, | 190 | __hw_addr_del(from_list, ha->addr, |
191 | addr_len, ha->type); | 191 | addr_len, ha->type); |
192 | } | 192 | } |
diff --git a/net/core/flow.c b/net/core/flow.c index c56ea6f7f6c7..2bfd081c59f7 100644 --- a/net/core/flow.c +++ b/net/core/flow.c | |||
@@ -328,7 +328,7 @@ static void flow_cache_flush_per_cpu(void *data) | |||
328 | struct flow_flush_info *info = data; | 328 | struct flow_flush_info *info = data; |
329 | struct tasklet_struct *tasklet; | 329 | struct tasklet_struct *tasklet; |
330 | 330 | ||
331 | tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet); | 331 | tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet; |
332 | tasklet->data = (unsigned long)info; | 332 | tasklet->data = (unsigned long)info; |
333 | tasklet_schedule(tasklet); | 333 | tasklet_schedule(tasklet); |
334 | } | 334 | } |
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 9d4c7201400d..e187bf06d673 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
@@ -140,6 +140,8 @@ ipv6: | |||
140 | flow->ports = *ports; | 140 | flow->ports = *ports; |
141 | } | 141 | } |
142 | 142 | ||
143 | flow->thoff = (u16) nhoff; | ||
144 | |||
143 | return true; | 145 | return true; |
144 | } | 146 | } |
145 | EXPORT_SYMBOL(skb_flow_dissect); | 147 | EXPORT_SYMBOL(skb_flow_dissect); |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a585d45cc9d9..23854b51a259 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -496,8 +496,10 @@ static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev) | |||
496 | } | 496 | } |
497 | if (ops->fill_info) { | 497 | if (ops->fill_info) { |
498 | data = nla_nest_start(skb, IFLA_INFO_DATA); | 498 | data = nla_nest_start(skb, IFLA_INFO_DATA); |
499 | if (data == NULL) | 499 | if (data == NULL) { |
500 | err = -EMSGSIZE; | ||
500 | goto err_cancel_link; | 501 | goto err_cancel_link; |
502 | } | ||
501 | err = ops->fill_info(skb, dev); | 503 | err = ops->fill_info(skb, dev); |
502 | if (err < 0) | 504 | if (err < 0) |
503 | goto err_cancel_data; | 505 | goto err_cancel_data; |
@@ -1070,7 +1072,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
1070 | rcu_read_lock(); | 1072 | rcu_read_lock(); |
1071 | cb->seq = net->dev_base_seq; | 1073 | cb->seq = net->dev_base_seq; |
1072 | 1074 | ||
1073 | if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, | 1075 | if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, |
1074 | ifla_policy) >= 0) { | 1076 | ifla_policy) >= 0) { |
1075 | 1077 | ||
1076 | if (tb[IFLA_EXT_MASK]) | 1078 | if (tb[IFLA_EXT_MASK]) |
@@ -1920,7 +1922,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
1920 | u32 ext_filter_mask = 0; | 1922 | u32 ext_filter_mask = 0; |
1921 | u16 min_ifinfo_dump_size = 0; | 1923 | u16 min_ifinfo_dump_size = 0; |
1922 | 1924 | ||
1923 | if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, | 1925 | if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, |
1924 | ifla_policy) >= 0) { | 1926 | ifla_policy) >= 0) { |
1925 | if (tb[IFLA_EXT_MASK]) | 1927 | if (tb[IFLA_EXT_MASK]) |
1926 | ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); | 1928 | ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); |
@@ -2621,7 +2623,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
2621 | struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); | 2623 | struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); |
2622 | 2624 | ||
2623 | while (RTA_OK(attr, attrlen)) { | 2625 | while (RTA_OK(attr, attrlen)) { |
2624 | unsigned int flavor = attr->rta_type; | 2626 | unsigned int flavor = attr->rta_type & NLA_TYPE_MASK; |
2625 | if (flavor) { | 2627 | if (flavor) { |
2626 | if (flavor > rta_max[sz_idx]) | 2628 | if (flavor > rta_max[sz_idx]) |
2627 | return -EINVAL; | 2629 | return -EINVAL; |
diff --git a/net/core/scm.c b/net/core/scm.c index 905dcc6ad1e3..2dc6cdaaae8a 100644 --- a/net/core/scm.c +++ b/net/core/scm.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/netdevice.h> | 25 | #include <linux/netdevice.h> |
26 | #include <linux/security.h> | 26 | #include <linux/security.h> |
27 | #include <linux/pid_namespace.h> | ||
27 | #include <linux/pid.h> | 28 | #include <linux/pid.h> |
28 | #include <linux/nsproxy.h> | 29 | #include <linux/nsproxy.h> |
29 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
@@ -52,7 +53,8 @@ static __inline__ int scm_check_creds(struct ucred *creds) | |||
52 | if (!uid_valid(uid) || !gid_valid(gid)) | 53 | if (!uid_valid(uid) || !gid_valid(gid)) |
53 | return -EINVAL; | 54 | return -EINVAL; |
54 | 55 | ||
55 | if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) && | 56 | if ((creds->pid == task_tgid_vnr(current) || |
57 | ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) && | ||
56 | ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || | 58 | ((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) || |
57 | uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && | 59 | uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) && |
58 | ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || | 60 | ((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) || |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 68f6a94f7661..c929d9c1c4b6 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -1333,8 +1333,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, | |||
1333 | iph->frag_off |= htons(IP_MF); | 1333 | iph->frag_off |= htons(IP_MF); |
1334 | offset += (skb->len - skb->mac_len - iph->ihl * 4); | 1334 | offset += (skb->len - skb->mac_len - iph->ihl * 4); |
1335 | } else { | 1335 | } else { |
1336 | if (!(iph->frag_off & htons(IP_DF))) | 1336 | iph->id = htons(id++); |
1337 | iph->id = htons(id++); | ||
1338 | } | 1337 | } |
1339 | iph->tot_len = htons(skb->len - skb->mac_len); | 1338 | iph->tot_len = htons(skb->len - skb->mac_len); |
1340 | iph->check = 0; | 1339 | iph->check = 0; |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index f678507bc829..c6287cd978c2 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -587,13 +587,16 @@ static void check_lifetime(struct work_struct *work) | |||
587 | { | 587 | { |
588 | unsigned long now, next, next_sec, next_sched; | 588 | unsigned long now, next, next_sec, next_sched; |
589 | struct in_ifaddr *ifa; | 589 | struct in_ifaddr *ifa; |
590 | struct hlist_node *n; | ||
590 | int i; | 591 | int i; |
591 | 592 | ||
592 | now = jiffies; | 593 | now = jiffies; |
593 | next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); | 594 | next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); |
594 | 595 | ||
595 | rcu_read_lock(); | ||
596 | for (i = 0; i < IN4_ADDR_HSIZE; i++) { | 596 | for (i = 0; i < IN4_ADDR_HSIZE; i++) { |
597 | bool change_needed = false; | ||
598 | |||
599 | rcu_read_lock(); | ||
597 | hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) { | 600 | hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) { |
598 | unsigned long age; | 601 | unsigned long age; |
599 | 602 | ||
@@ -606,16 +609,7 @@ static void check_lifetime(struct work_struct *work) | |||
606 | 609 | ||
607 | if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && | 610 | if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && |
608 | age >= ifa->ifa_valid_lft) { | 611 | age >= ifa->ifa_valid_lft) { |
609 | struct in_ifaddr **ifap ; | 612 | change_needed = true; |
610 | |||
611 | rtnl_lock(); | ||
612 | for (ifap = &ifa->ifa_dev->ifa_list; | ||
613 | *ifap != NULL; ifap = &ifa->ifa_next) { | ||
614 | if (*ifap == ifa) | ||
615 | inet_del_ifa(ifa->ifa_dev, | ||
616 | ifap, 1); | ||
617 | } | ||
618 | rtnl_unlock(); | ||
619 | } else if (ifa->ifa_preferred_lft == | 613 | } else if (ifa->ifa_preferred_lft == |
620 | INFINITY_LIFE_TIME) { | 614 | INFINITY_LIFE_TIME) { |
621 | continue; | 615 | continue; |
@@ -625,10 +619,8 @@ static void check_lifetime(struct work_struct *work) | |||
625 | next = ifa->ifa_tstamp + | 619 | next = ifa->ifa_tstamp + |
626 | ifa->ifa_valid_lft * HZ; | 620 | ifa->ifa_valid_lft * HZ; |
627 | 621 | ||
628 | if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) { | 622 | if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) |
629 | ifa->ifa_flags |= IFA_F_DEPRECATED; | 623 | change_needed = true; |
630 | rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); | ||
631 | } | ||
632 | } else if (time_before(ifa->ifa_tstamp + | 624 | } else if (time_before(ifa->ifa_tstamp + |
633 | ifa->ifa_preferred_lft * HZ, | 625 | ifa->ifa_preferred_lft * HZ, |
634 | next)) { | 626 | next)) { |
@@ -636,8 +628,42 @@ static void check_lifetime(struct work_struct *work) | |||
636 | ifa->ifa_preferred_lft * HZ; | 628 | ifa->ifa_preferred_lft * HZ; |
637 | } | 629 | } |
638 | } | 630 | } |
631 | rcu_read_unlock(); | ||
632 | if (!change_needed) | ||
633 | continue; | ||
634 | rtnl_lock(); | ||
635 | hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) { | ||
636 | unsigned long age; | ||
637 | |||
638 | if (ifa->ifa_flags & IFA_F_PERMANENT) | ||
639 | continue; | ||
640 | |||
641 | /* We try to batch several events at once. */ | ||
642 | age = (now - ifa->ifa_tstamp + | ||
643 | ADDRCONF_TIMER_FUZZ_MINUS) / HZ; | ||
644 | |||
645 | if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && | ||
646 | age >= ifa->ifa_valid_lft) { | ||
647 | struct in_ifaddr **ifap; | ||
648 | |||
649 | for (ifap = &ifa->ifa_dev->ifa_list; | ||
650 | *ifap != NULL; ifap = &(*ifap)->ifa_next) { | ||
651 | if (*ifap == ifa) { | ||
652 | inet_del_ifa(ifa->ifa_dev, | ||
653 | ifap, 1); | ||
654 | break; | ||
655 | } | ||
656 | } | ||
657 | } else if (ifa->ifa_preferred_lft != | ||
658 | INFINITY_LIFE_TIME && | ||
659 | age >= ifa->ifa_preferred_lft && | ||
660 | !(ifa->ifa_flags & IFA_F_DEPRECATED)) { | ||
661 | ifa->ifa_flags |= IFA_F_DEPRECATED; | ||
662 | rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); | ||
663 | } | ||
664 | } | ||
665 | rtnl_unlock(); | ||
639 | } | 666 | } |
640 | rcu_read_unlock(); | ||
641 | 667 | ||
642 | next_sec = round_jiffies_up(next); | 668 | next_sec = round_jiffies_up(next); |
643 | next_sched = next; | 669 | next_sched = next; |
@@ -802,8 +828,12 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg | |||
802 | if (nlh->nlmsg_flags & NLM_F_EXCL || | 828 | if (nlh->nlmsg_flags & NLM_F_EXCL || |
803 | !(nlh->nlmsg_flags & NLM_F_REPLACE)) | 829 | !(nlh->nlmsg_flags & NLM_F_REPLACE)) |
804 | return -EEXIST; | 830 | return -EEXIST; |
805 | 831 | ifa = ifa_existing; | |
806 | set_ifa_lifetime(ifa_existing, valid_lft, prefered_lft); | 832 | set_ifa_lifetime(ifa, valid_lft, prefered_lft); |
833 | cancel_delayed_work(&check_lifetime_work); | ||
834 | schedule_delayed_work(&check_lifetime_work, 0); | ||
835 | rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); | ||
836 | blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa); | ||
807 | } | 837 | } |
808 | return 0; | 838 | return 0; |
809 | } | 839 | } |
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 245ae078a07f..f4fd23de9b13 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/rtnetlink.h> | 21 | #include <linux/rtnetlink.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | 23 | ||
24 | #include <net/sock.h> | ||
24 | #include <net/inet_frag.h> | 25 | #include <net/inet_frag.h> |
25 | 26 | ||
26 | static void inet_frag_secret_rebuild(unsigned long dummy) | 27 | static void inet_frag_secret_rebuild(unsigned long dummy) |
@@ -277,6 +278,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, | |||
277 | __releases(&f->lock) | 278 | __releases(&f->lock) |
278 | { | 279 | { |
279 | struct inet_frag_queue *q; | 280 | struct inet_frag_queue *q; |
281 | int depth = 0; | ||
280 | 282 | ||
281 | hlist_for_each_entry(q, &f->hash[hash], list) { | 283 | hlist_for_each_entry(q, &f->hash[hash], list) { |
282 | if (q->net == nf && f->match(q, key)) { | 284 | if (q->net == nf && f->match(q, key)) { |
@@ -284,9 +286,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, | |||
284 | read_unlock(&f->lock); | 286 | read_unlock(&f->lock); |
285 | return q; | 287 | return q; |
286 | } | 288 | } |
289 | depth++; | ||
287 | } | 290 | } |
288 | read_unlock(&f->lock); | 291 | read_unlock(&f->lock); |
289 | 292 | ||
290 | return inet_frag_create(nf, f, key); | 293 | if (depth <= INETFRAGS_MAXDEPTH) |
294 | return inet_frag_create(nf, f, key); | ||
295 | else | ||
296 | return ERR_PTR(-ENOBUFS); | ||
291 | } | 297 | } |
292 | EXPORT_SYMBOL(inet_frag_find); | 298 | EXPORT_SYMBOL(inet_frag_find); |
299 | |||
300 | void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, | ||
301 | const char *prefix) | ||
302 | { | ||
303 | static const char msg[] = "inet_frag_find: Fragment hash bucket" | ||
304 | " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH) | ||
305 | ". Dropping fragment.\n"; | ||
306 | |||
307 | if (PTR_ERR(q) == -ENOBUFS) | ||
308 | LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg); | ||
309 | } | ||
310 | EXPORT_SYMBOL(inet_frag_maybe_warn_overflow); | ||
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index b6d30acb600c..a6445b843ef4 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -292,14 +292,11 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user) | |||
292 | hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); | 292 | hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); |
293 | 293 | ||
294 | q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash); | 294 | q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash); |
295 | if (q == NULL) | 295 | if (IS_ERR_OR_NULL(q)) { |
296 | goto out_nomem; | 296 | inet_frag_maybe_warn_overflow(q, pr_fmt()); |
297 | 297 | return NULL; | |
298 | } | ||
298 | return container_of(q, struct ipq, q); | 299 | return container_of(q, struct ipq, q); |
299 | |||
300 | out_nomem: | ||
301 | LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n")); | ||
302 | return NULL; | ||
303 | } | 300 | } |
304 | 301 | ||
305 | /* Is the fragment too far ahead to be part of ipq? */ | 302 | /* Is the fragment too far ahead to be part of ipq? */ |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d0ef0e674ec5..91d66dbde9c0 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -798,10 +798,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
798 | 798 | ||
799 | if (dev->header_ops && dev->type == ARPHRD_IPGRE) { | 799 | if (dev->header_ops && dev->type == ARPHRD_IPGRE) { |
800 | gre_hlen = 0; | 800 | gre_hlen = 0; |
801 | if (skb->protocol == htons(ETH_P_IP)) | 801 | tiph = (const struct iphdr *)skb->data; |
802 | tiph = (const struct iphdr *)skb->data; | ||
803 | else | ||
804 | tiph = &tunnel->parms.iph; | ||
805 | } else { | 802 | } else { |
806 | gre_hlen = tunnel->hlen; | 803 | gre_hlen = tunnel->hlen; |
807 | tiph = &tunnel->parms.iph; | 804 | tiph = &tunnel->parms.iph; |
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 310a3647c83d..ec7264514a82 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c | |||
@@ -370,7 +370,6 @@ int ip_options_compile(struct net *net, | |||
370 | } | 370 | } |
371 | switch (optptr[3]&0xF) { | 371 | switch (optptr[3]&0xF) { |
372 | case IPOPT_TS_TSONLY: | 372 | case IPOPT_TS_TSONLY: |
373 | opt->ts = optptr - iph; | ||
374 | if (skb) | 373 | if (skb) |
375 | timeptr = &optptr[optptr[2]-1]; | 374 | timeptr = &optptr[optptr[2]-1]; |
376 | opt->ts_needtime = 1; | 375 | opt->ts_needtime = 1; |
@@ -381,7 +380,6 @@ int ip_options_compile(struct net *net, | |||
381 | pp_ptr = optptr + 2; | 380 | pp_ptr = optptr + 2; |
382 | goto error; | 381 | goto error; |
383 | } | 382 | } |
384 | opt->ts = optptr - iph; | ||
385 | if (rt) { | 383 | if (rt) { |
386 | spec_dst_fill(&spec_dst, skb); | 384 | spec_dst_fill(&spec_dst, skb); |
387 | memcpy(&optptr[optptr[2]-1], &spec_dst, 4); | 385 | memcpy(&optptr[optptr[2]-1], &spec_dst, 4); |
@@ -396,7 +394,6 @@ int ip_options_compile(struct net *net, | |||
396 | pp_ptr = optptr + 2; | 394 | pp_ptr = optptr + 2; |
397 | goto error; | 395 | goto error; |
398 | } | 396 | } |
399 | opt->ts = optptr - iph; | ||
400 | { | 397 | { |
401 | __be32 addr; | 398 | __be32 addr; |
402 | memcpy(&addr, &optptr[optptr[2]-1], 4); | 399 | memcpy(&addr, &optptr[optptr[2]-1], 4); |
@@ -429,12 +426,12 @@ int ip_options_compile(struct net *net, | |||
429 | pp_ptr = optptr + 3; | 426 | pp_ptr = optptr + 3; |
430 | goto error; | 427 | goto error; |
431 | } | 428 | } |
432 | opt->ts = optptr - iph; | ||
433 | if (skb) { | 429 | if (skb) { |
434 | optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4); | 430 | optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4); |
435 | opt->is_changed = 1; | 431 | opt->is_changed = 1; |
436 | } | 432 | } |
437 | } | 433 | } |
434 | opt->ts = optptr - iph; | ||
438 | break; | 435 | break; |
439 | case IPOPT_RA: | 436 | case IPOPT_RA: |
440 | if (optlen < 4) { | 437 | if (optlen < 4) { |
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 98cbc6877019..bf6c5cf31aed 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c | |||
@@ -1522,7 +1522,8 @@ static int __init ip_auto_config(void) | |||
1522 | } | 1522 | } |
1523 | for (i++; i < CONF_NAMESERVERS_MAX; i++) | 1523 | for (i++; i < CONF_NAMESERVERS_MAX; i++) |
1524 | if (ic_nameservers[i] != NONE) | 1524 | if (ic_nameservers[i] != NONE) |
1525 | pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]); | 1525 | pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]); |
1526 | pr_cont("\n"); | ||
1526 | #endif /* !SILENT */ | 1527 | #endif /* !SILENT */ |
1527 | 1528 | ||
1528 | return 0; | 1529 | return 0; |
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index ce2d43e1f09f..0d755c50994b 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig | |||
@@ -36,19 +36,6 @@ config NF_CONNTRACK_PROC_COMPAT | |||
36 | 36 | ||
37 | If unsure, say Y. | 37 | If unsure, say Y. |
38 | 38 | ||
39 | config IP_NF_QUEUE | ||
40 | tristate "IP Userspace queueing via NETLINK (OBSOLETE)" | ||
41 | depends on NETFILTER_ADVANCED | ||
42 | help | ||
43 | Netfilter has the ability to queue packets to user space: the | ||
44 | netlink device can be used to access them using this driver. | ||
45 | |||
46 | This option enables the old IPv4-only "ip_queue" implementation | ||
47 | which has been obsoleted by the new "nfnetlink_queue" code (see | ||
48 | CONFIG_NETFILTER_NETLINK_QUEUE). | ||
49 | |||
50 | To compile it as a module, choose M here. If unsure, say N. | ||
51 | |||
52 | config IP_NF_IPTABLES | 39 | config IP_NF_IPTABLES |
53 | tristate "IP tables support (required for filtering/masq/NAT)" | 40 | tristate "IP tables support (required for filtering/masq/NAT)" |
54 | default m if NETFILTER_ADVANCED=n | 41 | default m if NETFILTER_ADVANCED=n |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 47e854fcae24..e22020790709 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -775,7 +775,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) | |||
775 | * Make sure that we have exactly size bytes | 775 | * Make sure that we have exactly size bytes |
776 | * available to the caller, no more, no less. | 776 | * available to the caller, no more, no less. |
777 | */ | 777 | */ |
778 | skb->avail_size = size; | 778 | skb->reserved_tailroom = skb->end - skb->tail - size; |
779 | return skb; | 779 | return skb; |
780 | } | 780 | } |
781 | __kfree_skb(skb); | 781 | __kfree_skb(skb); |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 0d9bdacce99f..3bd55bad230a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -2059,11 +2059,8 @@ void tcp_enter_loss(struct sock *sk, int how) | |||
2059 | if (tcp_is_reno(tp)) | 2059 | if (tcp_is_reno(tp)) |
2060 | tcp_reset_reno_sack(tp); | 2060 | tcp_reset_reno_sack(tp); |
2061 | 2061 | ||
2062 | if (!how) { | 2062 | tp->undo_marker = tp->snd_una; |
2063 | /* Push undo marker, if it was plain RTO and nothing | 2063 | if (how) { |
2064 | * was retransmitted. */ | ||
2065 | tp->undo_marker = tp->snd_una; | ||
2066 | } else { | ||
2067 | tp->sacked_out = 0; | 2064 | tp->sacked_out = 0; |
2068 | tp->fackets_out = 0; | 2065 | tp->fackets_out = 0; |
2069 | } | 2066 | } |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 4a8ec457310f..d09203c63264 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -274,13 +274,6 @@ static void tcp_v4_mtu_reduced(struct sock *sk) | |||
274 | struct inet_sock *inet = inet_sk(sk); | 274 | struct inet_sock *inet = inet_sk(sk); |
275 | u32 mtu = tcp_sk(sk)->mtu_info; | 275 | u32 mtu = tcp_sk(sk)->mtu_info; |
276 | 276 | ||
277 | /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs | ||
278 | * send out by Linux are always <576bytes so they should go through | ||
279 | * unfragmented). | ||
280 | */ | ||
281 | if (sk->sk_state == TCP_LISTEN) | ||
282 | return; | ||
283 | |||
284 | dst = inet_csk_update_pmtu(sk, mtu); | 277 | dst = inet_csk_update_pmtu(sk, mtu); |
285 | if (!dst) | 278 | if (!dst) |
286 | return; | 279 | return; |
@@ -408,6 +401,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
408 | goto out; | 401 | goto out; |
409 | 402 | ||
410 | if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ | 403 | if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ |
404 | /* We are not interested in TCP_LISTEN and open_requests | ||
405 | * (SYN-ACKs send out by Linux are always <576bytes so | ||
406 | * they should go through unfragmented). | ||
407 | */ | ||
408 | if (sk->sk_state == TCP_LISTEN) | ||
409 | goto out; | ||
410 | |||
411 | tp->mtu_info = info; | 411 | tp->mtu_info = info; |
412 | if (!sock_owned_by_user(sk)) { | 412 | if (!sock_owned_by_user(sk)) { |
413 | tcp_v4_mtu_reduced(sk); | 413 | tcp_v4_mtu_reduced(sk); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e2b4461074da..b44cf81d8178 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -1298,7 +1298,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) | |||
1298 | eat = min_t(int, len, skb_headlen(skb)); | 1298 | eat = min_t(int, len, skb_headlen(skb)); |
1299 | if (eat) { | 1299 | if (eat) { |
1300 | __skb_pull(skb, eat); | 1300 | __skb_pull(skb, eat); |
1301 | skb->avail_size -= eat; | ||
1302 | len -= eat; | 1301 | len -= eat; |
1303 | if (!len) | 1302 | if (!len) |
1304 | return; | 1303 | return; |
@@ -1810,8 +1809,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) | |||
1810 | goto send_now; | 1809 | goto send_now; |
1811 | } | 1810 | } |
1812 | 1811 | ||
1813 | /* Ok, it looks like it is advisable to defer. */ | 1812 | /* Ok, it looks like it is advisable to defer. |
1814 | tp->tso_deferred = 1 | (jiffies << 1); | 1813 | * Do not rearm the timer if already set to not break TCP ACK clocking. |
1814 | */ | ||
1815 | if (!tp->tso_deferred) | ||
1816 | tp->tso_deferred = 1 | (jiffies << 1); | ||
1815 | 1817 | ||
1816 | return true; | 1818 | return true; |
1817 | 1819 | ||
@@ -2707,6 +2709,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2707 | skb_reserve(skb, MAX_TCP_HEADER); | 2709 | skb_reserve(skb, MAX_TCP_HEADER); |
2708 | 2710 | ||
2709 | skb_dst_set(skb, dst); | 2711 | skb_dst_set(skb, dst); |
2712 | security_skb_owned_by(skb, sk); | ||
2710 | 2713 | ||
2711 | mss = dst_metric_advmss(dst); | 2714 | mss = dst_metric_advmss(dst); |
2712 | if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) | 2715 | if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 265c42cf963c..0a073a263720 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1762,9 +1762,16 @@ int udp_rcv(struct sk_buff *skb) | |||
1762 | 1762 | ||
1763 | void udp_destroy_sock(struct sock *sk) | 1763 | void udp_destroy_sock(struct sock *sk) |
1764 | { | 1764 | { |
1765 | struct udp_sock *up = udp_sk(sk); | ||
1765 | bool slow = lock_sock_fast(sk); | 1766 | bool slow = lock_sock_fast(sk); |
1766 | udp_flush_pending_frames(sk); | 1767 | udp_flush_pending_frames(sk); |
1767 | unlock_sock_fast(sk, slow); | 1768 | unlock_sock_fast(sk, slow); |
1769 | if (static_key_false(&udp_encap_needed) && up->encap_type) { | ||
1770 | void (*encap_destroy)(struct sock *sk); | ||
1771 | encap_destroy = ACCESS_ONCE(up->encap_destroy); | ||
1772 | if (encap_destroy) | ||
1773 | encap_destroy(sk); | ||
1774 | } | ||
1768 | } | 1775 | } |
1769 | 1776 | ||
1770 | /* | 1777 | /* |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f2c7e615f902..a459c4f5b769 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -2529,6 +2529,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) | |||
2529 | static void init_loopback(struct net_device *dev) | 2529 | static void init_loopback(struct net_device *dev) |
2530 | { | 2530 | { |
2531 | struct inet6_dev *idev; | 2531 | struct inet6_dev *idev; |
2532 | struct net_device *sp_dev; | ||
2533 | struct inet6_ifaddr *sp_ifa; | ||
2534 | struct rt6_info *sp_rt; | ||
2532 | 2535 | ||
2533 | /* ::1 */ | 2536 | /* ::1 */ |
2534 | 2537 | ||
@@ -2540,6 +2543,30 @@ static void init_loopback(struct net_device *dev) | |||
2540 | } | 2543 | } |
2541 | 2544 | ||
2542 | add_addr(idev, &in6addr_loopback, 128, IFA_HOST); | 2545 | add_addr(idev, &in6addr_loopback, 128, IFA_HOST); |
2546 | |||
2547 | /* Add routes to other interface's IPv6 addresses */ | ||
2548 | for_each_netdev(dev_net(dev), sp_dev) { | ||
2549 | if (!strcmp(sp_dev->name, dev->name)) | ||
2550 | continue; | ||
2551 | |||
2552 | idev = __in6_dev_get(sp_dev); | ||
2553 | if (!idev) | ||
2554 | continue; | ||
2555 | |||
2556 | read_lock_bh(&idev->lock); | ||
2557 | list_for_each_entry(sp_ifa, &idev->addr_list, if_list) { | ||
2558 | |||
2559 | if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) | ||
2560 | continue; | ||
2561 | |||
2562 | sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); | ||
2563 | |||
2564 | /* Failure cases are ignored */ | ||
2565 | if (!IS_ERR(sp_rt)) | ||
2566 | ip6_ins_rt(sp_rt); | ||
2567 | } | ||
2568 | read_unlock_bh(&idev->lock); | ||
2569 | } | ||
2543 | } | 2570 | } |
2544 | 2571 | ||
2545 | static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) | 2572 | static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) |
@@ -4784,26 +4811,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev) | |||
4784 | 4811 | ||
4785 | static int __net_init addrconf_init_net(struct net *net) | 4812 | static int __net_init addrconf_init_net(struct net *net) |
4786 | { | 4813 | { |
4787 | int err; | 4814 | int err = -ENOMEM; |
4788 | struct ipv6_devconf *all, *dflt; | 4815 | struct ipv6_devconf *all, *dflt; |
4789 | 4816 | ||
4790 | err = -ENOMEM; | 4817 | all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL); |
4791 | all = &ipv6_devconf; | 4818 | if (all == NULL) |
4792 | dflt = &ipv6_devconf_dflt; | 4819 | goto err_alloc_all; |
4793 | 4820 | ||
4794 | if (!net_eq(net, &init_net)) { | 4821 | dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); |
4795 | all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL); | 4822 | if (dflt == NULL) |
4796 | if (all == NULL) | 4823 | goto err_alloc_dflt; |
4797 | goto err_alloc_all; | ||
4798 | 4824 | ||
4799 | dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); | 4825 | /* these will be inherited by all namespaces */ |
4800 | if (dflt == NULL) | 4826 | dflt->autoconf = ipv6_defaults.autoconf; |
4801 | goto err_alloc_dflt; | 4827 | dflt->disable_ipv6 = ipv6_defaults.disable_ipv6; |
4802 | } else { | ||
4803 | /* these will be inherited by all namespaces */ | ||
4804 | dflt->autoconf = ipv6_defaults.autoconf; | ||
4805 | dflt->disable_ipv6 = ipv6_defaults.disable_ipv6; | ||
4806 | } | ||
4807 | 4828 | ||
4808 | net->ipv6.devconf_all = all; | 4829 | net->ipv6.devconf_all = all; |
4809 | net->ipv6.devconf_dflt = dflt; | 4830 | net->ipv6.devconf_dflt = dflt; |
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index e33fe0ab2568..2bab2aa59745 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c | |||
@@ -118,6 +118,18 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt | |||
118 | ipv6_addr_loopback(&hdr->daddr)) | 118 | ipv6_addr_loopback(&hdr->daddr)) |
119 | goto err; | 119 | goto err; |
120 | 120 | ||
121 | /* RFC4291 Errata ID: 3480 | ||
122 | * Interface-Local scope spans only a single interface on a | ||
123 | * node and is useful only for loopback transmission of | ||
124 | * multicast. Packets with interface-local scope received | ||
125 | * from another node must be discarded. | ||
126 | */ | ||
127 | if (!(skb->pkt_type == PACKET_LOOPBACK || | ||
128 | dev->flags & IFF_LOOPBACK) && | ||
129 | ipv6_addr_is_multicast(&hdr->daddr) && | ||
130 | IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1) | ||
131 | goto err; | ||
132 | |||
121 | /* RFC4291 2.7 | 133 | /* RFC4291 2.7 |
122 | * Nodes must not originate a packet to a multicast address whose scope | 134 | * Nodes must not originate a packet to a multicast address whose scope |
123 | * field contains the reserved value 0; if such a packet is received, it | 135 | * field contains the reserved value 0; if such a packet is received, it |
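
The new check above drops interface-local (scope 1) multicast that arrives from the wire, since RFC 4291 errata 3480 restricts that scope to loopback delivery. A small userspace sketch of the address test itself, not kernel code; the kernel expresses the same thing with ipv6_addr_is_multicast() and IPV6_ADDR_MC_SCOPE().

#include <stdbool.h>
#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static bool is_interface_local_mcast(const struct in6_addr *a)
{
	/* multicast: first byte 0xff; scope: low nibble of second byte */
	return a->s6_addr[0] == 0xff && (a->s6_addr[1] & 0x0f) == 1;
}

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "ff01::1", &a);	/* interface-local all-nodes */
	printf("ff01::1 -> %d\n", is_interface_local_mcast(&a));	/* 1 */
	inet_pton(AF_INET6, "ff02::1", &a);	/* link-local scope */
	printf("ff02::1 -> %d\n", is_interface_local_mcast(&a));	/* 0 */
	return 0;
}
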
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c index 83acc1405a18..cb631143721c 100644 --- a/net/ipv6/netfilter/ip6t_NPT.c +++ b/net/ipv6/netfilter/ip6t_NPT.c | |||
@@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt, | |||
57 | if (pfx_len - i >= 32) | 57 | if (pfx_len - i >= 32) |
58 | mask = 0; | 58 | mask = 0; |
59 | else | 59 | else |
60 | mask = htonl(~((1 << (pfx_len - i)) - 1)); | 60 | mask = htonl((1 << (i - pfx_len + 32)) - 1); |
61 | 61 | ||
62 | idx = i / 32; | 62 | idx = i / 32; |
63 | addr->s6_addr32[idx] &= mask; | 63 | addr->s6_addr32[idx] &= mask; |
@@ -114,6 +114,7 @@ ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par) | |||
114 | static struct xt_target ip6t_npt_target_reg[] __read_mostly = { | 114 | static struct xt_target ip6t_npt_target_reg[] __read_mostly = { |
115 | { | 115 | { |
116 | .name = "SNPT", | 116 | .name = "SNPT", |
117 | .table = "mangle", | ||
117 | .target = ip6t_snpt_tg, | 118 | .target = ip6t_snpt_tg, |
118 | .targetsize = sizeof(struct ip6t_npt_tginfo), | 119 | .targetsize = sizeof(struct ip6t_npt_tginfo), |
119 | .checkentry = ip6t_npt_checkentry, | 120 | .checkentry = ip6t_npt_checkentry, |
@@ -124,6 +125,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = { | |||
124 | }, | 125 | }, |
125 | { | 126 | { |
126 | .name = "DNPT", | 127 | .name = "DNPT", |
128 | .table = "mangle", | ||
127 | .target = ip6t_dnpt_tg, | 129 | .target = ip6t_dnpt_tg, |
128 | .targetsize = sizeof(struct ip6t_npt_tginfo), | 130 | .targetsize = sizeof(struct ip6t_npt_tginfo), |
129 | .checkentry = ip6t_npt_checkentry, | 131 | .checkentry = ip6t_npt_checkentry, |
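
The mask change in the first hunk above fixes which bits survive in the 32-bit word that the prefix boundary falls inside: the host bits below the prefix must be kept, and the prefix bits cleared, before the translated prefix is OR-ed back in. A standalone userspace sketch of that arithmetic, where r is the number of prefix bits inside the word (illustrative names only):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

static uint32_t host_bits_mask(unsigned int r)	/* 1 <= r <= 31 */
{
	/* keep the low 32-r bits (host part), clear the top r bits */
	return htonl((1u << (32 - r)) - 1);
}

int main(void)
{
	/* e.g. a /56 prefix: its boundary word (bits 32..63) has r = 24 */
	printf("r=24 -> mask %08x\n", ntohl(host_bits_mask(24)));	/* 000000ff */
	printf("r=8  -> mask %08x\n", ntohl(host_bits_mask(8)));	/* 00ffffff */
	return 0;
}
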
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 54087e96d7b8..6700069949dd 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -14,6 +14,8 @@ | |||
14 | * 2 of the License, or (at your option) any later version. | 14 | * 2 of the License, or (at your option) any later version. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #define pr_fmt(fmt) "IPv6-nf: " fmt | ||
18 | |||
17 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
18 | #include <linux/types.h> | 20 | #include <linux/types.h> |
19 | #include <linux/string.h> | 21 | #include <linux/string.h> |
@@ -180,13 +182,11 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id, | |||
180 | 182 | ||
181 | q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); | 183 | q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); |
182 | local_bh_enable(); | 184 | local_bh_enable(); |
183 | if (q == NULL) | 185 | if (IS_ERR_OR_NULL(q)) { |
184 | goto oom; | 186 | inet_frag_maybe_warn_overflow(q, pr_fmt()); |
185 | 187 | return NULL; | |
188 | } | ||
186 | return container_of(q, struct frag_queue, q); | 189 | return container_of(q, struct frag_queue, q); |
187 | |||
188 | oom: | ||
189 | return NULL; | ||
190 | } | 190 | } |
191 | 191 | ||
192 | 192 | ||
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 3c6a77290c6e..196ab9347ad1 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -26,6 +26,9 @@ | |||
26 | * YOSHIFUJI,H. @USAGI Always remove fragment header to | 26 | * YOSHIFUJI,H. @USAGI Always remove fragment header to |
27 | * calculate ICV correctly. | 27 | * calculate ICV correctly. |
28 | */ | 28 | */ |
29 | |||
30 | #define pr_fmt(fmt) "IPv6: " fmt | ||
31 | |||
29 | #include <linux/errno.h> | 32 | #include <linux/errno.h> |
30 | #include <linux/types.h> | 33 | #include <linux/types.h> |
31 | #include <linux/string.h> | 34 | #include <linux/string.h> |
@@ -185,9 +188,10 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6 | |||
185 | hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd); | 188 | hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd); |
186 | 189 | ||
187 | q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); | 190 | q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); |
188 | if (q == NULL) | 191 | if (IS_ERR_OR_NULL(q)) { |
192 | inet_frag_maybe_warn_overflow(q, pr_fmt()); | ||
189 | return NULL; | 193 | return NULL; |
190 | 194 | } | |
191 | return container_of(q, struct frag_queue, q); | 195 | return container_of(q, struct frag_queue, q); |
192 | } | 196 | } |
193 | 197 | ||
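
Both fq_find() changes above adopt the same caller convention: inet_frag_find() may now hand back an ERR_PTR (for example on hash overflow) as well as NULL, so a single IS_ERR_OR_NULL() test covers both cases before the container_of() conversion. A hedged sketch of that pattern with illustrative types (struct my_base, struct my_queue):

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/printk.h>

struct my_base {
	int refcnt;
};

struct my_queue {
	int id;
	struct my_base q;	/* embedded base object */
};

static struct my_queue *example_find(struct my_base *(*lookup)(void))
{
	struct my_base *q = lookup();

	if (IS_ERR_OR_NULL(q)) {
		if (IS_ERR(q))
			pr_warn("lookup failed: %ld\n", PTR_ERR(q));
		return NULL;	/* callers treat error and miss alike */
	}
	return container_of(q, struct my_queue, q);
}
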
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 9b6460055df5..46a5be85be87 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -386,9 +386,17 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
386 | 386 | ||
387 | if (dst) | 387 | if (dst) |
388 | dst->ops->redirect(dst, sk, skb); | 388 | dst->ops->redirect(dst, sk, skb); |
389 | goto out; | ||
389 | } | 390 | } |
390 | 391 | ||
391 | if (type == ICMPV6_PKT_TOOBIG) { | 392 | if (type == ICMPV6_PKT_TOOBIG) { |
393 | /* We are not interested in TCP_LISTEN and open_requests | ||
394 | * (SYN-ACKs send out by Linux are always <576bytes so | ||
395 | * they should go through unfragmented). | ||
396 | */ | ||
397 | if (sk->sk_state == TCP_LISTEN) | ||
398 | goto out; | ||
399 | |||
392 | tp->mtu_info = ntohl(info); | 400 | tp->mtu_info = ntohl(info); |
393 | if (!sock_owned_by_user(sk)) | 401 | if (!sock_owned_by_user(sk)) |
394 | tcp_v6_mtu_reduced(sk); | 402 | tcp_v6_mtu_reduced(sk); |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 599e1ba6d1ce..d8e5e852fc7a 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -1285,10 +1285,18 @@ do_confirm: | |||
1285 | 1285 | ||
1286 | void udpv6_destroy_sock(struct sock *sk) | 1286 | void udpv6_destroy_sock(struct sock *sk) |
1287 | { | 1287 | { |
1288 | struct udp_sock *up = udp_sk(sk); | ||
1288 | lock_sock(sk); | 1289 | lock_sock(sk); |
1289 | udp_v6_flush_pending_frames(sk); | 1290 | udp_v6_flush_pending_frames(sk); |
1290 | release_sock(sk); | 1291 | release_sock(sk); |
1291 | 1292 | ||
1293 | if (static_key_false(&udpv6_encap_needed) && up->encap_type) { | ||
1294 | void (*encap_destroy)(struct sock *sk); | ||
1295 | encap_destroy = ACCESS_ONCE(up->encap_destroy); | ||
1296 | if (encap_destroy) | ||
1297 | encap_destroy(sk); | ||
1298 | } | ||
1299 | |||
1292 | inet6_destroy_sock(sk); | 1300 | inet6_destroy_sock(sk); |
1293 | } | 1301 | } |
1294 | 1302 | ||
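
udpv6_destroy_sock() now invokes the socket's encap_destroy callback, so an encapsulation user (L2TP wires this up later in this same series) gets a chance to tear down tunnel state when its UDP socket dies. A hedged sketch of how such a user typically registers the hooks; my_encap_rcv and my_encap_destroy are illustrative names:

#include <linux/skbuff.h>
#include <linux/udp.h>
#include <net/udp.h>

static int my_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	return 1;	/* "not ours" in this sketch: fall back to normal UDP delivery */
}

static void my_encap_destroy(struct sock *sk)
{
	/* look up and tear down the tunnel bound to sk (omitted) */
}

static void my_setup_encap(struct sock *sk)
{
	udp_sk(sk)->encap_type    = UDP_ENCAP_L2TPINUDP;	/* example type */
	udp_sk(sk)->encap_rcv     = my_encap_rcv;
	udp_sk(sk)->encap_destroy = my_encap_destroy;
}
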
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index d07e3a626446..e493b3397ae3 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, | |||
1386 | 1386 | ||
1387 | IRDA_DEBUG(4, "%s()\n", __func__); | 1387 | IRDA_DEBUG(4, "%s()\n", __func__); |
1388 | 1388 | ||
1389 | msg->msg_namelen = 0; | ||
1390 | |||
1389 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, | 1391 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, |
1390 | flags & MSG_DONTWAIT, &err); | 1392 | flags & MSG_DONTWAIT, &err); |
1391 | if (!skb) | 1393 | if (!skb) |
@@ -2583,8 +2585,10 @@ bed: | |||
2583 | NULL, NULL, NULL); | 2585 | NULL, NULL, NULL); |
2584 | 2586 | ||
2585 | /* Check if we got some results */ | 2587 | /* Check if we got some results */ |
2586 | if (!self->cachedaddr) | 2588 | if (!self->cachedaddr) { |
2587 | return -EAGAIN; /* Didn't find any devices */ | 2589 | err = -EAGAIN; /* Didn't find any devices */ |
2590 | goto out; | ||
2591 | } | ||
2588 | daddr = self->cachedaddr; | 2592 | daddr = self->cachedaddr; |
2589 | /* Cleanup */ | 2593 | /* Cleanup */ |
2590 | self->cachedaddr = 0; | 2594 | self->cachedaddr = 0; |
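
The msg_namelen initialisation added to irda_recvmsg_dgram() (and to af_iucv below) closes a well-known class of recvmsg information leaks: on paths that never fill in a source address, stale kernel stack bytes in the caller-provided sockaddr_storage could otherwise be copied back to userspace because msg_namelen was still nonzero. A hedged sketch of the defensive shape, with a simplified signature and illustrative names:

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/string.h>

static int example_recvmsg(struct socket *sock, struct msghdr *msg,
			   size_t len, int flags)
{
	msg->msg_namelen = 0;	/* nothing reported until an address is really written */

	/* ... receive a datagram; early error returns are now safe ... */

	if (msg->msg_name) {
		struct sockaddr_storage *addr = msg->msg_name;

		memset(addr, 0, sizeof(*addr));
		/* fill in the real source address here */
		msg->msg_namelen = sizeof(*addr);
	}
	return 0;
}
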
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index a7d11ffe4284..206ce6db2c36 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -49,12 +49,6 @@ static const u8 iprm_shutdown[8] = | |||
49 | 49 | ||
50 | #define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class)) | 50 | #define TRGCLS_SIZE (sizeof(((struct iucv_message *)0)->class)) |
51 | 51 | ||
52 | /* macros to set/get socket control buffer at correct offset */ | ||
53 | #define CB_TAG(skb) ((skb)->cb) /* iucv message tag */ | ||
54 | #define CB_TAG_LEN (sizeof(((struct iucv_message *) 0)->tag)) | ||
55 | #define CB_TRGCLS(skb) ((skb)->cb + CB_TAG_LEN) /* iucv msg target class */ | ||
56 | #define CB_TRGCLS_LEN (TRGCLS_SIZE) | ||
57 | |||
58 | #define __iucv_sock_wait(sk, condition, timeo, ret) \ | 52 | #define __iucv_sock_wait(sk, condition, timeo, ret) \ |
59 | do { \ | 53 | do { \ |
60 | DEFINE_WAIT(__wait); \ | 54 | DEFINE_WAIT(__wait); \ |
@@ -1141,7 +1135,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1141 | 1135 | ||
1142 | /* increment and save iucv message tag for msg_completion cbk */ | 1136 | /* increment and save iucv message tag for msg_completion cbk */ |
1143 | txmsg.tag = iucv->send_tag++; | 1137 | txmsg.tag = iucv->send_tag++; |
1144 | memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); | 1138 | IUCV_SKB_CB(skb)->tag = txmsg.tag; |
1145 | 1139 | ||
1146 | if (iucv->transport == AF_IUCV_TRANS_HIPER) { | 1140 | if (iucv->transport == AF_IUCV_TRANS_HIPER) { |
1147 | atomic_inc(&iucv->msg_sent); | 1141 | atomic_inc(&iucv->msg_sent); |
@@ -1224,7 +1218,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len) | |||
1224 | return -ENOMEM; | 1218 | return -ENOMEM; |
1225 | 1219 | ||
1226 | /* copy target class to control buffer of new skb */ | 1220 | /* copy target class to control buffer of new skb */ |
1227 | memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN); | 1221 | IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class; |
1228 | 1222 | ||
1229 | /* copy data fragment */ | 1223 | /* copy data fragment */ |
1230 | memcpy(nskb->data, skb->data + copied, size); | 1224 | memcpy(nskb->data, skb->data + copied, size); |
@@ -1256,7 +1250,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb, | |||
1256 | 1250 | ||
1257 | /* store msg target class in the second 4 bytes of skb ctrl buffer */ | 1251 | /* store msg target class in the second 4 bytes of skb ctrl buffer */ |
1258 | /* Note: the first 4 bytes are reserved for msg tag */ | 1252 | /* Note: the first 4 bytes are reserved for msg tag */ |
1259 | memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN); | 1253 | IUCV_SKB_CB(skb)->class = msg->class; |
1260 | 1254 | ||
1261 | /* check for special IPRM messages (e.g. iucv_sock_shutdown) */ | 1255 | /* check for special IPRM messages (e.g. iucv_sock_shutdown) */ |
1262 | if ((msg->flags & IUCV_IPRMDATA) && len > 7) { | 1256 | if ((msg->flags & IUCV_IPRMDATA) && len > 7) { |
@@ -1292,6 +1286,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb, | |||
1292 | } | 1286 | } |
1293 | } | 1287 | } |
1294 | 1288 | ||
1289 | IUCV_SKB_CB(skb)->offset = 0; | ||
1295 | if (sock_queue_rcv_skb(sk, skb)) | 1290 | if (sock_queue_rcv_skb(sk, skb)) |
1296 | skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); | 1291 | skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); |
1297 | } | 1292 | } |
@@ -1327,6 +1322,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1327 | unsigned int copied, rlen; | 1322 | unsigned int copied, rlen; |
1328 | struct sk_buff *skb, *rskb, *cskb; | 1323 | struct sk_buff *skb, *rskb, *cskb; |
1329 | int err = 0; | 1324 | int err = 0; |
1325 | u32 offset; | ||
1326 | |||
1327 | msg->msg_namelen = 0; | ||
1330 | 1328 | ||
1331 | if ((sk->sk_state == IUCV_DISCONN) && | 1329 | if ((sk->sk_state == IUCV_DISCONN) && |
1332 | skb_queue_empty(&iucv->backlog_skb_q) && | 1330 | skb_queue_empty(&iucv->backlog_skb_q) && |
@@ -1346,13 +1344,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1346 | return err; | 1344 | return err; |
1347 | } | 1345 | } |
1348 | 1346 | ||
1349 | rlen = skb->len; /* real length of skb */ | 1347 | offset = IUCV_SKB_CB(skb)->offset; |
1348 | rlen = skb->len - offset; /* real length of skb */ | ||
1350 | copied = min_t(unsigned int, rlen, len); | 1349 | copied = min_t(unsigned int, rlen, len); |
1351 | if (!rlen) | 1350 | if (!rlen) |
1352 | sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; | 1351 | sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; |
1353 | 1352 | ||
1354 | cskb = skb; | 1353 | cskb = skb; |
1355 | if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { | 1354 | if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) { |
1356 | if (!(flags & MSG_PEEK)) | 1355 | if (!(flags & MSG_PEEK)) |
1357 | skb_queue_head(&sk->sk_receive_queue, skb); | 1356 | skb_queue_head(&sk->sk_receive_queue, skb); |
1358 | return -EFAULT; | 1357 | return -EFAULT; |
@@ -1370,7 +1369,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1370 | * get the trgcls from the control buffer of the skb due to | 1369 | * get the trgcls from the control buffer of the skb due to |
1371 | * fragmentation of original iucv message. */ | 1370 | * fragmentation of original iucv message. */ |
1372 | err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, | 1371 | err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, |
1373 | CB_TRGCLS_LEN, CB_TRGCLS(skb)); | 1372 | sizeof(IUCV_SKB_CB(skb)->class), |
1373 | (void *)&IUCV_SKB_CB(skb)->class); | ||
1374 | if (err) { | 1374 | if (err) { |
1375 | if (!(flags & MSG_PEEK)) | 1375 | if (!(flags & MSG_PEEK)) |
1376 | skb_queue_head(&sk->sk_receive_queue, skb); | 1376 | skb_queue_head(&sk->sk_receive_queue, skb); |
@@ -1382,9 +1382,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1382 | 1382 | ||
1383 | /* SOCK_STREAM: re-queue skb if it contains unreceived data */ | 1383 | /* SOCK_STREAM: re-queue skb if it contains unreceived data */ |
1384 | if (sk->sk_type == SOCK_STREAM) { | 1384 | if (sk->sk_type == SOCK_STREAM) { |
1385 | skb_pull(skb, copied); | 1385 | if (copied < rlen) { |
1386 | if (skb->len) { | 1386 | IUCV_SKB_CB(skb)->offset = offset + copied; |
1387 | skb_queue_head(&sk->sk_receive_queue, skb); | ||
1388 | goto done; | 1387 | goto done; |
1389 | } | 1388 | } |
1390 | } | 1389 | } |
@@ -1403,6 +1402,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1403 | spin_lock_bh(&iucv->message_q.lock); | 1402 | spin_lock_bh(&iucv->message_q.lock); |
1404 | rskb = skb_dequeue(&iucv->backlog_skb_q); | 1403 | rskb = skb_dequeue(&iucv->backlog_skb_q); |
1405 | while (rskb) { | 1404 | while (rskb) { |
1405 | IUCV_SKB_CB(rskb)->offset = 0; | ||
1406 | if (sock_queue_rcv_skb(sk, rskb)) { | 1406 | if (sock_queue_rcv_skb(sk, rskb)) { |
1407 | skb_queue_head(&iucv->backlog_skb_q, | 1407 | skb_queue_head(&iucv->backlog_skb_q, |
1408 | rskb); | 1408 | rskb); |
@@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path, | |||
1830 | spin_lock_irqsave(&list->lock, flags); | 1830 | spin_lock_irqsave(&list->lock, flags); |
1831 | 1831 | ||
1832 | while (list_skb != (struct sk_buff *)list) { | 1832 | while (list_skb != (struct sk_buff *)list) { |
1833 | if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) { | 1833 | if (msg->tag == IUCV_SKB_CB(list_skb)->tag) { |
1834 | this = list_skb; | 1834 | this = list_skb; |
1835 | break; | 1835 | break; |
1836 | } | 1836 | } |
@@ -2091,6 +2091,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb) | |||
2091 | skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); | 2091 | skb_pull(skb, sizeof(struct af_iucv_trans_hdr)); |
2092 | skb_reset_transport_header(skb); | 2092 | skb_reset_transport_header(skb); |
2093 | skb_reset_network_header(skb); | 2093 | skb_reset_network_header(skb); |
2094 | IUCV_SKB_CB(skb)->offset = 0; | ||
2094 | spin_lock(&iucv->message_q.lock); | 2095 | spin_lock(&iucv->message_q.lock); |
2095 | if (skb_queue_empty(&iucv->backlog_skb_q)) { | 2096 | if (skb_queue_empty(&iucv->backlog_skb_q)) { |
2096 | if (sock_queue_rcv_skb(sk, skb)) { | 2097 | if (sock_queue_rcv_skb(sk, skb)) { |
@@ -2195,8 +2196,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev, | |||
2195 | /* fall through and receive zero length data */ | 2196 | /* fall through and receive zero length data */ |
2196 | case 0: | 2197 | case 0: |
2197 | /* plain data frame */ | 2198 | /* plain data frame */ |
2198 | memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class, | 2199 | IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class; |
2199 | CB_TRGCLS_LEN); | ||
2200 | err = afiucv_hs_callback_rx(sk, skb); | 2200 | err = afiucv_hs_callback_rx(sk, skb); |
2201 | break; | 2201 | break; |
2202 | default: | 2202 | default: |
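
The af_iucv conversion above replaces raw skb->cb offset macros with a typed IUCV_SKB_CB() accessor carrying the message tag, target class and a new read offset. The accessor's definition is not part of this excerpt; the conventional shape for such a cb accessor would be roughly the following (hedged sketch, field layout assumed from its uses above):

#include <linux/skbuff.h>
#include <linux/types.h>

struct iucv_skb_cb {
	u32 class;	/* target class of the IUCV message */
	u32 tag;	/* tag matched against tx-done confirmations */
	u32 offset;	/* bytes already delivered for SOCK_STREAM reads */
};

#define IUCV_SKB_CB(skb)	((struct iucv_skb_cb *)&((skb)->cb[0]))

/* The usual safety net for cb-based state is a build-time check that the
 * private struct still fits, e.g.
 * BUILD_BUG_ON(sizeof(struct iucv_skb_cb) > sizeof(((struct sk_buff *)0)->cb)); */
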
diff --git a/net/key/af_key.c b/net/key/af_key.c index 8555f331ea60..5b1e5af25713 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -2693,6 +2693,7 @@ static int key_notify_policy_flush(const struct km_event *c) | |||
2693 | hdr->sadb_msg_pid = c->portid; | 2693 | hdr->sadb_msg_pid = c->portid; |
2694 | hdr->sadb_msg_version = PF_KEY_V2; | 2694 | hdr->sadb_msg_version = PF_KEY_V2; |
2695 | hdr->sadb_msg_errno = (uint8_t) 0; | 2695 | hdr->sadb_msg_errno = (uint8_t) 0; |
2696 | hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; | ||
2696 | hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); | 2697 | hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); |
2697 | pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); | 2698 | pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); |
2698 | return 0; | 2699 | return 0; |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index d36875f3427e..8aecf5df6656 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -114,7 +114,6 @@ struct l2tp_net { | |||
114 | 114 | ||
115 | static void l2tp_session_set_header_len(struct l2tp_session *session, int version); | 115 | static void l2tp_session_set_header_len(struct l2tp_session *session, int version); |
116 | static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); | 116 | static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); |
117 | static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); | ||
118 | 117 | ||
119 | static inline struct l2tp_net *l2tp_pernet(struct net *net) | 118 | static inline struct l2tp_net *l2tp_pernet(struct net *net) |
120 | { | 119 | { |
@@ -192,6 +191,7 @@ struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel) | |||
192 | } else { | 191 | } else { |
193 | /* Socket is owned by kernelspace */ | 192 | /* Socket is owned by kernelspace */ |
194 | sk = tunnel->sock; | 193 | sk = tunnel->sock; |
194 | sock_hold(sk); | ||
195 | } | 195 | } |
196 | 196 | ||
197 | out: | 197 | out: |
@@ -210,6 +210,7 @@ void l2tp_tunnel_sock_put(struct sock *sk) | |||
210 | } | 210 | } |
211 | sock_put(sk); | 211 | sock_put(sk); |
212 | } | 212 | } |
213 | sock_put(sk); | ||
213 | } | 214 | } |
214 | EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); | 215 | EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); |
215 | 216 | ||
@@ -373,10 +374,8 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk | |||
373 | struct sk_buff *skbp; | 374 | struct sk_buff *skbp; |
374 | struct sk_buff *tmp; | 375 | struct sk_buff *tmp; |
375 | u32 ns = L2TP_SKB_CB(skb)->ns; | 376 | u32 ns = L2TP_SKB_CB(skb)->ns; |
376 | struct l2tp_stats *sstats; | ||
377 | 377 | ||
378 | spin_lock_bh(&session->reorder_q.lock); | 378 | spin_lock_bh(&session->reorder_q.lock); |
379 | sstats = &session->stats; | ||
380 | skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { | 379 | skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { |
381 | if (L2TP_SKB_CB(skbp)->ns > ns) { | 380 | if (L2TP_SKB_CB(skbp)->ns > ns) { |
382 | __skb_queue_before(&session->reorder_q, skbp, skb); | 381 | __skb_queue_before(&session->reorder_q, skbp, skb); |
@@ -384,9 +383,7 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk | |||
384 | "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", | 383 | "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", |
385 | session->name, ns, L2TP_SKB_CB(skbp)->ns, | 384 | session->name, ns, L2TP_SKB_CB(skbp)->ns, |
386 | skb_queue_len(&session->reorder_q)); | 385 | skb_queue_len(&session->reorder_q)); |
387 | u64_stats_update_begin(&sstats->syncp); | 386 | atomic_long_inc(&session->stats.rx_oos_packets); |
388 | sstats->rx_oos_packets++; | ||
389 | u64_stats_update_end(&sstats->syncp); | ||
390 | goto out; | 387 | goto out; |
391 | } | 388 | } |
392 | } | 389 | } |
@@ -403,23 +400,16 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff * | |||
403 | { | 400 | { |
404 | struct l2tp_tunnel *tunnel = session->tunnel; | 401 | struct l2tp_tunnel *tunnel = session->tunnel; |
405 | int length = L2TP_SKB_CB(skb)->length; | 402 | int length = L2TP_SKB_CB(skb)->length; |
406 | struct l2tp_stats *tstats, *sstats; | ||
407 | 403 | ||
408 | /* We're about to requeue the skb, so return resources | 404 | /* We're about to requeue the skb, so return resources |
409 | * to its current owner (a socket receive buffer). | 405 | * to its current owner (a socket receive buffer). |
410 | */ | 406 | */ |
411 | skb_orphan(skb); | 407 | skb_orphan(skb); |
412 | 408 | ||
413 | tstats = &tunnel->stats; | 409 | atomic_long_inc(&tunnel->stats.rx_packets); |
414 | u64_stats_update_begin(&tstats->syncp); | 410 | atomic_long_add(length, &tunnel->stats.rx_bytes); |
415 | sstats = &session->stats; | 411 | atomic_long_inc(&session->stats.rx_packets); |
416 | u64_stats_update_begin(&sstats->syncp); | 412 | atomic_long_add(length, &session->stats.rx_bytes); |
417 | tstats->rx_packets++; | ||
418 | tstats->rx_bytes += length; | ||
419 | sstats->rx_packets++; | ||
420 | sstats->rx_bytes += length; | ||
421 | u64_stats_update_end(&tstats->syncp); | ||
422 | u64_stats_update_end(&sstats->syncp); | ||
423 | 413 | ||
424 | if (L2TP_SKB_CB(skb)->has_seq) { | 414 | if (L2TP_SKB_CB(skb)->has_seq) { |
425 | /* Bump our Nr */ | 415 | /* Bump our Nr */ |
@@ -450,7 +440,6 @@ static void l2tp_recv_dequeue(struct l2tp_session *session) | |||
450 | { | 440 | { |
451 | struct sk_buff *skb; | 441 | struct sk_buff *skb; |
452 | struct sk_buff *tmp; | 442 | struct sk_buff *tmp; |
453 | struct l2tp_stats *sstats; | ||
454 | 443 | ||
455 | /* If the pkt at the head of the queue has the nr that we | 444 | /* If the pkt at the head of the queue has the nr that we |
456 | * expect to send up next, dequeue it and any other | 445 | * expect to send up next, dequeue it and any other |
@@ -458,13 +447,10 @@ static void l2tp_recv_dequeue(struct l2tp_session *session) | |||
458 | */ | 447 | */ |
459 | start: | 448 | start: |
460 | spin_lock_bh(&session->reorder_q.lock); | 449 | spin_lock_bh(&session->reorder_q.lock); |
461 | sstats = &session->stats; | ||
462 | skb_queue_walk_safe(&session->reorder_q, skb, tmp) { | 450 | skb_queue_walk_safe(&session->reorder_q, skb, tmp) { |
463 | if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { | 451 | if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { |
464 | u64_stats_update_begin(&sstats->syncp); | 452 | atomic_long_inc(&session->stats.rx_seq_discards); |
465 | sstats->rx_seq_discards++; | 453 | atomic_long_inc(&session->stats.rx_errors); |
466 | sstats->rx_errors++; | ||
467 | u64_stats_update_end(&sstats->syncp); | ||
468 | l2tp_dbg(session, L2TP_MSG_SEQ, | 454 | l2tp_dbg(session, L2TP_MSG_SEQ, |
469 | "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n", | 455 | "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n", |
470 | session->name, L2TP_SKB_CB(skb)->ns, | 456 | session->name, L2TP_SKB_CB(skb)->ns, |
@@ -623,7 +609,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | |||
623 | struct l2tp_tunnel *tunnel = session->tunnel; | 609 | struct l2tp_tunnel *tunnel = session->tunnel; |
624 | int offset; | 610 | int offset; |
625 | u32 ns, nr; | 611 | u32 ns, nr; |
626 | struct l2tp_stats *sstats = &session->stats; | ||
627 | 612 | ||
628 | /* The ref count is increased since we now hold a pointer to | 613 | /* The ref count is increased since we now hold a pointer to |
629 | * the session. Take care to decrement the refcnt when exiting | 614 | * the session. Take care to decrement the refcnt when exiting |
@@ -640,9 +625,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | |||
640 | "%s: cookie mismatch (%u/%u). Discarding.\n", | 625 | "%s: cookie mismatch (%u/%u). Discarding.\n", |
641 | tunnel->name, tunnel->tunnel_id, | 626 | tunnel->name, tunnel->tunnel_id, |
642 | session->session_id); | 627 | session->session_id); |
643 | u64_stats_update_begin(&sstats->syncp); | 628 | atomic_long_inc(&session->stats.rx_cookie_discards); |
644 | sstats->rx_cookie_discards++; | ||
645 | u64_stats_update_end(&sstats->syncp); | ||
646 | goto discard; | 629 | goto discard; |
647 | } | 630 | } |
648 | ptr += session->peer_cookie_len; | 631 | ptr += session->peer_cookie_len; |
@@ -711,9 +694,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | |||
711 | l2tp_warn(session, L2TP_MSG_SEQ, | 694 | l2tp_warn(session, L2TP_MSG_SEQ, |
712 | "%s: recv data has no seq numbers when required. Discarding.\n", | 695 | "%s: recv data has no seq numbers when required. Discarding.\n", |
713 | session->name); | 696 | session->name); |
714 | u64_stats_update_begin(&sstats->syncp); | 697 | atomic_long_inc(&session->stats.rx_seq_discards); |
715 | sstats->rx_seq_discards++; | ||
716 | u64_stats_update_end(&sstats->syncp); | ||
717 | goto discard; | 698 | goto discard; |
718 | } | 699 | } |
719 | 700 | ||
@@ -732,9 +713,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | |||
732 | l2tp_warn(session, L2TP_MSG_SEQ, | 713 | l2tp_warn(session, L2TP_MSG_SEQ, |
733 | "%s: recv data has no seq numbers when required. Discarding.\n", | 714 | "%s: recv data has no seq numbers when required. Discarding.\n", |
734 | session->name); | 715 | session->name); |
735 | u64_stats_update_begin(&sstats->syncp); | 716 | atomic_long_inc(&session->stats.rx_seq_discards); |
736 | sstats->rx_seq_discards++; | ||
737 | u64_stats_update_end(&sstats->syncp); | ||
738 | goto discard; | 717 | goto discard; |
739 | } | 718 | } |
740 | } | 719 | } |
@@ -788,9 +767,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | |||
788 | * packets | 767 | * packets |
789 | */ | 768 | */ |
790 | if (L2TP_SKB_CB(skb)->ns != session->nr) { | 769 | if (L2TP_SKB_CB(skb)->ns != session->nr) { |
791 | u64_stats_update_begin(&sstats->syncp); | 770 | atomic_long_inc(&session->stats.rx_seq_discards); |
792 | sstats->rx_seq_discards++; | ||
793 | u64_stats_update_end(&sstats->syncp); | ||
794 | l2tp_dbg(session, L2TP_MSG_SEQ, | 771 | l2tp_dbg(session, L2TP_MSG_SEQ, |
795 | "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n", | 772 | "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n", |
796 | session->name, L2TP_SKB_CB(skb)->ns, | 773 | session->name, L2TP_SKB_CB(skb)->ns, |
@@ -816,9 +793,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, | |||
816 | return; | 793 | return; |
817 | 794 | ||
818 | discard: | 795 | discard: |
819 | u64_stats_update_begin(&sstats->syncp); | 796 | atomic_long_inc(&session->stats.rx_errors); |
820 | sstats->rx_errors++; | ||
821 | u64_stats_update_end(&sstats->syncp); | ||
822 | kfree_skb(skb); | 797 | kfree_skb(skb); |
823 | 798 | ||
824 | if (session->deref) | 799 | if (session->deref) |
@@ -828,6 +803,23 @@ discard: | |||
828 | } | 803 | } |
829 | EXPORT_SYMBOL(l2tp_recv_common); | 804 | EXPORT_SYMBOL(l2tp_recv_common); |
830 | 805 | ||
806 | /* Drop skbs from the session's reorder_q | ||
807 | */ | ||
808 | int l2tp_session_queue_purge(struct l2tp_session *session) | ||
809 | { | ||
810 | struct sk_buff *skb = NULL; | ||
811 | BUG_ON(!session); | ||
812 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | ||
813 | while ((skb = skb_dequeue(&session->reorder_q))) { | ||
814 | atomic_long_inc(&session->stats.rx_errors); | ||
815 | kfree_skb(skb); | ||
816 | if (session->deref) | ||
817 | (*session->deref)(session); | ||
818 | } | ||
819 | return 0; | ||
820 | } | ||
821 | EXPORT_SYMBOL_GPL(l2tp_session_queue_purge); | ||
822 | |||
831 | /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame | 823 | /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame |
832 | * here. The skb is not on a list when we get here. | 824 | * here. The skb is not on a list when we get here. |
833 | * Returns 0 if the packet was a data packet and was successfully passed on. | 825 | * Returns 0 if the packet was a data packet and was successfully passed on. |
@@ -843,7 +835,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, | |||
843 | u32 tunnel_id, session_id; | 835 | u32 tunnel_id, session_id; |
844 | u16 version; | 836 | u16 version; |
845 | int length; | 837 | int length; |
846 | struct l2tp_stats *tstats; | ||
847 | 838 | ||
848 | if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) | 839 | if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) |
849 | goto discard_bad_csum; | 840 | goto discard_bad_csum; |
@@ -932,10 +923,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, | |||
932 | discard_bad_csum: | 923 | discard_bad_csum: |
933 | LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); | 924 | LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); |
934 | UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); | 925 | UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); |
935 | tstats = &tunnel->stats; | 926 | atomic_long_inc(&tunnel->stats.rx_errors); |
936 | u64_stats_update_begin(&tstats->syncp); | ||
937 | tstats->rx_errors++; | ||
938 | u64_stats_update_end(&tstats->syncp); | ||
939 | kfree_skb(skb); | 927 | kfree_skb(skb); |
940 | 928 | ||
941 | return 0; | 929 | return 0; |
@@ -1062,7 +1050,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, | |||
1062 | struct l2tp_tunnel *tunnel = session->tunnel; | 1050 | struct l2tp_tunnel *tunnel = session->tunnel; |
1063 | unsigned int len = skb->len; | 1051 | unsigned int len = skb->len; |
1064 | int error; | 1052 | int error; |
1065 | struct l2tp_stats *tstats, *sstats; | ||
1066 | 1053 | ||
1067 | /* Debug */ | 1054 | /* Debug */ |
1068 | if (session->send_seq) | 1055 | if (session->send_seq) |
@@ -1091,21 +1078,15 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, | |||
1091 | error = ip_queue_xmit(skb, fl); | 1078 | error = ip_queue_xmit(skb, fl); |
1092 | 1079 | ||
1093 | /* Update stats */ | 1080 | /* Update stats */ |
1094 | tstats = &tunnel->stats; | ||
1095 | u64_stats_update_begin(&tstats->syncp); | ||
1096 | sstats = &session->stats; | ||
1097 | u64_stats_update_begin(&sstats->syncp); | ||
1098 | if (error >= 0) { | 1081 | if (error >= 0) { |
1099 | tstats->tx_packets++; | 1082 | atomic_long_inc(&tunnel->stats.tx_packets); |
1100 | tstats->tx_bytes += len; | 1083 | atomic_long_add(len, &tunnel->stats.tx_bytes); |
1101 | sstats->tx_packets++; | 1084 | atomic_long_inc(&session->stats.tx_packets); |
1102 | sstats->tx_bytes += len; | 1085 | atomic_long_add(len, &session->stats.tx_bytes); |
1103 | } else { | 1086 | } else { |
1104 | tstats->tx_errors++; | 1087 | atomic_long_inc(&tunnel->stats.tx_errors); |
1105 | sstats->tx_errors++; | 1088 | atomic_long_inc(&session->stats.tx_errors); |
1106 | } | 1089 | } |
1107 | u64_stats_update_end(&tstats->syncp); | ||
1108 | u64_stats_update_end(&sstats->syncp); | ||
1109 | 1090 | ||
1110 | return 0; | 1091 | return 0; |
1111 | } | 1092 | } |
@@ -1282,6 +1263,7 @@ static void l2tp_tunnel_destruct(struct sock *sk) | |||
1282 | /* No longer an encapsulation socket. See net/ipv4/udp.c */ | 1263 | /* No longer an encapsulation socket. See net/ipv4/udp.c */ |
1283 | (udp_sk(sk))->encap_type = 0; | 1264 | (udp_sk(sk))->encap_type = 0; |
1284 | (udp_sk(sk))->encap_rcv = NULL; | 1265 | (udp_sk(sk))->encap_rcv = NULL; |
1266 | (udp_sk(sk))->encap_destroy = NULL; | ||
1285 | break; | 1267 | break; |
1286 | case L2TP_ENCAPTYPE_IP: | 1268 | case L2TP_ENCAPTYPE_IP: |
1287 | break; | 1269 | break; |
@@ -1311,7 +1293,7 @@ end: | |||
1311 | 1293 | ||
1312 | /* When the tunnel is closed, all the attached sessions need to go too. | 1294 | /* When the tunnel is closed, all the attached sessions need to go too. |
1313 | */ | 1295 | */ |
1314 | static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) | 1296 | void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) |
1315 | { | 1297 | { |
1316 | int hash; | 1298 | int hash; |
1317 | struct hlist_node *walk; | 1299 | struct hlist_node *walk; |
@@ -1334,25 +1316,13 @@ again: | |||
1334 | 1316 | ||
1335 | hlist_del_init(&session->hlist); | 1317 | hlist_del_init(&session->hlist); |
1336 | 1318 | ||
1337 | /* Since we should hold the sock lock while | ||
1338 | * doing any unbinding, we need to release the | ||
1339 | * lock we're holding before taking that lock. | ||
1340 | * Hold a reference to the sock so it doesn't | ||
1341 | * disappear as we're jumping between locks. | ||
1342 | */ | ||
1343 | if (session->ref != NULL) | 1319 | if (session->ref != NULL) |
1344 | (*session->ref)(session); | 1320 | (*session->ref)(session); |
1345 | 1321 | ||
1346 | write_unlock_bh(&tunnel->hlist_lock); | 1322 | write_unlock_bh(&tunnel->hlist_lock); |
1347 | 1323 | ||
1348 | if (tunnel->version != L2TP_HDR_VER_2) { | 1324 | __l2tp_session_unhash(session); |
1349 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); | 1325 | l2tp_session_queue_purge(session); |
1350 | |||
1351 | spin_lock_bh(&pn->l2tp_session_hlist_lock); | ||
1352 | hlist_del_init_rcu(&session->global_hlist); | ||
1353 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); | ||
1354 | synchronize_rcu(); | ||
1355 | } | ||
1356 | 1326 | ||
1357 | if (session->session_close != NULL) | 1327 | if (session->session_close != NULL) |
1358 | (*session->session_close)(session); | 1328 | (*session->session_close)(session); |
@@ -1360,6 +1330,8 @@ again: | |||
1360 | if (session->deref != NULL) | 1330 | if (session->deref != NULL) |
1361 | (*session->deref)(session); | 1331 | (*session->deref)(session); |
1362 | 1332 | ||
1333 | l2tp_session_dec_refcount(session); | ||
1334 | |||
1363 | write_lock_bh(&tunnel->hlist_lock); | 1335 | write_lock_bh(&tunnel->hlist_lock); |
1364 | 1336 | ||
1365 | /* Now restart from the beginning of this hash | 1337 | /* Now restart from the beginning of this hash |
@@ -1372,6 +1344,17 @@ again: | |||
1372 | } | 1344 | } |
1373 | write_unlock_bh(&tunnel->hlist_lock); | 1345 | write_unlock_bh(&tunnel->hlist_lock); |
1374 | } | 1346 | } |
1347 | EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall); | ||
1348 | |||
1349 | /* Tunnel socket destroy hook for UDP encapsulation */ | ||
1350 | static void l2tp_udp_encap_destroy(struct sock *sk) | ||
1351 | { | ||
1352 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | ||
1353 | if (tunnel) { | ||
1354 | l2tp_tunnel_closeall(tunnel); | ||
1355 | sock_put(sk); | ||
1356 | } | ||
1357 | } | ||
1375 | 1358 | ||
1376 | /* Really kill the tunnel. | 1359 | /* Really kill the tunnel. |
1377 | * Come here only when all sessions have been cleared from the tunnel. | 1360 | * Come here only when all sessions have been cleared from the tunnel. |
@@ -1397,19 +1380,21 @@ static void l2tp_tunnel_del_work(struct work_struct *work) | |||
1397 | return; | 1380 | return; |
1398 | 1381 | ||
1399 | sock = sk->sk_socket; | 1382 | sock = sk->sk_socket; |
1400 | BUG_ON(!sock); | ||
1401 | 1383 | ||
1402 | /* If the tunnel socket was created directly by the kernel, use the | 1384 | /* If the tunnel socket was created by userspace, then go through the |
1403 | * sk_* API to release the socket now. Otherwise go through the | 1385 | * inet layer to shut the socket down, and let userspace close it. |
1404 | * inet_* layer to shut the socket down, and let userspace close it. | 1386 | * Otherwise, if we created the socket directly within the kernel, use |
1387 | * the sk API to release it here. | ||
1405 | * In either case the tunnel resources are freed in the socket | 1388 | * In either case the tunnel resources are freed in the socket |
1406 | * destructor when the tunnel socket goes away. | 1389 | * destructor when the tunnel socket goes away. |
1407 | */ | 1390 | */ |
1408 | if (sock->file == NULL) { | 1391 | if (tunnel->fd >= 0) { |
1409 | kernel_sock_shutdown(sock, SHUT_RDWR); | 1392 | if (sock) |
1410 | sk_release_kernel(sk); | 1393 | inet_shutdown(sock, 2); |
1411 | } else { | 1394 | } else { |
1412 | inet_shutdown(sock, 2); | 1395 | if (sock) |
1396 | kernel_sock_shutdown(sock, SHUT_RDWR); | ||
1397 | sk_release_kernel(sk); | ||
1413 | } | 1398 | } |
1414 | 1399 | ||
1415 | l2tp_tunnel_sock_put(sk); | 1400 | l2tp_tunnel_sock_put(sk); |
@@ -1668,6 +1653,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
1668 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ | 1653 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ |
1669 | udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; | 1654 | udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; |
1670 | udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; | 1655 | udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; |
1656 | udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy; | ||
1671 | #if IS_ENABLED(CONFIG_IPV6) | 1657 | #if IS_ENABLED(CONFIG_IPV6) |
1672 | if (sk->sk_family == PF_INET6) | 1658 | if (sk->sk_family == PF_INET6) |
1673 | udpv6_encap_enable(); | 1659 | udpv6_encap_enable(); |
@@ -1723,6 +1709,7 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create); | |||
1723 | */ | 1709 | */ |
1724 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) | 1710 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) |
1725 | { | 1711 | { |
1712 | l2tp_tunnel_closeall(tunnel); | ||
1726 | return (false == queue_work(l2tp_wq, &tunnel->del_work)); | 1713 | return (false == queue_work(l2tp_wq, &tunnel->del_work)); |
1727 | } | 1714 | } |
1728 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); | 1715 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); |
@@ -1731,62 +1718,71 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); | |||
1731 | */ | 1718 | */ |
1732 | void l2tp_session_free(struct l2tp_session *session) | 1719 | void l2tp_session_free(struct l2tp_session *session) |
1733 | { | 1720 | { |
1734 | struct l2tp_tunnel *tunnel; | 1721 | struct l2tp_tunnel *tunnel = session->tunnel; |
1735 | 1722 | ||
1736 | BUG_ON(atomic_read(&session->ref_count) != 0); | 1723 | BUG_ON(atomic_read(&session->ref_count) != 0); |
1737 | 1724 | ||
1738 | tunnel = session->tunnel; | 1725 | if (tunnel) { |
1739 | if (tunnel != NULL) { | ||
1740 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); | 1726 | BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); |
1727 | if (session->session_id != 0) | ||
1728 | atomic_dec(&l2tp_session_count); | ||
1729 | sock_put(tunnel->sock); | ||
1730 | session->tunnel = NULL; | ||
1731 | l2tp_tunnel_dec_refcount(tunnel); | ||
1732 | } | ||
1733 | |||
1734 | kfree(session); | ||
1741 | 1735 | ||
1742 | /* Delete the session from the hash */ | 1736 | return; |
1737 | } | ||
1738 | EXPORT_SYMBOL_GPL(l2tp_session_free); | ||
1739 | |||
1740 | /* Remove an l2tp session from l2tp_core's hash lists. | ||
1741 | * Provides a tidyup interface for pseudowire code which can't just route all | ||
1742 | * shutdown via l2tp_session_delete and a pseudowire-specific session_close | ||
1743 | * callback. | ||
1744 | */ | ||
1745 | void __l2tp_session_unhash(struct l2tp_session *session) | ||
1746 | { | ||
1747 | struct l2tp_tunnel *tunnel = session->tunnel; | ||
1748 | |||
1749 | /* Remove the session from core hashes */ | ||
1750 | if (tunnel) { | ||
1751 | /* Remove from the per-tunnel hash */ | ||
1743 | write_lock_bh(&tunnel->hlist_lock); | 1752 | write_lock_bh(&tunnel->hlist_lock); |
1744 | hlist_del_init(&session->hlist); | 1753 | hlist_del_init(&session->hlist); |
1745 | write_unlock_bh(&tunnel->hlist_lock); | 1754 | write_unlock_bh(&tunnel->hlist_lock); |
1746 | 1755 | ||
1747 | /* Unlink from the global hash if not L2TPv2 */ | 1756 | /* For L2TPv3 we have a per-net hash: remove from there, too */ |
1748 | if (tunnel->version != L2TP_HDR_VER_2) { | 1757 | if (tunnel->version != L2TP_HDR_VER_2) { |
1749 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); | 1758 | struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); |
1750 | |||
1751 | spin_lock_bh(&pn->l2tp_session_hlist_lock); | 1759 | spin_lock_bh(&pn->l2tp_session_hlist_lock); |
1752 | hlist_del_init_rcu(&session->global_hlist); | 1760 | hlist_del_init_rcu(&session->global_hlist); |
1753 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); | 1761 | spin_unlock_bh(&pn->l2tp_session_hlist_lock); |
1754 | synchronize_rcu(); | 1762 | synchronize_rcu(); |
1755 | } | 1763 | } |
1756 | |||
1757 | if (session->session_id != 0) | ||
1758 | atomic_dec(&l2tp_session_count); | ||
1759 | |||
1760 | sock_put(tunnel->sock); | ||
1761 | |||
1762 | /* This will delete the tunnel context if this | ||
1763 | * is the last session on the tunnel. | ||
1764 | */ | ||
1765 | session->tunnel = NULL; | ||
1766 | l2tp_tunnel_dec_refcount(tunnel); | ||
1767 | } | 1764 | } |
1768 | |||
1769 | kfree(session); | ||
1770 | |||
1771 | return; | ||
1772 | } | 1765 | } |
1773 | EXPORT_SYMBOL_GPL(l2tp_session_free); | 1766 | EXPORT_SYMBOL_GPL(__l2tp_session_unhash); |
1774 | 1767 | ||
1775 | /* This function is used by the netlink SESSION_DELETE command and by | 1768 | /* This function is used by the netlink SESSION_DELETE command and by |
1776 | pseudowire modules. | 1769 | pseudowire modules. |
1777 | */ | 1770 | */ |
1778 | int l2tp_session_delete(struct l2tp_session *session) | 1771 | int l2tp_session_delete(struct l2tp_session *session) |
1779 | { | 1772 | { |
1773 | if (session->ref) | ||
1774 | (*session->ref)(session); | ||
1775 | __l2tp_session_unhash(session); | ||
1776 | l2tp_session_queue_purge(session); | ||
1780 | if (session->session_close != NULL) | 1777 | if (session->session_close != NULL) |
1781 | (*session->session_close)(session); | 1778 | (*session->session_close)(session); |
1782 | 1779 | if (session->deref) | |
1780 | (*session->deref)(session); | ||
1783 | l2tp_session_dec_refcount(session); | 1781 | l2tp_session_dec_refcount(session); |
1784 | |||
1785 | return 0; | 1782 | return 0; |
1786 | } | 1783 | } |
1787 | EXPORT_SYMBOL_GPL(l2tp_session_delete); | 1784 | EXPORT_SYMBOL_GPL(l2tp_session_delete); |
1788 | 1785 | ||
1789 | |||
1790 | /* We come here whenever a session's send_seq, cookie_len or | 1786 | /* We come here whenever a session's send_seq, cookie_len or |
1791 | * l2specific_len parameters are set. | 1787 | * l2specific_len parameters are set. |
1792 | */ | 1788 | */ |
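
The new __l2tp_session_unhash() relies on the usual RCU removal discipline for the per-net L2TPv3 hash: unlink under the spinlock with hlist_del_init_rcu(), then synchronize_rcu() so that no reader can still see the entry before the session is torn down. A minimal sketch of that discipline in isolation (illustrative function name):

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static void example_rcu_unhash(struct hlist_node *node, spinlock_t *lock)
{
	spin_lock_bh(lock);
	hlist_del_init_rcu(node);	/* existing RCU readers may still walk it */
	spin_unlock_bh(lock);

	synchronize_rcu();		/* after this, no reader holds the entry */
}
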
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 8eb8f1d47f3a..485a490fd990 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
@@ -36,16 +36,15 @@ enum { | |||
36 | struct sk_buff; | 36 | struct sk_buff; |
37 | 37 | ||
38 | struct l2tp_stats { | 38 | struct l2tp_stats { |
39 | u64 tx_packets; | 39 | atomic_long_t tx_packets; |
40 | u64 tx_bytes; | 40 | atomic_long_t tx_bytes; |
41 | u64 tx_errors; | 41 | atomic_long_t tx_errors; |
42 | u64 rx_packets; | 42 | atomic_long_t rx_packets; |
43 | u64 rx_bytes; | 43 | atomic_long_t rx_bytes; |
44 | u64 rx_seq_discards; | 44 | atomic_long_t rx_seq_discards; |
45 | u64 rx_oos_packets; | 45 | atomic_long_t rx_oos_packets; |
46 | u64 rx_errors; | 46 | atomic_long_t rx_errors; |
47 | u64 rx_cookie_discards; | 47 | atomic_long_t rx_cookie_discards; |
48 | struct u64_stats_sync syncp; | ||
49 | }; | 48 | }; |
50 | 49 | ||
51 | struct l2tp_tunnel; | 50 | struct l2tp_tunnel; |
@@ -240,11 +239,14 @@ extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id); | |||
240 | extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); | 239 | extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth); |
241 | 240 | ||
242 | extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp); | 241 | extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp); |
242 | extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); | ||
243 | extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); | 243 | extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); |
244 | extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); | 244 | extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); |
245 | extern void __l2tp_session_unhash(struct l2tp_session *session); | ||
245 | extern int l2tp_session_delete(struct l2tp_session *session); | 246 | extern int l2tp_session_delete(struct l2tp_session *session); |
246 | extern void l2tp_session_free(struct l2tp_session *session); | 247 | extern void l2tp_session_free(struct l2tp_session *session); |
247 | extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); | 248 | extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); |
249 | extern int l2tp_session_queue_purge(struct l2tp_session *session); | ||
248 | extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); | 250 | extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); |
249 | 251 | ||
250 | extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); | 252 | extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); |
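
Switching struct l2tp_stats from u64 counters guarded by u64_stats_sync to atomic_long_t gives up 64-bit-wide counters on 32-bit machines in exchange for simpler, lock-free accounting: writers just increment, readers just read, and the seqcount begin/retry sections disappear from every call site. A hedged sketch of the resulting style (example_stats is an illustrative name):

#include <linux/atomic.h>

struct example_stats {
	atomic_long_t rx_packets;
	atomic_long_t rx_bytes;
};

static void example_account_rx(struct example_stats *s, unsigned int len)
{
	atomic_long_inc(&s->rx_packets);
	atomic_long_add(len, &s->rx_bytes);
}

static long example_read_rx_packets(struct example_stats *s)
{
	return atomic_long_read(&s->rx_packets);
}
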
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index c3813bc84552..072d7202e182 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c | |||
@@ -146,14 +146,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) | |||
146 | tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0, | 146 | tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0, |
147 | atomic_read(&tunnel->ref_count)); | 147 | atomic_read(&tunnel->ref_count)); |
148 | 148 | ||
149 | seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n", | 149 | seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n", |
150 | tunnel->debug, | 150 | tunnel->debug, |
151 | (unsigned long long)tunnel->stats.tx_packets, | 151 | atomic_long_read(&tunnel->stats.tx_packets), |
152 | (unsigned long long)tunnel->stats.tx_bytes, | 152 | atomic_long_read(&tunnel->stats.tx_bytes), |
153 | (unsigned long long)tunnel->stats.tx_errors, | 153 | atomic_long_read(&tunnel->stats.tx_errors), |
154 | (unsigned long long)tunnel->stats.rx_packets, | 154 | atomic_long_read(&tunnel->stats.rx_packets), |
155 | (unsigned long long)tunnel->stats.rx_bytes, | 155 | atomic_long_read(&tunnel->stats.rx_bytes), |
156 | (unsigned long long)tunnel->stats.rx_errors); | 156 | atomic_long_read(&tunnel->stats.rx_errors)); |
157 | 157 | ||
158 | if (tunnel->show != NULL) | 158 | if (tunnel->show != NULL) |
159 | tunnel->show(m, tunnel); | 159 | tunnel->show(m, tunnel); |
@@ -203,14 +203,14 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v) | |||
203 | seq_printf(m, "\n"); | 203 | seq_printf(m, "\n"); |
204 | } | 204 | } |
205 | 205 | ||
206 | seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n", | 206 | seq_printf(m, " %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n", |
207 | session->nr, session->ns, | 207 | session->nr, session->ns, |
208 | (unsigned long long)session->stats.tx_packets, | 208 | atomic_long_read(&session->stats.tx_packets), |
209 | (unsigned long long)session->stats.tx_bytes, | 209 | atomic_long_read(&session->stats.tx_bytes), |
210 | (unsigned long long)session->stats.tx_errors, | 210 | atomic_long_read(&session->stats.tx_errors), |
211 | (unsigned long long)session->stats.rx_packets, | 211 | atomic_long_read(&session->stats.rx_packets), |
212 | (unsigned long long)session->stats.rx_bytes, | 212 | atomic_long_read(&session->stats.rx_bytes), |
213 | (unsigned long long)session->stats.rx_errors); | 213 | atomic_long_read(&session->stats.rx_errors)); |
214 | 214 | ||
215 | if (session->show != NULL) | 215 | if (session->show != NULL) |
216 | session->show(m, session); | 216 | session->show(m, session); |
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 7f41b7051269..571db8dd2292 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
@@ -228,10 +228,16 @@ static void l2tp_ip_close(struct sock *sk, long timeout) | |||
228 | static void l2tp_ip_destroy_sock(struct sock *sk) | 228 | static void l2tp_ip_destroy_sock(struct sock *sk) |
229 | { | 229 | { |
230 | struct sk_buff *skb; | 230 | struct sk_buff *skb; |
231 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | ||
231 | 232 | ||
232 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) | 233 | while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) |
233 | kfree_skb(skb); | 234 | kfree_skb(skb); |
234 | 235 | ||
236 | if (tunnel) { | ||
237 | l2tp_tunnel_closeall(tunnel); | ||
238 | sock_put(sk); | ||
239 | } | ||
240 | |||
235 | sk_refcnt_debug_dec(sk); | 241 | sk_refcnt_debug_dec(sk); |
236 | } | 242 | } |
237 | 243 | ||
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 41f2f8126ebc..b8a6039314e8 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -241,10 +241,17 @@ static void l2tp_ip6_close(struct sock *sk, long timeout) | |||
241 | 241 | ||
242 | static void l2tp_ip6_destroy_sock(struct sock *sk) | 242 | static void l2tp_ip6_destroy_sock(struct sock *sk) |
243 | { | 243 | { |
244 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | ||
245 | |||
244 | lock_sock(sk); | 246 | lock_sock(sk); |
245 | ip6_flush_pending_frames(sk); | 247 | ip6_flush_pending_frames(sk); |
246 | release_sock(sk); | 248 | release_sock(sk); |
247 | 249 | ||
250 | if (tunnel) { | ||
251 | l2tp_tunnel_closeall(tunnel); | ||
252 | sock_put(sk); | ||
253 | } | ||
254 | |||
248 | inet6_destroy_sock(sk); | 255 | inet6_destroy_sock(sk); |
249 | } | 256 | } |
250 | 257 | ||
@@ -683,6 +690,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
683 | lsa->l2tp_addr = ipv6_hdr(skb)->saddr; | 690 | lsa->l2tp_addr = ipv6_hdr(skb)->saddr; |
684 | lsa->l2tp_flowinfo = 0; | 691 | lsa->l2tp_flowinfo = 0; |
685 | lsa->l2tp_scope_id = 0; | 692 | lsa->l2tp_scope_id = 0; |
693 | lsa->l2tp_conn_id = 0; | ||
686 | if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) | 694 | if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) |
687 | lsa->l2tp_scope_id = IP6CB(skb)->iif; | 695 | lsa->l2tp_scope_id = IP6CB(skb)->iif; |
688 | } | 696 | } |
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index c1bab22db85e..0825ff26e113 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c | |||
@@ -246,8 +246,6 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla | |||
246 | #if IS_ENABLED(CONFIG_IPV6) | 246 | #if IS_ENABLED(CONFIG_IPV6) |
247 | struct ipv6_pinfo *np = NULL; | 247 | struct ipv6_pinfo *np = NULL; |
248 | #endif | 248 | #endif |
249 | struct l2tp_stats stats; | ||
250 | unsigned int start; | ||
251 | 249 | ||
252 | hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, | 250 | hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, |
253 | L2TP_CMD_TUNNEL_GET); | 251 | L2TP_CMD_TUNNEL_GET); |
@@ -265,28 +263,22 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla | |||
265 | if (nest == NULL) | 263 | if (nest == NULL) |
266 | goto nla_put_failure; | 264 | goto nla_put_failure; |
267 | 265 | ||
268 | do { | 266 | if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, |
269 | start = u64_stats_fetch_begin(&tunnel->stats.syncp); | 267 | atomic_long_read(&tunnel->stats.tx_packets)) || |
270 | stats.tx_packets = tunnel->stats.tx_packets; | 268 | nla_put_u64(skb, L2TP_ATTR_TX_BYTES, |
271 | stats.tx_bytes = tunnel->stats.tx_bytes; | 269 | atomic_long_read(&tunnel->stats.tx_bytes)) || |
272 | stats.tx_errors = tunnel->stats.tx_errors; | 270 | nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, |
273 | stats.rx_packets = tunnel->stats.rx_packets; | 271 | atomic_long_read(&tunnel->stats.tx_errors)) || |
274 | stats.rx_bytes = tunnel->stats.rx_bytes; | 272 | nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, |
275 | stats.rx_errors = tunnel->stats.rx_errors; | 273 | atomic_long_read(&tunnel->stats.rx_packets)) || |
276 | stats.rx_seq_discards = tunnel->stats.rx_seq_discards; | 274 | nla_put_u64(skb, L2TP_ATTR_RX_BYTES, |
277 | stats.rx_oos_packets = tunnel->stats.rx_oos_packets; | 275 | atomic_long_read(&tunnel->stats.rx_bytes)) || |
278 | } while (u64_stats_fetch_retry(&tunnel->stats.syncp, start)); | ||
279 | |||
280 | if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || | ||
281 | nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || | ||
282 | nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || | ||
283 | nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || | ||
284 | nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || | ||
285 | nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, | 276 | nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, |
286 | stats.rx_seq_discards) || | 277 | atomic_long_read(&tunnel->stats.rx_seq_discards)) || |
287 | nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, | 278 | nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, |
288 | stats.rx_oos_packets) || | 279 | atomic_long_read(&tunnel->stats.rx_oos_packets)) || |
289 | nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) | 280 | nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, |
281 | atomic_long_read(&tunnel->stats.rx_errors))) | ||
290 | goto nla_put_failure; | 282 | goto nla_put_failure; |
291 | nla_nest_end(skb, nest); | 283 | nla_nest_end(skb, nest); |
292 | 284 | ||
@@ -612,8 +604,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl | |||
612 | struct nlattr *nest; | 604 | struct nlattr *nest; |
613 | struct l2tp_tunnel *tunnel = session->tunnel; | 605 | struct l2tp_tunnel *tunnel = session->tunnel; |
614 | struct sock *sk = NULL; | 606 | struct sock *sk = NULL; |
615 | struct l2tp_stats stats; | ||
616 | unsigned int start; | ||
617 | 607 | ||
618 | sk = tunnel->sock; | 608 | sk = tunnel->sock; |
619 | 609 | ||
@@ -656,28 +646,22 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl | |||
656 | if (nest == NULL) | 646 | if (nest == NULL) |
657 | goto nla_put_failure; | 647 | goto nla_put_failure; |
658 | 648 | ||
659 | do { | 649 | if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, |
660 | start = u64_stats_fetch_begin(&session->stats.syncp); | 650 | atomic_long_read(&session->stats.tx_packets)) || |
661 | stats.tx_packets = session->stats.tx_packets; | 651 | nla_put_u64(skb, L2TP_ATTR_TX_BYTES, |
662 | stats.tx_bytes = session->stats.tx_bytes; | 652 | atomic_long_read(&session->stats.tx_bytes)) || |
663 | stats.tx_errors = session->stats.tx_errors; | 653 | nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, |
664 | stats.rx_packets = session->stats.rx_packets; | 654 | atomic_long_read(&session->stats.tx_errors)) || |
665 | stats.rx_bytes = session->stats.rx_bytes; | 655 | nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, |
666 | stats.rx_errors = session->stats.rx_errors; | 656 | atomic_long_read(&session->stats.rx_packets)) || |
667 | stats.rx_seq_discards = session->stats.rx_seq_discards; | 657 | nla_put_u64(skb, L2TP_ATTR_RX_BYTES, |
668 | stats.rx_oos_packets = session->stats.rx_oos_packets; | 658 | atomic_long_read(&session->stats.rx_bytes)) || |
669 | } while (u64_stats_fetch_retry(&session->stats.syncp, start)); | ||
670 | |||
671 | if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || | ||
672 | nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || | ||
673 | nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || | ||
674 | nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || | ||
675 | nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || | ||
676 | nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, | 659 | nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, |
677 | stats.rx_seq_discards) || | 660 | atomic_long_read(&session->stats.rx_seq_discards)) || |
678 | nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, | 661 | nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, |
679 | stats.rx_oos_packets) || | 662 | atomic_long_read(&session->stats.rx_oos_packets)) || |
680 | nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) | 663 | nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, |
664 | atomic_long_read(&session->stats.rx_errors))) | ||
681 | goto nla_put_failure; | 665 | goto nla_put_failure; |
682 | nla_nest_end(skb, nest); | 666 | nla_nest_end(skb, nest); |
683 | 667 | ||
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 6a53371dba1f..637a341c1e2d 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
@@ -97,6 +97,7 @@ | |||
97 | #include <net/ip.h> | 97 | #include <net/ip.h> |
98 | #include <net/udp.h> | 98 | #include <net/udp.h> |
99 | #include <net/xfrm.h> | 99 | #include <net/xfrm.h> |
100 | #include <net/inet_common.h> | ||
100 | 101 | ||
101 | #include <asm/byteorder.h> | 102 | #include <asm/byteorder.h> |
102 | #include <linux/atomic.h> | 103 | #include <linux/atomic.h> |
@@ -259,7 +260,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int | |||
259 | session->name); | 260 | session->name); |
260 | 261 | ||
261 | /* Not bound. Nothing we can do, so discard. */ | 262 | /* Not bound. Nothing we can do, so discard. */ |
262 | session->stats.rx_errors++; | 263 | atomic_long_inc(&session->stats.rx_errors); |
263 | kfree_skb(skb); | 264 | kfree_skb(skb); |
264 | } | 265 | } |
265 | 266 | ||
@@ -447,34 +448,16 @@ static void pppol2tp_session_close(struct l2tp_session *session) | |||
447 | { | 448 | { |
448 | struct pppol2tp_session *ps = l2tp_session_priv(session); | 449 | struct pppol2tp_session *ps = l2tp_session_priv(session); |
449 | struct sock *sk = ps->sock; | 450 | struct sock *sk = ps->sock; |
450 | struct sk_buff *skb; | 451 | struct socket *sock = sk->sk_socket; |
451 | 452 | ||
452 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | 453 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); |
453 | 454 | ||
454 | if (session->session_id == 0) | ||
455 | goto out; | ||
456 | |||
457 | if (sk != NULL) { | ||
458 | lock_sock(sk); | ||
459 | |||
460 | if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { | ||
461 | pppox_unbind_sock(sk); | ||
462 | sk->sk_state = PPPOX_DEAD; | ||
463 | sk->sk_state_change(sk); | ||
464 | } | ||
465 | |||
466 | /* Purge any queued data */ | ||
467 | skb_queue_purge(&sk->sk_receive_queue); | ||
468 | skb_queue_purge(&sk->sk_write_queue); | ||
469 | while ((skb = skb_dequeue(&session->reorder_q))) { | ||
470 | kfree_skb(skb); | ||
471 | sock_put(sk); | ||
472 | } | ||
473 | 455 | ||
474 | release_sock(sk); | 456 | if (sock) { |
457 | inet_shutdown(sock, 2); | ||
458 | /* Don't let the session go away before our socket does */ | ||
459 | l2tp_session_inc_refcount(session); | ||
475 | } | 460 | } |
476 | |||
477 | out: | ||
478 | return; | 461 | return; |
479 | } | 462 | } |
480 | 463 | ||
@@ -483,19 +466,12 @@ out: | |||
483 | */ | 466 | */ |
484 | static void pppol2tp_session_destruct(struct sock *sk) | 467 | static void pppol2tp_session_destruct(struct sock *sk) |
485 | { | 468 | { |
486 | struct l2tp_session *session; | 469 | struct l2tp_session *session = sk->sk_user_data; |
487 | 470 | if (session) { | |
488 | if (sk->sk_user_data != NULL) { | ||
489 | session = sk->sk_user_data; | ||
490 | if (session == NULL) | ||
491 | goto out; | ||
492 | |||
493 | sk->sk_user_data = NULL; | 471 | sk->sk_user_data = NULL; |
494 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | 472 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); |
495 | l2tp_session_dec_refcount(session); | 473 | l2tp_session_dec_refcount(session); |
496 | } | 474 | } |
497 | |||
498 | out: | ||
499 | return; | 475 | return; |
500 | } | 476 | } |
501 | 477 | ||
@@ -525,16 +501,13 @@ static int pppol2tp_release(struct socket *sock) | |||
525 | session = pppol2tp_sock_to_session(sk); | 501 | session = pppol2tp_sock_to_session(sk); |
526 | 502 | ||
527 | /* Purge any queued data */ | 503 | /* Purge any queued data */ |
528 | skb_queue_purge(&sk->sk_receive_queue); | ||
529 | skb_queue_purge(&sk->sk_write_queue); | ||
530 | if (session != NULL) { | 504 | if (session != NULL) { |
531 | struct sk_buff *skb; | 505 | __l2tp_session_unhash(session); |
532 | while ((skb = skb_dequeue(&session->reorder_q))) { | 506 | l2tp_session_queue_purge(session); |
533 | kfree_skb(skb); | ||
534 | sock_put(sk); | ||
535 | } | ||
536 | sock_put(sk); | 507 | sock_put(sk); |
537 | } | 508 | } |
509 | skb_queue_purge(&sk->sk_receive_queue); | ||
510 | skb_queue_purge(&sk->sk_write_queue); | ||
538 | 511 | ||
539 | release_sock(sk); | 512 | release_sock(sk); |
540 | 513 | ||
@@ -880,18 +853,6 @@ out: | |||
880 | return error; | 853 | return error; |
881 | } | 854 | } |
882 | 855 | ||
883 | /* Called when deleting sessions via the netlink interface. | ||
884 | */ | ||
885 | static int pppol2tp_session_delete(struct l2tp_session *session) | ||
886 | { | ||
887 | struct pppol2tp_session *ps = l2tp_session_priv(session); | ||
888 | |||
889 | if (ps->sock == NULL) | ||
890 | l2tp_session_dec_refcount(session); | ||
891 | |||
892 | return 0; | ||
893 | } | ||
894 | |||
895 | #endif /* CONFIG_L2TP_V3 */ | 856 | #endif /* CONFIG_L2TP_V3 */ |
896 | 857 | ||
897 | /* getname() support. | 858 | /* getname() support. |
@@ -1025,14 +986,14 @@ end: | |||
1025 | static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, | 986 | static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest, |
1026 | struct l2tp_stats *stats) | 987 | struct l2tp_stats *stats) |
1027 | { | 988 | { |
1028 | dest->tx_packets = stats->tx_packets; | 989 | dest->tx_packets = atomic_long_read(&stats->tx_packets); |
1029 | dest->tx_bytes = stats->tx_bytes; | 990 | dest->tx_bytes = atomic_long_read(&stats->tx_bytes); |
1030 | dest->tx_errors = stats->tx_errors; | 991 | dest->tx_errors = atomic_long_read(&stats->tx_errors); |
1031 | dest->rx_packets = stats->rx_packets; | 992 | dest->rx_packets = atomic_long_read(&stats->rx_packets); |
1032 | dest->rx_bytes = stats->rx_bytes; | 993 | dest->rx_bytes = atomic_long_read(&stats->rx_bytes); |
1033 | dest->rx_seq_discards = stats->rx_seq_discards; | 994 | dest->rx_seq_discards = atomic_long_read(&stats->rx_seq_discards); |
1034 | dest->rx_oos_packets = stats->rx_oos_packets; | 995 | dest->rx_oos_packets = atomic_long_read(&stats->rx_oos_packets); |
1035 | dest->rx_errors = stats->rx_errors; | 996 | dest->rx_errors = atomic_long_read(&stats->rx_errors); |
1036 | } | 997 | } |
1037 | 998 | ||
1038 | /* Session ioctl helper. | 999 | /* Session ioctl helper. |
@@ -1666,14 +1627,14 @@ static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) | |||
1666 | tunnel->name, | 1627 | tunnel->name, |
1667 | (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N', | 1628 | (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N', |
1668 | atomic_read(&tunnel->ref_count) - 1); | 1629 | atomic_read(&tunnel->ref_count) - 1); |
1669 | seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n", | 1630 | seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n", |
1670 | tunnel->debug, | 1631 | tunnel->debug, |
1671 | (unsigned long long)tunnel->stats.tx_packets, | 1632 | atomic_long_read(&tunnel->stats.tx_packets), |
1672 | (unsigned long long)tunnel->stats.tx_bytes, | 1633 | atomic_long_read(&tunnel->stats.tx_bytes), |
1673 | (unsigned long long)tunnel->stats.tx_errors, | 1634 | atomic_long_read(&tunnel->stats.tx_errors), |
1674 | (unsigned long long)tunnel->stats.rx_packets, | 1635 | atomic_long_read(&tunnel->stats.rx_packets), |
1675 | (unsigned long long)tunnel->stats.rx_bytes, | 1636 | atomic_long_read(&tunnel->stats.rx_bytes), |
1676 | (unsigned long long)tunnel->stats.rx_errors); | 1637 | atomic_long_read(&tunnel->stats.rx_errors)); |
1677 | } | 1638 | } |
1678 | 1639 | ||
1679 | static void pppol2tp_seq_session_show(struct seq_file *m, void *v) | 1640 | static void pppol2tp_seq_session_show(struct seq_file *m, void *v) |
@@ -1708,14 +1669,14 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v) | |||
1708 | session->lns_mode ? "LNS" : "LAC", | 1669 | session->lns_mode ? "LNS" : "LAC", |
1709 | session->debug, | 1670 | session->debug, |
1710 | jiffies_to_msecs(session->reorder_timeout)); | 1671 | jiffies_to_msecs(session->reorder_timeout)); |
1711 | seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n", | 1672 | seq_printf(m, " %hu/%hu %ld/%ld/%ld %ld/%ld/%ld\n", |
1712 | session->nr, session->ns, | 1673 | session->nr, session->ns, |
1713 | (unsigned long long)session->stats.tx_packets, | 1674 | atomic_long_read(&session->stats.tx_packets), |
1714 | (unsigned long long)session->stats.tx_bytes, | 1675 | atomic_long_read(&session->stats.tx_bytes), |
1715 | (unsigned long long)session->stats.tx_errors, | 1676 | atomic_long_read(&session->stats.tx_errors), |
1716 | (unsigned long long)session->stats.rx_packets, | 1677 | atomic_long_read(&session->stats.rx_packets), |
1717 | (unsigned long long)session->stats.rx_bytes, | 1678 | atomic_long_read(&session->stats.rx_bytes), |
1718 | (unsigned long long)session->stats.rx_errors); | 1679 | atomic_long_read(&session->stats.rx_errors)); |
1719 | 1680 | ||
1720 | if (po) | 1681 | if (po) |
1721 | seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); | 1682 | seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan)); |
@@ -1839,7 +1800,7 @@ static const struct pppox_proto pppol2tp_proto = { | |||
1839 | 1800 | ||
1840 | static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = { | 1801 | static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = { |
1841 | .session_create = pppol2tp_session_create, | 1802 | .session_create = pppol2tp_session_create, |
1842 | .session_delete = pppol2tp_session_delete, | 1803 | .session_delete = l2tp_session_delete, |
1843 | }; | 1804 | }; |
1844 | 1805 | ||
1845 | #endif /* CONFIG_L2TP_V3 */ | 1806 | #endif /* CONFIG_L2TP_V3 */ |
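
The l2tp hunks above replace the seqcount-protected snapshot counters with plain atomic longs that readers sample directly. As an aside, here is a minimal user-space sketch of the same pattern using C11 atomics; the struct and function names are invented for illustration and are not taken from the kernel source.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for struct l2tp_stats: each counter is an atomic
 * long, so writers just increment and readers just load -- no seqcount
 * retry loop and no local snapshot struct are needed. */
struct tunnel_stats {
	atomic_long tx_packets;
	atomic_long rx_errors;
};

static void on_xmit(struct tunnel_stats *s)
{
	atomic_fetch_add_explicit(&s->tx_packets, 1, memory_order_relaxed);
}

int main(void)
{
	struct tunnel_stats s = { 0 };

	on_xmit(&s);
	on_xmit(&s);
	/* Reader side: one relaxed load per counter, analogous to the
	 * atomic_long_read() calls in the diff above. */
	printf("tx_packets=%ld rx_errors=%ld\n",
	       atomic_load_explicit(&s.tx_packets, memory_order_relaxed),
	       atomic_load_explicit(&s.rx_errors, memory_order_relaxed));
	return 0;
}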
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 88709882c464..48aaa89253e0 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c | |||
@@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
720 | int target; /* Read at least this many bytes */ | 720 | int target; /* Read at least this many bytes */ |
721 | long timeo; | 721 | long timeo; |
722 | 722 | ||
723 | msg->msg_namelen = 0; | ||
724 | |||
723 | lock_sock(sk); | 725 | lock_sock(sk); |
724 | copied = -ENOTCONN; | 726 | copied = -ENOTCONN; |
725 | if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) | 727 | if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) |
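
The one-line af_llc change is part of a batch of fixes that zero the returned address length (and, elsewhere in this series, the sockaddr buffer itself) before recvmsg copies anything back to user space, so uninitialized kernel stack bytes are never exposed. A small stand-alone C illustration of the same defensive idiom follows; it is purely illustrative, not kernel code.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Fill a caller-supplied sockaddr buffer.  Zeroing it first guarantees
 * that padding and unused fields never carry stale stack contents,
 * mirroring the msg_namelen = 0 / memset() fixes in this series. */
static socklen_t fill_addr(struct sockaddr_storage *ss)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)ss;

	memset(ss, 0, sizeof(*ss));	/* no leak even if we return early */
	sin->sin_family = AF_INET;
	sin->sin_port = htons(1701);	/* example port only */
	return sizeof(*sin);
}

int main(void)
{
	struct sockaddr_storage ss;
	socklen_t len = fill_addr(&ss);

	printf("filled %u bytes, family=%d\n", (unsigned)len,
	       ((struct sockaddr *)&ss)->sa_family);
	return 0;
}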
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index fb306814576a..a6893602f87a 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -2582,7 +2582,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, | |||
2582 | list_del(&dep->list); | 2582 | list_del(&dep->list); |
2583 | mutex_unlock(&local->mtx); | 2583 | mutex_unlock(&local->mtx); |
2584 | 2584 | ||
2585 | ieee80211_roc_notify_destroy(dep); | 2585 | ieee80211_roc_notify_destroy(dep, true); |
2586 | return 0; | 2586 | return 0; |
2587 | } | 2587 | } |
2588 | 2588 | ||
@@ -2622,7 +2622,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, | |||
2622 | ieee80211_start_next_roc(local); | 2622 | ieee80211_start_next_roc(local); |
2623 | mutex_unlock(&local->mtx); | 2623 | mutex_unlock(&local->mtx); |
2624 | 2624 | ||
2625 | ieee80211_roc_notify_destroy(found); | 2625 | ieee80211_roc_notify_destroy(found, true); |
2626 | } else { | 2626 | } else { |
2627 | /* work may be pending so use it all the time */ | 2627 | /* work may be pending so use it all the time */ |
2628 | found->abort = true; | 2628 | found->abort = true; |
@@ -2632,6 +2632,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local, | |||
2632 | 2632 | ||
2633 | /* work will clean up etc */ | 2633 | /* work will clean up etc */ |
2634 | flush_delayed_work(&found->work); | 2634 | flush_delayed_work(&found->work); |
2635 | WARN_ON(!found->to_be_freed); | ||
2636 | kfree(found); | ||
2635 | } | 2637 | } |
2636 | 2638 | ||
2637 | return 0; | 2639 | return 0; |
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 78c0d90dd641..931be419ab5a 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c | |||
@@ -63,6 +63,7 @@ ieee80211_new_chanctx(struct ieee80211_local *local, | |||
63 | enum ieee80211_chanctx_mode mode) | 63 | enum ieee80211_chanctx_mode mode) |
64 | { | 64 | { |
65 | struct ieee80211_chanctx *ctx; | 65 | struct ieee80211_chanctx *ctx; |
66 | u32 changed; | ||
66 | int err; | 67 | int err; |
67 | 68 | ||
68 | lockdep_assert_held(&local->chanctx_mtx); | 69 | lockdep_assert_held(&local->chanctx_mtx); |
@@ -76,6 +77,13 @@ ieee80211_new_chanctx(struct ieee80211_local *local, | |||
76 | ctx->conf.rx_chains_dynamic = 1; | 77 | ctx->conf.rx_chains_dynamic = 1; |
77 | ctx->mode = mode; | 78 | ctx->mode = mode; |
78 | 79 | ||
80 | /* acquire mutex to prevent idle from changing */ | ||
81 | mutex_lock(&local->mtx); | ||
82 | /* turn idle off *before* setting channel -- some drivers need that */ | ||
83 | changed = ieee80211_idle_off(local); | ||
84 | if (changed) | ||
85 | ieee80211_hw_config(local, changed); | ||
86 | |||
79 | if (!local->use_chanctx) { | 87 | if (!local->use_chanctx) { |
80 | local->_oper_channel_type = | 88 | local->_oper_channel_type = |
81 | cfg80211_get_chandef_type(chandef); | 89 | cfg80211_get_chandef_type(chandef); |
@@ -85,14 +93,17 @@ ieee80211_new_chanctx(struct ieee80211_local *local, | |||
85 | err = drv_add_chanctx(local, ctx); | 93 | err = drv_add_chanctx(local, ctx); |
86 | if (err) { | 94 | if (err) { |
87 | kfree(ctx); | 95 | kfree(ctx); |
88 | return ERR_PTR(err); | 96 | ctx = ERR_PTR(err); |
97 | |||
98 | ieee80211_recalc_idle(local); | ||
99 | goto out; | ||
89 | } | 100 | } |
90 | } | 101 | } |
91 | 102 | ||
103 | /* and keep the mutex held until the new chanctx is on the list */ | ||
92 | list_add_rcu(&ctx->list, &local->chanctx_list); | 104 | list_add_rcu(&ctx->list, &local->chanctx_list); |
93 | 105 | ||
94 | mutex_lock(&local->mtx); | 106 | out: |
95 | ieee80211_recalc_idle(local); | ||
96 | mutex_unlock(&local->mtx); | 107 | mutex_unlock(&local->mtx); |
97 | 108 | ||
98 | return ctx; | 109 | return ctx; |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 388580a1bada..5672533a0832 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -309,6 +309,7 @@ struct ieee80211_roc_work { | |||
309 | struct ieee80211_channel *chan; | 309 | struct ieee80211_channel *chan; |
310 | 310 | ||
311 | bool started, abort, hw_begun, notified; | 311 | bool started, abort, hw_begun, notified; |
312 | bool to_be_freed; | ||
312 | 313 | ||
313 | unsigned long hw_start_time; | 314 | unsigned long hw_start_time; |
314 | 315 | ||
@@ -1347,7 +1348,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local); | |||
1347 | void ieee80211_roc_setup(struct ieee80211_local *local); | 1348 | void ieee80211_roc_setup(struct ieee80211_local *local); |
1348 | void ieee80211_start_next_roc(struct ieee80211_local *local); | 1349 | void ieee80211_start_next_roc(struct ieee80211_local *local); |
1349 | void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); | 1350 | void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); |
1350 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc); | 1351 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free); |
1351 | void ieee80211_sw_roc_work(struct work_struct *work); | 1352 | void ieee80211_sw_roc_work(struct work_struct *work); |
1352 | void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc); | 1353 | void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc); |
1353 | 1354 | ||
@@ -1361,6 +1362,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, | |||
1361 | enum nl80211_iftype type); | 1362 | enum nl80211_iftype type); |
1362 | void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); | 1363 | void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata); |
1363 | void ieee80211_remove_interfaces(struct ieee80211_local *local); | 1364 | void ieee80211_remove_interfaces(struct ieee80211_local *local); |
1365 | u32 ieee80211_idle_off(struct ieee80211_local *local); | ||
1364 | void ieee80211_recalc_idle(struct ieee80211_local *local); | 1366 | void ieee80211_recalc_idle(struct ieee80211_local *local); |
1365 | void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, | 1367 | void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, |
1366 | const int offset); | 1368 | const int offset); |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index baaa8608e52d..58150f877ec3 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -78,7 +78,7 @@ void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata) | |||
78 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); | 78 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); |
79 | } | 79 | } |
80 | 80 | ||
81 | static u32 ieee80211_idle_off(struct ieee80211_local *local) | 81 | u32 ieee80211_idle_off(struct ieee80211_local *local) |
82 | { | 82 | { |
83 | if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) | 83 | if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) |
84 | return 0; | 84 | return 0; |
@@ -349,21 +349,19 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata) | |||
349 | static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) | 349 | static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) |
350 | { | 350 | { |
351 | struct ieee80211_sub_if_data *sdata; | 351 | struct ieee80211_sub_if_data *sdata; |
352 | int ret = 0; | 352 | int ret; |
353 | 353 | ||
354 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) | 354 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) |
355 | return 0; | 355 | return 0; |
356 | 356 | ||
357 | mutex_lock(&local->iflist_mtx); | 357 | ASSERT_RTNL(); |
358 | 358 | ||
359 | if (local->monitor_sdata) | 359 | if (local->monitor_sdata) |
360 | goto out_unlock; | 360 | return 0; |
361 | 361 | ||
362 | sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); | 362 | sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); |
363 | if (!sdata) { | 363 | if (!sdata) |
364 | ret = -ENOMEM; | 364 | return -ENOMEM; |
365 | goto out_unlock; | ||
366 | } | ||
367 | 365 | ||
368 | /* set up data */ | 366 | /* set up data */ |
369 | sdata->local = local; | 367 | sdata->local = local; |
@@ -377,13 +375,13 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) | |||
377 | if (WARN_ON(ret)) { | 375 | if (WARN_ON(ret)) { |
378 | /* ok .. stupid driver, it asked for this! */ | 376 | /* ok .. stupid driver, it asked for this! */ |
379 | kfree(sdata); | 377 | kfree(sdata); |
380 | goto out_unlock; | 378 | return ret; |
381 | } | 379 | } |
382 | 380 | ||
383 | ret = ieee80211_check_queues(sdata); | 381 | ret = ieee80211_check_queues(sdata); |
384 | if (ret) { | 382 | if (ret) { |
385 | kfree(sdata); | 383 | kfree(sdata); |
386 | goto out_unlock; | 384 | return ret; |
387 | } | 385 | } |
388 | 386 | ||
389 | ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, | 387 | ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, |
@@ -391,13 +389,14 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local) | |||
391 | if (ret) { | 389 | if (ret) { |
392 | drv_remove_interface(local, sdata); | 390 | drv_remove_interface(local, sdata); |
393 | kfree(sdata); | 391 | kfree(sdata); |
394 | goto out_unlock; | 392 | return ret; |
395 | } | 393 | } |
396 | 394 | ||
395 | mutex_lock(&local->iflist_mtx); | ||
397 | rcu_assign_pointer(local->monitor_sdata, sdata); | 396 | rcu_assign_pointer(local->monitor_sdata, sdata); |
398 | out_unlock: | ||
399 | mutex_unlock(&local->iflist_mtx); | 397 | mutex_unlock(&local->iflist_mtx); |
400 | return ret; | 398 | |
399 | return 0; | ||
401 | } | 400 | } |
402 | 401 | ||
403 | static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) | 402 | static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) |
@@ -407,14 +406,20 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) | |||
407 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) | 406 | if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF)) |
408 | return; | 407 | return; |
409 | 408 | ||
409 | ASSERT_RTNL(); | ||
410 | |||
410 | mutex_lock(&local->iflist_mtx); | 411 | mutex_lock(&local->iflist_mtx); |
411 | 412 | ||
412 | sdata = rcu_dereference_protected(local->monitor_sdata, | 413 | sdata = rcu_dereference_protected(local->monitor_sdata, |
413 | lockdep_is_held(&local->iflist_mtx)); | 414 | lockdep_is_held(&local->iflist_mtx)); |
414 | if (!sdata) | 415 | if (!sdata) { |
415 | goto out_unlock; | 416 | mutex_unlock(&local->iflist_mtx); |
417 | return; | ||
418 | } | ||
416 | 419 | ||
417 | rcu_assign_pointer(local->monitor_sdata, NULL); | 420 | rcu_assign_pointer(local->monitor_sdata, NULL); |
421 | mutex_unlock(&local->iflist_mtx); | ||
422 | |||
418 | synchronize_net(); | 423 | synchronize_net(); |
419 | 424 | ||
420 | ieee80211_vif_release_channel(sdata); | 425 | ieee80211_vif_release_channel(sdata); |
@@ -422,8 +427,6 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) | |||
422 | drv_remove_interface(local, sdata); | 427 | drv_remove_interface(local, sdata); |
423 | 428 | ||
424 | kfree(sdata); | 429 | kfree(sdata); |
425 | out_unlock: | ||
426 | mutex_unlock(&local->iflist_mtx); | ||
427 | } | 430 | } |
428 | 431 | ||
429 | /* | 432 | /* |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 29ce2aa87e7b..4749b3858695 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -1060,7 +1060,8 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) | |||
1060 | 1060 | ||
1061 | rcu_read_lock(); | 1061 | rcu_read_lock(); |
1062 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | 1062 | list_for_each_entry_rcu(sdata, &local->interfaces, list) |
1063 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 1063 | if (ieee80211_vif_is_mesh(&sdata->vif) && |
1064 | ieee80211_sdata_running(sdata)) | ||
1064 | ieee80211_queue_work(&local->hw, &sdata->work); | 1065 | ieee80211_queue_work(&local->hw, &sdata->work); |
1065 | rcu_read_unlock(); | 1066 | rcu_read_unlock(); |
1066 | } | 1067 | } |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 141577412d84..82cc30318a86 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -3608,8 +3608,10 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local) | |||
3608 | 3608 | ||
3609 | /* Restart STA timers */ | 3609 | /* Restart STA timers */ |
3610 | rcu_read_lock(); | 3610 | rcu_read_lock(); |
3611 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | 3611 | list_for_each_entry_rcu(sdata, &local->interfaces, list) { |
3612 | ieee80211_restart_sta_timer(sdata); | 3612 | if (ieee80211_sdata_running(sdata)) |
3613 | ieee80211_restart_sta_timer(sdata); | ||
3614 | } | ||
3613 | rcu_read_unlock(); | 3615 | rcu_read_unlock(); |
3614 | } | 3616 | } |
3615 | 3617 | ||
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index cc79b4a2e821..430bd254e496 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c | |||
@@ -297,10 +297,13 @@ void ieee80211_start_next_roc(struct ieee80211_local *local) | |||
297 | } | 297 | } |
298 | } | 298 | } |
299 | 299 | ||
300 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) | 300 | void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free) |
301 | { | 301 | { |
302 | struct ieee80211_roc_work *dep, *tmp; | 302 | struct ieee80211_roc_work *dep, *tmp; |
303 | 303 | ||
304 | if (WARN_ON(roc->to_be_freed)) | ||
305 | return; | ||
306 | |||
304 | /* was never transmitted */ | 307 | /* was never transmitted */ |
305 | if (roc->frame) { | 308 | if (roc->frame) { |
306 | cfg80211_mgmt_tx_status(&roc->sdata->wdev, | 309 | cfg80211_mgmt_tx_status(&roc->sdata->wdev, |
@@ -316,9 +319,12 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) | |||
316 | GFP_KERNEL); | 319 | GFP_KERNEL); |
317 | 320 | ||
318 | list_for_each_entry_safe(dep, tmp, &roc->dependents, list) | 321 | list_for_each_entry_safe(dep, tmp, &roc->dependents, list) |
319 | ieee80211_roc_notify_destroy(dep); | 322 | ieee80211_roc_notify_destroy(dep, true); |
320 | 323 | ||
321 | kfree(roc); | 324 | if (free) |
325 | kfree(roc); | ||
326 | else | ||
327 | roc->to_be_freed = true; | ||
322 | } | 328 | } |
323 | 329 | ||
324 | void ieee80211_sw_roc_work(struct work_struct *work) | 330 | void ieee80211_sw_roc_work(struct work_struct *work) |
@@ -331,6 +337,9 @@ void ieee80211_sw_roc_work(struct work_struct *work) | |||
331 | 337 | ||
332 | mutex_lock(&local->mtx); | 338 | mutex_lock(&local->mtx); |
333 | 339 | ||
340 | if (roc->to_be_freed) | ||
341 | goto out_unlock; | ||
342 | |||
334 | if (roc->abort) | 343 | if (roc->abort) |
335 | goto finish; | 344 | goto finish; |
336 | 345 | ||
@@ -370,7 +379,7 @@ void ieee80211_sw_roc_work(struct work_struct *work) | |||
370 | finish: | 379 | finish: |
371 | list_del(&roc->list); | 380 | list_del(&roc->list); |
372 | started = roc->started; | 381 | started = roc->started; |
373 | ieee80211_roc_notify_destroy(roc); | 382 | ieee80211_roc_notify_destroy(roc, !roc->abort); |
374 | 383 | ||
375 | if (started) { | 384 | if (started) { |
376 | drv_flush(local, false); | 385 | drv_flush(local, false); |
@@ -410,7 +419,7 @@ static void ieee80211_hw_roc_done(struct work_struct *work) | |||
410 | 419 | ||
411 | list_del(&roc->list); | 420 | list_del(&roc->list); |
412 | 421 | ||
413 | ieee80211_roc_notify_destroy(roc); | 422 | ieee80211_roc_notify_destroy(roc, true); |
414 | 423 | ||
415 | /* if there's another roc, start it now */ | 424 | /* if there's another roc, start it now */ |
416 | ieee80211_start_next_roc(local); | 425 | ieee80211_start_next_roc(local); |
@@ -460,12 +469,14 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata) | |||
460 | list_for_each_entry_safe(roc, tmp, &tmp_list, list) { | 469 | list_for_each_entry_safe(roc, tmp, &tmp_list, list) { |
461 | if (local->ops->remain_on_channel) { | 470 | if (local->ops->remain_on_channel) { |
462 | list_del(&roc->list); | 471 | list_del(&roc->list); |
463 | ieee80211_roc_notify_destroy(roc); | 472 | ieee80211_roc_notify_destroy(roc, true); |
464 | } else { | 473 | } else { |
465 | ieee80211_queue_delayed_work(&local->hw, &roc->work, 0); | 474 | ieee80211_queue_delayed_work(&local->hw, &roc->work, 0); |
466 | 475 | ||
467 | /* work will clean up etc */ | 476 | /* work will clean up etc */ |
468 | flush_delayed_work(&roc->work); | 477 | flush_delayed_work(&roc->work); |
478 | WARN_ON(!roc->to_be_freed); | ||
479 | kfree(roc); | ||
469 | } | 480 | } |
470 | } | 481 | } |
471 | 482 | ||
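
The offchannel changes hand ownership of the roc item back to the canceller: ieee80211_roc_notify_destroy() only frees the item when asked to, otherwise it sets to_be_freed and whoever flushed the work does the final kfree(). A compressed user-space sketch of that hand-off is shown below; the types and names are invented for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct work_item {
	bool to_be_freed;
	/* ... payload ... */
};

/* Tear the item down.  When 'free_now' is false the caller still needs
 * the item (e.g. it is about to flush a work queue that touches it), so
 * we only mark it and let that caller perform the final free. */
static void notify_destroy(struct work_item *w, bool free_now)
{
	/* ... send notifications, detach dependents ... */
	if (free_now)
		free(w);
	else
		w->to_be_freed = true;
}

int main(void)
{
	struct work_item *w = calloc(1, sizeof(*w));

	if (!w)
		return 1;
	notify_destroy(w, false);	/* worker path: defer the free */
	/* the caller "flushes the work" here, then reaps the item */
	if (w->to_be_freed)
		free(w);
	printf("done\n");
	return 0;
}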
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bb73ed2d20b9..c6844ad080be 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -2675,7 +2675,19 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) | |||
2675 | 2675 | ||
2676 | memset(nskb->cb, 0, sizeof(nskb->cb)); | 2676 | memset(nskb->cb, 0, sizeof(nskb->cb)); |
2677 | 2677 | ||
2678 | ieee80211_tx_skb(rx->sdata, nskb); | 2678 | if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { |
2679 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); | ||
2680 | |||
2681 | info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | | ||
2682 | IEEE80211_TX_INTFL_OFFCHAN_TX_OK | | ||
2683 | IEEE80211_TX_CTL_NO_CCK_RATE; | ||
2684 | if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) | ||
2685 | info->hw_queue = | ||
2686 | local->hw.offchannel_tx_hw_queue; | ||
2687 | } | ||
2688 | |||
2689 | __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, | ||
2690 | status->band); | ||
2679 | } | 2691 | } |
2680 | dev_kfree_skb(rx->skb); | 2692 | dev_kfree_skb(rx->skb); |
2681 | return RX_QUEUED; | 2693 | return RX_QUEUED; |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a79ce820cb50..238a0cca320e 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -766,6 +766,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta) | |||
766 | struct ieee80211_local *local; | 766 | struct ieee80211_local *local; |
767 | struct ieee80211_sub_if_data *sdata; | 767 | struct ieee80211_sub_if_data *sdata; |
768 | int ret, i; | 768 | int ret, i; |
769 | bool have_key = false; | ||
769 | 770 | ||
770 | might_sleep(); | 771 | might_sleep(); |
771 | 772 | ||
@@ -793,12 +794,19 @@ int __must_check __sta_info_destroy(struct sta_info *sta) | |||
793 | list_del_rcu(&sta->list); | 794 | list_del_rcu(&sta->list); |
794 | 795 | ||
795 | mutex_lock(&local->key_mtx); | 796 | mutex_lock(&local->key_mtx); |
796 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) | 797 | for (i = 0; i < NUM_DEFAULT_KEYS; i++) { |
797 | __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i])); | 798 | __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i])); |
798 | if (sta->ptk) | 799 | have_key = true; |
800 | } | ||
801 | if (sta->ptk) { | ||
799 | __ieee80211_key_free(key_mtx_dereference(local, sta->ptk)); | 802 | __ieee80211_key_free(key_mtx_dereference(local, sta->ptk)); |
803 | have_key = true; | ||
804 | } | ||
800 | mutex_unlock(&local->key_mtx); | 805 | mutex_unlock(&local->key_mtx); |
801 | 806 | ||
807 | if (!have_key) | ||
808 | synchronize_net(); | ||
809 | |||
802 | sta->dead = true; | 810 | sta->dead = true; |
803 | 811 | ||
804 | local->num_sta--; | 812 | local->num_sta--; |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 47edf5a40a59..61f49d241712 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -1394,10 +1394,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum) | |||
1394 | skb_reset_network_header(skb); | 1394 | skb_reset_network_header(skb); |
1395 | IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n", | 1395 | IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n", |
1396 | &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu); | 1396 | &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu); |
1397 | rcu_read_lock(); | ||
1398 | ipv4_update_pmtu(skb, dev_net(skb->dev), | 1397 | ipv4_update_pmtu(skb, dev_net(skb->dev), |
1399 | mtu, 0, 0, 0, 0); | 1398 | mtu, 0, 0, 0, 0); |
1400 | rcu_read_unlock(); | ||
1401 | /* Client uses PMTUD? */ | 1399 | /* Client uses PMTUD? */ |
1402 | if (!(cih->frag_off & htons(IP_DF))) | 1400 | if (!(cih->frag_off & htons(IP_DF))) |
1403 | goto ignore_ipip; | 1401 | goto ignore_ipip; |
@@ -1577,7 +1575,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1577 | } | 1575 | } |
1578 | /* ipvs enabled in this netns ? */ | 1576 | /* ipvs enabled in this netns ? */ |
1579 | net = skb_net(skb); | 1577 | net = skb_net(skb); |
1580 | if (!net_ipvs(net)->enable) | 1578 | ipvs = net_ipvs(net); |
1579 | if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) | ||
1581 | return NF_ACCEPT; | 1580 | return NF_ACCEPT; |
1582 | 1581 | ||
1583 | ip_vs_fill_iph_skb(af, skb, &iph); | 1582 | ip_vs_fill_iph_skb(af, skb, &iph); |
@@ -1654,7 +1653,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af) | |||
1654 | } | 1653 | } |
1655 | 1654 | ||
1656 | IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); | 1655 | IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); |
1657 | ipvs = net_ipvs(net); | ||
1658 | /* Check the server status */ | 1656 | /* Check the server status */ |
1659 | if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { | 1657 | if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { |
1660 | /* the destination server is not available */ | 1658 | /* the destination server is not available */ |
@@ -1815,13 +1813,15 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb, | |||
1815 | { | 1813 | { |
1816 | int r; | 1814 | int r; |
1817 | struct net *net; | 1815 | struct net *net; |
1816 | struct netns_ipvs *ipvs; | ||
1818 | 1817 | ||
1819 | if (ip_hdr(skb)->protocol != IPPROTO_ICMP) | 1818 | if (ip_hdr(skb)->protocol != IPPROTO_ICMP) |
1820 | return NF_ACCEPT; | 1819 | return NF_ACCEPT; |
1821 | 1820 | ||
1822 | /* ipvs enabled in this netns ? */ | 1821 | /* ipvs enabled in this netns ? */ |
1823 | net = skb_net(skb); | 1822 | net = skb_net(skb); |
1824 | if (!net_ipvs(net)->enable) | 1823 | ipvs = net_ipvs(net); |
1824 | if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) | ||
1825 | return NF_ACCEPT; | 1825 | return NF_ACCEPT; |
1826 | 1826 | ||
1827 | return ip_vs_in_icmp(skb, &r, hooknum); | 1827 | return ip_vs_in_icmp(skb, &r, hooknum); |
@@ -1835,6 +1835,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, | |||
1835 | { | 1835 | { |
1836 | int r; | 1836 | int r; |
1837 | struct net *net; | 1837 | struct net *net; |
1838 | struct netns_ipvs *ipvs; | ||
1838 | struct ip_vs_iphdr iphdr; | 1839 | struct ip_vs_iphdr iphdr; |
1839 | 1840 | ||
1840 | ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr); | 1841 | ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr); |
@@ -1843,7 +1844,8 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb, | |||
1843 | 1844 | ||
1844 | /* ipvs enabled in this netns ? */ | 1845 | /* ipvs enabled in this netns ? */ |
1845 | net = skb_net(skb); | 1846 | net = skb_net(skb); |
1846 | if (!net_ipvs(net)->enable) | 1847 | ipvs = net_ipvs(net); |
1848 | if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable)) | ||
1847 | return NF_ACCEPT; | 1849 | return NF_ACCEPT; |
1848 | 1850 | ||
1849 | return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr); | 1851 | return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr); |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index c68198bf9128..9e2d1cccd1eb 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -1808,6 +1808,12 @@ static struct ctl_table vs_vars[] = { | |||
1808 | .mode = 0644, | 1808 | .mode = 0644, |
1809 | .proc_handler = proc_dointvec, | 1809 | .proc_handler = proc_dointvec, |
1810 | }, | 1810 | }, |
1811 | { | ||
1812 | .procname = "backup_only", | ||
1813 | .maxlen = sizeof(int), | ||
1814 | .mode = 0644, | ||
1815 | .proc_handler = proc_dointvec, | ||
1816 | }, | ||
1811 | #ifdef CONFIG_IP_VS_DEBUG | 1817 | #ifdef CONFIG_IP_VS_DEBUG |
1812 | { | 1818 | { |
1813 | .procname = "debug_level", | 1819 | .procname = "debug_level", |
@@ -3741,6 +3747,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net) | |||
3741 | tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; | 3747 | tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; |
3742 | ipvs->sysctl_pmtu_disc = 1; | 3748 | ipvs->sysctl_pmtu_disc = 1; |
3743 | tbl[idx++].data = &ipvs->sysctl_pmtu_disc; | 3749 | tbl[idx++].data = &ipvs->sysctl_pmtu_disc; |
3750 | tbl[idx++].data = &ipvs->sysctl_backup_only; | ||
3744 | 3751 | ||
3745 | 3752 | ||
3746 | ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl); | 3753 | ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl); |
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index ae8ec6f27688..cd1d7298f7ba 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c | |||
@@ -906,7 +906,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, | |||
906 | sctp_chunkhdr_t _sctpch, *sch; | 906 | sctp_chunkhdr_t _sctpch, *sch; |
907 | unsigned char chunk_type; | 907 | unsigned char chunk_type; |
908 | int event, next_state; | 908 | int event, next_state; |
909 | int ihl; | 909 | int ihl, cofs; |
910 | 910 | ||
911 | #ifdef CONFIG_IP_VS_IPV6 | 911 | #ifdef CONFIG_IP_VS_IPV6 |
912 | ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); | 912 | ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); |
@@ -914,8 +914,8 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, | |||
914 | ihl = ip_hdrlen(skb); | 914 | ihl = ip_hdrlen(skb); |
915 | #endif | 915 | #endif |
916 | 916 | ||
917 | sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t), | 917 | cofs = ihl + sizeof(sctp_sctphdr_t); |
918 | sizeof(_sctpch), &_sctpch); | 918 | sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch); |
919 | if (sch == NULL) | 919 | if (sch == NULL) |
920 | return; | 920 | return; |
921 | 921 | ||
@@ -933,10 +933,12 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp, | |||
933 | */ | 933 | */ |
934 | if ((sch->type == SCTP_CID_COOKIE_ECHO) || | 934 | if ((sch->type == SCTP_CID_COOKIE_ECHO) || |
935 | (sch->type == SCTP_CID_COOKIE_ACK)) { | 935 | (sch->type == SCTP_CID_COOKIE_ACK)) { |
936 | sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) + | 936 | int clen = ntohs(sch->length); |
937 | sch->length), sizeof(_sctpch), &_sctpch); | 937 | |
938 | if (sch) { | 938 | if (clen >= sizeof(sctp_chunkhdr_t)) { |
939 | if (sch->type == SCTP_CID_ABORT) | 939 | sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4), |
940 | sizeof(_sctpch), &_sctpch); | ||
941 | if (sch && sch->type == SCTP_CID_ABORT) | ||
940 | chunk_type = sch->type; | 942 | chunk_type = sch->type; |
941 | } | 943 | } |
942 | } | 944 | } |
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 432f95780003..ba65b2041eb4 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c | |||
@@ -969,6 +969,10 @@ static int __init nf_conntrack_proto_dccp_init(void) | |||
969 | { | 969 | { |
970 | int ret; | 970 | int ret; |
971 | 971 | ||
972 | ret = register_pernet_subsys(&dccp_net_ops); | ||
973 | if (ret < 0) | ||
974 | goto out_pernet; | ||
975 | |||
972 | ret = nf_ct_l4proto_register(&dccp_proto4); | 976 | ret = nf_ct_l4proto_register(&dccp_proto4); |
973 | if (ret < 0) | 977 | if (ret < 0) |
974 | goto out_dccp4; | 978 | goto out_dccp4; |
@@ -977,16 +981,12 @@ static int __init nf_conntrack_proto_dccp_init(void) | |||
977 | if (ret < 0) | 981 | if (ret < 0) |
978 | goto out_dccp6; | 982 | goto out_dccp6; |
979 | 983 | ||
980 | ret = register_pernet_subsys(&dccp_net_ops); | ||
981 | if (ret < 0) | ||
982 | goto out_pernet; | ||
983 | |||
984 | return 0; | 984 | return 0; |
985 | out_pernet: | ||
986 | nf_ct_l4proto_unregister(&dccp_proto6); | ||
987 | out_dccp6: | 985 | out_dccp6: |
988 | nf_ct_l4proto_unregister(&dccp_proto4); | 986 | nf_ct_l4proto_unregister(&dccp_proto4); |
989 | out_dccp4: | 987 | out_dccp4: |
988 | unregister_pernet_subsys(&dccp_net_ops); | ||
989 | out_pernet: | ||
990 | return ret; | 990 | return ret; |
991 | } | 991 | } |
992 | 992 | ||
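
The conntrack DCCP diff above, like the GRE, SCTP and UDP-lite diffs that follow, applies the same fix: the per-net state is registered before the l4proto hooks that will use it, and the error labels unwind in the opposite order. A generic stand-alone sketch of that goto-unwind shape is given below, using stub functions rather than the real registration calls.

#include <stdio.h>

/* Stubs standing in for register_pernet_subsys()/nf_ct_l4proto_register(). */
static int register_state(void)    { return 0; }
static void unregister_state(void) { }
static int register_hook4(void)    { return 0; }
static void unregister_hook4(void) { }
static int register_hook6(void)    { return -1; }	/* pretend this fails */

static int proto_init(void)
{
	int ret;

	ret = register_state();	/* shared state first ... */
	if (ret < 0)
		goto out_state;
	ret = register_hook4();	/* ... then the users of that state */
	if (ret < 0)
		goto out_hook4;
	ret = register_hook6();
	if (ret < 0)
		goto out_hook6;
	return 0;

out_hook6:			/* unwind strictly in reverse order */
	unregister_hook4();
out_hook4:
	unregister_state();
out_state:
	return ret;
}

int main(void)
{
	printf("proto_init() = %d\n", proto_init());
	return 0;
}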
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index bd7d01d9c7e7..155ce9f8a0db 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c | |||
@@ -420,18 +420,18 @@ static int __init nf_ct_proto_gre_init(void) | |||
420 | { | 420 | { |
421 | int ret; | 421 | int ret; |
422 | 422 | ||
423 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4); | ||
424 | if (ret < 0) | ||
425 | goto out_gre4; | ||
426 | |||
427 | ret = register_pernet_subsys(&proto_gre_net_ops); | 423 | ret = register_pernet_subsys(&proto_gre_net_ops); |
428 | if (ret < 0) | 424 | if (ret < 0) |
429 | goto out_pernet; | 425 | goto out_pernet; |
430 | 426 | ||
427 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4); | ||
428 | if (ret < 0) | ||
429 | goto out_gre4; | ||
430 | |||
431 | return 0; | 431 | return 0; |
432 | out_pernet: | ||
433 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_gre4); | ||
434 | out_gre4: | 432 | out_gre4: |
433 | unregister_pernet_subsys(&proto_gre_net_ops); | ||
434 | out_pernet: | ||
435 | return ret; | 435 | return ret; |
436 | } | 436 | } |
437 | 437 | ||
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 480f616d5936..ec83536def9a 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c | |||
@@ -888,6 +888,10 @@ static int __init nf_conntrack_proto_sctp_init(void) | |||
888 | { | 888 | { |
889 | int ret; | 889 | int ret; |
890 | 890 | ||
891 | ret = register_pernet_subsys(&sctp_net_ops); | ||
892 | if (ret < 0) | ||
893 | goto out_pernet; | ||
894 | |||
891 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4); | 895 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4); |
892 | if (ret < 0) | 896 | if (ret < 0) |
893 | goto out_sctp4; | 897 | goto out_sctp4; |
@@ -896,16 +900,12 @@ static int __init nf_conntrack_proto_sctp_init(void) | |||
896 | if (ret < 0) | 900 | if (ret < 0) |
897 | goto out_sctp6; | 901 | goto out_sctp6; |
898 | 902 | ||
899 | ret = register_pernet_subsys(&sctp_net_ops); | ||
900 | if (ret < 0) | ||
901 | goto out_pernet; | ||
902 | |||
903 | return 0; | 903 | return 0; |
904 | out_pernet: | ||
905 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp6); | ||
906 | out_sctp6: | 904 | out_sctp6: |
907 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4); | 905 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4); |
908 | out_sctp4: | 906 | out_sctp4: |
907 | unregister_pernet_subsys(&sctp_net_ops); | ||
908 | out_pernet: | ||
909 | return ret; | 909 | return ret; |
910 | } | 910 | } |
911 | 911 | ||
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c index 157489581c31..ca969f6273f7 100644 --- a/net/netfilter/nf_conntrack_proto_udplite.c +++ b/net/netfilter/nf_conntrack_proto_udplite.c | |||
@@ -371,6 +371,10 @@ static int __init nf_conntrack_proto_udplite_init(void) | |||
371 | { | 371 | { |
372 | int ret; | 372 | int ret; |
373 | 373 | ||
374 | ret = register_pernet_subsys(&udplite_net_ops); | ||
375 | if (ret < 0) | ||
376 | goto out_pernet; | ||
377 | |||
374 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4); | 378 | ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4); |
375 | if (ret < 0) | 379 | if (ret < 0) |
376 | goto out_udplite4; | 380 | goto out_udplite4; |
@@ -379,16 +383,12 @@ static int __init nf_conntrack_proto_udplite_init(void) | |||
379 | if (ret < 0) | 383 | if (ret < 0) |
380 | goto out_udplite6; | 384 | goto out_udplite6; |
381 | 385 | ||
382 | ret = register_pernet_subsys(&udplite_net_ops); | ||
383 | if (ret < 0) | ||
384 | goto out_pernet; | ||
385 | |||
386 | return 0; | 386 | return 0; |
387 | out_pernet: | ||
388 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6); | ||
389 | out_udplite6: | 387 | out_udplite6: |
390 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4); | 388 | nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4); |
391 | out_udplite4: | 389 | out_udplite4: |
390 | unregister_pernet_subsys(&udplite_net_ops); | ||
391 | out_pernet: | ||
392 | return ret; | 392 | return ret; |
393 | } | 393 | } |
394 | 394 | ||
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 6bcce401fd1c..fedee3943661 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
@@ -568,6 +568,7 @@ static int __init nf_conntrack_standalone_init(void) | |||
568 | register_net_sysctl(&init_net, "net", nf_ct_netfilter_table); | 568 | register_net_sysctl(&init_net, "net", nf_ct_netfilter_table); |
569 | if (!nf_ct_netfilter_header) { | 569 | if (!nf_ct_netfilter_header) { |
570 | pr_err("nf_conntrack: can't register to sysctl.\n"); | 570 | pr_err("nf_conntrack: can't register to sysctl.\n"); |
571 | ret = -ENOMEM; | ||
571 | goto out_sysctl; | 572 | goto out_sysctl; |
572 | } | 573 | } |
573 | #endif | 574 | #endif |
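
The nf_conntrack_standalone hunk illustrates a common bug class: when the failing call reports failure by returning NULL rather than an errno, the local ret still holds 0 (or a stale success value) unless it is set explicitly before jumping to the cleanup label. A minimal stand-alone illustration, with a stub in place of the real allocation, is sketched here.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *create_entry(void) { return NULL; }	/* pretend it fails */

static int subsystem_init(void)
{
	void *hdr;
	int ret = 0;

	hdr = create_entry();
	if (!hdr) {
		ret = -ENOMEM;	/* without this line we would return 0 */
		goto out;
	}
	/* ... more setup on success ... */
	free(hdr);
out:
	return ret;
}

int main(void)
{
	printf("subsystem_init() = %d\n", subsystem_init());
	return 0;
}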
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 589d686f0b4c..dc3fd5d44464 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c | |||
@@ -49,6 +49,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb, | |||
49 | return -EINVAL; | 49 | return -EINVAL; |
50 | 50 | ||
51 | acct_name = nla_data(tb[NFACCT_NAME]); | 51 | acct_name = nla_data(tb[NFACCT_NAME]); |
52 | if (strlen(acct_name) == 0) | ||
53 | return -EINVAL; | ||
52 | 54 | ||
53 | list_for_each_entry(nfacct, &nfnl_acct_list, head) { | 55 | list_for_each_entry(nfacct, &nfnl_acct_list, head) { |
54 | if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0) | 56 | if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0) |
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 858fd52c1040..42680b2baa11 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c | |||
@@ -112,7 +112,7 @@ instance_create(u_int16_t queue_num, int portid) | |||
112 | inst->queue_num = queue_num; | 112 | inst->queue_num = queue_num; |
113 | inst->peer_portid = portid; | 113 | inst->peer_portid = portid; |
114 | inst->queue_maxlen = NFQNL_QMAX_DEFAULT; | 114 | inst->queue_maxlen = NFQNL_QMAX_DEFAULT; |
115 | inst->copy_range = 0xfffff; | 115 | inst->copy_range = 0xffff; |
116 | inst->copy_mode = NFQNL_COPY_NONE; | 116 | inst->copy_mode = NFQNL_COPY_NONE; |
117 | spin_lock_init(&inst->lock); | 117 | spin_lock_init(&inst->lock); |
118 | INIT_LIST_HEAD(&inst->queue_list); | 118 | INIT_LIST_HEAD(&inst->queue_list); |
@@ -1062,8 +1062,10 @@ static int __init nfnetlink_queue_init(void) | |||
1062 | 1062 | ||
1063 | #ifdef CONFIG_PROC_FS | 1063 | #ifdef CONFIG_PROC_FS |
1064 | if (!proc_create("nfnetlink_queue", 0440, | 1064 | if (!proc_create("nfnetlink_queue", 0440, |
1065 | proc_net_netfilter, &nfqnl_file_ops)) | 1065 | proc_net_netfilter, &nfqnl_file_ops)) { |
1066 | status = -ENOMEM; | ||
1066 | goto cleanup_subsys; | 1067 | goto cleanup_subsys; |
1068 | } | ||
1067 | #endif | 1069 | #endif |
1068 | 1070 | ||
1069 | register_netdevice_notifier(&nfqnl_dev_notifier); | 1071 | register_netdevice_notifier(&nfqnl_dev_notifier); |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index f2aabb6f4105..5a55be3f17a5 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -142,6 +142,7 @@ int genl_register_mc_group(struct genl_family *family, | |||
142 | int err = 0; | 142 | int err = 0; |
143 | 143 | ||
144 | BUG_ON(grp->name[0] == '\0'); | 144 | BUG_ON(grp->name[0] == '\0'); |
145 | BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL); | ||
145 | 146 | ||
146 | genl_lock(); | 147 | genl_lock(); |
147 | 148 | ||
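
The genetlink hunk adds a second sanity check: besides being non-empty, the multicast group name must contain a terminating NUL somewhere inside its fixed-size buffer, and memchr() is the natural tool for that. A tiny illustration follows, with GENL_NAMSIZ replaced by a local constant.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NAMSIZ 16	/* stand-in for GENL_NAMSIZ */

/* A name is valid only if it is non-empty and NUL-terminated within the
 * buffer; otherwise later strcmp()/printf("%s") users would read past
 * the end of the array. */
static bool name_ok(const char name[NAMSIZ])
{
	return name[0] != '\0' && memchr(name, '\0', NAMSIZ) != NULL;
}

int main(void)
{
	char good[NAMSIZ] = "notify";
	char bad[NAMSIZ];

	memset(bad, 'A', sizeof(bad));	/* no terminator anywhere */
	printf("good=%d bad=%d\n", name_ok(good), name_ok(bad));
	return 0;
}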
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index d1fa1d9ffd2e..103bd704b5fc 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c | |||
@@ -1173,6 +1173,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1173 | } | 1173 | } |
1174 | 1174 | ||
1175 | if (sax != NULL) { | 1175 | if (sax != NULL) { |
1176 | memset(sax, 0, sizeof(*sax)); | ||
1176 | sax->sax25_family = AF_NETROM; | 1177 | sax->sax25_family = AF_NETROM; |
1177 | skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, | 1178 | skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, |
1178 | AX25_ADDR_LEN); | 1179 | AX25_ADDR_LEN); |
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index 7f8266dd14cb..ee25f25f0cd6 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c | |||
@@ -68,7 +68,8 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock) | |||
68 | } | 68 | } |
69 | } | 69 | } |
70 | 70 | ||
71 | static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) | 71 | static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen, |
72 | int err) | ||
72 | { | 73 | { |
73 | struct sock *sk; | 74 | struct sock *sk; |
74 | struct hlist_node *tmp; | 75 | struct hlist_node *tmp; |
@@ -100,11 +101,12 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) | |||
100 | 101 | ||
101 | nfc_llcp_accept_unlink(accept_sk); | 102 | nfc_llcp_accept_unlink(accept_sk); |
102 | 103 | ||
104 | if (err) | ||
105 | accept_sk->sk_err = err; | ||
103 | accept_sk->sk_state = LLCP_CLOSED; | 106 | accept_sk->sk_state = LLCP_CLOSED; |
107 | accept_sk->sk_state_change(sk); | ||
104 | 108 | ||
105 | bh_unlock_sock(accept_sk); | 109 | bh_unlock_sock(accept_sk); |
106 | |||
107 | sock_orphan(accept_sk); | ||
108 | } | 110 | } |
109 | 111 | ||
110 | if (listen == true) { | 112 | if (listen == true) { |
@@ -123,16 +125,45 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) | |||
123 | continue; | 125 | continue; |
124 | } | 126 | } |
125 | 127 | ||
128 | if (err) | ||
129 | sk->sk_err = err; | ||
126 | sk->sk_state = LLCP_CLOSED; | 130 | sk->sk_state = LLCP_CLOSED; |
131 | sk->sk_state_change(sk); | ||
127 | 132 | ||
128 | bh_unlock_sock(sk); | 133 | bh_unlock_sock(sk); |
129 | 134 | ||
130 | sock_orphan(sk); | ||
131 | |||
132 | sk_del_node_init(sk); | 135 | sk_del_node_init(sk); |
133 | } | 136 | } |
134 | 137 | ||
135 | write_unlock(&local->sockets.lock); | 138 | write_unlock(&local->sockets.lock); |
139 | |||
140 | /* | ||
141 | * If we want to keep the listening sockets alive, | ||
142 | * we don't touch the RAW ones. | ||
143 | */ | ||
144 | if (listen == true) | ||
145 | return; | ||
146 | |||
147 | write_lock(&local->raw_sockets.lock); | ||
148 | |||
149 | sk_for_each_safe(sk, tmp, &local->raw_sockets.head) { | ||
150 | llcp_sock = nfc_llcp_sock(sk); | ||
151 | |||
152 | bh_lock_sock(sk); | ||
153 | |||
154 | nfc_llcp_socket_purge(llcp_sock); | ||
155 | |||
156 | if (err) | ||
157 | sk->sk_err = err; | ||
158 | sk->sk_state = LLCP_CLOSED; | ||
159 | sk->sk_state_change(sk); | ||
160 | |||
161 | bh_unlock_sock(sk); | ||
162 | |||
163 | sk_del_node_init(sk); | ||
164 | } | ||
165 | |||
166 | write_unlock(&local->raw_sockets.lock); | ||
136 | } | 167 | } |
137 | 168 | ||
138 | struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) | 169 | struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) |
@@ -142,20 +173,25 @@ struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) | |||
142 | return local; | 173 | return local; |
143 | } | 174 | } |
144 | 175 | ||
145 | static void local_release(struct kref *ref) | 176 | static void local_cleanup(struct nfc_llcp_local *local, bool listen) |
146 | { | 177 | { |
147 | struct nfc_llcp_local *local; | 178 | nfc_llcp_socket_release(local, listen, ENXIO); |
148 | |||
149 | local = container_of(ref, struct nfc_llcp_local, ref); | ||
150 | |||
151 | list_del(&local->list); | ||
152 | nfc_llcp_socket_release(local, false); | ||
153 | del_timer_sync(&local->link_timer); | 179 | del_timer_sync(&local->link_timer); |
154 | skb_queue_purge(&local->tx_queue); | 180 | skb_queue_purge(&local->tx_queue); |
155 | cancel_work_sync(&local->tx_work); | 181 | cancel_work_sync(&local->tx_work); |
156 | cancel_work_sync(&local->rx_work); | 182 | cancel_work_sync(&local->rx_work); |
157 | cancel_work_sync(&local->timeout_work); | 183 | cancel_work_sync(&local->timeout_work); |
158 | kfree_skb(local->rx_pending); | 184 | kfree_skb(local->rx_pending); |
185 | } | ||
186 | |||
187 | static void local_release(struct kref *ref) | ||
188 | { | ||
189 | struct nfc_llcp_local *local; | ||
190 | |||
191 | local = container_of(ref, struct nfc_llcp_local, ref); | ||
192 | |||
193 | list_del(&local->list); | ||
194 | local_cleanup(local, false); | ||
159 | kfree(local); | 195 | kfree(local); |
160 | } | 196 | } |
161 | 197 | ||
@@ -785,7 +821,6 @@ static void nfc_llcp_recv_ui(struct nfc_llcp_local *local, | |||
785 | skb_get(skb); | 821 | skb_get(skb); |
786 | } else { | 822 | } else { |
787 | pr_err("Receive queue is full\n"); | 823 | pr_err("Receive queue is full\n"); |
788 | kfree_skb(skb); | ||
789 | } | 824 | } |
790 | 825 | ||
791 | nfc_llcp_sock_put(llcp_sock); | 826 | nfc_llcp_sock_put(llcp_sock); |
@@ -986,7 +1021,6 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local, | |||
986 | skb_get(skb); | 1021 | skb_get(skb); |
987 | } else { | 1022 | } else { |
988 | pr_err("Receive queue is full\n"); | 1023 | pr_err("Receive queue is full\n"); |
989 | kfree_skb(skb); | ||
990 | } | 1024 | } |
991 | } | 1025 | } |
992 | 1026 | ||
@@ -1348,7 +1382,7 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev) | |||
1348 | return; | 1382 | return; |
1349 | 1383 | ||
1350 | /* Close and purge all existing sockets */ | 1384 | /* Close and purge all existing sockets */ |
1351 | nfc_llcp_socket_release(local, true); | 1385 | nfc_llcp_socket_release(local, true, 0); |
1352 | } | 1386 | } |
1353 | 1387 | ||
1354 | void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, | 1388 | void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, |
@@ -1427,6 +1461,8 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev) | |||
1427 | return; | 1461 | return; |
1428 | } | 1462 | } |
1429 | 1463 | ||
1464 | local_cleanup(local, false); | ||
1465 | |||
1430 | nfc_llcp_local_put(local); | 1466 | nfc_llcp_local_put(local); |
1431 | } | 1467 | } |
1432 | 1468 | ||
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c index 5332751943a9..6c94447ec414 100644 --- a/net/nfc/llcp/sock.c +++ b/net/nfc/llcp/sock.c | |||
@@ -270,7 +270,9 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent, | |||
270 | } | 270 | } |
271 | 271 | ||
272 | if (sk->sk_state == LLCP_CONNECTED || !newsock) { | 272 | if (sk->sk_state == LLCP_CONNECTED || !newsock) { |
273 | nfc_llcp_accept_unlink(sk); | 273 | list_del_init(&lsk->accept_queue); |
274 | sock_put(sk); | ||
275 | |||
274 | if (newsock) | 276 | if (newsock) |
275 | sock_graft(sk, newsock); | 277 | sock_graft(sk, newsock); |
276 | 278 | ||
@@ -278,6 +280,8 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent, | |||
278 | 280 | ||
279 | pr_debug("Returning sk state %d\n", sk->sk_state); | 281 | pr_debug("Returning sk state %d\n", sk->sk_state); |
280 | 282 | ||
283 | sk_acceptq_removed(parent); | ||
284 | |||
281 | return sk; | 285 | return sk; |
282 | } | 286 | } |
283 | 287 | ||
@@ -462,8 +466,6 @@ static int llcp_sock_release(struct socket *sock) | |||
462 | nfc_llcp_accept_unlink(accept_sk); | 466 | nfc_llcp_accept_unlink(accept_sk); |
463 | 467 | ||
464 | release_sock(accept_sk); | 468 | release_sock(accept_sk); |
465 | |||
466 | sock_orphan(accept_sk); | ||
467 | } | 469 | } |
468 | } | 470 | } |
469 | 471 | ||
@@ -644,6 +646,8 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
644 | 646 | ||
645 | pr_debug("%p %zu\n", sk, len); | 647 | pr_debug("%p %zu\n", sk, len); |
646 | 648 | ||
649 | msg->msg_namelen = 0; | ||
650 | |||
647 | lock_sock(sk); | 651 | lock_sock(sk); |
648 | 652 | ||
649 | if (sk->sk_state == LLCP_CLOSED && | 653 | if (sk->sk_state == LLCP_CLOSED && |
@@ -689,6 +693,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
689 | 693 | ||
690 | pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); | 694 | pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); |
691 | 695 | ||
696 | memset(sockaddr, 0, sizeof(*sockaddr)); | ||
692 | sockaddr->sa_family = AF_NFC; | 697 | sockaddr->sa_family = AF_NFC; |
693 | sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP; | 698 | sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP; |
694 | sockaddr->dsap = ui_cb->dsap; | 699 | sockaddr->dsap = ui_cb->dsap; |
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index ac2defeeba83..d4d5363c7ba7 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
@@ -58,7 +58,7 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) | |||
58 | 58 | ||
59 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 59 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
60 | skb->csum = csum_sub(skb->csum, csum_partial(skb->data | 60 | skb->csum = csum_sub(skb->csum, csum_partial(skb->data |
61 | + ETH_HLEN, VLAN_HLEN, 0)); | 61 | + (2 * ETH_ALEN), VLAN_HLEN, 0)); |
62 | 62 | ||
63 | vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); | 63 | vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); |
64 | *current_tci = vhdr->h_vlan_TCI; | 64 | *current_tci = vhdr->h_vlan_TCI; |
@@ -115,7 +115,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla | |||
115 | 115 | ||
116 | if (skb->ip_summed == CHECKSUM_COMPLETE) | 116 | if (skb->ip_summed == CHECKSUM_COMPLETE) |
117 | skb->csum = csum_add(skb->csum, csum_partial(skb->data | 117 | skb->csum = csum_add(skb->csum, csum_partial(skb->data |
118 | + ETH_HLEN, VLAN_HLEN, 0)); | 118 | + (2 * ETH_ALEN), VLAN_HLEN, 0)); |
119 | 119 | ||
120 | } | 120 | } |
121 | __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); | 121 | __vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); |
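
The two openvswitch/actions.c hunks move the checksum adjustment from offset ETH_HLEN (14) to 2 * ETH_ALEN (12): the four bytes that a VLAN push or pop actually inserts or removes (TPID + TCI) sit immediately after the two six-byte MAC addresses, not after the full Ethernet header. A small stand-alone calculation of that region is shown below; the ones'-complement helper is written here for illustration only and is not the kernel's csum_partial().

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* Ones'-complement sum of 'len' bytes, similar in spirit to csum_partial(). */
static uint16_t csum16(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)p[i] << 8 | p[i + 1];
	if (len & 1)
		sum += (uint32_t)p[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* dst MAC, src MAC, 802.1Q tag 0x8100 + TCI 0x0005, inner type. */
	uint8_t frame[18] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0x00, 0x11, 0x22, 0x33, 0x44, 0x55,
		0x81, 0x00, 0x00, 0x05,
		0x08, 0x00,
	};

	/* The tag really lives at offset 12 (2 * ETH_ALEN)... */
	printf("csum over tag at 12: %04x\n", csum16(frame + 2 * ETH_ALEN, 4));
	/* ...whereas offset 14 (ETH_HLEN) would cover TCI + inner type. */
	printf("csum over bytes at 14: %04x\n", csum16(frame + 14, 4));
	return 0;
}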
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e87a26506dba..a4b724708a1a 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -394,6 +394,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex, | |||
394 | 394 | ||
395 | skb_copy_and_csum_dev(skb, nla_data(nla)); | 395 | skb_copy_and_csum_dev(skb, nla_data(nla)); |
396 | 396 | ||
397 | genlmsg_end(user_skb, upcall); | ||
397 | err = genlmsg_unicast(net, user_skb, upcall_info->portid); | 398 | err = genlmsg_unicast(net, user_skb, upcall_info->portid); |
398 | 399 | ||
399 | out: | 400 | out: |
@@ -1690,6 +1691,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
1690 | if (IS_ERR(vport)) | 1691 | if (IS_ERR(vport)) |
1691 | goto exit_unlock; | 1692 | goto exit_unlock; |
1692 | 1693 | ||
1694 | err = 0; | ||
1693 | reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, | 1695 | reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, |
1694 | OVS_VPORT_CMD_NEW); | 1696 | OVS_VPORT_CMD_NEW); |
1695 | if (IS_ERR(reply)) { | 1697 | if (IS_ERR(reply)) { |
@@ -1771,6 +1773,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info) | |||
1771 | if (IS_ERR(reply)) | 1773 | if (IS_ERR(reply)) |
1772 | goto exit_unlock; | 1774 | goto exit_unlock; |
1773 | 1775 | ||
1776 | err = 0; | ||
1774 | ovs_dp_detach_port(vport); | 1777 | ovs_dp_detach_port(vport); |
1775 | 1778 | ||
1776 | genl_notify(reply, genl_info_net(info), info->snd_portid, | 1779 | genl_notify(reply, genl_info_net(info), info->snd_portid, |
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 20605ecf100b..fe0e4215c73d 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
@@ -482,7 +482,11 @@ static __be16 parse_ethertype(struct sk_buff *skb) | |||
482 | return htons(ETH_P_802_2); | 482 | return htons(ETH_P_802_2); |
483 | 483 | ||
484 | __skb_pull(skb, sizeof(struct llc_snap_hdr)); | 484 | __skb_pull(skb, sizeof(struct llc_snap_hdr)); |
485 | return llc->ethertype; | 485 | |
486 | if (ntohs(llc->ethertype) >= 1536) | ||
487 | return llc->ethertype; | ||
488 | |||
489 | return htons(ETH_P_802_2); | ||
486 | } | 490 | } |
487 | 491 | ||
488 | static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, | 492 | static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, |
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 670cbc3518de..2130d61c384a 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c | |||
@@ -43,8 +43,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) | |||
43 | 43 | ||
44 | /* Make our own copy of the packet. Otherwise we will mangle the | 44 | /* Make our own copy of the packet. Otherwise we will mangle the |
45 | * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). | 45 | * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). |
46 | * (No one comes after us, since we tell handle_bridge() that we took | 46 | */ |
47 | * the packet.) */ | ||
48 | skb = skb_share_check(skb, GFP_ATOMIC); | 47 | skb = skb_share_check(skb, GFP_ATOMIC); |
49 | if (unlikely(!skb)) | 48 | if (unlikely(!skb)) |
50 | return; | 49 | return; |
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index ba717cc038b3..f6b8132ce4cb 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c | |||
@@ -325,8 +325,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb) | |||
325 | * @skb: skb that was received | 325 | * @skb: skb that was received |
326 | * | 326 | * |
327 | * Must be called with rcu_read_lock. The packet cannot be shared and | 327 | * Must be called with rcu_read_lock. The packet cannot be shared and |
328 | * skb->data should point to the Ethernet header. The caller must have already | 328 | * skb->data should point to the Ethernet header. |
329 | * called compute_ip_summed() to initialize the checksumming fields. | ||
330 | */ | 329 | */ |
331 | void ovs_vport_receive(struct vport *vport, struct sk_buff *skb) | 330 | void ovs_vport_receive(struct vport *vport, struct sk_buff *skb) |
332 | { | 331 | { |
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index cf68e6e4054a..9c8347451597 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c | |||
@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1253 | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | 1253 | skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
1254 | 1254 | ||
1255 | if (srose != NULL) { | 1255 | if (srose != NULL) { |
1256 | memset(srose, 0, msg->msg_namelen); | ||
1256 | srose->srose_family = AF_ROSE; | 1257 | srose->srose_family = AF_ROSE; |
1257 | srose->srose_addr = rose->dest_addr; | 1258 | srose->srose_addr = rose->dest_addr; |
1258 | srose->srose_call = rose->dest_call; | 1259 | srose->srose_call = rose->dest_call; |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 13aa47aa2ffb..1bc210ffcba2 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -962,8 +962,11 @@ cbq_dequeue(struct Qdisc *sch) | |||
962 | cbq_update(q); | 962 | cbq_update(q); |
963 | if ((incr -= incr2) < 0) | 963 | if ((incr -= incr2) < 0) |
964 | incr = 0; | 964 | incr = 0; |
965 | q->now += incr; | ||
966 | } else { | ||
967 | if (now > q->now) | ||
968 | q->now = now; | ||
965 | } | 969 | } |
966 | q->now += incr; | ||
967 | q->now_rt = now; | 970 | q->now_rt = now; |
968 | 971 | ||
969 | for (;;) { | 972 | for (;;) { |
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 4e606fcb2534..55786283a3df 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c | |||
@@ -195,7 +195,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
195 | flow->deficit = q->quantum; | 195 | flow->deficit = q->quantum; |
196 | flow->dropped = 0; | 196 | flow->dropped = 0; |
197 | } | 197 | } |
198 | if (++sch->q.qlen < sch->limit) | 198 | if (++sch->q.qlen <= sch->limit) |
199 | return NET_XMIT_SUCCESS; | 199 | return NET_XMIT_SUCCESS; |
200 | 200 | ||
201 | q->drop_overlimit++; | 201 | q->drop_overlimit++; |
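
The comparison change in fq_codel_enqueue() is an off-by-one fix: q.qlen is incremented before the test, so the strict < only ever admits limit - 1 packets before taking the drop path, while <= lets the queue reach the configured limit. Isolated from the qdisc, the difference in the test alone (illustration only):

#include <stdio.h>

int main(void)
{
	unsigned int qlen, limit = 3, admitted_lt = 0, admitted_le = 0;
	int i;

	for (qlen = 0, i = 0; i < 5; i++)
		if (++qlen < limit)
			admitted_lt++;		/* old test: admits limit - 1 */

	for (qlen = 0, i = 0; i < 5; i++)
		if (++qlen <= limit)
			admitted_le++;		/* new test: admits limit */

	printf("'<' admitted %u, '<=' admitted %u\n", admitted_lt, admitted_le);
	return 0;	/* prints 2 and 3 for limit = 3 */
}
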
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index ffad48109a22..eac7e0ee23c1 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -904,7 +904,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate) | |||
904 | u64 mult; | 904 | u64 mult; |
905 | int shift; | 905 | int shift; |
906 | 906 | ||
907 | r->rate_bps = rate << 3; | 907 | r->rate_bps = (u64)rate << 3; |
908 | r->shift = 0; | 908 | r->shift = 0; |
909 | r->mult = 1; | 909 | r->mult = 1; |
910 | /* | 910 | /* |
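
The (u64) cast in psched_ratecfg_precompute() is an overflow fix: rate is a 32-bit byte rate, and shifting it left by 3 to get bits per second is otherwise done in 32-bit arithmetic, so anything above roughly 4 Gbit/s wraps. A standalone sketch of the difference (userspace illustration, not kernel code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rate = 0x30000000u;		/* ~805 MB/s, about 6.4 Gbit/s */
	uint32_t bad  = rate << 3;		/* shifted in 32 bits: wraps */
	uint64_t good = (uint64_t)rate << 3;	/* widened first, then shifted */

	printf("32-bit shift: %u\n", bad);			  /* 2147483648 */
	printf("64-bit shift: %llu\n", (unsigned long long)good); /* 6442450944 */
	return 0;
}
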
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 43cd0dd9149d..d2709e2b7be6 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -1079,7 +1079,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc, | |||
1079 | transports) { | 1079 | transports) { |
1080 | 1080 | ||
1081 | if (transport == active) | 1081 | if (transport == active) |
1082 | break; | 1082 | continue; |
1083 | list_for_each_entry(chunk, &transport->transmitted, | 1083 | list_for_each_entry(chunk, &transport->transmitted, |
1084 | transmitted_list) { | 1084 | transmitted_list) { |
1085 | if (key == chunk->subh.data_hdr->tsn) { | 1085 | if (key == chunk->subh.data_hdr->tsn) { |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 5131fcfedb03..de1a0138317f 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -2082,7 +2082,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net, | |||
2082 | } | 2082 | } |
2083 | 2083 | ||
2084 | /* Delete the tempory new association. */ | 2084 | /* Delete the tempory new association. */ |
2085 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); | 2085 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc)); |
2086 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | 2086 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); |
2087 | 2087 | ||
2088 | /* Restore association pointer to provide SCTP command interpeter | 2088 | /* Restore association pointer to provide SCTP command interpeter |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index dcc446e7fbf6..d5f35f15af98 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -304,10 +304,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru | |||
304 | err = rpciod_up(); | 304 | err = rpciod_up(); |
305 | if (err) | 305 | if (err) |
306 | goto out_no_rpciod; | 306 | goto out_no_rpciod; |
307 | err = -EINVAL; | ||
308 | if (!xprt) | ||
309 | goto out_no_xprt; | ||
310 | 307 | ||
308 | err = -EINVAL; | ||
311 | if (args->version >= program->nrvers) | 309 | if (args->version >= program->nrvers) |
312 | goto out_err; | 310 | goto out_err; |
313 | version = program->version[args->version]; | 311 | version = program->version[args->version]; |
@@ -382,10 +380,9 @@ out_no_principal: | |||
382 | out_no_stats: | 380 | out_no_stats: |
383 | kfree(clnt); | 381 | kfree(clnt); |
384 | out_err: | 382 | out_err: |
385 | xprt_put(xprt); | ||
386 | out_no_xprt: | ||
387 | rpciod_down(); | 383 | rpciod_down(); |
388 | out_no_rpciod: | 384 | out_no_rpciod: |
385 | xprt_put(xprt); | ||
389 | return ERR_PTR(err); | 386 | return ERR_PTR(err); |
390 | } | 387 | } |
391 | 388 | ||
@@ -512,7 +509,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, | |||
512 | new = rpc_new_client(args, xprt); | 509 | new = rpc_new_client(args, xprt); |
513 | if (IS_ERR(new)) { | 510 | if (IS_ERR(new)) { |
514 | err = PTR_ERR(new); | 511 | err = PTR_ERR(new); |
515 | goto out_put; | 512 | goto out_err; |
516 | } | 513 | } |
517 | 514 | ||
518 | atomic_inc(&clnt->cl_count); | 515 | atomic_inc(&clnt->cl_count); |
@@ -525,8 +522,6 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args, | |||
525 | new->cl_chatty = clnt->cl_chatty; | 522 | new->cl_chatty = clnt->cl_chatty; |
526 | return new; | 523 | return new; |
527 | 524 | ||
528 | out_put: | ||
529 | xprt_put(xprt); | ||
530 | out_err: | 525 | out_err: |
531 | dprintk("RPC: %s: returned error %d\n", __func__, err); | 526 | dprintk("RPC: %s: returned error %d\n", __func__, err); |
532 | return ERR_PTR(err); | 527 | return ERR_PTR(err); |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index fb20f25ddec9..f8529fc8e542 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -180,6 +180,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, | |||
180 | list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); | 180 | list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); |
181 | task->tk_waitqueue = queue; | 181 | task->tk_waitqueue = queue; |
182 | queue->qlen++; | 182 | queue->qlen++; |
183 | /* barrier matches the read in rpc_wake_up_task_queue_locked() */ | ||
184 | smp_wmb(); | ||
183 | rpc_set_queued(task); | 185 | rpc_set_queued(task); |
184 | 186 | ||
185 | dprintk("RPC: %5u added to queue %p \"%s\"\n", | 187 | dprintk("RPC: %5u added to queue %p \"%s\"\n", |
@@ -430,8 +432,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task | |||
430 | */ | 432 | */ |
431 | static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task) | 433 | static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task) |
432 | { | 434 | { |
433 | if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue) | 435 | if (RPC_IS_QUEUED(task)) { |
434 | __rpc_do_wake_up_task(queue, task); | 436 | smp_rmb(); |
437 | if (task->tk_waitqueue == queue) | ||
438 | __rpc_do_wake_up_task(queue, task); | ||
439 | } | ||
435 | } | 440 | } |
436 | 441 | ||
437 | /* | 442 | /* |
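
The smp_wmb()/smp_rmb() pair added here is the usual publish-then-flag ordering: __rpc_add_wait_queue() must make tk_waitqueue (and qlen) visible before the queued bit is set, and rpc_wake_up_task_queue_locked() must not read tk_waitqueue until after a matching read barrier once it has seen the bit. A rough single-file analogue using C11 fences in place of the kernel primitives (names invented, illustration only):

#include <stdatomic.h>
#include <stdbool.h>

struct task {
	void *waitqueue;	/* payload, written before publication */
	atomic_bool queued;	/* publication flag */
};

static void publish(struct task *t, void *q)
{
	t->waitqueue = q;				/* 1. write payload */
	atomic_thread_fence(memory_order_release);	/* 2. smp_wmb() analogue */
	atomic_store_explicit(&t->queued, true, memory_order_relaxed);
}

static bool consume(struct task *t, void *q)
{
	if (!atomic_load_explicit(&t->queued, memory_order_relaxed))
		return false;				/* not published yet */
	atomic_thread_fence(memory_order_acquire);	/* 3. smp_rmb() analogue */
	return t->waitqueue == q;			/* 4. payload now visible */
}

int main(void)
{
	static struct task t;
	int q;

	publish(&t, &q);
	return consume(&t, &q) ? 0 : 1;	/* single-threaded: returns 0 */
}
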
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index a9622b6cd916..515ce38e4f4c 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -790,6 +790,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) | |||
790 | if (addr) { | 790 | if (addr) { |
791 | addr->family = AF_TIPC; | 791 | addr->family = AF_TIPC; |
792 | addr->addrtype = TIPC_ADDR_ID; | 792 | addr->addrtype = TIPC_ADDR_ID; |
793 | memset(&addr->addr, 0, sizeof(addr->addr)); | ||
793 | addr->addr.id.ref = msg_origport(msg); | 794 | addr->addr.id.ref = msg_origport(msg); |
794 | addr->addr.id.node = msg_orignode(msg); | 795 | addr->addr.id.node = msg_orignode(msg); |
795 | addr->addr.name.domain = 0; /* could leave uninitialized */ | 796 | addr->addr.name.domain = 0; /* could leave uninitialized */ |
@@ -904,6 +905,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock, | |||
904 | goto exit; | 905 | goto exit; |
905 | } | 906 | } |
906 | 907 | ||
908 | /* will be updated in set_orig_addr() if needed */ | ||
909 | m->msg_namelen = 0; | ||
910 | |||
907 | timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); | 911 | timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); |
908 | restart: | 912 | restart: |
909 | 913 | ||
@@ -1013,6 +1017,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock, | |||
1013 | goto exit; | 1017 | goto exit; |
1014 | } | 1018 | } |
1015 | 1019 | ||
1020 | /* will be updated in set_orig_addr() if needed */ | ||
1021 | m->msg_namelen = 0; | ||
1022 | |||
1016 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); | 1023 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); |
1017 | timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); | 1024 | timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); |
1018 | 1025 | ||
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 51be64f163ec..2db702d82e7d 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -382,7 +382,7 @@ static void unix_sock_destructor(struct sock *sk) | |||
382 | #endif | 382 | #endif |
383 | } | 383 | } |
384 | 384 | ||
385 | static int unix_release_sock(struct sock *sk, int embrion) | 385 | static void unix_release_sock(struct sock *sk, int embrion) |
386 | { | 386 | { |
387 | struct unix_sock *u = unix_sk(sk); | 387 | struct unix_sock *u = unix_sk(sk); |
388 | struct path path; | 388 | struct path path; |
@@ -451,8 +451,6 @@ static int unix_release_sock(struct sock *sk, int embrion) | |||
451 | 451 | ||
452 | if (unix_tot_inflight) | 452 | if (unix_tot_inflight) |
453 | unix_gc(); /* Garbage collect fds */ | 453 | unix_gc(); /* Garbage collect fds */ |
454 | |||
455 | return 0; | ||
456 | } | 454 | } |
457 | 455 | ||
458 | static void init_peercred(struct sock *sk) | 456 | static void init_peercred(struct sock *sk) |
@@ -699,9 +697,10 @@ static int unix_release(struct socket *sock) | |||
699 | if (!sk) | 697 | if (!sk) |
700 | return 0; | 698 | return 0; |
701 | 699 | ||
700 | unix_release_sock(sk, 0); | ||
702 | sock->sk = NULL; | 701 | sock->sk = NULL; |
703 | 702 | ||
704 | return unix_release_sock(sk, 0); | 703 | return 0; |
705 | } | 704 | } |
706 | 705 | ||
707 | static int unix_autobind(struct socket *sock) | 706 | static int unix_autobind(struct socket *sock) |
@@ -1994,7 +1993,7 @@ again: | |||
1994 | if ((UNIXCB(skb).pid != siocb->scm->pid) || | 1993 | if ((UNIXCB(skb).pid != siocb->scm->pid) || |
1995 | (UNIXCB(skb).cred != siocb->scm->cred)) | 1994 | (UNIXCB(skb).cred != siocb->scm->cred)) |
1996 | break; | 1995 | break; |
1997 | } else { | 1996 | } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { |
1998 | /* Copy credentials */ | 1997 | /* Copy credentials */ |
1999 | scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); | 1998 | scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); |
2000 | check_creds = 1; | 1999 | check_creds = 1; |
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index ca511c4f388a..7f93e2a42d7a 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c | |||
@@ -207,7 +207,7 @@ static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr) | |||
207 | struct vsock_sock *vsk; | 207 | struct vsock_sock *vsk; |
208 | 208 | ||
209 | list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) | 209 | list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) |
210 | if (vsock_addr_equals_addr_any(addr, &vsk->local_addr)) | 210 | if (addr->svm_port == vsk->local_addr.svm_port) |
211 | return sk_vsock(vsk); | 211 | return sk_vsock(vsk); |
212 | 212 | ||
213 | return NULL; | 213 | return NULL; |
@@ -220,8 +220,8 @@ static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src, | |||
220 | 220 | ||
221 | list_for_each_entry(vsk, vsock_connected_sockets(src, dst), | 221 | list_for_each_entry(vsk, vsock_connected_sockets(src, dst), |
222 | connected_table) { | 222 | connected_table) { |
223 | if (vsock_addr_equals_addr(src, &vsk->remote_addr) | 223 | if (vsock_addr_equals_addr(src, &vsk->remote_addr) && |
224 | && vsock_addr_equals_addr(dst, &vsk->local_addr)) { | 224 | dst->svm_port == vsk->local_addr.svm_port) { |
225 | return sk_vsock(vsk); | 225 | return sk_vsock(vsk); |
226 | } | 226 | } |
227 | } | 227 | } |
@@ -1670,6 +1670,8 @@ vsock_stream_recvmsg(struct kiocb *kiocb, | |||
1670 | vsk = vsock_sk(sk); | 1670 | vsk = vsock_sk(sk); |
1671 | err = 0; | 1671 | err = 0; |
1672 | 1672 | ||
1673 | msg->msg_namelen = 0; | ||
1674 | |||
1673 | lock_sock(sk); | 1675 | lock_sock(sk); |
1674 | 1676 | ||
1675 | if (sk->sk_state != SS_CONNECTED) { | 1677 | if (sk->sk_state != SS_CONNECTED) { |
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index a70ace83a153..5e04d3d96285 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c | |||
@@ -464,19 +464,16 @@ static struct sock *vmci_transport_get_pending( | |||
464 | struct vsock_sock *vlistener; | 464 | struct vsock_sock *vlistener; |
465 | struct vsock_sock *vpending; | 465 | struct vsock_sock *vpending; |
466 | struct sock *pending; | 466 | struct sock *pending; |
467 | struct sockaddr_vm src; | ||
468 | |||
469 | vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port); | ||
467 | 470 | ||
468 | vlistener = vsock_sk(listener); | 471 | vlistener = vsock_sk(listener); |
469 | 472 | ||
470 | list_for_each_entry(vpending, &vlistener->pending_links, | 473 | list_for_each_entry(vpending, &vlistener->pending_links, |
471 | pending_links) { | 474 | pending_links) { |
472 | struct sockaddr_vm src; | ||
473 | struct sockaddr_vm dst; | ||
474 | |||
475 | vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port); | ||
476 | vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port); | ||
477 | |||
478 | if (vsock_addr_equals_addr(&src, &vpending->remote_addr) && | 475 | if (vsock_addr_equals_addr(&src, &vpending->remote_addr) && |
479 | vsock_addr_equals_addr(&dst, &vpending->local_addr)) { | 476 | pkt->dst_port == vpending->local_addr.svm_port) { |
480 | pending = sk_vsock(vpending); | 477 | pending = sk_vsock(vpending); |
481 | sock_hold(pending); | 478 | sock_hold(pending); |
482 | goto found; | 479 | goto found; |
@@ -739,10 +736,15 @@ static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg) | |||
739 | */ | 736 | */ |
740 | bh_lock_sock(sk); | 737 | bh_lock_sock(sk); |
741 | 738 | ||
742 | if (!sock_owned_by_user(sk) && sk->sk_state == SS_CONNECTED) | 739 | if (!sock_owned_by_user(sk)) { |
743 | vmci_trans(vsk)->notify_ops->handle_notify_pkt( | 740 | /* The local context ID may be out of date, update it. */ |
744 | sk, pkt, true, &dst, &src, | 741 | vsk->local_addr.svm_cid = dst.svm_cid; |
745 | &bh_process_pkt); | 742 | |
743 | if (sk->sk_state == SS_CONNECTED) | ||
744 | vmci_trans(vsk)->notify_ops->handle_notify_pkt( | ||
745 | sk, pkt, true, &dst, &src, | ||
746 | &bh_process_pkt); | ||
747 | } | ||
746 | 748 | ||
747 | bh_unlock_sock(sk); | 749 | bh_unlock_sock(sk); |
748 | 750 | ||
@@ -902,6 +904,9 @@ static void vmci_transport_recv_pkt_work(struct work_struct *work) | |||
902 | 904 | ||
903 | lock_sock(sk); | 905 | lock_sock(sk); |
904 | 906 | ||
907 | /* The local context ID may be out of date. */ | ||
908 | vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context; | ||
909 | |||
905 | switch (sk->sk_state) { | 910 | switch (sk->sk_state) { |
906 | case SS_LISTEN: | 911 | case SS_LISTEN: |
907 | vmci_transport_recv_listen(sk, pkt); | 912 | vmci_transport_recv_listen(sk, pkt); |
@@ -958,6 +963,10 @@ static int vmci_transport_recv_listen(struct sock *sk, | |||
958 | pending = vmci_transport_get_pending(sk, pkt); | 963 | pending = vmci_transport_get_pending(sk, pkt); |
959 | if (pending) { | 964 | if (pending) { |
960 | lock_sock(pending); | 965 | lock_sock(pending); |
966 | |||
967 | /* The local context ID may be out of date. */ | ||
968 | vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context; | ||
969 | |||
961 | switch (pending->sk_state) { | 970 | switch (pending->sk_state) { |
962 | case SS_CONNECTING: | 971 | case SS_CONNECTING: |
963 | err = vmci_transport_recv_connecting_server(sk, | 972 | err = vmci_transport_recv_connecting_server(sk, |
@@ -1727,6 +1736,8 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb, | |||
1727 | if (flags & MSG_OOB || flags & MSG_ERRQUEUE) | 1736 | if (flags & MSG_OOB || flags & MSG_ERRQUEUE) |
1728 | return -EOPNOTSUPP; | 1737 | return -EOPNOTSUPP; |
1729 | 1738 | ||
1739 | msg->msg_namelen = 0; | ||
1740 | |||
1730 | /* Retrieve the head sk_buff from the socket's receive queue. */ | 1741 | /* Retrieve the head sk_buff from the socket's receive queue. */ |
1731 | err = 0; | 1742 | err = 0; |
1732 | skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); | 1743 | skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); |
@@ -1759,7 +1770,6 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb, | |||
1759 | if (err) | 1770 | if (err) |
1760 | goto out; | 1771 | goto out; |
1761 | 1772 | ||
1762 | msg->msg_namelen = 0; | ||
1763 | if (msg->msg_name) { | 1773 | if (msg->msg_name) { |
1764 | struct sockaddr_vm *vm_addr; | 1774 | struct sockaddr_vm *vm_addr; |
1765 | 1775 | ||
diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c index b7df1aea7c59..ec2611b4ea0e 100644 --- a/net/vmw_vsock/vsock_addr.c +++ b/net/vmw_vsock/vsock_addr.c | |||
@@ -64,16 +64,6 @@ bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, | |||
64 | } | 64 | } |
65 | EXPORT_SYMBOL_GPL(vsock_addr_equals_addr); | 65 | EXPORT_SYMBOL_GPL(vsock_addr_equals_addr); |
66 | 66 | ||
67 | bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, | ||
68 | const struct sockaddr_vm *other) | ||
69 | { | ||
70 | return (addr->svm_cid == VMADDR_CID_ANY || | ||
71 | other->svm_cid == VMADDR_CID_ANY || | ||
72 | addr->svm_cid == other->svm_cid) && | ||
73 | addr->svm_port == other->svm_port; | ||
74 | } | ||
75 | EXPORT_SYMBOL_GPL(vsock_addr_equals_addr_any); | ||
76 | |||
77 | int vsock_addr_cast(const struct sockaddr *addr, | 67 | int vsock_addr_cast(const struct sockaddr *addr, |
78 | size_t len, struct sockaddr_vm **out_addr) | 68 | size_t len, struct sockaddr_vm **out_addr) |
79 | { | 69 | { |
diff --git a/net/vmw_vsock/vsock_addr.h b/net/vmw_vsock/vsock_addr.h index cdfbcefdf843..9ccd5316eac0 100644 --- a/net/vmw_vsock/vsock_addr.h +++ b/net/vmw_vsock/vsock_addr.h | |||
@@ -24,8 +24,6 @@ bool vsock_addr_bound(const struct sockaddr_vm *addr); | |||
24 | void vsock_addr_unbind(struct sockaddr_vm *addr); | 24 | void vsock_addr_unbind(struct sockaddr_vm *addr); |
25 | bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, | 25 | bool vsock_addr_equals_addr(const struct sockaddr_vm *addr, |
26 | const struct sockaddr_vm *other); | 26 | const struct sockaddr_vm *other); |
27 | bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, | ||
28 | const struct sockaddr_vm *other); | ||
29 | int vsock_addr_cast(const struct sockaddr *addr, size_t len, | 27 | int vsock_addr_cast(const struct sockaddr *addr, size_t len, |
30 | struct sockaddr_vm **out_addr); | 28 | struct sockaddr_vm **out_addr); |
31 | 29 | ||
diff --git a/net/wireless/core.c b/net/wireless/core.c index ea4155fe9733..6ddf74f0ae1e 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -212,6 +212,39 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data) | |||
212 | rdev_rfkill_poll(rdev); | 212 | rdev_rfkill_poll(rdev); |
213 | } | 213 | } |
214 | 214 | ||
215 | void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, | ||
216 | struct wireless_dev *wdev) | ||
217 | { | ||
218 | lockdep_assert_held(&rdev->devlist_mtx); | ||
219 | lockdep_assert_held(&rdev->sched_scan_mtx); | ||
220 | |||
221 | if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)) | ||
222 | return; | ||
223 | |||
224 | if (!wdev->p2p_started) | ||
225 | return; | ||
226 | |||
227 | rdev_stop_p2p_device(rdev, wdev); | ||
228 | wdev->p2p_started = false; | ||
229 | |||
230 | rdev->opencount--; | ||
231 | |||
232 | if (rdev->scan_req && rdev->scan_req->wdev == wdev) { | ||
233 | bool busy = work_busy(&rdev->scan_done_wk); | ||
234 | |||
235 | /* | ||
236 | * If the work isn't pending or running (in which case it would | ||
237 | * be waiting for the lock we hold) the driver didn't properly | ||
238 | * cancel the scan when the interface was removed. In this case | ||
239 | * warn and leak the scan request object to not crash later. | ||
240 | */ | ||
241 | WARN_ON(!busy); | ||
242 | |||
243 | rdev->scan_req->aborted = true; | ||
244 | ___cfg80211_scan_done(rdev, !busy); | ||
245 | } | ||
246 | } | ||
247 | |||
215 | static int cfg80211_rfkill_set_block(void *data, bool blocked) | 248 | static int cfg80211_rfkill_set_block(void *data, bool blocked) |
216 | { | 249 | { |
217 | struct cfg80211_registered_device *rdev = data; | 250 | struct cfg80211_registered_device *rdev = data; |
@@ -221,7 +254,8 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) | |||
221 | return 0; | 254 | return 0; |
222 | 255 | ||
223 | rtnl_lock(); | 256 | rtnl_lock(); |
224 | mutex_lock(&rdev->devlist_mtx); | 257 | |
258 | /* read-only iteration need not hold the devlist_mtx */ | ||
225 | 259 | ||
226 | list_for_each_entry(wdev, &rdev->wdev_list, list) { | 260 | list_for_each_entry(wdev, &rdev->wdev_list, list) { |
227 | if (wdev->netdev) { | 261 | if (wdev->netdev) { |
@@ -231,18 +265,18 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked) | |||
231 | /* otherwise, check iftype */ | 265 | /* otherwise, check iftype */ |
232 | switch (wdev->iftype) { | 266 | switch (wdev->iftype) { |
233 | case NL80211_IFTYPE_P2P_DEVICE: | 267 | case NL80211_IFTYPE_P2P_DEVICE: |
234 | if (!wdev->p2p_started) | 268 | /* but this requires it */ |
235 | break; | 269 | mutex_lock(&rdev->devlist_mtx); |
236 | rdev_stop_p2p_device(rdev, wdev); | 270 | mutex_lock(&rdev->sched_scan_mtx); |
237 | wdev->p2p_started = false; | 271 | cfg80211_stop_p2p_device(rdev, wdev); |
238 | rdev->opencount--; | 272 | mutex_unlock(&rdev->sched_scan_mtx); |
273 | mutex_unlock(&rdev->devlist_mtx); | ||
239 | break; | 274 | break; |
240 | default: | 275 | default: |
241 | break; | 276 | break; |
242 | } | 277 | } |
243 | } | 278 | } |
244 | 279 | ||
245 | mutex_unlock(&rdev->devlist_mtx); | ||
246 | rtnl_unlock(); | 280 | rtnl_unlock(); |
247 | 281 | ||
248 | return 0; | 282 | return 0; |
@@ -745,17 +779,13 @@ static void wdev_cleanup_work(struct work_struct *work) | |||
745 | wdev = container_of(work, struct wireless_dev, cleanup_work); | 779 | wdev = container_of(work, struct wireless_dev, cleanup_work); |
746 | rdev = wiphy_to_dev(wdev->wiphy); | 780 | rdev = wiphy_to_dev(wdev->wiphy); |
747 | 781 | ||
748 | cfg80211_lock_rdev(rdev); | 782 | mutex_lock(&rdev->sched_scan_mtx); |
749 | 783 | ||
750 | if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { | 784 | if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { |
751 | rdev->scan_req->aborted = true; | 785 | rdev->scan_req->aborted = true; |
752 | ___cfg80211_scan_done(rdev, true); | 786 | ___cfg80211_scan_done(rdev, true); |
753 | } | 787 | } |
754 | 788 | ||
755 | cfg80211_unlock_rdev(rdev); | ||
756 | |||
757 | mutex_lock(&rdev->sched_scan_mtx); | ||
758 | |||
759 | if (WARN_ON(rdev->sched_scan_req && | 789 | if (WARN_ON(rdev->sched_scan_req && |
760 | rdev->sched_scan_req->dev == wdev->netdev)) { | 790 | rdev->sched_scan_req->dev == wdev->netdev)) { |
761 | __cfg80211_stop_sched_scan(rdev, false); | 791 | __cfg80211_stop_sched_scan(rdev, false); |
@@ -781,21 +811,19 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev) | |||
781 | return; | 811 | return; |
782 | 812 | ||
783 | mutex_lock(&rdev->devlist_mtx); | 813 | mutex_lock(&rdev->devlist_mtx); |
814 | mutex_lock(&rdev->sched_scan_mtx); | ||
784 | list_del_rcu(&wdev->list); | 815 | list_del_rcu(&wdev->list); |
785 | rdev->devlist_generation++; | 816 | rdev->devlist_generation++; |
786 | 817 | ||
787 | switch (wdev->iftype) { | 818 | switch (wdev->iftype) { |
788 | case NL80211_IFTYPE_P2P_DEVICE: | 819 | case NL80211_IFTYPE_P2P_DEVICE: |
789 | if (!wdev->p2p_started) | 820 | cfg80211_stop_p2p_device(rdev, wdev); |
790 | break; | ||
791 | rdev_stop_p2p_device(rdev, wdev); | ||
792 | wdev->p2p_started = false; | ||
793 | rdev->opencount--; | ||
794 | break; | 821 | break; |
795 | default: | 822 | default: |
796 | WARN_ON_ONCE(1); | 823 | WARN_ON_ONCE(1); |
797 | break; | 824 | break; |
798 | } | 825 | } |
826 | mutex_unlock(&rdev->sched_scan_mtx); | ||
799 | mutex_unlock(&rdev->devlist_mtx); | 827 | mutex_unlock(&rdev->devlist_mtx); |
800 | } | 828 | } |
801 | EXPORT_SYMBOL(cfg80211_unregister_wdev); | 829 | EXPORT_SYMBOL(cfg80211_unregister_wdev); |
@@ -936,6 +964,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | |||
936 | cfg80211_update_iface_num(rdev, wdev->iftype, 1); | 964 | cfg80211_update_iface_num(rdev, wdev->iftype, 1); |
937 | cfg80211_lock_rdev(rdev); | 965 | cfg80211_lock_rdev(rdev); |
938 | mutex_lock(&rdev->devlist_mtx); | 966 | mutex_lock(&rdev->devlist_mtx); |
967 | mutex_lock(&rdev->sched_scan_mtx); | ||
939 | wdev_lock(wdev); | 968 | wdev_lock(wdev); |
940 | switch (wdev->iftype) { | 969 | switch (wdev->iftype) { |
941 | #ifdef CONFIG_CFG80211_WEXT | 970 | #ifdef CONFIG_CFG80211_WEXT |
@@ -967,6 +996,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, | |||
967 | break; | 996 | break; |
968 | } | 997 | } |
969 | wdev_unlock(wdev); | 998 | wdev_unlock(wdev); |
999 | mutex_unlock(&rdev->sched_scan_mtx); | ||
970 | rdev->opencount++; | 1000 | rdev->opencount++; |
971 | mutex_unlock(&rdev->devlist_mtx); | 1001 | mutex_unlock(&rdev->devlist_mtx); |
972 | cfg80211_unlock_rdev(rdev); | 1002 | cfg80211_unlock_rdev(rdev); |
diff --git a/net/wireless/core.h b/net/wireless/core.h index 3aec0e429d8a..5845c2b37aa8 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -503,6 +503,9 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev, | |||
503 | void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, | 503 | void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, |
504 | enum nl80211_iftype iftype, int num); | 504 | enum nl80211_iftype iftype, int num); |
505 | 505 | ||
506 | void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, | ||
507 | struct wireless_dev *wdev); | ||
508 | |||
506 | #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 | 509 | #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 |
507 | 510 | ||
508 | #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS | 511 | #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d44ab216c0ec..58e13a8c95f9 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -4702,14 +4702,19 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
4702 | if (!rdev->ops->scan) | 4702 | if (!rdev->ops->scan) |
4703 | return -EOPNOTSUPP; | 4703 | return -EOPNOTSUPP; |
4704 | 4704 | ||
4705 | if (rdev->scan_req) | 4705 | mutex_lock(&rdev->sched_scan_mtx); |
4706 | return -EBUSY; | 4706 | if (rdev->scan_req) { |
4707 | err = -EBUSY; | ||
4708 | goto unlock; | ||
4709 | } | ||
4707 | 4710 | ||
4708 | if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { | 4711 | if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { |
4709 | n_channels = validate_scan_freqs( | 4712 | n_channels = validate_scan_freqs( |
4710 | info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); | 4713 | info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); |
4711 | if (!n_channels) | 4714 | if (!n_channels) { |
4712 | return -EINVAL; | 4715 | err = -EINVAL; |
4716 | goto unlock; | ||
4717 | } | ||
4713 | } else { | 4718 | } else { |
4714 | enum ieee80211_band band; | 4719 | enum ieee80211_band band; |
4715 | n_channels = 0; | 4720 | n_channels = 0; |
@@ -4723,23 +4728,29 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
4723 | nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) | 4728 | nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) |
4724 | n_ssids++; | 4729 | n_ssids++; |
4725 | 4730 | ||
4726 | if (n_ssids > wiphy->max_scan_ssids) | 4731 | if (n_ssids > wiphy->max_scan_ssids) { |
4727 | return -EINVAL; | 4732 | err = -EINVAL; |
4733 | goto unlock; | ||
4734 | } | ||
4728 | 4735 | ||
4729 | if (info->attrs[NL80211_ATTR_IE]) | 4736 | if (info->attrs[NL80211_ATTR_IE]) |
4730 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); | 4737 | ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); |
4731 | else | 4738 | else |
4732 | ie_len = 0; | 4739 | ie_len = 0; |
4733 | 4740 | ||
4734 | if (ie_len > wiphy->max_scan_ie_len) | 4741 | if (ie_len > wiphy->max_scan_ie_len) { |
4735 | return -EINVAL; | 4742 | err = -EINVAL; |
4743 | goto unlock; | ||
4744 | } | ||
4736 | 4745 | ||
4737 | request = kzalloc(sizeof(*request) | 4746 | request = kzalloc(sizeof(*request) |
4738 | + sizeof(*request->ssids) * n_ssids | 4747 | + sizeof(*request->ssids) * n_ssids |
4739 | + sizeof(*request->channels) * n_channels | 4748 | + sizeof(*request->channels) * n_channels |
4740 | + ie_len, GFP_KERNEL); | 4749 | + ie_len, GFP_KERNEL); |
4741 | if (!request) | 4750 | if (!request) { |
4742 | return -ENOMEM; | 4751 | err = -ENOMEM; |
4752 | goto unlock; | ||
4753 | } | ||
4743 | 4754 | ||
4744 | if (n_ssids) | 4755 | if (n_ssids) |
4745 | request->ssids = (void *)&request->channels[n_channels]; | 4756 | request->ssids = (void *)&request->channels[n_channels]; |
@@ -4876,6 +4887,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) | |||
4876 | kfree(request); | 4887 | kfree(request); |
4877 | } | 4888 | } |
4878 | 4889 | ||
4890 | unlock: | ||
4891 | mutex_unlock(&rdev->sched_scan_mtx); | ||
4879 | return err; | 4892 | return err; |
4880 | } | 4893 | } |
4881 | 4894 | ||
@@ -7749,20 +7762,9 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info) | |||
7749 | if (!rdev->ops->stop_p2p_device) | 7762 | if (!rdev->ops->stop_p2p_device) |
7750 | return -EOPNOTSUPP; | 7763 | return -EOPNOTSUPP; |
7751 | 7764 | ||
7752 | if (!wdev->p2p_started) | 7765 | mutex_lock(&rdev->sched_scan_mtx); |
7753 | return 0; | 7766 | cfg80211_stop_p2p_device(rdev, wdev); |
7754 | 7767 | mutex_unlock(&rdev->sched_scan_mtx); | |
7755 | rdev_stop_p2p_device(rdev, wdev); | ||
7756 | wdev->p2p_started = false; | ||
7757 | |||
7758 | mutex_lock(&rdev->devlist_mtx); | ||
7759 | rdev->opencount--; | ||
7760 | mutex_unlock(&rdev->devlist_mtx); | ||
7761 | |||
7762 | if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { | ||
7763 | rdev->scan_req->aborted = true; | ||
7764 | ___cfg80211_scan_done(rdev, true); | ||
7765 | } | ||
7766 | 7768 | ||
7767 | return 0; | 7769 | return 0; |
7768 | } | 7770 | } |
@@ -8486,7 +8488,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg, | |||
8486 | struct nlattr *nest; | 8488 | struct nlattr *nest; |
8487 | int i; | 8489 | int i; |
8488 | 8490 | ||
8489 | ASSERT_RDEV_LOCK(rdev); | 8491 | lockdep_assert_held(&rdev->sched_scan_mtx); |
8490 | 8492 | ||
8491 | if (WARN_ON(!req)) | 8493 | if (WARN_ON(!req)) |
8492 | return 0; | 8494 | return 0; |
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 674aadca0079..fd99ea495b7e 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -169,7 +169,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) | |||
169 | union iwreq_data wrqu; | 169 | union iwreq_data wrqu; |
170 | #endif | 170 | #endif |
171 | 171 | ||
172 | ASSERT_RDEV_LOCK(rdev); | 172 | lockdep_assert_held(&rdev->sched_scan_mtx); |
173 | 173 | ||
174 | request = rdev->scan_req; | 174 | request = rdev->scan_req; |
175 | 175 | ||
@@ -230,9 +230,9 @@ void __cfg80211_scan_done(struct work_struct *wk) | |||
230 | rdev = container_of(wk, struct cfg80211_registered_device, | 230 | rdev = container_of(wk, struct cfg80211_registered_device, |
231 | scan_done_wk); | 231 | scan_done_wk); |
232 | 232 | ||
233 | cfg80211_lock_rdev(rdev); | 233 | mutex_lock(&rdev->sched_scan_mtx); |
234 | ___cfg80211_scan_done(rdev, false); | 234 | ___cfg80211_scan_done(rdev, false); |
235 | cfg80211_unlock_rdev(rdev); | 235 | mutex_unlock(&rdev->sched_scan_mtx); |
236 | } | 236 | } |
237 | 237 | ||
238 | void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) | 238 | void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) |
@@ -698,11 +698,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
698 | found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR); | 698 | found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR); |
699 | 699 | ||
700 | if (found) { | 700 | if (found) { |
701 | found->pub.beacon_interval = tmp->pub.beacon_interval; | ||
702 | found->pub.signal = tmp->pub.signal; | ||
703 | found->pub.capability = tmp->pub.capability; | ||
704 | found->ts = tmp->ts; | ||
705 | |||
706 | /* Update IEs */ | 701 | /* Update IEs */ |
707 | if (rcu_access_pointer(tmp->pub.proberesp_ies)) { | 702 | if (rcu_access_pointer(tmp->pub.proberesp_ies)) { |
708 | const struct cfg80211_bss_ies *old; | 703 | const struct cfg80211_bss_ies *old; |
@@ -723,6 +718,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
723 | 718 | ||
724 | if (found->pub.hidden_beacon_bss && | 719 | if (found->pub.hidden_beacon_bss && |
725 | !list_empty(&found->hidden_list)) { | 720 | !list_empty(&found->hidden_list)) { |
721 | const struct cfg80211_bss_ies *f; | ||
722 | |||
726 | /* | 723 | /* |
727 | * The found BSS struct is one of the probe | 724 | * The found BSS struct is one of the probe |
728 | * response members of a group, but we're | 725 | * response members of a group, but we're |
@@ -732,6 +729,10 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
732 | * SSID to showing it, which is confusing so | 729 | * SSID to showing it, which is confusing so |
733 | * drop this information. | 730 | * drop this information. |
734 | */ | 731 | */ |
732 | |||
733 | f = rcu_access_pointer(tmp->pub.beacon_ies); | ||
734 | kfree_rcu((struct cfg80211_bss_ies *)f, | ||
735 | rcu_head); | ||
735 | goto drop; | 736 | goto drop; |
736 | } | 737 | } |
737 | 738 | ||
@@ -761,6 +762,11 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev, | |||
761 | kfree_rcu((struct cfg80211_bss_ies *)old, | 762 | kfree_rcu((struct cfg80211_bss_ies *)old, |
762 | rcu_head); | 763 | rcu_head); |
763 | } | 764 | } |
765 | |||
766 | found->pub.beacon_interval = tmp->pub.beacon_interval; | ||
767 | found->pub.signal = tmp->pub.signal; | ||
768 | found->pub.capability = tmp->pub.capability; | ||
769 | found->ts = tmp->ts; | ||
764 | } else { | 770 | } else { |
765 | struct cfg80211_internal_bss *new; | 771 | struct cfg80211_internal_bss *new; |
766 | struct cfg80211_internal_bss *hidden; | 772 | struct cfg80211_internal_bss *hidden; |
@@ -1056,6 +1062,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
1056 | if (IS_ERR(rdev)) | 1062 | if (IS_ERR(rdev)) |
1057 | return PTR_ERR(rdev); | 1063 | return PTR_ERR(rdev); |
1058 | 1064 | ||
1065 | mutex_lock(&rdev->sched_scan_mtx); | ||
1059 | if (rdev->scan_req) { | 1066 | if (rdev->scan_req) { |
1060 | err = -EBUSY; | 1067 | err = -EBUSY; |
1061 | goto out; | 1068 | goto out; |
@@ -1162,6 +1169,7 @@ int cfg80211_wext_siwscan(struct net_device *dev, | |||
1162 | dev_hold(dev); | 1169 | dev_hold(dev); |
1163 | } | 1170 | } |
1164 | out: | 1171 | out: |
1172 | mutex_unlock(&rdev->sched_scan_mtx); | ||
1165 | kfree(creq); | 1173 | kfree(creq); |
1166 | cfg80211_unlock_rdev(rdev); | 1174 | cfg80211_unlock_rdev(rdev); |
1167 | return err; | 1175 | return err; |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index f432bd3755b1..482c70e70127 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -85,6 +85,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) | |||
85 | ASSERT_RTNL(); | 85 | ASSERT_RTNL(); |
86 | ASSERT_RDEV_LOCK(rdev); | 86 | ASSERT_RDEV_LOCK(rdev); |
87 | ASSERT_WDEV_LOCK(wdev); | 87 | ASSERT_WDEV_LOCK(wdev); |
88 | lockdep_assert_held(&rdev->sched_scan_mtx); | ||
88 | 89 | ||
89 | if (rdev->scan_req) | 90 | if (rdev->scan_req) |
90 | return -EBUSY; | 91 | return -EBUSY; |
@@ -223,6 +224,7 @@ void cfg80211_conn_work(struct work_struct *work) | |||
223 | rtnl_lock(); | 224 | rtnl_lock(); |
224 | cfg80211_lock_rdev(rdev); | 225 | cfg80211_lock_rdev(rdev); |
225 | mutex_lock(&rdev->devlist_mtx); | 226 | mutex_lock(&rdev->devlist_mtx); |
227 | mutex_lock(&rdev->sched_scan_mtx); | ||
226 | 228 | ||
227 | list_for_each_entry(wdev, &rdev->wdev_list, list) { | 229 | list_for_each_entry(wdev, &rdev->wdev_list, list) { |
228 | wdev_lock(wdev); | 230 | wdev_lock(wdev); |
@@ -247,6 +249,7 @@ void cfg80211_conn_work(struct work_struct *work) | |||
247 | wdev_unlock(wdev); | 249 | wdev_unlock(wdev); |
248 | } | 250 | } |
249 | 251 | ||
252 | mutex_unlock(&rdev->sched_scan_mtx); | ||
250 | mutex_unlock(&rdev->devlist_mtx); | 253 | mutex_unlock(&rdev->devlist_mtx); |
251 | cfg80211_unlock_rdev(rdev); | 254 | cfg80211_unlock_rdev(rdev); |
252 | rtnl_unlock(); | 255 | rtnl_unlock(); |
@@ -320,11 +323,9 @@ void cfg80211_sme_scan_done(struct net_device *dev) | |||
320 | { | 323 | { |
321 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 324 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
322 | 325 | ||
323 | mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); | ||
324 | wdev_lock(wdev); | 326 | wdev_lock(wdev); |
325 | __cfg80211_sme_scan_done(dev); | 327 | __cfg80211_sme_scan_done(dev); |
326 | wdev_unlock(wdev); | 328 | wdev_unlock(wdev); |
327 | mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx); | ||
328 | } | 329 | } |
329 | 330 | ||
330 | void cfg80211_sme_rx_auth(struct net_device *dev, | 331 | void cfg80211_sme_rx_auth(struct net_device *dev, |
@@ -924,9 +925,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, | |||
924 | int err; | 925 | int err; |
925 | 926 | ||
926 | mutex_lock(&rdev->devlist_mtx); | 927 | mutex_lock(&rdev->devlist_mtx); |
928 | /* might request scan - scan_mtx -> wdev_mtx dependency */ | ||
929 | mutex_lock(&rdev->sched_scan_mtx); | ||
927 | wdev_lock(dev->ieee80211_ptr); | 930 | wdev_lock(dev->ieee80211_ptr); |
928 | err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); | 931 | err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL); |
929 | wdev_unlock(dev->ieee80211_ptr); | 932 | wdev_unlock(dev->ieee80211_ptr); |
933 | mutex_unlock(&rdev->sched_scan_mtx); | ||
930 | mutex_unlock(&rdev->devlist_mtx); | 934 | mutex_unlock(&rdev->devlist_mtx); |
931 | 935 | ||
932 | return err; | 936 | return err; |
diff --git a/net/wireless/trace.h b/net/wireless/trace.h index b7a531380e19..7586de77a2f8 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h | |||
@@ -27,7 +27,8 @@ | |||
27 | #define WIPHY_PR_ARG __entry->wiphy_name | 27 | #define WIPHY_PR_ARG __entry->wiphy_name |
28 | 28 | ||
29 | #define WDEV_ENTRY __field(u32, id) | 29 | #define WDEV_ENTRY __field(u32, id) |
30 | #define WDEV_ASSIGN (__entry->id) = (wdev ? wdev->identifier : 0) | 30 | #define WDEV_ASSIGN (__entry->id) = (!IS_ERR_OR_NULL(wdev) \ |
31 | ? wdev->identifier : 0) | ||
31 | #define WDEV_PR_FMT "wdev(%u)" | 32 | #define WDEV_PR_FMT "wdev(%u)" |
32 | #define WDEV_PR_ARG (__entry->id) | 33 | #define WDEV_PR_ARG (__entry->id) |
33 | 34 | ||
@@ -1778,7 +1779,7 @@ TRACE_EVENT(rdev_set_mac_acl, | |||
1778 | ), | 1779 | ), |
1779 | TP_fast_assign( | 1780 | TP_fast_assign( |
1780 | WIPHY_ASSIGN; | 1781 | WIPHY_ASSIGN; |
1781 | WIPHY_ASSIGN; | 1782 | NETDEV_ASSIGN; |
1782 | __entry->acl_policy = params->acl_policy; | 1783 | __entry->acl_policy = params->acl_policy; |
1783 | ), | 1784 | ), |
1784 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d", | 1785 | TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d", |
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index fb9622f6d99c..e79cb5c0655a 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c | |||
@@ -89,6 +89,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, | |||
89 | 89 | ||
90 | cfg80211_lock_rdev(rdev); | 90 | cfg80211_lock_rdev(rdev); |
91 | mutex_lock(&rdev->devlist_mtx); | 91 | mutex_lock(&rdev->devlist_mtx); |
92 | mutex_lock(&rdev->sched_scan_mtx); | ||
92 | wdev_lock(wdev); | 93 | wdev_lock(wdev); |
93 | 94 | ||
94 | if (wdev->sme_state != CFG80211_SME_IDLE) { | 95 | if (wdev->sme_state != CFG80211_SME_IDLE) { |
@@ -135,6 +136,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev, | |||
135 | err = cfg80211_mgd_wext_connect(rdev, wdev); | 136 | err = cfg80211_mgd_wext_connect(rdev, wdev); |
136 | out: | 137 | out: |
137 | wdev_unlock(wdev); | 138 | wdev_unlock(wdev); |
139 | mutex_unlock(&rdev->sched_scan_mtx); | ||
138 | mutex_unlock(&rdev->devlist_mtx); | 140 | mutex_unlock(&rdev->devlist_mtx); |
139 | cfg80211_unlock_rdev(rdev); | 141 | cfg80211_unlock_rdev(rdev); |
140 | return err; | 142 | return err; |
@@ -190,6 +192,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, | |||
190 | 192 | ||
191 | cfg80211_lock_rdev(rdev); | 193 | cfg80211_lock_rdev(rdev); |
192 | mutex_lock(&rdev->devlist_mtx); | 194 | mutex_lock(&rdev->devlist_mtx); |
195 | mutex_lock(&rdev->sched_scan_mtx); | ||
193 | wdev_lock(wdev); | 196 | wdev_lock(wdev); |
194 | 197 | ||
195 | err = 0; | 198 | err = 0; |
@@ -223,6 +226,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev, | |||
223 | err = cfg80211_mgd_wext_connect(rdev, wdev); | 226 | err = cfg80211_mgd_wext_connect(rdev, wdev); |
224 | out: | 227 | out: |
225 | wdev_unlock(wdev); | 228 | wdev_unlock(wdev); |
229 | mutex_unlock(&rdev->sched_scan_mtx); | ||
226 | mutex_unlock(&rdev->devlist_mtx); | 230 | mutex_unlock(&rdev->devlist_mtx); |
227 | cfg80211_unlock_rdev(rdev); | 231 | cfg80211_unlock_rdev(rdev); |
228 | return err; | 232 | return err; |
@@ -285,6 +289,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, | |||
285 | 289 | ||
286 | cfg80211_lock_rdev(rdev); | 290 | cfg80211_lock_rdev(rdev); |
287 | mutex_lock(&rdev->devlist_mtx); | 291 | mutex_lock(&rdev->devlist_mtx); |
292 | mutex_lock(&rdev->sched_scan_mtx); | ||
288 | wdev_lock(wdev); | 293 | wdev_lock(wdev); |
289 | 294 | ||
290 | if (wdev->sme_state != CFG80211_SME_IDLE) { | 295 | if (wdev->sme_state != CFG80211_SME_IDLE) { |
@@ -313,6 +318,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev, | |||
313 | err = cfg80211_mgd_wext_connect(rdev, wdev); | 318 | err = cfg80211_mgd_wext_connect(rdev, wdev); |
314 | out: | 319 | out: |
315 | wdev_unlock(wdev); | 320 | wdev_unlock(wdev); |
321 | mutex_unlock(&rdev->sched_scan_mtx); | ||
316 | mutex_unlock(&rdev->devlist_mtx); | 322 | mutex_unlock(&rdev->devlist_mtx); |
317 | cfg80211_unlock_rdev(rdev); | 323 | cfg80211_unlock_rdev(rdev); |
318 | return err; | 324 | return err; |
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 35754cc8a9e5..8dafe6d3c6e4 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c | |||
@@ -334,6 +334,70 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event) | |||
334 | x->xflags &= ~XFRM_TIME_DEFER; | 334 | x->xflags &= ~XFRM_TIME_DEFER; |
335 | } | 335 | } |
336 | 336 | ||
337 | static void xfrm_replay_notify_esn(struct xfrm_state *x, int event) | ||
338 | { | ||
339 | u32 seq_diff, oseq_diff; | ||
340 | struct km_event c; | ||
341 | struct xfrm_replay_state_esn *replay_esn = x->replay_esn; | ||
342 | struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn; | ||
343 | |||
344 | /* we send notify messages in case | ||
345 | * 1. we updated on of the sequence numbers, and the seqno difference | ||
346 | * is at least x->replay_maxdiff, in this case we also update the | ||
347 | * timeout of our timer function | ||
348 | * 2. if x->replay_maxage has elapsed since last update, | ||
349 | * and there were changes | ||
350 | * | ||
351 | * The state structure must be locked! | ||
352 | */ | ||
353 | |||
354 | switch (event) { | ||
355 | case XFRM_REPLAY_UPDATE: | ||
356 | if (!x->replay_maxdiff) | ||
357 | break; | ||
358 | |||
359 | if (replay_esn->seq_hi == preplay_esn->seq_hi) | ||
360 | seq_diff = replay_esn->seq - preplay_esn->seq; | ||
361 | else | ||
362 | seq_diff = ~preplay_esn->seq + replay_esn->seq + 1; | ||
363 | |||
364 | if (replay_esn->oseq_hi == preplay_esn->oseq_hi) | ||
365 | oseq_diff = replay_esn->oseq - preplay_esn->oseq; | ||
366 | else | ||
367 | oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1; | ||
368 | |||
369 | if (seq_diff < x->replay_maxdiff && | ||
370 | oseq_diff < x->replay_maxdiff) { | ||
371 | |||
372 | if (x->xflags & XFRM_TIME_DEFER) | ||
373 | event = XFRM_REPLAY_TIMEOUT; | ||
374 | else | ||
375 | return; | ||
376 | } | ||
377 | |||
378 | break; | ||
379 | |||
380 | case XFRM_REPLAY_TIMEOUT: | ||
381 | if (memcmp(x->replay_esn, x->preplay_esn, | ||
382 | xfrm_replay_state_esn_len(replay_esn)) == 0) { | ||
383 | x->xflags |= XFRM_TIME_DEFER; | ||
384 | return; | ||
385 | } | ||
386 | |||
387 | break; | ||
388 | } | ||
389 | |||
390 | memcpy(x->preplay_esn, x->replay_esn, | ||
391 | xfrm_replay_state_esn_len(replay_esn)); | ||
392 | c.event = XFRM_MSG_NEWAE; | ||
393 | c.data.aevent = event; | ||
394 | km_state_notify(x, &c); | ||
395 | |||
396 | if (x->replay_maxage && | ||
397 | !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) | ||
398 | x->xflags &= ~XFRM_TIME_DEFER; | ||
399 | } | ||
400 | |||
337 | static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb) | 401 | static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb) |
338 | { | 402 | { |
339 | int err = 0; | 403 | int err = 0; |
@@ -510,7 +574,7 @@ static struct xfrm_replay xfrm_replay_esn = { | |||
510 | .advance = xfrm_replay_advance_esn, | 574 | .advance = xfrm_replay_advance_esn, |
511 | .check = xfrm_replay_check_esn, | 575 | .check = xfrm_replay_check_esn, |
512 | .recheck = xfrm_replay_recheck_esn, | 576 | .recheck = xfrm_replay_recheck_esn, |
513 | .notify = xfrm_replay_notify_bmp, | 577 | .notify = xfrm_replay_notify_esn, |
514 | .overflow = xfrm_replay_overflow_esn, | 578 | .overflow = xfrm_replay_overflow_esn, |
515 | }; | 579 | }; |
516 | 580 | ||
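
In the seq_diff/oseq_diff computation added above, the ~pre + cur + 1 form is modular subtraction written out: adding the two's complement of the old value gives cur - pre modulo 2^32, which is the right distance when the 32-bit low word has wrapped while seq_hi moved on. A tiny standalone check (not kernel code):

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint32_t pre = 0xfffffff0u;	/* just before a 32-bit wrap */
	uint32_t cur = 0x00000010u;	/* shortly after the wrap */

	uint32_t diff_sub  = cur - pre;		/* modular subtraction */
	uint32_t diff_hunk = ~pre + cur + 1u;	/* form used in the hunk */

	assert(diff_sub == diff_hunk);		/* both equal 0x20 */
	return 0;
}
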
diff --git a/security/capability.c b/security/capability.c index 579775088967..6783c3e6c88e 100644 --- a/security/capability.c +++ b/security/capability.c | |||
@@ -737,6 +737,11 @@ static int cap_tun_dev_open(void *security) | |||
737 | { | 737 | { |
738 | return 0; | 738 | return 0; |
739 | } | 739 | } |
740 | |||
741 | static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk) | ||
742 | { | ||
743 | } | ||
744 | |||
740 | #endif /* CONFIG_SECURITY_NETWORK */ | 745 | #endif /* CONFIG_SECURITY_NETWORK */ |
741 | 746 | ||
742 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 747 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
@@ -1071,6 +1076,7 @@ void __init security_fixup_ops(struct security_operations *ops) | |||
1071 | set_to_cap_if_null(ops, tun_dev_open); | 1076 | set_to_cap_if_null(ops, tun_dev_open); |
1072 | set_to_cap_if_null(ops, tun_dev_attach_queue); | 1077 | set_to_cap_if_null(ops, tun_dev_attach_queue); |
1073 | set_to_cap_if_null(ops, tun_dev_attach); | 1078 | set_to_cap_if_null(ops, tun_dev_attach); |
1079 | set_to_cap_if_null(ops, skb_owned_by); | ||
1074 | #endif /* CONFIG_SECURITY_NETWORK */ | 1080 | #endif /* CONFIG_SECURITY_NETWORK */ |
1075 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 1081 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
1076 | set_to_cap_if_null(ops, xfrm_policy_alloc_security); | 1082 | set_to_cap_if_null(ops, xfrm_policy_alloc_security); |
diff --git a/security/security.c b/security/security.c index 7b88c6aeaed4..03f248b84e9f 100644 --- a/security/security.c +++ b/security/security.c | |||
@@ -1290,6 +1290,11 @@ int security_tun_dev_open(void *security) | |||
1290 | } | 1290 | } |
1291 | EXPORT_SYMBOL(security_tun_dev_open); | 1291 | EXPORT_SYMBOL(security_tun_dev_open); |
1292 | 1292 | ||
1293 | void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) | ||
1294 | { | ||
1295 | security_ops->skb_owned_by(skb, sk); | ||
1296 | } | ||
1297 | |||
1293 | #endif /* CONFIG_SECURITY_NETWORK */ | 1298 | #endif /* CONFIG_SECURITY_NETWORK */ |
1294 | 1299 | ||
1295 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 1300 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 2fa28c88900c..7171a957b933 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <linux/tty.h> | 51 | #include <linux/tty.h> |
52 | #include <net/icmp.h> | 52 | #include <net/icmp.h> |
53 | #include <net/ip.h> /* for local_port_range[] */ | 53 | #include <net/ip.h> /* for local_port_range[] */ |
54 | #include <net/sock.h> | ||
54 | #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ | 55 | #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ |
55 | #include <net/net_namespace.h> | 56 | #include <net/net_namespace.h> |
56 | #include <net/netlabel.h> | 57 | #include <net/netlabel.h> |
@@ -4363,6 +4364,11 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb) | |||
4363 | selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid); | 4364 | selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid); |
4364 | } | 4365 | } |
4365 | 4366 | ||
4367 | static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk) | ||
4368 | { | ||
4369 | skb_set_owner_w(skb, sk); | ||
4370 | } | ||
4371 | |||
4366 | static int selinux_secmark_relabel_packet(u32 sid) | 4372 | static int selinux_secmark_relabel_packet(u32 sid) |
4367 | { | 4373 | { |
4368 | const struct task_security_struct *__tsec; | 4374 | const struct task_security_struct *__tsec; |
@@ -5664,6 +5670,7 @@ static struct security_operations selinux_ops = { | |||
5664 | .tun_dev_attach_queue = selinux_tun_dev_attach_queue, | 5670 | .tun_dev_attach_queue = selinux_tun_dev_attach_queue, |
5665 | .tun_dev_attach = selinux_tun_dev_attach, | 5671 | .tun_dev_attach = selinux_tun_dev_attach, |
5666 | .tun_dev_open = selinux_tun_dev_open, | 5672 | .tun_dev_open = selinux_tun_dev_open, |
5673 | .skb_owned_by = selinux_skb_owned_by, | ||
5667 | 5674 | ||
5668 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 5675 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
5669 | .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc, | 5676 | .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc, |
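
The three security hunks above wire a new hook up in the usual places: a do-nothing default that capability.c installs when nothing else is registered, a security_skb_owned_by() dispatcher in security.c, and the SELinux implementation that calls skb_set_owner_w(). Reduced to its shape (types and every name other than the dispatcher are invented):

#include <stdio.h>

struct example_skb;		/* stand-in for struct sk_buff */
struct example_sock;		/* stand-in for struct sock */

struct sec_ops {
	void (*skb_owned_by)(struct example_skb *skb, struct example_sock *sk);
};

static void default_skb_owned_by(struct example_skb *skb, struct example_sock *sk)
{
	(void)skb; (void)sk;	/* default: do nothing */
}

static void real_skb_owned_by(struct example_skb *skb, struct example_sock *sk)
{
	(void)skb; (void)sk;
	printf("tagging packet with its owning socket\n");
}

static struct sec_ops ops = { .skb_owned_by = default_skb_owned_by };

static void security_skb_owned_by(struct example_skb *skb, struct example_sock *sk)
{
	ops.skb_owned_by(skb, sk);	/* single dispatch point for callers */
}

int main(void)
{
	security_skb_owned_by(NULL, NULL);	/* no-op default */
	ops.skb_owned_by = real_skb_owned_by;	/* a module installs its hook */
	security_skb_owned_by(NULL, NULL);
	return 0;
}
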
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index 48665ecd1197..8ab295154517 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c | |||
@@ -310,7 +310,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, | |||
310 | 310 | ||
311 | if (old_ctx) { | 311 | if (old_ctx) { |
312 | new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len, | 312 | new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len, |
313 | GFP_KERNEL); | 313 | GFP_ATOMIC); |
314 | if (!new_ctx) | 314 | if (!new_ctx) |
315 | return -ENOMEM; | 315 | return -ENOMEM; |
316 | 316 | ||
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index 23414b93771f..13c88fbcf037 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c | |||
@@ -347,10 +347,8 @@ int yama_ptrace_traceme(struct task_struct *parent) | |||
347 | /* Only disallow PTRACE_TRACEME on more aggressive settings. */ | 347 | /* Only disallow PTRACE_TRACEME on more aggressive settings. */ |
348 | switch (ptrace_scope) { | 348 | switch (ptrace_scope) { |
349 | case YAMA_SCOPE_CAPABILITY: | 349 | case YAMA_SCOPE_CAPABILITY: |
350 | rcu_read_lock(); | 350 | if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE)) |
351 | if (!ns_capable(__task_cred(parent)->user_ns, CAP_SYS_PTRACE)) | ||
352 | rc = -EPERM; | 351 | rc = -EPERM; |
353 | rcu_read_unlock(); | ||
354 | break; | 352 | break; |
355 | case YAMA_SCOPE_NO_ATTACH: | 353 | case YAMA_SCOPE_NO_ATTACH: |
356 | rc = -EPERM; | 354 | rc = -EPERM; |
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index a9ebcf9e3710..4aba7646dd9c 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -173,7 +173,7 @@ const char *snd_hda_get_jack_type(u32 cfg) | |||
173 | "Line Out", "Speaker", "HP Out", "CD", | 173 | "Line Out", "Speaker", "HP Out", "CD", |
174 | "SPDIF Out", "Digital Out", "Modem Line", "Modem Hand", | 174 | "SPDIF Out", "Digital Out", "Modem Line", "Modem Hand", |
175 | "Line In", "Aux", "Mic", "Telephony", | 175 | "Line In", "Aux", "Mic", "Telephony", |
176 | "SPDIF In", "Digitial In", "Reserved", "Other" | 176 | "SPDIF In", "Digital In", "Reserved", "Other" |
177 | }; | 177 | }; |
178 | 178 | ||
179 | return jack_types[(cfg & AC_DEFCFG_DEVICE) | 179 | return jack_types[(cfg & AC_DEFCFG_DEVICE) |
@@ -3144,7 +3144,7 @@ static unsigned int convert_to_spdif_status(unsigned short val) | |||
3144 | if (val & AC_DIG1_PROFESSIONAL) | 3144 | if (val & AC_DIG1_PROFESSIONAL) |
3145 | sbits |= IEC958_AES0_PROFESSIONAL; | 3145 | sbits |= IEC958_AES0_PROFESSIONAL; |
3146 | if (sbits & IEC958_AES0_PROFESSIONAL) { | 3146 | if (sbits & IEC958_AES0_PROFESSIONAL) { |
3147 | if (sbits & AC_DIG1_EMPHASIS) | 3147 | if (val & AC_DIG1_EMPHASIS) |
3148 | sbits |= IEC958_AES0_PRO_EMPHASIS_5015; | 3148 | sbits |= IEC958_AES0_PRO_EMPHASIS_5015; |
3149 | } else { | 3149 | } else { |
3150 | if (val & AC_DIG1_EMPHASIS) | 3150 | if (val & AC_DIG1_EMPHASIS) |
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c index 7dd846380a50..d0d7ac1e99d2 100644 --- a/sound/pci/hda/hda_eld.c +++ b/sound/pci/hda/hda_eld.c | |||
@@ -320,7 +320,7 @@ int snd_hdmi_get_eld(struct hda_codec *codec, hda_nid_t nid, | |||
320 | unsigned char *buf, int *eld_size) | 320 | unsigned char *buf, int *eld_size) |
321 | { | 321 | { |
322 | int i; | 322 | int i; |
323 | int ret; | 323 | int ret = 0; |
324 | int size; | 324 | int size; |
325 | 325 | ||
326 | /* | 326 | /* |
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 78897d05d80f..2dbe767be16b 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c | |||
@@ -740,7 +740,7 @@ EXPORT_SYMBOL_HDA(snd_hda_activate_path); | |||
740 | static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path) | 740 | static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path) |
741 | { | 741 | { |
742 | struct hda_gen_spec *spec = codec->spec; | 742 | struct hda_gen_spec *spec = codec->spec; |
743 | bool changed; | 743 | bool changed = false; |
744 | int i; | 744 | int i; |
745 | 745 | ||
746 | if (!spec->power_down_unused || path->active) | 746 | if (!spec->power_down_unused || path->active) |
@@ -995,6 +995,8 @@ enum { | |||
995 | BAD_NO_EXTRA_SURR_DAC = 0x101, | 995 | BAD_NO_EXTRA_SURR_DAC = 0x101, |
996 | /* Primary DAC shared with main surrounds */ | 996 | /* Primary DAC shared with main surrounds */ |
997 | BAD_SHARED_SURROUND = 0x100, | 997 | BAD_SHARED_SURROUND = 0x100, |
998 | /* No independent HP possible */ | ||
999 | BAD_NO_INDEP_HP = 0x40, | ||
998 | /* Primary DAC shared with main CLFE */ | 1000 | /* Primary DAC shared with main CLFE */ |
999 | BAD_SHARED_CLFE = 0x10, | 1001 | BAD_SHARED_CLFE = 0x10, |
1000 | /* Primary DAC shared with extra surrounds */ | 1002 | /* Primary DAC shared with extra surrounds */ |
@@ -1392,6 +1394,43 @@ static int check_aamix_out_path(struct hda_codec *codec, int path_idx) | |||
1392 | return snd_hda_get_path_idx(codec, path); | 1394 | return snd_hda_get_path_idx(codec, path); |
1393 | } | 1395 | } |
1394 | 1396 | ||
1397 | /* check whether the independent HP is available with the current config */ | ||
1398 | static bool indep_hp_possible(struct hda_codec *codec) | ||
1399 | { | ||
1400 | struct hda_gen_spec *spec = codec->spec; | ||
1401 | struct auto_pin_cfg *cfg = &spec->autocfg; | ||
1402 | struct nid_path *path; | ||
1403 | int i, idx; | ||
1404 | |||
1405 | if (cfg->line_out_type == AUTO_PIN_HP_OUT) | ||
1406 | idx = spec->out_paths[0]; | ||
1407 | else | ||
1408 | idx = spec->hp_paths[0]; | ||
1409 | path = snd_hda_get_path_from_idx(codec, idx); | ||
1410 | if (!path) | ||
1411 | return false; | ||
1412 | |||
1413 | /* assume no path conflicts unless aamix is involved */ | ||
1414 | if (!spec->mixer_nid || !is_nid_contained(path, spec->mixer_nid)) | ||
1415 | return true; | ||
1416 | |||
1417 | /* check whether output paths contain aamix */ | ||
1418 | for (i = 0; i < cfg->line_outs; i++) { | ||
1419 | if (spec->out_paths[i] == idx) | ||
1420 | break; | ||
1421 | path = snd_hda_get_path_from_idx(codec, spec->out_paths[i]); | ||
1422 | if (path && is_nid_contained(path, spec->mixer_nid)) | ||
1423 | return false; | ||
1424 | } | ||
1425 | for (i = 0; i < cfg->speaker_outs; i++) { | ||
1426 | path = snd_hda_get_path_from_idx(codec, spec->speaker_paths[i]); | ||
1427 | if (path && is_nid_contained(path, spec->mixer_nid)) | ||
1428 | return false; | ||
1429 | } | ||
1430 | |||
1431 | return true; | ||
1432 | } | ||
1433 | |||
1395 | /* fill the empty entries in the dac array for speaker/hp with the | 1434 | /* fill the empty entries in the dac array for speaker/hp with the |
1396 | * shared dac pointed by the paths | 1435 | * shared dac pointed by the paths |
1397 | */ | 1436 | */ |
@@ -1545,6 +1584,9 @@ static int fill_and_eval_dacs(struct hda_codec *codec, | |||
1545 | badness += BAD_MULTI_IO; | 1584 | badness += BAD_MULTI_IO; |
1546 | } | 1585 | } |
1547 | 1586 | ||
1587 | if (spec->indep_hp && !indep_hp_possible(codec)) | ||
1588 | badness += BAD_NO_INDEP_HP; | ||
1589 | |||
1548 | /* re-fill the shared DAC for speaker / headphone */ | 1590 | /* re-fill the shared DAC for speaker / headphone */ |
1549 | if (cfg->line_out_type != AUTO_PIN_HP_OUT) | 1591 | if (cfg->line_out_type != AUTO_PIN_HP_OUT) |
1550 | refill_shared_dacs(codec, cfg->hp_outs, | 1592 | refill_shared_dacs(codec, cfg->hp_outs, |
@@ -1758,6 +1800,10 @@ static int parse_output_paths(struct hda_codec *codec) | |||
1758 | cfg->speaker_pins, val); | 1800 | cfg->speaker_pins, val); |
1759 | } | 1801 | } |
1760 | 1802 | ||
1803 | /* clear indep_hp flag if not available */ | ||
1804 | if (spec->indep_hp && !indep_hp_possible(codec)) | ||
1805 | spec->indep_hp = 0; | ||
1806 | |||
1761 | kfree(best_cfg); | 1807 | kfree(best_cfg); |
1762 | return 0; | 1808 | return 0; |
1763 | } | 1809 | } |
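
The three hda_generic.c hunks above belong together: indep_hp_possible() checks whether the headphone path would collide with the other output paths over the analog-loopback mixer widget, fill_and_eval_dacs() adds the new BAD_NO_INDEP_HP penalty when it would, and parse_output_paths() finally clears the indep_hp flag if the chosen configuration still cannot deliver it. The parser works as a cost model: every undesirable property adds a weighted "badness" and the cheapest candidate configuration wins. The toy sketch below illustrates only that idea; the penalty names, weights and struct layout are invented and much simpler than the real parser.

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

enum {
	PENALTY_SHARED_DAC  = 0x100,
	PENALTY_NO_INDEP_HP = 0x40,
	PENALTY_SHARED_CLFE = 0x10,
};

struct candidate {
	bool shares_dac;
	bool indep_hp_wanted;
	bool indep_hp_possible;
	bool shares_clfe;
};

static int badness(const struct candidate *c)
{
	int b = 0;

	if (c->shares_dac)
		b += PENALTY_SHARED_DAC;
	if (c->indep_hp_wanted && !c->indep_hp_possible)
		b += PENALTY_NO_INDEP_HP;
	if (c->shares_clfe)
		b += PENALTY_SHARED_CLFE;
	return b;
}

static const struct candidate *pick_best(const struct candidate *cands, int n)
{
	const struct candidate *best = NULL;
	int i, best_badness = INT_MAX;

	for (i = 0; i < n; i++) {
		int b = badness(&cands[i]);

		if (b < best_badness) {
			best_badness = b;
			best = &cands[i];
		}
	}
	return best;		/* lowest accumulated penalty wins */
}
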
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 4cea6bb6fade..bcd40ee488e3 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -134,8 +134,8 @@ MODULE_PARM_DESC(power_save, "Automatic power-saving timeout " | |||
134 | * this may give more power-saving, but will take longer time to | 134 | * this may give more power-saving, but will take longer time to |
135 | * wake up. | 135 | * wake up. |
136 | */ | 136 | */ |
137 | static int power_save_controller = -1; | 137 | static bool power_save_controller = 1; |
138 | module_param(power_save_controller, bint, 0644); | 138 | module_param(power_save_controller, bool, 0644); |
139 | MODULE_PARM_DESC(power_save_controller, "Reset controller in power save mode."); | 139 | MODULE_PARM_DESC(power_save_controller, "Reset controller in power save mode."); |
140 | #endif /* CONFIG_PM */ | 140 | #endif /* CONFIG_PM */ |
141 | 141 | ||
@@ -415,6 +415,8 @@ struct azx_dev { | |||
415 | unsigned int opened :1; | 415 | unsigned int opened :1; |
416 | unsigned int running :1; | 416 | unsigned int running :1; |
417 | unsigned int irq_pending :1; | 417 | unsigned int irq_pending :1; |
418 | unsigned int prepared:1; | ||
419 | unsigned int locked:1; | ||
418 | /* | 420 | /* |
419 | * For VIA: | 421 | * For VIA: |
420 | * A flag to ensure DMA position is 0 | 422 | * A flag to ensure DMA position is 0 |
@@ -426,8 +428,25 @@ struct azx_dev { | |||
426 | 428 | ||
427 | struct timecounter azx_tc; | 429 | struct timecounter azx_tc; |
428 | struct cyclecounter azx_cc; | 430 | struct cyclecounter azx_cc; |
431 | |||
432 | #ifdef CONFIG_SND_HDA_DSP_LOADER | ||
433 | struct mutex dsp_mutex; | ||
434 | #endif | ||
429 | }; | 435 | }; |
430 | 436 | ||
437 | /* DSP lock helpers */ | ||
438 | #ifdef CONFIG_SND_HDA_DSP_LOADER | ||
439 | #define dsp_lock_init(dev) mutex_init(&(dev)->dsp_mutex) | ||
440 | #define dsp_lock(dev) mutex_lock(&(dev)->dsp_mutex) | ||
441 | #define dsp_unlock(dev) mutex_unlock(&(dev)->dsp_mutex) | ||
442 | #define dsp_is_locked(dev) ((dev)->locked) | ||
443 | #else | ||
444 | #define dsp_lock_init(dev) do {} while (0) | ||
445 | #define dsp_lock(dev) do {} while (0) | ||
446 | #define dsp_unlock(dev) do {} while (0) | ||
447 | #define dsp_is_locked(dev) 0 | ||
448 | #endif | ||
449 | |||
431 | /* CORB/RIRB */ | 450 | /* CORB/RIRB */ |
432 | struct azx_rb { | 451 | struct azx_rb { |
433 | u32 *buf; /* CORB/RIRB buffer | 452 | u32 *buf; /* CORB/RIRB buffer |
@@ -527,6 +546,10 @@ struct azx { | |||
527 | 546 | ||
528 | /* card list (for power_save trigger) */ | 547 | /* card list (for power_save trigger) */ |
529 | struct list_head list; | 548 | struct list_head list; |
549 | |||
550 | #ifdef CONFIG_SND_HDA_DSP_LOADER | ||
551 | struct azx_dev saved_azx_dev; | ||
552 | #endif | ||
530 | }; | 553 | }; |
531 | 554 | ||
532 | #define CREATE_TRACE_POINTS | 555 | #define CREATE_TRACE_POINTS |
@@ -1793,15 +1816,25 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream) | |||
1793 | dev = chip->capture_index_offset; | 1816 | dev = chip->capture_index_offset; |
1794 | nums = chip->capture_streams; | 1817 | nums = chip->capture_streams; |
1795 | } | 1818 | } |
1796 | for (i = 0; i < nums; i++, dev++) | 1819 | for (i = 0; i < nums; i++, dev++) { |
1797 | if (!chip->azx_dev[dev].opened) { | 1820 | struct azx_dev *azx_dev = &chip->azx_dev[dev]; |
1798 | res = &chip->azx_dev[dev]; | 1821 | dsp_lock(azx_dev); |
1799 | if (res->assigned_key == key) | 1822 | if (!azx_dev->opened && !dsp_is_locked(azx_dev)) { |
1800 | break; | 1823 | res = azx_dev; |
1824 | if (res->assigned_key == key) { | ||
1825 | res->opened = 1; | ||
1826 | res->assigned_key = key; | ||
1827 | dsp_unlock(azx_dev); | ||
1828 | return azx_dev; | ||
1829 | } | ||
1801 | } | 1830 | } |
1831 | dsp_unlock(azx_dev); | ||
1832 | } | ||
1802 | if (res) { | 1833 | if (res) { |
1834 | dsp_lock(res); | ||
1803 | res->opened = 1; | 1835 | res->opened = 1; |
1804 | res->assigned_key = key; | 1836 | res->assigned_key = key; |
1837 | dsp_unlock(res); | ||
1805 | } | 1838 | } |
1806 | return res; | 1839 | return res; |
1807 | } | 1840 | } |
@@ -2009,6 +2042,12 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream, | |||
2009 | struct azx_dev *azx_dev = get_azx_dev(substream); | 2042 | struct azx_dev *azx_dev = get_azx_dev(substream); |
2010 | int ret; | 2043 | int ret; |
2011 | 2044 | ||
2045 | dsp_lock(azx_dev); | ||
2046 | if (dsp_is_locked(azx_dev)) { | ||
2047 | ret = -EBUSY; | ||
2048 | goto unlock; | ||
2049 | } | ||
2050 | |||
2012 | mark_runtime_wc(chip, azx_dev, substream, false); | 2051 | mark_runtime_wc(chip, azx_dev, substream, false); |
2013 | azx_dev->bufsize = 0; | 2052 | azx_dev->bufsize = 0; |
2014 | azx_dev->period_bytes = 0; | 2053 | azx_dev->period_bytes = 0; |
@@ -2016,8 +2055,10 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream, | |||
2016 | ret = snd_pcm_lib_malloc_pages(substream, | 2055 | ret = snd_pcm_lib_malloc_pages(substream, |
2017 | params_buffer_bytes(hw_params)); | 2056 | params_buffer_bytes(hw_params)); |
2018 | if (ret < 0) | 2057 | if (ret < 0) |
2019 | return ret; | 2058 | goto unlock; |
2020 | mark_runtime_wc(chip, azx_dev, substream, true); | 2059 | mark_runtime_wc(chip, azx_dev, substream, true); |
2060 | unlock: | ||
2061 | dsp_unlock(azx_dev); | ||
2021 | return ret; | 2062 | return ret; |
2022 | } | 2063 | } |
2023 | 2064 | ||
@@ -2029,16 +2070,21 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream) | |||
2029 | struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream]; | 2070 | struct hda_pcm_stream *hinfo = apcm->hinfo[substream->stream]; |
2030 | 2071 | ||
2031 | /* reset BDL address */ | 2072 | /* reset BDL address */ |
2032 | azx_sd_writel(azx_dev, SD_BDLPL, 0); | 2073 | dsp_lock(azx_dev); |
2033 | azx_sd_writel(azx_dev, SD_BDLPU, 0); | 2074 | if (!dsp_is_locked(azx_dev)) { |
2034 | azx_sd_writel(azx_dev, SD_CTL, 0); | 2075 | azx_sd_writel(azx_dev, SD_BDLPL, 0); |
2035 | azx_dev->bufsize = 0; | 2076 | azx_sd_writel(azx_dev, SD_BDLPU, 0); |
2036 | azx_dev->period_bytes = 0; | 2077 | azx_sd_writel(azx_dev, SD_CTL, 0); |
2037 | azx_dev->format_val = 0; | 2078 | azx_dev->bufsize = 0; |
2079 | azx_dev->period_bytes = 0; | ||
2080 | azx_dev->format_val = 0; | ||
2081 | } | ||
2038 | 2082 | ||
2039 | snd_hda_codec_cleanup(apcm->codec, hinfo, substream); | 2083 | snd_hda_codec_cleanup(apcm->codec, hinfo, substream); |
2040 | 2084 | ||
2041 | mark_runtime_wc(chip, azx_dev, substream, false); | 2085 | mark_runtime_wc(chip, azx_dev, substream, false); |
2086 | azx_dev->prepared = 0; | ||
2087 | dsp_unlock(azx_dev); | ||
2042 | return snd_pcm_lib_free_pages(substream); | 2088 | return snd_pcm_lib_free_pages(substream); |
2043 | } | 2089 | } |
2044 | 2090 | ||
@@ -2055,6 +2101,12 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) | |||
2055 | snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid); | 2101 | snd_hda_spdif_out_of_nid(apcm->codec, hinfo->nid); |
2056 | unsigned short ctls = spdif ? spdif->ctls : 0; | 2102 | unsigned short ctls = spdif ? spdif->ctls : 0; |
2057 | 2103 | ||
2104 | dsp_lock(azx_dev); | ||
2105 | if (dsp_is_locked(azx_dev)) { | ||
2106 | err = -EBUSY; | ||
2107 | goto unlock; | ||
2108 | } | ||
2109 | |||
2058 | azx_stream_reset(chip, azx_dev); | 2110 | azx_stream_reset(chip, azx_dev); |
2059 | format_val = snd_hda_calc_stream_format(runtime->rate, | 2111 | format_val = snd_hda_calc_stream_format(runtime->rate, |
2060 | runtime->channels, | 2112 | runtime->channels, |
@@ -2065,7 +2117,8 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) | |||
2065 | snd_printk(KERN_ERR SFX | 2117 | snd_printk(KERN_ERR SFX |
2066 | "%s: invalid format_val, rate=%d, ch=%d, format=%d\n", | 2118 | "%s: invalid format_val, rate=%d, ch=%d, format=%d\n", |
2067 | pci_name(chip->pci), runtime->rate, runtime->channels, runtime->format); | 2119 | pci_name(chip->pci), runtime->rate, runtime->channels, runtime->format); |
2068 | return -EINVAL; | 2120 | err = -EINVAL; |
2121 | goto unlock; | ||
2069 | } | 2122 | } |
2070 | 2123 | ||
2071 | bufsize = snd_pcm_lib_buffer_bytes(substream); | 2124 | bufsize = snd_pcm_lib_buffer_bytes(substream); |
@@ -2084,7 +2137,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) | |||
2084 | azx_dev->no_period_wakeup = runtime->no_period_wakeup; | 2137 | azx_dev->no_period_wakeup = runtime->no_period_wakeup; |
2085 | err = azx_setup_periods(chip, substream, azx_dev); | 2138 | err = azx_setup_periods(chip, substream, azx_dev); |
2086 | if (err < 0) | 2139 | if (err < 0) |
2087 | return err; | 2140 | goto unlock; |
2088 | } | 2141 | } |
2089 | 2142 | ||
2090 | /* wallclk has 24Mhz clock source */ | 2143 | /* wallclk has 24Mhz clock source */ |
@@ -2101,8 +2154,14 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) | |||
2101 | if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) && | 2154 | if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) && |
2102 | stream_tag > chip->capture_streams) | 2155 | stream_tag > chip->capture_streams) |
2103 | stream_tag -= chip->capture_streams; | 2156 | stream_tag -= chip->capture_streams; |
2104 | return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag, | 2157 | err = snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag, |
2105 | azx_dev->format_val, substream); | 2158 | azx_dev->format_val, substream); |
2159 | |||
2160 | unlock: | ||
2161 | if (!err) | ||
2162 | azx_dev->prepared = 1; | ||
2163 | dsp_unlock(azx_dev); | ||
2164 | return err; | ||
2106 | } | 2165 | } |
2107 | 2166 | ||
2108 | static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | 2167 | static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) |
@@ -2117,6 +2176,9 @@ static int azx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | |||
2117 | azx_dev = get_azx_dev(substream); | 2176 | azx_dev = get_azx_dev(substream); |
2118 | trace_azx_pcm_trigger(chip, azx_dev, cmd); | 2177 | trace_azx_pcm_trigger(chip, azx_dev, cmd); |
2119 | 2178 | ||
2179 | if (dsp_is_locked(azx_dev) || !azx_dev->prepared) | ||
2180 | return -EPIPE; | ||
2181 | |||
2120 | switch (cmd) { | 2182 | switch (cmd) { |
2121 | case SNDRV_PCM_TRIGGER_START: | 2183 | case SNDRV_PCM_TRIGGER_START: |
2122 | rstart = 1; | 2184 | rstart = 1; |
@@ -2621,17 +2683,27 @@ static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format, | |||
2621 | struct azx_dev *azx_dev; | 2683 | struct azx_dev *azx_dev; |
2622 | int err; | 2684 | int err; |
2623 | 2685 | ||
2624 | if (snd_hda_lock_devices(bus)) | 2686 | azx_dev = azx_get_dsp_loader_dev(chip); |
2625 | return -EBUSY; | 2687 | |
2688 | dsp_lock(azx_dev); | ||
2689 | spin_lock_irq(&chip->reg_lock); | ||
2690 | if (azx_dev->running || azx_dev->locked) { | ||
2691 | spin_unlock_irq(&chip->reg_lock); | ||
2692 | err = -EBUSY; | ||
2693 | goto unlock; | ||
2694 | } | ||
2695 | azx_dev->prepared = 0; | ||
2696 | chip->saved_azx_dev = *azx_dev; | ||
2697 | azx_dev->locked = 1; | ||
2698 | spin_unlock_irq(&chip->reg_lock); | ||
2626 | 2699 | ||
2627 | err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, | 2700 | err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, |
2628 | snd_dma_pci_data(chip->pci), | 2701 | snd_dma_pci_data(chip->pci), |
2629 | byte_size, bufp); | 2702 | byte_size, bufp); |
2630 | if (err < 0) | 2703 | if (err < 0) |
2631 | goto unlock; | 2704 | goto err_alloc; |
2632 | 2705 | ||
2633 | mark_pages_wc(chip, bufp, true); | 2706 | mark_pages_wc(chip, bufp, true); |
2634 | azx_dev = azx_get_dsp_loader_dev(chip); | ||
2635 | azx_dev->bufsize = byte_size; | 2707 | azx_dev->bufsize = byte_size; |
2636 | azx_dev->period_bytes = byte_size; | 2708 | azx_dev->period_bytes = byte_size; |
2637 | azx_dev->format_val = format; | 2709 | azx_dev->format_val = format; |
@@ -2649,13 +2721,20 @@ static int azx_load_dsp_prepare(struct hda_bus *bus, unsigned int format, | |||
2649 | goto error; | 2721 | goto error; |
2650 | 2722 | ||
2651 | azx_setup_controller(chip, azx_dev); | 2723 | azx_setup_controller(chip, azx_dev); |
2724 | dsp_unlock(azx_dev); | ||
2652 | return azx_dev->stream_tag; | 2725 | return azx_dev->stream_tag; |
2653 | 2726 | ||
2654 | error: | 2727 | error: |
2655 | mark_pages_wc(chip, bufp, false); | 2728 | mark_pages_wc(chip, bufp, false); |
2656 | snd_dma_free_pages(bufp); | 2729 | snd_dma_free_pages(bufp); |
2657 | unlock: | 2730 | err_alloc: |
2658 | snd_hda_unlock_devices(bus); | 2731 | spin_lock_irq(&chip->reg_lock); |
2732 | if (azx_dev->opened) | ||
2733 | *azx_dev = chip->saved_azx_dev; | ||
2734 | azx_dev->locked = 0; | ||
2735 | spin_unlock_irq(&chip->reg_lock); | ||
2736 | unlock: | ||
2737 | dsp_unlock(azx_dev); | ||
2659 | return err; | 2738 | return err; |
2660 | } | 2739 | } |
2661 | 2740 | ||
@@ -2677,9 +2756,10 @@ static void azx_load_dsp_cleanup(struct hda_bus *bus, | |||
2677 | struct azx *chip = bus->private_data; | 2756 | struct azx *chip = bus->private_data; |
2678 | struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip); | 2757 | struct azx_dev *azx_dev = azx_get_dsp_loader_dev(chip); |
2679 | 2758 | ||
2680 | if (!dmab->area) | 2759 | if (!dmab->area || !azx_dev->locked) |
2681 | return; | 2760 | return; |
2682 | 2761 | ||
2762 | dsp_lock(azx_dev); | ||
2683 | /* reset BDL address */ | 2763 | /* reset BDL address */ |
2684 | azx_sd_writel(azx_dev, SD_BDLPL, 0); | 2764 | azx_sd_writel(azx_dev, SD_BDLPL, 0); |
2685 | azx_sd_writel(azx_dev, SD_BDLPU, 0); | 2765 | azx_sd_writel(azx_dev, SD_BDLPU, 0); |
@@ -2692,7 +2772,12 @@ static void azx_load_dsp_cleanup(struct hda_bus *bus, | |||
2692 | snd_dma_free_pages(dmab); | 2772 | snd_dma_free_pages(dmab); |
2693 | dmab->area = NULL; | 2773 | dmab->area = NULL; |
2694 | 2774 | ||
2695 | snd_hda_unlock_devices(bus); | 2775 | spin_lock_irq(&chip->reg_lock); |
2776 | if (azx_dev->opened) | ||
2777 | *azx_dev = chip->saved_azx_dev; | ||
2778 | azx_dev->locked = 0; | ||
2779 | spin_unlock_irq(&chip->reg_lock); | ||
2780 | dsp_unlock(azx_dev); | ||
2696 | } | 2781 | } |
2697 | #endif /* CONFIG_SND_HDA_DSP_LOADER */ | 2782 | #endif /* CONFIG_SND_HDA_DSP_LOADER */ |
2698 | 2783 | ||
@@ -2846,8 +2931,6 @@ static int azx_runtime_idle(struct device *dev) | |||
2846 | struct snd_card *card = dev_get_drvdata(dev); | 2931 | struct snd_card *card = dev_get_drvdata(dev); |
2847 | struct azx *chip = card->private_data; | 2932 | struct azx *chip = card->private_data; |
2848 | 2933 | ||
2849 | if (power_save_controller > 0) | ||
2850 | return 0; | ||
2851 | if (!power_save_controller || | 2934 | if (!power_save_controller || |
2852 | !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) | 2935 | !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) |
2853 | return -EBUSY; | 2936 | return -EBUSY; |
@@ -3481,6 +3564,7 @@ static int azx_first_init(struct azx *chip) | |||
3481 | } | 3564 | } |
3482 | 3565 | ||
3483 | for (i = 0; i < chip->num_streams; i++) { | 3566 | for (i = 0; i < chip->num_streams; i++) { |
3567 | dsp_lock_init(&chip->azx_dev[i]); | ||
3484 | /* allocate memory for the BDL for each stream */ | 3568 | /* allocate memory for the BDL for each stream */ |
3485 | err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, | 3569 | err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, |
3486 | snd_dma_pci_data(chip->pci), | 3570 | snd_dma_pci_data(chip->pci), |
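
Most of the hda_intel.c diff revolves around the dsp_lock()/dsp_is_locked() helpers introduced near the top: with CONFIG_SND_HDA_DSP_LOADER disabled they compile to no-ops, so the PCM paths can call them unconditionally, while the DSP loader (when built in) can fence off a stream via the locked flag. The sketch below shows only that compile-out helper idiom; CONFIG_DEMO_FEATURE and the demo_* names are invented.

#include <linux/errno.h>
#include <linux/mutex.h>

struct demo_dev {
	unsigned int busy:1;
#ifdef CONFIG_DEMO_FEATURE
	struct mutex feat_mutex;
#endif
};

#ifdef CONFIG_DEMO_FEATURE
#define demo_lock_init(d)	mutex_init(&(d)->feat_mutex)
#define demo_lock(d)		mutex_lock(&(d)->feat_mutex)
#define demo_unlock(d)		mutex_unlock(&(d)->feat_mutex)
#else
#define demo_lock_init(d)	do {} while (0)
#define demo_lock(d)		do {} while (0)
#define demo_unlock(d)		do {} while (0)
#endif

/* Identical call sites whether or not the feature is configured in. */
static int demo_claim(struct demo_dev *d)
{
	int ret = 0;

	demo_lock(d);
	if (d->busy)
		ret = -EBUSY;
	else
		d->busy = 1;
	demo_unlock(d);
	return ret;
}
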
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 60d08f669f0c..0d9c58f13560 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c | |||
@@ -168,10 +168,10 @@ static void cs_automute(struct hda_codec *codec) | |||
168 | snd_hda_gen_update_outputs(codec); | 168 | snd_hda_gen_update_outputs(codec); |
169 | 169 | ||
170 | if (spec->gpio_eapd_hp) { | 170 | if (spec->gpio_eapd_hp) { |
171 | unsigned int gpio = spec->gen.hp_jack_present ? | 171 | spec->gpio_data = spec->gen.hp_jack_present ? |
172 | spec->gpio_eapd_hp : spec->gpio_eapd_speaker; | 172 | spec->gpio_eapd_hp : spec->gpio_eapd_speaker; |
173 | snd_hda_codec_write(codec, 0x01, 0, | 173 | snd_hda_codec_write(codec, 0x01, 0, |
174 | AC_VERB_SET_GPIO_DATA, gpio); | 174 | AC_VERB_SET_GPIO_DATA, spec->gpio_data); |
175 | } | 175 | } |
176 | } | 176 | } |
177 | 177 | ||
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 941bf6c766ec..2a89d1eefeb6 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -1142,7 +1142,7 @@ static int patch_cxt5045(struct hda_codec *codec) | |||
1142 | } | 1142 | } |
1143 | 1143 | ||
1144 | if (spec->beep_amp) | 1144 | if (spec->beep_amp) |
1145 | snd_hda_attach_beep_device(codec, spec->beep_amp); | 1145 | snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); |
1146 | 1146 | ||
1147 | return 0; | 1147 | return 0; |
1148 | } | 1148 | } |
@@ -1921,7 +1921,7 @@ static int patch_cxt5051(struct hda_codec *codec) | |||
1921 | } | 1921 | } |
1922 | 1922 | ||
1923 | if (spec->beep_amp) | 1923 | if (spec->beep_amp) |
1924 | snd_hda_attach_beep_device(codec, spec->beep_amp); | 1924 | snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); |
1925 | 1925 | ||
1926 | return 0; | 1926 | return 0; |
1927 | } | 1927 | } |
@@ -3099,7 +3099,7 @@ static int patch_cxt5066(struct hda_codec *codec) | |||
3099 | } | 3099 | } |
3100 | 3100 | ||
3101 | if (spec->beep_amp) | 3101 | if (spec->beep_amp) |
3102 | snd_hda_attach_beep_device(codec, spec->beep_amp); | 3102 | snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); |
3103 | 3103 | ||
3104 | return 0; | 3104 | return 0; |
3105 | } | 3105 | } |
@@ -3191,11 +3191,17 @@ static int cx_auto_build_controls(struct hda_codec *codec) | |||
3191 | return 0; | 3191 | return 0; |
3192 | } | 3192 | } |
3193 | 3193 | ||
3194 | static void cx_auto_free(struct hda_codec *codec) | ||
3195 | { | ||
3196 | snd_hda_detach_beep_device(codec); | ||
3197 | snd_hda_gen_free(codec); | ||
3198 | } | ||
3199 | |||
3194 | static const struct hda_codec_ops cx_auto_patch_ops = { | 3200 | static const struct hda_codec_ops cx_auto_patch_ops = { |
3195 | .build_controls = cx_auto_build_controls, | 3201 | .build_controls = cx_auto_build_controls, |
3196 | .build_pcms = snd_hda_gen_build_pcms, | 3202 | .build_pcms = snd_hda_gen_build_pcms, |
3197 | .init = snd_hda_gen_init, | 3203 | .init = snd_hda_gen_init, |
3198 | .free = snd_hda_gen_free, | 3204 | .free = cx_auto_free, |
3199 | .unsol_event = snd_hda_jack_unsol_event, | 3205 | .unsol_event = snd_hda_jack_unsol_event, |
3200 | #ifdef CONFIG_PM | 3206 | #ifdef CONFIG_PM |
3201 | .check_power_status = snd_hda_gen_check_power_status, | 3207 | .check_power_status = snd_hda_gen_check_power_status, |
@@ -3391,7 +3397,7 @@ static int patch_conexant_auto(struct hda_codec *codec) | |||
3391 | 3397 | ||
3392 | codec->patch_ops = cx_auto_patch_ops; | 3398 | codec->patch_ops = cx_auto_patch_ops; |
3393 | if (spec->beep_amp) | 3399 | if (spec->beep_amp) |
3394 | snd_hda_attach_beep_device(codec, spec->beep_amp); | 3400 | snd_hda_attach_beep_device(codec, get_amp_nid_(spec->beep_amp)); |
3395 | 3401 | ||
3396 | /* Some laptops with Conexant chips show stalls in S3 resume, | 3402 | /* Some laptops with Conexant chips show stalls in S3 resume, |
3397 | * which falls into the single-cmd mode. | 3403 | * which falls into the single-cmd mode. |
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 78e1827d0a95..de8ac5c07fd0 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
@@ -1196,7 +1196,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll) | |||
1196 | 1196 | ||
1197 | _snd_printd(SND_PR_VERBOSE, | 1197 | _snd_printd(SND_PR_VERBOSE, |
1198 | "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", | 1198 | "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", |
1199 | codec->addr, pin_nid, eld->monitor_present, eld->eld_valid); | 1199 | codec->addr, pin_nid, pin_eld->monitor_present, eld->eld_valid); |
1200 | 1200 | ||
1201 | if (eld->eld_valid) { | 1201 | if (eld->eld_valid) { |
1202 | if (snd_hdmi_get_eld(codec, pin_nid, eld->eld_buffer, | 1202 | if (snd_hdmi_get_eld(codec, pin_nid, eld->eld_buffer, |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 563c24df4d6f..f15c36bde540 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -3440,7 +3440,8 @@ static int alc662_parse_auto_config(struct hda_codec *codec) | |||
3440 | const hda_nid_t *ssids; | 3440 | const hda_nid_t *ssids; |
3441 | 3441 | ||
3442 | if (codec->vendor_id == 0x10ec0272 || codec->vendor_id == 0x10ec0663 || | 3442 | if (codec->vendor_id == 0x10ec0272 || codec->vendor_id == 0x10ec0663 || |
3443 | codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670) | 3443 | codec->vendor_id == 0x10ec0665 || codec->vendor_id == 0x10ec0670 || |
3444 | codec->vendor_id == 0x10ec0671) | ||
3444 | ssids = alc663_ssids; | 3445 | ssids = alc663_ssids; |
3445 | else | 3446 | else |
3446 | ssids = alc662_ssids; | 3447 | ssids = alc662_ssids; |
@@ -3894,6 +3895,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { | |||
3894 | { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 }, | 3895 | { .id = 0x10ec0665, .name = "ALC665", .patch = patch_alc662 }, |
3895 | { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 }, | 3896 | { .id = 0x10ec0668, .name = "ALC668", .patch = patch_alc662 }, |
3896 | { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 }, | 3897 | { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 }, |
3898 | { .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 }, | ||
3897 | { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 }, | 3899 | { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 }, |
3898 | { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 }, | 3900 | { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 }, |
3899 | { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 }, | 3901 | { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 }, |
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c index fc176044994d..fc176044994d 100755..100644 --- a/sound/soc/codecs/max98090.c +++ b/sound/soc/codecs/max98090.c | |||
diff --git a/sound/soc/codecs/max98090.h b/sound/soc/codecs/max98090.h index 7e103f249053..7e103f249053 100755..100644 --- a/sound/soc/codecs/max98090.h +++ b/sound/soc/codecs/max98090.h | |||
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c index f2d61a187830..566ea3256e2d 100644 --- a/sound/soc/codecs/si476x.c +++ b/sound/soc/codecs/si476x.c | |||
@@ -159,6 +159,7 @@ static int si476x_codec_hw_params(struct snd_pcm_substream *substream, | |||
159 | switch (params_format(params)) { | 159 | switch (params_format(params)) { |
160 | case SNDRV_PCM_FORMAT_S8: | 160 | case SNDRV_PCM_FORMAT_S8: |
161 | width = SI476X_PCM_FORMAT_S8; | 161 | width = SI476X_PCM_FORMAT_S8; |
162 | break; | ||
162 | case SNDRV_PCM_FORMAT_S16_LE: | 163 | case SNDRV_PCM_FORMAT_S16_LE: |
163 | width = SI476X_PCM_FORMAT_S16_LE; | 164 | width = SI476X_PCM_FORMAT_S16_LE; |
164 | break; | 165 | break; |
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c index b82bbf584146..34d0201d6a78 100644 --- a/sound/soc/codecs/wm5102.c +++ b/sound/soc/codecs/wm5102.c | |||
@@ -584,7 +584,7 @@ static int wm5102_sysclk_ev(struct snd_soc_dapm_widget *w, | |||
584 | struct snd_kcontrol *kcontrol, int event) | 584 | struct snd_kcontrol *kcontrol, int event) |
585 | { | 585 | { |
586 | struct snd_soc_codec *codec = w->codec; | 586 | struct snd_soc_codec *codec = w->codec; |
587 | struct arizona *arizona = dev_get_drvdata(codec->dev); | 587 | struct arizona *arizona = dev_get_drvdata(codec->dev->parent); |
588 | struct regmap *regmap = codec->control_data; | 588 | struct regmap *regmap = codec->control_data; |
589 | const struct reg_default *patch = NULL; | 589 | const struct reg_default *patch = NULL; |
590 | int i, patch_size; | 590 | int i, patch_size; |
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index 134e41c870b9..f8a31ad0b203 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c | |||
@@ -1083,6 +1083,8 @@ static const struct snd_soc_dapm_route wm8903_intercon[] = { | |||
1083 | { "ROP", NULL, "Right Speaker PGA" }, | 1083 | { "ROP", NULL, "Right Speaker PGA" }, |
1084 | { "RON", NULL, "Right Speaker PGA" }, | 1084 | { "RON", NULL, "Right Speaker PGA" }, |
1085 | 1085 | ||
1086 | { "Charge Pump", NULL, "CLK_DSP" }, | ||
1087 | |||
1086 | { "Left Headphone Output PGA", NULL, "Charge Pump" }, | 1088 | { "Left Headphone Output PGA", NULL, "Charge Pump" }, |
1087 | { "Right Headphone Output PGA", NULL, "Charge Pump" }, | 1089 | { "Right Headphone Output PGA", NULL, "Charge Pump" }, |
1088 | { "Left Line Output PGA", NULL, "Charge Pump" }, | 1090 | { "Left Line Output PGA", NULL, "Charge Pump" }, |
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c index f3f7e75f8628..9af1bddc4c62 100644 --- a/sound/soc/codecs/wm_adsp.c +++ b/sound/soc/codecs/wm_adsp.c | |||
@@ -828,7 +828,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp) | |||
828 | &buf_list); | 828 | &buf_list); |
829 | if (!buf) { | 829 | if (!buf) { |
830 | adsp_err(dsp, "Out of memory\n"); | 830 | adsp_err(dsp, "Out of memory\n"); |
831 | return -ENOMEM; | 831 | ret = -ENOMEM; |
832 | goto out_fw; | ||
832 | } | 833 | } |
833 | 834 | ||
834 | adsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n", | 835 | adsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n", |
@@ -865,7 +866,7 @@ out_fw: | |||
865 | wm_adsp_buf_free(&buf_list); | 866 | wm_adsp_buf_free(&buf_list); |
866 | out: | 867 | out: |
867 | kfree(file); | 868 | kfree(file); |
868 | return 0; | 869 | return ret; |
869 | } | 870 | } |
870 | 871 | ||
871 | int wm_adsp1_init(struct wm_adsp *adsp) | 872 | int wm_adsp1_init(struct wm_adsp *adsp) |
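
The wm_adsp_load_coeff() fix above is the classic single-exit cleanup shape: instead of returning -ENOMEM directly, which skipped the out_fw unwinding, the error is stored in ret and control jumps to the existing label, and the function now returns ret rather than a hard-coded 0. A self-contained sketch of the idiom, using hypothetical demo_load() names rather than the wm_adsp code:

#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/string.h>

static int demo_load(struct device *dev, const char *name)
{
	const struct firmware *fw;
	void *buf;
	int ret;

	ret = request_firmware(&fw, name, dev);
	if (ret != 0)
		return ret;

	buf = kmalloc(fw->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;	/* record the error ... */
		goto out_fw;	/* ... and unwind instead of returning */
	}

	memcpy(buf, fw->data, fw->size);

	kfree(buf);
out_fw:
	release_firmware(fw);
	return ret;		/* 0 on success, the saved error otherwise */
}
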
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c index 55464a5b0706..810c7eeb7b03 100644 --- a/sound/soc/fsl/imx-ssi.c +++ b/sound/soc/fsl/imx-ssi.c | |||
@@ -496,6 +496,8 @@ static void imx_ssi_ac97_reset(struct snd_ac97 *ac97) | |||
496 | 496 | ||
497 | if (imx_ssi->ac97_reset) | 497 | if (imx_ssi->ac97_reset) |
498 | imx_ssi->ac97_reset(ac97); | 498 | imx_ssi->ac97_reset(ac97); |
499 | /* First read sometimes fails, do a dummy read */ | ||
500 | imx_ssi_ac97_read(ac97, 0); | ||
499 | } | 501 | } |
500 | 502 | ||
501 | static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97) | 503 | static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97) |
@@ -504,6 +506,9 @@ static void imx_ssi_ac97_warm_reset(struct snd_ac97 *ac97) | |||
504 | 506 | ||
505 | if (imx_ssi->ac97_warm_reset) | 507 | if (imx_ssi->ac97_warm_reset) |
506 | imx_ssi->ac97_warm_reset(ac97); | 508 | imx_ssi->ac97_warm_reset(ac97); |
509 | |||
510 | /* First read sometimes fails, do a dummy read */ | ||
511 | imx_ssi_ac97_read(ac97, 0); | ||
507 | } | 512 | } |
508 | 513 | ||
509 | struct snd_ac97_bus_ops soc_ac97_ops = { | 514 | struct snd_ac97_bus_ops soc_ac97_ops = { |
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c index 8e52c1485df3..eb4373840bb6 100644 --- a/sound/soc/fsl/pcm030-audio-fabric.c +++ b/sound/soc/fsl/pcm030-audio-fabric.c | |||
@@ -51,7 +51,7 @@ static struct snd_soc_card pcm030_card = { | |||
51 | .num_links = ARRAY_SIZE(pcm030_fabric_dai), | 51 | .num_links = ARRAY_SIZE(pcm030_fabric_dai), |
52 | }; | 52 | }; |
53 | 53 | ||
54 | static int __init pcm030_fabric_probe(struct platform_device *op) | 54 | static int pcm030_fabric_probe(struct platform_device *op) |
55 | { | 55 | { |
56 | struct device_node *np = op->dev.of_node; | 56 | struct device_node *np = op->dev.of_node; |
57 | struct device_node *platform_np; | 57 | struct device_node *platform_np; |
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c index d7231e336a7c..6bbeb0bf1a73 100644 --- a/sound/soc/samsung/i2s.c +++ b/sound/soc/samsung/i2s.c | |||
@@ -972,6 +972,7 @@ static const struct snd_soc_dai_ops samsung_i2s_dai_ops = { | |||
972 | static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec) | 972 | static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec) |
973 | { | 973 | { |
974 | struct i2s_dai *i2s; | 974 | struct i2s_dai *i2s; |
975 | int ret; | ||
975 | 976 | ||
976 | i2s = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dai), GFP_KERNEL); | 977 | i2s = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dai), GFP_KERNEL); |
977 | if (i2s == NULL) | 978 | if (i2s == NULL) |
@@ -996,15 +997,17 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec) | |||
996 | i2s->i2s_dai_drv.capture.channels_max = 2; | 997 | i2s->i2s_dai_drv.capture.channels_max = 2; |
997 | i2s->i2s_dai_drv.capture.rates = SAMSUNG_I2S_RATES; | 998 | i2s->i2s_dai_drv.capture.rates = SAMSUNG_I2S_RATES; |
998 | i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS; | 999 | i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS; |
1000 | dev_set_drvdata(&i2s->pdev->dev, i2s); | ||
999 | } else { /* Create a new platform_device for Secondary */ | 1001 | } else { /* Create a new platform_device for Secondary */ |
1000 | i2s->pdev = platform_device_register_resndata(NULL, | 1002 | i2s->pdev = platform_device_alloc("samsung-i2s-sec", -1); |
1001 | "samsung-i2s-sec", -1, NULL, 0, NULL, 0); | ||
1002 | if (IS_ERR(i2s->pdev)) | 1003 | if (IS_ERR(i2s->pdev)) |
1003 | return NULL; | 1004 | return NULL; |
1004 | } | ||
1005 | 1005 | ||
1006 | /* Pre-assign snd_soc_dai_set_drvdata */ | 1006 | platform_set_drvdata(i2s->pdev, i2s); |
1007 | dev_set_drvdata(&i2s->pdev->dev, i2s); | 1007 | ret = platform_device_add(i2s->pdev); |
1008 | if (ret < 0) | ||
1009 | return NULL; | ||
1010 | } | ||
1008 | 1011 | ||
1009 | return i2s; | 1012 | return i2s; |
1010 | } | 1013 | } |
@@ -1107,6 +1110,10 @@ static int samsung_i2s_probe(struct platform_device *pdev) | |||
1107 | 1110 | ||
1108 | if (samsung_dai_type == TYPE_SEC) { | 1111 | if (samsung_dai_type == TYPE_SEC) { |
1109 | sec_dai = dev_get_drvdata(&pdev->dev); | 1112 | sec_dai = dev_get_drvdata(&pdev->dev); |
1113 | if (!sec_dai) { | ||
1114 | dev_err(&pdev->dev, "Unable to get drvdata\n"); | ||
1115 | return -EFAULT; | ||
1116 | } | ||
1110 | snd_soc_register_dai(&sec_dai->pdev->dev, | 1117 | snd_soc_register_dai(&sec_dai->pdev->dev, |
1111 | &sec_dai->i2s_dai_drv); | 1118 | &sec_dai->i2s_dai_drv); |
1112 | asoc_dma_platform_register(&pdev->dev); | 1119 | asoc_dma_platform_register(&pdev->dev); |
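
The samsung/i2s.c change replaces the one-shot platform_device_register_resndata() call with an explicit alloc / set-drvdata / add sequence, and samsung_i2s_probe() now rejects a secondary DAI whose drvdata is missing. The ordering is the interesting part: platform_device_add() may bind and probe the child immediately, so any driver data the child's probe relies on has to be attached before the add. A sketch of that sequence with an invented "demo-child" device name:

#include <linux/platform_device.h>

static struct platform_device *demo_spawn_child(void *parent_data)
{
	struct platform_device *pdev;

	pdev = platform_device_alloc("demo-child", -1);
	if (!pdev)
		return NULL;

	/* Attach data first: the child's probe may run during _add(). */
	platform_set_drvdata(pdev, parent_data);

	if (platform_device_add(pdev)) {
		platform_device_put(pdev);
		return NULL;
	}
	return pdev;
}
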
diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c index 19eff8fc4fdd..1a8b03e4b41b 100644 --- a/sound/soc/sh/dma-sh7760.c +++ b/sound/soc/sh/dma-sh7760.c | |||
@@ -342,8 +342,8 @@ static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd) | |||
342 | return 0; | 342 | return 0; |
343 | } | 343 | } |
344 | 344 | ||
345 | static struct snd_soc_platform sh7760_soc_platform = { | 345 | static struct snd_soc_platform_driver sh7760_soc_platform = { |
346 | .pcm_ops = &camelot_pcm_ops, | 346 | .ops = &camelot_pcm_ops, |
347 | .pcm_new = camelot_pcm_new, | 347 | .pcm_new = camelot_pcm_new, |
348 | .pcm_free = camelot_pcm_free, | 348 | .pcm_free = camelot_pcm_free, |
349 | }; | 349 | }; |
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c index b5b3db71e253..ed0bfb0ddb96 100644 --- a/sound/soc/soc-compress.c +++ b/sound/soc/soc-compress.c | |||
@@ -211,19 +211,27 @@ static int soc_compr_set_params(struct snd_compr_stream *cstream, | |||
211 | if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) { | 211 | if (platform->driver->compr_ops && platform->driver->compr_ops->set_params) { |
212 | ret = platform->driver->compr_ops->set_params(cstream, params); | 212 | ret = platform->driver->compr_ops->set_params(cstream, params); |
213 | if (ret < 0) | 213 | if (ret < 0) |
214 | goto out; | 214 | goto err; |
215 | } | 215 | } |
216 | 216 | ||
217 | if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->set_params) { | 217 | if (rtd->dai_link->compr_ops && rtd->dai_link->compr_ops->set_params) { |
218 | ret = rtd->dai_link->compr_ops->set_params(cstream); | 218 | ret = rtd->dai_link->compr_ops->set_params(cstream); |
219 | if (ret < 0) | 219 | if (ret < 0) |
220 | goto out; | 220 | goto err; |
221 | } | 221 | } |
222 | 222 | ||
223 | snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK, | 223 | snd_soc_dapm_stream_event(rtd, SNDRV_PCM_STREAM_PLAYBACK, |
224 | SND_SOC_DAPM_STREAM_START); | 224 | SND_SOC_DAPM_STREAM_START); |
225 | 225 | ||
226 | out: | 226 | /* cancel any delayed stream shutdown that is pending */ |
227 | rtd->pop_wait = 0; | ||
228 | mutex_unlock(&rtd->pcm_mutex); | ||
229 | |||
230 | cancel_delayed_work_sync(&rtd->delayed_work); | ||
231 | |||
232 | return ret; | ||
233 | |||
234 | err: | ||
227 | mutex_unlock(&rtd->pcm_mutex); | 235 | mutex_unlock(&rtd->pcm_mutex); |
228 | return ret; | 236 | return ret; |
229 | } | 237 | } |
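
The soc_compr_set_params() rework above ends a successful call by clearing pop_wait, releasing pcm_mutex and only then calling cancel_delayed_work_sync(), so a delayed power-down left over from a previous session cannot shut the stream down underneath the new one. The unlock-before-cancel ordering matters because the _sync variant waits for a running work handler, and that handler presumably takes the same mutex. A minimal sketch of the rule with invented demo_* names:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct demo_stream {
	struct mutex lock;
	struct delayed_work shutdown_work;
	bool pop_wait;
};

static void demo_shutdown_work(struct work_struct *work)
{
	struct demo_stream *s =
		container_of(work, struct demo_stream, shutdown_work.work);

	mutex_lock(&s->lock);		/* same lock as demo_start() below */
	if (s->pop_wait) {
		s->pop_wait = false;
		/* ... power the stream down here ... */
	}
	mutex_unlock(&s->lock);
}

static void demo_init(struct demo_stream *s)
{
	mutex_init(&s->lock);
	INIT_DELAYED_WORK(&s->shutdown_work, demo_shutdown_work);
}

static void demo_start(struct demo_stream *s)
{
	mutex_lock(&s->lock);
	s->pop_wait = false;		/* delayed shutdown no longer wanted */
	mutex_unlock(&s->lock);

	/* Safe only after unlocking: _sync waits for demo_shutdown_work(),
	 * which takes s->lock as well. */
	cancel_delayed_work_sync(&s->shutdown_work);
}
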
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index b7e84a7cd9ee..ff4b45a5d796 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -2963,7 +2963,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, | |||
2963 | val = val << shift; | 2963 | val = val << shift; |
2964 | 2964 | ||
2965 | ret = snd_soc_update_bits_locked(codec, reg, val_mask, val); | 2965 | ret = snd_soc_update_bits_locked(codec, reg, val_mask, val); |
2966 | if (ret != 0) | 2966 | if (ret < 0) |
2967 | return ret; | 2967 | return ret; |
2968 | 2968 | ||
2969 | if (snd_soc_volsw_is_stereo(mc)) { | 2969 | if (snd_soc_volsw_is_stereo(mc)) { |
@@ -3140,7 +3140,7 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol, | |||
3140 | if (params->mask) { | 3140 | if (params->mask) { |
3141 | ret = regmap_read(codec->control_data, params->base, &val); | 3141 | ret = regmap_read(codec->control_data, params->base, &val); |
3142 | if (ret != 0) | 3142 | if (ret != 0) |
3143 | return ret; | 3143 | goto out; |
3144 | 3144 | ||
3145 | val &= params->mask; | 3145 | val &= params->mask; |
3146 | 3146 | ||
@@ -3158,13 +3158,15 @@ int snd_soc_bytes_put(struct snd_kcontrol *kcontrol, | |||
3158 | ((u32 *)data)[0] |= cpu_to_be32(val); | 3158 | ((u32 *)data)[0] |= cpu_to_be32(val); |
3159 | break; | 3159 | break; |
3160 | default: | 3160 | default: |
3161 | return -EINVAL; | 3161 | ret = -EINVAL; |
3162 | goto out; | ||
3162 | } | 3163 | } |
3163 | } | 3164 | } |
3164 | 3165 | ||
3165 | ret = regmap_raw_write(codec->control_data, params->base, | 3166 | ret = regmap_raw_write(codec->control_data, params->base, |
3166 | data, len); | 3167 | data, len); |
3167 | 3168 | ||
3169 | out: | ||
3168 | kfree(data); | 3170 | kfree(data); |
3169 | 3171 | ||
3170 | return ret; | 3172 | return ret; |
@@ -4197,7 +4199,6 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, | |||
4197 | dev_err(card->dev, | 4199 | dev_err(card->dev, |
4198 | "ASoC: Property '%s' index %d could not be read: %d\n", | 4200 | "ASoC: Property '%s' index %d could not be read: %d\n", |
4199 | propname, 2 * i, ret); | 4201 | propname, 2 * i, ret); |
4200 | kfree(routes); | ||
4201 | return -EINVAL; | 4202 | return -EINVAL; |
4202 | } | 4203 | } |
4203 | ret = of_property_read_string_index(np, propname, | 4204 | ret = of_property_read_string_index(np, propname, |
@@ -4206,7 +4207,6 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, | |||
4206 | dev_err(card->dev, | 4207 | dev_err(card->dev, |
4207 | "ASoC: Property '%s' index %d could not be read: %d\n", | 4208 | "ASoC: Property '%s' index %d could not be read: %d\n", |
4208 | propname, (2 * i) + 1, ret); | 4209 | propname, (2 * i) + 1, ret); |
4209 | kfree(routes); | ||
4210 | return -EINVAL; | 4210 | return -EINVAL; |
4211 | } | 4211 | } |
4212 | } | 4212 | } |
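
Two separate kinds of fix share this soc-core.c diff. The snd_soc_put_volsw_range() hunk changes the error test because snd_soc_update_bits_locked() follows the common "negative errno on failure, 0 for no change, 1 for changed" convention, so `ret != 0` had been treating a successful register update as an error. The snd_soc_bytes_put() hunk makes every exit path free the temporary data buffer, and the of_parse_audio_routing() hunk stops kfree()ing a buffer that appears to be devm_-managed and therefore freed by the device core itself. A tiny sketch of the return-value convention behind the first fix, with invented demo_* helpers:

static int demo_update_bits(unsigned int *reg, unsigned int mask,
			    unsigned int val)
{
	unsigned int old = *reg, new = (old & ~mask) | (val & mask);

	if (new == old)
		return 0;	/* no change */
	*reg = new;
	return 1;		/* changed */
}

static int demo_put(unsigned int *reg)
{
	int ret = demo_update_bits(reg, 0xff, 0x12);

	if (ret < 0)		/* not `ret != 0`: 1 only means "changed" */
		return ret;
	/* ... carry on, e.g. update the second channel ... */
	return 0;
}
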
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 1d6a9b3ceb27..d6d9ba2e6916 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -831,6 +831,9 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget, | |||
831 | if (path->weak) | 831 | if (path->weak) |
832 | continue; | 832 | continue; |
833 | 833 | ||
834 | if (path->walking) | ||
835 | return 1; | ||
836 | |||
834 | if (path->walked) | 837 | if (path->walked) |
835 | continue; | 838 | continue; |
836 | 839 | ||
@@ -838,6 +841,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget, | |||
838 | 841 | ||
839 | if (path->sink && path->connect) { | 842 | if (path->sink && path->connect) { |
840 | path->walked = 1; | 843 | path->walked = 1; |
844 | path->walking = 1; | ||
841 | 845 | ||
842 | /* do we need to add this widget to the list ? */ | 846 | /* do we need to add this widget to the list ? */ |
843 | if (list) { | 847 | if (list) { |
@@ -847,11 +851,14 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget, | |||
847 | dev_err(widget->dapm->dev, | 851 | dev_err(widget->dapm->dev, |
848 | "ASoC: could not add widget %s\n", | 852 | "ASoC: could not add widget %s\n", |
849 | widget->name); | 853 | widget->name); |
854 | path->walking = 0; | ||
850 | return con; | 855 | return con; |
851 | } | 856 | } |
852 | } | 857 | } |
853 | 858 | ||
854 | con += is_connected_output_ep(path->sink, list); | 859 | con += is_connected_output_ep(path->sink, list); |
860 | |||
861 | path->walking = 0; | ||
855 | } | 862 | } |
856 | } | 863 | } |
857 | 864 | ||
@@ -931,6 +938,9 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, | |||
931 | if (path->weak) | 938 | if (path->weak) |
932 | continue; | 939 | continue; |
933 | 940 | ||
941 | if (path->walking) | ||
942 | return 1; | ||
943 | |||
934 | if (path->walked) | 944 | if (path->walked) |
935 | continue; | 945 | continue; |
936 | 946 | ||
@@ -938,6 +948,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, | |||
938 | 948 | ||
939 | if (path->source && path->connect) { | 949 | if (path->source && path->connect) { |
940 | path->walked = 1; | 950 | path->walked = 1; |
951 | path->walking = 1; | ||
941 | 952 | ||
942 | /* do we need to add this widget to the list ? */ | 953 | /* do we need to add this widget to the list ? */ |
943 | if (list) { | 954 | if (list) { |
@@ -947,11 +958,14 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, | |||
947 | dev_err(widget->dapm->dev, | 958 | dev_err(widget->dapm->dev, |
948 | "ASoC: could not add widget %s\n", | 959 | "ASoC: could not add widget %s\n", |
949 | widget->name); | 960 | widget->name); |
961 | path->walking = 0; | ||
950 | return con; | 962 | return con; |
951 | } | 963 | } |
952 | } | 964 | } |
953 | 965 | ||
954 | con += is_connected_input_ep(path->source, list); | 966 | con += is_connected_input_ep(path->source, list); |
967 | |||
968 | path->walking = 0; | ||
955 | } | 969 | } |
956 | } | 970 | } |
957 | 971 | ||
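
The soc-dapm.c hunks add a walking flag next to the existing walked flag in both graph walkers. walked only remembers paths that have already been counted; it does not, by itself, stop a loop in the routing graph from re-entering an element that is still being expanded, which could recurse without bound. Marking an element for the duration of its own expansion is the standard cycle guard for a depth-first walk. The sketch below shows just that guard on a toy node graph; the real code keeps the flags on paths, caches per-widget results, and returns early instead of skipping.

#include <stdbool.h>

#define MAX_EDGES 8

struct node {
	bool endpoint;
	bool walked;		/* fully expanded earlier in this query */
	bool walking;		/* currently on the recursion stack */
	struct node *out[MAX_EDGES];
	int nr_out;
};

static int count_reachable_endpoints(struct node *n)
{
	int i, con = n->endpoint ? 1 : 0;

	n->walking = true;		/* "gray": being expanded right now */

	for (i = 0; i < n->nr_out; i++) {
		struct node *next = n->out[i];

		if (next->walking)	/* loop back into the stack: stop */
			continue;
		if (next->walked)	/* already counted once */
			continue;

		con += count_reachable_endpoints(next);
	}

	n->walking = false;
	n->walked = true;		/* "black": expansion finished */
	return con;
}
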
diff --git a/sound/soc/spear/spear_pcm.c b/sound/soc/spear/spear_pcm.c index 9b76cc5a1148..5e7aebe1e664 100644 --- a/sound/soc/spear/spear_pcm.c +++ b/sound/soc/spear/spear_pcm.c | |||
@@ -149,9 +149,9 @@ static void spear_pcm_free(struct snd_pcm *pcm) | |||
149 | 149 | ||
150 | static u64 spear_pcm_dmamask = DMA_BIT_MASK(32); | 150 | static u64 spear_pcm_dmamask = DMA_BIT_MASK(32); |
151 | 151 | ||
152 | static int spear_pcm_new(struct snd_card *card, | 152 | static int spear_pcm_new(struct snd_soc_pcm_runtime *rtd) |
153 | struct snd_soc_dai *dai, struct snd_pcm *pcm) | ||
154 | { | 153 | { |
154 | struct snd_card *card = rtd->card->snd_card; | ||
155 | int ret; | 155 | int ret; |
156 | 156 | ||
157 | if (!card->dev->dma_mask) | 157 | if (!card->dev->dma_mask) |
@@ -159,16 +159,16 @@ static int spear_pcm_new(struct snd_card *card, | |||
159 | if (!card->dev->coherent_dma_mask) | 159 | if (!card->dev->coherent_dma_mask) |
160 | card->dev->coherent_dma_mask = DMA_BIT_MASK(32); | 160 | card->dev->coherent_dma_mask = DMA_BIT_MASK(32); |
161 | 161 | ||
162 | if (dai->driver->playback.channels_min) { | 162 | if (rtd->cpu_dai->driver->playback.channels_min) { |
163 | ret = spear_pcm_preallocate_dma_buffer(pcm, | 163 | ret = spear_pcm_preallocate_dma_buffer(rtd->pcm, |
164 | SNDRV_PCM_STREAM_PLAYBACK, | 164 | SNDRV_PCM_STREAM_PLAYBACK, |
165 | spear_pcm_hardware.buffer_bytes_max); | 165 | spear_pcm_hardware.buffer_bytes_max); |
166 | if (ret) | 166 | if (ret) |
167 | return ret; | 167 | return ret; |
168 | } | 168 | } |
169 | 169 | ||
170 | if (dai->driver->capture.channels_min) { | 170 | if (rtd->cpu_dai->driver->capture.channels_min) { |
171 | ret = spear_pcm_preallocate_dma_buffer(pcm, | 171 | ret = spear_pcm_preallocate_dma_buffer(rtd->pcm, |
172 | SNDRV_PCM_STREAM_CAPTURE, | 172 | SNDRV_PCM_STREAM_CAPTURE, |
173 | spear_pcm_hardware.buffer_bytes_max); | 173 | spear_pcm_hardware.buffer_bytes_max); |
174 | if (ret) | 174 | if (ret) |
diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c index c925ab0adeb6..5e2c55c5b255 100644 --- a/sound/soc/tegra/tegra_pcm.c +++ b/sound/soc/tegra/tegra_pcm.c | |||
@@ -43,8 +43,6 @@ | |||
43 | static const struct snd_pcm_hardware tegra_pcm_hardware = { | 43 | static const struct snd_pcm_hardware tegra_pcm_hardware = { |
44 | .info = SNDRV_PCM_INFO_MMAP | | 44 | .info = SNDRV_PCM_INFO_MMAP | |
45 | SNDRV_PCM_INFO_MMAP_VALID | | 45 | SNDRV_PCM_INFO_MMAP_VALID | |
46 | SNDRV_PCM_INFO_PAUSE | | ||
47 | SNDRV_PCM_INFO_RESUME | | ||
48 | SNDRV_PCM_INFO_INTERLEAVED, | 46 | SNDRV_PCM_INFO_INTERLEAVED, |
49 | .formats = SNDRV_PCM_FMTBIT_S16_LE, | 47 | .formats = SNDRV_PCM_FMTBIT_S16_LE, |
50 | .channels_min = 2, | 48 | .channels_min = 2, |
@@ -127,26 +125,6 @@ static int tegra_pcm_hw_free(struct snd_pcm_substream *substream) | |||
127 | return 0; | 125 | return 0; |
128 | } | 126 | } |
129 | 127 | ||
130 | static int tegra_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | ||
131 | { | ||
132 | switch (cmd) { | ||
133 | case SNDRV_PCM_TRIGGER_START: | ||
134 | case SNDRV_PCM_TRIGGER_RESUME: | ||
135 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: | ||
136 | return snd_dmaengine_pcm_trigger(substream, | ||
137 | SNDRV_PCM_TRIGGER_START); | ||
138 | |||
139 | case SNDRV_PCM_TRIGGER_STOP: | ||
140 | case SNDRV_PCM_TRIGGER_SUSPEND: | ||
141 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: | ||
142 | return snd_dmaengine_pcm_trigger(substream, | ||
143 | SNDRV_PCM_TRIGGER_STOP); | ||
144 | default: | ||
145 | return -EINVAL; | ||
146 | } | ||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | static int tegra_pcm_mmap(struct snd_pcm_substream *substream, | 128 | static int tegra_pcm_mmap(struct snd_pcm_substream *substream, |
151 | struct vm_area_struct *vma) | 129 | struct vm_area_struct *vma) |
152 | { | 130 | { |
@@ -164,7 +142,7 @@ static struct snd_pcm_ops tegra_pcm_ops = { | |||
164 | .ioctl = snd_pcm_lib_ioctl, | 142 | .ioctl = snd_pcm_lib_ioctl, |
165 | .hw_params = tegra_pcm_hw_params, | 143 | .hw_params = tegra_pcm_hw_params, |
166 | .hw_free = tegra_pcm_hw_free, | 144 | .hw_free = tegra_pcm_hw_free, |
167 | .trigger = tegra_pcm_trigger, | 145 | .trigger = snd_dmaengine_pcm_trigger, |
168 | .pointer = snd_dmaengine_pcm_pointer, | 146 | .pointer = snd_dmaengine_pcm_pointer, |
169 | .mmap = tegra_pcm_mmap, | 147 | .mmap = tegra_pcm_mmap, |
170 | }; | 148 | }; |
diff --git a/sound/usb/clock.c b/sound/usb/clock.c index 5e634a2eb282..9e2703a25156 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c | |||
@@ -253,7 +253,7 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface, | |||
253 | { | 253 | { |
254 | struct usb_device *dev = chip->dev; | 254 | struct usb_device *dev = chip->dev; |
255 | unsigned char data[4]; | 255 | unsigned char data[4]; |
256 | int err, crate; | 256 | int err, cur_rate, prev_rate; |
257 | int clock = snd_usb_clock_find_source(chip, fmt->clock); | 257 | int clock = snd_usb_clock_find_source(chip, fmt->clock); |
258 | 258 | ||
259 | if (clock < 0) | 259 | if (clock < 0) |
@@ -266,6 +266,19 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface, | |||
266 | return -ENXIO; | 266 | return -ENXIO; |
267 | } | 267 | } |
268 | 268 | ||
269 | err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, | ||
270 | USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, | ||
271 | UAC2_CS_CONTROL_SAM_FREQ << 8, | ||
272 | snd_usb_ctrl_intf(chip) | (clock << 8), | ||
273 | data, sizeof(data)); | ||
274 | if (err < 0) { | ||
275 | snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n", | ||
276 | dev->devnum, iface, fmt->altsetting); | ||
277 | prev_rate = 0; | ||
278 | } else { | ||
279 | prev_rate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); | ||
280 | } | ||
281 | |||
269 | data[0] = rate; | 282 | data[0] = rate; |
270 | data[1] = rate >> 8; | 283 | data[1] = rate >> 8; |
271 | data[2] = rate >> 16; | 284 | data[2] = rate >> 16; |
@@ -280,19 +293,31 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface, | |||
280 | return err; | 293 | return err; |
281 | } | 294 | } |
282 | 295 | ||
283 | if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, | 296 | err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, |
284 | USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, | 297 | USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, |
285 | UAC2_CS_CONTROL_SAM_FREQ << 8, | 298 | UAC2_CS_CONTROL_SAM_FREQ << 8, |
286 | snd_usb_ctrl_intf(chip) | (clock << 8), | 299 | snd_usb_ctrl_intf(chip) | (clock << 8), |
287 | data, sizeof(data))) < 0) { | 300 | data, sizeof(data)); |
301 | if (err < 0) { | ||
288 | snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n", | 302 | snd_printk(KERN_WARNING "%d:%d:%d: cannot get freq (v2)\n", |
289 | dev->devnum, iface, fmt->altsetting); | 303 | dev->devnum, iface, fmt->altsetting); |
290 | return err; | 304 | cur_rate = 0; |
305 | } else { | ||
306 | cur_rate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); | ||
291 | } | 307 | } |
292 | 308 | ||
293 | crate = data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24); | 309 | if (cur_rate != rate) { |
294 | if (crate != rate) | 310 | snd_printd(KERN_WARNING |
295 | snd_printd(KERN_WARNING "current rate %d is different from the runtime rate %d\n", crate, rate); | 311 | "current rate %d is different from the runtime rate %d\n", |
312 | cur_rate, rate); | ||
313 | } | ||
314 | |||
315 | /* Some devices doesn't respond to sample rate changes while the | ||
316 | * interface is active. */ | ||
317 | if (rate != prev_rate) { | ||
318 | usb_set_interface(dev, iface, 0); | ||
319 | usb_set_interface(dev, iface, fmt->altsetting); | ||
320 | } | ||
296 | 321 | ||
297 | return 0; | 322 | return 0; |
298 | } | 323 | } |
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 638e7f738018..ca4739c3f650 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c | |||
@@ -715,8 +715,9 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_ | |||
715 | case UAC2_CLOCK_SELECTOR: { | 715 | case UAC2_CLOCK_SELECTOR: { |
716 | struct uac_selector_unit_descriptor *d = p1; | 716 | struct uac_selector_unit_descriptor *d = p1; |
717 | /* call recursively to retrieve the channel info */ | 717 | /* call recursively to retrieve the channel info */ |
718 | if (check_input_term(state, d->baSourceID[0], term) < 0) | 718 | err = check_input_term(state, d->baSourceID[0], term); |
719 | return -ENODEV; | 719 | if (err < 0) |
720 | return err; | ||
720 | term->type = d->bDescriptorSubtype << 16; /* virtual type */ | 721 | term->type = d->bDescriptorSubtype << 16; /* virtual type */ |
721 | term->id = id; | 722 | term->id = id; |
722 | term->name = uac_selector_unit_iSelector(d); | 723 | term->name = uac_selector_unit_iSelector(d); |
@@ -725,7 +726,8 @@ static int check_input_term(struct mixer_build *state, int id, struct usb_audio_ | |||
725 | case UAC1_PROCESSING_UNIT: | 726 | case UAC1_PROCESSING_UNIT: |
726 | case UAC1_EXTENSION_UNIT: | 727 | case UAC1_EXTENSION_UNIT: |
727 | /* UAC2_PROCESSING_UNIT_V2 */ | 728 | /* UAC2_PROCESSING_UNIT_V2 */ |
728 | /* UAC2_EFFECT_UNIT */ { | 729 | /* UAC2_EFFECT_UNIT */ |
730 | case UAC2_EXTENSION_UNIT_V2: { | ||
729 | struct uac_processing_unit_descriptor *d = p1; | 731 | struct uac_processing_unit_descriptor *d = p1; |
730 | 732 | ||
731 | if (state->mixer->protocol == UAC_VERSION_2 && | 733 | if (state->mixer->protocol == UAC_VERSION_2 && |
@@ -1356,8 +1358,9 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void | |||
1356 | return err; | 1358 | return err; |
1357 | 1359 | ||
1358 | /* determine the input source type and name */ | 1360 | /* determine the input source type and name */ |
1359 | if (check_input_term(state, hdr->bSourceID, &iterm) < 0) | 1361 | err = check_input_term(state, hdr->bSourceID, &iterm); |
1360 | return -EINVAL; | 1362 | if (err < 0) |
1363 | return err; | ||
1361 | 1364 | ||
1362 | master_bits = snd_usb_combine_bytes(bmaControls, csize); | 1365 | master_bits = snd_usb_combine_bytes(bmaControls, csize); |
1363 | /* master configuration quirks */ | 1366 | /* master configuration quirks */ |
@@ -2052,6 +2055,8 @@ static int parse_audio_unit(struct mixer_build *state, int unitid) | |||
2052 | return parse_audio_extension_unit(state, unitid, p1); | 2055 | return parse_audio_extension_unit(state, unitid, p1); |
2053 | else /* UAC_VERSION_2 */ | 2056 | else /* UAC_VERSION_2 */ |
2054 | return parse_audio_processing_unit(state, unitid, p1); | 2057 | return parse_audio_processing_unit(state, unitid, p1); |
2058 | case UAC2_EXTENSION_UNIT_V2: | ||
2059 | return parse_audio_extension_unit(state, unitid, p1); | ||
2055 | default: | 2060 | default: |
2056 | snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]); | 2061 | snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]); |
2057 | return -EINVAL; | 2062 | return -EINVAL; |
@@ -2118,7 +2123,7 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) | |||
2118 | state.oterm.type = le16_to_cpu(desc->wTerminalType); | 2123 | state.oterm.type = le16_to_cpu(desc->wTerminalType); |
2119 | state.oterm.name = desc->iTerminal; | 2124 | state.oterm.name = desc->iTerminal; |
2120 | err = parse_audio_unit(&state, desc->bSourceID); | 2125 | err = parse_audio_unit(&state, desc->bSourceID); |
2121 | if (err < 0) | 2126 | if (err < 0 && err != -EINVAL) |
2122 | return err; | 2127 | return err; |
2123 | } else { /* UAC_VERSION_2 */ | 2128 | } else { /* UAC_VERSION_2 */ |
2124 | struct uac2_output_terminal_descriptor *desc = p; | 2129 | struct uac2_output_terminal_descriptor *desc = p; |
@@ -2130,12 +2135,12 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer) | |||
2130 | state.oterm.type = le16_to_cpu(desc->wTerminalType); | 2135 | state.oterm.type = le16_to_cpu(desc->wTerminalType); |
2131 | state.oterm.name = desc->iTerminal; | 2136 | state.oterm.name = desc->iTerminal; |
2132 | err = parse_audio_unit(&state, desc->bSourceID); | 2137 | err = parse_audio_unit(&state, desc->bSourceID); |
2133 | if (err < 0) | 2138 | if (err < 0 && err != -EINVAL) |
2134 | return err; | 2139 | return err; |
2135 | 2140 | ||
2136 | /* for UAC2, use the same approach to also add the clock selectors */ | 2141 | /* for UAC2, use the same approach to also add the clock selectors */ |
2137 | err = parse_audio_unit(&state, desc->bCSourceID); | 2142 | err = parse_audio_unit(&state, desc->bCSourceID); |
2138 | if (err < 0) | 2143 | if (err < 0 && err != -EINVAL) |
2139 | return err; | 2144 | return err; |
2140 | } | 2145 | } |
2141 | } | 2146 | } |
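The mixer.c hunks above do three things: check_input_term() now propagates the real error code from its recursive call instead of flattening it to -ENODEV/-EINVAL, UAC2_EXTENSION_UNIT_V2 descriptors are parsed like the other processing/extension units, and snd_usb_mixer_controls() treats -EINVAL from parse_audio_unit() as "this unit type is not supported, skip it" rather than as a fatal failure that aborts the whole mixer build. A minimal sketch of that skip-on-EINVAL pattern (the looping helper is hypothetical, not part of the driver):

    /* Hypothetical illustration of the tolerant error handling above. */
    static int build_all_units(struct mixer_build *state,
                               const u8 *unit_ids, int n)
    {
            int i, err;

            for (i = 0; i < n; i++) {
                    err = parse_audio_unit(state, unit_ids[i]);
                    if (err < 0 && err != -EINVAL)
                            return err;     /* real error: abort */
                    /* -EINVAL: unsupported unit, carry on with the rest */
            }
            return 0;
    }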
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 497d2741d119..ebe91440a068 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c | |||
@@ -509,7 +509,7 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol, | |||
509 | else | 509 | else |
510 | ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, | 510 | ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, |
511 | USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, | 511 | USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, |
512 | 0, cpu_to_le16(wIndex), | 512 | 0, wIndex, |
513 | &tmp, sizeof(tmp), 1000); | 513 | &tmp, sizeof(tmp), 1000); |
514 | up_read(&mixer->chip->shutdown_rwsem); | 514 | up_read(&mixer->chip->shutdown_rwsem); |
515 | 515 | ||
@@ -540,7 +540,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol, | |||
540 | else | 540 | else |
541 | ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest, | 541 | ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest, |
542 | USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, | 542 | USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, |
543 | cpu_to_le16(wValue), cpu_to_le16(wIndex), | 543 | wValue, wIndex, |
544 | NULL, 0, 1000); | 544 | NULL, 0, 1000); |
545 | up_read(&mixer->chip->shutdown_rwsem); | 545 | up_read(&mixer->chip->shutdown_rwsem); |
546 | 546 | ||
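The cpu_to_le16() conversions dropped in the two hunks above (and in the quirks.c hunk that follows) were double byte swaps: usb_control_msg() takes its value and index arguments in CPU byte order, and the USB core converts them to little-endian itself when building the SETUP packet, so pre-swapping corrupts the request on big-endian hosts. For reference, the prototype from include/linux/usb.h:

    int usb_control_msg(struct usb_device *dev, unsigned int pipe,
                        __u8 request, __u8 requesttype,
                        __u16 value, __u16 index,      /* CPU byte order */
                        void *data, __u16 size, int timeout);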
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 5325a3869bb7..9c5ab22358b1 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
@@ -486,7 +486,7 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev) | |||
486 | { | 486 | { |
487 | int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), | 487 | int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), |
488 | 0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE, | 488 | 0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE, |
489 | cpu_to_le16(1), 0, NULL, 0, 1000); | 489 | 1, 0, NULL, 0, 1000); |
490 | 490 | ||
491 | if (ret < 0) | 491 | if (ret < 0) |
492 | return ret; | 492 | return ret; |
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile index a20e32033431..0b0a90787db6 100644 --- a/tools/lib/traceevent/Makefile +++ b/tools/lib/traceevent/Makefile | |||
@@ -122,7 +122,7 @@ export Q VERBOSE | |||
122 | 122 | ||
123 | EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION) | 123 | EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION) |
124 | 124 | ||
125 | INCLUDES = -I. -I/usr/local/include $(CONFIG_INCLUDES) | 125 | INCLUDES = -I. $(CONFIG_INCLUDES) |
126 | 126 | ||
127 | # Set compile option CFLAGS if not set elsewhere | 127 | # Set compile option CFLAGS if not set elsewhere |
128 | CFLAGS ?= -g -Wall | 128 | CFLAGS ?= -g -Wall |
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index a2108ca1cc17..bb74c79cd16e 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
@@ -95,7 +95,7 @@ ifeq ("$(origin DEBUG)", "command line") | |||
95 | PERF_DEBUG = $(DEBUG) | 95 | PERF_DEBUG = $(DEBUG) |
96 | endif | 96 | endif |
97 | ifndef PERF_DEBUG | 97 | ifndef PERF_DEBUG |
98 | CFLAGS_OPTIMIZE = -O6 -D_FORTIFY_SOURCE=2 | 98 | CFLAGS_OPTIMIZE = -O6 |
99 | endif | 99 | endif |
100 | 100 | ||
101 | ifdef PARSER_DEBUG | 101 | ifdef PARSER_DEBUG |
@@ -180,6 +180,12 @@ ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -Werror -Wvolatile-register-var,-W | |||
180 | CFLAGS := $(CFLAGS) -Wvolatile-register-var | 180 | CFLAGS := $(CFLAGS) -Wvolatile-register-var |
181 | endif | 181 | endif |
182 | 182 | ||
183 | ifndef PERF_DEBUG | ||
184 | ifeq ($(call try-cc,$(SOURCE_HELLO),$(CFLAGS) -D_FORTIFY_SOURCE=2,-D_FORTIFY_SOURCE=2),y) | ||
185 | CFLAGS := $(CFLAGS) -D_FORTIFY_SOURCE=2 | ||
186 | endif | ||
187 | endif | ||
188 | |||
183 | ### --- END CONFIGURATION SECTION --- | 189 | ### --- END CONFIGURATION SECTION --- |
184 | 190 | ||
185 | ifeq ($(srctree),) | 191 | ifeq ($(srctree),) |
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h index a5223e6a7b43..0fdc85269c4d 100644 --- a/tools/perf/bench/bench.h +++ b/tools/perf/bench/bench.h | |||
@@ -1,6 +1,30 @@ | |||
1 | #ifndef BENCH_H | 1 | #ifndef BENCH_H |
2 | #define BENCH_H | 2 | #define BENCH_H |
3 | 3 | ||
4 | /* | ||
5 | * The madvise transparent hugepage constants were added in glibc | ||
6 | * 2.13. For compatibility with older versions of glibc, define these | ||
7 | * tokens if they are not already defined. | ||
8 | * | ||
9 | * PA-RISC uses different madvise values from other architectures and | ||
10 | * needs to be special-cased. | ||
11 | */ | ||
12 | #ifdef __hppa__ | ||
13 | # ifndef MADV_HUGEPAGE | ||
14 | # define MADV_HUGEPAGE 67 | ||
15 | # endif | ||
16 | # ifndef MADV_NOHUGEPAGE | ||
17 | # define MADV_NOHUGEPAGE 68 | ||
18 | # endif | ||
19 | #else | ||
20 | # ifndef MADV_HUGEPAGE | ||
21 | # define MADV_HUGEPAGE 14 | ||
22 | # endif | ||
23 | # ifndef MADV_NOHUGEPAGE | ||
24 | # define MADV_NOHUGEPAGE 15 | ||
25 | # endif | ||
26 | #endif | ||
27 | |||
4 | extern int bench_numa(int argc, const char **argv, const char *prefix); | 28 | extern int bench_numa(int argc, const char **argv, const char *prefix); |
5 | extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); | 29 | extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); |
6 | extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); | 30 | extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); |
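The bench.h hunk lets perf bench build against glibc releases older than 2.13, which do not define the transparent-hugepage madvise constants; PA-RISC needs its own values because its madvise numbering differs from the other architectures. A hedged usage sketch (the helper below is illustrative and not part of the patch):

    #include <stddef.h>
    #include <sys/mman.h>
    #include "bench.h"              /* supplies MADV_HUGEPAGE if libc is old */

    /* Map anonymous memory and ask for transparent hugepages (best effort). */
    static void *alloc_thp(size_t len)
    {
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return NULL;
            madvise(p, len, MADV_HUGEPAGE); /* hint only; kernel may ignore it */
            return p;
    }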
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 774c90713a53..f1a939ebc19c 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -573,13 +573,15 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) | |||
573 | perf_event__synthesize_guest_os, tool); | 573 | perf_event__synthesize_guest_os, tool); |
574 | } | 574 | } |
575 | 575 | ||
576 | if (!opts->target.system_wide) | 576 | if (perf_target__has_task(&opts->target)) |
577 | err = perf_event__synthesize_thread_map(tool, evsel_list->threads, | 577 | err = perf_event__synthesize_thread_map(tool, evsel_list->threads, |
578 | process_synthesized_event, | 578 | process_synthesized_event, |
579 | machine); | 579 | machine); |
580 | else | 580 | else if (perf_target__has_cpu(&opts->target)) |
581 | err = perf_event__synthesize_threads(tool, process_synthesized_event, | 581 | err = perf_event__synthesize_threads(tool, process_synthesized_event, |
582 | machine); | 582 | machine); |
583 | else /* command specified */ | ||
584 | err = 0; | ||
583 | 585 | ||
584 | if (err != 0) | 586 | if (err != 0) |
585 | goto out_delete_session; | 587 | goto out_delete_session; |
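The builtin-record.c hunk replaces the old "system-wide or not" test with the target helpers: an existing task target (--pid/--tid/--uid) gets its thread map synthesized, a CPU or system-wide target gets all existing threads synthesized, and a plain "perf record <command>" synthesizes nothing up front, presumably because the forked workload emits its own fork/comm/mmap events. A best-effort reconstruction of those helpers (they live in tools/perf/util/target.h; treat the exact field names as assumptions):

    static inline bool perf_target__has_task(struct perf_target *target)
    {
            /* true for --pid, --tid or --uid style targets */
            return target->tid || target->pid || target->uid_str;
    }

    static inline bool perf_target__has_cpu(struct perf_target *target)
    {
            /* true for --cpu lists and for system-wide sessions */
            return target->system_wide || target->cpu_list;
    }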
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 38624686ee9a..226a4ae2f936 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h | |||
@@ -208,8 +208,9 @@ static inline int script_browse(const char *script_opt __maybe_unused) | |||
208 | return 0; | 208 | return 0; |
209 | } | 209 | } |
210 | 210 | ||
211 | #define K_LEFT -1 | 211 | #define K_LEFT -1000 |
212 | #define K_RIGHT -2 | 212 | #define K_RIGHT -2000 |
213 | #define K_SWITCH_INPUT_DATA -3000 | ||
213 | #endif | 214 | #endif |
214 | 215 | ||
215 | #ifdef GTK2_SUPPORT | 216 | #ifdef GTK2_SUPPORT |
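The hist.h hunk adds K_SWITCH_INPUT_DATA to the stub definitions used when the TUI is compiled out, and moves the sentinel key codes away from -1/-2, presumably because those small negative values overlap real error returns (-1 is -EPERM, -2 is -ENOENT), making a navigation key indistinguishable from a failure. An illustrative caller, not perf code:

    /* Returns 0 for navigation keys, a negative errno on error, 1 otherwise. */
    static int handle_browser_key(int key)
    {
            if (key == K_LEFT || key == K_RIGHT || key == K_SWITCH_INPUT_DATA)
                    return 0;       /* navigation / mode switch */
            if (key < 0)
                    return key;     /* genuine error, unambiguous now that the
                                       sentinels sit outside the errno range */
            return 1;               /* ordinary key press */
    }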
diff --git a/tools/perf/util/strlist.c b/tools/perf/util/strlist.c index 55433aa42c8f..eabdce0a2daa 100644 --- a/tools/perf/util/strlist.c +++ b/tools/perf/util/strlist.c | |||
@@ -143,7 +143,7 @@ struct strlist *strlist__new(bool dupstr, const char *list) | |||
143 | slist->rblist.node_delete = strlist__node_delete; | 143 | slist->rblist.node_delete = strlist__node_delete; |
144 | 144 | ||
145 | slist->dupstr = dupstr; | 145 | slist->dupstr = dupstr; |
146 | if (slist && strlist__parse_list(slist, list) != 0) | 146 | if (list && strlist__parse_list(slist, list) != 0) |
147 | goto out_error; | 147 | goto out_error; |
148 | } | 148 | } |
149 | 149 | ||
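The strlist.c fix tests the right pointer: slist is already known to be non-NULL at that point, whereas a NULL list string used to be handed straight to strlist__parse_list(). With the fix, creating an empty list is a legitimate call (illustrative usage, not from the patch):

    struct strlist *sl;

    sl = strlist__new(true, NULL);  /* empty list, nothing to parse */
    if (sl == NULL)
            return -ENOMEM;         /* allocation failed */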
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index ce82b9401958..5ba005c00e2f 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c | |||
@@ -74,9 +74,12 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, | |||
74 | u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; | 74 | u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; |
75 | u64 redir_content; | 75 | u64 redir_content; |
76 | 76 | ||
77 | ASSERT(redir_index < IOAPIC_NUM_PINS); | 77 | if (redir_index < IOAPIC_NUM_PINS) |
78 | redir_content = | ||
79 | ioapic->redirtbl[redir_index].bits; | ||
80 | else | ||
81 | redir_content = ~0ULL; | ||
78 | 82 | ||
79 | redir_content = ioapic->redirtbl[redir_index].bits; | ||
80 | result = (ioapic->ioregsel & 0x1) ? | 83 | result = (ioapic->ioregsel & 0x1) ? |
81 | (redir_content >> 32) & 0xffffffff : | 84 | (redir_content >> 32) & 0xffffffff : |
82 | redir_content & 0xffffffff; | 85 | redir_content & 0xffffffff; |
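The ioapic.c hunk stops trusting a guest-controlled register index: ASSERT() is typically compiled away outside debug builds, so an out-of-range IOREGSEL value could read past the redirection table. After the change the read is bounds-checked, and out-of-range entries return all ones, the conventional value for undefined MMIO registers. The same logic pulled out into a standalone sketch (the helper itself is not in the patch):

    static u64 read_redir_entry(const struct kvm_ioapic *ioapic, u32 index)
    {
            if (index >= IOAPIC_NUM_PINS)
                    return ~0ULL;                   /* out of range: all ones */
            return ioapic->redirtbl[index].bits;
    }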
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index adc68feb5c5a..f18013f09e68 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -1541,21 +1541,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, | |||
1541 | } | 1541 | } |
1542 | 1542 | ||
1543 | int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, | 1543 | int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, |
1544 | gpa_t gpa) | 1544 | gpa_t gpa, unsigned long len) |
1545 | { | 1545 | { |
1546 | struct kvm_memslots *slots = kvm_memslots(kvm); | 1546 | struct kvm_memslots *slots = kvm_memslots(kvm); |
1547 | int offset = offset_in_page(gpa); | 1547 | int offset = offset_in_page(gpa); |
1548 | gfn_t gfn = gpa >> PAGE_SHIFT; | 1548 | gfn_t start_gfn = gpa >> PAGE_SHIFT; |
1549 | gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; | ||
1550 | gfn_t nr_pages_needed = end_gfn - start_gfn + 1; | ||
1551 | gfn_t nr_pages_avail; | ||
1549 | 1552 | ||
1550 | ghc->gpa = gpa; | 1553 | ghc->gpa = gpa; |
1551 | ghc->generation = slots->generation; | 1554 | ghc->generation = slots->generation; |
1552 | ghc->memslot = gfn_to_memslot(kvm, gfn); | 1555 | ghc->len = len; |
1553 | ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL); | 1556 | ghc->memslot = gfn_to_memslot(kvm, start_gfn); |
1554 | if (!kvm_is_error_hva(ghc->hva)) | 1557 | ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail); |
1558 | if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) { | ||
1555 | ghc->hva += offset; | 1559 | ghc->hva += offset; |
1556 | else | 1560 | } else { |
1557 | return -EFAULT; | 1561 | /* |
1558 | 1562 | * If the requested region crosses two memslots, we still | |
1563 | * verify that the entire region is valid here. | ||
1564 | */ | ||
1565 | while (start_gfn <= end_gfn) { | ||
1566 | ghc->memslot = gfn_to_memslot(kvm, start_gfn); | ||
1567 | ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, | ||
1568 | &nr_pages_avail); | ||
1569 | if (kvm_is_error_hva(ghc->hva)) | ||
1570 | return -EFAULT; | ||
1571 | start_gfn += nr_pages_avail; | ||
1572 | } | ||
1573 | /* Use the slow path for cross page reads and writes. */ | ||
1574 | ghc->memslot = NULL; | ||
1575 | } | ||
1559 | return 0; | 1576 | return 0; |
1560 | } | 1577 | } |
1561 | EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); | 1578 | EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init); |
@@ -1566,8 +1583,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, | |||
1566 | struct kvm_memslots *slots = kvm_memslots(kvm); | 1583 | struct kvm_memslots *slots = kvm_memslots(kvm); |
1567 | int r; | 1584 | int r; |
1568 | 1585 | ||
1586 | BUG_ON(len > ghc->len); | ||
1587 | |||
1569 | if (slots->generation != ghc->generation) | 1588 | if (slots->generation != ghc->generation) |
1570 | kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); | 1589 | kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); |
1590 | |||
1591 | if (unlikely(!ghc->memslot)) | ||
1592 | return kvm_write_guest(kvm, ghc->gpa, data, len); | ||
1571 | 1593 | ||
1572 | if (kvm_is_error_hva(ghc->hva)) | 1594 | if (kvm_is_error_hva(ghc->hva)) |
1573 | return -EFAULT; | 1595 | return -EFAULT; |
@@ -1587,8 +1609,13 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, | |||
1587 | struct kvm_memslots *slots = kvm_memslots(kvm); | 1609 | struct kvm_memslots *slots = kvm_memslots(kvm); |
1588 | int r; | 1610 | int r; |
1589 | 1611 | ||
1612 | BUG_ON(len > ghc->len); | ||
1613 | |||
1590 | if (slots->generation != ghc->generation) | 1614 | if (slots->generation != ghc->generation) |
1591 | kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); | 1615 | kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); |
1616 | |||
1617 | if (unlikely(!ghc->memslot)) | ||
1618 | return kvm_read_guest(kvm, ghc->gpa, data, len); | ||
1592 | 1619 | ||
1593 | if (kvm_is_error_hva(ghc->hva)) | 1620 | if (kvm_is_error_hva(ghc->hva)) |
1594 | return -EFAULT; | 1621 | return -EFAULT; |
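The kvm_main.c changes teach the gfn-to-hva cache the length of the region it covers: kvm_gfn_to_hva_cache_init() now takes a len argument, keeps the cached fast path only when the whole region fits in one memslot (while still validating regions that span slots), and the cached read/write helpers fall back to kvm_read_guest()/kvm_write_guest() when memslot is NULL, with BUG_ON() catching callers that access more than they registered. A hedged caller sketch (the struct layout and names are illustrative):

    struct shared_page {                    /* hypothetical guest-shared data */
            u64 seq;
            u64 payload[7];
    };

    static int publish(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                       gpa_t gpa, struct shared_page *sp)
    {
            int r;

            r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(*sp));
            if (r)
                    return r;
            /* len must not exceed the len given at init time (see BUG_ON). */
            return kvm_write_guest_cached(kvm, ghc, sp, sizeof(*sp));
    }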