diff options
219 files changed, 2009 insertions, 1418 deletions
diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt index 11ace3c3d805..4fc392763611 100644 --- a/Documentation/devicetree/bindings/net/micrel-ks8851.txt +++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt | |||
@@ -7,3 +7,4 @@ Required properties: | |||
7 | 7 | ||
8 | Optional properties: | 8 | Optional properties: |
9 | - local-mac-address : Ethernet mac address to use | 9 | - local-mac-address : Ethernet mac address to use |
10 | - vdd-supply: supply for Ethernet mac | ||
diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt index b26122973525..c6af4bac5aa8 100644 --- a/Documentation/networking/netlink_mmap.txt +++ b/Documentation/networking/netlink_mmap.txt | |||
@@ -226,9 +226,9 @@ Ring setup: | |||
226 | void *rx_ring, *tx_ring; | 226 | void *rx_ring, *tx_ring; |
227 | 227 | ||
228 | /* Configure ring parameters */ | 228 | /* Configure ring parameters */ |
229 | if (setsockopt(fd, NETLINK_RX_RING, &req, sizeof(req)) < 0) | 229 | if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0) |
230 | exit(1); | 230 | exit(1); |
231 | if (setsockopt(fd, NETLINK_TX_RING, &req, sizeof(req)) < 0) | 231 | if (setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0) |
232 | exit(1) | 232 | exit(1) |
233 | 233 | ||
234 | /* Calculate size of each individual ring */ | 234 | /* Calculate size of each individual ring */ |
diff --git a/MAINTAINERS b/MAINTAINERS index b3fdb0f004ba..1c323b0290cd 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -911,11 +911,11 @@ F: arch/arm/include/asm/hardware/dec21285.h | |||
911 | F: arch/arm/mach-footbridge/ | 911 | F: arch/arm/mach-footbridge/ |
912 | 912 | ||
913 | ARM/FREESCALE IMX / MXC ARM ARCHITECTURE | 913 | ARM/FREESCALE IMX / MXC ARM ARCHITECTURE |
914 | M: Shawn Guo <shawn.guo@linaro.org> | 914 | M: Shawn Guo <shawn.guo@freescale.com> |
915 | M: Sascha Hauer <kernel@pengutronix.de> | 915 | M: Sascha Hauer <kernel@pengutronix.de> |
916 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 916 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
917 | S: Maintained | 917 | S: Maintained |
918 | T: git git://git.linaro.org/people/shawnguo/linux-2.6.git | 918 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git |
919 | F: arch/arm/mach-imx/ | 919 | F: arch/arm/mach-imx/ |
920 | F: arch/arm/boot/dts/imx* | 920 | F: arch/arm/boot/dts/imx* |
921 | F: arch/arm/configs/imx*_defconfig | 921 | F: arch/arm/configs/imx*_defconfig |
@@ -1832,8 +1832,8 @@ F: net/bluetooth/ | |||
1832 | F: include/net/bluetooth/ | 1832 | F: include/net/bluetooth/ |
1833 | 1833 | ||
1834 | BONDING DRIVER | 1834 | BONDING DRIVER |
1835 | M: Jay Vosburgh <fubar@us.ibm.com> | 1835 | M: Jay Vosburgh <j.vosburgh@gmail.com> |
1836 | M: Veaceslav Falico <vfalico@redhat.com> | 1836 | M: Veaceslav Falico <vfalico@gmail.com> |
1837 | M: Andy Gospodarek <andy@greyhouse.net> | 1837 | M: Andy Gospodarek <andy@greyhouse.net> |
1838 | L: netdev@vger.kernel.org | 1838 | L: netdev@vger.kernel.org |
1839 | W: http://sourceforge.net/projects/bonding/ | 1839 | W: http://sourceforge.net/projects/bonding/ |
@@ -2801,9 +2801,9 @@ S: Supported | |||
2801 | F: drivers/acpi/dock.c | 2801 | F: drivers/acpi/dock.c |
2802 | 2802 | ||
2803 | DOCUMENTATION | 2803 | DOCUMENTATION |
2804 | M: Rob Landley <rob@landley.net> | 2804 | M: Randy Dunlap <rdunlap@infradead.org> |
2805 | L: linux-doc@vger.kernel.org | 2805 | L: linux-doc@vger.kernel.org |
2806 | T: TBD | 2806 | T: quilt http://www.infradead.org/~rdunlap/Doc/patches/ |
2807 | S: Maintained | 2807 | S: Maintained |
2808 | F: Documentation/ | 2808 | F: Documentation/ |
2809 | 2809 | ||
@@ -3380,7 +3380,9 @@ F: Documentation/filesystems/ext4.txt | |||
3380 | F: fs/ext4/ | 3380 | F: fs/ext4/ |
3381 | 3381 | ||
3382 | Extended Verification Module (EVM) | 3382 | Extended Verification Module (EVM) |
3383 | M: Mimi Zohar <zohar@us.ibm.com> | 3383 | M: Mimi Zohar <zohar@linux.vnet.ibm.com> |
3384 | L: linux-ima-devel@lists.sourceforge.net | ||
3385 | L: linux-security-module@vger.kernel.org | ||
3384 | S: Supported | 3386 | S: Supported |
3385 | F: security/integrity/evm/ | 3387 | F: security/integrity/evm/ |
3386 | 3388 | ||
@@ -4402,8 +4404,11 @@ S: Maintained | |||
4402 | F: drivers/ipack/ | 4404 | F: drivers/ipack/ |
4403 | 4405 | ||
4404 | INTEGRITY MEASUREMENT ARCHITECTURE (IMA) | 4406 | INTEGRITY MEASUREMENT ARCHITECTURE (IMA) |
4405 | M: Mimi Zohar <zohar@us.ibm.com> | 4407 | M: Mimi Zohar <zohar@linux.vnet.ibm.com> |
4406 | M: Dmitry Kasatkin <d.kasatkin@samsung.com> | 4408 | M: Dmitry Kasatkin <d.kasatkin@samsung.com> |
4409 | L: linux-ima-devel@lists.sourceforge.net | ||
4410 | L: linux-ima-user@lists.sourceforge.net | ||
4411 | L: linux-security-module@vger.kernel.org | ||
4407 | S: Supported | 4412 | S: Supported |
4408 | F: security/integrity/ima/ | 4413 | F: security/integrity/ima/ |
4409 | 4414 | ||
@@ -4545,6 +4550,7 @@ M: Greg Rose <gregory.v.rose@intel.com> | |||
4545 | M: Alex Duyck <alexander.h.duyck@intel.com> | 4550 | M: Alex Duyck <alexander.h.duyck@intel.com> |
4546 | M: John Ronciak <john.ronciak@intel.com> | 4551 | M: John Ronciak <john.ronciak@intel.com> |
4547 | M: Mitch Williams <mitch.a.williams@intel.com> | 4552 | M: Mitch Williams <mitch.a.williams@intel.com> |
4553 | M: Linux NICS <linux.nics@intel.com> | ||
4548 | L: e1000-devel@lists.sourceforge.net | 4554 | L: e1000-devel@lists.sourceforge.net |
4549 | W: http://www.intel.com/support/feedback.htm | 4555 | W: http://www.intel.com/support/feedback.htm |
4550 | W: http://e1000.sourceforge.net/ | 4556 | W: http://e1000.sourceforge.net/ |
@@ -5070,8 +5076,8 @@ F: include/keys/ | |||
5070 | F: security/keys/ | 5076 | F: security/keys/ |
5071 | 5077 | ||
5072 | KEYS-TRUSTED | 5078 | KEYS-TRUSTED |
5073 | M: David Safford <safford@watson.ibm.com> | 5079 | M: David Safford <safford@us.ibm.com> |
5074 | M: Mimi Zohar <zohar@us.ibm.com> | 5080 | M: Mimi Zohar <zohar@linux.vnet.ibm.com> |
5075 | L: linux-security-module@vger.kernel.org | 5081 | L: linux-security-module@vger.kernel.org |
5076 | L: keyrings@linux-nfs.org | 5082 | L: keyrings@linux-nfs.org |
5077 | S: Supported | 5083 | S: Supported |
@@ -5081,8 +5087,8 @@ F: security/keys/trusted.c | |||
5081 | F: security/keys/trusted.h | 5087 | F: security/keys/trusted.h |
5082 | 5088 | ||
5083 | KEYS-ENCRYPTED | 5089 | KEYS-ENCRYPTED |
5084 | M: Mimi Zohar <zohar@us.ibm.com> | 5090 | M: Mimi Zohar <zohar@linux.vnet.ibm.com> |
5085 | M: David Safford <safford@watson.ibm.com> | 5091 | M: David Safford <safford@us.ibm.com> |
5086 | L: linux-security-module@vger.kernel.org | 5092 | L: linux-security-module@vger.kernel.org |
5087 | L: keyrings@linux-nfs.org | 5093 | L: keyrings@linux-nfs.org |
5088 | S: Supported | 5094 | S: Supported |
@@ -6005,6 +6011,7 @@ F: include/uapi/linux/net.h | |||
6005 | F: include/uapi/linux/netdevice.h | 6011 | F: include/uapi/linux/netdevice.h |
6006 | F: tools/net/ | 6012 | F: tools/net/ |
6007 | F: tools/testing/selftests/net/ | 6013 | F: tools/testing/selftests/net/ |
6014 | F: lib/random32.c | ||
6008 | 6015 | ||
6009 | NETWORKING [IPv4/IPv6] | 6016 | NETWORKING [IPv4/IPv6] |
6010 | M: "David S. Miller" <davem@davemloft.net> | 6017 | M: "David S. Miller" <davem@davemloft.net> |
@@ -7728,11 +7735,10 @@ M: Security Officers <security@kernel.org> | |||
7728 | S: Supported | 7735 | S: Supported |
7729 | 7736 | ||
7730 | SELINUX SECURITY MODULE | 7737 | SELINUX SECURITY MODULE |
7738 | M: Paul Moore <paul@paul-moore.com> | ||
7731 | M: Stephen Smalley <sds@tycho.nsa.gov> | 7739 | M: Stephen Smalley <sds@tycho.nsa.gov> |
7732 | M: James Morris <james.l.morris@oracle.com> | ||
7733 | M: Eric Paris <eparis@parisplace.org> | 7740 | M: Eric Paris <eparis@parisplace.org> |
7734 | M: Paul Moore <paul@paul-moore.com> | 7741 | L: selinux@tycho.nsa.gov (moderated for non-subscribers) |
7735 | L: selinux@tycho.nsa.gov (subscribers-only, general discussion) | ||
7736 | W: http://selinuxproject.org | 7742 | W: http://selinuxproject.org |
7737 | T: git git://git.infradead.org/users/pcmoore/selinux | 7743 | T: git git://git.infradead.org/users/pcmoore/selinux |
7738 | S: Supported | 7744 | S: Supported |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 14 | 2 | PATCHLEVEL = 14 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc6 | 4 | EXTRAVERSION = |
5 | NAME = Shuffling Zombie Juror | 5 | NAME = Shuffling Zombie Juror |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/boot/dts/sama5d36.dtsi b/arch/arm/boot/dts/sama5d36.dtsi index 6c31c26e6cc0..db58cad6acd3 100644 --- a/arch/arm/boot/dts/sama5d36.dtsi +++ b/arch/arm/boot/dts/sama5d36.dtsi | |||
@@ -8,8 +8,8 @@ | |||
8 | */ | 8 | */ |
9 | #include "sama5d3.dtsi" | 9 | #include "sama5d3.dtsi" |
10 | #include "sama5d3_can.dtsi" | 10 | #include "sama5d3_can.dtsi" |
11 | #include "sama5d3_emac.dtsi" | ||
12 | #include "sama5d3_gmac.dtsi" | 11 | #include "sama5d3_gmac.dtsi" |
12 | #include "sama5d3_emac.dtsi" | ||
13 | #include "sama5d3_lcd.dtsi" | 13 | #include "sama5d3_lcd.dtsi" |
14 | #include "sama5d3_mci2.dtsi" | 14 | #include "sama5d3_mci2.dtsi" |
15 | #include "sama5d3_tcb1.dtsi" | 15 | #include "sama5d3_tcb1.dtsi" |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index dcae3a7035db..95fa1f1d5c8b 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -1776,12 +1776,12 @@ endchoice | |||
1776 | 1776 | ||
1777 | config FORCE_MAX_ZONEORDER | 1777 | config FORCE_MAX_ZONEORDER |
1778 | int "Maximum zone order" | 1778 | int "Maximum zone order" |
1779 | range 14 64 if HUGETLB_PAGE && PAGE_SIZE_64KB | 1779 | range 14 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB |
1780 | default "14" if HUGETLB_PAGE && PAGE_SIZE_64KB | 1780 | default "14" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB |
1781 | range 13 64 if HUGETLB_PAGE && PAGE_SIZE_32KB | 1781 | range 13 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB |
1782 | default "13" if HUGETLB_PAGE && PAGE_SIZE_32KB | 1782 | default "13" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB |
1783 | range 12 64 if HUGETLB_PAGE && PAGE_SIZE_16KB | 1783 | range 12 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB |
1784 | default "12" if HUGETLB_PAGE && PAGE_SIZE_16KB | 1784 | default "12" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB |
1785 | range 11 64 | 1785 | range 11 64 |
1786 | default "11" | 1786 | default "11" |
1787 | help | 1787 | help |
@@ -2353,9 +2353,8 @@ config SECCOMP | |||
2353 | If unsure, say Y. Only embedded should say N here. | 2353 | If unsure, say Y. Only embedded should say N here. |
2354 | 2354 | ||
2355 | config MIPS_O32_FP64_SUPPORT | 2355 | config MIPS_O32_FP64_SUPPORT |
2356 | bool "Support for O32 binaries using 64-bit FP" | 2356 | bool "Support for O32 binaries using 64-bit FP (EXPERIMENTAL)" |
2357 | depends on 32BIT || MIPS32_O32 | 2357 | depends on 32BIT || MIPS32_O32 |
2358 | default y | ||
2359 | help | 2358 | help |
2360 | When this is enabled, the kernel will support use of 64-bit floating | 2359 | When this is enabled, the kernel will support use of 64-bit floating |
2361 | point registers with binaries using the O32 ABI along with the | 2360 | point registers with binaries using the O32 ABI along with the |
@@ -2367,7 +2366,14 @@ config MIPS_O32_FP64_SUPPORT | |||
2367 | of your kernel & potentially improve FP emulation performance by | 2366 | of your kernel & potentially improve FP emulation performance by |
2368 | saying N here. | 2367 | saying N here. |
2369 | 2368 | ||
2370 | If unsure, say Y. | 2369 | Although binutils currently supports use of this flag the details |
2370 | concerning its effect upon the O32 ABI in userland are still being | ||
2371 | worked on. In order to avoid userland becoming dependant upon current | ||
2372 | behaviour before the details have been finalised, this option should | ||
2373 | be considered experimental and only enabled by those working upon | ||
2374 | said details. | ||
2375 | |||
2376 | If unsure, say N. | ||
2371 | 2377 | ||
2372 | config USE_OF | 2378 | config USE_OF |
2373 | bool | 2379 | bool |
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c index 9edc35ff8cf1..acf9a2a37f5a 100644 --- a/arch/mips/alchemy/board-gpr.c +++ b/arch/mips/alchemy/board-gpr.c | |||
@@ -53,10 +53,8 @@ void __init prom_init(void) | |||
53 | prom_init_cmdline(); | 53 | prom_init_cmdline(); |
54 | 54 | ||
55 | memsize_str = prom_getenv("memsize"); | 55 | memsize_str = prom_getenv("memsize"); |
56 | if (!memsize_str) | 56 | if (!memsize_str || kstrtoul(memsize_str, 0, &memsize)) |
57 | memsize = 0x04000000; | 57 | memsize = 0x04000000; |
58 | else | ||
59 | strict_strtoul(memsize_str, 0, &memsize); | ||
60 | add_memory_region(0, memsize, BOOT_MEM_RAM); | 58 | add_memory_region(0, memsize, BOOT_MEM_RAM); |
61 | } | 59 | } |
62 | 60 | ||
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c index 9969dbab19e3..25a59a23547e 100644 --- a/arch/mips/alchemy/board-mtx1.c +++ b/arch/mips/alchemy/board-mtx1.c | |||
@@ -52,10 +52,8 @@ void __init prom_init(void) | |||
52 | prom_init_cmdline(); | 52 | prom_init_cmdline(); |
53 | 53 | ||
54 | memsize_str = prom_getenv("memsize"); | 54 | memsize_str = prom_getenv("memsize"); |
55 | if (!memsize_str) | 55 | if (!memsize_str || kstrtoul(memsize_str, 0, &memsize)) |
56 | memsize = 0x04000000; | 56 | memsize = 0x04000000; |
57 | else | ||
58 | strict_strtoul(memsize_str, 0, &memsize); | ||
59 | add_memory_region(0, memsize, BOOT_MEM_RAM); | 57 | add_memory_region(0, memsize, BOOT_MEM_RAM); |
60 | } | 58 | } |
61 | 59 | ||
diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c index 6d612e2b949b..cdd8246f92b3 100644 --- a/arch/mips/bcm47xx/board.c +++ b/arch/mips/bcm47xx/board.c | |||
@@ -1,3 +1,4 @@ | |||
1 | #include <linux/errno.h> | ||
1 | #include <linux/export.h> | 2 | #include <linux/export.h> |
2 | #include <linux/string.h> | 3 | #include <linux/string.h> |
3 | #include <bcm47xx_board.h> | 4 | #include <bcm47xx_board.h> |
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c index 6decb27cf48b..2bed73a684ae 100644 --- a/arch/mips/bcm47xx/nvram.c +++ b/arch/mips/bcm47xx/nvram.c | |||
@@ -196,7 +196,7 @@ int bcm47xx_nvram_gpio_pin(const char *name) | |||
196 | char nvram_var[10]; | 196 | char nvram_var[10]; |
197 | char buf[30]; | 197 | char buf[30]; |
198 | 198 | ||
199 | for (i = 0; i < 16; i++) { | 199 | for (i = 0; i < 32; i++) { |
200 | err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i); | 200 | err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i); |
201 | if (err <= 0) | 201 | if (err <= 0) |
202 | continue; | 202 | continue; |
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 25fbfae06c1f..c2bb4f896ce7 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c | |||
@@ -975,10 +975,6 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d, | |||
975 | if (ciu > 1 || bit > 63) | 975 | if (ciu > 1 || bit > 63) |
976 | return -EINVAL; | 976 | return -EINVAL; |
977 | 977 | ||
978 | /* These are the GPIO lines */ | ||
979 | if (ciu == 0 && bit >= 16 && bit < 32) | ||
980 | return -EINVAL; | ||
981 | |||
982 | *out_hwirq = (ciu << 6) | bit; | 978 | *out_hwirq = (ciu << 6) | bit; |
983 | *out_type = 0; | 979 | *out_type = 0; |
984 | 980 | ||
@@ -1007,6 +1003,10 @@ static int octeon_irq_ciu_map(struct irq_domain *d, | |||
1007 | if (!octeon_irq_virq_in_range(virq)) | 1003 | if (!octeon_irq_virq_in_range(virq)) |
1008 | return -EINVAL; | 1004 | return -EINVAL; |
1009 | 1005 | ||
1006 | /* Don't map irq if it is reserved for GPIO. */ | ||
1007 | if (line == 0 && bit >= 16 && bit <32) | ||
1008 | return 0; | ||
1009 | |||
1010 | if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) | 1010 | if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) |
1011 | return -EINVAL; | 1011 | return -EINVAL; |
1012 | 1012 | ||
@@ -1525,10 +1525,6 @@ static int octeon_irq_ciu2_xlat(struct irq_domain *d, | |||
1525 | ciu = intspec[0]; | 1525 | ciu = intspec[0]; |
1526 | bit = intspec[1]; | 1526 | bit = intspec[1]; |
1527 | 1527 | ||
1528 | /* Line 7 are the GPIO lines */ | ||
1529 | if (ciu > 6 || bit > 63) | ||
1530 | return -EINVAL; | ||
1531 | |||
1532 | *out_hwirq = (ciu << 6) | bit; | 1528 | *out_hwirq = (ciu << 6) | bit; |
1533 | *out_type = 0; | 1529 | *out_type = 0; |
1534 | 1530 | ||
@@ -1570,8 +1566,14 @@ static int octeon_irq_ciu2_map(struct irq_domain *d, | |||
1570 | if (!octeon_irq_virq_in_range(virq)) | 1566 | if (!octeon_irq_virq_in_range(virq)) |
1571 | return -EINVAL; | 1567 | return -EINVAL; |
1572 | 1568 | ||
1573 | /* Line 7 are the GPIO lines */ | 1569 | /* |
1574 | if (line > 6 || octeon_irq_ciu_to_irq[line][bit] != 0) | 1570 | * Don't map irq if it is reserved for GPIO. |
1571 | * (Line 7 are the GPIO lines.) | ||
1572 | */ | ||
1573 | if (line == 7) | ||
1574 | return 0; | ||
1575 | |||
1576 | if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0) | ||
1575 | return -EINVAL; | 1577 | return -EINVAL; |
1576 | 1578 | ||
1577 | if (octeon_irq_ciu2_is_edge(line, bit)) | 1579 | if (octeon_irq_ciu2_is_edge(line, bit)) |
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 3220c93ea981..4225e99bd7bf 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #define _ASM_ASMMACRO_H | 9 | #define _ASM_ASMMACRO_H |
10 | 10 | ||
11 | #include <asm/hazards.h> | 11 | #include <asm/hazards.h> |
12 | #include <asm/asm-offsets.h> | ||
12 | 13 | ||
13 | #ifdef CONFIG_32BIT | 14 | #ifdef CONFIG_32BIT |
14 | #include <asm/asmmacro-32.h> | 15 | #include <asm/asmmacro-32.h> |
@@ -54,11 +55,21 @@ | |||
54 | .endm | 55 | .endm |
55 | 56 | ||
56 | .macro local_irq_disable reg=t0 | 57 | .macro local_irq_disable reg=t0 |
58 | #ifdef CONFIG_PREEMPT | ||
59 | lw \reg, TI_PRE_COUNT($28) | ||
60 | addi \reg, \reg, 1 | ||
61 | sw \reg, TI_PRE_COUNT($28) | ||
62 | #endif | ||
57 | mfc0 \reg, CP0_STATUS | 63 | mfc0 \reg, CP0_STATUS |
58 | ori \reg, \reg, 1 | 64 | ori \reg, \reg, 1 |
59 | xori \reg, \reg, 1 | 65 | xori \reg, \reg, 1 |
60 | mtc0 \reg, CP0_STATUS | 66 | mtc0 \reg, CP0_STATUS |
61 | irq_disable_hazard | 67 | irq_disable_hazard |
68 | #ifdef CONFIG_PREEMPT | ||
69 | lw \reg, TI_PRE_COUNT($28) | ||
70 | addi \reg, \reg, -1 | ||
71 | sw \reg, TI_PRE_COUNT($28) | ||
72 | #endif | ||
62 | .endm | 73 | .endm |
63 | #endif /* CONFIG_MIPS_MT_SMTC */ | 74 | #endif /* CONFIG_MIPS_MT_SMTC */ |
64 | 75 | ||
@@ -106,7 +117,7 @@ | |||
106 | .endm | 117 | .endm |
107 | 118 | ||
108 | .macro fpu_save_double thread status tmp | 119 | .macro fpu_save_double thread status tmp |
109 | #if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2) | 120 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) |
110 | sll \tmp, \status, 5 | 121 | sll \tmp, \status, 5 |
111 | bgez \tmp, 10f | 122 | bgez \tmp, 10f |
112 | fpu_save_16odd \thread | 123 | fpu_save_16odd \thread |
@@ -159,7 +170,7 @@ | |||
159 | .endm | 170 | .endm |
160 | 171 | ||
161 | .macro fpu_restore_double thread status tmp | 172 | .macro fpu_restore_double thread status tmp |
162 | #if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2) | 173 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) |
163 | sll \tmp, \status, 5 | 174 | sll \tmp, \status, 5 |
164 | bgez \tmp, 10f # 16 register mode? | 175 | bgez \tmp, 10f # 16 register mode? |
165 | 176 | ||
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index 6b9749540edf..58e50cbdb1a6 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h | |||
@@ -57,7 +57,7 @@ static inline int __enable_fpu(enum fpu_mode mode) | |||
57 | return 0; | 57 | return 0; |
58 | 58 | ||
59 | case FPU_64BIT: | 59 | case FPU_64BIT: |
60 | #if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_MIPS64)) | 60 | #if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)) |
61 | /* we only have a 32-bit FPU */ | 61 | /* we only have a 32-bit FPU */ |
62 | return SIGFPE; | 62 | return SIGFPE; |
63 | #endif | 63 | #endif |
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h index ce35c9af0c28..992aaba603b5 100644 --- a/arch/mips/include/asm/ftrace.h +++ b/arch/mips/include/asm/ftrace.h | |||
@@ -22,12 +22,12 @@ extern void _mcount(void); | |||
22 | #define safe_load(load, src, dst, error) \ | 22 | #define safe_load(load, src, dst, error) \ |
23 | do { \ | 23 | do { \ |
24 | asm volatile ( \ | 24 | asm volatile ( \ |
25 | "1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\ | 25 | "1: " load " %[tmp_dst], 0(%[tmp_src])\n" \ |
26 | " li %[" STR(error) "], 0\n" \ | 26 | " li %[tmp_err], 0\n" \ |
27 | "2:\n" \ | 27 | "2:\n" \ |
28 | \ | 28 | \ |
29 | ".section .fixup, \"ax\"\n" \ | 29 | ".section .fixup, \"ax\"\n" \ |
30 | "3: li %[" STR(error) "], 1\n" \ | 30 | "3: li %[tmp_err], 1\n" \ |
31 | " j 2b\n" \ | 31 | " j 2b\n" \ |
32 | ".previous\n" \ | 32 | ".previous\n" \ |
33 | \ | 33 | \ |
@@ -35,8 +35,8 @@ do { \ | |||
35 | STR(PTR) "\t1b, 3b\n\t" \ | 35 | STR(PTR) "\t1b, 3b\n\t" \ |
36 | ".previous\n" \ | 36 | ".previous\n" \ |
37 | \ | 37 | \ |
38 | : [dst] "=&r" (dst), [error] "=r" (error)\ | 38 | : [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\ |
39 | : [src] "r" (src) \ | 39 | : [tmp_src] "r" (src) \ |
40 | : "memory" \ | 40 | : "memory" \ |
41 | ); \ | 41 | ); \ |
42 | } while (0) | 42 | } while (0) |
@@ -44,12 +44,12 @@ do { \ | |||
44 | #define safe_store(store, src, dst, error) \ | 44 | #define safe_store(store, src, dst, error) \ |
45 | do { \ | 45 | do { \ |
46 | asm volatile ( \ | 46 | asm volatile ( \ |
47 | "1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\ | 47 | "1: " store " %[tmp_src], 0(%[tmp_dst])\n"\ |
48 | " li %[" STR(error) "], 0\n" \ | 48 | " li %[tmp_err], 0\n" \ |
49 | "2:\n" \ | 49 | "2:\n" \ |
50 | \ | 50 | \ |
51 | ".section .fixup, \"ax\"\n" \ | 51 | ".section .fixup, \"ax\"\n" \ |
52 | "3: li %[" STR(error) "], 1\n" \ | 52 | "3: li %[tmp_err], 1\n" \ |
53 | " j 2b\n" \ | 53 | " j 2b\n" \ |
54 | ".previous\n" \ | 54 | ".previous\n" \ |
55 | \ | 55 | \ |
@@ -57,8 +57,8 @@ do { \ | |||
57 | STR(PTR) "\t1b, 3b\n\t" \ | 57 | STR(PTR) "\t1b, 3b\n\t" \ |
58 | ".previous\n" \ | 58 | ".previous\n" \ |
59 | \ | 59 | \ |
60 | : [error] "=r" (error) \ | 60 | : [tmp_err] "=r" (error) \ |
61 | : [dst] "r" (dst), [src] "r" (src)\ | 61 | : [tmp_dst] "r" (dst), [tmp_src] "r" (src)\ |
62 | : "memory" \ | 62 | : "memory" \ |
63 | ); \ | 63 | ); \ |
64 | } while (0) | 64 | } while (0) |
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index 33e8dbfc1b63..f35b131977e6 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #ifndef __ASM_MIPS_SYSCALL_H | 13 | #ifndef __ASM_MIPS_SYSCALL_H |
14 | #define __ASM_MIPS_SYSCALL_H | 14 | #define __ASM_MIPS_SYSCALL_H |
15 | 15 | ||
16 | #include <linux/compiler.h> | ||
16 | #include <linux/audit.h> | 17 | #include <linux/audit.h> |
17 | #include <linux/elf-em.h> | 18 | #include <linux/elf-em.h> |
18 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
@@ -39,14 +40,14 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg, | |||
39 | 40 | ||
40 | #ifdef CONFIG_32BIT | 41 | #ifdef CONFIG_32BIT |
41 | case 4: case 5: case 6: case 7: | 42 | case 4: case 5: case 6: case 7: |
42 | return get_user(*arg, (int *)usp + 4 * n); | 43 | return get_user(*arg, (int *)usp + n); |
43 | #endif | 44 | #endif |
44 | 45 | ||
45 | #ifdef CONFIG_64BIT | 46 | #ifdef CONFIG_64BIT |
46 | case 4: case 5: case 6: case 7: | 47 | case 4: case 5: case 6: case 7: |
47 | #ifdef CONFIG_MIPS32_O32 | 48 | #ifdef CONFIG_MIPS32_O32 |
48 | if (test_thread_flag(TIF_32BIT_REGS)) | 49 | if (test_thread_flag(TIF_32BIT_REGS)) |
49 | return get_user(*arg, (int *)usp + 4 * n); | 50 | return get_user(*arg, (int *)usp + n); |
50 | else | 51 | else |
51 | #endif | 52 | #endif |
52 | *arg = regs->regs[4 + n]; | 53 | *arg = regs->regs[4 + n]; |
@@ -57,6 +58,8 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg, | |||
57 | default: | 58 | default: |
58 | BUG(); | 59 | BUG(); |
59 | } | 60 | } |
61 | |||
62 | unreachable(); | ||
60 | } | 63 | } |
61 | 64 | ||
62 | static inline long syscall_get_return_value(struct task_struct *task, | 65 | static inline long syscall_get_return_value(struct task_struct *task, |
@@ -83,11 +86,10 @@ static inline void syscall_get_arguments(struct task_struct *task, | |||
83 | unsigned int i, unsigned int n, | 86 | unsigned int i, unsigned int n, |
84 | unsigned long *args) | 87 | unsigned long *args) |
85 | { | 88 | { |
86 | unsigned long arg; | ||
87 | int ret; | 89 | int ret; |
88 | 90 | ||
89 | while (n--) | 91 | while (n--) |
90 | ret |= mips_get_syscall_arg(&arg, task, regs, i++); | 92 | ret |= mips_get_syscall_arg(args++, task, regs, i++); |
91 | 93 | ||
92 | /* | 94 | /* |
93 | * No way to communicate an error because this is a void function. | 95 | * No way to communicate an error because this is a void function. |
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index b39ba25b41cc..f25181b19941 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h | |||
@@ -163,8 +163,8 @@ enum cop1_sdw_func { | |||
163 | */ | 163 | */ |
164 | enum cop1x_func { | 164 | enum cop1x_func { |
165 | lwxc1_op = 0x00, ldxc1_op = 0x01, | 165 | lwxc1_op = 0x00, ldxc1_op = 0x01, |
166 | pfetch_op = 0x07, swxc1_op = 0x08, | 166 | swxc1_op = 0x08, sdxc1_op = 0x09, |
167 | sdxc1_op = 0x09, madd_s_op = 0x20, | 167 | pfetch_op = 0x0f, madd_s_op = 0x20, |
168 | madd_d_op = 0x21, madd_e_op = 0x22, | 168 | madd_d_op = 0x21, madd_e_op = 0x22, |
169 | msub_s_op = 0x28, msub_d_op = 0x29, | 169 | msub_s_op = 0x28, msub_d_op = 0x29, |
170 | msub_e_op = 0x2a, nmadd_s_op = 0x30, | 170 | msub_e_op = 0x2a, nmadd_s_op = 0x30, |
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 185ba258361b..374ed74cd516 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c | |||
@@ -111,11 +111,10 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, | |||
111 | safe_store_code(new_code1, ip, faulted); | 111 | safe_store_code(new_code1, ip, faulted); |
112 | if (unlikely(faulted)) | 112 | if (unlikely(faulted)) |
113 | return -EFAULT; | 113 | return -EFAULT; |
114 | ip += 4; | 114 | safe_store_code(new_code2, ip + 4, faulted); |
115 | safe_store_code(new_code2, ip, faulted); | ||
116 | if (unlikely(faulted)) | 115 | if (unlikely(faulted)) |
117 | return -EFAULT; | 116 | return -EFAULT; |
118 | flush_icache_range(ip, ip + 8); /* original ip + 12 */ | 117 | flush_icache_range(ip, ip + 8); |
119 | return 0; | 118 | return 0; |
120 | } | 119 | } |
121 | #endif | 120 | #endif |
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 253b2fb52026..73b0ddf910d4 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S | |||
@@ -35,9 +35,9 @@ | |||
35 | LEAF(_save_fp_context) | 35 | LEAF(_save_fp_context) |
36 | cfc1 t1, fcr31 | 36 | cfc1 t1, fcr31 |
37 | 37 | ||
38 | #if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2) | 38 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) |
39 | .set push | 39 | .set push |
40 | #ifdef CONFIG_MIPS32_R2 | 40 | #ifdef CONFIG_CPU_MIPS32_R2 |
41 | .set mips64r2 | 41 | .set mips64r2 |
42 | mfc0 t0, CP0_STATUS | 42 | mfc0 t0, CP0_STATUS |
43 | sll t0, t0, 5 | 43 | sll t0, t0, 5 |
@@ -146,11 +146,11 @@ LEAF(_save_fp_context32) | |||
146 | * - cp1 status/control register | 146 | * - cp1 status/control register |
147 | */ | 147 | */ |
148 | LEAF(_restore_fp_context) | 148 | LEAF(_restore_fp_context) |
149 | EX lw t0, SC_FPC_CSR(a0) | 149 | EX lw t1, SC_FPC_CSR(a0) |
150 | 150 | ||
151 | #if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2) | 151 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) |
152 | .set push | 152 | .set push |
153 | #ifdef CONFIG_MIPS32_R2 | 153 | #ifdef CONFIG_CPU_MIPS32_R2 |
154 | .set mips64r2 | 154 | .set mips64r2 |
155 | mfc0 t0, CP0_STATUS | 155 | mfc0 t0, CP0_STATUS |
156 | sll t0, t0, 5 | 156 | sll t0, t0, 5 |
@@ -191,7 +191,7 @@ LEAF(_restore_fp_context) | |||
191 | EX ldc1 $f26, SC_FPREGS+208(a0) | 191 | EX ldc1 $f26, SC_FPREGS+208(a0) |
192 | EX ldc1 $f28, SC_FPREGS+224(a0) | 192 | EX ldc1 $f28, SC_FPREGS+224(a0) |
193 | EX ldc1 $f30, SC_FPREGS+240(a0) | 193 | EX ldc1 $f30, SC_FPREGS+240(a0) |
194 | ctc1 t0, fcr31 | 194 | ctc1 t1, fcr31 |
195 | jr ra | 195 | jr ra |
196 | li v0, 0 # success | 196 | li v0, 0 # success |
197 | END(_restore_fp_context) | 197 | END(_restore_fp_context) |
@@ -199,7 +199,7 @@ LEAF(_restore_fp_context) | |||
199 | #ifdef CONFIG_MIPS32_COMPAT | 199 | #ifdef CONFIG_MIPS32_COMPAT |
200 | LEAF(_restore_fp_context32) | 200 | LEAF(_restore_fp_context32) |
201 | /* Restore an o32 sigcontext. */ | 201 | /* Restore an o32 sigcontext. */ |
202 | EX lw t0, SC32_FPC_CSR(a0) | 202 | EX lw t1, SC32_FPC_CSR(a0) |
203 | 203 | ||
204 | mfc0 t0, CP0_STATUS | 204 | mfc0 t0, CP0_STATUS |
205 | sll t0, t0, 5 | 205 | sll t0, t0, 5 |
@@ -239,7 +239,7 @@ LEAF(_restore_fp_context32) | |||
239 | EX ldc1 $f26, SC32_FPREGS+208(a0) | 239 | EX ldc1 $f26, SC32_FPREGS+208(a0) |
240 | EX ldc1 $f28, SC32_FPREGS+224(a0) | 240 | EX ldc1 $f28, SC32_FPREGS+224(a0) |
241 | EX ldc1 $f30, SC32_FPREGS+240(a0) | 241 | EX ldc1 $f30, SC32_FPREGS+240(a0) |
242 | ctc1 t0, fcr31 | 242 | ctc1 t1, fcr31 |
243 | jr ra | 243 | jr ra |
244 | li v0, 0 # success | 244 | li v0, 0 # success |
245 | END(_restore_fp_context32) | 245 | END(_restore_fp_context32) |
diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c index 56dc69635153..758fb3cd2326 100644 --- a/arch/mips/kernel/rtlx-cmp.c +++ b/arch/mips/kernel/rtlx-cmp.c | |||
@@ -112,5 +112,8 @@ void __exit rtlx_module_exit(void) | |||
112 | 112 | ||
113 | for (i = 0; i < RTLX_CHANNELS; i++) | 113 | for (i = 0; i < RTLX_CHANNELS; i++) |
114 | device_destroy(mt_class, MKDEV(major, i)); | 114 | device_destroy(mt_class, MKDEV(major, i)); |
115 | |||
115 | unregister_chrdev(major, RTLX_MODULE_NAME); | 116 | unregister_chrdev(major, RTLX_MODULE_NAME); |
117 | |||
118 | aprp_hook = NULL; | ||
116 | } | 119 | } |
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c index 91d61ba422b4..9c1aca00fd54 100644 --- a/arch/mips/kernel/rtlx-mt.c +++ b/arch/mips/kernel/rtlx-mt.c | |||
@@ -144,5 +144,8 @@ void __exit rtlx_module_exit(void) | |||
144 | 144 | ||
145 | for (i = 0; i < RTLX_CHANNELS; i++) | 145 | for (i = 0; i < RTLX_CHANNELS; i++) |
146 | device_destroy(mt_class, MKDEV(major, i)); | 146 | device_destroy(mt_class, MKDEV(major, i)); |
147 | |||
147 | unregister_chrdev(major, RTLX_MODULE_NAME); | 148 | unregister_chrdev(major, RTLX_MODULE_NAME); |
149 | |||
150 | aprp_hook = NULL; | ||
148 | } | 151 | } |
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 506925b2c3f3..0b4e2e38294b 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c | |||
@@ -1538,10 +1538,10 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, | |||
1538 | break; | 1538 | break; |
1539 | } | 1539 | } |
1540 | 1540 | ||
1541 | case 0x7: /* 7 */ | 1541 | case 0x3: |
1542 | if (MIPSInst_FUNC(ir) != pfetch_op) { | 1542 | if (MIPSInst_FUNC(ir) != pfetch_op) |
1543 | return SIGILL; | 1543 | return SIGILL; |
1544 | } | 1544 | |
1545 | /* ignore prefx operation */ | 1545 | /* ignore prefx operation */ |
1546 | break; | 1546 | break; |
1547 | 1547 | ||
diff --git a/arch/mips/mti-malta/malta-amon.c b/arch/mips/mti-malta/malta-amon.c index 592ac0427426..84ac523b0ce0 100644 --- a/arch/mips/mti-malta/malta-amon.c +++ b/arch/mips/mti-malta/malta-amon.c | |||
@@ -72,7 +72,7 @@ int amon_cpu_start(int cpu, | |||
72 | return 0; | 72 | return 0; |
73 | } | 73 | } |
74 | 74 | ||
75 | #ifdef CONFIG_MIPS_VPE_LOADER | 75 | #ifdef CONFIG_MIPS_VPE_LOADER_CMP |
76 | int vpe_run(struct vpe *v) | 76 | int vpe_run(struct vpe *v) |
77 | { | 77 | { |
78 | struct vpe_notifications *n; | 78 | struct vpe_notifications *n; |
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index ca3e3a46a42f..2242181a6284 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c | |||
@@ -119,7 +119,7 @@ static void malta_hw0_irqdispatch(void) | |||
119 | 119 | ||
120 | do_IRQ(MALTA_INT_BASE + irq); | 120 | do_IRQ(MALTA_INT_BASE + irq); |
121 | 121 | ||
122 | #ifdef MIPS_VPE_APSP_API | 122 | #ifdef CONFIG_MIPS_VPE_APSP_API_MT |
123 | if (aprp_hook) | 123 | if (aprp_hook) |
124 | aprp_hook(); | 124 | aprp_hook(); |
125 | #endif | 125 | #endif |
@@ -310,7 +310,7 @@ static void ipi_call_dispatch(void) | |||
310 | 310 | ||
311 | static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) | 311 | static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) |
312 | { | 312 | { |
313 | #ifdef MIPS_VPE_APSP_API | 313 | #ifdef CONFIG_MIPS_VPE_APSP_API_CMP |
314 | if (aprp_hook) | 314 | if (aprp_hook) |
315 | aprp_hook(); | 315 | aprp_hook(); |
316 | #endif | 316 | #endif |
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c index d37be36dc659..2b91b0e61566 100644 --- a/arch/mips/pci/msi-octeon.c +++ b/arch/mips/pci/msi-octeon.c | |||
@@ -150,6 +150,7 @@ msi_irq_allocated: | |||
150 | msg.address_lo = | 150 | msg.address_lo = |
151 | ((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff; | 151 | ((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff; |
152 | msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32; | 152 | msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32; |
153 | break; | ||
153 | case OCTEON_DMA_BAR_TYPE_BIG: | 154 | case OCTEON_DMA_BAR_TYPE_BIG: |
154 | /* When using big bar, Bar 0 is based at 0 */ | 155 | /* When using big bar, Bar 0 is based at 0 */ |
155 | msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff; | 156 | msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff; |
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index 637fe031aa84..60d5d174dfe4 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h | |||
@@ -32,17 +32,6 @@ void copy_page_asm(void *to, void *from); | |||
32 | void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, | 32 | void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, |
33 | struct page *pg); | 33 | struct page *pg); |
34 | 34 | ||
35 | /* #define CONFIG_PARISC_TMPALIAS */ | ||
36 | |||
37 | #ifdef CONFIG_PARISC_TMPALIAS | ||
38 | void clear_user_highpage(struct page *page, unsigned long vaddr); | ||
39 | #define clear_user_highpage clear_user_highpage | ||
40 | struct vm_area_struct; | ||
41 | void copy_user_highpage(struct page *to, struct page *from, | ||
42 | unsigned long vaddr, struct vm_area_struct *vma); | ||
43 | #define __HAVE_ARCH_COPY_USER_HIGHPAGE | ||
44 | #endif | ||
45 | |||
46 | /* | 35 | /* |
47 | * These are used to make use of C type-checking.. | 36 | * These are used to make use of C type-checking.. |
48 | */ | 37 | */ |
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 3516e0b27044..64f2992e439f 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h | |||
@@ -191,8 +191,4 @@ static __inline__ int arch_write_can_lock(arch_rwlock_t *rw) | |||
191 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) | 191 | #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) |
192 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) | 192 | #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) |
193 | 193 | ||
194 | #define arch_spin_relax(lock) cpu_relax() | ||
195 | #define arch_read_relax(lock) cpu_relax() | ||
196 | #define arch_write_relax(lock) cpu_relax() | ||
197 | |||
198 | #endif /* __ASM_SPINLOCK_H */ | 194 | #endif /* __ASM_SPINLOCK_H */ |
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index 42706794a36f..265ae5190b0a 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h | |||
@@ -828,13 +828,13 @@ | |||
828 | #define __NR_finit_module (__NR_Linux + 333) | 828 | #define __NR_finit_module (__NR_Linux + 333) |
829 | #define __NR_sched_setattr (__NR_Linux + 334) | 829 | #define __NR_sched_setattr (__NR_Linux + 334) |
830 | #define __NR_sched_getattr (__NR_Linux + 335) | 830 | #define __NR_sched_getattr (__NR_Linux + 335) |
831 | #define __NR_utimes (__NR_Linux + 336) | ||
831 | 832 | ||
832 | #define __NR_Linux_syscalls (__NR_sched_getattr + 1) | 833 | #define __NR_Linux_syscalls (__NR_utimes + 1) |
833 | 834 | ||
834 | 835 | ||
835 | #define __IGNORE_select /* newselect */ | 836 | #define __IGNORE_select /* newselect */ |
836 | #define __IGNORE_fadvise64 /* fadvise64_64 */ | 837 | #define __IGNORE_fadvise64 /* fadvise64_64 */ |
837 | #define __IGNORE_utimes /* utime */ | ||
838 | 838 | ||
839 | 839 | ||
840 | #define HPUX_GATEWAY_ADDR 0xC0000004 | 840 | #define HPUX_GATEWAY_ADDR 0xC0000004 |
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index ac87a40502e6..a6ffc775a9f8 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c | |||
@@ -581,67 +581,3 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long | |||
581 | __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); | 581 | __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); |
582 | } | 582 | } |
583 | } | 583 | } |
584 | |||
585 | #ifdef CONFIG_PARISC_TMPALIAS | ||
586 | |||
587 | void clear_user_highpage(struct page *page, unsigned long vaddr) | ||
588 | { | ||
589 | void *vto; | ||
590 | unsigned long flags; | ||
591 | |||
592 | /* Clear using TMPALIAS region. The page doesn't need to | ||
593 | be flushed but the kernel mapping needs to be purged. */ | ||
594 | |||
595 | vto = kmap_atomic(page); | ||
596 | |||
597 | /* The PA-RISC 2.0 Architecture book states on page F-6: | ||
598 | "Before a write-capable translation is enabled, *all* | ||
599 | non-equivalently-aliased translations must be removed | ||
600 | from the page table and purged from the TLB. (Note | ||
601 | that the caches are not required to be flushed at this | ||
602 | time.) Before any non-equivalent aliased translation | ||
603 | is re-enabled, the virtual address range for the writeable | ||
604 | page (the entire page) must be flushed from the cache, | ||
605 | and the write-capable translation removed from the page | ||
606 | table and purged from the TLB." */ | ||
607 | |||
608 | purge_kernel_dcache_page_asm((unsigned long)vto); | ||
609 | purge_tlb_start(flags); | ||
610 | pdtlb_kernel(vto); | ||
611 | purge_tlb_end(flags); | ||
612 | preempt_disable(); | ||
613 | clear_user_page_asm(vto, vaddr); | ||
614 | preempt_enable(); | ||
615 | |||
616 | pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */ | ||
617 | } | ||
618 | |||
619 | void copy_user_highpage(struct page *to, struct page *from, | ||
620 | unsigned long vaddr, struct vm_area_struct *vma) | ||
621 | { | ||
622 | void *vfrom, *vto; | ||
623 | unsigned long flags; | ||
624 | |||
625 | /* Copy using TMPALIAS region. This has the advantage | ||
626 | that the `from' page doesn't need to be flushed. However, | ||
627 | the `to' page must be flushed in copy_user_page_asm since | ||
628 | it can be used to bring in executable code. */ | ||
629 | |||
630 | vfrom = kmap_atomic(from); | ||
631 | vto = kmap_atomic(to); | ||
632 | |||
633 | purge_kernel_dcache_page_asm((unsigned long)vto); | ||
634 | purge_tlb_start(flags); | ||
635 | pdtlb_kernel(vto); | ||
636 | pdtlb_kernel(vfrom); | ||
637 | purge_tlb_end(flags); | ||
638 | preempt_disable(); | ||
639 | copy_user_page_asm(vto, vfrom, vaddr); | ||
640 | flush_dcache_page_asm(__pa(vto), vaddr); | ||
641 | preempt_enable(); | ||
642 | |||
643 | pagefault_enable(); /* kunmap_atomic(addr, KM_USER1); */ | ||
644 | pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */ | ||
645 | } | ||
646 | |||
647 | #endif /* CONFIG_PARISC_TMPALIAS */ | ||
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 8fa3fbb3e4d3..80e5dd248934 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S | |||
@@ -431,6 +431,7 @@ | |||
431 | ENTRY_SAME(finit_module) | 431 | ENTRY_SAME(finit_module) |
432 | ENTRY_SAME(sched_setattr) | 432 | ENTRY_SAME(sched_setattr) |
433 | ENTRY_SAME(sched_getattr) /* 335 */ | 433 | ENTRY_SAME(sched_getattr) /* 335 */ |
434 | ENTRY_COMP(utimes) | ||
434 | 435 | ||
435 | /* Nothing yet */ | 436 | /* Nothing yet */ |
436 | 437 | ||
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index e66d4ec04d95..818dce344e82 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -1504,73 +1504,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |||
1504 | 1: addi r8,r8,16 | 1504 | 1: addi r8,r8,16 |
1505 | .endr | 1505 | .endr |
1506 | 1506 | ||
1507 | /* Save DEC */ | ||
1508 | mfspr r5,SPRN_DEC | ||
1509 | mftb r6 | ||
1510 | extsw r5,r5 | ||
1511 | add r5,r5,r6 | ||
1512 | std r5,VCPU_DEC_EXPIRES(r9) | ||
1513 | |||
1514 | BEGIN_FTR_SECTION | ||
1515 | b 8f | ||
1516 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) | ||
1517 | /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ | ||
1518 | mfmsr r8 | ||
1519 | li r0, 1 | ||
1520 | rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG | ||
1521 | mtmsrd r8 | ||
1522 | |||
1523 | /* Save POWER8-specific registers */ | ||
1524 | mfspr r5, SPRN_IAMR | ||
1525 | mfspr r6, SPRN_PSPB | ||
1526 | mfspr r7, SPRN_FSCR | ||
1527 | std r5, VCPU_IAMR(r9) | ||
1528 | stw r6, VCPU_PSPB(r9) | ||
1529 | std r7, VCPU_FSCR(r9) | ||
1530 | mfspr r5, SPRN_IC | ||
1531 | mfspr r6, SPRN_VTB | ||
1532 | mfspr r7, SPRN_TAR | ||
1533 | std r5, VCPU_IC(r9) | ||
1534 | std r6, VCPU_VTB(r9) | ||
1535 | std r7, VCPU_TAR(r9) | ||
1536 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
1537 | mfspr r5, SPRN_TFHAR | ||
1538 | mfspr r6, SPRN_TFIAR | ||
1539 | mfspr r7, SPRN_TEXASR | ||
1540 | std r5, VCPU_TFHAR(r9) | ||
1541 | std r6, VCPU_TFIAR(r9) | ||
1542 | std r7, VCPU_TEXASR(r9) | ||
1543 | #endif | ||
1544 | mfspr r8, SPRN_EBBHR | ||
1545 | std r8, VCPU_EBBHR(r9) | ||
1546 | mfspr r5, SPRN_EBBRR | ||
1547 | mfspr r6, SPRN_BESCR | ||
1548 | mfspr r7, SPRN_CSIGR | ||
1549 | mfspr r8, SPRN_TACR | ||
1550 | std r5, VCPU_EBBRR(r9) | ||
1551 | std r6, VCPU_BESCR(r9) | ||
1552 | std r7, VCPU_CSIGR(r9) | ||
1553 | std r8, VCPU_TACR(r9) | ||
1554 | mfspr r5, SPRN_TCSCR | ||
1555 | mfspr r6, SPRN_ACOP | ||
1556 | mfspr r7, SPRN_PID | ||
1557 | mfspr r8, SPRN_WORT | ||
1558 | std r5, VCPU_TCSCR(r9) | ||
1559 | std r6, VCPU_ACOP(r9) | ||
1560 | stw r7, VCPU_GUEST_PID(r9) | ||
1561 | std r8, VCPU_WORT(r9) | ||
1562 | 8: | ||
1563 | |||
1564 | /* Save and reset AMR and UAMOR before turning on the MMU */ | ||
1565 | BEGIN_FTR_SECTION | ||
1566 | mfspr r5,SPRN_AMR | ||
1567 | mfspr r6,SPRN_UAMOR | ||
1568 | std r5,VCPU_AMR(r9) | ||
1569 | std r6,VCPU_UAMOR(r9) | ||
1570 | li r6,0 | ||
1571 | mtspr SPRN_AMR,r6 | ||
1572 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | ||
1573 | |||
1574 | /* Unset guest mode */ | 1507 | /* Unset guest mode */ |
1575 | li r0, KVM_GUEST_MODE_NONE | 1508 | li r0, KVM_GUEST_MODE_NONE |
1576 | stb r0, HSTATE_IN_GUEST(r13) | 1509 | stb r0, HSTATE_IN_GUEST(r13) |
@@ -2203,7 +2136,7 @@ BEGIN_FTR_SECTION | |||
2203 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 2136 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
2204 | #endif | 2137 | #endif |
2205 | mfspr r6,SPRN_VRSAVE | 2138 | mfspr r6,SPRN_VRSAVE |
2206 | stw r6,VCPU_VRSAVE(r3) | 2139 | stw r6,VCPU_VRSAVE(r31) |
2207 | mtlr r30 | 2140 | mtlr r30 |
2208 | mtmsrd r5 | 2141 | mtmsrd r5 |
2209 | isync | 2142 | isync |
@@ -2240,7 +2173,7 @@ BEGIN_FTR_SECTION | |||
2240 | bl .load_vr_state | 2173 | bl .load_vr_state |
2241 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 2174 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
2242 | #endif | 2175 | #endif |
2243 | lwz r7,VCPU_VRSAVE(r4) | 2176 | lwz r7,VCPU_VRSAVE(r31) |
2244 | mtspr SPRN_VRSAVE,r7 | 2177 | mtspr SPRN_VRSAVE,r7 |
2245 | mtlr r30 | 2178 | mtlr r30 |
2246 | mr r4,r31 | 2179 | mr r4,r31 |
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 32a280ec38c1..d7b4967f8fa6 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
@@ -58,9 +58,12 @@ void arch_cpu_idle(void) | |||
58 | { | 58 | { |
59 | if (tlb_type != hypervisor) { | 59 | if (tlb_type != hypervisor) { |
60 | touch_nmi_watchdog(); | 60 | touch_nmi_watchdog(); |
61 | local_irq_enable(); | ||
61 | } else { | 62 | } else { |
62 | unsigned long pstate; | 63 | unsigned long pstate; |
63 | 64 | ||
65 | local_irq_enable(); | ||
66 | |||
64 | /* The sun4v sleeping code requires that we have PSTATE.IE cleared over | 67 | /* The sun4v sleeping code requires that we have PSTATE.IE cleared over |
65 | * the cpu sleep hypervisor call. | 68 | * the cpu sleep hypervisor call. |
66 | */ | 69 | */ |
@@ -82,7 +85,6 @@ void arch_cpu_idle(void) | |||
82 | : "=&r" (pstate) | 85 | : "=&r" (pstate) |
83 | : "i" (PSTATE_IE)); | 86 | : "i" (PSTATE_IE)); |
84 | } | 87 | } |
85 | local_irq_enable(); | ||
86 | } | 88 | } |
87 | 89 | ||
88 | #ifdef CONFIG_HOTPLUG_CPU | 90 | #ifdef CONFIG_HOTPLUG_CPU |
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index 87729fff13b9..33a17e7b3ccd 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S | |||
@@ -189,7 +189,8 @@ linux_sparc_syscall32: | |||
189 | mov %i0, %l5 ! IEU1 | 189 | mov %i0, %l5 ! IEU1 |
190 | 5: call %l7 ! CTI Group brk forced | 190 | 5: call %l7 ! CTI Group brk forced |
191 | srl %i5, 0, %o5 ! IEU1 | 191 | srl %i5, 0, %o5 ! IEU1 |
192 | ba,a,pt %xcc, 3f | 192 | ba,pt %xcc, 3f |
193 | sra %o0, 0, %o0 | ||
193 | 194 | ||
194 | /* Linux native system calls enter here... */ | 195 | /* Linux native system calls enter here... */ |
195 | .align 32 | 196 | .align 32 |
@@ -217,7 +218,6 @@ linux_sparc_syscall: | |||
217 | 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] | 218 | 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] |
218 | ret_sys_call: | 219 | ret_sys_call: |
219 | ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 | 220 | ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 |
220 | sra %o0, 0, %o0 | ||
221 | mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2 | 221 | mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2 |
222 | sllx %g2, 32, %g2 | 222 | sllx %g2, 32, %g2 |
223 | 223 | ||
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index 3b3a360b429a..f5d506fdddad 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c | |||
@@ -273,7 +273,7 @@ void __init pgtable_cache_init(void) | |||
273 | prom_halt(); | 273 | prom_halt(); |
274 | } | 274 | } |
275 | 275 | ||
276 | for (i = 0; i < 8; i++) { | 276 | for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) { |
277 | unsigned long size = 8192 << i; | 277 | unsigned long size = 8192 << i; |
278 | const char *name = tsb_cache_names[i]; | 278 | const char *name = tsb_cache_names[i]; |
279 | 279 | ||
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 5ad38ad07890..bbc8b12fa443 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -445,20 +445,10 @@ static inline int pte_same(pte_t a, pte_t b) | |||
445 | return a.pte == b.pte; | 445 | return a.pte == b.pte; |
446 | } | 446 | } |
447 | 447 | ||
448 | static inline int pteval_present(pteval_t pteval) | ||
449 | { | ||
450 | /* | ||
451 | * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this | ||
452 | * way clearly states that the intent is that protnone and numa | ||
453 | * hinting ptes are considered present for the purposes of | ||
454 | * pagetable operations like zapping, protection changes, gup etc. | ||
455 | */ | ||
456 | return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA); | ||
457 | } | ||
458 | |||
459 | static inline int pte_present(pte_t a) | 448 | static inline int pte_present(pte_t a) |
460 | { | 449 | { |
461 | return pteval_present(pte_flags(a)); | 450 | return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE | |
451 | _PAGE_NUMA); | ||
462 | } | 452 | } |
463 | 453 | ||
464 | #define pte_accessible pte_accessible | 454 | #define pte_accessible pte_accessible |
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index d35f24e231cd..1306d117967d 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h | |||
@@ -119,9 +119,10 @@ static inline void setup_node_to_cpumask_map(void) { } | |||
119 | 119 | ||
120 | extern const struct cpumask *cpu_coregroup_mask(int cpu); | 120 | extern const struct cpumask *cpu_coregroup_mask(int cpu); |
121 | 121 | ||
122 | #ifdef ENABLE_TOPO_DEFINES | ||
123 | #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) | 122 | #define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id) |
124 | #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) | 123 | #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) |
124 | |||
125 | #ifdef ENABLE_TOPO_DEFINES | ||
125 | #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) | 126 | #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) |
126 | #define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) | 127 | #define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) |
127 | #endif | 128 | #endif |
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c index fd972a3e4cbb..9fa8aa051f54 100644 --- a/arch/x86/kernel/aperture_64.c +++ b/arch/x86/kernel/aperture_64.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/pci_ids.h> | 18 | #include <linux/pci_ids.h> |
19 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
20 | #include <linux/bitops.h> | 20 | #include <linux/bitops.h> |
21 | #include <linux/ioport.h> | ||
22 | #include <linux/suspend.h> | 21 | #include <linux/suspend.h> |
23 | #include <asm/e820.h> | 22 | #include <asm/e820.h> |
24 | #include <asm/io.h> | 23 | #include <asm/io.h> |
@@ -54,18 +53,6 @@ int fallback_aper_force __initdata; | |||
54 | 53 | ||
55 | int fix_aperture __initdata = 1; | 54 | int fix_aperture __initdata = 1; |
56 | 55 | ||
57 | static struct resource gart_resource = { | ||
58 | .name = "GART", | ||
59 | .flags = IORESOURCE_MEM, | ||
60 | }; | ||
61 | |||
62 | static void __init insert_aperture_resource(u32 aper_base, u32 aper_size) | ||
63 | { | ||
64 | gart_resource.start = aper_base; | ||
65 | gart_resource.end = aper_base + aper_size - 1; | ||
66 | insert_resource(&iomem_resource, &gart_resource); | ||
67 | } | ||
68 | |||
69 | /* This code runs before the PCI subsystem is initialized, so just | 56 | /* This code runs before the PCI subsystem is initialized, so just |
70 | access the northbridge directly. */ | 57 | access the northbridge directly. */ |
71 | 58 | ||
@@ -96,7 +83,6 @@ static u32 __init allocate_aperture(void) | |||
96 | memblock_reserve(addr, aper_size); | 83 | memblock_reserve(addr, aper_size); |
97 | printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", | 84 | printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", |
98 | aper_size >> 10, addr); | 85 | aper_size >> 10, addr); |
99 | insert_aperture_resource((u32)addr, aper_size); | ||
100 | register_nosave_region(addr >> PAGE_SHIFT, | 86 | register_nosave_region(addr >> PAGE_SHIFT, |
101 | (addr+aper_size) >> PAGE_SHIFT); | 87 | (addr+aper_size) >> PAGE_SHIFT); |
102 | 88 | ||
@@ -444,12 +430,8 @@ int __init gart_iommu_hole_init(void) | |||
444 | 430 | ||
445 | out: | 431 | out: |
446 | if (!fix && !fallback_aper_force) { | 432 | if (!fix && !fallback_aper_force) { |
447 | if (last_aper_base) { | 433 | if (last_aper_base) |
448 | unsigned long n = (32 * 1024 * 1024) << last_aper_order; | ||
449 | |||
450 | insert_aperture_resource((u32)last_aper_base, n); | ||
451 | return 1; | 434 | return 1; |
452 | } | ||
453 | return 0; | 435 | return 0; |
454 | } | 436 | } |
455 | 437 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index c88f7f4b03ee..047f540cf3f7 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c | |||
@@ -3334,6 +3334,8 @@ static int __init uncore_type_init(struct intel_uncore_type *type) | |||
3334 | if (!pmus) | 3334 | if (!pmus) |
3335 | return -ENOMEM; | 3335 | return -ENOMEM; |
3336 | 3336 | ||
3337 | type->pmus = pmus; | ||
3338 | |||
3337 | type->unconstrainted = (struct event_constraint) | 3339 | type->unconstrainted = (struct event_constraint) |
3338 | __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, | 3340 | __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, |
3339 | 0, type->num_counters, 0, 0); | 3341 | 0, type->num_counters, 0, 0); |
@@ -3369,7 +3371,6 @@ static int __init uncore_type_init(struct intel_uncore_type *type) | |||
3369 | } | 3371 | } |
3370 | 3372 | ||
3371 | type->pmu_group = &uncore_pmu_attr_group; | 3373 | type->pmu_group = &uncore_pmu_attr_group; |
3372 | type->pmus = pmus; | ||
3373 | return 0; | 3374 | return 0; |
3374 | fail: | 3375 | fail: |
3375 | uncore_type_exit(type); | 3376 | uncore_type_exit(type); |
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index e8368c6dd2a2..d5dd80814419 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -86,10 +86,19 @@ EXPORT_SYMBOL(__kernel_fpu_begin); | |||
86 | 86 | ||
87 | void __kernel_fpu_end(void) | 87 | void __kernel_fpu_end(void) |
88 | { | 88 | { |
89 | if (use_eager_fpu()) | 89 | if (use_eager_fpu()) { |
90 | math_state_restore(); | 90 | /* |
91 | else | 91 | * For eager fpu, most the time, tsk_used_math() is true. |
92 | * Restore the user math as we are done with the kernel usage. | ||
93 | * At few instances during thread exit, signal handling etc, | ||
94 | * tsk_used_math() is false. Those few places will take proper | ||
95 | * actions, so we don't need to restore the math here. | ||
96 | */ | ||
97 | if (likely(tsk_used_math(current))) | ||
98 | math_state_restore(); | ||
99 | } else { | ||
92 | stts(); | 100 | stts(); |
101 | } | ||
93 | } | 102 | } |
94 | EXPORT_SYMBOL(__kernel_fpu_end); | 103 | EXPORT_SYMBOL(__kernel_fpu_end); |
95 | 104 | ||
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 7c6acd4b8995..ff898bbf579d 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c | |||
@@ -529,7 +529,7 @@ static void quirk_amd_nb_node(struct pci_dev *dev) | |||
529 | return; | 529 | return; |
530 | 530 | ||
531 | pci_read_config_dword(nb_ht, 0x60, &val); | 531 | pci_read_config_dword(nb_ht, 0x60, &val); |
532 | node = val & 7; | 532 | node = pcibus_to_node(dev->bus) | (val & 7); |
533 | /* | 533 | /* |
534 | * Some hardware may return an invalid node ID, | 534 | * Some hardware may return an invalid node ID, |
535 | * so check it first: | 535 | * so check it first: |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 256282e7888b..2423ef04ffea 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, | |||
365 | /* Assume pteval_t is equivalent to all the other *val_t types. */ | 365 | /* Assume pteval_t is equivalent to all the other *val_t types. */ |
366 | static pteval_t pte_mfn_to_pfn(pteval_t val) | 366 | static pteval_t pte_mfn_to_pfn(pteval_t val) |
367 | { | 367 | { |
368 | if (pteval_present(val)) { | 368 | if (val & _PAGE_PRESENT) { |
369 | unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; | 369 | unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; |
370 | unsigned long pfn = mfn_to_pfn(mfn); | 370 | unsigned long pfn = mfn_to_pfn(mfn); |
371 | 371 | ||
@@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val) | |||
381 | 381 | ||
382 | static pteval_t pte_pfn_to_mfn(pteval_t val) | 382 | static pteval_t pte_pfn_to_mfn(pteval_t val) |
383 | { | 383 | { |
384 | if (pteval_present(val)) { | 384 | if (val & _PAGE_PRESENT) { |
385 | unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; | 385 | unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; |
386 | pteval_t flags = val & PTE_FLAGS_MASK; | 386 | pteval_t flags = val & PTE_FLAGS_MASK; |
387 | unsigned long mfn; | 387 | unsigned long mfn; |
diff --git a/block/blk-core.c b/block/blk-core.c index 853f92749202..bfe16d5af9f9 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -693,20 +693,11 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | |||
693 | if (!uninit_q) | 693 | if (!uninit_q) |
694 | return NULL; | 694 | return NULL; |
695 | 695 | ||
696 | uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL); | ||
697 | if (!uninit_q->flush_rq) | ||
698 | goto out_cleanup_queue; | ||
699 | |||
700 | q = blk_init_allocated_queue(uninit_q, rfn, lock); | 696 | q = blk_init_allocated_queue(uninit_q, rfn, lock); |
701 | if (!q) | 697 | if (!q) |
702 | goto out_free_flush_rq; | 698 | blk_cleanup_queue(uninit_q); |
703 | return q; | ||
704 | 699 | ||
705 | out_free_flush_rq: | 700 | return q; |
706 | kfree(uninit_q->flush_rq); | ||
707 | out_cleanup_queue: | ||
708 | blk_cleanup_queue(uninit_q); | ||
709 | return NULL; | ||
710 | } | 701 | } |
711 | EXPORT_SYMBOL(blk_init_queue_node); | 702 | EXPORT_SYMBOL(blk_init_queue_node); |
712 | 703 | ||
@@ -717,9 +708,13 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, | |||
717 | if (!q) | 708 | if (!q) |
718 | return NULL; | 709 | return NULL; |
719 | 710 | ||
720 | if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) | 711 | q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL); |
712 | if (!q->flush_rq) | ||
721 | return NULL; | 713 | return NULL; |
722 | 714 | ||
715 | if (blk_init_rl(&q->root_rl, q, GFP_KERNEL)) | ||
716 | goto fail; | ||
717 | |||
723 | q->request_fn = rfn; | 718 | q->request_fn = rfn; |
724 | q->prep_rq_fn = NULL; | 719 | q->prep_rq_fn = NULL; |
725 | q->unprep_rq_fn = NULL; | 720 | q->unprep_rq_fn = NULL; |
@@ -742,12 +737,16 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, | |||
742 | /* init elevator */ | 737 | /* init elevator */ |
743 | if (elevator_init(q, NULL)) { | 738 | if (elevator_init(q, NULL)) { |
744 | mutex_unlock(&q->sysfs_lock); | 739 | mutex_unlock(&q->sysfs_lock); |
745 | return NULL; | 740 | goto fail; |
746 | } | 741 | } |
747 | 742 | ||
748 | mutex_unlock(&q->sysfs_lock); | 743 | mutex_unlock(&q->sysfs_lock); |
749 | 744 | ||
750 | return q; | 745 | return q; |
746 | |||
747 | fail: | ||
748 | kfree(q->flush_rq); | ||
749 | return NULL; | ||
751 | } | 750 | } |
752 | EXPORT_SYMBOL(blk_init_allocated_queue); | 751 | EXPORT_SYMBOL(blk_init_allocated_queue); |
753 | 752 | ||
diff --git a/block/blk-flush.c b/block/blk-flush.c index f598f794c3c6..43e6b4755e9a 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c | |||
@@ -140,14 +140,17 @@ static void mq_flush_run(struct work_struct *work) | |||
140 | blk_mq_insert_request(rq, false, true, false); | 140 | blk_mq_insert_request(rq, false, true, false); |
141 | } | 141 | } |
142 | 142 | ||
143 | static bool blk_flush_queue_rq(struct request *rq) | 143 | static bool blk_flush_queue_rq(struct request *rq, bool add_front) |
144 | { | 144 | { |
145 | if (rq->q->mq_ops) { | 145 | if (rq->q->mq_ops) { |
146 | INIT_WORK(&rq->mq_flush_work, mq_flush_run); | 146 | INIT_WORK(&rq->mq_flush_work, mq_flush_run); |
147 | kblockd_schedule_work(rq->q, &rq->mq_flush_work); | 147 | kblockd_schedule_work(rq->q, &rq->mq_flush_work); |
148 | return false; | 148 | return false; |
149 | } else { | 149 | } else { |
150 | list_add_tail(&rq->queuelist, &rq->q->queue_head); | 150 | if (add_front) |
151 | list_add(&rq->queuelist, &rq->q->queue_head); | ||
152 | else | ||
153 | list_add_tail(&rq->queuelist, &rq->q->queue_head); | ||
151 | return true; | 154 | return true; |
152 | } | 155 | } |
153 | } | 156 | } |
@@ -193,7 +196,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq, | |||
193 | 196 | ||
194 | case REQ_FSEQ_DATA: | 197 | case REQ_FSEQ_DATA: |
195 | list_move_tail(&rq->flush.list, &q->flush_data_in_flight); | 198 | list_move_tail(&rq->flush.list, &q->flush_data_in_flight); |
196 | queued = blk_flush_queue_rq(rq); | 199 | queued = blk_flush_queue_rq(rq, true); |
197 | break; | 200 | break; |
198 | 201 | ||
199 | case REQ_FSEQ_DONE: | 202 | case REQ_FSEQ_DONE: |
@@ -326,7 +329,7 @@ static bool blk_kick_flush(struct request_queue *q) | |||
326 | q->flush_rq->rq_disk = first_rq->rq_disk; | 329 | q->flush_rq->rq_disk = first_rq->rq_disk; |
327 | q->flush_rq->end_io = flush_end_io; | 330 | q->flush_rq->end_io = flush_end_io; |
328 | 331 | ||
329 | return blk_flush_queue_rq(q->flush_rq); | 332 | return blk_flush_queue_rq(q->flush_rq, false); |
330 | } | 333 | } |
331 | 334 | ||
332 | static void flush_data_end_io(struct request *rq, int error) | 335 | static void flush_data_end_io(struct request *rq, int error) |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index b718806657cd..c40fb2e81bbc 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -71,6 +71,17 @@ static int acpi_sleep_prepare(u32 acpi_state) | |||
71 | return 0; | 71 | return 0; |
72 | } | 72 | } |
73 | 73 | ||
74 | static bool acpi_sleep_state_supported(u8 sleep_state) | ||
75 | { | ||
76 | acpi_status status; | ||
77 | u8 type_a, type_b; | ||
78 | |||
79 | status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b); | ||
80 | return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware | ||
81 | || (acpi_gbl_FADT.sleep_control.address | ||
82 | && acpi_gbl_FADT.sleep_status.address)); | ||
83 | } | ||
84 | |||
74 | #ifdef CONFIG_ACPI_SLEEP | 85 | #ifdef CONFIG_ACPI_SLEEP |
75 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; | 86 | static u32 acpi_target_sleep_state = ACPI_STATE_S0; |
76 | 87 | ||
@@ -604,15 +615,9 @@ static void acpi_sleep_suspend_setup(void) | |||
604 | { | 615 | { |
605 | int i; | 616 | int i; |
606 | 617 | ||
607 | for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) { | 618 | for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) |
608 | acpi_status status; | 619 | if (acpi_sleep_state_supported(i)) |
609 | u8 type_a, type_b; | ||
610 | |||
611 | status = acpi_get_sleep_type_data(i, &type_a, &type_b); | ||
612 | if (ACPI_SUCCESS(status)) { | ||
613 | sleep_states[i] = 1; | 620 | sleep_states[i] = 1; |
614 | } | ||
615 | } | ||
616 | 621 | ||
617 | suspend_set_ops(old_suspend_ordering ? | 622 | suspend_set_ops(old_suspend_ordering ? |
618 | &acpi_suspend_ops_old : &acpi_suspend_ops); | 623 | &acpi_suspend_ops_old : &acpi_suspend_ops); |
@@ -740,11 +745,7 @@ static const struct platform_hibernation_ops acpi_hibernation_ops_old = { | |||
740 | 745 | ||
741 | static void acpi_sleep_hibernate_setup(void) | 746 | static void acpi_sleep_hibernate_setup(void) |
742 | { | 747 | { |
743 | acpi_status status; | 748 | if (!acpi_sleep_state_supported(ACPI_STATE_S4)) |
744 | u8 type_a, type_b; | ||
745 | |||
746 | status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b); | ||
747 | if (ACPI_FAILURE(status)) | ||
748 | return; | 749 | return; |
749 | 750 | ||
750 | hibernation_set_ops(old_suspend_ordering ? | 751 | hibernation_set_ops(old_suspend_ordering ? |
@@ -793,8 +794,6 @@ static void acpi_power_off(void) | |||
793 | 794 | ||
794 | int __init acpi_sleep_init(void) | 795 | int __init acpi_sleep_init(void) |
795 | { | 796 | { |
796 | acpi_status status; | ||
797 | u8 type_a, type_b; | ||
798 | char supported[ACPI_S_STATE_COUNT * 3 + 1]; | 797 | char supported[ACPI_S_STATE_COUNT * 3 + 1]; |
799 | char *pos = supported; | 798 | char *pos = supported; |
800 | int i; | 799 | int i; |
@@ -806,8 +805,7 @@ int __init acpi_sleep_init(void) | |||
806 | acpi_sleep_suspend_setup(); | 805 | acpi_sleep_suspend_setup(); |
807 | acpi_sleep_hibernate_setup(); | 806 | acpi_sleep_hibernate_setup(); |
808 | 807 | ||
809 | status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b); | 808 | if (acpi_sleep_state_supported(ACPI_STATE_S5)) { |
810 | if (ACPI_SUCCESS(status)) { | ||
811 | sleep_states[ACPI_STATE_S5] = 1; | 809 | sleep_states[ACPI_STATE_S5] = 1; |
812 | pm_power_off_prepare = acpi_power_off_prepare; | 810 | pm_power_off_prepare = acpi_power_off_prepare; |
813 | pm_power_off = acpi_power_off; | 811 | pm_power_off = acpi_power_off; |
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 516026954be6..d777bb7cea93 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c | |||
@@ -4498,7 +4498,7 @@ static int mtip_pci_probe(struct pci_dev *pdev, | |||
4498 | } | 4498 | } |
4499 | dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n", | 4499 | dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n", |
4500 | my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev), | 4500 | my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev), |
4501 | cpu_to_node(smp_processor_id()), smp_processor_id()); | 4501 | cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id()); |
4502 | 4502 | ||
4503 | dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node); | 4503 | dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node); |
4504 | if (dd == NULL) { | 4504 | if (dd == NULL) { |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index b365e0dfccb6..34898d53395b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -2109,7 +2109,6 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request) | |||
2109 | rbd_assert(img_request->obj_request_count > 0); | 2109 | rbd_assert(img_request->obj_request_count > 0); |
2110 | rbd_assert(which != BAD_WHICH); | 2110 | rbd_assert(which != BAD_WHICH); |
2111 | rbd_assert(which < img_request->obj_request_count); | 2111 | rbd_assert(which < img_request->obj_request_count); |
2112 | rbd_assert(which >= img_request->next_completion); | ||
2113 | 2112 | ||
2114 | spin_lock_irq(&img_request->completion_lock); | 2113 | spin_lock_irq(&img_request->completion_lock); |
2115 | if (which != img_request->next_completion) | 2114 | if (which != img_request->next_completion) |
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c index 02821b06a39e..a918bc481c52 100644 --- a/drivers/clocksource/vf_pit_timer.c +++ b/drivers/clocksource/vf_pit_timer.c | |||
@@ -54,7 +54,7 @@ static inline void pit_irq_acknowledge(void) | |||
54 | 54 | ||
55 | static u64 pit_read_sched_clock(void) | 55 | static u64 pit_read_sched_clock(void) |
56 | { | 56 | { |
57 | return __raw_readl(clksrc_base + PITCVAL); | 57 | return ~__raw_readl(clksrc_base + PITCVAL); |
58 | } | 58 | } |
59 | 59 | ||
60 | static int __init pit_clocksource_init(unsigned long rate) | 60 | static int __init pit_clocksource_init(unsigned long rate) |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index cf485d928903..199b52b7c3e1 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -1129,7 +1129,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, | |||
1129 | per_cpu(cpufreq_cpu_data, j) = policy; | 1129 | per_cpu(cpufreq_cpu_data, j) = policy; |
1130 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1130 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1131 | 1131 | ||
1132 | if (cpufreq_driver->get) { | 1132 | if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { |
1133 | policy->cur = cpufreq_driver->get(policy->cpu); | 1133 | policy->cur = cpufreq_driver->get(policy->cpu); |
1134 | if (!policy->cur) { | 1134 | if (!policy->cur) { |
1135 | pr_err("%s: ->get() failed\n", __func__); | 1135 | pr_err("%s: ->get() failed\n", __func__); |
@@ -2143,7 +2143,7 @@ int cpufreq_update_policy(unsigned int cpu) | |||
2143 | * BIOS might change freq behind our back | 2143 | * BIOS might change freq behind our back |
2144 | * -> ask driver for current freq and notify governors about a change | 2144 | * -> ask driver for current freq and notify governors about a change |
2145 | */ | 2145 | */ |
2146 | if (cpufreq_driver->get) { | 2146 | if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { |
2147 | new_policy.cur = cpufreq_driver->get(cpu); | 2147 | new_policy.cur = cpufreq_driver->get(cpu); |
2148 | if (!policy->cur) { | 2148 | if (!policy->cur) { |
2149 | pr_debug("Driver did not initialize current freq"); | 2149 | pr_debug("Driver did not initialize current freq"); |
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 5736aaa7e86c..f7af69bcf3f4 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c | |||
@@ -468,8 +468,8 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) | |||
468 | } else { | 468 | } else { |
469 | list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list, | 469 | list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list, |
470 | legacy_dev_list) { | 470 | legacy_dev_list) { |
471 | drm_put_dev(dev); | ||
472 | list_del(&dev->legacy_dev_list); | 471 | list_del(&dev->legacy_dev_list); |
472 | drm_put_dev(dev); | ||
473 | } | 473 | } |
474 | } | 474 | } |
475 | DRM_INFO("Module unloaded\n"); | 475 | DRM_INFO("Module unloaded\n"); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 215131ab1dd2..c204b4e3356e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -172,20 +172,24 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) | |||
172 | 172 | ||
173 | ret = exynos_drm_subdrv_open(dev, file); | 173 | ret = exynos_drm_subdrv_open(dev, file); |
174 | if (ret) | 174 | if (ret) |
175 | goto out; | 175 | goto err_file_priv_free; |
176 | 176 | ||
177 | anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops, | 177 | anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops, |
178 | NULL, 0); | 178 | NULL, 0); |
179 | if (IS_ERR(anon_filp)) { | 179 | if (IS_ERR(anon_filp)) { |
180 | ret = PTR_ERR(anon_filp); | 180 | ret = PTR_ERR(anon_filp); |
181 | goto out; | 181 | goto err_subdrv_close; |
182 | } | 182 | } |
183 | 183 | ||
184 | anon_filp->f_mode = FMODE_READ | FMODE_WRITE; | 184 | anon_filp->f_mode = FMODE_READ | FMODE_WRITE; |
185 | file_priv->anon_filp = anon_filp; | 185 | file_priv->anon_filp = anon_filp; |
186 | 186 | ||
187 | return ret; | 187 | return ret; |
188 | out: | 188 | |
189 | err_subdrv_close: | ||
190 | exynos_drm_subdrv_close(dev, file); | ||
191 | |||
192 | err_file_priv_free: | ||
189 | kfree(file_priv); | 193 | kfree(file_priv); |
190 | file->driver_priv = NULL; | 194 | file->driver_priv = NULL; |
191 | return ret; | 195 | return ret; |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 40a2b36b276b..d278be110805 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -842,7 +842,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) | |||
842 | dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, | 842 | dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, |
843 | dev_priv->gtt.base.start / PAGE_SIZE, | 843 | dev_priv->gtt.base.start / PAGE_SIZE, |
844 | dev_priv->gtt.base.total / PAGE_SIZE, | 844 | dev_priv->gtt.base.total / PAGE_SIZE, |
845 | false); | 845 | true); |
846 | } | 846 | } |
847 | 847 | ||
848 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) | 848 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index d58b4e287e32..28d24caa49f3 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
@@ -214,6 +214,13 @@ int i915_gem_init_stolen(struct drm_device *dev) | |||
214 | struct drm_i915_private *dev_priv = dev->dev_private; | 214 | struct drm_i915_private *dev_priv = dev->dev_private; |
215 | int bios_reserved = 0; | 215 | int bios_reserved = 0; |
216 | 216 | ||
217 | #ifdef CONFIG_INTEL_IOMMU | ||
218 | if (intel_iommu_gfx_mapped) { | ||
219 | DRM_INFO("DMAR active, disabling use of stolen memory\n"); | ||
220 | return 0; | ||
221 | } | ||
222 | #endif | ||
223 | |||
217 | if (dev_priv->gtt.stolen_size == 0) | 224 | if (dev_priv->gtt.stolen_size == 0) |
218 | return 0; | 225 | return 0; |
219 | 226 | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9fec71175571..d554169ac592 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -618,33 +618,25 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) | |||
618 | 618 | ||
619 | /* raw reads, only for fast reads of display block, no need for forcewake etc. */ | 619 | /* raw reads, only for fast reads of display block, no need for forcewake etc. */ |
620 | #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) | 620 | #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__)) |
621 | #define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__)) | ||
622 | 621 | ||
623 | static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe) | 622 | static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe) |
624 | { | 623 | { |
625 | struct drm_i915_private *dev_priv = dev->dev_private; | 624 | struct drm_i915_private *dev_priv = dev->dev_private; |
626 | uint32_t status; | 625 | uint32_t status; |
627 | 626 | int reg; | |
628 | if (INTEL_INFO(dev)->gen < 7) { | 627 | |
629 | status = pipe == PIPE_A ? | 628 | if (INTEL_INFO(dev)->gen >= 8) { |
630 | DE_PIPEA_VBLANK : | 629 | status = GEN8_PIPE_VBLANK; |
631 | DE_PIPEB_VBLANK; | 630 | reg = GEN8_DE_PIPE_ISR(pipe); |
631 | } else if (INTEL_INFO(dev)->gen >= 7) { | ||
632 | status = DE_PIPE_VBLANK_IVB(pipe); | ||
633 | reg = DEISR; | ||
632 | } else { | 634 | } else { |
633 | switch (pipe) { | 635 | status = DE_PIPE_VBLANK(pipe); |
634 | default: | 636 | reg = DEISR; |
635 | case PIPE_A: | ||
636 | status = DE_PIPEA_VBLANK_IVB; | ||
637 | break; | ||
638 | case PIPE_B: | ||
639 | status = DE_PIPEB_VBLANK_IVB; | ||
640 | break; | ||
641 | case PIPE_C: | ||
642 | status = DE_PIPEC_VBLANK_IVB; | ||
643 | break; | ||
644 | } | ||
645 | } | 637 | } |
646 | 638 | ||
647 | return __raw_i915_read32(dev_priv, DEISR) & status; | 639 | return __raw_i915_read32(dev_priv, reg) & status; |
648 | } | 640 | } |
649 | 641 | ||
650 | static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | 642 | static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, |
@@ -702,7 +694,28 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, | |||
702 | else | 694 | else |
703 | position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; | 695 | position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; |
704 | 696 | ||
705 | if (HAS_PCH_SPLIT(dev)) { | 697 | if (HAS_DDI(dev)) { |
698 | /* | ||
699 | * On HSW HDMI outputs there seems to be a 2 line | ||
700 | * difference, whereas eDP has the normal 1 line | ||
701 | * difference that earlier platforms have. External | ||
702 | * DP is unknown. For now just check for the 2 line | ||
703 | * difference case on all output types on HSW+. | ||
704 | * | ||
705 | * This might misinterpret the scanline counter being | ||
706 | * one line too far along on eDP, but that's less | ||
707 | * dangerous than the alternative since that would lead | ||
708 | * the vblank timestamp code astray when it sees a | ||
709 | * scanline count before vblank_start during a vblank | ||
710 | * interrupt. | ||
711 | */ | ||
712 | in_vbl = ilk_pipe_in_vblank_locked(dev, pipe); | ||
713 | if ((in_vbl && (position == vbl_start - 2 || | ||
714 | position == vbl_start - 1)) || | ||
715 | (!in_vbl && (position == vbl_end - 2 || | ||
716 | position == vbl_end - 1))) | ||
717 | position = (position + 2) % vtotal; | ||
718 | } else if (HAS_PCH_SPLIT(dev)) { | ||
706 | /* | 719 | /* |
707 | * The scanline counter increments at the leading edge | 720 | * The scanline counter increments at the leading edge |
708 | * of hsync, ie. it completely misses the active portion | 721 | * of hsync, ie. it completely misses the active portion |
@@ -2769,10 +2782,9 @@ static void ibx_irq_postinstall(struct drm_device *dev) | |||
2769 | return; | 2782 | return; |
2770 | 2783 | ||
2771 | if (HAS_PCH_IBX(dev)) { | 2784 | if (HAS_PCH_IBX(dev)) { |
2772 | mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | | 2785 | mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; |
2773 | SDE_TRANSA_FIFO_UNDER | SDE_POISON; | ||
2774 | } else { | 2786 | } else { |
2775 | mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; | 2787 | mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; |
2776 | 2788 | ||
2777 | I915_WRITE(SERR_INT, I915_READ(SERR_INT)); | 2789 | I915_WRITE(SERR_INT, I915_READ(SERR_INT)); |
2778 | } | 2790 | } |
@@ -2832,20 +2844,19 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
2832 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | | 2844 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | |
2833 | DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | | 2845 | DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | |
2834 | DE_PLANEB_FLIP_DONE_IVB | | 2846 | DE_PLANEB_FLIP_DONE_IVB | |
2835 | DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | | 2847 | DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); |
2836 | DE_ERR_INT_IVB); | ||
2837 | extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | | 2848 | extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | |
2838 | DE_PIPEA_VBLANK_IVB); | 2849 | DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); |
2839 | 2850 | ||
2840 | I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); | 2851 | I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); |
2841 | } else { | 2852 | } else { |
2842 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | 2853 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
2843 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | | 2854 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | |
2844 | DE_AUX_CHANNEL_A | | 2855 | DE_AUX_CHANNEL_A | |
2845 | DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | | ||
2846 | DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | | 2856 | DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | |
2847 | DE_POISON); | 2857 | DE_POISON); |
2848 | extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; | 2858 | extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | |
2859 | DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; | ||
2849 | } | 2860 | } |
2850 | 2861 | ||
2851 | dev_priv->irq_mask = ~display_mask; | 2862 | dev_priv->irq_mask = ~display_mask; |
@@ -2961,9 +2972,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) | |||
2961 | struct drm_device *dev = dev_priv->dev; | 2972 | struct drm_device *dev = dev_priv->dev; |
2962 | uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | | 2973 | uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | |
2963 | GEN8_PIPE_CDCLK_CRC_DONE | | 2974 | GEN8_PIPE_CDCLK_CRC_DONE | |
2964 | GEN8_PIPE_FIFO_UNDERRUN | | ||
2965 | GEN8_DE_PIPE_IRQ_FAULT_ERRORS; | 2975 | GEN8_DE_PIPE_IRQ_FAULT_ERRORS; |
2966 | uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK; | 2976 | uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | |
2977 | GEN8_PIPE_FIFO_UNDERRUN; | ||
2967 | int pipe; | 2978 | int pipe; |
2968 | dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; | 2979 | dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; |
2969 | dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; | 2980 | dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index e06b9e017d6b..234ac5f7bc5a 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -1244,6 +1244,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder) | |||
1244 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { | 1244 | if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { |
1245 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1245 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
1246 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); | 1246 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); |
1247 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1247 | ironlake_edp_panel_off(intel_dp); | 1248 | ironlake_edp_panel_off(intel_dp); |
1248 | } | 1249 | } |
1249 | 1250 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 57552eb386b0..2688f6d64bb9 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -1249,17 +1249,24 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp) | |||
1249 | 1249 | ||
1250 | DRM_DEBUG_KMS("Turn eDP power off\n"); | 1250 | DRM_DEBUG_KMS("Turn eDP power off\n"); |
1251 | 1251 | ||
1252 | WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); | ||
1253 | |||
1252 | pp = ironlake_get_pp_control(intel_dp); | 1254 | pp = ironlake_get_pp_control(intel_dp); |
1253 | /* We need to switch off panel power _and_ force vdd, for otherwise some | 1255 | /* We need to switch off panel power _and_ force vdd, for otherwise some |
1254 | * panels get very unhappy and cease to work. */ | 1256 | * panels get very unhappy and cease to work. */ |
1255 | pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE); | 1257 | pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); |
1256 | 1258 | ||
1257 | pp_ctrl_reg = _pp_ctrl_reg(intel_dp); | 1259 | pp_ctrl_reg = _pp_ctrl_reg(intel_dp); |
1258 | 1260 | ||
1259 | I915_WRITE(pp_ctrl_reg, pp); | 1261 | I915_WRITE(pp_ctrl_reg, pp); |
1260 | POSTING_READ(pp_ctrl_reg); | 1262 | POSTING_READ(pp_ctrl_reg); |
1261 | 1263 | ||
1264 | intel_dp->want_panel_vdd = false; | ||
1265 | |||
1262 | ironlake_wait_panel_off(intel_dp); | 1266 | ironlake_wait_panel_off(intel_dp); |
1267 | |||
1268 | /* We got a reference when we enabled the VDD. */ | ||
1269 | intel_runtime_pm_put(dev_priv); | ||
1263 | } | 1270 | } |
1264 | 1271 | ||
1265 | void ironlake_edp_backlight_on(struct intel_dp *intel_dp) | 1272 | void ironlake_edp_backlight_on(struct intel_dp *intel_dp) |
@@ -1639,7 +1646,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp) | |||
1639 | val |= EDP_PSR_LINK_DISABLE; | 1646 | val |= EDP_PSR_LINK_DISABLE; |
1640 | 1647 | ||
1641 | I915_WRITE(EDP_PSR_CTL(dev), val | | 1648 | I915_WRITE(EDP_PSR_CTL(dev), val | |
1642 | IS_BROADWELL(dev) ? 0 : link_entry_time | | 1649 | (IS_BROADWELL(dev) ? 0 : link_entry_time) | |
1643 | max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | | 1650 | max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | |
1644 | idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | | 1651 | idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | |
1645 | EDP_PSR_ENABLE); | 1652 | EDP_PSR_ENABLE); |
@@ -1784,6 +1791,7 @@ static void intel_disable_dp(struct intel_encoder *encoder) | |||
1784 | 1791 | ||
1785 | /* Make sure the panel is off before trying to change the mode. But also | 1792 | /* Make sure the panel is off before trying to change the mode. But also |
1786 | * ensure that we have vdd while we switch off the panel. */ | 1793 | * ensure that we have vdd while we switch off the panel. */ |
1794 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1787 | ironlake_edp_backlight_off(intel_dp); | 1795 | ironlake_edp_backlight_off(intel_dp); |
1788 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); | 1796 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); |
1789 | ironlake_edp_panel_off(intel_dp); | 1797 | ironlake_edp_panel_off(intel_dp); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 89c484d8ac26..4ee702ac8907 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c | |||
@@ -866,13 +866,16 @@ static int nouveau_pmops_runtime_suspend(struct device *dev) | |||
866 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | 866 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
867 | int ret; | 867 | int ret; |
868 | 868 | ||
869 | if (nouveau_runtime_pm == 0) | 869 | if (nouveau_runtime_pm == 0) { |
870 | return -EINVAL; | 870 | pm_runtime_forbid(dev); |
871 | return -EBUSY; | ||
872 | } | ||
871 | 873 | ||
872 | /* are we optimus enabled? */ | 874 | /* are we optimus enabled? */ |
873 | if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { | 875 | if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { |
874 | DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); | 876 | DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); |
875 | return -EINVAL; | 877 | pm_runtime_forbid(dev); |
878 | return -EBUSY; | ||
876 | } | 879 | } |
877 | 880 | ||
878 | nv_debug_level(SILENT); | 881 | nv_debug_level(SILENT); |
@@ -923,12 +926,15 @@ static int nouveau_pmops_runtime_idle(struct device *dev) | |||
923 | struct nouveau_drm *drm = nouveau_drm(drm_dev); | 926 | struct nouveau_drm *drm = nouveau_drm(drm_dev); |
924 | struct drm_crtc *crtc; | 927 | struct drm_crtc *crtc; |
925 | 928 | ||
926 | if (nouveau_runtime_pm == 0) | 929 | if (nouveau_runtime_pm == 0) { |
930 | pm_runtime_forbid(dev); | ||
927 | return -EBUSY; | 931 | return -EBUSY; |
932 | } | ||
928 | 933 | ||
929 | /* are we optimus enabled? */ | 934 | /* are we optimus enabled? */ |
930 | if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { | 935 | if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) { |
931 | DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); | 936 | DRM_DEBUG_DRIVER("failing to power off - not optimus\n"); |
937 | pm_runtime_forbid(dev); | ||
932 | return -EBUSY; | 938 | return -EBUSY; |
933 | } | 939 | } |
934 | 940 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 84a1bbb75f91..f633c2782170 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
@@ -403,11 +403,15 @@ static int radeon_pmops_runtime_suspend(struct device *dev) | |||
403 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | 403 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
404 | int ret; | 404 | int ret; |
405 | 405 | ||
406 | if (radeon_runtime_pm == 0) | 406 | if (radeon_runtime_pm == 0) { |
407 | return -EINVAL; | 407 | pm_runtime_forbid(dev); |
408 | return -EBUSY; | ||
409 | } | ||
408 | 410 | ||
409 | if (radeon_runtime_pm == -1 && !radeon_is_px()) | 411 | if (radeon_runtime_pm == -1 && !radeon_is_px()) { |
410 | return -EINVAL; | 412 | pm_runtime_forbid(dev); |
413 | return -EBUSY; | ||
414 | } | ||
411 | 415 | ||
412 | drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; | 416 | drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; |
413 | drm_kms_helper_poll_disable(drm_dev); | 417 | drm_kms_helper_poll_disable(drm_dev); |
@@ -456,12 +460,15 @@ static int radeon_pmops_runtime_idle(struct device *dev) | |||
456 | struct drm_device *drm_dev = pci_get_drvdata(pdev); | 460 | struct drm_device *drm_dev = pci_get_drvdata(pdev); |
457 | struct drm_crtc *crtc; | 461 | struct drm_crtc *crtc; |
458 | 462 | ||
459 | if (radeon_runtime_pm == 0) | 463 | if (radeon_runtime_pm == 0) { |
464 | pm_runtime_forbid(dev); | ||
460 | return -EBUSY; | 465 | return -EBUSY; |
466 | } | ||
461 | 467 | ||
462 | /* are we PX enabled? */ | 468 | /* are we PX enabled? */ |
463 | if (radeon_runtime_pm == -1 && !radeon_is_px()) { | 469 | if (radeon_runtime_pm == -1 && !radeon_is_px()) { |
464 | DRM_DEBUG_DRIVER("failing to power off - not px\n"); | 470 | DRM_DEBUG_DRIVER("failing to power off - not px\n"); |
471 | pm_runtime_forbid(dev); | ||
465 | return -EBUSY; | 472 | return -EBUSY; |
466 | } | 473 | } |
467 | 474 | ||
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 8d67b943ac05..0394811251bd 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c | |||
@@ -177,8 +177,10 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj) | |||
177 | if (obj->vmapping) | 177 | if (obj->vmapping) |
178 | udl_gem_vunmap(obj); | 178 | udl_gem_vunmap(obj); |
179 | 179 | ||
180 | if (gem_obj->import_attach) | 180 | if (gem_obj->import_attach) { |
181 | drm_prime_gem_destroy(gem_obj, obj->sg); | 181 | drm_prime_gem_destroy(gem_obj, obj->sg); |
182 | put_device(gem_obj->dev->dev); | ||
183 | } | ||
182 | 184 | ||
183 | if (obj->pages) | 185 | if (obj->pages) |
184 | udl_gem_put_pages(obj); | 186 | udl_gem_put_pages(obj); |
@@ -256,9 +258,12 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev, | |||
256 | int ret; | 258 | int ret; |
257 | 259 | ||
258 | /* need to attach */ | 260 | /* need to attach */ |
261 | get_device(dev->dev); | ||
259 | attach = dma_buf_attach(dma_buf, dev->dev); | 262 | attach = dma_buf_attach(dma_buf, dev->dev); |
260 | if (IS_ERR(attach)) | 263 | if (IS_ERR(attach)) { |
264 | put_device(dev->dev); | ||
261 | return ERR_CAST(attach); | 265 | return ERR_CAST(attach); |
266 | } | ||
262 | 267 | ||
263 | get_dma_buf(dma_buf); | 268 | get_dma_buf(dma_buf); |
264 | 269 | ||
@@ -282,6 +287,6 @@ fail_unmap: | |||
282 | fail_detach: | 287 | fail_detach: |
283 | dma_buf_detach(dma_buf, attach); | 288 | dma_buf_detach(dma_buf, attach); |
284 | dma_buf_put(dma_buf); | 289 | dma_buf_put(dma_buf); |
285 | 290 | put_device(dev->dev); | |
286 | return ERR_PTR(ret); | 291 | return ERR_PTR(ret); |
287 | } | 292 | } |
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c index befe0e336471..24883b4d1a49 100644 --- a/drivers/hid/hid-lg4ff.c +++ b/drivers/hid/hid-lg4ff.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #define G25_REV_MIN 0x22 | 43 | #define G25_REV_MIN 0x22 |
44 | #define G27_REV_MAJ 0x12 | 44 | #define G27_REV_MAJ 0x12 |
45 | #define G27_REV_MIN 0x38 | 45 | #define G27_REV_MIN 0x38 |
46 | #define G27_2_REV_MIN 0x39 | ||
46 | 47 | ||
47 | #define to_hid_device(pdev) container_of(pdev, struct hid_device, dev) | 48 | #define to_hid_device(pdev) container_of(pdev, struct hid_device, dev) |
48 | 49 | ||
@@ -130,6 +131,7 @@ static const struct lg4ff_usb_revision lg4ff_revs[] = { | |||
130 | {DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */ | 131 | {DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */ |
131 | {G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */ | 132 | {G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */ |
132 | {G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */ | 133 | {G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */ |
134 | {G27_REV_MAJ, G27_2_REV_MIN, &native_g27}, /* G27 v2 */ | ||
133 | }; | 135 | }; |
134 | 136 | ||
135 | /* Recalculates X axis value accordingly to currently selected range */ | 137 | /* Recalculates X axis value accordingly to currently selected range */ |
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 12354055d474..2f19b15f47f2 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #define DUALSHOCK4_CONTROLLER_BT BIT(6) | 42 | #define DUALSHOCK4_CONTROLLER_BT BIT(6) |
43 | 43 | ||
44 | #define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB) | 44 | #define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB) |
45 | #define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER_USB | DUALSHOCK4_CONTROLLER_USB) | ||
45 | 46 | ||
46 | #define MAX_LEDS 4 | 47 | #define MAX_LEDS 4 |
47 | 48 | ||
@@ -499,6 +500,7 @@ struct sony_sc { | |||
499 | __u8 right; | 500 | __u8 right; |
500 | #endif | 501 | #endif |
501 | 502 | ||
503 | __u8 worker_initialized; | ||
502 | __u8 led_state[MAX_LEDS]; | 504 | __u8 led_state[MAX_LEDS]; |
503 | __u8 led_count; | 505 | __u8 led_count; |
504 | }; | 506 | }; |
@@ -993,22 +995,11 @@ static int sony_init_ff(struct hid_device *hdev) | |||
993 | return input_ff_create_memless(input_dev, NULL, sony_play_effect); | 995 | return input_ff_create_memless(input_dev, NULL, sony_play_effect); |
994 | } | 996 | } |
995 | 997 | ||
996 | static void sony_destroy_ff(struct hid_device *hdev) | ||
997 | { | ||
998 | struct sony_sc *sc = hid_get_drvdata(hdev); | ||
999 | |||
1000 | cancel_work_sync(&sc->state_worker); | ||
1001 | } | ||
1002 | |||
1003 | #else | 998 | #else |
1004 | static int sony_init_ff(struct hid_device *hdev) | 999 | static int sony_init_ff(struct hid_device *hdev) |
1005 | { | 1000 | { |
1006 | return 0; | 1001 | return 0; |
1007 | } | 1002 | } |
1008 | |||
1009 | static void sony_destroy_ff(struct hid_device *hdev) | ||
1010 | { | ||
1011 | } | ||
1012 | #endif | 1003 | #endif |
1013 | 1004 | ||
1014 | static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size) | 1005 | static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size) |
@@ -1077,6 +1068,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
1077 | if (sc->quirks & SIXAXIS_CONTROLLER_USB) { | 1068 | if (sc->quirks & SIXAXIS_CONTROLLER_USB) { |
1078 | hdev->hid_output_raw_report = sixaxis_usb_output_raw_report; | 1069 | hdev->hid_output_raw_report = sixaxis_usb_output_raw_report; |
1079 | ret = sixaxis_set_operational_usb(hdev); | 1070 | ret = sixaxis_set_operational_usb(hdev); |
1071 | |||
1072 | sc->worker_initialized = 1; | ||
1080 | INIT_WORK(&sc->state_worker, sixaxis_state_worker); | 1073 | INIT_WORK(&sc->state_worker, sixaxis_state_worker); |
1081 | } | 1074 | } |
1082 | else if (sc->quirks & SIXAXIS_CONTROLLER_BT) | 1075 | else if (sc->quirks & SIXAXIS_CONTROLLER_BT) |
@@ -1087,6 +1080,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
1087 | if (ret < 0) | 1080 | if (ret < 0) |
1088 | goto err_stop; | 1081 | goto err_stop; |
1089 | 1082 | ||
1083 | sc->worker_initialized = 1; | ||
1090 | INIT_WORK(&sc->state_worker, dualshock4_state_worker); | 1084 | INIT_WORK(&sc->state_worker, dualshock4_state_worker); |
1091 | } else { | 1085 | } else { |
1092 | ret = 0; | 1086 | ret = 0; |
@@ -1101,9 +1095,11 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
1101 | goto err_stop; | 1095 | goto err_stop; |
1102 | } | 1096 | } |
1103 | 1097 | ||
1104 | ret = sony_init_ff(hdev); | 1098 | if (sc->quirks & SONY_FF_SUPPORT) { |
1105 | if (ret < 0) | 1099 | ret = sony_init_ff(hdev); |
1106 | goto err_stop; | 1100 | if (ret < 0) |
1101 | goto err_stop; | ||
1102 | } | ||
1107 | 1103 | ||
1108 | return 0; | 1104 | return 0; |
1109 | err_stop: | 1105 | err_stop: |
@@ -1120,7 +1116,8 @@ static void sony_remove(struct hid_device *hdev) | |||
1120 | if (sc->quirks & SONY_LED_SUPPORT) | 1116 | if (sc->quirks & SONY_LED_SUPPORT) |
1121 | sony_leds_remove(hdev); | 1117 | sony_leds_remove(hdev); |
1122 | 1118 | ||
1123 | sony_destroy_ff(hdev); | 1119 | if (sc->worker_initialized) |
1120 | cancel_work_sync(&sc->state_worker); | ||
1124 | 1121 | ||
1125 | hid_hw_stop(hdev); | 1122 | hid_hw_stop(hdev); |
1126 | } | 1123 | } |
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index cb0137b3718d..ab24ce2eb28f 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c | |||
@@ -320,13 +320,13 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit) | |||
320 | hid_hw_close(hidraw->hid); | 320 | hid_hw_close(hidraw->hid); |
321 | wake_up_interruptible(&hidraw->wait); | 321 | wake_up_interruptible(&hidraw->wait); |
322 | } | 322 | } |
323 | device_destroy(hidraw_class, | ||
324 | MKDEV(hidraw_major, hidraw->minor)); | ||
323 | } else { | 325 | } else { |
324 | --hidraw->open; | 326 | --hidraw->open; |
325 | } | 327 | } |
326 | if (!hidraw->open) { | 328 | if (!hidraw->open) { |
327 | if (!hidraw->exist) { | 329 | if (!hidraw->exist) { |
328 | device_destroy(hidraw_class, | ||
329 | MKDEV(hidraw_major, hidraw->minor)); | ||
330 | hidraw_table[hidraw->minor] = NULL; | 330 | hidraw_table[hidraw->minor] = NULL; |
331 | kfree(hidraw); | 331 | kfree(hidraw); |
332 | } else { | 332 | } else { |
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index be7f0a20d634..f3b89a4698b6 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c | |||
@@ -39,7 +39,9 @@ | |||
39 | #include <linux/i2c.h> | 39 | #include <linux/i2c.h> |
40 | #include <linux/io.h> | 40 | #include <linux/io.h> |
41 | #include <linux/dma-mapping.h> | 41 | #include <linux/dma-mapping.h> |
42 | #include <linux/of_address.h> | ||
42 | #include <linux/of_device.h> | 43 | #include <linux/of_device.h> |
44 | #include <linux/of_irq.h> | ||
43 | #include <linux/of_platform.h> | 45 | #include <linux/of_platform.h> |
44 | #include <sysdev/fsl_soc.h> | 46 | #include <sysdev/fsl_soc.h> |
45 | #include <asm/cpm.h> | 47 | #include <asm/cpm.h> |
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index a06e12552886..ce953d895f5b 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c | |||
@@ -954,11 +954,13 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd, | |||
954 | return -EFAULT; | 954 | return -EFAULT; |
955 | 955 | ||
956 | error = input_ff_upload(dev, &effect, file); | 956 | error = input_ff_upload(dev, &effect, file); |
957 | if (error) | ||
958 | return error; | ||
957 | 959 | ||
958 | if (put_user(effect.id, &(((struct ff_effect __user *)p)->id))) | 960 | if (put_user(effect.id, &(((struct ff_effect __user *)p)->id))) |
959 | return -EFAULT; | 961 | return -EFAULT; |
960 | 962 | ||
961 | return error; | 963 | return 0; |
962 | } | 964 | } |
963 | 965 | ||
964 | /* Multi-number variable-length handlers */ | 966 | /* Multi-number variable-length handlers */ |
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index bb3b57bea8ba..5ef7fcf0e250 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c | |||
@@ -76,8 +76,18 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off) | |||
76 | struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); | 76 | struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); |
77 | unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]); | 77 | unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]); |
78 | unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); | 78 | unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); |
79 | int val; | ||
79 | 80 | ||
80 | return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit); | 81 | mutex_lock(&kpad->gpio_lock); |
82 | |||
83 | if (kpad->dir[bank] & bit) | ||
84 | val = kpad->dat_out[bank]; | ||
85 | else | ||
86 | val = adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank); | ||
87 | |||
88 | mutex_unlock(&kpad->gpio_lock); | ||
89 | |||
90 | return !!(val & bit); | ||
81 | } | 91 | } |
82 | 92 | ||
83 | static void adp5588_gpio_set_value(struct gpio_chip *chip, | 93 | static void adp5588_gpio_set_value(struct gpio_chip *chip, |
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c index 1f695f229ea8..184c8f21ab59 100644 --- a/drivers/input/misc/da9052_onkey.c +++ b/drivers/input/misc/da9052_onkey.c | |||
@@ -27,29 +27,32 @@ struct da9052_onkey { | |||
27 | 27 | ||
28 | static void da9052_onkey_query(struct da9052_onkey *onkey) | 28 | static void da9052_onkey_query(struct da9052_onkey *onkey) |
29 | { | 29 | { |
30 | int key_stat; | 30 | int ret; |
31 | 31 | ||
32 | key_stat = da9052_reg_read(onkey->da9052, DA9052_EVENT_B_REG); | 32 | ret = da9052_reg_read(onkey->da9052, DA9052_STATUS_A_REG); |
33 | if (key_stat < 0) { | 33 | if (ret < 0) { |
34 | dev_err(onkey->da9052->dev, | 34 | dev_err(onkey->da9052->dev, |
35 | "Failed to read onkey event %d\n", key_stat); | 35 | "Failed to read onkey event err=%d\n", ret); |
36 | } else { | 36 | } else { |
37 | /* | 37 | /* |
38 | * Since interrupt for deassertion of ONKEY pin is not | 38 | * Since interrupt for deassertion of ONKEY pin is not |
39 | * generated, onkey event state determines the onkey | 39 | * generated, onkey event state determines the onkey |
40 | * button state. | 40 | * button state. |
41 | */ | 41 | */ |
42 | key_stat &= DA9052_EVENTB_ENONKEY; | 42 | bool pressed = !(ret & DA9052_STATUSA_NONKEY); |
43 | input_report_key(onkey->input, KEY_POWER, key_stat); | 43 | |
44 | input_report_key(onkey->input, KEY_POWER, pressed); | ||
44 | input_sync(onkey->input); | 45 | input_sync(onkey->input); |
45 | } | ||
46 | 46 | ||
47 | /* | 47 | /* |
48 | * Interrupt is generated only when the ONKEY pin is asserted. | 48 | * Interrupt is generated only when the ONKEY pin |
49 | * Hence the deassertion of the pin is simulated through work queue. | 49 | * is asserted. Hence the deassertion of the pin |
50 | */ | 50 | * is simulated through work queue. |
51 | if (key_stat) | 51 | */ |
52 | schedule_delayed_work(&onkey->work, msecs_to_jiffies(50)); | 52 | if (pressed) |
53 | schedule_delayed_work(&onkey->work, | ||
54 | msecs_to_jiffies(50)); | ||
55 | } | ||
53 | } | 56 | } |
54 | 57 | ||
55 | static void da9052_onkey_work(struct work_struct *work) | 58 | static void da9052_onkey_work(struct work_struct *work) |
diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c index 87095e2f5153..8af34ffe208b 100644 --- a/drivers/input/mouse/cypress_ps2.c +++ b/drivers/input/mouse/cypress_ps2.c | |||
@@ -409,7 +409,6 @@ static int cypress_set_input_params(struct input_dev *input, | |||
409 | __clear_bit(REL_X, input->relbit); | 409 | __clear_bit(REL_X, input->relbit); |
410 | __clear_bit(REL_Y, input->relbit); | 410 | __clear_bit(REL_Y, input->relbit); |
411 | 411 | ||
412 | __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); | ||
413 | __set_bit(EV_KEY, input->evbit); | 412 | __set_bit(EV_KEY, input->evbit); |
414 | __set_bit(BTN_LEFT, input->keybit); | 413 | __set_bit(BTN_LEFT, input->keybit); |
415 | __set_bit(BTN_RIGHT, input->keybit); | 414 | __set_bit(BTN_RIGHT, input->keybit); |
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 26386f9d2569..d8d49d10f9bb 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
@@ -265,11 +265,22 @@ static int synaptics_identify(struct psmouse *psmouse) | |||
265 | * Read touchpad resolution and maximum reported coordinates | 265 | * Read touchpad resolution and maximum reported coordinates |
266 | * Resolution is left zero if touchpad does not support the query | 266 | * Resolution is left zero if touchpad does not support the query |
267 | */ | 267 | */ |
268 | |||
269 | static const int *quirk_min_max; | ||
270 | |||
268 | static int synaptics_resolution(struct psmouse *psmouse) | 271 | static int synaptics_resolution(struct psmouse *psmouse) |
269 | { | 272 | { |
270 | struct synaptics_data *priv = psmouse->private; | 273 | struct synaptics_data *priv = psmouse->private; |
271 | unsigned char resp[3]; | 274 | unsigned char resp[3]; |
272 | 275 | ||
276 | if (quirk_min_max) { | ||
277 | priv->x_min = quirk_min_max[0]; | ||
278 | priv->x_max = quirk_min_max[1]; | ||
279 | priv->y_min = quirk_min_max[2]; | ||
280 | priv->y_max = quirk_min_max[3]; | ||
281 | return 0; | ||
282 | } | ||
283 | |||
273 | if (SYN_ID_MAJOR(priv->identity) < 4) | 284 | if (SYN_ID_MAJOR(priv->identity) < 4) |
274 | return 0; | 285 | return 0; |
275 | 286 | ||
@@ -1485,10 +1496,54 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = { | |||
1485 | { } | 1496 | { } |
1486 | }; | 1497 | }; |
1487 | 1498 | ||
1499 | static const struct dmi_system_id min_max_dmi_table[] __initconst = { | ||
1500 | #if defined(CONFIG_DMI) | ||
1501 | { | ||
1502 | /* Lenovo ThinkPad Helix */ | ||
1503 | .matches = { | ||
1504 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1505 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"), | ||
1506 | }, | ||
1507 | .driver_data = (int []){1024, 5052, 2258, 4832}, | ||
1508 | }, | ||
1509 | { | ||
1510 | /* Lenovo ThinkPad X240 */ | ||
1511 | .matches = { | ||
1512 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1513 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"), | ||
1514 | }, | ||
1515 | .driver_data = (int []){1232, 5710, 1156, 4696}, | ||
1516 | }, | ||
1517 | { | ||
1518 | /* Lenovo ThinkPad T440s */ | ||
1519 | .matches = { | ||
1520 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1521 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"), | ||
1522 | }, | ||
1523 | .driver_data = (int []){1024, 5112, 2024, 4832}, | ||
1524 | }, | ||
1525 | { | ||
1526 | /* Lenovo ThinkPad T540p */ | ||
1527 | .matches = { | ||
1528 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
1529 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"), | ||
1530 | }, | ||
1531 | .driver_data = (int []){1024, 5056, 2058, 4832}, | ||
1532 | }, | ||
1533 | #endif | ||
1534 | { } | ||
1535 | }; | ||
1536 | |||
1488 | void __init synaptics_module_init(void) | 1537 | void __init synaptics_module_init(void) |
1489 | { | 1538 | { |
1539 | const struct dmi_system_id *min_max_dmi; | ||
1540 | |||
1490 | impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table); | 1541 | impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table); |
1491 | broken_olpc_ec = dmi_check_system(olpc_dmi_table); | 1542 | broken_olpc_ec = dmi_check_system(olpc_dmi_table); |
1543 | |||
1544 | min_max_dmi = dmi_first_match(min_max_dmi_table); | ||
1545 | if (min_max_dmi) | ||
1546 | quirk_min_max = min_max_dmi->driver_data; | ||
1492 | } | 1547 | } |
1493 | 1548 | ||
1494 | static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode) | 1549 | static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode) |
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index 4c842c320c2e..b604564dec5c 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c | |||
@@ -67,7 +67,6 @@ struct mousedev { | |||
67 | struct device dev; | 67 | struct device dev; |
68 | struct cdev cdev; | 68 | struct cdev cdev; |
69 | bool exist; | 69 | bool exist; |
70 | bool is_mixdev; | ||
71 | 70 | ||
72 | struct list_head mixdev_node; | 71 | struct list_head mixdev_node; |
73 | bool opened_by_mixdev; | 72 | bool opened_by_mixdev; |
@@ -77,6 +76,9 @@ struct mousedev { | |||
77 | int old_x[4], old_y[4]; | 76 | int old_x[4], old_y[4]; |
78 | int frac_dx, frac_dy; | 77 | int frac_dx, frac_dy; |
79 | unsigned long touch; | 78 | unsigned long touch; |
79 | |||
80 | int (*open_device)(struct mousedev *mousedev); | ||
81 | void (*close_device)(struct mousedev *mousedev); | ||
80 | }; | 82 | }; |
81 | 83 | ||
82 | enum mousedev_emul { | 84 | enum mousedev_emul { |
@@ -116,9 +118,6 @@ static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 }; | |||
116 | static struct mousedev *mousedev_mix; | 118 | static struct mousedev *mousedev_mix; |
117 | static LIST_HEAD(mousedev_mix_list); | 119 | static LIST_HEAD(mousedev_mix_list); |
118 | 120 | ||
119 | static void mixdev_open_devices(void); | ||
120 | static void mixdev_close_devices(void); | ||
121 | |||
122 | #define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03]) | 121 | #define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03]) |
123 | #define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03]) | 122 | #define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03]) |
124 | 123 | ||
@@ -428,9 +427,7 @@ static int mousedev_open_device(struct mousedev *mousedev) | |||
428 | if (retval) | 427 | if (retval) |
429 | return retval; | 428 | return retval; |
430 | 429 | ||
431 | if (mousedev->is_mixdev) | 430 | if (!mousedev->exist) |
432 | mixdev_open_devices(); | ||
433 | else if (!mousedev->exist) | ||
434 | retval = -ENODEV; | 431 | retval = -ENODEV; |
435 | else if (!mousedev->open++) { | 432 | else if (!mousedev->open++) { |
436 | retval = input_open_device(&mousedev->handle); | 433 | retval = input_open_device(&mousedev->handle); |
@@ -446,9 +443,7 @@ static void mousedev_close_device(struct mousedev *mousedev) | |||
446 | { | 443 | { |
447 | mutex_lock(&mousedev->mutex); | 444 | mutex_lock(&mousedev->mutex); |
448 | 445 | ||
449 | if (mousedev->is_mixdev) | 446 | if (mousedev->exist && !--mousedev->open) |
450 | mixdev_close_devices(); | ||
451 | else if (mousedev->exist && !--mousedev->open) | ||
452 | input_close_device(&mousedev->handle); | 447 | input_close_device(&mousedev->handle); |
453 | 448 | ||
454 | mutex_unlock(&mousedev->mutex); | 449 | mutex_unlock(&mousedev->mutex); |
@@ -459,21 +454,29 @@ static void mousedev_close_device(struct mousedev *mousedev) | |||
459 | * stream. Note that this function is called with mousedev_mix->mutex | 454 | * stream. Note that this function is called with mousedev_mix->mutex |
460 | * held. | 455 | * held. |
461 | */ | 456 | */ |
462 | static void mixdev_open_devices(void) | 457 | static int mixdev_open_devices(struct mousedev *mixdev) |
463 | { | 458 | { |
464 | struct mousedev *mousedev; | 459 | int error; |
460 | |||
461 | error = mutex_lock_interruptible(&mixdev->mutex); | ||
462 | if (error) | ||
463 | return error; | ||
465 | 464 | ||
466 | if (mousedev_mix->open++) | 465 | if (!mixdev->open++) { |
467 | return; | 466 | struct mousedev *mousedev; |
468 | 467 | ||
469 | list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { | 468 | list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { |
470 | if (!mousedev->opened_by_mixdev) { | 469 | if (!mousedev->opened_by_mixdev) { |
471 | if (mousedev_open_device(mousedev)) | 470 | if (mousedev_open_device(mousedev)) |
472 | continue; | 471 | continue; |
473 | 472 | ||
474 | mousedev->opened_by_mixdev = true; | 473 | mousedev->opened_by_mixdev = true; |
474 | } | ||
475 | } | 475 | } |
476 | } | 476 | } |
477 | |||
478 | mutex_unlock(&mixdev->mutex); | ||
479 | return 0; | ||
477 | } | 480 | } |
478 | 481 | ||
479 | /* | 482 | /* |
@@ -481,19 +484,22 @@ static void mixdev_open_devices(void) | |||
481 | * device. Note that this function is called with mousedev_mix->mutex | 484 | * device. Note that this function is called with mousedev_mix->mutex |
482 | * held. | 485 | * held. |
483 | */ | 486 | */ |
484 | static void mixdev_close_devices(void) | 487 | static void mixdev_close_devices(struct mousedev *mixdev) |
485 | { | 488 | { |
486 | struct mousedev *mousedev; | 489 | mutex_lock(&mixdev->mutex); |
487 | 490 | ||
488 | if (--mousedev_mix->open) | 491 | if (!--mixdev->open) { |
489 | return; | 492 | struct mousedev *mousedev; |
490 | 493 | ||
491 | list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { | 494 | list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) { |
492 | if (mousedev->opened_by_mixdev) { | 495 | if (mousedev->opened_by_mixdev) { |
493 | mousedev->opened_by_mixdev = false; | 496 | mousedev->opened_by_mixdev = false; |
494 | mousedev_close_device(mousedev); | 497 | mousedev_close_device(mousedev); |
498 | } | ||
495 | } | 499 | } |
496 | } | 500 | } |
501 | |||
502 | mutex_unlock(&mixdev->mutex); | ||
497 | } | 503 | } |
498 | 504 | ||
499 | 505 | ||
@@ -522,7 +528,7 @@ static int mousedev_release(struct inode *inode, struct file *file) | |||
522 | mousedev_detach_client(mousedev, client); | 528 | mousedev_detach_client(mousedev, client); |
523 | kfree(client); | 529 | kfree(client); |
524 | 530 | ||
525 | mousedev_close_device(mousedev); | 531 | mousedev->close_device(mousedev); |
526 | 532 | ||
527 | return 0; | 533 | return 0; |
528 | } | 534 | } |
@@ -550,7 +556,7 @@ static int mousedev_open(struct inode *inode, struct file *file) | |||
550 | client->mousedev = mousedev; | 556 | client->mousedev = mousedev; |
551 | mousedev_attach_client(mousedev, client); | 557 | mousedev_attach_client(mousedev, client); |
552 | 558 | ||
553 | error = mousedev_open_device(mousedev); | 559 | error = mousedev->open_device(mousedev); |
554 | if (error) | 560 | if (error) |
555 | goto err_free_client; | 561 | goto err_free_client; |
556 | 562 | ||
@@ -861,16 +867,21 @@ static struct mousedev *mousedev_create(struct input_dev *dev, | |||
861 | 867 | ||
862 | if (mixdev) { | 868 | if (mixdev) { |
863 | dev_set_name(&mousedev->dev, "mice"); | 869 | dev_set_name(&mousedev->dev, "mice"); |
870 | |||
871 | mousedev->open_device = mixdev_open_devices; | ||
872 | mousedev->close_device = mixdev_close_devices; | ||
864 | } else { | 873 | } else { |
865 | int dev_no = minor; | 874 | int dev_no = minor; |
866 | /* Normalize device number if it falls into legacy range */ | 875 | /* Normalize device number if it falls into legacy range */ |
867 | if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS) | 876 | if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS) |
868 | dev_no -= MOUSEDEV_MINOR_BASE; | 877 | dev_no -= MOUSEDEV_MINOR_BASE; |
869 | dev_set_name(&mousedev->dev, "mouse%d", dev_no); | 878 | dev_set_name(&mousedev->dev, "mouse%d", dev_no); |
879 | |||
880 | mousedev->open_device = mousedev_open_device; | ||
881 | mousedev->close_device = mousedev_close_device; | ||
870 | } | 882 | } |
871 | 883 | ||
872 | mousedev->exist = true; | 884 | mousedev->exist = true; |
873 | mousedev->is_mixdev = mixdev; | ||
874 | mousedev->handle.dev = input_get_device(dev); | 885 | mousedev->handle.dev = input_get_device(dev); |
875 | mousedev->handle.name = dev_name(&mousedev->dev); | 886 | mousedev->handle.name = dev_name(&mousedev->dev); |
876 | mousedev->handle.handler = handler; | 887 | mousedev->handle.handler = handler; |
@@ -919,7 +930,7 @@ static void mousedev_destroy(struct mousedev *mousedev) | |||
919 | device_del(&mousedev->dev); | 930 | device_del(&mousedev->dev); |
920 | mousedev_cleanup(mousedev); | 931 | mousedev_cleanup(mousedev); |
921 | input_free_minor(MINOR(mousedev->dev.devt)); | 932 | input_free_minor(MINOR(mousedev->dev.devt)); |
922 | if (!mousedev->is_mixdev) | 933 | if (mousedev != mousedev_mix) |
923 | input_unregister_handle(&mousedev->handle); | 934 | input_unregister_handle(&mousedev->handle); |
924 | put_device(&mousedev->dev); | 935 | put_device(&mousedev->dev); |
925 | } | 936 | } |
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig index f04686580040..9816c51eb5c2 100644 --- a/drivers/isdn/capi/Kconfig +++ b/drivers/isdn/capi/Kconfig | |||
@@ -16,9 +16,17 @@ config CAPI_TRACE | |||
16 | This will increase the size of the kernelcapi module by 20 KB. | 16 | This will increase the size of the kernelcapi module by 20 KB. |
17 | If unsure, say Y. | 17 | If unsure, say Y. |
18 | 18 | ||
19 | config ISDN_CAPI_CAPI20 | ||
20 | tristate "CAPI2.0 /dev/capi support" | ||
21 | help | ||
22 | This option will provide the CAPI 2.0 interface to userspace | ||
23 | applications via /dev/capi20. Applications should use the | ||
24 | standardized libcapi20 to access this functionality. You should say | ||
25 | Y/M here. | ||
26 | |||
19 | config ISDN_CAPI_MIDDLEWARE | 27 | config ISDN_CAPI_MIDDLEWARE |
20 | bool "CAPI2.0 Middleware support" | 28 | bool "CAPI2.0 Middleware support" |
21 | depends on TTY | 29 | depends on ISDN_CAPI_CAPI20 && TTY |
22 | help | 30 | help |
23 | This option will enhance the capabilities of the /dev/capi20 | 31 | This option will enhance the capabilities of the /dev/capi20 |
24 | interface. It will provide a means of moving a data connection, | 32 | interface. It will provide a means of moving a data connection, |
@@ -26,14 +34,6 @@ config ISDN_CAPI_MIDDLEWARE | |||
26 | device. If you want to use pppd with pppdcapiplugin to dial up to | 34 | device. If you want to use pppd with pppdcapiplugin to dial up to |
27 | your ISP, say Y here. | 35 | your ISP, say Y here. |
28 | 36 | ||
29 | config ISDN_CAPI_CAPI20 | ||
30 | tristate "CAPI2.0 /dev/capi support" | ||
31 | help | ||
32 | This option will provide the CAPI 2.0 interface to userspace | ||
33 | applications via /dev/capi20. Applications should use the | ||
34 | standardized libcapi20 to access this functionality. You should say | ||
35 | Y/M here. | ||
36 | |||
37 | config ISDN_CAPI_CAPIDRV | 37 | config ISDN_CAPI_CAPIDRV |
38 | tristate "CAPI2.0 capidrv interface support" | 38 | tristate "CAPI2.0 capidrv interface support" |
39 | depends on ISDN_I4L | 39 | depends on ISDN_I4L |
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1af70145fab9..074b9c8e4cf0 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -979,12 +979,13 @@ static void issue_copy_real(struct dm_cache_migration *mg) | |||
979 | int r; | 979 | int r; |
980 | struct dm_io_region o_region, c_region; | 980 | struct dm_io_region o_region, c_region; |
981 | struct cache *cache = mg->cache; | 981 | struct cache *cache = mg->cache; |
982 | sector_t cblock = from_cblock(mg->cblock); | ||
982 | 983 | ||
983 | o_region.bdev = cache->origin_dev->bdev; | 984 | o_region.bdev = cache->origin_dev->bdev; |
984 | o_region.count = cache->sectors_per_block; | 985 | o_region.count = cache->sectors_per_block; |
985 | 986 | ||
986 | c_region.bdev = cache->cache_dev->bdev; | 987 | c_region.bdev = cache->cache_dev->bdev; |
987 | c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block; | 988 | c_region.sector = cblock * cache->sectors_per_block; |
988 | c_region.count = cache->sectors_per_block; | 989 | c_region.count = cache->sectors_per_block; |
989 | 990 | ||
990 | if (mg->writeback || mg->demote) { | 991 | if (mg->writeback || mg->demote) { |
@@ -2464,20 +2465,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio) | |||
2464 | bool discarded_block; | 2465 | bool discarded_block; |
2465 | struct dm_bio_prison_cell *cell; | 2466 | struct dm_bio_prison_cell *cell; |
2466 | struct policy_result lookup_result; | 2467 | struct policy_result lookup_result; |
2467 | struct per_bio_data *pb; | 2468 | struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size); |
2468 | 2469 | ||
2469 | if (from_oblock(block) > from_oblock(cache->origin_blocks)) { | 2470 | if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) { |
2470 | /* | 2471 | /* |
2471 | * This can only occur if the io goes to a partial block at | 2472 | * This can only occur if the io goes to a partial block at |
2472 | * the end of the origin device. We don't cache these. | 2473 | * the end of the origin device. We don't cache these. |
2473 | * Just remap to the origin and carry on. | 2474 | * Just remap to the origin and carry on. |
2474 | */ | 2475 | */ |
2475 | remap_to_origin_clear_discard(cache, bio, block); | 2476 | remap_to_origin(cache, bio); |
2476 | return DM_MAPIO_REMAPPED; | 2477 | return DM_MAPIO_REMAPPED; |
2477 | } | 2478 | } |
2478 | 2479 | ||
2479 | pb = init_per_bio_data(bio, pb_data_size); | ||
2480 | |||
2481 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { | 2480 | if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { |
2482 | defer_bio(cache, bio); | 2481 | defer_bio(cache, bio); |
2483 | return DM_MAPIO_SUBMITTED; | 2482 | return DM_MAPIO_SUBMITTED; |
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 2e45f6ec1bf0..380d24922049 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c | |||
@@ -1248,19 +1248,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1248 | * shared register for the high 32 bits, so only a single, aligned, | 1248 | * shared register for the high 32 bits, so only a single, aligned, |
1249 | * 4 GB physical address range can be used for descriptors. | 1249 | * 4 GB physical address range can be used for descriptors. |
1250 | */ | 1250 | */ |
1251 | if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && | 1251 | if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { |
1252 | !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { | ||
1253 | dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n"); | 1252 | dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n"); |
1254 | } else { | 1253 | } else { |
1255 | err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); | 1254 | err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); |
1256 | if (err) { | 1255 | if (err) { |
1257 | err = dma_set_coherent_mask(&pdev->dev, | 1256 | dev_err(&pdev->dev, "No usable DMA config, aborting\n"); |
1258 | DMA_BIT_MASK(32)); | 1257 | goto out_pci_disable; |
1259 | if (err) { | ||
1260 | dev_err(&pdev->dev, | ||
1261 | "No usable DMA config, aborting\n"); | ||
1262 | goto out_pci_disable; | ||
1263 | } | ||
1264 | } | 1258 | } |
1265 | } | 1259 | } |
1266 | 1260 | ||
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index d5c2d3e912e5..422aab27ea1b 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c | |||
@@ -2436,7 +2436,7 @@ err_reset: | |||
2436 | err_register: | 2436 | err_register: |
2437 | err_sw_init: | 2437 | err_sw_init: |
2438 | err_eeprom: | 2438 | err_eeprom: |
2439 | iounmap(adapter->hw.hw_addr); | 2439 | pci_iounmap(pdev, adapter->hw.hw_addr); |
2440 | err_init_netdev: | 2440 | err_init_netdev: |
2441 | err_ioremap: | 2441 | err_ioremap: |
2442 | free_netdev(netdev); | 2442 | free_netdev(netdev); |
@@ -2474,7 +2474,7 @@ static void atl1e_remove(struct pci_dev *pdev) | |||
2474 | unregister_netdev(netdev); | 2474 | unregister_netdev(netdev); |
2475 | atl1e_free_ring_resources(adapter); | 2475 | atl1e_free_ring_resources(adapter); |
2476 | atl1e_force_ps(&adapter->hw); | 2476 | atl1e_force_ps(&adapter->hw); |
2477 | iounmap(adapter->hw.hw_addr); | 2477 | pci_iounmap(pdev, adapter->hw.hw_addr); |
2478 | pci_release_regions(pdev); | 2478 | pci_release_regions(pdev); |
2479 | free_netdev(netdev); | 2479 | free_netdev(netdev); |
2480 | pci_disable_device(pdev); | 2480 | pci_disable_device(pdev); |
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index fcf9105a5476..09f3fefcbf9c 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* cnic.c: Broadcom CNIC core network driver. | 1 | /* cnic.c: Broadcom CNIC core network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2006-2013 Broadcom Corporation | 3 | * Copyright (c) 2006-2014 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -342,7 +342,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, | |||
342 | while (retry < 3) { | 342 | while (retry < 3) { |
343 | rc = 0; | 343 | rc = 0; |
344 | rcu_read_lock(); | 344 | rcu_read_lock(); |
345 | ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]); | 345 | ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]); |
346 | if (ulp_ops) | 346 | if (ulp_ops) |
347 | rc = ulp_ops->iscsi_nl_send_msg( | 347 | rc = ulp_ops->iscsi_nl_send_msg( |
348 | cp->ulp_handle[CNIC_ULP_ISCSI], | 348 | cp->ulp_handle[CNIC_ULP_ISCSI], |
@@ -726,7 +726,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma) | |||
726 | 726 | ||
727 | for (i = 0; i < dma->num_pages; i++) { | 727 | for (i = 0; i < dma->num_pages; i++) { |
728 | if (dma->pg_arr[i]) { | 728 | if (dma->pg_arr[i]) { |
729 | dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE, | 729 | dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE, |
730 | dma->pg_arr[i], dma->pg_map_arr[i]); | 730 | dma->pg_arr[i], dma->pg_map_arr[i]); |
731 | dma->pg_arr[i] = NULL; | 731 | dma->pg_arr[i] = NULL; |
732 | } | 732 | } |
@@ -785,7 +785,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, | |||
785 | 785 | ||
786 | for (i = 0; i < pages; i++) { | 786 | for (i = 0; i < pages; i++) { |
787 | dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, | 787 | dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev, |
788 | BNX2_PAGE_SIZE, | 788 | CNIC_PAGE_SIZE, |
789 | &dma->pg_map_arr[i], | 789 | &dma->pg_map_arr[i], |
790 | GFP_ATOMIC); | 790 | GFP_ATOMIC); |
791 | if (dma->pg_arr[i] == NULL) | 791 | if (dma->pg_arr[i] == NULL) |
@@ -794,8 +794,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, | |||
794 | if (!use_pg_tbl) | 794 | if (!use_pg_tbl) |
795 | return 0; | 795 | return 0; |
796 | 796 | ||
797 | dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) & | 797 | dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) & |
798 | ~(BNX2_PAGE_SIZE - 1); | 798 | ~(CNIC_PAGE_SIZE - 1); |
799 | dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size, | 799 | dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size, |
800 | &dma->pgtbl_map, GFP_ATOMIC); | 800 | &dma->pgtbl_map, GFP_ATOMIC); |
801 | if (dma->pgtbl == NULL) | 801 | if (dma->pgtbl == NULL) |
@@ -900,8 +900,8 @@ static int cnic_alloc_context(struct cnic_dev *dev) | |||
900 | if (BNX2_CHIP(cp) == BNX2_CHIP_5709) { | 900 | if (BNX2_CHIP(cp) == BNX2_CHIP_5709) { |
901 | int i, k, arr_size; | 901 | int i, k, arr_size; |
902 | 902 | ||
903 | cp->ctx_blk_size = BNX2_PAGE_SIZE; | 903 | cp->ctx_blk_size = CNIC_PAGE_SIZE; |
904 | cp->cids_per_blk = BNX2_PAGE_SIZE / 128; | 904 | cp->cids_per_blk = CNIC_PAGE_SIZE / 128; |
905 | arr_size = BNX2_MAX_CID / cp->cids_per_blk * | 905 | arr_size = BNX2_MAX_CID / cp->cids_per_blk * |
906 | sizeof(struct cnic_ctx); | 906 | sizeof(struct cnic_ctx); |
907 | cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); | 907 | cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL); |
@@ -933,7 +933,7 @@ static int cnic_alloc_context(struct cnic_dev *dev) | |||
933 | for (i = 0; i < cp->ctx_blks; i++) { | 933 | for (i = 0; i < cp->ctx_blks; i++) { |
934 | cp->ctx_arr[i].ctx = | 934 | cp->ctx_arr[i].ctx = |
935 | dma_alloc_coherent(&dev->pcidev->dev, | 935 | dma_alloc_coherent(&dev->pcidev->dev, |
936 | BNX2_PAGE_SIZE, | 936 | CNIC_PAGE_SIZE, |
937 | &cp->ctx_arr[i].mapping, | 937 | &cp->ctx_arr[i].mapping, |
938 | GFP_KERNEL); | 938 | GFP_KERNEL); |
939 | if (cp->ctx_arr[i].ctx == NULL) | 939 | if (cp->ctx_arr[i].ctx == NULL) |
@@ -1013,7 +1013,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages) | |||
1013 | if (udev->l2_ring) | 1013 | if (udev->l2_ring) |
1014 | return 0; | 1014 | return 0; |
1015 | 1015 | ||
1016 | udev->l2_ring_size = pages * BNX2_PAGE_SIZE; | 1016 | udev->l2_ring_size = pages * CNIC_PAGE_SIZE; |
1017 | udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, | 1017 | udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, |
1018 | &udev->l2_ring_map, | 1018 | &udev->l2_ring_map, |
1019 | GFP_KERNEL | __GFP_COMP); | 1019 | GFP_KERNEL | __GFP_COMP); |
@@ -1021,7 +1021,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages) | |||
1021 | return -ENOMEM; | 1021 | return -ENOMEM; |
1022 | 1022 | ||
1023 | udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; | 1023 | udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; |
1024 | udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); | 1024 | udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size); |
1025 | udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, | 1025 | udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, |
1026 | &udev->l2_buf_map, | 1026 | &udev->l2_buf_map, |
1027 | GFP_KERNEL | __GFP_COMP); | 1027 | GFP_KERNEL | __GFP_COMP); |
@@ -1102,7 +1102,7 @@ static int cnic_init_uio(struct cnic_dev *dev) | |||
1102 | uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID + | 1102 | uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID + |
1103 | TX_MAX_TSS_RINGS + 1); | 1103 | TX_MAX_TSS_RINGS + 1); |
1104 | uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & | 1104 | uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen & |
1105 | PAGE_MASK; | 1105 | CNIC_PAGE_MASK; |
1106 | if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) | 1106 | if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) |
1107 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; | 1107 | uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; |
1108 | else | 1108 | else |
@@ -1113,7 +1113,7 @@ static int cnic_init_uio(struct cnic_dev *dev) | |||
1113 | uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0); | 1113 | uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0); |
1114 | 1114 | ||
1115 | uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & | 1115 | uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk & |
1116 | PAGE_MASK; | 1116 | CNIC_PAGE_MASK; |
1117 | uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk); | 1117 | uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk); |
1118 | 1118 | ||
1119 | uinfo->name = "bnx2x_cnic"; | 1119 | uinfo->name = "bnx2x_cnic"; |
@@ -1267,14 +1267,14 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) | |||
1267 | for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++) | 1267 | for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++) |
1268 | cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE; | 1268 | cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE; |
1269 | 1269 | ||
1270 | pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / | 1270 | pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / |
1271 | PAGE_SIZE; | 1271 | CNIC_PAGE_SIZE; |
1272 | 1272 | ||
1273 | ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); | 1273 | ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); |
1274 | if (ret) | 1274 | if (ret) |
1275 | return -ENOMEM; | 1275 | return -ENOMEM; |
1276 | 1276 | ||
1277 | n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; | 1277 | n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; |
1278 | for (i = 0, j = 0; i < cp->max_cid_space; i++) { | 1278 | for (i = 0, j = 0; i < cp->max_cid_space; i++) { |
1279 | long off = CNIC_KWQ16_DATA_SIZE * (i % n); | 1279 | long off = CNIC_KWQ16_DATA_SIZE * (i % n); |
1280 | 1280 | ||
@@ -1296,7 +1296,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) | |||
1296 | goto error; | 1296 | goto error; |
1297 | } | 1297 | } |
1298 | 1298 | ||
1299 | pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE; | 1299 | pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE; |
1300 | ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); | 1300 | ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0); |
1301 | if (ret) | 1301 | if (ret) |
1302 | goto error; | 1302 | goto error; |
@@ -1466,8 +1466,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) | |||
1466 | cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * | 1466 | cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS * |
1467 | BNX2X_ISCSI_R2TQE_SIZE; | 1467 | BNX2X_ISCSI_R2TQE_SIZE; |
1468 | cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; | 1468 | cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE; |
1469 | pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; | 1469 | pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE; |
1470 | hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); | 1470 | hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE); |
1471 | cp->num_cqs = req1->num_cqs; | 1471 | cp->num_cqs = req1->num_cqs; |
1472 | 1472 | ||
1473 | if (!dev->max_iscsi_conn) | 1473 | if (!dev->max_iscsi_conn) |
@@ -1477,9 +1477,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) | |||
1477 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid), | 1477 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid), |
1478 | req1->rq_num_wqes); | 1478 | req1->rq_num_wqes); |
1479 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), | 1479 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), |
1480 | PAGE_SIZE); | 1480 | CNIC_PAGE_SIZE); |
1481 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + | 1481 | CNIC_WR8(dev, BAR_TSTRORM_INTMEM + |
1482 | TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); | 1482 | TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); |
1483 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + | 1483 | CNIC_WR16(dev, BAR_TSTRORM_INTMEM + |
1484 | TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), | 1484 | TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), |
1485 | req1->num_tasks_per_conn); | 1485 | req1->num_tasks_per_conn); |
@@ -1489,9 +1489,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) | |||
1489 | USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid), | 1489 | USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid), |
1490 | req1->rq_buffer_size); | 1490 | req1->rq_buffer_size); |
1491 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), | 1491 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), |
1492 | PAGE_SIZE); | 1492 | CNIC_PAGE_SIZE); |
1493 | CNIC_WR8(dev, BAR_USTRORM_INTMEM + | 1493 | CNIC_WR8(dev, BAR_USTRORM_INTMEM + |
1494 | USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); | 1494 | USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); |
1495 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + | 1495 | CNIC_WR16(dev, BAR_USTRORM_INTMEM + |
1496 | USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), | 1496 | USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), |
1497 | req1->num_tasks_per_conn); | 1497 | req1->num_tasks_per_conn); |
@@ -1504,9 +1504,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) | |||
1504 | 1504 | ||
1505 | /* init Xstorm RAM */ | 1505 | /* init Xstorm RAM */ |
1506 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), | 1506 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), |
1507 | PAGE_SIZE); | 1507 | CNIC_PAGE_SIZE); |
1508 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + | 1508 | CNIC_WR8(dev, BAR_XSTRORM_INTMEM + |
1509 | XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); | 1509 | XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); |
1510 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + | 1510 | CNIC_WR16(dev, BAR_XSTRORM_INTMEM + |
1511 | XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), | 1511 | XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), |
1512 | req1->num_tasks_per_conn); | 1512 | req1->num_tasks_per_conn); |
@@ -1519,9 +1519,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe) | |||
1519 | 1519 | ||
1520 | /* init Cstorm RAM */ | 1520 | /* init Cstorm RAM */ |
1521 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), | 1521 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid), |
1522 | PAGE_SIZE); | 1522 | CNIC_PAGE_SIZE); |
1523 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + | 1523 | CNIC_WR8(dev, BAR_CSTRORM_INTMEM + |
1524 | CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT); | 1524 | CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS); |
1525 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + | 1525 | CNIC_WR16(dev, BAR_CSTRORM_INTMEM + |
1526 | CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), | 1526 | CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid), |
1527 | req1->num_tasks_per_conn); | 1527 | req1->num_tasks_per_conn); |
@@ -1623,18 +1623,18 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid) | |||
1623 | } | 1623 | } |
1624 | 1624 | ||
1625 | ctx->cid = cid; | 1625 | ctx->cid = cid; |
1626 | pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE; | 1626 | pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE; |
1627 | 1627 | ||
1628 | ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); | 1628 | ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1); |
1629 | if (ret) | 1629 | if (ret) |
1630 | goto error; | 1630 | goto error; |
1631 | 1631 | ||
1632 | pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE; | 1632 | pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE; |
1633 | ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); | 1633 | ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1); |
1634 | if (ret) | 1634 | if (ret) |
1635 | goto error; | 1635 | goto error; |
1636 | 1636 | ||
1637 | pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE; | 1637 | pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE; |
1638 | ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); | 1638 | ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1); |
1639 | if (ret) | 1639 | if (ret) |
1640 | goto error; | 1640 | goto error; |
@@ -1760,7 +1760,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], | |||
1760 | ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; | 1760 | ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE; |
1761 | /* TSTORM requires the base address of RQ DB & not PTE */ | 1761 | /* TSTORM requires the base address of RQ DB & not PTE */ |
1762 | ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = | 1762 | ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo = |
1763 | req2->rq_page_table_addr_lo & PAGE_MASK; | 1763 | req2->rq_page_table_addr_lo & CNIC_PAGE_MASK; |
1764 | ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = | 1764 | ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi = |
1765 | req2->rq_page_table_addr_hi; | 1765 | req2->rq_page_table_addr_hi; |
1766 | ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; | 1766 | ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id; |
@@ -1842,7 +1842,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[], | |||
1842 | /* CSTORM and USTORM initialization is different, CSTORM requires | 1842 | /* CSTORM and USTORM initialization is different, CSTORM requires |
1843 | * CQ DB base & not PTE addr */ | 1843 | * CQ DB base & not PTE addr */ |
1844 | ictx->cstorm_st_context.cq_db_base.lo = | 1844 | ictx->cstorm_st_context.cq_db_base.lo = |
1845 | req1->cq_page_table_addr_lo & PAGE_MASK; | 1845 | req1->cq_page_table_addr_lo & CNIC_PAGE_MASK; |
1846 | ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi; | 1846 | ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi; |
1847 | ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; | 1847 | ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id; |
1848 | ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; | 1848 | ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1; |
@@ -2911,7 +2911,7 @@ static int cnic_l2_completion(struct cnic_local *cp) | |||
2911 | u16 hw_cons, sw_cons; | 2911 | u16 hw_cons, sw_cons; |
2912 | struct cnic_uio_dev *udev = cp->udev; | 2912 | struct cnic_uio_dev *udev = cp->udev; |
2913 | union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) | 2913 | union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) |
2914 | (udev->l2_ring + (2 * BNX2_PAGE_SIZE)); | 2914 | (udev->l2_ring + (2 * CNIC_PAGE_SIZE)); |
2915 | u32 cmd; | 2915 | u32 cmd; |
2916 | int comp = 0; | 2916 | int comp = 0; |
2917 | 2917 | ||
@@ -3244,7 +3244,8 @@ static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type) | |||
3244 | int rc; | 3244 | int rc; |
3245 | 3245 | ||
3246 | mutex_lock(&cnic_lock); | 3246 | mutex_lock(&cnic_lock); |
3247 | ulp_ops = cnic_ulp_tbl_prot(ulp_type); | 3247 | ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type], |
3248 | lockdep_is_held(&cnic_lock)); | ||
3248 | if (ulp_ops && ulp_ops->cnic_get_stats) | 3249 | if (ulp_ops && ulp_ops->cnic_get_stats) |
3249 | rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]); | 3250 | rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]); |
3250 | else | 3251 | else |
@@ -4384,7 +4385,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) | |||
4384 | u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; | 4385 | u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; |
4385 | u32 val; | 4386 | u32 val; |
4386 | 4387 | ||
4387 | memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE); | 4388 | memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE); |
4388 | 4389 | ||
4389 | CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, | 4390 | CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, |
4390 | (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); | 4391 | (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); |
@@ -4628,7 +4629,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) | |||
4628 | val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); | 4629 | val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); |
4629 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); | 4630 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); |
4630 | 4631 | ||
4631 | rxbd = udev->l2_ring + BNX2_PAGE_SIZE; | 4632 | rxbd = udev->l2_ring + CNIC_PAGE_SIZE; |
4632 | for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) { | 4633 | for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) { |
4633 | dma_addr_t buf_map; | 4634 | dma_addr_t buf_map; |
4634 | int n = (i % cp->l2_rx_ring_size) + 1; | 4635 | int n = (i % cp->l2_rx_ring_size) + 1; |
@@ -4639,11 +4640,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) | |||
4639 | rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; | 4640 | rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; |
4640 | rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; | 4641 | rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; |
4641 | } | 4642 | } |
4642 | val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32; | 4643 | val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32; |
4643 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); | 4644 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); |
4644 | rxbd->rx_bd_haddr_hi = val; | 4645 | rxbd->rx_bd_haddr_hi = val; |
4645 | 4646 | ||
4646 | val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff; | 4647 | val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff; |
4647 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); | 4648 | cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); |
4648 | rxbd->rx_bd_haddr_lo = val; | 4649 | rxbd->rx_bd_haddr_lo = val; |
4649 | 4650 | ||
@@ -4709,10 +4710,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) | |||
4709 | 4710 | ||
4710 | val = CNIC_RD(dev, BNX2_MQ_CONFIG); | 4711 | val = CNIC_RD(dev, BNX2_MQ_CONFIG); |
4711 | val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; | 4712 | val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; |
4712 | if (BNX2_PAGE_BITS > 12) | 4713 | if (CNIC_PAGE_BITS > 12) |
4713 | val |= (12 - 8) << 4; | 4714 | val |= (12 - 8) << 4; |
4714 | else | 4715 | else |
4715 | val |= (BNX2_PAGE_BITS - 8) << 4; | 4716 | val |= (CNIC_PAGE_BITS - 8) << 4; |
4716 | 4717 | ||
4717 | CNIC_WR(dev, BNX2_MQ_CONFIG, val); | 4718 | CNIC_WR(dev, BNX2_MQ_CONFIG, val); |
4718 | 4719 | ||
@@ -4742,13 +4743,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) | |||
4742 | 4743 | ||
4743 | /* Initialize the kernel work queue context. */ | 4744 | /* Initialize the kernel work queue context. */ |
4744 | val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | | 4745 | val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | |
4745 | (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; | 4746 | (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; |
4746 | cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val); | 4747 | cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val); |
4747 | 4748 | ||
4748 | val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; | 4749 | val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16; |
4749 | cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); | 4750 | cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); |
4750 | 4751 | ||
4751 | val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; | 4752 | val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT; |
4752 | cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); | 4753 | cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); |
4753 | 4754 | ||
4754 | val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); | 4755 | val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32); |
@@ -4768,13 +4769,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) | |||
4768 | 4769 | ||
4769 | /* Initialize the kernel complete queue context. */ | 4770 | /* Initialize the kernel complete queue context. */ |
4770 | val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | | 4771 | val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE | |
4771 | (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; | 4772 | (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ; |
4772 | cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val); | 4773 | cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val); |
4773 | 4774 | ||
4774 | val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; | 4775 | val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16; |
4775 | cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); | 4776 | cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val); |
4776 | 4777 | ||
4777 | val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; | 4778 | val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT; |
4778 | cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); | 4779 | cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val); |
4779 | 4780 | ||
4780 | val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); | 4781 | val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32); |
@@ -4918,7 +4919,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev, | |||
4918 | u32 cli = cp->ethdev->iscsi_l2_client_id; | 4919 | u32 cli = cp->ethdev->iscsi_l2_client_id; |
4919 | u32 val; | 4920 | u32 val; |
4920 | 4921 | ||
4921 | memset(txbd, 0, BNX2_PAGE_SIZE); | 4922 | memset(txbd, 0, CNIC_PAGE_SIZE); |
4922 | 4923 | ||
4923 | buf_map = udev->l2_buf_map; | 4924 | buf_map = udev->l2_buf_map; |
4924 | for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) { | 4925 | for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) { |
@@ -4978,9 +4979,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, | |||
4978 | struct bnx2x *bp = netdev_priv(dev->netdev); | 4979 | struct bnx2x *bp = netdev_priv(dev->netdev); |
4979 | struct cnic_uio_dev *udev = cp->udev; | 4980 | struct cnic_uio_dev *udev = cp->udev; |
4980 | struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + | 4981 | struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + |
4981 | BNX2_PAGE_SIZE); | 4982 | CNIC_PAGE_SIZE); |
4982 | struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) | 4983 | struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) |
4983 | (udev->l2_ring + (2 * BNX2_PAGE_SIZE)); | 4984 | (udev->l2_ring + (2 * CNIC_PAGE_SIZE)); |
4984 | struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; | 4985 | struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; |
4985 | int i; | 4986 | int i; |
4986 | u32 cli = cp->ethdev->iscsi_l2_client_id; | 4987 | u32 cli = cp->ethdev->iscsi_l2_client_id; |
@@ -5004,20 +5005,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, | |||
5004 | rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); | 5005 | rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); |
5005 | } | 5006 | } |
5006 | 5007 | ||
5007 | val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32; | 5008 | val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32; |
5008 | rxbd->addr_hi = cpu_to_le32(val); | 5009 | rxbd->addr_hi = cpu_to_le32(val); |
5009 | data->rx.bd_page_base.hi = cpu_to_le32(val); | 5010 | data->rx.bd_page_base.hi = cpu_to_le32(val); |
5010 | 5011 | ||
5011 | val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff; | 5012 | val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff; |
5012 | rxbd->addr_lo = cpu_to_le32(val); | 5013 | rxbd->addr_lo = cpu_to_le32(val); |
5013 | data->rx.bd_page_base.lo = cpu_to_le32(val); | 5014 | data->rx.bd_page_base.lo = cpu_to_le32(val); |
5014 | 5015 | ||
5015 | rxcqe += BNX2X_MAX_RCQ_DESC_CNT; | 5016 | rxcqe += BNX2X_MAX_RCQ_DESC_CNT; |
5016 | val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32; | 5017 | val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32; |
5017 | rxcqe->addr_hi = cpu_to_le32(val); | 5018 | rxcqe->addr_hi = cpu_to_le32(val); |
5018 | data->rx.cqe_page_base.hi = cpu_to_le32(val); | 5019 | data->rx.cqe_page_base.hi = cpu_to_le32(val); |
5019 | 5020 | ||
5020 | val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff; | 5021 | val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff; |
5021 | rxcqe->addr_lo = cpu_to_le32(val); | 5022 | rxcqe->addr_lo = cpu_to_le32(val); |
5022 | data->rx.cqe_page_base.lo = cpu_to_le32(val); | 5023 | data->rx.cqe_page_base.lo = cpu_to_le32(val); |
5023 | 5024 | ||
@@ -5265,8 +5266,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev) | |||
5265 | msleep(10); | 5266 | msleep(10); |
5266 | } | 5267 | } |
5267 | clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); | 5268 | clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); |
5268 | rx_ring = udev->l2_ring + BNX2_PAGE_SIZE; | 5269 | rx_ring = udev->l2_ring + CNIC_PAGE_SIZE; |
5269 | memset(rx_ring, 0, BNX2_PAGE_SIZE); | 5270 | memset(rx_ring, 0, CNIC_PAGE_SIZE); |
5270 | } | 5271 | } |
5271 | 5272 | ||
5272 | static int cnic_register_netdev(struct cnic_dev *dev) | 5273 | static int cnic_register_netdev(struct cnic_dev *dev) |
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index 0d6b13f854d9..d535ae4228b4 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* cnic.h: Broadcom CNIC core network driver. | 1 | /* cnic.h: Broadcom CNIC core network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2006-2013 Broadcom Corporation | 3 | * Copyright (c) 2006-2014 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h index 95a8e4b11c9f..dcbca6997e8f 100644 --- a/drivers/net/ethernet/broadcom/cnic_defs.h +++ b/drivers/net/ethernet/broadcom/cnic_defs.h | |||
@@ -1,7 +1,7 @@ | |||
1 | 1 | ||
2 | /* cnic.c: Broadcom CNIC core network driver. | 2 | /* cnic.c: Broadcom CNIC core network driver. |
3 | * | 3 | * |
4 | * Copyright (c) 2006-2013 Broadcom Corporation | 4 | * Copyright (c) 2006-2014 Broadcom Corporation |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 8cf6b1926069..5f4d5573a73d 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* cnic_if.h: Broadcom CNIC core network driver. | 1 | /* cnic_if.h: Broadcom CNIC core network driver. |
2 | * | 2 | * |
3 | * Copyright (c) 2006-2013 Broadcom Corporation | 3 | * Copyright (c) 2006-2014 Broadcom Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -14,8 +14,8 @@ | |||
14 | 14 | ||
15 | #include "bnx2x/bnx2x_mfw_req.h" | 15 | #include "bnx2x/bnx2x_mfw_req.h" |
16 | 16 | ||
17 | #define CNIC_MODULE_VERSION "2.5.19" | 17 | #define CNIC_MODULE_VERSION "2.5.20" |
18 | #define CNIC_MODULE_RELDATE "December 19, 2013" | 18 | #define CNIC_MODULE_RELDATE "March 14, 2014" |
19 | 19 | ||
20 | #define CNIC_ULP_RDMA 0 | 20 | #define CNIC_ULP_RDMA 0 |
21 | #define CNIC_ULP_ISCSI 1 | 21 | #define CNIC_ULP_ISCSI 1 |
@@ -24,6 +24,16 @@ | |||
24 | #define MAX_CNIC_ULP_TYPE_EXT 3 | 24 | #define MAX_CNIC_ULP_TYPE_EXT 3 |
25 | #define MAX_CNIC_ULP_TYPE 4 | 25 | #define MAX_CNIC_ULP_TYPE 4 |
26 | 26 | ||
27 | /* Use CPU native page size up to 16K for cnic ring sizes. */ | ||
28 | #if (PAGE_SHIFT > 14) | ||
29 | #define CNIC_PAGE_BITS 14 | ||
30 | #else | ||
31 | #define CNIC_PAGE_BITS PAGE_SHIFT | ||
32 | #endif | ||
33 | #define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS)) | ||
34 | #define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE) | ||
35 | #define CNIC_PAGE_MASK (~((CNIC_PAGE_SIZE) - 1)) | ||
36 | |||
27 | struct kwqe { | 37 | struct kwqe { |
28 | u32 kwqe_op_flag; | 38 | u32 kwqe_op_flag; |
29 | 39 | ||
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3b6d0ba86c71..70a225c8df5c 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -17649,8 +17649,6 @@ static int tg3_init_one(struct pci_dev *pdev, | |||
17649 | 17649 | ||
17650 | tg3_init_bufmgr_config(tp); | 17650 | tg3_init_bufmgr_config(tp); |
17651 | 17651 | ||
17652 | features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; | ||
17653 | |||
17654 | /* 5700 B0 chips do not support checksumming correctly due | 17652 | /* 5700 B0 chips do not support checksumming correctly due |
17655 | * to hardware bugs. | 17653 | * to hardware bugs. |
17656 | */ | 17654 | */ |
@@ -17682,7 +17680,8 @@ static int tg3_init_one(struct pci_dev *pdev, | |||
17682 | features |= NETIF_F_TSO_ECN; | 17680 | features |= NETIF_F_TSO_ECN; |
17683 | } | 17681 | } |
17684 | 17682 | ||
17685 | dev->features |= features; | 17683 | dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX | |
17684 | NETIF_F_HW_VLAN_CTAG_RX; | ||
17686 | dev->vlan_features |= features; | 17685 | dev->vlan_features |= features; |
17687 | 17686 | ||
17688 | /* | 17687 | /* |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index f418f4f20f94..8d76fca7fde7 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <net/ip.h> | 23 | #include <net/ip.h> |
24 | #include <net/ipv6.h> | 24 | #include <net/ipv6.h> |
25 | #include <linux/io.h> | ||
25 | #include <linux/of.h> | 26 | #include <linux/of.h> |
26 | #include <linux/of_irq.h> | 27 | #include <linux/of_irq.h> |
27 | #include <linux/of_mdio.h> | 28 | #include <linux/of_mdio.h> |
@@ -88,8 +89,9 @@ | |||
88 | #define MVNETA_TX_IN_PRGRS BIT(1) | 89 | #define MVNETA_TX_IN_PRGRS BIT(1) |
89 | #define MVNETA_TX_FIFO_EMPTY BIT(8) | 90 | #define MVNETA_TX_FIFO_EMPTY BIT(8) |
90 | #define MVNETA_RX_MIN_FRAME_SIZE 0x247c | 91 | #define MVNETA_RX_MIN_FRAME_SIZE 0x247c |
91 | #define MVNETA_SGMII_SERDES_CFG 0x24A0 | 92 | #define MVNETA_SERDES_CFG 0x24A0 |
92 | #define MVNETA_SGMII_SERDES_PROTO 0x0cc7 | 93 | #define MVNETA_SGMII_SERDES_PROTO 0x0cc7 |
94 | #define MVNETA_RGMII_SERDES_PROTO 0x0667 | ||
93 | #define MVNETA_TYPE_PRIO 0x24bc | 95 | #define MVNETA_TYPE_PRIO 0x24bc |
94 | #define MVNETA_FORCE_UNI BIT(21) | 96 | #define MVNETA_FORCE_UNI BIT(21) |
95 | #define MVNETA_TXQ_CMD_1 0x24e4 | 97 | #define MVNETA_TXQ_CMD_1 0x24e4 |
@@ -161,7 +163,7 @@ | |||
161 | #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc | 163 | #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc |
162 | #define MVNETA_GMAC0_PORT_ENABLE BIT(0) | 164 | #define MVNETA_GMAC0_PORT_ENABLE BIT(0) |
163 | #define MVNETA_GMAC_CTRL_2 0x2c08 | 165 | #define MVNETA_GMAC_CTRL_2 0x2c08 |
164 | #define MVNETA_GMAC2_PSC_ENABLE BIT(3) | 166 | #define MVNETA_GMAC2_PCS_ENABLE BIT(3) |
165 | #define MVNETA_GMAC2_PORT_RGMII BIT(4) | 167 | #define MVNETA_GMAC2_PORT_RGMII BIT(4) |
166 | #define MVNETA_GMAC2_PORT_RESET BIT(6) | 168 | #define MVNETA_GMAC2_PORT_RESET BIT(6) |
167 | #define MVNETA_GMAC_STATUS 0x2c10 | 169 | #define MVNETA_GMAC_STATUS 0x2c10 |
@@ -710,35 +712,6 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp, | |||
710 | mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); | 712 | mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); |
711 | } | 713 | } |
712 | 714 | ||
713 | |||
714 | |||
715 | /* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */ | ||
716 | static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable) | ||
717 | { | ||
718 | u32 val; | ||
719 | |||
720 | val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); | ||
721 | |||
722 | if (enable) | ||
723 | val |= MVNETA_GMAC2_PORT_RGMII; | ||
724 | else | ||
725 | val &= ~MVNETA_GMAC2_PORT_RGMII; | ||
726 | |||
727 | mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); | ||
728 | } | ||
729 | |||
730 | /* Config SGMII port */ | ||
731 | static void mvneta_port_sgmii_config(struct mvneta_port *pp) | ||
732 | { | ||
733 | u32 val; | ||
734 | |||
735 | val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); | ||
736 | val |= MVNETA_GMAC2_PSC_ENABLE; | ||
737 | mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); | ||
738 | |||
739 | mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); | ||
740 | } | ||
741 | |||
742 | /* Start the Ethernet port RX and TX activity */ | 715 | /* Start the Ethernet port RX and TX activity */ |
743 | static void mvneta_port_up(struct mvneta_port *pp) | 716 | static void mvneta_port_up(struct mvneta_port *pp) |
744 | { | 717 | { |
@@ -2756,12 +2729,15 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) | |||
2756 | mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); | 2729 | mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); |
2757 | 2730 | ||
2758 | if (phy_mode == PHY_INTERFACE_MODE_SGMII) | 2731 | if (phy_mode == PHY_INTERFACE_MODE_SGMII) |
2759 | mvneta_port_sgmii_config(pp); | 2732 | mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); |
2733 | else | ||
2734 | mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO); | ||
2735 | |||
2736 | val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); | ||
2760 | 2737 | ||
2761 | mvneta_gmac_rgmii_set(pp, 1); | 2738 | val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; |
2762 | 2739 | ||
2763 | /* Cancel Port Reset */ | 2740 | /* Cancel Port Reset */ |
2764 | val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); | ||
2765 | val &= ~MVNETA_GMAC2_PORT_RESET; | 2741 | val &= ~MVNETA_GMAC2_PORT_RESET; |
2766 | mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); | 2742 | mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); |
2767 | 2743 | ||
@@ -2774,6 +2750,7 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) | |||
2774 | static int mvneta_probe(struct platform_device *pdev) | 2750 | static int mvneta_probe(struct platform_device *pdev) |
2775 | { | 2751 | { |
2776 | const struct mbus_dram_target_info *dram_target_info; | 2752 | const struct mbus_dram_target_info *dram_target_info; |
2753 | struct resource *res; | ||
2777 | struct device_node *dn = pdev->dev.of_node; | 2754 | struct device_node *dn = pdev->dev.of_node; |
2778 | struct device_node *phy_node; | 2755 | struct device_node *phy_node; |
2779 | u32 phy_addr; | 2756 | u32 phy_addr; |
@@ -2838,9 +2815,15 @@ static int mvneta_probe(struct platform_device *pdev) | |||
2838 | 2815 | ||
2839 | clk_prepare_enable(pp->clk); | 2816 | clk_prepare_enable(pp->clk); |
2840 | 2817 | ||
2841 | pp->base = of_iomap(dn, 0); | 2818 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2819 | if (!res) { | ||
2820 | err = -ENODEV; | ||
2821 | goto err_clk; | ||
2822 | } | ||
2823 | |||
2824 | pp->base = devm_ioremap_resource(&pdev->dev, res); | ||
2842 | if (pp->base == NULL) { | 2825 | if (pp->base == NULL) { |
2843 | err = -ENOMEM; | 2826 | err = PTR_ERR(pp->base); |
2844 | goto err_clk; | 2827 | goto err_clk; |
2845 | } | 2828 | } |
2846 | 2829 | ||
@@ -2848,7 +2831,7 @@ static int mvneta_probe(struct platform_device *pdev) | |||
2848 | pp->stats = alloc_percpu(struct mvneta_pcpu_stats); | 2831 | pp->stats = alloc_percpu(struct mvneta_pcpu_stats); |
2849 | if (!pp->stats) { | 2832 | if (!pp->stats) { |
2850 | err = -ENOMEM; | 2833 | err = -ENOMEM; |
2851 | goto err_unmap; | 2834 | goto err_clk; |
2852 | } | 2835 | } |
2853 | 2836 | ||
2854 | for_each_possible_cpu(cpu) { | 2837 | for_each_possible_cpu(cpu) { |
@@ -2913,8 +2896,6 @@ err_deinit: | |||
2913 | mvneta_deinit(pp); | 2896 | mvneta_deinit(pp); |
2914 | err_free_stats: | 2897 | err_free_stats: |
2915 | free_percpu(pp->stats); | 2898 | free_percpu(pp->stats); |
2916 | err_unmap: | ||
2917 | iounmap(pp->base); | ||
2918 | err_clk: | 2899 | err_clk: |
2919 | clk_disable_unprepare(pp->clk); | 2900 | clk_disable_unprepare(pp->clk); |
2920 | err_free_irq: | 2901 | err_free_irq: |
@@ -2934,7 +2915,6 @@ static int mvneta_remove(struct platform_device *pdev) | |||
2934 | mvneta_deinit(pp); | 2915 | mvneta_deinit(pp); |
2935 | clk_disable_unprepare(pp->clk); | 2916 | clk_disable_unprepare(pp->clk); |
2936 | free_percpu(pp->stats); | 2917 | free_percpu(pp->stats); |
2937 | iounmap(pp->base); | ||
2938 | irq_dispose_mapping(dev->irq); | 2918 | irq_dispose_mapping(dev->irq); |
2939 | free_netdev(dev); | 2919 | free_netdev(dev); |
2940 | 2920 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 936c15364739..d413e60071d4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
@@ -2681,7 +2681,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, | |||
2681 | 2681 | ||
2682 | static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) | 2682 | static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) |
2683 | { | 2683 | { |
2684 | int ret = __mlx4_init_one(pdev, 0); | 2684 | const struct pci_device_id *id; |
2685 | int ret; | ||
2686 | |||
2687 | id = pci_match_id(mlx4_pci_table, pdev); | ||
2688 | ret = __mlx4_init_one(pdev, id->driver_data); | ||
2685 | 2689 | ||
2686 | return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; | 2690 | return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; |
2687 | } | 2691 | } |
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 727b546a9eb8..e0c92e0e5e1d 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/crc32.h> | 23 | #include <linux/crc32.h> |
24 | #include <linux/mii.h> | 24 | #include <linux/mii.h> |
25 | #include <linux/eeprom_93cx6.h> | 25 | #include <linux/eeprom_93cx6.h> |
26 | #include <linux/regulator/consumer.h> | ||
26 | 27 | ||
27 | #include <linux/spi/spi.h> | 28 | #include <linux/spi/spi.h> |
28 | 29 | ||
@@ -83,6 +84,7 @@ union ks8851_tx_hdr { | |||
83 | * @rc_rxqcr: Cached copy of KS_RXQCR. | 84 | * @rc_rxqcr: Cached copy of KS_RXQCR. |
84 | * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom | 85 | * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom |
85 | * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. | 86 | * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM. |
87 | * @vdd_reg: Optional regulator supplying the chip | ||
86 | * | 88 | * |
87 | * The @lock ensures that the chip is protected when certain operations are | 89 | * The @lock ensures that the chip is protected when certain operations are |
88 | * in progress. When the read or write packet transfer is in progress, most | 90 | * in progress. When the read or write packet transfer is in progress, most |
@@ -130,6 +132,7 @@ struct ks8851_net { | |||
130 | struct spi_transfer spi_xfer2[2]; | 132 | struct spi_transfer spi_xfer2[2]; |
131 | 133 | ||
132 | struct eeprom_93cx6 eeprom; | 134 | struct eeprom_93cx6 eeprom; |
135 | struct regulator *vdd_reg; | ||
133 | }; | 136 | }; |
134 | 137 | ||
135 | static int msg_enable; | 138 | static int msg_enable; |
@@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_device *spi) | |||
1414 | ks->spidev = spi; | 1417 | ks->spidev = spi; |
1415 | ks->tx_space = 6144; | 1418 | ks->tx_space = 6144; |
1416 | 1419 | ||
1420 | ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd"); | ||
1421 | if (IS_ERR(ks->vdd_reg)) { | ||
1422 | ret = PTR_ERR(ks->vdd_reg); | ||
1423 | if (ret == -EPROBE_DEFER) | ||
1424 | goto err_reg; | ||
1425 | } else { | ||
1426 | ret = regulator_enable(ks->vdd_reg); | ||
1427 | if (ret) { | ||
1428 | dev_err(&spi->dev, "regulator enable fail: %d\n", | ||
1429 | ret); | ||
1430 | goto err_reg_en; | ||
1431 | } | ||
1432 | } | ||
1433 | |||
1434 | |||
1417 | mutex_init(&ks->lock); | 1435 | mutex_init(&ks->lock); |
1418 | spin_lock_init(&ks->statelock); | 1436 | spin_lock_init(&ks->statelock); |
1419 | 1437 | ||
@@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_device *spi) | |||
1508 | err_netdev: | 1526 | err_netdev: |
1509 | free_irq(ndev->irq, ks); | 1527 | free_irq(ndev->irq, ks); |
1510 | 1528 | ||
1511 | err_id: | ||
1512 | err_irq: | 1529 | err_irq: |
1530 | err_id: | ||
1531 | if (!IS_ERR(ks->vdd_reg)) | ||
1532 | regulator_disable(ks->vdd_reg); | ||
1533 | err_reg_en: | ||
1534 | if (!IS_ERR(ks->vdd_reg)) | ||
1535 | regulator_put(ks->vdd_reg); | ||
1536 | err_reg: | ||
1513 | free_netdev(ndev); | 1537 | free_netdev(ndev); |
1514 | return ret; | 1538 | return ret; |
1515 | } | 1539 | } |
@@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_device *spi) | |||
1523 | 1547 | ||
1524 | unregister_netdev(priv->netdev); | 1548 | unregister_netdev(priv->netdev); |
1525 | free_irq(spi->irq, priv); | 1549 | free_irq(spi->irq, priv); |
1550 | if (!IS_ERR(priv->vdd_reg)) { | ||
1551 | regulator_disable(priv->vdd_reg); | ||
1552 | regulator_put(priv->vdd_reg); | ||
1553 | } | ||
1526 | free_netdev(priv->netdev); | 1554 | free_netdev(priv->netdev); |
1527 | 1555 | ||
1528 | return 0; | 1556 | return 0; |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index ce2cfddbed50..656c65ddadb4 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
@@ -4765,7 +4765,9 @@ static int qlge_probe(struct pci_dev *pdev, | |||
4765 | ndev->features = ndev->hw_features; | 4765 | ndev->features = ndev->hw_features; |
4766 | ndev->vlan_features = ndev->hw_features; | 4766 | ndev->vlan_features = ndev->hw_features; |
4767 | /* vlan gets same features (except vlan filter) */ | 4767 | /* vlan gets same features (except vlan filter) */ |
4768 | ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; | 4768 | ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER | |
4769 | NETIF_F_HW_VLAN_CTAG_TX | | ||
4770 | NETIF_F_HW_VLAN_CTAG_RX); | ||
4769 | 4771 | ||
4770 | if (test_bit(QL_DMA64, &qdev->flags)) | 4772 | if (test_bit(QL_DMA64, &qdev->flags)) |
4771 | ndev->features |= NETIF_F_HIGHDMA; | 4773 | ndev->features |= NETIF_F_HIGHDMA; |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index ffd4d12acf6d..7d6d8ec676c8 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -2229,10 +2229,6 @@ static int cpsw_probe(struct platform_device *pdev) | |||
2229 | goto clean_ale_ret; | 2229 | goto clean_ale_ret; |
2230 | } | 2230 | } |
2231 | 2231 | ||
2232 | if (cpts_register(&pdev->dev, priv->cpts, | ||
2233 | data->cpts_clock_mult, data->cpts_clock_shift)) | ||
2234 | dev_err(priv->dev, "error registering cpts device\n"); | ||
2235 | |||
2236 | cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n", | 2232 | cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n", |
2237 | &ss_res->start, ndev->irq); | 2233 | &ss_res->start, ndev->irq); |
2238 | 2234 | ||
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 364d0c7952c0..88ef27067bf2 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c | |||
@@ -355,7 +355,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) | |||
355 | int i; | 355 | int i; |
356 | 356 | ||
357 | spin_lock_irqsave(&ctlr->lock, flags); | 357 | spin_lock_irqsave(&ctlr->lock, flags); |
358 | if (ctlr->state != CPDMA_STATE_ACTIVE) { | 358 | if (ctlr->state == CPDMA_STATE_TEARDOWN) { |
359 | spin_unlock_irqrestore(&ctlr->lock, flags); | 359 | spin_unlock_irqrestore(&ctlr->lock, flags); |
360 | return -EINVAL; | 360 | return -EINVAL; |
361 | } | 361 | } |
@@ -891,7 +891,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan) | |||
891 | unsigned timeout; | 891 | unsigned timeout; |
892 | 892 | ||
893 | spin_lock_irqsave(&chan->lock, flags); | 893 | spin_lock_irqsave(&chan->lock, flags); |
894 | if (chan->state != CPDMA_STATE_ACTIVE) { | 894 | if (chan->state == CPDMA_STATE_TEARDOWN) { |
895 | spin_unlock_irqrestore(&chan->lock, flags); | 895 | spin_unlock_irqrestore(&chan->lock, flags); |
896 | return -EINVAL; | 896 | return -EINVAL; |
897 | } | 897 | } |
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index cd9b164a0434..8f0e69ce07ca 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
@@ -1532,9 +1532,9 @@ static int emac_dev_open(struct net_device *ndev) | |||
1532 | struct device *emac_dev = &ndev->dev; | 1532 | struct device *emac_dev = &ndev->dev; |
1533 | u32 cnt; | 1533 | u32 cnt; |
1534 | struct resource *res; | 1534 | struct resource *res; |
1535 | int ret; | 1535 | int q, m, ret; |
1536 | int res_num = 0, irq_num = 0; | ||
1536 | int i = 0; | 1537 | int i = 0; |
1537 | int k = 0; | ||
1538 | struct emac_priv *priv = netdev_priv(ndev); | 1538 | struct emac_priv *priv = netdev_priv(ndev); |
1539 | 1539 | ||
1540 | pm_runtime_get(&priv->pdev->dev); | 1540 | pm_runtime_get(&priv->pdev->dev); |
@@ -1564,15 +1564,24 @@ static int emac_dev_open(struct net_device *ndev) | |||
1564 | } | 1564 | } |
1565 | 1565 | ||
1566 | /* Request IRQ */ | 1566 | /* Request IRQ */ |
1567 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, | ||
1568 | res_num))) { | ||
1569 | for (irq_num = res->start; irq_num <= res->end; irq_num++) { | ||
1570 | dev_err(emac_dev, "Request IRQ %d\n", irq_num); | ||
1571 | if (request_irq(irq_num, emac_irq, 0, ndev->name, | ||
1572 | ndev)) { | ||
1573 | dev_err(emac_dev, | ||
1574 | "DaVinci EMAC: request_irq() failed\n"); | ||
1575 | ret = -EBUSY; | ||
1567 | 1576 | ||
1568 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { | ||
1569 | for (i = res->start; i <= res->end; i++) { | ||
1570 | if (devm_request_irq(&priv->pdev->dev, i, emac_irq, | ||
1571 | 0, ndev->name, ndev)) | ||
1572 | goto rollback; | 1577 | goto rollback; |
1578 | } | ||
1573 | } | 1579 | } |
1574 | k++; | 1580 | res_num++; |
1575 | } | 1581 | } |
1582 | /* prepare counters for rollback in case of an error */ | ||
1583 | res_num--; | ||
1584 | irq_num--; | ||
1576 | 1585 | ||
1577 | /* Start/Enable EMAC hardware */ | 1586 | /* Start/Enable EMAC hardware */ |
1578 | emac_hw_enable(priv); | 1587 | emac_hw_enable(priv); |
@@ -1639,11 +1648,23 @@ static int emac_dev_open(struct net_device *ndev) | |||
1639 | 1648 | ||
1640 | return 0; | 1649 | return 0; |
1641 | 1650 | ||
1642 | rollback: | ||
1643 | |||
1644 | dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed"); | ||
1645 | ret = -EBUSY; | ||
1646 | err: | 1651 | err: |
1652 | emac_int_disable(priv); | ||
1653 | napi_disable(&priv->napi); | ||
1654 | |||
1655 | rollback: | ||
1656 | for (q = res_num; q >= 0; q--) { | ||
1657 | res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q); | ||
1658 | /* at the first iteration, irq_num is already set to the | ||
1659 | * right value | ||
1660 | */ | ||
1661 | if (q != res_num) | ||
1662 | irq_num = res->end; | ||
1663 | |||
1664 | for (m = irq_num; m >= res->start; m--) | ||
1665 | free_irq(m, ndev); | ||
1666 | } | ||
1667 | cpdma_ctlr_stop(priv->dma); | ||
1647 | pm_runtime_put(&priv->pdev->dev); | 1668 | pm_runtime_put(&priv->pdev->dev); |
1648 | return ret; | 1669 | return ret; |
1649 | } | 1670 | } |
@@ -1659,6 +1680,9 @@ err: | |||
1659 | */ | 1680 | */ |
1660 | static int emac_dev_stop(struct net_device *ndev) | 1681 | static int emac_dev_stop(struct net_device *ndev) |
1661 | { | 1682 | { |
1683 | struct resource *res; | ||
1684 | int i = 0; | ||
1685 | int irq_num; | ||
1662 | struct emac_priv *priv = netdev_priv(ndev); | 1686 | struct emac_priv *priv = netdev_priv(ndev); |
1663 | struct device *emac_dev = &ndev->dev; | 1687 | struct device *emac_dev = &ndev->dev; |
1664 | 1688 | ||
@@ -1674,6 +1698,13 @@ static int emac_dev_stop(struct net_device *ndev) | |||
1674 | if (priv->phydev) | 1698 | if (priv->phydev) |
1675 | phy_disconnect(priv->phydev); | 1699 | phy_disconnect(priv->phydev); |
1676 | 1700 | ||
1701 | /* Free IRQ */ | ||
1702 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) { | ||
1703 | for (irq_num = res->start; irq_num <= res->end; irq_num++) | ||
1704 | free_irq(irq_num, priv->ndev); | ||
1705 | i++; | ||
1706 | } | ||
1707 | |||
1677 | if (netif_msg_drv(priv)) | 1708 | if (netif_msg_drv(priv)) |
1678 | dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name); | 1709 | dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name); |
1679 | 1710 | ||
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index ef312bc6b865..6ac20a6738f4 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c | |||
@@ -923,7 +923,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
923 | if (rc) { | 923 | if (rc) { |
924 | dev_err(&pdev->dev, | 924 | dev_err(&pdev->dev, |
925 | "32-bit PCI DMA addresses not supported by the card!?\n"); | 925 | "32-bit PCI DMA addresses not supported by the card!?\n"); |
926 | goto err_out; | 926 | goto err_out_pci_disable; |
927 | } | 927 | } |
928 | 928 | ||
929 | /* sanity check */ | 929 | /* sanity check */ |
@@ -931,7 +931,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
931 | (pci_resource_len(pdev, 1) < io_size)) { | 931 | (pci_resource_len(pdev, 1) < io_size)) { |
932 | rc = -EIO; | 932 | rc = -EIO; |
933 | dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n"); | 933 | dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n"); |
934 | goto err_out; | 934 | goto err_out_pci_disable; |
935 | } | 935 | } |
936 | 936 | ||
937 | pioaddr = pci_resource_start(pdev, 0); | 937 | pioaddr = pci_resource_start(pdev, 0); |
@@ -942,7 +942,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
942 | dev = alloc_etherdev(sizeof(struct rhine_private)); | 942 | dev = alloc_etherdev(sizeof(struct rhine_private)); |
943 | if (!dev) { | 943 | if (!dev) { |
944 | rc = -ENOMEM; | 944 | rc = -ENOMEM; |
945 | goto err_out; | 945 | goto err_out_pci_disable; |
946 | } | 946 | } |
947 | SET_NETDEV_DEV(dev, &pdev->dev); | 947 | SET_NETDEV_DEV(dev, &pdev->dev); |
948 | 948 | ||
@@ -1084,6 +1084,8 @@ err_out_free_res: | |||
1084 | pci_release_regions(pdev); | 1084 | pci_release_regions(pdev); |
1085 | err_out_free_netdev: | 1085 | err_out_free_netdev: |
1086 | free_netdev(dev); | 1086 | free_netdev(dev); |
1087 | err_out_pci_disable: | ||
1088 | pci_disable_device(pdev); | ||
1087 | err_out: | 1089 | err_out: |
1088 | return rc; | 1090 | return rc; |
1089 | } | 1091 | } |
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index c14d39bf32d0..d7b2e947184b 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c | |||
@@ -180,7 +180,8 @@ static void ifb_setup(struct net_device *dev) | |||
180 | dev->tx_queue_len = TX_Q_LIMIT; | 180 | dev->tx_queue_len = TX_Q_LIMIT; |
181 | 181 | ||
182 | dev->features |= IFB_FEATURES; | 182 | dev->features |= IFB_FEATURES; |
183 | dev->vlan_features |= IFB_FEATURES; | 183 | dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX | |
184 | NETIF_F_HW_VLAN_STAG_TX); | ||
184 | 185 | ||
185 | dev->flags |= IFF_NOARP; | 186 | dev->flags |= IFF_NOARP; |
186 | dev->flags &= ~IFF_MULTICAST; | 187 | dev->flags &= ~IFF_MULTICAST; |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 4b970f7624c0..2f6989b1e0dc 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -683,10 +683,9 @@ EXPORT_SYMBOL(phy_detach); | |||
683 | int phy_suspend(struct phy_device *phydev) | 683 | int phy_suspend(struct phy_device *phydev) |
684 | { | 684 | { |
685 | struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver); | 685 | struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver); |
686 | struct ethtool_wolinfo wol; | 686 | struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; |
687 | 687 | ||
688 | /* If the device has WOL enabled, we cannot suspend the PHY */ | 688 | /* If the device has WOL enabled, we cannot suspend the PHY */ |
689 | wol.cmd = ETHTOOL_GWOL; | ||
690 | phy_ethtool_get_wol(phydev, &wol); | 689 | phy_ethtool_get_wol(phydev, &wol); |
691 | if (wol.wolopts) | 690 | if (wol.wolopts) |
692 | return -EBUSY; | 691 | return -EBUSY; |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index dbff290ed0e4..d350d2795e10 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -68,7 +68,6 @@ static struct usb_driver cdc_ncm_driver; | |||
68 | static int cdc_ncm_setup(struct usbnet *dev) | 68 | static int cdc_ncm_setup(struct usbnet *dev) |
69 | { | 69 | { |
70 | struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; | 70 | struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; |
71 | struct usb_cdc_ncm_ntb_parameters ncm_parm; | ||
72 | u32 val; | 71 | u32 val; |
73 | u8 flags; | 72 | u8 flags; |
74 | u8 iface_no; | 73 | u8 iface_no; |
@@ -82,22 +81,22 @@ static int cdc_ncm_setup(struct usbnet *dev) | |||
82 | err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS, | 81 | err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS, |
83 | USB_TYPE_CLASS | USB_DIR_IN | 82 | USB_TYPE_CLASS | USB_DIR_IN |
84 | |USB_RECIP_INTERFACE, | 83 | |USB_RECIP_INTERFACE, |
85 | 0, iface_no, &ncm_parm, | 84 | 0, iface_no, &ctx->ncm_parm, |
86 | sizeof(ncm_parm)); | 85 | sizeof(ctx->ncm_parm)); |
87 | if (err < 0) { | 86 | if (err < 0) { |
88 | dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n"); | 87 | dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n"); |
89 | return err; /* GET_NTB_PARAMETERS is required */ | 88 | return err; /* GET_NTB_PARAMETERS is required */ |
90 | } | 89 | } |
91 | 90 | ||
92 | /* read correct set of parameters according to device mode */ | 91 | /* read correct set of parameters according to device mode */ |
93 | ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize); | 92 | ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize); |
94 | ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize); | 93 | ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize); |
95 | ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder); | 94 | ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); |
96 | ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor); | 95 | ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); |
97 | ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment); | 96 | ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); |
98 | /* devices prior to NCM Errata shall set this field to zero */ | 97 | /* devices prior to NCM Errata shall set this field to zero */ |
99 | ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams); | 98 | ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); |
100 | ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported); | 99 | ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported); |
101 | 100 | ||
102 | /* there are some minor differences in NCM and MBIM defaults */ | 101 | /* there are some minor differences in NCM and MBIM defaults */ |
103 | if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) { | 102 | if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) { |
@@ -146,7 +145,7 @@ static int cdc_ncm_setup(struct usbnet *dev) | |||
146 | } | 145 | } |
147 | 146 | ||
148 | /* inform device about NTB input size changes */ | 147 | /* inform device about NTB input size changes */ |
149 | if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) { | 148 | if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) { |
150 | __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); | 149 | __le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max); |
151 | 150 | ||
152 | err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE, | 151 | err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE, |
@@ -162,14 +161,6 @@ static int cdc_ncm_setup(struct usbnet *dev) | |||
162 | dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n", | 161 | dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n", |
163 | CDC_NCM_NTB_MAX_SIZE_TX); | 162 | CDC_NCM_NTB_MAX_SIZE_TX); |
164 | ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX; | 163 | ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX; |
165 | |||
166 | /* Adding a pad byte here simplifies the handling in | ||
167 | * cdc_ncm_fill_tx_frame, by making tx_max always | ||
168 | * represent the real skb max size. | ||
169 | */ | ||
170 | if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0) | ||
171 | ctx->tx_max++; | ||
172 | |||
173 | } | 164 | } |
174 | 165 | ||
175 | /* | 166 | /* |
@@ -439,6 +430,10 @@ advance: | |||
439 | goto error2; | 430 | goto error2; |
440 | } | 431 | } |
441 | 432 | ||
433 | /* initialize data interface */ | ||
434 | if (cdc_ncm_setup(dev)) | ||
435 | goto error2; | ||
436 | |||
442 | /* configure data interface */ | 437 | /* configure data interface */ |
443 | temp = usb_set_interface(dev->udev, iface_no, data_altsetting); | 438 | temp = usb_set_interface(dev->udev, iface_no, data_altsetting); |
444 | if (temp) { | 439 | if (temp) { |
@@ -453,12 +448,6 @@ advance: | |||
453 | goto error2; | 448 | goto error2; |
454 | } | 449 | } |
455 | 450 | ||
456 | /* initialize data interface */ | ||
457 | if (cdc_ncm_setup(dev)) { | ||
458 | dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n"); | ||
459 | goto error2; | ||
460 | } | ||
461 | |||
462 | usb_set_intfdata(ctx->data, dev); | 451 | usb_set_intfdata(ctx->data, dev); |
463 | usb_set_intfdata(ctx->control, dev); | 452 | usb_set_intfdata(ctx->control, dev); |
464 | 453 | ||
@@ -475,6 +464,15 @@ advance: | |||
475 | dev->hard_mtu = ctx->tx_max; | 464 | dev->hard_mtu = ctx->tx_max; |
476 | dev->rx_urb_size = ctx->rx_max; | 465 | dev->rx_urb_size = ctx->rx_max; |
477 | 466 | ||
467 | /* cdc_ncm_setup will override dwNtbOutMaxSize if it is | ||
468 | * outside the sane range. Adding a pad byte here if necessary | ||
469 | * simplifies the handling in cdc_ncm_fill_tx_frame, making | ||
470 | * tx_max always represent the real skb max size. | ||
471 | */ | ||
472 | if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) && | ||
473 | ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0) | ||
474 | ctx->tx_max++; | ||
475 | |||
478 | return 0; | 476 | return 0; |
479 | 477 | ||
480 | error2: | 478 | error2: |
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index dd10d5817d2a..f9e96c427558 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -752,14 +752,12 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs); | |||
752 | // precondition: never called in_interrupt | 752 | // precondition: never called in_interrupt |
753 | static void usbnet_terminate_urbs(struct usbnet *dev) | 753 | static void usbnet_terminate_urbs(struct usbnet *dev) |
754 | { | 754 | { |
755 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup); | ||
756 | DECLARE_WAITQUEUE(wait, current); | 755 | DECLARE_WAITQUEUE(wait, current); |
757 | int temp; | 756 | int temp; |
758 | 757 | ||
759 | /* ensure there are no more active urbs */ | 758 | /* ensure there are no more active urbs */ |
760 | add_wait_queue(&unlink_wakeup, &wait); | 759 | add_wait_queue(&dev->wait, &wait); |
761 | set_current_state(TASK_UNINTERRUPTIBLE); | 760 | set_current_state(TASK_UNINTERRUPTIBLE); |
762 | dev->wait = &unlink_wakeup; | ||
763 | temp = unlink_urbs(dev, &dev->txq) + | 761 | temp = unlink_urbs(dev, &dev->txq) + |
764 | unlink_urbs(dev, &dev->rxq); | 762 | unlink_urbs(dev, &dev->rxq); |
765 | 763 | ||
@@ -773,15 +771,14 @@ static void usbnet_terminate_urbs(struct usbnet *dev) | |||
773 | "waited for %d urb completions\n", temp); | 771 | "waited for %d urb completions\n", temp); |
774 | } | 772 | } |
775 | set_current_state(TASK_RUNNING); | 773 | set_current_state(TASK_RUNNING); |
776 | dev->wait = NULL; | 774 | remove_wait_queue(&dev->wait, &wait); |
777 | remove_wait_queue(&unlink_wakeup, &wait); | ||
778 | } | 775 | } |
779 | 776 | ||
780 | int usbnet_stop (struct net_device *net) | 777 | int usbnet_stop (struct net_device *net) |
781 | { | 778 | { |
782 | struct usbnet *dev = netdev_priv(net); | 779 | struct usbnet *dev = netdev_priv(net); |
783 | struct driver_info *info = dev->driver_info; | 780 | struct driver_info *info = dev->driver_info; |
784 | int retval; | 781 | int retval, pm; |
785 | 782 | ||
786 | clear_bit(EVENT_DEV_OPEN, &dev->flags); | 783 | clear_bit(EVENT_DEV_OPEN, &dev->flags); |
787 | netif_stop_queue (net); | 784 | netif_stop_queue (net); |
@@ -791,6 +788,8 @@ int usbnet_stop (struct net_device *net) | |||
791 | net->stats.rx_packets, net->stats.tx_packets, | 788 | net->stats.rx_packets, net->stats.tx_packets, |
792 | net->stats.rx_errors, net->stats.tx_errors); | 789 | net->stats.rx_errors, net->stats.tx_errors); |
793 | 790 | ||
791 | /* to not race resume */ | ||
792 | pm = usb_autopm_get_interface(dev->intf); | ||
794 | /* allow minidriver to stop correctly (wireless devices to turn off | 793 | /* allow minidriver to stop correctly (wireless devices to turn off |
795 | * radio etc) */ | 794 | * radio etc) */ |
796 | if (info->stop) { | 795 | if (info->stop) { |
@@ -817,6 +816,9 @@ int usbnet_stop (struct net_device *net) | |||
817 | dev->flags = 0; | 816 | dev->flags = 0; |
818 | del_timer_sync (&dev->delay); | 817 | del_timer_sync (&dev->delay); |
819 | tasklet_kill (&dev->bh); | 818 | tasklet_kill (&dev->bh); |
819 | if (!pm) | ||
820 | usb_autopm_put_interface(dev->intf); | ||
821 | |||
820 | if (info->manage_power && | 822 | if (info->manage_power && |
821 | !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags)) | 823 | !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags)) |
822 | info->manage_power(dev, 0); | 824 | info->manage_power(dev, 0); |
@@ -1437,11 +1439,12 @@ static void usbnet_bh (unsigned long param) | |||
1437 | /* restart RX again after disabling due to high error rate */ | 1439 | /* restart RX again after disabling due to high error rate */ |
1438 | clear_bit(EVENT_RX_KILL, &dev->flags); | 1440 | clear_bit(EVENT_RX_KILL, &dev->flags); |
1439 | 1441 | ||
1440 | // waiting for all pending urbs to complete? | 1442 | /* waiting for all pending urbs to complete? |
1441 | if (dev->wait) { | 1443 | * only then can we forgo submitting anew |
1442 | if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) { | 1444 | */ |
1443 | wake_up (dev->wait); | 1445 | if (waitqueue_active(&dev->wait)) { |
1444 | } | 1446 | if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0) |
1447 | wake_up_all(&dev->wait); | ||
1445 | 1448 | ||
1446 | // or are we maybe short a few urbs? | 1449 | // or are we maybe short a few urbs? |
1447 | } else if (netif_running (dev->net) && | 1450 | } else if (netif_running (dev->net) && |
@@ -1580,6 +1583,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) | |||
1580 | dev->driver_name = name; | 1583 | dev->driver_name = name; |
1581 | dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV | 1584 | dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV |
1582 | | NETIF_MSG_PROBE | NETIF_MSG_LINK); | 1585 | | NETIF_MSG_PROBE | NETIF_MSG_LINK); |
1586 | init_waitqueue_head(&dev->wait); | ||
1583 | skb_queue_head_init (&dev->rxq); | 1587 | skb_queue_head_init (&dev->rxq); |
1584 | skb_queue_head_init (&dev->txq); | 1588 | skb_queue_head_init (&dev->txq); |
1585 | skb_queue_head_init (&dev->done); | 1589 | skb_queue_head_init (&dev->done); |
@@ -1791,9 +1795,10 @@ int usbnet_resume (struct usb_interface *intf) | |||
1791 | spin_unlock_irq(&dev->txq.lock); | 1795 | spin_unlock_irq(&dev->txq.lock); |
1792 | 1796 | ||
1793 | if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { | 1797 | if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { |
1794 | /* handle remote wakeup ASAP */ | 1798 | /* handle remote wakeup ASAP |
1795 | if (!dev->wait && | 1799 | * we cannot race against stop |
1796 | netif_device_present(dev->net) && | 1800 | */ |
1801 | if (netif_device_present(dev->net) && | ||
1797 | !timer_pending(&dev->delay) && | 1802 | !timer_pending(&dev->delay) && |
1798 | !test_bit(EVENT_RX_HALT, &dev->flags)) | 1803 | !test_bit(EVENT_RX_HALT, &dev->flags)) |
1799 | rx_alloc_submit(dev, GFP_NOIO); | 1804 | rx_alloc_submit(dev, GFP_NOIO); |
diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 5b374370f71c..c0e7c64765ab 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c | |||
@@ -286,7 +286,10 @@ static void veth_setup(struct net_device *dev) | |||
286 | dev->features |= NETIF_F_LLTX; | 286 | dev->features |= NETIF_F_LLTX; |
287 | dev->features |= VETH_FEATURES; | 287 | dev->features |= VETH_FEATURES; |
288 | dev->vlan_features = dev->features & | 288 | dev->vlan_features = dev->features & |
289 | ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); | 289 | ~(NETIF_F_HW_VLAN_CTAG_TX | |
290 | NETIF_F_HW_VLAN_STAG_TX | | ||
291 | NETIF_F_HW_VLAN_CTAG_RX | | ||
292 | NETIF_F_HW_VLAN_STAG_RX); | ||
290 | dev->destructor = veth_dev_free; | 293 | dev->destructor = veth_dev_free; |
291 | 294 | ||
292 | dev->hw_features = VETH_FEATURES; | 295 | dev->hw_features = VETH_FEATURES; |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5632a99cbbd2..841b60831df1 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -671,8 +671,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp) | |||
671 | if (err) | 671 | if (err) |
672 | break; | 672 | break; |
673 | } while (rq->vq->num_free); | 673 | } while (rq->vq->num_free); |
674 | if (unlikely(!virtqueue_kick(rq->vq))) | 674 | virtqueue_kick(rq->vq); |
675 | return false; | ||
676 | return !oom; | 675 | return !oom; |
677 | } | 676 | } |
678 | 677 | ||
@@ -877,7 +876,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
877 | err = xmit_skb(sq, skb); | 876 | err = xmit_skb(sq, skb); |
878 | 877 | ||
879 | /* This should not happen! */ | 878 | /* This should not happen! */ |
880 | if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) { | 879 | if (unlikely(err)) { |
881 | dev->stats.tx_fifo_errors++; | 880 | dev->stats.tx_fifo_errors++; |
882 | if (net_ratelimit()) | 881 | if (net_ratelimit()) |
883 | dev_warn(&dev->dev, | 882 | dev_warn(&dev->dev, |
@@ -886,6 +885,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
886 | kfree_skb(skb); | 885 | kfree_skb(skb); |
887 | return NETDEV_TX_OK; | 886 | return NETDEV_TX_OK; |
888 | } | 887 | } |
888 | virtqueue_kick(sq->vq); | ||
889 | 889 | ||
890 | /* Don't wait up for transmitted skbs to be freed. */ | 890 | /* Don't wait up for transmitted skbs to be freed. */ |
891 | skb_orphan(skb); | 891 | skb_orphan(skb); |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index b0f705c2378f..1236812c7be6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -1318,6 +1318,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) | |||
1318 | 1318 | ||
1319 | neigh_release(n); | 1319 | neigh_release(n); |
1320 | 1320 | ||
1321 | if (reply == NULL) | ||
1322 | goto out; | ||
1323 | |||
1321 | skb_reset_mac_header(reply); | 1324 | skb_reset_mac_header(reply); |
1322 | __skb_pull(reply, skb_network_offset(reply)); | 1325 | __skb_pull(reply, skb_network_offset(reply)); |
1323 | reply->ip_summed = CHECKSUM_UNNECESSARY; | 1326 | reply->ip_summed = CHECKSUM_UNNECESSARY; |
@@ -1339,15 +1342,103 @@ out: | |||
1339 | } | 1342 | } |
1340 | 1343 | ||
1341 | #if IS_ENABLED(CONFIG_IPV6) | 1344 | #if IS_ENABLED(CONFIG_IPV6) |
1345 | |||
1346 | static struct sk_buff *vxlan_na_create(struct sk_buff *request, | ||
1347 | struct neighbour *n, bool isrouter) | ||
1348 | { | ||
1349 | struct net_device *dev = request->dev; | ||
1350 | struct sk_buff *reply; | ||
1351 | struct nd_msg *ns, *na; | ||
1352 | struct ipv6hdr *pip6; | ||
1353 | u8 *daddr; | ||
1354 | int na_olen = 8; /* opt hdr + ETH_ALEN for target */ | ||
1355 | int ns_olen; | ||
1356 | int i, len; | ||
1357 | |||
1358 | if (dev == NULL) | ||
1359 | return NULL; | ||
1360 | |||
1361 | len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) + | ||
1362 | sizeof(*na) + na_olen + dev->needed_tailroom; | ||
1363 | reply = alloc_skb(len, GFP_ATOMIC); | ||
1364 | if (reply == NULL) | ||
1365 | return NULL; | ||
1366 | |||
1367 | reply->protocol = htons(ETH_P_IPV6); | ||
1368 | reply->dev = dev; | ||
1369 | skb_reserve(reply, LL_RESERVED_SPACE(request->dev)); | ||
1370 | skb_push(reply, sizeof(struct ethhdr)); | ||
1371 | skb_set_mac_header(reply, 0); | ||
1372 | |||
1373 | ns = (struct nd_msg *)skb_transport_header(request); | ||
1374 | |||
1375 | daddr = eth_hdr(request)->h_source; | ||
1376 | ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns); | ||
1377 | for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { | ||
1378 | if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { | ||
1379 | daddr = ns->opt + i + sizeof(struct nd_opt_hdr); | ||
1380 | break; | ||
1381 | } | ||
1382 | } | ||
1383 | |||
1384 | /* Ethernet header */ | ||
1385 | ether_addr_copy(eth_hdr(reply)->h_dest, daddr); | ||
1386 | ether_addr_copy(eth_hdr(reply)->h_source, n->ha); | ||
1387 | eth_hdr(reply)->h_proto = htons(ETH_P_IPV6); | ||
1388 | reply->protocol = htons(ETH_P_IPV6); | ||
1389 | |||
1390 | skb_pull(reply, sizeof(struct ethhdr)); | ||
1391 | skb_set_network_header(reply, 0); | ||
1392 | skb_put(reply, sizeof(struct ipv6hdr)); | ||
1393 | |||
1394 | /* IPv6 header */ | ||
1395 | |||
1396 | pip6 = ipv6_hdr(reply); | ||
1397 | memset(pip6, 0, sizeof(struct ipv6hdr)); | ||
1398 | pip6->version = 6; | ||
1399 | pip6->priority = ipv6_hdr(request)->priority; | ||
1400 | pip6->nexthdr = IPPROTO_ICMPV6; | ||
1401 | pip6->hop_limit = 255; | ||
1402 | pip6->daddr = ipv6_hdr(request)->saddr; | ||
1403 | pip6->saddr = *(struct in6_addr *)n->primary_key; | ||
1404 | |||
1405 | skb_pull(reply, sizeof(struct ipv6hdr)); | ||
1406 | skb_set_transport_header(reply, 0); | ||
1407 | |||
1408 | na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen); | ||
1409 | |||
1410 | /* Neighbor Advertisement */ | ||
1411 | memset(na, 0, sizeof(*na)+na_olen); | ||
1412 | na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT; | ||
1413 | na->icmph.icmp6_router = isrouter; | ||
1414 | na->icmph.icmp6_override = 1; | ||
1415 | na->icmph.icmp6_solicited = 1; | ||
1416 | na->target = ns->target; | ||
1417 | ether_addr_copy(&na->opt[2], n->ha); | ||
1418 | na->opt[0] = ND_OPT_TARGET_LL_ADDR; | ||
1419 | na->opt[1] = na_olen >> 3; | ||
1420 | |||
1421 | na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr, | ||
1422 | &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6, | ||
1423 | csum_partial(na, sizeof(*na)+na_olen, 0)); | ||
1424 | |||
1425 | pip6->payload_len = htons(sizeof(*na)+na_olen); | ||
1426 | |||
1427 | skb_push(reply, sizeof(struct ipv6hdr)); | ||
1428 | |||
1429 | reply->ip_summed = CHECKSUM_UNNECESSARY; | ||
1430 | |||
1431 | return reply; | ||
1432 | } | ||
1433 | |||
1342 | static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) | 1434 | static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) |
1343 | { | 1435 | { |
1344 | struct vxlan_dev *vxlan = netdev_priv(dev); | 1436 | struct vxlan_dev *vxlan = netdev_priv(dev); |
1345 | struct neighbour *n; | 1437 | struct nd_msg *msg; |
1346 | union vxlan_addr ipa; | ||
1347 | const struct ipv6hdr *iphdr; | 1438 | const struct ipv6hdr *iphdr; |
1348 | const struct in6_addr *saddr, *daddr; | 1439 | const struct in6_addr *saddr, *daddr; |
1349 | struct nd_msg *msg; | 1440 | struct neighbour *n; |
1350 | struct inet6_dev *in6_dev = NULL; | 1441 | struct inet6_dev *in6_dev; |
1351 | 1442 | ||
1352 | in6_dev = __in6_dev_get(dev); | 1443 | in6_dev = __in6_dev_get(dev); |
1353 | if (!in6_dev) | 1444 | if (!in6_dev) |
@@ -1360,19 +1451,20 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) | |||
1360 | saddr = &iphdr->saddr; | 1451 | saddr = &iphdr->saddr; |
1361 | daddr = &iphdr->daddr; | 1452 | daddr = &iphdr->daddr; |
1362 | 1453 | ||
1363 | if (ipv6_addr_loopback(daddr) || | ||
1364 | ipv6_addr_is_multicast(daddr)) | ||
1365 | goto out; | ||
1366 | |||
1367 | msg = (struct nd_msg *)skb_transport_header(skb); | 1454 | msg = (struct nd_msg *)skb_transport_header(skb); |
1368 | if (msg->icmph.icmp6_code != 0 || | 1455 | if (msg->icmph.icmp6_code != 0 || |
1369 | msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) | 1456 | msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) |
1370 | goto out; | 1457 | goto out; |
1371 | 1458 | ||
1372 | n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev); | 1459 | if (ipv6_addr_loopback(daddr) || |
1460 | ipv6_addr_is_multicast(&msg->target)) | ||
1461 | goto out; | ||
1462 | |||
1463 | n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev); | ||
1373 | 1464 | ||
1374 | if (n) { | 1465 | if (n) { |
1375 | struct vxlan_fdb *f; | 1466 | struct vxlan_fdb *f; |
1467 | struct sk_buff *reply; | ||
1376 | 1468 | ||
1377 | if (!(n->nud_state & NUD_CONNECTED)) { | 1469 | if (!(n->nud_state & NUD_CONNECTED)) { |
1378 | neigh_release(n); | 1470 | neigh_release(n); |
@@ -1386,13 +1478,23 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) | |||
1386 | goto out; | 1478 | goto out; |
1387 | } | 1479 | } |
1388 | 1480 | ||
1389 | ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target, | 1481 | reply = vxlan_na_create(skb, n, |
1390 | !!in6_dev->cnf.forwarding, | 1482 | !!(f ? f->flags & NTF_ROUTER : 0)); |
1391 | true, false, false); | 1483 | |
1392 | neigh_release(n); | 1484 | neigh_release(n); |
1485 | |||
1486 | if (reply == NULL) | ||
1487 | goto out; | ||
1488 | |||
1489 | if (netif_rx_ni(reply) == NET_RX_DROP) | ||
1490 | dev->stats.rx_dropped++; | ||
1491 | |||
1393 | } else if (vxlan->flags & VXLAN_F_L3MISS) { | 1492 | } else if (vxlan->flags & VXLAN_F_L3MISS) { |
1394 | ipa.sin6.sin6_addr = *daddr; | 1493 | union vxlan_addr ipa = { |
1395 | ipa.sa.sa_family = AF_INET6; | 1494 | .sin6.sin6_addr = msg->target, |
1495 | .sa.sa_family = AF_INET6, | ||
1496 | }; | ||
1497 | |||
1396 | vxlan_ip_miss(dev, &ipa); | 1498 | vxlan_ip_miss(dev, &ipa); |
1397 | } | 1499 | } |
1398 | 1500 | ||
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 303ce27964c1..9078a6c5a74e 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
@@ -1548,6 +1548,7 @@ bool ath9k_hw_check_alive(struct ath_hw *ah) | |||
1548 | if (reg != last_val) | 1548 | if (reg != last_val) |
1549 | return true; | 1549 | return true; |
1550 | 1550 | ||
1551 | udelay(1); | ||
1551 | last_val = reg; | 1552 | last_val = reg; |
1552 | if ((reg & 0x7E7FFFEF) == 0x00702400) | 1553 | if ((reg & 0x7E7FFFEF) == 0x00702400) |
1553 | continue; | 1554 | continue; |
@@ -1560,8 +1561,6 @@ bool ath9k_hw_check_alive(struct ath_hw *ah) | |||
1560 | default: | 1561 | default: |
1561 | return true; | 1562 | return true; |
1562 | } | 1563 | } |
1563 | |||
1564 | udelay(1); | ||
1565 | } while (count-- > 0); | 1564 | } while (count-- > 0); |
1566 | 1565 | ||
1567 | return false; | 1566 | return false; |
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index f042a18c8495..55897d508a76 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -2063,7 +2063,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, | |||
2063 | 2063 | ||
2064 | ATH_TXBUF_RESET(bf); | 2064 | ATH_TXBUF_RESET(bf); |
2065 | 2065 | ||
2066 | if (tid) { | 2066 | if (tid && ieee80211_is_data_present(hdr->frame_control)) { |
2067 | fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; | 2067 | fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; |
2068 | seqno = tid->seq_next; | 2068 | seqno = tid->seq_next; |
2069 | hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); | 2069 | hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); |
@@ -2186,7 +2186,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
2186 | txq->stopped = true; | 2186 | txq->stopped = true; |
2187 | } | 2187 | } |
2188 | 2188 | ||
2189 | if (txctl->an) | 2189 | if (txctl->an && ieee80211_is_data_present(hdr->frame_control)) |
2190 | tid = ath_get_skb_tid(sc, txctl->an, skb); | 2190 | tid = ath_get_skb_tid(sc, txctl->an, skb); |
2191 | 2191 | ||
2192 | if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) { | 2192 | if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) { |
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 119ee6eaf1c3..ddaa9efd053d 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | |||
@@ -1948,8 +1948,10 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus, | |||
1948 | if (pkt_pad == NULL) | 1948 | if (pkt_pad == NULL) |
1949 | return -ENOMEM; | 1949 | return -ENOMEM; |
1950 | ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad); | 1950 | ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad); |
1951 | if (unlikely(ret < 0)) | 1951 | if (unlikely(ret < 0)) { |
1952 | kfree_skb(pkt_pad); | ||
1952 | return ret; | 1953 | return ret; |
1954 | } | ||
1953 | memcpy(pkt_pad->data, | 1955 | memcpy(pkt_pad->data, |
1954 | pkt->data + pkt->len - tail_chop, | 1956 | pkt->data + pkt->len - tail_chop, |
1955 | tail_chop); | 1957 | tail_chop); |
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 7f8b5d156c8c..41d4a8167dc3 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c | |||
@@ -5460,14 +5460,15 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) | |||
5460 | 5460 | ||
5461 | rt2800_bbp_write(rt2x00dev, 68, 0x0b); | 5461 | rt2800_bbp_write(rt2x00dev, 68, 0x0b); |
5462 | 5462 | ||
5463 | rt2800_bbp_write(rt2x00dev, 69, 0x0d); | 5463 | rt2800_bbp_write(rt2x00dev, 69, 0x12); |
5464 | rt2800_bbp_write(rt2x00dev, 70, 0x06); | ||
5465 | rt2800_bbp_write(rt2x00dev, 73, 0x13); | 5464 | rt2800_bbp_write(rt2x00dev, 73, 0x13); |
5466 | rt2800_bbp_write(rt2x00dev, 75, 0x46); | 5465 | rt2800_bbp_write(rt2x00dev, 75, 0x46); |
5467 | rt2800_bbp_write(rt2x00dev, 76, 0x28); | 5466 | rt2800_bbp_write(rt2x00dev, 76, 0x28); |
5468 | 5467 | ||
5469 | rt2800_bbp_write(rt2x00dev, 77, 0x59); | 5468 | rt2800_bbp_write(rt2x00dev, 77, 0x59); |
5470 | 5469 | ||
5470 | rt2800_bbp_write(rt2x00dev, 70, 0x0a); | ||
5471 | |||
5471 | rt2800_bbp_write(rt2x00dev, 79, 0x13); | 5472 | rt2800_bbp_write(rt2x00dev, 79, 0x13); |
5472 | rt2800_bbp_write(rt2x00dev, 80, 0x05); | 5473 | rt2800_bbp_write(rt2x00dev, 80, 0x05); |
5473 | rt2800_bbp_write(rt2x00dev, 81, 0x33); | 5474 | rt2800_bbp_write(rt2x00dev, 81, 0x33); |
@@ -5510,7 +5511,6 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) | |||
5510 | if (rt2x00_rt(rt2x00dev, RT5392)) { | 5511 | if (rt2x00_rt(rt2x00dev, RT5392)) { |
5511 | rt2800_bbp_write(rt2x00dev, 134, 0xd0); | 5512 | rt2800_bbp_write(rt2x00dev, 134, 0xd0); |
5512 | rt2800_bbp_write(rt2x00dev, 135, 0xf6); | 5513 | rt2800_bbp_write(rt2x00dev, 135, 0xf6); |
5513 | rt2800_bbp_write(rt2x00dev, 148, 0x84); | ||
5514 | } | 5514 | } |
5515 | 5515 | ||
5516 | rt2800_disable_unused_dac_adc(rt2x00dev); | 5516 | rt2800_disable_unused_dac_adc(rt2x00dev); |
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 167f3d00c916..66977ebf13b3 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c | |||
@@ -183,9 +183,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
183 | struct resource r = {0}; | 183 | struct resource r = {0}; |
184 | int i, flags; | 184 | int i, flags; |
185 | 185 | ||
186 | if (acpi_dev_resource_memory(res, &r) | 186 | if (acpi_dev_resource_address_space(res, &r) |
187 | || acpi_dev_resource_io(res, &r) | ||
188 | || acpi_dev_resource_address_space(res, &r) | ||
189 | || acpi_dev_resource_ext_address_space(res, &r)) { | 187 | || acpi_dev_resource_ext_address_space(res, &r)) { |
190 | pnp_add_resource(dev, &r); | 188 | pnp_add_resource(dev, &r); |
191 | return AE_OK; | 189 | return AE_OK; |
@@ -217,6 +215,17 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, | |||
217 | } | 215 | } |
218 | 216 | ||
219 | switch (res->type) { | 217 | switch (res->type) { |
218 | case ACPI_RESOURCE_TYPE_MEMORY24: | ||
219 | case ACPI_RESOURCE_TYPE_MEMORY32: | ||
220 | case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: | ||
221 | if (acpi_dev_resource_memory(res, &r)) | ||
222 | pnp_add_resource(dev, &r); | ||
223 | break; | ||
224 | case ACPI_RESOURCE_TYPE_IO: | ||
225 | case ACPI_RESOURCE_TYPE_FIXED_IO: | ||
226 | if (acpi_dev_resource_io(res, &r)) | ||
227 | pnp_add_resource(dev, &r); | ||
228 | break; | ||
220 | case ACPI_RESOURCE_TYPE_DMA: | 229 | case ACPI_RESOURCE_TYPE_DMA: |
221 | dma = &res->data.dma; | 230 | dma = &res->data.dma; |
222 | if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) | 231 | if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) |
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 1f375051483a..5642a9b250c2 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c | |||
@@ -325,7 +325,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) | |||
325 | if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) | 325 | if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) |
326 | continue; | 326 | continue; |
327 | 327 | ||
328 | if (abrt_task->sc->device->lun != abrt_task->sc->device->lun) | 328 | if (sc->device->lun != abrt_task->sc->device->lun) |
329 | continue; | 329 | continue; |
330 | 330 | ||
331 | /* Invalidate WRB Posted for this Task */ | 331 | /* Invalidate WRB Posted for this Task */ |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index ed880891cb7c..e9279a8c1e1c 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c | |||
@@ -594,13 +594,13 @@ static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req) | |||
594 | mp_req->mp_resp_bd = NULL; | 594 | mp_req->mp_resp_bd = NULL; |
595 | } | 595 | } |
596 | if (mp_req->req_buf) { | 596 | if (mp_req->req_buf) { |
597 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 597 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
598 | mp_req->req_buf, | 598 | mp_req->req_buf, |
599 | mp_req->req_buf_dma); | 599 | mp_req->req_buf_dma); |
600 | mp_req->req_buf = NULL; | 600 | mp_req->req_buf = NULL; |
601 | } | 601 | } |
602 | if (mp_req->resp_buf) { | 602 | if (mp_req->resp_buf) { |
603 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 603 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
604 | mp_req->resp_buf, | 604 | mp_req->resp_buf, |
605 | mp_req->resp_buf_dma); | 605 | mp_req->resp_buf_dma); |
606 | mp_req->resp_buf = NULL; | 606 | mp_req->resp_buf = NULL; |
@@ -622,7 +622,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) | |||
622 | 622 | ||
623 | mp_req->req_len = sizeof(struct fcp_cmnd); | 623 | mp_req->req_len = sizeof(struct fcp_cmnd); |
624 | io_req->data_xfer_len = mp_req->req_len; | 624 | io_req->data_xfer_len = mp_req->req_len; |
625 | mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 625 | mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
626 | &mp_req->req_buf_dma, | 626 | &mp_req->req_buf_dma, |
627 | GFP_ATOMIC); | 627 | GFP_ATOMIC); |
628 | if (!mp_req->req_buf) { | 628 | if (!mp_req->req_buf) { |
@@ -631,7 +631,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) | |||
631 | return FAILED; | 631 | return FAILED; |
632 | } | 632 | } |
633 | 633 | ||
634 | mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 634 | mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
635 | &mp_req->resp_buf_dma, | 635 | &mp_req->resp_buf_dma, |
636 | GFP_ATOMIC); | 636 | GFP_ATOMIC); |
637 | if (!mp_req->resp_buf) { | 637 | if (!mp_req->resp_buf) { |
@@ -639,8 +639,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) | |||
639 | bnx2fc_free_mp_resc(io_req); | 639 | bnx2fc_free_mp_resc(io_req); |
640 | return FAILED; | 640 | return FAILED; |
641 | } | 641 | } |
642 | memset(mp_req->req_buf, 0, PAGE_SIZE); | 642 | memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE); |
643 | memset(mp_req->resp_buf, 0, PAGE_SIZE); | 643 | memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE); |
644 | 644 | ||
645 | /* Allocate and map mp_req_bd and mp_resp_bd */ | 645 | /* Allocate and map mp_req_bd and mp_resp_bd */ |
646 | sz = sizeof(struct fcoe_bd_ctx); | 646 | sz = sizeof(struct fcoe_bd_ctx); |
@@ -665,7 +665,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) | |||
665 | mp_req_bd = mp_req->mp_req_bd; | 665 | mp_req_bd = mp_req->mp_req_bd; |
666 | mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff; | 666 | mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff; |
667 | mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32); | 667 | mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32); |
668 | mp_req_bd->buf_len = PAGE_SIZE; | 668 | mp_req_bd->buf_len = CNIC_PAGE_SIZE; |
669 | mp_req_bd->flags = 0; | 669 | mp_req_bd->flags = 0; |
670 | 670 | ||
671 | /* | 671 | /* |
@@ -677,7 +677,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) | |||
677 | addr = mp_req->resp_buf_dma; | 677 | addr = mp_req->resp_buf_dma; |
678 | mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; | 678 | mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; |
679 | mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); | 679 | mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); |
680 | mp_resp_bd->buf_len = PAGE_SIZE; | 680 | mp_resp_bd->buf_len = CNIC_PAGE_SIZE; |
681 | mp_resp_bd->flags = 0; | 681 | mp_resp_bd->flags = 0; |
682 | 682 | ||
683 | return SUCCESS; | 683 | return SUCCESS; |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index 4d93177dfb53..d9bae5672273 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c | |||
@@ -673,7 +673,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
673 | 673 | ||
674 | /* Allocate and map SQ */ | 674 | /* Allocate and map SQ */ |
675 | tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; | 675 | tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; |
676 | tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 676 | tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
677 | CNIC_PAGE_MASK; | ||
677 | 678 | ||
678 | tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, | 679 | tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, |
679 | &tgt->sq_dma, GFP_KERNEL); | 680 | &tgt->sq_dma, GFP_KERNEL); |
@@ -686,7 +687,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
686 | 687 | ||
687 | /* Allocate and map CQ */ | 688 | /* Allocate and map CQ */ |
688 | tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; | 689 | tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; |
689 | tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 690 | tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
691 | CNIC_PAGE_MASK; | ||
690 | 692 | ||
691 | tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, | 693 | tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, |
692 | &tgt->cq_dma, GFP_KERNEL); | 694 | &tgt->cq_dma, GFP_KERNEL); |
@@ -699,7 +701,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
699 | 701 | ||
700 | /* Allocate and map RQ and RQ PBL */ | 702 | /* Allocate and map RQ and RQ PBL */ |
701 | tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; | 703 | tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; |
702 | tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 704 | tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
705 | CNIC_PAGE_MASK; | ||
703 | 706 | ||
704 | tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, | 707 | tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, |
705 | &tgt->rq_dma, GFP_KERNEL); | 708 | &tgt->rq_dma, GFP_KERNEL); |
@@ -710,8 +713,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
710 | } | 713 | } |
711 | memset(tgt->rq, 0, tgt->rq_mem_size); | 714 | memset(tgt->rq, 0, tgt->rq_mem_size); |
712 | 715 | ||
713 | tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *); | 716 | tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); |
714 | tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 717 | tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & |
718 | CNIC_PAGE_MASK; | ||
715 | 719 | ||
716 | tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, | 720 | tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, |
717 | &tgt->rq_pbl_dma, GFP_KERNEL); | 721 | &tgt->rq_pbl_dma, GFP_KERNEL); |
@@ -722,7 +726,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
722 | } | 726 | } |
723 | 727 | ||
724 | memset(tgt->rq_pbl, 0, tgt->rq_pbl_size); | 728 | memset(tgt->rq_pbl, 0, tgt->rq_pbl_size); |
725 | num_pages = tgt->rq_mem_size / PAGE_SIZE; | 729 | num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE; |
726 | page = tgt->rq_dma; | 730 | page = tgt->rq_dma; |
727 | pbl = (u32 *)tgt->rq_pbl; | 731 | pbl = (u32 *)tgt->rq_pbl; |
728 | 732 | ||
@@ -731,13 +735,13 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
731 | pbl++; | 735 | pbl++; |
732 | *pbl = (u32)((u64)page >> 32); | 736 | *pbl = (u32)((u64)page >> 32); |
733 | pbl++; | 737 | pbl++; |
734 | page += PAGE_SIZE; | 738 | page += CNIC_PAGE_SIZE; |
735 | } | 739 | } |
736 | 740 | ||
737 | /* Allocate and map XFERQ */ | 741 | /* Allocate and map XFERQ */ |
738 | tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; | 742 | tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; |
739 | tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) & | 743 | tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
740 | PAGE_MASK; | 744 | CNIC_PAGE_MASK; |
741 | 745 | ||
742 | tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, | 746 | tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, |
743 | &tgt->xferq_dma, GFP_KERNEL); | 747 | &tgt->xferq_dma, GFP_KERNEL); |
@@ -750,8 +754,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
750 | 754 | ||
751 | /* Allocate and map CONFQ & CONFQ PBL */ | 755 | /* Allocate and map CONFQ & CONFQ PBL */ |
752 | tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; | 756 | tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; |
753 | tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) & | 757 | tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
754 | PAGE_MASK; | 758 | CNIC_PAGE_MASK; |
755 | 759 | ||
756 | tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, | 760 | tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size, |
757 | &tgt->confq_dma, GFP_KERNEL); | 761 | &tgt->confq_dma, GFP_KERNEL); |
@@ -763,9 +767,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
763 | memset(tgt->confq, 0, tgt->confq_mem_size); | 767 | memset(tgt->confq, 0, tgt->confq_mem_size); |
764 | 768 | ||
765 | tgt->confq_pbl_size = | 769 | tgt->confq_pbl_size = |
766 | (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *); | 770 | (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); |
767 | tgt->confq_pbl_size = | 771 | tgt->confq_pbl_size = |
768 | (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 772 | (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
769 | 773 | ||
770 | tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, | 774 | tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, |
771 | tgt->confq_pbl_size, | 775 | tgt->confq_pbl_size, |
@@ -777,7 +781,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
777 | } | 781 | } |
778 | 782 | ||
779 | memset(tgt->confq_pbl, 0, tgt->confq_pbl_size); | 783 | memset(tgt->confq_pbl, 0, tgt->confq_pbl_size); |
780 | num_pages = tgt->confq_mem_size / PAGE_SIZE; | 784 | num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE; |
781 | page = tgt->confq_dma; | 785 | page = tgt->confq_dma; |
782 | pbl = (u32 *)tgt->confq_pbl; | 786 | pbl = (u32 *)tgt->confq_pbl; |
783 | 787 | ||
@@ -786,7 +790,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
786 | pbl++; | 790 | pbl++; |
787 | *pbl = (u32)((u64)page >> 32); | 791 | *pbl = (u32)((u64)page >> 32); |
788 | pbl++; | 792 | pbl++; |
789 | page += PAGE_SIZE; | 793 | page += CNIC_PAGE_SIZE; |
790 | } | 794 | } |
791 | 795 | ||
792 | /* Allocate and map ConnDB */ | 796 | /* Allocate and map ConnDB */ |
@@ -805,8 +809,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, | |||
805 | 809 | ||
806 | /* Allocate and map LCQ */ | 810 | /* Allocate and map LCQ */ |
807 | tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE; | 811 | tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE; |
808 | tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) & | 812 | tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & |
809 | PAGE_MASK; | 813 | CNIC_PAGE_MASK; |
810 | 814 | ||
811 | tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, | 815 | tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, |
812 | &tgt->lcq_dma, GFP_KERNEL); | 816 | &tgt->lcq_dma, GFP_KERNEL); |
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index e4cf23df4b4f..b87a1933f880 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c | |||
@@ -61,7 +61,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) | |||
61 | * yield integral num of page buffers | 61 | * yield integral num of page buffers |
62 | */ | 62 | */ |
63 | /* adjust SQ */ | 63 | /* adjust SQ */ |
64 | num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; | 64 | num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE; |
65 | if (hba->max_sqes < num_elements_per_pg) | 65 | if (hba->max_sqes < num_elements_per_pg) |
66 | hba->max_sqes = num_elements_per_pg; | 66 | hba->max_sqes = num_elements_per_pg; |
67 | else if (hba->max_sqes % num_elements_per_pg) | 67 | else if (hba->max_sqes % num_elements_per_pg) |
@@ -69,7 +69,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) | |||
69 | ~(num_elements_per_pg - 1); | 69 | ~(num_elements_per_pg - 1); |
70 | 70 | ||
71 | /* adjust CQ */ | 71 | /* adjust CQ */ |
72 | num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE; | 72 | num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE; |
73 | if (hba->max_cqes < num_elements_per_pg) | 73 | if (hba->max_cqes < num_elements_per_pg) |
74 | hba->max_cqes = num_elements_per_pg; | 74 | hba->max_cqes = num_elements_per_pg; |
75 | else if (hba->max_cqes % num_elements_per_pg) | 75 | else if (hba->max_cqes % num_elements_per_pg) |
@@ -77,7 +77,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba) | |||
77 | ~(num_elements_per_pg - 1); | 77 | ~(num_elements_per_pg - 1); |
78 | 78 | ||
79 | /* adjust RQ */ | 79 | /* adjust RQ */ |
80 | num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE; | 80 | num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE; |
81 | if (hba->max_rqes < num_elements_per_pg) | 81 | if (hba->max_rqes < num_elements_per_pg) |
82 | hba->max_rqes = num_elements_per_pg; | 82 | hba->max_rqes = num_elements_per_pg; |
83 | else if (hba->max_rqes % num_elements_per_pg) | 83 | else if (hba->max_rqes % num_elements_per_pg) |
@@ -959,7 +959,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
959 | 959 | ||
960 | /* SQ page table */ | 960 | /* SQ page table */ |
961 | memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); | 961 | memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); |
962 | num_pages = ep->qp.sq_mem_size / PAGE_SIZE; | 962 | num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE; |
963 | page = ep->qp.sq_phys; | 963 | page = ep->qp.sq_phys; |
964 | 964 | ||
965 | if (cnic_dev_10g) | 965 | if (cnic_dev_10g) |
@@ -973,7 +973,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
973 | ptbl++; | 973 | ptbl++; |
974 | *ptbl = (u32) ((u64) page >> 32); | 974 | *ptbl = (u32) ((u64) page >> 32); |
975 | ptbl++; | 975 | ptbl++; |
976 | page += PAGE_SIZE; | 976 | page += CNIC_PAGE_SIZE; |
977 | } else { | 977 | } else { |
978 | /* PTE is written in big endian format for | 978 | /* PTE is written in big endian format for |
979 | * 5706/5708/5709 devices */ | 979 | * 5706/5708/5709 devices */ |
@@ -981,13 +981,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
981 | ptbl++; | 981 | ptbl++; |
982 | *ptbl = (u32) page; | 982 | *ptbl = (u32) page; |
983 | ptbl++; | 983 | ptbl++; |
984 | page += PAGE_SIZE; | 984 | page += CNIC_PAGE_SIZE; |
985 | } | 985 | } |
986 | } | 986 | } |
987 | 987 | ||
988 | /* RQ page table */ | 988 | /* RQ page table */ |
989 | memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); | 989 | memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); |
990 | num_pages = ep->qp.rq_mem_size / PAGE_SIZE; | 990 | num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE; |
991 | page = ep->qp.rq_phys; | 991 | page = ep->qp.rq_phys; |
992 | 992 | ||
993 | if (cnic_dev_10g) | 993 | if (cnic_dev_10g) |
@@ -1001,7 +1001,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
1001 | ptbl++; | 1001 | ptbl++; |
1002 | *ptbl = (u32) ((u64) page >> 32); | 1002 | *ptbl = (u32) ((u64) page >> 32); |
1003 | ptbl++; | 1003 | ptbl++; |
1004 | page += PAGE_SIZE; | 1004 | page += CNIC_PAGE_SIZE; |
1005 | } else { | 1005 | } else { |
1006 | /* PTE is written in big endian format for | 1006 | /* PTE is written in big endian format for |
1007 | * 5706/5708/5709 devices */ | 1007 | * 5706/5708/5709 devices */ |
@@ -1009,13 +1009,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
1009 | ptbl++; | 1009 | ptbl++; |
1010 | *ptbl = (u32) page; | 1010 | *ptbl = (u32) page; |
1011 | ptbl++; | 1011 | ptbl++; |
1012 | page += PAGE_SIZE; | 1012 | page += CNIC_PAGE_SIZE; |
1013 | } | 1013 | } |
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | /* CQ page table */ | 1016 | /* CQ page table */ |
1017 | memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); | 1017 | memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); |
1018 | num_pages = ep->qp.cq_mem_size / PAGE_SIZE; | 1018 | num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE; |
1019 | page = ep->qp.cq_phys; | 1019 | page = ep->qp.cq_phys; |
1020 | 1020 | ||
1021 | if (cnic_dev_10g) | 1021 | if (cnic_dev_10g) |
@@ -1029,7 +1029,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
1029 | ptbl++; | 1029 | ptbl++; |
1030 | *ptbl = (u32) ((u64) page >> 32); | 1030 | *ptbl = (u32) ((u64) page >> 32); |
1031 | ptbl++; | 1031 | ptbl++; |
1032 | page += PAGE_SIZE; | 1032 | page += CNIC_PAGE_SIZE; |
1033 | } else { | 1033 | } else { |
1034 | /* PTE is written in big endian format for | 1034 | /* PTE is written in big endian format for |
1035 | * 5706/5708/5709 devices */ | 1035 | * 5706/5708/5709 devices */ |
@@ -1037,7 +1037,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep) | |||
1037 | ptbl++; | 1037 | ptbl++; |
1038 | *ptbl = (u32) page; | 1038 | *ptbl = (u32) page; |
1039 | ptbl++; | 1039 | ptbl++; |
1040 | page += PAGE_SIZE; | 1040 | page += CNIC_PAGE_SIZE; |
1041 | } | 1041 | } |
1042 | } | 1042 | } |
1043 | } | 1043 | } |
@@ -1064,11 +1064,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) | |||
1064 | /* Allocate page table memory for SQ which is page aligned */ | 1064 | /* Allocate page table memory for SQ which is page aligned */ |
1065 | ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; | 1065 | ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE; |
1066 | ep->qp.sq_mem_size = | 1066 | ep->qp.sq_mem_size = |
1067 | (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1067 | (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
1068 | ep->qp.sq_pgtbl_size = | 1068 | ep->qp.sq_pgtbl_size = |
1069 | (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *); | 1069 | (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); |
1070 | ep->qp.sq_pgtbl_size = | 1070 | ep->qp.sq_pgtbl_size = |
1071 | (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1071 | (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
1072 | 1072 | ||
1073 | ep->qp.sq_pgtbl_virt = | 1073 | ep->qp.sq_pgtbl_virt = |
1074 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, | 1074 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, |
@@ -1101,11 +1101,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) | |||
1101 | /* Allocate page table memory for CQ which is page aligned */ | 1101 | /* Allocate page table memory for CQ which is page aligned */ |
1102 | ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; | 1102 | ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE; |
1103 | ep->qp.cq_mem_size = | 1103 | ep->qp.cq_mem_size = |
1104 | (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1104 | (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
1105 | ep->qp.cq_pgtbl_size = | 1105 | ep->qp.cq_pgtbl_size = |
1106 | (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *); | 1106 | (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); |
1107 | ep->qp.cq_pgtbl_size = | 1107 | ep->qp.cq_pgtbl_size = |
1108 | (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1108 | (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
1109 | 1109 | ||
1110 | ep->qp.cq_pgtbl_virt = | 1110 | ep->qp.cq_pgtbl_virt = |
1111 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, | 1111 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, |
@@ -1144,11 +1144,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep) | |||
1144 | /* Allocate page table memory for RQ which is page aligned */ | 1144 | /* Allocate page table memory for RQ which is page aligned */ |
1145 | ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; | 1145 | ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE; |
1146 | ep->qp.rq_mem_size = | 1146 | ep->qp.rq_mem_size = |
1147 | (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1147 | (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
1148 | ep->qp.rq_pgtbl_size = | 1148 | ep->qp.rq_pgtbl_size = |
1149 | (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *); | 1149 | (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); |
1150 | ep->qp.rq_pgtbl_size = | 1150 | ep->qp.rq_pgtbl_size = |
1151 | (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; | 1151 | (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; |
1152 | 1152 | ||
1153 | ep->qp.rq_pgtbl_virt = | 1153 | ep->qp.rq_pgtbl_virt = |
1154 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, | 1154 | dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, |
@@ -1270,7 +1270,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) | |||
1270 | bnx2i_adjust_qp_size(hba); | 1270 | bnx2i_adjust_qp_size(hba); |
1271 | 1271 | ||
1272 | iscsi_init.flags = | 1272 | iscsi_init.flags = |
1273 | ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; | 1273 | (CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; |
1274 | if (en_tcp_dack) | 1274 | if (en_tcp_dack) |
1275 | iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE; | 1275 | iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE; |
1276 | iscsi_init.reserved0 = 0; | 1276 | iscsi_init.reserved0 = 0; |
@@ -1288,15 +1288,15 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) | |||
1288 | ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); | 1288 | ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16)); |
1289 | iscsi_init.num_ccells_per_conn = hba->num_ccell; | 1289 | iscsi_init.num_ccells_per_conn = hba->num_ccell; |
1290 | iscsi_init.num_tasks_per_conn = hba->max_sqes; | 1290 | iscsi_init.num_tasks_per_conn = hba->max_sqes; |
1291 | iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; | 1291 | iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE; |
1292 | iscsi_init.sq_num_wqes = hba->max_sqes; | 1292 | iscsi_init.sq_num_wqes = hba->max_sqes; |
1293 | iscsi_init.cq_log_wqes_per_page = | 1293 | iscsi_init.cq_log_wqes_per_page = |
1294 | (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE); | 1294 | (u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE); |
1295 | iscsi_init.cq_num_wqes = hba->max_cqes; | 1295 | iscsi_init.cq_num_wqes = hba->max_cqes; |
1296 | iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + | 1296 | iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + |
1297 | (PAGE_SIZE - 1)) / PAGE_SIZE; | 1297 | (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE; |
1298 | iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + | 1298 | iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + |
1299 | (PAGE_SIZE - 1)) / PAGE_SIZE; | 1299 | (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE; |
1300 | iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE; | 1300 | iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE; |
1301 | iscsi_init.rq_num_wqes = hba->max_rqes; | 1301 | iscsi_init.rq_num_wqes = hba->max_rqes; |
1302 | 1302 | ||
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 854dad7d5b03..c8b0aff5bbd4 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c | |||
@@ -525,7 +525,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) | |||
525 | struct iscsi_bd *mp_bdt; | 525 | struct iscsi_bd *mp_bdt; |
526 | u64 addr; | 526 | u64 addr; |
527 | 527 | ||
528 | hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 528 | hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
529 | &hba->mp_bd_dma, GFP_KERNEL); | 529 | &hba->mp_bd_dma, GFP_KERNEL); |
530 | if (!hba->mp_bd_tbl) { | 530 | if (!hba->mp_bd_tbl) { |
531 | printk(KERN_ERR "unable to allocate Middle Path BDT\n"); | 531 | printk(KERN_ERR "unable to allocate Middle Path BDT\n"); |
@@ -533,11 +533,12 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) | |||
533 | goto out; | 533 | goto out; |
534 | } | 534 | } |
535 | 535 | ||
536 | hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 536 | hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, |
537 | CNIC_PAGE_SIZE, | ||
537 | &hba->dummy_buf_dma, GFP_KERNEL); | 538 | &hba->dummy_buf_dma, GFP_KERNEL); |
538 | if (!hba->dummy_buffer) { | 539 | if (!hba->dummy_buffer) { |
539 | printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); | 540 | printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); |
540 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 541 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
541 | hba->mp_bd_tbl, hba->mp_bd_dma); | 542 | hba->mp_bd_tbl, hba->mp_bd_dma); |
542 | hba->mp_bd_tbl = NULL; | 543 | hba->mp_bd_tbl = NULL; |
543 | rc = -1; | 544 | rc = -1; |
@@ -548,7 +549,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba) | |||
548 | addr = (unsigned long) hba->dummy_buf_dma; | 549 | addr = (unsigned long) hba->dummy_buf_dma; |
549 | mp_bdt->buffer_addr_lo = addr & 0xffffffff; | 550 | mp_bdt->buffer_addr_lo = addr & 0xffffffff; |
550 | mp_bdt->buffer_addr_hi = addr >> 32; | 551 | mp_bdt->buffer_addr_hi = addr >> 32; |
551 | mp_bdt->buffer_length = PAGE_SIZE; | 552 | mp_bdt->buffer_length = CNIC_PAGE_SIZE; |
552 | mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN | | 553 | mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN | |
553 | ISCSI_BD_FIRST_IN_BD_CHAIN; | 554 | ISCSI_BD_FIRST_IN_BD_CHAIN; |
554 | out: | 555 | out: |
@@ -565,12 +566,12 @@ out: | |||
565 | static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) | 566 | static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba) |
566 | { | 567 | { |
567 | if (hba->mp_bd_tbl) { | 568 | if (hba->mp_bd_tbl) { |
568 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 569 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
569 | hba->mp_bd_tbl, hba->mp_bd_dma); | 570 | hba->mp_bd_tbl, hba->mp_bd_dma); |
570 | hba->mp_bd_tbl = NULL; | 571 | hba->mp_bd_tbl = NULL; |
571 | } | 572 | } |
572 | if (hba->dummy_buffer) { | 573 | if (hba->dummy_buffer) { |
573 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 574 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
574 | hba->dummy_buffer, hba->dummy_buf_dma); | 575 | hba->dummy_buffer, hba->dummy_buf_dma); |
575 | hba->dummy_buffer = NULL; | 576 | hba->dummy_buffer = NULL; |
576 | } | 577 | } |
@@ -934,14 +935,14 @@ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba, | |||
934 | struct bnx2i_conn *bnx2i_conn) | 935 | struct bnx2i_conn *bnx2i_conn) |
935 | { | 936 | { |
936 | if (bnx2i_conn->gen_pdu.resp_bd_tbl) { | 937 | if (bnx2i_conn->gen_pdu.resp_bd_tbl) { |
937 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 938 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
938 | bnx2i_conn->gen_pdu.resp_bd_tbl, | 939 | bnx2i_conn->gen_pdu.resp_bd_tbl, |
939 | bnx2i_conn->gen_pdu.resp_bd_dma); | 940 | bnx2i_conn->gen_pdu.resp_bd_dma); |
940 | bnx2i_conn->gen_pdu.resp_bd_tbl = NULL; | 941 | bnx2i_conn->gen_pdu.resp_bd_tbl = NULL; |
941 | } | 942 | } |
942 | 943 | ||
943 | if (bnx2i_conn->gen_pdu.req_bd_tbl) { | 944 | if (bnx2i_conn->gen_pdu.req_bd_tbl) { |
944 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 945 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
945 | bnx2i_conn->gen_pdu.req_bd_tbl, | 946 | bnx2i_conn->gen_pdu.req_bd_tbl, |
946 | bnx2i_conn->gen_pdu.req_bd_dma); | 947 | bnx2i_conn->gen_pdu.req_bd_dma); |
947 | bnx2i_conn->gen_pdu.req_bd_tbl = NULL; | 948 | bnx2i_conn->gen_pdu.req_bd_tbl = NULL; |
@@ -998,13 +999,13 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, | |||
998 | bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf; | 999 | bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf; |
999 | 1000 | ||
1000 | bnx2i_conn->gen_pdu.req_bd_tbl = | 1001 | bnx2i_conn->gen_pdu.req_bd_tbl = |
1001 | dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 1002 | dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
1002 | &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL); | 1003 | &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL); |
1003 | if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL) | 1004 | if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL) |
1004 | goto login_req_bd_tbl_failure; | 1005 | goto login_req_bd_tbl_failure; |
1005 | 1006 | ||
1006 | bnx2i_conn->gen_pdu.resp_bd_tbl = | 1007 | bnx2i_conn->gen_pdu.resp_bd_tbl = |
1007 | dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, | 1008 | dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
1008 | &bnx2i_conn->gen_pdu.resp_bd_dma, | 1009 | &bnx2i_conn->gen_pdu.resp_bd_dma, |
1009 | GFP_KERNEL); | 1010 | GFP_KERNEL); |
1010 | if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) | 1011 | if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) |
@@ -1013,7 +1014,7 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba, | |||
1013 | return 0; | 1014 | return 0; |
1014 | 1015 | ||
1015 | login_resp_bd_tbl_failure: | 1016 | login_resp_bd_tbl_failure: |
1016 | dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, | 1017 | dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, |
1017 | bnx2i_conn->gen_pdu.req_bd_tbl, | 1018 | bnx2i_conn->gen_pdu.req_bd_tbl, |
1018 | bnx2i_conn->gen_pdu.req_bd_dma); | 1019 | bnx2i_conn->gen_pdu.req_bd_dma); |
1019 | bnx2i_conn->gen_pdu.req_bd_tbl = NULL; | 1020 | bnx2i_conn->gen_pdu.req_bd_tbl = NULL; |
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 4911310a38f5..22a9bb1abae1 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h | |||
@@ -311,9 +311,8 @@ static inline struct Scsi_Host *to_shost(struct isci_host *ihost) | |||
311 | } | 311 | } |
312 | 312 | ||
313 | #define for_each_isci_host(id, ihost, pdev) \ | 313 | #define for_each_isci_host(id, ihost, pdev) \ |
314 | for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \ | 314 | for (id = 0; id < SCI_MAX_CONTROLLERS && \ |
315 | id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \ | 315 | (ihost = to_pci_info(pdev)->hosts[id]); id++) |
316 | ihost = to_pci_info(pdev)->hosts[++id]) | ||
317 | 316 | ||
318 | static inline void wait_for_start(struct isci_host *ihost) | 317 | static inline void wait_for_start(struct isci_host *ihost) |
319 | { | 318 | { |
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index 85c77f6b802b..ac879745ef80 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c | |||
@@ -615,13 +615,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost, | |||
615 | SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); | 615 | SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION); |
616 | } else { | 616 | } else { |
617 | /* the phy is already the part of the port */ | 617 | /* the phy is already the part of the port */ |
618 | u32 port_state = iport->sm.current_state_id; | ||
619 | |||
620 | /* if the PORT'S state is resetting then the link up is from | ||
621 | * port hard reset in this case, we need to tell the port | ||
622 | * that link up is recieved | ||
623 | */ | ||
624 | BUG_ON(port_state != SCI_PORT_RESETTING); | ||
625 | port_agent->phy_ready_mask |= 1 << phy_index; | 618 | port_agent->phy_ready_mask |= 1 << phy_index; |
626 | sci_port_link_up(iport, iphy); | 619 | sci_port_link_up(iport, iphy); |
627 | } | 620 | } |
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 0d30ca849e8f..5d6fda72d659 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c | |||
@@ -801,7 +801,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev) | |||
801 | /* XXX: need to cleanup any ireqs targeting this | 801 | /* XXX: need to cleanup any ireqs targeting this |
802 | * domain_device | 802 | * domain_device |
803 | */ | 803 | */ |
804 | ret = TMF_RESP_FUNC_COMPLETE; | 804 | ret = -ENODEV; |
805 | goto out; | 805 | goto out; |
806 | } | 806 | } |
807 | 807 | ||
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index e1fe95ef23e1..266724b6b899 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -2996,8 +2996,7 @@ struct qla_hw_data { | |||
2996 | IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ | 2996 | IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ |
2997 | IS_QLA8044(ha)) | 2997 | IS_QLA8044(ha)) |
2998 | #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) | 2998 | #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) |
2999 | #define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ | 2999 | #define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled) |
3000 | IS_QLA83XX(ha)) && (ha)->flags.msix_enabled) | ||
3001 | #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) | 3000 | #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) |
3002 | #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) | 3001 | #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) |
3003 | #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) | 3002 | #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 9bc86b9e86b1..0a1dcb43d18b 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -2880,6 +2880,7 @@ static int | |||
2880 | qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) | 2880 | qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) |
2881 | { | 2881 | { |
2882 | #define MIN_MSIX_COUNT 2 | 2882 | #define MIN_MSIX_COUNT 2 |
2883 | #define ATIO_VECTOR 2 | ||
2883 | int i, ret; | 2884 | int i, ret; |
2884 | struct msix_entry *entries; | 2885 | struct msix_entry *entries; |
2885 | struct qla_msix_entry *qentry; | 2886 | struct qla_msix_entry *qentry; |
@@ -2936,34 +2937,47 @@ msix_failed: | |||
2936 | } | 2937 | } |
2937 | 2938 | ||
2938 | /* Enable MSI-X vectors for the base queue */ | 2939 | /* Enable MSI-X vectors for the base queue */ |
2939 | for (i = 0; i < ha->msix_count; i++) { | 2940 | for (i = 0; i < 2; i++) { |
2940 | qentry = &ha->msix_entries[i]; | 2941 | qentry = &ha->msix_entries[i]; |
2941 | if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { | 2942 | if (IS_P3P_TYPE(ha)) |
2942 | ret = request_irq(qentry->vector, | ||
2943 | qla83xx_msix_entries[i].handler, | ||
2944 | 0, qla83xx_msix_entries[i].name, rsp); | ||
2945 | } else if (IS_P3P_TYPE(ha)) { | ||
2946 | ret = request_irq(qentry->vector, | 2943 | ret = request_irq(qentry->vector, |
2947 | qla82xx_msix_entries[i].handler, | 2944 | qla82xx_msix_entries[i].handler, |
2948 | 0, qla82xx_msix_entries[i].name, rsp); | 2945 | 0, qla82xx_msix_entries[i].name, rsp); |
2949 | } else { | 2946 | else |
2950 | ret = request_irq(qentry->vector, | 2947 | ret = request_irq(qentry->vector, |
2951 | msix_entries[i].handler, | 2948 | msix_entries[i].handler, |
2952 | 0, msix_entries[i].name, rsp); | 2949 | 0, msix_entries[i].name, rsp); |
2953 | } | 2950 | if (ret) |
2954 | if (ret) { | 2951 | goto msix_register_fail; |
2955 | ql_log(ql_log_fatal, vha, 0x00cb, | ||
2956 | "MSI-X: unable to register handler -- %x/%d.\n", | ||
2957 | qentry->vector, ret); | ||
2958 | qla24xx_disable_msix(ha); | ||
2959 | ha->mqenable = 0; | ||
2960 | goto msix_out; | ||
2961 | } | ||
2962 | qentry->have_irq = 1; | 2952 | qentry->have_irq = 1; |
2963 | qentry->rsp = rsp; | 2953 | qentry->rsp = rsp; |
2964 | rsp->msix = qentry; | 2954 | rsp->msix = qentry; |
2965 | } | 2955 | } |
2966 | 2956 | ||
2957 | /* | ||
2958 | * If target mode is enable, also request the vector for the ATIO | ||
2959 | * queue. | ||
2960 | */ | ||
2961 | if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { | ||
2962 | qentry = &ha->msix_entries[ATIO_VECTOR]; | ||
2963 | ret = request_irq(qentry->vector, | ||
2964 | qla83xx_msix_entries[ATIO_VECTOR].handler, | ||
2965 | 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp); | ||
2966 | qentry->have_irq = 1; | ||
2967 | qentry->rsp = rsp; | ||
2968 | rsp->msix = qentry; | ||
2969 | } | ||
2970 | |||
2971 | msix_register_fail: | ||
2972 | if (ret) { | ||
2973 | ql_log(ql_log_fatal, vha, 0x00cb, | ||
2974 | "MSI-X: unable to register handler -- %x/%d.\n", | ||
2975 | qentry->vector, ret); | ||
2976 | qla24xx_disable_msix(ha); | ||
2977 | ha->mqenable = 0; | ||
2978 | goto msix_out; | ||
2979 | } | ||
2980 | |||
2967 | /* Enable MSI-X vector for response queue update for queue 0 */ | 2981 | /* Enable MSI-X vector for response queue update for queue 0 */ |
2968 | if (IS_QLA83XX(ha)) { | 2982 | if (IS_QLA83XX(ha)) { |
2969 | if (ha->msixbase && ha->mqiobase && | 2983 | if (ha->msixbase && ha->mqiobase && |
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 17d740427240..9969fa1ef7c4 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c | |||
@@ -1419,6 +1419,9 @@ static void storvsc_device_destroy(struct scsi_device *sdevice) | |||
1419 | { | 1419 | { |
1420 | struct stor_mem_pools *memp = sdevice->hostdata; | 1420 | struct stor_mem_pools *memp = sdevice->hostdata; |
1421 | 1421 | ||
1422 | if (!memp) | ||
1423 | return; | ||
1424 | |||
1422 | mempool_destroy(memp->request_mempool); | 1425 | mempool_destroy(memp->request_mempool); |
1423 | kmem_cache_destroy(memp->request_pool); | 1426 | kmem_cache_destroy(memp->request_pool); |
1424 | kfree(memp); | 1427 | kfree(memp); |
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index cf86e729532b..dc697cee248a 100644 --- a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c | |||
@@ -433,13 +433,10 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign | |||
433 | unsigned long flags; | 433 | unsigned long flags; |
434 | int locked = 1; | 434 | int locked = 1; |
435 | 435 | ||
436 | local_irq_save(flags); | 436 | if (port->sysrq || oops_in_progress) |
437 | if (port->sysrq) { | 437 | locked = spin_trylock_irqsave(&port->lock, flags); |
438 | locked = 0; | 438 | else |
439 | } else if (oops_in_progress) { | 439 | spin_lock_irqsave(&port->lock, flags); |
440 | locked = spin_trylock(&port->lock); | ||
441 | } else | ||
442 | spin_lock(&port->lock); | ||
443 | 440 | ||
444 | while (n > 0) { | 441 | while (n > 0) { |
445 | unsigned long ra = __pa(con_write_page); | 442 | unsigned long ra = __pa(con_write_page); |
@@ -470,8 +467,7 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign | |||
470 | } | 467 | } |
471 | 468 | ||
472 | if (locked) | 469 | if (locked) |
473 | spin_unlock(&port->lock); | 470 | spin_unlock_irqrestore(&port->lock, flags); |
474 | local_irq_restore(flags); | ||
475 | } | 471 | } |
476 | 472 | ||
477 | static inline void sunhv_console_putchar(struct uart_port *port, char c) | 473 | static inline void sunhv_console_putchar(struct uart_port *port, char c) |
@@ -492,7 +488,10 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig | |||
492 | unsigned long flags; | 488 | unsigned long flags; |
493 | int i, locked = 1; | 489 | int i, locked = 1; |
494 | 490 | ||
495 | local_irq_save(flags); | 491 | if (port->sysrq || oops_in_progress) |
492 | locked = spin_trylock_irqsave(&port->lock, flags); | ||
493 | else | ||
494 | spin_lock_irqsave(&port->lock, flags); | ||
496 | if (port->sysrq) { | 495 | if (port->sysrq) { |
497 | locked = 0; | 496 | locked = 0; |
498 | } else if (oops_in_progress) { | 497 | } else if (oops_in_progress) { |
@@ -507,8 +506,7 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig | |||
507 | } | 506 | } |
508 | 507 | ||
509 | if (locked) | 508 | if (locked) |
510 | spin_unlock(&port->lock); | 509 | spin_unlock_irqrestore(&port->lock, flags); |
511 | local_irq_restore(flags); | ||
512 | } | 510 | } |
513 | 511 | ||
514 | static struct console sunhv_console = { | 512 | static struct console sunhv_console = { |
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c index 380fb5355cb2..5faa8e905e98 100644 --- a/drivers/tty/serial/sunsab.c +++ b/drivers/tty/serial/sunsab.c | |||
@@ -844,20 +844,16 @@ static void sunsab_console_write(struct console *con, const char *s, unsigned n) | |||
844 | unsigned long flags; | 844 | unsigned long flags; |
845 | int locked = 1; | 845 | int locked = 1; |
846 | 846 | ||
847 | local_irq_save(flags); | 847 | if (up->port.sysrq || oops_in_progress) |
848 | if (up->port.sysrq) { | 848 | locked = spin_trylock_irqsave(&up->port.lock, flags); |
849 | locked = 0; | 849 | else |
850 | } else if (oops_in_progress) { | 850 | spin_lock_irqsave(&up->port.lock, flags); |
851 | locked = spin_trylock(&up->port.lock); | ||
852 | } else | ||
853 | spin_lock(&up->port.lock); | ||
854 | 851 | ||
855 | uart_console_write(&up->port, s, n, sunsab_console_putchar); | 852 | uart_console_write(&up->port, s, n, sunsab_console_putchar); |
856 | sunsab_tec_wait(up); | 853 | sunsab_tec_wait(up); |
857 | 854 | ||
858 | if (locked) | 855 | if (locked) |
859 | spin_unlock(&up->port.lock); | 856 | spin_unlock_irqrestore(&up->port.lock, flags); |
860 | local_irq_restore(flags); | ||
861 | } | 857 | } |
862 | 858 | ||
863 | static int sunsab_console_setup(struct console *con, char *options) | 859 | static int sunsab_console_setup(struct console *con, char *options) |
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index db79b76f5c8e..9a0f24f83720 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c | |||
@@ -1295,13 +1295,10 @@ static void sunsu_console_write(struct console *co, const char *s, | |||
1295 | unsigned int ier; | 1295 | unsigned int ier; |
1296 | int locked = 1; | 1296 | int locked = 1; |
1297 | 1297 | ||
1298 | local_irq_save(flags); | 1298 | if (up->port.sysrq || oops_in_progress) |
1299 | if (up->port.sysrq) { | 1299 | locked = spin_trylock_irqsave(&up->port.lock, flags); |
1300 | locked = 0; | 1300 | else |
1301 | } else if (oops_in_progress) { | 1301 | spin_lock_irqsave(&up->port.lock, flags); |
1302 | locked = spin_trylock(&up->port.lock); | ||
1303 | } else | ||
1304 | spin_lock(&up->port.lock); | ||
1305 | 1302 | ||
1306 | /* | 1303 | /* |
1307 | * First save the UER then disable the interrupts | 1304 | * First save the UER then disable the interrupts |
@@ -1319,8 +1316,7 @@ static void sunsu_console_write(struct console *co, const char *s, | |||
1319 | serial_out(up, UART_IER, ier); | 1316 | serial_out(up, UART_IER, ier); |
1320 | 1317 | ||
1321 | if (locked) | 1318 | if (locked) |
1322 | spin_unlock(&up->port.lock); | 1319 | spin_unlock_irqrestore(&up->port.lock, flags); |
1323 | local_irq_restore(flags); | ||
1324 | } | 1320 | } |
1325 | 1321 | ||
1326 | /* | 1322 | /* |
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c index 45a8c6aa5837..a2c40ed287d2 100644 --- a/drivers/tty/serial/sunzilog.c +++ b/drivers/tty/serial/sunzilog.c | |||
@@ -1195,20 +1195,16 @@ sunzilog_console_write(struct console *con, const char *s, unsigned int count) | |||
1195 | unsigned long flags; | 1195 | unsigned long flags; |
1196 | int locked = 1; | 1196 | int locked = 1; |
1197 | 1197 | ||
1198 | local_irq_save(flags); | 1198 | if (up->port.sysrq || oops_in_progress) |
1199 | if (up->port.sysrq) { | 1199 | locked = spin_trylock_irqsave(&up->port.lock, flags); |
1200 | locked = 0; | 1200 | else |
1201 | } else if (oops_in_progress) { | 1201 | spin_lock_irqsave(&up->port.lock, flags); |
1202 | locked = spin_trylock(&up->port.lock); | ||
1203 | } else | ||
1204 | spin_lock(&up->port.lock); | ||
1205 | 1202 | ||
1206 | uart_console_write(&up->port, s, count, sunzilog_putchar); | 1203 | uart_console_write(&up->port, s, count, sunzilog_putchar); |
1207 | udelay(2); | 1204 | udelay(2); |
1208 | 1205 | ||
1209 | if (locked) | 1206 | if (locked) |
1210 | spin_unlock(&up->port.lock); | 1207 | spin_unlock_irqrestore(&up->port.lock, flags); |
1211 | local_irq_restore(flags); | ||
1212 | } | 1208 | } |
1213 | 1209 | ||
1214 | static int __init sunzilog_console_setup(struct console *con, char *options) | 1210 | static int __init sunzilog_console_setup(struct console *con, char *options) |
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index a0fa5de210cf..e1e22e0f01e8 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -505,9 +505,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, | |||
505 | r = -ENOBUFS; | 505 | r = -ENOBUFS; |
506 | goto err; | 506 | goto err; |
507 | } | 507 | } |
508 | d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg, | 508 | r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg, |
509 | ARRAY_SIZE(vq->iov) - seg, &out, | 509 | ARRAY_SIZE(vq->iov) - seg, &out, |
510 | &in, log, log_num); | 510 | &in, log, log_num); |
511 | if (unlikely(r < 0)) | ||
512 | goto err; | ||
513 | |||
514 | d = r; | ||
511 | if (d == vq->num) { | 515 | if (d == vq->num) { |
512 | r = 0; | 516 | r = 0; |
513 | goto err; | 517 | goto err; |
@@ -532,6 +536,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, | |||
532 | *iovcount = seg; | 536 | *iovcount = seg; |
533 | if (unlikely(log)) | 537 | if (unlikely(log)) |
534 | *log_num = nlogs; | 538 | *log_num = nlogs; |
539 | |||
540 | /* Detect overrun */ | ||
541 | if (unlikely(datalen > 0)) { | ||
542 | r = UIO_MAXIOV + 1; | ||
543 | goto err; | ||
544 | } | ||
535 | return headcount; | 545 | return headcount; |
536 | err: | 546 | err: |
537 | vhost_discard_vq_desc(vq, headcount); | 547 | vhost_discard_vq_desc(vq, headcount); |
@@ -587,6 +597,14 @@ static void handle_rx(struct vhost_net *net) | |||
587 | /* On error, stop handling until the next kick. */ | 597 | /* On error, stop handling until the next kick. */ |
588 | if (unlikely(headcount < 0)) | 598 | if (unlikely(headcount < 0)) |
589 | break; | 599 | break; |
600 | /* On overrun, truncate and discard */ | ||
601 | if (unlikely(headcount > UIO_MAXIOV)) { | ||
602 | msg.msg_iovlen = 1; | ||
603 | err = sock->ops->recvmsg(NULL, sock, &msg, | ||
604 | 1, MSG_DONTWAIT | MSG_TRUNC); | ||
605 | pr_debug("Discarded rx packet: len %zd\n", sock_len); | ||
606 | continue; | ||
607 | } | ||
590 | /* OK, now we need to know about added descriptors. */ | 608 | /* OK, now we need to know about added descriptors. */ |
591 | if (!headcount) { | 609 | if (!headcount) { |
592 | if (unlikely(vhost_enable_notify(&net->dev, vq))) { | 610 | if (unlikely(vhost_enable_notify(&net->dev, vq))) { |
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 37d06ea624aa..61a6ac8fa8fc 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -399,11 +399,25 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
399 | state = BP_EAGAIN; | 399 | state = BP_EAGAIN; |
400 | break; | 400 | break; |
401 | } | 401 | } |
402 | scrub_page(page); | ||
402 | 403 | ||
403 | pfn = page_to_pfn(page); | 404 | frame_list[i] = page_to_pfn(page); |
404 | frame_list[i] = pfn_to_mfn(pfn); | 405 | } |
405 | 406 | ||
406 | scrub_page(page); | 407 | /* |
408 | * Ensure that ballooned highmem pages don't have kmaps. | ||
409 | * | ||
410 | * Do this before changing the p2m as kmap_flush_unused() | ||
411 | * reads PTEs to obtain pages (and hence needs the original | ||
412 | * p2m entry). | ||
413 | */ | ||
414 | kmap_flush_unused(); | ||
415 | |||
416 | /* Update direct mapping, invalidate P2M, and add to balloon. */ | ||
417 | for (i = 0; i < nr_pages; i++) { | ||
418 | pfn = frame_list[i]; | ||
419 | frame_list[i] = pfn_to_mfn(pfn); | ||
420 | page = pfn_to_page(pfn); | ||
407 | 421 | ||
408 | #ifdef CONFIG_XEN_HAVE_PVMMU | 422 | #ifdef CONFIG_XEN_HAVE_PVMMU |
409 | /* | 423 | /* |
@@ -429,11 +443,9 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
429 | } | 443 | } |
430 | #endif | 444 | #endif |
431 | 445 | ||
432 | balloon_append(pfn_to_page(pfn)); | 446 | balloon_append(page); |
433 | } | 447 | } |
434 | 448 | ||
435 | /* Ensure that ballooned highmem pages don't have kmaps. */ | ||
436 | kmap_flush_unused(); | ||
437 | flush_tlb_all(); | 449 | flush_tlb_all(); |
438 | 450 | ||
439 | set_xen_guest_handle(reservation.extent_start, frame_list); | 451 | set_xen_guest_handle(reservation.extent_start, frame_list); |
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 24084732b1d0..80ef38c73e5a 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c | |||
@@ -41,19 +41,8 @@ static const struct dentry_operations anon_inodefs_dentry_operations = { | |||
41 | static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type, | 41 | static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type, |
42 | int flags, const char *dev_name, void *data) | 42 | int flags, const char *dev_name, void *data) |
43 | { | 43 | { |
44 | struct dentry *root; | 44 | return mount_pseudo(fs_type, "anon_inode:", NULL, |
45 | root = mount_pseudo(fs_type, "anon_inode:", NULL, | ||
46 | &anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC); | 45 | &anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC); |
47 | if (!IS_ERR(root)) { | ||
48 | struct super_block *s = root->d_sb; | ||
49 | anon_inode_inode = alloc_anon_inode(s); | ||
50 | if (IS_ERR(anon_inode_inode)) { | ||
51 | dput(root); | ||
52 | deactivate_locked_super(s); | ||
53 | root = ERR_CAST(anon_inode_inode); | ||
54 | } | ||
55 | } | ||
56 | return root; | ||
57 | } | 46 | } |
58 | 47 | ||
59 | static struct file_system_type anon_inode_fs_type = { | 48 | static struct file_system_type anon_inode_fs_type = { |
@@ -175,22 +164,15 @@ EXPORT_SYMBOL_GPL(anon_inode_getfd); | |||
175 | 164 | ||
176 | static int __init anon_inode_init(void) | 165 | static int __init anon_inode_init(void) |
177 | { | 166 | { |
178 | int error; | ||
179 | |||
180 | error = register_filesystem(&anon_inode_fs_type); | ||
181 | if (error) | ||
182 | goto err_exit; | ||
183 | anon_inode_mnt = kern_mount(&anon_inode_fs_type); | 167 | anon_inode_mnt = kern_mount(&anon_inode_fs_type); |
184 | if (IS_ERR(anon_inode_mnt)) { | 168 | if (IS_ERR(anon_inode_mnt)) |
185 | error = PTR_ERR(anon_inode_mnt); | 169 | panic("anon_inode_init() kernel mount failed (%ld)\n", PTR_ERR(anon_inode_mnt)); |
186 | goto err_unregister_filesystem; | ||
187 | } | ||
188 | return 0; | ||
189 | 170 | ||
190 | err_unregister_filesystem: | 171 | anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb); |
191 | unregister_filesystem(&anon_inode_fs_type); | 172 | if (IS_ERR(anon_inode_inode)) |
192 | err_exit: | 173 | panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode)); |
193 | panic(KERN_ERR "anon_inode_init() failed (%d)\n", error); | 174 | |
175 | return 0; | ||
194 | } | 176 | } |
195 | 177 | ||
196 | fs_initcall(anon_inode_init); | 178 | fs_initcall(anon_inode_init); |
diff --git a/fs/dcache.c b/fs/dcache.c index 265e0ce9769c..ca02c13a84aa 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -2833,9 +2833,9 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name) | |||
2833 | u32 dlen = ACCESS_ONCE(name->len); | 2833 | u32 dlen = ACCESS_ONCE(name->len); |
2834 | char *p; | 2834 | char *p; |
2835 | 2835 | ||
2836 | if (*buflen < dlen + 1) | ||
2837 | return -ENAMETOOLONG; | ||
2838 | *buflen -= dlen + 1; | 2836 | *buflen -= dlen + 1; |
2837 | if (*buflen < 0) | ||
2838 | return -ENAMETOOLONG; | ||
2839 | p = *buffer -= dlen + 1; | 2839 | p = *buffer -= dlen + 1; |
2840 | *p++ = '/'; | 2840 | *p++ = '/'; |
2841 | while (dlen--) { | 2841 | while (dlen--) { |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 6e39895a91b8..24bfd7ff3049 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
39 | #include <linux/ratelimit.h> | 39 | #include <linux/ratelimit.h> |
40 | #include <linux/aio.h> | 40 | #include <linux/aio.h> |
41 | #include <linux/bitops.h> | ||
41 | 42 | ||
42 | #include "ext4_jbd2.h" | 43 | #include "ext4_jbd2.h" |
43 | #include "xattr.h" | 44 | #include "xattr.h" |
@@ -3921,18 +3922,20 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc) | |||
3921 | void ext4_set_inode_flags(struct inode *inode) | 3922 | void ext4_set_inode_flags(struct inode *inode) |
3922 | { | 3923 | { |
3923 | unsigned int flags = EXT4_I(inode)->i_flags; | 3924 | unsigned int flags = EXT4_I(inode)->i_flags; |
3925 | unsigned int new_fl = 0; | ||
3924 | 3926 | ||
3925 | inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); | ||
3926 | if (flags & EXT4_SYNC_FL) | 3927 | if (flags & EXT4_SYNC_FL) |
3927 | inode->i_flags |= S_SYNC; | 3928 | new_fl |= S_SYNC; |
3928 | if (flags & EXT4_APPEND_FL) | 3929 | if (flags & EXT4_APPEND_FL) |
3929 | inode->i_flags |= S_APPEND; | 3930 | new_fl |= S_APPEND; |
3930 | if (flags & EXT4_IMMUTABLE_FL) | 3931 | if (flags & EXT4_IMMUTABLE_FL) |
3931 | inode->i_flags |= S_IMMUTABLE; | 3932 | new_fl |= S_IMMUTABLE; |
3932 | if (flags & EXT4_NOATIME_FL) | 3933 | if (flags & EXT4_NOATIME_FL) |
3933 | inode->i_flags |= S_NOATIME; | 3934 | new_fl |= S_NOATIME; |
3934 | if (flags & EXT4_DIRSYNC_FL) | 3935 | if (flags & EXT4_DIRSYNC_FL) |
3935 | inode->i_flags |= S_DIRSYNC; | 3936 | new_fl |= S_DIRSYNC; |
3937 | set_mask_bits(&inode->i_flags, | ||
3938 | S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl); | ||
3936 | } | 3939 | } |
3937 | 3940 | ||
3938 | /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ | 3941 | /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ |
@@ -713,27 +713,16 @@ unsigned long __fdget_raw(unsigned int fd) | |||
713 | 713 | ||
714 | unsigned long __fdget_pos(unsigned int fd) | 714 | unsigned long __fdget_pos(unsigned int fd) |
715 | { | 715 | { |
716 | struct files_struct *files = current->files; | 716 | unsigned long v = __fdget(fd); |
717 | struct file *file; | 717 | struct file *file = (struct file *)(v & ~3); |
718 | unsigned long v; | ||
719 | |||
720 | if (atomic_read(&files->count) == 1) { | ||
721 | file = __fcheck_files(files, fd); | ||
722 | v = 0; | ||
723 | } else { | ||
724 | file = __fget(fd, 0); | ||
725 | v = FDPUT_FPUT; | ||
726 | } | ||
727 | if (!file) | ||
728 | return 0; | ||
729 | 718 | ||
730 | if (file->f_mode & FMODE_ATOMIC_POS) { | 719 | if (file && (file->f_mode & FMODE_ATOMIC_POS)) { |
731 | if (file_count(file) > 1) { | 720 | if (file_count(file) > 1) { |
732 | v |= FDPUT_POS_UNLOCK; | 721 | v |= FDPUT_POS_UNLOCK; |
733 | mutex_lock(&file->f_pos_lock); | 722 | mutex_lock(&file->f_pos_lock); |
734 | } | 723 | } |
735 | } | 724 | } |
736 | return v | (unsigned long)file; | 725 | return v; |
737 | } | 726 | } |
738 | 727 | ||
739 | /* | 728 | /* |
diff --git a/fs/mount.h b/fs/mount.h index a17458ca6f29..b29e42f05f34 100644 --- a/fs/mount.h +++ b/fs/mount.h | |||
@@ -19,13 +19,13 @@ struct mnt_pcp { | |||
19 | }; | 19 | }; |
20 | 20 | ||
21 | struct mountpoint { | 21 | struct mountpoint { |
22 | struct list_head m_hash; | 22 | struct hlist_node m_hash; |
23 | struct dentry *m_dentry; | 23 | struct dentry *m_dentry; |
24 | int m_count; | 24 | int m_count; |
25 | }; | 25 | }; |
26 | 26 | ||
27 | struct mount { | 27 | struct mount { |
28 | struct list_head mnt_hash; | 28 | struct hlist_node mnt_hash; |
29 | struct mount *mnt_parent; | 29 | struct mount *mnt_parent; |
30 | struct dentry *mnt_mountpoint; | 30 | struct dentry *mnt_mountpoint; |
31 | struct vfsmount mnt; | 31 | struct vfsmount mnt; |
diff --git a/fs/namei.c b/fs/namei.c index 2f730ef9b4b3..4b491b431990 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1109,7 +1109,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, | |||
1109 | return false; | 1109 | return false; |
1110 | 1110 | ||
1111 | if (!d_mountpoint(path->dentry)) | 1111 | if (!d_mountpoint(path->dentry)) |
1112 | break; | 1112 | return true; |
1113 | 1113 | ||
1114 | mounted = __lookup_mnt(path->mnt, path->dentry); | 1114 | mounted = __lookup_mnt(path->mnt, path->dentry); |
1115 | if (!mounted) | 1115 | if (!mounted) |
@@ -1125,20 +1125,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, | |||
1125 | */ | 1125 | */ |
1126 | *inode = path->dentry->d_inode; | 1126 | *inode = path->dentry->d_inode; |
1127 | } | 1127 | } |
1128 | return true; | 1128 | return read_seqretry(&mount_lock, nd->m_seq); |
1129 | } | ||
1130 | |||
1131 | static void follow_mount_rcu(struct nameidata *nd) | ||
1132 | { | ||
1133 | while (d_mountpoint(nd->path.dentry)) { | ||
1134 | struct mount *mounted; | ||
1135 | mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry); | ||
1136 | if (!mounted) | ||
1137 | break; | ||
1138 | nd->path.mnt = &mounted->mnt; | ||
1139 | nd->path.dentry = mounted->mnt.mnt_root; | ||
1140 | nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); | ||
1141 | } | ||
1142 | } | 1129 | } |
1143 | 1130 | ||
1144 | static int follow_dotdot_rcu(struct nameidata *nd) | 1131 | static int follow_dotdot_rcu(struct nameidata *nd) |
@@ -1166,7 +1153,17 @@ static int follow_dotdot_rcu(struct nameidata *nd) | |||
1166 | break; | 1153 | break; |
1167 | nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); | 1154 | nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); |
1168 | } | 1155 | } |
1169 | follow_mount_rcu(nd); | 1156 | while (d_mountpoint(nd->path.dentry)) { |
1157 | struct mount *mounted; | ||
1158 | mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry); | ||
1159 | if (!mounted) | ||
1160 | break; | ||
1161 | nd->path.mnt = &mounted->mnt; | ||
1162 | nd->path.dentry = mounted->mnt.mnt_root; | ||
1163 | nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); | ||
1164 | if (!read_seqretry(&mount_lock, nd->m_seq)) | ||
1165 | goto failed; | ||
1166 | } | ||
1170 | nd->inode = nd->path.dentry->d_inode; | 1167 | nd->inode = nd->path.dentry->d_inode; |
1171 | return 0; | 1168 | return 0; |
1172 | 1169 | ||
diff --git a/fs/namespace.c b/fs/namespace.c index 22e536705c45..2ffc5a2905d4 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -23,11 +23,34 @@ | |||
23 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
24 | #include <linux/proc_ns.h> | 24 | #include <linux/proc_ns.h> |
25 | #include <linux/magic.h> | 25 | #include <linux/magic.h> |
26 | #include <linux/bootmem.h> | ||
26 | #include "pnode.h" | 27 | #include "pnode.h" |
27 | #include "internal.h" | 28 | #include "internal.h" |
28 | 29 | ||
29 | #define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head)) | 30 | static unsigned int m_hash_mask __read_mostly; |
30 | #define HASH_SIZE (1UL << HASH_SHIFT) | 31 | static unsigned int m_hash_shift __read_mostly; |
32 | static unsigned int mp_hash_mask __read_mostly; | ||
33 | static unsigned int mp_hash_shift __read_mostly; | ||
34 | |||
35 | static __initdata unsigned long mhash_entries; | ||
36 | static int __init set_mhash_entries(char *str) | ||
37 | { | ||
38 | if (!str) | ||
39 | return 0; | ||
40 | mhash_entries = simple_strtoul(str, &str, 0); | ||
41 | return 1; | ||
42 | } | ||
43 | __setup("mhash_entries=", set_mhash_entries); | ||
44 | |||
45 | static __initdata unsigned long mphash_entries; | ||
46 | static int __init set_mphash_entries(char *str) | ||
47 | { | ||
48 | if (!str) | ||
49 | return 0; | ||
50 | mphash_entries = simple_strtoul(str, &str, 0); | ||
51 | return 1; | ||
52 | } | ||
53 | __setup("mphash_entries=", set_mphash_entries); | ||
31 | 54 | ||
32 | static int event; | 55 | static int event; |
33 | static DEFINE_IDA(mnt_id_ida); | 56 | static DEFINE_IDA(mnt_id_ida); |
@@ -36,8 +59,8 @@ static DEFINE_SPINLOCK(mnt_id_lock); | |||
36 | static int mnt_id_start = 0; | 59 | static int mnt_id_start = 0; |
37 | static int mnt_group_start = 1; | 60 | static int mnt_group_start = 1; |
38 | 61 | ||
39 | static struct list_head *mount_hashtable __read_mostly; | 62 | static struct hlist_head *mount_hashtable __read_mostly; |
40 | static struct list_head *mountpoint_hashtable __read_mostly; | 63 | static struct hlist_head *mountpoint_hashtable __read_mostly; |
41 | static struct kmem_cache *mnt_cache __read_mostly; | 64 | static struct kmem_cache *mnt_cache __read_mostly; |
42 | static DECLARE_RWSEM(namespace_sem); | 65 | static DECLARE_RWSEM(namespace_sem); |
43 | 66 | ||
@@ -55,12 +78,19 @@ EXPORT_SYMBOL_GPL(fs_kobj); | |||
55 | */ | 78 | */ |
56 | __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock); | 79 | __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock); |
57 | 80 | ||
58 | static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) | 81 | static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry) |
59 | { | 82 | { |
60 | unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); | 83 | unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); |
61 | tmp += ((unsigned long)dentry / L1_CACHE_BYTES); | 84 | tmp += ((unsigned long)dentry / L1_CACHE_BYTES); |
62 | tmp = tmp + (tmp >> HASH_SHIFT); | 85 | tmp = tmp + (tmp >> m_hash_shift); |
63 | return tmp & (HASH_SIZE - 1); | 86 | return &mount_hashtable[tmp & m_hash_mask]; |
87 | } | ||
88 | |||
89 | static inline struct hlist_head *mp_hash(struct dentry *dentry) | ||
90 | { | ||
91 | unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES); | ||
92 | tmp = tmp + (tmp >> mp_hash_shift); | ||
93 | return &mountpoint_hashtable[tmp & mp_hash_mask]; | ||
64 | } | 94 | } |
65 | 95 | ||
66 | /* | 96 | /* |
@@ -187,7 +217,7 @@ static struct mount *alloc_vfsmnt(const char *name) | |||
187 | mnt->mnt_writers = 0; | 217 | mnt->mnt_writers = 0; |
188 | #endif | 218 | #endif |
189 | 219 | ||
190 | INIT_LIST_HEAD(&mnt->mnt_hash); | 220 | INIT_HLIST_NODE(&mnt->mnt_hash); |
191 | INIT_LIST_HEAD(&mnt->mnt_child); | 221 | INIT_LIST_HEAD(&mnt->mnt_child); |
192 | INIT_LIST_HEAD(&mnt->mnt_mounts); | 222 | INIT_LIST_HEAD(&mnt->mnt_mounts); |
193 | INIT_LIST_HEAD(&mnt->mnt_list); | 223 | INIT_LIST_HEAD(&mnt->mnt_list); |
@@ -575,10 +605,10 @@ bool legitimize_mnt(struct vfsmount *bastard, unsigned seq) | |||
575 | */ | 605 | */ |
576 | struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) | 606 | struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) |
577 | { | 607 | { |
578 | struct list_head *head = mount_hashtable + hash(mnt, dentry); | 608 | struct hlist_head *head = m_hash(mnt, dentry); |
579 | struct mount *p; | 609 | struct mount *p; |
580 | 610 | ||
581 | list_for_each_entry_rcu(p, head, mnt_hash) | 611 | hlist_for_each_entry_rcu(p, head, mnt_hash) |
582 | if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) | 612 | if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) |
583 | return p; | 613 | return p; |
584 | return NULL; | 614 | return NULL; |
@@ -590,13 +620,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry) | |||
590 | */ | 620 | */ |
591 | struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry) | 621 | struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry) |
592 | { | 622 | { |
593 | struct list_head *head = mount_hashtable + hash(mnt, dentry); | 623 | struct mount *p, *res; |
594 | struct mount *p; | 624 | res = p = __lookup_mnt(mnt, dentry); |
595 | 625 | if (!p) | |
596 | list_for_each_entry_reverse(p, head, mnt_hash) | 626 | goto out; |
597 | if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) | 627 | hlist_for_each_entry_continue(p, mnt_hash) { |
598 | return p; | 628 | if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry) |
599 | return NULL; | 629 | break; |
630 | res = p; | ||
631 | } | ||
632 | out: | ||
633 | return res; | ||
600 | } | 634 | } |
601 | 635 | ||
602 | /* | 636 | /* |
@@ -633,11 +667,11 @@ struct vfsmount *lookup_mnt(struct path *path) | |||
633 | 667 | ||
634 | static struct mountpoint *new_mountpoint(struct dentry *dentry) | 668 | static struct mountpoint *new_mountpoint(struct dentry *dentry) |
635 | { | 669 | { |
636 | struct list_head *chain = mountpoint_hashtable + hash(NULL, dentry); | 670 | struct hlist_head *chain = mp_hash(dentry); |
637 | struct mountpoint *mp; | 671 | struct mountpoint *mp; |
638 | int ret; | 672 | int ret; |
639 | 673 | ||
640 | list_for_each_entry(mp, chain, m_hash) { | 674 | hlist_for_each_entry(mp, chain, m_hash) { |
641 | if (mp->m_dentry == dentry) { | 675 | if (mp->m_dentry == dentry) { |
642 | /* might be worth a WARN_ON() */ | 676 | /* might be worth a WARN_ON() */ |
643 | if (d_unlinked(dentry)) | 677 | if (d_unlinked(dentry)) |
@@ -659,7 +693,7 @@ static struct mountpoint *new_mountpoint(struct dentry *dentry) | |||
659 | 693 | ||
660 | mp->m_dentry = dentry; | 694 | mp->m_dentry = dentry; |
661 | mp->m_count = 1; | 695 | mp->m_count = 1; |
662 | list_add(&mp->m_hash, chain); | 696 | hlist_add_head(&mp->m_hash, chain); |
663 | return mp; | 697 | return mp; |
664 | } | 698 | } |
665 | 699 | ||
@@ -670,7 +704,7 @@ static void put_mountpoint(struct mountpoint *mp) | |||
670 | spin_lock(&dentry->d_lock); | 704 | spin_lock(&dentry->d_lock); |
671 | dentry->d_flags &= ~DCACHE_MOUNTED; | 705 | dentry->d_flags &= ~DCACHE_MOUNTED; |
672 | spin_unlock(&dentry->d_lock); | 706 | spin_unlock(&dentry->d_lock); |
673 | list_del(&mp->m_hash); | 707 | hlist_del(&mp->m_hash); |
674 | kfree(mp); | 708 | kfree(mp); |
675 | } | 709 | } |
676 | } | 710 | } |
@@ -712,7 +746,7 @@ static void detach_mnt(struct mount *mnt, struct path *old_path) | |||
712 | mnt->mnt_parent = mnt; | 746 | mnt->mnt_parent = mnt; |
713 | mnt->mnt_mountpoint = mnt->mnt.mnt_root; | 747 | mnt->mnt_mountpoint = mnt->mnt.mnt_root; |
714 | list_del_init(&mnt->mnt_child); | 748 | list_del_init(&mnt->mnt_child); |
715 | list_del_init(&mnt->mnt_hash); | 749 | hlist_del_init_rcu(&mnt->mnt_hash); |
716 | put_mountpoint(mnt->mnt_mp); | 750 | put_mountpoint(mnt->mnt_mp); |
717 | mnt->mnt_mp = NULL; | 751 | mnt->mnt_mp = NULL; |
718 | } | 752 | } |
@@ -739,15 +773,14 @@ static void attach_mnt(struct mount *mnt, | |||
739 | struct mountpoint *mp) | 773 | struct mountpoint *mp) |
740 | { | 774 | { |
741 | mnt_set_mountpoint(parent, mp, mnt); | 775 | mnt_set_mountpoint(parent, mp, mnt); |
742 | list_add_tail(&mnt->mnt_hash, mount_hashtable + | 776 | hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry)); |
743 | hash(&parent->mnt, mp->m_dentry)); | ||
744 | list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); | 777 | list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); |
745 | } | 778 | } |
746 | 779 | ||
747 | /* | 780 | /* |
748 | * vfsmount lock must be held for write | 781 | * vfsmount lock must be held for write |
749 | */ | 782 | */ |
750 | static void commit_tree(struct mount *mnt) | 783 | static void commit_tree(struct mount *mnt, struct mount *shadows) |
751 | { | 784 | { |
752 | struct mount *parent = mnt->mnt_parent; | 785 | struct mount *parent = mnt->mnt_parent; |
753 | struct mount *m; | 786 | struct mount *m; |
@@ -762,8 +795,11 @@ static void commit_tree(struct mount *mnt) | |||
762 | 795 | ||
763 | list_splice(&head, n->list.prev); | 796 | list_splice(&head, n->list.prev); |
764 | 797 | ||
765 | list_add_tail(&mnt->mnt_hash, mount_hashtable + | 798 | if (shadows) |
766 | hash(&parent->mnt, mnt->mnt_mountpoint)); | 799 | hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash); |
800 | else | ||
801 | hlist_add_head_rcu(&mnt->mnt_hash, | ||
802 | m_hash(&parent->mnt, mnt->mnt_mountpoint)); | ||
767 | list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); | 803 | list_add_tail(&mnt->mnt_child, &parent->mnt_mounts); |
768 | touch_mnt_namespace(n); | 804 | touch_mnt_namespace(n); |
769 | } | 805 | } |
@@ -1153,26 +1189,28 @@ int may_umount(struct vfsmount *mnt) | |||
1153 | 1189 | ||
1154 | EXPORT_SYMBOL(may_umount); | 1190 | EXPORT_SYMBOL(may_umount); |
1155 | 1191 | ||
1156 | static LIST_HEAD(unmounted); /* protected by namespace_sem */ | 1192 | static HLIST_HEAD(unmounted); /* protected by namespace_sem */ |
1157 | 1193 | ||
1158 | static void namespace_unlock(void) | 1194 | static void namespace_unlock(void) |
1159 | { | 1195 | { |
1160 | struct mount *mnt; | 1196 | struct mount *mnt; |
1161 | LIST_HEAD(head); | 1197 | struct hlist_head head = unmounted; |
1162 | 1198 | ||
1163 | if (likely(list_empty(&unmounted))) { | 1199 | if (likely(hlist_empty(&head))) { |
1164 | up_write(&namespace_sem); | 1200 | up_write(&namespace_sem); |
1165 | return; | 1201 | return; |
1166 | } | 1202 | } |
1167 | 1203 | ||
1168 | list_splice_init(&unmounted, &head); | 1204 | head.first->pprev = &head.first; |
1205 | INIT_HLIST_HEAD(&unmounted); | ||
1206 | |||
1169 | up_write(&namespace_sem); | 1207 | up_write(&namespace_sem); |
1170 | 1208 | ||
1171 | synchronize_rcu(); | 1209 | synchronize_rcu(); |
1172 | 1210 | ||
1173 | while (!list_empty(&head)) { | 1211 | while (!hlist_empty(&head)) { |
1174 | mnt = list_first_entry(&head, struct mount, mnt_hash); | 1212 | mnt = hlist_entry(head.first, struct mount, mnt_hash); |
1175 | list_del_init(&mnt->mnt_hash); | 1213 | hlist_del_init(&mnt->mnt_hash); |
1176 | if (mnt->mnt_ex_mountpoint.mnt) | 1214 | if (mnt->mnt_ex_mountpoint.mnt) |
1177 | path_put(&mnt->mnt_ex_mountpoint); | 1215 | path_put(&mnt->mnt_ex_mountpoint); |
1178 | mntput(&mnt->mnt); | 1216 | mntput(&mnt->mnt); |
@@ -1193,16 +1231,19 @@ static inline void namespace_lock(void) | |||
1193 | */ | 1231 | */ |
1194 | void umount_tree(struct mount *mnt, int how) | 1232 | void umount_tree(struct mount *mnt, int how) |
1195 | { | 1233 | { |
1196 | LIST_HEAD(tmp_list); | 1234 | HLIST_HEAD(tmp_list); |
1197 | struct mount *p; | 1235 | struct mount *p; |
1236 | struct mount *last = NULL; | ||
1198 | 1237 | ||
1199 | for (p = mnt; p; p = next_mnt(p, mnt)) | 1238 | for (p = mnt; p; p = next_mnt(p, mnt)) { |
1200 | list_move(&p->mnt_hash, &tmp_list); | 1239 | hlist_del_init_rcu(&p->mnt_hash); |
1240 | hlist_add_head(&p->mnt_hash, &tmp_list); | ||
1241 | } | ||
1201 | 1242 | ||
1202 | if (how) | 1243 | if (how) |
1203 | propagate_umount(&tmp_list); | 1244 | propagate_umount(&tmp_list); |
1204 | 1245 | ||
1205 | list_for_each_entry(p, &tmp_list, mnt_hash) { | 1246 | hlist_for_each_entry(p, &tmp_list, mnt_hash) { |
1206 | list_del_init(&p->mnt_expire); | 1247 | list_del_init(&p->mnt_expire); |
1207 | list_del_init(&p->mnt_list); | 1248 | list_del_init(&p->mnt_list); |
1208 | __touch_mnt_namespace(p->mnt_ns); | 1249 | __touch_mnt_namespace(p->mnt_ns); |
@@ -1220,8 +1261,13 @@ void umount_tree(struct mount *mnt, int how) | |||
1220 | p->mnt_mp = NULL; | 1261 | p->mnt_mp = NULL; |
1221 | } | 1262 | } |
1222 | change_mnt_propagation(p, MS_PRIVATE); | 1263 | change_mnt_propagation(p, MS_PRIVATE); |
1264 | last = p; | ||
1265 | } | ||
1266 | if (last) { | ||
1267 | last->mnt_hash.next = unmounted.first; | ||
1268 | unmounted.first = tmp_list.first; | ||
1269 | unmounted.first->pprev = &unmounted.first; | ||
1223 | } | 1270 | } |
1224 | list_splice(&tmp_list, &unmounted); | ||
1225 | } | 1271 | } |
1226 | 1272 | ||
1227 | static void shrink_submounts(struct mount *mnt); | 1273 | static void shrink_submounts(struct mount *mnt); |
@@ -1605,24 +1651,23 @@ static int attach_recursive_mnt(struct mount *source_mnt, | |||
1605 | struct mountpoint *dest_mp, | 1651 | struct mountpoint *dest_mp, |
1606 | struct path *parent_path) | 1652 | struct path *parent_path) |
1607 | { | 1653 | { |
1608 | LIST_HEAD(tree_list); | 1654 | HLIST_HEAD(tree_list); |
1609 | struct mount *child, *p; | 1655 | struct mount *child, *p; |
1656 | struct hlist_node *n; | ||
1610 | int err; | 1657 | int err; |
1611 | 1658 | ||
1612 | if (IS_MNT_SHARED(dest_mnt)) { | 1659 | if (IS_MNT_SHARED(dest_mnt)) { |
1613 | err = invent_group_ids(source_mnt, true); | 1660 | err = invent_group_ids(source_mnt, true); |
1614 | if (err) | 1661 | if (err) |
1615 | goto out; | 1662 | goto out; |
1616 | } | 1663 | err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); |
1617 | err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); | 1664 | if (err) |
1618 | if (err) | 1665 | goto out_cleanup_ids; |
1619 | goto out_cleanup_ids; | 1666 | lock_mount_hash(); |
1620 | |||
1621 | lock_mount_hash(); | ||
1622 | |||
1623 | if (IS_MNT_SHARED(dest_mnt)) { | ||
1624 | for (p = source_mnt; p; p = next_mnt(p, source_mnt)) | 1667 | for (p = source_mnt; p; p = next_mnt(p, source_mnt)) |
1625 | set_mnt_shared(p); | 1668 | set_mnt_shared(p); |
1669 | } else { | ||
1670 | lock_mount_hash(); | ||
1626 | } | 1671 | } |
1627 | if (parent_path) { | 1672 | if (parent_path) { |
1628 | detach_mnt(source_mnt, parent_path); | 1673 | detach_mnt(source_mnt, parent_path); |
@@ -1630,20 +1675,22 @@ static int attach_recursive_mnt(struct mount *source_mnt, | |||
1630 | touch_mnt_namespace(source_mnt->mnt_ns); | 1675 | touch_mnt_namespace(source_mnt->mnt_ns); |
1631 | } else { | 1676 | } else { |
1632 | mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); | 1677 | mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); |
1633 | commit_tree(source_mnt); | 1678 | commit_tree(source_mnt, NULL); |
1634 | } | 1679 | } |
1635 | 1680 | ||
1636 | list_for_each_entry_safe(child, p, &tree_list, mnt_hash) { | 1681 | hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { |
1637 | list_del_init(&child->mnt_hash); | 1682 | struct mount *q; |
1638 | commit_tree(child); | 1683 | hlist_del_init(&child->mnt_hash); |
1684 | q = __lookup_mnt_last(&child->mnt_parent->mnt, | ||
1685 | child->mnt_mountpoint); | ||
1686 | commit_tree(child, q); | ||
1639 | } | 1687 | } |
1640 | unlock_mount_hash(); | 1688 | unlock_mount_hash(); |
1641 | 1689 | ||
1642 | return 0; | 1690 | return 0; |
1643 | 1691 | ||
1644 | out_cleanup_ids: | 1692 | out_cleanup_ids: |
1645 | if (IS_MNT_SHARED(dest_mnt)) | 1693 | cleanup_group_ids(source_mnt, NULL); |
1646 | cleanup_group_ids(source_mnt, NULL); | ||
1647 | out: | 1694 | out: |
1648 | return err; | 1695 | return err; |
1649 | } | 1696 | } |
@@ -2777,18 +2824,24 @@ void __init mnt_init(void) | |||
2777 | mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount), | 2824 | mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount), |
2778 | 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); | 2825 | 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); |
2779 | 2826 | ||
2780 | mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); | 2827 | mount_hashtable = alloc_large_system_hash("Mount-cache", |
2781 | mountpoint_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); | 2828 | sizeof(struct hlist_head), |
2829 | mhash_entries, 19, | ||
2830 | 0, | ||
2831 | &m_hash_shift, &m_hash_mask, 0, 0); | ||
2832 | mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache", | ||
2833 | sizeof(struct hlist_head), | ||
2834 | mphash_entries, 19, | ||
2835 | 0, | ||
2836 | &mp_hash_shift, &mp_hash_mask, 0, 0); | ||
2782 | 2837 | ||
2783 | if (!mount_hashtable || !mountpoint_hashtable) | 2838 | if (!mount_hashtable || !mountpoint_hashtable) |
2784 | panic("Failed to allocate mount hash table\n"); | 2839 | panic("Failed to allocate mount hash table\n"); |
2785 | 2840 | ||
2786 | printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE); | 2841 | for (u = 0; u <= m_hash_mask; u++) |
2787 | 2842 | INIT_HLIST_HEAD(&mount_hashtable[u]); | |
2788 | for (u = 0; u < HASH_SIZE; u++) | 2843 | for (u = 0; u <= mp_hash_mask; u++) |
2789 | INIT_LIST_HEAD(&mount_hashtable[u]); | 2844 | INIT_HLIST_HEAD(&mountpoint_hashtable[u]); |
2790 | for (u = 0; u < HASH_SIZE; u++) | ||
2791 | INIT_LIST_HEAD(&mountpoint_hashtable[u]); | ||
2792 | 2845 | ||
2793 | kernfs_init(); | 2846 | kernfs_init(); |
2794 | 2847 | ||
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 017d3cb5e99b..6d7be3f80356 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
@@ -449,6 +449,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, | |||
449 | fh_lock(fhp); | 449 | fh_lock(fhp); |
450 | host_err = notify_change(dentry, iap, NULL); | 450 | host_err = notify_change(dentry, iap, NULL); |
451 | fh_unlock(fhp); | 451 | fh_unlock(fhp); |
452 | err = nfserrno(host_err); | ||
452 | 453 | ||
453 | out_put_write_access: | 454 | out_put_write_access: |
454 | if (size_change) | 455 | if (size_change) |
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index 1324e6600e57..ca5ce14cbddc 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c | |||
@@ -346,7 +346,9 @@ int ocfs2_cluster_connect(const char *stack_name, | |||
346 | 346 | ||
347 | strlcpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1); | 347 | strlcpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1); |
348 | new_conn->cc_namelen = grouplen; | 348 | new_conn->cc_namelen = grouplen; |
349 | strlcpy(new_conn->cc_cluster_name, cluster_name, CLUSTER_NAME_MAX + 1); | 349 | if (cluster_name_len) |
350 | strlcpy(new_conn->cc_cluster_name, cluster_name, | ||
351 | CLUSTER_NAME_MAX + 1); | ||
350 | new_conn->cc_cluster_name_len = cluster_name_len; | 352 | new_conn->cc_cluster_name_len = cluster_name_len; |
351 | new_conn->cc_recovery_handler = recovery_handler; | 353 | new_conn->cc_recovery_handler = recovery_handler; |
352 | new_conn->cc_recovery_data = recovery_data; | 354 | new_conn->cc_recovery_data = recovery_data; |
diff --git a/fs/pnode.c b/fs/pnode.c index c7221bb19801..88396df725b4 100644 --- a/fs/pnode.c +++ b/fs/pnode.c | |||
@@ -220,14 +220,14 @@ static struct mount *get_source(struct mount *dest, | |||
220 | * @tree_list : list of heads of trees to be attached. | 220 | * @tree_list : list of heads of trees to be attached. |
221 | */ | 221 | */ |
222 | int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, | 222 | int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, |
223 | struct mount *source_mnt, struct list_head *tree_list) | 223 | struct mount *source_mnt, struct hlist_head *tree_list) |
224 | { | 224 | { |
225 | struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; | 225 | struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns; |
226 | struct mount *m, *child; | 226 | struct mount *m, *child; |
227 | int ret = 0; | 227 | int ret = 0; |
228 | struct mount *prev_dest_mnt = dest_mnt; | 228 | struct mount *prev_dest_mnt = dest_mnt; |
229 | struct mount *prev_src_mnt = source_mnt; | 229 | struct mount *prev_src_mnt = source_mnt; |
230 | LIST_HEAD(tmp_list); | 230 | HLIST_HEAD(tmp_list); |
231 | 231 | ||
232 | for (m = propagation_next(dest_mnt, dest_mnt); m; | 232 | for (m = propagation_next(dest_mnt, dest_mnt); m; |
233 | m = propagation_next(m, dest_mnt)) { | 233 | m = propagation_next(m, dest_mnt)) { |
@@ -246,27 +246,29 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, | |||
246 | child = copy_tree(source, source->mnt.mnt_root, type); | 246 | child = copy_tree(source, source->mnt.mnt_root, type); |
247 | if (IS_ERR(child)) { | 247 | if (IS_ERR(child)) { |
248 | ret = PTR_ERR(child); | 248 | ret = PTR_ERR(child); |
249 | list_splice(tree_list, tmp_list.prev); | 249 | tmp_list = *tree_list; |
250 | tmp_list.first->pprev = &tmp_list.first; | ||
251 | INIT_HLIST_HEAD(tree_list); | ||
250 | goto out; | 252 | goto out; |
251 | } | 253 | } |
252 | 254 | ||
253 | if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) { | 255 | if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) { |
254 | mnt_set_mountpoint(m, dest_mp, child); | 256 | mnt_set_mountpoint(m, dest_mp, child); |
255 | list_add_tail(&child->mnt_hash, tree_list); | 257 | hlist_add_head(&child->mnt_hash, tree_list); |
256 | } else { | 258 | } else { |
257 | /* | 259 | /* |
258 | * This can happen if the parent mount was bind mounted | 260 | * This can happen if the parent mount was bind mounted |
259 | * on some subdirectory of a shared/slave mount. | 261 | * on some subdirectory of a shared/slave mount. |
260 | */ | 262 | */ |
261 | list_add_tail(&child->mnt_hash, &tmp_list); | 263 | hlist_add_head(&child->mnt_hash, &tmp_list); |
262 | } | 264 | } |
263 | prev_dest_mnt = m; | 265 | prev_dest_mnt = m; |
264 | prev_src_mnt = child; | 266 | prev_src_mnt = child; |
265 | } | 267 | } |
266 | out: | 268 | out: |
267 | lock_mount_hash(); | 269 | lock_mount_hash(); |
268 | while (!list_empty(&tmp_list)) { | 270 | while (!hlist_empty(&tmp_list)) { |
269 | child = list_first_entry(&tmp_list, struct mount, mnt_hash); | 271 | child = hlist_entry(tmp_list.first, struct mount, mnt_hash); |
270 | umount_tree(child, 0); | 272 | umount_tree(child, 0); |
271 | } | 273 | } |
272 | unlock_mount_hash(); | 274 | unlock_mount_hash(); |
@@ -338,8 +340,10 @@ static void __propagate_umount(struct mount *mnt) | |||
338 | * umount the child only if the child has no | 340 | * umount the child only if the child has no |
339 | * other children | 341 | * other children |
340 | */ | 342 | */ |
341 | if (child && list_empty(&child->mnt_mounts)) | 343 | if (child && list_empty(&child->mnt_mounts)) { |
342 | list_move_tail(&child->mnt_hash, &mnt->mnt_hash); | 344 | hlist_del_init_rcu(&child->mnt_hash); |
345 | hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash); | ||
346 | } | ||
343 | } | 347 | } |
344 | } | 348 | } |
345 | 349 | ||
@@ -350,11 +354,11 @@ static void __propagate_umount(struct mount *mnt) | |||
350 | * | 354 | * |
351 | * vfsmount lock must be held for write | 355 | * vfsmount lock must be held for write |
352 | */ | 356 | */ |
353 | int propagate_umount(struct list_head *list) | 357 | int propagate_umount(struct hlist_head *list) |
354 | { | 358 | { |
355 | struct mount *mnt; | 359 | struct mount *mnt; |
356 | 360 | ||
357 | list_for_each_entry(mnt, list, mnt_hash) | 361 | hlist_for_each_entry(mnt, list, mnt_hash) |
358 | __propagate_umount(mnt); | 362 | __propagate_umount(mnt); |
359 | return 0; | 363 | return 0; |
360 | } | 364 | } |
diff --git a/fs/pnode.h b/fs/pnode.h index 59e7eda1851e..fc28a27fa892 100644 --- a/fs/pnode.h +++ b/fs/pnode.h | |||
@@ -36,8 +36,8 @@ static inline void set_mnt_shared(struct mount *mnt) | |||
36 | 36 | ||
37 | void change_mnt_propagation(struct mount *, int); | 37 | void change_mnt_propagation(struct mount *, int); |
38 | int propagate_mnt(struct mount *, struct mountpoint *, struct mount *, | 38 | int propagate_mnt(struct mount *, struct mountpoint *, struct mount *, |
39 | struct list_head *); | 39 | struct hlist_head *); |
40 | int propagate_umount(struct list_head *); | 40 | int propagate_umount(struct hlist_head *); |
41 | int propagate_mount_busy(struct mount *, int); | 41 | int propagate_mount_busy(struct mount *, int); |
42 | void mnt_release_group_id(struct mount *); | 42 | void mnt_release_group_id(struct mount *); |
43 | int get_dominating_id(struct mount *mnt, const struct path *root); | 43 | int get_dominating_id(struct mount *mnt, const struct path *root); |
diff --git a/fs/read_write.c b/fs/read_write.c index 54e19b9392dc..28cc9c810744 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
@@ -307,7 +307,7 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high, | |||
307 | unsigned int, whence) | 307 | unsigned int, whence) |
308 | { | 308 | { |
309 | int retval; | 309 | int retval; |
310 | struct fd f = fdget(fd); | 310 | struct fd f = fdget_pos(fd); |
311 | loff_t offset; | 311 | loff_t offset; |
312 | 312 | ||
313 | if (!f.file) | 313 | if (!f.file) |
@@ -327,7 +327,7 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high, | |||
327 | retval = 0; | 327 | retval = 0; |
328 | } | 328 | } |
329 | out_putf: | 329 | out_putf: |
330 | fdput(f); | 330 | fdput_pos(f); |
331 | return retval; | 331 | return retval; |
332 | } | 332 | } |
333 | #endif | 333 | #endif |
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index abc9ca778456..be5fd38bd5a0 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h | |||
@@ -196,6 +196,21 @@ static inline unsigned long __ffs64(u64 word) | |||
196 | 196 | ||
197 | #ifdef __KERNEL__ | 197 | #ifdef __KERNEL__ |
198 | 198 | ||
199 | #ifndef set_mask_bits | ||
200 | #define set_mask_bits(ptr, _mask, _bits) \ | ||
201 | ({ \ | ||
202 | const typeof(*ptr) mask = (_mask), bits = (_bits); \ | ||
203 | typeof(*ptr) old, new; \ | ||
204 | \ | ||
205 | do { \ | ||
206 | old = ACCESS_ONCE(*ptr); \ | ||
207 | new = (old & ~mask) | bits; \ | ||
208 | } while (cmpxchg(ptr, old, new) != old); \ | ||
209 | \ | ||
210 | new; \ | ||
211 | }) | ||
212 | #endif | ||
213 | |||
199 | #ifndef find_last_bit | 214 | #ifndef find_last_bit |
200 | /** | 215 | /** |
201 | * find_last_bit - find the last set bit in a memory region | 216 | * find_last_bit - find the last set bit in a memory region |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 23b2a35d712e..6e765d28841b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -589,6 +589,9 @@ struct inode { | |||
589 | atomic_t i_count; | 589 | atomic_t i_count; |
590 | atomic_t i_dio_count; | 590 | atomic_t i_dio_count; |
591 | atomic_t i_writecount; | 591 | atomic_t i_writecount; |
592 | #ifdef CONFIG_IMA | ||
593 | atomic_t i_readcount; /* struct files open RO */ | ||
594 | #endif | ||
592 | const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ | 595 | const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ |
593 | struct file_lock *i_flock; | 596 | struct file_lock *i_flock; |
594 | struct address_space i_data; | 597 | struct address_space i_data; |
@@ -609,9 +612,6 @@ struct inode { | |||
609 | struct hlist_head i_fsnotify_marks; | 612 | struct hlist_head i_fsnotify_marks; |
610 | #endif | 613 | #endif |
611 | 614 | ||
612 | #ifdef CONFIG_IMA | ||
613 | atomic_t i_readcount; /* struct files open RO */ | ||
614 | #endif | ||
615 | void *i_private; /* fs or device private pointer */ | 615 | void *i_private; /* fs or device private pointer */ |
616 | }; | 616 | }; |
617 | 617 | ||
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 4e4cc28623ad..4cdb3a17bcb5 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -495,10 +495,6 @@ enum { | |||
495 | FILTER_TRACE_FN, | 495 | FILTER_TRACE_FN, |
496 | }; | 496 | }; |
497 | 497 | ||
498 | #define EVENT_STORAGE_SIZE 128 | ||
499 | extern struct mutex event_storage_mutex; | ||
500 | extern char event_storage[EVENT_STORAGE_SIZE]; | ||
501 | |||
502 | extern int trace_event_raw_init(struct ftrace_event_call *call); | 498 | extern int trace_event_raw_init(struct ftrace_event_call *call); |
503 | extern int trace_define_field(struct ftrace_event_call *call, const char *type, | 499 | extern int trace_define_field(struct ftrace_event_call *call, const char *type, |
504 | const char *name, int offset, int size, | 500 | const char *name, int offset, int size, |
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 1005ebf17575..5a09a48f2658 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h | |||
@@ -163,4 +163,11 @@ enum { | |||
163 | /* changeable features with no special hardware requirements */ | 163 | /* changeable features with no special hardware requirements */ |
164 | #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) | 164 | #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) |
165 | 165 | ||
166 | #define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \ | ||
167 | NETIF_F_HW_VLAN_CTAG_RX | \ | ||
168 | NETIF_F_HW_VLAN_CTAG_TX | \ | ||
169 | NETIF_F_HW_VLAN_STAG_FILTER | \ | ||
170 | NETIF_F_HW_VLAN_STAG_RX | \ | ||
171 | NETIF_F_HW_VLAN_STAG_TX) | ||
172 | |||
166 | #endif /* _LINUX_NETDEV_FEATURES_H */ | 173 | #endif /* _LINUX_NETDEV_FEATURES_H */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e8eeebd49a98..daafd9561cbc 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -3014,7 +3014,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) | |||
3014 | { | 3014 | { |
3015 | return __skb_gso_segment(skb, features, true); | 3015 | return __skb_gso_segment(skb, features, true); |
3016 | } | 3016 | } |
3017 | __be16 skb_network_protocol(struct sk_buff *skb); | 3017 | __be16 skb_network_protocol(struct sk_buff *skb, int *depth); |
3018 | 3018 | ||
3019 | static inline bool can_checksum_protocol(netdev_features_t features, | 3019 | static inline bool can_checksum_protocol(netdev_features_t features, |
3020 | __be16 protocol) | 3020 | __be16 protocol) |
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 1da693d51255..b66c2110cb1f 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
@@ -250,8 +250,7 @@ struct rmap_walk_control { | |||
250 | int (*rmap_one)(struct page *page, struct vm_area_struct *vma, | 250 | int (*rmap_one)(struct page *page, struct vm_area_struct *vma, |
251 | unsigned long addr, void *arg); | 251 | unsigned long addr, void *arg); |
252 | int (*done)(struct page *page); | 252 | int (*done)(struct page *page); |
253 | int (*file_nonlinear)(struct page *, struct address_space *, | 253 | int (*file_nonlinear)(struct page *, struct address_space *, void *arg); |
254 | struct vm_area_struct *vma); | ||
255 | struct anon_vma *(*anon_lock)(struct page *page); | 254 | struct anon_vma *(*anon_lock)(struct page *page); |
256 | bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); | 255 | bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); |
257 | }; | 256 | }; |
diff --git a/include/linux/security.h b/include/linux/security.h index 3a5ed0cd2751..6726006bc766 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -1040,6 +1040,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
1040 | * Allocate a security structure to the xp->security field; the security | 1040 | * Allocate a security structure to the xp->security field; the security |
1041 | * field is initialized to NULL when the xfrm_policy is allocated. | 1041 | * field is initialized to NULL when the xfrm_policy is allocated. |
1042 | * Return 0 if operation was successful (memory to allocate, legal context) | 1042 | * Return 0 if operation was successful (memory to allocate, legal context) |
1043 | * @gfp is to specify the context for the allocation | ||
1043 | * @xfrm_policy_clone_security: | 1044 | * @xfrm_policy_clone_security: |
1044 | * @old_ctx contains an existing xfrm_sec_ctx. | 1045 | * @old_ctx contains an existing xfrm_sec_ctx. |
1045 | * @new_ctxp contains a new xfrm_sec_ctx being cloned from old. | 1046 | * @new_ctxp contains a new xfrm_sec_ctx being cloned from old. |
@@ -1683,7 +1684,7 @@ struct security_operations { | |||
1683 | 1684 | ||
1684 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 1685 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
1685 | int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp, | 1686 | int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp, |
1686 | struct xfrm_user_sec_ctx *sec_ctx); | 1687 | struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp); |
1687 | int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); | 1688 | int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx); |
1688 | void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx); | 1689 | void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx); |
1689 | int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx); | 1690 | int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx); |
@@ -2859,7 +2860,8 @@ static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) | |||
2859 | 2860 | ||
2860 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 2861 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
2861 | 2862 | ||
2862 | int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx); | 2863 | int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, |
2864 | struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp); | ||
2863 | int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp); | 2865 | int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp); |
2864 | void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx); | 2866 | void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx); |
2865 | int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx); | 2867 | int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx); |
@@ -2877,7 +2879,9 @@ void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl); | |||
2877 | 2879 | ||
2878 | #else /* CONFIG_SECURITY_NETWORK_XFRM */ | 2880 | #else /* CONFIG_SECURITY_NETWORK_XFRM */ |
2879 | 2881 | ||
2880 | static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx) | 2882 | static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, |
2883 | struct xfrm_user_sec_ctx *sec_ctx, | ||
2884 | gfp_t gfp) | ||
2881 | { | 2885 | { |
2882 | return 0; | 2886 | return 0; |
2883 | } | 2887 | } |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 5e1e6f2d98c2..15ede6a823a6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -2451,8 +2451,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, | |||
2451 | unsigned int flags); | 2451 | unsigned int flags); |
2452 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); | 2452 | void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); |
2453 | unsigned int skb_zerocopy_headlen(const struct sk_buff *from); | 2453 | unsigned int skb_zerocopy_headlen(const struct sk_buff *from); |
2454 | void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, | 2454 | int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, |
2455 | int len, int hlen); | 2455 | int len, int hlen); |
2456 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); | 2456 | void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len); |
2457 | int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); | 2457 | int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); |
2458 | void skb_scrub_packet(struct sk_buff *skb, bool xnet); | 2458 | void skb_scrub_packet(struct sk_buff *skb, bool xnet); |
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h index c3fa80745996..2c14d9cdd57a 100644 --- a/include/linux/usb/cdc_ncm.h +++ b/include/linux/usb/cdc_ncm.h | |||
@@ -88,6 +88,7 @@ | |||
88 | #define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) | 88 | #define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB) |
89 | 89 | ||
90 | struct cdc_ncm_ctx { | 90 | struct cdc_ncm_ctx { |
91 | struct usb_cdc_ncm_ntb_parameters ncm_parm; | ||
91 | struct hrtimer tx_timer; | 92 | struct hrtimer tx_timer; |
92 | struct tasklet_struct bh; | 93 | struct tasklet_struct bh; |
93 | 94 | ||
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index e303eef94dd5..0662e98fef72 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h | |||
@@ -30,7 +30,7 @@ struct usbnet { | |||
30 | struct driver_info *driver_info; | 30 | struct driver_info *driver_info; |
31 | const char *driver_name; | 31 | const char *driver_name; |
32 | void *driver_priv; | 32 | void *driver_priv; |
33 | wait_queue_head_t *wait; | 33 | wait_queue_head_t wait; |
34 | struct mutex phy_mutex; | 34 | struct mutex phy_mutex; |
35 | unsigned char suspend_count; | 35 | unsigned char suspend_count; |
36 | unsigned char pkt_cnt, pkt_err; | 36 | unsigned char pkt_cnt, pkt_err; |
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 9650a3ffd2d2..b4956a5fcc3f 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h | |||
@@ -31,8 +31,10 @@ | |||
31 | #define IF_PREFIX_AUTOCONF 0x02 | 31 | #define IF_PREFIX_AUTOCONF 0x02 |
32 | 32 | ||
33 | enum { | 33 | enum { |
34 | INET6_IFADDR_STATE_PREDAD, | ||
34 | INET6_IFADDR_STATE_DAD, | 35 | INET6_IFADDR_STATE_DAD, |
35 | INET6_IFADDR_STATE_POSTDAD, | 36 | INET6_IFADDR_STATE_POSTDAD, |
37 | INET6_IFADDR_STATE_ERRDAD, | ||
36 | INET6_IFADDR_STATE_UP, | 38 | INET6_IFADDR_STATE_UP, |
37 | INET6_IFADDR_STATE_DEAD, | 39 | INET6_IFADDR_STATE_DEAD, |
38 | }; | 40 | }; |
@@ -58,7 +60,7 @@ struct inet6_ifaddr { | |||
58 | unsigned long cstamp; /* created timestamp */ | 60 | unsigned long cstamp; /* created timestamp */ |
59 | unsigned long tstamp; /* updated timestamp */ | 61 | unsigned long tstamp; /* updated timestamp */ |
60 | 62 | ||
61 | struct timer_list dad_timer; | 63 | struct delayed_work dad_work; |
62 | 64 | ||
63 | struct inet6_dev *idev; | 65 | struct inet6_dev *idev; |
64 | struct rt6_info *rt; | 66 | struct rt6_info *rt; |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 8c4dd63134d4..743accec6c76 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -480,20 +480,21 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, | |||
480 | #ifdef CONFIG_SYN_COOKIES | 480 | #ifdef CONFIG_SYN_COOKIES |
481 | #include <linux/ktime.h> | 481 | #include <linux/ktime.h> |
482 | 482 | ||
483 | /* Syncookies use a monotonic timer which increments every 64 seconds. | 483 | /* Syncookies use a monotonic timer which increments every 60 seconds. |
484 | * This counter is used both as a hash input and partially encoded into | 484 | * This counter is used both as a hash input and partially encoded into |
485 | * the cookie value. A cookie is only validated further if the delta | 485 | * the cookie value. A cookie is only validated further if the delta |
486 | * between the current counter value and the encoded one is less than this, | 486 | * between the current counter value and the encoded one is less than this, |
487 | * i.e. a sent cookie is valid only at most for 128 seconds (or less if | 487 | * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if |
488 | * the counter advances immediately after a cookie is generated). | 488 | * the counter advances immediately after a cookie is generated). |
489 | */ | 489 | */ |
490 | #define MAX_SYNCOOKIE_AGE 2 | 490 | #define MAX_SYNCOOKIE_AGE 2 |
491 | 491 | ||
492 | static inline u32 tcp_cookie_time(void) | 492 | static inline u32 tcp_cookie_time(void) |
493 | { | 493 | { |
494 | struct timespec now; | 494 | u64 val = get_jiffies_64(); |
495 | getnstimeofday(&now); | 495 | |
496 | return now.tv_sec >> 6; /* 64 seconds granularity */ | 496 | do_div(val, 60 * HZ); |
497 | return val; | ||
497 | } | 498 | } |
498 | 499 | ||
499 | u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, | 500 | u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, |
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 1a8b28db3775..1ee19a24cc5f 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h | |||
@@ -310,15 +310,12 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \ | |||
310 | #undef __array | 310 | #undef __array |
311 | #define __array(type, item, len) \ | 311 | #define __array(type, item, len) \ |
312 | do { \ | 312 | do { \ |
313 | mutex_lock(&event_storage_mutex); \ | 313 | char *type_str = #type"["__stringify(len)"]"; \ |
314 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | 314 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ |
315 | snprintf(event_storage, sizeof(event_storage), \ | 315 | ret = trace_define_field(event_call, type_str, #item, \ |
316 | "%s[%d]", #type, len); \ | ||
317 | ret = trace_define_field(event_call, event_storage, #item, \ | ||
318 | offsetof(typeof(field), item), \ | 316 | offsetof(typeof(field), item), \ |
319 | sizeof(field.item), \ | 317 | sizeof(field.item), \ |
320 | is_signed_type(type), FILTER_OTHER); \ | 318 | is_signed_type(type), FILTER_OTHER); \ |
321 | mutex_unlock(&event_storage_mutex); \ | ||
322 | if (ret) \ | 319 | if (ret) \ |
323 | return ret; \ | 320 | return ret; \ |
324 | } while (0); | 321 | } while (0); |
diff --git a/init/main.c b/init/main.c index eb03090cdced..9c7fd4c9249f 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -561,7 +561,6 @@ asmlinkage void __init start_kernel(void) | |||
561 | init_timers(); | 561 | init_timers(); |
562 | hrtimers_init(); | 562 | hrtimers_init(); |
563 | softirq_init(); | 563 | softirq_init(); |
564 | acpi_early_init(); | ||
565 | timekeeping_init(); | 564 | timekeeping_init(); |
566 | time_init(); | 565 | time_init(); |
567 | sched_clock_postinit(); | 566 | sched_clock_postinit(); |
@@ -613,6 +612,7 @@ asmlinkage void __init start_kernel(void) | |||
613 | calibrate_delay(); | 612 | calibrate_delay(); |
614 | pidmap_init(); | 613 | pidmap_init(); |
615 | anon_vma_init(); | 614 | anon_vma_init(); |
615 | acpi_early_init(); | ||
616 | #ifdef CONFIG_X86 | 616 | #ifdef CONFIG_X86 |
617 | if (efi_enabled(EFI_RUNTIME_SERVICES)) | 617 | if (efi_enabled(EFI_RUNTIME_SERVICES)) |
618 | efi_enter_virtual_mode(); | 618 | efi_enter_virtual_mode(); |
@@ -901,6 +901,8 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl | |||
901 | return -EINVAL; | 901 | return -EINVAL; |
902 | 902 | ||
903 | if (msgflg & MSG_COPY) { | 903 | if (msgflg & MSG_COPY) { |
904 | if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT)) | ||
905 | return -EINVAL; | ||
904 | copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax)); | 906 | copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax)); |
905 | if (IS_ERR(copy)) | 907 | if (IS_ERR(copy)) |
906 | return PTR_ERR(copy); | 908 | return PTR_ERR(copy); |
diff --git a/kernel/audit.c b/kernel/audit.c index 3392d3e0254a..95a20f3f52f1 100644 --- a/kernel/audit.c +++ b/kernel/audit.c | |||
@@ -608,9 +608,19 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) | |||
608 | int err = 0; | 608 | int err = 0; |
609 | 609 | ||
610 | /* Only support the initial namespaces for now. */ | 610 | /* Only support the initial namespaces for now. */ |
611 | /* | ||
612 | * We return ECONNREFUSED because it tricks userspace into thinking | ||
613 | * that audit was not configured into the kernel. Lots of users | ||
614 | * configure their PAM stack (because that's what the distro does) | ||
615 | * to reject login if unable to send messages to audit. If we return | ||
616 | * ECONNREFUSED the PAM stack thinks the kernel does not have audit | ||
617 | * configured in and will let login proceed. If we return EPERM | ||
618 | * userspace will reject all logins. This should be removed when we | ||
619 | * support non init namespaces!! | ||
620 | */ | ||
611 | if ((current_user_ns() != &init_user_ns) || | 621 | if ((current_user_ns() != &init_user_ns) || |
612 | (task_active_pid_ns(current) != &init_pid_ns)) | 622 | (task_active_pid_ns(current) != &init_pid_ns)) |
613 | return -EPERM; | 623 | return -ECONNREFUSED; |
614 | 624 | ||
615 | switch (msg_type) { | 625 | switch (msg_type) { |
616 | case AUDIT_LIST: | 626 | case AUDIT_LIST: |
diff --git a/kernel/capability.c b/kernel/capability.c index 34019c57888d..a8d63df0c322 100644 --- a/kernel/capability.c +++ b/kernel/capability.c | |||
@@ -7,6 +7,8 @@ | |||
7 | * 30 May 2002: Cleanup, Robert M. Love <rml@tech9.net> | 7 | * 30 May 2002: Cleanup, Robert M. Love <rml@tech9.net> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
11 | |||
10 | #include <linux/audit.h> | 12 | #include <linux/audit.h> |
11 | #include <linux/capability.h> | 13 | #include <linux/capability.h> |
12 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
@@ -42,15 +44,10 @@ __setup("no_file_caps", file_caps_disable); | |||
42 | 44 | ||
43 | static void warn_legacy_capability_use(void) | 45 | static void warn_legacy_capability_use(void) |
44 | { | 46 | { |
45 | static int warned; | 47 | char name[sizeof(current->comm)]; |
46 | if (!warned) { | 48 | |
47 | char name[sizeof(current->comm)]; | 49 | pr_info_once("warning: `%s' uses 32-bit capabilities (legacy support in use)\n", |
48 | 50 | get_task_comm(name, current)); | |
49 | printk(KERN_INFO "warning: `%s' uses 32-bit capabilities" | ||
50 | " (legacy support in use)\n", | ||
51 | get_task_comm(name, current)); | ||
52 | warned = 1; | ||
53 | } | ||
54 | } | 51 | } |
55 | 52 | ||
56 | /* | 53 | /* |
@@ -71,16 +68,10 @@ static void warn_legacy_capability_use(void) | |||
71 | 68 | ||
72 | static void warn_deprecated_v2(void) | 69 | static void warn_deprecated_v2(void) |
73 | { | 70 | { |
74 | static int warned; | 71 | char name[sizeof(current->comm)]; |
75 | 72 | ||
76 | if (!warned) { | 73 | pr_info_once("warning: `%s' uses deprecated v2 capabilities in a way that may be insecure\n", |
77 | char name[sizeof(current->comm)]; | 74 | get_task_comm(name, current)); |
78 | |||
79 | printk(KERN_INFO "warning: `%s' uses deprecated v2" | ||
80 | " capabilities in a way that may be insecure.\n", | ||
81 | get_task_comm(name, current)); | ||
82 | warned = 1; | ||
83 | } | ||
84 | } | 75 | } |
85 | 76 | ||
86 | /* | 77 | /* |
@@ -380,7 +371,7 @@ bool has_capability_noaudit(struct task_struct *t, int cap) | |||
380 | bool ns_capable(struct user_namespace *ns, int cap) | 371 | bool ns_capable(struct user_namespace *ns, int cap) |
381 | { | 372 | { |
382 | if (unlikely(!cap_valid(cap))) { | 373 | if (unlikely(!cap_valid(cap))) { |
383 | printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap); | 374 | pr_crit("capable() called with invalid cap=%u\n", cap); |
384 | BUG(); | 375 | BUG(); |
385 | } | 376 | } |
386 | 377 | ||
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 105f273b6f86..0c753ddd223b 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -4112,17 +4112,17 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss) | |||
4112 | 4112 | ||
4113 | err = percpu_ref_init(&css->refcnt, css_release); | 4113 | err = percpu_ref_init(&css->refcnt, css_release); |
4114 | if (err) | 4114 | if (err) |
4115 | goto err_free; | 4115 | goto err_free_css; |
4116 | 4116 | ||
4117 | init_css(css, ss, cgrp); | 4117 | init_css(css, ss, cgrp); |
4118 | 4118 | ||
4119 | err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id); | 4119 | err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id); |
4120 | if (err) | 4120 | if (err) |
4121 | goto err_free; | 4121 | goto err_free_percpu_ref; |
4122 | 4122 | ||
4123 | err = online_css(css); | 4123 | err = online_css(css); |
4124 | if (err) | 4124 | if (err) |
4125 | goto err_free; | 4125 | goto err_clear_dir; |
4126 | 4126 | ||
4127 | dget(cgrp->dentry); | 4127 | dget(cgrp->dentry); |
4128 | css_get(css->parent); | 4128 | css_get(css->parent); |
@@ -4138,8 +4138,11 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss) | |||
4138 | 4138 | ||
4139 | return 0; | 4139 | return 0; |
4140 | 4140 | ||
4141 | err_free: | 4141 | err_clear_dir: |
4142 | cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id); | ||
4143 | err_free_percpu_ref: | ||
4142 | percpu_ref_cancel_init(&css->refcnt); | 4144 | percpu_ref_cancel_init(&css->refcnt); |
4145 | err_free_css: | ||
4143 | ss->css_free(css); | 4146 | ss->css_free(css); |
4144 | return err; | 4147 | return err; |
4145 | } | 4148 | } |
diff --git a/kernel/futex.c b/kernel/futex.c index 44a1261cb9ff..08ec814ad9d2 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -234,6 +234,7 @@ static const struct futex_q futex_q_init = { | |||
234 | * waiting on a futex. | 234 | * waiting on a futex. |
235 | */ | 235 | */ |
236 | struct futex_hash_bucket { | 236 | struct futex_hash_bucket { |
237 | atomic_t waiters; | ||
237 | spinlock_t lock; | 238 | spinlock_t lock; |
238 | struct plist_head chain; | 239 | struct plist_head chain; |
239 | } ____cacheline_aligned_in_smp; | 240 | } ____cacheline_aligned_in_smp; |
@@ -253,22 +254,37 @@ static inline void futex_get_mm(union futex_key *key) | |||
253 | smp_mb__after_atomic_inc(); | 254 | smp_mb__after_atomic_inc(); |
254 | } | 255 | } |
255 | 256 | ||
256 | static inline bool hb_waiters_pending(struct futex_hash_bucket *hb) | 257 | /* |
258 | * Reflects a new waiter being added to the waitqueue. | ||
259 | */ | ||
260 | static inline void hb_waiters_inc(struct futex_hash_bucket *hb) | ||
257 | { | 261 | { |
258 | #ifdef CONFIG_SMP | 262 | #ifdef CONFIG_SMP |
263 | atomic_inc(&hb->waiters); | ||
259 | /* | 264 | /* |
260 | * Tasks trying to enter the critical region are most likely | 265 | * Full barrier (A), see the ordering comment above. |
261 | * potential waiters that will be added to the plist. Ensure | ||
262 | * that wakers won't miss to-be-slept tasks in the window between | ||
263 | * the wait call and the actual plist_add. | ||
264 | */ | 266 | */ |
265 | if (spin_is_locked(&hb->lock)) | 267 | smp_mb__after_atomic_inc(); |
266 | return true; | 268 | #endif |
267 | smp_rmb(); /* Make sure we check the lock state first */ | 269 | } |
270 | |||
271 | /* | ||
272 | * Reflects a waiter being removed from the waitqueue by wakeup | ||
273 | * paths. | ||
274 | */ | ||
275 | static inline void hb_waiters_dec(struct futex_hash_bucket *hb) | ||
276 | { | ||
277 | #ifdef CONFIG_SMP | ||
278 | atomic_dec(&hb->waiters); | ||
279 | #endif | ||
280 | } | ||
268 | 281 | ||
269 | return !plist_head_empty(&hb->chain); | 282 | static inline int hb_waiters_pending(struct futex_hash_bucket *hb) |
283 | { | ||
284 | #ifdef CONFIG_SMP | ||
285 | return atomic_read(&hb->waiters); | ||
270 | #else | 286 | #else |
271 | return true; | 287 | return 1; |
272 | #endif | 288 | #endif |
273 | } | 289 | } |
274 | 290 | ||
@@ -954,6 +970,7 @@ static void __unqueue_futex(struct futex_q *q) | |||
954 | 970 | ||
955 | hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); | 971 | hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); |
956 | plist_del(&q->list, &hb->chain); | 972 | plist_del(&q->list, &hb->chain); |
973 | hb_waiters_dec(hb); | ||
957 | } | 974 | } |
958 | 975 | ||
959 | /* | 976 | /* |
@@ -1257,7 +1274,9 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, | |||
1257 | */ | 1274 | */ |
1258 | if (likely(&hb1->chain != &hb2->chain)) { | 1275 | if (likely(&hb1->chain != &hb2->chain)) { |
1259 | plist_del(&q->list, &hb1->chain); | 1276 | plist_del(&q->list, &hb1->chain); |
1277 | hb_waiters_dec(hb1); | ||
1260 | plist_add(&q->list, &hb2->chain); | 1278 | plist_add(&q->list, &hb2->chain); |
1279 | hb_waiters_inc(hb2); | ||
1261 | q->lock_ptr = &hb2->lock; | 1280 | q->lock_ptr = &hb2->lock; |
1262 | } | 1281 | } |
1263 | get_futex_key_refs(key2); | 1282 | get_futex_key_refs(key2); |
@@ -1600,6 +1619,17 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) | |||
1600 | struct futex_hash_bucket *hb; | 1619 | struct futex_hash_bucket *hb; |
1601 | 1620 | ||
1602 | hb = hash_futex(&q->key); | 1621 | hb = hash_futex(&q->key); |
1622 | |||
1623 | /* | ||
1624 | * Increment the counter before taking the lock so that | ||
1625 | * a potential waker won't miss a to-be-slept task that is | ||
1626 | * waiting for the spinlock. This is safe as all queue_lock() | ||
1627 | * users end up calling queue_me(). Similarly, for housekeeping, | ||
1628 | * decrement the counter at queue_unlock() when some error has | ||
1629 | * occurred and we don't end up adding the task to the list. | ||
1630 | */ | ||
1631 | hb_waiters_inc(hb); | ||
1632 | |||
1603 | q->lock_ptr = &hb->lock; | 1633 | q->lock_ptr = &hb->lock; |
1604 | 1634 | ||
1605 | spin_lock(&hb->lock); /* implies MB (A) */ | 1635 | spin_lock(&hb->lock); /* implies MB (A) */ |
@@ -1611,6 +1641,7 @@ queue_unlock(struct futex_hash_bucket *hb) | |||
1611 | __releases(&hb->lock) | 1641 | __releases(&hb->lock) |
1612 | { | 1642 | { |
1613 | spin_unlock(&hb->lock); | 1643 | spin_unlock(&hb->lock); |
1644 | hb_waiters_dec(hb); | ||
1614 | } | 1645 | } |
1615 | 1646 | ||
1616 | /** | 1647 | /** |
@@ -2342,6 +2373,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, | |||
2342 | * Unqueue the futex_q and determine which it was. | 2373 | * Unqueue the futex_q and determine which it was. |
2343 | */ | 2374 | */ |
2344 | plist_del(&q->list, &hb->chain); | 2375 | plist_del(&q->list, &hb->chain); |
2376 | hb_waiters_dec(hb); | ||
2345 | 2377 | ||
2346 | /* Handle spurious wakeups gracefully */ | 2378 | /* Handle spurious wakeups gracefully */ |
2347 | ret = -EWOULDBLOCK; | 2379 | ret = -EWOULDBLOCK; |
@@ -2875,6 +2907,7 @@ static int __init futex_init(void) | |||
2875 | futex_cmpxchg_enabled = 1; | 2907 | futex_cmpxchg_enabled = 1; |
2876 | 2908 | ||
2877 | for (i = 0; i < futex_hashsize; i++) { | 2909 | for (i = 0; i < futex_hashsize; i++) { |
2910 | atomic_set(&futex_queues[i].waiters, 0); | ||
2878 | plist_head_init(&futex_queues[i].chain); | 2911 | plist_head_init(&futex_queues[i].chain); |
2879 | spin_lock_init(&futex_queues[i].lock); | 2912 | spin_lock_init(&futex_queues[i].lock); |
2880 | } | 2913 | } |
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c index 43c2bcc35761..b30a2924ef14 100644 --- a/kernel/sched/clock.c +++ b/kernel/sched/clock.c | |||
@@ -301,14 +301,14 @@ u64 sched_clock_cpu(int cpu) | |||
301 | if (unlikely(!sched_clock_running)) | 301 | if (unlikely(!sched_clock_running)) |
302 | return 0ull; | 302 | return 0ull; |
303 | 303 | ||
304 | preempt_disable(); | 304 | preempt_disable_notrace(); |
305 | scd = cpu_sdc(cpu); | 305 | scd = cpu_sdc(cpu); |
306 | 306 | ||
307 | if (cpu != smp_processor_id()) | 307 | if (cpu != smp_processor_id()) |
308 | clock = sched_clock_remote(scd); | 308 | clock = sched_clock_remote(scd); |
309 | else | 309 | else |
310 | clock = sched_clock_local(scd); | 310 | clock = sched_clock_local(scd); |
311 | preempt_enable(); | 311 | preempt_enable_notrace(); |
312 | 312 | ||
313 | return clock; | 313 | return clock; |
314 | } | 314 | } |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 6edbef296ece..f5c6635b806c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -3338,6 +3338,15 @@ recheck: | |||
3338 | return -EPERM; | 3338 | return -EPERM; |
3339 | } | 3339 | } |
3340 | 3340 | ||
3341 | /* | ||
3342 | * Can't set/change SCHED_DEADLINE policy at all for now | ||
3343 | * (safest behavior); in the future we would like to allow | ||
3344 | * unprivileged DL tasks to increase their relative deadline | ||
3345 | * or reduce their runtime (both ways reducing utilization) | ||
3346 | */ | ||
3347 | if (dl_policy(policy)) | ||
3348 | return -EPERM; | ||
3349 | |||
3341 | /* | 3350 | /* |
3342 | * Treat SCHED_IDLE as nice 20. Only allow a switch to | 3351 | * Treat SCHED_IDLE as nice 20. Only allow a switch to |
3343 | * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. | 3352 | * SCHED_NORMAL if the RLIMIT_NICE would normally permit it. |
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index b7a10048a32c..0e004a70f63a 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
@@ -293,7 +293,7 @@ fail: | |||
293 | * | 293 | * |
294 | * Returns 0 on success and non-zero otherwise. | 294 | * Returns 0 on success and non-zero otherwise. |
295 | */ | 295 | */ |
296 | long seccomp_attach_user_filter(char __user *user_filter) | 296 | static long seccomp_attach_user_filter(char __user *user_filter) |
297 | { | 297 | { |
298 | struct sock_fprog fprog; | 298 | struct sock_fprog fprog; |
299 | long ret = -EFAULT; | 299 | long ret = -EFAULT; |
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 84571e09c907..01fbae5b97b7 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
@@ -293,7 +293,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void * | |||
293 | */ | 293 | */ |
294 | smp_call_function_single(min(cpu1, cpu2), | 294 | smp_call_function_single(min(cpu1, cpu2), |
295 | &irq_cpu_stop_queue_work, | 295 | &irq_cpu_stop_queue_work, |
296 | &call_args, 0); | 296 | &call_args, 1); |
297 | lg_local_unlock(&stop_cpus_lock); | 297 | lg_local_unlock(&stop_cpus_lock); |
298 | preempt_enable(); | 298 | preempt_enable(); |
299 | 299 | ||
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 0aa4ce81bc16..5b40279ecd71 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -1435,7 +1435,8 @@ void update_wall_time(void) | |||
1435 | out: | 1435 | out: |
1436 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1436 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
1437 | if (clock_set) | 1437 | if (clock_set) |
1437 | clock_was_set(); | 1438 | /* Have to call _delayed version, since in irq context */ |
1439 | clock_was_set_delayed(); | ||
1439 | } | 1440 | } |
1440 | 1441 | ||
1441 | /** | 1442 | /** |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 815c878f409b..24c1f2382557 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -1600,15 +1600,31 @@ void trace_buffer_unlock_commit(struct ring_buffer *buffer, | |||
1600 | } | 1600 | } |
1601 | EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit); | 1601 | EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit); |
1602 | 1602 | ||
1603 | static struct ring_buffer *temp_buffer; | ||
1604 | |||
1603 | struct ring_buffer_event * | 1605 | struct ring_buffer_event * |
1604 | trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, | 1606 | trace_event_buffer_lock_reserve(struct ring_buffer **current_rb, |
1605 | struct ftrace_event_file *ftrace_file, | 1607 | struct ftrace_event_file *ftrace_file, |
1606 | int type, unsigned long len, | 1608 | int type, unsigned long len, |
1607 | unsigned long flags, int pc) | 1609 | unsigned long flags, int pc) |
1608 | { | 1610 | { |
1611 | struct ring_buffer_event *entry; | ||
1612 | |||
1609 | *current_rb = ftrace_file->tr->trace_buffer.buffer; | 1613 | *current_rb = ftrace_file->tr->trace_buffer.buffer; |
1610 | return trace_buffer_lock_reserve(*current_rb, | 1614 | entry = trace_buffer_lock_reserve(*current_rb, |
1611 | type, len, flags, pc); | 1615 | type, len, flags, pc); |
1616 | /* | ||
1617 | * If tracing is off, but we have triggers enabled | ||
1618 | * we still need to look at the event data. Use the temp_buffer | ||
1619 | * to store the trace event for the trigger to use. It's recursive | ||
1620 | * safe and will not be recorded anywhere. | ||
1621 | */ | ||
1622 | if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) { | ||
1623 | *current_rb = temp_buffer; | ||
1624 | entry = trace_buffer_lock_reserve(*current_rb, | ||
1625 | type, len, flags, pc); | ||
1626 | } | ||
1627 | return entry; | ||
1612 | } | 1628 | } |
1613 | EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); | 1629 | EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); |
1614 | 1630 | ||
@@ -6494,11 +6510,16 @@ __init static int tracer_alloc_buffers(void) | |||
6494 | 6510 | ||
6495 | raw_spin_lock_init(&global_trace.start_lock); | 6511 | raw_spin_lock_init(&global_trace.start_lock); |
6496 | 6512 | ||
6513 | /* Used for event triggers */ | ||
6514 | temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); | ||
6515 | if (!temp_buffer) | ||
6516 | goto out_free_cpumask; | ||
6517 | |||
6497 | /* TODO: make the number of buffers hot pluggable with CPUS */ | 6518 | /* TODO: make the number of buffers hot pluggable with CPUS */ |
6498 | if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { | 6519 | if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { |
6499 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); | 6520 | printk(KERN_ERR "tracer: failed to allocate ring buffer!\n"); |
6500 | WARN_ON(1); | 6521 | WARN_ON(1); |
6501 | goto out_free_cpumask; | 6522 | goto out_free_temp_buffer; |
6502 | } | 6523 | } |
6503 | 6524 | ||
6504 | if (global_trace.buffer_disabled) | 6525 | if (global_trace.buffer_disabled) |
@@ -6540,6 +6561,8 @@ __init static int tracer_alloc_buffers(void) | |||
6540 | 6561 | ||
6541 | return 0; | 6562 | return 0; |
6542 | 6563 | ||
6564 | out_free_temp_buffer: | ||
6565 | ring_buffer_free(temp_buffer); | ||
6543 | out_free_cpumask: | 6566 | out_free_cpumask: |
6544 | free_percpu(global_trace.trace_buffer.data); | 6567 | free_percpu(global_trace.trace_buffer.data); |
6545 | #ifdef CONFIG_TRACER_MAX_TRACE | 6568 | #ifdef CONFIG_TRACER_MAX_TRACE |
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index f3989ceb5cd5..7b16d40bd64d 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -27,12 +27,6 @@ | |||
27 | 27 | ||
28 | DEFINE_MUTEX(event_mutex); | 28 | DEFINE_MUTEX(event_mutex); |
29 | 29 | ||
30 | DEFINE_MUTEX(event_storage_mutex); | ||
31 | EXPORT_SYMBOL_GPL(event_storage_mutex); | ||
32 | |||
33 | char event_storage[EVENT_STORAGE_SIZE]; | ||
34 | EXPORT_SYMBOL_GPL(event_storage); | ||
35 | |||
36 | LIST_HEAD(ftrace_events); | 30 | LIST_HEAD(ftrace_events); |
37 | static LIST_HEAD(ftrace_common_fields); | 31 | static LIST_HEAD(ftrace_common_fields); |
38 | 32 | ||
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 7c3e3e72e2b6..ee0a5098ac43 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
@@ -95,15 +95,12 @@ static void __always_unused ____ftrace_check_##name(void) \ | |||
95 | #undef __array | 95 | #undef __array |
96 | #define __array(type, item, len) \ | 96 | #define __array(type, item, len) \ |
97 | do { \ | 97 | do { \ |
98 | char *type_str = #type"["__stringify(len)"]"; \ | ||
98 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | 99 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ |
99 | mutex_lock(&event_storage_mutex); \ | 100 | ret = trace_define_field(event_call, type_str, #item, \ |
100 | snprintf(event_storage, sizeof(event_storage), \ | ||
101 | "%s[%d]", #type, len); \ | ||
102 | ret = trace_define_field(event_call, event_storage, #item, \ | ||
103 | offsetof(typeof(field), item), \ | 101 | offsetof(typeof(field), item), \ |
104 | sizeof(field.item), \ | 102 | sizeof(field.item), \ |
105 | is_signed_type(type), filter_type); \ | 103 | is_signed_type(type), filter_type); \ |
106 | mutex_unlock(&event_storage_mutex); \ | ||
107 | if (ret) \ | 104 | if (ret) \ |
108 | return ret; \ | 105 | return ret; \ |
109 | } while (0); | 106 | } while (0); |
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig index 4dc1b990aa23..34fd931b54b5 100644 --- a/lib/fonts/Kconfig +++ b/lib/fonts/Kconfig | |||
@@ -9,7 +9,7 @@ if FONT_SUPPORT | |||
9 | 9 | ||
10 | config FONTS | 10 | config FONTS |
11 | bool "Select compiled-in fonts" | 11 | bool "Select compiled-in fonts" |
12 | depends on FRAMEBUFFER_CONSOLE | 12 | depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE |
13 | help | 13 | help |
14 | Say Y here if you would like to use fonts other than the default | 14 | Say Y here if you would like to use fonts other than the default |
15 | your frame buffer console usually use. | 15 | your frame buffer console usually use. |
@@ -22,7 +22,7 @@ config FONTS | |||
22 | 22 | ||
23 | config FONT_8x8 | 23 | config FONT_8x8 |
24 | bool "VGA 8x8 font" if FONTS | 24 | bool "VGA 8x8 font" if FONTS |
25 | depends on FRAMEBUFFER_CONSOLE | 25 | depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE |
26 | default y if !SPARC && !FONTS | 26 | default y if !SPARC && !FONTS |
27 | help | 27 | help |
28 | This is the "high resolution" font for the VGA frame buffer (the one | 28 | This is the "high resolution" font for the VGA frame buffer (the one |
@@ -45,7 +45,7 @@ config FONT_8x16 | |||
45 | 45 | ||
46 | config FONT_6x11 | 46 | config FONT_6x11 |
47 | bool "Mac console 6x11 font (not supported by all drivers)" if FONTS | 47 | bool "Mac console 6x11 font (not supported by all drivers)" if FONTS |
48 | depends on FRAMEBUFFER_CONSOLE | 48 | depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE |
49 | default y if !SPARC && !FONTS && MAC | 49 | default y if !SPARC && !FONTS && MAC |
50 | help | 50 | help |
51 | Small console font with Macintosh-style high-half glyphs. Some Mac | 51 | Small console font with Macintosh-style high-half glyphs. Some Mac |
diff --git a/lib/random32.c b/lib/random32.c index 1e5b2df44291..614896778700 100644 --- a/lib/random32.c +++ b/lib/random32.c | |||
@@ -244,8 +244,19 @@ static void __prandom_reseed(bool late) | |||
244 | static bool latch = false; | 244 | static bool latch = false; |
245 | static DEFINE_SPINLOCK(lock); | 245 | static DEFINE_SPINLOCK(lock); |
246 | 246 | ||
247 | /* Asking for random bytes might result in bytes getting | ||
248 | * moved into the nonblocking pool and thus marking it | ||
249 | * as initialized. In this case we would double back into | ||
250 | * this function and attempt to do a late reseed. | ||
251 | * Ignore the pointless attempt to reseed again if we're | ||
252 | * already waiting for bytes when the nonblocking pool | ||
253 | * got initialized. | ||
254 | */ | ||
255 | |||
247 | /* only allow initial seeding (late == false) once */ | 256 | /* only allow initial seeding (late == false) once */ |
248 | spin_lock_irqsave(&lock, flags); | 257 | if (!spin_trylock_irqsave(&lock, flags)) |
258 | return; | ||
259 | |||
249 | if (latch && !late) | 260 | if (latch && !late) |
250 | goto out; | 261 | goto out; |
251 | latch = true; | 262 | latch = true; |
diff --git a/mm/fremap.c b/mm/fremap.c index bbc4d660221a..34feba60a17e 100644 --- a/mm/fremap.c +++ b/mm/fremap.c | |||
@@ -23,28 +23,44 @@ | |||
23 | 23 | ||
24 | #include "internal.h" | 24 | #include "internal.h" |
25 | 25 | ||
26 | static int mm_counter(struct page *page) | ||
27 | { | ||
28 | return PageAnon(page) ? MM_ANONPAGES : MM_FILEPAGES; | ||
29 | } | ||
30 | |||
26 | static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma, | 31 | static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma, |
27 | unsigned long addr, pte_t *ptep) | 32 | unsigned long addr, pte_t *ptep) |
28 | { | 33 | { |
29 | pte_t pte = *ptep; | 34 | pte_t pte = *ptep; |
35 | struct page *page; | ||
36 | swp_entry_t entry; | ||
30 | 37 | ||
31 | if (pte_present(pte)) { | 38 | if (pte_present(pte)) { |
32 | struct page *page; | ||
33 | |||
34 | flush_cache_page(vma, addr, pte_pfn(pte)); | 39 | flush_cache_page(vma, addr, pte_pfn(pte)); |
35 | pte = ptep_clear_flush(vma, addr, ptep); | 40 | pte = ptep_clear_flush(vma, addr, ptep); |
36 | page = vm_normal_page(vma, addr, pte); | 41 | page = vm_normal_page(vma, addr, pte); |
37 | if (page) { | 42 | if (page) { |
38 | if (pte_dirty(pte)) | 43 | if (pte_dirty(pte)) |
39 | set_page_dirty(page); | 44 | set_page_dirty(page); |
45 | update_hiwater_rss(mm); | ||
46 | dec_mm_counter(mm, mm_counter(page)); | ||
40 | page_remove_rmap(page); | 47 | page_remove_rmap(page); |
41 | page_cache_release(page); | 48 | page_cache_release(page); |
49 | } | ||
50 | } else { /* zap_pte() is not called when pte_none() */ | ||
51 | if (!pte_file(pte)) { | ||
42 | update_hiwater_rss(mm); | 52 | update_hiwater_rss(mm); |
43 | dec_mm_counter(mm, MM_FILEPAGES); | 53 | entry = pte_to_swp_entry(pte); |
54 | if (non_swap_entry(entry)) { | ||
55 | if (is_migration_entry(entry)) { | ||
56 | page = migration_entry_to_page(entry); | ||
57 | dec_mm_counter(mm, mm_counter(page)); | ||
58 | } | ||
59 | } else { | ||
60 | free_swap_and_cache(entry); | ||
61 | dec_mm_counter(mm, MM_SWAPENTS); | ||
62 | } | ||
44 | } | 63 | } |
45 | } else { | ||
46 | if (!pte_file(pte)) | ||
47 | free_swap_and_cache(pte_to_swp_entry(pte)); | ||
48 | pte_clear_not_present_full(mm, addr, ptep, 0); | 64 | pte_clear_not_present_full(mm, addr, ptep, 0); |
49 | } | 65 | } |
50 | } | 66 | } |
diff --git a/mm/migrate.c b/mm/migrate.c index b494fdb9a636..bed48809e5d0 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -178,6 +178,37 @@ out: | |||
178 | } | 178 | } |
179 | 179 | ||
180 | /* | 180 | /* |
181 | * Congratulations to trinity for discovering this bug. | ||
182 | * mm/fremap.c's remap_file_pages() accepts any range within a single vma to | ||
183 | * convert that vma to VM_NONLINEAR; and generic_file_remap_pages() will then | ||
184 | * replace the specified range by file ptes throughout (maybe populated after). | ||
185 | * If page migration finds a page within that range, while it's still located | ||
186 | * by vma_interval_tree rather than lost to i_mmap_nonlinear list, no problem: | ||
187 | * zap_pte() clears the temporary migration entry before mmap_sem is dropped. | ||
188 | * But if the migrating page is in a part of the vma outside the range to be | ||
189 | * remapped, then it will not be cleared, and remove_migration_ptes() needs to | ||
190 | * deal with it. Fortunately, this part of the vma is of course still linear, | ||
191 | * so we just need to use linear location on the nonlinear list. | ||
192 | */ | ||
193 | static int remove_linear_migration_ptes_from_nonlinear(struct page *page, | ||
194 | struct address_space *mapping, void *arg) | ||
195 | { | ||
196 | struct vm_area_struct *vma; | ||
197 | /* hugetlbfs does not support remap_pages, so no huge pgoff worries */ | ||
198 | pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); | ||
199 | unsigned long addr; | ||
200 | |||
201 | list_for_each_entry(vma, | ||
202 | &mapping->i_mmap_nonlinear, shared.nonlinear) { | ||
203 | |||
204 | addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); | ||
205 | if (addr >= vma->vm_start && addr < vma->vm_end) | ||
206 | remove_migration_pte(page, vma, addr, arg); | ||
207 | } | ||
208 | return SWAP_AGAIN; | ||
209 | } | ||
210 | |||
211 | /* | ||
181 | * Get rid of all migration entries and replace them by | 212 | * Get rid of all migration entries and replace them by |
182 | * references to the indicated page. | 213 | * references to the indicated page. |
183 | */ | 214 | */ |
@@ -186,6 +217,7 @@ static void remove_migration_ptes(struct page *old, struct page *new) | |||
186 | struct rmap_walk_control rwc = { | 217 | struct rmap_walk_control rwc = { |
187 | .rmap_one = remove_migration_pte, | 218 | .rmap_one = remove_migration_pte, |
188 | .arg = old, | 219 | .arg = old, |
220 | .file_nonlinear = remove_linear_migration_ptes_from_nonlinear, | ||
189 | }; | 221 | }; |
190 | 222 | ||
191 | rmap_walk(new, &rwc); | 223 | rmap_walk(new, &rwc); |
@@ -1360,8 +1360,9 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, | |||
1360 | } | 1360 | } |
1361 | 1361 | ||
1362 | static int try_to_unmap_nonlinear(struct page *page, | 1362 | static int try_to_unmap_nonlinear(struct page *page, |
1363 | struct address_space *mapping, struct vm_area_struct *vma) | 1363 | struct address_space *mapping, void *arg) |
1364 | { | 1364 | { |
1365 | struct vm_area_struct *vma; | ||
1365 | int ret = SWAP_AGAIN; | 1366 | int ret = SWAP_AGAIN; |
1366 | unsigned long cursor; | 1367 | unsigned long cursor; |
1367 | unsigned long max_nl_cursor = 0; | 1368 | unsigned long max_nl_cursor = 0; |
@@ -1663,7 +1664,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) | |||
1663 | if (list_empty(&mapping->i_mmap_nonlinear)) | 1664 | if (list_empty(&mapping->i_mmap_nonlinear)) |
1664 | goto done; | 1665 | goto done; |
1665 | 1666 | ||
1666 | ret = rwc->file_nonlinear(page, mapping, vma); | 1667 | ret = rwc->file_nonlinear(page, mapping, rwc->arg); |
1667 | 1668 | ||
1668 | done: | 1669 | done: |
1669 | mutex_unlock(&mapping->i_mmap_mutex); | 1670 | mutex_unlock(&mapping->i_mmap_mutex); |
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index ec9909935fb6..175273f38cb1 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -307,9 +307,11 @@ static void vlan_sync_address(struct net_device *dev, | |||
307 | static void vlan_transfer_features(struct net_device *dev, | 307 | static void vlan_transfer_features(struct net_device *dev, |
308 | struct net_device *vlandev) | 308 | struct net_device *vlandev) |
309 | { | 309 | { |
310 | struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); | ||
311 | |||
310 | vlandev->gso_max_size = dev->gso_max_size; | 312 | vlandev->gso_max_size = dev->gso_max_size; |
311 | 313 | ||
312 | if (dev->features & NETIF_F_HW_VLAN_CTAG_TX) | 314 | if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto)) |
313 | vlandev->hard_header_len = dev->hard_header_len; | 315 | vlandev->hard_header_len = dev->hard_header_len; |
314 | else | 316 | else |
315 | vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN; | 317 | vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN; |
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 4b65aa492fb6..27bfe2f8e2de 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c | |||
@@ -578,6 +578,9 @@ static int vlan_dev_init(struct net_device *dev) | |||
578 | 578 | ||
579 | dev->features |= real_dev->vlan_features | NETIF_F_LLTX; | 579 | dev->features |= real_dev->vlan_features | NETIF_F_LLTX; |
580 | dev->gso_max_size = real_dev->gso_max_size; | 580 | dev->gso_max_size = real_dev->gso_max_size; |
581 | if (dev->features & NETIF_F_VLAN_FEATURES) | ||
582 | netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n"); | ||
583 | |||
581 | 584 | ||
582 | /* ipv6 shared card related stuff */ | 585 | /* ipv6 shared card related stuff */ |
583 | dev->dev_id = real_dev->dev_id; | 586 | dev->dev_id = real_dev->dev_id; |
@@ -592,7 +595,8 @@ static int vlan_dev_init(struct net_device *dev) | |||
592 | #endif | 595 | #endif |
593 | 596 | ||
594 | dev->needed_headroom = real_dev->needed_headroom; | 597 | dev->needed_headroom = real_dev->needed_headroom; |
595 | if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) { | 598 | if (vlan_hw_offload_capable(real_dev->features, |
599 | vlan_dev_priv(dev)->vlan_proto)) { | ||
596 | dev->header_ops = &vlan_passthru_header_ops; | 600 | dev->header_ops = &vlan_passthru_header_ops; |
597 | dev->hard_header_len = real_dev->hard_header_len; | 601 | dev->hard_header_len = real_dev->hard_header_len; |
598 | } else { | 602 | } else { |
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 63f0455c0bc3..8fe8b71b487a 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -49,14 +49,14 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) | |||
49 | brstats->tx_bytes += skb->len; | 49 | brstats->tx_bytes += skb->len; |
50 | u64_stats_update_end(&brstats->syncp); | 50 | u64_stats_update_end(&brstats->syncp); |
51 | 51 | ||
52 | if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid)) | ||
53 | goto out; | ||
54 | |||
55 | BR_INPUT_SKB_CB(skb)->brdev = dev; | 52 | BR_INPUT_SKB_CB(skb)->brdev = dev; |
56 | 53 | ||
57 | skb_reset_mac_header(skb); | 54 | skb_reset_mac_header(skb); |
58 | skb_pull(skb, ETH_HLEN); | 55 | skb_pull(skb, ETH_HLEN); |
59 | 56 | ||
57 | if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid)) | ||
58 | goto out; | ||
59 | |||
60 | if (is_broadcast_ether_addr(dest)) | 60 | if (is_broadcast_ether_addr(dest)) |
61 | br_flood_deliver(br, skb, false); | 61 | br_flood_deliver(br, skb, false); |
62 | else if (is_multicast_ether_addr(dest)) { | 62 | else if (is_multicast_ether_addr(dest)) { |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 28d544627422..d0cca3c65f01 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -29,6 +29,7 @@ static int br_pass_frame_up(struct sk_buff *skb) | |||
29 | struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; | 29 | struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; |
30 | struct net_bridge *br = netdev_priv(brdev); | 30 | struct net_bridge *br = netdev_priv(brdev); |
31 | struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats); | 31 | struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats); |
32 | struct net_port_vlans *pv; | ||
32 | 33 | ||
33 | u64_stats_update_begin(&brstats->syncp); | 34 | u64_stats_update_begin(&brstats->syncp); |
34 | brstats->rx_packets++; | 35 | brstats->rx_packets++; |
@@ -39,18 +40,18 @@ static int br_pass_frame_up(struct sk_buff *skb) | |||
39 | * packet is allowed except in promisc modue when someone | 40 | * packet is allowed except in promisc modue when someone |
40 | * may be running packet capture. | 41 | * may be running packet capture. |
41 | */ | 42 | */ |
43 | pv = br_get_vlan_info(br); | ||
42 | if (!(brdev->flags & IFF_PROMISC) && | 44 | if (!(brdev->flags & IFF_PROMISC) && |
43 | !br_allowed_egress(br, br_get_vlan_info(br), skb)) { | 45 | !br_allowed_egress(br, pv, skb)) { |
44 | kfree_skb(skb); | 46 | kfree_skb(skb); |
45 | return NET_RX_DROP; | 47 | return NET_RX_DROP; |
46 | } | 48 | } |
47 | 49 | ||
48 | skb = br_handle_vlan(br, br_get_vlan_info(br), skb); | ||
49 | if (!skb) | ||
50 | return NET_RX_DROP; | ||
51 | |||
52 | indev = skb->dev; | 50 | indev = skb->dev; |
53 | skb->dev = brdev; | 51 | skb->dev = brdev; |
52 | skb = br_handle_vlan(br, pv, skb); | ||
53 | if (!skb) | ||
54 | return NET_RX_DROP; | ||
54 | 55 | ||
55 | return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, | 56 | return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, |
56 | netif_receive_skb); | 57 | netif_receive_skb); |
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 8249ca764c79..f23c74b3a953 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c | |||
@@ -119,22 +119,6 @@ static void __vlan_flush(struct net_port_vlans *v) | |||
119 | kfree_rcu(v, rcu); | 119 | kfree_rcu(v, rcu); |
120 | } | 120 | } |
121 | 121 | ||
122 | /* Strip the tag from the packet. Will return skb with tci set 0. */ | ||
123 | static struct sk_buff *br_vlan_untag(struct sk_buff *skb) | ||
124 | { | ||
125 | if (skb->protocol != htons(ETH_P_8021Q)) { | ||
126 | skb->vlan_tci = 0; | ||
127 | return skb; | ||
128 | } | ||
129 | |||
130 | skb->vlan_tci = 0; | ||
131 | skb = vlan_untag(skb); | ||
132 | if (skb) | ||
133 | skb->vlan_tci = 0; | ||
134 | |||
135 | return skb; | ||
136 | } | ||
137 | |||
138 | struct sk_buff *br_handle_vlan(struct net_bridge *br, | 122 | struct sk_buff *br_handle_vlan(struct net_bridge *br, |
139 | const struct net_port_vlans *pv, | 123 | const struct net_port_vlans *pv, |
140 | struct sk_buff *skb) | 124 | struct sk_buff *skb) |
@@ -144,13 +128,27 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br, | |||
144 | if (!br->vlan_enabled) | 128 | if (!br->vlan_enabled) |
145 | goto out; | 129 | goto out; |
146 | 130 | ||
131 | /* Vlan filter table must be configured at this point. The | ||
132 | * only exception is the bridge is set in promisc mode and the | ||
133 | * packet is destined for the bridge device. In this case | ||
134 | * pass the packet as is. | ||
135 | */ | ||
136 | if (!pv) { | ||
137 | if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) { | ||
138 | goto out; | ||
139 | } else { | ||
140 | kfree_skb(skb); | ||
141 | return NULL; | ||
142 | } | ||
143 | } | ||
144 | |||
147 | /* At this point, we know that the frame was filtered and contains | 145 | /* At this point, we know that the frame was filtered and contains |
148 | * a valid vlan id. If the vlan id is set in the untagged bitmap, | 146 | * a valid vlan id. If the vlan id is set in the untagged bitmap, |
149 | * send untagged; otherwise, send tagged. | 147 | * send untagged; otherwise, send tagged. |
150 | */ | 148 | */ |
151 | br_vlan_get_tag(skb, &vid); | 149 | br_vlan_get_tag(skb, &vid); |
152 | if (test_bit(vid, pv->untagged_bitmap)) | 150 | if (test_bit(vid, pv->untagged_bitmap)) |
153 | skb = br_vlan_untag(skb); | 151 | skb->vlan_tci = 0; |
154 | 152 | ||
155 | out: | 153 | out: |
156 | return skb; | 154 | return skb; |
@@ -174,6 +172,18 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, | |||
174 | if (!v) | 172 | if (!v) |
175 | return false; | 173 | return false; |
176 | 174 | ||
175 | /* If vlan tx offload is disabled on bridge device and frame was | ||
176 | * sent from vlan device on the bridge device, it does not have | ||
177 | * HW accelerated vlan tag. | ||
178 | */ | ||
179 | if (unlikely(!vlan_tx_tag_present(skb) && | ||
180 | (skb->protocol == htons(ETH_P_8021Q) || | ||
181 | skb->protocol == htons(ETH_P_8021AD)))) { | ||
182 | skb = vlan_untag(skb); | ||
183 | if (unlikely(!skb)) | ||
184 | return false; | ||
185 | } | ||
186 | |||
177 | err = br_vlan_get_tag(skb, vid); | 187 | err = br_vlan_get_tag(skb, vid); |
178 | if (!*vid) { | 188 | if (!*vid) { |
179 | u16 pvid = br_get_pvid(v); | 189 | u16 pvid = br_get_pvid(v); |
diff --git a/net/core/dev.c b/net/core/dev.c index b1b0c8d4d7df..45fa2f11f84d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2286,7 +2286,7 @@ out: | |||
2286 | } | 2286 | } |
2287 | EXPORT_SYMBOL(skb_checksum_help); | 2287 | EXPORT_SYMBOL(skb_checksum_help); |
2288 | 2288 | ||
2289 | __be16 skb_network_protocol(struct sk_buff *skb) | 2289 | __be16 skb_network_protocol(struct sk_buff *skb, int *depth) |
2290 | { | 2290 | { |
2291 | __be16 type = skb->protocol; | 2291 | __be16 type = skb->protocol; |
2292 | int vlan_depth = ETH_HLEN; | 2292 | int vlan_depth = ETH_HLEN; |
@@ -2313,6 +2313,8 @@ __be16 skb_network_protocol(struct sk_buff *skb) | |||
2313 | vlan_depth += VLAN_HLEN; | 2313 | vlan_depth += VLAN_HLEN; |
2314 | } | 2314 | } |
2315 | 2315 | ||
2316 | *depth = vlan_depth; | ||
2317 | |||
2316 | return type; | 2318 | return type; |
2317 | } | 2319 | } |
2318 | 2320 | ||
@@ -2326,12 +2328,13 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb, | |||
2326 | { | 2328 | { |
2327 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); | 2329 | struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); |
2328 | struct packet_offload *ptype; | 2330 | struct packet_offload *ptype; |
2329 | __be16 type = skb_network_protocol(skb); | 2331 | int vlan_depth = skb->mac_len; |
2332 | __be16 type = skb_network_protocol(skb, &vlan_depth); | ||
2330 | 2333 | ||
2331 | if (unlikely(!type)) | 2334 | if (unlikely(!type)) |
2332 | return ERR_PTR(-EINVAL); | 2335 | return ERR_PTR(-EINVAL); |
2333 | 2336 | ||
2334 | __skb_pull(skb, skb->mac_len); | 2337 | __skb_pull(skb, vlan_depth); |
2335 | 2338 | ||
2336 | rcu_read_lock(); | 2339 | rcu_read_lock(); |
2337 | list_for_each_entry_rcu(ptype, &offload_base, list) { | 2340 | list_for_each_entry_rcu(ptype, &offload_base, list) { |
@@ -2498,8 +2501,10 @@ static netdev_features_t harmonize_features(struct sk_buff *skb, | |||
2498 | const struct net_device *dev, | 2501 | const struct net_device *dev, |
2499 | netdev_features_t features) | 2502 | netdev_features_t features) |
2500 | { | 2503 | { |
2504 | int tmp; | ||
2505 | |||
2501 | if (skb->ip_summed != CHECKSUM_NONE && | 2506 | if (skb->ip_summed != CHECKSUM_NONE && |
2502 | !can_checksum_protocol(features, skb_network_protocol(skb))) { | 2507 | !can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) { |
2503 | features &= ~NETIF_F_ALL_CSUM; | 2508 | features &= ~NETIF_F_ALL_CSUM; |
2504 | } else if (illegal_highdma(dev, skb)) { | 2509 | } else if (illegal_highdma(dev, skb)) { |
2505 | features &= ~NETIF_F_SG; | 2510 | features &= ~NETIF_F_SG; |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index a664f7829a6d..df9e6b1a9759 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -742,7 +742,7 @@ static bool pkt_is_ns(struct sk_buff *skb) | |||
742 | struct nd_msg *msg; | 742 | struct nd_msg *msg; |
743 | struct ipv6hdr *hdr; | 743 | struct ipv6hdr *hdr; |
744 | 744 | ||
745 | if (skb->protocol != htons(ETH_P_ARP)) | 745 | if (skb->protocol != htons(ETH_P_IPV6)) |
746 | return false; | 746 | return false; |
747 | if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg))) | 747 | if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg))) |
748 | return false; | 748 | return false; |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 1a0dac2ef9ad..120eecc0f5a4 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -2121,12 +2121,13 @@ EXPORT_SYMBOL(rtmsg_ifinfo); | |||
2121 | static int nlmsg_populate_fdb_fill(struct sk_buff *skb, | 2121 | static int nlmsg_populate_fdb_fill(struct sk_buff *skb, |
2122 | struct net_device *dev, | 2122 | struct net_device *dev, |
2123 | u8 *addr, u32 pid, u32 seq, | 2123 | u8 *addr, u32 pid, u32 seq, |
2124 | int type, unsigned int flags) | 2124 | int type, unsigned int flags, |
2125 | int nlflags) | ||
2125 | { | 2126 | { |
2126 | struct nlmsghdr *nlh; | 2127 | struct nlmsghdr *nlh; |
2127 | struct ndmsg *ndm; | 2128 | struct ndmsg *ndm; |
2128 | 2129 | ||
2129 | nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI); | 2130 | nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags); |
2130 | if (!nlh) | 2131 | if (!nlh) |
2131 | return -EMSGSIZE; | 2132 | return -EMSGSIZE; |
2132 | 2133 | ||
@@ -2164,7 +2165,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type) | |||
2164 | if (!skb) | 2165 | if (!skb) |
2165 | goto errout; | 2166 | goto errout; |
2166 | 2167 | ||
2167 | err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF); | 2168 | err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0); |
2168 | if (err < 0) { | 2169 | if (err < 0) { |
2169 | kfree_skb(skb); | 2170 | kfree_skb(skb); |
2170 | goto errout; | 2171 | goto errout; |
@@ -2389,7 +2390,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb, | |||
2389 | 2390 | ||
2390 | err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, | 2391 | err = nlmsg_populate_fdb_fill(skb, dev, ha->addr, |
2391 | portid, seq, | 2392 | portid, seq, |
2392 | RTM_NEWNEIGH, NTF_SELF); | 2393 | RTM_NEWNEIGH, NTF_SELF, |
2394 | NLM_F_MULTI); | ||
2393 | if (err < 0) | 2395 | if (err < 0) |
2394 | return err; | 2396 | return err; |
2395 | skip: | 2397 | skip: |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 869c7afe3b07..90b96a11b974 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -2127,25 +2127,31 @@ EXPORT_SYMBOL_GPL(skb_zerocopy_headlen); | |||
2127 | * | 2127 | * |
2128 | * The `hlen` as calculated by skb_zerocopy_headlen() specifies the | 2128 | * The `hlen` as calculated by skb_zerocopy_headlen() specifies the |
2129 | * headroom in the `to` buffer. | 2129 | * headroom in the `to` buffer. |
2130 | * | ||
2131 | * Return value: | ||
2132 | * 0: everything is OK | ||
2133 | * -ENOMEM: couldn't orphan frags of @from due to lack of memory | ||
2134 | * -EFAULT: skb_copy_bits() found some problem with skb geometry | ||
2130 | */ | 2135 | */ |
2131 | void | 2136 | int |
2132 | skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen) | 2137 | skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) |
2133 | { | 2138 | { |
2134 | int i, j = 0; | 2139 | int i, j = 0; |
2135 | int plen = 0; /* length of skb->head fragment */ | 2140 | int plen = 0; /* length of skb->head fragment */ |
2141 | int ret; | ||
2136 | struct page *page; | 2142 | struct page *page; |
2137 | unsigned int offset; | 2143 | unsigned int offset; |
2138 | 2144 | ||
2139 | BUG_ON(!from->head_frag && !hlen); | 2145 | BUG_ON(!from->head_frag && !hlen); |
2140 | 2146 | ||
2141 | /* dont bother with small payloads */ | 2147 | /* dont bother with small payloads */ |
2142 | if (len <= skb_tailroom(to)) { | 2148 | if (len <= skb_tailroom(to)) |
2143 | skb_copy_bits(from, 0, skb_put(to, len), len); | 2149 | return skb_copy_bits(from, 0, skb_put(to, len), len); |
2144 | return; | ||
2145 | } | ||
2146 | 2150 | ||
2147 | if (hlen) { | 2151 | if (hlen) { |
2148 | skb_copy_bits(from, 0, skb_put(to, hlen), hlen); | 2152 | ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); |
2153 | if (unlikely(ret)) | ||
2154 | return ret; | ||
2149 | len -= hlen; | 2155 | len -= hlen; |
2150 | } else { | 2156 | } else { |
2151 | plen = min_t(int, skb_headlen(from), len); | 2157 | plen = min_t(int, skb_headlen(from), len); |
@@ -2163,6 +2169,11 @@ skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen) | |||
2163 | to->len += len + plen; | 2169 | to->len += len + plen; |
2164 | to->data_len += len + plen; | 2170 | to->data_len += len + plen; |
2165 | 2171 | ||
2172 | if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { | ||
2173 | skb_tx_error(from); | ||
2174 | return -ENOMEM; | ||
2175 | } | ||
2176 | |||
2166 | for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { | 2177 | for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { |
2167 | if (!len) | 2178 | if (!len) |
2168 | break; | 2179 | break; |
@@ -2173,6 +2184,8 @@ skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen) | |||
2173 | j++; | 2184 | j++; |
2174 | } | 2185 | } |
2175 | skb_shinfo(to)->nr_frags = j; | 2186 | skb_shinfo(to)->nr_frags = j; |
2187 | |||
2188 | return 0; | ||
2176 | } | 2189 | } |
2177 | EXPORT_SYMBOL_GPL(skb_zerocopy); | 2190 | EXPORT_SYMBOL_GPL(skb_zerocopy); |
2178 | 2191 | ||
@@ -2866,8 +2879,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, | |||
2866 | int err = -ENOMEM; | 2879 | int err = -ENOMEM; |
2867 | int i = 0; | 2880 | int i = 0; |
2868 | int pos; | 2881 | int pos; |
2882 | int dummy; | ||
2869 | 2883 | ||
2870 | proto = skb_network_protocol(head_skb); | 2884 | proto = skb_network_protocol(head_skb, &dummy); |
2871 | if (unlikely(!proto)) | 2885 | if (unlikely(!proto)) |
2872 | return ERR_PTR(-EINVAL); | 2886 | return ERR_PTR(-EINVAL); |
2873 | 2887 | ||
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index 1863422fb7d5..250be7421ab3 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c | |||
@@ -182,6 +182,14 @@ static int gre_cisco_rcv(struct sk_buff *skb) | |||
182 | int i; | 182 | int i; |
183 | bool csum_err = false; | 183 | bool csum_err = false; |
184 | 184 | ||
185 | #ifdef CONFIG_NET_IPGRE_BROADCAST | ||
186 | if (ipv4_is_multicast(ip_hdr(skb)->daddr)) { | ||
187 | /* Looped back packet, drop it! */ | ||
188 | if (rt_is_output_route(skb_rtable(skb))) | ||
189 | goto drop; | ||
190 | } | ||
191 | #endif | ||
192 | |||
185 | if (parse_gre_header(skb, &tpi, &csum_err) < 0) | 193 | if (parse_gre_header(skb, &tpi, &csum_err) < 0) |
186 | goto drop; | 194 | goto drop; |
187 | 195 | ||
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 78a89e61925d..a82a22d8f77f 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
@@ -416,9 +416,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, | |||
416 | 416 | ||
417 | #ifdef CONFIG_NET_IPGRE_BROADCAST | 417 | #ifdef CONFIG_NET_IPGRE_BROADCAST |
418 | if (ipv4_is_multicast(iph->daddr)) { | 418 | if (ipv4_is_multicast(iph->daddr)) { |
419 | /* Looped back packet, drop it! */ | ||
420 | if (rt_is_output_route(skb_rtable(skb))) | ||
421 | goto drop; | ||
422 | tunnel->dev->stats.multicast++; | 419 | tunnel->dev->stats.multicast++; |
423 | skb->pkt_type = PACKET_BROADCAST; | 420 | skb->pkt_type = PACKET_BROADCAST; |
424 | } | 421 | } |
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 6f847dd56dbc..8d69626f2206 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c | |||
@@ -108,6 +108,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto) | |||
108 | nf_reset(skb); | 108 | nf_reset(skb); |
109 | secpath_reset(skb); | 109 | secpath_reset(skb); |
110 | skb_clear_hash_if_not_l4(skb); | 110 | skb_clear_hash_if_not_l4(skb); |
111 | skb_dst_drop(skb); | ||
111 | skb->vlan_tci = 0; | 112 | skb->vlan_tci = 0; |
112 | skb_set_queue_mapping(skb, 0); | 113 | skb_set_queue_mapping(skb, 0); |
113 | skb->pkt_type = PACKET_HOST; | 114 | skb->pkt_type = PACKET_HOST; |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index b9b3472975ba..28863570dd60 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -2255,13 +2255,14 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, | |||
2255 | } | 2255 | } |
2256 | 2256 | ||
2257 | static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, | 2257 | static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, |
2258 | u32 portid, u32 seq, struct mfc_cache *c, int cmd) | 2258 | u32 portid, u32 seq, struct mfc_cache *c, int cmd, |
2259 | int flags) | ||
2259 | { | 2260 | { |
2260 | struct nlmsghdr *nlh; | 2261 | struct nlmsghdr *nlh; |
2261 | struct rtmsg *rtm; | 2262 | struct rtmsg *rtm; |
2262 | int err; | 2263 | int err; |
2263 | 2264 | ||
2264 | nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI); | 2265 | nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags); |
2265 | if (nlh == NULL) | 2266 | if (nlh == NULL) |
2266 | return -EMSGSIZE; | 2267 | return -EMSGSIZE; |
2267 | 2268 | ||
@@ -2329,7 +2330,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc, | |||
2329 | if (skb == NULL) | 2330 | if (skb == NULL) |
2330 | goto errout; | 2331 | goto errout; |
2331 | 2332 | ||
2332 | err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd); | 2333 | err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0); |
2333 | if (err < 0) | 2334 | if (err < 0) |
2334 | goto errout; | 2335 | goto errout; |
2335 | 2336 | ||
@@ -2368,7 +2369,8 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) | |||
2368 | if (ipmr_fill_mroute(mrt, skb, | 2369 | if (ipmr_fill_mroute(mrt, skb, |
2369 | NETLINK_CB(cb->skb).portid, | 2370 | NETLINK_CB(cb->skb).portid, |
2370 | cb->nlh->nlmsg_seq, | 2371 | cb->nlh->nlmsg_seq, |
2371 | mfc, RTM_NEWROUTE) < 0) | 2372 | mfc, RTM_NEWROUTE, |
2373 | NLM_F_MULTI) < 0) | ||
2372 | goto done; | 2374 | goto done; |
2373 | next_entry: | 2375 | next_entry: |
2374 | e++; | 2376 | e++; |
@@ -2382,7 +2384,8 @@ next_entry: | |||
2382 | if (ipmr_fill_mroute(mrt, skb, | 2384 | if (ipmr_fill_mroute(mrt, skb, |
2383 | NETLINK_CB(cb->skb).portid, | 2385 | NETLINK_CB(cb->skb).portid, |
2384 | cb->nlh->nlmsg_seq, | 2386 | cb->nlh->nlmsg_seq, |
2385 | mfc, RTM_NEWROUTE) < 0) { | 2387 | mfc, RTM_NEWROUTE, |
2388 | NLM_F_MULTI) < 0) { | ||
2386 | spin_unlock_bh(&mfc_unres_lock); | 2389 | spin_unlock_bh(&mfc_unres_lock); |
2387 | goto done; | 2390 | goto done; |
2388 | } | 2391 | } |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3cf976510497..1e4eac779f51 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -2628,7 +2628,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw, | |||
2628 | { | 2628 | { |
2629 | __be32 dest, src; | 2629 | __be32 dest, src; |
2630 | __u16 destp, srcp; | 2630 | __u16 destp, srcp; |
2631 | long delta = tw->tw_ttd - jiffies; | 2631 | s32 delta = tw->tw_ttd - inet_tw_time_stamp(); |
2632 | 2632 | ||
2633 | dest = tw->tw_daddr; | 2633 | dest = tw->tw_daddr; |
2634 | src = tw->tw_rcv_saddr; | 2634 | src = tw->tw_rcv_saddr; |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 344e972426df..6c7fa0853fc7 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -133,10 +133,12 @@ static int ipv6_count_addresses(struct inet6_dev *idev); | |||
133 | static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE]; | 133 | static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE]; |
134 | static DEFINE_SPINLOCK(addrconf_hash_lock); | 134 | static DEFINE_SPINLOCK(addrconf_hash_lock); |
135 | 135 | ||
136 | static void addrconf_verify(unsigned long); | 136 | static void addrconf_verify(void); |
137 | static void addrconf_verify_rtnl(void); | ||
138 | static void addrconf_verify_work(struct work_struct *); | ||
137 | 139 | ||
138 | static DEFINE_TIMER(addr_chk_timer, addrconf_verify, 0, 0); | 140 | static struct workqueue_struct *addrconf_wq; |
139 | static DEFINE_SPINLOCK(addrconf_verify_lock); | 141 | static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work); |
140 | 142 | ||
141 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp); | 143 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp); |
142 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); | 144 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); |
@@ -151,7 +153,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, | |||
151 | u32 flags, u32 noflags); | 153 | u32 flags, u32 noflags); |
152 | 154 | ||
153 | static void addrconf_dad_start(struct inet6_ifaddr *ifp); | 155 | static void addrconf_dad_start(struct inet6_ifaddr *ifp); |
154 | static void addrconf_dad_timer(unsigned long data); | 156 | static void addrconf_dad_work(struct work_struct *w); |
155 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp); | 157 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp); |
156 | static void addrconf_dad_run(struct inet6_dev *idev); | 158 | static void addrconf_dad_run(struct inet6_dev *idev); |
157 | static void addrconf_rs_timer(unsigned long data); | 159 | static void addrconf_rs_timer(unsigned long data); |
@@ -247,9 +249,9 @@ static void addrconf_del_rs_timer(struct inet6_dev *idev) | |||
247 | __in6_dev_put(idev); | 249 | __in6_dev_put(idev); |
248 | } | 250 | } |
249 | 251 | ||
250 | static void addrconf_del_dad_timer(struct inet6_ifaddr *ifp) | 252 | static void addrconf_del_dad_work(struct inet6_ifaddr *ifp) |
251 | { | 253 | { |
252 | if (del_timer(&ifp->dad_timer)) | 254 | if (cancel_delayed_work(&ifp->dad_work)) |
253 | __in6_ifa_put(ifp); | 255 | __in6_ifa_put(ifp); |
254 | } | 256 | } |
255 | 257 | ||
@@ -261,12 +263,12 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev, | |||
261 | mod_timer(&idev->rs_timer, jiffies + when); | 263 | mod_timer(&idev->rs_timer, jiffies + when); |
262 | } | 264 | } |
263 | 265 | ||
264 | static void addrconf_mod_dad_timer(struct inet6_ifaddr *ifp, | 266 | static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp, |
265 | unsigned long when) | 267 | unsigned long delay) |
266 | { | 268 | { |
267 | if (!timer_pending(&ifp->dad_timer)) | 269 | if (!delayed_work_pending(&ifp->dad_work)) |
268 | in6_ifa_hold(ifp); | 270 | in6_ifa_hold(ifp); |
269 | mod_timer(&ifp->dad_timer, jiffies + when); | 271 | mod_delayed_work(addrconf_wq, &ifp->dad_work, delay); |
270 | } | 272 | } |
271 | 273 | ||
272 | static int snmp6_alloc_dev(struct inet6_dev *idev) | 274 | static int snmp6_alloc_dev(struct inet6_dev *idev) |
@@ -751,8 +753,9 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) | |||
751 | 753 | ||
752 | in6_dev_put(ifp->idev); | 754 | in6_dev_put(ifp->idev); |
753 | 755 | ||
754 | if (del_timer(&ifp->dad_timer)) | 756 | if (cancel_delayed_work(&ifp->dad_work)) |
755 | pr_notice("Timer is still running, when freeing ifa=%p\n", ifp); | 757 | pr_notice("delayed DAD work was pending while freeing ifa=%p\n", |
758 | ifp); | ||
756 | 759 | ||
757 | if (ifp->state != INET6_IFADDR_STATE_DEAD) { | 760 | if (ifp->state != INET6_IFADDR_STATE_DEAD) { |
758 | pr_warn("Freeing alive inet6 address %p\n", ifp); | 761 | pr_warn("Freeing alive inet6 address %p\n", ifp); |
@@ -849,8 +852,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, | |||
849 | 852 | ||
850 | spin_lock_init(&ifa->lock); | 853 | spin_lock_init(&ifa->lock); |
851 | spin_lock_init(&ifa->state_lock); | 854 | spin_lock_init(&ifa->state_lock); |
852 | setup_timer(&ifa->dad_timer, addrconf_dad_timer, | 855 | INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work); |
853 | (unsigned long)ifa); | ||
854 | INIT_HLIST_NODE(&ifa->addr_lst); | 856 | INIT_HLIST_NODE(&ifa->addr_lst); |
855 | ifa->scope = scope; | 857 | ifa->scope = scope; |
856 | ifa->prefix_len = pfxlen; | 858 | ifa->prefix_len = pfxlen; |
@@ -990,6 +992,8 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
990 | enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP; | 992 | enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP; |
991 | unsigned long expires; | 993 | unsigned long expires; |
992 | 994 | ||
995 | ASSERT_RTNL(); | ||
996 | |||
993 | spin_lock_bh(&ifp->state_lock); | 997 | spin_lock_bh(&ifp->state_lock); |
994 | state = ifp->state; | 998 | state = ifp->state; |
995 | ifp->state = INET6_IFADDR_STATE_DEAD; | 999 | ifp->state = INET6_IFADDR_STATE_DEAD; |
@@ -1021,7 +1025,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) | |||
1021 | 1025 | ||
1022 | write_unlock_bh(&ifp->idev->lock); | 1026 | write_unlock_bh(&ifp->idev->lock); |
1023 | 1027 | ||
1024 | addrconf_del_dad_timer(ifp); | 1028 | addrconf_del_dad_work(ifp); |
1025 | 1029 | ||
1026 | ipv6_ifa_notify(RTM_DELADDR, ifp); | 1030 | ipv6_ifa_notify(RTM_DELADDR, ifp); |
1027 | 1031 | ||
@@ -1604,7 +1608,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) | |||
1604 | { | 1608 | { |
1605 | if (ifp->flags&IFA_F_PERMANENT) { | 1609 | if (ifp->flags&IFA_F_PERMANENT) { |
1606 | spin_lock_bh(&ifp->lock); | 1610 | spin_lock_bh(&ifp->lock); |
1607 | addrconf_del_dad_timer(ifp); | 1611 | addrconf_del_dad_work(ifp); |
1608 | ifp->flags |= IFA_F_TENTATIVE; | 1612 | ifp->flags |= IFA_F_TENTATIVE; |
1609 | if (dad_failed) | 1613 | if (dad_failed) |
1610 | ifp->flags |= IFA_F_DADFAILED; | 1614 | ifp->flags |= IFA_F_DADFAILED; |
@@ -1625,20 +1629,21 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed) | |||
1625 | spin_unlock_bh(&ifp->lock); | 1629 | spin_unlock_bh(&ifp->lock); |
1626 | } | 1630 | } |
1627 | ipv6_del_addr(ifp); | 1631 | ipv6_del_addr(ifp); |
1628 | } else | 1632 | } else { |
1629 | ipv6_del_addr(ifp); | 1633 | ipv6_del_addr(ifp); |
1634 | } | ||
1630 | } | 1635 | } |
1631 | 1636 | ||
1632 | static int addrconf_dad_end(struct inet6_ifaddr *ifp) | 1637 | static int addrconf_dad_end(struct inet6_ifaddr *ifp) |
1633 | { | 1638 | { |
1634 | int err = -ENOENT; | 1639 | int err = -ENOENT; |
1635 | 1640 | ||
1636 | spin_lock(&ifp->state_lock); | 1641 | spin_lock_bh(&ifp->state_lock); |
1637 | if (ifp->state == INET6_IFADDR_STATE_DAD) { | 1642 | if (ifp->state == INET6_IFADDR_STATE_DAD) { |
1638 | ifp->state = INET6_IFADDR_STATE_POSTDAD; | 1643 | ifp->state = INET6_IFADDR_STATE_POSTDAD; |
1639 | err = 0; | 1644 | err = 0; |
1640 | } | 1645 | } |
1641 | spin_unlock(&ifp->state_lock); | 1646 | spin_unlock_bh(&ifp->state_lock); |
1642 | 1647 | ||
1643 | return err; | 1648 | return err; |
1644 | } | 1649 | } |
@@ -1671,7 +1676,12 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp) | |||
1671 | } | 1676 | } |
1672 | } | 1677 | } |
1673 | 1678 | ||
1674 | addrconf_dad_stop(ifp, 1); | 1679 | spin_lock_bh(&ifp->state_lock); |
1680 | /* transition from _POSTDAD to _ERRDAD */ | ||
1681 | ifp->state = INET6_IFADDR_STATE_ERRDAD; | ||
1682 | spin_unlock_bh(&ifp->state_lock); | ||
1683 | |||
1684 | addrconf_mod_dad_work(ifp, 0); | ||
1675 | } | 1685 | } |
1676 | 1686 | ||
1677 | /* Join to solicited addr multicast group. */ | 1687 | /* Join to solicited addr multicast group. */ |
@@ -1680,6 +1690,8 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr) | |||
1680 | { | 1690 | { |
1681 | struct in6_addr maddr; | 1691 | struct in6_addr maddr; |
1682 | 1692 | ||
1693 | ASSERT_RTNL(); | ||
1694 | |||
1683 | if (dev->flags&(IFF_LOOPBACK|IFF_NOARP)) | 1695 | if (dev->flags&(IFF_LOOPBACK|IFF_NOARP)) |
1684 | return; | 1696 | return; |
1685 | 1697 | ||
@@ -1691,6 +1703,8 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) | |||
1691 | { | 1703 | { |
1692 | struct in6_addr maddr; | 1704 | struct in6_addr maddr; |
1693 | 1705 | ||
1706 | ASSERT_RTNL(); | ||
1707 | |||
1694 | if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP)) | 1708 | if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP)) |
1695 | return; | 1709 | return; |
1696 | 1710 | ||
@@ -1701,6 +1715,9 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) | |||
1701 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp) | 1715 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp) |
1702 | { | 1716 | { |
1703 | struct in6_addr addr; | 1717 | struct in6_addr addr; |
1718 | |||
1719 | ASSERT_RTNL(); | ||
1720 | |||
1704 | if (ifp->prefix_len >= 127) /* RFC 6164 */ | 1721 | if (ifp->prefix_len >= 127) /* RFC 6164 */ |
1705 | return; | 1722 | return; |
1706 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); | 1723 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); |
@@ -1712,6 +1729,9 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp) | |||
1712 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) | 1729 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) |
1713 | { | 1730 | { |
1714 | struct in6_addr addr; | 1731 | struct in6_addr addr; |
1732 | |||
1733 | ASSERT_RTNL(); | ||
1734 | |||
1715 | if (ifp->prefix_len >= 127) /* RFC 6164 */ | 1735 | if (ifp->prefix_len >= 127) /* RFC 6164 */ |
1716 | return; | 1736 | return; |
1717 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); | 1737 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); |
@@ -2271,11 +2291,13 @@ ok: | |||
2271 | return; | 2291 | return; |
2272 | } | 2292 | } |
2273 | 2293 | ||
2274 | ifp->flags |= IFA_F_MANAGETEMPADDR; | ||
2275 | update_lft = 0; | 2294 | update_lft = 0; |
2276 | create = 1; | 2295 | create = 1; |
2296 | spin_lock_bh(&ifp->lock); | ||
2297 | ifp->flags |= IFA_F_MANAGETEMPADDR; | ||
2277 | ifp->cstamp = jiffies; | 2298 | ifp->cstamp = jiffies; |
2278 | ifp->tokenized = tokenized; | 2299 | ifp->tokenized = tokenized; |
2300 | spin_unlock_bh(&ifp->lock); | ||
2279 | addrconf_dad_start(ifp); | 2301 | addrconf_dad_start(ifp); |
2280 | } | 2302 | } |
2281 | 2303 | ||
@@ -2326,7 +2348,7 @@ ok: | |||
2326 | create, now); | 2348 | create, now); |
2327 | 2349 | ||
2328 | in6_ifa_put(ifp); | 2350 | in6_ifa_put(ifp); |
2329 | addrconf_verify(0); | 2351 | addrconf_verify(); |
2330 | } | 2352 | } |
2331 | } | 2353 | } |
2332 | inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo); | 2354 | inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo); |
@@ -2475,7 +2497,7 @@ static int inet6_addr_add(struct net *net, int ifindex, | |||
2475 | manage_tempaddrs(idev, ifp, valid_lft, prefered_lft, | 2497 | manage_tempaddrs(idev, ifp, valid_lft, prefered_lft, |
2476 | true, jiffies); | 2498 | true, jiffies); |
2477 | in6_ifa_put(ifp); | 2499 | in6_ifa_put(ifp); |
2478 | addrconf_verify(0); | 2500 | addrconf_verify_rtnl(); |
2479 | return 0; | 2501 | return 0; |
2480 | } | 2502 | } |
2481 | 2503 | ||
@@ -3011,7 +3033,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
3011 | hlist_for_each_entry_rcu(ifa, h, addr_lst) { | 3033 | hlist_for_each_entry_rcu(ifa, h, addr_lst) { |
3012 | if (ifa->idev == idev) { | 3034 | if (ifa->idev == idev) { |
3013 | hlist_del_init_rcu(&ifa->addr_lst); | 3035 | hlist_del_init_rcu(&ifa->addr_lst); |
3014 | addrconf_del_dad_timer(ifa); | 3036 | addrconf_del_dad_work(ifa); |
3015 | goto restart; | 3037 | goto restart; |
3016 | } | 3038 | } |
3017 | } | 3039 | } |
@@ -3049,7 +3071,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) | |||
3049 | while (!list_empty(&idev->addr_list)) { | 3071 | while (!list_empty(&idev->addr_list)) { |
3050 | ifa = list_first_entry(&idev->addr_list, | 3072 | ifa = list_first_entry(&idev->addr_list, |
3051 | struct inet6_ifaddr, if_list); | 3073 | struct inet6_ifaddr, if_list); |
3052 | addrconf_del_dad_timer(ifa); | 3074 | addrconf_del_dad_work(ifa); |
3053 | 3075 | ||
3054 | list_del(&ifa->if_list); | 3076 | list_del(&ifa->if_list); |
3055 | 3077 | ||
@@ -3148,10 +3170,10 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp) | |||
3148 | rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1); | 3170 | rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1); |
3149 | 3171 | ||
3150 | ifp->dad_probes = idev->cnf.dad_transmits; | 3172 | ifp->dad_probes = idev->cnf.dad_transmits; |
3151 | addrconf_mod_dad_timer(ifp, rand_num); | 3173 | addrconf_mod_dad_work(ifp, rand_num); |
3152 | } | 3174 | } |
3153 | 3175 | ||
3154 | static void addrconf_dad_start(struct inet6_ifaddr *ifp) | 3176 | static void addrconf_dad_begin(struct inet6_ifaddr *ifp) |
3155 | { | 3177 | { |
3156 | struct inet6_dev *idev = ifp->idev; | 3178 | struct inet6_dev *idev = ifp->idev; |
3157 | struct net_device *dev = idev->dev; | 3179 | struct net_device *dev = idev->dev; |
@@ -3203,25 +3225,68 @@ out: | |||
3203 | read_unlock_bh(&idev->lock); | 3225 | read_unlock_bh(&idev->lock); |
3204 | } | 3226 | } |
3205 | 3227 | ||
3206 | static void addrconf_dad_timer(unsigned long data) | 3228 | static void addrconf_dad_start(struct inet6_ifaddr *ifp) |
3207 | { | 3229 | { |
3208 | struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data; | 3230 | bool begin_dad = false; |
3231 | |||
3232 | spin_lock_bh(&ifp->state_lock); | ||
3233 | if (ifp->state != INET6_IFADDR_STATE_DEAD) { | ||
3234 | ifp->state = INET6_IFADDR_STATE_PREDAD; | ||
3235 | begin_dad = true; | ||
3236 | } | ||
3237 | spin_unlock_bh(&ifp->state_lock); | ||
3238 | |||
3239 | if (begin_dad) | ||
3240 | addrconf_mod_dad_work(ifp, 0); | ||
3241 | } | ||
3242 | |||
3243 | static void addrconf_dad_work(struct work_struct *w) | ||
3244 | { | ||
3245 | struct inet6_ifaddr *ifp = container_of(to_delayed_work(w), | ||
3246 | struct inet6_ifaddr, | ||
3247 | dad_work); | ||
3209 | struct inet6_dev *idev = ifp->idev; | 3248 | struct inet6_dev *idev = ifp->idev; |
3210 | struct in6_addr mcaddr; | 3249 | struct in6_addr mcaddr; |
3211 | 3250 | ||
3251 | enum { | ||
3252 | DAD_PROCESS, | ||
3253 | DAD_BEGIN, | ||
3254 | DAD_ABORT, | ||
3255 | } action = DAD_PROCESS; | ||
3256 | |||
3257 | rtnl_lock(); | ||
3258 | |||
3259 | spin_lock_bh(&ifp->state_lock); | ||
3260 | if (ifp->state == INET6_IFADDR_STATE_PREDAD) { | ||
3261 | action = DAD_BEGIN; | ||
3262 | ifp->state = INET6_IFADDR_STATE_DAD; | ||
3263 | } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) { | ||
3264 | action = DAD_ABORT; | ||
3265 | ifp->state = INET6_IFADDR_STATE_POSTDAD; | ||
3266 | } | ||
3267 | spin_unlock_bh(&ifp->state_lock); | ||
3268 | |||
3269 | if (action == DAD_BEGIN) { | ||
3270 | addrconf_dad_begin(ifp); | ||
3271 | goto out; | ||
3272 | } else if (action == DAD_ABORT) { | ||
3273 | addrconf_dad_stop(ifp, 1); | ||
3274 | goto out; | ||
3275 | } | ||
3276 | |||
3212 | if (!ifp->dad_probes && addrconf_dad_end(ifp)) | 3277 | if (!ifp->dad_probes && addrconf_dad_end(ifp)) |
3213 | goto out; | 3278 | goto out; |
3214 | 3279 | ||
3215 | write_lock(&idev->lock); | 3280 | write_lock_bh(&idev->lock); |
3216 | if (idev->dead || !(idev->if_flags & IF_READY)) { | 3281 | if (idev->dead || !(idev->if_flags & IF_READY)) { |
3217 | write_unlock(&idev->lock); | 3282 | write_unlock_bh(&idev->lock); |
3218 | goto out; | 3283 | goto out; |
3219 | } | 3284 | } |
3220 | 3285 | ||
3221 | spin_lock(&ifp->lock); | 3286 | spin_lock(&ifp->lock); |
3222 | if (ifp->state == INET6_IFADDR_STATE_DEAD) { | 3287 | if (ifp->state == INET6_IFADDR_STATE_DEAD) { |
3223 | spin_unlock(&ifp->lock); | 3288 | spin_unlock(&ifp->lock); |
3224 | write_unlock(&idev->lock); | 3289 | write_unlock_bh(&idev->lock); |
3225 | goto out; | 3290 | goto out; |
3226 | } | 3291 | } |
3227 | 3292 | ||
@@ -3232,7 +3297,7 @@ static void addrconf_dad_timer(unsigned long data) | |||
3232 | 3297 | ||
3233 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); | 3298 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); |
3234 | spin_unlock(&ifp->lock); | 3299 | spin_unlock(&ifp->lock); |
3235 | write_unlock(&idev->lock); | 3300 | write_unlock_bh(&idev->lock); |
3236 | 3301 | ||
3237 | addrconf_dad_completed(ifp); | 3302 | addrconf_dad_completed(ifp); |
3238 | 3303 | ||
@@ -3240,16 +3305,17 @@ static void addrconf_dad_timer(unsigned long data) | |||
3240 | } | 3305 | } |
3241 | 3306 | ||
3242 | ifp->dad_probes--; | 3307 | ifp->dad_probes--; |
3243 | addrconf_mod_dad_timer(ifp, | 3308 | addrconf_mod_dad_work(ifp, |
3244 | NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME)); | 3309 | NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME)); |
3245 | spin_unlock(&ifp->lock); | 3310 | spin_unlock(&ifp->lock); |
3246 | write_unlock(&idev->lock); | 3311 | write_unlock_bh(&idev->lock); |
3247 | 3312 | ||
3248 | /* send a neighbour solicitation for our addr */ | 3313 | /* send a neighbour solicitation for our addr */ |
3249 | addrconf_addr_solict_mult(&ifp->addr, &mcaddr); | 3314 | addrconf_addr_solict_mult(&ifp->addr, &mcaddr); |
3250 | ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any); | 3315 | ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any); |
3251 | out: | 3316 | out: |
3252 | in6_ifa_put(ifp); | 3317 | in6_ifa_put(ifp); |
3318 | rtnl_unlock(); | ||
3253 | } | 3319 | } |
3254 | 3320 | ||
3255 | /* ifp->idev must be at least read locked */ | 3321 | /* ifp->idev must be at least read locked */ |
@@ -3276,7 +3342,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) | |||
3276 | struct in6_addr lladdr; | 3342 | struct in6_addr lladdr; |
3277 | bool send_rs, send_mld; | 3343 | bool send_rs, send_mld; |
3278 | 3344 | ||
3279 | addrconf_del_dad_timer(ifp); | 3345 | addrconf_del_dad_work(ifp); |
3280 | 3346 | ||
3281 | /* | 3347 | /* |
3282 | * Configure the address for reception. Now it is valid. | 3348 | * Configure the address for reception. Now it is valid. |
@@ -3517,23 +3583,23 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr) | |||
3517 | * Periodic address status verification | 3583 | * Periodic address status verification |
3518 | */ | 3584 | */ |
3519 | 3585 | ||
3520 | static void addrconf_verify(unsigned long foo) | 3586 | static void addrconf_verify_rtnl(void) |
3521 | { | 3587 | { |
3522 | unsigned long now, next, next_sec, next_sched; | 3588 | unsigned long now, next, next_sec, next_sched; |
3523 | struct inet6_ifaddr *ifp; | 3589 | struct inet6_ifaddr *ifp; |
3524 | int i; | 3590 | int i; |
3525 | 3591 | ||
3592 | ASSERT_RTNL(); | ||
3593 | |||
3526 | rcu_read_lock_bh(); | 3594 | rcu_read_lock_bh(); |
3527 | spin_lock(&addrconf_verify_lock); | ||
3528 | now = jiffies; | 3595 | now = jiffies; |
3529 | next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); | 3596 | next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); |
3530 | 3597 | ||
3531 | del_timer(&addr_chk_timer); | 3598 | cancel_delayed_work(&addr_chk_work); |
3532 | 3599 | ||
3533 | for (i = 0; i < IN6_ADDR_HSIZE; i++) { | 3600 | for (i = 0; i < IN6_ADDR_HSIZE; i++) { |
3534 | restart: | 3601 | restart: |
3535 | hlist_for_each_entry_rcu_bh(ifp, | 3602 | hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) { |
3536 | &inet6_addr_lst[i], addr_lst) { | ||
3537 | unsigned long age; | 3603 | unsigned long age; |
3538 | 3604 | ||
3539 | /* When setting preferred_lft to a value not zero or | 3605 | /* When setting preferred_lft to a value not zero or |
@@ -3628,13 +3694,22 @@ restart: | |||
3628 | 3694 | ||
3629 | ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n", | 3695 | ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n", |
3630 | now, next, next_sec, next_sched); | 3696 | now, next, next_sec, next_sched); |
3631 | 3697 | mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now); | |
3632 | addr_chk_timer.expires = next_sched; | ||
3633 | add_timer(&addr_chk_timer); | ||
3634 | spin_unlock(&addrconf_verify_lock); | ||
3635 | rcu_read_unlock_bh(); | 3698 | rcu_read_unlock_bh(); |
3636 | } | 3699 | } |
3637 | 3700 | ||
3701 | static void addrconf_verify_work(struct work_struct *w) | ||
3702 | { | ||
3703 | rtnl_lock(); | ||
3704 | addrconf_verify_rtnl(); | ||
3705 | rtnl_unlock(); | ||
3706 | } | ||
3707 | |||
3708 | static void addrconf_verify(void) | ||
3709 | { | ||
3710 | mod_delayed_work(addrconf_wq, &addr_chk_work, 0); | ||
3711 | } | ||
3712 | |||
3638 | static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local, | 3713 | static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local, |
3639 | struct in6_addr **peer_pfx) | 3714 | struct in6_addr **peer_pfx) |
3640 | { | 3715 | { |
@@ -3691,6 +3766,8 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags, | |||
3691 | bool was_managetempaddr; | 3766 | bool was_managetempaddr; |
3692 | bool had_prefixroute; | 3767 | bool had_prefixroute; |
3693 | 3768 | ||
3769 | ASSERT_RTNL(); | ||
3770 | |||
3694 | if (!valid_lft || (prefered_lft > valid_lft)) | 3771 | if (!valid_lft || (prefered_lft > valid_lft)) |
3695 | return -EINVAL; | 3772 | return -EINVAL; |
3696 | 3773 | ||
@@ -3756,7 +3833,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags, | |||
3756 | !was_managetempaddr, jiffies); | 3833 | !was_managetempaddr, jiffies); |
3757 | } | 3834 | } |
3758 | 3835 | ||
3759 | addrconf_verify(0); | 3836 | addrconf_verify_rtnl(); |
3760 | 3837 | ||
3761 | return 0; | 3838 | return 0; |
3762 | } | 3839 | } |
@@ -4386,6 +4463,8 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) | |||
4386 | bool update_rs = false; | 4463 | bool update_rs = false; |
4387 | struct in6_addr ll_addr; | 4464 | struct in6_addr ll_addr; |
4388 | 4465 | ||
4466 | ASSERT_RTNL(); | ||
4467 | |||
4389 | if (token == NULL) | 4468 | if (token == NULL) |
4390 | return -EINVAL; | 4469 | return -EINVAL; |
4391 | if (ipv6_addr_any(token)) | 4470 | if (ipv6_addr_any(token)) |
@@ -4434,7 +4513,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) | |||
4434 | } | 4513 | } |
4435 | 4514 | ||
4436 | write_unlock_bh(&idev->lock); | 4515 | write_unlock_bh(&idev->lock); |
4437 | addrconf_verify(0); | 4516 | addrconf_verify_rtnl(); |
4438 | return 0; | 4517 | return 0; |
4439 | } | 4518 | } |
4440 | 4519 | ||
@@ -4636,6 +4715,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | |||
4636 | { | 4715 | { |
4637 | struct net *net = dev_net(ifp->idev->dev); | 4716 | struct net *net = dev_net(ifp->idev->dev); |
4638 | 4717 | ||
4718 | if (event) | ||
4719 | ASSERT_RTNL(); | ||
4720 | |||
4639 | inet6_ifa_notify(event ? : RTM_NEWADDR, ifp); | 4721 | inet6_ifa_notify(event ? : RTM_NEWADDR, ifp); |
4640 | 4722 | ||
4641 | switch (event) { | 4723 | switch (event) { |
@@ -5244,6 +5326,12 @@ int __init addrconf_init(void) | |||
5244 | if (err < 0) | 5326 | if (err < 0) |
5245 | goto out_addrlabel; | 5327 | goto out_addrlabel; |
5246 | 5328 | ||
5329 | addrconf_wq = create_workqueue("ipv6_addrconf"); | ||
5330 | if (!addrconf_wq) { | ||
5331 | err = -ENOMEM; | ||
5332 | goto out_nowq; | ||
5333 | } | ||
5334 | |||
5247 | /* The addrconf netdev notifier requires that loopback_dev | 5335 | /* The addrconf netdev notifier requires that loopback_dev |
5248 | * has it's ipv6 private information allocated and setup | 5336 | * has it's ipv6 private information allocated and setup |
5249 | * before it can bring up and give link-local addresses | 5337 | * before it can bring up and give link-local addresses |
@@ -5274,7 +5362,7 @@ int __init addrconf_init(void) | |||
5274 | 5362 | ||
5275 | register_netdevice_notifier(&ipv6_dev_notf); | 5363 | register_netdevice_notifier(&ipv6_dev_notf); |
5276 | 5364 | ||
5277 | addrconf_verify(0); | 5365 | addrconf_verify(); |
5278 | 5366 | ||
5279 | rtnl_af_register(&inet6_ops); | 5367 | rtnl_af_register(&inet6_ops); |
5280 | 5368 | ||
@@ -5302,6 +5390,8 @@ errout: | |||
5302 | rtnl_af_unregister(&inet6_ops); | 5390 | rtnl_af_unregister(&inet6_ops); |
5303 | unregister_netdevice_notifier(&ipv6_dev_notf); | 5391 | unregister_netdevice_notifier(&ipv6_dev_notf); |
5304 | errlo: | 5392 | errlo: |
5393 | destroy_workqueue(addrconf_wq); | ||
5394 | out_nowq: | ||
5305 | unregister_pernet_subsys(&addrconf_ops); | 5395 | unregister_pernet_subsys(&addrconf_ops); |
5306 | out_addrlabel: | 5396 | out_addrlabel: |
5307 | ipv6_addr_label_cleanup(); | 5397 | ipv6_addr_label_cleanup(); |
@@ -5337,7 +5427,8 @@ void addrconf_cleanup(void) | |||
5337 | for (i = 0; i < IN6_ADDR_HSIZE; i++) | 5427 | for (i = 0; i < IN6_ADDR_HSIZE; i++) |
5338 | WARN_ON(!hlist_empty(&inet6_addr_lst[i])); | 5428 | WARN_ON(!hlist_empty(&inet6_addr_lst[i])); |
5339 | spin_unlock_bh(&addrconf_hash_lock); | 5429 | spin_unlock_bh(&addrconf_hash_lock); |
5340 | 5430 | cancel_delayed_work(&addr_chk_work); | |
5341 | del_timer(&addr_chk_timer); | ||
5342 | rtnl_unlock(); | 5431 | rtnl_unlock(); |
5432 | |||
5433 | destroy_workqueue(addrconf_wq); | ||
5343 | } | 5434 | } |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 16f91a2e7888..64d6073731d3 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -1101,21 +1101,19 @@ static void ip6_append_data_mtu(unsigned int *mtu, | |||
1101 | unsigned int fragheaderlen, | 1101 | unsigned int fragheaderlen, |
1102 | struct sk_buff *skb, | 1102 | struct sk_buff *skb, |
1103 | struct rt6_info *rt, | 1103 | struct rt6_info *rt, |
1104 | bool pmtuprobe) | 1104 | unsigned int orig_mtu) |
1105 | { | 1105 | { |
1106 | if (!(rt->dst.flags & DST_XFRM_TUNNEL)) { | 1106 | if (!(rt->dst.flags & DST_XFRM_TUNNEL)) { |
1107 | if (skb == NULL) { | 1107 | if (skb == NULL) { |
1108 | /* first fragment, reserve header_len */ | 1108 | /* first fragment, reserve header_len */ |
1109 | *mtu = *mtu - rt->dst.header_len; | 1109 | *mtu = orig_mtu - rt->dst.header_len; |
1110 | 1110 | ||
1111 | } else { | 1111 | } else { |
1112 | /* | 1112 | /* |
1113 | * this fragment is not first, the headers | 1113 | * this fragment is not first, the headers |
1114 | * space is regarded as data space. | 1114 | * space is regarded as data space. |
1115 | */ | 1115 | */ |
1116 | *mtu = min(*mtu, pmtuprobe ? | 1116 | *mtu = orig_mtu; |
1117 | rt->dst.dev->mtu : | ||
1118 | dst_mtu(rt->dst.path)); | ||
1119 | } | 1117 | } |
1120 | *maxfraglen = ((*mtu - fragheaderlen) & ~7) | 1118 | *maxfraglen = ((*mtu - fragheaderlen) & ~7) |
1121 | + fragheaderlen - sizeof(struct frag_hdr); | 1119 | + fragheaderlen - sizeof(struct frag_hdr); |
@@ -1132,7 +1130,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
1132 | struct ipv6_pinfo *np = inet6_sk(sk); | 1130 | struct ipv6_pinfo *np = inet6_sk(sk); |
1133 | struct inet_cork *cork; | 1131 | struct inet_cork *cork; |
1134 | struct sk_buff *skb, *skb_prev = NULL; | 1132 | struct sk_buff *skb, *skb_prev = NULL; |
1135 | unsigned int maxfraglen, fragheaderlen, mtu; | 1133 | unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu; |
1136 | int exthdrlen; | 1134 | int exthdrlen; |
1137 | int dst_exthdrlen; | 1135 | int dst_exthdrlen; |
1138 | int hh_len; | 1136 | int hh_len; |
@@ -1214,6 +1212,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, | |||
1214 | dst_exthdrlen = 0; | 1212 | dst_exthdrlen = 0; |
1215 | mtu = cork->fragsize; | 1213 | mtu = cork->fragsize; |
1216 | } | 1214 | } |
1215 | orig_mtu = mtu; | ||
1217 | 1216 | ||
1218 | hh_len = LL_RESERVED_SPACE(rt->dst.dev); | 1217 | hh_len = LL_RESERVED_SPACE(rt->dst.dev); |
1219 | 1218 | ||
@@ -1311,8 +1310,7 @@ alloc_new_skb: | |||
1311 | if (skb == NULL || skb_prev == NULL) | 1310 | if (skb == NULL || skb_prev == NULL) |
1312 | ip6_append_data_mtu(&mtu, &maxfraglen, | 1311 | ip6_append_data_mtu(&mtu, &maxfraglen, |
1313 | fragheaderlen, skb, rt, | 1312 | fragheaderlen, skb, rt, |
1314 | np->pmtudisc >= | 1313 | orig_mtu); |
1315 | IPV6_PMTUDISC_PROBE); | ||
1316 | 1314 | ||
1317 | skb_prev = skb; | 1315 | skb_prev = skb; |
1318 | 1316 | ||
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 0eb4038a4d63..8737400af0a0 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -2349,13 +2349,14 @@ int ip6mr_get_route(struct net *net, | |||
2349 | } | 2349 | } |
2350 | 2350 | ||
2351 | static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, | 2351 | static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb, |
2352 | u32 portid, u32 seq, struct mfc6_cache *c, int cmd) | 2352 | u32 portid, u32 seq, struct mfc6_cache *c, int cmd, |
2353 | int flags) | ||
2353 | { | 2354 | { |
2354 | struct nlmsghdr *nlh; | 2355 | struct nlmsghdr *nlh; |
2355 | struct rtmsg *rtm; | 2356 | struct rtmsg *rtm; |
2356 | int err; | 2357 | int err; |
2357 | 2358 | ||
2358 | nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI); | 2359 | nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags); |
2359 | if (nlh == NULL) | 2360 | if (nlh == NULL) |
2360 | return -EMSGSIZE; | 2361 | return -EMSGSIZE; |
2361 | 2362 | ||
@@ -2423,7 +2424,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc, | |||
2423 | if (skb == NULL) | 2424 | if (skb == NULL) |
2424 | goto errout; | 2425 | goto errout; |
2425 | 2426 | ||
2426 | err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd); | 2427 | err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0); |
2427 | if (err < 0) | 2428 | if (err < 0) |
2428 | goto errout; | 2429 | goto errout; |
2429 | 2430 | ||
@@ -2462,7 +2463,8 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) | |||
2462 | if (ip6mr_fill_mroute(mrt, skb, | 2463 | if (ip6mr_fill_mroute(mrt, skb, |
2463 | NETLINK_CB(cb->skb).portid, | 2464 | NETLINK_CB(cb->skb).portid, |
2464 | cb->nlh->nlmsg_seq, | 2465 | cb->nlh->nlmsg_seq, |
2465 | mfc, RTM_NEWROUTE) < 0) | 2466 | mfc, RTM_NEWROUTE, |
2467 | NLM_F_MULTI) < 0) | ||
2466 | goto done; | 2468 | goto done; |
2467 | next_entry: | 2469 | next_entry: |
2468 | e++; | 2470 | e++; |
@@ -2476,7 +2478,8 @@ next_entry: | |||
2476 | if (ip6mr_fill_mroute(mrt, skb, | 2478 | if (ip6mr_fill_mroute(mrt, skb, |
2477 | NETLINK_CB(cb->skb).portid, | 2479 | NETLINK_CB(cb->skb).portid, |
2478 | cb->nlh->nlmsg_seq, | 2480 | cb->nlh->nlmsg_seq, |
2479 | mfc, RTM_NEWROUTE) < 0) { | 2481 | mfc, RTM_NEWROUTE, |
2482 | NLM_F_MULTI) < 0) { | ||
2480 | spin_unlock_bh(&mfc_unres_lock); | 2483 | spin_unlock_bh(&mfc_unres_lock); |
2481 | goto done; | 2484 | goto done; |
2482 | } | 2485 | } |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 1a04c1329362..79326978517a 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -433,12 +433,13 @@ static inline int verify_sec_ctx_len(const void *p) | |||
433 | return 0; | 433 | return 0; |
434 | } | 434 | } |
435 | 435 | ||
436 | static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx) | 436 | static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx, |
437 | gfp_t gfp) | ||
437 | { | 438 | { |
438 | struct xfrm_user_sec_ctx *uctx = NULL; | 439 | struct xfrm_user_sec_ctx *uctx = NULL; |
439 | int ctx_size = sec_ctx->sadb_x_ctx_len; | 440 | int ctx_size = sec_ctx->sadb_x_ctx_len; |
440 | 441 | ||
441 | uctx = kmalloc((sizeof(*uctx)+ctx_size), GFP_KERNEL); | 442 | uctx = kmalloc((sizeof(*uctx)+ctx_size), gfp); |
442 | 443 | ||
443 | if (!uctx) | 444 | if (!uctx) |
444 | return NULL; | 445 | return NULL; |
@@ -1124,7 +1125,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net, | |||
1124 | 1125 | ||
1125 | sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; | 1126 | sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; |
1126 | if (sec_ctx != NULL) { | 1127 | if (sec_ctx != NULL) { |
1127 | struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); | 1128 | struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL); |
1128 | 1129 | ||
1129 | if (!uctx) | 1130 | if (!uctx) |
1130 | goto out; | 1131 | goto out; |
@@ -2231,14 +2232,14 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_ | |||
2231 | 2232 | ||
2232 | sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; | 2233 | sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; |
2233 | if (sec_ctx != NULL) { | 2234 | if (sec_ctx != NULL) { |
2234 | struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); | 2235 | struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL); |
2235 | 2236 | ||
2236 | if (!uctx) { | 2237 | if (!uctx) { |
2237 | err = -ENOBUFS; | 2238 | err = -ENOBUFS; |
2238 | goto out; | 2239 | goto out; |
2239 | } | 2240 | } |
2240 | 2241 | ||
2241 | err = security_xfrm_policy_alloc(&xp->security, uctx); | 2242 | err = security_xfrm_policy_alloc(&xp->security, uctx, GFP_KERNEL); |
2242 | kfree(uctx); | 2243 | kfree(uctx); |
2243 | 2244 | ||
2244 | if (err) | 2245 | if (err) |
@@ -2335,12 +2336,12 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa | |||
2335 | 2336 | ||
2336 | sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; | 2337 | sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1]; |
2337 | if (sec_ctx != NULL) { | 2338 | if (sec_ctx != NULL) { |
2338 | struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); | 2339 | struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL); |
2339 | 2340 | ||
2340 | if (!uctx) | 2341 | if (!uctx) |
2341 | return -ENOMEM; | 2342 | return -ENOMEM; |
2342 | 2343 | ||
2343 | err = security_xfrm_policy_alloc(&pol_ctx, uctx); | 2344 | err = security_xfrm_policy_alloc(&pol_ctx, uctx, GFP_KERNEL); |
2344 | kfree(uctx); | 2345 | kfree(uctx); |
2345 | if (err) | 2346 | if (err) |
2346 | return err; | 2347 | return err; |
@@ -3239,8 +3240,8 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, | |||
3239 | } | 3240 | } |
3240 | if ((*dir = verify_sec_ctx_len(p))) | 3241 | if ((*dir = verify_sec_ctx_len(p))) |
3241 | goto out; | 3242 | goto out; |
3242 | uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx); | 3243 | uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_ATOMIC); |
3243 | *dir = security_xfrm_policy_alloc(&xp->security, uctx); | 3244 | *dir = security_xfrm_policy_alloc(&xp->security, uctx, GFP_ATOMIC); |
3244 | kfree(uctx); | 3245 | kfree(uctx); |
3245 | 3246 | ||
3246 | if (*dir) | 3247 | if (*dir) |
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index f072fe803510..108120f216b1 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c | |||
@@ -354,13 +354,16 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
354 | 354 | ||
355 | skb = nfnetlink_alloc_skb(net, size, queue->peer_portid, | 355 | skb = nfnetlink_alloc_skb(net, size, queue->peer_portid, |
356 | GFP_ATOMIC); | 356 | GFP_ATOMIC); |
357 | if (!skb) | 357 | if (!skb) { |
358 | skb_tx_error(entskb); | ||
358 | return NULL; | 359 | return NULL; |
360 | } | ||
359 | 361 | ||
360 | nlh = nlmsg_put(skb, 0, 0, | 362 | nlh = nlmsg_put(skb, 0, 0, |
361 | NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, | 363 | NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, |
362 | sizeof(struct nfgenmsg), 0); | 364 | sizeof(struct nfgenmsg), 0); |
363 | if (!nlh) { | 365 | if (!nlh) { |
366 | skb_tx_error(entskb); | ||
364 | kfree_skb(skb); | 367 | kfree_skb(skb); |
365 | return NULL; | 368 | return NULL; |
366 | } | 369 | } |
@@ -488,13 +491,15 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
488 | nla->nla_type = NFQA_PAYLOAD; | 491 | nla->nla_type = NFQA_PAYLOAD; |
489 | nla->nla_len = nla_attr_size(data_len); | 492 | nla->nla_len = nla_attr_size(data_len); |
490 | 493 | ||
491 | skb_zerocopy(skb, entskb, data_len, hlen); | 494 | if (skb_zerocopy(skb, entskb, data_len, hlen)) |
495 | goto nla_put_failure; | ||
492 | } | 496 | } |
493 | 497 | ||
494 | nlh->nlmsg_len = skb->len; | 498 | nlh->nlmsg_len = skb->len; |
495 | return skb; | 499 | return skb; |
496 | 500 | ||
497 | nla_put_failure: | 501 | nla_put_failure: |
502 | skb_tx_error(entskb); | ||
498 | kfree_skb(skb); | 503 | kfree_skb(skb); |
499 | net_err_ratelimited("nf_queue: error creating packet message\n"); | 504 | net_err_ratelimited("nf_queue: error creating packet message\n"); |
500 | return NULL; | 505 | return NULL; |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e9a48baf8551..270b77dfac30 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -464,7 +464,9 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, | |||
464 | } | 464 | } |
465 | nla->nla_len = nla_attr_size(skb->len); | 465 | nla->nla_len = nla_attr_size(skb->len); |
466 | 466 | ||
467 | skb_zerocopy(user_skb, skb, skb->len, hlen); | 467 | err = skb_zerocopy(user_skb, skb, skb->len, hlen); |
468 | if (err) | ||
469 | goto out; | ||
468 | 470 | ||
469 | /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */ | 471 | /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */ |
470 | if (!(dp->user_features & OVS_DP_F_UNALIGNED)) { | 472 | if (!(dp->user_features & OVS_DP_F_UNALIGNED)) { |
@@ -478,6 +480,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, | |||
478 | 480 | ||
479 | err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid); | 481 | err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid); |
480 | out: | 482 | out: |
483 | if (err) | ||
484 | skb_tx_error(skb); | ||
481 | kfree_skb(nskb); | 485 | kfree_skb(nskb); |
482 | return err; | 486 | return err; |
483 | } | 487 | } |
@@ -1174,7 +1178,7 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *in | |||
1174 | struct datapath *dp; | 1178 | struct datapath *dp; |
1175 | 1179 | ||
1176 | dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); | 1180 | dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); |
1177 | if (!dp) | 1181 | if (IS_ERR(dp)) |
1178 | return; | 1182 | return; |
1179 | 1183 | ||
1180 | WARN(dp->user_features, "Dropping previously announced user features\n"); | 1184 | WARN(dp->user_features, "Dropping previously announced user features\n"); |
@@ -1762,11 +1766,12 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
1762 | int bucket = cb->args[0], skip = cb->args[1]; | 1766 | int bucket = cb->args[0], skip = cb->args[1]; |
1763 | int i, j = 0; | 1767 | int i, j = 0; |
1764 | 1768 | ||
1769 | rcu_read_lock(); | ||
1765 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); | 1770 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); |
1766 | if (!dp) | 1771 | if (!dp) { |
1772 | rcu_read_unlock(); | ||
1767 | return -ENODEV; | 1773 | return -ENODEV; |
1768 | 1774 | } | |
1769 | rcu_read_lock(); | ||
1770 | for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) { | 1775 | for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) { |
1771 | struct vport *vport; | 1776 | struct vport *vport; |
1772 | 1777 | ||
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 16f4b46161d4..2998989e76db 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
@@ -73,6 +73,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb) | |||
73 | 73 | ||
74 | if ((flow->key.eth.type == htons(ETH_P_IP) || | 74 | if ((flow->key.eth.type == htons(ETH_P_IP) || |
75 | flow->key.eth.type == htons(ETH_P_IPV6)) && | 75 | flow->key.eth.type == htons(ETH_P_IPV6)) && |
76 | flow->key.ip.frag != OVS_FRAG_TYPE_LATER && | ||
76 | flow->key.ip.proto == IPPROTO_TCP && | 77 | flow->key.ip.proto == IPPROTO_TCP && |
77 | likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { | 78 | likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { |
78 | tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb)); | 79 | tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb)); |
@@ -91,7 +92,7 @@ static void stats_read(struct flow_stats *stats, | |||
91 | unsigned long *used, __be16 *tcp_flags) | 92 | unsigned long *used, __be16 *tcp_flags) |
92 | { | 93 | { |
93 | spin_lock(&stats->lock); | 94 | spin_lock(&stats->lock); |
94 | if (time_after(stats->used, *used)) | 95 | if (!*used || time_after(stats->used, *used)) |
95 | *used = stats->used; | 96 | *used = stats->used; |
96 | *tcp_flags |= stats->tcp_flags; | 97 | *tcp_flags |= stats->tcp_flags; |
97 | ovs_stats->n_packets += stats->packet_count; | 98 | ovs_stats->n_packets += stats->packet_count; |
@@ -102,30 +103,24 @@ static void stats_read(struct flow_stats *stats, | |||
102 | void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats, | 103 | void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats, |
103 | unsigned long *used, __be16 *tcp_flags) | 104 | unsigned long *used, __be16 *tcp_flags) |
104 | { | 105 | { |
105 | int cpu, cur_cpu; | 106 | int cpu; |
106 | 107 | ||
107 | *used = 0; | 108 | *used = 0; |
108 | *tcp_flags = 0; | 109 | *tcp_flags = 0; |
109 | memset(ovs_stats, 0, sizeof(*ovs_stats)); | 110 | memset(ovs_stats, 0, sizeof(*ovs_stats)); |
110 | 111 | ||
112 | local_bh_disable(); | ||
111 | if (!flow->stats.is_percpu) { | 113 | if (!flow->stats.is_percpu) { |
112 | stats_read(flow->stats.stat, ovs_stats, used, tcp_flags); | 114 | stats_read(flow->stats.stat, ovs_stats, used, tcp_flags); |
113 | } else { | 115 | } else { |
114 | cur_cpu = get_cpu(); | ||
115 | for_each_possible_cpu(cpu) { | 116 | for_each_possible_cpu(cpu) { |
116 | struct flow_stats *stats; | 117 | struct flow_stats *stats; |
117 | 118 | ||
118 | if (cpu == cur_cpu) | ||
119 | local_bh_disable(); | ||
120 | |||
121 | stats = per_cpu_ptr(flow->stats.cpu_stats, cpu); | 119 | stats = per_cpu_ptr(flow->stats.cpu_stats, cpu); |
122 | stats_read(stats, ovs_stats, used, tcp_flags); | 120 | stats_read(stats, ovs_stats, used, tcp_flags); |
123 | |||
124 | if (cpu == cur_cpu) | ||
125 | local_bh_enable(); | ||
126 | } | 121 | } |
127 | put_cpu(); | ||
128 | } | 122 | } |
123 | local_bh_enable(); | ||
129 | } | 124 | } |
130 | 125 | ||
131 | static void stats_reset(struct flow_stats *stats) | 126 | static void stats_reset(struct flow_stats *stats) |
@@ -140,25 +135,17 @@ static void stats_reset(struct flow_stats *stats) | |||
140 | 135 | ||
141 | void ovs_flow_stats_clear(struct sw_flow *flow) | 136 | void ovs_flow_stats_clear(struct sw_flow *flow) |
142 | { | 137 | { |
143 | int cpu, cur_cpu; | 138 | int cpu; |
144 | 139 | ||
140 | local_bh_disable(); | ||
145 | if (!flow->stats.is_percpu) { | 141 | if (!flow->stats.is_percpu) { |
146 | stats_reset(flow->stats.stat); | 142 | stats_reset(flow->stats.stat); |
147 | } else { | 143 | } else { |
148 | cur_cpu = get_cpu(); | ||
149 | |||
150 | for_each_possible_cpu(cpu) { | 144 | for_each_possible_cpu(cpu) { |
151 | |||
152 | if (cpu == cur_cpu) | ||
153 | local_bh_disable(); | ||
154 | |||
155 | stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu)); | 145 | stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu)); |
156 | |||
157 | if (cpu == cur_cpu) | ||
158 | local_bh_enable(); | ||
159 | } | 146 | } |
160 | put_cpu(); | ||
161 | } | 147 | } |
148 | local_bh_enable(); | ||
162 | } | 149 | } |
163 | 150 | ||
164 | static int check_header(struct sk_buff *skb, int len) | 151 | static int check_header(struct sk_buff *skb, int len) |
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 11c9ae00837d..642437231ad5 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
@@ -263,9 +263,9 @@ static void subscr_cancel(struct tipc_subscr *s, | |||
263 | * | 263 | * |
264 | * Called with subscriber lock held. | 264 | * Called with subscriber lock held. |
265 | */ | 265 | */ |
266 | static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, | 266 | static int subscr_subscribe(struct tipc_subscr *s, |
267 | struct tipc_subscriber *subscriber) | 267 | struct tipc_subscriber *subscriber, |
268 | { | 268 | struct tipc_subscription **sub_p) { |
269 | struct tipc_subscription *sub; | 269 | struct tipc_subscription *sub; |
270 | int swap; | 270 | int swap; |
271 | 271 | ||
@@ -276,23 +276,21 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, | |||
276 | if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { | 276 | if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { |
277 | s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); | 277 | s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); |
278 | subscr_cancel(s, subscriber); | 278 | subscr_cancel(s, subscriber); |
279 | return NULL; | 279 | return 0; |
280 | } | 280 | } |
281 | 281 | ||
282 | /* Refuse subscription if global limit exceeded */ | 282 | /* Refuse subscription if global limit exceeded */ |
283 | if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) { | 283 | if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) { |
284 | pr_warn("Subscription rejected, limit reached (%u)\n", | 284 | pr_warn("Subscription rejected, limit reached (%u)\n", |
285 | TIPC_MAX_SUBSCRIPTIONS); | 285 | TIPC_MAX_SUBSCRIPTIONS); |
286 | subscr_terminate(subscriber); | 286 | return -EINVAL; |
287 | return NULL; | ||
288 | } | 287 | } |
289 | 288 | ||
290 | /* Allocate subscription object */ | 289 | /* Allocate subscription object */ |
291 | sub = kmalloc(sizeof(*sub), GFP_ATOMIC); | 290 | sub = kmalloc(sizeof(*sub), GFP_ATOMIC); |
292 | if (!sub) { | 291 | if (!sub) { |
293 | pr_warn("Subscription rejected, no memory\n"); | 292 | pr_warn("Subscription rejected, no memory\n"); |
294 | subscr_terminate(subscriber); | 293 | return -ENOMEM; |
295 | return NULL; | ||
296 | } | 294 | } |
297 | 295 | ||
298 | /* Initialize subscription object */ | 296 | /* Initialize subscription object */ |
@@ -306,8 +304,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, | |||
306 | (sub->seq.lower > sub->seq.upper)) { | 304 | (sub->seq.lower > sub->seq.upper)) { |
307 | pr_warn("Subscription rejected, illegal request\n"); | 305 | pr_warn("Subscription rejected, illegal request\n"); |
308 | kfree(sub); | 306 | kfree(sub); |
309 | subscr_terminate(subscriber); | 307 | return -EINVAL; |
310 | return NULL; | ||
311 | } | 308 | } |
312 | INIT_LIST_HEAD(&sub->nameseq_list); | 309 | INIT_LIST_HEAD(&sub->nameseq_list); |
313 | list_add(&sub->subscription_list, &subscriber->subscription_list); | 310 | list_add(&sub->subscription_list, &subscriber->subscription_list); |
@@ -320,8 +317,8 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, | |||
320 | (Handler)subscr_timeout, (unsigned long)sub); | 317 | (Handler)subscr_timeout, (unsigned long)sub); |
321 | k_start_timer(&sub->timer, sub->timeout); | 318 | k_start_timer(&sub->timer, sub->timeout); |
322 | } | 319 | } |
323 | 320 | *sub_p = sub; | |
324 | return sub; | 321 | return 0; |
325 | } | 322 | } |
326 | 323 | ||
327 | /* Handle one termination request for the subscriber */ | 324 | /* Handle one termination request for the subscriber */ |
@@ -335,10 +332,14 @@ static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr, | |||
335 | void *usr_data, void *buf, size_t len) | 332 | void *usr_data, void *buf, size_t len) |
336 | { | 333 | { |
337 | struct tipc_subscriber *subscriber = usr_data; | 334 | struct tipc_subscriber *subscriber = usr_data; |
338 | struct tipc_subscription *sub; | 335 | struct tipc_subscription *sub = NULL; |
339 | 336 | ||
340 | spin_lock_bh(&subscriber->lock); | 337 | spin_lock_bh(&subscriber->lock); |
341 | sub = subscr_subscribe((struct tipc_subscr *)buf, subscriber); | 338 | if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) { |
339 | spin_unlock_bh(&subscriber->lock); | ||
340 | subscr_terminate(subscriber); | ||
341 | return; | ||
342 | } | ||
342 | if (sub) | 343 | if (sub) |
343 | tipc_nametbl_subscribe(sub); | 344 | tipc_nametbl_subscribe(sub); |
344 | spin_unlock_bh(&subscriber->lock); | 345 | spin_unlock_bh(&subscriber->lock); |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index ce6ec6c2f4de..94404f19f9de 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -1787,8 +1787,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1787 | goto out; | 1787 | goto out; |
1788 | 1788 | ||
1789 | err = mutex_lock_interruptible(&u->readlock); | 1789 | err = mutex_lock_interruptible(&u->readlock); |
1790 | if (err) { | 1790 | if (unlikely(err)) { |
1791 | err = sock_intr_errno(sock_rcvtimeo(sk, noblock)); | 1791 | /* recvmsg() in non blocking mode is supposed to return -EAGAIN |
1792 | * sk_rcvtimeo is not honored by mutex_lock_interruptible() | ||
1793 | */ | ||
1794 | err = noblock ? -EAGAIN : -ERESTARTSYS; | ||
1792 | goto out; | 1795 | goto out; |
1793 | } | 1796 | } |
1794 | 1797 | ||
@@ -1913,6 +1916,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1913 | struct unix_sock *u = unix_sk(sk); | 1916 | struct unix_sock *u = unix_sk(sk); |
1914 | DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name); | 1917 | DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name); |
1915 | int copied = 0; | 1918 | int copied = 0; |
1919 | int noblock = flags & MSG_DONTWAIT; | ||
1916 | int check_creds = 0; | 1920 | int check_creds = 0; |
1917 | int target; | 1921 | int target; |
1918 | int err = 0; | 1922 | int err = 0; |
@@ -1928,7 +1932,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1928 | goto out; | 1932 | goto out; |
1929 | 1933 | ||
1930 | target = sock_rcvlowat(sk, flags&MSG_WAITALL, size); | 1934 | target = sock_rcvlowat(sk, flags&MSG_WAITALL, size); |
1931 | timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT); | 1935 | timeo = sock_rcvtimeo(sk, noblock); |
1932 | 1936 | ||
1933 | /* Lock the socket to prevent queue disordering | 1937 | /* Lock the socket to prevent queue disordering |
1934 | * while sleeps in memcpy_tomsg | 1938 | * while sleeps in memcpy_tomsg |
@@ -1940,8 +1944,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1940 | } | 1944 | } |
1941 | 1945 | ||
1942 | err = mutex_lock_interruptible(&u->readlock); | 1946 | err = mutex_lock_interruptible(&u->readlock); |
1943 | if (err) { | 1947 | if (unlikely(err)) { |
1944 | err = sock_intr_errno(timeo); | 1948 | /* recvmsg() in non blocking mode is supposed to return -EAGAIN |
1949 | * sk_rcvtimeo is not honored by mutex_lock_interruptible() | ||
1950 | */ | ||
1951 | err = noblock ? -EAGAIN : -ERESTARTSYS; | ||
1945 | goto out; | 1952 | goto out; |
1946 | } | 1953 | } |
1947 | 1954 | ||
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index c274179d60a2..2f7ddc3a59b4 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -1221,7 +1221,7 @@ static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs | |||
1221 | return 0; | 1221 | return 0; |
1222 | 1222 | ||
1223 | uctx = nla_data(rt); | 1223 | uctx = nla_data(rt); |
1224 | return security_xfrm_policy_alloc(&pol->security, uctx); | 1224 | return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL); |
1225 | } | 1225 | } |
1226 | 1226 | ||
1227 | static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, | 1227 | static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, |
@@ -1626,7 +1626,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1626 | if (rt) { | 1626 | if (rt) { |
1627 | struct xfrm_user_sec_ctx *uctx = nla_data(rt); | 1627 | struct xfrm_user_sec_ctx *uctx = nla_data(rt); |
1628 | 1628 | ||
1629 | err = security_xfrm_policy_alloc(&ctx, uctx); | 1629 | err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL); |
1630 | if (err) | 1630 | if (err) |
1631 | return err; | 1631 | return err; |
1632 | } | 1632 | } |
@@ -1928,7 +1928,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1928 | if (rt) { | 1928 | if (rt) { |
1929 | struct xfrm_user_sec_ctx *uctx = nla_data(rt); | 1929 | struct xfrm_user_sec_ctx *uctx = nla_data(rt); |
1930 | 1930 | ||
1931 | err = security_xfrm_policy_alloc(&ctx, uctx); | 1931 | err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL); |
1932 | if (err) | 1932 | if (err) |
1933 | return err; | 1933 | return err; |
1934 | } | 1934 | } |
diff --git a/security/Makefile b/security/Makefile index a5918e01a4f7..05f1c934d74b 100644 --- a/security/Makefile +++ b/security/Makefile | |||
@@ -16,14 +16,14 @@ obj-$(CONFIG_MMU) += min_addr.o | |||
16 | # Object file lists | 16 | # Object file lists |
17 | obj-$(CONFIG_SECURITY) += security.o capability.o | 17 | obj-$(CONFIG_SECURITY) += security.o capability.o |
18 | obj-$(CONFIG_SECURITYFS) += inode.o | 18 | obj-$(CONFIG_SECURITYFS) += inode.o |
19 | obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o | 19 | obj-$(CONFIG_SECURITY_SELINUX) += selinux/ |
20 | obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o | 20 | obj-$(CONFIG_SECURITY_SMACK) += smack/ |
21 | obj-$(CONFIG_AUDIT) += lsm_audit.o | 21 | obj-$(CONFIG_AUDIT) += lsm_audit.o |
22 | obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o | 22 | obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/ |
23 | obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/built-in.o | 23 | obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/ |
24 | obj-$(CONFIG_SECURITY_YAMA) += yama/built-in.o | 24 | obj-$(CONFIG_SECURITY_YAMA) += yama/ |
25 | obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o | 25 | obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o |
26 | 26 | ||
27 | # Object integrity file lists | 27 | # Object integrity file lists |
28 | subdir-$(CONFIG_INTEGRITY) += integrity | 28 | subdir-$(CONFIG_INTEGRITY) += integrity |
29 | obj-$(CONFIG_INTEGRITY) += integrity/built-in.o | 29 | obj-$(CONFIG_INTEGRITY) += integrity/ |
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 4257b7e2796b..998100093332 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c | |||
@@ -751,7 +751,7 @@ module_param_named(enabled, apparmor_enabled, bool, S_IRUGO); | |||
751 | static int __init apparmor_enabled_setup(char *str) | 751 | static int __init apparmor_enabled_setup(char *str) |
752 | { | 752 | { |
753 | unsigned long enabled; | 753 | unsigned long enabled; |
754 | int error = strict_strtoul(str, 0, &enabled); | 754 | int error = kstrtoul(str, 0, &enabled); |
755 | if (!error) | 755 | if (!error) |
756 | apparmor_enabled = enabled ? 1 : 0; | 756 | apparmor_enabled = enabled ? 1 : 0; |
757 | return 1; | 757 | return 1; |
diff --git a/security/capability.c b/security/capability.c index c1426b91f4bd..e76373de3129 100644 --- a/security/capability.c +++ b/security/capability.c | |||
@@ -116,7 +116,7 @@ static int cap_dentry_init_security(struct dentry *dentry, int mode, | |||
116 | struct qstr *name, void **ctx, | 116 | struct qstr *name, void **ctx, |
117 | u32 *ctxlen) | 117 | u32 *ctxlen) |
118 | { | 118 | { |
119 | return 0; | 119 | return -EOPNOTSUPP; |
120 | } | 120 | } |
121 | 121 | ||
122 | static int cap_inode_alloc_security(struct inode *inode) | 122 | static int cap_inode_alloc_security(struct inode *inode) |
@@ -757,7 +757,8 @@ static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk) | |||
757 | 757 | ||
758 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 758 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
759 | static int cap_xfrm_policy_alloc_security(struct xfrm_sec_ctx **ctxp, | 759 | static int cap_xfrm_policy_alloc_security(struct xfrm_sec_ctx **ctxp, |
760 | struct xfrm_user_sec_ctx *sec_ctx) | 760 | struct xfrm_user_sec_ctx *sec_ctx, |
761 | gfp_t gfp) | ||
761 | { | 762 | { |
762 | return 0; | 763 | return 0; |
763 | } | 764 | } |
diff --git a/security/integrity/Makefile b/security/integrity/Makefile index 0f9cffb1f9ad..0793f4811cb7 100644 --- a/security/integrity/Makefile +++ b/security/integrity/Makefile | |||
@@ -10,6 +10,6 @@ obj-$(CONFIG_INTEGRITY_ASYMMETRIC_KEYS) += digsig_asymmetric.o | |||
10 | integrity-y := iint.o | 10 | integrity-y := iint.o |
11 | 11 | ||
12 | subdir-$(CONFIG_IMA) += ima | 12 | subdir-$(CONFIG_IMA) += ima |
13 | obj-$(CONFIG_IMA) += ima/built-in.o | 13 | obj-$(CONFIG_IMA) += ima/ |
14 | subdir-$(CONFIG_EVM) += evm | 14 | subdir-$(CONFIG_EVM) += evm |
15 | obj-$(CONFIG_EVM) += evm/built-in.o | 15 | obj-$(CONFIG_EVM) += evm/ |
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig index fea9749c3756..d35b4915b00d 100644 --- a/security/integrity/evm/Kconfig +++ b/security/integrity/evm/Kconfig | |||
@@ -1,10 +1,10 @@ | |||
1 | config EVM | 1 | config EVM |
2 | boolean "EVM support" | 2 | boolean "EVM support" |
3 | depends on SECURITY && KEYS && (TRUSTED_KEYS=y || TRUSTED_KEYS=n) | 3 | depends on SECURITY |
4 | select KEYS | ||
5 | select ENCRYPTED_KEYS | ||
4 | select CRYPTO_HMAC | 6 | select CRYPTO_HMAC |
5 | select CRYPTO_MD5 | ||
6 | select CRYPTO_SHA1 | 7 | select CRYPTO_SHA1 |
7 | select ENCRYPTED_KEYS | ||
8 | default n | 8 | default n |
9 | help | 9 | help |
10 | EVM protects a file's security extended attributes against | 10 | EVM protects a file's security extended attributes against |
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h index 30bd1ec0232e..37c88ddb3cfe 100644 --- a/security/integrity/evm/evm.h +++ b/security/integrity/evm/evm.h | |||
@@ -32,19 +32,19 @@ extern struct crypto_shash *hash_tfm; | |||
32 | /* List of EVM protected security xattrs */ | 32 | /* List of EVM protected security xattrs */ |
33 | extern char *evm_config_xattrnames[]; | 33 | extern char *evm_config_xattrnames[]; |
34 | 34 | ||
35 | extern int evm_init_key(void); | 35 | int evm_init_key(void); |
36 | extern int evm_update_evmxattr(struct dentry *dentry, | 36 | int evm_update_evmxattr(struct dentry *dentry, |
37 | const char *req_xattr_name, | 37 | const char *req_xattr_name, |
38 | const char *req_xattr_value, | 38 | const char *req_xattr_value, |
39 | size_t req_xattr_value_len); | 39 | size_t req_xattr_value_len); |
40 | extern int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name, | 40 | int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name, |
41 | const char *req_xattr_value, | 41 | const char *req_xattr_value, |
42 | size_t req_xattr_value_len, char *digest); | 42 | size_t req_xattr_value_len, char *digest); |
43 | extern int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name, | 43 | int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name, |
44 | const char *req_xattr_value, | 44 | const char *req_xattr_value, |
45 | size_t req_xattr_value_len, char *digest); | 45 | size_t req_xattr_value_len, char *digest); |
46 | extern int evm_init_hmac(struct inode *inode, const struct xattr *xattr, | 46 | int evm_init_hmac(struct inode *inode, const struct xattr *xattr, |
47 | char *hmac_val); | 47 | char *hmac_val); |
48 | extern int evm_init_secfs(void); | 48 | int evm_init_secfs(void); |
49 | 49 | ||
50 | #endif | 50 | #endif |
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index 3bab89eb21d6..babd8626bf96 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c | |||
@@ -13,6 +13,8 @@ | |||
13 | * Using root's kernel master key (kmk), calculate the HMAC | 13 | * Using root's kernel master key (kmk), calculate the HMAC |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
17 | |||
16 | #include <linux/module.h> | 18 | #include <linux/module.h> |
17 | #include <linux/crypto.h> | 19 | #include <linux/crypto.h> |
18 | #include <linux/xattr.h> | 20 | #include <linux/xattr.h> |
@@ -103,13 +105,13 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode, | |||
103 | umode_t mode; | 105 | umode_t mode; |
104 | } hmac_misc; | 106 | } hmac_misc; |
105 | 107 | ||
106 | memset(&hmac_misc, 0, sizeof hmac_misc); | 108 | memset(&hmac_misc, 0, sizeof(hmac_misc)); |
107 | hmac_misc.ino = inode->i_ino; | 109 | hmac_misc.ino = inode->i_ino; |
108 | hmac_misc.generation = inode->i_generation; | 110 | hmac_misc.generation = inode->i_generation; |
109 | hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid); | 111 | hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid); |
110 | hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid); | 112 | hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid); |
111 | hmac_misc.mode = inode->i_mode; | 113 | hmac_misc.mode = inode->i_mode; |
112 | crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof hmac_misc); | 114 | crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc)); |
113 | if (evm_hmac_version > 1) | 115 | if (evm_hmac_version > 1) |
114 | crypto_shash_update(desc, inode->i_sb->s_uuid, | 116 | crypto_shash_update(desc, inode->i_sb->s_uuid, |
115 | sizeof(inode->i_sb->s_uuid)); | 117 | sizeof(inode->i_sb->s_uuid)); |
@@ -221,7 +223,7 @@ int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr, | |||
221 | 223 | ||
222 | desc = init_desc(EVM_XATTR_HMAC); | 224 | desc = init_desc(EVM_XATTR_HMAC); |
223 | if (IS_ERR(desc)) { | 225 | if (IS_ERR(desc)) { |
224 | printk(KERN_INFO "init_desc failed\n"); | 226 | pr_info("init_desc failed\n"); |
225 | return PTR_ERR(desc); | 227 | return PTR_ERR(desc); |
226 | } | 228 | } |
227 | 229 | ||
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index 336b3ddfe63f..996092f21b64 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c | |||
@@ -14,6 +14,8 @@ | |||
14 | * evm_inode_removexattr, and evm_verifyxattr | 14 | * evm_inode_removexattr, and evm_verifyxattr |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
18 | |||
17 | #include <linux/module.h> | 19 | #include <linux/module.h> |
18 | #include <linux/crypto.h> | 20 | #include <linux/crypto.h> |
19 | #include <linux/audit.h> | 21 | #include <linux/audit.h> |
@@ -432,7 +434,7 @@ static int __init init_evm(void) | |||
432 | 434 | ||
433 | error = evm_init_secfs(); | 435 | error = evm_init_secfs(); |
434 | if (error < 0) { | 436 | if (error < 0) { |
435 | printk(KERN_INFO "EVM: Error registering secfs\n"); | 437 | pr_info("Error registering secfs\n"); |
436 | goto err; | 438 | goto err; |
437 | } | 439 | } |
438 | 440 | ||
@@ -449,7 +451,7 @@ static int __init evm_display_config(void) | |||
449 | char **xattrname; | 451 | char **xattrname; |
450 | 452 | ||
451 | for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) | 453 | for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) |
452 | printk(KERN_INFO "EVM: %s\n", *xattrname); | 454 | pr_info("%s\n", *xattrname); |
453 | return 0; | 455 | return 0; |
454 | } | 456 | } |
455 | 457 | ||
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c index 30f670ad6ac3..cf12a04717d3 100644 --- a/security/integrity/evm/evm_secfs.c +++ b/security/integrity/evm/evm_secfs.c | |||
@@ -13,6 +13,8 @@ | |||
13 | * - Get the key and enable EVM | 13 | * - Get the key and enable EVM |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
17 | |||
16 | #include <linux/uaccess.h> | 18 | #include <linux/uaccess.h> |
17 | #include <linux/module.h> | 19 | #include <linux/module.h> |
18 | #include "evm.h" | 20 | #include "evm.h" |
@@ -79,9 +81,9 @@ static ssize_t evm_write_key(struct file *file, const char __user *buf, | |||
79 | error = evm_init_key(); | 81 | error = evm_init_key(); |
80 | if (!error) { | 82 | if (!error) { |
81 | evm_initialized = 1; | 83 | evm_initialized = 1; |
82 | pr_info("EVM: initialized\n"); | 84 | pr_info("initialized\n"); |
83 | } else | 85 | } else |
84 | pr_err("EVM: initialization failed\n"); | 86 | pr_err("initialization failed\n"); |
85 | return count; | 87 | return count; |
86 | } | 88 | } |
87 | 89 | ||
diff --git a/security/integrity/iint.c b/security/integrity/iint.c index c49d3f14cbec..a521edf4cbd6 100644 --- a/security/integrity/iint.c +++ b/security/integrity/iint.c | |||
@@ -151,7 +151,7 @@ static void init_once(void *foo) | |||
151 | { | 151 | { |
152 | struct integrity_iint_cache *iint = foo; | 152 | struct integrity_iint_cache *iint = foo; |
153 | 153 | ||
154 | memset(iint, 0, sizeof *iint); | 154 | memset(iint, 0, sizeof(*iint)); |
155 | iint->version = 0; | 155 | iint->version = 0; |
156 | iint->flags = 0UL; | 156 | iint->flags = 0UL; |
157 | iint->ima_file_status = INTEGRITY_UNKNOWN; | 157 | iint->ima_file_status = INTEGRITY_UNKNOWN; |
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index 0356e1d437ca..f79fa8be203c 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h | |||
@@ -27,7 +27,7 @@ | |||
27 | #include "../integrity.h" | 27 | #include "../integrity.h" |
28 | 28 | ||
29 | enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN, | 29 | enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN, |
30 | IMA_SHOW_ASCII }; | 30 | IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII }; |
31 | enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; | 31 | enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; |
32 | 32 | ||
33 | /* digest size for IMA, fits SHA1 or MD5 */ | 33 | /* digest size for IMA, fits SHA1 or MD5 */ |
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c index c38bbce8c6a6..ba9e4d792dd5 100644 --- a/security/integrity/ima/ima_api.c +++ b/security/integrity/ima/ima_api.c | |||
@@ -92,8 +92,8 @@ int ima_store_template(struct ima_template_entry *entry, | |||
92 | int violation, struct inode *inode, | 92 | int violation, struct inode *inode, |
93 | const unsigned char *filename) | 93 | const unsigned char *filename) |
94 | { | 94 | { |
95 | const char *op = "add_template_measure"; | 95 | static const char op[] = "add_template_measure"; |
96 | const char *audit_cause = "hashing_error"; | 96 | static const char audit_cause[] = "hashing_error"; |
97 | char *template_name = entry->template_desc->name; | 97 | char *template_name = entry->template_desc->name; |
98 | int result; | 98 | int result; |
99 | struct { | 99 | struct { |
@@ -132,7 +132,7 @@ void ima_add_violation(struct file *file, const unsigned char *filename, | |||
132 | const char *op, const char *cause) | 132 | const char *op, const char *cause) |
133 | { | 133 | { |
134 | struct ima_template_entry *entry; | 134 | struct ima_template_entry *entry; |
135 | struct inode *inode = file->f_dentry->d_inode; | 135 | struct inode *inode = file_inode(file); |
136 | int violation = 1; | 136 | int violation = 1; |
137 | int result; | 137 | int result; |
138 | 138 | ||
@@ -160,10 +160,10 @@ err_out: | |||
160 | * @function: calling function (FILE_CHECK, BPRM_CHECK, MMAP_CHECK, MODULE_CHECK) | 160 | * @function: calling function (FILE_CHECK, BPRM_CHECK, MMAP_CHECK, MODULE_CHECK) |
161 | * | 161 | * |
162 | * The policy is defined in terms of keypairs: | 162 | * The policy is defined in terms of keypairs: |
163 | * subj=, obj=, type=, func=, mask=, fsmagic= | 163 | * subj=, obj=, type=, func=, mask=, fsmagic= |
164 | * subj,obj, and type: are LSM specific. | 164 | * subj,obj, and type: are LSM specific. |
165 | * func: FILE_CHECK | BPRM_CHECK | MMAP_CHECK | MODULE_CHECK | 165 | * func: FILE_CHECK | BPRM_CHECK | MMAP_CHECK | MODULE_CHECK |
166 | * mask: contains the permission mask | 166 | * mask: contains the permission mask |
167 | * fsmagic: hex value | 167 | * fsmagic: hex value |
168 | * | 168 | * |
169 | * Returns IMA_MEASURE, IMA_APPRAISE mask. | 169 | * Returns IMA_MEASURE, IMA_APPRAISE mask. |
@@ -248,7 +248,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint, | |||
248 | * | 248 | * |
249 | * We only get here if the inode has not already been measured, | 249 | * We only get here if the inode has not already been measured, |
250 | * but the measurement could already exist: | 250 | * but the measurement could already exist: |
251 | * - multiple copies of the same file on either the same or | 251 | * - multiple copies of the same file on either the same or |
252 | * different filesystems. | 252 | * different filesystems. |
253 | * - the inode was previously flushed as well as the iint info, | 253 | * - the inode was previously flushed as well as the iint info, |
254 | * containing the hashing info. | 254 | * containing the hashing info. |
@@ -260,8 +260,8 @@ void ima_store_measurement(struct integrity_iint_cache *iint, | |||
260 | struct evm_ima_xattr_data *xattr_value, | 260 | struct evm_ima_xattr_data *xattr_value, |
261 | int xattr_len) | 261 | int xattr_len) |
262 | { | 262 | { |
263 | const char *op = "add_template_measure"; | 263 | static const char op[] = "add_template_measure"; |
264 | const char *audit_cause = "ENOMEM"; | 264 | static const char audit_cause[] = "ENOMEM"; |
265 | int result = -ENOMEM; | 265 | int result = -ENOMEM; |
266 | struct inode *inode = file_inode(file); | 266 | struct inode *inode = file_inode(file); |
267 | struct ima_template_entry *entry; | 267 | struct ima_template_entry *entry; |
@@ -332,5 +332,5 @@ const char *ima_d_path(struct path *path, char **pathbuf) | |||
332 | pathname = NULL; | 332 | pathname = NULL; |
333 | } | 333 | } |
334 | } | 334 | } |
335 | return pathname; | 335 | return pathname ?: (const char *)path->dentry->d_name.name; |
336 | } | 336 | } |
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index 734e9468aca0..291bf0f3a46d 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c | |||
@@ -177,11 +177,11 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint, | |||
177 | struct evm_ima_xattr_data *xattr_value, | 177 | struct evm_ima_xattr_data *xattr_value, |
178 | int xattr_len) | 178 | int xattr_len) |
179 | { | 179 | { |
180 | static const char op[] = "appraise_data"; | ||
181 | char *cause = "unknown"; | ||
180 | struct dentry *dentry = file->f_dentry; | 182 | struct dentry *dentry = file->f_dentry; |
181 | struct inode *inode = dentry->d_inode; | 183 | struct inode *inode = dentry->d_inode; |
182 | enum integrity_status status = INTEGRITY_UNKNOWN; | 184 | enum integrity_status status = INTEGRITY_UNKNOWN; |
183 | const char *op = "appraise_data"; | ||
184 | char *cause = "unknown"; | ||
185 | int rc = xattr_len, hash_start = 0; | 185 | int rc = xattr_len, hash_start = 0; |
186 | 186 | ||
187 | if (!ima_appraise) | 187 | if (!ima_appraise) |
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c index fdf60def52e9..1bde8e627766 100644 --- a/security/integrity/ima/ima_crypto.c +++ b/security/integrity/ima/ima_crypto.c | |||
@@ -10,9 +10,11 @@ | |||
10 | * the Free Software Foundation, version 2 of the License. | 10 | * the Free Software Foundation, version 2 of the License. |
11 | * | 11 | * |
12 | * File: ima_crypto.c | 12 | * File: ima_crypto.c |
13 | * Calculates md5/sha1 file hash, template hash, boot-aggreate hash | 13 | * Calculates md5/sha1 file hash, template hash, boot-aggreate hash |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
17 | |||
16 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
17 | #include <linux/file.h> | 19 | #include <linux/file.h> |
18 | #include <linux/crypto.h> | 20 | #include <linux/crypto.h> |
@@ -85,16 +87,20 @@ static int ima_calc_file_hash_tfm(struct file *file, | |||
85 | if (rc != 0) | 87 | if (rc != 0) |
86 | return rc; | 88 | return rc; |
87 | 89 | ||
88 | rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL); | 90 | i_size = i_size_read(file_inode(file)); |
89 | if (!rbuf) { | 91 | |
90 | rc = -ENOMEM; | 92 | if (i_size == 0) |
91 | goto out; | 93 | goto out; |
92 | } | 94 | |
95 | rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL); | ||
96 | if (!rbuf) | ||
97 | return -ENOMEM; | ||
98 | |||
93 | if (!(file->f_mode & FMODE_READ)) { | 99 | if (!(file->f_mode & FMODE_READ)) { |
94 | file->f_mode |= FMODE_READ; | 100 | file->f_mode |= FMODE_READ; |
95 | read = 1; | 101 | read = 1; |
96 | } | 102 | } |
97 | i_size = i_size_read(file_inode(file)); | 103 | |
98 | while (offset < i_size) { | 104 | while (offset < i_size) { |
99 | int rbuf_len; | 105 | int rbuf_len; |
100 | 106 | ||
@@ -111,12 +117,12 @@ static int ima_calc_file_hash_tfm(struct file *file, | |||
111 | if (rc) | 117 | if (rc) |
112 | break; | 118 | break; |
113 | } | 119 | } |
114 | kfree(rbuf); | ||
115 | if (!rc) | ||
116 | rc = crypto_shash_final(&desc.shash, hash->digest); | ||
117 | if (read) | 120 | if (read) |
118 | file->f_mode &= ~FMODE_READ; | 121 | file->f_mode &= ~FMODE_READ; |
122 | kfree(rbuf); | ||
119 | out: | 123 | out: |
124 | if (!rc) | ||
125 | rc = crypto_shash_final(&desc.shash, hash->digest); | ||
120 | return rc; | 126 | return rc; |
121 | } | 127 | } |
122 | 128 | ||
@@ -161,15 +167,22 @@ static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data, | |||
161 | return rc; | 167 | return rc; |
162 | 168 | ||
163 | for (i = 0; i < num_fields; i++) { | 169 | for (i = 0; i < num_fields; i++) { |
170 | u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 }; | ||
171 | u8 *data_to_hash = field_data[i].data; | ||
172 | u32 datalen = field_data[i].len; | ||
173 | |||
164 | if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) { | 174 | if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) { |
165 | rc = crypto_shash_update(&desc.shash, | 175 | rc = crypto_shash_update(&desc.shash, |
166 | (const u8 *) &field_data[i].len, | 176 | (const u8 *) &field_data[i].len, |
167 | sizeof(field_data[i].len)); | 177 | sizeof(field_data[i].len)); |
168 | if (rc) | 178 | if (rc) |
169 | break; | 179 | break; |
180 | } else if (strcmp(td->fields[i]->field_id, "n") == 0) { | ||
181 | memcpy(buffer, data_to_hash, datalen); | ||
182 | data_to_hash = buffer; | ||
183 | datalen = IMA_EVENT_NAME_LEN_MAX + 1; | ||
170 | } | 184 | } |
171 | rc = crypto_shash_update(&desc.shash, field_data[i].data, | 185 | rc = crypto_shash_update(&desc.shash, data_to_hash, datalen); |
172 | field_data[i].len); | ||
173 | if (rc) | 186 | if (rc) |
174 | break; | 187 | break; |
175 | } | 188 | } |
@@ -205,7 +218,7 @@ static void __init ima_pcrread(int idx, u8 *pcr) | |||
205 | return; | 218 | return; |
206 | 219 | ||
207 | if (tpm_pcr_read(TPM_ANY_NUM, idx, pcr) != 0) | 220 | if (tpm_pcr_read(TPM_ANY_NUM, idx, pcr) != 0) |
208 | pr_err("IMA: Error Communicating to TPM chip\n"); | 221 | pr_err("Error Communicating to TPM chip\n"); |
209 | } | 222 | } |
210 | 223 | ||
211 | /* | 224 | /* |
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c index db01125926bd..da92fcc08d15 100644 --- a/security/integrity/ima/ima_fs.c +++ b/security/integrity/ima/ima_fs.c | |||
@@ -133,14 +133,14 @@ static int ima_measurements_show(struct seq_file *m, void *v) | |||
133 | * PCR used is always the same (config option) in | 133 | * PCR used is always the same (config option) in |
134 | * little-endian format | 134 | * little-endian format |
135 | */ | 135 | */ |
136 | ima_putc(m, &pcr, sizeof pcr); | 136 | ima_putc(m, &pcr, sizeof(pcr)); |
137 | 137 | ||
138 | /* 2nd: template digest */ | 138 | /* 2nd: template digest */ |
139 | ima_putc(m, e->digest, TPM_DIGEST_SIZE); | 139 | ima_putc(m, e->digest, TPM_DIGEST_SIZE); |
140 | 140 | ||
141 | /* 3rd: template name size */ | 141 | /* 3rd: template name size */ |
142 | namelen = strlen(e->template_desc->name); | 142 | namelen = strlen(e->template_desc->name); |
143 | ima_putc(m, &namelen, sizeof namelen); | 143 | ima_putc(m, &namelen, sizeof(namelen)); |
144 | 144 | ||
145 | /* 4th: template name */ | 145 | /* 4th: template name */ |
146 | ima_putc(m, e->template_desc->name, namelen); | 146 | ima_putc(m, e->template_desc->name, namelen); |
@@ -160,6 +160,8 @@ static int ima_measurements_show(struct seq_file *m, void *v) | |||
160 | 160 | ||
161 | if (is_ima_template && strcmp(field->field_id, "d") == 0) | 161 | if (is_ima_template && strcmp(field->field_id, "d") == 0) |
162 | show = IMA_SHOW_BINARY_NO_FIELD_LEN; | 162 | show = IMA_SHOW_BINARY_NO_FIELD_LEN; |
163 | if (is_ima_template && strcmp(field->field_id, "n") == 0) | ||
164 | show = IMA_SHOW_BINARY_OLD_STRING_FMT; | ||
163 | field->field_show(m, show, &e->template_data[i]); | 165 | field->field_show(m, show, &e->template_data[i]); |
164 | } | 166 | } |
165 | return 0; | 167 | return 0; |
@@ -290,7 +292,7 @@ static atomic_t policy_opencount = ATOMIC_INIT(1); | |||
290 | /* | 292 | /* |
291 | * ima_open_policy: sequentialize access to the policy file | 293 | * ima_open_policy: sequentialize access to the policy file |
292 | */ | 294 | */ |
293 | static int ima_open_policy(struct inode * inode, struct file * filp) | 295 | static int ima_open_policy(struct inode *inode, struct file *filp) |
294 | { | 296 | { |
295 | /* No point in being allowed to open it if you aren't going to write */ | 297 | /* No point in being allowed to open it if you aren't going to write */ |
296 | if (!(filp->f_flags & O_WRONLY)) | 298 | if (!(filp->f_flags & O_WRONLY)) |
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c index 37122768554a..e8f9d70a465d 100644 --- a/security/integrity/ima/ima_init.c +++ b/security/integrity/ima/ima_init.c | |||
@@ -14,6 +14,9 @@ | |||
14 | * File: ima_init.c | 14 | * File: ima_init.c |
15 | * initialization and cleanup functions | 15 | * initialization and cleanup functions |
16 | */ | 16 | */ |
17 | |||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
19 | |||
17 | #include <linux/module.h> | 20 | #include <linux/module.h> |
18 | #include <linux/scatterlist.h> | 21 | #include <linux/scatterlist.h> |
19 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
@@ -42,10 +45,10 @@ int ima_used_chip; | |||
42 | */ | 45 | */ |
43 | static void __init ima_add_boot_aggregate(void) | 46 | static void __init ima_add_boot_aggregate(void) |
44 | { | 47 | { |
48 | static const char op[] = "add_boot_aggregate"; | ||
49 | const char *audit_cause = "ENOMEM"; | ||
45 | struct ima_template_entry *entry; | 50 | struct ima_template_entry *entry; |
46 | struct integrity_iint_cache tmp_iint, *iint = &tmp_iint; | 51 | struct integrity_iint_cache tmp_iint, *iint = &tmp_iint; |
47 | const char *op = "add_boot_aggregate"; | ||
48 | const char *audit_cause = "ENOMEM"; | ||
49 | int result = -ENOMEM; | 52 | int result = -ENOMEM; |
50 | int violation = 0; | 53 | int violation = 0; |
51 | struct { | 54 | struct { |
@@ -93,7 +96,7 @@ int __init ima_init(void) | |||
93 | ima_used_chip = 1; | 96 | ima_used_chip = 1; |
94 | 97 | ||
95 | if (!ima_used_chip) | 98 | if (!ima_used_chip) |
96 | pr_info("IMA: No TPM chip found, activating TPM-bypass!\n"); | 99 | pr_info("No TPM chip found, activating TPM-bypass!\n"); |
97 | 100 | ||
98 | rc = ima_init_crypto(); | 101 | rc = ima_init_crypto(); |
99 | if (rc) | 102 | if (rc) |
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 149ee1119f87..52ac6cf41f88 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c | |||
@@ -71,15 +71,14 @@ __setup("ima_hash=", hash_setup); | |||
71 | * ima_rdwr_violation_check | 71 | * ima_rdwr_violation_check |
72 | * | 72 | * |
73 | * Only invalidate the PCR for measured files: | 73 | * Only invalidate the PCR for measured files: |
74 | * - Opening a file for write when already open for read, | 74 | * - Opening a file for write when already open for read, |
75 | * results in a time of measure, time of use (ToMToU) error. | 75 | * results in a time of measure, time of use (ToMToU) error. |
76 | * - Opening a file for read when already open for write, | 76 | * - Opening a file for read when already open for write, |
77 | * could result in a file measurement error. | 77 | * could result in a file measurement error. |
78 | * | 78 | * |
79 | */ | 79 | */ |
80 | static void ima_rdwr_violation_check(struct file *file) | 80 | static void ima_rdwr_violation_check(struct file *file) |
81 | { | 81 | { |
82 | struct dentry *dentry = file->f_path.dentry; | ||
83 | struct inode *inode = file_inode(file); | 82 | struct inode *inode = file_inode(file); |
84 | fmode_t mode = file->f_mode; | 83 | fmode_t mode = file->f_mode; |
85 | int must_measure; | 84 | int must_measure; |
@@ -111,8 +110,6 @@ out: | |||
111 | return; | 110 | return; |
112 | 111 | ||
113 | pathname = ima_d_path(&file->f_path, &pathbuf); | 112 | pathname = ima_d_path(&file->f_path, &pathbuf); |
114 | if (!pathname || strlen(pathname) > IMA_EVENT_NAME_LEN_MAX) | ||
115 | pathname = dentry->d_name.name; | ||
116 | 113 | ||
117 | if (send_tomtou) | 114 | if (send_tomtou) |
118 | ima_add_violation(file, pathname, "invalid_pcr", "ToMToU"); | 115 | ima_add_violation(file, pathname, "invalid_pcr", "ToMToU"); |
@@ -220,9 +217,7 @@ static int process_measurement(struct file *file, const char *filename, | |||
220 | if (rc != 0) | 217 | if (rc != 0) |
221 | goto out_digsig; | 218 | goto out_digsig; |
222 | 219 | ||
223 | pathname = !filename ? ima_d_path(&file->f_path, &pathbuf) : filename; | 220 | pathname = filename ?: ima_d_path(&file->f_path, &pathbuf); |
224 | if (!pathname) | ||
225 | pathname = (const char *)file->f_dentry->d_name.name; | ||
226 | 221 | ||
227 | if (action & IMA_MEASURE) | 222 | if (action & IMA_MEASURE) |
228 | ima_store_measurement(iint, file, pathname, | 223 | ima_store_measurement(iint, file, pathname, |
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index a9c3d3cd1990..93873a450ff7 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * the Free Software Foundation, version 2 of the License. | 7 | * the Free Software Foundation, version 2 of the License. |
8 | * | 8 | * |
9 | * ima_policy.c | 9 | * ima_policy.c |
10 | * - initialize default measure policy rules | 10 | * - initialize default measure policy rules |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
@@ -21,8 +21,8 @@ | |||
21 | #include "ima.h" | 21 | #include "ima.h" |
22 | 22 | ||
23 | /* flags definitions */ | 23 | /* flags definitions */ |
24 | #define IMA_FUNC 0x0001 | 24 | #define IMA_FUNC 0x0001 |
25 | #define IMA_MASK 0x0002 | 25 | #define IMA_MASK 0x0002 |
26 | #define IMA_FSMAGIC 0x0004 | 26 | #define IMA_FSMAGIC 0x0004 |
27 | #define IMA_UID 0x0008 | 27 | #define IMA_UID 0x0008 |
28 | #define IMA_FOWNER 0x0010 | 28 | #define IMA_FOWNER 0x0010 |
@@ -69,35 +69,35 @@ struct ima_rule_entry { | |||
69 | * and running executables. | 69 | * and running executables. |
70 | */ | 70 | */ |
71 | static struct ima_rule_entry default_rules[] = { | 71 | static struct ima_rule_entry default_rules[] = { |
72 | {.action = DONT_MEASURE,.fsmagic = PROC_SUPER_MAGIC,.flags = IMA_FSMAGIC}, | 72 | {.action = DONT_MEASURE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC}, |
73 | {.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC}, | 73 | {.action = DONT_MEASURE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC}, |
74 | {.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC}, | 74 | {.action = DONT_MEASURE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC}, |
75 | {.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC}, | 75 | {.action = DONT_MEASURE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC}, |
76 | {.action = DONT_MEASURE,.fsmagic = DEVPTS_SUPER_MAGIC,.flags = IMA_FSMAGIC}, | 76 | {.action = DONT_MEASURE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC}, |
77 | {.action = DONT_MEASURE,.fsmagic = BINFMTFS_MAGIC,.flags = IMA_FSMAGIC}, | 77 | {.action = DONT_MEASURE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC}, |
78 | {.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC}, | 78 | {.action = DONT_MEASURE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC}, |
79 | {.action = DONT_MEASURE,.fsmagic = SELINUX_MAGIC,.flags = IMA_FSMAGIC}, | 79 | {.action = DONT_MEASURE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC}, |
80 | {.action = MEASURE,.func = MMAP_CHECK,.mask = MAY_EXEC, | 80 | {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC, |
81 | .flags = IMA_FUNC | IMA_MASK}, | 81 | .flags = IMA_FUNC | IMA_MASK}, |
82 | {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC, | 82 | {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC, |
83 | .flags = IMA_FUNC | IMA_MASK}, | 83 | .flags = IMA_FUNC | IMA_MASK}, |
84 | {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = GLOBAL_ROOT_UID, | 84 | {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ, .uid = GLOBAL_ROOT_UID, |
85 | .flags = IMA_FUNC | IMA_MASK | IMA_UID}, | 85 | .flags = IMA_FUNC | IMA_MASK | IMA_UID}, |
86 | {.action = MEASURE,.func = MODULE_CHECK, .flags = IMA_FUNC}, | 86 | {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC}, |
87 | }; | 87 | }; |
88 | 88 | ||
89 | static struct ima_rule_entry default_appraise_rules[] = { | 89 | static struct ima_rule_entry default_appraise_rules[] = { |
90 | {.action = DONT_APPRAISE,.fsmagic = PROC_SUPER_MAGIC,.flags = IMA_FSMAGIC}, | 90 | {.action = DONT_APPRAISE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC}, |
91 | {.action = DONT_APPRAISE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC}, | 91 | {.action = DONT_APPRAISE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC}, |
92 | {.action = DONT_APPRAISE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC}, | 92 | {.action = DONT_APPRAISE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC}, |
93 | {.action = DONT_APPRAISE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC}, | 93 | {.action = DONT_APPRAISE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC}, |
94 | {.action = DONT_APPRAISE,.fsmagic = RAMFS_MAGIC,.flags = IMA_FSMAGIC}, | 94 | {.action = DONT_APPRAISE, .fsmagic = RAMFS_MAGIC, .flags = IMA_FSMAGIC}, |
95 | {.action = DONT_APPRAISE,.fsmagic = DEVPTS_SUPER_MAGIC,.flags = IMA_FSMAGIC}, | 95 | {.action = DONT_APPRAISE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC}, |
96 | {.action = DONT_APPRAISE,.fsmagic = BINFMTFS_MAGIC,.flags = IMA_FSMAGIC}, | 96 | {.action = DONT_APPRAISE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC}, |
97 | {.action = DONT_APPRAISE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC}, | 97 | {.action = DONT_APPRAISE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC}, |
98 | {.action = DONT_APPRAISE,.fsmagic = SELINUX_MAGIC,.flags = IMA_FSMAGIC}, | 98 | {.action = DONT_APPRAISE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC}, |
99 | {.action = DONT_APPRAISE,.fsmagic = CGROUP_SUPER_MAGIC,.flags = IMA_FSMAGIC}, | 99 | {.action = DONT_APPRAISE, .fsmagic = CGROUP_SUPER_MAGIC, .flags = IMA_FSMAGIC}, |
100 | {.action = APPRAISE,.fowner = GLOBAL_ROOT_UID,.flags = IMA_FOWNER}, | 100 | {.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .flags = IMA_FOWNER}, |
101 | }; | 101 | }; |
102 | 102 | ||
103 | static LIST_HEAD(ima_default_rules); | 103 | static LIST_HEAD(ima_default_rules); |
@@ -122,12 +122,12 @@ static int __init default_appraise_policy_setup(char *str) | |||
122 | } | 122 | } |
123 | __setup("ima_appraise_tcb", default_appraise_policy_setup); | 123 | __setup("ima_appraise_tcb", default_appraise_policy_setup); |
124 | 124 | ||
125 | /* | 125 | /* |
126 | * Although the IMA policy does not change, the LSM policy can be | 126 | * Although the IMA policy does not change, the LSM policy can be |
127 | * reloaded, leaving the IMA LSM based rules referring to the old, | 127 | * reloaded, leaving the IMA LSM based rules referring to the old, |
128 | * stale LSM policy. | 128 | * stale LSM policy. |
129 | * | 129 | * |
130 | * Update the IMA LSM based rules to reflect the reloaded LSM policy. | 130 | * Update the IMA LSM based rules to reflect the reloaded LSM policy. |
131 | * We assume the rules still exist; and BUG_ON() if they don't. | 131 | * We assume the rules still exist; and BUG_ON() if they don't. |
132 | */ | 132 | */ |
133 | static void ima_lsm_update_rules(void) | 133 | static void ima_lsm_update_rules(void) |
@@ -167,9 +167,11 @@ static bool ima_match_rules(struct ima_rule_entry *rule, | |||
167 | const struct cred *cred = current_cred(); | 167 | const struct cred *cred = current_cred(); |
168 | int i; | 168 | int i; |
169 | 169 | ||
170 | if ((rule->flags & IMA_FUNC) && rule->func != func) | 170 | if ((rule->flags & IMA_FUNC) && |
171 | (rule->func != func && func != POST_SETATTR)) | ||
171 | return false; | 172 | return false; |
172 | if ((rule->flags & IMA_MASK) && rule->mask != mask) | 173 | if ((rule->flags & IMA_MASK) && |
174 | (rule->mask != mask && func != POST_SETATTR)) | ||
173 | return false; | 175 | return false; |
174 | if ((rule->flags & IMA_FSMAGIC) | 176 | if ((rule->flags & IMA_FSMAGIC) |
175 | && rule->fsmagic != inode->i_sb->s_magic) | 177 | && rule->fsmagic != inode->i_sb->s_magic) |
@@ -216,7 +218,7 @@ retry: | |||
216 | retried = 1; | 218 | retried = 1; |
217 | ima_lsm_update_rules(); | 219 | ima_lsm_update_rules(); |
218 | goto retry; | 220 | goto retry; |
219 | } | 221 | } |
220 | if (!rc) | 222 | if (!rc) |
221 | return false; | 223 | return false; |
222 | } | 224 | } |
@@ -232,7 +234,7 @@ static int get_subaction(struct ima_rule_entry *rule, int func) | |||
232 | if (!(rule->flags & IMA_FUNC)) | 234 | if (!(rule->flags & IMA_FUNC)) |
233 | return IMA_FILE_APPRAISE; | 235 | return IMA_FILE_APPRAISE; |
234 | 236 | ||
235 | switch(func) { | 237 | switch (func) { |
236 | case MMAP_CHECK: | 238 | case MMAP_CHECK: |
237 | return IMA_MMAP_APPRAISE; | 239 | return IMA_MMAP_APPRAISE; |
238 | case BPRM_CHECK: | 240 | case BPRM_CHECK: |
@@ -304,7 +306,7 @@ void __init ima_init_policy(void) | |||
304 | measure_entries = ima_use_tcb ? ARRAY_SIZE(default_rules) : 0; | 306 | measure_entries = ima_use_tcb ? ARRAY_SIZE(default_rules) : 0; |
305 | appraise_entries = ima_use_appraise_tcb ? | 307 | appraise_entries = ima_use_appraise_tcb ? |
306 | ARRAY_SIZE(default_appraise_rules) : 0; | 308 | ARRAY_SIZE(default_appraise_rules) : 0; |
307 | 309 | ||
308 | for (i = 0; i < measure_entries + appraise_entries; i++) { | 310 | for (i = 0; i < measure_entries + appraise_entries; i++) { |
309 | if (i < measure_entries) | 311 | if (i < measure_entries) |
310 | list_add_tail(&default_rules[i].list, | 312 | list_add_tail(&default_rules[i].list, |
@@ -329,7 +331,7 @@ void __init ima_init_policy(void) | |||
329 | */ | 331 | */ |
330 | void ima_update_policy(void) | 332 | void ima_update_policy(void) |
331 | { | 333 | { |
332 | const char *op = "policy_update"; | 334 | static const char op[] = "policy_update"; |
333 | const char *cause = "already exists"; | 335 | const char *cause = "already exists"; |
334 | int result = 1; | 336 | int result = 1; |
335 | int audit_info = 0; | 337 | int audit_info = 0; |
@@ -520,8 +522,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) | |||
520 | break; | 522 | break; |
521 | } | 523 | } |
522 | 524 | ||
523 | result = strict_strtoul(args[0].from, 16, | 525 | result = kstrtoul(args[0].from, 16, &entry->fsmagic); |
524 | &entry->fsmagic); | ||
525 | if (!result) | 526 | if (!result) |
526 | entry->flags |= IMA_FSMAGIC; | 527 | entry->flags |= IMA_FSMAGIC; |
527 | break; | 528 | break; |
@@ -547,7 +548,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) | |||
547 | break; | 548 | break; |
548 | } | 549 | } |
549 | 550 | ||
550 | result = strict_strtoul(args[0].from, 10, &lnum); | 551 | result = kstrtoul(args[0].from, 10, &lnum); |
551 | if (!result) { | 552 | if (!result) { |
552 | entry->uid = make_kuid(current_user_ns(), (uid_t)lnum); | 553 | entry->uid = make_kuid(current_user_ns(), (uid_t)lnum); |
553 | if (!uid_valid(entry->uid) || (((uid_t)lnum) != lnum)) | 554 | if (!uid_valid(entry->uid) || (((uid_t)lnum) != lnum)) |
@@ -564,7 +565,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) | |||
564 | break; | 565 | break; |
565 | } | 566 | } |
566 | 567 | ||
567 | result = strict_strtoul(args[0].from, 10, &lnum); | 568 | result = kstrtoul(args[0].from, 10, &lnum); |
568 | if (!result) { | 569 | if (!result) { |
569 | entry->fowner = make_kuid(current_user_ns(), (uid_t)lnum); | 570 | entry->fowner = make_kuid(current_user_ns(), (uid_t)lnum); |
570 | if (!uid_valid(entry->fowner) || (((uid_t)lnum) != lnum)) | 571 | if (!uid_valid(entry->fowner) || (((uid_t)lnum) != lnum)) |
@@ -645,7 +646,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) | |||
645 | */ | 646 | */ |
646 | ssize_t ima_parse_add_rule(char *rule) | 647 | ssize_t ima_parse_add_rule(char *rule) |
647 | { | 648 | { |
648 | const char *op = "update_policy"; | 649 | static const char op[] = "update_policy"; |
649 | char *p; | 650 | char *p; |
650 | struct ima_rule_entry *entry; | 651 | struct ima_rule_entry *entry; |
651 | ssize_t result, len; | 652 | ssize_t result, len; |
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c index d85e99761f4f..552705d5a78d 100644 --- a/security/integrity/ima/ima_queue.c +++ b/security/integrity/ima/ima_queue.c | |||
@@ -18,6 +18,9 @@ | |||
18 | * The measurement list is append-only. No entry is | 18 | * The measurement list is append-only. No entry is |
19 | * ever removed or changed during the boot-cycle. | 19 | * ever removed or changed during the boot-cycle. |
20 | */ | 20 | */ |
21 | |||
22 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
23 | |||
21 | #include <linux/module.h> | 24 | #include <linux/module.h> |
22 | #include <linux/rculist.h> | 25 | #include <linux/rculist.h> |
23 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
@@ -72,7 +75,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry) | |||
72 | 75 | ||
73 | qe = kmalloc(sizeof(*qe), GFP_KERNEL); | 76 | qe = kmalloc(sizeof(*qe), GFP_KERNEL); |
74 | if (qe == NULL) { | 77 | if (qe == NULL) { |
75 | pr_err("IMA: OUT OF MEMORY ERROR creating queue entry.\n"); | 78 | pr_err("OUT OF MEMORY ERROR creating queue entry\n"); |
76 | return -ENOMEM; | 79 | return -ENOMEM; |
77 | } | 80 | } |
78 | qe->entry = entry; | 81 | qe->entry = entry; |
@@ -95,8 +98,7 @@ static int ima_pcr_extend(const u8 *hash) | |||
95 | 98 | ||
96 | result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash); | 99 | result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash); |
97 | if (result != 0) | 100 | if (result != 0) |
98 | pr_err("IMA: Error Communicating to TPM chip, result: %d\n", | 101 | pr_err("Error Communicating to TPM chip, result: %d\n", result); |
99 | result); | ||
100 | return result; | 102 | return result; |
101 | } | 103 | } |
102 | 104 | ||
@@ -115,7 +117,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation, | |||
115 | 117 | ||
116 | mutex_lock(&ima_extend_list_mutex); | 118 | mutex_lock(&ima_extend_list_mutex); |
117 | if (!violation) { | 119 | if (!violation) { |
118 | memcpy(digest, entry->digest, sizeof digest); | 120 | memcpy(digest, entry->digest, sizeof(digest)); |
119 | if (ima_lookup_digest_entry(digest)) { | 121 | if (ima_lookup_digest_entry(digest)) { |
120 | audit_cause = "hash_exists"; | 122 | audit_cause = "hash_exists"; |
121 | result = -EEXIST; | 123 | result = -EEXIST; |
@@ -131,7 +133,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation, | |||
131 | } | 133 | } |
132 | 134 | ||
133 | if (violation) /* invalidate pcr */ | 135 | if (violation) /* invalidate pcr */ |
134 | memset(digest, 0xff, sizeof digest); | 136 | memset(digest, 0xff, sizeof(digest)); |
135 | 137 | ||
136 | tpmresult = ima_pcr_extend(digest); | 138 | tpmresult = ima_pcr_extend(digest); |
137 | if (tpmresult != 0) { | 139 | if (tpmresult != 0) { |
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c index 635695f6a185..a076a967ec47 100644 --- a/security/integrity/ima/ima_template.c +++ b/security/integrity/ima/ima_template.c | |||
@@ -12,6 +12,9 @@ | |||
12 | * File: ima_template.c | 12 | * File: ima_template.c |
13 | * Helpers to manage template descriptors. | 13 | * Helpers to manage template descriptors. |
14 | */ | 14 | */ |
15 | |||
16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
17 | |||
15 | #include <crypto/hash_info.h> | 18 | #include <crypto/hash_info.h> |
16 | 19 | ||
17 | #include "ima.h" | 20 | #include "ima.h" |
@@ -19,20 +22,20 @@ | |||
19 | 22 | ||
20 | static struct ima_template_desc defined_templates[] = { | 23 | static struct ima_template_desc defined_templates[] = { |
21 | {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT}, | 24 | {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT}, |
22 | {.name = "ima-ng",.fmt = "d-ng|n-ng"}, | 25 | {.name = "ima-ng", .fmt = "d-ng|n-ng"}, |
23 | {.name = "ima-sig",.fmt = "d-ng|n-ng|sig"}, | 26 | {.name = "ima-sig", .fmt = "d-ng|n-ng|sig"}, |
24 | }; | 27 | }; |
25 | 28 | ||
26 | static struct ima_template_field supported_fields[] = { | 29 | static struct ima_template_field supported_fields[] = { |
27 | {.field_id = "d",.field_init = ima_eventdigest_init, | 30 | {.field_id = "d", .field_init = ima_eventdigest_init, |
28 | .field_show = ima_show_template_digest}, | 31 | .field_show = ima_show_template_digest}, |
29 | {.field_id = "n",.field_init = ima_eventname_init, | 32 | {.field_id = "n", .field_init = ima_eventname_init, |
30 | .field_show = ima_show_template_string}, | 33 | .field_show = ima_show_template_string}, |
31 | {.field_id = "d-ng",.field_init = ima_eventdigest_ng_init, | 34 | {.field_id = "d-ng", .field_init = ima_eventdigest_ng_init, |
32 | .field_show = ima_show_template_digest_ng}, | 35 | .field_show = ima_show_template_digest_ng}, |
33 | {.field_id = "n-ng",.field_init = ima_eventname_ng_init, | 36 | {.field_id = "n-ng", .field_init = ima_eventname_ng_init, |
34 | .field_show = ima_show_template_string}, | 37 | .field_show = ima_show_template_string}, |
35 | {.field_id = "sig",.field_init = ima_eventsig_init, | 38 | {.field_id = "sig", .field_init = ima_eventsig_init, |
36 | .field_show = ima_show_template_sig}, | 39 | .field_show = ima_show_template_sig}, |
37 | }; | 40 | }; |
38 | 41 | ||
@@ -58,7 +61,7 @@ static int __init ima_template_setup(char *str) | |||
58 | */ | 61 | */ |
59 | if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 && | 62 | if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 && |
60 | ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) { | 63 | ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) { |
61 | pr_err("IMA: template does not support hash alg\n"); | 64 | pr_err("template does not support hash alg\n"); |
62 | return 1; | 65 | return 1; |
63 | } | 66 | } |
64 | 67 | ||
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c index 1683bbf289a4..1506f0248572 100644 --- a/security/integrity/ima/ima_template_lib.c +++ b/security/integrity/ima/ima_template_lib.c | |||
@@ -27,7 +27,6 @@ static bool ima_template_hash_algo_allowed(u8 algo) | |||
27 | enum data_formats { | 27 | enum data_formats { |
28 | DATA_FMT_DIGEST = 0, | 28 | DATA_FMT_DIGEST = 0, |
29 | DATA_FMT_DIGEST_WITH_ALGO, | 29 | DATA_FMT_DIGEST_WITH_ALGO, |
30 | DATA_FMT_EVENT_NAME, | ||
31 | DATA_FMT_STRING, | 30 | DATA_FMT_STRING, |
32 | DATA_FMT_HEX | 31 | DATA_FMT_HEX |
33 | }; | 32 | }; |
@@ -37,18 +36,10 @@ static int ima_write_template_field_data(const void *data, const u32 datalen, | |||
37 | struct ima_field_data *field_data) | 36 | struct ima_field_data *field_data) |
38 | { | 37 | { |
39 | u8 *buf, *buf_ptr; | 38 | u8 *buf, *buf_ptr; |
40 | u32 buflen; | 39 | u32 buflen = datalen; |
41 | 40 | ||
42 | switch (datafmt) { | 41 | if (datafmt == DATA_FMT_STRING) |
43 | case DATA_FMT_EVENT_NAME: | ||
44 | buflen = IMA_EVENT_NAME_LEN_MAX + 1; | ||
45 | break; | ||
46 | case DATA_FMT_STRING: | ||
47 | buflen = datalen + 1; | 42 | buflen = datalen + 1; |
48 | break; | ||
49 | default: | ||
50 | buflen = datalen; | ||
51 | } | ||
52 | 43 | ||
53 | buf = kzalloc(buflen, GFP_KERNEL); | 44 | buf = kzalloc(buflen, GFP_KERNEL); |
54 | if (!buf) | 45 | if (!buf) |
@@ -63,7 +54,7 @@ static int ima_write_template_field_data(const void *data, const u32 datalen, | |||
63 | * split into multiple template fields (the space is the delimitator | 54 | * split into multiple template fields (the space is the delimitator |
64 | * character for measurements lists in ASCII format). | 55 | * character for measurements lists in ASCII format). |
65 | */ | 56 | */ |
66 | if (datafmt == DATA_FMT_EVENT_NAME || datafmt == DATA_FMT_STRING) { | 57 | if (datafmt == DATA_FMT_STRING) { |
67 | for (buf_ptr = buf; buf_ptr - buf < datalen; buf_ptr++) | 58 | for (buf_ptr = buf; buf_ptr - buf < datalen; buf_ptr++) |
68 | if (*buf_ptr == ' ') | 59 | if (*buf_ptr == ' ') |
69 | *buf_ptr = '_'; | 60 | *buf_ptr = '_'; |
@@ -109,13 +100,16 @@ static void ima_show_template_data_binary(struct seq_file *m, | |||
109 | enum data_formats datafmt, | 100 | enum data_formats datafmt, |
110 | struct ima_field_data *field_data) | 101 | struct ima_field_data *field_data) |
111 | { | 102 | { |
103 | u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ? | ||
104 | strlen(field_data->data) : field_data->len; | ||
105 | |||
112 | if (show != IMA_SHOW_BINARY_NO_FIELD_LEN) | 106 | if (show != IMA_SHOW_BINARY_NO_FIELD_LEN) |
113 | ima_putc(m, &field_data->len, sizeof(u32)); | 107 | ima_putc(m, &len, sizeof(len)); |
114 | 108 | ||
115 | if (!field_data->len) | 109 | if (!len) |
116 | return; | 110 | return; |
117 | 111 | ||
118 | ima_putc(m, field_data->data, field_data->len); | 112 | ima_putc(m, field_data->data, len); |
119 | } | 113 | } |
120 | 114 | ||
121 | static void ima_show_template_field_data(struct seq_file *m, | 115 | static void ima_show_template_field_data(struct seq_file *m, |
@@ -129,6 +123,7 @@ static void ima_show_template_field_data(struct seq_file *m, | |||
129 | break; | 123 | break; |
130 | case IMA_SHOW_BINARY: | 124 | case IMA_SHOW_BINARY: |
131 | case IMA_SHOW_BINARY_NO_FIELD_LEN: | 125 | case IMA_SHOW_BINARY_NO_FIELD_LEN: |
126 | case IMA_SHOW_BINARY_OLD_STRING_FMT: | ||
132 | ima_show_template_data_binary(m, show, datafmt, field_data); | 127 | ima_show_template_data_binary(m, show, datafmt, field_data); |
133 | break; | 128 | break; |
134 | default: | 129 | default: |
@@ -277,8 +272,6 @@ static int ima_eventname_init_common(struct integrity_iint_cache *iint, | |||
277 | { | 272 | { |
278 | const char *cur_filename = NULL; | 273 | const char *cur_filename = NULL; |
279 | u32 cur_filename_len = 0; | 274 | u32 cur_filename_len = 0; |
280 | enum data_formats fmt = size_limit ? | ||
281 | DATA_FMT_EVENT_NAME : DATA_FMT_STRING; | ||
282 | 275 | ||
283 | BUG_ON(filename == NULL && file == NULL); | 276 | BUG_ON(filename == NULL && file == NULL); |
284 | 277 | ||
@@ -301,7 +294,7 @@ static int ima_eventname_init_common(struct integrity_iint_cache *iint, | |||
301 | cur_filename_len = IMA_EVENT_NAME_LEN_MAX; | 294 | cur_filename_len = IMA_EVENT_NAME_LEN_MAX; |
302 | out: | 295 | out: |
303 | return ima_write_template_field_data(cur_filename, cur_filename_len, | 296 | return ima_write_template_field_data(cur_filename, cur_filename_len, |
304 | fmt, field_data); | 297 | DATA_FMT_STRING, field_data); |
305 | } | 298 | } |
306 | 299 | ||
307 | /* | 300 | /* |
diff --git a/security/integrity/integrity_audit.c b/security/integrity/integrity_audit.c index d7efb30404aa..aab9fa5a8231 100644 --- a/security/integrity/integrity_audit.c +++ b/security/integrity/integrity_audit.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * the Free Software Foundation, version 2 of the License. | 7 | * the Free Software Foundation, version 2 of the License. |
8 | * | 8 | * |
9 | * File: integrity_audit.c | 9 | * File: integrity_audit.c |
10 | * Audit calls for the integrity subsystem | 10 | * Audit calls for the integrity subsystem |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
@@ -22,7 +22,7 @@ static int __init integrity_audit_setup(char *str) | |||
22 | { | 22 | { |
23 | unsigned long audit; | 23 | unsigned long audit; |
24 | 24 | ||
25 | if (!strict_strtoul(str, 0, &audit)) | 25 | if (!kstrtoul(str, 0, &audit)) |
26 | integrity_audit_info = audit ? 1 : 0; | 26 | integrity_audit_info = audit ? 1 : 0; |
27 | return 1; | 27 | return 1; |
28 | } | 28 | } |
@@ -33,6 +33,7 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode, | |||
33 | const char *cause, int result, int audit_info) | 33 | const char *cause, int result, int audit_info) |
34 | { | 34 | { |
35 | struct audit_buffer *ab; | 35 | struct audit_buffer *ab; |
36 | char name[TASK_COMM_LEN]; | ||
36 | 37 | ||
37 | if (!integrity_audit_info && audit_info == 1) /* Skip info messages */ | 38 | if (!integrity_audit_info && audit_info == 1) /* Skip info messages */ |
38 | return; | 39 | return; |
@@ -49,7 +50,7 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode, | |||
49 | audit_log_format(ab, " cause="); | 50 | audit_log_format(ab, " cause="); |
50 | audit_log_string(ab, cause); | 51 | audit_log_string(ab, cause); |
51 | audit_log_format(ab, " comm="); | 52 | audit_log_format(ab, " comm="); |
52 | audit_log_untrustedstring(ab, current->comm); | 53 | audit_log_untrustedstring(ab, get_task_comm(name, current)); |
53 | if (fname) { | 54 | if (fname) { |
54 | audit_log_format(ab, " name="); | 55 | audit_log_format(ab, " name="); |
55 | audit_log_untrustedstring(ab, fname); | 56 | audit_log_untrustedstring(ab, fname); |
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index 9e1e005c7596..5fe443d120af 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c | |||
@@ -609,7 +609,7 @@ static struct encrypted_key_payload *encrypted_key_alloc(struct key *key, | |||
609 | long dlen; | 609 | long dlen; |
610 | int ret; | 610 | int ret; |
611 | 611 | ||
612 | ret = strict_strtol(datalen, 10, &dlen); | 612 | ret = kstrtol(datalen, 10, &dlen); |
613 | if (ret < 0 || dlen < MIN_DATA_SIZE || dlen > MAX_DATA_SIZE) | 613 | if (ret < 0 || dlen < MIN_DATA_SIZE || dlen > MAX_DATA_SIZE) |
614 | return ERR_PTR(-EINVAL); | 614 | return ERR_PTR(-EINVAL); |
615 | 615 | ||
diff --git a/security/keys/trusted.c b/security/keys/trusted.c index e13fcf7636f7..6b804aa4529a 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c | |||
@@ -753,7 +753,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay, | |||
753 | return -EINVAL; | 753 | return -EINVAL; |
754 | break; | 754 | break; |
755 | case Opt_keyhandle: | 755 | case Opt_keyhandle: |
756 | res = strict_strtoul(args[0].from, 16, &handle); | 756 | res = kstrtoul(args[0].from, 16, &handle); |
757 | if (res < 0) | 757 | if (res < 0) |
758 | return -EINVAL; | 758 | return -EINVAL; |
759 | opt->keytype = SEAL_keytype; | 759 | opt->keytype = SEAL_keytype; |
@@ -782,7 +782,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay, | |||
782 | return -EINVAL; | 782 | return -EINVAL; |
783 | break; | 783 | break; |
784 | case Opt_pcrlock: | 784 | case Opt_pcrlock: |
785 | res = strict_strtoul(args[0].from, 10, &lock); | 785 | res = kstrtoul(args[0].from, 10, &lock); |
786 | if (res < 0) | 786 | if (res < 0) |
787 | return -EINVAL; | 787 | return -EINVAL; |
788 | opt->pcrlock = lock; | 788 | opt->pcrlock = lock; |
@@ -820,7 +820,7 @@ static int datablob_parse(char *datablob, struct trusted_key_payload *p, | |||
820 | c = strsep(&datablob, " \t"); | 820 | c = strsep(&datablob, " \t"); |
821 | if (!c) | 821 | if (!c) |
822 | return -EINVAL; | 822 | return -EINVAL; |
823 | ret = strict_strtol(c, 10, &keylen); | 823 | ret = kstrtol(c, 10, &keylen); |
824 | if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE) | 824 | if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE) |
825 | return -EINVAL; | 825 | return -EINVAL; |
826 | p->key_len = keylen; | 826 | p->key_len = keylen; |
diff --git a/security/security.c b/security/security.c index 8e428ac90233..d91fec458e90 100644 --- a/security/security.c +++ b/security/security.c | |||
@@ -1317,9 +1317,11 @@ void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) | |||
1317 | 1317 | ||
1318 | #ifdef CONFIG_SECURITY_NETWORK_XFRM | 1318 | #ifdef CONFIG_SECURITY_NETWORK_XFRM |
1319 | 1319 | ||
1320 | int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx) | 1320 | int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, |
1321 | struct xfrm_user_sec_ctx *sec_ctx, | ||
1322 | gfp_t gfp) | ||
1321 | { | 1323 | { |
1322 | return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx); | 1324 | return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx, gfp); |
1323 | } | 1325 | } |
1324 | EXPORT_SYMBOL(security_xfrm_policy_alloc); | 1326 | EXPORT_SYMBOL(security_xfrm_policy_alloc); |
1325 | 1327 | ||
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index eb5c307fcc9a..6ab22720c277 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -106,7 +106,7 @@ int selinux_enforcing; | |||
106 | static int __init enforcing_setup(char *str) | 106 | static int __init enforcing_setup(char *str) |
107 | { | 107 | { |
108 | unsigned long enforcing; | 108 | unsigned long enforcing; |
109 | if (!strict_strtoul(str, 0, &enforcing)) | 109 | if (!kstrtoul(str, 0, &enforcing)) |
110 | selinux_enforcing = enforcing ? 1 : 0; | 110 | selinux_enforcing = enforcing ? 1 : 0; |
111 | return 1; | 111 | return 1; |
112 | } | 112 | } |
@@ -119,7 +119,7 @@ int selinux_enabled = CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE; | |||
119 | static int __init selinux_enabled_setup(char *str) | 119 | static int __init selinux_enabled_setup(char *str) |
120 | { | 120 | { |
121 | unsigned long enabled; | 121 | unsigned long enabled; |
122 | if (!strict_strtoul(str, 0, &enabled)) | 122 | if (!kstrtoul(str, 0, &enabled)) |
123 | selinux_enabled = enabled ? 1 : 0; | 123 | selinux_enabled = enabled ? 1 : 0; |
124 | return 1; | 124 | return 1; |
125 | } | 125 | } |
@@ -668,7 +668,7 @@ static int selinux_set_mnt_opts(struct super_block *sb, | |||
668 | if (flags[i] == SBLABEL_MNT) | 668 | if (flags[i] == SBLABEL_MNT) |
669 | continue; | 669 | continue; |
670 | rc = security_context_to_sid(mount_options[i], | 670 | rc = security_context_to_sid(mount_options[i], |
671 | strlen(mount_options[i]), &sid); | 671 | strlen(mount_options[i]), &sid, GFP_KERNEL); |
672 | if (rc) { | 672 | if (rc) { |
673 | printk(KERN_WARNING "SELinux: security_context_to_sid" | 673 | printk(KERN_WARNING "SELinux: security_context_to_sid" |
674 | "(%s) failed for (dev %s, type %s) errno=%d\n", | 674 | "(%s) failed for (dev %s, type %s) errno=%d\n", |
@@ -1418,15 +1418,33 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent | |||
1418 | isec->sid = sbsec->sid; | 1418 | isec->sid = sbsec->sid; |
1419 | 1419 | ||
1420 | if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) { | 1420 | if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) { |
1421 | if (opt_dentry) { | 1421 | /* We must have a dentry to determine the label on |
1422 | isec->sclass = inode_mode_to_security_class(inode->i_mode); | 1422 | * procfs inodes */ |
1423 | rc = selinux_proc_get_sid(opt_dentry, | 1423 | if (opt_dentry) |
1424 | isec->sclass, | 1424 | /* Called from d_instantiate or |
1425 | &sid); | 1425 | * d_splice_alias. */ |
1426 | if (rc) | 1426 | dentry = dget(opt_dentry); |
1427 | goto out_unlock; | 1427 | else |
1428 | isec->sid = sid; | 1428 | /* Called from selinux_complete_init, try to |
1429 | } | 1429 | * find a dentry. */ |
1430 | dentry = d_find_alias(inode); | ||
1431 | /* | ||
1432 | * This can be hit on boot when a file is accessed | ||
1433 | * before the policy is loaded. When we load policy we | ||
1434 | * may find inodes that have no dentry on the | ||
1435 | * sbsec->isec_head list. No reason to complain as | ||
1436 | * these will get fixed up the next time we go through | ||
1437 | * inode_doinit() with a dentry, before these inodes | ||
1438 | * could be used again by userspace. | ||
1439 | */ | ||
1440 | if (!dentry) | ||
1441 | goto out_unlock; | ||
1442 | isec->sclass = inode_mode_to_security_class(inode->i_mode); | ||
1443 | rc = selinux_proc_get_sid(dentry, isec->sclass, &sid); | ||
1444 | dput(dentry); | ||
1445 | if (rc) | ||
1446 | goto out_unlock; | ||
1447 | isec->sid = sid; | ||
1430 | } | 1448 | } |
1431 | break; | 1449 | break; |
1432 | } | 1450 | } |
@@ -2489,7 +2507,8 @@ static int selinux_sb_remount(struct super_block *sb, void *data) | |||
2489 | if (flags[i] == SBLABEL_MNT) | 2507 | if (flags[i] == SBLABEL_MNT) |
2490 | continue; | 2508 | continue; |
2491 | len = strlen(mount_options[i]); | 2509 | len = strlen(mount_options[i]); |
2492 | rc = security_context_to_sid(mount_options[i], len, &sid); | 2510 | rc = security_context_to_sid(mount_options[i], len, &sid, |
2511 | GFP_KERNEL); | ||
2493 | if (rc) { | 2512 | if (rc) { |
2494 | printk(KERN_WARNING "SELinux: security_context_to_sid" | 2513 | printk(KERN_WARNING "SELinux: security_context_to_sid" |
2495 | "(%s) failed for (dev %s, type %s) errno=%d\n", | 2514 | "(%s) failed for (dev %s, type %s) errno=%d\n", |
@@ -2893,7 +2912,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name, | |||
2893 | if (rc) | 2912 | if (rc) |
2894 | return rc; | 2913 | return rc; |
2895 | 2914 | ||
2896 | rc = security_context_to_sid(value, size, &newsid); | 2915 | rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL); |
2897 | if (rc == -EINVAL) { | 2916 | if (rc == -EINVAL) { |
2898 | if (!capable(CAP_MAC_ADMIN)) { | 2917 | if (!capable(CAP_MAC_ADMIN)) { |
2899 | struct audit_buffer *ab; | 2918 | struct audit_buffer *ab; |
@@ -3050,7 +3069,7 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name, | |||
3050 | if (!value || !size) | 3069 | if (!value || !size) |
3051 | return -EACCES; | 3070 | return -EACCES; |
3052 | 3071 | ||
3053 | rc = security_context_to_sid((void *)value, size, &newsid); | 3072 | rc = security_context_to_sid((void *)value, size, &newsid, GFP_KERNEL); |
3054 | if (rc) | 3073 | if (rc) |
3055 | return rc; | 3074 | return rc; |
3056 | 3075 | ||
@@ -3204,24 +3223,20 @@ error: | |||
3204 | 3223 | ||
3205 | static int selinux_mmap_addr(unsigned long addr) | 3224 | static int selinux_mmap_addr(unsigned long addr) |
3206 | { | 3225 | { |
3207 | int rc = 0; | 3226 | int rc; |
3208 | u32 sid = current_sid(); | 3227 | |
3228 | /* do DAC check on address space usage */ | ||
3229 | rc = cap_mmap_addr(addr); | ||
3230 | if (rc) | ||
3231 | return rc; | ||
3209 | 3232 | ||
3210 | /* | ||
3211 | * notice that we are intentionally putting the SELinux check before | ||
3212 | * the secondary cap_file_mmap check. This is such a likely attempt | ||
3213 | * at bad behaviour/exploit that we always want to get the AVC, even | ||
3214 | * if DAC would have also denied the operation. | ||
3215 | */ | ||
3216 | if (addr < CONFIG_LSM_MMAP_MIN_ADDR) { | 3233 | if (addr < CONFIG_LSM_MMAP_MIN_ADDR) { |
3234 | u32 sid = current_sid(); | ||
3217 | rc = avc_has_perm(sid, sid, SECCLASS_MEMPROTECT, | 3235 | rc = avc_has_perm(sid, sid, SECCLASS_MEMPROTECT, |
3218 | MEMPROTECT__MMAP_ZERO, NULL); | 3236 | MEMPROTECT__MMAP_ZERO, NULL); |
3219 | if (rc) | ||
3220 | return rc; | ||
3221 | } | 3237 | } |
3222 | 3238 | ||
3223 | /* do DAC check on address space usage */ | 3239 | return rc; |
3224 | return cap_mmap_addr(addr); | ||
3225 | } | 3240 | } |
3226 | 3241 | ||
3227 | static int selinux_mmap_file(struct file *file, unsigned long reqprot, | 3242 | static int selinux_mmap_file(struct file *file, unsigned long reqprot, |
@@ -5529,7 +5544,7 @@ static int selinux_setprocattr(struct task_struct *p, | |||
5529 | str[size-1] = 0; | 5544 | str[size-1] = 0; |
5530 | size--; | 5545 | size--; |
5531 | } | 5546 | } |
5532 | error = security_context_to_sid(value, size, &sid); | 5547 | error = security_context_to_sid(value, size, &sid, GFP_KERNEL); |
5533 | if (error == -EINVAL && !strcmp(name, "fscreate")) { | 5548 | if (error == -EINVAL && !strcmp(name, "fscreate")) { |
5534 | if (!capable(CAP_MAC_ADMIN)) { | 5549 | if (!capable(CAP_MAC_ADMIN)) { |
5535 | struct audit_buffer *ab; | 5550 | struct audit_buffer *ab; |
@@ -5638,7 +5653,7 @@ static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) | |||
5638 | 5653 | ||
5639 | static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) | 5654 | static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) |
5640 | { | 5655 | { |
5641 | return security_context_to_sid(secdata, seclen, secid); | 5656 | return security_context_to_sid(secdata, seclen, secid, GFP_KERNEL); |
5642 | } | 5657 | } |
5643 | 5658 | ||
5644 | static void selinux_release_secctx(char *secdata, u32 seclen) | 5659 | static void selinux_release_secctx(char *secdata, u32 seclen) |
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index 8ed8daf7f1ee..ce7852cf526b 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h | |||
@@ -134,7 +134,7 @@ int security_sid_to_context(u32 sid, char **scontext, | |||
134 | int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len); | 134 | int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len); |
135 | 135 | ||
136 | int security_context_to_sid(const char *scontext, u32 scontext_len, | 136 | int security_context_to_sid(const char *scontext, u32 scontext_len, |
137 | u32 *out_sid); | 137 | u32 *out_sid, gfp_t gfp); |
138 | 138 | ||
139 | int security_context_to_sid_default(const char *scontext, u32 scontext_len, | 139 | int security_context_to_sid_default(const char *scontext, u32 scontext_len, |
140 | u32 *out_sid, u32 def_sid, gfp_t gfp_flags); | 140 | u32 *out_sid, u32 def_sid, gfp_t gfp_flags); |
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h index 48c3cc94c168..9f0584710c85 100644 --- a/security/selinux/include/xfrm.h +++ b/security/selinux/include/xfrm.h | |||
@@ -10,7 +10,8 @@ | |||
10 | #include <net/flow.h> | 10 | #include <net/flow.h> |
11 | 11 | ||
12 | int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, | 12 | int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, |
13 | struct xfrm_user_sec_ctx *uctx); | 13 | struct xfrm_user_sec_ctx *uctx, |
14 | gfp_t gfp); | ||
14 | int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, | 15 | int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, |
15 | struct xfrm_sec_ctx **new_ctxp); | 16 | struct xfrm_sec_ctx **new_ctxp); |
16 | void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx); | 17 | void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx); |
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 5122affe06a8..c71737f6d1cc 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c | |||
@@ -54,7 +54,7 @@ unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE; | |||
54 | static int __init checkreqprot_setup(char *str) | 54 | static int __init checkreqprot_setup(char *str) |
55 | { | 55 | { |
56 | unsigned long checkreqprot; | 56 | unsigned long checkreqprot; |
57 | if (!strict_strtoul(str, 0, &checkreqprot)) | 57 | if (!kstrtoul(str, 0, &checkreqprot)) |
58 | selinux_checkreqprot = checkreqprot ? 1 : 0; | 58 | selinux_checkreqprot = checkreqprot ? 1 : 0; |
59 | return 1; | 59 | return 1; |
60 | } | 60 | } |
@@ -576,7 +576,7 @@ static ssize_t sel_write_context(struct file *file, char *buf, size_t size) | |||
576 | if (length) | 576 | if (length) |
577 | goto out; | 577 | goto out; |
578 | 578 | ||
579 | length = security_context_to_sid(buf, size, &sid); | 579 | length = security_context_to_sid(buf, size, &sid, GFP_KERNEL); |
580 | if (length) | 580 | if (length) |
581 | goto out; | 581 | goto out; |
582 | 582 | ||
@@ -731,11 +731,13 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size) | |||
731 | if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) | 731 | if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) |
732 | goto out; | 732 | goto out; |
733 | 733 | ||
734 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); | 734 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, |
735 | GFP_KERNEL); | ||
735 | if (length) | 736 | if (length) |
736 | goto out; | 737 | goto out; |
737 | 738 | ||
738 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); | 739 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, |
740 | GFP_KERNEL); | ||
739 | if (length) | 741 | if (length) |
740 | goto out; | 742 | goto out; |
741 | 743 | ||
@@ -817,11 +819,13 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size) | |||
817 | objname = namebuf; | 819 | objname = namebuf; |
818 | } | 820 | } |
819 | 821 | ||
820 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); | 822 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, |
823 | GFP_KERNEL); | ||
821 | if (length) | 824 | if (length) |
822 | goto out; | 825 | goto out; |
823 | 826 | ||
824 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); | 827 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, |
828 | GFP_KERNEL); | ||
825 | if (length) | 829 | if (length) |
826 | goto out; | 830 | goto out; |
827 | 831 | ||
@@ -878,11 +882,13 @@ static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size) | |||
878 | if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) | 882 | if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) |
879 | goto out; | 883 | goto out; |
880 | 884 | ||
881 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); | 885 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, |
886 | GFP_KERNEL); | ||
882 | if (length) | 887 | if (length) |
883 | goto out; | 888 | goto out; |
884 | 889 | ||
885 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); | 890 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, |
891 | GFP_KERNEL); | ||
886 | if (length) | 892 | if (length) |
887 | goto out; | 893 | goto out; |
888 | 894 | ||
@@ -934,7 +940,7 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size) | |||
934 | if (sscanf(buf, "%s %s", con, user) != 2) | 940 | if (sscanf(buf, "%s %s", con, user) != 2) |
935 | goto out; | 941 | goto out; |
936 | 942 | ||
937 | length = security_context_to_sid(con, strlen(con) + 1, &sid); | 943 | length = security_context_to_sid(con, strlen(con) + 1, &sid, GFP_KERNEL); |
938 | if (length) | 944 | if (length) |
939 | goto out; | 945 | goto out; |
940 | 946 | ||
@@ -994,11 +1000,13 @@ static ssize_t sel_write_member(struct file *file, char *buf, size_t size) | |||
994 | if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) | 1000 | if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) |
995 | goto out; | 1001 | goto out; |
996 | 1002 | ||
997 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); | 1003 | length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, |
1004 | GFP_KERNEL); | ||
998 | if (length) | 1005 | if (length) |
999 | goto out; | 1006 | goto out; |
1000 | 1007 | ||
1001 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); | 1008 | length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, |
1009 | GFP_KERNEL); | ||
1002 | if (length) | 1010 | if (length) |
1003 | goto out; | 1011 | goto out; |
1004 | 1012 | ||
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 5d0144ee8ed6..4bca49414a40 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c | |||
@@ -1289,16 +1289,18 @@ out: | |||
1289 | * @scontext: security context | 1289 | * @scontext: security context |
1290 | * @scontext_len: length in bytes | 1290 | * @scontext_len: length in bytes |
1291 | * @sid: security identifier, SID | 1291 | * @sid: security identifier, SID |
1292 | * @gfp: context for the allocation | ||
1292 | * | 1293 | * |
1293 | * Obtains a SID associated with the security context that | 1294 | * Obtains a SID associated with the security context that |
1294 | * has the string representation specified by @scontext. | 1295 | * has the string representation specified by @scontext. |
1295 | * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient | 1296 | * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient |
1296 | * memory is available, or 0 on success. | 1297 | * memory is available, or 0 on success. |
1297 | */ | 1298 | */ |
1298 | int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid) | 1299 | int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid, |
1300 | gfp_t gfp) | ||
1299 | { | 1301 | { |
1300 | return security_context_to_sid_core(scontext, scontext_len, | 1302 | return security_context_to_sid_core(scontext, scontext_len, |
1301 | sid, SECSID_NULL, GFP_KERNEL, 0); | 1303 | sid, SECSID_NULL, gfp, 0); |
1302 | } | 1304 | } |
1303 | 1305 | ||
1304 | /** | 1306 | /** |
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index 0462cb3ff0a7..98b042630a9e 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c | |||
@@ -78,7 +78,8 @@ static inline int selinux_authorizable_xfrm(struct xfrm_state *x) | |||
78 | * xfrm_user_sec_ctx context. | 78 | * xfrm_user_sec_ctx context. |
79 | */ | 79 | */ |
80 | static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, | 80 | static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, |
81 | struct xfrm_user_sec_ctx *uctx) | 81 | struct xfrm_user_sec_ctx *uctx, |
82 | gfp_t gfp) | ||
82 | { | 83 | { |
83 | int rc; | 84 | int rc; |
84 | const struct task_security_struct *tsec = current_security(); | 85 | const struct task_security_struct *tsec = current_security(); |
@@ -94,7 +95,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, | |||
94 | if (str_len >= PAGE_SIZE) | 95 | if (str_len >= PAGE_SIZE) |
95 | return -ENOMEM; | 96 | return -ENOMEM; |
96 | 97 | ||
97 | ctx = kmalloc(sizeof(*ctx) + str_len + 1, GFP_KERNEL); | 98 | ctx = kmalloc(sizeof(*ctx) + str_len + 1, gfp); |
98 | if (!ctx) | 99 | if (!ctx) |
99 | return -ENOMEM; | 100 | return -ENOMEM; |
100 | 101 | ||
@@ -103,7 +104,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, | |||
103 | ctx->ctx_len = str_len; | 104 | ctx->ctx_len = str_len; |
104 | memcpy(ctx->ctx_str, &uctx[1], str_len); | 105 | memcpy(ctx->ctx_str, &uctx[1], str_len); |
105 | ctx->ctx_str[str_len] = '\0'; | 106 | ctx->ctx_str[str_len] = '\0'; |
106 | rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid); | 107 | rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid, gfp); |
107 | if (rc) | 108 | if (rc) |
108 | goto err; | 109 | goto err; |
109 | 110 | ||
@@ -282,9 +283,10 @@ int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid) | |||
282 | * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy. | 283 | * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy. |
283 | */ | 284 | */ |
284 | int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, | 285 | int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, |
285 | struct xfrm_user_sec_ctx *uctx) | 286 | struct xfrm_user_sec_ctx *uctx, |
287 | gfp_t gfp) | ||
286 | { | 288 | { |
287 | return selinux_xfrm_alloc_user(ctxp, uctx); | 289 | return selinux_xfrm_alloc_user(ctxp, uctx, gfp); |
288 | } | 290 | } |
289 | 291 | ||
290 | /* | 292 | /* |
@@ -332,7 +334,7 @@ int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) | |||
332 | int selinux_xfrm_state_alloc(struct xfrm_state *x, | 334 | int selinux_xfrm_state_alloc(struct xfrm_state *x, |
333 | struct xfrm_user_sec_ctx *uctx) | 335 | struct xfrm_user_sec_ctx *uctx) |
334 | { | 336 | { |
335 | return selinux_xfrm_alloc_user(&x->security, uctx); | 337 | return selinux_xfrm_alloc_user(&x->security, uctx, GFP_KERNEL); |
336 | } | 338 | } |
337 | 339 | ||
338 | /* | 340 | /* |
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index 7a20897d33db..7403f348ed14 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c | |||
@@ -133,7 +133,7 @@ static int snd_compr_open(struct inode *inode, struct file *f) | |||
133 | kfree(data); | 133 | kfree(data); |
134 | } | 134 | } |
135 | snd_card_unref(compr->card); | 135 | snd_card_unref(compr->card); |
136 | return 0; | 136 | return ret; |
137 | } | 137 | } |
138 | 138 | ||
139 | static int snd_compr_free(struct inode *inode, struct file *f) | 139 | static int snd_compr_free(struct inode *inode, struct file *f) |
diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c index ed6f199f8a38..4cf3200e988b 100644 --- a/sound/pci/oxygen/xonar_dg.c +++ b/sound/pci/oxygen/xonar_dg.c | |||
@@ -238,11 +238,21 @@ void set_cs4245_adc_params(struct oxygen *chip, | |||
238 | cs4245_write_spi(chip, CS4245_MCLK_FREQ); | 238 | cs4245_write_spi(chip, CS4245_MCLK_FREQ); |
239 | } | 239 | } |
240 | 240 | ||
241 | static inline unsigned int shift_bits(unsigned int value, | ||
242 | unsigned int shift_from, | ||
243 | unsigned int shift_to, | ||
244 | unsigned int mask) | ||
245 | { | ||
246 | if (shift_from < shift_to) | ||
247 | return (value << (shift_to - shift_from)) & mask; | ||
248 | else | ||
249 | return (value >> (shift_from - shift_to)) & mask; | ||
250 | } | ||
251 | |||
241 | unsigned int adjust_dg_dac_routing(struct oxygen *chip, | 252 | unsigned int adjust_dg_dac_routing(struct oxygen *chip, |
242 | unsigned int play_routing) | 253 | unsigned int play_routing) |
243 | { | 254 | { |
244 | struct dg *data = chip->model_data; | 255 | struct dg *data = chip->model_data; |
245 | unsigned int routing = 0; | ||
246 | 256 | ||
247 | switch (data->output_sel) { | 257 | switch (data->output_sel) { |
248 | case PLAYBACK_DST_HP: | 258 | case PLAYBACK_DST_HP: |
@@ -252,15 +262,23 @@ unsigned int adjust_dg_dac_routing(struct oxygen *chip, | |||
252 | OXYGEN_PLAY_MUTE67, OXYGEN_PLAY_MUTE_MASK); | 262 | OXYGEN_PLAY_MUTE67, OXYGEN_PLAY_MUTE_MASK); |
253 | break; | 263 | break; |
254 | case PLAYBACK_DST_MULTICH: | 264 | case PLAYBACK_DST_MULTICH: |
255 | routing = (0 << OXYGEN_PLAY_DAC0_SOURCE_SHIFT) | | ||
256 | (2 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) | | ||
257 | (1 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) | | ||
258 | (0 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT); | ||
259 | oxygen_write8_masked(chip, OXYGEN_PLAY_ROUTING, | 265 | oxygen_write8_masked(chip, OXYGEN_PLAY_ROUTING, |
260 | OXYGEN_PLAY_MUTE01, OXYGEN_PLAY_MUTE_MASK); | 266 | OXYGEN_PLAY_MUTE01, OXYGEN_PLAY_MUTE_MASK); |
261 | break; | 267 | break; |
262 | } | 268 | } |
263 | return routing; | 269 | return (play_routing & OXYGEN_PLAY_DAC0_SOURCE_MASK) | |
270 | shift_bits(play_routing, | ||
271 | OXYGEN_PLAY_DAC2_SOURCE_SHIFT, | ||
272 | OXYGEN_PLAY_DAC1_SOURCE_SHIFT, | ||
273 | OXYGEN_PLAY_DAC1_SOURCE_MASK) | | ||
274 | shift_bits(play_routing, | ||
275 | OXYGEN_PLAY_DAC1_SOURCE_SHIFT, | ||
276 | OXYGEN_PLAY_DAC2_SOURCE_SHIFT, | ||
277 | OXYGEN_PLAY_DAC2_SOURCE_MASK) | | ||
278 | shift_bits(play_routing, | ||
279 | OXYGEN_PLAY_DAC0_SOURCE_SHIFT, | ||
280 | OXYGEN_PLAY_DAC3_SOURCE_SHIFT, | ||
281 | OXYGEN_PLAY_DAC3_SOURCE_MASK); | ||
264 | } | 282 | } |
265 | 283 | ||
266 | void dump_cs4245_registers(struct oxygen *chip, | 284 | void dump_cs4245_registers(struct oxygen *chip, |
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c index d4c83c60b9b2..97d86d828190 100644 --- a/tools/perf/bench/numa.c +++ b/tools/perf/bench/numa.c | |||
@@ -1593,6 +1593,7 @@ static void init_params(struct params *p, const char *name, int argc, const char | |||
1593 | p->data_rand_walk = true; | 1593 | p->data_rand_walk = true; |
1594 | p->nr_loops = -1; | 1594 | p->nr_loops = -1; |
1595 | p->init_random = true; | 1595 | p->init_random = true; |
1596 | p->run_all = argc == 1; | ||
1596 | } | 1597 | } |
1597 | 1598 | ||
1598 | static int run_bench_numa(const char *name, const char **argv) | 1599 | static int run_bench_numa(const char *name, const char **argv) |
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c index e47f90cc7b98..8a987d252780 100644 --- a/tools/perf/builtin-bench.c +++ b/tools/perf/builtin-bench.c | |||
@@ -76,7 +76,7 @@ static struct collection collections[] = { | |||
76 | 76 | ||
77 | /* Iterate over all benchmarks within a collection: */ | 77 | /* Iterate over all benchmarks within a collection: */ |
78 | #define for_each_bench(coll, bench) \ | 78 | #define for_each_bench(coll, bench) \ |
79 | for (bench = coll->benchmarks; bench->name; bench++) | 79 | for (bench = coll->benchmarks; bench && bench->name; bench++) |
80 | 80 | ||
81 | static void dump_benchmarks(struct collection *coll) | 81 | static void dump_benchmarks(struct collection *coll) |
82 | { | 82 | { |
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 6aa6fb6f7bd9..f954c26de231 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c | |||
@@ -825,7 +825,6 @@ static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscal | |||
825 | P_SIGNUM(PIPE); | 825 | P_SIGNUM(PIPE); |
826 | P_SIGNUM(ALRM); | 826 | P_SIGNUM(ALRM); |
827 | P_SIGNUM(TERM); | 827 | P_SIGNUM(TERM); |
828 | P_SIGNUM(STKFLT); | ||
829 | P_SIGNUM(CHLD); | 828 | P_SIGNUM(CHLD); |
830 | P_SIGNUM(CONT); | 829 | P_SIGNUM(CONT); |
831 | P_SIGNUM(STOP); | 830 | P_SIGNUM(STOP); |
@@ -841,6 +840,15 @@ static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscal | |||
841 | P_SIGNUM(IO); | 840 | P_SIGNUM(IO); |
842 | P_SIGNUM(PWR); | 841 | P_SIGNUM(PWR); |
843 | P_SIGNUM(SYS); | 842 | P_SIGNUM(SYS); |
843 | #ifdef SIGEMT | ||
844 | P_SIGNUM(EMT); | ||
845 | #endif | ||
846 | #ifdef SIGSTKFLT | ||
847 | P_SIGNUM(STKFLT); | ||
848 | #endif | ||
849 | #ifdef SIGSWI | ||
850 | P_SIGNUM(SWI); | ||
851 | #endif | ||
844 | default: break; | 852 | default: break; |
845 | } | 853 | } |
846 | 854 | ||
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index c872991e0f65..620a1983b76b 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c | |||
@@ -1213,7 +1213,7 @@ static void ip__resolve_ams(struct machine *machine, struct thread *thread, | |||
1213 | */ | 1213 | */ |
1214 | thread__find_addr_location(thread, machine, m, MAP__FUNCTION, | 1214 | thread__find_addr_location(thread, machine, m, MAP__FUNCTION, |
1215 | ip, &al); | 1215 | ip, &al); |
1216 | if (al.sym) | 1216 | if (al.map) |
1217 | goto found; | 1217 | goto found; |
1218 | } | 1218 | } |
1219 | found: | 1219 | found: |
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 3e9f336740fa..516d19fb999b 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c | |||
@@ -151,15 +151,15 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, | |||
151 | 151 | ||
152 | gelf_getshdr(sec, shp); | 152 | gelf_getshdr(sec, shp); |
153 | str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); | 153 | str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); |
154 | if (!strcmp(name, str)) { | 154 | if (str && !strcmp(name, str)) { |
155 | if (idx) | 155 | if (idx) |
156 | *idx = cnt; | 156 | *idx = cnt; |
157 | break; | 157 | return sec; |
158 | } | 158 | } |
159 | ++cnt; | 159 | ++cnt; |
160 | } | 160 | } |
161 | 161 | ||
162 | return sec; | 162 | return NULL; |
163 | } | 163 | } |
164 | 164 | ||
165 | #define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \ | 165 | #define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \ |