diff options
265 files changed, 2236 insertions, 1218 deletions
diff --git a/Documentation/devicetree/bindings/dma/ti-edma.txt b/Documentation/devicetree/bindings/dma/ti-edma.txt index d3d0a4fb1c73..079b42a81d7c 100644 --- a/Documentation/devicetree/bindings/dma/ti-edma.txt +++ b/Documentation/devicetree/bindings/dma/ti-edma.txt | |||
| @@ -22,8 +22,7 @@ Required properties: | |||
| 22 | Optional properties: | 22 | Optional properties: |
| 23 | - ti,hwmods: Name of the hwmods associated to the eDMA CC | 23 | - ti,hwmods: Name of the hwmods associated to the eDMA CC |
| 24 | - ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow | 24 | - ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow |
| 25 | these channels will be SW triggered channels. The list must | 25 | these channels will be SW triggered channels. See example. |
| 26 | contain 16 bits numbers, see example. | ||
| 27 | - ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by | 26 | - ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by |
| 28 | the driver, they are allocated to be used by for example the | 27 | the driver, they are allocated to be used by for example the |
| 29 | DSP. See example. | 28 | DSP. See example. |
| @@ -56,10 +55,9 @@ edma: edma@49000000 { | |||
| 56 | ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>; | 55 | ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>; |
| 57 | 56 | ||
| 58 | /* Channel 20 and 21 is allocated for memcpy */ | 57 | /* Channel 20 and 21 is allocated for memcpy */ |
| 59 | ti,edma-memcpy-channels = /bits/ 16 <20 21>; | 58 | ti,edma-memcpy-channels = <20 21>; |
| 60 | /* The following PaRAM slots are reserved: 35-45 and 100-110 */ | 59 | /* The following PaRAM slots are reserved: 35-44 and 100-109 */ |
| 61 | ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>, | 60 | ti,edma-reserved-slot-ranges = <35 10>, <100 10>; |
| 62 | /bits/ 16 <100 10>; | ||
| 63 | }; | 61 | }; |
| 64 | 62 | ||
| 65 | edma_tptc0: tptc@49800000 { | 63 | edma_tptc0: tptc@49800000 { |
diff --git a/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt b/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt index b9c32f6fd687..4357e498ef04 100644 --- a/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt +++ b/Documentation/devicetree/bindings/input/sun4i-lradc-keys.txt | |||
| @@ -12,7 +12,7 @@ Each key is represented as a sub-node of "allwinner,sun4i-a10-lradc-keys": | |||
| 12 | Required subnode-properties: | 12 | Required subnode-properties: |
| 13 | - label: Descriptive name of the key. | 13 | - label: Descriptive name of the key. |
| 14 | - linux,code: Keycode to emit. | 14 | - linux,code: Keycode to emit. |
| 15 | - channel: Channel this key is attached to, mut be 0 or 1. | 15 | - channel: Channel this key is attached to, must be 0 or 1. |
| 16 | - voltage: Voltage in µV at lradc input when this key is pressed. | 16 | - voltage: Voltage in µV at lradc input when this key is pressed. |
| 17 | 17 | ||
| 18 | Example: | 18 | Example: |
diff --git a/Documentation/devicetree/bindings/mtd/partition.txt b/Documentation/devicetree/bindings/mtd/partition.txt index f1e2a02381a4..1c63e40659fc 100644 --- a/Documentation/devicetree/bindings/mtd/partition.txt +++ b/Documentation/devicetree/bindings/mtd/partition.txt | |||
| @@ -6,7 +6,9 @@ used for what purposes, but which don't use an on-flash partition table such | |||
| 6 | as RedBoot. | 6 | as RedBoot. |
| 7 | 7 | ||
| 8 | The partition table should be a subnode of the mtd node and should be named | 8 | The partition table should be a subnode of the mtd node and should be named |
| 9 | 'partitions'. Partitions are defined in subnodes of the partitions node. | 9 | 'partitions'. This node should have the following property: |
| 10 | - compatible : (required) must be "fixed-partitions" | ||
| 11 | Partitions are then defined in subnodes of the partitions node. | ||
| 10 | 12 | ||
| 11 | For backwards compatibility partitions as direct subnodes of the mtd device are | 13 | For backwards compatibility partitions as direct subnodes of the mtd device are |
| 12 | supported. This use is discouraged. | 14 | supported. This use is discouraged. |
| @@ -36,6 +38,7 @@ Examples: | |||
| 36 | 38 | ||
| 37 | flash@0 { | 39 | flash@0 { |
| 38 | partitions { | 40 | partitions { |
| 41 | compatible = "fixed-partitions"; | ||
| 39 | #address-cells = <1>; | 42 | #address-cells = <1>; |
| 40 | #size-cells = <1>; | 43 | #size-cells = <1>; |
| 41 | 44 | ||
| @@ -53,6 +56,7 @@ flash@0 { | |||
| 53 | 56 | ||
| 54 | flash@1 { | 57 | flash@1 { |
| 55 | partitions { | 58 | partitions { |
| 59 | compatible = "fixed-partitions"; | ||
| 56 | #address-cells = <1>; | 60 | #address-cells = <1>; |
| 57 | #size-cells = <2>; | 61 | #size-cells = <2>; |
| 58 | 62 | ||
| @@ -66,6 +70,7 @@ flash@1 { | |||
| 66 | 70 | ||
| 67 | flash@2 { | 71 | flash@2 { |
| 68 | partitions { | 72 | partitions { |
| 73 | compatible = "fixed-partitions"; | ||
| 69 | #address-cells = <2>; | 74 | #address-cells = <2>; |
| 70 | #size-cells = <2>; | 75 | #size-cells = <2>; |
| 71 | 76 | ||
diff --git a/Documentation/networking/e100.txt b/Documentation/networking/e100.txt index f862cf3aff34..42ddbd4b52a9 100644 --- a/Documentation/networking/e100.txt +++ b/Documentation/networking/e100.txt | |||
| @@ -181,17 +181,3 @@ For general information, go to the Intel support website at: | |||
| 181 | If an issue is identified with the released source code on the supported | 181 | If an issue is identified with the released source code on the supported |
| 182 | kernel with a supported adapter, email the specific information related to the | 182 | kernel with a supported adapter, email the specific information related to the |
| 183 | issue to e1000-devel@lists.sourceforge.net. | 183 | issue to e1000-devel@lists.sourceforge.net. |
| 184 | |||
| 185 | |||
| 186 | License | ||
| 187 | ======= | ||
| 188 | |||
| 189 | This software program is released under the terms of a license agreement | ||
| 190 | between you ('Licensee') and Intel. Do not use or load this software or any | ||
| 191 | associated materials (collectively, the 'Software') until you have carefully | ||
| 192 | read the full terms and conditions of the file COPYING located in this software | ||
| 193 | package. By loading or using the Software, you agree to the terms of this | ||
| 194 | Agreement. If you do not agree with the terms of this Agreement, do not install | ||
| 195 | or use the Software. | ||
| 196 | |||
| 197 | * Other names and brands may be claimed as the property of others. | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 9bff63cf326e..233f83464814 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -5578,7 +5578,7 @@ R: Jesse Brandeburg <jesse.brandeburg@intel.com> | |||
| 5578 | R: Shannon Nelson <shannon.nelson@intel.com> | 5578 | R: Shannon Nelson <shannon.nelson@intel.com> |
| 5579 | R: Carolyn Wyborny <carolyn.wyborny@intel.com> | 5579 | R: Carolyn Wyborny <carolyn.wyborny@intel.com> |
| 5580 | R: Don Skidmore <donald.c.skidmore@intel.com> | 5580 | R: Don Skidmore <donald.c.skidmore@intel.com> |
| 5581 | R: Matthew Vick <matthew.vick@intel.com> | 5581 | R: Bruce Allan <bruce.w.allan@intel.com> |
| 5582 | R: John Ronciak <john.ronciak@intel.com> | 5582 | R: John Ronciak <john.ronciak@intel.com> |
| 5583 | R: Mitch Williams <mitch.a.williams@intel.com> | 5583 | R: Mitch Williams <mitch.a.williams@intel.com> |
| 5584 | L: intel-wired-lan@lists.osuosl.org | 5584 | L: intel-wired-lan@lists.osuosl.org |
| @@ -8380,6 +8380,14 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) | |||
| 8380 | S: Maintained | 8380 | S: Maintained |
| 8381 | F: drivers/pinctrl/samsung/ | 8381 | F: drivers/pinctrl/samsung/ |
| 8382 | 8382 | ||
| 8383 | PIN CONTROLLER - SINGLE | ||
| 8384 | M: Tony Lindgren <tony@atomide.com> | ||
| 8385 | M: Haojian Zhuang <haojian.zhuang@linaro.org> | ||
| 8386 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | ||
| 8387 | L: linux-omap@vger.kernel.org | ||
| 8388 | S: Maintained | ||
| 8389 | F: drivers/pinctrl/pinctrl-single.c | ||
| 8390 | |||
| 8383 | PIN CONTROLLER - ST SPEAR | 8391 | PIN CONTROLLER - ST SPEAR |
| 8384 | M: Viresh Kumar <vireshk@kernel.org> | 8392 | M: Viresh Kumar <vireshk@kernel.org> |
| 8385 | L: spear-devel@list.st.com | 8393 | L: spear-devel@list.st.com |
| @@ -8946,6 +8954,13 @@ F: drivers/rpmsg/ | |||
| 8946 | F: Documentation/rpmsg.txt | 8954 | F: Documentation/rpmsg.txt |
| 8947 | F: include/linux/rpmsg.h | 8955 | F: include/linux/rpmsg.h |
| 8948 | 8956 | ||
| 8957 | RENESAS ETHERNET DRIVERS | ||
| 8958 | R: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com> | ||
| 8959 | L: netdev@vger.kernel.org | ||
| 8960 | L: linux-sh@vger.kernel.org | ||
| 8961 | F: drivers/net/ethernet/renesas/ | ||
| 8962 | F: include/linux/sh_eth.h | ||
| 8963 | |||
| 8949 | RESET CONTROLLER FRAMEWORK | 8964 | RESET CONTROLLER FRAMEWORK |
| 8950 | M: Philipp Zabel <p.zabel@pengutronix.de> | 8965 | M: Philipp Zabel <p.zabel@pengutronix.de> |
| 8951 | S: Maintained | 8966 | S: Maintained |
| @@ -1,7 +1,7 @@ | |||
| 1 | VERSION = 4 | 1 | VERSION = 4 |
| 2 | PATCHLEVEL = 4 | 2 | PATCHLEVEL = 4 |
| 3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
| 4 | EXTRAVERSION = -rc5 | 4 | EXTRAVERSION = -rc6 |
| 5 | NAME = Blurry Fish Butt | 5 | NAME = Blurry Fish Butt |
| 6 | 6 | ||
| 7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 2c2ac3f3ff80..6312f607932f 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig | |||
| @@ -445,6 +445,7 @@ config LINUX_LINK_BASE | |||
| 445 | However some customers have peripherals mapped at this addr, so | 445 | However some customers have peripherals mapped at this addr, so |
| 446 | Linux needs to be scooted a bit. | 446 | Linux needs to be scooted a bit. |
| 447 | If you don't know what the above means, leave this setting alone. | 447 | If you don't know what the above means, leave this setting alone. |
| 448 | This needs to match memory start address specified in Device Tree | ||
| 448 | 449 | ||
| 449 | config HIGHMEM | 450 | config HIGHMEM |
| 450 | bool "High Memory Support" | 451 | bool "High Memory Support" |
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi index f3db32154973..44a578c10732 100644 --- a/arch/arc/boot/dts/axs10x_mb.dtsi +++ b/arch/arc/boot/dts/axs10x_mb.dtsi | |||
| @@ -46,6 +46,7 @@ | |||
| 46 | snps,pbl = < 32 >; | 46 | snps,pbl = < 32 >; |
| 47 | clocks = <&apbclk>; | 47 | clocks = <&apbclk>; |
| 48 | clock-names = "stmmaceth"; | 48 | clock-names = "stmmaceth"; |
| 49 | max-speed = <100>; | ||
| 49 | }; | 50 | }; |
| 50 | 51 | ||
| 51 | ehci@0x40000 { | 52 | ehci@0x40000 { |
diff --git a/arch/arc/boot/dts/nsim_hs.dts b/arch/arc/boot/dts/nsim_hs.dts index b0eb0e7fe21d..fc81879bc1f5 100644 --- a/arch/arc/boot/dts/nsim_hs.dts +++ b/arch/arc/boot/dts/nsim_hs.dts | |||
| @@ -17,7 +17,8 @@ | |||
| 17 | 17 | ||
| 18 | memory { | 18 | memory { |
| 19 | device_type = "memory"; | 19 | device_type = "memory"; |
| 20 | reg = <0x0 0x80000000 0x0 0x40000000 /* 1 GB low mem */ | 20 | /* CONFIG_LINUX_LINK_BASE needs to match low mem start */ |
| 21 | reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MB low mem */ | ||
| 21 | 0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */ | 22 | 0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */ |
| 22 | }; | 23 | }; |
| 23 | 24 | ||
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h index 6ff657a904b6..c28e6c347b49 100644 --- a/arch/arc/include/asm/mach_desc.h +++ b/arch/arc/include/asm/mach_desc.h | |||
| @@ -23,7 +23,7 @@ | |||
| 23 | * @dt_compat: Array of device tree 'compatible' strings | 23 | * @dt_compat: Array of device tree 'compatible' strings |
| 24 | * (XXX: although only 1st entry is looked at) | 24 | * (XXX: although only 1st entry is looked at) |
| 25 | * @init_early: Very early callback [called from setup_arch()] | 25 | * @init_early: Very early callback [called from setup_arch()] |
| 26 | * @init_cpu_smp: for each CPU as it is coming up (SMP as well as UP) | 26 | * @init_per_cpu: for each CPU as it is coming up (SMP as well as UP) |
| 27 | * [(M):init_IRQ(), (o):start_kernel_secondary()] | 27 | * [(M):init_IRQ(), (o):start_kernel_secondary()] |
| 28 | * @init_machine: arch initcall level callback (e.g. populate static | 28 | * @init_machine: arch initcall level callback (e.g. populate static |
| 29 | * platform devices or parse Devicetree) | 29 | * platform devices or parse Devicetree) |
| @@ -35,7 +35,7 @@ struct machine_desc { | |||
| 35 | const char **dt_compat; | 35 | const char **dt_compat; |
| 36 | void (*init_early)(void); | 36 | void (*init_early)(void); |
| 37 | #ifdef CONFIG_SMP | 37 | #ifdef CONFIG_SMP |
| 38 | void (*init_cpu_smp)(unsigned int); | 38 | void (*init_per_cpu)(unsigned int); |
| 39 | #endif | 39 | #endif |
| 40 | void (*init_machine)(void); | 40 | void (*init_machine)(void); |
| 41 | void (*init_late)(void); | 41 | void (*init_late)(void); |
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h index 133c867d15af..991380438d6b 100644 --- a/arch/arc/include/asm/smp.h +++ b/arch/arc/include/asm/smp.h | |||
| @@ -48,7 +48,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq); | |||
| 48 | * @init_early_smp: A SMP specific h/w block can init itself | 48 | * @init_early_smp: A SMP specific h/w block can init itself |
| 49 | * Could be common across platforms so not covered by | 49 | * Could be common across platforms so not covered by |
| 50 | * mach_desc->init_early() | 50 | * mach_desc->init_early() |
| 51 | * @init_irq_cpu: Called for each core so SMP h/w block driver can do | 51 | * @init_per_cpu: Called for each core so SMP h/w block driver can do |
| 52 | * any needed setup per cpu (e.g. IPI request) | 52 | * any needed setup per cpu (e.g. IPI request) |
| 53 | * @cpu_kick: For Master to kickstart a cpu (optionally at a PC) | 53 | * @cpu_kick: For Master to kickstart a cpu (optionally at a PC) |
| 54 | * @ipi_send: To send IPI to a @cpu | 54 | * @ipi_send: To send IPI to a @cpu |
| @@ -57,7 +57,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq); | |||
| 57 | struct plat_smp_ops { | 57 | struct plat_smp_ops { |
| 58 | const char *info; | 58 | const char *info; |
| 59 | void (*init_early_smp)(void); | 59 | void (*init_early_smp)(void); |
| 60 | void (*init_irq_cpu)(int cpu); | 60 | void (*init_per_cpu)(int cpu); |
| 61 | void (*cpu_kick)(int cpu, unsigned long pc); | 61 | void (*cpu_kick)(int cpu, unsigned long pc); |
| 62 | void (*ipi_send)(int cpu); | 62 | void (*ipi_send)(int cpu); |
| 63 | void (*ipi_clear)(int irq); | 63 | void (*ipi_clear)(int irq); |
diff --git a/arch/arc/include/asm/unwind.h b/arch/arc/include/asm/unwind.h index 7ca628b6ee2a..c11a25bb8158 100644 --- a/arch/arc/include/asm/unwind.h +++ b/arch/arc/include/asm/unwind.h | |||
| @@ -112,7 +112,6 @@ struct unwind_frame_info { | |||
| 112 | 112 | ||
| 113 | extern int arc_unwind(struct unwind_frame_info *frame); | 113 | extern int arc_unwind(struct unwind_frame_info *frame); |
| 114 | extern void arc_unwind_init(void); | 114 | extern void arc_unwind_init(void); |
| 115 | extern void arc_unwind_setup(void); | ||
| 116 | extern void *unwind_add_table(struct module *module, const void *table_start, | 115 | extern void *unwind_add_table(struct module *module, const void *table_start, |
| 117 | unsigned long table_size); | 116 | unsigned long table_size); |
| 118 | extern void unwind_remove_table(void *handle, int init_only); | 117 | extern void unwind_remove_table(void *handle, int init_only); |
| @@ -152,9 +151,6 @@ static inline void arc_unwind_init(void) | |||
| 152 | { | 151 | { |
| 153 | } | 152 | } |
| 154 | 153 | ||
| 155 | static inline void arc_unwind_setup(void) | ||
| 156 | { | ||
| 157 | } | ||
| 158 | #define unwind_add_table(a, b, c) | 154 | #define unwind_add_table(a, b, c) |
| 159 | #define unwind_remove_table(a, b) | 155 | #define unwind_remove_table(a, b) |
| 160 | 156 | ||
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c index 26c156827479..0394f9f61b46 100644 --- a/arch/arc/kernel/intc-arcv2.c +++ b/arch/arc/kernel/intc-arcv2.c | |||
| @@ -106,10 +106,21 @@ static struct irq_chip arcv2_irq_chip = { | |||
| 106 | static int arcv2_irq_map(struct irq_domain *d, unsigned int irq, | 106 | static int arcv2_irq_map(struct irq_domain *d, unsigned int irq, |
| 107 | irq_hw_number_t hw) | 107 | irq_hw_number_t hw) |
| 108 | { | 108 | { |
| 109 | if (irq == TIMER0_IRQ || irq == IPI_IRQ) | 109 | /* |
| 110 | * core intc IRQs [16, 23]: | ||
| 111 | * Statically assigned always private-per-core (Timers, WDT, IPI, PCT) | ||
| 112 | */ | ||
| 113 | if (hw < 24) { | ||
| 114 | /* | ||
| 115 | * A subsequent request_percpu_irq() fails if percpu_devid is | ||
| 116 | * not set. That in turns sets NOAUTOEN, meaning each core needs | ||
| 117 | * to call enable_percpu_irq() | ||
| 118 | */ | ||
| 119 | irq_set_percpu_devid(irq); | ||
| 110 | irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq); | 120 | irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq); |
| 111 | else | 121 | } else { |
| 112 | irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq); | 122 | irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq); |
| 123 | } | ||
| 113 | 124 | ||
| 114 | return 0; | 125 | return 0; |
| 115 | } | 126 | } |
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c index 2ee226546c6a..ba17f85285cf 100644 --- a/arch/arc/kernel/irq.c +++ b/arch/arc/kernel/irq.c | |||
| @@ -29,11 +29,11 @@ void __init init_IRQ(void) | |||
| 29 | 29 | ||
| 30 | #ifdef CONFIG_SMP | 30 | #ifdef CONFIG_SMP |
| 31 | /* a SMP H/w block could do IPI IRQ request here */ | 31 | /* a SMP H/w block could do IPI IRQ request here */ |
| 32 | if (plat_smp_ops.init_irq_cpu) | 32 | if (plat_smp_ops.init_per_cpu) |
| 33 | plat_smp_ops.init_irq_cpu(smp_processor_id()); | 33 | plat_smp_ops.init_per_cpu(smp_processor_id()); |
| 34 | 34 | ||
| 35 | if (machine_desc->init_cpu_smp) | 35 | if (machine_desc->init_per_cpu) |
| 36 | machine_desc->init_cpu_smp(smp_processor_id()); | 36 | machine_desc->init_per_cpu(smp_processor_id()); |
| 37 | #endif | 37 | #endif |
| 38 | } | 38 | } |
| 39 | 39 | ||
| @@ -51,6 +51,18 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs) | |||
| 51 | set_irq_regs(old_regs); | 51 | set_irq_regs(old_regs); |
| 52 | } | 52 | } |
| 53 | 53 | ||
| 54 | /* | ||
| 55 | * API called for requesting percpu interrupts - called by each CPU | ||
| 56 | * - For boot CPU, actually request the IRQ with genirq core + enables | ||
| 57 | * - For subsequent callers only enable called locally | ||
| 58 | * | ||
| 59 | * Relies on being called by boot cpu first (i.e. request called ahead) of | ||
| 60 | * any enable as expected by genirq. Hence Suitable only for TIMER, IPI | ||
| 61 | * which are guaranteed to be setup on boot core first. | ||
| 62 | * Late probed peripherals such as perf can't use this as there no guarantee | ||
| 63 | * of being called on boot CPU first. | ||
| 64 | */ | ||
| 65 | |||
| 54 | void arc_request_percpu_irq(int irq, int cpu, | 66 | void arc_request_percpu_irq(int irq, int cpu, |
| 55 | irqreturn_t (*isr)(int irq, void *dev), | 67 | irqreturn_t (*isr)(int irq, void *dev), |
| 56 | const char *irq_nm, | 68 | const char *irq_nm, |
| @@ -60,14 +72,17 @@ void arc_request_percpu_irq(int irq, int cpu, | |||
| 60 | if (!cpu) { | 72 | if (!cpu) { |
| 61 | int rc; | 73 | int rc; |
| 62 | 74 | ||
| 75 | #ifdef CONFIG_ISA_ARCOMPACT | ||
| 63 | /* | 76 | /* |
| 64 | * These 2 calls are essential to making percpu IRQ APIs work | 77 | * A subsequent request_percpu_irq() fails if percpu_devid is |
| 65 | * Ideally these details could be hidden in irq chip map function | 78 | * not set. That in turns sets NOAUTOEN, meaning each core needs |
| 66 | * but the issue is IPIs IRQs being static (non-DT) and platform | 79 | * to call enable_percpu_irq() |
| 67 | * specific, so we can't identify them there. | 80 | * |
| 81 | * For ARCv2, this is done in irq map function since we know | ||
| 82 | * which irqs are strictly per cpu | ||
| 68 | */ | 83 | */ |
| 69 | irq_set_percpu_devid(irq); | 84 | irq_set_percpu_devid(irq); |
| 70 | irq_modify_status(irq, IRQ_NOAUTOEN, 0); /* @irq, @clr, @set */ | 85 | #endif |
| 71 | 86 | ||
| 72 | rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev); | 87 | rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev); |
| 73 | if (rc) | 88 | if (rc) |
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index 74a9b074ac3e..bd237acdf4f2 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c | |||
| @@ -132,7 +132,7 @@ static void mcip_probe_n_setup(void) | |||
| 132 | struct plat_smp_ops plat_smp_ops = { | 132 | struct plat_smp_ops plat_smp_ops = { |
| 133 | .info = smp_cpuinfo_buf, | 133 | .info = smp_cpuinfo_buf, |
| 134 | .init_early_smp = mcip_probe_n_setup, | 134 | .init_early_smp = mcip_probe_n_setup, |
| 135 | .init_irq_cpu = mcip_setup_per_cpu, | 135 | .init_per_cpu = mcip_setup_per_cpu, |
| 136 | .ipi_send = mcip_ipi_send, | 136 | .ipi_send = mcip_ipi_send, |
| 137 | .ipi_clear = mcip_ipi_clear, | 137 | .ipi_clear = mcip_ipi_clear, |
| 138 | }; | 138 | }; |
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index 0c08bb1ce15a..8b134cfe5e1f 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c | |||
| @@ -428,12 +428,11 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev) | |||
| 428 | 428 | ||
| 429 | #endif /* CONFIG_ISA_ARCV2 */ | 429 | #endif /* CONFIG_ISA_ARCV2 */ |
| 430 | 430 | ||
| 431 | void arc_cpu_pmu_irq_init(void) | 431 | static void arc_cpu_pmu_irq_init(void *data) |
| 432 | { | 432 | { |
| 433 | struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu); | 433 | int irq = *(int *)data; |
| 434 | 434 | ||
| 435 | arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr, | 435 | enable_percpu_irq(irq, IRQ_TYPE_NONE); |
| 436 | "ARC perf counters", pmu_cpu); | ||
| 437 | 436 | ||
| 438 | /* Clear all pending interrupt flags */ | 437 | /* Clear all pending interrupt flags */ |
| 439 | write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); | 438 | write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); |
| @@ -515,7 +514,6 @@ static int arc_pmu_device_probe(struct platform_device *pdev) | |||
| 515 | 514 | ||
| 516 | if (has_interrupts) { | 515 | if (has_interrupts) { |
| 517 | int irq = platform_get_irq(pdev, 0); | 516 | int irq = platform_get_irq(pdev, 0); |
| 518 | unsigned long flags; | ||
| 519 | 517 | ||
| 520 | if (irq < 0) { | 518 | if (irq < 0) { |
| 521 | pr_err("Cannot get IRQ number for the platform\n"); | 519 | pr_err("Cannot get IRQ number for the platform\n"); |
| @@ -524,24 +522,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev) | |||
| 524 | 522 | ||
| 525 | arc_pmu->irq = irq; | 523 | arc_pmu->irq = irq; |
| 526 | 524 | ||
| 527 | /* | 525 | /* intc map function ensures irq_set_percpu_devid() called */ |
| 528 | * arc_cpu_pmu_irq_init() needs to be called on all cores for | 526 | request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters", |
| 529 | * their respective local PMU. | 527 | this_cpu_ptr(&arc_pmu_cpu)); |
| 530 | * However we use opencoded on_each_cpu() to ensure it is called | 528 | |
| 531 | * on core0 first, so that arc_request_percpu_irq() sets up | 529 | on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1); |
| 532 | * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable | 530 | |
| 533 | * perf IRQ on non master cores. | ||
| 534 | * see arc_request_percpu_irq() | ||
| 535 | */ | ||
| 536 | preempt_disable(); | ||
| 537 | local_irq_save(flags); | ||
| 538 | arc_cpu_pmu_irq_init(); | ||
| 539 | local_irq_restore(flags); | ||
| 540 | smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1); | ||
| 541 | preempt_enable(); | ||
| 542 | |||
| 543 | /* Clean all pending interrupt flags */ | ||
| 544 | write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); | ||
| 545 | } else | 531 | } else |
| 546 | arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; | 532 | arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; |
| 547 | 533 | ||
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index c33e77c0ad3e..e1b87444ea9a 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c | |||
| @@ -429,7 +429,6 @@ void __init setup_arch(char **cmdline_p) | |||
| 429 | #endif | 429 | #endif |
| 430 | 430 | ||
| 431 | arc_unwind_init(); | 431 | arc_unwind_init(); |
| 432 | arc_unwind_setup(); | ||
| 433 | } | 432 | } |
| 434 | 433 | ||
| 435 | static int __init customize_machine(void) | 434 | static int __init customize_machine(void) |
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index 580587805fa3..ef6e9e15b82a 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c | |||
| @@ -132,11 +132,11 @@ void start_kernel_secondary(void) | |||
| 132 | pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); | 132 | pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu); |
| 133 | 133 | ||
| 134 | /* Some SMP H/w setup - for each cpu */ | 134 | /* Some SMP H/w setup - for each cpu */ |
| 135 | if (plat_smp_ops.init_irq_cpu) | 135 | if (plat_smp_ops.init_per_cpu) |
| 136 | plat_smp_ops.init_irq_cpu(cpu); | 136 | plat_smp_ops.init_per_cpu(cpu); |
| 137 | 137 | ||
| 138 | if (machine_desc->init_cpu_smp) | 138 | if (machine_desc->init_per_cpu) |
| 139 | machine_desc->init_cpu_smp(cpu); | 139 | machine_desc->init_per_cpu(cpu); |
| 140 | 140 | ||
| 141 | arc_local_timer_setup(); | 141 | arc_local_timer_setup(); |
| 142 | 142 | ||
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c index 7352475451f6..cf2828ab0905 100644 --- a/arch/arc/kernel/unwind.c +++ b/arch/arc/kernel/unwind.c | |||
| @@ -170,6 +170,23 @@ static struct unwind_table *find_table(unsigned long pc) | |||
| 170 | 170 | ||
| 171 | static unsigned long read_pointer(const u8 **pLoc, | 171 | static unsigned long read_pointer(const u8 **pLoc, |
| 172 | const void *end, signed ptrType); | 172 | const void *end, signed ptrType); |
| 173 | static void init_unwind_hdr(struct unwind_table *table, | ||
| 174 | void *(*alloc) (unsigned long)); | ||
| 175 | |||
| 176 | /* | ||
| 177 | * wrappers for header alloc (vs. calling one vs. other at call site) | ||
| 178 | * to elide section mismatches warnings | ||
| 179 | */ | ||
| 180 | static void *__init unw_hdr_alloc_early(unsigned long sz) | ||
| 181 | { | ||
| 182 | return __alloc_bootmem_nopanic(sz, sizeof(unsigned int), | ||
| 183 | MAX_DMA_ADDRESS); | ||
| 184 | } | ||
| 185 | |||
| 186 | static void *unw_hdr_alloc(unsigned long sz) | ||
| 187 | { | ||
| 188 | return kmalloc(sz, GFP_KERNEL); | ||
| 189 | } | ||
| 173 | 190 | ||
| 174 | static void init_unwind_table(struct unwind_table *table, const char *name, | 191 | static void init_unwind_table(struct unwind_table *table, const char *name, |
| 175 | const void *core_start, unsigned long core_size, | 192 | const void *core_start, unsigned long core_size, |
| @@ -209,6 +226,8 @@ void __init arc_unwind_init(void) | |||
| 209 | __start_unwind, __end_unwind - __start_unwind, | 226 | __start_unwind, __end_unwind - __start_unwind, |
| 210 | NULL, 0); | 227 | NULL, 0); |
| 211 | /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/ | 228 | /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/ |
| 229 | |||
| 230 | init_unwind_hdr(&root_table, unw_hdr_alloc_early); | ||
| 212 | } | 231 | } |
| 213 | 232 | ||
| 214 | static const u32 bad_cie, not_fde; | 233 | static const u32 bad_cie, not_fde; |
| @@ -241,8 +260,8 @@ static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size) | |||
| 241 | e2->fde = v; | 260 | e2->fde = v; |
| 242 | } | 261 | } |
| 243 | 262 | ||
| 244 | static void __init setup_unwind_table(struct unwind_table *table, | 263 | static void init_unwind_hdr(struct unwind_table *table, |
| 245 | void *(*alloc) (unsigned long)) | 264 | void *(*alloc) (unsigned long)) |
| 246 | { | 265 | { |
| 247 | const u8 *ptr; | 266 | const u8 *ptr; |
| 248 | unsigned long tableSize = table->size, hdrSize; | 267 | unsigned long tableSize = table->size, hdrSize; |
| @@ -274,13 +293,13 @@ static void __init setup_unwind_table(struct unwind_table *table, | |||
| 274 | const u32 *cie = cie_for_fde(fde, table); | 293 | const u32 *cie = cie_for_fde(fde, table); |
| 275 | signed ptrType; | 294 | signed ptrType; |
| 276 | 295 | ||
| 277 | if (cie == ¬_fde) | 296 | if (cie == ¬_fde) /* only process FDE here */ |
| 278 | continue; | 297 | continue; |
| 279 | if (cie == NULL || cie == &bad_cie) | 298 | if (cie == NULL || cie == &bad_cie) |
| 280 | return; | 299 | continue; /* say FDE->CIE.version != 1 */ |
| 281 | ptrType = fde_pointer_type(cie); | 300 | ptrType = fde_pointer_type(cie); |
| 282 | if (ptrType < 0) | 301 | if (ptrType < 0) |
| 283 | return; | 302 | continue; |
| 284 | 303 | ||
| 285 | ptr = (const u8 *)(fde + 2); | 304 | ptr = (const u8 *)(fde + 2); |
| 286 | if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, | 305 | if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde, |
| @@ -300,9 +319,11 @@ static void __init setup_unwind_table(struct unwind_table *table, | |||
| 300 | 319 | ||
| 301 | hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int) | 320 | hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int) |
| 302 | + 2 * n * sizeof(unsigned long); | 321 | + 2 * n * sizeof(unsigned long); |
| 322 | |||
| 303 | header = alloc(hdrSize); | 323 | header = alloc(hdrSize); |
| 304 | if (!header) | 324 | if (!header) |
| 305 | return; | 325 | return; |
| 326 | |||
| 306 | header->version = 1; | 327 | header->version = 1; |
| 307 | header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native; | 328 | header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native; |
| 308 | header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4; | 329 | header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4; |
| @@ -322,6 +343,10 @@ static void __init setup_unwind_table(struct unwind_table *table, | |||
| 322 | 343 | ||
| 323 | if (fde[1] == 0xffffffff) | 344 | if (fde[1] == 0xffffffff) |
| 324 | continue; /* this is a CIE */ | 345 | continue; /* this is a CIE */ |
| 346 | |||
| 347 | if (*(u8 *)(cie + 2) != 1) | ||
| 348 | continue; /* FDE->CIE.version not supported */ | ||
| 349 | |||
| 325 | ptr = (const u8 *)(fde + 2); | 350 | ptr = (const u8 *)(fde + 2); |
| 326 | header->table[n].start = read_pointer(&ptr, | 351 | header->table[n].start = read_pointer(&ptr, |
| 327 | (const u8 *)(fde + 1) + | 352 | (const u8 *)(fde + 1) + |
| @@ -342,18 +367,6 @@ static void __init setup_unwind_table(struct unwind_table *table, | |||
| 342 | table->header = (const void *)header; | 367 | table->header = (const void *)header; |
| 343 | } | 368 | } |
| 344 | 369 | ||
| 345 | static void *__init balloc(unsigned long sz) | ||
| 346 | { | ||
| 347 | return __alloc_bootmem_nopanic(sz, | ||
| 348 | sizeof(unsigned int), | ||
| 349 | __pa(MAX_DMA_ADDRESS)); | ||
| 350 | } | ||
| 351 | |||
| 352 | void __init arc_unwind_setup(void) | ||
| 353 | { | ||
| 354 | setup_unwind_table(&root_table, balloc); | ||
| 355 | } | ||
| 356 | |||
| 357 | #ifdef CONFIG_MODULES | 370 | #ifdef CONFIG_MODULES |
| 358 | 371 | ||
| 359 | static struct unwind_table *last_table; | 372 | static struct unwind_table *last_table; |
| @@ -377,6 +390,8 @@ void *unwind_add_table(struct module *module, const void *table_start, | |||
| 377 | table_start, table_size, | 390 | table_start, table_size, |
| 378 | NULL, 0); | 391 | NULL, 0); |
| 379 | 392 | ||
| 393 | init_unwind_hdr(table, unw_hdr_alloc); | ||
| 394 | |||
| 380 | #ifdef UNWIND_DEBUG | 395 | #ifdef UNWIND_DEBUG |
| 381 | unw_debug("Table added for [%s] %lx %lx\n", | 396 | unw_debug("Table added for [%s] %lx %lx\n", |
| 382 | module->name, table->core.pc, table->core.range); | 397 | module->name, table->core.pc, table->core.range); |
| @@ -439,6 +454,7 @@ void unwind_remove_table(void *handle, int init_only) | |||
| 439 | info.init_only = init_only; | 454 | info.init_only = init_only; |
| 440 | 455 | ||
| 441 | unlink_table(&info); /* XXX: SMP */ | 456 | unlink_table(&info); /* XXX: SMP */ |
| 457 | kfree(table->header); | ||
| 442 | kfree(table); | 458 | kfree(table); |
| 443 | } | 459 | } |
| 444 | 460 | ||
| @@ -507,7 +523,8 @@ static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table) | |||
| 507 | 523 | ||
| 508 | if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde) | 524 | if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde) |
| 509 | || (*cie & (sizeof(*cie) - 1)) | 525 | || (*cie & (sizeof(*cie) - 1)) |
| 510 | || (cie[1] != 0xffffffff)) | 526 | || (cie[1] != 0xffffffff) |
| 527 | || ( *(u8 *)(cie + 2) != 1)) /* version 1 supported */ | ||
| 511 | return NULL; /* this is not a (valid) CIE */ | 528 | return NULL; /* this is not a (valid) CIE */ |
| 512 | return cie; | 529 | return cie; |
| 513 | } | 530 | } |
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c index a9305b5a2cd4..7d2c4fbf4f22 100644 --- a/arch/arc/mm/init.c +++ b/arch/arc/mm/init.c | |||
| @@ -51,7 +51,9 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) | |||
| 51 | int in_use = 0; | 51 | int in_use = 0; |
| 52 | 52 | ||
| 53 | if (!low_mem_sz) { | 53 | if (!low_mem_sz) { |
| 54 | BUG_ON(base != low_mem_start); | 54 | if (base != low_mem_start) |
| 55 | panic("CONFIG_LINUX_LINK_BASE != DT memory { }"); | ||
| 56 | |||
| 55 | low_mem_sz = size; | 57 | low_mem_sz = size; |
| 56 | in_use = 1; | 58 | in_use = 1; |
| 57 | } else { | 59 | } else { |
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 8cc85a4ebec2..35c9db857ebe 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h | |||
| @@ -510,10 +510,14 @@ __copy_to_user_std(void __user *to, const void *from, unsigned long n); | |||
| 510 | static inline unsigned long __must_check | 510 | static inline unsigned long __must_check |
| 511 | __copy_to_user(void __user *to, const void *from, unsigned long n) | 511 | __copy_to_user(void __user *to, const void *from, unsigned long n) |
| 512 | { | 512 | { |
| 513 | #ifndef CONFIG_UACCESS_WITH_MEMCPY | ||
| 513 | unsigned int __ua_flags = uaccess_save_and_enable(); | 514 | unsigned int __ua_flags = uaccess_save_and_enable(); |
| 514 | n = arm_copy_to_user(to, from, n); | 515 | n = arm_copy_to_user(to, from, n); |
| 515 | uaccess_restore(__ua_flags); | 516 | uaccess_restore(__ua_flags); |
| 516 | return n; | 517 | return n; |
| 518 | #else | ||
| 519 | return arm_copy_to_user(to, from, n); | ||
| 520 | #endif | ||
| 517 | } | 521 | } |
| 518 | 522 | ||
| 519 | extern unsigned long __must_check | 523 | extern unsigned long __must_check |
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 7a7c4cea5523..4adfb46e3ee9 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
| @@ -95,6 +95,22 @@ void __show_regs(struct pt_regs *regs) | |||
| 95 | { | 95 | { |
| 96 | unsigned long flags; | 96 | unsigned long flags; |
| 97 | char buf[64]; | 97 | char buf[64]; |
| 98 | #ifndef CONFIG_CPU_V7M | ||
| 99 | unsigned int domain; | ||
| 100 | #ifdef CONFIG_CPU_SW_DOMAIN_PAN | ||
| 101 | /* | ||
| 102 | * Get the domain register for the parent context. In user | ||
| 103 | * mode, we don't save the DACR, so lets use what it should | ||
| 104 | * be. For other modes, we place it after the pt_regs struct. | ||
| 105 | */ | ||
| 106 | if (user_mode(regs)) | ||
| 107 | domain = DACR_UACCESS_ENABLE; | ||
| 108 | else | ||
| 109 | domain = *(unsigned int *)(regs + 1); | ||
| 110 | #else | ||
| 111 | domain = get_domain(); | ||
| 112 | #endif | ||
| 113 | #endif | ||
| 98 | 114 | ||
| 99 | show_regs_print_info(KERN_DEFAULT); | 115 | show_regs_print_info(KERN_DEFAULT); |
| 100 | 116 | ||
| @@ -123,21 +139,8 @@ void __show_regs(struct pt_regs *regs) | |||
| 123 | 139 | ||
| 124 | #ifndef CONFIG_CPU_V7M | 140 | #ifndef CONFIG_CPU_V7M |
| 125 | { | 141 | { |
| 126 | unsigned int domain = get_domain(); | ||
| 127 | const char *segment; | 142 | const char *segment; |
| 128 | 143 | ||
| 129 | #ifdef CONFIG_CPU_SW_DOMAIN_PAN | ||
| 130 | /* | ||
| 131 | * Get the domain register for the parent context. In user | ||
| 132 | * mode, we don't save the DACR, so lets use what it should | ||
| 133 | * be. For other modes, we place it after the pt_regs struct. | ||
| 134 | */ | ||
| 135 | if (user_mode(regs)) | ||
| 136 | domain = DACR_UACCESS_ENABLE; | ||
| 137 | else | ||
| 138 | domain = *(unsigned int *)(regs + 1); | ||
| 139 | #endif | ||
| 140 | |||
| 141 | if ((domain & domain_mask(DOMAIN_USER)) == | 144 | if ((domain & domain_mask(DOMAIN_USER)) == |
| 142 | domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) | 145 | domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) |
| 143 | segment = "none"; | 146 | segment = "none"; |
| @@ -163,11 +166,11 @@ void __show_regs(struct pt_regs *regs) | |||
| 163 | buf[0] = '\0'; | 166 | buf[0] = '\0'; |
| 164 | #ifdef CONFIG_CPU_CP15_MMU | 167 | #ifdef CONFIG_CPU_CP15_MMU |
| 165 | { | 168 | { |
| 166 | unsigned int transbase, dac = get_domain(); | 169 | unsigned int transbase; |
| 167 | asm("mrc p15, 0, %0, c2, c0\n\t" | 170 | asm("mrc p15, 0, %0, c2, c0\n\t" |
| 168 | : "=r" (transbase)); | 171 | : "=r" (transbase)); |
| 169 | snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", | 172 | snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", |
| 170 | transbase, dac); | 173 | transbase, domain); |
| 171 | } | 174 | } |
| 172 | #endif | 175 | #endif |
| 173 | asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl)); | 176 | asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl)); |
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c index 5b26e7efa9ea..c3fe769d7558 100644 --- a/arch/arm/kernel/swp_emulate.c +++ b/arch/arm/kernel/swp_emulate.c | |||
| @@ -36,10 +36,10 @@ | |||
| 36 | */ | 36 | */ |
| 37 | #define __user_swpX_asm(data, addr, res, temp, B) \ | 37 | #define __user_swpX_asm(data, addr, res, temp, B) \ |
| 38 | __asm__ __volatile__( \ | 38 | __asm__ __volatile__( \ |
| 39 | " mov %2, %1\n" \ | 39 | "0: ldrex"B" %2, [%3]\n" \ |
| 40 | "0: ldrex"B" %1, [%3]\n" \ | 40 | "1: strex"B" %0, %1, [%3]\n" \ |
| 41 | "1: strex"B" %0, %2, [%3]\n" \ | ||
| 42 | " cmp %0, #0\n" \ | 41 | " cmp %0, #0\n" \ |
| 42 | " moveq %1, %2\n" \ | ||
| 43 | " movne %0, %4\n" \ | 43 | " movne %0, %4\n" \ |
| 44 | "2:\n" \ | 44 | "2:\n" \ |
| 45 | " .section .text.fixup,\"ax\"\n" \ | 45 | " .section .text.fixup,\"ax\"\n" \ |
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index d72b90905132..588bbc288396 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c | |||
| @@ -88,6 +88,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp) | |||
| 88 | static unsigned long noinline | 88 | static unsigned long noinline |
| 89 | __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) | 89 | __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) |
| 90 | { | 90 | { |
| 91 | unsigned long ua_flags; | ||
| 91 | int atomic; | 92 | int atomic; |
| 92 | 93 | ||
| 93 | if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { | 94 | if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { |
| @@ -118,7 +119,9 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) | |||
| 118 | if (tocopy > n) | 119 | if (tocopy > n) |
| 119 | tocopy = n; | 120 | tocopy = n; |
| 120 | 121 | ||
| 122 | ua_flags = uaccess_save_and_enable(); | ||
| 121 | memcpy((void *)to, from, tocopy); | 123 | memcpy((void *)to, from, tocopy); |
| 124 | uaccess_restore(ua_flags); | ||
| 122 | to += tocopy; | 125 | to += tocopy; |
| 123 | from += tocopy; | 126 | from += tocopy; |
| 124 | n -= tocopy; | 127 | n -= tocopy; |
| @@ -145,14 +148,21 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n) | |||
| 145 | * With frame pointer disabled, tail call optimization kicks in | 148 | * With frame pointer disabled, tail call optimization kicks in |
| 146 | * as well making this test almost invisible. | 149 | * as well making this test almost invisible. |
| 147 | */ | 150 | */ |
| 148 | if (n < 64) | 151 | if (n < 64) { |
| 149 | return __copy_to_user_std(to, from, n); | 152 | unsigned long ua_flags = uaccess_save_and_enable(); |
| 150 | return __copy_to_user_memcpy(to, from, n); | 153 | n = __copy_to_user_std(to, from, n); |
| 154 | uaccess_restore(ua_flags); | ||
| 155 | } else { | ||
| 156 | n = __copy_to_user_memcpy(to, from, n); | ||
| 157 | } | ||
| 158 | return n; | ||
| 151 | } | 159 | } |
| 152 | 160 | ||
| 153 | static unsigned long noinline | 161 | static unsigned long noinline |
| 154 | __clear_user_memset(void __user *addr, unsigned long n) | 162 | __clear_user_memset(void __user *addr, unsigned long n) |
| 155 | { | 163 | { |
| 164 | unsigned long ua_flags; | ||
| 165 | |||
| 156 | if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { | 166 | if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { |
| 157 | memset((void *)addr, 0, n); | 167 | memset((void *)addr, 0, n); |
| 158 | return 0; | 168 | return 0; |
| @@ -175,7 +185,9 @@ __clear_user_memset(void __user *addr, unsigned long n) | |||
| 175 | if (tocopy > n) | 185 | if (tocopy > n) |
| 176 | tocopy = n; | 186 | tocopy = n; |
| 177 | 187 | ||
| 188 | ua_flags = uaccess_save_and_enable(); | ||
| 178 | memset((void *)addr, 0, tocopy); | 189 | memset((void *)addr, 0, tocopy); |
| 190 | uaccess_restore(ua_flags); | ||
| 179 | addr += tocopy; | 191 | addr += tocopy; |
| 180 | n -= tocopy; | 192 | n -= tocopy; |
| 181 | 193 | ||
| @@ -193,9 +205,14 @@ out: | |||
| 193 | unsigned long arm_clear_user(void __user *addr, unsigned long n) | 205 | unsigned long arm_clear_user(void __user *addr, unsigned long n) |
| 194 | { | 206 | { |
| 195 | /* See rational for this in __copy_to_user() above. */ | 207 | /* See rational for this in __copy_to_user() above. */ |
| 196 | if (n < 64) | 208 | if (n < 64) { |
| 197 | return __clear_user_std(addr, n); | 209 | unsigned long ua_flags = uaccess_save_and_enable(); |
| 198 | return __clear_user_memset(addr, n); | 210 | n = __clear_user_std(addr, n); |
| 211 | uaccess_restore(ua_flags); | ||
| 212 | } else { | ||
| 213 | n = __clear_user_memset(addr, n); | ||
| 214 | } | ||
| 215 | return n; | ||
| 199 | } | 216 | } |
| 200 | 217 | ||
| 201 | #if 0 | 218 | #if 0 |
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 845769e41332..c8c8b9ed02e0 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
| @@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu) | |||
| 165 | __flush_icache_all(); | 165 | __flush_icache_all(); |
| 166 | } | 166 | } |
| 167 | 167 | ||
| 168 | static int is_reserved_asid(u64 asid) | 168 | static bool check_update_reserved_asid(u64 asid, u64 newasid) |
| 169 | { | 169 | { |
| 170 | int cpu; | 170 | int cpu; |
| 171 | for_each_possible_cpu(cpu) | 171 | bool hit = false; |
| 172 | if (per_cpu(reserved_asids, cpu) == asid) | 172 | |
| 173 | return 1; | 173 | /* |
| 174 | return 0; | 174 | * Iterate over the set of reserved ASIDs looking for a match. |
| 175 | * If we find one, then we can update our mm to use newasid | ||
| 176 | * (i.e. the same ASID in the current generation) but we can't | ||
| 177 | * exit the loop early, since we need to ensure that all copies | ||
| 178 | * of the old ASID are updated to reflect the mm. Failure to do | ||
| 179 | * so could result in us missing the reserved ASID in a future | ||
| 180 | * generation. | ||
| 181 | */ | ||
| 182 | for_each_possible_cpu(cpu) { | ||
| 183 | if (per_cpu(reserved_asids, cpu) == asid) { | ||
| 184 | hit = true; | ||
| 185 | per_cpu(reserved_asids, cpu) = newasid; | ||
| 186 | } | ||
| 187 | } | ||
| 188 | |||
| 189 | return hit; | ||
| 175 | } | 190 | } |
| 176 | 191 | ||
| 177 | static u64 new_context(struct mm_struct *mm, unsigned int cpu) | 192 | static u64 new_context(struct mm_struct *mm, unsigned int cpu) |
| @@ -181,12 +196,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |||
| 181 | u64 generation = atomic64_read(&asid_generation); | 196 | u64 generation = atomic64_read(&asid_generation); |
| 182 | 197 | ||
| 183 | if (asid != 0) { | 198 | if (asid != 0) { |
| 199 | u64 newasid = generation | (asid & ~ASID_MASK); | ||
| 200 | |||
| 184 | /* | 201 | /* |
| 185 | * If our current ASID was active during a rollover, we | 202 | * If our current ASID was active during a rollover, we |
| 186 | * can continue to use it and this was just a false alarm. | 203 | * can continue to use it and this was just a false alarm. |
| 187 | */ | 204 | */ |
| 188 | if (is_reserved_asid(asid)) | 205 | if (check_update_reserved_asid(asid, newasid)) |
| 189 | return generation | (asid & ~ASID_MASK); | 206 | return newasid; |
| 190 | 207 | ||
| 191 | /* | 208 | /* |
| 192 | * We had a valid ASID in a previous life, so try to re-use | 209 | * We had a valid ASID in a previous life, so try to re-use |
| @@ -194,7 +211,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |||
| 194 | */ | 211 | */ |
| 195 | asid &= ~ASID_MASK; | 212 | asid &= ~ASID_MASK; |
| 196 | if (!__test_and_set_bit(asid, asid_map)) | 213 | if (!__test_and_set_bit(asid, asid_map)) |
| 197 | goto bump_gen; | 214 | return newasid; |
| 198 | } | 215 | } |
| 199 | 216 | ||
| 200 | /* | 217 | /* |
| @@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |||
| 216 | 233 | ||
| 217 | __set_bit(asid, asid_map); | 234 | __set_bit(asid, asid_map); |
| 218 | cur_idx = asid; | 235 | cur_idx = asid; |
| 219 | |||
| 220 | bump_gen: | ||
| 221 | asid |= generation; | ||
| 222 | cpumask_clear(mm_cpumask(mm)); | 236 | cpumask_clear(mm_cpumask(mm)); |
| 223 | return asid; | 237 | return asid | generation; |
| 224 | } | 238 | } |
| 225 | 239 | ||
| 226 | void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) | 240 | void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index e62400e5fb99..534a60ae282e 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
| @@ -1521,7 +1521,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, | |||
| 1521 | return -ENOMEM; | 1521 | return -ENOMEM; |
| 1522 | 1522 | ||
| 1523 | for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { | 1523 | for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { |
| 1524 | phys_addr_t phys = sg_phys(s) & PAGE_MASK; | 1524 | phys_addr_t phys = page_to_phys(sg_page(s)); |
| 1525 | unsigned int len = PAGE_ALIGN(s->offset + s->length); | 1525 | unsigned int len = PAGE_ALIGN(s->offset + s->length); |
| 1526 | 1526 | ||
| 1527 | if (!is_coherent && | 1527 | if (!is_coherent && |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 8a63b4cdc0f2..7f8cd1b3557f 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/memblock.h> | 22 | #include <linux/memblock.h> |
| 23 | #include <linux/dma-contiguous.h> | 23 | #include <linux/dma-contiguous.h> |
| 24 | #include <linux/sizes.h> | 24 | #include <linux/sizes.h> |
| 25 | #include <linux/stop_machine.h> | ||
| 25 | 26 | ||
| 26 | #include <asm/cp15.h> | 27 | #include <asm/cp15.h> |
| 27 | #include <asm/mach-types.h> | 28 | #include <asm/mach-types.h> |
| @@ -627,12 +628,10 @@ static struct section_perm ro_perms[] = { | |||
| 627 | * safe to be called with preemption disabled, as under stop_machine(). | 628 | * safe to be called with preemption disabled, as under stop_machine(). |
| 628 | */ | 629 | */ |
| 629 | static inline void section_update(unsigned long addr, pmdval_t mask, | 630 | static inline void section_update(unsigned long addr, pmdval_t mask, |
| 630 | pmdval_t prot) | 631 | pmdval_t prot, struct mm_struct *mm) |
| 631 | { | 632 | { |
| 632 | struct mm_struct *mm; | ||
| 633 | pmd_t *pmd; | 633 | pmd_t *pmd; |
| 634 | 634 | ||
| 635 | mm = current->active_mm; | ||
| 636 | pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr); | 635 | pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr); |
| 637 | 636 | ||
| 638 | #ifdef CONFIG_ARM_LPAE | 637 | #ifdef CONFIG_ARM_LPAE |
| @@ -656,49 +655,82 @@ static inline bool arch_has_strict_perms(void) | |||
| 656 | return !!(get_cr() & CR_XP); | 655 | return !!(get_cr() & CR_XP); |
| 657 | } | 656 | } |
| 658 | 657 | ||
| 659 | #define set_section_perms(perms, field) { \ | 658 | void set_section_perms(struct section_perm *perms, int n, bool set, |
| 660 | size_t i; \ | 659 | struct mm_struct *mm) |
| 661 | unsigned long addr; \ | 660 | { |
| 662 | \ | 661 | size_t i; |
| 663 | if (!arch_has_strict_perms()) \ | 662 | unsigned long addr; |
| 664 | return; \ | 663 | |
| 665 | \ | 664 | if (!arch_has_strict_perms()) |
| 666 | for (i = 0; i < ARRAY_SIZE(perms); i++) { \ | 665 | return; |
| 667 | if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \ | 666 | |
| 668 | !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \ | 667 | for (i = 0; i < n; i++) { |
| 669 | pr_err("BUG: section %lx-%lx not aligned to %lx\n", \ | 668 | if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || |
| 670 | perms[i].start, perms[i].end, \ | 669 | !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { |
| 671 | SECTION_SIZE); \ | 670 | pr_err("BUG: section %lx-%lx not aligned to %lx\n", |
| 672 | continue; \ | 671 | perms[i].start, perms[i].end, |
| 673 | } \ | 672 | SECTION_SIZE); |
| 674 | \ | 673 | continue; |
| 675 | for (addr = perms[i].start; \ | 674 | } |
| 676 | addr < perms[i].end; \ | 675 | |
| 677 | addr += SECTION_SIZE) \ | 676 | for (addr = perms[i].start; |
| 678 | section_update(addr, perms[i].mask, \ | 677 | addr < perms[i].end; |
| 679 | perms[i].field); \ | 678 | addr += SECTION_SIZE) |
| 680 | } \ | 679 | section_update(addr, perms[i].mask, |
| 680 | set ? perms[i].prot : perms[i].clear, mm); | ||
| 681 | } | ||
| 682 | |||
| 681 | } | 683 | } |
| 682 | 684 | ||
| 683 | static inline void fix_kernmem_perms(void) | 685 | static void update_sections_early(struct section_perm perms[], int n) |
| 684 | { | 686 | { |
| 685 | set_section_perms(nx_perms, prot); | 687 | struct task_struct *t, *s; |
| 688 | |||
| 689 | read_lock(&tasklist_lock); | ||
| 690 | for_each_process(t) { | ||
| 691 | if (t->flags & PF_KTHREAD) | ||
| 692 | continue; | ||
| 693 | for_each_thread(t, s) | ||
| 694 | set_section_perms(perms, n, true, s->mm); | ||
| 695 | } | ||
| 696 | read_unlock(&tasklist_lock); | ||
| 697 | set_section_perms(perms, n, true, current->active_mm); | ||
| 698 | set_section_perms(perms, n, true, &init_mm); | ||
| 699 | } | ||
| 700 | |||
| 701 | int __fix_kernmem_perms(void *unused) | ||
| 702 | { | ||
| 703 | update_sections_early(nx_perms, ARRAY_SIZE(nx_perms)); | ||
| 704 | return 0; | ||
| 705 | } | ||
| 706 | |||
| 707 | void fix_kernmem_perms(void) | ||
| 708 | { | ||
| 709 | stop_machine(__fix_kernmem_perms, NULL, NULL); | ||
| 686 | } | 710 | } |
| 687 | 711 | ||
| 688 | #ifdef CONFIG_DEBUG_RODATA | 712 | #ifdef CONFIG_DEBUG_RODATA |
| 713 | int __mark_rodata_ro(void *unused) | ||
| 714 | { | ||
| 715 | update_sections_early(ro_perms, ARRAY_SIZE(ro_perms)); | ||
| 716 | return 0; | ||
| 717 | } | ||
| 718 | |||
| 689 | void mark_rodata_ro(void) | 719 | void mark_rodata_ro(void) |
| 690 | { | 720 | { |
| 691 | set_section_perms(ro_perms, prot); | 721 | stop_machine(__mark_rodata_ro, NULL, NULL); |
| 692 | } | 722 | } |
| 693 | 723 | ||
| 694 | void set_kernel_text_rw(void) | 724 | void set_kernel_text_rw(void) |
| 695 | { | 725 | { |
| 696 | set_section_perms(ro_perms, clear); | 726 | set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false, |
| 727 | current->active_mm); | ||
| 697 | } | 728 | } |
| 698 | 729 | ||
| 699 | void set_kernel_text_ro(void) | 730 | void set_kernel_text_ro(void) |
| 700 | { | 731 | { |
| 701 | set_section_perms(ro_perms, prot); | 732 | set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true, |
| 733 | current->active_mm); | ||
| 702 | } | 734 | } |
| 703 | #endif /* CONFIG_DEBUG_RODATA */ | 735 | #endif /* CONFIG_DEBUG_RODATA */ |
| 704 | 736 | ||
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index de2b246fed38..8e1ea433c3f1 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
| @@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area) | |||
| 95 | .equ cpu_v7_suspend_size, 4 * 9 | 95 | .equ cpu_v7_suspend_size, 4 * 9 |
| 96 | #ifdef CONFIG_ARM_CPU_SUSPEND | 96 | #ifdef CONFIG_ARM_CPU_SUSPEND |
| 97 | ENTRY(cpu_v7_do_suspend) | 97 | ENTRY(cpu_v7_do_suspend) |
| 98 | stmfd sp!, {r4 - r10, lr} | 98 | stmfd sp!, {r4 - r11, lr} |
| 99 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID | 99 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID |
| 100 | mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID | 100 | mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID |
| 101 | stmia r0!, {r4 - r5} | 101 | stmia r0!, {r4 - r5} |
| @@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend) | |||
| 112 | mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register | 112 | mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register |
| 113 | mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control | 113 | mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control |
| 114 | stmia r0, {r5 - r11} | 114 | stmia r0, {r5 - r11} |
| 115 | ldmfd sp!, {r4 - r10, pc} | 115 | ldmfd sp!, {r4 - r11, pc} |
| 116 | ENDPROC(cpu_v7_do_suspend) | 116 | ENDPROC(cpu_v7_do_suspend) |
| 117 | 117 | ||
| 118 | ENTRY(cpu_v7_do_resume) | 118 | ENTRY(cpu_v7_do_resume) |
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h index db73390568c8..74c132d901bd 100644 --- a/arch/ia64/include/asm/unistd.h +++ b/arch/ia64/include/asm/unistd.h | |||
| @@ -11,7 +11,7 @@ | |||
| 11 | 11 | ||
| 12 | 12 | ||
| 13 | 13 | ||
| 14 | #define NR_syscalls 322 /* length of syscall table */ | 14 | #define NR_syscalls 323 /* length of syscall table */ |
| 15 | 15 | ||
| 16 | /* | 16 | /* |
| 17 | * The following defines stop scripts/checksyscalls.sh from complaining about | 17 | * The following defines stop scripts/checksyscalls.sh from complaining about |
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h index 9038726e7d26..762edce7572e 100644 --- a/arch/ia64/include/uapi/asm/unistd.h +++ b/arch/ia64/include/uapi/asm/unistd.h | |||
| @@ -335,5 +335,6 @@ | |||
| 335 | #define __NR_userfaultfd 1343 | 335 | #define __NR_userfaultfd 1343 |
| 336 | #define __NR_membarrier 1344 | 336 | #define __NR_membarrier 1344 |
| 337 | #define __NR_kcmp 1345 | 337 | #define __NR_kcmp 1345 |
| 338 | #define __NR_mlock2 1346 | ||
| 338 | 339 | ||
| 339 | #endif /* _UAPI_ASM_IA64_UNISTD_H */ | 340 | #endif /* _UAPI_ASM_IA64_UNISTD_H */ |
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index dcd97f84d065..534a74acb849 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S | |||
| @@ -1771,5 +1771,6 @@ sys_call_table: | |||
| 1771 | data8 sys_userfaultfd | 1771 | data8 sys_userfaultfd |
| 1772 | data8 sys_membarrier | 1772 | data8 sys_membarrier |
| 1773 | data8 sys_kcmp // 1345 | 1773 | data8 sys_kcmp // 1345 |
| 1774 | data8 sys_mlock2 | ||
| 1774 | 1775 | ||
| 1775 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls | 1776 | .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls |
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c index c89da6312954..bf4dec229437 100644 --- a/arch/microblaze/kernel/dma.c +++ b/arch/microblaze/kernel/dma.c | |||
| @@ -61,7 +61,8 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, | |||
| 61 | /* FIXME this part of code is untested */ | 61 | /* FIXME this part of code is untested */ |
| 62 | for_each_sg(sgl, sg, nents, i) { | 62 | for_each_sg(sgl, sg, nents, i) { |
| 63 | sg->dma_address = sg_phys(sg); | 63 | sg->dma_address = sg_phys(sg); |
| 64 | __dma_sync(sg_phys(sg), sg->length, direction); | 64 | __dma_sync(page_to_phys(sg_page(sg)) + sg->offset, |
| 65 | sg->length, direction); | ||
| 65 | } | 66 | } |
| 66 | 67 | ||
| 67 | return nents; | 68 | return nents; |
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index f2b0b1b0c72a..5654ece02c0d 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h | |||
| @@ -370,16 +370,16 @@ COMPAT_SYS(execveat) | |||
| 370 | PPC64ONLY(switch_endian) | 370 | PPC64ONLY(switch_endian) |
| 371 | SYSCALL_SPU(userfaultfd) | 371 | SYSCALL_SPU(userfaultfd) |
| 372 | SYSCALL_SPU(membarrier) | 372 | SYSCALL_SPU(membarrier) |
| 373 | SYSCALL(semop) | 373 | SYSCALL(ni_syscall) |
| 374 | SYSCALL(semget) | 374 | SYSCALL(ni_syscall) |
| 375 | COMPAT_SYS(semctl) | 375 | SYSCALL(ni_syscall) |
| 376 | COMPAT_SYS(semtimedop) | 376 | SYSCALL(ni_syscall) |
| 377 | COMPAT_SYS(msgsnd) | 377 | SYSCALL(ni_syscall) |
| 378 | COMPAT_SYS(msgrcv) | 378 | SYSCALL(ni_syscall) |
| 379 | SYSCALL(msgget) | 379 | SYSCALL(ni_syscall) |
| 380 | COMPAT_SYS(msgctl) | 380 | SYSCALL(ni_syscall) |
| 381 | COMPAT_SYS(shmat) | 381 | SYSCALL(ni_syscall) |
| 382 | SYSCALL(shmdt) | 382 | SYSCALL(ni_syscall) |
| 383 | SYSCALL(shmget) | 383 | SYSCALL(ni_syscall) |
| 384 | COMPAT_SYS(shmctl) | 384 | SYSCALL(ni_syscall) |
| 385 | SYSCALL(mlock2) | 385 | SYSCALL(mlock2) |
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 1effea5193d6..12a05652377a 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h | |||
| @@ -388,18 +388,6 @@ | |||
| 388 | #define __NR_switch_endian 363 | 388 | #define __NR_switch_endian 363 |
| 389 | #define __NR_userfaultfd 364 | 389 | #define __NR_userfaultfd 364 |
| 390 | #define __NR_membarrier 365 | 390 | #define __NR_membarrier 365 |
| 391 | #define __NR_semop 366 | ||
| 392 | #define __NR_semget 367 | ||
| 393 | #define __NR_semctl 368 | ||
| 394 | #define __NR_semtimedop 369 | ||
| 395 | #define __NR_msgsnd 370 | ||
| 396 | #define __NR_msgrcv 371 | ||
| 397 | #define __NR_msgget 372 | ||
| 398 | #define __NR_msgctl 373 | ||
| 399 | #define __NR_shmat 374 | ||
| 400 | #define __NR_shmdt 375 | ||
| 401 | #define __NR_shmget 376 | ||
| 402 | #define __NR_shmctl 377 | ||
| 403 | #define __NR_mlock2 378 | 391 | #define __NR_mlock2 378 |
| 404 | 392 | ||
| 405 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ | 393 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ |
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c index 0a00e2aed393..e505223b4ec5 100644 --- a/arch/powerpc/platforms/powernv/opal-irqchip.c +++ b/arch/powerpc/platforms/powernv/opal-irqchip.c | |||
| @@ -83,7 +83,19 @@ static void opal_event_unmask(struct irq_data *d) | |||
| 83 | set_bit(d->hwirq, &opal_event_irqchip.mask); | 83 | set_bit(d->hwirq, &opal_event_irqchip.mask); |
| 84 | 84 | ||
| 85 | opal_poll_events(&events); | 85 | opal_poll_events(&events); |
| 86 | opal_handle_events(be64_to_cpu(events)); | 86 | last_outstanding_events = be64_to_cpu(events); |
| 87 | |||
| 88 | /* | ||
| 89 | * We can't just handle the events now with opal_handle_events(). | ||
| 90 | * If we did we would deadlock when opal_event_unmask() is called from | ||
| 91 | * handle_level_irq() with the irq descriptor lock held, because | ||
| 92 | * calling opal_handle_events() would call generic_handle_irq() and | ||
| 93 | * then handle_level_irq() which would try to take the descriptor lock | ||
| 94 | * again. Instead queue the events for later. | ||
| 95 | */ | ||
| 96 | if (last_outstanding_events & opal_event_irqchip.mask) | ||
| 97 | /* Need to retrigger the interrupt */ | ||
| 98 | irq_work_queue(&opal_event_irq_work); | ||
| 87 | } | 99 | } |
| 88 | 100 | ||
| 89 | static int opal_event_set_type(struct irq_data *d, unsigned int flow_type) | 101 | static int opal_event_set_type(struct irq_data *d, unsigned int flow_type) |
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 4296d55e88f3..57cffb80bc36 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c | |||
| @@ -278,7 +278,7 @@ static void opal_handle_message(void) | |||
| 278 | 278 | ||
| 279 | /* Sanity check */ | 279 | /* Sanity check */ |
| 280 | if (type >= OPAL_MSG_TYPE_MAX) { | 280 | if (type >= OPAL_MSG_TYPE_MAX) { |
| 281 | pr_warning("%s: Unknown message type: %u\n", __func__, type); | 281 | pr_warn_once("%s: Unknown message type: %u\n", __func__, type); |
| 282 | return; | 282 | return; |
| 283 | } | 283 | } |
| 284 | opal_message_do_notify(type, (void *)&msg); | 284 | opal_message_do_notify(type, (void *)&msg); |
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index a035c2aa7801..0f1c6fc3ddd8 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c | |||
| @@ -89,7 +89,7 @@ static struct addr_marker address_markers[] = { | |||
| 89 | { 0/* VMALLOC_START */, "vmalloc() Area" }, | 89 | { 0/* VMALLOC_START */, "vmalloc() Area" }, |
| 90 | { 0/*VMALLOC_END*/, "vmalloc() End" }, | 90 | { 0/*VMALLOC_END*/, "vmalloc() End" }, |
| 91 | # ifdef CONFIG_HIGHMEM | 91 | # ifdef CONFIG_HIGHMEM |
| 92 | { 0/*PKMAP_BASE*/, "Persisent kmap() Area" }, | 92 | { 0/*PKMAP_BASE*/, "Persistent kmap() Area" }, |
| 93 | # endif | 93 | # endif |
| 94 | { 0/*FIXADDR_START*/, "Fixmap Area" }, | 94 | { 0/*FIXADDR_START*/, "Fixmap Area" }, |
| 95 | #endif | 95 | #endif |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index ac161db63388..cb5e266a8bf7 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
| @@ -2495,14 +2495,9 @@ void __init xen_init_mmu_ops(void) | |||
| 2495 | { | 2495 | { |
| 2496 | x86_init.paging.pagetable_init = xen_pagetable_init; | 2496 | x86_init.paging.pagetable_init = xen_pagetable_init; |
| 2497 | 2497 | ||
| 2498 | /* Optimization - we can use the HVM one but it has no idea which | 2498 | if (xen_feature(XENFEAT_auto_translated_physmap)) |
| 2499 | * VCPUs are descheduled - which means that it will needlessly IPI | ||
| 2500 | * them. Xen knows so let it do the job. | ||
| 2501 | */ | ||
| 2502 | if (xen_feature(XENFEAT_auto_translated_physmap)) { | ||
| 2503 | pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others; | ||
| 2504 | return; | 2499 | return; |
| 2505 | } | 2500 | |
| 2506 | pv_mmu_ops = xen_mmu_ops; | 2501 | pv_mmu_ops = xen_mmu_ops; |
| 2507 | 2502 | ||
| 2508 | memset(dummy_mapping, 0xff, PAGE_SIZE); | 2503 | memset(dummy_mapping, 0xff, PAGE_SIZE); |
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c index feddabdab448..3705eabd7e22 100644 --- a/arch/x86/xen/suspend.c +++ b/arch/x86/xen/suspend.c | |||
| @@ -68,26 +68,16 @@ static void xen_pv_post_suspend(int suspend_cancelled) | |||
| 68 | 68 | ||
| 69 | void xen_arch_pre_suspend(void) | 69 | void xen_arch_pre_suspend(void) |
| 70 | { | 70 | { |
| 71 | int cpu; | ||
| 72 | |||
| 73 | for_each_online_cpu(cpu) | ||
| 74 | xen_pmu_finish(cpu); | ||
| 75 | |||
| 76 | if (xen_pv_domain()) | 71 | if (xen_pv_domain()) |
| 77 | xen_pv_pre_suspend(); | 72 | xen_pv_pre_suspend(); |
| 78 | } | 73 | } |
| 79 | 74 | ||
| 80 | void xen_arch_post_suspend(int cancelled) | 75 | void xen_arch_post_suspend(int cancelled) |
| 81 | { | 76 | { |
| 82 | int cpu; | ||
| 83 | |||
| 84 | if (xen_pv_domain()) | 77 | if (xen_pv_domain()) |
| 85 | xen_pv_post_suspend(cancelled); | 78 | xen_pv_post_suspend(cancelled); |
| 86 | else | 79 | else |
| 87 | xen_hvm_post_suspend(cancelled); | 80 | xen_hvm_post_suspend(cancelled); |
| 88 | |||
| 89 | for_each_online_cpu(cpu) | ||
| 90 | xen_pmu_init(cpu); | ||
| 91 | } | 81 | } |
| 92 | 82 | ||
| 93 | static void xen_vcpu_notify_restore(void *data) | 83 | static void xen_vcpu_notify_restore(void *data) |
| @@ -106,10 +96,20 @@ static void xen_vcpu_notify_suspend(void *data) | |||
| 106 | 96 | ||
| 107 | void xen_arch_resume(void) | 97 | void xen_arch_resume(void) |
| 108 | { | 98 | { |
| 99 | int cpu; | ||
| 100 | |||
| 109 | on_each_cpu(xen_vcpu_notify_restore, NULL, 1); | 101 | on_each_cpu(xen_vcpu_notify_restore, NULL, 1); |
| 102 | |||
| 103 | for_each_online_cpu(cpu) | ||
| 104 | xen_pmu_init(cpu); | ||
| 110 | } | 105 | } |
| 111 | 106 | ||
| 112 | void xen_arch_suspend(void) | 107 | void xen_arch_suspend(void) |
| 113 | { | 108 | { |
| 109 | int cpu; | ||
| 110 | |||
| 111 | for_each_online_cpu(cpu) | ||
| 112 | xen_pmu_finish(cpu); | ||
| 113 | |||
| 114 | on_each_cpu(xen_vcpu_notify_suspend, NULL, 1); | 114 | on_each_cpu(xen_vcpu_notify_suspend, NULL, 1); |
| 115 | } | 115 | } |
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index b4ffc5be1a93..e5b5721809e2 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c | |||
| @@ -277,12 +277,12 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req, | |||
| 277 | if (WARN_ON_ONCE(in_irq())) | 277 | if (WARN_ON_ONCE(in_irq())) |
| 278 | return -EDEADLK; | 278 | return -EDEADLK; |
| 279 | 279 | ||
| 280 | walk->iv = req->info; | ||
| 280 | walk->nbytes = walk->total; | 281 | walk->nbytes = walk->total; |
| 281 | if (unlikely(!walk->total)) | 282 | if (unlikely(!walk->total)) |
| 282 | return 0; | 283 | return 0; |
| 283 | 284 | ||
| 284 | walk->iv_buffer = NULL; | 285 | walk->iv_buffer = NULL; |
| 285 | walk->iv = req->info; | ||
| 286 | if (unlikely(((unsigned long)walk->iv & alignmask))) { | 286 | if (unlikely(((unsigned long)walk->iv & alignmask))) { |
| 287 | int err = ablkcipher_copy_iv(walk, tfm, alignmask); | 287 | int err = ablkcipher_copy_iv(walk, tfm, alignmask); |
| 288 | 288 | ||
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 11b981492031..8cc1622b2ee0 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c | |||
| @@ -326,12 +326,12 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc, | |||
| 326 | if (WARN_ON_ONCE(in_irq())) | 326 | if (WARN_ON_ONCE(in_irq())) |
| 327 | return -EDEADLK; | 327 | return -EDEADLK; |
| 328 | 328 | ||
| 329 | walk->iv = desc->info; | ||
| 329 | walk->nbytes = walk->total; | 330 | walk->nbytes = walk->total; |
| 330 | if (unlikely(!walk->total)) | 331 | if (unlikely(!walk->total)) |
| 331 | return 0; | 332 | return 0; |
| 332 | 333 | ||
| 333 | walk->buffer = NULL; | 334 | walk->buffer = NULL; |
| 334 | walk->iv = desc->info; | ||
| 335 | if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { | 335 | if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { |
| 336 | int err = blkcipher_copy_iv(walk); | 336 | int err = blkcipher_copy_iv(walk); |
| 337 | if (err) | 337 | if (err) |
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index e7ed39bab97d..aa45d4802707 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c | |||
| @@ -1810,7 +1810,7 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) | |||
| 1810 | if (!dev->driver) { | 1810 | if (!dev->driver) { |
| 1811 | /* dev->driver may be null if we're being removed */ | 1811 | /* dev->driver may be null if we're being removed */ |
| 1812 | dev_dbg(dev, "%s: no driver found for dev\n", __func__); | 1812 | dev_dbg(dev, "%s: no driver found for dev\n", __func__); |
| 1813 | return; | 1813 | goto out_unlock; |
| 1814 | } | 1814 | } |
| 1815 | 1815 | ||
| 1816 | if (!acpi_desc) { | 1816 | if (!acpi_desc) { |
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 167418e73445..65f50eccd49b 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
| @@ -390,6 +390,7 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
| 390 | struct generic_pm_domain *genpd; | 390 | struct generic_pm_domain *genpd; |
| 391 | bool (*stop_ok)(struct device *__dev); | 391 | bool (*stop_ok)(struct device *__dev); |
| 392 | struct gpd_timing_data *td = &dev_gpd_data(dev)->td; | 392 | struct gpd_timing_data *td = &dev_gpd_data(dev)->td; |
| 393 | bool runtime_pm = pm_runtime_enabled(dev); | ||
| 393 | ktime_t time_start; | 394 | ktime_t time_start; |
| 394 | s64 elapsed_ns; | 395 | s64 elapsed_ns; |
| 395 | int ret; | 396 | int ret; |
| @@ -400,12 +401,19 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
| 400 | if (IS_ERR(genpd)) | 401 | if (IS_ERR(genpd)) |
| 401 | return -EINVAL; | 402 | return -EINVAL; |
| 402 | 403 | ||
| 404 | /* | ||
| 405 | * A runtime PM centric subsystem/driver may re-use the runtime PM | ||
| 406 | * callbacks for other purposes than runtime PM. In those scenarios | ||
| 407 | * runtime PM is disabled. Under these circumstances, we shall skip | ||
| 408 | * validating/measuring the PM QoS latency. | ||
| 409 | */ | ||
| 403 | stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; | 410 | stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL; |
| 404 | if (stop_ok && !stop_ok(dev)) | 411 | if (runtime_pm && stop_ok && !stop_ok(dev)) |
| 405 | return -EBUSY; | 412 | return -EBUSY; |
| 406 | 413 | ||
| 407 | /* Measure suspend latency. */ | 414 | /* Measure suspend latency. */ |
| 408 | time_start = ktime_get(); | 415 | if (runtime_pm) |
| 416 | time_start = ktime_get(); | ||
| 409 | 417 | ||
| 410 | ret = genpd_save_dev(genpd, dev); | 418 | ret = genpd_save_dev(genpd, dev); |
| 411 | if (ret) | 419 | if (ret) |
| @@ -418,13 +426,15 @@ static int pm_genpd_runtime_suspend(struct device *dev) | |||
| 418 | } | 426 | } |
| 419 | 427 | ||
| 420 | /* Update suspend latency value if the measured time exceeds it. */ | 428 | /* Update suspend latency value if the measured time exceeds it. */ |
| 421 | elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); | 429 | if (runtime_pm) { |
| 422 | if (elapsed_ns > td->suspend_latency_ns) { | 430 | elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); |
| 423 | td->suspend_latency_ns = elapsed_ns; | 431 | if (elapsed_ns > td->suspend_latency_ns) { |
| 424 | dev_dbg(dev, "suspend latency exceeded, %lld ns\n", | 432 | td->suspend_latency_ns = elapsed_ns; |
| 425 | elapsed_ns); | 433 | dev_dbg(dev, "suspend latency exceeded, %lld ns\n", |
| 426 | genpd->max_off_time_changed = true; | 434 | elapsed_ns); |
| 427 | td->constraint_changed = true; | 435 | genpd->max_off_time_changed = true; |
| 436 | td->constraint_changed = true; | ||
| 437 | } | ||
| 428 | } | 438 | } |
| 429 | 439 | ||
| 430 | /* | 440 | /* |
| @@ -453,6 +463,7 @@ static int pm_genpd_runtime_resume(struct device *dev) | |||
| 453 | { | 463 | { |
| 454 | struct generic_pm_domain *genpd; | 464 | struct generic_pm_domain *genpd; |
| 455 | struct gpd_timing_data *td = &dev_gpd_data(dev)->td; | 465 | struct gpd_timing_data *td = &dev_gpd_data(dev)->td; |
| 466 | bool runtime_pm = pm_runtime_enabled(dev); | ||
| 456 | ktime_t time_start; | 467 | ktime_t time_start; |
| 457 | s64 elapsed_ns; | 468 | s64 elapsed_ns; |
| 458 | int ret; | 469 | int ret; |
| @@ -479,14 +490,14 @@ static int pm_genpd_runtime_resume(struct device *dev) | |||
| 479 | 490 | ||
| 480 | out: | 491 | out: |
| 481 | /* Measure resume latency. */ | 492 | /* Measure resume latency. */ |
| 482 | if (timed) | 493 | if (timed && runtime_pm) |
| 483 | time_start = ktime_get(); | 494 | time_start = ktime_get(); |
| 484 | 495 | ||
| 485 | genpd_start_dev(genpd, dev); | 496 | genpd_start_dev(genpd, dev); |
| 486 | genpd_restore_dev(genpd, dev); | 497 | genpd_restore_dev(genpd, dev); |
| 487 | 498 | ||
| 488 | /* Update resume latency value if the measured time exceeds it. */ | 499 | /* Update resume latency value if the measured time exceeds it. */ |
| 489 | if (timed) { | 500 | if (timed && runtime_pm) { |
| 490 | elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); | 501 | elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); |
| 491 | if (elapsed_ns > td->resume_latency_ns) { | 502 | if (elapsed_ns > td->resume_latency_ns) { |
| 492 | td->resume_latency_ns = elapsed_ns; | 503 | td->resume_latency_ns = elapsed_ns; |
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index f9099940c272..41fb1a917b17 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c | |||
| @@ -950,6 +950,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req, | |||
| 950 | goto unmap; | 950 | goto unmap; |
| 951 | 951 | ||
| 952 | for (n = 0, i = 0; n < nseg; n++) { | 952 | for (n = 0, i = 0; n < nseg; n++) { |
| 953 | uint8_t first_sect, last_sect; | ||
| 954 | |||
| 953 | if ((n % SEGS_PER_INDIRECT_FRAME) == 0) { | 955 | if ((n % SEGS_PER_INDIRECT_FRAME) == 0) { |
| 954 | /* Map indirect segments */ | 956 | /* Map indirect segments */ |
| 955 | if (segments) | 957 | if (segments) |
| @@ -957,15 +959,18 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req, | |||
| 957 | segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page); | 959 | segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page); |
| 958 | } | 960 | } |
| 959 | i = n % SEGS_PER_INDIRECT_FRAME; | 961 | i = n % SEGS_PER_INDIRECT_FRAME; |
| 962 | |||
| 960 | pending_req->segments[n]->gref = segments[i].gref; | 963 | pending_req->segments[n]->gref = segments[i].gref; |
| 961 | seg[n].nsec = segments[i].last_sect - | 964 | |
| 962 | segments[i].first_sect + 1; | 965 | first_sect = READ_ONCE(segments[i].first_sect); |
| 963 | seg[n].offset = (segments[i].first_sect << 9); | 966 | last_sect = READ_ONCE(segments[i].last_sect); |
| 964 | if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) || | 967 | if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) { |
| 965 | (segments[i].last_sect < segments[i].first_sect)) { | ||
| 966 | rc = -EINVAL; | 968 | rc = -EINVAL; |
| 967 | goto unmap; | 969 | goto unmap; |
| 968 | } | 970 | } |
| 971 | |||
| 972 | seg[n].nsec = last_sect - first_sect + 1; | ||
| 973 | seg[n].offset = first_sect << 9; | ||
| 969 | preq->nr_sects += seg[n].nsec; | 974 | preq->nr_sects += seg[n].nsec; |
| 970 | } | 975 | } |
| 971 | 976 | ||
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 68e87a037b99..c929ae22764c 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h | |||
| @@ -408,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst, | |||
| 408 | struct blkif_x86_32_request *src) | 408 | struct blkif_x86_32_request *src) |
| 409 | { | 409 | { |
| 410 | int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; | 410 | int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; |
| 411 | dst->operation = src->operation; | 411 | dst->operation = READ_ONCE(src->operation); |
| 412 | switch (src->operation) { | 412 | switch (dst->operation) { |
| 413 | case BLKIF_OP_READ: | 413 | case BLKIF_OP_READ: |
| 414 | case BLKIF_OP_WRITE: | 414 | case BLKIF_OP_WRITE: |
| 415 | case BLKIF_OP_WRITE_BARRIER: | 415 | case BLKIF_OP_WRITE_BARRIER: |
| @@ -456,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst, | |||
| 456 | struct blkif_x86_64_request *src) | 456 | struct blkif_x86_64_request *src) |
| 457 | { | 457 | { |
| 458 | int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; | 458 | int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j; |
| 459 | dst->operation = src->operation; | 459 | dst->operation = READ_ONCE(src->operation); |
| 460 | switch (src->operation) { | 460 | switch (dst->operation) { |
| 461 | case BLKIF_OP_READ: | 461 | case BLKIF_OP_READ: |
| 462 | case BLKIF_OP_WRITE: | 462 | case BLKIF_OP_WRITE: |
| 463 | case BLKIF_OP_WRITE_BARRIER: | 463 | case BLKIF_OP_WRITE_BARRIER: |
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 235a1ba73d92..b1f8a73e5a94 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm | |||
| @@ -226,7 +226,7 @@ config ARM_TEGRA20_CPUFREQ | |||
| 226 | 226 | ||
| 227 | config ARM_TEGRA124_CPUFREQ | 227 | config ARM_TEGRA124_CPUFREQ |
| 228 | tristate "Tegra124 CPUFreq support" | 228 | tristate "Tegra124 CPUFreq support" |
| 229 | depends on ARCH_TEGRA && CPUFREQ_DT | 229 | depends on ARCH_TEGRA && CPUFREQ_DT && REGULATOR |
| 230 | default y | 230 | default y |
| 231 | help | 231 | help |
| 232 | This adds the CPUFreq driver support for Tegra124 SOCs. | 232 | This adds the CPUFreq driver support for Tegra124 SOCs. |
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 4d07cbd2b23c..98fb8821382d 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -1123,7 +1123,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 1123 | limits->max_sysfs_pct); | 1123 | limits->max_sysfs_pct); |
| 1124 | limits->max_perf_pct = max(limits->min_policy_pct, | 1124 | limits->max_perf_pct = max(limits->min_policy_pct, |
| 1125 | limits->max_perf_pct); | 1125 | limits->max_perf_pct); |
| 1126 | limits->max_perf = round_up(limits->max_perf, 8); | 1126 | limits->max_perf = round_up(limits->max_perf, FRAC_BITS); |
| 1127 | 1127 | ||
| 1128 | /* Make sure min_perf_pct <= max_perf_pct */ | 1128 | /* Make sure min_perf_pct <= max_perf_pct */ |
| 1129 | limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); | 1129 | limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); |
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 7f039de143f0..370c661c7d7b 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c | |||
| @@ -156,7 +156,7 @@ | |||
| 156 | #define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */ | 156 | #define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */ |
| 157 | #define AT_XDMAC_CC_WRIP_DONE (0x0 << 23) | 157 | #define AT_XDMAC_CC_WRIP_DONE (0x0 << 23) |
| 158 | #define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23) | 158 | #define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23) |
| 159 | #define AT_XDMAC_CC_PERID(i) (0x7f & (h) << 24) /* Channel Peripheral Identifier */ | 159 | #define AT_XDMAC_CC_PERID(i) (0x7f & (i) << 24) /* Channel Peripheral Identifier */ |
| 160 | #define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */ | 160 | #define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */ |
| 161 | #define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */ | 161 | #define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */ |
| 162 | #define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */ | 162 | #define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */ |
| @@ -965,7 +965,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, | |||
| 965 | NULL, | 965 | NULL, |
| 966 | src_addr, dst_addr, | 966 | src_addr, dst_addr, |
| 967 | xt, xt->sgl); | 967 | xt, xt->sgl); |
| 968 | for (i = 0; i < xt->numf; i++) | 968 | |
| 969 | /* Length of the block is (BLEN+1) microblocks. */ | ||
| 970 | for (i = 0; i < xt->numf - 1; i++) | ||
| 969 | at_xdmac_increment_block_count(chan, first); | 971 | at_xdmac_increment_block_count(chan, first); |
| 970 | 972 | ||
| 971 | dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", | 973 | dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", |
| @@ -1086,6 +1088,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | |||
| 1086 | /* Check remaining length and change data width if needed. */ | 1088 | /* Check remaining length and change data width if needed. */ |
| 1087 | dwidth = at_xdmac_align_width(chan, | 1089 | dwidth = at_xdmac_align_width(chan, |
| 1088 | src_addr | dst_addr | xfer_size); | 1090 | src_addr | dst_addr | xfer_size); |
| 1091 | chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK; | ||
| 1089 | chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); | 1092 | chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); |
| 1090 | 1093 | ||
| 1091 | ublen = xfer_size >> dwidth; | 1094 | ublen = xfer_size >> dwidth; |
| @@ -1333,7 +1336,7 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl, | |||
| 1333 | * since we don't care about the stride anymore. | 1336 | * since we don't care about the stride anymore. |
| 1334 | */ | 1337 | */ |
| 1335 | if ((i == (sg_len - 1)) && | 1338 | if ((i == (sg_len - 1)) && |
| 1336 | sg_dma_len(ppsg) == sg_dma_len(psg)) { | 1339 | sg_dma_len(psg) == sg_dma_len(sg)) { |
| 1337 | dev_dbg(chan2dev(chan), | 1340 | dev_dbg(chan2dev(chan), |
| 1338 | "%s: desc 0x%p can be merged with desc 0x%p\n", | 1341 | "%s: desc 0x%p can be merged with desc 0x%p\n", |
| 1339 | __func__, desc, pdesc); | 1342 | __func__, desc, pdesc); |
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index c92d6a70ccf3..996c4b00d323 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c | |||
| @@ -31,6 +31,7 @@ | |||
| 31 | */ | 31 | */ |
| 32 | #include <linux/dmaengine.h> | 32 | #include <linux/dmaengine.h> |
| 33 | #include <linux/dma-mapping.h> | 33 | #include <linux/dma-mapping.h> |
| 34 | #include <linux/dmapool.h> | ||
| 34 | #include <linux/err.h> | 35 | #include <linux/err.h> |
| 35 | #include <linux/init.h> | 36 | #include <linux/init.h> |
| 36 | #include <linux/interrupt.h> | 37 | #include <linux/interrupt.h> |
| @@ -62,6 +63,11 @@ struct bcm2835_dma_cb { | |||
| 62 | uint32_t pad[2]; | 63 | uint32_t pad[2]; |
| 63 | }; | 64 | }; |
| 64 | 65 | ||
| 66 | struct bcm2835_cb_entry { | ||
| 67 | struct bcm2835_dma_cb *cb; | ||
| 68 | dma_addr_t paddr; | ||
| 69 | }; | ||
| 70 | |||
| 65 | struct bcm2835_chan { | 71 | struct bcm2835_chan { |
| 66 | struct virt_dma_chan vc; | 72 | struct virt_dma_chan vc; |
| 67 | struct list_head node; | 73 | struct list_head node; |
| @@ -72,18 +78,18 @@ struct bcm2835_chan { | |||
| 72 | 78 | ||
| 73 | int ch; | 79 | int ch; |
| 74 | struct bcm2835_desc *desc; | 80 | struct bcm2835_desc *desc; |
| 81 | struct dma_pool *cb_pool; | ||
| 75 | 82 | ||
| 76 | void __iomem *chan_base; | 83 | void __iomem *chan_base; |
| 77 | int irq_number; | 84 | int irq_number; |
| 78 | }; | 85 | }; |
| 79 | 86 | ||
| 80 | struct bcm2835_desc { | 87 | struct bcm2835_desc { |
| 88 | struct bcm2835_chan *c; | ||
| 81 | struct virt_dma_desc vd; | 89 | struct virt_dma_desc vd; |
| 82 | enum dma_transfer_direction dir; | 90 | enum dma_transfer_direction dir; |
| 83 | 91 | ||
| 84 | unsigned int control_block_size; | 92 | struct bcm2835_cb_entry *cb_list; |
| 85 | struct bcm2835_dma_cb *control_block_base; | ||
| 86 | dma_addr_t control_block_base_phys; | ||
| 87 | 93 | ||
| 88 | unsigned int frames; | 94 | unsigned int frames; |
| 89 | size_t size; | 95 | size_t size; |
| @@ -143,10 +149,13 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc( | |||
| 143 | static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) | 149 | static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) |
| 144 | { | 150 | { |
| 145 | struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd); | 151 | struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd); |
| 146 | dma_free_coherent(desc->vd.tx.chan->device->dev, | 152 | int i; |
| 147 | desc->control_block_size, | 153 | |
| 148 | desc->control_block_base, | 154 | for (i = 0; i < desc->frames; i++) |
| 149 | desc->control_block_base_phys); | 155 | dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb, |
| 156 | desc->cb_list[i].paddr); | ||
| 157 | |||
| 158 | kfree(desc->cb_list); | ||
| 150 | kfree(desc); | 159 | kfree(desc); |
| 151 | } | 160 | } |
| 152 | 161 | ||
| @@ -199,7 +208,7 @@ static void bcm2835_dma_start_desc(struct bcm2835_chan *c) | |||
| 199 | 208 | ||
| 200 | c->desc = d = to_bcm2835_dma_desc(&vd->tx); | 209 | c->desc = d = to_bcm2835_dma_desc(&vd->tx); |
| 201 | 210 | ||
| 202 | writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR); | 211 | writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR); |
| 203 | writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); | 212 | writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); |
| 204 | } | 213 | } |
| 205 | 214 | ||
| @@ -232,9 +241,16 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) | |||
| 232 | static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan) | 241 | static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan) |
| 233 | { | 242 | { |
| 234 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | 243 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); |
| 244 | struct device *dev = c->vc.chan.device->dev; | ||
| 245 | |||
| 246 | dev_dbg(dev, "Allocating DMA channel %d\n", c->ch); | ||
| 235 | 247 | ||
| 236 | dev_dbg(c->vc.chan.device->dev, | 248 | c->cb_pool = dma_pool_create(dev_name(dev), dev, |
| 237 | "Allocating DMA channel %d\n", c->ch); | 249 | sizeof(struct bcm2835_dma_cb), 0, 0); |
| 250 | if (!c->cb_pool) { | ||
| 251 | dev_err(dev, "unable to allocate descriptor pool\n"); | ||
| 252 | return -ENOMEM; | ||
| 253 | } | ||
| 238 | 254 | ||
| 239 | return request_irq(c->irq_number, | 255 | return request_irq(c->irq_number, |
| 240 | bcm2835_dma_callback, 0, "DMA IRQ", c); | 256 | bcm2835_dma_callback, 0, "DMA IRQ", c); |
| @@ -246,6 +262,7 @@ static void bcm2835_dma_free_chan_resources(struct dma_chan *chan) | |||
| 246 | 262 | ||
| 247 | vchan_free_chan_resources(&c->vc); | 263 | vchan_free_chan_resources(&c->vc); |
| 248 | free_irq(c->irq_number, c); | 264 | free_irq(c->irq_number, c); |
| 265 | dma_pool_destroy(c->cb_pool); | ||
| 249 | 266 | ||
| 250 | dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch); | 267 | dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch); |
| 251 | } | 268 | } |
| @@ -261,8 +278,7 @@ static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr) | |||
| 261 | size_t size; | 278 | size_t size; |
| 262 | 279 | ||
| 263 | for (size = i = 0; i < d->frames; i++) { | 280 | for (size = i = 0; i < d->frames; i++) { |
| 264 | struct bcm2835_dma_cb *control_block = | 281 | struct bcm2835_dma_cb *control_block = d->cb_list[i].cb; |
| 265 | &d->control_block_base[i]; | ||
| 266 | size_t this_size = control_block->length; | 282 | size_t this_size = control_block->length; |
| 267 | dma_addr_t dma; | 283 | dma_addr_t dma; |
| 268 | 284 | ||
| @@ -343,6 +359,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( | |||
| 343 | dma_addr_t dev_addr; | 359 | dma_addr_t dev_addr; |
| 344 | unsigned int es, sync_type; | 360 | unsigned int es, sync_type; |
| 345 | unsigned int frame; | 361 | unsigned int frame; |
| 362 | int i; | ||
| 346 | 363 | ||
| 347 | /* Grab configuration */ | 364 | /* Grab configuration */ |
| 348 | if (!is_slave_direction(direction)) { | 365 | if (!is_slave_direction(direction)) { |
| @@ -374,27 +391,31 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( | |||
| 374 | if (!d) | 391 | if (!d) |
| 375 | return NULL; | 392 | return NULL; |
| 376 | 393 | ||
| 394 | d->c = c; | ||
| 377 | d->dir = direction; | 395 | d->dir = direction; |
| 378 | d->frames = buf_len / period_len; | 396 | d->frames = buf_len / period_len; |
| 379 | 397 | ||
| 380 | /* Allocate memory for control blocks */ | 398 | d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL); |
| 381 | d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb); | 399 | if (!d->cb_list) { |
| 382 | d->control_block_base = dma_zalloc_coherent(chan->device->dev, | ||
| 383 | d->control_block_size, &d->control_block_base_phys, | ||
| 384 | GFP_NOWAIT); | ||
| 385 | |||
| 386 | if (!d->control_block_base) { | ||
| 387 | kfree(d); | 400 | kfree(d); |
| 388 | return NULL; | 401 | return NULL; |
| 389 | } | 402 | } |
| 403 | /* Allocate memory for control blocks */ | ||
| 404 | for (i = 0; i < d->frames; i++) { | ||
| 405 | struct bcm2835_cb_entry *cb_entry = &d->cb_list[i]; | ||
| 406 | |||
| 407 | cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC, | ||
| 408 | &cb_entry->paddr); | ||
| 409 | if (!cb_entry->cb) | ||
| 410 | goto error_cb; | ||
| 411 | } | ||
| 390 | 412 | ||
| 391 | /* | 413 | /* |
| 392 | * Iterate over all frames, create a control block | 414 | * Iterate over all frames, create a control block |
| 393 | * for each frame and link them together. | 415 | * for each frame and link them together. |
| 394 | */ | 416 | */ |
| 395 | for (frame = 0; frame < d->frames; frame++) { | 417 | for (frame = 0; frame < d->frames; frame++) { |
| 396 | struct bcm2835_dma_cb *control_block = | 418 | struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb; |
| 397 | &d->control_block_base[frame]; | ||
| 398 | 419 | ||
| 399 | /* Setup adresses */ | 420 | /* Setup adresses */ |
| 400 | if (d->dir == DMA_DEV_TO_MEM) { | 421 | if (d->dir == DMA_DEV_TO_MEM) { |
| @@ -428,12 +449,21 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( | |||
| 428 | * This DMA engine driver currently only supports cyclic DMA. | 449 | * This DMA engine driver currently only supports cyclic DMA. |
| 429 | * Therefore, wrap around at number of frames. | 450 | * Therefore, wrap around at number of frames. |
| 430 | */ | 451 | */ |
| 431 | control_block->next = d->control_block_base_phys + | 452 | control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr; |
| 432 | sizeof(struct bcm2835_dma_cb) | ||
| 433 | * ((frame + 1) % d->frames); | ||
| 434 | } | 453 | } |
| 435 | 454 | ||
| 436 | return vchan_tx_prep(&c->vc, &d->vd, flags); | 455 | return vchan_tx_prep(&c->vc, &d->vd, flags); |
| 456 | error_cb: | ||
| 457 | i--; | ||
| 458 | for (; i >= 0; i--) { | ||
| 459 | struct bcm2835_cb_entry *cb_entry = &d->cb_list[i]; | ||
| 460 | |||
| 461 | dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr); | ||
| 462 | } | ||
| 463 | |||
| 464 | kfree(d->cb_list); | ||
| 465 | kfree(d); | ||
| 466 | return NULL; | ||
| 437 | } | 467 | } |
| 438 | 468 | ||
| 439 | static int bcm2835_dma_slave_config(struct dma_chan *chan, | 469 | static int bcm2835_dma_slave_config(struct dma_chan *chan, |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 0675e268d577..16fe773fb846 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
| @@ -1752,16 +1752,14 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, | |||
| 1752 | return ret; | 1752 | return ret; |
| 1753 | } | 1753 | } |
| 1754 | 1754 | ||
| 1755 | static bool edma_is_memcpy_channel(int ch_num, u16 *memcpy_channels) | 1755 | static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels) |
| 1756 | { | 1756 | { |
| 1757 | s16 *memcpy_ch = memcpy_channels; | ||
| 1758 | |||
| 1759 | if (!memcpy_channels) | 1757 | if (!memcpy_channels) |
| 1760 | return false; | 1758 | return false; |
| 1761 | while (*memcpy_ch != -1) { | 1759 | while (*memcpy_channels != -1) { |
| 1762 | if (*memcpy_ch == ch_num) | 1760 | if (*memcpy_channels == ch_num) |
| 1763 | return true; | 1761 | return true; |
| 1764 | memcpy_ch++; | 1762 | memcpy_channels++; |
| 1765 | } | 1763 | } |
| 1766 | return false; | 1764 | return false; |
| 1767 | } | 1765 | } |
| @@ -1775,7 +1773,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode) | |||
| 1775 | { | 1773 | { |
| 1776 | struct dma_device *s_ddev = &ecc->dma_slave; | 1774 | struct dma_device *s_ddev = &ecc->dma_slave; |
| 1777 | struct dma_device *m_ddev = NULL; | 1775 | struct dma_device *m_ddev = NULL; |
| 1778 | s16 *memcpy_channels = ecc->info->memcpy_channels; | 1776 | s32 *memcpy_channels = ecc->info->memcpy_channels; |
| 1779 | int i, j; | 1777 | int i, j; |
| 1780 | 1778 | ||
| 1781 | dma_cap_zero(s_ddev->cap_mask); | 1779 | dma_cap_zero(s_ddev->cap_mask); |
| @@ -1996,16 +1994,16 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, | |||
| 1996 | prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz); | 1994 | prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz); |
| 1997 | if (prop) { | 1995 | if (prop) { |
| 1998 | const char pname[] = "ti,edma-memcpy-channels"; | 1996 | const char pname[] = "ti,edma-memcpy-channels"; |
| 1999 | size_t nelm = sz / sizeof(s16); | 1997 | size_t nelm = sz / sizeof(s32); |
| 2000 | s16 *memcpy_ch; | 1998 | s32 *memcpy_ch; |
| 2001 | 1999 | ||
| 2002 | memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16), | 2000 | memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32), |
| 2003 | GFP_KERNEL); | 2001 | GFP_KERNEL); |
| 2004 | if (!memcpy_ch) | 2002 | if (!memcpy_ch) |
| 2005 | return ERR_PTR(-ENOMEM); | 2003 | return ERR_PTR(-ENOMEM); |
| 2006 | 2004 | ||
| 2007 | ret = of_property_read_u16_array(dev->of_node, pname, | 2005 | ret = of_property_read_u32_array(dev->of_node, pname, |
| 2008 | (u16 *)memcpy_ch, nelm); | 2006 | (u32 *)memcpy_ch, nelm); |
| 2009 | if (ret) | 2007 | if (ret) |
| 2010 | return ERR_PTR(ret); | 2008 | return ERR_PTR(ret); |
| 2011 | 2009 | ||
| @@ -2017,31 +2015,50 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, | |||
| 2017 | &sz); | 2015 | &sz); |
| 2018 | if (prop) { | 2016 | if (prop) { |
| 2019 | const char pname[] = "ti,edma-reserved-slot-ranges"; | 2017 | const char pname[] = "ti,edma-reserved-slot-ranges"; |
| 2018 | u32 (*tmp)[2]; | ||
| 2020 | s16 (*rsv_slots)[2]; | 2019 | s16 (*rsv_slots)[2]; |
| 2021 | size_t nelm = sz / sizeof(*rsv_slots); | 2020 | size_t nelm = sz / sizeof(*tmp); |
| 2022 | struct edma_rsv_info *rsv_info; | 2021 | struct edma_rsv_info *rsv_info; |
| 2022 | int i; | ||
| 2023 | 2023 | ||
| 2024 | if (!nelm) | 2024 | if (!nelm) |
| 2025 | return info; | 2025 | return info; |
| 2026 | 2026 | ||
| 2027 | tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL); | ||
| 2028 | if (!tmp) | ||
| 2029 | return ERR_PTR(-ENOMEM); | ||
| 2030 | |||
| 2027 | rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL); | 2031 | rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL); |
| 2028 | if (!rsv_info) | 2032 | if (!rsv_info) { |
| 2033 | kfree(tmp); | ||
| 2029 | return ERR_PTR(-ENOMEM); | 2034 | return ERR_PTR(-ENOMEM); |
| 2035 | } | ||
| 2030 | 2036 | ||
| 2031 | rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots), | 2037 | rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots), |
| 2032 | GFP_KERNEL); | 2038 | GFP_KERNEL); |
| 2033 | if (!rsv_slots) | 2039 | if (!rsv_slots) { |
| 2040 | kfree(tmp); | ||
| 2034 | return ERR_PTR(-ENOMEM); | 2041 | return ERR_PTR(-ENOMEM); |
| 2042 | } | ||
| 2035 | 2043 | ||
| 2036 | ret = of_property_read_u16_array(dev->of_node, pname, | 2044 | ret = of_property_read_u32_array(dev->of_node, pname, |
| 2037 | (u16 *)rsv_slots, nelm * 2); | 2045 | (u32 *)tmp, nelm * 2); |
| 2038 | if (ret) | 2046 | if (ret) { |
| 2047 | kfree(tmp); | ||
| 2039 | return ERR_PTR(ret); | 2048 | return ERR_PTR(ret); |
| 2049 | } | ||
| 2040 | 2050 | ||
| 2051 | for (i = 0; i < nelm; i++) { | ||
| 2052 | rsv_slots[i][0] = tmp[i][0]; | ||
| 2053 | rsv_slots[i][1] = tmp[i][1]; | ||
| 2054 | } | ||
| 2041 | rsv_slots[nelm][0] = -1; | 2055 | rsv_slots[nelm][0] = -1; |
| 2042 | rsv_slots[nelm][1] = -1; | 2056 | rsv_slots[nelm][1] = -1; |
| 2057 | |||
| 2043 | info->rsv = rsv_info; | 2058 | info->rsv = rsv_info; |
| 2044 | info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots; | 2059 | info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots; |
| 2060 | |||
| 2061 | kfree(tmp); | ||
| 2045 | } | 2062 | } |
| 2046 | 2063 | ||
| 2047 | return info; | 2064 | return info; |
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c index 068e920ecb68..cddfa8dbf4bd 100644 --- a/drivers/dma/mic_x100_dma.c +++ b/drivers/dma/mic_x100_dma.c | |||
| @@ -317,6 +317,7 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest, | |||
| 317 | struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); | 317 | struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); |
| 318 | struct device *dev = mic_dma_ch_to_device(mic_ch); | 318 | struct device *dev = mic_dma_ch_to_device(mic_ch); |
| 319 | int result; | 319 | int result; |
| 320 | struct dma_async_tx_descriptor *tx = NULL; | ||
| 320 | 321 | ||
| 321 | if (!len && !flags) | 322 | if (!len && !flags) |
| 322 | return NULL; | 323 | return NULL; |
| @@ -324,10 +325,13 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest, | |||
| 324 | spin_lock(&mic_ch->prep_lock); | 325 | spin_lock(&mic_ch->prep_lock); |
| 325 | result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len); | 326 | result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len); |
| 326 | if (result >= 0) | 327 | if (result >= 0) |
| 327 | return allocate_tx(mic_ch); | 328 | tx = allocate_tx(mic_ch); |
| 328 | dev_err(dev, "Error enqueueing dma, error=%d\n", result); | 329 | |
| 330 | if (!tx) | ||
| 331 | dev_err(dev, "Error enqueueing dma, error=%d\n", result); | ||
| 332 | |||
| 329 | spin_unlock(&mic_ch->prep_lock); | 333 | spin_unlock(&mic_ch->prep_lock); |
| 330 | return NULL; | 334 | return tx; |
| 331 | } | 335 | } |
| 332 | 336 | ||
| 333 | static struct dma_async_tx_descriptor * | 337 | static struct dma_async_tx_descriptor * |
| @@ -335,13 +339,14 @@ mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags) | |||
| 335 | { | 339 | { |
| 336 | struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); | 340 | struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); |
| 337 | int ret; | 341 | int ret; |
| 342 | struct dma_async_tx_descriptor *tx = NULL; | ||
| 338 | 343 | ||
| 339 | spin_lock(&mic_ch->prep_lock); | 344 | spin_lock(&mic_ch->prep_lock); |
| 340 | ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0); | 345 | ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0); |
| 341 | if (!ret) | 346 | if (!ret) |
| 342 | return allocate_tx(mic_ch); | 347 | tx = allocate_tx(mic_ch); |
| 343 | spin_unlock(&mic_ch->prep_lock); | 348 | spin_unlock(&mic_ch->prep_lock); |
| 344 | return NULL; | 349 | return tx; |
| 345 | } | 350 | } |
| 346 | 351 | ||
| 347 | /* Return the status of the transaction */ | 352 | /* Return the status of the transaction */ |
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c index e5827a56ff3b..5eaea8b812cf 100644 --- a/drivers/gpio/gpio-ath79.c +++ b/drivers/gpio/gpio-ath79.c | |||
| @@ -113,7 +113,7 @@ static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset, | |||
| 113 | __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR); | 113 | __raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR); |
| 114 | 114 | ||
| 115 | __raw_writel( | 115 | __raw_writel( |
| 116 | __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & BIT(offset), | 116 | __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset), |
| 117 | ctrl->base + AR71XX_GPIO_REG_OE); | 117 | ctrl->base + AR71XX_GPIO_REG_OE); |
| 118 | 118 | ||
| 119 | spin_unlock_irqrestore(&ctrl->lock, flags); | 119 | spin_unlock_irqrestore(&ctrl->lock, flags); |
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c index bd5193c67a9c..88ae70ddb127 100644 --- a/drivers/gpio/gpio-generic.c +++ b/drivers/gpio/gpio-generic.c | |||
| @@ -141,9 +141,9 @@ static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio) | |||
| 141 | unsigned long pinmask = bgc->pin2mask(bgc, gpio); | 141 | unsigned long pinmask = bgc->pin2mask(bgc, gpio); |
| 142 | 142 | ||
| 143 | if (bgc->dir & pinmask) | 143 | if (bgc->dir & pinmask) |
| 144 | return bgc->read_reg(bgc->reg_set) & pinmask; | 144 | return !!(bgc->read_reg(bgc->reg_set) & pinmask); |
| 145 | else | 145 | else |
| 146 | return bgc->read_reg(bgc->reg_dat) & pinmask; | 146 | return !!(bgc->read_reg(bgc->reg_dat) & pinmask); |
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | static int bgpio_get(struct gpio_chip *gc, unsigned int gpio) | 149 | static int bgpio_get(struct gpio_chip *gc, unsigned int gpio) |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 2a91f3287e3b..4e4c3083ae56 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
| @@ -1279,7 +1279,13 @@ static int _gpiod_get_raw_value(const struct gpio_desc *desc) | |||
| 1279 | chip = desc->chip; | 1279 | chip = desc->chip; |
| 1280 | offset = gpio_chip_hwgpio(desc); | 1280 | offset = gpio_chip_hwgpio(desc); |
| 1281 | value = chip->get ? chip->get(chip, offset) : -EIO; | 1281 | value = chip->get ? chip->get(chip, offset) : -EIO; |
| 1282 | value = value < 0 ? value : !!value; | 1282 | /* |
| 1283 | * FIXME: fix all drivers to clamp to [0,1] or return negative, | ||
| 1284 | * then change this to: | ||
| 1285 | * value = value < 0 ? value : !!value; | ||
| 1286 | * so we can properly propagate error codes. | ||
| 1287 | */ | ||
| 1288 | value = !!value; | ||
| 1283 | trace_gpio_value(desc_to_gpio(desc), 1, value); | 1289 | trace_gpio_value(desc_to_gpio(desc), 1, value); |
| 1284 | return value; | 1290 | return value; |
| 1285 | } | 1291 | } |
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index a18164f2f6d2..f8b5fcfa91a2 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c | |||
| @@ -229,7 +229,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect | |||
| 229 | mode_flags |= DRM_MODE_FLAG_3D_MASK; | 229 | mode_flags |= DRM_MODE_FLAG_3D_MASK; |
| 230 | 230 | ||
| 231 | list_for_each_entry(mode, &connector->modes, head) { | 231 | list_for_each_entry(mode, &connector->modes, head) { |
| 232 | mode->status = drm_mode_validate_basic(mode); | 232 | if (mode->status == MODE_OK) |
| 233 | mode->status = drm_mode_validate_basic(mode); | ||
| 233 | 234 | ||
| 234 | if (mode->status == MODE_OK) | 235 | if (mode->status == MODE_OK) |
| 235 | mode->status = drm_mode_validate_size(mode, maxX, maxY); | 236 | mode->status = drm_mode_validate_size(mode, maxX, maxY); |
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 8c688a5f1589..02ceb7a4b481 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
| @@ -141,8 +141,6 @@ static void i915_gem_context_clean(struct intel_context *ctx) | |||
| 141 | if (!ppgtt) | 141 | if (!ppgtt) |
| 142 | return; | 142 | return; |
| 143 | 143 | ||
| 144 | WARN_ON(!list_empty(&ppgtt->base.active_list)); | ||
| 145 | |||
| 146 | list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list, | 144 | list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list, |
| 147 | mm_list) { | 145 | mm_list) { |
| 148 | if (WARN_ON(__i915_vma_unbind_no_wait(vma))) | 146 | if (WARN_ON(__i915_vma_unbind_no_wait(vma))) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 22e86d2e408d..62211abe4922 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -6309,9 +6309,11 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) | |||
| 6309 | if (to_intel_plane_state(crtc->primary->state)->visible) { | 6309 | if (to_intel_plane_state(crtc->primary->state)->visible) { |
| 6310 | intel_crtc_wait_for_pending_flips(crtc); | 6310 | intel_crtc_wait_for_pending_flips(crtc); |
| 6311 | intel_pre_disable_primary(crtc); | 6311 | intel_pre_disable_primary(crtc); |
| 6312 | |||
| 6313 | intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); | ||
| 6314 | to_intel_plane_state(crtc->primary->state)->visible = false; | ||
| 6312 | } | 6315 | } |
| 6313 | 6316 | ||
| 6314 | intel_crtc_disable_planes(crtc, crtc->state->plane_mask); | ||
| 6315 | dev_priv->display.crtc_disable(crtc); | 6317 | dev_priv->display.crtc_disable(crtc); |
| 6316 | intel_crtc->active = false; | 6318 | intel_crtc->active = false; |
| 6317 | intel_update_watermarks(crtc); | 6319 | intel_update_watermarks(crtc); |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 071a76b9ac52..f091ad12d694 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -4782,8 +4782,7 @@ static void gen9_enable_rc6(struct drm_device *dev) | |||
| 4782 | /* 2b: Program RC6 thresholds.*/ | 4782 | /* 2b: Program RC6 thresholds.*/ |
| 4783 | 4783 | ||
| 4784 | /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ | 4784 | /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ |
| 4785 | if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && | 4785 | if (IS_SKYLAKE(dev)) |
| 4786 | (INTEL_REVID(dev) <= SKL_REVID_E0))) | ||
| 4787 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); | 4786 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); |
| 4788 | else | 4787 | else |
| 4789 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); | 4788 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); |
| @@ -4825,7 +4824,7 @@ static void gen9_enable_rc6(struct drm_device *dev) | |||
| 4825 | * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. | 4824 | * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. |
| 4826 | */ | 4825 | */ |
| 4827 | if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || | 4826 | if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || |
| 4828 | ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0))) | 4827 | ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0))) |
| 4829 | I915_WRITE(GEN9_PG_ENABLE, 0); | 4828 | I915_WRITE(GEN9_PG_ENABLE, 0); |
| 4830 | else | 4829 | else |
| 4831 | I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? | 4830 | I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? |
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index b8e4cdec28c3..24f92bea39c7 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c | |||
| @@ -112,11 +112,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, | |||
| 112 | dma_addr_t paddr; | 112 | dma_addr_t paddr; |
| 113 | int ret; | 113 | int ret; |
| 114 | 114 | ||
| 115 | /* only doing ARGB32 since this is what is needed to alpha-blend | ||
| 116 | * with video overlays: | ||
| 117 | */ | ||
| 118 | sizes->surface_bpp = 32; | 115 | sizes->surface_bpp = 32; |
| 119 | sizes->surface_depth = 32; | 116 | sizes->surface_depth = 24; |
| 120 | 117 | ||
| 121 | DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, | 118 | DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, |
| 122 | sizes->surface_height, sizes->surface_bpp, | 119 | sizes->surface_height, sizes->surface_bpp, |
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 8f59f057cdf4..80a73bfc1a65 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
| @@ -1217,6 +1217,7 @@ config SENSORS_PWM_FAN | |||
| 1217 | config SENSORS_SHT15 | 1217 | config SENSORS_SHT15 |
| 1218 | tristate "Sensiron humidity and temperature sensors. SHT15 and compat." | 1218 | tristate "Sensiron humidity and temperature sensors. SHT15 and compat." |
| 1219 | depends on GPIOLIB || COMPILE_TEST | 1219 | depends on GPIOLIB || COMPILE_TEST |
| 1220 | select BITREVERSE | ||
| 1220 | help | 1221 | help |
| 1221 | If you say yes here you get support for the Sensiron SHT10, SHT11, | 1222 | If you say yes here you get support for the Sensiron SHT10, SHT11, |
| 1222 | SHT15, SHT71, SHT75 humidity and temperature sensors. | 1223 | SHT15, SHT71, SHT75 humidity and temperature sensors. |
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c index 65482624ea2c..5289aa0980a8 100644 --- a/drivers/hwmon/tmp102.c +++ b/drivers/hwmon/tmp102.c | |||
| @@ -58,6 +58,7 @@ struct tmp102 { | |||
| 58 | u16 config_orig; | 58 | u16 config_orig; |
| 59 | unsigned long last_update; | 59 | unsigned long last_update; |
| 60 | int temp[3]; | 60 | int temp[3]; |
| 61 | bool first_time; | ||
| 61 | }; | 62 | }; |
| 62 | 63 | ||
| 63 | /* convert left adjusted 13-bit TMP102 register value to milliCelsius */ | 64 | /* convert left adjusted 13-bit TMP102 register value to milliCelsius */ |
| @@ -93,6 +94,7 @@ static struct tmp102 *tmp102_update_device(struct device *dev) | |||
| 93 | tmp102->temp[i] = tmp102_reg_to_mC(status); | 94 | tmp102->temp[i] = tmp102_reg_to_mC(status); |
| 94 | } | 95 | } |
| 95 | tmp102->last_update = jiffies; | 96 | tmp102->last_update = jiffies; |
| 97 | tmp102->first_time = false; | ||
| 96 | } | 98 | } |
| 97 | mutex_unlock(&tmp102->lock); | 99 | mutex_unlock(&tmp102->lock); |
| 98 | return tmp102; | 100 | return tmp102; |
| @@ -102,6 +104,12 @@ static int tmp102_read_temp(void *dev, int *temp) | |||
| 102 | { | 104 | { |
| 103 | struct tmp102 *tmp102 = tmp102_update_device(dev); | 105 | struct tmp102 *tmp102 = tmp102_update_device(dev); |
| 104 | 106 | ||
| 107 | /* Is it too early even to return a conversion? */ | ||
| 108 | if (tmp102->first_time) { | ||
| 109 | dev_dbg(dev, "%s: Conversion not ready yet..\n", __func__); | ||
| 110 | return -EAGAIN; | ||
| 111 | } | ||
| 112 | |||
| 105 | *temp = tmp102->temp[0]; | 113 | *temp = tmp102->temp[0]; |
| 106 | 114 | ||
| 107 | return 0; | 115 | return 0; |
| @@ -114,6 +122,10 @@ static ssize_t tmp102_show_temp(struct device *dev, | |||
| 114 | struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); | 122 | struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); |
| 115 | struct tmp102 *tmp102 = tmp102_update_device(dev); | 123 | struct tmp102 *tmp102 = tmp102_update_device(dev); |
| 116 | 124 | ||
| 125 | /* Is it too early even to return a read? */ | ||
| 126 | if (tmp102->first_time) | ||
| 127 | return -EAGAIN; | ||
| 128 | |||
| 117 | return sprintf(buf, "%d\n", tmp102->temp[sda->index]); | 129 | return sprintf(buf, "%d\n", tmp102->temp[sda->index]); |
| 118 | } | 130 | } |
| 119 | 131 | ||
| @@ -207,7 +219,9 @@ static int tmp102_probe(struct i2c_client *client, | |||
| 207 | status = -ENODEV; | 219 | status = -ENODEV; |
| 208 | goto fail_restore_config; | 220 | goto fail_restore_config; |
| 209 | } | 221 | } |
| 210 | tmp102->last_update = jiffies - HZ; | 222 | tmp102->last_update = jiffies; |
| 223 | /* Mark that we are not ready with data until conversion is complete */ | ||
| 224 | tmp102->first_time = true; | ||
| 211 | mutex_init(&tmp102->lock); | 225 | mutex_init(&tmp102->lock); |
| 212 | 226 | ||
| 213 | hwmon_dev = hwmon_device_register_with_groups(dev, client->name, | 227 | hwmon_dev = hwmon_device_register_with_groups(dev, client->name, |
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index c5628a42170a..a8bdcb5292f5 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c | |||
| @@ -202,8 +202,15 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev) | |||
| 202 | * d is always 6 on Keystone I2C controller | 202 | * d is always 6 on Keystone I2C controller |
| 203 | */ | 203 | */ |
| 204 | 204 | ||
| 205 | /* get minimum of 7 MHz clock, but max of 12 MHz */ | 205 | /* |
| 206 | psc = (input_clock / 7000000) - 1; | 206 | * Both Davinci and current Keystone User Guides recommend a value |
| 207 | * between 7MHz and 12MHz. In reality 7MHz module clock doesn't | ||
| 208 | * always produce enough margin between SDA and SCL transitions. | ||
| 209 | * Measurements show that the higher the module clock is, the | ||
| 210 | * bigger is the margin, providing more reliable communication. | ||
| 211 | * So we better target for 12MHz. | ||
| 212 | */ | ||
| 213 | psc = (input_clock / 12000000) - 1; | ||
| 207 | if ((input_clock / (psc + 1)) > 12000000) | 214 | if ((input_clock / (psc + 1)) > 12000000) |
| 208 | psc++; /* better to run under spec than over */ | 215 | psc++; /* better to run under spec than over */ |
| 209 | d = (psc >= 2) ? 5 : 7 - psc; | 216 | d = (psc >= 2) ? 5 : 7 - psc; |
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index 8c48b27ba059..de7fbbb374cd 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c | |||
| @@ -813,6 +813,12 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) | |||
| 813 | tx_aborted: | 813 | tx_aborted: |
| 814 | if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) | 814 | if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) |
| 815 | complete(&dev->cmd_complete); | 815 | complete(&dev->cmd_complete); |
| 816 | else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) { | ||
| 817 | /* workaround to trigger pending interrupt */ | ||
| 818 | stat = dw_readl(dev, DW_IC_INTR_MASK); | ||
| 819 | i2c_dw_disable_int(dev); | ||
| 820 | dw_writel(dev, stat, DW_IC_INTR_MASK); | ||
| 821 | } | ||
| 816 | 822 | ||
| 817 | return IRQ_HANDLED; | 823 | return IRQ_HANDLED; |
| 818 | } | 824 | } |
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 1d50898e7b24..9ffb63a60f95 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h | |||
| @@ -111,6 +111,7 @@ struct dw_i2c_dev { | |||
| 111 | 111 | ||
| 112 | #define ACCESS_SWAP 0x00000001 | 112 | #define ACCESS_SWAP 0x00000001 |
| 113 | #define ACCESS_16BIT 0x00000002 | 113 | #define ACCESS_16BIT 0x00000002 |
| 114 | #define ACCESS_INTR_MASK 0x00000004 | ||
| 114 | 115 | ||
| 115 | extern int i2c_dw_init(struct dw_i2c_dev *dev); | 116 | extern int i2c_dw_init(struct dw_i2c_dev *dev); |
| 116 | extern void i2c_dw_disable(struct dw_i2c_dev *dev); | 117 | extern void i2c_dw_disable(struct dw_i2c_dev *dev); |
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 809579ecb5a4..6b00061c3746 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c | |||
| @@ -93,6 +93,7 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[], | |||
| 93 | static int dw_i2c_acpi_configure(struct platform_device *pdev) | 93 | static int dw_i2c_acpi_configure(struct platform_device *pdev) |
| 94 | { | 94 | { |
| 95 | struct dw_i2c_dev *dev = platform_get_drvdata(pdev); | 95 | struct dw_i2c_dev *dev = platform_get_drvdata(pdev); |
| 96 | const struct acpi_device_id *id; | ||
| 96 | 97 | ||
| 97 | dev->adapter.nr = -1; | 98 | dev->adapter.nr = -1; |
| 98 | dev->tx_fifo_depth = 32; | 99 | dev->tx_fifo_depth = 32; |
| @@ -106,6 +107,10 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev) | |||
| 106 | dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, | 107 | dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, |
| 107 | &dev->sda_hold_time); | 108 | &dev->sda_hold_time); |
| 108 | 109 | ||
| 110 | id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); | ||
| 111 | if (id && id->driver_data) | ||
| 112 | dev->accessor_flags |= (u32)id->driver_data; | ||
| 113 | |||
| 109 | return 0; | 114 | return 0; |
| 110 | } | 115 | } |
| 111 | 116 | ||
| @@ -116,7 +121,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = { | |||
| 116 | { "INT3433", 0 }, | 121 | { "INT3433", 0 }, |
| 117 | { "80860F41", 0 }, | 122 | { "80860F41", 0 }, |
| 118 | { "808622C1", 0 }, | 123 | { "808622C1", 0 }, |
| 119 | { "AMD0010", 0 }, | 124 | { "AMD0010", ACCESS_INTR_MASK }, |
| 120 | { } | 125 | { } |
| 121 | }; | 126 | }; |
| 122 | MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); | 127 | MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); |
| @@ -240,12 +245,10 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) | |||
| 240 | } | 245 | } |
| 241 | 246 | ||
| 242 | r = i2c_dw_probe(dev); | 247 | r = i2c_dw_probe(dev); |
| 243 | if (r) { | 248 | if (r && !dev->pm_runtime_disabled) |
| 244 | pm_runtime_disable(&pdev->dev); | 249 | pm_runtime_disable(&pdev->dev); |
| 245 | return r; | ||
| 246 | } | ||
| 247 | 250 | ||
| 248 | return 0; | 251 | return r; |
| 249 | } | 252 | } |
| 250 | 253 | ||
| 251 | static int dw_i2c_plat_remove(struct platform_device *pdev) | 254 | static int dw_i2c_plat_remove(struct platform_device *pdev) |
| @@ -260,7 +263,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev) | |||
| 260 | 263 | ||
| 261 | pm_runtime_dont_use_autosuspend(&pdev->dev); | 264 | pm_runtime_dont_use_autosuspend(&pdev->dev); |
| 262 | pm_runtime_put_sync(&pdev->dev); | 265 | pm_runtime_put_sync(&pdev->dev); |
| 263 | pm_runtime_disable(&pdev->dev); | 266 | if (!dev->pm_runtime_disabled) |
| 267 | pm_runtime_disable(&pdev->dev); | ||
| 264 | 268 | ||
| 265 | return 0; | 269 | return 0; |
| 266 | } | 270 | } |
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 9bb0b056b25f..d4d853680ae4 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
| @@ -1119,6 +1119,8 @@ static int i2c_imx_probe(struct platform_device *pdev) | |||
| 1119 | i2c_imx, IMX_I2C_I2CR); | 1119 | i2c_imx, IMX_I2C_I2CR); |
| 1120 | imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); | 1120 | imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR); |
| 1121 | 1121 | ||
| 1122 | i2c_imx_init_recovery_info(i2c_imx, pdev); | ||
| 1123 | |||
| 1122 | /* Add I2C adapter */ | 1124 | /* Add I2C adapter */ |
| 1123 | ret = i2c_add_numbered_adapter(&i2c_imx->adapter); | 1125 | ret = i2c_add_numbered_adapter(&i2c_imx->adapter); |
| 1124 | if (ret < 0) { | 1126 | if (ret < 0) { |
| @@ -1126,8 +1128,6 @@ static int i2c_imx_probe(struct platform_device *pdev) | |||
| 1126 | goto clk_disable; | 1128 | goto clk_disable; |
| 1127 | } | 1129 | } |
| 1128 | 1130 | ||
| 1129 | i2c_imx_init_recovery_info(i2c_imx, pdev); | ||
| 1130 | |||
| 1131 | /* Set up platform driver data */ | 1131 | /* Set up platform driver data */ |
| 1132 | platform_set_drvdata(pdev, i2c_imx); | 1132 | platform_set_drvdata(pdev, i2c_imx); |
| 1133 | clk_disable_unprepare(i2c_imx->clk); | 1133 | clk_disable_unprepare(i2c_imx->clk); |
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 5801227b97ab..43207f52e5a3 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c | |||
| @@ -146,6 +146,8 @@ struct mv64xxx_i2c_data { | |||
| 146 | bool errata_delay; | 146 | bool errata_delay; |
| 147 | struct reset_control *rstc; | 147 | struct reset_control *rstc; |
| 148 | bool irq_clear_inverted; | 148 | bool irq_clear_inverted; |
| 149 | /* Clk div is 2 to the power n, not 2 to the power n + 1 */ | ||
| 150 | bool clk_n_base_0; | ||
| 149 | }; | 151 | }; |
| 150 | 152 | ||
| 151 | static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = { | 153 | static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = { |
| @@ -757,25 +759,29 @@ MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table); | |||
| 757 | #ifdef CONFIG_OF | 759 | #ifdef CONFIG_OF |
| 758 | #ifdef CONFIG_HAVE_CLK | 760 | #ifdef CONFIG_HAVE_CLK |
| 759 | static int | 761 | static int |
| 760 | mv64xxx_calc_freq(const int tclk, const int n, const int m) | 762 | mv64xxx_calc_freq(struct mv64xxx_i2c_data *drv_data, |
| 763 | const int tclk, const int n, const int m) | ||
| 761 | { | 764 | { |
| 762 | return tclk / (10 * (m + 1) * (2 << n)); | 765 | if (drv_data->clk_n_base_0) |
| 766 | return tclk / (10 * (m + 1) * (1 << n)); | ||
| 767 | else | ||
| 768 | return tclk / (10 * (m + 1) * (2 << n)); | ||
| 763 | } | 769 | } |
| 764 | 770 | ||
| 765 | static bool | 771 | static bool |
| 766 | mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n, | 772 | mv64xxx_find_baud_factors(struct mv64xxx_i2c_data *drv_data, |
| 767 | u32 *best_m) | 773 | const u32 req_freq, const u32 tclk) |
| 768 | { | 774 | { |
| 769 | int freq, delta, best_delta = INT_MAX; | 775 | int freq, delta, best_delta = INT_MAX; |
| 770 | int m, n; | 776 | int m, n; |
| 771 | 777 | ||
| 772 | for (n = 0; n <= 7; n++) | 778 | for (n = 0; n <= 7; n++) |
| 773 | for (m = 0; m <= 15; m++) { | 779 | for (m = 0; m <= 15; m++) { |
| 774 | freq = mv64xxx_calc_freq(tclk, n, m); | 780 | freq = mv64xxx_calc_freq(drv_data, tclk, n, m); |
| 775 | delta = req_freq - freq; | 781 | delta = req_freq - freq; |
| 776 | if (delta >= 0 && delta < best_delta) { | 782 | if (delta >= 0 && delta < best_delta) { |
| 777 | *best_m = m; | 783 | drv_data->freq_m = m; |
| 778 | *best_n = n; | 784 | drv_data->freq_n = n; |
| 779 | best_delta = delta; | 785 | best_delta = delta; |
| 780 | } | 786 | } |
| 781 | if (best_delta == 0) | 787 | if (best_delta == 0) |
| @@ -813,8 +819,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, | |||
| 813 | if (of_property_read_u32(np, "clock-frequency", &bus_freq)) | 819 | if (of_property_read_u32(np, "clock-frequency", &bus_freq)) |
| 814 | bus_freq = 100000; /* 100kHz by default */ | 820 | bus_freq = 100000; /* 100kHz by default */ |
| 815 | 821 | ||
| 816 | if (!mv64xxx_find_baud_factors(bus_freq, tclk, | 822 | if (of_device_is_compatible(np, "allwinner,sun4i-a10-i2c") || |
| 817 | &drv_data->freq_n, &drv_data->freq_m)) { | 823 | of_device_is_compatible(np, "allwinner,sun6i-a31-i2c")) |
| 824 | drv_data->clk_n_base_0 = true; | ||
| 825 | |||
| 826 | if (!mv64xxx_find_baud_factors(drv_data, bus_freq, tclk)) { | ||
| 818 | rc = -EINVAL; | 827 | rc = -EINVAL; |
| 819 | goto out; | 828 | goto out; |
| 820 | } | 829 | } |
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index b0ae560b38c3..599c0d7bd906 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c | |||
| @@ -576,7 +576,7 @@ static int rcar_reg_slave(struct i2c_client *slave) | |||
| 576 | if (slave->flags & I2C_CLIENT_TEN) | 576 | if (slave->flags & I2C_CLIENT_TEN) |
| 577 | return -EAFNOSUPPORT; | 577 | return -EAFNOSUPPORT; |
| 578 | 578 | ||
| 579 | pm_runtime_forbid(rcar_i2c_priv_to_dev(priv)); | 579 | pm_runtime_get_sync(rcar_i2c_priv_to_dev(priv)); |
| 580 | 580 | ||
| 581 | priv->slave = slave; | 581 | priv->slave = slave; |
| 582 | rcar_i2c_write(priv, ICSAR, slave->addr); | 582 | rcar_i2c_write(priv, ICSAR, slave->addr); |
| @@ -598,7 +598,7 @@ static int rcar_unreg_slave(struct i2c_client *slave) | |||
| 598 | 598 | ||
| 599 | priv->slave = NULL; | 599 | priv->slave = NULL; |
| 600 | 600 | ||
| 601 | pm_runtime_allow(rcar_i2c_priv_to_dev(priv)); | 601 | pm_runtime_put(rcar_i2c_priv_to_dev(priv)); |
| 602 | 602 | ||
| 603 | return 0; | 603 | return 0; |
| 604 | } | 604 | } |
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index c1935ebd6a9c..9096d17beb5b 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c | |||
| @@ -908,7 +908,7 @@ static int rk3x_i2c_probe(struct platform_device *pdev) | |||
| 908 | &i2c->scl_fall_ns)) | 908 | &i2c->scl_fall_ns)) |
| 909 | i2c->scl_fall_ns = 300; | 909 | i2c->scl_fall_ns = 300; |
| 910 | if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns", | 910 | if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns", |
| 911 | &i2c->scl_fall_ns)) | 911 | &i2c->sda_fall_ns)) |
| 912 | i2c->sda_fall_ns = i2c->scl_fall_ns; | 912 | i2c->sda_fall_ns = i2c->scl_fall_ns; |
| 913 | 913 | ||
| 914 | strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name)); | 914 | strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name)); |
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c index ea72dca32fdf..25020ec777c9 100644 --- a/drivers/i2c/busses/i2c-st.c +++ b/drivers/i2c/busses/i2c-st.c | |||
| @@ -822,7 +822,7 @@ static int st_i2c_probe(struct platform_device *pdev) | |||
| 822 | 822 | ||
| 823 | adap = &i2c_dev->adap; | 823 | adap = &i2c_dev->adap; |
| 824 | i2c_set_adapdata(adap, i2c_dev); | 824 | i2c_set_adapdata(adap, i2c_dev); |
| 825 | snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%pa)", &res->start); | 825 | snprintf(adap->name, sizeof(adap->name), "ST I2C(%pa)", &res->start); |
| 826 | adap->owner = THIS_MODULE; | 826 | adap->owner = THIS_MODULE; |
| 827 | adap->timeout = 2 * HZ; | 827 | adap->timeout = 2 * HZ; |
| 828 | adap->retries = 0; | 828 | adap->retries = 0; |
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c index 932d07307454..da326090c2b0 100644 --- a/drivers/input/joystick/db9.c +++ b/drivers/input/joystick/db9.c | |||
| @@ -592,6 +592,7 @@ static void db9_attach(struct parport *pp) | |||
| 592 | return; | 592 | return; |
| 593 | } | 593 | } |
| 594 | 594 | ||
| 595 | memset(&db9_parport_cb, 0, sizeof(db9_parport_cb)); | ||
| 595 | db9_parport_cb.flags = PARPORT_FLAG_EXCL; | 596 | db9_parport_cb.flags = PARPORT_FLAG_EXCL; |
| 596 | 597 | ||
| 597 | pd = parport_register_dev_model(pp, "db9", &db9_parport_cb, port_idx); | 598 | pd = parport_register_dev_model(pp, "db9", &db9_parport_cb, port_idx); |
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c index 5a672dcac0d8..eae14d512353 100644 --- a/drivers/input/joystick/gamecon.c +++ b/drivers/input/joystick/gamecon.c | |||
| @@ -951,6 +951,7 @@ static void gc_attach(struct parport *pp) | |||
| 951 | pads = gc_cfg[port_idx].args + 1; | 951 | pads = gc_cfg[port_idx].args + 1; |
| 952 | n_pads = gc_cfg[port_idx].nargs - 1; | 952 | n_pads = gc_cfg[port_idx].nargs - 1; |
| 953 | 953 | ||
| 954 | memset(&gc_parport_cb, 0, sizeof(gc_parport_cb)); | ||
| 954 | gc_parport_cb.flags = PARPORT_FLAG_EXCL; | 955 | gc_parport_cb.flags = PARPORT_FLAG_EXCL; |
| 955 | 956 | ||
| 956 | pd = parport_register_dev_model(pp, "gamecon", &gc_parport_cb, | 957 | pd = parport_register_dev_model(pp, "gamecon", &gc_parport_cb, |
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c index 9f5bca26bd2f..77f575dd0901 100644 --- a/drivers/input/joystick/turbografx.c +++ b/drivers/input/joystick/turbografx.c | |||
| @@ -181,6 +181,7 @@ static void tgfx_attach(struct parport *pp) | |||
| 181 | n_buttons = tgfx_cfg[port_idx].args + 1; | 181 | n_buttons = tgfx_cfg[port_idx].args + 1; |
| 182 | n_devs = tgfx_cfg[port_idx].nargs - 1; | 182 | n_devs = tgfx_cfg[port_idx].nargs - 1; |
| 183 | 183 | ||
| 184 | memset(&tgfx_parport_cb, 0, sizeof(tgfx_parport_cb)); | ||
| 184 | tgfx_parport_cb.flags = PARPORT_FLAG_EXCL; | 185 | tgfx_parport_cb.flags = PARPORT_FLAG_EXCL; |
| 185 | 186 | ||
| 186 | pd = parport_register_dev_model(pp, "turbografx", &tgfx_parport_cb, | 187 | pd = parport_register_dev_model(pp, "turbografx", &tgfx_parport_cb, |
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c index 9c07fe911075..70a893a17467 100644 --- a/drivers/input/joystick/walkera0701.c +++ b/drivers/input/joystick/walkera0701.c | |||
| @@ -218,6 +218,7 @@ static void walkera0701_attach(struct parport *pp) | |||
| 218 | 218 | ||
| 219 | w->parport = pp; | 219 | w->parport = pp; |
| 220 | 220 | ||
| 221 | memset(&walkera0701_parport_cb, 0, sizeof(walkera0701_parport_cb)); | ||
| 221 | walkera0701_parport_cb.flags = PARPORT_FLAG_EXCL; | 222 | walkera0701_parport_cb.flags = PARPORT_FLAG_EXCL; |
| 222 | walkera0701_parport_cb.irq_func = walkera0701_irq_handler; | 223 | walkera0701_parport_cb.irq_func = walkera0701_irq_handler; |
| 223 | walkera0701_parport_cb.private = w; | 224 | walkera0701_parport_cb.private = w; |
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c index 4bf678541496..d5994a745ffa 100644 --- a/drivers/input/misc/arizona-haptics.c +++ b/drivers/input/misc/arizona-haptics.c | |||
| @@ -97,8 +97,7 @@ static void arizona_haptics_work(struct work_struct *work) | |||
| 97 | 97 | ||
| 98 | ret = regmap_update_bits(arizona->regmap, | 98 | ret = regmap_update_bits(arizona->regmap, |
| 99 | ARIZONA_HAPTICS_CONTROL_1, | 99 | ARIZONA_HAPTICS_CONTROL_1, |
| 100 | ARIZONA_HAP_CTRL_MASK, | 100 | ARIZONA_HAP_CTRL_MASK, 0); |
| 101 | 1 << ARIZONA_HAP_CTRL_SHIFT); | ||
| 102 | if (ret != 0) { | 101 | if (ret != 0) { |
| 103 | dev_err(arizona->dev, "Failed to stop haptics: %d\n", | 102 | dev_err(arizona->dev, "Failed to stop haptics: %d\n", |
| 104 | ret); | 103 | ret); |
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 5e1665bbaa0b..2f589857a039 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c | |||
| @@ -41,6 +41,7 @@ | |||
| 41 | 41 | ||
| 42 | #define DRIVER_NAME "elan_i2c" | 42 | #define DRIVER_NAME "elan_i2c" |
| 43 | #define ELAN_DRIVER_VERSION "1.6.1" | 43 | #define ELAN_DRIVER_VERSION "1.6.1" |
| 44 | #define ELAN_VENDOR_ID 0x04f3 | ||
| 44 | #define ETP_MAX_PRESSURE 255 | 45 | #define ETP_MAX_PRESSURE 255 |
| 45 | #define ETP_FWIDTH_REDUCE 90 | 46 | #define ETP_FWIDTH_REDUCE 90 |
| 46 | #define ETP_FINGER_WIDTH 15 | 47 | #define ETP_FINGER_WIDTH 15 |
| @@ -914,6 +915,8 @@ static int elan_setup_input_device(struct elan_tp_data *data) | |||
| 914 | 915 | ||
| 915 | input->name = "Elan Touchpad"; | 916 | input->name = "Elan Touchpad"; |
| 916 | input->id.bustype = BUS_I2C; | 917 | input->id.bustype = BUS_I2C; |
| 918 | input->id.vendor = ELAN_VENDOR_ID; | ||
| 919 | input->id.product = data->product_id; | ||
| 917 | input_set_drvdata(input, data); | 920 | input_set_drvdata(input, data); |
| 918 | 921 | ||
| 919 | error = input_mt_init_slots(input, ETP_MAX_FINGERS, | 922 | error = input_mt_init_slots(input, ETP_MAX_FINGERS, |
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c index 92c31b8f8fb4..1edfac78d4ac 100644 --- a/drivers/input/serio/parkbd.c +++ b/drivers/input/serio/parkbd.c | |||
| @@ -145,6 +145,7 @@ static int parkbd_getport(struct parport *pp) | |||
| 145 | { | 145 | { |
| 146 | struct pardev_cb parkbd_parport_cb; | 146 | struct pardev_cb parkbd_parport_cb; |
| 147 | 147 | ||
| 148 | memset(&parkbd_parport_cb, 0, sizeof(parkbd_parport_cb)); | ||
| 148 | parkbd_parport_cb.irq_func = parkbd_interrupt; | 149 | parkbd_parport_cb.irq_func = parkbd_interrupt; |
| 149 | parkbd_parport_cb.flags = PARPORT_FLAG_EXCL; | 150 | parkbd_parport_cb.flags = PARPORT_FLAG_EXCL; |
| 150 | 151 | ||
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index e7f966da6efa..78ca44840d60 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c | |||
| @@ -1819,6 +1819,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) | |||
| 1819 | input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0); | 1819 | input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0); |
| 1820 | input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0); | 1820 | input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0); |
| 1821 | 1821 | ||
| 1822 | /* Verify that a device really has an endpoint */ | ||
| 1823 | if (intf->altsetting[0].desc.bNumEndpoints < 1) { | ||
| 1824 | dev_err(&intf->dev, | ||
| 1825 | "interface has %d endpoints, but must have minimum 1\n", | ||
| 1826 | intf->altsetting[0].desc.bNumEndpoints); | ||
| 1827 | err = -EINVAL; | ||
| 1828 | goto fail3; | ||
| 1829 | } | ||
| 1822 | endpoint = &intf->altsetting[0].endpoint[0].desc; | 1830 | endpoint = &intf->altsetting[0].endpoint[0].desc; |
| 1823 | 1831 | ||
| 1824 | /* Go set up our URB, which is called when the tablet receives | 1832 | /* Go set up our URB, which is called when the tablet receives |
| @@ -1861,6 +1869,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) | |||
| 1861 | if (i == ARRAY_SIZE(speeds)) { | 1869 | if (i == ARRAY_SIZE(speeds)) { |
| 1862 | dev_info(&intf->dev, | 1870 | dev_info(&intf->dev, |
| 1863 | "Aiptek tried all speeds, no sane response\n"); | 1871 | "Aiptek tried all speeds, no sane response\n"); |
| 1872 | err = -EINVAL; | ||
| 1864 | goto fail3; | 1873 | goto fail3; |
| 1865 | } | 1874 | } |
| 1866 | 1875 | ||
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index c5622058c22b..2d5794ec338b 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c | |||
| @@ -2487,6 +2487,31 @@ static struct mxt_acpi_platform_data samus_platform_data[] = { | |||
| 2487 | { } | 2487 | { } |
| 2488 | }; | 2488 | }; |
| 2489 | 2489 | ||
| 2490 | static unsigned int chromebook_tp_buttons[] = { | ||
| 2491 | KEY_RESERVED, | ||
| 2492 | KEY_RESERVED, | ||
| 2493 | KEY_RESERVED, | ||
| 2494 | KEY_RESERVED, | ||
| 2495 | KEY_RESERVED, | ||
| 2496 | BTN_LEFT | ||
| 2497 | }; | ||
| 2498 | |||
| 2499 | static struct mxt_acpi_platform_data chromebook_platform_data[] = { | ||
| 2500 | { | ||
| 2501 | /* Touchpad */ | ||
| 2502 | .hid = "ATML0000", | ||
| 2503 | .pdata = { | ||
| 2504 | .t19_num_keys = ARRAY_SIZE(chromebook_tp_buttons), | ||
| 2505 | .t19_keymap = chromebook_tp_buttons, | ||
| 2506 | }, | ||
| 2507 | }, | ||
| 2508 | { | ||
| 2509 | /* Touchscreen */ | ||
| 2510 | .hid = "ATML0001", | ||
| 2511 | }, | ||
| 2512 | { } | ||
| 2513 | }; | ||
| 2514 | |||
| 2490 | static const struct dmi_system_id mxt_dmi_table[] = { | 2515 | static const struct dmi_system_id mxt_dmi_table[] = { |
| 2491 | { | 2516 | { |
| 2492 | /* 2015 Google Pixel */ | 2517 | /* 2015 Google Pixel */ |
| @@ -2497,6 +2522,14 @@ static const struct dmi_system_id mxt_dmi_table[] = { | |||
| 2497 | }, | 2522 | }, |
| 2498 | .driver_data = samus_platform_data, | 2523 | .driver_data = samus_platform_data, |
| 2499 | }, | 2524 | }, |
| 2525 | { | ||
| 2526 | /* Other Google Chromebooks */ | ||
| 2527 | .ident = "Chromebook", | ||
| 2528 | .matches = { | ||
| 2529 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), | ||
| 2530 | }, | ||
| 2531 | .driver_data = chromebook_platform_data, | ||
| 2532 | }, | ||
| 2500 | { } | 2533 | { } |
| 2501 | }; | 2534 | }; |
| 2502 | 2535 | ||
| @@ -2701,6 +2734,7 @@ static const struct i2c_device_id mxt_id[] = { | |||
| 2701 | { "qt602240_ts", 0 }, | 2734 | { "qt602240_ts", 0 }, |
| 2702 | { "atmel_mxt_ts", 0 }, | 2735 | { "atmel_mxt_ts", 0 }, |
| 2703 | { "atmel_mxt_tp", 0 }, | 2736 | { "atmel_mxt_tp", 0 }, |
| 2737 | { "maxtouch", 0 }, | ||
| 2704 | { "mXT224", 0 }, | 2738 | { "mXT224", 0 }, |
| 2705 | { } | 2739 | { } |
| 2706 | }; | 2740 | }; |
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 17cc20ef4923..ac09855fa435 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c | |||
| @@ -1316,7 +1316,13 @@ static int __maybe_unused elants_i2c_suspend(struct device *dev) | |||
| 1316 | 1316 | ||
| 1317 | disable_irq(client->irq); | 1317 | disable_irq(client->irq); |
| 1318 | 1318 | ||
| 1319 | if (device_may_wakeup(dev) || ts->keep_power_in_suspend) { | 1319 | if (device_may_wakeup(dev)) { |
| 1320 | /* | ||
| 1321 | * The device will automatically enter idle mode | ||
| 1322 | * that has reduced power consumption. | ||
| 1323 | */ | ||
| 1324 | ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0); | ||
| 1325 | } else if (ts->keep_power_in_suspend) { | ||
| 1320 | for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) { | 1326 | for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) { |
| 1321 | error = elants_i2c_send(client, set_sleep_cmd, | 1327 | error = elants_i2c_send(client, set_sleep_cmd, |
| 1322 | sizeof(set_sleep_cmd)); | 1328 | sizeof(set_sleep_cmd)); |
| @@ -1326,10 +1332,6 @@ static int __maybe_unused elants_i2c_suspend(struct device *dev) | |||
| 1326 | dev_err(&client->dev, | 1332 | dev_err(&client->dev, |
| 1327 | "suspend command failed: %d\n", error); | 1333 | "suspend command failed: %d\n", error); |
| 1328 | } | 1334 | } |
| 1329 | |||
| 1330 | if (device_may_wakeup(dev)) | ||
| 1331 | ts->wake_irq_enabled = | ||
| 1332 | (enable_irq_wake(client->irq) == 0); | ||
| 1333 | } else { | 1335 | } else { |
| 1334 | elants_i2c_power_off(ts); | 1336 | elants_i2c_power_off(ts); |
| 1335 | } | 1337 | } |
| @@ -1345,10 +1347,11 @@ static int __maybe_unused elants_i2c_resume(struct device *dev) | |||
| 1345 | int retry_cnt; | 1347 | int retry_cnt; |
| 1346 | int error; | 1348 | int error; |
| 1347 | 1349 | ||
| 1348 | if (device_may_wakeup(dev) && ts->wake_irq_enabled) | 1350 | if (device_may_wakeup(dev)) { |
| 1349 | disable_irq_wake(client->irq); | 1351 | if (ts->wake_irq_enabled) |
| 1350 | 1352 | disable_irq_wake(client->irq); | |
| 1351 | if (ts->keep_power_in_suspend) { | 1353 | elants_i2c_sw_reset(client); |
| 1354 | } else if (ts->keep_power_in_suspend) { | ||
| 1352 | for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) { | 1355 | for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) { |
| 1353 | error = elants_i2c_send(client, set_active_cmd, | 1356 | error = elants_i2c_send(client, set_active_cmd, |
| 1354 | sizeof(set_active_cmd)); | 1357 | sizeof(set_active_cmd)); |
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index d21d4edf7236..7caf2fa237f2 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c | |||
| @@ -494,6 +494,22 @@ static void handle_fault_error(struct fault *fault) | |||
| 494 | } | 494 | } |
| 495 | } | 495 | } |
| 496 | 496 | ||
| 497 | static bool access_error(struct vm_area_struct *vma, struct fault *fault) | ||
| 498 | { | ||
| 499 | unsigned long requested = 0; | ||
| 500 | |||
| 501 | if (fault->flags & PPR_FAULT_EXEC) | ||
| 502 | requested |= VM_EXEC; | ||
| 503 | |||
| 504 | if (fault->flags & PPR_FAULT_READ) | ||
| 505 | requested |= VM_READ; | ||
| 506 | |||
| 507 | if (fault->flags & PPR_FAULT_WRITE) | ||
| 508 | requested |= VM_WRITE; | ||
| 509 | |||
| 510 | return (requested & ~vma->vm_flags) != 0; | ||
| 511 | } | ||
| 512 | |||
| 497 | static void do_fault(struct work_struct *work) | 513 | static void do_fault(struct work_struct *work) |
| 498 | { | 514 | { |
| 499 | struct fault *fault = container_of(work, struct fault, work); | 515 | struct fault *fault = container_of(work, struct fault, work); |
| @@ -516,8 +532,8 @@ static void do_fault(struct work_struct *work) | |||
| 516 | goto out; | 532 | goto out; |
| 517 | } | 533 | } |
| 518 | 534 | ||
| 519 | if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) { | 535 | /* Check if we have the right permissions on the vma */ |
| 520 | /* handle_mm_fault would BUG_ON() */ | 536 | if (access_error(vma, fault)) { |
| 521 | up_read(&mm->mmap_sem); | 537 | up_read(&mm->mmap_sem); |
| 522 | handle_fault_error(fault); | 538 | handle_fault_error(fault); |
| 523 | goto out; | 539 | goto out; |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index f1042daef9ad..ac7387686ddc 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -2159,7 +2159,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, | |||
| 2159 | sg_res = aligned_nrpages(sg->offset, sg->length); | 2159 | sg_res = aligned_nrpages(sg->offset, sg->length); |
| 2160 | sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; | 2160 | sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset; |
| 2161 | sg->dma_length = sg->length; | 2161 | sg->dma_length = sg->length; |
| 2162 | pteval = (sg_phys(sg) & PAGE_MASK) | prot; | 2162 | pteval = page_to_phys(sg_page(sg)) | prot; |
| 2163 | phys_pfn = pteval >> VTD_PAGE_SHIFT; | 2163 | phys_pfn = pteval >> VTD_PAGE_SHIFT; |
| 2164 | } | 2164 | } |
| 2165 | 2165 | ||
| @@ -3704,7 +3704,7 @@ static int intel_nontranslate_map_sg(struct device *hddev, | |||
| 3704 | 3704 | ||
| 3705 | for_each_sg(sglist, sg, nelems, i) { | 3705 | for_each_sg(sglist, sg, nelems, i) { |
| 3706 | BUG_ON(!sg_page(sg)); | 3706 | BUG_ON(!sg_page(sg)); |
| 3707 | sg->dma_address = sg_phys(sg); | 3707 | sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset; |
| 3708 | sg->dma_length = sg->length; | 3708 | sg->dma_length = sg->length; |
| 3709 | } | 3709 | } |
| 3710 | return nelems; | 3710 | return nelems; |
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index c69e3f9ec958..50464833d0b8 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c | |||
| @@ -484,6 +484,23 @@ struct page_req_dsc { | |||
| 484 | }; | 484 | }; |
| 485 | 485 | ||
| 486 | #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10) | 486 | #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10) |
| 487 | |||
| 488 | static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req) | ||
| 489 | { | ||
| 490 | unsigned long requested = 0; | ||
| 491 | |||
| 492 | if (req->exe_req) | ||
| 493 | requested |= VM_EXEC; | ||
| 494 | |||
| 495 | if (req->rd_req) | ||
| 496 | requested |= VM_READ; | ||
| 497 | |||
| 498 | if (req->wr_req) | ||
| 499 | requested |= VM_WRITE; | ||
| 500 | |||
| 501 | return (requested & ~vma->vm_flags) != 0; | ||
| 502 | } | ||
| 503 | |||
| 487 | static irqreturn_t prq_event_thread(int irq, void *d) | 504 | static irqreturn_t prq_event_thread(int irq, void *d) |
| 488 | { | 505 | { |
| 489 | struct intel_iommu *iommu = d; | 506 | struct intel_iommu *iommu = d; |
| @@ -539,6 +556,9 @@ static irqreturn_t prq_event_thread(int irq, void *d) | |||
| 539 | if (!vma || address < vma->vm_start) | 556 | if (!vma || address < vma->vm_start) |
| 540 | goto invalid; | 557 | goto invalid; |
| 541 | 558 | ||
| 559 | if (access_error(vma, req)) | ||
| 560 | goto invalid; | ||
| 561 | |||
| 542 | ret = handle_mm_fault(svm->mm, vma, address, | 562 | ret = handle_mm_fault(svm->mm, vma, address, |
| 543 | req->wr_req ? FAULT_FLAG_WRITE : 0); | 563 | req->wr_req ? FAULT_FLAG_WRITE : 0); |
| 544 | if (ret & VM_FAULT_ERROR) | 564 | if (ret & VM_FAULT_ERROR) |
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index abae363c7b9b..0e3b0092ec92 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
| @@ -1430,7 +1430,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, | |||
| 1430 | min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); | 1430 | min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); |
| 1431 | 1431 | ||
| 1432 | for_each_sg(sg, s, nents, i) { | 1432 | for_each_sg(sg, s, nents, i) { |
| 1433 | phys_addr_t phys = sg_phys(s); | 1433 | phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset; |
| 1434 | 1434 | ||
| 1435 | /* | 1435 | /* |
| 1436 | * We are mapping on IOMMU page boundaries, so offset within | 1436 | * We are mapping on IOMMU page boundaries, so offset within |
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index 375be509e95f..2a506fe0c8a4 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c | |||
| @@ -67,8 +67,7 @@ static int write_modem(struct cardstate *cs) | |||
| 67 | struct sk_buff *skb = bcs->tx_skb; | 67 | struct sk_buff *skb = bcs->tx_skb; |
| 68 | int sent = -EOPNOTSUPP; | 68 | int sent = -EOPNOTSUPP; |
| 69 | 69 | ||
| 70 | if (!tty || !tty->driver || !skb) | 70 | WARN_ON(!tty || !tty->ops || !skb); |
| 71 | return -EINVAL; | ||
| 72 | 71 | ||
| 73 | if (!skb->len) { | 72 | if (!skb->len) { |
| 74 | dev_kfree_skb_any(skb); | 73 | dev_kfree_skb_any(skb); |
| @@ -109,8 +108,7 @@ static int send_cb(struct cardstate *cs) | |||
| 109 | unsigned long flags; | 108 | unsigned long flags; |
| 110 | int sent = 0; | 109 | int sent = 0; |
| 111 | 110 | ||
| 112 | if (!tty || !tty->driver) | 111 | WARN_ON(!tty || !tty->ops); |
| 113 | return -EFAULT; | ||
| 114 | 112 | ||
| 115 | cb = cs->cmdbuf; | 113 | cb = cs->cmdbuf; |
| 116 | if (!cb) | 114 | if (!cb) |
| @@ -370,19 +368,18 @@ static void gigaset_freecshw(struct cardstate *cs) | |||
| 370 | tasklet_kill(&cs->write_tasklet); | 368 | tasklet_kill(&cs->write_tasklet); |
| 371 | if (!cs->hw.ser) | 369 | if (!cs->hw.ser) |
| 372 | return; | 370 | return; |
| 373 | dev_set_drvdata(&cs->hw.ser->dev.dev, NULL); | ||
| 374 | platform_device_unregister(&cs->hw.ser->dev); | 371 | platform_device_unregister(&cs->hw.ser->dev); |
| 375 | kfree(cs->hw.ser); | ||
| 376 | cs->hw.ser = NULL; | ||
| 377 | } | 372 | } |
| 378 | 373 | ||
| 379 | static void gigaset_device_release(struct device *dev) | 374 | static void gigaset_device_release(struct device *dev) |
| 380 | { | 375 | { |
| 381 | struct platform_device *pdev = to_platform_device(dev); | 376 | struct cardstate *cs = dev_get_drvdata(dev); |
| 382 | 377 | ||
| 383 | /* adapted from platform_device_release() in drivers/base/platform.c */ | 378 | if (!cs) |
| 384 | kfree(dev->platform_data); | 379 | return; |
| 385 | kfree(pdev->resource); | 380 | dev_set_drvdata(dev, NULL); |
| 381 | kfree(cs->hw.ser); | ||
| 382 | cs->hw.ser = NULL; | ||
| 386 | } | 383 | } |
| 387 | 384 | ||
| 388 | /* | 385 | /* |
| @@ -432,7 +429,9 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, | |||
| 432 | struct tty_struct *tty = cs->hw.ser->tty; | 429 | struct tty_struct *tty = cs->hw.ser->tty; |
| 433 | unsigned int set, clear; | 430 | unsigned int set, clear; |
| 434 | 431 | ||
| 435 | if (!tty || !tty->driver || !tty->ops->tiocmset) | 432 | WARN_ON(!tty || !tty->ops); |
| 433 | /* tiocmset is an optional tty driver method */ | ||
| 434 | if (!tty->ops->tiocmset) | ||
| 436 | return -EINVAL; | 435 | return -EINVAL; |
| 437 | set = new_state & ~old_state; | 436 | set = new_state & ~old_state; |
| 438 | clear = old_state & ~new_state; | 437 | clear = old_state & ~new_state; |
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c index a77eea594b69..cb428b9ee441 100644 --- a/drivers/isdn/hardware/mISDN/mISDNipac.c +++ b/drivers/isdn/hardware/mISDN/mISDNipac.c | |||
| @@ -1170,7 +1170,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop) | |||
| 1170 | 1170 | ||
| 1171 | if (ipac->type & IPAC_TYPE_IPACX) { | 1171 | if (ipac->type & IPAC_TYPE_IPACX) { |
| 1172 | ista = ReadIPAC(ipac, ISACX_ISTA); | 1172 | ista = ReadIPAC(ipac, ISACX_ISTA); |
| 1173 | while (ista && cnt--) { | 1173 | while (ista && --cnt) { |
| 1174 | pr_debug("%s: ISTA %02x\n", ipac->name, ista); | 1174 | pr_debug("%s: ISTA %02x\n", ipac->name, ista); |
| 1175 | if (ista & IPACX__ICA) | 1175 | if (ista & IPACX__ICA) |
| 1176 | ipac_irq(&ipac->hscx[0], ista); | 1176 | ipac_irq(&ipac->hscx[0], ista); |
| @@ -1182,7 +1182,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop) | |||
| 1182 | } | 1182 | } |
| 1183 | } else if (ipac->type & IPAC_TYPE_IPAC) { | 1183 | } else if (ipac->type & IPAC_TYPE_IPAC) { |
| 1184 | ista = ReadIPAC(ipac, IPAC_ISTA); | 1184 | ista = ReadIPAC(ipac, IPAC_ISTA); |
| 1185 | while (ista && cnt--) { | 1185 | while (ista && --cnt) { |
| 1186 | pr_debug("%s: ISTA %02x\n", ipac->name, ista); | 1186 | pr_debug("%s: ISTA %02x\n", ipac->name, ista); |
| 1187 | if (ista & (IPAC__ICD | IPAC__EXD)) { | 1187 | if (ista & (IPAC__ICD | IPAC__EXD)) { |
| 1188 | istad = ReadISAC(isac, ISAC_ISTA); | 1188 | istad = ReadISAC(isac, ISAC_ISTA); |
| @@ -1200,7 +1200,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop) | |||
| 1200 | ista = ReadIPAC(ipac, IPAC_ISTA); | 1200 | ista = ReadIPAC(ipac, IPAC_ISTA); |
| 1201 | } | 1201 | } |
| 1202 | } else if (ipac->type & IPAC_TYPE_HSCX) { | 1202 | } else if (ipac->type & IPAC_TYPE_HSCX) { |
| 1203 | while (cnt) { | 1203 | while (--cnt) { |
| 1204 | ista = ReadIPAC(ipac, IPAC_ISTAB + ipac->hscx[1].off); | 1204 | ista = ReadIPAC(ipac, IPAC_ISTAB + ipac->hscx[1].off); |
| 1205 | pr_debug("%s: B2 ISTA %02x\n", ipac->name, ista); | 1205 | pr_debug("%s: B2 ISTA %02x\n", ipac->name, ista); |
| 1206 | if (ista) | 1206 | if (ista) |
| @@ -1211,7 +1211,6 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop) | |||
| 1211 | mISDNisac_irq(isac, istad); | 1211 | mISDNisac_irq(isac, istad); |
| 1212 | if (0 == (ista | istad)) | 1212 | if (0 == (ista | istad)) |
| 1213 | break; | 1213 | break; |
| 1214 | cnt--; | ||
| 1215 | } | 1214 | } |
| 1216 | } | 1215 | } |
| 1217 | if (cnt > maxloop) /* only for ISAC/HSCX without PCI IRQ test */ | 1216 | if (cnt > maxloop) /* only for ISAC/HSCX without PCI IRQ test */ |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 807095f4c793..dbedc58d8c00 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -314,8 +314,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) | |||
| 314 | */ | 314 | */ |
| 315 | void mddev_suspend(struct mddev *mddev) | 315 | void mddev_suspend(struct mddev *mddev) |
| 316 | { | 316 | { |
| 317 | BUG_ON(mddev->suspended); | 317 | if (mddev->suspended++) |
| 318 | mddev->suspended = 1; | 318 | return; |
| 319 | synchronize_rcu(); | 319 | synchronize_rcu(); |
| 320 | wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); | 320 | wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); |
| 321 | mddev->pers->quiesce(mddev, 1); | 321 | mddev->pers->quiesce(mddev, 1); |
| @@ -326,7 +326,8 @@ EXPORT_SYMBOL_GPL(mddev_suspend); | |||
| 326 | 326 | ||
| 327 | void mddev_resume(struct mddev *mddev) | 327 | void mddev_resume(struct mddev *mddev) |
| 328 | { | 328 | { |
| 329 | mddev->suspended = 0; | 329 | if (--mddev->suspended) |
| 330 | return; | ||
| 330 | wake_up(&mddev->sb_wait); | 331 | wake_up(&mddev->sb_wait); |
| 331 | mddev->pers->quiesce(mddev, 0); | 332 | mddev->pers->quiesce(mddev, 0); |
| 332 | 333 | ||
| @@ -1652,7 +1653,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) | |||
| 1652 | rdev->journal_tail = le64_to_cpu(sb->journal_tail); | 1653 | rdev->journal_tail = le64_to_cpu(sb->journal_tail); |
| 1653 | if (mddev->recovery_cp == MaxSector) | 1654 | if (mddev->recovery_cp == MaxSector) |
| 1654 | set_bit(MD_JOURNAL_CLEAN, &mddev->flags); | 1655 | set_bit(MD_JOURNAL_CLEAN, &mddev->flags); |
| 1655 | rdev->raid_disk = mddev->raid_disks; | 1656 | rdev->raid_disk = 0; |
| 1656 | break; | 1657 | break; |
| 1657 | default: | 1658 | default: |
| 1658 | rdev->saved_raid_disk = role; | 1659 | rdev->saved_raid_disk = role; |
| @@ -2773,6 +2774,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len) | |||
| 2773 | /* Activating a spare .. or possibly reactivating | 2774 | /* Activating a spare .. or possibly reactivating |
| 2774 | * if we ever get bitmaps working here. | 2775 | * if we ever get bitmaps working here. |
| 2775 | */ | 2776 | */ |
| 2777 | int err; | ||
| 2776 | 2778 | ||
| 2777 | if (rdev->raid_disk != -1) | 2779 | if (rdev->raid_disk != -1) |
| 2778 | return -EBUSY; | 2780 | return -EBUSY; |
| @@ -2794,9 +2796,15 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len) | |||
| 2794 | rdev->saved_raid_disk = -1; | 2796 | rdev->saved_raid_disk = -1; |
| 2795 | clear_bit(In_sync, &rdev->flags); | 2797 | clear_bit(In_sync, &rdev->flags); |
| 2796 | clear_bit(Bitmap_sync, &rdev->flags); | 2798 | clear_bit(Bitmap_sync, &rdev->flags); |
| 2797 | remove_and_add_spares(rdev->mddev, rdev); | 2799 | err = rdev->mddev->pers-> |
| 2798 | if (rdev->raid_disk == -1) | 2800 | hot_add_disk(rdev->mddev, rdev); |
| 2799 | return -EBUSY; | 2801 | if (err) { |
| 2802 | rdev->raid_disk = -1; | ||
| 2803 | return err; | ||
| 2804 | } else | ||
| 2805 | sysfs_notify_dirent_safe(rdev->sysfs_state); | ||
| 2806 | if (sysfs_link_rdev(rdev->mddev, rdev)) | ||
| 2807 | /* failure here is OK */; | ||
| 2800 | /* don't wakeup anyone, leave that to userspace. */ | 2808 | /* don't wakeup anyone, leave that to userspace. */ |
| 2801 | } else { | 2809 | } else { |
| 2802 | if (slot >= rdev->mddev->raid_disks && | 2810 | if (slot >= rdev->mddev->raid_disks && |
diff --git a/drivers/md/md.h b/drivers/md/md.h index 2bea51edfab7..ca0b643fe3c1 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
| @@ -566,7 +566,9 @@ static inline char * mdname (struct mddev * mddev) | |||
| 566 | static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) | 566 | static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) |
| 567 | { | 567 | { |
| 568 | char nm[20]; | 568 | char nm[20]; |
| 569 | if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { | 569 | if (!test_bit(Replacement, &rdev->flags) && |
| 570 | !test_bit(Journal, &rdev->flags) && | ||
| 571 | mddev->kobj.sd) { | ||
| 570 | sprintf(nm, "rd%d", rdev->raid_disk); | 572 | sprintf(nm, "rd%d", rdev->raid_disk); |
| 571 | return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); | 573 | return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); |
| 572 | } else | 574 | } else |
| @@ -576,7 +578,9 @@ static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev) | |||
| 576 | static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) | 578 | static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev) |
| 577 | { | 579 | { |
| 578 | char nm[20]; | 580 | char nm[20]; |
| 579 | if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) { | 581 | if (!test_bit(Replacement, &rdev->flags) && |
| 582 | !test_bit(Journal, &rdev->flags) && | ||
| 583 | mddev->kobj.sd) { | ||
| 580 | sprintf(nm, "rd%d", rdev->raid_disk); | 584 | sprintf(nm, "rd%d", rdev->raid_disk); |
| 581 | sysfs_remove_link(&mddev->kobj, nm); | 585 | sysfs_remove_link(&mddev->kobj, nm); |
| 582 | } | 586 | } |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 41d70bc9ba2f..84e597e1c489 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -1946,6 +1946,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) | |||
| 1946 | 1946 | ||
| 1947 | first = i; | 1947 | first = i; |
| 1948 | fbio = r10_bio->devs[i].bio; | 1948 | fbio = r10_bio->devs[i].bio; |
| 1949 | fbio->bi_iter.bi_size = r10_bio->sectors << 9; | ||
| 1950 | fbio->bi_iter.bi_idx = 0; | ||
| 1949 | 1951 | ||
| 1950 | vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); | 1952 | vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9); |
| 1951 | /* now find blocks with errors */ | 1953 | /* now find blocks with errors */ |
| @@ -1989,7 +1991,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) | |||
| 1989 | bio_reset(tbio); | 1991 | bio_reset(tbio); |
| 1990 | 1992 | ||
| 1991 | tbio->bi_vcnt = vcnt; | 1993 | tbio->bi_vcnt = vcnt; |
| 1992 | tbio->bi_iter.bi_size = r10_bio->sectors << 9; | 1994 | tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; |
| 1993 | tbio->bi_rw = WRITE; | 1995 | tbio->bi_rw = WRITE; |
| 1994 | tbio->bi_private = r10_bio; | 1996 | tbio->bi_private = r10_bio; |
| 1995 | tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; | 1997 | tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; |
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c index 8616fa8193bc..c2e60b4f292d 100644 --- a/drivers/media/pci/ivtv/ivtv-driver.c +++ b/drivers/media/pci/ivtv/ivtv-driver.c | |||
| @@ -805,11 +805,11 @@ static void ivtv_init_struct2(struct ivtv *itv) | |||
| 805 | { | 805 | { |
| 806 | int i; | 806 | int i; |
| 807 | 807 | ||
| 808 | for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS - 1; i++) | 808 | for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS; i++) |
| 809 | if (itv->card->video_inputs[i].video_type == 0) | 809 | if (itv->card->video_inputs[i].video_type == 0) |
| 810 | break; | 810 | break; |
| 811 | itv->nof_inputs = i; | 811 | itv->nof_inputs = i; |
| 812 | for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS - 1; i++) | 812 | for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS; i++) |
| 813 | if (itv->card->audio_inputs[i].audio_type == 0) | 813 | if (itv->card->audio_inputs[i].audio_type == 0) |
| 814 | break; | 814 | break; |
| 815 | itv->nof_audio_inputs = i; | 815 | itv->nof_audio_inputs = i; |
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c index fcbb49757614..565a59310747 100644 --- a/drivers/media/usb/airspy/airspy.c +++ b/drivers/media/usb/airspy/airspy.c | |||
| @@ -134,7 +134,7 @@ struct airspy { | |||
| 134 | int urbs_submitted; | 134 | int urbs_submitted; |
| 135 | 135 | ||
| 136 | /* USB control message buffer */ | 136 | /* USB control message buffer */ |
| 137 | #define BUF_SIZE 24 | 137 | #define BUF_SIZE 128 |
| 138 | u8 buf[BUF_SIZE]; | 138 | u8 buf[BUF_SIZE]; |
| 139 | 139 | ||
| 140 | /* Current configuration */ | 140 | /* Current configuration */ |
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c index e05bfec90f46..0fe5cb2c260c 100644 --- a/drivers/media/usb/hackrf/hackrf.c +++ b/drivers/media/usb/hackrf/hackrf.c | |||
| @@ -24,6 +24,15 @@ | |||
| 24 | #include <media/videobuf2-v4l2.h> | 24 | #include <media/videobuf2-v4l2.h> |
| 25 | #include <media/videobuf2-vmalloc.h> | 25 | #include <media/videobuf2-vmalloc.h> |
| 26 | 26 | ||
| 27 | /* | ||
| 28 | * Used Avago MGA-81563 RF amplifier could be destroyed pretty easily with too | ||
| 29 | * strong signal or transmitting to bad antenna. | ||
| 30 | * Set RF gain control to 'grabbed' state by default for sure. | ||
| 31 | */ | ||
| 32 | static bool hackrf_enable_rf_gain_ctrl; | ||
| 33 | module_param_named(enable_rf_gain_ctrl, hackrf_enable_rf_gain_ctrl, bool, 0644); | ||
| 34 | MODULE_PARM_DESC(enable_rf_gain_ctrl, "enable RX/TX RF amplifier control (warn: could damage amplifier)"); | ||
| 35 | |||
| 27 | /* HackRF USB API commands (from HackRF Library) */ | 36 | /* HackRF USB API commands (from HackRF Library) */ |
| 28 | enum { | 37 | enum { |
| 29 | CMD_SET_TRANSCEIVER_MODE = 0x01, | 38 | CMD_SET_TRANSCEIVER_MODE = 0x01, |
| @@ -1451,6 +1460,7 @@ static int hackrf_probe(struct usb_interface *intf, | |||
| 1451 | dev_err(dev->dev, "Could not initialize controls\n"); | 1460 | dev_err(dev->dev, "Could not initialize controls\n"); |
| 1452 | goto err_v4l2_ctrl_handler_free_rx; | 1461 | goto err_v4l2_ctrl_handler_free_rx; |
| 1453 | } | 1462 | } |
| 1463 | v4l2_ctrl_grab(dev->rx_rf_gain, !hackrf_enable_rf_gain_ctrl); | ||
| 1454 | v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler); | 1464 | v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler); |
| 1455 | 1465 | ||
| 1456 | /* Register controls for transmitter */ | 1466 | /* Register controls for transmitter */ |
| @@ -1471,6 +1481,7 @@ static int hackrf_probe(struct usb_interface *intf, | |||
| 1471 | dev_err(dev->dev, "Could not initialize controls\n"); | 1481 | dev_err(dev->dev, "Could not initialize controls\n"); |
| 1472 | goto err_v4l2_ctrl_handler_free_tx; | 1482 | goto err_v4l2_ctrl_handler_free_tx; |
| 1473 | } | 1483 | } |
| 1484 | v4l2_ctrl_grab(dev->tx_rf_gain, !hackrf_enable_rf_gain_ctrl); | ||
| 1474 | v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler); | 1485 | v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler); |
| 1475 | 1486 | ||
| 1476 | /* Register the v4l2_device structure */ | 1487 | /* Register the v4l2_device structure */ |
| @@ -1530,7 +1541,7 @@ err_v4l2_ctrl_handler_free_rx: | |||
| 1530 | err_kfree: | 1541 | err_kfree: |
| 1531 | kfree(dev); | 1542 | kfree(dev); |
| 1532 | err: | 1543 | err: |
| 1533 | dev_dbg(dev->dev, "failed=%d\n", ret); | 1544 | dev_dbg(&intf->dev, "failed=%d\n", ret); |
| 1534 | return ret; | 1545 | return ret; |
| 1535 | } | 1546 | } |
| 1536 | 1547 | ||
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c index 669c3452f278..9ed6038e47d2 100644 --- a/drivers/mtd/ofpart.c +++ b/drivers/mtd/ofpart.c | |||
| @@ -46,10 +46,18 @@ static int parse_ofpart_partitions(struct mtd_info *master, | |||
| 46 | 46 | ||
| 47 | ofpart_node = of_get_child_by_name(mtd_node, "partitions"); | 47 | ofpart_node = of_get_child_by_name(mtd_node, "partitions"); |
| 48 | if (!ofpart_node) { | 48 | if (!ofpart_node) { |
| 49 | pr_warn("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n", | 49 | /* |
| 50 | master->name, mtd_node->full_name); | 50 | * We might get here even when ofpart isn't used at all (e.g., |
| 51 | * when using another parser), so don't be louder than | ||
| 52 | * KERN_DEBUG | ||
| 53 | */ | ||
| 54 | pr_debug("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n", | ||
| 55 | master->name, mtd_node->full_name); | ||
| 51 | ofpart_node = mtd_node; | 56 | ofpart_node = mtd_node; |
| 52 | dedicated = false; | 57 | dedicated = false; |
| 58 | } else if (!of_device_is_compatible(ofpart_node, "fixed-partitions")) { | ||
| 59 | /* The 'partitions' subnode might be used by another parser */ | ||
| 60 | return 0; | ||
| 53 | } | 61 | } |
| 54 | 62 | ||
| 55 | /* First count the subnodes */ | 63 | /* First count the subnodes */ |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 970781a9e677..f6a7161e3b85 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c | |||
| @@ -1849,7 +1849,7 @@ static int xgbe_exit(struct xgbe_prv_data *pdata) | |||
| 1849 | usleep_range(10, 15); | 1849 | usleep_range(10, 15); |
| 1850 | 1850 | ||
| 1851 | /* Poll Until Poll Condition */ | 1851 | /* Poll Until Poll Condition */ |
| 1852 | while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) | 1852 | while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) |
| 1853 | usleep_range(500, 600); | 1853 | usleep_range(500, 600); |
| 1854 | 1854 | ||
| 1855 | if (!count) | 1855 | if (!count) |
| @@ -1873,7 +1873,7 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) | |||
| 1873 | /* Poll Until Poll Condition */ | 1873 | /* Poll Until Poll Condition */ |
| 1874 | for (i = 0; i < pdata->tx_q_count; i++) { | 1874 | for (i = 0; i < pdata->tx_q_count; i++) { |
| 1875 | count = 2000; | 1875 | count = 2000; |
| 1876 | while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i, | 1876 | while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i, |
| 1877 | MTL_Q_TQOMR, FTQ)) | 1877 | MTL_Q_TQOMR, FTQ)) |
| 1878 | usleep_range(500, 600); | 1878 | usleep_range(500, 600); |
| 1879 | 1879 | ||
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 9147a0107c44..d0ae1a6cc212 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
| @@ -289,6 +289,7 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring, | |||
| 289 | struct sk_buff *skb) | 289 | struct sk_buff *skb) |
| 290 | { | 290 | { |
| 291 | struct device *dev = ndev_to_dev(tx_ring->ndev); | 291 | struct device *dev = ndev_to_dev(tx_ring->ndev); |
| 292 | struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev); | ||
| 292 | struct xgene_enet_raw_desc *raw_desc; | 293 | struct xgene_enet_raw_desc *raw_desc; |
| 293 | __le64 *exp_desc = NULL, *exp_bufs = NULL; | 294 | __le64 *exp_desc = NULL, *exp_bufs = NULL; |
| 294 | dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr; | 295 | dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr; |
| @@ -419,6 +420,7 @@ out: | |||
| 419 | raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) | | 420 | raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) | |
| 420 | SET_VAL(USERINFO, tx_ring->tail)); | 421 | SET_VAL(USERINFO, tx_ring->tail)); |
| 421 | tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb; | 422 | tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb; |
| 423 | pdata->tx_level += count; | ||
| 422 | tx_ring->tail = tail; | 424 | tx_ring->tail = tail; |
| 423 | 425 | ||
| 424 | return count; | 426 | return count; |
| @@ -429,14 +431,13 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, | |||
| 429 | { | 431 | { |
| 430 | struct xgene_enet_pdata *pdata = netdev_priv(ndev); | 432 | struct xgene_enet_pdata *pdata = netdev_priv(ndev); |
| 431 | struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; | 433 | struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; |
| 432 | struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring; | 434 | u32 tx_level = pdata->tx_level; |
| 433 | u32 tx_level, cq_level; | ||
| 434 | int count; | 435 | int count; |
| 435 | 436 | ||
| 436 | tx_level = pdata->ring_ops->len(tx_ring); | 437 | if (tx_level < pdata->txc_level) |
| 437 | cq_level = pdata->ring_ops->len(cp_ring); | 438 | tx_level += ((typeof(pdata->tx_level))~0U); |
| 438 | if (unlikely(tx_level > pdata->tx_qcnt_hi || | 439 | |
| 439 | cq_level > pdata->cp_qcnt_hi)) { | 440 | if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) { |
| 440 | netif_stop_queue(ndev); | 441 | netif_stop_queue(ndev); |
| 441 | return NETDEV_TX_BUSY; | 442 | return NETDEV_TX_BUSY; |
| 442 | } | 443 | } |
| @@ -539,10 +540,13 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, | |||
| 539 | struct xgene_enet_raw_desc *raw_desc, *exp_desc; | 540 | struct xgene_enet_raw_desc *raw_desc, *exp_desc; |
| 540 | u16 head = ring->head; | 541 | u16 head = ring->head; |
| 541 | u16 slots = ring->slots - 1; | 542 | u16 slots = ring->slots - 1; |
| 542 | int ret, count = 0, processed = 0; | 543 | int ret, desc_count, count = 0, processed = 0; |
| 544 | bool is_completion; | ||
| 543 | 545 | ||
| 544 | do { | 546 | do { |
| 545 | raw_desc = &ring->raw_desc[head]; | 547 | raw_desc = &ring->raw_desc[head]; |
| 548 | desc_count = 0; | ||
| 549 | is_completion = false; | ||
| 546 | exp_desc = NULL; | 550 | exp_desc = NULL; |
| 547 | if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) | 551 | if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) |
| 548 | break; | 552 | break; |
| @@ -559,18 +563,24 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, | |||
| 559 | } | 563 | } |
| 560 | dma_rmb(); | 564 | dma_rmb(); |
| 561 | count++; | 565 | count++; |
| 566 | desc_count++; | ||
| 562 | } | 567 | } |
| 563 | if (is_rx_desc(raw_desc)) | 568 | if (is_rx_desc(raw_desc)) { |
| 564 | ret = xgene_enet_rx_frame(ring, raw_desc); | 569 | ret = xgene_enet_rx_frame(ring, raw_desc); |
| 565 | else | 570 | } else { |
| 566 | ret = xgene_enet_tx_completion(ring, raw_desc); | 571 | ret = xgene_enet_tx_completion(ring, raw_desc); |
| 572 | is_completion = true; | ||
| 573 | } | ||
| 567 | xgene_enet_mark_desc_slot_empty(raw_desc); | 574 | xgene_enet_mark_desc_slot_empty(raw_desc); |
| 568 | if (exp_desc) | 575 | if (exp_desc) |
| 569 | xgene_enet_mark_desc_slot_empty(exp_desc); | 576 | xgene_enet_mark_desc_slot_empty(exp_desc); |
| 570 | 577 | ||
| 571 | head = (head + 1) & slots; | 578 | head = (head + 1) & slots; |
| 572 | count++; | 579 | count++; |
| 580 | desc_count++; | ||
| 573 | processed++; | 581 | processed++; |
| 582 | if (is_completion) | ||
| 583 | pdata->txc_level += desc_count; | ||
| 574 | 584 | ||
| 575 | if (ret) | 585 | if (ret) |
| 576 | break; | 586 | break; |
| @@ -580,10 +590,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, | |||
| 580 | pdata->ring_ops->wr_cmd(ring, -count); | 590 | pdata->ring_ops->wr_cmd(ring, -count); |
| 581 | ring->head = head; | 591 | ring->head = head; |
| 582 | 592 | ||
| 583 | if (netif_queue_stopped(ring->ndev)) { | 593 | if (netif_queue_stopped(ring->ndev)) |
| 584 | if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low) | 594 | netif_start_queue(ring->ndev); |
| 585 | netif_wake_queue(ring->ndev); | ||
| 586 | } | ||
| 587 | } | 595 | } |
| 588 | 596 | ||
| 589 | return processed; | 597 | return processed; |
| @@ -1033,9 +1041,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) | |||
| 1033 | pdata->tx_ring->cp_ring = cp_ring; | 1041 | pdata->tx_ring->cp_ring = cp_ring; |
| 1034 | pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); | 1042 | pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); |
| 1035 | 1043 | ||
| 1036 | pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2; | 1044 | pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128; |
| 1037 | pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2; | ||
| 1038 | pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2; | ||
| 1039 | 1045 | ||
| 1040 | return 0; | 1046 | return 0; |
| 1041 | 1047 | ||
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index a6e56b88c0a0..1aa72c787f8d 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h | |||
| @@ -155,11 +155,11 @@ struct xgene_enet_pdata { | |||
| 155 | enum xgene_enet_id enet_id; | 155 | enum xgene_enet_id enet_id; |
| 156 | struct xgene_enet_desc_ring *tx_ring; | 156 | struct xgene_enet_desc_ring *tx_ring; |
| 157 | struct xgene_enet_desc_ring *rx_ring; | 157 | struct xgene_enet_desc_ring *rx_ring; |
| 158 | u16 tx_level; | ||
| 159 | u16 txc_level; | ||
| 158 | char *dev_name; | 160 | char *dev_name; |
| 159 | u32 rx_buff_cnt; | 161 | u32 rx_buff_cnt; |
| 160 | u32 tx_qcnt_hi; | 162 | u32 tx_qcnt_hi; |
| 161 | u32 cp_qcnt_hi; | ||
| 162 | u32 cp_qcnt_low; | ||
| 163 | u32 rx_irq; | 163 | u32 rx_irq; |
| 164 | u32 txc_irq; | 164 | u32 txc_irq; |
| 165 | u8 cq_cnt; | 165 | u8 cq_cnt; |
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 2795d6db10e1..8b5988e210d5 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c | |||
| @@ -1016,13 +1016,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter) | |||
| 1016 | sizeof(struct atl1c_recv_ret_status) * rx_desc_count + | 1016 | sizeof(struct atl1c_recv_ret_status) * rx_desc_count + |
| 1017 | 8 * 4; | 1017 | 8 * 4; |
| 1018 | 1018 | ||
| 1019 | ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, | 1019 | ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size, |
| 1020 | &ring_header->dma); | 1020 | &ring_header->dma, GFP_KERNEL); |
| 1021 | if (unlikely(!ring_header->desc)) { | 1021 | if (unlikely(!ring_header->desc)) { |
| 1022 | dev_err(&pdev->dev, "pci_alloc_consistend failed\n"); | 1022 | dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); |
| 1023 | goto err_nomem; | 1023 | goto err_nomem; |
| 1024 | } | 1024 | } |
| 1025 | memset(ring_header->desc, 0, ring_header->size); | ||
| 1026 | /* init TPD ring */ | 1025 | /* init TPD ring */ |
| 1027 | 1026 | ||
| 1028 | tpd_ring[0].dma = roundup(ring_header->dma, 8); | 1027 | tpd_ring[0].dma = roundup(ring_header->dma, 8); |
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig index a3c7106fdf85..8ba7f8ff3434 100644 --- a/drivers/net/ethernet/aurora/Kconfig +++ b/drivers/net/ethernet/aurora/Kconfig | |||
| @@ -13,6 +13,7 @@ if NET_VENDOR_AURORA | |||
| 13 | 13 | ||
| 14 | config AURORA_NB8800 | 14 | config AURORA_NB8800 |
| 15 | tristate "Aurora AU-NB8800 support" | 15 | tristate "Aurora AU-NB8800 support" |
| 16 | depends on HAS_DMA | ||
| 16 | select PHYLIB | 17 | select PHYLIB |
| 17 | help | 18 | help |
| 18 | Support for the AU-NB8800 gigabit Ethernet controller. | 19 | Support for the AU-NB8800 gigabit Ethernet controller. |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index bdf094fb6ef9..07f5f239cb65 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -2693,17 +2693,16 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) | |||
| 2693 | req.ver_upd = DRV_VER_UPD; | 2693 | req.ver_upd = DRV_VER_UPD; |
| 2694 | 2694 | ||
| 2695 | if (BNXT_PF(bp)) { | 2695 | if (BNXT_PF(bp)) { |
| 2696 | unsigned long vf_req_snif_bmap[4]; | 2696 | DECLARE_BITMAP(vf_req_snif_bmap, 256); |
| 2697 | u32 *data = (u32 *)vf_req_snif_bmap; | 2697 | u32 *data = (u32 *)vf_req_snif_bmap; |
| 2698 | 2698 | ||
| 2699 | memset(vf_req_snif_bmap, 0, 32); | 2699 | memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap)); |
| 2700 | for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) | 2700 | for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) |
| 2701 | __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap); | 2701 | __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap); |
| 2702 | 2702 | ||
| 2703 | for (i = 0; i < 8; i++) { | 2703 | for (i = 0; i < 8; i++) |
| 2704 | req.vf_req_fwd[i] = cpu_to_le32(*data); | 2704 | req.vf_req_fwd[i] = cpu_to_le32(data[i]); |
| 2705 | data++; | 2705 | |
| 2706 | } | ||
| 2707 | req.enables |= | 2706 | req.enables |= |
| 2708 | cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); | 2707 | cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); |
| 2709 | } | 2708 | } |
| @@ -4603,7 +4602,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) | |||
| 4603 | bp->nge_port_cnt = 1; | 4602 | bp->nge_port_cnt = 1; |
| 4604 | } | 4603 | } |
| 4605 | 4604 | ||
| 4606 | bp->state = BNXT_STATE_OPEN; | 4605 | set_bit(BNXT_STATE_OPEN, &bp->state); |
| 4607 | bnxt_enable_int(bp); | 4606 | bnxt_enable_int(bp); |
| 4608 | /* Enable TX queues */ | 4607 | /* Enable TX queues */ |
| 4609 | bnxt_tx_enable(bp); | 4608 | bnxt_tx_enable(bp); |
| @@ -4679,8 +4678,10 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) | |||
| 4679 | /* Change device state to avoid TX queue wake up's */ | 4678 | /* Change device state to avoid TX queue wake up's */ |
| 4680 | bnxt_tx_disable(bp); | 4679 | bnxt_tx_disable(bp); |
| 4681 | 4680 | ||
| 4682 | bp->state = BNXT_STATE_CLOSED; | 4681 | clear_bit(BNXT_STATE_OPEN, &bp->state); |
| 4683 | cancel_work_sync(&bp->sp_task); | 4682 | smp_mb__after_atomic(); |
| 4683 | while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) | ||
| 4684 | msleep(20); | ||
| 4684 | 4685 | ||
| 4685 | /* Flush rings before disabling interrupts */ | 4686 | /* Flush rings before disabling interrupts */ |
| 4686 | bnxt_shutdown_nic(bp, irq_re_init); | 4687 | bnxt_shutdown_nic(bp, irq_re_init); |
| @@ -5030,8 +5031,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp) | |||
| 5030 | static void bnxt_reset_task(struct bnxt *bp) | 5031 | static void bnxt_reset_task(struct bnxt *bp) |
| 5031 | { | 5032 | { |
| 5032 | bnxt_dbg_dump_states(bp); | 5033 | bnxt_dbg_dump_states(bp); |
| 5033 | if (netif_running(bp->dev)) | 5034 | if (netif_running(bp->dev)) { |
| 5034 | bnxt_tx_disable(bp); /* prevent tx timout again */ | 5035 | bnxt_close_nic(bp, false, false); |
| 5036 | bnxt_open_nic(bp, false, false); | ||
| 5037 | } | ||
| 5035 | } | 5038 | } |
| 5036 | 5039 | ||
| 5037 | static void bnxt_tx_timeout(struct net_device *dev) | 5040 | static void bnxt_tx_timeout(struct net_device *dev) |
| @@ -5081,8 +5084,12 @@ static void bnxt_sp_task(struct work_struct *work) | |||
| 5081 | struct bnxt *bp = container_of(work, struct bnxt, sp_task); | 5084 | struct bnxt *bp = container_of(work, struct bnxt, sp_task); |
| 5082 | int rc; | 5085 | int rc; |
| 5083 | 5086 | ||
| 5084 | if (bp->state != BNXT_STATE_OPEN) | 5087 | set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); |
| 5088 | smp_mb__after_atomic(); | ||
| 5089 | if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { | ||
| 5090 | clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | ||
| 5085 | return; | 5091 | return; |
| 5092 | } | ||
| 5086 | 5093 | ||
| 5087 | if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) | 5094 | if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) |
| 5088 | bnxt_cfg_rx_mode(bp); | 5095 | bnxt_cfg_rx_mode(bp); |
| @@ -5106,8 +5113,19 @@ static void bnxt_sp_task(struct work_struct *work) | |||
| 5106 | bnxt_hwrm_tunnel_dst_port_free( | 5113 | bnxt_hwrm_tunnel_dst_port_free( |
| 5107 | bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); | 5114 | bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); |
| 5108 | } | 5115 | } |
| 5109 | if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) | 5116 | if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) { |
| 5117 | /* bnxt_reset_task() calls bnxt_close_nic() which waits | ||
| 5118 | * for BNXT_STATE_IN_SP_TASK to clear. | ||
| 5119 | */ | ||
| 5120 | clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | ||
| 5121 | rtnl_lock(); | ||
| 5110 | bnxt_reset_task(bp); | 5122 | bnxt_reset_task(bp); |
| 5123 | set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | ||
| 5124 | rtnl_unlock(); | ||
| 5125 | } | ||
| 5126 | |||
| 5127 | smp_mb__before_atomic(); | ||
| 5128 | clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); | ||
| 5111 | } | 5129 | } |
| 5112 | 5130 | ||
| 5113 | static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) | 5131 | static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) |
| @@ -5186,7 +5204,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) | |||
| 5186 | bp->timer.function = bnxt_timer; | 5204 | bp->timer.function = bnxt_timer; |
| 5187 | bp->current_interval = BNXT_TIMER_INTERVAL; | 5205 | bp->current_interval = BNXT_TIMER_INTERVAL; |
| 5188 | 5206 | ||
| 5189 | bp->state = BNXT_STATE_CLOSED; | 5207 | clear_bit(BNXT_STATE_OPEN, &bp->state); |
| 5190 | 5208 | ||
| 5191 | return 0; | 5209 | return 0; |
| 5192 | 5210 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 674bc5159b91..f199f4cc8ffe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
| @@ -925,9 +925,9 @@ struct bnxt { | |||
| 925 | 925 | ||
| 926 | struct timer_list timer; | 926 | struct timer_list timer; |
| 927 | 927 | ||
| 928 | int state; | 928 | unsigned long state; |
| 929 | #define BNXT_STATE_CLOSED 0 | 929 | #define BNXT_STATE_OPEN 0 |
| 930 | #define BNXT_STATE_OPEN 1 | 930 | #define BNXT_STATE_IN_SP_TASK 1 |
| 931 | 931 | ||
| 932 | struct bnxt_irq *irq_tbl; | 932 | struct bnxt_irq *irq_tbl; |
| 933 | u8 mac_addr[ETH_ALEN]; | 933 | u8 mac_addr[ETH_ALEN]; |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 7a9af2887d8e..ea044bbcd384 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | |||
| @@ -21,7 +21,7 @@ | |||
| 21 | #ifdef CONFIG_BNXT_SRIOV | 21 | #ifdef CONFIG_BNXT_SRIOV |
| 22 | static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) | 22 | static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) |
| 23 | { | 23 | { |
| 24 | if (bp->state != BNXT_STATE_OPEN) { | 24 | if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { |
| 25 | netdev_err(bp->dev, "vf ndo called though PF is down\n"); | 25 | netdev_err(bp->dev, "vf ndo called though PF is down\n"); |
| 26 | return -EINVAL; | 26 | return -EINVAL; |
| 27 | } | 27 | } |
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 4b7fd63ae57c..5f24d11cb16a 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c | |||
| @@ -37,7 +37,6 @@ struct nicpf { | |||
| 37 | #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) | 37 | #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) |
| 38 | #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) | 38 | #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) |
| 39 | u8 vf_lmac_map[MAX_LMAC]; | 39 | u8 vf_lmac_map[MAX_LMAC]; |
| 40 | u8 lmac_cnt; | ||
| 41 | struct delayed_work dwork; | 40 | struct delayed_work dwork; |
| 42 | struct workqueue_struct *check_link; | 41 | struct workqueue_struct *check_link; |
| 43 | u8 link[MAX_LMAC]; | 42 | u8 link[MAX_LMAC]; |
| @@ -280,7 +279,6 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) | |||
| 280 | u64 lmac_credit; | 279 | u64 lmac_credit; |
| 281 | 280 | ||
| 282 | nic->num_vf_en = 0; | 281 | nic->num_vf_en = 0; |
| 283 | nic->lmac_cnt = 0; | ||
| 284 | 282 | ||
| 285 | for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { | 283 | for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { |
| 286 | if (!(bgx_map & (1 << bgx))) | 284 | if (!(bgx_map & (1 << bgx))) |
| @@ -290,7 +288,6 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) | |||
| 290 | nic->vf_lmac_map[next_bgx_lmac++] = | 288 | nic->vf_lmac_map[next_bgx_lmac++] = |
| 291 | NIC_SET_VF_LMAC_MAP(bgx, lmac); | 289 | NIC_SET_VF_LMAC_MAP(bgx, lmac); |
| 292 | nic->num_vf_en += lmac_cnt; | 290 | nic->num_vf_en += lmac_cnt; |
| 293 | nic->lmac_cnt += lmac_cnt; | ||
| 294 | 291 | ||
| 295 | /* Program LMAC credits */ | 292 | /* Program LMAC credits */ |
| 296 | lmac_credit = (1ull << 1); /* channel credit enable */ | 293 | lmac_credit = (1ull << 1); /* channel credit enable */ |
| @@ -618,6 +615,21 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk) | |||
| 618 | return 0; | 615 | return 0; |
| 619 | } | 616 | } |
| 620 | 617 | ||
| 618 | static void nic_enable_vf(struct nicpf *nic, int vf, bool enable) | ||
| 619 | { | ||
| 620 | int bgx, lmac; | ||
| 621 | |||
| 622 | nic->vf_enabled[vf] = enable; | ||
| 623 | |||
| 624 | if (vf >= nic->num_vf_en) | ||
| 625 | return; | ||
| 626 | |||
| 627 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
| 628 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
| 629 | |||
| 630 | bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable); | ||
| 631 | } | ||
| 632 | |||
| 621 | /* Interrupt handler to handle mailbox messages from VFs */ | 633 | /* Interrupt handler to handle mailbox messages from VFs */ |
| 622 | static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | 634 | static void nic_handle_mbx_intr(struct nicpf *nic, int vf) |
| 623 | { | 635 | { |
| @@ -717,29 +729,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | |||
| 717 | break; | 729 | break; |
| 718 | case NIC_MBOX_MSG_CFG_DONE: | 730 | case NIC_MBOX_MSG_CFG_DONE: |
| 719 | /* Last message of VF config msg sequence */ | 731 | /* Last message of VF config msg sequence */ |
| 720 | nic->vf_enabled[vf] = true; | 732 | nic_enable_vf(nic, vf, true); |
| 721 | if (vf >= nic->lmac_cnt) | ||
| 722 | goto unlock; | ||
| 723 | |||
| 724 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
| 725 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
| 726 | |||
| 727 | bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true); | ||
| 728 | goto unlock; | 733 | goto unlock; |
| 729 | case NIC_MBOX_MSG_SHUTDOWN: | 734 | case NIC_MBOX_MSG_SHUTDOWN: |
| 730 | /* First msg in VF teardown sequence */ | 735 | /* First msg in VF teardown sequence */ |
| 731 | nic->vf_enabled[vf] = false; | ||
| 732 | if (vf >= nic->num_vf_en) | 736 | if (vf >= nic->num_vf_en) |
| 733 | nic->sqs_used[vf - nic->num_vf_en] = false; | 737 | nic->sqs_used[vf - nic->num_vf_en] = false; |
| 734 | nic->pqs_vf[vf] = 0; | 738 | nic->pqs_vf[vf] = 0; |
| 735 | 739 | nic_enable_vf(nic, vf, false); | |
| 736 | if (vf >= nic->lmac_cnt) | ||
| 737 | break; | ||
| 738 | |||
| 739 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
| 740 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
| 741 | |||
| 742 | bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false); | ||
| 743 | break; | 740 | break; |
| 744 | case NIC_MBOX_MSG_ALLOC_SQS: | 741 | case NIC_MBOX_MSG_ALLOC_SQS: |
| 745 | nic_alloc_sqs(nic, &mbx.sqs_alloc); | 742 | nic_alloc_sqs(nic, &mbx.sqs_alloc); |
| @@ -958,7 +955,7 @@ static void nic_poll_for_link(struct work_struct *work) | |||
| 958 | 955 | ||
| 959 | mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; | 956 | mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; |
| 960 | 957 | ||
| 961 | for (vf = 0; vf < nic->lmac_cnt; vf++) { | 958 | for (vf = 0; vf < nic->num_vf_en; vf++) { |
| 962 | /* Poll only if VF is UP */ | 959 | /* Poll only if VF is UP */ |
| 963 | if (!nic->vf_enabled[vf]) | 960 | if (!nic->vf_enabled[vf]) |
| 964 | continue; | 961 | continue; |
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 63c2bcf8031a..b1026689b78f 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c | |||
| @@ -48,21 +48,15 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev, | |||
| 48 | *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); | 48 | *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); |
| 49 | else { /* !dst_is_aligned */ | 49 | else { /* !dst_is_aligned */ |
| 50 | for (i = 0; i < len; i++, reg++) { | 50 | for (i = 0; i < len; i++, reg++) { |
| 51 | u32 buf = | 51 | u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); |
| 52 | nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); | 52 | put_unaligned(buf, reg); |
| 53 | |||
| 54 | /* to accommodate word-unaligned address of "reg" | ||
| 55 | * we have to do memcpy_toio() instead of simple "=". | ||
| 56 | */ | ||
| 57 | memcpy_toio((void __iomem *)reg, &buf, sizeof(buf)); | ||
| 58 | } | 53 | } |
| 59 | } | 54 | } |
| 60 | 55 | ||
| 61 | /* copy last bytes (if any) */ | 56 | /* copy last bytes (if any) */ |
| 62 | if (last) { | 57 | if (last) { |
| 63 | u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); | 58 | u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); |
| 64 | 59 | memcpy((u8*)reg, &buf, last); | |
| 65 | memcpy_toio((void __iomem *)reg, &buf, last); | ||
| 66 | } | 60 | } |
| 67 | } | 61 | } |
| 68 | 62 | ||
| @@ -367,7 +361,7 @@ static void nps_enet_send_frame(struct net_device *ndev, | |||
| 367 | struct nps_enet_tx_ctl tx_ctrl; | 361 | struct nps_enet_tx_ctl tx_ctrl; |
| 368 | short length = skb->len; | 362 | short length = skb->len; |
| 369 | u32 i, len = DIV_ROUND_UP(length, sizeof(u32)); | 363 | u32 i, len = DIV_ROUND_UP(length, sizeof(u32)); |
| 370 | u32 *src = (u32 *)virt_to_phys(skb->data); | 364 | u32 *src = (void *)skb->data; |
| 371 | bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32)); | 365 | bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32)); |
| 372 | 366 | ||
| 373 | tx_ctrl.value = 0; | 367 | tx_ctrl.value = 0; |
| @@ -375,17 +369,11 @@ static void nps_enet_send_frame(struct net_device *ndev, | |||
| 375 | if (src_is_aligned) | 369 | if (src_is_aligned) |
| 376 | for (i = 0; i < len; i++, src++) | 370 | for (i = 0; i < len; i++, src++) |
| 377 | nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src); | 371 | nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src); |
| 378 | else { /* !src_is_aligned */ | 372 | else /* !src_is_aligned */ |
| 379 | for (i = 0; i < len; i++, src++) { | 373 | for (i = 0; i < len; i++, src++) |
| 380 | u32 buf; | 374 | nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, |
| 381 | 375 | get_unaligned(src)); | |
| 382 | /* to accommodate word-unaligned address of "src" | 376 | |
| 383 | * we have to do memcpy_fromio() instead of simple "=" | ||
| 384 | */ | ||
| 385 | memcpy_fromio(&buf, (void __iomem *)src, sizeof(buf)); | ||
| 386 | nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, buf); | ||
| 387 | } | ||
| 388 | } | ||
| 389 | /* Write the length of the Frame */ | 377 | /* Write the length of the Frame */ |
| 390 | tx_ctrl.nt = length; | 378 | tx_ctrl.nt = length; |
| 391 | 379 | ||
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index 08f5b911d96b..52e0091b4fb2 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | |||
| @@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev) | |||
| 552 | cbd_t __iomem *prev_bd; | 552 | cbd_t __iomem *prev_bd; |
| 553 | cbd_t __iomem *last_tx_bd; | 553 | cbd_t __iomem *last_tx_bd; |
| 554 | 554 | ||
| 555 | last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t)); | 555 | last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t)); |
| 556 | 556 | ||
| 557 | /* get the current bd held in TBPTR and scan back from this point */ | 557 | /* get the current bd held in TBPTR and scan back from this point */ |
| 558 | recheck_bd = curr_tbptr = (cbd_t __iomem *) | 558 | recheck_bd = curr_tbptr = (cbd_t __iomem *) |
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 55c36230e176..40071dad1c57 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c | |||
| @@ -464,7 +464,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) | |||
| 464 | * address). Print error message but continue anyway. | 464 | * address). Print error message but continue anyway. |
| 465 | */ | 465 | */ |
| 466 | if ((void *)tbipa > priv->map + resource_size(&res) - 4) | 466 | if ((void *)tbipa > priv->map + resource_size(&res) - 4) |
| 467 | dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n", | 467 | dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n", |
| 468 | ((void *)tbipa - priv->map) + 4); | 468 | ((void *)tbipa - priv->map) + 4); |
| 469 | 469 | ||
| 470 | iowrite32be(be32_to_cpup(prop), tbipa); | 470 | iowrite32be(be32_to_cpup(prop), tbipa); |
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 7cf898455e60..3e233d924cce 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
| @@ -894,7 +894,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) | |||
| 894 | FSL_GIANFAR_DEV_HAS_VLAN | | 894 | FSL_GIANFAR_DEV_HAS_VLAN | |
| 895 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | | 895 | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | |
| 896 | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | | 896 | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | |
| 897 | FSL_GIANFAR_DEV_HAS_TIMER; | 897 | FSL_GIANFAR_DEV_HAS_TIMER | |
| 898 | FSL_GIANFAR_DEV_HAS_RX_FILER; | ||
| 898 | 899 | ||
| 899 | err = of_property_read_string(np, "phy-connection-type", &ctype); | 900 | err = of_property_read_string(np, "phy-connection-type", &ctype); |
| 900 | 901 | ||
| @@ -1396,8 +1397,9 @@ static int gfar_probe(struct platform_device *ofdev) | |||
| 1396 | priv->rx_queue[i]->rxic = DEFAULT_RXIC; | 1397 | priv->rx_queue[i]->rxic = DEFAULT_RXIC; |
| 1397 | } | 1398 | } |
| 1398 | 1399 | ||
| 1399 | /* always enable rx filer */ | 1400 | /* Always enable rx filer if available */ |
| 1400 | priv->rx_filer_enable = 1; | 1401 | priv->rx_filer_enable = |
| 1402 | (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0; | ||
| 1401 | /* Enable most messages by default */ | 1403 | /* Enable most messages by default */ |
| 1402 | priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; | 1404 | priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; |
| 1403 | /* use pritority h/w tx queue scheduling for single queue devices */ | 1405 | /* use pritority h/w tx queue scheduling for single queue devices */ |
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index f266b20f9ef5..cb77667971a7 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h | |||
| @@ -923,6 +923,7 @@ struct gfar { | |||
| 923 | #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 | 923 | #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 |
| 924 | #define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800 | 924 | #define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800 |
| 925 | #define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000 | 925 | #define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000 |
| 926 | #define FSL_GIANFAR_DEV_HAS_RX_FILER 0x00002000 | ||
| 926 | 927 | ||
| 927 | #if (MAXGROUPS == 2) | 928 | #if (MAXGROUPS == 2) |
| 928 | #define DEFAULT_MAPPING 0xAA | 929 | #define DEFAULT_MAPPING 0xAA |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 2a98eba660c0..b674414a4d72 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | |||
| @@ -1259,12 +1259,8 @@ int hns_dsaf_set_mac_uc_entry( | |||
| 1259 | if (MAC_IS_ALL_ZEROS(mac_entry->addr) || | 1259 | if (MAC_IS_ALL_ZEROS(mac_entry->addr) || |
| 1260 | MAC_IS_BROADCAST(mac_entry->addr) || | 1260 | MAC_IS_BROADCAST(mac_entry->addr) || |
| 1261 | MAC_IS_MULTICAST(mac_entry->addr)) { | 1261 | MAC_IS_MULTICAST(mac_entry->addr)) { |
| 1262 | dev_err(dsaf_dev->dev, | 1262 | dev_err(dsaf_dev->dev, "set_uc %s Mac %pM err!\n", |
| 1263 | "set_uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n", | 1263 | dsaf_dev->ae_dev.name, mac_entry->addr); |
| 1264 | dsaf_dev->ae_dev.name, mac_entry->addr[0], | ||
| 1265 | mac_entry->addr[1], mac_entry->addr[2], | ||
| 1266 | mac_entry->addr[3], mac_entry->addr[4], | ||
| 1267 | mac_entry->addr[5]); | ||
| 1268 | return -EINVAL; | 1264 | return -EINVAL; |
| 1269 | } | 1265 | } |
| 1270 | 1266 | ||
| @@ -1331,12 +1327,8 @@ int hns_dsaf_set_mac_mc_entry( | |||
| 1331 | 1327 | ||
| 1332 | /* mac addr check */ | 1328 | /* mac addr check */ |
| 1333 | if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { | 1329 | if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { |
| 1334 | dev_err(dsaf_dev->dev, | 1330 | dev_err(dsaf_dev->dev, "set uc %s Mac %pM err!\n", |
| 1335 | "set uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n", | 1331 | dsaf_dev->ae_dev.name, mac_entry->addr); |
| 1336 | dsaf_dev->ae_dev.name, mac_entry->addr[0], | ||
| 1337 | mac_entry->addr[1], mac_entry->addr[2], | ||
| 1338 | mac_entry->addr[3], | ||
| 1339 | mac_entry->addr[4], mac_entry->addr[5]); | ||
| 1340 | return -EINVAL; | 1332 | return -EINVAL; |
| 1341 | } | 1333 | } |
| 1342 | 1334 | ||
| @@ -1410,11 +1402,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
| 1410 | 1402 | ||
| 1411 | /*chechk mac addr */ | 1403 | /*chechk mac addr */ |
| 1412 | if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { | 1404 | if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { |
| 1413 | dev_err(dsaf_dev->dev, | 1405 | dev_err(dsaf_dev->dev, "set_entry failed,addr %pM!\n", |
| 1414 | "set_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n", | 1406 | mac_entry->addr); |
| 1415 | mac_entry->addr[0], mac_entry->addr[1], | ||
| 1416 | mac_entry->addr[2], mac_entry->addr[3], | ||
| 1417 | mac_entry->addr[4], mac_entry->addr[5]); | ||
| 1418 | return -EINVAL; | 1407 | return -EINVAL; |
| 1419 | } | 1408 | } |
| 1420 | 1409 | ||
| @@ -1497,9 +1486,8 @@ int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id, | |||
| 1497 | 1486 | ||
| 1498 | /*check mac addr */ | 1487 | /*check mac addr */ |
| 1499 | if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) { | 1488 | if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) { |
| 1500 | dev_err(dsaf_dev->dev, | 1489 | dev_err(dsaf_dev->dev, "del_entry failed,addr %pM!\n", |
| 1501 | "del_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n", | 1490 | addr); |
| 1502 | addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); | ||
| 1503 | return -EINVAL; | 1491 | return -EINVAL; |
| 1504 | } | 1492 | } |
| 1505 | 1493 | ||
| @@ -1563,11 +1551,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, | |||
| 1563 | 1551 | ||
| 1564 | /*check mac addr */ | 1552 | /*check mac addr */ |
| 1565 | if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { | 1553 | if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { |
| 1566 | dev_err(dsaf_dev->dev, | 1554 | dev_err(dsaf_dev->dev, "del_port failed, addr %pM!\n", |
| 1567 | "del_port failed, addr %02x:%02x:%02x:%02x:%02x:%02x!\n", | 1555 | mac_entry->addr); |
| 1568 | mac_entry->addr[0], mac_entry->addr[1], | ||
| 1569 | mac_entry->addr[2], mac_entry->addr[3], | ||
| 1570 | mac_entry->addr[4], mac_entry->addr[5]); | ||
| 1571 | return -EINVAL; | 1556 | return -EINVAL; |
| 1572 | } | 1557 | } |
| 1573 | 1558 | ||
| @@ -1644,11 +1629,8 @@ int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev, | |||
| 1644 | /* check macaddr */ | 1629 | /* check macaddr */ |
| 1645 | if (MAC_IS_ALL_ZEROS(mac_entry->addr) || | 1630 | if (MAC_IS_ALL_ZEROS(mac_entry->addr) || |
| 1646 | MAC_IS_BROADCAST(mac_entry->addr)) { | 1631 | MAC_IS_BROADCAST(mac_entry->addr)) { |
| 1647 | dev_err(dsaf_dev->dev, | 1632 | dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n", |
| 1648 | "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n", | 1633 | mac_entry->addr); |
| 1649 | mac_entry->addr[0], mac_entry->addr[1], | ||
| 1650 | mac_entry->addr[2], mac_entry->addr[3], | ||
| 1651 | mac_entry->addr[4], mac_entry->addr[5]); | ||
| 1652 | return -EINVAL; | 1634 | return -EINVAL; |
| 1653 | } | 1635 | } |
| 1654 | 1636 | ||
| @@ -1695,11 +1677,8 @@ int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev, | |||
| 1695 | /*check mac addr */ | 1677 | /*check mac addr */ |
| 1696 | if (MAC_IS_ALL_ZEROS(mac_entry->addr) || | 1678 | if (MAC_IS_ALL_ZEROS(mac_entry->addr) || |
| 1697 | MAC_IS_BROADCAST(mac_entry->addr)) { | 1679 | MAC_IS_BROADCAST(mac_entry->addr)) { |
| 1698 | dev_err(dsaf_dev->dev, | 1680 | dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n", |
| 1699 | "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n", | 1681 | mac_entry->addr); |
| 1700 | mac_entry->addr[0], mac_entry->addr[1], | ||
| 1701 | mac_entry->addr[2], mac_entry->addr[3], | ||
| 1702 | mac_entry->addr[4], mac_entry->addr[5]); | ||
| 1703 | return -EINVAL; | 1682 | return -EINVAL; |
| 1704 | } | 1683 | } |
| 1705 | 1684 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index b475e1bf2e6f..bdbd80423b17 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | |||
| @@ -898,7 +898,7 @@ | |||
| 898 | #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 | 898 | #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 |
| 899 | #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 | 899 | #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 |
| 900 | 900 | ||
| 901 | static inline void dsaf_write_reg(void *base, u32 reg, u32 value) | 901 | static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) |
| 902 | { | 902 | { |
| 903 | u8 __iomem *reg_addr = ACCESS_ONCE(base); | 903 | u8 __iomem *reg_addr = ACCESS_ONCE(base); |
| 904 | 904 | ||
| @@ -908,7 +908,7 @@ static inline void dsaf_write_reg(void *base, u32 reg, u32 value) | |||
| 908 | #define dsaf_write_dev(a, reg, value) \ | 908 | #define dsaf_write_dev(a, reg, value) \ |
| 909 | dsaf_write_reg((a)->io_base, (reg), (value)) | 909 | dsaf_write_reg((a)->io_base, (reg), (value)) |
| 910 | 910 | ||
| 911 | static inline u32 dsaf_read_reg(u8 *base, u32 reg) | 911 | static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg) |
| 912 | { | 912 | { |
| 913 | u8 __iomem *reg_addr = ACCESS_ONCE(base); | 913 | u8 __iomem *reg_addr = ACCESS_ONCE(base); |
| 914 | 914 | ||
| @@ -927,8 +927,8 @@ static inline u32 dsaf_read_reg(u8 *base, u32 reg) | |||
| 927 | #define dsaf_set_bit(origin, shift, val) \ | 927 | #define dsaf_set_bit(origin, shift, val) \ |
| 928 | dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) | 928 | dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) |
| 929 | 929 | ||
| 930 | static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, | 930 | static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, |
| 931 | u32 val) | 931 | u32 shift, u32 val) |
| 932 | { | 932 | { |
| 933 | u32 origin = dsaf_read_reg(base, reg); | 933 | u32 origin = dsaf_read_reg(base, reg); |
| 934 | 934 | ||
| @@ -947,7 +947,8 @@ static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, | |||
| 947 | #define dsaf_get_bit(origin, shift) \ | 947 | #define dsaf_get_bit(origin, shift) \ |
| 948 | dsaf_get_field((origin), (1ull << (shift)), (shift)) | 948 | dsaf_get_field((origin), (1ull << (shift)), (shift)) |
| 949 | 949 | ||
| 950 | static inline u32 dsaf_get_reg_field(void *base, u32 reg, u32 mask, u32 shift) | 950 | static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, |
| 951 | u32 shift) | ||
| 951 | { | 952 | { |
| 952 | u32 origin; | 953 | u32 origin; |
| 953 | 954 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 0ff8f01e57ee..1fd5ea82a9bc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c | |||
| @@ -567,10 +567,6 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) | |||
| 567 | goto init_adminq_exit; | 567 | goto init_adminq_exit; |
| 568 | } | 568 | } |
| 569 | 569 | ||
| 570 | /* initialize locks */ | ||
| 571 | mutex_init(&hw->aq.asq_mutex); | ||
| 572 | mutex_init(&hw->aq.arq_mutex); | ||
| 573 | |||
| 574 | /* Set up register offsets */ | 570 | /* Set up register offsets */ |
| 575 | i40e_adminq_init_regs(hw); | 571 | i40e_adminq_init_regs(hw); |
| 576 | 572 | ||
| @@ -664,8 +660,6 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw) | |||
| 664 | i40e_shutdown_asq(hw); | 660 | i40e_shutdown_asq(hw); |
| 665 | i40e_shutdown_arq(hw); | 661 | i40e_shutdown_arq(hw); |
| 666 | 662 | ||
| 667 | /* destroy the locks */ | ||
| 668 | |||
| 669 | if (hw->nvm_buff.va) | 663 | if (hw->nvm_buff.va) |
| 670 | i40e_free_virt_mem(hw, &hw->nvm_buff); | 664 | i40e_free_virt_mem(hw, &hw->nvm_buff); |
| 671 | 665 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b825f978d441..4a9873ec28c7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
| @@ -10295,6 +10295,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 10295 | /* set up a default setting for link flow control */ | 10295 | /* set up a default setting for link flow control */ |
| 10296 | pf->hw.fc.requested_mode = I40E_FC_NONE; | 10296 | pf->hw.fc.requested_mode = I40E_FC_NONE; |
| 10297 | 10297 | ||
| 10298 | /* set up the locks for the AQ, do this only once in probe | ||
| 10299 | * and destroy them only once in remove | ||
| 10300 | */ | ||
| 10301 | mutex_init(&hw->aq.asq_mutex); | ||
| 10302 | mutex_init(&hw->aq.arq_mutex); | ||
| 10303 | |||
| 10298 | err = i40e_init_adminq(hw); | 10304 | err = i40e_init_adminq(hw); |
| 10299 | 10305 | ||
| 10300 | /* provide nvm, fw, api versions */ | 10306 | /* provide nvm, fw, api versions */ |
| @@ -10697,7 +10703,6 @@ static void i40e_remove(struct pci_dev *pdev) | |||
| 10697 | set_bit(__I40E_DOWN, &pf->state); | 10703 | set_bit(__I40E_DOWN, &pf->state); |
| 10698 | del_timer_sync(&pf->service_timer); | 10704 | del_timer_sync(&pf->service_timer); |
| 10699 | cancel_work_sync(&pf->service_task); | 10705 | cancel_work_sync(&pf->service_task); |
| 10700 | i40e_fdir_teardown(pf); | ||
| 10701 | 10706 | ||
| 10702 | if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { | 10707 | if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { |
| 10703 | i40e_free_vfs(pf); | 10708 | i40e_free_vfs(pf); |
| @@ -10740,6 +10745,10 @@ static void i40e_remove(struct pci_dev *pdev) | |||
| 10740 | "Failed to destroy the Admin Queue resources: %d\n", | 10745 | "Failed to destroy the Admin Queue resources: %d\n", |
| 10741 | ret_code); | 10746 | ret_code); |
| 10742 | 10747 | ||
| 10748 | /* destroy the locks only once, here */ | ||
| 10749 | mutex_destroy(&hw->aq.arq_mutex); | ||
| 10750 | mutex_destroy(&hw->aq.asq_mutex); | ||
| 10751 | |||
| 10743 | /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ | 10752 | /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ |
| 10744 | i40e_clear_interrupt_scheme(pf); | 10753 | i40e_clear_interrupt_scheme(pf); |
| 10745 | for (i = 0; i < pf->num_alloc_vsi; i++) { | 10754 | for (i = 0; i < pf->num_alloc_vsi; i++) { |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index fd123ca60761..3f65e39b3fe4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c | |||
| @@ -551,10 +551,6 @@ i40e_status i40evf_init_adminq(struct i40e_hw *hw) | |||
| 551 | goto init_adminq_exit; | 551 | goto init_adminq_exit; |
| 552 | } | 552 | } |
| 553 | 553 | ||
| 554 | /* initialize locks */ | ||
| 555 | mutex_init(&hw->aq.asq_mutex); | ||
| 556 | mutex_init(&hw->aq.arq_mutex); | ||
| 557 | |||
| 558 | /* Set up register offsets */ | 554 | /* Set up register offsets */ |
| 559 | i40e_adminq_init_regs(hw); | 555 | i40e_adminq_init_regs(hw); |
| 560 | 556 | ||
| @@ -596,8 +592,6 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw) | |||
| 596 | i40e_shutdown_asq(hw); | 592 | i40e_shutdown_asq(hw); |
| 597 | i40e_shutdown_arq(hw); | 593 | i40e_shutdown_arq(hw); |
| 598 | 594 | ||
| 599 | /* destroy the locks */ | ||
| 600 | |||
| 601 | if (hw->nvm_buff.va) | 595 | if (hw->nvm_buff.va) |
| 602 | i40e_free_virt_mem(hw, &hw->nvm_buff); | 596 | i40e_free_virt_mem(hw, &hw->nvm_buff); |
| 603 | 597 | ||
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index d962164dfb0f..99d2cffae0cd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c | |||
| @@ -2476,6 +2476,12 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 2476 | hw->bus.device = PCI_SLOT(pdev->devfn); | 2476 | hw->bus.device = PCI_SLOT(pdev->devfn); |
| 2477 | hw->bus.func = PCI_FUNC(pdev->devfn); | 2477 | hw->bus.func = PCI_FUNC(pdev->devfn); |
| 2478 | 2478 | ||
| 2479 | /* set up the locks for the AQ, do this only once in probe | ||
| 2480 | * and destroy them only once in remove | ||
| 2481 | */ | ||
| 2482 | mutex_init(&hw->aq.asq_mutex); | ||
| 2483 | mutex_init(&hw->aq.arq_mutex); | ||
| 2484 | |||
| 2479 | INIT_LIST_HEAD(&adapter->mac_filter_list); | 2485 | INIT_LIST_HEAD(&adapter->mac_filter_list); |
| 2480 | INIT_LIST_HEAD(&adapter->vlan_filter_list); | 2486 | INIT_LIST_HEAD(&adapter->vlan_filter_list); |
| 2481 | 2487 | ||
| @@ -2629,6 +2635,10 @@ static void i40evf_remove(struct pci_dev *pdev) | |||
| 2629 | if (hw->aq.asq.count) | 2635 | if (hw->aq.asq.count) |
| 2630 | i40evf_shutdown_adminq(hw); | 2636 | i40evf_shutdown_adminq(hw); |
| 2631 | 2637 | ||
| 2638 | /* destroy the locks only once, here */ | ||
| 2639 | mutex_destroy(&hw->aq.arq_mutex); | ||
| 2640 | mutex_destroy(&hw->aq.asq_mutex); | ||
| 2641 | |||
| 2632 | iounmap(hw->hw_addr); | 2642 | iounmap(hw->hw_addr); |
| 2633 | pci_release_regions(pdev); | 2643 | pci_release_regions(pdev); |
| 2634 | 2644 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 47395ff5d908..aed8d029b23d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
| @@ -7920,6 +7920,9 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) | |||
| 7920 | */ | 7920 | */ |
| 7921 | if (netif_running(dev)) | 7921 | if (netif_running(dev)) |
| 7922 | ixgbe_close(dev); | 7922 | ixgbe_close(dev); |
| 7923 | else | ||
| 7924 | ixgbe_reset(adapter); | ||
| 7925 | |||
| 7923 | ixgbe_clear_interrupt_scheme(adapter); | 7926 | ixgbe_clear_interrupt_scheme(adapter); |
| 7924 | 7927 | ||
| 7925 | #ifdef CONFIG_IXGBE_DCB | 7928 | #ifdef CONFIG_IXGBE_DCB |
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index d9884fd15b45..a4beccf1fd46 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
| @@ -3413,16 +3413,23 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, | |||
| 3413 | } | 3413 | } |
| 3414 | 3414 | ||
| 3415 | /* Free all buffers from the pool */ | 3415 | /* Free all buffers from the pool */ |
| 3416 | static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) | 3416 | static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, |
| 3417 | struct mvpp2_bm_pool *bm_pool) | ||
| 3417 | { | 3418 | { |
| 3418 | int i; | 3419 | int i; |
| 3419 | 3420 | ||
| 3420 | for (i = 0; i < bm_pool->buf_num; i++) { | 3421 | for (i = 0; i < bm_pool->buf_num; i++) { |
| 3422 | dma_addr_t buf_phys_addr; | ||
| 3421 | u32 vaddr; | 3423 | u32 vaddr; |
| 3422 | 3424 | ||
| 3423 | /* Get buffer virtual address (indirect access) */ | 3425 | /* Get buffer virtual address (indirect access) */ |
| 3424 | mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); | 3426 | buf_phys_addr = mvpp2_read(priv, |
| 3427 | MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); | ||
| 3425 | vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG); | 3428 | vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG); |
| 3429 | |||
| 3430 | dma_unmap_single(dev, buf_phys_addr, | ||
| 3431 | bm_pool->buf_size, DMA_FROM_DEVICE); | ||
| 3432 | |||
| 3426 | if (!vaddr) | 3433 | if (!vaddr) |
| 3427 | break; | 3434 | break; |
| 3428 | dev_kfree_skb_any((struct sk_buff *)vaddr); | 3435 | dev_kfree_skb_any((struct sk_buff *)vaddr); |
| @@ -3439,7 +3446,7 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev, | |||
| 3439 | { | 3446 | { |
| 3440 | u32 val; | 3447 | u32 val; |
| 3441 | 3448 | ||
| 3442 | mvpp2_bm_bufs_free(priv, bm_pool); | 3449 | mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool); |
| 3443 | if (bm_pool->buf_num) { | 3450 | if (bm_pool->buf_num) { |
| 3444 | WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); | 3451 | WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); |
| 3445 | return 0; | 3452 | return 0; |
| @@ -3692,7 +3699,8 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, | |||
| 3692 | MVPP2_BM_LONG_BUF_NUM : | 3699 | MVPP2_BM_LONG_BUF_NUM : |
| 3693 | MVPP2_BM_SHORT_BUF_NUM; | 3700 | MVPP2_BM_SHORT_BUF_NUM; |
| 3694 | else | 3701 | else |
| 3695 | mvpp2_bm_bufs_free(port->priv, new_pool); | 3702 | mvpp2_bm_bufs_free(port->dev->dev.parent, |
| 3703 | port->priv, new_pool); | ||
| 3696 | 3704 | ||
| 3697 | new_pool->pkt_size = pkt_size; | 3705 | new_pool->pkt_size = pkt_size; |
| 3698 | 3706 | ||
| @@ -3756,7 +3764,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) | |||
| 3756 | int pkt_size = MVPP2_RX_PKT_SIZE(mtu); | 3764 | int pkt_size = MVPP2_RX_PKT_SIZE(mtu); |
| 3757 | 3765 | ||
| 3758 | /* Update BM pool with new buffer size */ | 3766 | /* Update BM pool with new buffer size */ |
| 3759 | mvpp2_bm_bufs_free(port->priv, port_pool); | 3767 | mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool); |
| 3760 | if (port_pool->buf_num) { | 3768 | if (port_pool->buf_num) { |
| 3761 | WARN(1, "cannot free all buffers in pool %d\n", port_pool->id); | 3769 | WARN(1, "cannot free all buffers in pool %d\n", port_pool->id); |
| 3762 | return -EIO; | 3770 | return -EIO; |
| @@ -4401,11 +4409,10 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, | |||
| 4401 | 4409 | ||
| 4402 | mvpp2_txq_inc_get(txq_pcpu); | 4410 | mvpp2_txq_inc_get(txq_pcpu); |
| 4403 | 4411 | ||
| 4404 | if (!skb) | ||
| 4405 | continue; | ||
| 4406 | |||
| 4407 | dma_unmap_single(port->dev->dev.parent, buf_phys_addr, | 4412 | dma_unmap_single(port->dev->dev.parent, buf_phys_addr, |
| 4408 | skb_headlen(skb), DMA_TO_DEVICE); | 4413 | skb_headlen(skb), DMA_TO_DEVICE); |
| 4414 | if (!skb) | ||
| 4415 | continue; | ||
| 4409 | dev_kfree_skb_any(skb); | 4416 | dev_kfree_skb_any(skb); |
| 4410 | } | 4417 | } |
| 4411 | } | 4418 | } |
| @@ -5092,7 +5099,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
| 5092 | struct mvpp2_rx_queue *rxq) | 5099 | struct mvpp2_rx_queue *rxq) |
| 5093 | { | 5100 | { |
| 5094 | struct net_device *dev = port->dev; | 5101 | struct net_device *dev = port->dev; |
| 5095 | int rx_received, rx_filled, i; | 5102 | int rx_received; |
| 5103 | int rx_done = 0; | ||
| 5096 | u32 rcvd_pkts = 0; | 5104 | u32 rcvd_pkts = 0; |
| 5097 | u32 rcvd_bytes = 0; | 5105 | u32 rcvd_bytes = 0; |
| 5098 | 5106 | ||
| @@ -5101,17 +5109,18 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
| 5101 | if (rx_todo > rx_received) | 5109 | if (rx_todo > rx_received) |
| 5102 | rx_todo = rx_received; | 5110 | rx_todo = rx_received; |
| 5103 | 5111 | ||
| 5104 | rx_filled = 0; | 5112 | while (rx_done < rx_todo) { |
| 5105 | for (i = 0; i < rx_todo; i++) { | ||
| 5106 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); | 5113 | struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); |
| 5107 | struct mvpp2_bm_pool *bm_pool; | 5114 | struct mvpp2_bm_pool *bm_pool; |
| 5108 | struct sk_buff *skb; | 5115 | struct sk_buff *skb; |
| 5116 | dma_addr_t phys_addr; | ||
| 5109 | u32 bm, rx_status; | 5117 | u32 bm, rx_status; |
| 5110 | int pool, rx_bytes, err; | 5118 | int pool, rx_bytes, err; |
| 5111 | 5119 | ||
| 5112 | rx_filled++; | 5120 | rx_done++; |
| 5113 | rx_status = rx_desc->status; | 5121 | rx_status = rx_desc->status; |
| 5114 | rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE; | 5122 | rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE; |
| 5123 | phys_addr = rx_desc->buf_phys_addr; | ||
| 5115 | 5124 | ||
| 5116 | bm = mvpp2_bm_cookie_build(rx_desc); | 5125 | bm = mvpp2_bm_cookie_build(rx_desc); |
| 5117 | pool = mvpp2_bm_cookie_pool_get(bm); | 5126 | pool = mvpp2_bm_cookie_pool_get(bm); |
| @@ -5128,8 +5137,10 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
| 5128 | * comprised by the RX descriptor. | 5137 | * comprised by the RX descriptor. |
| 5129 | */ | 5138 | */ |
| 5130 | if (rx_status & MVPP2_RXD_ERR_SUMMARY) { | 5139 | if (rx_status & MVPP2_RXD_ERR_SUMMARY) { |
| 5140 | err_drop_frame: | ||
| 5131 | dev->stats.rx_errors++; | 5141 | dev->stats.rx_errors++; |
| 5132 | mvpp2_rx_error(port, rx_desc); | 5142 | mvpp2_rx_error(port, rx_desc); |
| 5143 | /* Return the buffer to the pool */ | ||
| 5133 | mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, | 5144 | mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, |
| 5134 | rx_desc->buf_cookie); | 5145 | rx_desc->buf_cookie); |
| 5135 | continue; | 5146 | continue; |
| @@ -5137,6 +5148,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
| 5137 | 5148 | ||
| 5138 | skb = (struct sk_buff *)rx_desc->buf_cookie; | 5149 | skb = (struct sk_buff *)rx_desc->buf_cookie; |
| 5139 | 5150 | ||
| 5151 | err = mvpp2_rx_refill(port, bm_pool, bm, 0); | ||
| 5152 | if (err) { | ||
| 5153 | netdev_err(port->dev, "failed to refill BM pools\n"); | ||
| 5154 | goto err_drop_frame; | ||
| 5155 | } | ||
| 5156 | |||
| 5157 | dma_unmap_single(dev->dev.parent, phys_addr, | ||
| 5158 | bm_pool->buf_size, DMA_FROM_DEVICE); | ||
| 5159 | |||
| 5140 | rcvd_pkts++; | 5160 | rcvd_pkts++; |
| 5141 | rcvd_bytes += rx_bytes; | 5161 | rcvd_bytes += rx_bytes; |
| 5142 | atomic_inc(&bm_pool->in_use); | 5162 | atomic_inc(&bm_pool->in_use); |
| @@ -5147,12 +5167,6 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
| 5147 | mvpp2_rx_csum(port, rx_status, skb); | 5167 | mvpp2_rx_csum(port, rx_status, skb); |
| 5148 | 5168 | ||
| 5149 | napi_gro_receive(&port->napi, skb); | 5169 | napi_gro_receive(&port->napi, skb); |
| 5150 | |||
| 5151 | err = mvpp2_rx_refill(port, bm_pool, bm, 0); | ||
| 5152 | if (err) { | ||
| 5153 | netdev_err(port->dev, "failed to refill BM pools\n"); | ||
| 5154 | rx_filled--; | ||
| 5155 | } | ||
| 5156 | } | 5170 | } |
| 5157 | 5171 | ||
| 5158 | if (rcvd_pkts) { | 5172 | if (rcvd_pkts) { |
| @@ -5166,7 +5180,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, | |||
| 5166 | 5180 | ||
| 5167 | /* Update Rx queue management counters */ | 5181 | /* Update Rx queue management counters */ |
| 5168 | wmb(); | 5182 | wmb(); |
| 5169 | mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled); | 5183 | mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); |
| 5170 | 5184 | ||
| 5171 | return rx_todo; | 5185 | return rx_todo; |
| 5172 | } | 5186 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 6fec3e993d02..cad6c44df91c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
| @@ -4306,9 +4306,10 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, | |||
| 4306 | return -EOPNOTSUPP; | 4306 | return -EOPNOTSUPP; |
| 4307 | 4307 | ||
| 4308 | ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; | 4308 | ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; |
| 4309 | ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port); | 4309 | err = mlx4_slave_convert_port(dev, slave, ctrl->port); |
| 4310 | if (ctrl->port <= 0) | 4310 | if (err <= 0) |
| 4311 | return -EINVAL; | 4311 | return -EINVAL; |
| 4312 | ctrl->port = err; | ||
| 4312 | qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; | 4313 | qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; |
| 4313 | err = get_res(dev, slave, qpn, RES_QP, &rqp); | 4314 | err = get_res(dev, slave, qpn, RES_QP, &rqp); |
| 4314 | if (err) { | 4315 | if (err) { |
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index ac17d8669b1a..1292c360390c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h | |||
| @@ -299,6 +299,7 @@ struct qed_hwfn { | |||
| 299 | 299 | ||
| 300 | /* Flag indicating whether interrupts are enabled or not*/ | 300 | /* Flag indicating whether interrupts are enabled or not*/ |
| 301 | bool b_int_enabled; | 301 | bool b_int_enabled; |
| 302 | bool b_int_requested; | ||
| 302 | 303 | ||
| 303 | struct qed_mcp_info *mcp_info; | 304 | struct qed_mcp_info *mcp_info; |
| 304 | 305 | ||
| @@ -491,6 +492,8 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn, | |||
| 491 | u32 input_len, u8 *input_buf, | 492 | u32 input_len, u8 *input_buf, |
| 492 | u32 max_size, u8 *unzip_buf); | 493 | u32 max_size, u8 *unzip_buf); |
| 493 | 494 | ||
| 495 | int qed_slowpath_irq_req(struct qed_hwfn *hwfn); | ||
| 496 | |||
| 494 | #define QED_ETH_INTERFACE_VERSION 300 | 497 | #define QED_ETH_INTERFACE_VERSION 300 |
| 495 | 498 | ||
| 496 | #endif /* _QED_H */ | 499 | #endif /* _QED_H */ |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 803b190ccada..817bbd5476ff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c | |||
| @@ -1385,52 +1385,63 @@ err0: | |||
| 1385 | return rc; | 1385 | return rc; |
| 1386 | } | 1386 | } |
| 1387 | 1387 | ||
| 1388 | static u32 qed_hw_bar_size(struct qed_dev *cdev, | 1388 | static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, |
| 1389 | u8 bar_id) | 1389 | u8 bar_id) |
| 1390 | { | 1390 | { |
| 1391 | u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0); | 1391 | u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE |
| 1392 | : PGLUE_B_REG_PF_BAR1_SIZE); | ||
| 1393 | u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); | ||
| 1392 | 1394 | ||
| 1393 | return size / cdev->num_hwfns; | 1395 | /* Get the BAR size(in KB) from hardware given val */ |
| 1396 | return 1 << (val + 15); | ||
| 1394 | } | 1397 | } |
| 1395 | 1398 | ||
| 1396 | int qed_hw_prepare(struct qed_dev *cdev, | 1399 | int qed_hw_prepare(struct qed_dev *cdev, |
| 1397 | int personality) | 1400 | int personality) |
| 1398 | { | 1401 | { |
| 1399 | int rc, i; | 1402 | struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); |
| 1403 | int rc; | ||
| 1400 | 1404 | ||
| 1401 | /* Store the precompiled init data ptrs */ | 1405 | /* Store the precompiled init data ptrs */ |
| 1402 | qed_init_iro_array(cdev); | 1406 | qed_init_iro_array(cdev); |
| 1403 | 1407 | ||
| 1404 | /* Initialize the first hwfn - will learn number of hwfns */ | 1408 | /* Initialize the first hwfn - will learn number of hwfns */ |
| 1405 | rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview, | 1409 | rc = qed_hw_prepare_single(p_hwfn, |
| 1410 | cdev->regview, | ||
| 1406 | cdev->doorbells, personality); | 1411 | cdev->doorbells, personality); |
| 1407 | if (rc) | 1412 | if (rc) |
| 1408 | return rc; | 1413 | return rc; |
| 1409 | 1414 | ||
| 1410 | personality = cdev->hwfns[0].hw_info.personality; | 1415 | personality = p_hwfn->hw_info.personality; |
| 1411 | 1416 | ||
| 1412 | /* Initialize the rest of the hwfns */ | 1417 | /* Initialize the rest of the hwfns */ |
| 1413 | for (i = 1; i < cdev->num_hwfns; i++) { | 1418 | if (cdev->num_hwfns > 1) { |
| 1414 | void __iomem *p_regview, *p_doorbell; | 1419 | void __iomem *p_regview, *p_doorbell; |
| 1420 | u8 __iomem *addr; | ||
| 1421 | |||
| 1422 | /* adjust bar offset for second engine */ | ||
| 1423 | addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2; | ||
| 1424 | p_regview = addr; | ||
| 1415 | 1425 | ||
| 1416 | p_regview = cdev->regview + | 1426 | /* adjust doorbell bar offset for second engine */ |
| 1417 | i * qed_hw_bar_size(cdev, 0); | 1427 | addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2; |
| 1418 | p_doorbell = cdev->doorbells + | 1428 | p_doorbell = addr; |
| 1419 | i * qed_hw_bar_size(cdev, 1); | 1429 | |
| 1420 | rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview, | 1430 | /* prepare second hw function */ |
| 1431 | rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview, | ||
| 1421 | p_doorbell, personality); | 1432 | p_doorbell, personality); |
| 1433 | |||
| 1434 | /* in case of error, need to free the previously | ||
| 1435 | * initiliazed hwfn 0. | ||
| 1436 | */ | ||
| 1422 | if (rc) { | 1437 | if (rc) { |
| 1423 | /* Cleanup previously initialized hwfns */ | 1438 | qed_init_free(p_hwfn); |
| 1424 | while (--i >= 0) { | 1439 | qed_mcp_free(p_hwfn); |
| 1425 | qed_init_free(&cdev->hwfns[i]); | 1440 | qed_hw_hwfn_free(p_hwfn); |
| 1426 | qed_mcp_free(&cdev->hwfns[i]); | ||
| 1427 | qed_hw_hwfn_free(&cdev->hwfns[i]); | ||
| 1428 | } | ||
| 1429 | return rc; | ||
| 1430 | } | 1441 | } |
| 1431 | } | 1442 | } |
| 1432 | 1443 | ||
| 1433 | return 0; | 1444 | return rc; |
| 1434 | } | 1445 | } |
| 1435 | 1446 | ||
| 1436 | void qed_hw_remove(struct qed_dev *cdev) | 1447 | void qed_hw_remove(struct qed_dev *cdev) |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index de50e84902af..9cc9d62c1fec 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c | |||
| @@ -783,22 +783,16 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, | |||
| 783 | qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); | 783 | qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); |
| 784 | } | 784 | } |
| 785 | 785 | ||
| 786 | void qed_int_igu_enable(struct qed_hwfn *p_hwfn, | 786 | int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
| 787 | struct qed_ptt *p_ptt, | 787 | enum qed_int_mode int_mode) |
| 788 | enum qed_int_mode int_mode) | ||
| 789 | { | 788 | { |
| 790 | int i; | 789 | int rc, i; |
| 791 | |||
| 792 | p_hwfn->b_int_enabled = 1; | ||
| 793 | 790 | ||
| 794 | /* Mask non-link attentions */ | 791 | /* Mask non-link attentions */ |
| 795 | for (i = 0; i < 9; i++) | 792 | for (i = 0; i < 9; i++) |
| 796 | qed_wr(p_hwfn, p_ptt, | 793 | qed_wr(p_hwfn, p_ptt, |
| 797 | MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0); | 794 | MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0); |
| 798 | 795 | ||
| 799 | /* Enable interrupt Generation */ | ||
| 800 | qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); | ||
| 801 | |||
| 802 | /* Configure AEU signal change to produce attentions for link */ | 796 | /* Configure AEU signal change to produce attentions for link */ |
| 803 | qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); | 797 | qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); |
| 804 | qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); | 798 | qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); |
| @@ -808,6 +802,19 @@ void qed_int_igu_enable(struct qed_hwfn *p_hwfn, | |||
| 808 | 802 | ||
| 809 | /* Unmask AEU signals toward IGU */ | 803 | /* Unmask AEU signals toward IGU */ |
| 810 | qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); | 804 | qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); |
| 805 | if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { | ||
| 806 | rc = qed_slowpath_irq_req(p_hwfn); | ||
| 807 | if (rc != 0) { | ||
| 808 | DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); | ||
| 809 | return -EINVAL; | ||
| 810 | } | ||
| 811 | p_hwfn->b_int_requested = true; | ||
| 812 | } | ||
| 813 | /* Enable interrupt Generation */ | ||
| 814 | qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); | ||
| 815 | p_hwfn->b_int_enabled = 1; | ||
| 816 | |||
| 817 | return rc; | ||
| 811 | } | 818 | } |
| 812 | 819 | ||
| 813 | void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, | 820 | void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, |
| @@ -1127,3 +1134,11 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, | |||
| 1127 | 1134 | ||
| 1128 | return info->igu_sb_cnt; | 1135 | return info->igu_sb_cnt; |
| 1129 | } | 1136 | } |
| 1137 | |||
| 1138 | void qed_int_disable_post_isr_release(struct qed_dev *cdev) | ||
| 1139 | { | ||
| 1140 | int i; | ||
| 1141 | |||
| 1142 | for_each_hwfn(cdev, i) | ||
| 1143 | cdev->hwfns[i].b_int_requested = false; | ||
| 1144 | } | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 16b57518e706..51e0b09a7f47 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h | |||
| @@ -169,10 +169,14 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, | |||
| 169 | int *p_iov_blks); | 169 | int *p_iov_blks); |
| 170 | 170 | ||
| 171 | /** | 171 | /** |
| 172 | * @file | 172 | * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR |
| 173 | * release. The API needs to be called after releasing all slowpath IRQs | ||
| 174 | * of the device. | ||
| 175 | * | ||
| 176 | * @param cdev | ||
| 173 | * | 177 | * |
| 174 | * @brief Interrupt handler | ||
| 175 | */ | 178 | */ |
| 179 | void qed_int_disable_post_isr_release(struct qed_dev *cdev); | ||
| 176 | 180 | ||
| 177 | #define QED_CAU_DEF_RX_TIMER_RES 0 | 181 | #define QED_CAU_DEF_RX_TIMER_RES 0 |
| 178 | #define QED_CAU_DEF_TX_TIMER_RES 0 | 182 | #define QED_CAU_DEF_TX_TIMER_RES 0 |
| @@ -366,10 +370,11 @@ void qed_int_setup(struct qed_hwfn *p_hwfn, | |||
| 366 | * @param p_hwfn | 370 | * @param p_hwfn |
| 367 | * @param p_ptt | 371 | * @param p_ptt |
| 368 | * @param int_mode | 372 | * @param int_mode |
| 373 | * | ||
| 374 | * @return int | ||
| 369 | */ | 375 | */ |
| 370 | void qed_int_igu_enable(struct qed_hwfn *p_hwfn, | 376 | int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, |
| 371 | struct qed_ptt *p_ptt, | 377 | enum qed_int_mode int_mode); |
| 372 | enum qed_int_mode int_mode); | ||
| 373 | 378 | ||
| 374 | /** | 379 | /** |
| 375 | * @brief - Initialize CAU status block entry | 380 | * @brief - Initialize CAU status block entry |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 947c7af72b25..174f7341c5c3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
| @@ -476,41 +476,22 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance) | |||
| 476 | return rc; | 476 | return rc; |
| 477 | } | 477 | } |
| 478 | 478 | ||
| 479 | static int qed_slowpath_irq_req(struct qed_dev *cdev) | 479 | int qed_slowpath_irq_req(struct qed_hwfn *hwfn) |
| 480 | { | 480 | { |
| 481 | int i = 0, rc = 0; | 481 | struct qed_dev *cdev = hwfn->cdev; |
| 482 | int rc = 0; | ||
| 483 | u8 id; | ||
| 482 | 484 | ||
| 483 | if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { | 485 | if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { |
| 484 | /* Request all the slowpath MSI-X vectors */ | 486 | id = hwfn->my_id; |
| 485 | for (i = 0; i < cdev->num_hwfns; i++) { | 487 | snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", |
| 486 | snprintf(cdev->hwfns[i].name, NAME_SIZE, | 488 | id, cdev->pdev->bus->number, |
| 487 | "sp-%d-%02x:%02x.%02x", | 489 | PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); |
| 488 | i, cdev->pdev->bus->number, | 490 | rc = request_irq(cdev->int_params.msix_table[id].vector, |
| 489 | PCI_SLOT(cdev->pdev->devfn), | 491 | qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc); |
| 490 | cdev->hwfns[i].abs_pf_id); | 492 | if (!rc) |
| 491 | 493 | DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), | |
| 492 | rc = request_irq(cdev->int_params.msix_table[i].vector, | ||
| 493 | qed_msix_sp_int, 0, | ||
| 494 | cdev->hwfns[i].name, | ||
| 495 | cdev->hwfns[i].sp_dpc); | ||
| 496 | if (rc) | ||
| 497 | break; | ||
| 498 | |||
| 499 | DP_VERBOSE(&cdev->hwfns[i], | ||
| 500 | (NETIF_MSG_INTR | QED_MSG_SP), | ||
| 501 | "Requested slowpath MSI-X\n"); | 494 | "Requested slowpath MSI-X\n"); |
| 502 | } | ||
| 503 | |||
| 504 | if (i != cdev->num_hwfns) { | ||
| 505 | /* Free already request MSI-X vectors */ | ||
| 506 | for (i--; i >= 0; i--) { | ||
| 507 | unsigned int vec = | ||
| 508 | cdev->int_params.msix_table[i].vector; | ||
| 509 | synchronize_irq(vec); | ||
| 510 | free_irq(cdev->int_params.msix_table[i].vector, | ||
| 511 | cdev->hwfns[i].sp_dpc); | ||
| 512 | } | ||
| 513 | } | ||
| 514 | } else { | 495 | } else { |
| 515 | unsigned long flags = 0; | 496 | unsigned long flags = 0; |
| 516 | 497 | ||
| @@ -534,13 +515,17 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev) | |||
| 534 | 515 | ||
| 535 | if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { | 516 | if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { |
| 536 | for_each_hwfn(cdev, i) { | 517 | for_each_hwfn(cdev, i) { |
| 518 | if (!cdev->hwfns[i].b_int_requested) | ||
| 519 | break; | ||
| 537 | synchronize_irq(cdev->int_params.msix_table[i].vector); | 520 | synchronize_irq(cdev->int_params.msix_table[i].vector); |
| 538 | free_irq(cdev->int_params.msix_table[i].vector, | 521 | free_irq(cdev->int_params.msix_table[i].vector, |
| 539 | cdev->hwfns[i].sp_dpc); | 522 | cdev->hwfns[i].sp_dpc); |
| 540 | } | 523 | } |
| 541 | } else { | 524 | } else { |
| 542 | free_irq(cdev->pdev->irq, cdev); | 525 | if (QED_LEADING_HWFN(cdev)->b_int_requested) |
| 526 | free_irq(cdev->pdev->irq, cdev); | ||
| 543 | } | 527 | } |
| 528 | qed_int_disable_post_isr_release(cdev); | ||
| 544 | } | 529 | } |
| 545 | 530 | ||
| 546 | static int qed_nic_stop(struct qed_dev *cdev) | 531 | static int qed_nic_stop(struct qed_dev *cdev) |
| @@ -765,16 +750,11 @@ static int qed_slowpath_start(struct qed_dev *cdev, | |||
| 765 | if (rc) | 750 | if (rc) |
| 766 | goto err1; | 751 | goto err1; |
| 767 | 752 | ||
| 768 | /* Request the slowpath IRQ */ | ||
| 769 | rc = qed_slowpath_irq_req(cdev); | ||
| 770 | if (rc) | ||
| 771 | goto err2; | ||
| 772 | |||
| 773 | /* Allocate stream for unzipping */ | 753 | /* Allocate stream for unzipping */ |
| 774 | rc = qed_alloc_stream_mem(cdev); | 754 | rc = qed_alloc_stream_mem(cdev); |
| 775 | if (rc) { | 755 | if (rc) { |
| 776 | DP_NOTICE(cdev, "Failed to allocate stream memory\n"); | 756 | DP_NOTICE(cdev, "Failed to allocate stream memory\n"); |
| 777 | goto err3; | 757 | goto err2; |
| 778 | } | 758 | } |
| 779 | 759 | ||
| 780 | /* Start the slowpath */ | 760 | /* Start the slowpath */ |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 7a5ce5914ace..e8df12335a97 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | |||
| @@ -363,4 +363,8 @@ | |||
| 363 | 0x7 << 0) | 363 | 0x7 << 0) |
| 364 | #define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \ | 364 | #define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \ |
| 365 | 0 | 365 | 0 |
| 366 | #define PGLUE_B_REG_PF_BAR0_SIZE \ | ||
| 367 | 0x2aae60UL | ||
| 368 | #define PGLUE_B_REG_PF_BAR1_SIZE \ | ||
| 369 | 0x2aae64UL | ||
| 366 | #endif | 370 | #endif |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 31a1f1eb4f56..287fadfab52d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h | |||
| @@ -124,8 +124,12 @@ struct qed_spq { | |||
| 124 | dma_addr_t p_phys; | 124 | dma_addr_t p_phys; |
| 125 | struct qed_spq_entry *p_virt; | 125 | struct qed_spq_entry *p_virt; |
| 126 | 126 | ||
| 127 | /* Used as index for completions (returns on EQ by FW) */ | 127 | #define SPQ_RING_SIZE \ |
| 128 | u16 echo_idx; | 128 | (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element)) |
| 129 | |||
| 130 | /* Bitmap for handling out-of-order completions */ | ||
| 131 | DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE); | ||
| 132 | u8 comp_bitmap_idx; | ||
| 129 | 133 | ||
| 130 | /* Statistics */ | 134 | /* Statistics */ |
| 131 | u32 unlimited_pending_count; | 135 | u32 unlimited_pending_count; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 7c0b8459666e..3dd548ab8df1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c | |||
| @@ -112,8 +112,6 @@ static int | |||
| 112 | qed_spq_fill_entry(struct qed_hwfn *p_hwfn, | 112 | qed_spq_fill_entry(struct qed_hwfn *p_hwfn, |
| 113 | struct qed_spq_entry *p_ent) | 113 | struct qed_spq_entry *p_ent) |
| 114 | { | 114 | { |
| 115 | p_ent->elem.hdr.echo = 0; | ||
| 116 | p_hwfn->p_spq->echo_idx++; | ||
| 117 | p_ent->flags = 0; | 115 | p_ent->flags = 0; |
| 118 | 116 | ||
| 119 | switch (p_ent->comp_mode) { | 117 | switch (p_ent->comp_mode) { |
| @@ -195,10 +193,12 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, | |||
| 195 | struct qed_spq *p_spq, | 193 | struct qed_spq *p_spq, |
| 196 | struct qed_spq_entry *p_ent) | 194 | struct qed_spq_entry *p_ent) |
| 197 | { | 195 | { |
| 198 | struct qed_chain *p_chain = &p_hwfn->p_spq->chain; | 196 | struct qed_chain *p_chain = &p_hwfn->p_spq->chain; |
| 197 | u16 echo = qed_chain_get_prod_idx(p_chain); | ||
| 199 | struct slow_path_element *elem; | 198 | struct slow_path_element *elem; |
| 200 | struct core_db_data db; | 199 | struct core_db_data db; |
| 201 | 200 | ||
| 201 | p_ent->elem.hdr.echo = cpu_to_le16(echo); | ||
| 202 | elem = qed_chain_produce(p_chain); | 202 | elem = qed_chain_produce(p_chain); |
| 203 | if (!elem) { | 203 | if (!elem) { |
| 204 | DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n"); | 204 | DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n"); |
| @@ -437,7 +437,9 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) | |||
| 437 | p_spq->comp_count = 0; | 437 | p_spq->comp_count = 0; |
| 438 | p_spq->comp_sent_count = 0; | 438 | p_spq->comp_sent_count = 0; |
| 439 | p_spq->unlimited_pending_count = 0; | 439 | p_spq->unlimited_pending_count = 0; |
| 440 | p_spq->echo_idx = 0; | 440 | |
| 441 | bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE); | ||
| 442 | p_spq->comp_bitmap_idx = 0; | ||
| 441 | 443 | ||
| 442 | /* SPQ cid, cannot fail */ | 444 | /* SPQ cid, cannot fail */ |
| 443 | qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); | 445 | qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); |
| @@ -582,26 +584,32 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn, | |||
| 582 | struct qed_spq *p_spq = p_hwfn->p_spq; | 584 | struct qed_spq *p_spq = p_hwfn->p_spq; |
| 583 | 585 | ||
| 584 | if (p_ent->queue == &p_spq->unlimited_pending) { | 586 | if (p_ent->queue == &p_spq->unlimited_pending) { |
| 585 | struct qed_spq_entry *p_en2; | ||
| 586 | 587 | ||
| 587 | if (list_empty(&p_spq->free_pool)) { | 588 | if (list_empty(&p_spq->free_pool)) { |
| 588 | list_add_tail(&p_ent->list, &p_spq->unlimited_pending); | 589 | list_add_tail(&p_ent->list, &p_spq->unlimited_pending); |
| 589 | p_spq->unlimited_pending_count++; | 590 | p_spq->unlimited_pending_count++; |
| 590 | 591 | ||
| 591 | return 0; | 592 | return 0; |
| 592 | } | 593 | } else { |
| 594 | struct qed_spq_entry *p_en2; | ||
| 593 | 595 | ||
| 594 | p_en2 = list_first_entry(&p_spq->free_pool, | 596 | p_en2 = list_first_entry(&p_spq->free_pool, |
| 595 | struct qed_spq_entry, | 597 | struct qed_spq_entry, |
| 596 | list); | 598 | list); |
| 597 | list_del(&p_en2->list); | 599 | list_del(&p_en2->list); |
| 600 | |||
| 601 | /* Copy the ring element physical pointer to the new | ||
| 602 | * entry, since we are about to override the entire ring | ||
| 603 | * entry and don't want to lose the pointer. | ||
| 604 | */ | ||
| 605 | p_ent->elem.data_ptr = p_en2->elem.data_ptr; | ||
| 598 | 606 | ||
| 599 | /* Strcut assignment */ | 607 | *p_en2 = *p_ent; |
| 600 | *p_en2 = *p_ent; | ||
| 601 | 608 | ||
| 602 | kfree(p_ent); | 609 | kfree(p_ent); |
| 603 | 610 | ||
| 604 | p_ent = p_en2; | 611 | p_ent = p_en2; |
| 612 | } | ||
| 605 | } | 613 | } |
| 606 | 614 | ||
| 607 | /* entry is to be placed in 'pending' queue */ | 615 | /* entry is to be placed in 'pending' queue */ |
| @@ -777,13 +785,38 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, | |||
| 777 | list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, | 785 | list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, |
| 778 | list) { | 786 | list) { |
| 779 | if (p_ent->elem.hdr.echo == echo) { | 787 | if (p_ent->elem.hdr.echo == echo) { |
| 788 | u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; | ||
| 789 | |||
| 780 | list_del(&p_ent->list); | 790 | list_del(&p_ent->list); |
| 781 | 791 | ||
| 782 | qed_chain_return_produced(&p_spq->chain); | 792 | /* Avoid overriding of SPQ entries when getting |
| 793 | * out-of-order completions, by marking the completions | ||
| 794 | * in a bitmap and increasing the chain consumer only | ||
| 795 | * for the first successive completed entries. | ||
| 796 | */ | ||
| 797 | bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE); | ||
| 798 | |||
| 799 | while (test_bit(p_spq->comp_bitmap_idx, | ||
| 800 | p_spq->p_comp_bitmap)) { | ||
| 801 | bitmap_clear(p_spq->p_comp_bitmap, | ||
| 802 | p_spq->comp_bitmap_idx, | ||
| 803 | SPQ_RING_SIZE); | ||
| 804 | p_spq->comp_bitmap_idx++; | ||
| 805 | qed_chain_return_produced(&p_spq->chain); | ||
| 806 | } | ||
| 807 | |||
| 783 | p_spq->comp_count++; | 808 | p_spq->comp_count++; |
| 784 | found = p_ent; | 809 | found = p_ent; |
| 785 | break; | 810 | break; |
| 786 | } | 811 | } |
| 812 | |||
| 813 | /* This is relatively uncommon - depends on scenarios | ||
| 814 | * which have mutliple per-PF sent ramrods. | ||
| 815 | */ | ||
| 816 | DP_VERBOSE(p_hwfn, QED_MSG_SPQ, | ||
| 817 | "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n", | ||
| 818 | le16_to_cpu(echo), | ||
| 819 | le16_to_cpu(p_ent->elem.hdr.echo)); | ||
| 787 | } | 820 | } |
| 788 | 821 | ||
| 789 | /* Release lock before callback, as callback may post | 822 | /* Release lock before callback, as callback may post |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c index be7d7a62cc0d..b1a452f291ee 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | |||
| @@ -246,7 +246,8 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter) | |||
| 246 | u32 state; | 246 | u32 state; |
| 247 | 247 | ||
| 248 | state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); | 248 | state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); |
| 249 | while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) { | 249 | while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit) { |
| 250 | idc->vnic_wait_limit--; | ||
| 250 | msleep(1000); | 251 | msleep(1000); |
| 251 | state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); | 252 | state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); |
| 252 | } | 253 | } |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 02b7115b6aaa..997976426799 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
| @@ -4211,8 +4211,9 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev) | |||
| 4211 | 4211 | ||
| 4212 | /* Wait for an outstanding reset to complete. */ | 4212 | /* Wait for an outstanding reset to complete. */ |
| 4213 | if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { | 4213 | if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { |
| 4214 | int i = 3; | 4214 | int i = 4; |
| 4215 | while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { | 4215 | |
| 4216 | while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { | ||
| 4216 | netif_err(qdev, ifup, qdev->ndev, | 4217 | netif_err(qdev, ifup, qdev->ndev, |
| 4217 | "Waiting for adapter UP...\n"); | 4218 | "Waiting for adapter UP...\n"); |
| 4218 | ssleep(1); | 4219 | ssleep(1); |
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index ddb2c6c6ec94..689a4a5c8dcf 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c | |||
| @@ -736,9 +736,8 @@ qcaspi_netdev_tx_timeout(struct net_device *dev) | |||
| 736 | netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", | 736 | netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", |
| 737 | jiffies, jiffies - dev->trans_start); | 737 | jiffies, jiffies - dev->trans_start); |
| 738 | qca->net_dev->stats.tx_errors++; | 738 | qca->net_dev->stats.tx_errors++; |
| 739 | /* wake the queue if there is room */ | 739 | /* Trigger tx queue flush and QCA7000 reset */ |
| 740 | if (qcaspi_tx_ring_has_space(&qca->txr)) | 740 | qca->sync = QCASPI_SYNC_UNKNOWN; |
| 741 | netif_wake_queue(dev); | ||
| 742 | } | 741 | } |
| 743 | 742 | ||
| 744 | static int | 743 | static int |
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ed5da4d47668..467d41698fd5 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
| @@ -905,6 +905,9 @@ static int ravb_phy_init(struct net_device *ndev) | |||
| 905 | netdev_info(ndev, "limited PHY to 100Mbit/s\n"); | 905 | netdev_info(ndev, "limited PHY to 100Mbit/s\n"); |
| 906 | } | 906 | } |
| 907 | 907 | ||
| 908 | /* 10BASE is not supported */ | ||
| 909 | phydev->supported &= ~PHY_10BT_FEATURES; | ||
| 910 | |||
| 908 | netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", | 911 | netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", |
| 909 | phydev->addr, phydev->irq, phydev->drv->name); | 912 | phydev->addr, phydev->irq, phydev->drv->name); |
| 910 | 913 | ||
| @@ -1037,7 +1040,7 @@ static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = { | |||
| 1037 | "rx_queue_1_mcast_packets", | 1040 | "rx_queue_1_mcast_packets", |
| 1038 | "rx_queue_1_errors", | 1041 | "rx_queue_1_errors", |
| 1039 | "rx_queue_1_crc_errors", | 1042 | "rx_queue_1_crc_errors", |
| 1040 | "rx_queue_1_frame_errors_", | 1043 | "rx_queue_1_frame_errors", |
| 1041 | "rx_queue_1_length_errors", | 1044 | "rx_queue_1_length_errors", |
| 1042 | "rx_queue_1_missed_errors", | 1045 | "rx_queue_1_missed_errors", |
| 1043 | "rx_queue_1_over_errors", | 1046 | "rx_queue_1_over_errors", |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index e7bab7909ed9..a0eaf50499a2 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
| @@ -52,6 +52,8 @@ | |||
| 52 | NETIF_MSG_RX_ERR| \ | 52 | NETIF_MSG_RX_ERR| \ |
| 53 | NETIF_MSG_TX_ERR) | 53 | NETIF_MSG_TX_ERR) |
| 54 | 54 | ||
| 55 | #define SH_ETH_OFFSET_INVALID ((u16)~0) | ||
| 56 | |||
| 55 | #define SH_ETH_OFFSET_DEFAULTS \ | 57 | #define SH_ETH_OFFSET_DEFAULTS \ |
| 56 | [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID | 58 | [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID |
| 57 | 59 | ||
| @@ -404,6 +406,28 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { | |||
| 404 | static void sh_eth_rcv_snd_disable(struct net_device *ndev); | 406 | static void sh_eth_rcv_snd_disable(struct net_device *ndev); |
| 405 | static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); | 407 | static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); |
| 406 | 408 | ||
| 409 | static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index) | ||
| 410 | { | ||
| 411 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
| 412 | u16 offset = mdp->reg_offset[enum_index]; | ||
| 413 | |||
| 414 | if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) | ||
| 415 | return; | ||
| 416 | |||
| 417 | iowrite32(data, mdp->addr + offset); | ||
| 418 | } | ||
| 419 | |||
| 420 | static u32 sh_eth_read(struct net_device *ndev, int enum_index) | ||
| 421 | { | ||
| 422 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
| 423 | u16 offset = mdp->reg_offset[enum_index]; | ||
| 424 | |||
| 425 | if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) | ||
| 426 | return ~0U; | ||
| 427 | |||
| 428 | return ioread32(mdp->addr + offset); | ||
| 429 | } | ||
| 430 | |||
| 407 | static bool sh_eth_is_gether(struct sh_eth_private *mdp) | 431 | static bool sh_eth_is_gether(struct sh_eth_private *mdp) |
| 408 | { | 432 | { |
| 409 | return mdp->reg_offset == sh_eth_offset_gigabit; | 433 | return mdp->reg_offset == sh_eth_offset_gigabit; |
| @@ -1172,7 +1196,7 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
| 1172 | break; | 1196 | break; |
| 1173 | } | 1197 | } |
| 1174 | mdp->rx_skbuff[i] = skb; | 1198 | mdp->rx_skbuff[i] = skb; |
| 1175 | rxdesc->addr = dma_addr; | 1199 | rxdesc->addr = cpu_to_edmac(mdp, dma_addr); |
| 1176 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); | 1200 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); |
| 1177 | 1201 | ||
| 1178 | /* Rx descriptor address set */ | 1202 | /* Rx descriptor address set */ |
| @@ -1403,7 +1427,8 @@ static int sh_eth_txfree(struct net_device *ndev) | |||
| 1403 | entry, edmac_to_cpu(mdp, txdesc->status)); | 1427 | entry, edmac_to_cpu(mdp, txdesc->status)); |
| 1404 | /* Free the original skb. */ | 1428 | /* Free the original skb. */ |
| 1405 | if (mdp->tx_skbuff[entry]) { | 1429 | if (mdp->tx_skbuff[entry]) { |
| 1406 | dma_unmap_single(&ndev->dev, txdesc->addr, | 1430 | dma_unmap_single(&ndev->dev, |
| 1431 | edmac_to_cpu(mdp, txdesc->addr), | ||
| 1407 | txdesc->buffer_length, DMA_TO_DEVICE); | 1432 | txdesc->buffer_length, DMA_TO_DEVICE); |
| 1408 | dev_kfree_skb_irq(mdp->tx_skbuff[entry]); | 1433 | dev_kfree_skb_irq(mdp->tx_skbuff[entry]); |
| 1409 | mdp->tx_skbuff[entry] = NULL; | 1434 | mdp->tx_skbuff[entry] = NULL; |
| @@ -1462,6 +1487,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
| 1462 | if (mdp->cd->shift_rd0) | 1487 | if (mdp->cd->shift_rd0) |
| 1463 | desc_status >>= 16; | 1488 | desc_status >>= 16; |
| 1464 | 1489 | ||
| 1490 | skb = mdp->rx_skbuff[entry]; | ||
| 1465 | if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | | 1491 | if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | |
| 1466 | RD_RFS5 | RD_RFS6 | RD_RFS10)) { | 1492 | RD_RFS5 | RD_RFS6 | RD_RFS10)) { |
| 1467 | ndev->stats.rx_errors++; | 1493 | ndev->stats.rx_errors++; |
| @@ -1477,16 +1503,16 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
| 1477 | ndev->stats.rx_missed_errors++; | 1503 | ndev->stats.rx_missed_errors++; |
| 1478 | if (desc_status & RD_RFS10) | 1504 | if (desc_status & RD_RFS10) |
| 1479 | ndev->stats.rx_over_errors++; | 1505 | ndev->stats.rx_over_errors++; |
| 1480 | } else { | 1506 | } else if (skb) { |
| 1507 | dma_addr = edmac_to_cpu(mdp, rxdesc->addr); | ||
| 1481 | if (!mdp->cd->hw_swap) | 1508 | if (!mdp->cd->hw_swap) |
| 1482 | sh_eth_soft_swap( | 1509 | sh_eth_soft_swap( |
| 1483 | phys_to_virt(ALIGN(rxdesc->addr, 4)), | 1510 | phys_to_virt(ALIGN(dma_addr, 4)), |
| 1484 | pkt_len + 2); | 1511 | pkt_len + 2); |
| 1485 | skb = mdp->rx_skbuff[entry]; | ||
| 1486 | mdp->rx_skbuff[entry] = NULL; | 1512 | mdp->rx_skbuff[entry] = NULL; |
| 1487 | if (mdp->cd->rpadir) | 1513 | if (mdp->cd->rpadir) |
| 1488 | skb_reserve(skb, NET_IP_ALIGN); | 1514 | skb_reserve(skb, NET_IP_ALIGN); |
| 1489 | dma_unmap_single(&ndev->dev, rxdesc->addr, | 1515 | dma_unmap_single(&ndev->dev, dma_addr, |
| 1490 | ALIGN(mdp->rx_buf_sz, 32), | 1516 | ALIGN(mdp->rx_buf_sz, 32), |
| 1491 | DMA_FROM_DEVICE); | 1517 | DMA_FROM_DEVICE); |
| 1492 | skb_put(skb, pkt_len); | 1518 | skb_put(skb, pkt_len); |
| @@ -1523,7 +1549,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) | |||
| 1523 | mdp->rx_skbuff[entry] = skb; | 1549 | mdp->rx_skbuff[entry] = skb; |
| 1524 | 1550 | ||
| 1525 | skb_checksum_none_assert(skb); | 1551 | skb_checksum_none_assert(skb); |
| 1526 | rxdesc->addr = dma_addr; | 1552 | rxdesc->addr = cpu_to_edmac(mdp, dma_addr); |
| 1527 | } | 1553 | } |
| 1528 | dma_wmb(); /* RACT bit must be set after all the above writes */ | 1554 | dma_wmb(); /* RACT bit must be set after all the above writes */ |
| 1529 | if (entry >= mdp->num_rx_ring - 1) | 1555 | if (entry >= mdp->num_rx_ring - 1) |
| @@ -2331,8 +2357,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev) | |||
| 2331 | /* Free all the skbuffs in the Rx queue. */ | 2357 | /* Free all the skbuffs in the Rx queue. */ |
| 2332 | for (i = 0; i < mdp->num_rx_ring; i++) { | 2358 | for (i = 0; i < mdp->num_rx_ring; i++) { |
| 2333 | rxdesc = &mdp->rx_ring[i]; | 2359 | rxdesc = &mdp->rx_ring[i]; |
| 2334 | rxdesc->status = 0; | 2360 | rxdesc->status = cpu_to_edmac(mdp, 0); |
| 2335 | rxdesc->addr = 0xBADF00D0; | 2361 | rxdesc->addr = cpu_to_edmac(mdp, 0xBADF00D0); |
| 2336 | dev_kfree_skb(mdp->rx_skbuff[i]); | 2362 | dev_kfree_skb(mdp->rx_skbuff[i]); |
| 2337 | mdp->rx_skbuff[i] = NULL; | 2363 | mdp->rx_skbuff[i] = NULL; |
| 2338 | } | 2364 | } |
| @@ -2350,6 +2376,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 2350 | { | 2376 | { |
| 2351 | struct sh_eth_private *mdp = netdev_priv(ndev); | 2377 | struct sh_eth_private *mdp = netdev_priv(ndev); |
| 2352 | struct sh_eth_txdesc *txdesc; | 2378 | struct sh_eth_txdesc *txdesc; |
| 2379 | dma_addr_t dma_addr; | ||
| 2353 | u32 entry; | 2380 | u32 entry; |
| 2354 | unsigned long flags; | 2381 | unsigned long flags; |
| 2355 | 2382 | ||
| @@ -2372,14 +2399,14 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
| 2372 | txdesc = &mdp->tx_ring[entry]; | 2399 | txdesc = &mdp->tx_ring[entry]; |
| 2373 | /* soft swap. */ | 2400 | /* soft swap. */ |
| 2374 | if (!mdp->cd->hw_swap) | 2401 | if (!mdp->cd->hw_swap) |
| 2375 | sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)), | 2402 | sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2); |
| 2376 | skb->len + 2); | 2403 | dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len, |
| 2377 | txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, | 2404 | DMA_TO_DEVICE); |
| 2378 | DMA_TO_DEVICE); | 2405 | if (dma_mapping_error(&ndev->dev, dma_addr)) { |
| 2379 | if (dma_mapping_error(&ndev->dev, txdesc->addr)) { | ||
| 2380 | kfree_skb(skb); | 2406 | kfree_skb(skb); |
| 2381 | return NETDEV_TX_OK; | 2407 | return NETDEV_TX_OK; |
| 2382 | } | 2408 | } |
| 2409 | txdesc->addr = cpu_to_edmac(mdp, dma_addr); | ||
| 2383 | txdesc->buffer_length = skb->len; | 2410 | txdesc->buffer_length = skb->len; |
| 2384 | 2411 | ||
| 2385 | dma_wmb(); /* TACT bit must be set after all the above writes */ | 2412 | dma_wmb(); /* TACT bit must be set after all the above writes */ |
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 50382b1c9ddc..26ad1cf0bcf1 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h | |||
| @@ -546,31 +546,6 @@ static inline void sh_eth_soft_swap(char *src, int len) | |||
| 546 | #endif | 546 | #endif |
| 547 | } | 547 | } |
| 548 | 548 | ||
| 549 | #define SH_ETH_OFFSET_INVALID ((u16) ~0) | ||
| 550 | |||
| 551 | static inline void sh_eth_write(struct net_device *ndev, u32 data, | ||
| 552 | int enum_index) | ||
| 553 | { | ||
| 554 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
| 555 | u16 offset = mdp->reg_offset[enum_index]; | ||
| 556 | |||
| 557 | if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) | ||
| 558 | return; | ||
| 559 | |||
| 560 | iowrite32(data, mdp->addr + offset); | ||
| 561 | } | ||
| 562 | |||
| 563 | static inline u32 sh_eth_read(struct net_device *ndev, int enum_index) | ||
| 564 | { | ||
| 565 | struct sh_eth_private *mdp = netdev_priv(ndev); | ||
| 566 | u16 offset = mdp->reg_offset[enum_index]; | ||
| 567 | |||
| 568 | if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) | ||
| 569 | return ~0U; | ||
| 570 | |||
| 571 | return ioread32(mdp->addr + offset); | ||
| 572 | } | ||
| 573 | |||
| 574 | static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, | 549 | static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, |
| 575 | int enum_index) | 550 | int enum_index) |
| 576 | { | 551 | { |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index bc6d21b471be..e6a084a6be12 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
| @@ -3299,7 +3299,8 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, | |||
| 3299 | 3299 | ||
| 3300 | new_spec.priority = EFX_FILTER_PRI_AUTO; | 3300 | new_spec.priority = EFX_FILTER_PRI_AUTO; |
| 3301 | new_spec.flags = (EFX_FILTER_FLAG_RX | | 3301 | new_spec.flags = (EFX_FILTER_FLAG_RX | |
| 3302 | EFX_FILTER_FLAG_RX_RSS); | 3302 | (efx_rss_enabled(efx) ? |
| 3303 | EFX_FILTER_FLAG_RX_RSS : 0)); | ||
| 3303 | new_spec.dmaq_id = 0; | 3304 | new_spec.dmaq_id = 0; |
| 3304 | new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; | 3305 | new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; |
| 3305 | rc = efx_ef10_filter_push(efx, &new_spec, | 3306 | rc = efx_ef10_filter_push(efx, &new_spec, |
| @@ -3921,6 +3922,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, | |||
| 3921 | { | 3922 | { |
| 3922 | struct efx_ef10_filter_table *table = efx->filter_state; | 3923 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 3923 | struct efx_ef10_dev_addr *addr_list; | 3924 | struct efx_ef10_dev_addr *addr_list; |
| 3925 | enum efx_filter_flags filter_flags; | ||
| 3924 | struct efx_filter_spec spec; | 3926 | struct efx_filter_spec spec; |
| 3925 | u8 baddr[ETH_ALEN]; | 3927 | u8 baddr[ETH_ALEN]; |
| 3926 | unsigned int i, j; | 3928 | unsigned int i, j; |
| @@ -3935,11 +3937,11 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, | |||
| 3935 | addr_count = table->dev_uc_count; | 3937 | addr_count = table->dev_uc_count; |
| 3936 | } | 3938 | } |
| 3937 | 3939 | ||
| 3940 | filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; | ||
| 3941 | |||
| 3938 | /* Insert/renew filters */ | 3942 | /* Insert/renew filters */ |
| 3939 | for (i = 0; i < addr_count; i++) { | 3943 | for (i = 0; i < addr_count; i++) { |
| 3940 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, | 3944 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
| 3941 | EFX_FILTER_FLAG_RX_RSS, | ||
| 3942 | 0); | ||
| 3943 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, | 3945 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, |
| 3944 | addr_list[i].addr); | 3946 | addr_list[i].addr); |
| 3945 | rc = efx_ef10_filter_insert(efx, &spec, true); | 3947 | rc = efx_ef10_filter_insert(efx, &spec, true); |
| @@ -3968,9 +3970,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, | |||
| 3968 | 3970 | ||
| 3969 | if (multicast && rollback) { | 3971 | if (multicast && rollback) { |
| 3970 | /* Also need an Ethernet broadcast filter */ | 3972 | /* Also need an Ethernet broadcast filter */ |
| 3971 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, | 3973 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
| 3972 | EFX_FILTER_FLAG_RX_RSS, | ||
| 3973 | 0); | ||
| 3974 | eth_broadcast_addr(baddr); | 3974 | eth_broadcast_addr(baddr); |
| 3975 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); | 3975 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); |
| 3976 | rc = efx_ef10_filter_insert(efx, &spec, true); | 3976 | rc = efx_ef10_filter_insert(efx, &spec, true); |
| @@ -4000,13 +4000,14 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, | |||
| 4000 | { | 4000 | { |
| 4001 | struct efx_ef10_filter_table *table = efx->filter_state; | 4001 | struct efx_ef10_filter_table *table = efx->filter_state; |
| 4002 | struct efx_ef10_nic_data *nic_data = efx->nic_data; | 4002 | struct efx_ef10_nic_data *nic_data = efx->nic_data; |
| 4003 | enum efx_filter_flags filter_flags; | ||
| 4003 | struct efx_filter_spec spec; | 4004 | struct efx_filter_spec spec; |
| 4004 | u8 baddr[ETH_ALEN]; | 4005 | u8 baddr[ETH_ALEN]; |
| 4005 | int rc; | 4006 | int rc; |
| 4006 | 4007 | ||
| 4007 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, | 4008 | filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; |
| 4008 | EFX_FILTER_FLAG_RX_RSS, | 4009 | |
| 4009 | 0); | 4010 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); |
| 4010 | 4011 | ||
| 4011 | if (multicast) | 4012 | if (multicast) |
| 4012 | efx_filter_set_mc_def(&spec); | 4013 | efx_filter_set_mc_def(&spec); |
| @@ -4023,8 +4024,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, | |||
| 4023 | if (!nic_data->workaround_26807) { | 4024 | if (!nic_data->workaround_26807) { |
| 4024 | /* Also need an Ethernet broadcast filter */ | 4025 | /* Also need an Ethernet broadcast filter */ |
| 4025 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, | 4026 | efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, |
| 4026 | EFX_FILTER_FLAG_RX_RSS, | 4027 | filter_flags, 0); |
| 4027 | 0); | ||
| 4028 | eth_broadcast_addr(baddr); | 4028 | eth_broadcast_addr(baddr); |
| 4029 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, | 4029 | efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, |
| 4030 | baddr); | 4030 | baddr); |
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 1aaf76c1ace8..10827476bc0b 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h | |||
| @@ -76,6 +76,11 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); | |||
| 76 | #define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \ | 76 | #define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \ |
| 77 | EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) | 77 | EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) |
| 78 | 78 | ||
| 79 | static inline bool efx_rss_enabled(struct efx_nic *efx) | ||
| 80 | { | ||
| 81 | return efx->rss_spread > 1; | ||
| 82 | } | ||
| 83 | |||
| 79 | /* Filters */ | 84 | /* Filters */ |
| 80 | 85 | ||
| 81 | void efx_mac_reconfigure(struct efx_nic *efx); | 86 | void efx_mac_reconfigure(struct efx_nic *efx); |
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 5a1c5a8f278a..133e9e35be9e 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c | |||
| @@ -2242,7 +2242,7 @@ efx_farch_filter_init_rx_auto(struct efx_nic *efx, | |||
| 2242 | */ | 2242 | */ |
| 2243 | spec->priority = EFX_FILTER_PRI_AUTO; | 2243 | spec->priority = EFX_FILTER_PRI_AUTO; |
| 2244 | spec->flags = (EFX_FILTER_FLAG_RX | | 2244 | spec->flags = (EFX_FILTER_FLAG_RX | |
| 2245 | (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | | 2245 | (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) | |
| 2246 | (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); | 2246 | (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); |
| 2247 | spec->dmaq_id = 0; | 2247 | spec->dmaq_id = 0; |
| 2248 | } | 2248 | } |
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c index 3d5ee3259885..194f67d9f3bf 100644 --- a/drivers/net/ethernet/sfc/txc43128_phy.c +++ b/drivers/net/ethernet/sfc/txc43128_phy.c | |||
| @@ -418,7 +418,7 @@ static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd) | |||
| 418 | 418 | ||
| 419 | val |= (1 << TXC_GLCMD_LMTSWRST_LBN); | 419 | val |= (1 << TXC_GLCMD_LMTSWRST_LBN); |
| 420 | efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val); | 420 | efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val); |
| 421 | while (tries--) { | 421 | while (--tries) { |
| 422 | val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); | 422 | val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); |
| 423 | if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN))) | 423 | if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN))) |
| 424 | break; | 424 | break; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 52b8ed9bd87c..adff46375a32 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | |||
| @@ -153,7 +153,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev) | |||
| 153 | if (ret) | 153 | if (ret) |
| 154 | return ret; | 154 | return ret; |
| 155 | 155 | ||
| 156 | return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); | 156 | ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); |
| 157 | if (ret) | ||
| 158 | sun7i_gmac_exit(pdev, plat_dat->bsp_priv); | ||
| 159 | |||
| 160 | return ret; | ||
| 157 | } | 161 | } |
| 158 | 162 | ||
| 159 | static const struct of_device_id sun7i_dwmac_match[] = { | 163 | static const struct of_device_id sun7i_dwmac_match[] = { |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3c6549aee11d..a5b869eb4678 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -3046,8 +3046,6 @@ int stmmac_suspend(struct net_device *ndev) | |||
| 3046 | priv->hw->dma->stop_tx(priv->ioaddr); | 3046 | priv->hw->dma->stop_tx(priv->ioaddr); |
| 3047 | priv->hw->dma->stop_rx(priv->ioaddr); | 3047 | priv->hw->dma->stop_rx(priv->ioaddr); |
| 3048 | 3048 | ||
| 3049 | stmmac_clear_descriptors(priv); | ||
| 3050 | |||
| 3051 | /* Enable Power down mode by programming the PMT regs */ | 3049 | /* Enable Power down mode by programming the PMT regs */ |
| 3052 | if (device_may_wakeup(priv->device)) { | 3050 | if (device_may_wakeup(priv->device)) { |
| 3053 | priv->hw->mac->pmt(priv->hw, priv->wolopts); | 3051 | priv->hw->mac->pmt(priv->hw, priv->wolopts); |
| @@ -3105,7 +3103,12 @@ int stmmac_resume(struct net_device *ndev) | |||
| 3105 | 3103 | ||
| 3106 | netif_device_attach(ndev); | 3104 | netif_device_attach(ndev); |
| 3107 | 3105 | ||
| 3108 | init_dma_desc_rings(ndev, GFP_ATOMIC); | 3106 | priv->cur_rx = 0; |
| 3107 | priv->dirty_rx = 0; | ||
| 3108 | priv->dirty_tx = 0; | ||
| 3109 | priv->cur_tx = 0; | ||
| 3110 | stmmac_clear_descriptors(priv); | ||
| 3111 | |||
| 3109 | stmmac_hw_setup(ndev, false); | 3112 | stmmac_hw_setup(ndev, false); |
| 3110 | stmmac_init_tx_coalesce(priv); | 3113 | stmmac_init_tx_coalesce(priv); |
| 3111 | stmmac_set_rx_mode(ndev); | 3114 | stmmac_set_rx_mode(ndev); |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index de5c30c9f059..c2b79f5d1c89 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
| @@ -967,8 +967,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, | |||
| 967 | err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, | 967 | err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, |
| 968 | &fl6.saddr, &fl6.daddr, prio, ttl, | 968 | &fl6.saddr, &fl6.daddr, prio, ttl, |
| 969 | sport, geneve->dst_port, !udp_csum); | 969 | sport, geneve->dst_port, !udp_csum); |
| 970 | |||
| 971 | iptunnel_xmit_stats(err, &dev->stats, dev->tstats); | ||
| 972 | return NETDEV_TX_OK; | 970 | return NETDEV_TX_OK; |
| 973 | 971 | ||
| 974 | tx_error: | 972 | tx_error: |
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index 908e8d486342..7f8e7662e28c 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c | |||
| @@ -149,9 +149,14 @@ int mdio_mux_init(struct device *dev, | |||
| 149 | } | 149 | } |
| 150 | cb->bus_number = v; | 150 | cb->bus_number = v; |
| 151 | cb->parent = pb; | 151 | cb->parent = pb; |
| 152 | |||
| 152 | cb->mii_bus = mdiobus_alloc(); | 153 | cb->mii_bus = mdiobus_alloc(); |
| 154 | if (!cb->mii_bus) { | ||
| 155 | ret_val = -ENOMEM; | ||
| 156 | of_node_put(child_bus_node); | ||
| 157 | break; | ||
| 158 | } | ||
| 153 | cb->mii_bus->priv = cb; | 159 | cb->mii_bus->priv = cb; |
| 154 | |||
| 155 | cb->mii_bus->irq = cb->phy_irq; | 160 | cb->mii_bus->irq = cb->phy_irq; |
| 156 | cb->mii_bus->name = "mdio_mux"; | 161 | cb->mii_bus->name = "mdio_mux"; |
| 157 | snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x", | 162 | snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x", |
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index cf6312fafea5..e13ad6cdcc22 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
| @@ -339,9 +339,18 @@ static int ksz9021_config_init(struct phy_device *phydev) | |||
| 339 | { | 339 | { |
| 340 | const struct device *dev = &phydev->dev; | 340 | const struct device *dev = &phydev->dev; |
| 341 | const struct device_node *of_node = dev->of_node; | 341 | const struct device_node *of_node = dev->of_node; |
| 342 | const struct device *dev_walker; | ||
| 342 | 343 | ||
| 343 | if (!of_node && dev->parent->of_node) | 344 | /* The Micrel driver has a deprecated option to place phy OF |
| 344 | of_node = dev->parent->of_node; | 345 | * properties in the MAC node. Walk up the tree of devices to |
| 346 | * find a device with an OF node. | ||
| 347 | */ | ||
| 348 | dev_walker = &phydev->dev; | ||
| 349 | do { | ||
| 350 | of_node = dev_walker->of_node; | ||
| 351 | dev_walker = dev_walker->parent; | ||
| 352 | |||
| 353 | } while (!of_node && dev_walker); | ||
| 345 | 354 | ||
| 346 | if (of_node) { | 355 | if (of_node) { |
| 347 | ksz9021_load_values_from_of(phydev, of_node, | 356 | ksz9021_load_values_from_of(phydev, of_node, |
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 5e0b43283bce..0a37f840fcc5 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c | |||
| @@ -568,6 +568,9 @@ static int pppoe_create(struct net *net, struct socket *sock, int kern) | |||
| 568 | sk->sk_family = PF_PPPOX; | 568 | sk->sk_family = PF_PPPOX; |
| 569 | sk->sk_protocol = PX_PROTO_OE; | 569 | sk->sk_protocol = PX_PROTO_OE; |
| 570 | 570 | ||
| 571 | INIT_WORK(&pppox_sk(sk)->proto.pppoe.padt_work, | ||
| 572 | pppoe_unbind_sock_work); | ||
| 573 | |||
| 571 | return 0; | 574 | return 0; |
| 572 | } | 575 | } |
| 573 | 576 | ||
| @@ -632,8 +635,6 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
| 632 | 635 | ||
| 633 | lock_sock(sk); | 636 | lock_sock(sk); |
| 634 | 637 | ||
| 635 | INIT_WORK(&po->proto.pppoe.padt_work, pppoe_unbind_sock_work); | ||
| 636 | |||
| 637 | error = -EINVAL; | 638 | error = -EINVAL; |
| 638 | if (sp->sa_protocol != PX_PROTO_OE) | 639 | if (sp->sa_protocol != PX_PROTO_OE) |
| 639 | goto end; | 640 | goto end; |
| @@ -663,8 +664,13 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
| 663 | po->pppoe_dev = NULL; | 664 | po->pppoe_dev = NULL; |
| 664 | } | 665 | } |
| 665 | 666 | ||
| 666 | memset(sk_pppox(po) + 1, 0, | 667 | po->pppoe_ifindex = 0; |
| 667 | sizeof(struct pppox_sock) - sizeof(struct sock)); | 668 | memset(&po->pppoe_pa, 0, sizeof(po->pppoe_pa)); |
| 669 | memset(&po->pppoe_relay, 0, sizeof(po->pppoe_relay)); | ||
| 670 | memset(&po->chan, 0, sizeof(po->chan)); | ||
| 671 | po->next = NULL; | ||
| 672 | po->num = 0; | ||
| 673 | |||
| 668 | sk->sk_state = PPPOX_NONE; | 674 | sk->sk_state = PPPOX_NONE; |
| 669 | } | 675 | } |
| 670 | 676 | ||
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index fc69e41d0950..597c53e0a2ec 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c | |||
| @@ -419,6 +419,9 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr, | |||
| 419 | struct pptp_opt *opt = &po->proto.pptp; | 419 | struct pptp_opt *opt = &po->proto.pptp; |
| 420 | int error = 0; | 420 | int error = 0; |
| 421 | 421 | ||
| 422 | if (sockaddr_len < sizeof(struct sockaddr_pppox)) | ||
| 423 | return -EINVAL; | ||
| 424 | |||
| 422 | lock_sock(sk); | 425 | lock_sock(sk); |
| 423 | 426 | ||
| 424 | opt->src_addr = sp->sa_addr.pptp; | 427 | opt->src_addr = sp->sa_addr.pptp; |
| @@ -440,6 +443,9 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
| 440 | struct flowi4 fl4; | 443 | struct flowi4 fl4; |
| 441 | int error = 0; | 444 | int error = 0; |
| 442 | 445 | ||
| 446 | if (sockaddr_len < sizeof(struct sockaddr_pppox)) | ||
| 447 | return -EINVAL; | ||
| 448 | |||
| 443 | if (sp->sa_protocol != PX_PROTO_PPTP) | 449 | if (sp->sa_protocol != PX_PROTO_PPTP) |
| 444 | return -EINVAL; | 450 | return -EINVAL; |
| 445 | 451 | ||
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index bbde9884ab8a..8973abdec9f6 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c | |||
| @@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 158 | if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) | 158 | if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) |
| 159 | goto err; | 159 | goto err; |
| 160 | 160 | ||
| 161 | ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0); | 161 | ret = cdc_ncm_bind_common(dev, intf, data_altsetting, dev->driver_info->data); |
| 162 | if (ret) | 162 | if (ret) |
| 163 | goto err; | 163 | goto err; |
| 164 | 164 | ||
| @@ -582,6 +582,26 @@ static const struct driver_info cdc_mbim_info_zlp = { | |||
| 582 | .tx_fixup = cdc_mbim_tx_fixup, | 582 | .tx_fixup = cdc_mbim_tx_fixup, |
| 583 | }; | 583 | }; |
| 584 | 584 | ||
| 585 | /* The specification explicitly allows NDPs to be placed anywhere in the | ||
| 586 | * frame, but some devices fail unless the NDP is placed after the IP | ||
| 587 | * packets. Using the CDC_NCM_FLAG_NDP_TO_END flags to force this | ||
| 588 | * behaviour. | ||
| 589 | * | ||
| 590 | * Note: The current implementation of this feature restricts each NTB | ||
| 591 | * to a single NDP, implying that multiplexed sessions cannot share an | ||
| 592 | * NTB. This might affect performance for multiplexed sessions. | ||
| 593 | */ | ||
| 594 | static const struct driver_info cdc_mbim_info_ndp_to_end = { | ||
| 595 | .description = "CDC MBIM", | ||
| 596 | .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN, | ||
| 597 | .bind = cdc_mbim_bind, | ||
| 598 | .unbind = cdc_mbim_unbind, | ||
| 599 | .manage_power = cdc_mbim_manage_power, | ||
| 600 | .rx_fixup = cdc_mbim_rx_fixup, | ||
| 601 | .tx_fixup = cdc_mbim_tx_fixup, | ||
| 602 | .data = CDC_NCM_FLAG_NDP_TO_END, | ||
| 603 | }; | ||
| 604 | |||
| 585 | static const struct usb_device_id mbim_devs[] = { | 605 | static const struct usb_device_id mbim_devs[] = { |
| 586 | /* This duplicate NCM entry is intentional. MBIM devices can | 606 | /* This duplicate NCM entry is intentional. MBIM devices can |
| 587 | * be disguised as NCM by default, and this is necessary to | 607 | * be disguised as NCM by default, and this is necessary to |
| @@ -597,6 +617,10 @@ static const struct usb_device_id mbim_devs[] = { | |||
| 597 | { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | 617 | { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), |
| 598 | .driver_info = (unsigned long)&cdc_mbim_info, | 618 | .driver_info = (unsigned long)&cdc_mbim_info, |
| 599 | }, | 619 | }, |
| 620 | /* Huawei E3372 fails unless NDP comes after the IP packets */ | ||
| 621 | { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | ||
| 622 | .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end, | ||
| 623 | }, | ||
| 600 | /* default entry */ | 624 | /* default entry */ |
| 601 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), | 625 | { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), |
| 602 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, | 626 | .driver_info = (unsigned long)&cdc_mbim_info_zlp, |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 3b1ba8237768..1e9843a41168 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
| @@ -955,10 +955,18 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_ | |||
| 955 | * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and | 955 | * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and |
| 956 | * the wNdpIndex field in the header is actually not consistent with reality. It will be later. | 956 | * the wNdpIndex field in the header is actually not consistent with reality. It will be later. |
| 957 | */ | 957 | */ |
| 958 | if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) | 958 | if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { |
| 959 | if (ctx->delayed_ndp16->dwSignature == sign) | 959 | if (ctx->delayed_ndp16->dwSignature == sign) |
| 960 | return ctx->delayed_ndp16; | 960 | return ctx->delayed_ndp16; |
| 961 | 961 | ||
| 962 | /* We can only push a single NDP to the end. Return | ||
| 963 | * NULL to send what we've already got and queue this | ||
| 964 | * skb for later. | ||
| 965 | */ | ||
| 966 | else if (ctx->delayed_ndp16->dwSignature) | ||
| 967 | return NULL; | ||
| 968 | } | ||
| 969 | |||
| 962 | /* follow the chain of NDPs, looking for a match */ | 970 | /* follow the chain of NDPs, looking for a match */ |
| 963 | while (ndpoffset) { | 971 | while (ndpoffset) { |
| 964 | ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset); | 972 | ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset); |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index d9427ca3dba7..2e32c41536ae 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
| @@ -3067,17 +3067,6 @@ static int rtl8152_open(struct net_device *netdev) | |||
| 3067 | 3067 | ||
| 3068 | mutex_lock(&tp->control); | 3068 | mutex_lock(&tp->control); |
| 3069 | 3069 | ||
| 3070 | /* The WORK_ENABLE may be set when autoresume occurs */ | ||
| 3071 | if (test_bit(WORK_ENABLE, &tp->flags)) { | ||
| 3072 | clear_bit(WORK_ENABLE, &tp->flags); | ||
| 3073 | usb_kill_urb(tp->intr_urb); | ||
| 3074 | cancel_delayed_work_sync(&tp->schedule); | ||
| 3075 | |||
| 3076 | /* disable the tx/rx, if the workqueue has enabled them. */ | ||
| 3077 | if (netif_carrier_ok(netdev)) | ||
| 3078 | tp->rtl_ops.disable(tp); | ||
| 3079 | } | ||
| 3080 | |||
| 3081 | tp->rtl_ops.up(tp); | 3070 | tp->rtl_ops.up(tp); |
| 3082 | 3071 | ||
| 3083 | rtl8152_set_speed(tp, AUTONEG_ENABLE, | 3072 | rtl8152_set_speed(tp, AUTONEG_ENABLE, |
| @@ -3124,12 +3113,6 @@ static int rtl8152_close(struct net_device *netdev) | |||
| 3124 | } else { | 3113 | } else { |
| 3125 | mutex_lock(&tp->control); | 3114 | mutex_lock(&tp->control); |
| 3126 | 3115 | ||
| 3127 | /* The autosuspend may have been enabled and wouldn't | ||
| 3128 | * be disable when autoresume occurs, because the | ||
| 3129 | * netif_running() would be false. | ||
| 3130 | */ | ||
| 3131 | rtl_runtime_suspend_enable(tp, false); | ||
| 3132 | |||
| 3133 | tp->rtl_ops.down(tp); | 3116 | tp->rtl_ops.down(tp); |
| 3134 | 3117 | ||
| 3135 | mutex_unlock(&tp->control); | 3118 | mutex_unlock(&tp->control); |
| @@ -3512,7 +3495,7 @@ static int rtl8152_resume(struct usb_interface *intf) | |||
| 3512 | netif_device_attach(tp->netdev); | 3495 | netif_device_attach(tp->netdev); |
| 3513 | } | 3496 | } |
| 3514 | 3497 | ||
| 3515 | if (netif_running(tp->netdev)) { | 3498 | if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { |
| 3516 | if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { | 3499 | if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { |
| 3517 | rtl_runtime_suspend_enable(tp, false); | 3500 | rtl_runtime_suspend_enable(tp, false); |
| 3518 | clear_bit(SELECTIVE_SUSPEND, &tp->flags); | 3501 | clear_bit(SELECTIVE_SUSPEND, &tp->flags); |
| @@ -3532,6 +3515,8 @@ static int rtl8152_resume(struct usb_interface *intf) | |||
| 3532 | } | 3515 | } |
| 3533 | usb_submit_urb(tp->intr_urb, GFP_KERNEL); | 3516 | usb_submit_urb(tp->intr_urb, GFP_KERNEL); |
| 3534 | } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { | 3517 | } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { |
| 3518 | if (tp->netdev->flags & IFF_UP) | ||
| 3519 | rtl_runtime_suspend_enable(tp, false); | ||
| 3535 | clear_bit(SELECTIVE_SUSPEND, &tp->flags); | 3520 | clear_bit(SELECTIVE_SUSPEND, &tp->flags); |
| 3536 | } | 3521 | } |
| 3537 | 3522 | ||
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 6369a5734d4c..ba363cedef80 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
| @@ -1158,7 +1158,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, | |||
| 1158 | struct pcpu_sw_netstats *stats; | 1158 | struct pcpu_sw_netstats *stats; |
| 1159 | union vxlan_addr saddr; | 1159 | union vxlan_addr saddr; |
| 1160 | int err = 0; | 1160 | int err = 0; |
| 1161 | union vxlan_addr *remote_ip; | ||
| 1162 | 1161 | ||
| 1163 | /* For flow based devices, map all packets to VNI 0 */ | 1162 | /* For flow based devices, map all packets to VNI 0 */ |
| 1164 | if (vs->flags & VXLAN_F_COLLECT_METADATA) | 1163 | if (vs->flags & VXLAN_F_COLLECT_METADATA) |
| @@ -1169,7 +1168,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, | |||
| 1169 | if (!vxlan) | 1168 | if (!vxlan) |
| 1170 | goto drop; | 1169 | goto drop; |
| 1171 | 1170 | ||
| 1172 | remote_ip = &vxlan->default_dst.remote_ip; | ||
| 1173 | skb_reset_mac_header(skb); | 1171 | skb_reset_mac_header(skb); |
| 1174 | skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); | 1172 | skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); |
| 1175 | skb->protocol = eth_type_trans(skb, vxlan->dev); | 1173 | skb->protocol = eth_type_trans(skb, vxlan->dev); |
| @@ -1179,8 +1177,8 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, | |||
| 1179 | if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) | 1177 | if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) |
| 1180 | goto drop; | 1178 | goto drop; |
| 1181 | 1179 | ||
| 1182 | /* Re-examine inner Ethernet packet */ | 1180 | /* Get data from the outer IP header */ |
| 1183 | if (remote_ip->sa.sa_family == AF_INET) { | 1181 | if (vxlan_get_sk_family(vs) == AF_INET) { |
| 1184 | oip = ip_hdr(skb); | 1182 | oip = ip_hdr(skb); |
| 1185 | saddr.sin.sin_addr.s_addr = oip->saddr; | 1183 | saddr.sin.sin_addr.s_addr = oip->saddr; |
| 1186 | saddr.sa.sa_family = AF_INET; | 1184 | saddr.sa.sa_family = AF_INET; |
| @@ -1848,6 +1846,34 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk | |||
| 1848 | !(vxflags & VXLAN_F_UDP_CSUM)); | 1846 | !(vxflags & VXLAN_F_UDP_CSUM)); |
| 1849 | } | 1847 | } |
| 1850 | 1848 | ||
| 1849 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 1850 | static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, | ||
| 1851 | struct sk_buff *skb, int oif, | ||
| 1852 | const struct in6_addr *daddr, | ||
| 1853 | struct in6_addr *saddr) | ||
| 1854 | { | ||
| 1855 | struct dst_entry *ndst; | ||
| 1856 | struct flowi6 fl6; | ||
| 1857 | int err; | ||
| 1858 | |||
| 1859 | memset(&fl6, 0, sizeof(fl6)); | ||
| 1860 | fl6.flowi6_oif = oif; | ||
| 1861 | fl6.daddr = *daddr; | ||
| 1862 | fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; | ||
| 1863 | fl6.flowi6_mark = skb->mark; | ||
| 1864 | fl6.flowi6_proto = IPPROTO_UDP; | ||
| 1865 | |||
| 1866 | err = ipv6_stub->ipv6_dst_lookup(vxlan->net, | ||
| 1867 | vxlan->vn6_sock->sock->sk, | ||
| 1868 | &ndst, &fl6); | ||
| 1869 | if (err < 0) | ||
| 1870 | return ERR_PTR(err); | ||
| 1871 | |||
| 1872 | *saddr = fl6.saddr; | ||
| 1873 | return ndst; | ||
| 1874 | } | ||
| 1875 | #endif | ||
| 1876 | |||
| 1851 | /* Bypass encapsulation if the destination is local */ | 1877 | /* Bypass encapsulation if the destination is local */ |
| 1852 | static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, | 1878 | static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, |
| 1853 | struct vxlan_dev *dst_vxlan) | 1879 | struct vxlan_dev *dst_vxlan) |
| @@ -2035,21 +2061,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
| 2035 | #if IS_ENABLED(CONFIG_IPV6) | 2061 | #if IS_ENABLED(CONFIG_IPV6) |
| 2036 | } else { | 2062 | } else { |
| 2037 | struct dst_entry *ndst; | 2063 | struct dst_entry *ndst; |
| 2038 | struct flowi6 fl6; | 2064 | struct in6_addr saddr; |
| 2039 | u32 rt6i_flags; | 2065 | u32 rt6i_flags; |
| 2040 | 2066 | ||
| 2041 | if (!vxlan->vn6_sock) | 2067 | if (!vxlan->vn6_sock) |
| 2042 | goto drop; | 2068 | goto drop; |
| 2043 | sk = vxlan->vn6_sock->sock->sk; | 2069 | sk = vxlan->vn6_sock->sock->sk; |
| 2044 | 2070 | ||
| 2045 | memset(&fl6, 0, sizeof(fl6)); | 2071 | ndst = vxlan6_get_route(vxlan, skb, |
| 2046 | fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0; | 2072 | rdst ? rdst->remote_ifindex : 0, |
| 2047 | fl6.daddr = dst->sin6.sin6_addr; | 2073 | &dst->sin6.sin6_addr, &saddr); |
| 2048 | fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; | 2074 | if (IS_ERR(ndst)) { |
| 2049 | fl6.flowi6_mark = skb->mark; | ||
| 2050 | fl6.flowi6_proto = IPPROTO_UDP; | ||
| 2051 | |||
| 2052 | if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) { | ||
| 2053 | netdev_dbg(dev, "no route to %pI6\n", | 2075 | netdev_dbg(dev, "no route to %pI6\n", |
| 2054 | &dst->sin6.sin6_addr); | 2076 | &dst->sin6.sin6_addr); |
| 2055 | dev->stats.tx_carrier_errors++; | 2077 | dev->stats.tx_carrier_errors++; |
| @@ -2081,7 +2103,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
| 2081 | } | 2103 | } |
| 2082 | 2104 | ||
| 2083 | ttl = ttl ? : ip6_dst_hoplimit(ndst); | 2105 | ttl = ttl ? : ip6_dst_hoplimit(ndst); |
| 2084 | err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr, | 2106 | err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr, |
| 2085 | 0, ttl, src_port, dst_port, htonl(vni << 8), md, | 2107 | 0, ttl, src_port, dst_port, htonl(vni << 8), md, |
| 2086 | !net_eq(vxlan->net, dev_net(vxlan->dev)), | 2108 | !net_eq(vxlan->net, dev_net(vxlan->dev)), |
| 2087 | flags); | 2109 | flags); |
| @@ -2395,9 +2417,30 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) | |||
| 2395 | vxlan->cfg.port_max, true); | 2417 | vxlan->cfg.port_max, true); |
| 2396 | dport = info->key.tp_dst ? : vxlan->cfg.dst_port; | 2418 | dport = info->key.tp_dst ? : vxlan->cfg.dst_port; |
| 2397 | 2419 | ||
| 2398 | if (ip_tunnel_info_af(info) == AF_INET) | 2420 | if (ip_tunnel_info_af(info) == AF_INET) { |
| 2421 | if (!vxlan->vn4_sock) | ||
| 2422 | return -EINVAL; | ||
| 2399 | return egress_ipv4_tun_info(dev, skb, info, sport, dport); | 2423 | return egress_ipv4_tun_info(dev, skb, info, sport, dport); |
| 2400 | return -EINVAL; | 2424 | } else { |
| 2425 | #if IS_ENABLED(CONFIG_IPV6) | ||
| 2426 | struct dst_entry *ndst; | ||
| 2427 | |||
| 2428 | if (!vxlan->vn6_sock) | ||
| 2429 | return -EINVAL; | ||
| 2430 | ndst = vxlan6_get_route(vxlan, skb, 0, | ||
| 2431 | &info->key.u.ipv6.dst, | ||
| 2432 | &info->key.u.ipv6.src); | ||
| 2433 | if (IS_ERR(ndst)) | ||
| 2434 | return PTR_ERR(ndst); | ||
| 2435 | dst_release(ndst); | ||
| 2436 | |||
| 2437 | info->key.tp_src = sport; | ||
| 2438 | info->key.tp_dst = dport; | ||
| 2439 | #else /* !CONFIG_IPV6 */ | ||
| 2440 | return -EPFNOSUPPORT; | ||
| 2441 | #endif | ||
| 2442 | } | ||
| 2443 | return 0; | ||
| 2401 | } | 2444 | } |
| 2402 | 2445 | ||
| 2403 | static const struct net_device_ops vxlan_netdev_ops = { | 2446 | static const struct net_device_ops vxlan_netdev_ops = { |
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index e481f3710bd3..1049c34e7d43 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
| @@ -258,18 +258,18 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue, | |||
| 258 | struct netrx_pending_operations *npo) | 258 | struct netrx_pending_operations *npo) |
| 259 | { | 259 | { |
| 260 | struct xenvif_rx_meta *meta; | 260 | struct xenvif_rx_meta *meta; |
| 261 | struct xen_netif_rx_request *req; | 261 | struct xen_netif_rx_request req; |
| 262 | 262 | ||
| 263 | req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); | 263 | RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); |
| 264 | 264 | ||
| 265 | meta = npo->meta + npo->meta_prod++; | 265 | meta = npo->meta + npo->meta_prod++; |
| 266 | meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; | 266 | meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; |
| 267 | meta->gso_size = 0; | 267 | meta->gso_size = 0; |
| 268 | meta->size = 0; | 268 | meta->size = 0; |
| 269 | meta->id = req->id; | 269 | meta->id = req.id; |
| 270 | 270 | ||
| 271 | npo->copy_off = 0; | 271 | npo->copy_off = 0; |
| 272 | npo->copy_gref = req->gref; | 272 | npo->copy_gref = req.gref; |
| 273 | 273 | ||
| 274 | return meta; | 274 | return meta; |
| 275 | } | 275 | } |
| @@ -424,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
| 424 | struct xenvif *vif = netdev_priv(skb->dev); | 424 | struct xenvif *vif = netdev_priv(skb->dev); |
| 425 | int nr_frags = skb_shinfo(skb)->nr_frags; | 425 | int nr_frags = skb_shinfo(skb)->nr_frags; |
| 426 | int i; | 426 | int i; |
| 427 | struct xen_netif_rx_request *req; | 427 | struct xen_netif_rx_request req; |
| 428 | struct xenvif_rx_meta *meta; | 428 | struct xenvif_rx_meta *meta; |
| 429 | unsigned char *data; | 429 | unsigned char *data; |
| 430 | int head = 1; | 430 | int head = 1; |
| @@ -443,15 +443,15 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
| 443 | 443 | ||
| 444 | /* Set up a GSO prefix descriptor, if necessary */ | 444 | /* Set up a GSO prefix descriptor, if necessary */ |
| 445 | if ((1 << gso_type) & vif->gso_prefix_mask) { | 445 | if ((1 << gso_type) & vif->gso_prefix_mask) { |
| 446 | req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); | 446 | RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); |
| 447 | meta = npo->meta + npo->meta_prod++; | 447 | meta = npo->meta + npo->meta_prod++; |
| 448 | meta->gso_type = gso_type; | 448 | meta->gso_type = gso_type; |
| 449 | meta->gso_size = skb_shinfo(skb)->gso_size; | 449 | meta->gso_size = skb_shinfo(skb)->gso_size; |
| 450 | meta->size = 0; | 450 | meta->size = 0; |
| 451 | meta->id = req->id; | 451 | meta->id = req.id; |
| 452 | } | 452 | } |
| 453 | 453 | ||
| 454 | req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); | 454 | RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); |
| 455 | meta = npo->meta + npo->meta_prod++; | 455 | meta = npo->meta + npo->meta_prod++; |
| 456 | 456 | ||
| 457 | if ((1 << gso_type) & vif->gso_mask) { | 457 | if ((1 << gso_type) & vif->gso_mask) { |
| @@ -463,9 +463,9 @@ static int xenvif_gop_skb(struct sk_buff *skb, | |||
| 463 | } | 463 | } |
| 464 | 464 | ||
| 465 | meta->size = 0; | 465 | meta->size = 0; |
| 466 | meta->id = req->id; | 466 | meta->id = req.id; |
| 467 | npo->copy_off = 0; | 467 | npo->copy_off = 0; |
| 468 | npo->copy_gref = req->gref; | 468 | npo->copy_gref = req.gref; |
| 469 | 469 | ||
| 470 | data = skb->data; | 470 | data = skb->data; |
| 471 | while (data < skb_tail_pointer(skb)) { | 471 | while (data < skb_tail_pointer(skb)) { |
| @@ -679,9 +679,7 @@ static void tx_add_credit(struct xenvif_queue *queue) | |||
| 679 | * Allow a burst big enough to transmit a jumbo packet of up to 128kB. | 679 | * Allow a burst big enough to transmit a jumbo packet of up to 128kB. |
| 680 | * Otherwise the interface can seize up due to insufficient credit. | 680 | * Otherwise the interface can seize up due to insufficient credit. |
| 681 | */ | 681 | */ |
| 682 | max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size; | 682 | max_burst = max(131072UL, queue->credit_bytes); |
| 683 | max_burst = min(max_burst, 131072UL); | ||
| 684 | max_burst = max(max_burst, queue->credit_bytes); | ||
| 685 | 683 | ||
| 686 | /* Take care that adding a new chunk of credit doesn't wrap to zero. */ | 684 | /* Take care that adding a new chunk of credit doesn't wrap to zero. */ |
| 687 | max_credit = queue->remaining_credit + queue->credit_bytes; | 685 | max_credit = queue->remaining_credit + queue->credit_bytes; |
| @@ -711,7 +709,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue, | |||
| 711 | spin_unlock_irqrestore(&queue->response_lock, flags); | 709 | spin_unlock_irqrestore(&queue->response_lock, flags); |
| 712 | if (cons == end) | 710 | if (cons == end) |
| 713 | break; | 711 | break; |
| 714 | txp = RING_GET_REQUEST(&queue->tx, cons++); | 712 | RING_COPY_REQUEST(&queue->tx, cons++, txp); |
| 715 | } while (1); | 713 | } while (1); |
| 716 | queue->tx.req_cons = cons; | 714 | queue->tx.req_cons = cons; |
| 717 | } | 715 | } |
| @@ -778,8 +776,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue, | |||
| 778 | if (drop_err) | 776 | if (drop_err) |
| 779 | txp = &dropped_tx; | 777 | txp = &dropped_tx; |
| 780 | 778 | ||
| 781 | memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots), | 779 | RING_COPY_REQUEST(&queue->tx, cons + slots, txp); |
| 782 | sizeof(*txp)); | ||
| 783 | 780 | ||
| 784 | /* If the guest submitted a frame >= 64 KiB then | 781 | /* If the guest submitted a frame >= 64 KiB then |
| 785 | * first->size overflowed and following slots will | 782 | * first->size overflowed and following slots will |
| @@ -1112,8 +1109,7 @@ static int xenvif_get_extras(struct xenvif_queue *queue, | |||
| 1112 | return -EBADR; | 1109 | return -EBADR; |
| 1113 | } | 1110 | } |
| 1114 | 1111 | ||
| 1115 | memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons), | 1112 | RING_COPY_REQUEST(&queue->tx, cons, &extra); |
| 1116 | sizeof(extra)); | ||
| 1117 | if (unlikely(!extra.type || | 1113 | if (unlikely(!extra.type || |
| 1118 | extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { | 1114 | extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { |
| 1119 | queue->tx.req_cons = ++cons; | 1115 | queue->tx.req_cons = ++cons; |
| @@ -1322,7 +1318,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, | |||
| 1322 | 1318 | ||
| 1323 | idx = queue->tx.req_cons; | 1319 | idx = queue->tx.req_cons; |
| 1324 | rmb(); /* Ensure that we see the request before we copy it. */ | 1320 | rmb(); /* Ensure that we see the request before we copy it. */ |
| 1325 | memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq)); | 1321 | RING_COPY_REQUEST(&queue->tx, idx, &txreq); |
| 1326 | 1322 | ||
| 1327 | /* Credit-based scheduling. */ | 1323 | /* Credit-based scheduling. */ |
| 1328 | if (txreq.size > queue->remaining_credit && | 1324 | if (txreq.size > queue->remaining_credit && |
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 7eb5859dd035..03cb3ea2d2c0 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig | |||
| @@ -233,6 +233,7 @@ config PHY_SUN9I_USB | |||
| 233 | tristate "Allwinner sun9i SoC USB PHY driver" | 233 | tristate "Allwinner sun9i SoC USB PHY driver" |
| 234 | depends on ARCH_SUNXI && HAS_IOMEM && OF | 234 | depends on ARCH_SUNXI && HAS_IOMEM && OF |
| 235 | depends on RESET_CONTROLLER | 235 | depends on RESET_CONTROLLER |
| 236 | depends on USB_COMMON | ||
| 236 | select GENERIC_PHY | 237 | select GENERIC_PHY |
| 237 | help | 238 | help |
| 238 | Enable this to support the transceiver that is part of Allwinner | 239 | Enable this to support the transceiver that is part of Allwinner |
diff --git a/drivers/phy/phy-bcm-cygnus-pcie.c b/drivers/phy/phy-bcm-cygnus-pcie.c index 7ad72b7d2b98..082c03f6438f 100644 --- a/drivers/phy/phy-bcm-cygnus-pcie.c +++ b/drivers/phy/phy-bcm-cygnus-pcie.c | |||
| @@ -128,6 +128,7 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev) | |||
| 128 | struct phy_provider *provider; | 128 | struct phy_provider *provider; |
| 129 | struct resource *res; | 129 | struct resource *res; |
| 130 | unsigned cnt = 0; | 130 | unsigned cnt = 0; |
| 131 | int ret; | ||
| 131 | 132 | ||
| 132 | if (of_get_child_count(node) == 0) { | 133 | if (of_get_child_count(node) == 0) { |
| 133 | dev_err(dev, "PHY no child node\n"); | 134 | dev_err(dev, "PHY no child node\n"); |
| @@ -154,24 +155,28 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev) | |||
| 154 | if (of_property_read_u32(child, "reg", &id)) { | 155 | if (of_property_read_u32(child, "reg", &id)) { |
| 155 | dev_err(dev, "missing reg property for %s\n", | 156 | dev_err(dev, "missing reg property for %s\n", |
| 156 | child->name); | 157 | child->name); |
| 157 | return -EINVAL; | 158 | ret = -EINVAL; |
| 159 | goto put_child; | ||
| 158 | } | 160 | } |
| 159 | 161 | ||
| 160 | if (id >= MAX_NUM_PHYS) { | 162 | if (id >= MAX_NUM_PHYS) { |
| 161 | dev_err(dev, "invalid PHY id: %u\n", id); | 163 | dev_err(dev, "invalid PHY id: %u\n", id); |
| 162 | return -EINVAL; | 164 | ret = -EINVAL; |
| 165 | goto put_child; | ||
| 163 | } | 166 | } |
| 164 | 167 | ||
| 165 | if (core->phys[id].phy) { | 168 | if (core->phys[id].phy) { |
| 166 | dev_err(dev, "duplicated PHY id: %u\n", id); | 169 | dev_err(dev, "duplicated PHY id: %u\n", id); |
| 167 | return -EINVAL; | 170 | ret = -EINVAL; |
| 171 | goto put_child; | ||
| 168 | } | 172 | } |
| 169 | 173 | ||
| 170 | p = &core->phys[id]; | 174 | p = &core->phys[id]; |
| 171 | p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops); | 175 | p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops); |
| 172 | if (IS_ERR(p->phy)) { | 176 | if (IS_ERR(p->phy)) { |
| 173 | dev_err(dev, "failed to create PHY\n"); | 177 | dev_err(dev, "failed to create PHY\n"); |
| 174 | return PTR_ERR(p->phy); | 178 | ret = PTR_ERR(p->phy); |
| 179 | goto put_child; | ||
| 175 | } | 180 | } |
| 176 | 181 | ||
| 177 | p->core = core; | 182 | p->core = core; |
| @@ -191,6 +196,9 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev) | |||
| 191 | dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt); | 196 | dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt); |
| 192 | 197 | ||
| 193 | return 0; | 198 | return 0; |
| 199 | put_child: | ||
| 200 | of_node_put(child); | ||
| 201 | return ret; | ||
| 194 | } | 202 | } |
| 195 | 203 | ||
| 196 | static const struct of_device_id cygnus_pcie_phy_match_table[] = { | 204 | static const struct of_device_id cygnus_pcie_phy_match_table[] = { |
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c index 77a2e054fdea..f84a33a1bdd9 100644 --- a/drivers/phy/phy-berlin-sata.c +++ b/drivers/phy/phy-berlin-sata.c | |||
| @@ -195,7 +195,7 @@ static int phy_berlin_sata_probe(struct platform_device *pdev) | |||
| 195 | struct phy_provider *phy_provider; | 195 | struct phy_provider *phy_provider; |
| 196 | struct phy_berlin_priv *priv; | 196 | struct phy_berlin_priv *priv; |
| 197 | struct resource *res; | 197 | struct resource *res; |
| 198 | int i = 0; | 198 | int ret, i = 0; |
| 199 | u32 phy_id; | 199 | u32 phy_id; |
| 200 | 200 | ||
| 201 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | 201 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); |
| @@ -237,22 +237,27 @@ static int phy_berlin_sata_probe(struct platform_device *pdev) | |||
| 237 | if (of_property_read_u32(child, "reg", &phy_id)) { | 237 | if (of_property_read_u32(child, "reg", &phy_id)) { |
| 238 | dev_err(dev, "missing reg property in node %s\n", | 238 | dev_err(dev, "missing reg property in node %s\n", |
| 239 | child->name); | 239 | child->name); |
| 240 | return -EINVAL; | 240 | ret = -EINVAL; |
| 241 | goto put_child; | ||
| 241 | } | 242 | } |
| 242 | 243 | ||
| 243 | if (phy_id >= ARRAY_SIZE(phy_berlin_power_down_bits)) { | 244 | if (phy_id >= ARRAY_SIZE(phy_berlin_power_down_bits)) { |
| 244 | dev_err(dev, "invalid reg in node %s\n", child->name); | 245 | dev_err(dev, "invalid reg in node %s\n", child->name); |
| 245 | return -EINVAL; | 246 | ret = -EINVAL; |
| 247 | goto put_child; | ||
| 246 | } | 248 | } |
| 247 | 249 | ||
| 248 | phy_desc = devm_kzalloc(dev, sizeof(*phy_desc), GFP_KERNEL); | 250 | phy_desc = devm_kzalloc(dev, sizeof(*phy_desc), GFP_KERNEL); |
| 249 | if (!phy_desc) | 251 | if (!phy_desc) { |
| 250 | return -ENOMEM; | 252 | ret = -ENOMEM; |
| 253 | goto put_child; | ||
| 254 | } | ||
| 251 | 255 | ||
| 252 | phy = devm_phy_create(dev, NULL, &phy_berlin_sata_ops); | 256 | phy = devm_phy_create(dev, NULL, &phy_berlin_sata_ops); |
| 253 | if (IS_ERR(phy)) { | 257 | if (IS_ERR(phy)) { |
| 254 | dev_err(dev, "failed to create PHY %d\n", phy_id); | 258 | dev_err(dev, "failed to create PHY %d\n", phy_id); |
| 255 | return PTR_ERR(phy); | 259 | ret = PTR_ERR(phy); |
| 260 | goto put_child; | ||
| 256 | } | 261 | } |
| 257 | 262 | ||
| 258 | phy_desc->phy = phy; | 263 | phy_desc->phy = phy; |
| @@ -269,6 +274,9 @@ static int phy_berlin_sata_probe(struct platform_device *pdev) | |||
| 269 | phy_provider = | 274 | phy_provider = |
| 270 | devm_of_phy_provider_register(dev, phy_berlin_sata_phy_xlate); | 275 | devm_of_phy_provider_register(dev, phy_berlin_sata_phy_xlate); |
| 271 | return PTR_ERR_OR_ZERO(phy_provider); | 276 | return PTR_ERR_OR_ZERO(phy_provider); |
| 277 | put_child: | ||
| 278 | of_node_put(child); | ||
| 279 | return ret; | ||
| 272 | } | 280 | } |
| 273 | 281 | ||
| 274 | static const struct of_device_id phy_berlin_sata_of_match[] = { | 282 | static const struct of_device_id phy_berlin_sata_of_match[] = { |
diff --git a/drivers/phy/phy-brcmstb-sata.c b/drivers/phy/phy-brcmstb-sata.c index 8a2cb16a1937..cd9dba820566 100644 --- a/drivers/phy/phy-brcmstb-sata.c +++ b/drivers/phy/phy-brcmstb-sata.c | |||
| @@ -140,7 +140,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) | |||
| 140 | struct brcm_sata_phy *priv; | 140 | struct brcm_sata_phy *priv; |
| 141 | struct resource *res; | 141 | struct resource *res; |
| 142 | struct phy_provider *provider; | 142 | struct phy_provider *provider; |
| 143 | int count = 0; | 143 | int ret, count = 0; |
| 144 | 144 | ||
| 145 | if (of_get_child_count(dn) == 0) | 145 | if (of_get_child_count(dn) == 0) |
| 146 | return -ENODEV; | 146 | return -ENODEV; |
| @@ -163,16 +163,19 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) | |||
| 163 | if (of_property_read_u32(child, "reg", &id)) { | 163 | if (of_property_read_u32(child, "reg", &id)) { |
| 164 | dev_err(dev, "missing reg property in node %s\n", | 164 | dev_err(dev, "missing reg property in node %s\n", |
| 165 | child->name); | 165 | child->name); |
| 166 | return -EINVAL; | 166 | ret = -EINVAL; |
| 167 | goto put_child; | ||
| 167 | } | 168 | } |
| 168 | 169 | ||
| 169 | if (id >= MAX_PORTS) { | 170 | if (id >= MAX_PORTS) { |
| 170 | dev_err(dev, "invalid reg: %u\n", id); | 171 | dev_err(dev, "invalid reg: %u\n", id); |
| 171 | return -EINVAL; | 172 | ret = -EINVAL; |
| 173 | goto put_child; | ||
| 172 | } | 174 | } |
| 173 | if (priv->phys[id].phy) { | 175 | if (priv->phys[id].phy) { |
| 174 | dev_err(dev, "already registered port %u\n", id); | 176 | dev_err(dev, "already registered port %u\n", id); |
| 175 | return -EINVAL; | 177 | ret = -EINVAL; |
| 178 | goto put_child; | ||
| 176 | } | 179 | } |
| 177 | 180 | ||
| 178 | port = &priv->phys[id]; | 181 | port = &priv->phys[id]; |
| @@ -182,7 +185,8 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) | |||
| 182 | port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc"); | 185 | port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc"); |
| 183 | if (IS_ERR(port->phy)) { | 186 | if (IS_ERR(port->phy)) { |
| 184 | dev_err(dev, "failed to create PHY\n"); | 187 | dev_err(dev, "failed to create PHY\n"); |
| 185 | return PTR_ERR(port->phy); | 188 | ret = PTR_ERR(port->phy); |
| 189 | goto put_child; | ||
| 186 | } | 190 | } |
| 187 | 191 | ||
| 188 | phy_set_drvdata(port->phy, port); | 192 | phy_set_drvdata(port->phy, port); |
| @@ -198,6 +202,9 @@ static int brcm_sata_phy_probe(struct platform_device *pdev) | |||
| 198 | dev_info(dev, "registered %d port(s)\n", count); | 202 | dev_info(dev, "registered %d port(s)\n", count); |
| 199 | 203 | ||
| 200 | return 0; | 204 | return 0; |
| 205 | put_child: | ||
| 206 | of_node_put(child); | ||
| 207 | return ret; | ||
| 201 | } | 208 | } |
| 202 | 209 | ||
| 203 | static struct platform_driver brcm_sata_phy_driver = { | 210 | static struct platform_driver brcm_sata_phy_driver = { |
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index fc48fac003a6..8c7f27db6ad3 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c | |||
| @@ -636,8 +636,9 @@ EXPORT_SYMBOL_GPL(devm_of_phy_get); | |||
| 636 | * @np: node containing the phy | 636 | * @np: node containing the phy |
| 637 | * @index: index of the phy | 637 | * @index: index of the phy |
| 638 | * | 638 | * |
| 639 | * Gets the phy using _of_phy_get(), and associates a device with it using | 639 | * Gets the phy using _of_phy_get(), then gets a refcount to it, |
| 640 | * devres. On driver detach, release function is invoked on the devres data, | 640 | * and associates a device with it using devres. On driver detach, |
| 641 | * release function is invoked on the devres data, | ||
| 641 | * then, devres data is freed. | 642 | * then, devres data is freed. |
| 642 | * | 643 | * |
| 643 | */ | 644 | */ |
| @@ -651,13 +652,21 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np, | |||
| 651 | return ERR_PTR(-ENOMEM); | 652 | return ERR_PTR(-ENOMEM); |
| 652 | 653 | ||
| 653 | phy = _of_phy_get(np, index); | 654 | phy = _of_phy_get(np, index); |
| 654 | if (!IS_ERR(phy)) { | 655 | if (IS_ERR(phy)) { |
| 655 | *ptr = phy; | ||
| 656 | devres_add(dev, ptr); | ||
| 657 | } else { | ||
| 658 | devres_free(ptr); | 656 | devres_free(ptr); |
| 657 | return phy; | ||
| 659 | } | 658 | } |
| 660 | 659 | ||
| 660 | if (!try_module_get(phy->ops->owner)) { | ||
| 661 | devres_free(ptr); | ||
| 662 | return ERR_PTR(-EPROBE_DEFER); | ||
| 663 | } | ||
| 664 | |||
| 665 | get_device(&phy->dev); | ||
| 666 | |||
| 667 | *ptr = phy; | ||
| 668 | devres_add(dev, ptr); | ||
| 669 | |||
| 661 | return phy; | 670 | return phy; |
| 662 | } | 671 | } |
| 663 | EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index); | 672 | EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index); |
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c index c47b56b4a2b8..3acd2a1808df 100644 --- a/drivers/phy/phy-miphy28lp.c +++ b/drivers/phy/phy-miphy28lp.c | |||
| @@ -1226,15 +1226,18 @@ static int miphy28lp_probe(struct platform_device *pdev) | |||
| 1226 | 1226 | ||
| 1227 | miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), | 1227 | miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), |
| 1228 | GFP_KERNEL); | 1228 | GFP_KERNEL); |
| 1229 | if (!miphy_phy) | 1229 | if (!miphy_phy) { |
| 1230 | return -ENOMEM; | 1230 | ret = -ENOMEM; |
| 1231 | goto put_child; | ||
| 1232 | } | ||
| 1231 | 1233 | ||
| 1232 | miphy_dev->phys[port] = miphy_phy; | 1234 | miphy_dev->phys[port] = miphy_phy; |
| 1233 | 1235 | ||
| 1234 | phy = devm_phy_create(&pdev->dev, child, &miphy28lp_ops); | 1236 | phy = devm_phy_create(&pdev->dev, child, &miphy28lp_ops); |
| 1235 | if (IS_ERR(phy)) { | 1237 | if (IS_ERR(phy)) { |
| 1236 | dev_err(&pdev->dev, "failed to create PHY\n"); | 1238 | dev_err(&pdev->dev, "failed to create PHY\n"); |
| 1237 | return PTR_ERR(phy); | 1239 | ret = PTR_ERR(phy); |
| 1240 | goto put_child; | ||
| 1238 | } | 1241 | } |
| 1239 | 1242 | ||
| 1240 | miphy_dev->phys[port]->phy = phy; | 1243 | miphy_dev->phys[port]->phy = phy; |
| @@ -1242,11 +1245,11 @@ static int miphy28lp_probe(struct platform_device *pdev) | |||
| 1242 | 1245 | ||
| 1243 | ret = miphy28lp_of_probe(child, miphy_phy); | 1246 | ret = miphy28lp_of_probe(child, miphy_phy); |
| 1244 | if (ret) | 1247 | if (ret) |
| 1245 | return ret; | 1248 | goto put_child; |
| 1246 | 1249 | ||
| 1247 | ret = miphy28lp_probe_resets(child, miphy_dev->phys[port]); | 1250 | ret = miphy28lp_probe_resets(child, miphy_dev->phys[port]); |
| 1248 | if (ret) | 1251 | if (ret) |
| 1249 | return ret; | 1252 | goto put_child; |
| 1250 | 1253 | ||
| 1251 | phy_set_drvdata(phy, miphy_dev->phys[port]); | 1254 | phy_set_drvdata(phy, miphy_dev->phys[port]); |
| 1252 | port++; | 1255 | port++; |
| @@ -1255,6 +1258,9 @@ static int miphy28lp_probe(struct platform_device *pdev) | |||
| 1255 | 1258 | ||
| 1256 | provider = devm_of_phy_provider_register(&pdev->dev, miphy28lp_xlate); | 1259 | provider = devm_of_phy_provider_register(&pdev->dev, miphy28lp_xlate); |
| 1257 | return PTR_ERR_OR_ZERO(provider); | 1260 | return PTR_ERR_OR_ZERO(provider); |
| 1261 | put_child: | ||
| 1262 | of_node_put(child); | ||
| 1263 | return ret; | ||
| 1258 | } | 1264 | } |
| 1259 | 1265 | ||
| 1260 | static const struct of_device_id miphy28lp_of_match[] = { | 1266 | static const struct of_device_id miphy28lp_of_match[] = { |
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c index 00a686a073ed..e661f3b36eaa 100644 --- a/drivers/phy/phy-miphy365x.c +++ b/drivers/phy/phy-miphy365x.c | |||
| @@ -566,22 +566,25 @@ static int miphy365x_probe(struct platform_device *pdev) | |||
| 566 | 566 | ||
| 567 | miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), | 567 | miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy), |
| 568 | GFP_KERNEL); | 568 | GFP_KERNEL); |
| 569 | if (!miphy_phy) | 569 | if (!miphy_phy) { |
| 570 | return -ENOMEM; | 570 | ret = -ENOMEM; |
| 571 | goto put_child; | ||
| 572 | } | ||
| 571 | 573 | ||
| 572 | miphy_dev->phys[port] = miphy_phy; | 574 | miphy_dev->phys[port] = miphy_phy; |
| 573 | 575 | ||
| 574 | phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops); | 576 | phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops); |
| 575 | if (IS_ERR(phy)) { | 577 | if (IS_ERR(phy)) { |
| 576 | dev_err(&pdev->dev, "failed to create PHY\n"); | 578 | dev_err(&pdev->dev, "failed to create PHY\n"); |
| 577 | return PTR_ERR(phy); | 579 | ret = PTR_ERR(phy); |
| 580 | goto put_child; | ||
| 578 | } | 581 | } |
| 579 | 582 | ||
| 580 | miphy_dev->phys[port]->phy = phy; | 583 | miphy_dev->phys[port]->phy = phy; |
| 581 | 584 | ||
| 582 | ret = miphy365x_of_probe(child, miphy_phy); | 585 | ret = miphy365x_of_probe(child, miphy_phy); |
| 583 | if (ret) | 586 | if (ret) |
| 584 | return ret; | 587 | goto put_child; |
| 585 | 588 | ||
| 586 | phy_set_drvdata(phy, miphy_dev->phys[port]); | 589 | phy_set_drvdata(phy, miphy_dev->phys[port]); |
| 587 | 590 | ||
| @@ -591,12 +594,15 @@ static int miphy365x_probe(struct platform_device *pdev) | |||
| 591 | &miphy_phy->ctrlreg); | 594 | &miphy_phy->ctrlreg); |
| 592 | if (ret) { | 595 | if (ret) { |
| 593 | dev_err(&pdev->dev, "No sysconfig offset found\n"); | 596 | dev_err(&pdev->dev, "No sysconfig offset found\n"); |
| 594 | return ret; | 597 | goto put_child; |
| 595 | } | 598 | } |
| 596 | } | 599 | } |
| 597 | 600 | ||
| 598 | provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate); | 601 | provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate); |
| 599 | return PTR_ERR_OR_ZERO(provider); | 602 | return PTR_ERR_OR_ZERO(provider); |
| 603 | put_child: | ||
| 604 | of_node_put(child); | ||
| 605 | return ret; | ||
| 600 | } | 606 | } |
| 601 | 607 | ||
| 602 | static const struct of_device_id miphy365x_of_match[] = { | 608 | static const struct of_device_id miphy365x_of_match[] = { |
diff --git a/drivers/phy/phy-mt65xx-usb3.c b/drivers/phy/phy-mt65xx-usb3.c index f30b28bd41fe..e427c3b788ff 100644 --- a/drivers/phy/phy-mt65xx-usb3.c +++ b/drivers/phy/phy-mt65xx-usb3.c | |||
| @@ -415,7 +415,7 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev) | |||
| 415 | struct resource *sif_res; | 415 | struct resource *sif_res; |
| 416 | struct mt65xx_u3phy *u3phy; | 416 | struct mt65xx_u3phy *u3phy; |
| 417 | struct resource res; | 417 | struct resource res; |
| 418 | int port; | 418 | int port, retval; |
| 419 | 419 | ||
| 420 | u3phy = devm_kzalloc(dev, sizeof(*u3phy), GFP_KERNEL); | 420 | u3phy = devm_kzalloc(dev, sizeof(*u3phy), GFP_KERNEL); |
| 421 | if (!u3phy) | 421 | if (!u3phy) |
| @@ -447,31 +447,34 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev) | |||
| 447 | for_each_child_of_node(np, child_np) { | 447 | for_each_child_of_node(np, child_np) { |
| 448 | struct mt65xx_phy_instance *instance; | 448 | struct mt65xx_phy_instance *instance; |
| 449 | struct phy *phy; | 449 | struct phy *phy; |
| 450 | int retval; | ||
| 451 | 450 | ||
| 452 | instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL); | 451 | instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL); |
| 453 | if (!instance) | 452 | if (!instance) { |
| 454 | return -ENOMEM; | 453 | retval = -ENOMEM; |
| 454 | goto put_child; | ||
| 455 | } | ||
| 455 | 456 | ||
| 456 | u3phy->phys[port] = instance; | 457 | u3phy->phys[port] = instance; |
| 457 | 458 | ||
| 458 | phy = devm_phy_create(dev, child_np, &mt65xx_u3phy_ops); | 459 | phy = devm_phy_create(dev, child_np, &mt65xx_u3phy_ops); |
| 459 | if (IS_ERR(phy)) { | 460 | if (IS_ERR(phy)) { |
| 460 | dev_err(dev, "failed to create phy\n"); | 461 | dev_err(dev, "failed to create phy\n"); |
| 461 | return PTR_ERR(phy); | 462 | retval = PTR_ERR(phy); |
| 463 | goto put_child; | ||
| 462 | } | 464 | } |
| 463 | 465 | ||
| 464 | retval = of_address_to_resource(child_np, 0, &res); | 466 | retval = of_address_to_resource(child_np, 0, &res); |
| 465 | if (retval) { | 467 | if (retval) { |
| 466 | dev_err(dev, "failed to get address resource(id-%d)\n", | 468 | dev_err(dev, "failed to get address resource(id-%d)\n", |
| 467 | port); | 469 | port); |
| 468 | return retval; | 470 | goto put_child; |
| 469 | } | 471 | } |
| 470 | 472 | ||
| 471 | instance->port_base = devm_ioremap_resource(&phy->dev, &res); | 473 | instance->port_base = devm_ioremap_resource(&phy->dev, &res); |
| 472 | if (IS_ERR(instance->port_base)) { | 474 | if (IS_ERR(instance->port_base)) { |
| 473 | dev_err(dev, "failed to remap phy regs\n"); | 475 | dev_err(dev, "failed to remap phy regs\n"); |
| 474 | return PTR_ERR(instance->port_base); | 476 | retval = PTR_ERR(instance->port_base); |
| 477 | goto put_child; | ||
| 475 | } | 478 | } |
| 476 | 479 | ||
| 477 | instance->phy = phy; | 480 | instance->phy = phy; |
| @@ -483,6 +486,9 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev) | |||
| 483 | provider = devm_of_phy_provider_register(dev, mt65xx_phy_xlate); | 486 | provider = devm_of_phy_provider_register(dev, mt65xx_phy_xlate); |
| 484 | 487 | ||
| 485 | return PTR_ERR_OR_ZERO(provider); | 488 | return PTR_ERR_OR_ZERO(provider); |
| 489 | put_child: | ||
| 490 | of_node_put(child_np); | ||
| 491 | return retval; | ||
| 486 | } | 492 | } |
| 487 | 493 | ||
| 488 | static const struct of_device_id mt65xx_u3phy_id_table[] = { | 494 | static const struct of_device_id mt65xx_u3phy_id_table[] = { |
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c index 91d6f342c565..62c43c435194 100644 --- a/drivers/phy/phy-rockchip-usb.c +++ b/drivers/phy/phy-rockchip-usb.c | |||
| @@ -108,13 +108,16 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev) | |||
| 108 | 108 | ||
| 109 | for_each_available_child_of_node(dev->of_node, child) { | 109 | for_each_available_child_of_node(dev->of_node, child) { |
| 110 | rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL); | 110 | rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL); |
| 111 | if (!rk_phy) | 111 | if (!rk_phy) { |
| 112 | return -ENOMEM; | 112 | err = -ENOMEM; |
| 113 | goto put_child; | ||
| 114 | } | ||
| 113 | 115 | ||
| 114 | if (of_property_read_u32(child, "reg", ®_offset)) { | 116 | if (of_property_read_u32(child, "reg", ®_offset)) { |
| 115 | dev_err(dev, "missing reg property in node %s\n", | 117 | dev_err(dev, "missing reg property in node %s\n", |
| 116 | child->name); | 118 | child->name); |
| 117 | return -EINVAL; | 119 | err = -EINVAL; |
| 120 | goto put_child; | ||
| 118 | } | 121 | } |
| 119 | 122 | ||
| 120 | rk_phy->reg_offset = reg_offset; | 123 | rk_phy->reg_offset = reg_offset; |
| @@ -127,18 +130,22 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev) | |||
| 127 | rk_phy->phy = devm_phy_create(dev, child, &ops); | 130 | rk_phy->phy = devm_phy_create(dev, child, &ops); |
| 128 | if (IS_ERR(rk_phy->phy)) { | 131 | if (IS_ERR(rk_phy->phy)) { |
| 129 | dev_err(dev, "failed to create PHY\n"); | 132 | dev_err(dev, "failed to create PHY\n"); |
| 130 | return PTR_ERR(rk_phy->phy); | 133 | err = PTR_ERR(rk_phy->phy); |
| 134 | goto put_child; | ||
| 131 | } | 135 | } |
| 132 | phy_set_drvdata(rk_phy->phy, rk_phy); | 136 | phy_set_drvdata(rk_phy->phy, rk_phy); |
| 133 | 137 | ||
| 134 | /* only power up usb phy when it use, so disable it when init*/ | 138 | /* only power up usb phy when it use, so disable it when init*/ |
| 135 | err = rockchip_usb_phy_power(rk_phy, 1); | 139 | err = rockchip_usb_phy_power(rk_phy, 1); |
| 136 | if (err) | 140 | if (err) |
| 137 | return err; | 141 | goto put_child; |
| 138 | } | 142 | } |
| 139 | 143 | ||
| 140 | phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); | 144 | phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); |
| 141 | return PTR_ERR_OR_ZERO(phy_provider); | 145 | return PTR_ERR_OR_ZERO(phy_provider); |
| 146 | put_child: | ||
| 147 | of_node_put(child); | ||
| 148 | return err; | ||
| 142 | } | 149 | } |
| 143 | 150 | ||
| 144 | static const struct of_device_id rockchip_usb_phy_dt_ids[] = { | 151 | static const struct of_device_id rockchip_usb_phy_dt_ids[] = { |
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index a1ea565fcd46..2e6ca69635aa 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c | |||
| @@ -342,12 +342,6 @@ static int bcm2835_gpio_get(struct gpio_chip *chip, unsigned offset) | |||
| 342 | return bcm2835_gpio_get_bit(pc, GPLEV0, offset); | 342 | return bcm2835_gpio_get_bit(pc, GPLEV0, offset); |
| 343 | } | 343 | } |
| 344 | 344 | ||
| 345 | static int bcm2835_gpio_direction_output(struct gpio_chip *chip, | ||
| 346 | unsigned offset, int value) | ||
| 347 | { | ||
| 348 | return pinctrl_gpio_direction_output(chip->base + offset); | ||
| 349 | } | ||
| 350 | |||
| 351 | static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | 345 | static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value) |
| 352 | { | 346 | { |
| 353 | struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev); | 347 | struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev); |
| @@ -355,6 +349,13 @@ static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | |||
| 355 | bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset); | 349 | bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset); |
| 356 | } | 350 | } |
| 357 | 351 | ||
| 352 | static int bcm2835_gpio_direction_output(struct gpio_chip *chip, | ||
| 353 | unsigned offset, int value) | ||
| 354 | { | ||
| 355 | bcm2835_gpio_set(chip, offset, value); | ||
| 356 | return pinctrl_gpio_direction_output(chip->base + offset); | ||
| 357 | } | ||
| 358 | |||
| 358 | static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset) | 359 | static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset) |
| 359 | { | 360 | { |
| 360 | struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev); | 361 | struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev); |
diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c index 37a037543d29..587d1ff6210e 100644 --- a/drivers/pinctrl/freescale/pinctrl-vf610.c +++ b/drivers/pinctrl/freescale/pinctrl-vf610.c | |||
| @@ -299,7 +299,7 @@ static const struct pinctrl_pin_desc vf610_pinctrl_pads[] = { | |||
| 299 | static struct imx_pinctrl_soc_info vf610_pinctrl_info = { | 299 | static struct imx_pinctrl_soc_info vf610_pinctrl_info = { |
| 300 | .pins = vf610_pinctrl_pads, | 300 | .pins = vf610_pinctrl_pads, |
| 301 | .npins = ARRAY_SIZE(vf610_pinctrl_pads), | 301 | .npins = ARRAY_SIZE(vf610_pinctrl_pads), |
| 302 | .flags = SHARE_MUX_CONF_REG, | 302 | .flags = SHARE_MUX_CONF_REG | ZERO_OFFSET_VALID, |
| 303 | }; | 303 | }; |
| 304 | 304 | ||
| 305 | static const struct of_device_id vf610_pinctrl_of_match[] = { | 305 | static const struct of_device_id vf610_pinctrl_of_match[] = { |
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c index e42d5d4183f5..5979d38c46b2 100644 --- a/drivers/pinctrl/intel/pinctrl-broxton.c +++ b/drivers/pinctrl/intel/pinctrl-broxton.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | .padcfglock_offset = BXT_PADCFGLOCK, \ | 28 | .padcfglock_offset = BXT_PADCFGLOCK, \ |
| 29 | .hostown_offset = BXT_HOSTSW_OWN, \ | 29 | .hostown_offset = BXT_HOSTSW_OWN, \ |
| 30 | .ie_offset = BXT_GPI_IE, \ | 30 | .ie_offset = BXT_GPI_IE, \ |
| 31 | .gpp_size = 32, \ | ||
| 31 | .pin_base = (s), \ | 32 | .pin_base = (s), \ |
| 32 | .npins = ((e) - (s) + 1), \ | 33 | .npins = ((e) - (s) + 1), \ |
| 33 | } | 34 | } |
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 392e28d3f48d..26f6b6ffea5b 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c | |||
| @@ -25,9 +25,6 @@ | |||
| 25 | 25 | ||
| 26 | #include "pinctrl-intel.h" | 26 | #include "pinctrl-intel.h" |
| 27 | 27 | ||
| 28 | /* Maximum number of pads in each group */ | ||
| 29 | #define NPADS_IN_GPP 24 | ||
| 30 | |||
| 31 | /* Offset from regs */ | 28 | /* Offset from regs */ |
| 32 | #define PADBAR 0x00c | 29 | #define PADBAR 0x00c |
| 33 | #define GPI_IS 0x100 | 30 | #define GPI_IS 0x100 |
| @@ -37,6 +34,7 @@ | |||
| 37 | #define PADOWN_BITS 4 | 34 | #define PADOWN_BITS 4 |
| 38 | #define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS) | 35 | #define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS) |
| 39 | #define PADOWN_MASK(p) (0xf << PADOWN_SHIFT(p)) | 36 | #define PADOWN_MASK(p) (0xf << PADOWN_SHIFT(p)) |
| 37 | #define PADOWN_GPP(p) ((p) / 8) | ||
| 40 | 38 | ||
| 41 | /* Offset from pad_regs */ | 39 | /* Offset from pad_regs */ |
| 42 | #define PADCFG0 0x000 | 40 | #define PADCFG0 0x000 |
| @@ -142,7 +140,7 @@ static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl, unsigned pin, | |||
| 142 | static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin) | 140 | static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin) |
| 143 | { | 141 | { |
| 144 | const struct intel_community *community; | 142 | const struct intel_community *community; |
| 145 | unsigned padno, gpp, gpp_offset, offset; | 143 | unsigned padno, gpp, offset, group; |
| 146 | void __iomem *padown; | 144 | void __iomem *padown; |
| 147 | 145 | ||
| 148 | community = intel_get_community(pctrl, pin); | 146 | community = intel_get_community(pctrl, pin); |
| @@ -152,9 +150,9 @@ static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin) | |||
| 152 | return true; | 150 | return true; |
| 153 | 151 | ||
| 154 | padno = pin_to_padno(community, pin); | 152 | padno = pin_to_padno(community, pin); |
| 155 | gpp = padno / NPADS_IN_GPP; | 153 | group = padno / community->gpp_size; |
| 156 | gpp_offset = padno % NPADS_IN_GPP; | 154 | gpp = PADOWN_GPP(padno % community->gpp_size); |
| 157 | offset = community->padown_offset + gpp * 16 + (gpp_offset / 8) * 4; | 155 | offset = community->padown_offset + 0x10 * group + gpp * 4; |
| 158 | padown = community->regs + offset; | 156 | padown = community->regs + offset; |
| 159 | 157 | ||
| 160 | return !(readl(padown) & PADOWN_MASK(padno)); | 158 | return !(readl(padown) & PADOWN_MASK(padno)); |
| @@ -173,11 +171,11 @@ static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned pin) | |||
| 173 | return false; | 171 | return false; |
| 174 | 172 | ||
| 175 | padno = pin_to_padno(community, pin); | 173 | padno = pin_to_padno(community, pin); |
| 176 | gpp = padno / NPADS_IN_GPP; | 174 | gpp = padno / community->gpp_size; |
| 177 | offset = community->hostown_offset + gpp * 4; | 175 | offset = community->hostown_offset + gpp * 4; |
| 178 | hostown = community->regs + offset; | 176 | hostown = community->regs + offset; |
| 179 | 177 | ||
| 180 | return !(readl(hostown) & BIT(padno % NPADS_IN_GPP)); | 178 | return !(readl(hostown) & BIT(padno % community->gpp_size)); |
| 181 | } | 179 | } |
| 182 | 180 | ||
| 183 | static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin) | 181 | static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin) |
| @@ -193,7 +191,7 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin) | |||
| 193 | return false; | 191 | return false; |
| 194 | 192 | ||
| 195 | padno = pin_to_padno(community, pin); | 193 | padno = pin_to_padno(community, pin); |
| 196 | gpp = padno / NPADS_IN_GPP; | 194 | gpp = padno / community->gpp_size; |
| 197 | 195 | ||
| 198 | /* | 196 | /* |
| 199 | * If PADCFGLOCK and PADCFGLOCKTX bits are both clear for this pad, | 197 | * If PADCFGLOCK and PADCFGLOCKTX bits are both clear for this pad, |
| @@ -202,12 +200,12 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin) | |||
| 202 | */ | 200 | */ |
| 203 | offset = community->padcfglock_offset + gpp * 8; | 201 | offset = community->padcfglock_offset + gpp * 8; |
| 204 | value = readl(community->regs + offset); | 202 | value = readl(community->regs + offset); |
| 205 | if (value & BIT(pin % NPADS_IN_GPP)) | 203 | if (value & BIT(pin % community->gpp_size)) |
| 206 | return true; | 204 | return true; |
| 207 | 205 | ||
| 208 | offset = community->padcfglock_offset + 4 + gpp * 8; | 206 | offset = community->padcfglock_offset + 4 + gpp * 8; |
| 209 | value = readl(community->regs + offset); | 207 | value = readl(community->regs + offset); |
| 210 | if (value & BIT(pin % NPADS_IN_GPP)) | 208 | if (value & BIT(pin % community->gpp_size)) |
| 211 | return true; | 209 | return true; |
| 212 | 210 | ||
| 213 | return false; | 211 | return false; |
| @@ -663,8 +661,8 @@ static void intel_gpio_irq_ack(struct irq_data *d) | |||
| 663 | community = intel_get_community(pctrl, pin); | 661 | community = intel_get_community(pctrl, pin); |
| 664 | if (community) { | 662 | if (community) { |
| 665 | unsigned padno = pin_to_padno(community, pin); | 663 | unsigned padno = pin_to_padno(community, pin); |
| 666 | unsigned gpp_offset = padno % NPADS_IN_GPP; | 664 | unsigned gpp_offset = padno % community->gpp_size; |
| 667 | unsigned gpp = padno / NPADS_IN_GPP; | 665 | unsigned gpp = padno / community->gpp_size; |
| 668 | 666 | ||
| 669 | writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4); | 667 | writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4); |
| 670 | } | 668 | } |
| @@ -685,8 +683,8 @@ static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask) | |||
| 685 | community = intel_get_community(pctrl, pin); | 683 | community = intel_get_community(pctrl, pin); |
| 686 | if (community) { | 684 | if (community) { |
| 687 | unsigned padno = pin_to_padno(community, pin); | 685 | unsigned padno = pin_to_padno(community, pin); |
| 688 | unsigned gpp_offset = padno % NPADS_IN_GPP; | 686 | unsigned gpp_offset = padno % community->gpp_size; |
| 689 | unsigned gpp = padno / NPADS_IN_GPP; | 687 | unsigned gpp = padno / community->gpp_size; |
| 690 | void __iomem *reg; | 688 | void __iomem *reg; |
| 691 | u32 value; | 689 | u32 value; |
| 692 | 690 | ||
| @@ -780,8 +778,8 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on) | |||
| 780 | return -EINVAL; | 778 | return -EINVAL; |
| 781 | 779 | ||
| 782 | padno = pin_to_padno(community, pin); | 780 | padno = pin_to_padno(community, pin); |
| 783 | gpp = padno / NPADS_IN_GPP; | 781 | gpp = padno / community->gpp_size; |
| 784 | gpp_offset = padno % NPADS_IN_GPP; | 782 | gpp_offset = padno % community->gpp_size; |
| 785 | 783 | ||
| 786 | /* Clear the existing wake status */ | 784 | /* Clear the existing wake status */ |
| 787 | writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4); | 785 | writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4); |
| @@ -819,14 +817,14 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl, | |||
| 819 | /* Only interrupts that are enabled */ | 817 | /* Only interrupts that are enabled */ |
| 820 | pending &= enabled; | 818 | pending &= enabled; |
| 821 | 819 | ||
| 822 | for_each_set_bit(gpp_offset, &pending, NPADS_IN_GPP) { | 820 | for_each_set_bit(gpp_offset, &pending, community->gpp_size) { |
| 823 | unsigned padno, irq; | 821 | unsigned padno, irq; |
| 824 | 822 | ||
| 825 | /* | 823 | /* |
| 826 | * The last group in community can have less pins | 824 | * The last group in community can have less pins |
| 827 | * than NPADS_IN_GPP. | 825 | * than NPADS_IN_GPP. |
| 828 | */ | 826 | */ |
| 829 | padno = gpp_offset + gpp * NPADS_IN_GPP; | 827 | padno = gpp_offset + gpp * community->gpp_size; |
| 830 | if (padno >= community->npins) | 828 | if (padno >= community->npins) |
| 831 | break; | 829 | break; |
| 832 | 830 | ||
| @@ -1002,7 +1000,8 @@ int intel_pinctrl_probe(struct platform_device *pdev, | |||
| 1002 | 1000 | ||
| 1003 | community->regs = regs; | 1001 | community->regs = regs; |
| 1004 | community->pad_regs = regs + padbar; | 1002 | community->pad_regs = regs + padbar; |
| 1005 | community->ngpps = DIV_ROUND_UP(community->npins, NPADS_IN_GPP); | 1003 | community->ngpps = DIV_ROUND_UP(community->npins, |
| 1004 | community->gpp_size); | ||
| 1006 | } | 1005 | } |
| 1007 | 1006 | ||
| 1008 | irq = platform_get_irq(pdev, 0); | 1007 | irq = platform_get_irq(pdev, 0); |
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h index 4ec8b572a288..b60215793017 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.h +++ b/drivers/pinctrl/intel/pinctrl-intel.h | |||
| @@ -55,6 +55,8 @@ struct intel_function { | |||
| 55 | * ACPI). | 55 | * ACPI). |
| 56 | * @ie_offset: Register offset of GPI_IE from @regs. | 56 | * @ie_offset: Register offset of GPI_IE from @regs. |
| 57 | * @pin_base: Starting pin of pins in this community | 57 | * @pin_base: Starting pin of pins in this community |
| 58 | * @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK, | ||
| 59 | * HOSTSW_OWN, GPI_IS, GPI_IE, etc. | ||
| 58 | * @npins: Number of pins in this community | 60 | * @npins: Number of pins in this community |
| 59 | * @regs: Community specific common registers (reserved for core driver) | 61 | * @regs: Community specific common registers (reserved for core driver) |
| 60 | * @pad_regs: Community specific pad registers (reserved for core driver) | 62 | * @pad_regs: Community specific pad registers (reserved for core driver) |
| @@ -68,6 +70,7 @@ struct intel_community { | |||
| 68 | unsigned hostown_offset; | 70 | unsigned hostown_offset; |
| 69 | unsigned ie_offset; | 71 | unsigned ie_offset; |
| 70 | unsigned pin_base; | 72 | unsigned pin_base; |
| 73 | unsigned gpp_size; | ||
| 71 | size_t npins; | 74 | size_t npins; |
| 72 | void __iomem *regs; | 75 | void __iomem *regs; |
| 73 | void __iomem *pad_regs; | 76 | void __iomem *pad_regs; |
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c index 1de9ae5010db..c725a5313b4e 100644 --- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c +++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | .padcfglock_offset = SPT_PADCFGLOCK, \ | 30 | .padcfglock_offset = SPT_PADCFGLOCK, \ |
| 31 | .hostown_offset = SPT_HOSTSW_OWN, \ | 31 | .hostown_offset = SPT_HOSTSW_OWN, \ |
| 32 | .ie_offset = SPT_GPI_IE, \ | 32 | .ie_offset = SPT_GPI_IE, \ |
| 33 | .gpp_size = 24, \ | ||
| 33 | .pin_base = (s), \ | 34 | .pin_base = (s), \ |
| 34 | .npins = ((e) - (s) + 1), \ | 35 | .npins = ((e) - (s) + 1), \ |
| 35 | } | 36 | } |
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index cc97f0869791..48747c28a43d 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c | |||
| @@ -1341,10 +1341,13 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu) | |||
| 1341 | 1341 | ||
| 1342 | for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { | 1342 | for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { |
| 1343 | /* check if the domain is locked by BIOS */ | 1343 | /* check if the domain is locked by BIOS */ |
| 1344 | if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) { | 1344 | ret = rapl_read_data_raw(rd, FW_LOCK, false, &locked); |
| 1345 | if (ret) | ||
| 1346 | return ret; | ||
| 1347 | if (locked) { | ||
| 1345 | pr_info("RAPL package %d domain %s locked by BIOS\n", | 1348 | pr_info("RAPL package %d domain %s locked by BIOS\n", |
| 1346 | rp->id, rd->name); | 1349 | rp->id, rd->name); |
| 1347 | rd->state |= DOMAIN_STATE_BIOS_LOCKED; | 1350 | rd->state |= DOMAIN_STATE_BIOS_LOCKED; |
| 1348 | } | 1351 | } |
| 1349 | } | 1352 | } |
| 1350 | 1353 | ||
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c index 284b587da65c..d6c853bbfa9f 100644 --- a/drivers/rtc/rtc-da9063.c +++ b/drivers/rtc/rtc-da9063.c | |||
| @@ -483,24 +483,23 @@ static int da9063_rtc_probe(struct platform_device *pdev) | |||
| 483 | 483 | ||
| 484 | platform_set_drvdata(pdev, rtc); | 484 | platform_set_drvdata(pdev, rtc); |
| 485 | 485 | ||
| 486 | rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC, | ||
| 487 | &da9063_rtc_ops, THIS_MODULE); | ||
| 488 | if (IS_ERR(rtc->rtc_dev)) | ||
| 489 | return PTR_ERR(rtc->rtc_dev); | ||
| 490 | |||
| 491 | da9063_data_to_tm(data, &rtc->alarm_time, rtc); | ||
| 492 | rtc->rtc_sync = false; | ||
| 493 | |||
| 486 | irq_alarm = platform_get_irq_byname(pdev, "ALARM"); | 494 | irq_alarm = platform_get_irq_byname(pdev, "ALARM"); |
| 487 | ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, | 495 | ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, |
| 488 | da9063_alarm_event, | 496 | da9063_alarm_event, |
| 489 | IRQF_TRIGGER_LOW | IRQF_ONESHOT, | 497 | IRQF_TRIGGER_LOW | IRQF_ONESHOT, |
| 490 | "ALARM", rtc); | 498 | "ALARM", rtc); |
| 491 | if (ret) { | 499 | if (ret) |
| 492 | dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n", | 500 | dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n", |
| 493 | irq_alarm, ret); | 501 | irq_alarm, ret); |
| 494 | return ret; | ||
| 495 | } | ||
| 496 | |||
| 497 | rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC, | ||
| 498 | &da9063_rtc_ops, THIS_MODULE); | ||
| 499 | if (IS_ERR(rtc->rtc_dev)) | ||
| 500 | return PTR_ERR(rtc->rtc_dev); | ||
| 501 | 502 | ||
| 502 | da9063_data_to_tm(data, &rtc->alarm_time, rtc); | ||
| 503 | rtc->rtc_sync = false; | ||
| 504 | return ret; | 503 | return ret; |
| 505 | } | 504 | } |
| 506 | 505 | ||
diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c index 91ca0bc1b484..35c9aada07c8 100644 --- a/drivers/rtc/rtc-rk808.c +++ b/drivers/rtc/rtc-rk808.c | |||
| @@ -56,6 +56,42 @@ struct rk808_rtc { | |||
| 56 | int irq; | 56 | int irq; |
| 57 | }; | 57 | }; |
| 58 | 58 | ||
| 59 | /* | ||
| 60 | * The Rockchip calendar used by the RK808 counts November with 31 days. We use | ||
| 61 | * these translation functions to convert its dates to/from the Gregorian | ||
| 62 | * calendar used by the rest of the world. We arbitrarily define Jan 1st, 2016 | ||
| 63 | * as the day when both calendars were in sync, and treat all other dates | ||
| 64 | * relative to that. | ||
| 65 | * NOTE: Other system software (e.g. firmware) that reads the same hardware must | ||
| 66 | * implement this exact same conversion algorithm, with the same anchor date. | ||
| 67 | */ | ||
| 68 | static time64_t nov2dec_transitions(struct rtc_time *tm) | ||
| 69 | { | ||
| 70 | return (tm->tm_year + 1900) - 2016 + (tm->tm_mon + 1 > 11 ? 1 : 0); | ||
| 71 | } | ||
| 72 | |||
| 73 | static void rockchip_to_gregorian(struct rtc_time *tm) | ||
| 74 | { | ||
| 75 | /* If it's Nov 31st, rtc_tm_to_time64() will count that like Dec 1st */ | ||
| 76 | time64_t time = rtc_tm_to_time64(tm); | ||
| 77 | rtc_time64_to_tm(time + nov2dec_transitions(tm) * 86400, tm); | ||
| 78 | } | ||
| 79 | |||
| 80 | static void gregorian_to_rockchip(struct rtc_time *tm) | ||
| 81 | { | ||
| 82 | time64_t extra_days = nov2dec_transitions(tm); | ||
| 83 | time64_t time = rtc_tm_to_time64(tm); | ||
| 84 | rtc_time64_to_tm(time - extra_days * 86400, tm); | ||
| 85 | |||
| 86 | /* Compensate if we went back over Nov 31st (will work up to 2381) */ | ||
| 87 | if (nov2dec_transitions(tm) < extra_days) { | ||
| 88 | if (tm->tm_mon + 1 == 11) | ||
| 89 | tm->tm_mday++; /* This may result in 31! */ | ||
| 90 | else | ||
| 91 | rtc_time64_to_tm(time - (extra_days - 1) * 86400, tm); | ||
| 92 | } | ||
| 93 | } | ||
| 94 | |||
| 59 | /* Read current time and date in RTC */ | 95 | /* Read current time and date in RTC */ |
| 60 | static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm) | 96 | static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm) |
| 61 | { | 97 | { |
| @@ -101,9 +137,10 @@ static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm) | |||
| 101 | tm->tm_mon = (bcd2bin(rtc_data[4] & MONTHS_REG_MSK)) - 1; | 137 | tm->tm_mon = (bcd2bin(rtc_data[4] & MONTHS_REG_MSK)) - 1; |
| 102 | tm->tm_year = (bcd2bin(rtc_data[5] & YEARS_REG_MSK)) + 100; | 138 | tm->tm_year = (bcd2bin(rtc_data[5] & YEARS_REG_MSK)) + 100; |
| 103 | tm->tm_wday = bcd2bin(rtc_data[6] & WEEKS_REG_MSK); | 139 | tm->tm_wday = bcd2bin(rtc_data[6] & WEEKS_REG_MSK); |
| 140 | rockchip_to_gregorian(tm); | ||
| 104 | dev_dbg(dev, "RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n", | 141 | dev_dbg(dev, "RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n", |
| 105 | 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, | 142 | 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, |
| 106 | tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec); | 143 | tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec); |
| 107 | 144 | ||
| 108 | return ret; | 145 | return ret; |
| 109 | } | 146 | } |
| @@ -116,6 +153,10 @@ static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
| 116 | u8 rtc_data[NUM_TIME_REGS]; | 153 | u8 rtc_data[NUM_TIME_REGS]; |
| 117 | int ret; | 154 | int ret; |
| 118 | 155 | ||
| 156 | dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n", | ||
| 157 | 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, | ||
| 158 | tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec); | ||
| 159 | gregorian_to_rockchip(tm); | ||
| 119 | rtc_data[0] = bin2bcd(tm->tm_sec); | 160 | rtc_data[0] = bin2bcd(tm->tm_sec); |
| 120 | rtc_data[1] = bin2bcd(tm->tm_min); | 161 | rtc_data[1] = bin2bcd(tm->tm_min); |
| 121 | rtc_data[2] = bin2bcd(tm->tm_hour); | 162 | rtc_data[2] = bin2bcd(tm->tm_hour); |
| @@ -123,9 +164,6 @@ static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
| 123 | rtc_data[4] = bin2bcd(tm->tm_mon + 1); | 164 | rtc_data[4] = bin2bcd(tm->tm_mon + 1); |
| 124 | rtc_data[5] = bin2bcd(tm->tm_year - 100); | 165 | rtc_data[5] = bin2bcd(tm->tm_year - 100); |
| 125 | rtc_data[6] = bin2bcd(tm->tm_wday); | 166 | rtc_data[6] = bin2bcd(tm->tm_wday); |
| 126 | dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n", | ||
| 127 | 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, | ||
| 128 | tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec); | ||
| 129 | 167 | ||
| 130 | /* Stop RTC while updating the RTC registers */ | 168 | /* Stop RTC while updating the RTC registers */ |
| 131 | ret = regmap_update_bits(rk808->regmap, RK808_RTC_CTRL_REG, | 169 | ret = regmap_update_bits(rk808->regmap, RK808_RTC_CTRL_REG, |
| @@ -170,6 +208,7 @@ static int rk808_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
| 170 | alrm->time.tm_mday = bcd2bin(alrm_data[3] & DAYS_REG_MSK); | 208 | alrm->time.tm_mday = bcd2bin(alrm_data[3] & DAYS_REG_MSK); |
| 171 | alrm->time.tm_mon = (bcd2bin(alrm_data[4] & MONTHS_REG_MSK)) - 1; | 209 | alrm->time.tm_mon = (bcd2bin(alrm_data[4] & MONTHS_REG_MSK)) - 1; |
| 172 | alrm->time.tm_year = (bcd2bin(alrm_data[5] & YEARS_REG_MSK)) + 100; | 210 | alrm->time.tm_year = (bcd2bin(alrm_data[5] & YEARS_REG_MSK)) + 100; |
| 211 | rockchip_to_gregorian(&alrm->time); | ||
| 173 | 212 | ||
| 174 | ret = regmap_read(rk808->regmap, RK808_RTC_INT_REG, &int_reg); | 213 | ret = regmap_read(rk808->regmap, RK808_RTC_INT_REG, &int_reg); |
| 175 | if (ret) { | 214 | if (ret) { |
| @@ -227,6 +266,7 @@ static int rk808_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
| 227 | alrm->time.tm_mday, alrm->time.tm_wday, alrm->time.tm_hour, | 266 | alrm->time.tm_mday, alrm->time.tm_wday, alrm->time.tm_hour, |
| 228 | alrm->time.tm_min, alrm->time.tm_sec); | 267 | alrm->time.tm_min, alrm->time.tm_sec); |
| 229 | 268 | ||
| 269 | gregorian_to_rockchip(&alrm->time); | ||
| 230 | alrm_data[0] = bin2bcd(alrm->time.tm_sec); | 270 | alrm_data[0] = bin2bcd(alrm->time.tm_sec); |
| 231 | alrm_data[1] = bin2bcd(alrm->time.tm_min); | 271 | alrm_data[1] = bin2bcd(alrm->time.tm_min); |
| 232 | alrm_data[2] = bin2bcd(alrm->time.tm_hour); | 272 | alrm_data[2] = bin2bcd(alrm->time.tm_hour); |
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index e4b799837948..459abe1dcc87 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c | |||
| @@ -219,13 +219,13 @@ static int sdev_runtime_suspend(struct device *dev) | |||
| 219 | struct scsi_device *sdev = to_scsi_device(dev); | 219 | struct scsi_device *sdev = to_scsi_device(dev); |
| 220 | int err = 0; | 220 | int err = 0; |
| 221 | 221 | ||
| 222 | if (pm && pm->runtime_suspend) { | 222 | err = blk_pre_runtime_suspend(sdev->request_queue); |
| 223 | err = blk_pre_runtime_suspend(sdev->request_queue); | 223 | if (err) |
| 224 | if (err) | 224 | return err; |
| 225 | return err; | 225 | if (pm && pm->runtime_suspend) |
| 226 | err = pm->runtime_suspend(dev); | 226 | err = pm->runtime_suspend(dev); |
| 227 | blk_post_runtime_suspend(sdev->request_queue, err); | 227 | blk_post_runtime_suspend(sdev->request_queue, err); |
| 228 | } | 228 | |
| 229 | return err; | 229 | return err; |
| 230 | } | 230 | } |
| 231 | 231 | ||
| @@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev) | |||
| 248 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 248 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
| 249 | int err = 0; | 249 | int err = 0; |
| 250 | 250 | ||
| 251 | if (pm && pm->runtime_resume) { | 251 | blk_pre_runtime_resume(sdev->request_queue); |
| 252 | blk_pre_runtime_resume(sdev->request_queue); | 252 | if (pm && pm->runtime_resume) |
| 253 | err = pm->runtime_resume(dev); | 253 | err = pm->runtime_resume(dev); |
| 254 | blk_post_runtime_resume(sdev->request_queue, err); | 254 | blk_post_runtime_resume(sdev->request_queue, err); |
| 255 | } | 255 | |
| 256 | return err; | 256 | return err; |
| 257 | } | 257 | } |
| 258 | 258 | ||
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index dcb0d76d7312..044d06410d4c 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c | |||
| @@ -84,6 +84,7 @@ static void init_device_slot_control(unsigned char *dest_desc, | |||
| 84 | static int ses_recv_diag(struct scsi_device *sdev, int page_code, | 84 | static int ses_recv_diag(struct scsi_device *sdev, int page_code, |
| 85 | void *buf, int bufflen) | 85 | void *buf, int bufflen) |
| 86 | { | 86 | { |
| 87 | int ret; | ||
| 87 | unsigned char cmd[] = { | 88 | unsigned char cmd[] = { |
| 88 | RECEIVE_DIAGNOSTIC, | 89 | RECEIVE_DIAGNOSTIC, |
| 89 | 1, /* Set PCV bit */ | 90 | 1, /* Set PCV bit */ |
| @@ -92,9 +93,26 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code, | |||
| 92 | bufflen & 0xff, | 93 | bufflen & 0xff, |
| 93 | 0 | 94 | 0 |
| 94 | }; | 95 | }; |
| 96 | unsigned char recv_page_code; | ||
| 95 | 97 | ||
| 96 | return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, | 98 | ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, |
| 97 | NULL, SES_TIMEOUT, SES_RETRIES, NULL); | 99 | NULL, SES_TIMEOUT, SES_RETRIES, NULL); |
| 100 | if (unlikely(!ret)) | ||
| 101 | return ret; | ||
| 102 | |||
| 103 | recv_page_code = ((unsigned char *)buf)[0]; | ||
| 104 | |||
| 105 | if (likely(recv_page_code == page_code)) | ||
| 106 | return ret; | ||
| 107 | |||
| 108 | /* successful diagnostic but wrong page code. This happens to some | ||
| 109 | * USB devices, just print a message and pretend there was an error */ | ||
| 110 | |||
| 111 | sdev_printk(KERN_ERR, sdev, | ||
| 112 | "Wrong diagnostic page; asked for %d got %u\n", | ||
| 113 | page_code, recv_page_code); | ||
| 114 | |||
| 115 | return -EINVAL; | ||
| 98 | } | 116 | } |
| 99 | 117 | ||
| 100 | static int ses_send_diag(struct scsi_device *sdev, int page_code, | 118 | static int ses_send_diag(struct scsi_device *sdev, int page_code, |
| @@ -541,7 +559,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, | |||
| 541 | if (desc_ptr) | 559 | if (desc_ptr) |
| 542 | desc_ptr += len; | 560 | desc_ptr += len; |
| 543 | 561 | ||
| 544 | if (addl_desc_ptr) | 562 | if (addl_desc_ptr && |
| 563 | /* only find additional descriptions for specific devices */ | ||
| 564 | (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE || | ||
| 565 | type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE || | ||
| 566 | type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER || | ||
| 567 | /* these elements are optional */ | ||
| 568 | type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT || | ||
| 569 | type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT || | ||
| 570 | type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS)) | ||
| 545 | addl_desc_ptr += addl_desc_ptr[1] + 2; | 571 | addl_desc_ptr += addl_desc_ptr[1] + 2; |
| 546 | 572 | ||
| 547 | } | 573 | } |
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 59a11437db70..39412c9097c6 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c | |||
| @@ -167,7 +167,7 @@ static inline int is_double_byte_mode(struct fsl_dspi *dspi) | |||
| 167 | { | 167 | { |
| 168 | unsigned int val; | 168 | unsigned int val; |
| 169 | 169 | ||
| 170 | regmap_read(dspi->regmap, SPI_CTAR(dspi->cs), &val); | 170 | regmap_read(dspi->regmap, SPI_CTAR(0), &val); |
| 171 | 171 | ||
| 172 | return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1; | 172 | return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1; |
| 173 | } | 173 | } |
| @@ -257,7 +257,7 @@ static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word) | |||
| 257 | 257 | ||
| 258 | return SPI_PUSHR_TXDATA(d16) | | 258 | return SPI_PUSHR_TXDATA(d16) | |
| 259 | SPI_PUSHR_PCS(dspi->cs) | | 259 | SPI_PUSHR_PCS(dspi->cs) | |
| 260 | SPI_PUSHR_CTAS(dspi->cs) | | 260 | SPI_PUSHR_CTAS(0) | |
| 261 | SPI_PUSHR_CONT; | 261 | SPI_PUSHR_CONT; |
| 262 | } | 262 | } |
| 263 | 263 | ||
| @@ -290,7 +290,7 @@ static int dspi_eoq_write(struct fsl_dspi *dspi) | |||
| 290 | */ | 290 | */ |
| 291 | if (tx_word && (dspi->len == 1)) { | 291 | if (tx_word && (dspi->len == 1)) { |
| 292 | dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM; | 292 | dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM; |
| 293 | regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), | 293 | regmap_update_bits(dspi->regmap, SPI_CTAR(0), |
| 294 | SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8)); | 294 | SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8)); |
| 295 | tx_word = 0; | 295 | tx_word = 0; |
| 296 | } | 296 | } |
| @@ -339,7 +339,7 @@ static int dspi_tcfq_write(struct fsl_dspi *dspi) | |||
| 339 | 339 | ||
| 340 | if (tx_word && (dspi->len == 1)) { | 340 | if (tx_word && (dspi->len == 1)) { |
| 341 | dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM; | 341 | dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM; |
| 342 | regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), | 342 | regmap_update_bits(dspi->regmap, SPI_CTAR(0), |
| 343 | SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8)); | 343 | SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8)); |
| 344 | tx_word = 0; | 344 | tx_word = 0; |
| 345 | } | 345 | } |
| @@ -407,7 +407,7 @@ static int dspi_transfer_one_message(struct spi_master *master, | |||
| 407 | regmap_update_bits(dspi->regmap, SPI_MCR, | 407 | regmap_update_bits(dspi->regmap, SPI_MCR, |
| 408 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, | 408 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, |
| 409 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); | 409 | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); |
| 410 | regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), | 410 | regmap_write(dspi->regmap, SPI_CTAR(0), |
| 411 | dspi->cur_chip->ctar_val); | 411 | dspi->cur_chip->ctar_val); |
| 412 | 412 | ||
| 413 | trans_mode = dspi->devtype_data->trans_mode; | 413 | trans_mode = dspi->devtype_data->trans_mode; |
| @@ -566,7 +566,7 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id) | |||
| 566 | if (!dspi->len) { | 566 | if (!dspi->len) { |
| 567 | if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) { | 567 | if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) { |
| 568 | regmap_update_bits(dspi->regmap, | 568 | regmap_update_bits(dspi->regmap, |
| 569 | SPI_CTAR(dspi->cs), | 569 | SPI_CTAR(0), |
| 570 | SPI_FRAME_BITS_MASK, | 570 | SPI_FRAME_BITS_MASK, |
| 571 | SPI_FRAME_BITS(16)); | 571 | SPI_FRAME_BITS(16)); |
| 572 | dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM; | 572 | dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM; |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 2b0a8ec3affb..dee1cb87d24f 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
| @@ -1705,7 +1705,7 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size) | |||
| 1705 | master->bus_num = -1; | 1705 | master->bus_num = -1; |
| 1706 | master->num_chipselect = 1; | 1706 | master->num_chipselect = 1; |
| 1707 | master->dev.class = &spi_master_class; | 1707 | master->dev.class = &spi_master_class; |
| 1708 | master->dev.parent = get_device(dev); | 1708 | master->dev.parent = dev; |
| 1709 | spi_master_set_devdata(master, &master[1]); | 1709 | spi_master_set_devdata(master, &master[1]); |
| 1710 | 1710 | ||
| 1711 | return master; | 1711 | return master; |
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 91a0fcd72423..d0e7dfc647cf 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c | |||
| @@ -651,11 +651,11 @@ static int spidev_release(struct inode *inode, struct file *filp) | |||
| 651 | kfree(spidev->rx_buffer); | 651 | kfree(spidev->rx_buffer); |
| 652 | spidev->rx_buffer = NULL; | 652 | spidev->rx_buffer = NULL; |
| 653 | 653 | ||
| 654 | spin_lock_irq(&spidev->spi_lock); | ||
| 654 | if (spidev->spi) | 655 | if (spidev->spi) |
| 655 | spidev->speed_hz = spidev->spi->max_speed_hz; | 656 | spidev->speed_hz = spidev->spi->max_speed_hz; |
| 656 | 657 | ||
| 657 | /* ... after we unbound from the underlying device? */ | 658 | /* ... after we unbound from the underlying device? */ |
| 658 | spin_lock_irq(&spidev->spi_lock); | ||
| 659 | dofree = (spidev->spi == NULL); | 659 | dofree = (spidev->spi == NULL); |
| 660 | spin_unlock_irq(&spidev->spi_lock); | 660 | spin_unlock_irq(&spidev->spi_lock); |
| 661 | 661 | ||
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c index 195c41d7bd53..0813163f962f 100644 --- a/drivers/staging/android/ion/ion_chunk_heap.c +++ b/drivers/staging/android/ion/ion_chunk_heap.c | |||
| @@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap, | |||
| 81 | err: | 81 | err: |
| 82 | sg = table->sgl; | 82 | sg = table->sgl; |
| 83 | for (i -= 1; i >= 0; i--) { | 83 | for (i -= 1; i >= 0; i--) { |
| 84 | gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK, | 84 | gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), |
| 85 | sg->length); | 85 | sg->length); |
| 86 | sg = sg_next(sg); | 86 | sg = sg_next(sg); |
| 87 | } | 87 | } |
| @@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer) | |||
| 109 | DMA_BIDIRECTIONAL); | 109 | DMA_BIDIRECTIONAL); |
| 110 | 110 | ||
| 111 | for_each_sg(table->sgl, sg, table->nents, i) { | 111 | for_each_sg(table->sgl, sg, table->nents, i) { |
| 112 | gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK, | 112 | gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), |
| 113 | sg->length); | 113 | sg->length); |
| 114 | } | 114 | } |
| 115 | chunk_heap->allocated -= allocated_size; | 115 | chunk_heap->allocated -= allocated_size; |
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index ed776149261e..e49c2bce551d 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c | |||
| @@ -2054,13 +2054,13 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, | |||
| 2054 | size_t eol; | 2054 | size_t eol; |
| 2055 | size_t tail; | 2055 | size_t tail; |
| 2056 | int ret, found = 0; | 2056 | int ret, found = 0; |
| 2057 | bool eof_push = 0; | ||
| 2058 | 2057 | ||
| 2059 | /* N.B. avoid overrun if nr == 0 */ | 2058 | /* N.B. avoid overrun if nr == 0 */ |
| 2060 | n = min(*nr, smp_load_acquire(&ldata->canon_head) - ldata->read_tail); | 2059 | if (!*nr) |
| 2061 | if (!n) | ||
| 2062 | return 0; | 2060 | return 0; |
| 2063 | 2061 | ||
| 2062 | n = min(*nr + 1, smp_load_acquire(&ldata->canon_head) - ldata->read_tail); | ||
| 2063 | |||
| 2064 | tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); | 2064 | tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); |
| 2065 | size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); | 2065 | size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); |
| 2066 | 2066 | ||
| @@ -2081,12 +2081,11 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, | |||
| 2081 | n = eol - tail; | 2081 | n = eol - tail; |
| 2082 | if (n > N_TTY_BUF_SIZE) | 2082 | if (n > N_TTY_BUF_SIZE) |
| 2083 | n += N_TTY_BUF_SIZE; | 2083 | n += N_TTY_BUF_SIZE; |
| 2084 | n += found; | 2084 | c = n + found; |
| 2085 | c = n; | ||
| 2086 | 2085 | ||
| 2087 | if (found && !ldata->push && read_buf(ldata, eol) == __DISABLED_CHAR) { | 2086 | if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) { |
| 2088 | n--; | 2087 | c = min(*nr, c); |
| 2089 | eof_push = !n && ldata->read_tail != ldata->line_start; | 2088 | n = c; |
| 2090 | } | 2089 | } |
| 2091 | 2090 | ||
| 2092 | n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu size:%zu more:%zu\n", | 2091 | n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu size:%zu more:%zu\n", |
| @@ -2116,7 +2115,7 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, | |||
| 2116 | ldata->push = 0; | 2115 | ldata->push = 0; |
| 2117 | tty_audit_push(tty); | 2116 | tty_audit_push(tty); |
| 2118 | } | 2117 | } |
| 2119 | return eof_push ? -EAGAIN : 0; | 2118 | return 0; |
| 2120 | } | 2119 | } |
| 2121 | 2120 | ||
| 2122 | extern ssize_t redirected_tty_write(struct file *, const char __user *, | 2121 | extern ssize_t redirected_tty_write(struct file *, const char __user *, |
| @@ -2273,10 +2272,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, | |||
| 2273 | 2272 | ||
| 2274 | if (ldata->icanon && !L_EXTPROC(tty)) { | 2273 | if (ldata->icanon && !L_EXTPROC(tty)) { |
| 2275 | retval = canon_copy_from_read_buf(tty, &b, &nr); | 2274 | retval = canon_copy_from_read_buf(tty, &b, &nr); |
| 2276 | if (retval == -EAGAIN) { | 2275 | if (retval) |
| 2277 | retval = 0; | ||
| 2278 | continue; | ||
| 2279 | } else if (retval) | ||
| 2280 | break; | 2276 | break; |
| 2281 | } else { | 2277 | } else { |
| 2282 | int uncopied; | 2278 | int uncopied; |
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c index d11621e2cf1d..245edbb68d4b 100644 --- a/drivers/tty/serial/8250/8250_uniphier.c +++ b/drivers/tty/serial/8250/8250_uniphier.c | |||
| @@ -115,12 +115,16 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value) | |||
| 115 | */ | 115 | */ |
| 116 | static int uniphier_serial_dl_read(struct uart_8250_port *up) | 116 | static int uniphier_serial_dl_read(struct uart_8250_port *up) |
| 117 | { | 117 | { |
| 118 | return readl(up->port.membase + UNIPHIER_UART_DLR); | 118 | int offset = UNIPHIER_UART_DLR << up->port.regshift; |
| 119 | |||
| 120 | return readl(up->port.membase + offset); | ||
| 119 | } | 121 | } |
| 120 | 122 | ||
| 121 | static void uniphier_serial_dl_write(struct uart_8250_port *up, int value) | 123 | static void uniphier_serial_dl_write(struct uart_8250_port *up, int value) |
| 122 | { | 124 | { |
| 123 | writel(value, up->port.membase + UNIPHIER_UART_DLR); | 125 | int offset = UNIPHIER_UART_DLR << up->port.regshift; |
| 126 | |||
| 127 | writel(value, up->port.membase + offset); | ||
| 124 | } | 128 | } |
| 125 | 129 | ||
| 126 | static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port, | 130 | static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port, |
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c index f09636083426..b5b2f2be6be7 100644 --- a/drivers/tty/serial/earlycon.c +++ b/drivers/tty/serial/earlycon.c | |||
| @@ -115,6 +115,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match) | |||
| 115 | if (buf && !parse_options(&early_console_dev, buf)) | 115 | if (buf && !parse_options(&early_console_dev, buf)) |
| 116 | buf = NULL; | 116 | buf = NULL; |
| 117 | 117 | ||
| 118 | spin_lock_init(&port->lock); | ||
| 118 | port->uartclk = BASE_BAUD * 16; | 119 | port->uartclk = BASE_BAUD * 16; |
| 119 | if (port->mapbase) | 120 | if (port->mapbase) |
| 120 | port->membase = earlycon_map(port->mapbase, 64); | 121 | port->membase = earlycon_map(port->mapbase, 64); |
| @@ -202,6 +203,7 @@ int __init of_setup_earlycon(unsigned long addr, | |||
| 202 | int err; | 203 | int err; |
| 203 | struct uart_port *port = &early_console_dev.port; | 204 | struct uart_port *port = &early_console_dev.port; |
| 204 | 205 | ||
| 206 | spin_lock_init(&port->lock); | ||
| 205 | port->iotype = UPIO_MEM; | 207 | port->iotype = UPIO_MEM; |
| 206 | port->mapbase = addr; | 208 | port->mapbase = addr; |
| 207 | port->uartclk = BASE_BAUD * 16; | 209 | port->uartclk = BASE_BAUD * 16; |
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 960e50a97558..51c7507b0444 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
| @@ -1437,7 +1437,7 @@ static void sci_request_dma(struct uart_port *port) | |||
| 1437 | sg_init_table(sg, 1); | 1437 | sg_init_table(sg, 1); |
| 1438 | s->rx_buf[i] = buf; | 1438 | s->rx_buf[i] = buf; |
| 1439 | sg_dma_address(sg) = dma; | 1439 | sg_dma_address(sg) = dma; |
| 1440 | sg->length = s->buf_len_rx; | 1440 | sg_dma_len(sg) = s->buf_len_rx; |
| 1441 | 1441 | ||
| 1442 | buf += s->buf_len_rx; | 1442 | buf += s->buf_len_rx; |
| 1443 | dma += s->buf_len_rx; | 1443 | dma += s->buf_len_rx; |
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index 9a479e61791a..3cd31e0d4bd9 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c | |||
| @@ -450,7 +450,7 @@ receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count) | |||
| 450 | count = disc->ops->receive_buf2(tty, p, f, count); | 450 | count = disc->ops->receive_buf2(tty, p, f, count); |
| 451 | else { | 451 | else { |
| 452 | count = min_t(int, count, tty->receive_room); | 452 | count = min_t(int, count, tty->receive_room); |
| 453 | if (count) | 453 | if (count && disc->ops->receive_buf) |
| 454 | disc->ops->receive_buf(tty, p, f, count); | 454 | disc->ops->receive_buf(tty, p, f, count); |
| 455 | } | 455 | } |
| 456 | return count; | 456 | return count; |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index a5cc032ef77a..ddbf32d599cb 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
| @@ -1035,10 +1035,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) | |||
| 1035 | unsigned delay; | 1035 | unsigned delay; |
| 1036 | 1036 | ||
| 1037 | /* Continue a partial initialization */ | 1037 | /* Continue a partial initialization */ |
| 1038 | if (type == HUB_INIT2) | 1038 | if (type == HUB_INIT2 || type == HUB_INIT3) { |
| 1039 | goto init2; | 1039 | device_lock(hub->intfdev); |
| 1040 | if (type == HUB_INIT3) | 1040 | |
| 1041 | /* Was the hub disconnected while we were waiting? */ | ||
| 1042 | if (hub->disconnected) { | ||
| 1043 | device_unlock(hub->intfdev); | ||
| 1044 | kref_put(&hub->kref, hub_release); | ||
| 1045 | return; | ||
| 1046 | } | ||
| 1047 | if (type == HUB_INIT2) | ||
| 1048 | goto init2; | ||
| 1041 | goto init3; | 1049 | goto init3; |
| 1050 | } | ||
| 1051 | kref_get(&hub->kref); | ||
| 1042 | 1052 | ||
| 1043 | /* The superspeed hub except for root hub has to use Hub Depth | 1053 | /* The superspeed hub except for root hub has to use Hub Depth |
| 1044 | * value as an offset into the route string to locate the bits | 1054 | * value as an offset into the route string to locate the bits |
| @@ -1236,6 +1246,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) | |||
| 1236 | queue_delayed_work(system_power_efficient_wq, | 1246 | queue_delayed_work(system_power_efficient_wq, |
| 1237 | &hub->init_work, | 1247 | &hub->init_work, |
| 1238 | msecs_to_jiffies(delay)); | 1248 | msecs_to_jiffies(delay)); |
| 1249 | device_unlock(hub->intfdev); | ||
| 1239 | return; /* Continues at init3: below */ | 1250 | return; /* Continues at init3: below */ |
| 1240 | } else { | 1251 | } else { |
| 1241 | msleep(delay); | 1252 | msleep(delay); |
| @@ -1257,6 +1268,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) | |||
| 1257 | /* Allow autosuspend if it was suppressed */ | 1268 | /* Allow autosuspend if it was suppressed */ |
| 1258 | if (type <= HUB_INIT3) | 1269 | if (type <= HUB_INIT3) |
| 1259 | usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); | 1270 | usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); |
| 1271 | |||
| 1272 | if (type == HUB_INIT2 || type == HUB_INIT3) | ||
| 1273 | device_unlock(hub->intfdev); | ||
| 1274 | |||
| 1275 | kref_put(&hub->kref, hub_release); | ||
| 1260 | } | 1276 | } |
| 1261 | 1277 | ||
| 1262 | /* Implement the continuations for the delays above */ | 1278 | /* Implement the continuations for the delays above */ |
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c index f51a5d52c0ed..ec1b8f2c1183 100644 --- a/drivers/usb/serial/ipaq.c +++ b/drivers/usb/serial/ipaq.c | |||
| @@ -531,7 +531,8 @@ static int ipaq_open(struct tty_struct *tty, | |||
| 531 | * through. Since this has a reasonably high failure rate, we retry | 531 | * through. Since this has a reasonably high failure rate, we retry |
| 532 | * several times. | 532 | * several times. |
| 533 | */ | 533 | */ |
| 534 | while (retries--) { | 534 | while (retries) { |
| 535 | retries--; | ||
| 535 | result = usb_control_msg(serial->dev, | 536 | result = usb_control_msg(serial->dev, |
| 536 | usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21, | 537 | usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21, |
| 537 | 0x1, 0, NULL, 0, 100); | 538 | 0x1, 0, NULL, 0, 100); |
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c index b335c1ae8625..fe00a07c122e 100644 --- a/drivers/video/fbdev/fsl-diu-fb.c +++ b/drivers/video/fbdev/fsl-diu-fb.c | |||
| @@ -479,7 +479,10 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s) | |||
| 479 | port = FSL_DIU_PORT_DLVDS; | 479 | port = FSL_DIU_PORT_DLVDS; |
| 480 | } | 480 | } |
| 481 | 481 | ||
| 482 | return diu_ops.valid_monitor_port(port); | 482 | if (diu_ops.valid_monitor_port) |
| 483 | port = diu_ops.valid_monitor_port(port); | ||
| 484 | |||
| 485 | return port; | ||
| 483 | } | 486 | } |
| 484 | 487 | ||
| 485 | /* | 488 | /* |
| @@ -1915,6 +1918,14 @@ static int __init fsl_diu_init(void) | |||
| 1915 | #else | 1918 | #else |
| 1916 | monitor_port = fsl_diu_name_to_port(monitor_string); | 1919 | monitor_port = fsl_diu_name_to_port(monitor_string); |
| 1917 | #endif | 1920 | #endif |
| 1921 | |||
| 1922 | /* | ||
| 1923 | * Must to verify set_pixel_clock. If not implement on platform, | ||
| 1924 | * then that means that there is no platform support for the DIU. | ||
| 1925 | */ | ||
| 1926 | if (!diu_ops.set_pixel_clock) | ||
| 1927 | return -ENODEV; | ||
| 1928 | |||
| 1918 | pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n"); | 1929 | pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n"); |
| 1919 | 1930 | ||
| 1920 | #ifdef CONFIG_NOT_COHERENT_CACHE | 1931 | #ifdef CONFIG_NOT_COHERENT_CACHE |
diff --git a/drivers/video/fbdev/omap2/dss/venc.c b/drivers/video/fbdev/omap2/dss/venc.c index 99ca268c1cdd..d05a54922ba6 100644 --- a/drivers/video/fbdev/omap2/dss/venc.c +++ b/drivers/video/fbdev/omap2/dss/venc.c | |||
| @@ -275,6 +275,12 @@ const struct omap_video_timings omap_dss_pal_timings = { | |||
| 275 | .vbp = 41, | 275 | .vbp = 41, |
| 276 | 276 | ||
| 277 | .interlace = true, | 277 | .interlace = true, |
| 278 | |||
| 279 | .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, | ||
| 280 | .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, | ||
| 281 | .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, | ||
| 282 | .de_level = OMAPDSS_SIG_ACTIVE_HIGH, | ||
| 283 | .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, | ||
| 278 | }; | 284 | }; |
| 279 | EXPORT_SYMBOL(omap_dss_pal_timings); | 285 | EXPORT_SYMBOL(omap_dss_pal_timings); |
| 280 | 286 | ||
| @@ -290,6 +296,12 @@ const struct omap_video_timings omap_dss_ntsc_timings = { | |||
| 290 | .vbp = 31, | 296 | .vbp = 31, |
| 291 | 297 | ||
| 292 | .interlace = true, | 298 | .interlace = true, |
| 299 | |||
| 300 | .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, | ||
| 301 | .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, | ||
| 302 | .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, | ||
| 303 | .de_level = OMAPDSS_SIG_ACTIVE_HIGH, | ||
| 304 | .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, | ||
| 293 | }; | 305 | }; |
| 294 | EXPORT_SYMBOL(omap_dss_ntsc_timings); | 306 | EXPORT_SYMBOL(omap_dss_ntsc_timings); |
| 295 | 307 | ||
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index e3e9e3d46d1b..96a1b8da5371 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c | |||
| @@ -281,7 +281,8 @@ static void handle_irq_for_port(unsigned port) | |||
| 281 | 281 | ||
| 282 | static void consume_one_event(unsigned cpu, | 282 | static void consume_one_event(unsigned cpu, |
| 283 | struct evtchn_fifo_control_block *control_block, | 283 | struct evtchn_fifo_control_block *control_block, |
| 284 | unsigned priority, unsigned long *ready) | 284 | unsigned priority, unsigned long *ready, |
| 285 | bool drop) | ||
| 285 | { | 286 | { |
| 286 | struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); | 287 | struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); |
| 287 | uint32_t head; | 288 | uint32_t head; |
| @@ -313,13 +314,17 @@ static void consume_one_event(unsigned cpu, | |||
| 313 | if (head == 0) | 314 | if (head == 0) |
| 314 | clear_bit(priority, ready); | 315 | clear_bit(priority, ready); |
| 315 | 316 | ||
| 316 | if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) | 317 | if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) { |
| 317 | handle_irq_for_port(port); | 318 | if (unlikely(drop)) |
| 319 | pr_warn("Dropping pending event for port %u\n", port); | ||
| 320 | else | ||
| 321 | handle_irq_for_port(port); | ||
| 322 | } | ||
| 318 | 323 | ||
| 319 | q->head[priority] = head; | 324 | q->head[priority] = head; |
| 320 | } | 325 | } |
| 321 | 326 | ||
| 322 | static void evtchn_fifo_handle_events(unsigned cpu) | 327 | static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) |
| 323 | { | 328 | { |
| 324 | struct evtchn_fifo_control_block *control_block; | 329 | struct evtchn_fifo_control_block *control_block; |
| 325 | unsigned long ready; | 330 | unsigned long ready; |
| @@ -331,11 +336,16 @@ static void evtchn_fifo_handle_events(unsigned cpu) | |||
| 331 | 336 | ||
| 332 | while (ready) { | 337 | while (ready) { |
| 333 | q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); | 338 | q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); |
| 334 | consume_one_event(cpu, control_block, q, &ready); | 339 | consume_one_event(cpu, control_block, q, &ready, drop); |
| 335 | ready |= xchg(&control_block->ready, 0); | 340 | ready |= xchg(&control_block->ready, 0); |
| 336 | } | 341 | } |
| 337 | } | 342 | } |
| 338 | 343 | ||
| 344 | static void evtchn_fifo_handle_events(unsigned cpu) | ||
| 345 | { | ||
| 346 | __evtchn_fifo_handle_events(cpu, false); | ||
| 347 | } | ||
| 348 | |||
| 339 | static void evtchn_fifo_resume(void) | 349 | static void evtchn_fifo_resume(void) |
| 340 | { | 350 | { |
| 341 | unsigned cpu; | 351 | unsigned cpu; |
| @@ -420,6 +430,9 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self, | |||
| 420 | if (!per_cpu(cpu_control_block, cpu)) | 430 | if (!per_cpu(cpu_control_block, cpu)) |
| 421 | ret = evtchn_fifo_alloc_control_block(cpu); | 431 | ret = evtchn_fifo_alloc_control_block(cpu); |
| 422 | break; | 432 | break; |
| 433 | case CPU_DEAD: | ||
| 434 | __evtchn_fifo_handle_events(cpu, true); | ||
| 435 | break; | ||
| 423 | default: | 436 | default: |
| 424 | break; | 437 | break; |
| 425 | } | 438 | } |
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h index 58e38d586f52..4d529f3e40df 100644 --- a/drivers/xen/xen-pciback/pciback.h +++ b/drivers/xen/xen-pciback/pciback.h | |||
| @@ -37,6 +37,7 @@ struct xen_pcibk_device { | |||
| 37 | struct xen_pci_sharedinfo *sh_info; | 37 | struct xen_pci_sharedinfo *sh_info; |
| 38 | unsigned long flags; | 38 | unsigned long flags; |
| 39 | struct work_struct op_work; | 39 | struct work_struct op_work; |
| 40 | struct xen_pci_op op; | ||
| 40 | }; | 41 | }; |
| 41 | 42 | ||
| 42 | struct xen_pcibk_dev_data { | 43 | struct xen_pcibk_dev_data { |
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c index c4a0666de6f5..73dafdc494aa 100644 --- a/drivers/xen/xen-pciback/pciback_ops.c +++ b/drivers/xen/xen-pciback/pciback_ops.c | |||
| @@ -70,6 +70,13 @@ static void xen_pcibk_control_isr(struct pci_dev *dev, int reset) | |||
| 70 | enable ? "enable" : "disable"); | 70 | enable ? "enable" : "disable"); |
| 71 | 71 | ||
| 72 | if (enable) { | 72 | if (enable) { |
| 73 | /* | ||
| 74 | * The MSI or MSI-X should not have an IRQ handler. Otherwise | ||
| 75 | * if the guest terminates we BUG_ON in free_msi_irqs. | ||
| 76 | */ | ||
| 77 | if (dev->msi_enabled || dev->msix_enabled) | ||
| 78 | goto out; | ||
| 79 | |||
| 73 | rc = request_irq(dev_data->irq, | 80 | rc = request_irq(dev_data->irq, |
| 74 | xen_pcibk_guest_interrupt, IRQF_SHARED, | 81 | xen_pcibk_guest_interrupt, IRQF_SHARED, |
| 75 | dev_data->irq_name, dev); | 82 | dev_data->irq_name, dev); |
| @@ -144,7 +151,12 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev, | |||
| 144 | if (unlikely(verbose_request)) | 151 | if (unlikely(verbose_request)) |
| 145 | printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev)); | 152 | printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev)); |
| 146 | 153 | ||
| 147 | status = pci_enable_msi(dev); | 154 | if (dev->msi_enabled) |
| 155 | status = -EALREADY; | ||
| 156 | else if (dev->msix_enabled) | ||
| 157 | status = -ENXIO; | ||
| 158 | else | ||
| 159 | status = pci_enable_msi(dev); | ||
| 148 | 160 | ||
| 149 | if (status) { | 161 | if (status) { |
| 150 | pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n", | 162 | pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n", |
| @@ -173,20 +185,23 @@ static | |||
| 173 | int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev, | 185 | int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev, |
| 174 | struct pci_dev *dev, struct xen_pci_op *op) | 186 | struct pci_dev *dev, struct xen_pci_op *op) |
| 175 | { | 187 | { |
| 176 | struct xen_pcibk_dev_data *dev_data; | ||
| 177 | |||
| 178 | if (unlikely(verbose_request)) | 188 | if (unlikely(verbose_request)) |
| 179 | printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n", | 189 | printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n", |
| 180 | pci_name(dev)); | 190 | pci_name(dev)); |
| 181 | pci_disable_msi(dev); | ||
| 182 | 191 | ||
| 192 | if (dev->msi_enabled) { | ||
| 193 | struct xen_pcibk_dev_data *dev_data; | ||
| 194 | |||
| 195 | pci_disable_msi(dev); | ||
| 196 | |||
| 197 | dev_data = pci_get_drvdata(dev); | ||
| 198 | if (dev_data) | ||
| 199 | dev_data->ack_intr = 1; | ||
| 200 | } | ||
| 183 | op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; | 201 | op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; |
| 184 | if (unlikely(verbose_request)) | 202 | if (unlikely(verbose_request)) |
| 185 | printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev), | 203 | printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev), |
| 186 | op->value); | 204 | op->value); |
| 187 | dev_data = pci_get_drvdata(dev); | ||
| 188 | if (dev_data) | ||
| 189 | dev_data->ack_intr = 1; | ||
| 190 | return 0; | 205 | return 0; |
| 191 | } | 206 | } |
| 192 | 207 | ||
| @@ -197,13 +212,26 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev, | |||
| 197 | struct xen_pcibk_dev_data *dev_data; | 212 | struct xen_pcibk_dev_data *dev_data; |
| 198 | int i, result; | 213 | int i, result; |
| 199 | struct msix_entry *entries; | 214 | struct msix_entry *entries; |
| 215 | u16 cmd; | ||
| 200 | 216 | ||
| 201 | if (unlikely(verbose_request)) | 217 | if (unlikely(verbose_request)) |
| 202 | printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n", | 218 | printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n", |
| 203 | pci_name(dev)); | 219 | pci_name(dev)); |
| 220 | |||
| 204 | if (op->value > SH_INFO_MAX_VEC) | 221 | if (op->value > SH_INFO_MAX_VEC) |
| 205 | return -EINVAL; | 222 | return -EINVAL; |
| 206 | 223 | ||
| 224 | if (dev->msix_enabled) | ||
| 225 | return -EALREADY; | ||
| 226 | |||
| 227 | /* | ||
| 228 | * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able | ||
| 229 | * to access the BARs where the MSI-X entries reside. | ||
| 230 | */ | ||
| 231 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | ||
| 232 | if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY)) | ||
| 233 | return -ENXIO; | ||
| 234 | |||
| 207 | entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL); | 235 | entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL); |
| 208 | if (entries == NULL) | 236 | if (entries == NULL) |
| 209 | return -ENOMEM; | 237 | return -ENOMEM; |
| @@ -245,23 +273,27 @@ static | |||
| 245 | int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, | 273 | int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev, |
| 246 | struct pci_dev *dev, struct xen_pci_op *op) | 274 | struct pci_dev *dev, struct xen_pci_op *op) |
| 247 | { | 275 | { |
| 248 | struct xen_pcibk_dev_data *dev_data; | ||
| 249 | if (unlikely(verbose_request)) | 276 | if (unlikely(verbose_request)) |
| 250 | printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n", | 277 | printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n", |
| 251 | pci_name(dev)); | 278 | pci_name(dev)); |
| 252 | pci_disable_msix(dev); | ||
| 253 | 279 | ||
| 280 | if (dev->msix_enabled) { | ||
| 281 | struct xen_pcibk_dev_data *dev_data; | ||
| 282 | |||
| 283 | pci_disable_msix(dev); | ||
| 284 | |||
| 285 | dev_data = pci_get_drvdata(dev); | ||
| 286 | if (dev_data) | ||
| 287 | dev_data->ack_intr = 1; | ||
| 288 | } | ||
| 254 | /* | 289 | /* |
| 255 | * SR-IOV devices (which don't have any legacy IRQ) have | 290 | * SR-IOV devices (which don't have any legacy IRQ) have |
| 256 | * an undefined IRQ value of zero. | 291 | * an undefined IRQ value of zero. |
| 257 | */ | 292 | */ |
| 258 | op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; | 293 | op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0; |
| 259 | if (unlikely(verbose_request)) | 294 | if (unlikely(verbose_request)) |
| 260 | printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev), | 295 | printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", |
| 261 | op->value); | 296 | pci_name(dev), op->value); |
| 262 | dev_data = pci_get_drvdata(dev); | ||
| 263 | if (dev_data) | ||
| 264 | dev_data->ack_intr = 1; | ||
| 265 | return 0; | 297 | return 0; |
| 266 | } | 298 | } |
| 267 | #endif | 299 | #endif |
| @@ -298,9 +330,11 @@ void xen_pcibk_do_op(struct work_struct *data) | |||
| 298 | container_of(data, struct xen_pcibk_device, op_work); | 330 | container_of(data, struct xen_pcibk_device, op_work); |
| 299 | struct pci_dev *dev; | 331 | struct pci_dev *dev; |
| 300 | struct xen_pcibk_dev_data *dev_data = NULL; | 332 | struct xen_pcibk_dev_data *dev_data = NULL; |
| 301 | struct xen_pci_op *op = &pdev->sh_info->op; | 333 | struct xen_pci_op *op = &pdev->op; |
| 302 | int test_intx = 0; | 334 | int test_intx = 0; |
| 303 | 335 | ||
| 336 | *op = pdev->sh_info->op; | ||
| 337 | barrier(); | ||
| 304 | dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn); | 338 | dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn); |
| 305 | 339 | ||
| 306 | if (dev == NULL) | 340 | if (dev == NULL) |
| @@ -342,6 +376,17 @@ void xen_pcibk_do_op(struct work_struct *data) | |||
| 342 | if ((dev_data->enable_intx != test_intx)) | 376 | if ((dev_data->enable_intx != test_intx)) |
| 343 | xen_pcibk_control_isr(dev, 0 /* no reset */); | 377 | xen_pcibk_control_isr(dev, 0 /* no reset */); |
| 344 | } | 378 | } |
| 379 | pdev->sh_info->op.err = op->err; | ||
| 380 | pdev->sh_info->op.value = op->value; | ||
| 381 | #ifdef CONFIG_PCI_MSI | ||
| 382 | if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) { | ||
| 383 | unsigned int i; | ||
| 384 | |||
| 385 | for (i = 0; i < op->value; i++) | ||
| 386 | pdev->sh_info->op.msix_entries[i].vector = | ||
| 387 | op->msix_entries[i].vector; | ||
| 388 | } | ||
| 389 | #endif | ||
| 345 | /* Tell the driver domain that we're done. */ | 390 | /* Tell the driver domain that we're done. */ |
| 346 | wmb(); | 391 | wmb(); |
| 347 | clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags); | 392 | clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags); |
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c index 98bc345f296e..4843741e703a 100644 --- a/drivers/xen/xen-pciback/xenbus.c +++ b/drivers/xen/xen-pciback/xenbus.c | |||
| @@ -44,7 +44,6 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev) | |||
| 44 | dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev); | 44 | dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev); |
| 45 | 45 | ||
| 46 | pdev->xdev = xdev; | 46 | pdev->xdev = xdev; |
| 47 | dev_set_drvdata(&xdev->dev, pdev); | ||
| 48 | 47 | ||
| 49 | mutex_init(&pdev->dev_lock); | 48 | mutex_init(&pdev->dev_lock); |
| 50 | 49 | ||
| @@ -58,6 +57,9 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev) | |||
| 58 | kfree(pdev); | 57 | kfree(pdev); |
| 59 | pdev = NULL; | 58 | pdev = NULL; |
| 60 | } | 59 | } |
| 60 | |||
| 61 | dev_set_drvdata(&xdev->dev, pdev); | ||
| 62 | |||
| 61 | out: | 63 | out: |
| 62 | return pdev; | 64 | return pdev; |
| 63 | } | 65 | } |
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 43bcae852546..ad4eb1024d1f 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c | |||
| @@ -726,7 +726,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info) | |||
| 726 | if (!pending_req) | 726 | if (!pending_req) |
| 727 | return 1; | 727 | return 1; |
| 728 | 728 | ||
| 729 | ring_req = *RING_GET_REQUEST(ring, rc); | 729 | RING_COPY_REQUEST(ring, rc, &ring_req); |
| 730 | ring->req_cons = ++rc; | 730 | ring->req_cons = ++rc; |
| 731 | 731 | ||
| 732 | err = prepare_pending_reqs(info, &ring_req, pending_req); | 732 | err = prepare_pending_reqs(info, &ring_req, pending_req); |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 4b89680a1923..c4661db2b72a 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
| @@ -10480,11 +10480,15 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) | |||
| 10480 | * until transaction commit to do the actual discard. | 10480 | * until transaction commit to do the actual discard. |
| 10481 | */ | 10481 | */ |
| 10482 | if (trimming) { | 10482 | if (trimming) { |
| 10483 | WARN_ON(!list_empty(&block_group->bg_list)); | 10483 | spin_lock(&fs_info->unused_bgs_lock); |
| 10484 | spin_lock(&trans->transaction->deleted_bgs_lock); | 10484 | /* |
| 10485 | * A concurrent scrub might have added us to the list | ||
| 10486 | * fs_info->unused_bgs, so use a list_move operation | ||
| 10487 | * to add the block group to the deleted_bgs list. | ||
| 10488 | */ | ||
| 10485 | list_move(&block_group->bg_list, | 10489 | list_move(&block_group->bg_list, |
| 10486 | &trans->transaction->deleted_bgs); | 10490 | &trans->transaction->deleted_bgs); |
| 10487 | spin_unlock(&trans->transaction->deleted_bgs_lock); | 10491 | spin_unlock(&fs_info->unused_bgs_lock); |
| 10488 | btrfs_get_block_group(block_group); | 10492 | btrfs_get_block_group(block_group); |
| 10489 | } | 10493 | } |
| 10490 | end_trans: | 10494 | end_trans: |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 72e73461c064..0f09526aa7d9 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
| @@ -1291,7 +1291,8 @@ out: | |||
| 1291 | * on error we return an unlocked page and the error value | 1291 | * on error we return an unlocked page and the error value |
| 1292 | * on success we return a locked page and 0 | 1292 | * on success we return a locked page and 0 |
| 1293 | */ | 1293 | */ |
| 1294 | static int prepare_uptodate_page(struct page *page, u64 pos, | 1294 | static int prepare_uptodate_page(struct inode *inode, |
| 1295 | struct page *page, u64 pos, | ||
| 1295 | bool force_uptodate) | 1296 | bool force_uptodate) |
| 1296 | { | 1297 | { |
| 1297 | int ret = 0; | 1298 | int ret = 0; |
| @@ -1306,6 +1307,10 @@ static int prepare_uptodate_page(struct page *page, u64 pos, | |||
| 1306 | unlock_page(page); | 1307 | unlock_page(page); |
| 1307 | return -EIO; | 1308 | return -EIO; |
| 1308 | } | 1309 | } |
| 1310 | if (page->mapping != inode->i_mapping) { | ||
| 1311 | unlock_page(page); | ||
| 1312 | return -EAGAIN; | ||
| 1313 | } | ||
| 1309 | } | 1314 | } |
| 1310 | return 0; | 1315 | return 0; |
| 1311 | } | 1316 | } |
| @@ -1324,6 +1329,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages, | |||
| 1324 | int faili; | 1329 | int faili; |
| 1325 | 1330 | ||
| 1326 | for (i = 0; i < num_pages; i++) { | 1331 | for (i = 0; i < num_pages; i++) { |
| 1332 | again: | ||
| 1327 | pages[i] = find_or_create_page(inode->i_mapping, index + i, | 1333 | pages[i] = find_or_create_page(inode->i_mapping, index + i, |
| 1328 | mask | __GFP_WRITE); | 1334 | mask | __GFP_WRITE); |
| 1329 | if (!pages[i]) { | 1335 | if (!pages[i]) { |
| @@ -1333,13 +1339,17 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages, | |||
| 1333 | } | 1339 | } |
| 1334 | 1340 | ||
| 1335 | if (i == 0) | 1341 | if (i == 0) |
| 1336 | err = prepare_uptodate_page(pages[i], pos, | 1342 | err = prepare_uptodate_page(inode, pages[i], pos, |
| 1337 | force_uptodate); | 1343 | force_uptodate); |
| 1338 | if (i == num_pages - 1) | 1344 | if (!err && i == num_pages - 1) |
| 1339 | err = prepare_uptodate_page(pages[i], | 1345 | err = prepare_uptodate_page(inode, pages[i], |
| 1340 | pos + write_bytes, false); | 1346 | pos + write_bytes, false); |
| 1341 | if (err) { | 1347 | if (err) { |
| 1342 | page_cache_release(pages[i]); | 1348 | page_cache_release(pages[i]); |
| 1349 | if (err == -EAGAIN) { | ||
| 1350 | err = 0; | ||
| 1351 | goto again; | ||
| 1352 | } | ||
| 1343 | faili = i - 1; | 1353 | faili = i - 1; |
| 1344 | goto fail; | 1354 | goto fail; |
| 1345 | } | 1355 | } |
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 85a1f8621b51..cfe99bec49de 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
| @@ -891,7 +891,7 @@ out: | |||
| 891 | spin_unlock(&block_group->lock); | 891 | spin_unlock(&block_group->lock); |
| 892 | ret = 0; | 892 | ret = 0; |
| 893 | 893 | ||
| 894 | btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now", | 894 | btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now", |
| 895 | block_group->key.objectid); | 895 | block_group->key.objectid); |
| 896 | } | 896 | } |
| 897 | 897 | ||
| @@ -2972,7 +2972,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, | |||
| 2972 | u64 cont1_bytes, u64 min_bytes) | 2972 | u64 cont1_bytes, u64 min_bytes) |
| 2973 | { | 2973 | { |
| 2974 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | 2974 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
| 2975 | struct btrfs_free_space *entry; | 2975 | struct btrfs_free_space *entry = NULL; |
| 2976 | int ret = -ENOSPC; | 2976 | int ret = -ENOSPC; |
| 2977 | u64 bitmap_offset = offset_to_bitmap(ctl, offset); | 2977 | u64 bitmap_offset = offset_to_bitmap(ctl, offset); |
| 2978 | 2978 | ||
| @@ -2983,8 +2983,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, | |||
| 2983 | * The bitmap that covers offset won't be in the list unless offset | 2983 | * The bitmap that covers offset won't be in the list unless offset |
| 2984 | * is just its start offset. | 2984 | * is just its start offset. |
| 2985 | */ | 2985 | */ |
| 2986 | entry = list_first_entry(bitmaps, struct btrfs_free_space, list); | 2986 | if (!list_empty(bitmaps)) |
| 2987 | if (entry->offset != bitmap_offset) { | 2987 | entry = list_first_entry(bitmaps, struct btrfs_free_space, list); |
| 2988 | |||
| 2989 | if (!entry || entry->offset != bitmap_offset) { | ||
| 2988 | entry = tree_search_offset(ctl, bitmap_offset, 1, 0); | 2990 | entry = tree_search_offset(ctl, bitmap_offset, 1, 0); |
| 2989 | if (entry && list_empty(&entry->list)) | 2991 | if (entry && list_empty(&entry->list)) |
| 2990 | list_add(&entry->list, bitmaps); | 2992 | list_add(&entry->list, bitmaps); |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 3367a3c6f214..be8eae80ff65 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
| @@ -274,7 +274,6 @@ loop: | |||
| 274 | cur_trans->num_dirty_bgs = 0; | 274 | cur_trans->num_dirty_bgs = 0; |
| 275 | spin_lock_init(&cur_trans->dirty_bgs_lock); | 275 | spin_lock_init(&cur_trans->dirty_bgs_lock); |
| 276 | INIT_LIST_HEAD(&cur_trans->deleted_bgs); | 276 | INIT_LIST_HEAD(&cur_trans->deleted_bgs); |
| 277 | spin_lock_init(&cur_trans->deleted_bgs_lock); | ||
| 278 | spin_lock_init(&cur_trans->dropped_roots_lock); | 277 | spin_lock_init(&cur_trans->dropped_roots_lock); |
| 279 | list_add_tail(&cur_trans->list, &fs_info->trans_list); | 278 | list_add_tail(&cur_trans->list, &fs_info->trans_list); |
| 280 | extent_io_tree_init(&cur_trans->dirty_pages, | 279 | extent_io_tree_init(&cur_trans->dirty_pages, |
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 0da21ca9b3fb..64c8221b6165 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h | |||
| @@ -77,8 +77,8 @@ struct btrfs_transaction { | |||
| 77 | */ | 77 | */ |
| 78 | struct mutex cache_write_mutex; | 78 | struct mutex cache_write_mutex; |
| 79 | spinlock_t dirty_bgs_lock; | 79 | spinlock_t dirty_bgs_lock; |
| 80 | /* Protected by spin lock fs_info->unused_bgs_lock. */ | ||
| 80 | struct list_head deleted_bgs; | 81 | struct list_head deleted_bgs; |
| 81 | spinlock_t deleted_bgs_lock; | ||
| 82 | spinlock_t dropped_roots_lock; | 82 | spinlock_t dropped_roots_lock; |
| 83 | struct btrfs_delayed_ref_root delayed_refs; | 83 | struct btrfs_delayed_ref_root delayed_refs; |
| 84 | int aborted; | 84 | int aborted; |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 456452206609..a23399e8e3ab 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -3548,12 +3548,11 @@ again: | |||
| 3548 | 3548 | ||
| 3549 | ret = btrfs_force_chunk_alloc(trans, chunk_root, | 3549 | ret = btrfs_force_chunk_alloc(trans, chunk_root, |
| 3550 | BTRFS_BLOCK_GROUP_DATA); | 3550 | BTRFS_BLOCK_GROUP_DATA); |
| 3551 | btrfs_end_transaction(trans, chunk_root); | ||
| 3551 | if (ret < 0) { | 3552 | if (ret < 0) { |
| 3552 | mutex_unlock(&fs_info->delete_unused_bgs_mutex); | 3553 | mutex_unlock(&fs_info->delete_unused_bgs_mutex); |
| 3553 | goto error; | 3554 | goto error; |
| 3554 | } | 3555 | } |
| 3555 | |||
| 3556 | btrfs_end_transaction(trans, chunk_root); | ||
| 3557 | chunk_reserved = 1; | 3556 | chunk_reserved = 1; |
| 3558 | } | 3557 | } |
| 3559 | 3558 | ||
diff --git a/fs/proc/base.c b/fs/proc/base.c index bd3e9e68125b..4bd5d3118acd 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
| @@ -2494,6 +2494,7 @@ static ssize_t proc_coredump_filter_write(struct file *file, | |||
| 2494 | mm = get_task_mm(task); | 2494 | mm = get_task_mm(task); |
| 2495 | if (!mm) | 2495 | if (!mm) |
| 2496 | goto out_no_mm; | 2496 | goto out_no_mm; |
| 2497 | ret = 0; | ||
| 2497 | 2498 | ||
| 2498 | for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) { | 2499 | for (i = 0, mask = 1; i < MMF_DUMP_FILTER_BITS; i++, mask <<= 1) { |
| 2499 | if (val & mask) | 2500 | if (val & mask) |
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h index 7be22da321f3..a4cf57cd0f75 100644 --- a/include/linux/enclosure.h +++ b/include/linux/enclosure.h | |||
| @@ -29,7 +29,11 @@ | |||
| 29 | /* A few generic types ... taken from ses-2 */ | 29 | /* A few generic types ... taken from ses-2 */ |
| 30 | enum enclosure_component_type { | 30 | enum enclosure_component_type { |
| 31 | ENCLOSURE_COMPONENT_DEVICE = 0x01, | 31 | ENCLOSURE_COMPONENT_DEVICE = 0x01, |
| 32 | ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS = 0x07, | ||
| 33 | ENCLOSURE_COMPONENT_SCSI_TARGET_PORT = 0x14, | ||
| 34 | ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT = 0x15, | ||
| 32 | ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17, | 35 | ENCLOSURE_COMPONENT_ARRAY_DEVICE = 0x17, |
| 36 | ENCLOSURE_COMPONENT_SAS_EXPANDER = 0x18, | ||
| 33 | }; | 37 | }; |
| 34 | 38 | ||
| 35 | /* ses-2 common element status */ | 39 | /* ses-2 common element status */ |
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 877ef226f90f..772362adf471 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | #ifndef LINUX_MM_DEBUG_H | 1 | #ifndef LINUX_MM_DEBUG_H |
| 2 | #define LINUX_MM_DEBUG_H 1 | 2 | #define LINUX_MM_DEBUG_H 1 |
| 3 | 3 | ||
| 4 | #include <linux/bug.h> | ||
| 4 | #include <linux/stringify.h> | 5 | #include <linux/stringify.h> |
| 5 | 6 | ||
| 6 | struct page; | 7 | struct page; |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3b5d134e945a..3143c847bddb 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -2084,7 +2084,7 @@ struct pcpu_sw_netstats { | |||
| 2084 | }) | 2084 | }) |
| 2085 | 2085 | ||
| 2086 | #define netdev_alloc_pcpu_stats(type) \ | 2086 | #define netdev_alloc_pcpu_stats(type) \ |
| 2087 | __netdev_alloc_pcpu_stats(type, GFP_KERNEL); | 2087 | __netdev_alloc_pcpu_stats(type, GFP_KERNEL) |
| 2088 | 2088 | ||
| 2089 | #include <linux/notifier.h> | 2089 | #include <linux/notifier.h> |
| 2090 | 2090 | ||
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 249d1bb01e03..5646b24bfc64 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h | |||
| @@ -14,7 +14,7 @@ struct nfnl_callback { | |||
| 14 | int (*call_rcu)(struct sock *nl, struct sk_buff *skb, | 14 | int (*call_rcu)(struct sock *nl, struct sk_buff *skb, |
| 15 | const struct nlmsghdr *nlh, | 15 | const struct nlmsghdr *nlh, |
| 16 | const struct nlattr * const cda[]); | 16 | const struct nlattr * const cda[]); |
| 17 | int (*call_batch)(struct sock *nl, struct sk_buff *skb, | 17 | int (*call_batch)(struct net *net, struct sock *nl, struct sk_buff *skb, |
| 18 | const struct nlmsghdr *nlh, | 18 | const struct nlmsghdr *nlh, |
| 19 | const struct nlattr * const cda[]); | 19 | const struct nlattr * const cda[]); |
| 20 | const struct nla_policy *policy; /* netlink attribute policy */ | 20 | const struct nla_policy *policy; /* netlink attribute policy */ |
diff --git a/include/linux/platform_data/edma.h b/include/linux/platform_data/edma.h index e2878baeb90e..4299f4ba03bd 100644 --- a/include/linux/platform_data/edma.h +++ b/include/linux/platform_data/edma.h | |||
| @@ -72,7 +72,7 @@ struct edma_soc_info { | |||
| 72 | struct edma_rsv_info *rsv; | 72 | struct edma_rsv_info *rsv; |
| 73 | 73 | ||
| 74 | /* List of channels allocated for memcpy, terminated with -1 */ | 74 | /* List of channels allocated for memcpy, terminated with -1 */ |
| 75 | s16 *memcpy_channels; | 75 | s32 *memcpy_channels; |
| 76 | 76 | ||
| 77 | s8 (*queue_priority_mapping)[2]; | 77 | s8 (*queue_priority_mapping)[2]; |
| 78 | const s16 (*xbar_chans)[2]; | 78 | const s16 (*xbar_chans)[2]; |
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 6a4347639c03..1d1ba2c5ee7a 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h | |||
| @@ -9,6 +9,8 @@ | |||
| 9 | #ifndef __COMMON_HSI__ | 9 | #ifndef __COMMON_HSI__ |
| 10 | #define __COMMON_HSI__ | 10 | #define __COMMON_HSI__ |
| 11 | 11 | ||
| 12 | #define CORE_SPQE_PAGE_SIZE_BYTES 4096 | ||
| 13 | |||
| 12 | #define FW_MAJOR_VERSION 8 | 14 | #define FW_MAJOR_VERSION 8 |
| 13 | #define FW_MINOR_VERSION 4 | 15 | #define FW_MINOR_VERSION 4 |
| 14 | #define FW_REVISION_VERSION 2 | 16 | #define FW_REVISION_VERSION 2 |
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index b920c3605c46..41b9049b57e2 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h | |||
| @@ -111,7 +111,8 @@ static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain) | |||
| 111 | used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) - | 111 | used = ((u32)0x10000u + (u32)(p_chain->prod_idx)) - |
| 112 | (u32)p_chain->cons_idx; | 112 | (u32)p_chain->cons_idx; |
| 113 | if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) | 113 | if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR) |
| 114 | used -= (used / p_chain->elem_per_page); | 114 | used -= p_chain->prod_idx / p_chain->elem_per_page - |
| 115 | p_chain->cons_idx / p_chain->elem_per_page; | ||
| 115 | 116 | ||
| 116 | return p_chain->capacity - used; | 117 | return p_chain->capacity - used; |
| 117 | } | 118 | } |
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 843ceca9a21e..e50b31d18462 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | 19 | ||
| 20 | #include <linux/atomic.h> | 20 | #include <linux/atomic.h> |
| 21 | #include <linux/compiler.h> | 21 | #include <linux/compiler.h> |
| 22 | #include <linux/err.h> | ||
| 22 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
| 23 | #include <linux/jhash.h> | 24 | #include <linux/jhash.h> |
| 24 | #include <linux/list_nulls.h> | 25 | #include <linux/list_nulls.h> |
| @@ -339,10 +340,11 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, | |||
| 339 | int rhashtable_init(struct rhashtable *ht, | 340 | int rhashtable_init(struct rhashtable *ht, |
| 340 | const struct rhashtable_params *params); | 341 | const struct rhashtable_params *params); |
| 341 | 342 | ||
| 342 | int rhashtable_insert_slow(struct rhashtable *ht, const void *key, | 343 | struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, |
| 343 | struct rhash_head *obj, | 344 | const void *key, |
| 344 | struct bucket_table *old_tbl); | 345 | struct rhash_head *obj, |
| 345 | int rhashtable_insert_rehash(struct rhashtable *ht); | 346 | struct bucket_table *old_tbl); |
| 347 | int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl); | ||
| 346 | 348 | ||
| 347 | int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); | 349 | int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter); |
| 348 | void rhashtable_walk_exit(struct rhashtable_iter *iter); | 350 | void rhashtable_walk_exit(struct rhashtable_iter *iter); |
| @@ -598,9 +600,11 @@ restart: | |||
| 598 | 600 | ||
| 599 | new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); | 601 | new_tbl = rht_dereference_rcu(tbl->future_tbl, ht); |
| 600 | if (unlikely(new_tbl)) { | 602 | if (unlikely(new_tbl)) { |
| 601 | err = rhashtable_insert_slow(ht, key, obj, new_tbl); | 603 | tbl = rhashtable_insert_slow(ht, key, obj, new_tbl); |
| 602 | if (err == -EAGAIN) | 604 | if (!IS_ERR_OR_NULL(tbl)) |
| 603 | goto slow_path; | 605 | goto slow_path; |
| 606 | |||
| 607 | err = PTR_ERR(tbl); | ||
| 604 | goto out; | 608 | goto out; |
| 605 | } | 609 | } |
| 606 | 610 | ||
| @@ -611,7 +615,7 @@ restart: | |||
| 611 | if (unlikely(rht_grow_above_100(ht, tbl))) { | 615 | if (unlikely(rht_grow_above_100(ht, tbl))) { |
| 612 | slow_path: | 616 | slow_path: |
| 613 | spin_unlock_bh(lock); | 617 | spin_unlock_bh(lock); |
| 614 | err = rhashtable_insert_rehash(ht); | 618 | err = rhashtable_insert_rehash(ht, tbl); |
| 615 | rcu_read_unlock(); | 619 | rcu_read_unlock(); |
| 616 | if (err) | 620 | if (err) |
| 617 | return err; | 621 | return err; |
diff --git a/include/net/dst.h b/include/net/dst.h index 1279f9b09791..c7329dcd90cc 100644 --- a/include/net/dst.h +++ b/include/net/dst.h | |||
| @@ -322,6 +322,39 @@ static inline void skb_dst_force(struct sk_buff *skb) | |||
| 322 | } | 322 | } |
| 323 | } | 323 | } |
| 324 | 324 | ||
| 325 | /** | ||
| 326 | * dst_hold_safe - Take a reference on a dst if possible | ||
| 327 | * @dst: pointer to dst entry | ||
| 328 | * | ||
| 329 | * This helper returns false if it could not safely | ||
| 330 | * take a reference on a dst. | ||
| 331 | */ | ||
| 332 | static inline bool dst_hold_safe(struct dst_entry *dst) | ||
| 333 | { | ||
| 334 | if (dst->flags & DST_NOCACHE) | ||
| 335 | return atomic_inc_not_zero(&dst->__refcnt); | ||
| 336 | dst_hold(dst); | ||
| 337 | return true; | ||
| 338 | } | ||
| 339 | |||
| 340 | /** | ||
| 341 | * skb_dst_force_safe - makes sure skb dst is refcounted | ||
| 342 | * @skb: buffer | ||
| 343 | * | ||
| 344 | * If dst is not yet refcounted and not destroyed, grab a ref on it. | ||
| 345 | */ | ||
| 346 | static inline void skb_dst_force_safe(struct sk_buff *skb) | ||
| 347 | { | ||
| 348 | if (skb_dst_is_noref(skb)) { | ||
| 349 | struct dst_entry *dst = skb_dst(skb); | ||
| 350 | |||
| 351 | if (!dst_hold_safe(dst)) | ||
| 352 | dst = NULL; | ||
| 353 | |||
| 354 | skb->_skb_refdst = (unsigned long)dst; | ||
| 355 | } | ||
| 356 | } | ||
| 357 | |||
| 325 | 358 | ||
| 326 | /** | 359 | /** |
| 327 | * __skb_tunnel_rx - prepare skb for rx reinsert | 360 | * __skb_tunnel_rx - prepare skb for rx reinsert |
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 2134e6d815bc..625bdf95d673 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h | |||
| @@ -210,18 +210,37 @@ struct inet_sock { | |||
| 210 | #define IP_CMSG_ORIGDSTADDR BIT(6) | 210 | #define IP_CMSG_ORIGDSTADDR BIT(6) |
| 211 | #define IP_CMSG_CHECKSUM BIT(7) | 211 | #define IP_CMSG_CHECKSUM BIT(7) |
| 212 | 212 | ||
| 213 | /* SYNACK messages might be attached to request sockets. | 213 | /** |
| 214 | * sk_to_full_sk - Access to a full socket | ||
| 215 | * @sk: pointer to a socket | ||
| 216 | * | ||
| 217 | * SYNACK messages might be attached to request sockets. | ||
| 214 | * Some places want to reach the listener in this case. | 218 | * Some places want to reach the listener in this case. |
| 215 | */ | 219 | */ |
| 216 | static inline struct sock *skb_to_full_sk(const struct sk_buff *skb) | 220 | static inline struct sock *sk_to_full_sk(struct sock *sk) |
| 217 | { | 221 | { |
| 218 | struct sock *sk = skb->sk; | 222 | #ifdef CONFIG_INET |
| 219 | |||
| 220 | if (sk && sk->sk_state == TCP_NEW_SYN_RECV) | 223 | if (sk && sk->sk_state == TCP_NEW_SYN_RECV) |
| 221 | sk = inet_reqsk(sk)->rsk_listener; | 224 | sk = inet_reqsk(sk)->rsk_listener; |
| 225 | #endif | ||
| 226 | return sk; | ||
| 227 | } | ||
| 228 | |||
| 229 | /* sk_to_full_sk() variant with a const argument */ | ||
| 230 | static inline const struct sock *sk_const_to_full_sk(const struct sock *sk) | ||
| 231 | { | ||
| 232 | #ifdef CONFIG_INET | ||
| 233 | if (sk && sk->sk_state == TCP_NEW_SYN_RECV) | ||
| 234 | sk = ((const struct request_sock *)sk)->rsk_listener; | ||
| 235 | #endif | ||
| 222 | return sk; | 236 | return sk; |
| 223 | } | 237 | } |
| 224 | 238 | ||
| 239 | static inline struct sock *skb_to_full_sk(const struct sk_buff *skb) | ||
| 240 | { | ||
| 241 | return sk_to_full_sk(skb->sk); | ||
| 242 | } | ||
| 243 | |||
| 225 | static inline struct inet_sock *inet_sk(const struct sock *sk) | 244 | static inline struct inet_sock *inet_sk(const struct sock *sk) |
| 226 | { | 245 | { |
| 227 | return (struct inet_sock *)sk; | 246 | return (struct inet_sock *)sk; |
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 4a6009d4486b..235c7811a86a 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h | |||
| @@ -78,6 +78,7 @@ void inet_initpeers(void) __init; | |||
| 78 | static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip) | 78 | static inline void inetpeer_set_addr_v4(struct inetpeer_addr *iaddr, __be32 ip) |
| 79 | { | 79 | { |
| 80 | iaddr->a4.addr = ip; | 80 | iaddr->a4.addr = ip; |
| 81 | iaddr->a4.vif = 0; | ||
| 81 | iaddr->family = AF_INET; | 82 | iaddr->family = AF_INET; |
| 82 | } | 83 | } |
| 83 | 84 | ||
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 7bbb71081aeb..eea9bdeecba2 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
| @@ -1493,7 +1493,8 @@ struct sctp_association { | |||
| 1493 | * : SACK's are not delayed (see Section 6). | 1493 | * : SACK's are not delayed (see Section 6). |
| 1494 | */ | 1494 | */ |
| 1495 | __u8 sack_needed:1, /* Do we need to sack the peer? */ | 1495 | __u8 sack_needed:1, /* Do we need to sack the peer? */ |
| 1496 | sack_generation:1; | 1496 | sack_generation:1, |
| 1497 | zero_window_announced:1; | ||
| 1497 | __u32 sack_cnt; | 1498 | __u32 sack_cnt; |
| 1498 | 1499 | ||
| 1499 | __u32 adaptation_ind; /* Adaptation Code point. */ | 1500 | __u32 adaptation_ind; /* Adaptation Code point. */ |
diff --git a/include/net/sock.h b/include/net/sock.h index 52d27ee924f4..14d3c0734007 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
| @@ -388,7 +388,7 @@ struct sock { | |||
| 388 | struct socket_wq *sk_wq_raw; | 388 | struct socket_wq *sk_wq_raw; |
| 389 | }; | 389 | }; |
| 390 | #ifdef CONFIG_XFRM | 390 | #ifdef CONFIG_XFRM |
| 391 | struct xfrm_policy *sk_policy[2]; | 391 | struct xfrm_policy __rcu *sk_policy[2]; |
| 392 | #endif | 392 | #endif |
| 393 | struct dst_entry *sk_rx_dst; | 393 | struct dst_entry *sk_rx_dst; |
| 394 | struct dst_entry __rcu *sk_dst_cache; | 394 | struct dst_entry __rcu *sk_dst_cache; |
| @@ -404,6 +404,7 @@ struct sock { | |||
| 404 | sk_userlocks : 4, | 404 | sk_userlocks : 4, |
| 405 | sk_protocol : 8, | 405 | sk_protocol : 8, |
| 406 | sk_type : 16; | 406 | sk_type : 16; |
| 407 | #define SK_PROTOCOL_MAX U8_MAX | ||
| 407 | kmemcheck_bitfield_end(flags); | 408 | kmemcheck_bitfield_end(flags); |
| 408 | int sk_wmem_queued; | 409 | int sk_wmem_queued; |
| 409 | gfp_t sk_allocation; | 410 | gfp_t sk_allocation; |
| @@ -740,6 +741,8 @@ enum sock_flags { | |||
| 740 | SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ | 741 | SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */ |
| 741 | }; | 742 | }; |
| 742 | 743 | ||
| 744 | #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) | ||
| 745 | |||
| 743 | static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) | 746 | static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) |
| 744 | { | 747 | { |
| 745 | nsk->sk_flags = osk->sk_flags; | 748 | nsk->sk_flags = osk->sk_flags; |
| @@ -814,7 +817,7 @@ void sk_stream_write_space(struct sock *sk); | |||
| 814 | static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) | 817 | static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) |
| 815 | { | 818 | { |
| 816 | /* dont let skb dst not refcounted, we are going to leave rcu lock */ | 819 | /* dont let skb dst not refcounted, we are going to leave rcu lock */ |
| 817 | skb_dst_force(skb); | 820 | skb_dst_force_safe(skb); |
| 818 | 821 | ||
| 819 | if (!sk->sk_backlog.tail) | 822 | if (!sk->sk_backlog.tail) |
| 820 | sk->sk_backlog.head = skb; | 823 | sk->sk_backlog.head = skb; |
diff --git a/include/net/vxlan.h b/include/net/vxlan.h index c1c899c3a51b..e289ada6adf6 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h | |||
| @@ -79,7 +79,7 @@ struct vxlanhdr { | |||
| 79 | }; | 79 | }; |
| 80 | 80 | ||
| 81 | /* VXLAN header flags. */ | 81 | /* VXLAN header flags. */ |
| 82 | #define VXLAN_HF_RCO BIT(24) | 82 | #define VXLAN_HF_RCO BIT(21) |
| 83 | #define VXLAN_HF_VNI BIT(27) | 83 | #define VXLAN_HF_VNI BIT(27) |
| 84 | #define VXLAN_HF_GBP BIT(31) | 84 | #define VXLAN_HF_GBP BIT(31) |
| 85 | 85 | ||
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 4a9c21f9b4ea..d6f6e5006ee9 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
| @@ -548,6 +548,7 @@ struct xfrm_policy { | |||
| 548 | u16 family; | 548 | u16 family; |
| 549 | struct xfrm_sec_ctx *security; | 549 | struct xfrm_sec_ctx *security; |
| 550 | struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH]; | 550 | struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH]; |
| 551 | struct rcu_head rcu; | ||
| 551 | }; | 552 | }; |
| 552 | 553 | ||
| 553 | static inline struct net *xp_net(const struct xfrm_policy *xp) | 554 | static inline struct net *xp_net(const struct xfrm_policy *xp) |
| @@ -1141,12 +1142,14 @@ static inline int xfrm6_route_forward(struct sk_buff *skb) | |||
| 1141 | return xfrm_route_forward(skb, AF_INET6); | 1142 | return xfrm_route_forward(skb, AF_INET6); |
| 1142 | } | 1143 | } |
| 1143 | 1144 | ||
| 1144 | int __xfrm_sk_clone_policy(struct sock *sk); | 1145 | int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk); |
| 1145 | 1146 | ||
| 1146 | static inline int xfrm_sk_clone_policy(struct sock *sk) | 1147 | static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) |
| 1147 | { | 1148 | { |
| 1148 | if (unlikely(sk->sk_policy[0] || sk->sk_policy[1])) | 1149 | sk->sk_policy[0] = NULL; |
| 1149 | return __xfrm_sk_clone_policy(sk); | 1150 | sk->sk_policy[1] = NULL; |
| 1151 | if (unlikely(osk->sk_policy[0] || osk->sk_policy[1])) | ||
| 1152 | return __xfrm_sk_clone_policy(sk, osk); | ||
| 1150 | return 0; | 1153 | return 0; |
| 1151 | } | 1154 | } |
| 1152 | 1155 | ||
| @@ -1154,12 +1157,16 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir); | |||
| 1154 | 1157 | ||
| 1155 | static inline void xfrm_sk_free_policy(struct sock *sk) | 1158 | static inline void xfrm_sk_free_policy(struct sock *sk) |
| 1156 | { | 1159 | { |
| 1157 | if (unlikely(sk->sk_policy[0] != NULL)) { | 1160 | struct xfrm_policy *pol; |
| 1158 | xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX); | 1161 | |
| 1162 | pol = rcu_dereference_protected(sk->sk_policy[0], 1); | ||
| 1163 | if (unlikely(pol != NULL)) { | ||
| 1164 | xfrm_policy_delete(pol, XFRM_POLICY_MAX); | ||
| 1159 | sk->sk_policy[0] = NULL; | 1165 | sk->sk_policy[0] = NULL; |
| 1160 | } | 1166 | } |
| 1161 | if (unlikely(sk->sk_policy[1] != NULL)) { | 1167 | pol = rcu_dereference_protected(sk->sk_policy[1], 1); |
| 1162 | xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1); | 1168 | if (unlikely(pol != NULL)) { |
| 1169 | xfrm_policy_delete(pol, XFRM_POLICY_MAX+1); | ||
| 1163 | sk->sk_policy[1] = NULL; | 1170 | sk->sk_policy[1] = NULL; |
| 1164 | } | 1171 | } |
| 1165 | } | 1172 | } |
| @@ -1169,7 +1176,7 @@ void xfrm_garbage_collect(struct net *net); | |||
| 1169 | #else | 1176 | #else |
| 1170 | 1177 | ||
| 1171 | static inline void xfrm_sk_free_policy(struct sock *sk) {} | 1178 | static inline void xfrm_sk_free_policy(struct sock *sk) {} |
| 1172 | static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; } | 1179 | static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; } |
| 1173 | static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; } | 1180 | static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; } |
| 1174 | static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; } | 1181 | static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; } |
| 1175 | static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb) | 1182 | static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb) |
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 628e6e64c2fb..c2e5d6cb34e3 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild | |||
| @@ -186,6 +186,7 @@ header-y += if_tunnel.h | |||
| 186 | header-y += if_vlan.h | 186 | header-y += if_vlan.h |
| 187 | header-y += if_x25.h | 187 | header-y += if_x25.h |
| 188 | header-y += igmp.h | 188 | header-y += igmp.h |
| 189 | header-y += ila.h | ||
| 189 | header-y += in6.h | 190 | header-y += in6.h |
| 190 | header-y += inet_diag.h | 191 | header-y += inet_diag.h |
| 191 | header-y += in.h | 192 | header-y += in.h |
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 28ccedd000f5..a27222d5b413 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h | |||
| @@ -628,7 +628,7 @@ struct ovs_action_hash { | |||
| 628 | * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the | 628 | * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the |
| 629 | * mask, the corresponding bit in the value is copied to the connection | 629 | * mask, the corresponding bit in the value is copied to the connection |
| 630 | * tracking mark field in the connection. | 630 | * tracking mark field in the connection. |
| 631 | * @OVS_CT_ATTR_LABEL: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN | 631 | * @OVS_CT_ATTR_LABELS: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN |
| 632 | * mask. For each bit set in the mask, the corresponding bit in the value is | 632 | * mask. For each bit set in the mask, the corresponding bit in the value is |
| 633 | * copied to the connection tracking label field in the connection. | 633 | * copied to the connection tracking label field in the connection. |
| 634 | * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. | 634 | * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG. |
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h index 7d28aff605c7..7dc685b4057d 100644 --- a/include/xen/interface/io/ring.h +++ b/include/xen/interface/io/ring.h | |||
| @@ -181,6 +181,20 @@ struct __name##_back_ring { \ | |||
| 181 | #define RING_GET_REQUEST(_r, _idx) \ | 181 | #define RING_GET_REQUEST(_r, _idx) \ |
| 182 | (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) | 182 | (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req)) |
| 183 | 183 | ||
| 184 | /* | ||
| 185 | * Get a local copy of a request. | ||
| 186 | * | ||
| 187 | * Use this in preference to RING_GET_REQUEST() so all processing is | ||
| 188 | * done on a local copy that cannot be modified by the other end. | ||
| 189 | * | ||
| 190 | * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this | ||
| 191 | * to be ineffective where _req is a struct which consists of only bitfields. | ||
| 192 | */ | ||
| 193 | #define RING_COPY_REQUEST(_r, _idx, _req) do { \ | ||
| 194 | /* Use volatile to force the copy into _req. */ \ | ||
| 195 | *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \ | ||
| 196 | } while (0) | ||
| 197 | |||
| 184 | #define RING_GET_RESPONSE(_r, _idx) \ | 198 | #define RING_GET_RESPONSE(_r, _idx) \ |
| 185 | (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) | 199 | (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp)) |
| 186 | 200 | ||
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c index d092a0c9c2d4..05a37857ab55 100644 --- a/kernel/locking/osq_lock.c +++ b/kernel/locking/osq_lock.c | |||
| @@ -93,10 +93,12 @@ bool osq_lock(struct optimistic_spin_queue *lock) | |||
| 93 | node->cpu = curr; | 93 | node->cpu = curr; |
| 94 | 94 | ||
| 95 | /* | 95 | /* |
| 96 | * ACQUIRE semantics, pairs with corresponding RELEASE | 96 | * We need both ACQUIRE (pairs with corresponding RELEASE in |
| 97 | * in unlock() uncontended, or fastpath. | 97 | * unlock() uncontended, or fastpath) and RELEASE (to publish |
| 98 | * the node fields we just initialised) semantics when updating | ||
| 99 | * the lock tail. | ||
| 98 | */ | 100 | */ |
| 99 | old = atomic_xchg_acquire(&lock->tail, curr); | 101 | old = atomic_xchg(&lock->tail, curr); |
| 100 | if (old == OSQ_UNLOCKED_VAL) | 102 | if (old == OSQ_UNLOCKED_VAL) |
| 101 | return true; | 103 | return true; |
| 102 | 104 | ||
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 8855f019ebe8..d34bd24c2c84 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
| @@ -1464,7 +1464,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size, | |||
| 1464 | entry->type = dma_debug_coherent; | 1464 | entry->type = dma_debug_coherent; |
| 1465 | entry->dev = dev; | 1465 | entry->dev = dev; |
| 1466 | entry->pfn = page_to_pfn(virt_to_page(virt)); | 1466 | entry->pfn = page_to_pfn(virt_to_page(virt)); |
| 1467 | entry->offset = (size_t) virt & PAGE_MASK; | 1467 | entry->offset = (size_t) virt & ~PAGE_MASK; |
| 1468 | entry->size = size; | 1468 | entry->size = size; |
| 1469 | entry->dev_addr = dma_addr; | 1469 | entry->dev_addr = dma_addr; |
| 1470 | entry->direction = DMA_BIDIRECTIONAL; | 1470 | entry->direction = DMA_BIDIRECTIONAL; |
| @@ -1480,7 +1480,7 @@ void debug_dma_free_coherent(struct device *dev, size_t size, | |||
| 1480 | .type = dma_debug_coherent, | 1480 | .type = dma_debug_coherent, |
| 1481 | .dev = dev, | 1481 | .dev = dev, |
| 1482 | .pfn = page_to_pfn(virt_to_page(virt)), | 1482 | .pfn = page_to_pfn(virt_to_page(virt)), |
| 1483 | .offset = (size_t) virt & PAGE_MASK, | 1483 | .offset = (size_t) virt & ~PAGE_MASK, |
| 1484 | .dev_addr = addr, | 1484 | .dev_addr = addr, |
| 1485 | .size = size, | 1485 | .size = size, |
| 1486 | .direction = DMA_BIDIRECTIONAL, | 1486 | .direction = DMA_BIDIRECTIONAL, |
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index a54ff8949f91..eb9240c458fa 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
| @@ -389,33 +389,31 @@ static bool rhashtable_check_elasticity(struct rhashtable *ht, | |||
| 389 | return false; | 389 | return false; |
| 390 | } | 390 | } |
| 391 | 391 | ||
| 392 | int rhashtable_insert_rehash(struct rhashtable *ht) | 392 | int rhashtable_insert_rehash(struct rhashtable *ht, |
| 393 | struct bucket_table *tbl) | ||
| 393 | { | 394 | { |
| 394 | struct bucket_table *old_tbl; | 395 | struct bucket_table *old_tbl; |
| 395 | struct bucket_table *new_tbl; | 396 | struct bucket_table *new_tbl; |
| 396 | struct bucket_table *tbl; | ||
| 397 | unsigned int size; | 397 | unsigned int size; |
| 398 | int err; | 398 | int err; |
| 399 | 399 | ||
| 400 | old_tbl = rht_dereference_rcu(ht->tbl, ht); | 400 | old_tbl = rht_dereference_rcu(ht->tbl, ht); |
| 401 | tbl = rhashtable_last_table(ht, old_tbl); | ||
| 402 | 401 | ||
| 403 | size = tbl->size; | 402 | size = tbl->size; |
| 404 | 403 | ||
| 404 | err = -EBUSY; | ||
| 405 | |||
| 405 | if (rht_grow_above_75(ht, tbl)) | 406 | if (rht_grow_above_75(ht, tbl)) |
| 406 | size *= 2; | 407 | size *= 2; |
| 407 | /* Do not schedule more than one rehash */ | 408 | /* Do not schedule more than one rehash */ |
| 408 | else if (old_tbl != tbl) | 409 | else if (old_tbl != tbl) |
| 409 | return -EBUSY; | 410 | goto fail; |
| 411 | |||
| 412 | err = -ENOMEM; | ||
| 410 | 413 | ||
| 411 | new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); | 414 | new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC); |
| 412 | if (new_tbl == NULL) { | 415 | if (new_tbl == NULL) |
| 413 | /* Schedule async resize/rehash to try allocation | 416 | goto fail; |
| 414 | * non-atomic context. | ||
| 415 | */ | ||
| 416 | schedule_work(&ht->run_work); | ||
| 417 | return -ENOMEM; | ||
| 418 | } | ||
| 419 | 417 | ||
| 420 | err = rhashtable_rehash_attach(ht, tbl, new_tbl); | 418 | err = rhashtable_rehash_attach(ht, tbl, new_tbl); |
| 421 | if (err) { | 419 | if (err) { |
| @@ -426,12 +424,24 @@ int rhashtable_insert_rehash(struct rhashtable *ht) | |||
| 426 | schedule_work(&ht->run_work); | 424 | schedule_work(&ht->run_work); |
| 427 | 425 | ||
| 428 | return err; | 426 | return err; |
| 427 | |||
| 428 | fail: | ||
| 429 | /* Do not fail the insert if someone else did a rehash. */ | ||
| 430 | if (likely(rcu_dereference_raw(tbl->future_tbl))) | ||
| 431 | return 0; | ||
| 432 | |||
| 433 | /* Schedule async rehash to retry allocation in process context. */ | ||
| 434 | if (err == -ENOMEM) | ||
| 435 | schedule_work(&ht->run_work); | ||
| 436 | |||
| 437 | return err; | ||
| 429 | } | 438 | } |
| 430 | EXPORT_SYMBOL_GPL(rhashtable_insert_rehash); | 439 | EXPORT_SYMBOL_GPL(rhashtable_insert_rehash); |
| 431 | 440 | ||
| 432 | int rhashtable_insert_slow(struct rhashtable *ht, const void *key, | 441 | struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, |
| 433 | struct rhash_head *obj, | 442 | const void *key, |
| 434 | struct bucket_table *tbl) | 443 | struct rhash_head *obj, |
| 444 | struct bucket_table *tbl) | ||
| 435 | { | 445 | { |
| 436 | struct rhash_head *head; | 446 | struct rhash_head *head; |
| 437 | unsigned int hash; | 447 | unsigned int hash; |
| @@ -467,7 +477,12 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key, | |||
| 467 | exit: | 477 | exit: |
| 468 | spin_unlock(rht_bucket_lock(tbl, hash)); | 478 | spin_unlock(rht_bucket_lock(tbl, hash)); |
| 469 | 479 | ||
| 470 | return err; | 480 | if (err == 0) |
| 481 | return NULL; | ||
| 482 | else if (err == -EAGAIN) | ||
| 483 | return tbl; | ||
| 484 | else | ||
| 485 | return ERR_PTR(err); | ||
| 471 | } | 486 | } |
| 472 | EXPORT_SYMBOL_GPL(rhashtable_insert_slow); | 487 | EXPORT_SYMBOL_GPL(rhashtable_insert_slow); |
| 473 | 488 | ||
| @@ -503,10 +518,10 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter) | |||
| 503 | if (!iter->walker) | 518 | if (!iter->walker) |
| 504 | return -ENOMEM; | 519 | return -ENOMEM; |
| 505 | 520 | ||
| 506 | mutex_lock(&ht->mutex); | 521 | spin_lock(&ht->lock); |
| 507 | iter->walker->tbl = rht_dereference(ht->tbl, ht); | 522 | iter->walker->tbl = rht_dereference(ht->tbl, ht); |
| 508 | list_add(&iter->walker->list, &iter->walker->tbl->walkers); | 523 | list_add(&iter->walker->list, &iter->walker->tbl->walkers); |
| 509 | mutex_unlock(&ht->mutex); | 524 | spin_unlock(&ht->lock); |
| 510 | 525 | ||
| 511 | return 0; | 526 | return 0; |
| 512 | } | 527 | } |
| @@ -520,10 +535,10 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init); | |||
| 520 | */ | 535 | */ |
| 521 | void rhashtable_walk_exit(struct rhashtable_iter *iter) | 536 | void rhashtable_walk_exit(struct rhashtable_iter *iter) |
| 522 | { | 537 | { |
| 523 | mutex_lock(&iter->ht->mutex); | 538 | spin_lock(&iter->ht->lock); |
| 524 | if (iter->walker->tbl) | 539 | if (iter->walker->tbl) |
| 525 | list_del(&iter->walker->list); | 540 | list_del(&iter->walker->list); |
| 526 | mutex_unlock(&iter->ht->mutex); | 541 | spin_unlock(&iter->ht->lock); |
| 527 | kfree(iter->walker); | 542 | kfree(iter->walker); |
| 528 | } | 543 | } |
| 529 | EXPORT_SYMBOL_GPL(rhashtable_walk_exit); | 544 | EXPORT_SYMBOL_GPL(rhashtable_walk_exit); |
| @@ -547,14 +562,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) | |||
| 547 | { | 562 | { |
| 548 | struct rhashtable *ht = iter->ht; | 563 | struct rhashtable *ht = iter->ht; |
| 549 | 564 | ||
| 550 | mutex_lock(&ht->mutex); | 565 | rcu_read_lock(); |
| 551 | 566 | ||
| 567 | spin_lock(&ht->lock); | ||
| 552 | if (iter->walker->tbl) | 568 | if (iter->walker->tbl) |
| 553 | list_del(&iter->walker->list); | 569 | list_del(&iter->walker->list); |
| 554 | 570 | spin_unlock(&ht->lock); | |
| 555 | rcu_read_lock(); | ||
| 556 | |||
| 557 | mutex_unlock(&ht->mutex); | ||
| 558 | 571 | ||
| 559 | if (!iter->walker->tbl) { | 572 | if (!iter->walker->tbl) { |
| 560 | iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); | 573 | iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); |
| @@ -723,9 +736,6 @@ int rhashtable_init(struct rhashtable *ht, | |||
| 723 | if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) | 736 | if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) |
| 724 | return -EINVAL; | 737 | return -EINVAL; |
| 725 | 738 | ||
| 726 | if (params->nelem_hint) | ||
| 727 | size = rounded_hashtable_size(params); | ||
| 728 | |||
| 729 | memset(ht, 0, sizeof(*ht)); | 739 | memset(ht, 0, sizeof(*ht)); |
| 730 | mutex_init(&ht->mutex); | 740 | mutex_init(&ht->mutex); |
| 731 | spin_lock_init(&ht->lock); | 741 | spin_lock_init(&ht->lock); |
| @@ -745,6 +755,9 @@ int rhashtable_init(struct rhashtable *ht, | |||
| 745 | 755 | ||
| 746 | ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); | 756 | ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE); |
| 747 | 757 | ||
| 758 | if (params->nelem_hint) | ||
| 759 | size = rounded_hashtable_size(&ht->p); | ||
| 760 | |||
| 748 | /* The maximum (not average) chain length grows with the | 761 | /* The maximum (not average) chain length grows with the |
| 749 | * size of the hash table, at a rate of (log N)/(log log N). | 762 | * size of the hash table, at a rate of (log N)/(log log N). |
| 750 | * The value of 16 is selected so that even if the hash | 763 | * The value of 16 is selected so that even if the hash |
diff --git a/mm/zswap.c b/mm/zswap.c index 025f8dc723de..bf14508afd64 100644 --- a/mm/zswap.c +++ b/mm/zswap.c | |||
| @@ -541,6 +541,7 @@ static struct zswap_pool *zswap_pool_last_get(void) | |||
| 541 | return last; | 541 | return last; |
| 542 | } | 542 | } |
| 543 | 543 | ||
| 544 | /* type and compressor must be null-terminated */ | ||
| 544 | static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) | 545 | static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) |
| 545 | { | 546 | { |
| 546 | struct zswap_pool *pool; | 547 | struct zswap_pool *pool; |
| @@ -548,10 +549,9 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) | |||
| 548 | assert_spin_locked(&zswap_pools_lock); | 549 | assert_spin_locked(&zswap_pools_lock); |
| 549 | 550 | ||
| 550 | list_for_each_entry_rcu(pool, &zswap_pools, list) { | 551 | list_for_each_entry_rcu(pool, &zswap_pools, list) { |
| 551 | if (strncmp(pool->tfm_name, compressor, sizeof(pool->tfm_name))) | 552 | if (strcmp(pool->tfm_name, compressor)) |
| 552 | continue; | 553 | continue; |
| 553 | if (strncmp(zpool_get_type(pool->zpool), type, | 554 | if (strcmp(zpool_get_type(pool->zpool), type)) |
| 554 | sizeof(zswap_zpool_type))) | ||
| 555 | continue; | 555 | continue; |
| 556 | /* if we can't get it, it's about to be destroyed */ | 556 | /* if we can't get it, it's about to be destroyed */ |
| 557 | if (!zswap_pool_get(pool)) | 557 | if (!zswap_pool_get(pool)) |
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index ae3a47f9d1d5..fbd0acf80b13 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c | |||
| @@ -805,6 +805,9 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol, | |||
| 805 | struct sock *sk; | 805 | struct sock *sk; |
| 806 | ax25_cb *ax25; | 806 | ax25_cb *ax25; |
| 807 | 807 | ||
| 808 | if (protocol < 0 || protocol > SK_PROTOCOL_MAX) | ||
| 809 | return -EINVAL; | ||
| 810 | |||
| 808 | if (!net_eq(net, &init_net)) | 811 | if (!net_eq(net, &init_net)) |
| 809 | return -EAFNOSUPPORT; | 812 | return -EAFNOSUPPORT; |
| 810 | 813 | ||
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 83bc1aaf5800..a49c705fb86b 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c | |||
| @@ -566,6 +566,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst) | |||
| 566 | int select; | 566 | int select; |
| 567 | batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key; | 567 | batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key; |
| 568 | struct batadv_dat_candidate *res; | 568 | struct batadv_dat_candidate *res; |
| 569 | struct batadv_dat_entry dat; | ||
| 569 | 570 | ||
| 570 | if (!bat_priv->orig_hash) | 571 | if (!bat_priv->orig_hash) |
| 571 | return NULL; | 572 | return NULL; |
| @@ -575,7 +576,9 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst) | |||
| 575 | if (!res) | 576 | if (!res) |
| 576 | return NULL; | 577 | return NULL; |
| 577 | 578 | ||
| 578 | ip_key = (batadv_dat_addr_t)batadv_hash_dat(&ip_dst, | 579 | dat.ip = ip_dst; |
| 580 | dat.vid = 0; | ||
| 581 | ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat, | ||
| 579 | BATADV_DAT_ADDR_MAX); | 582 | BATADV_DAT_ADDR_MAX); |
| 580 | 583 | ||
| 581 | batadv_dbg(BATADV_DBG_DAT, bat_priv, | 584 | batadv_dbg(BATADV_DBG_DAT, bat_priv, |
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 8d990b070a2e..3207667e69de 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c | |||
| @@ -836,6 +836,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, | |||
| 836 | u8 *orig_addr; | 836 | u8 *orig_addr; |
| 837 | struct batadv_orig_node *orig_node = NULL; | 837 | struct batadv_orig_node *orig_node = NULL; |
| 838 | int check, hdr_size = sizeof(*unicast_packet); | 838 | int check, hdr_size = sizeof(*unicast_packet); |
| 839 | enum batadv_subtype subtype; | ||
| 839 | bool is4addr; | 840 | bool is4addr; |
| 840 | 841 | ||
| 841 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 842 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
| @@ -863,10 +864,20 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, | |||
| 863 | /* packet for me */ | 864 | /* packet for me */ |
| 864 | if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { | 865 | if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) { |
| 865 | if (is4addr) { | 866 | if (is4addr) { |
| 866 | batadv_dat_inc_counter(bat_priv, | 867 | subtype = unicast_4addr_packet->subtype; |
| 867 | unicast_4addr_packet->subtype); | 868 | batadv_dat_inc_counter(bat_priv, subtype); |
| 868 | orig_addr = unicast_4addr_packet->src; | 869 | |
| 869 | orig_node = batadv_orig_hash_find(bat_priv, orig_addr); | 870 | /* Only payload data should be considered for speedy |
| 871 | * join. For example, DAT also uses unicast 4addr | ||
| 872 | * types, but those packets should not be considered | ||
| 873 | * for speedy join, since the clients do not actually | ||
| 874 | * reside at the sending originator. | ||
| 875 | */ | ||
| 876 | if (subtype == BATADV_P_DATA) { | ||
| 877 | orig_addr = unicast_4addr_packet->src; | ||
| 878 | orig_node = batadv_orig_hash_find(bat_priv, | ||
| 879 | orig_addr); | ||
| 880 | } | ||
| 870 | } | 881 | } |
| 871 | 882 | ||
| 872 | if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, | 883 | if (batadv_dat_snoop_incoming_arp_request(bat_priv, skb, |
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 4228b10c47ea..76f19ba62462 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c | |||
| @@ -68,13 +68,15 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv, | |||
| 68 | unsigned short vid, const char *message, | 68 | unsigned short vid, const char *message, |
| 69 | bool roaming); | 69 | bool roaming); |
| 70 | 70 | ||
| 71 | /* returns 1 if they are the same mac addr */ | 71 | /* returns 1 if they are the same mac addr and vid */ |
| 72 | static int batadv_compare_tt(const struct hlist_node *node, const void *data2) | 72 | static int batadv_compare_tt(const struct hlist_node *node, const void *data2) |
| 73 | { | 73 | { |
| 74 | const void *data1 = container_of(node, struct batadv_tt_common_entry, | 74 | const void *data1 = container_of(node, struct batadv_tt_common_entry, |
| 75 | hash_entry); | 75 | hash_entry); |
| 76 | const struct batadv_tt_common_entry *tt1 = data1; | ||
| 77 | const struct batadv_tt_common_entry *tt2 = data2; | ||
| 76 | 78 | ||
| 77 | return batadv_compare_eth(data1, data2); | 79 | return (tt1->vid == tt2->vid) && batadv_compare_eth(data1, data2); |
| 78 | } | 80 | } |
| 79 | 81 | ||
| 80 | /** | 82 | /** |
| @@ -1427,9 +1429,15 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv, | |||
| 1427 | } | 1429 | } |
| 1428 | 1430 | ||
| 1429 | /* if the client was temporary added before receiving the first | 1431 | /* if the client was temporary added before receiving the first |
| 1430 | * OGM announcing it, we have to clear the TEMP flag | 1432 | * OGM announcing it, we have to clear the TEMP flag. Also, |
| 1433 | * remove the previous temporary orig node and re-add it | ||
| 1434 | * if required. If the orig entry changed, the new one which | ||
| 1435 | * is a non-temporary entry is preferred. | ||
| 1431 | */ | 1436 | */ |
| 1432 | common->flags &= ~BATADV_TT_CLIENT_TEMP; | 1437 | if (common->flags & BATADV_TT_CLIENT_TEMP) { |
| 1438 | batadv_tt_global_del_orig_list(tt_global_entry); | ||
| 1439 | common->flags &= ~BATADV_TT_CLIENT_TEMP; | ||
| 1440 | } | ||
| 1433 | 1441 | ||
| 1434 | /* the change can carry possible "attribute" flags like the | 1442 | /* the change can carry possible "attribute" flags like the |
| 1435 | * TT_CLIENT_WIFI, therefore they have to be copied in the | 1443 | * TT_CLIENT_WIFI, therefore they have to be copied in the |
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index fe129663bd3f..f52bcbf2e58c 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
| @@ -526,6 +526,9 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, | |||
| 526 | if (!addr || addr->sa_family != AF_BLUETOOTH) | 526 | if (!addr || addr->sa_family != AF_BLUETOOTH) |
| 527 | return -EINVAL; | 527 | return -EINVAL; |
| 528 | 528 | ||
| 529 | if (addr_len < sizeof(struct sockaddr_sco)) | ||
| 530 | return -EINVAL; | ||
| 531 | |||
| 529 | lock_sock(sk); | 532 | lock_sock(sk); |
| 530 | 533 | ||
| 531 | if (sk->sk_state != BT_OPEN) { | 534 | if (sk->sk_state != BT_OPEN) { |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 152b9c70e252..b2df375ec9c2 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -3643,7 +3643,8 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb, | |||
| 3643 | serr->ee.ee_info = tstype; | 3643 | serr->ee.ee_info = tstype; |
| 3644 | if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { | 3644 | if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { |
| 3645 | serr->ee.ee_data = skb_shinfo(skb)->tskey; | 3645 | serr->ee.ee_data = skb_shinfo(skb)->tskey; |
| 3646 | if (sk->sk_protocol == IPPROTO_TCP) | 3646 | if (sk->sk_protocol == IPPROTO_TCP && |
| 3647 | sk->sk_type == SOCK_STREAM) | ||
| 3647 | serr->ee.ee_data -= sk->sk_tskey; | 3648 | serr->ee.ee_data -= sk->sk_tskey; |
| 3648 | } | 3649 | } |
| 3649 | 3650 | ||
| @@ -4268,7 +4269,7 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) | |||
| 4268 | return NULL; | 4269 | return NULL; |
| 4269 | } | 4270 | } |
| 4270 | 4271 | ||
| 4271 | memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len, | 4272 | memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN, |
| 4272 | 2 * ETH_ALEN); | 4273 | 2 * ETH_ALEN); |
| 4273 | skb->mac_header += VLAN_HLEN; | 4274 | skb->mac_header += VLAN_HLEN; |
| 4274 | return skb; | 4275 | return skb; |
diff --git a/net/core/sock.c b/net/core/sock.c index e31dfcee1729..0d91f7dca751 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
| @@ -433,8 +433,6 @@ static bool sock_needs_netstamp(const struct sock *sk) | |||
| 433 | } | 433 | } |
| 434 | } | 434 | } |
| 435 | 435 | ||
| 436 | #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) | ||
| 437 | |||
| 438 | static void sock_disable_timestamp(struct sock *sk, unsigned long flags) | 436 | static void sock_disable_timestamp(struct sock *sk, unsigned long flags) |
| 439 | { | 437 | { |
| 440 | if (sk->sk_flags & flags) { | 438 | if (sk->sk_flags & flags) { |
| @@ -874,7 +872,8 @@ set_rcvbuf: | |||
| 874 | 872 | ||
| 875 | if (val & SOF_TIMESTAMPING_OPT_ID && | 873 | if (val & SOF_TIMESTAMPING_OPT_ID && |
| 876 | !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { | 874 | !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { |
| 877 | if (sk->sk_protocol == IPPROTO_TCP) { | 875 | if (sk->sk_protocol == IPPROTO_TCP && |
| 876 | sk->sk_type == SOCK_STREAM) { | ||
| 878 | if (sk->sk_state != TCP_ESTABLISHED) { | 877 | if (sk->sk_state != TCP_ESTABLISHED) { |
| 879 | ret = -EINVAL; | 878 | ret = -EINVAL; |
| 880 | break; | 879 | break; |
| @@ -1552,7 +1551,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) | |||
| 1552 | */ | 1551 | */ |
| 1553 | is_charged = sk_filter_charge(newsk, filter); | 1552 | is_charged = sk_filter_charge(newsk, filter); |
| 1554 | 1553 | ||
| 1555 | if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) { | 1554 | if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { |
| 1556 | /* It is still raw copy of parent, so invalidate | 1555 | /* It is still raw copy of parent, so invalidate |
| 1557 | * destructor and make plain sk_free() */ | 1556 | * destructor and make plain sk_free() */ |
| 1558 | newsk->sk_destruct = NULL; | 1557 | newsk->sk_destruct = NULL; |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index eebf5ac8ce18..13d6b1a6e0fc 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
| @@ -678,6 +678,9 @@ static int dn_create(struct net *net, struct socket *sock, int protocol, | |||
| 678 | { | 678 | { |
| 679 | struct sock *sk; | 679 | struct sock *sk; |
| 680 | 680 | ||
| 681 | if (protocol < 0 || protocol > SK_PROTOCOL_MAX) | ||
| 682 | return -EINVAL; | ||
| 683 | |||
| 681 | if (!net_eq(net, &init_net)) | 684 | if (!net_eq(net, &init_net)) |
| 682 | return -EAFNOSUPPORT; | 685 | return -EAFNOSUPPORT; |
| 683 | 686 | ||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 11c4ca13ec3b..5c5db6636704 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
| @@ -257,6 +257,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol, | |||
| 257 | int try_loading_module = 0; | 257 | int try_loading_module = 0; |
| 258 | int err; | 258 | int err; |
| 259 | 259 | ||
| 260 | if (protocol < 0 || protocol >= IPPROTO_MAX) | ||
| 261 | return -EINVAL; | ||
| 262 | |||
| 260 | sock->state = SS_UNCONNECTED; | 263 | sock->state = SS_UNCONNECTED; |
| 261 | 264 | ||
| 262 | /* Look for the requested type/protocol pair. */ | 265 | /* Look for the requested type/protocol pair. */ |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index cc8f3e506cde..473447593060 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
| @@ -1155,6 +1155,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, | |||
| 1155 | static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) | 1155 | static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) |
| 1156 | { | 1156 | { |
| 1157 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); | 1157 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
| 1158 | struct netdev_notifier_changeupper_info *info; | ||
| 1158 | struct in_device *in_dev; | 1159 | struct in_device *in_dev; |
| 1159 | struct net *net = dev_net(dev); | 1160 | struct net *net = dev_net(dev); |
| 1160 | unsigned int flags; | 1161 | unsigned int flags; |
| @@ -1193,6 +1194,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo | |||
| 1193 | case NETDEV_CHANGEMTU: | 1194 | case NETDEV_CHANGEMTU: |
| 1194 | rt_cache_flush(net); | 1195 | rt_cache_flush(net); |
| 1195 | break; | 1196 | break; |
| 1197 | case NETDEV_CHANGEUPPER: | ||
| 1198 | info = ptr; | ||
| 1199 | /* flush all routes if dev is linked to or unlinked from | ||
| 1200 | * an L3 master device (e.g., VRF) | ||
| 1201 | */ | ||
| 1202 | if (info->upper_dev && netif_is_l3_master(info->upper_dev)) | ||
| 1203 | fib_disable_ip(dev, NETDEV_DOWN, true); | ||
| 1204 | break; | ||
| 1196 | } | 1205 | } |
| 1197 | return NOTIFY_DONE; | 1206 | return NOTIFY_DONE; |
| 1198 | } | 1207 | } |
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index e0fcbbbcfe54..bd903fe0f750 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c | |||
| @@ -24,6 +24,7 @@ struct fou { | |||
| 24 | u16 type; | 24 | u16 type; |
| 25 | struct udp_offload udp_offloads; | 25 | struct udp_offload udp_offloads; |
| 26 | struct list_head list; | 26 | struct list_head list; |
| 27 | struct rcu_head rcu; | ||
| 27 | }; | 28 | }; |
| 28 | 29 | ||
| 29 | #define FOU_F_REMCSUM_NOPARTIAL BIT(0) | 30 | #define FOU_F_REMCSUM_NOPARTIAL BIT(0) |
| @@ -417,7 +418,7 @@ static void fou_release(struct fou *fou) | |||
| 417 | list_del(&fou->list); | 418 | list_del(&fou->list); |
| 418 | udp_tunnel_sock_release(sock); | 419 | udp_tunnel_sock_release(sock); |
| 419 | 420 | ||
| 420 | kfree(fou); | 421 | kfree_rcu(fou, rcu); |
| 421 | } | 422 | } |
| 422 | 423 | ||
| 423 | static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg) | 424 | static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg) |
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index a35584176535..c187c60e3e0c 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig | |||
| @@ -60,6 +60,7 @@ config NFT_REJECT_IPV4 | |||
| 60 | 60 | ||
| 61 | config NFT_DUP_IPV4 | 61 | config NFT_DUP_IPV4 |
| 62 | tristate "IPv4 nf_tables packet duplication support" | 62 | tristate "IPv4 nf_tables packet duplication support" |
| 63 | depends on !NF_CONNTRACK || NF_CONNTRACK | ||
| 63 | select NF_DUP_IPV4 | 64 | select NF_DUP_IPV4 |
| 64 | help | 65 | help |
| 65 | This module enables IPv4 packet duplication support for nf_tables. | 66 | This module enables IPv4 packet duplication support for nf_tables. |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index db003438aaf5..d8841a2f1569 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -1493,7 +1493,7 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) | |||
| 1493 | if (likely(sk->sk_rx_dst)) | 1493 | if (likely(sk->sk_rx_dst)) |
| 1494 | skb_dst_drop(skb); | 1494 | skb_dst_drop(skb); |
| 1495 | else | 1495 | else |
| 1496 | skb_dst_force(skb); | 1496 | skb_dst_force_safe(skb); |
| 1497 | 1497 | ||
| 1498 | __skb_queue_tail(&tp->ucopy.prequeue, skb); | 1498 | __skb_queue_tail(&tp->ucopy.prequeue, skb); |
| 1499 | tp->ucopy.memory += skb->truesize; | 1499 | tp->ucopy.memory += skb->truesize; |
| @@ -1721,8 +1721,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) | |||
| 1721 | { | 1721 | { |
| 1722 | struct dst_entry *dst = skb_dst(skb); | 1722 | struct dst_entry *dst = skb_dst(skb); |
| 1723 | 1723 | ||
| 1724 | if (dst) { | 1724 | if (dst && dst_hold_safe(dst)) { |
| 1725 | dst_hold(dst); | ||
| 1726 | sk->sk_rx_dst = dst; | 1725 | sk->sk_rx_dst = dst; |
| 1727 | inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; | 1726 | inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; |
| 1728 | } | 1727 | } |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index cb7ca569052c..9bfc39ff2285 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -3150,7 +3150,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) | |||
| 3150 | { | 3150 | { |
| 3151 | struct tcp_sock *tp = tcp_sk(sk); | 3151 | struct tcp_sock *tp = tcp_sk(sk); |
| 3152 | struct tcp_fastopen_request *fo = tp->fastopen_req; | 3152 | struct tcp_fastopen_request *fo = tp->fastopen_req; |
| 3153 | int syn_loss = 0, space, err = 0, copied; | 3153 | int syn_loss = 0, space, err = 0; |
| 3154 | unsigned long last_syn_loss = 0; | 3154 | unsigned long last_syn_loss = 0; |
| 3155 | struct sk_buff *syn_data; | 3155 | struct sk_buff *syn_data; |
| 3156 | 3156 | ||
| @@ -3188,17 +3188,18 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) | |||
| 3188 | goto fallback; | 3188 | goto fallback; |
| 3189 | syn_data->ip_summed = CHECKSUM_PARTIAL; | 3189 | syn_data->ip_summed = CHECKSUM_PARTIAL; |
| 3190 | memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); | 3190 | memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); |
| 3191 | copied = copy_from_iter(skb_put(syn_data, space), space, | 3191 | if (space) { |
| 3192 | &fo->data->msg_iter); | 3192 | int copied = copy_from_iter(skb_put(syn_data, space), space, |
| 3193 | if (unlikely(!copied)) { | 3193 | &fo->data->msg_iter); |
| 3194 | kfree_skb(syn_data); | 3194 | if (unlikely(!copied)) { |
| 3195 | goto fallback; | 3195 | kfree_skb(syn_data); |
| 3196 | } | 3196 | goto fallback; |
| 3197 | if (copied != space) { | 3197 | } |
| 3198 | skb_trim(syn_data, copied); | 3198 | if (copied != space) { |
| 3199 | space = copied; | 3199 | skb_trim(syn_data, copied); |
| 3200 | space = copied; | ||
| 3201 | } | ||
| 3200 | } | 3202 | } |
| 3201 | |||
| 3202 | /* No more data pending in inet_wait_for_connect() */ | 3203 | /* No more data pending in inet_wait_for_connect() */ |
| 3203 | if (space == fo->size) | 3204 | if (space == fo->size) |
| 3204 | fo->data = NULL; | 3205 | fo->data = NULL; |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 61f26851655c..17f8e7ea133b 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -350,6 +350,12 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev) | |||
| 350 | setup_timer(&ndev->rs_timer, addrconf_rs_timer, | 350 | setup_timer(&ndev->rs_timer, addrconf_rs_timer, |
| 351 | (unsigned long)ndev); | 351 | (unsigned long)ndev); |
| 352 | memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); | 352 | memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); |
| 353 | |||
| 354 | if (ndev->cnf.stable_secret.initialized) | ||
| 355 | ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY; | ||
| 356 | else | ||
| 357 | ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64; | ||
| 358 | |||
| 353 | ndev->cnf.mtu6 = dev->mtu; | 359 | ndev->cnf.mtu6 = dev->mtu; |
| 354 | ndev->cnf.sysctl = NULL; | 360 | ndev->cnf.sysctl = NULL; |
| 355 | ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); | 361 | ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl); |
| @@ -2455,7 +2461,7 @@ ok: | |||
| 2455 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD | 2461 | #ifdef CONFIG_IPV6_OPTIMISTIC_DAD |
| 2456 | if (in6_dev->cnf.optimistic_dad && | 2462 | if (in6_dev->cnf.optimistic_dad && |
| 2457 | !net->ipv6.devconf_all->forwarding && sllao) | 2463 | !net->ipv6.devconf_all->forwarding && sllao) |
| 2458 | addr_flags = IFA_F_OPTIMISTIC; | 2464 | addr_flags |= IFA_F_OPTIMISTIC; |
| 2459 | #endif | 2465 | #endif |
| 2460 | 2466 | ||
| 2461 | /* Do not allow to create too much of autoconfigured | 2467 | /* Do not allow to create too much of autoconfigured |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 8ec0df75f1c4..9f5137cd604e 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
| @@ -109,6 +109,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol, | |||
| 109 | int try_loading_module = 0; | 109 | int try_loading_module = 0; |
| 110 | int err; | 110 | int err; |
| 111 | 111 | ||
| 112 | if (protocol < 0 || protocol >= IPPROTO_MAX) | ||
| 113 | return -EINVAL; | ||
| 114 | |||
| 112 | /* Look for the requested type/protocol pair. */ | 115 | /* Look for the requested type/protocol pair. */ |
| 113 | lookup_protocol: | 116 | lookup_protocol: |
| 114 | err = -ESOCKTNOSUPPORT; | 117 | err = -ESOCKTNOSUPPORT; |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 3c7b9310b33f..e5ea177d34c6 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
| @@ -1571,13 +1571,11 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], | |||
| 1571 | return -EEXIST; | 1571 | return -EEXIST; |
| 1572 | } else { | 1572 | } else { |
| 1573 | t = nt; | 1573 | t = nt; |
| 1574 | |||
| 1575 | ip6gre_tunnel_unlink(ign, t); | ||
| 1576 | ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]); | ||
| 1577 | ip6gre_tunnel_link(ign, t); | ||
| 1578 | netdev_state_change(dev); | ||
| 1579 | } | 1574 | } |
| 1580 | 1575 | ||
| 1576 | ip6gre_tunnel_unlink(ign, t); | ||
| 1577 | ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]); | ||
| 1578 | ip6gre_tunnel_link(ign, t); | ||
| 1581 | return 0; | 1579 | return 0; |
| 1582 | } | 1580 | } |
| 1583 | 1581 | ||
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index f6a024e141e5..e10a04c9cdc7 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig | |||
| @@ -49,6 +49,7 @@ config NFT_REJECT_IPV6 | |||
| 49 | 49 | ||
| 50 | config NFT_DUP_IPV6 | 50 | config NFT_DUP_IPV6 |
| 51 | tristate "IPv6 nf_tables packet duplication support" | 51 | tristate "IPv6 nf_tables packet duplication support" |
| 52 | depends on !NF_CONNTRACK || NF_CONNTRACK | ||
| 52 | select NF_DUP_IPV6 | 53 | select NF_DUP_IPV6 |
| 53 | help | 54 | help |
| 54 | This module enables IPv6 packet duplication support for nf_tables. | 55 | This module enables IPv6 packet duplication support for nf_tables. |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index e7aab561b7b4..6b8a8a9091fa 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -93,10 +93,9 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) | |||
| 93 | { | 93 | { |
| 94 | struct dst_entry *dst = skb_dst(skb); | 94 | struct dst_entry *dst = skb_dst(skb); |
| 95 | 95 | ||
| 96 | if (dst) { | 96 | if (dst && dst_hold_safe(dst)) { |
| 97 | const struct rt6_info *rt = (const struct rt6_info *)dst; | 97 | const struct rt6_info *rt = (const struct rt6_info *)dst; |
| 98 | 98 | ||
| 99 | dst_hold(dst); | ||
| 100 | sk->sk_rx_dst = dst; | 99 | sk->sk_rx_dst = dst; |
| 101 | inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; | 100 | inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; |
| 102 | inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); | 101 | inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt); |
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index e6aa48b5395c..923abd6b3064 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c | |||
| @@ -1086,6 +1086,9 @@ static int irda_create(struct net *net, struct socket *sock, int protocol, | |||
| 1086 | struct sock *sk; | 1086 | struct sock *sk; |
| 1087 | struct irda_sock *self; | 1087 | struct irda_sock *self; |
| 1088 | 1088 | ||
| 1089 | if (protocol < 0 || protocol > SK_PROTOCOL_MAX) | ||
| 1090 | return -EINVAL; | ||
| 1091 | |||
| 1089 | if (net != &init_net) | 1092 | if (net != &init_net) |
| 1090 | return -EAFNOSUPPORT; | 1093 | return -EAFNOSUPPORT; |
| 1091 | 1094 | ||
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index da471eef07bb..c12f348138ac 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
| @@ -1169,8 +1169,7 @@ static int sta_apply_parameters(struct ieee80211_local *local, | |||
| 1169 | * rc isn't initialized here yet, so ignore it | 1169 | * rc isn't initialized here yet, so ignore it |
| 1170 | */ | 1170 | */ |
| 1171 | __ieee80211_vht_handle_opmode(sdata, sta, | 1171 | __ieee80211_vht_handle_opmode(sdata, sta, |
| 1172 | params->opmode_notif, | 1172 | params->opmode_notif, band); |
| 1173 | band, false); | ||
| 1174 | } | 1173 | } |
| 1175 | 1174 | ||
| 1176 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 1175 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index d832bd59236b..5322b4c71630 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
| @@ -1709,10 +1709,10 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta); | |||
| 1709 | void ieee80211_sta_set_rx_nss(struct sta_info *sta); | 1709 | void ieee80211_sta_set_rx_nss(struct sta_info *sta); |
| 1710 | u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, | 1710 | u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, |
| 1711 | struct sta_info *sta, u8 opmode, | 1711 | struct sta_info *sta, u8 opmode, |
| 1712 | enum ieee80211_band band, bool nss_only); | 1712 | enum ieee80211_band band); |
| 1713 | void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, | 1713 | void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, |
| 1714 | struct sta_info *sta, u8 opmode, | 1714 | struct sta_info *sta, u8 opmode, |
| 1715 | enum ieee80211_band band, bool nss_only); | 1715 | enum ieee80211_band band); |
| 1716 | void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata, | 1716 | void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata, |
| 1717 | struct ieee80211_sta_vht_cap *vht_cap); | 1717 | struct ieee80211_sta_vht_cap *vht_cap); |
| 1718 | void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, | 1718 | void ieee80211_get_vht_mask_from_cap(__le16 vht_cap, |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index b140cc6651f4..3aa04344942b 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
| @@ -1379,21 +1379,26 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, | |||
| 1379 | */ | 1379 | */ |
| 1380 | if (has_80211h_pwr && | 1380 | if (has_80211h_pwr && |
| 1381 | (!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) { | 1381 | (!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) { |
| 1382 | new_ap_level = pwr_level_80211h; | ||
| 1383 | |||
| 1384 | if (sdata->ap_power_level == new_ap_level) | ||
| 1385 | return 0; | ||
| 1386 | |||
| 1382 | sdata_dbg(sdata, | 1387 | sdata_dbg(sdata, |
| 1383 | "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n", | 1388 | "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n", |
| 1384 | pwr_level_80211h, chan_pwr, pwr_reduction_80211h, | 1389 | pwr_level_80211h, chan_pwr, pwr_reduction_80211h, |
| 1385 | sdata->u.mgd.bssid); | 1390 | sdata->u.mgd.bssid); |
| 1386 | new_ap_level = pwr_level_80211h; | ||
| 1387 | } else { /* has_cisco_pwr is always true here. */ | 1391 | } else { /* has_cisco_pwr is always true here. */ |
| 1392 | new_ap_level = pwr_level_cisco; | ||
| 1393 | |||
| 1394 | if (sdata->ap_power_level == new_ap_level) | ||
| 1395 | return 0; | ||
| 1396 | |||
| 1388 | sdata_dbg(sdata, | 1397 | sdata_dbg(sdata, |
| 1389 | "Limiting TX power to %d dBm as advertised by %pM\n", | 1398 | "Limiting TX power to %d dBm as advertised by %pM\n", |
| 1390 | pwr_level_cisco, sdata->u.mgd.bssid); | 1399 | pwr_level_cisco, sdata->u.mgd.bssid); |
| 1391 | new_ap_level = pwr_level_cisco; | ||
| 1392 | } | 1400 | } |
| 1393 | 1401 | ||
| 1394 | if (sdata->ap_power_level == new_ap_level) | ||
| 1395 | return 0; | ||
| 1396 | |||
| 1397 | sdata->ap_power_level = new_ap_level; | 1402 | sdata->ap_power_level = new_ap_level; |
| 1398 | if (__ieee80211_recalc_txpower(sdata)) | 1403 | if (__ieee80211_recalc_txpower(sdata)) |
| 1399 | return BSS_CHANGED_TXPOWER; | 1404 | return BSS_CHANGED_TXPOWER; |
| @@ -3575,7 +3580,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
| 3575 | 3580 | ||
| 3576 | if (sta && elems.opmode_notif) | 3581 | if (sta && elems.opmode_notif) |
| 3577 | ieee80211_vht_handle_opmode(sdata, sta, *elems.opmode_notif, | 3582 | ieee80211_vht_handle_opmode(sdata, sta, *elems.opmode_notif, |
| 3578 | rx_status->band, true); | 3583 | rx_status->band); |
| 3579 | mutex_unlock(&local->sta_mtx); | 3584 | mutex_unlock(&local->sta_mtx); |
| 3580 | 3585 | ||
| 3581 | changed |= ieee80211_handle_pwr_constr(sdata, chan, mgmt, | 3586 | changed |= ieee80211_handle_pwr_constr(sdata, chan, mgmt, |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 8bae5de0dc44..82af407fea7a 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
| @@ -2736,8 +2736,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
| 2736 | opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; | 2736 | opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; |
| 2737 | 2737 | ||
| 2738 | ieee80211_vht_handle_opmode(rx->sdata, rx->sta, | 2738 | ieee80211_vht_handle_opmode(rx->sdata, rx->sta, |
| 2739 | opmode, status->band, | 2739 | opmode, status->band); |
| 2740 | false); | ||
| 2741 | goto handled; | 2740 | goto handled; |
| 2742 | } | 2741 | } |
| 2743 | default: | 2742 | default: |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 74058020b7d6..33344f5a66a8 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
| @@ -1641,6 +1641,29 @@ void ieee80211_stop_device(struct ieee80211_local *local) | |||
| 1641 | drv_stop(local); | 1641 | drv_stop(local); |
| 1642 | } | 1642 | } |
| 1643 | 1643 | ||
| 1644 | static void ieee80211_flush_completed_scan(struct ieee80211_local *local, | ||
| 1645 | bool aborted) | ||
| 1646 | { | ||
| 1647 | /* It's possible that we don't handle the scan completion in | ||
| 1648 | * time during suspend, so if it's still marked as completed | ||
| 1649 | * here, queue the work and flush it to clean things up. | ||
| 1650 | * Instead of calling the worker function directly here, we | ||
| 1651 | * really queue it to avoid potential races with other flows | ||
| 1652 | * scheduling the same work. | ||
| 1653 | */ | ||
| 1654 | if (test_bit(SCAN_COMPLETED, &local->scanning)) { | ||
| 1655 | /* If coming from reconfiguration failure, abort the scan so | ||
| 1656 | * we don't attempt to continue a partial HW scan - which is | ||
| 1657 | * possible otherwise if (e.g.) the 2.4 GHz portion was the | ||
| 1658 | * completed scan, and a 5 GHz portion is still pending. | ||
| 1659 | */ | ||
| 1660 | if (aborted) | ||
| 1661 | set_bit(SCAN_ABORTED, &local->scanning); | ||
| 1662 | ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0); | ||
| 1663 | flush_delayed_work(&local->scan_work); | ||
| 1664 | } | ||
| 1665 | } | ||
| 1666 | |||
| 1644 | static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local) | 1667 | static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local) |
| 1645 | { | 1668 | { |
| 1646 | struct ieee80211_sub_if_data *sdata; | 1669 | struct ieee80211_sub_if_data *sdata; |
| @@ -1660,6 +1683,8 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local) | |||
| 1660 | local->suspended = false; | 1683 | local->suspended = false; |
| 1661 | local->in_reconfig = false; | 1684 | local->in_reconfig = false; |
| 1662 | 1685 | ||
| 1686 | ieee80211_flush_completed_scan(local, true); | ||
| 1687 | |||
| 1663 | /* scheduled scan clearly can't be running any more, but tell | 1688 | /* scheduled scan clearly can't be running any more, but tell |
| 1664 | * cfg80211 and clear local state | 1689 | * cfg80211 and clear local state |
| 1665 | */ | 1690 | */ |
| @@ -1698,6 +1723,27 @@ static void ieee80211_assign_chanctx(struct ieee80211_local *local, | |||
| 1698 | mutex_unlock(&local->chanctx_mtx); | 1723 | mutex_unlock(&local->chanctx_mtx); |
| 1699 | } | 1724 | } |
| 1700 | 1725 | ||
| 1726 | static void ieee80211_reconfig_stations(struct ieee80211_sub_if_data *sdata) | ||
| 1727 | { | ||
| 1728 | struct ieee80211_local *local = sdata->local; | ||
| 1729 | struct sta_info *sta; | ||
| 1730 | |||
| 1731 | /* add STAs back */ | ||
| 1732 | mutex_lock(&local->sta_mtx); | ||
| 1733 | list_for_each_entry(sta, &local->sta_list, list) { | ||
| 1734 | enum ieee80211_sta_state state; | ||
| 1735 | |||
| 1736 | if (!sta->uploaded || sta->sdata != sdata) | ||
| 1737 | continue; | ||
| 1738 | |||
| 1739 | for (state = IEEE80211_STA_NOTEXIST; | ||
| 1740 | state < sta->sta_state; state++) | ||
| 1741 | WARN_ON(drv_sta_state(local, sta->sdata, sta, state, | ||
| 1742 | state + 1)); | ||
| 1743 | } | ||
| 1744 | mutex_unlock(&local->sta_mtx); | ||
| 1745 | } | ||
| 1746 | |||
| 1701 | int ieee80211_reconfig(struct ieee80211_local *local) | 1747 | int ieee80211_reconfig(struct ieee80211_local *local) |
| 1702 | { | 1748 | { |
| 1703 | struct ieee80211_hw *hw = &local->hw; | 1749 | struct ieee80211_hw *hw = &local->hw; |
| @@ -1833,50 +1879,11 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
| 1833 | WARN_ON(drv_add_chanctx(local, ctx)); | 1879 | WARN_ON(drv_add_chanctx(local, ctx)); |
| 1834 | mutex_unlock(&local->chanctx_mtx); | 1880 | mutex_unlock(&local->chanctx_mtx); |
| 1835 | 1881 | ||
| 1836 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
| 1837 | if (!ieee80211_sdata_running(sdata)) | ||
| 1838 | continue; | ||
| 1839 | ieee80211_assign_chanctx(local, sdata); | ||
| 1840 | } | ||
| 1841 | |||
| 1842 | sdata = rtnl_dereference(local->monitor_sdata); | 1882 | sdata = rtnl_dereference(local->monitor_sdata); |
| 1843 | if (sdata && ieee80211_sdata_running(sdata)) | 1883 | if (sdata && ieee80211_sdata_running(sdata)) |
| 1844 | ieee80211_assign_chanctx(local, sdata); | 1884 | ieee80211_assign_chanctx(local, sdata); |
| 1845 | } | 1885 | } |
| 1846 | 1886 | ||
| 1847 | /* add STAs back */ | ||
| 1848 | mutex_lock(&local->sta_mtx); | ||
| 1849 | list_for_each_entry(sta, &local->sta_list, list) { | ||
| 1850 | enum ieee80211_sta_state state; | ||
| 1851 | |||
| 1852 | if (!sta->uploaded) | ||
| 1853 | continue; | ||
| 1854 | |||
| 1855 | /* AP-mode stations will be added later */ | ||
| 1856 | if (sta->sdata->vif.type == NL80211_IFTYPE_AP) | ||
| 1857 | continue; | ||
| 1858 | |||
| 1859 | for (state = IEEE80211_STA_NOTEXIST; | ||
| 1860 | state < sta->sta_state; state++) | ||
| 1861 | WARN_ON(drv_sta_state(local, sta->sdata, sta, state, | ||
| 1862 | state + 1)); | ||
| 1863 | } | ||
| 1864 | mutex_unlock(&local->sta_mtx); | ||
| 1865 | |||
| 1866 | /* reconfigure tx conf */ | ||
| 1867 | if (hw->queues >= IEEE80211_NUM_ACS) { | ||
| 1868 | list_for_each_entry(sdata, &local->interfaces, list) { | ||
| 1869 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN || | ||
| 1870 | sdata->vif.type == NL80211_IFTYPE_MONITOR || | ||
| 1871 | !ieee80211_sdata_running(sdata)) | ||
| 1872 | continue; | ||
| 1873 | |||
| 1874 | for (i = 0; i < IEEE80211_NUM_ACS; i++) | ||
| 1875 | drv_conf_tx(local, sdata, i, | ||
| 1876 | &sdata->tx_conf[i]); | ||
| 1877 | } | ||
| 1878 | } | ||
| 1879 | |||
| 1880 | /* reconfigure hardware */ | 1887 | /* reconfigure hardware */ |
| 1881 | ieee80211_hw_config(local, ~0); | 1888 | ieee80211_hw_config(local, ~0); |
| 1882 | 1889 | ||
| @@ -1889,6 +1896,22 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
| 1889 | if (!ieee80211_sdata_running(sdata)) | 1896 | if (!ieee80211_sdata_running(sdata)) |
| 1890 | continue; | 1897 | continue; |
| 1891 | 1898 | ||
| 1899 | ieee80211_assign_chanctx(local, sdata); | ||
| 1900 | |||
| 1901 | switch (sdata->vif.type) { | ||
| 1902 | case NL80211_IFTYPE_AP_VLAN: | ||
| 1903 | case NL80211_IFTYPE_MONITOR: | ||
| 1904 | break; | ||
| 1905 | default: | ||
| 1906 | ieee80211_reconfig_stations(sdata); | ||
| 1907 | /* fall through */ | ||
| 1908 | case NL80211_IFTYPE_AP: /* AP stations are handled later */ | ||
| 1909 | for (i = 0; i < IEEE80211_NUM_ACS; i++) | ||
| 1910 | drv_conf_tx(local, sdata, i, | ||
| 1911 | &sdata->tx_conf[i]); | ||
| 1912 | break; | ||
| 1913 | } | ||
| 1914 | |||
| 1892 | /* common change flags for all interface types */ | 1915 | /* common change flags for all interface types */ |
| 1893 | changed = BSS_CHANGED_ERP_CTS_PROT | | 1916 | changed = BSS_CHANGED_ERP_CTS_PROT | |
| 1894 | BSS_CHANGED_ERP_PREAMBLE | | 1917 | BSS_CHANGED_ERP_PREAMBLE | |
| @@ -2074,17 +2097,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
| 2074 | mb(); | 2097 | mb(); |
| 2075 | local->resuming = false; | 2098 | local->resuming = false; |
| 2076 | 2099 | ||
| 2077 | /* It's possible that we don't handle the scan completion in | 2100 | ieee80211_flush_completed_scan(local, false); |
| 2078 | * time during suspend, so if it's still marked as completed | ||
| 2079 | * here, queue the work and flush it to clean things up. | ||
| 2080 | * Instead of calling the worker function directly here, we | ||
| 2081 | * really queue it to avoid potential races with other flows | ||
| 2082 | * scheduling the same work. | ||
| 2083 | */ | ||
| 2084 | if (test_bit(SCAN_COMPLETED, &local->scanning)) { | ||
| 2085 | ieee80211_queue_delayed_work(&local->hw, &local->scan_work, 0); | ||
| 2086 | flush_delayed_work(&local->scan_work); | ||
| 2087 | } | ||
| 2088 | 2101 | ||
| 2089 | if (local->open_count && !reconfig_due_to_wowlan) | 2102 | if (local->open_count && !reconfig_due_to_wowlan) |
| 2090 | drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND); | 2103 | drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND); |
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index ff1c798921a6..c38b2f07a919 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c | |||
| @@ -378,7 +378,7 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta) | |||
| 378 | 378 | ||
| 379 | u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, | 379 | u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, |
| 380 | struct sta_info *sta, u8 opmode, | 380 | struct sta_info *sta, u8 opmode, |
| 381 | enum ieee80211_band band, bool nss_only) | 381 | enum ieee80211_band band) |
| 382 | { | 382 | { |
| 383 | struct ieee80211_local *local = sdata->local; | 383 | struct ieee80211_local *local = sdata->local; |
| 384 | struct ieee80211_supported_band *sband; | 384 | struct ieee80211_supported_band *sband; |
| @@ -401,9 +401,6 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, | |||
| 401 | changed |= IEEE80211_RC_NSS_CHANGED; | 401 | changed |= IEEE80211_RC_NSS_CHANGED; |
| 402 | } | 402 | } |
| 403 | 403 | ||
| 404 | if (nss_only) | ||
| 405 | return changed; | ||
| 406 | |||
| 407 | switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) { | 404 | switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) { |
| 408 | case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: | 405 | case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ: |
| 409 | sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20; | 406 | sta->cur_max_bandwidth = IEEE80211_STA_RX_BW_20; |
| @@ -430,13 +427,12 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, | |||
| 430 | 427 | ||
| 431 | void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, | 428 | void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, |
| 432 | struct sta_info *sta, u8 opmode, | 429 | struct sta_info *sta, u8 opmode, |
| 433 | enum ieee80211_band band, bool nss_only) | 430 | enum ieee80211_band band) |
| 434 | { | 431 | { |
| 435 | struct ieee80211_local *local = sdata->local; | 432 | struct ieee80211_local *local = sdata->local; |
| 436 | struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; | 433 | struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band]; |
| 437 | 434 | ||
| 438 | u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, | 435 | u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band); |
| 439 | band, nss_only); | ||
| 440 | 436 | ||
| 441 | if (changed > 0) | 437 | if (changed > 0) |
| 442 | rate_control_rate_update(local, sband, sta, changed); | 438 | rate_control_rate_update(local, sband, sta, changed); |
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index c70d750148b6..c32fc411a911 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c | |||
| @@ -27,6 +27,8 @@ | |||
| 27 | */ | 27 | */ |
| 28 | #define MAX_MP_SELECT_LABELS 4 | 28 | #define MAX_MP_SELECT_LABELS 4 |
| 29 | 29 | ||
| 30 | #define MPLS_NEIGH_TABLE_UNSPEC (NEIGH_LINK_TABLE + 1) | ||
| 31 | |||
| 30 | static int zero = 0; | 32 | static int zero = 0; |
| 31 | static int label_limit = (1 << 20) - 1; | 33 | static int label_limit = (1 << 20) - 1; |
| 32 | 34 | ||
| @@ -317,7 +319,13 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev, | |||
| 317 | } | 319 | } |
| 318 | } | 320 | } |
| 319 | 321 | ||
| 320 | err = neigh_xmit(nh->nh_via_table, out_dev, mpls_nh_via(rt, nh), skb); | 322 | /* If via wasn't specified then send out using device address */ |
| 323 | if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC) | ||
| 324 | err = neigh_xmit(NEIGH_LINK_TABLE, out_dev, | ||
| 325 | out_dev->dev_addr, skb); | ||
| 326 | else | ||
| 327 | err = neigh_xmit(nh->nh_via_table, out_dev, | ||
| 328 | mpls_nh_via(rt, nh), skb); | ||
| 321 | if (err) | 329 | if (err) |
| 322 | net_dbg_ratelimited("%s: packet transmission failed: %d\n", | 330 | net_dbg_ratelimited("%s: packet transmission failed: %d\n", |
| 323 | __func__, err); | 331 | __func__, err); |
| @@ -534,6 +542,10 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt, | |||
| 534 | if (!mpls_dev_get(dev)) | 542 | if (!mpls_dev_get(dev)) |
| 535 | goto errout; | 543 | goto errout; |
| 536 | 544 | ||
| 545 | if ((nh->nh_via_table == NEIGH_LINK_TABLE) && | ||
| 546 | (dev->addr_len != nh->nh_via_alen)) | ||
| 547 | goto errout; | ||
| 548 | |||
| 537 | RCU_INIT_POINTER(nh->nh_dev, dev); | 549 | RCU_INIT_POINTER(nh->nh_dev, dev); |
| 538 | 550 | ||
| 539 | return 0; | 551 | return 0; |
| @@ -592,10 +604,14 @@ static int mpls_nh_build(struct net *net, struct mpls_route *rt, | |||
| 592 | goto errout; | 604 | goto errout; |
| 593 | } | 605 | } |
| 594 | 606 | ||
| 595 | err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table, | 607 | if (via) { |
| 596 | __mpls_nh_via(rt, nh)); | 608 | err = nla_get_via(via, &nh->nh_via_alen, &nh->nh_via_table, |
| 597 | if (err) | 609 | __mpls_nh_via(rt, nh)); |
| 598 | goto errout; | 610 | if (err) |
| 611 | goto errout; | ||
| 612 | } else { | ||
| 613 | nh->nh_via_table = MPLS_NEIGH_TABLE_UNSPEC; | ||
| 614 | } | ||
| 599 | 615 | ||
| 600 | err = mpls_nh_assign_dev(net, rt, nh, oif); | 616 | err = mpls_nh_assign_dev(net, rt, nh, oif); |
| 601 | if (err) | 617 | if (err) |
| @@ -677,9 +693,6 @@ static int mpls_nh_build_multi(struct mpls_route_config *cfg, | |||
| 677 | nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST); | 693 | nla_newdst = nla_find(attrs, attrlen, RTA_NEWDST); |
| 678 | } | 694 | } |
| 679 | 695 | ||
| 680 | if (!nla_via) | ||
| 681 | goto errout; | ||
| 682 | |||
| 683 | err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh, | 696 | err = mpls_nh_build(cfg->rc_nlinfo.nl_net, rt, nh, |
| 684 | rtnh->rtnh_ifindex, nla_via, | 697 | rtnh->rtnh_ifindex, nla_via, |
| 685 | nla_newdst); | 698 | nla_newdst); |
| @@ -1118,6 +1131,7 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
| 1118 | 1131 | ||
| 1119 | cfg->rc_label = LABEL_NOT_SPECIFIED; | 1132 | cfg->rc_label = LABEL_NOT_SPECIFIED; |
| 1120 | cfg->rc_protocol = rtm->rtm_protocol; | 1133 | cfg->rc_protocol = rtm->rtm_protocol; |
| 1134 | cfg->rc_via_table = MPLS_NEIGH_TABLE_UNSPEC; | ||
| 1121 | cfg->rc_nlflags = nlh->nlmsg_flags; | 1135 | cfg->rc_nlflags = nlh->nlmsg_flags; |
| 1122 | cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid; | 1136 | cfg->rc_nlinfo.portid = NETLINK_CB(skb).portid; |
| 1123 | cfg->rc_nlinfo.nlh = nlh; | 1137 | cfg->rc_nlinfo.nlh = nlh; |
| @@ -1231,7 +1245,8 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event, | |||
| 1231 | nla_put_labels(skb, RTA_NEWDST, nh->nh_labels, | 1245 | nla_put_labels(skb, RTA_NEWDST, nh->nh_labels, |
| 1232 | nh->nh_label)) | 1246 | nh->nh_label)) |
| 1233 | goto nla_put_failure; | 1247 | goto nla_put_failure; |
| 1234 | if (nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh), | 1248 | if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC && |
| 1249 | nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh), | ||
| 1235 | nh->nh_via_alen)) | 1250 | nh->nh_via_alen)) |
| 1236 | goto nla_put_failure; | 1251 | goto nla_put_failure; |
| 1237 | dev = rtnl_dereference(nh->nh_dev); | 1252 | dev = rtnl_dereference(nh->nh_dev); |
| @@ -1257,7 +1272,8 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event, | |||
| 1257 | nh->nh_labels, | 1272 | nh->nh_labels, |
| 1258 | nh->nh_label)) | 1273 | nh->nh_label)) |
| 1259 | goto nla_put_failure; | 1274 | goto nla_put_failure; |
| 1260 | if (nla_put_via(skb, nh->nh_via_table, | 1275 | if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC && |
| 1276 | nla_put_via(skb, nh->nh_via_table, | ||
| 1261 | mpls_nh_via(rt, nh), | 1277 | mpls_nh_via(rt, nh), |
| 1262 | nh->nh_via_alen)) | 1278 | nh->nh_via_alen)) |
| 1263 | goto nla_put_failure; | 1279 | goto nla_put_failure; |
| @@ -1319,7 +1335,8 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt) | |||
| 1319 | 1335 | ||
| 1320 | if (nh->nh_dev) | 1336 | if (nh->nh_dev) |
| 1321 | payload += nla_total_size(4); /* RTA_OIF */ | 1337 | payload += nla_total_size(4); /* RTA_OIF */ |
| 1322 | payload += nla_total_size(2 + nh->nh_via_alen); /* RTA_VIA */ | 1338 | if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) /* RTA_VIA */ |
| 1339 | payload += nla_total_size(2 + nh->nh_via_alen); | ||
| 1323 | if (nh->nh_labels) /* RTA_NEWDST */ | 1340 | if (nh->nh_labels) /* RTA_NEWDST */ |
| 1324 | payload += nla_total_size(nh->nh_labels * 4); | 1341 | payload += nla_total_size(nh->nh_labels * 4); |
| 1325 | } else { | 1342 | } else { |
| @@ -1328,7 +1345,9 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt) | |||
| 1328 | 1345 | ||
| 1329 | for_nexthops(rt) { | 1346 | for_nexthops(rt) { |
| 1330 | nhsize += nla_total_size(sizeof(struct rtnexthop)); | 1347 | nhsize += nla_total_size(sizeof(struct rtnexthop)); |
| 1331 | nhsize += nla_total_size(2 + nh->nh_via_alen); | 1348 | /* RTA_VIA */ |
| 1349 | if (nh->nh_via_table != MPLS_NEIGH_TABLE_UNSPEC) | ||
| 1350 | nhsize += nla_total_size(2 + nh->nh_via_alen); | ||
| 1332 | if (nh->nh_labels) | 1351 | if (nh->nh_labels) |
| 1333 | nhsize += nla_total_size(nh->nh_labels * 4); | 1352 | nhsize += nla_total_size(nh->nh_labels * 4); |
| 1334 | } endfor_nexthops(rt); | 1353 | } endfor_nexthops(rt); |
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c index 67591aef9cae..64afd3d0b144 100644 --- a/net/mpls/mpls_iptunnel.c +++ b/net/mpls/mpls_iptunnel.c | |||
| @@ -54,10 +54,10 @@ int mpls_output(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
| 54 | unsigned int ttl; | 54 | unsigned int ttl; |
| 55 | 55 | ||
| 56 | /* Obtain the ttl */ | 56 | /* Obtain the ttl */ |
| 57 | if (skb->protocol == htons(ETH_P_IP)) { | 57 | if (dst->ops->family == AF_INET) { |
| 58 | ttl = ip_hdr(skb)->ttl; | 58 | ttl = ip_hdr(skb)->ttl; |
| 59 | rt = (struct rtable *)dst; | 59 | rt = (struct rtable *)dst; |
| 60 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | 60 | } else if (dst->ops->family == AF_INET6) { |
| 61 | ttl = ipv6_hdr(skb)->hop_limit; | 61 | ttl = ipv6_hdr(skb)->hop_limit; |
| 62 | rt6 = (struct rt6_info *)dst; | 62 | rt6 = (struct rt6_info *)dst; |
| 63 | } else { | 63 | } else { |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 93cc4737018f..2cb429d34c03 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -89,6 +89,7 @@ nf_tables_afinfo_lookup(struct net *net, int family, bool autoload) | |||
| 89 | } | 89 | } |
| 90 | 90 | ||
| 91 | static void nft_ctx_init(struct nft_ctx *ctx, | 91 | static void nft_ctx_init(struct nft_ctx *ctx, |
| 92 | struct net *net, | ||
| 92 | const struct sk_buff *skb, | 93 | const struct sk_buff *skb, |
| 93 | const struct nlmsghdr *nlh, | 94 | const struct nlmsghdr *nlh, |
| 94 | struct nft_af_info *afi, | 95 | struct nft_af_info *afi, |
| @@ -96,7 +97,7 @@ static void nft_ctx_init(struct nft_ctx *ctx, | |||
| 96 | struct nft_chain *chain, | 97 | struct nft_chain *chain, |
| 97 | const struct nlattr * const *nla) | 98 | const struct nlattr * const *nla) |
| 98 | { | 99 | { |
| 99 | ctx->net = sock_net(skb->sk); | 100 | ctx->net = net; |
| 100 | ctx->afi = afi; | 101 | ctx->afi = afi; |
| 101 | ctx->table = table; | 102 | ctx->table = table; |
| 102 | ctx->chain = chain; | 103 | ctx->chain = chain; |
| @@ -672,15 +673,14 @@ err: | |||
| 672 | return ret; | 673 | return ret; |
| 673 | } | 674 | } |
| 674 | 675 | ||
| 675 | static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, | 676 | static int nf_tables_newtable(struct net *net, struct sock *nlsk, |
| 676 | const struct nlmsghdr *nlh, | 677 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
| 677 | const struct nlattr * const nla[]) | 678 | const struct nlattr * const nla[]) |
| 678 | { | 679 | { |
| 679 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 680 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
| 680 | const struct nlattr *name; | 681 | const struct nlattr *name; |
| 681 | struct nft_af_info *afi; | 682 | struct nft_af_info *afi; |
| 682 | struct nft_table *table; | 683 | struct nft_table *table; |
| 683 | struct net *net = sock_net(skb->sk); | ||
| 684 | int family = nfmsg->nfgen_family; | 684 | int family = nfmsg->nfgen_family; |
| 685 | u32 flags = 0; | 685 | u32 flags = 0; |
| 686 | struct nft_ctx ctx; | 686 | struct nft_ctx ctx; |
| @@ -706,7 +706,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, | |||
| 706 | if (nlh->nlmsg_flags & NLM_F_REPLACE) | 706 | if (nlh->nlmsg_flags & NLM_F_REPLACE) |
| 707 | return -EOPNOTSUPP; | 707 | return -EOPNOTSUPP; |
| 708 | 708 | ||
| 709 | nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); | 709 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); |
| 710 | return nf_tables_updtable(&ctx); | 710 | return nf_tables_updtable(&ctx); |
| 711 | } | 711 | } |
| 712 | 712 | ||
| @@ -730,7 +730,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb, | |||
| 730 | INIT_LIST_HEAD(&table->sets); | 730 | INIT_LIST_HEAD(&table->sets); |
| 731 | table->flags = flags; | 731 | table->flags = flags; |
| 732 | 732 | ||
| 733 | nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); | 733 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); |
| 734 | err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE); | 734 | err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE); |
| 735 | if (err < 0) | 735 | if (err < 0) |
| 736 | goto err3; | 736 | goto err3; |
| @@ -810,18 +810,17 @@ out: | |||
| 810 | return err; | 810 | return err; |
| 811 | } | 811 | } |
| 812 | 812 | ||
| 813 | static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb, | 813 | static int nf_tables_deltable(struct net *net, struct sock *nlsk, |
| 814 | const struct nlmsghdr *nlh, | 814 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
| 815 | const struct nlattr * const nla[]) | 815 | const struct nlattr * const nla[]) |
| 816 | { | 816 | { |
| 817 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 817 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
| 818 | struct nft_af_info *afi; | 818 | struct nft_af_info *afi; |
| 819 | struct nft_table *table; | 819 | struct nft_table *table; |
| 820 | struct net *net = sock_net(skb->sk); | ||
| 821 | int family = nfmsg->nfgen_family; | 820 | int family = nfmsg->nfgen_family; |
| 822 | struct nft_ctx ctx; | 821 | struct nft_ctx ctx; |
| 823 | 822 | ||
| 824 | nft_ctx_init(&ctx, skb, nlh, NULL, NULL, NULL, nla); | 823 | nft_ctx_init(&ctx, net, skb, nlh, NULL, NULL, NULL, nla); |
| 825 | if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL) | 824 | if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL) |
| 826 | return nft_flush(&ctx, family); | 825 | return nft_flush(&ctx, family); |
| 827 | 826 | ||
| @@ -1221,8 +1220,8 @@ static void nf_tables_chain_destroy(struct nft_chain *chain) | |||
| 1221 | } | 1220 | } |
| 1222 | } | 1221 | } |
| 1223 | 1222 | ||
| 1224 | static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, | 1223 | static int nf_tables_newchain(struct net *net, struct sock *nlsk, |
| 1225 | const struct nlmsghdr *nlh, | 1224 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
| 1226 | const struct nlattr * const nla[]) | 1225 | const struct nlattr * const nla[]) |
| 1227 | { | 1226 | { |
| 1228 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 1227 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
| @@ -1232,7 +1231,6 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, | |||
| 1232 | struct nft_chain *chain; | 1231 | struct nft_chain *chain; |
| 1233 | struct nft_base_chain *basechain = NULL; | 1232 | struct nft_base_chain *basechain = NULL; |
| 1234 | struct nlattr *ha[NFTA_HOOK_MAX + 1]; | 1233 | struct nlattr *ha[NFTA_HOOK_MAX + 1]; |
| 1235 | struct net *net = sock_net(skb->sk); | ||
| 1236 | int family = nfmsg->nfgen_family; | 1234 | int family = nfmsg->nfgen_family; |
| 1237 | struct net_device *dev = NULL; | 1235 | struct net_device *dev = NULL; |
| 1238 | u8 policy = NF_ACCEPT; | 1236 | u8 policy = NF_ACCEPT; |
| @@ -1313,7 +1311,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, | |||
| 1313 | return PTR_ERR(stats); | 1311 | return PTR_ERR(stats); |
| 1314 | } | 1312 | } |
| 1315 | 1313 | ||
| 1316 | nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); | 1314 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); |
| 1317 | trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN, | 1315 | trans = nft_trans_alloc(&ctx, NFT_MSG_NEWCHAIN, |
| 1318 | sizeof(struct nft_trans_chain)); | 1316 | sizeof(struct nft_trans_chain)); |
| 1319 | if (trans == NULL) { | 1317 | if (trans == NULL) { |
| @@ -1461,7 +1459,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, | |||
| 1461 | if (err < 0) | 1459 | if (err < 0) |
| 1462 | goto err1; | 1460 | goto err1; |
| 1463 | 1461 | ||
| 1464 | nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); | 1462 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); |
| 1465 | err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN); | 1463 | err = nft_trans_chain_add(&ctx, NFT_MSG_NEWCHAIN); |
| 1466 | if (err < 0) | 1464 | if (err < 0) |
| 1467 | goto err2; | 1465 | goto err2; |
| @@ -1476,15 +1474,14 @@ err1: | |||
| 1476 | return err; | 1474 | return err; |
| 1477 | } | 1475 | } |
| 1478 | 1476 | ||
| 1479 | static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb, | 1477 | static int nf_tables_delchain(struct net *net, struct sock *nlsk, |
| 1480 | const struct nlmsghdr *nlh, | 1478 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
| 1481 | const struct nlattr * const nla[]) | 1479 | const struct nlattr * const nla[]) |
| 1482 | { | 1480 | { |
| 1483 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 1481 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
| 1484 | struct nft_af_info *afi; | 1482 | struct nft_af_info *afi; |
| 1485 | struct nft_table *table; | 1483 | struct nft_table *table; |
| 1486 | struct nft_chain *chain; | 1484 | struct nft_chain *chain; |
| 1487 | struct net *net = sock_net(skb->sk); | ||
| 1488 | int family = nfmsg->nfgen_family; | 1485 | int family = nfmsg->nfgen_family; |
| 1489 | struct nft_ctx ctx; | 1486 | struct nft_ctx ctx; |
| 1490 | 1487 | ||
| @@ -1506,7 +1503,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb, | |||
| 1506 | if (chain->use > 0) | 1503 | if (chain->use > 0) |
| 1507 | return -EBUSY; | 1504 | return -EBUSY; |
| 1508 | 1505 | ||
| 1509 | nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); | 1506 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); |
| 1510 | 1507 | ||
| 1511 | return nft_delchain(&ctx); | 1508 | return nft_delchain(&ctx); |
| 1512 | } | 1509 | } |
| @@ -2010,13 +2007,12 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, | |||
| 2010 | 2007 | ||
| 2011 | static struct nft_expr_info *info; | 2008 | static struct nft_expr_info *info; |
| 2012 | 2009 | ||
| 2013 | static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | 2010 | static int nf_tables_newrule(struct net *net, struct sock *nlsk, |
| 2014 | const struct nlmsghdr *nlh, | 2011 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
| 2015 | const struct nlattr * const nla[]) | 2012 | const struct nlattr * const nla[]) |
| 2016 | { | 2013 | { |
| 2017 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 2014 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
| 2018 | struct nft_af_info *afi; | 2015 | struct nft_af_info *afi; |
| 2019 | struct net *net = sock_net(skb->sk); | ||
| 2020 | struct nft_table *table; | 2016 | struct nft_table *table; |
| 2021 | struct nft_chain *chain; | 2017 | struct nft_chain *chain; |
| 2022 | struct nft_rule *rule, *old_rule = NULL; | 2018 | struct nft_rule *rule, *old_rule = NULL; |
| @@ -2075,7 +2071,7 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb, | |||
| 2075 | return PTR_ERR(old_rule); | 2071 | return PTR_ERR(old_rule); |
| 2076 | } | 2072 | } |
| 2077 | 2073 | ||
| 2078 | nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); | 2074 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); |
| 2079 | 2075 | ||
| 2080 | n = 0; | 2076 | n = 0; |
| 2081 | size = 0; | 2077 | size = 0; |
| @@ -2176,13 +2172,12 @@ err1: | |||
| 2176 | return err; | 2172 | return err; |
| 2177 | } | 2173 | } |
| 2178 | 2174 | ||
| 2179 | static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, | 2175 | static int nf_tables_delrule(struct net *net, struct sock *nlsk, |
| 2180 | const struct nlmsghdr *nlh, | 2176 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
| 2181 | const struct nlattr * const nla[]) | 2177 | const struct nlattr * const nla[]) |
| 2182 | { | 2178 | { |
| 2183 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 2179 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
| 2184 | struct nft_af_info *afi; | 2180 | struct nft_af_info *afi; |
| 2185 | struct net *net = sock_net(skb->sk); | ||
| 2186 | struct nft_table *table; | 2181 | struct nft_table *table; |
| 2187 | struct nft_chain *chain = NULL; | 2182 | struct nft_chain *chain = NULL; |
| 2188 | struct nft_rule *rule; | 2183 | struct nft_rule *rule; |
| @@ -2205,7 +2200,7 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb, | |||
| 2205 | return PTR_ERR(chain); | 2200 | return PTR_ERR(chain); |
| 2206 | } | 2201 | } |
| 2207 | 2202 | ||
| 2208 | nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla); | 2203 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, chain, nla); |
| 2209 | 2204 | ||
| 2210 | if (chain) { | 2205 | if (chain) { |
| 2211 | if (nla[NFTA_RULE_HANDLE]) { | 2206 | if (nla[NFTA_RULE_HANDLE]) { |
| @@ -2344,12 +2339,11 @@ static const struct nla_policy nft_set_desc_policy[NFTA_SET_DESC_MAX + 1] = { | |||
| 2344 | [NFTA_SET_DESC_SIZE] = { .type = NLA_U32 }, | 2339 | [NFTA_SET_DESC_SIZE] = { .type = NLA_U32 }, |
| 2345 | }; | 2340 | }; |
| 2346 | 2341 | ||
| 2347 | static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, | 2342 | static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net, |
| 2348 | const struct sk_buff *skb, | 2343 | const struct sk_buff *skb, |
| 2349 | const struct nlmsghdr *nlh, | 2344 | const struct nlmsghdr *nlh, |
| 2350 | const struct nlattr * const nla[]) | 2345 | const struct nlattr * const nla[]) |
| 2351 | { | 2346 | { |
| 2352 | struct net *net = sock_net(skb->sk); | ||
| 2353 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 2347 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
| 2354 | struct nft_af_info *afi = NULL; | 2348 | struct nft_af_info *afi = NULL; |
| 2355 | struct nft_table *table = NULL; | 2349 | struct nft_table *table = NULL; |
| @@ -2371,7 +2365,7 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, | |||
| 2371 | return -ENOENT; | 2365 | return -ENOENT; |
| 2372 | } | 2366 | } |
| 2373 | 2367 | ||
| 2374 | nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla); | 2368 | nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla); |
| 2375 | return 0; | 2369 | return 0; |
| 2376 | } | 2370 | } |
| 2377 | 2371 | ||
| @@ -2623,6 +2617,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb, | |||
| 2623 | const struct nlmsghdr *nlh, | 2617 | const struct nlmsghdr *nlh, |
| 2624 | const struct nlattr * const nla[]) | 2618 | const struct nlattr * const nla[]) |
| 2625 | { | 2619 | { |
| 2620 | struct net *net = sock_net(skb->sk); | ||
| 2626 | const struct nft_set *set; | 2621 | const struct nft_set *set; |
| 2627 | struct nft_ctx ctx; | 2622 | struct nft_ctx ctx; |
| 2628 | struct sk_buff *skb2; | 2623 | struct sk_buff *skb2; |
| @@ -2630,7 +2625,7 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb, | |||
| 2630 | int err; | 2625 | int err; |
| 2631 | 2626 | ||
| 2632 | /* Verify existence before starting dump */ | 2627 | /* Verify existence before starting dump */ |
| 2633 | err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla); | 2628 | err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla); |
| 2634 | if (err < 0) | 2629 | if (err < 0) |
| 2635 | return err; | 2630 | return err; |
| 2636 | 2631 | ||
| @@ -2693,14 +2688,13 @@ static int nf_tables_set_desc_parse(const struct nft_ctx *ctx, | |||
| 2693 | return 0; | 2688 | return 0; |
| 2694 | } | 2689 | } |
| 2695 | 2690 | ||
| 2696 | static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb, | 2691 | static int nf_tables_newset(struct net *net, struct sock *nlsk, |
| 2697 | const struct nlmsghdr *nlh, | 2692 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
| 2698 | const struct nlattr * const nla[]) | 2693 | const struct nlattr * const nla[]) |
| 2699 | { | 2694 | { |
| 2700 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 2695 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
| 2701 | const struct nft_set_ops *ops; | 2696 | const struct nft_set_ops *ops; |
| 2702 | struct nft_af_info *afi; | 2697 | struct nft_af_info *afi; |
| 2703 | struct net *net = sock_net(skb->sk); | ||
| 2704 | struct nft_table *table; | 2698 | struct nft_table *table; |
| 2705 | struct nft_set *set; | 2699 | struct nft_set *set; |
| 2706 | struct nft_ctx ctx; | 2700 | struct nft_ctx ctx; |
| @@ -2798,7 +2792,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb, | |||
| 2798 | if (IS_ERR(table)) | 2792 | if (IS_ERR(table)) |
| 2799 | return PTR_ERR(table); | 2793 | return PTR_ERR(table); |
| 2800 | 2794 | ||
| 2801 | nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla); | 2795 | nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); |
| 2802 | 2796 | ||
| 2803 | set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]); | 2797 | set = nf_tables_set_lookup(table, nla[NFTA_SET_NAME]); |
| 2804 | if (IS_ERR(set)) { | 2798 | if (IS_ERR(set)) { |
| @@ -2882,8 +2876,8 @@ static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set | |||
| 2882 | nft_set_destroy(set); | 2876 | nft_set_destroy(set); |
| 2883 | } | 2877 | } |
| 2884 | 2878 | ||
| 2885 | static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, | 2879 | static int nf_tables_delset(struct net *net, struct sock *nlsk, |
| 2886 | const struct nlmsghdr *nlh, | 2880 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
| 2887 | const struct nlattr * const nla[]) | 2881 | const struct nlattr * const nla[]) |
| 2888 | { | 2882 | { |
| 2889 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 2883 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
| @@ -2896,7 +2890,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb, | |||
| 2896 | if (nla[NFTA_SET_TABLE] == NULL) | 2890 | if (nla[NFTA_SET_TABLE] == NULL) |
| 2897 | return -EINVAL; | 2891 | return -EINVAL; |
| 2898 | 2892 | ||
| 2899 | err = nft_ctx_init_from_setattr(&ctx, skb, nlh, nla); | 2893 | err = nft_ctx_init_from_setattr(&ctx, net, skb, nlh, nla); |
| 2900 | if (err < 0) | 2894 | if (err < 0) |
| 2901 | return err; | 2895 | return err; |
| 2902 | 2896 | ||
| @@ -3024,7 +3018,7 @@ static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + | |||
| 3024 | [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 }, | 3018 | [NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 }, |
| 3025 | }; | 3019 | }; |
| 3026 | 3020 | ||
| 3027 | static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, | 3021 | static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, struct net *net, |
| 3028 | const struct sk_buff *skb, | 3022 | const struct sk_buff *skb, |
| 3029 | const struct nlmsghdr *nlh, | 3023 | const struct nlmsghdr *nlh, |
| 3030 | const struct nlattr * const nla[], | 3024 | const struct nlattr * const nla[], |
| @@ -3033,7 +3027,6 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, | |||
| 3033 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); | 3027 | const struct nfgenmsg *nfmsg = nlmsg_data(nlh); |
| 3034 | struct nft_af_info *afi; | 3028 | struct nft_af_info *afi; |
| 3035 | struct nft_table *table; | 3029 | struct nft_table *table; |
| 3036 | struct net *net = sock_net(skb->sk); | ||
| 3037 | 3030 | ||
| 3038 | afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false); | 3031 | afi = nf_tables_afinfo_lookup(net, nfmsg->nfgen_family, false); |
| 3039 | if (IS_ERR(afi)) | 3032 | if (IS_ERR(afi)) |
| @@ -3045,7 +3038,7 @@ static int nft_ctx_init_from_elemattr(struct nft_ctx *ctx, | |||
| 3045 | if (!trans && (table->flags & NFT_TABLE_INACTIVE)) | 3038 | if (!trans && (table->flags & NFT_TABLE_INACTIVE)) |
| 3046 | return -ENOENT; | 3039 | return -ENOENT; |
| 3047 | 3040 | ||
| 3048 | nft_ctx_init(ctx, skb, nlh, afi, table, NULL, nla); | 3041 | nft_ctx_init(ctx, net, skb, nlh, afi, table, NULL, nla); |
| 3049 | return 0; | 3042 | return 0; |
| 3050 | } | 3043 | } |
| 3051 | 3044 | ||
| @@ -3135,6 +3128,7 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx, | |||
| 3135 | 3128 | ||
| 3136 | static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) | 3129 | static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) |
| 3137 | { | 3130 | { |
| 3131 | struct net *net = sock_net(skb->sk); | ||
| 3138 | const struct nft_set *set; | 3132 | const struct nft_set *set; |
| 3139 | struct nft_set_dump_args args; | 3133 | struct nft_set_dump_args args; |
| 3140 | struct nft_ctx ctx; | 3134 | struct nft_ctx ctx; |
| @@ -3150,8 +3144,8 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 3150 | if (err < 0) | 3144 | if (err < 0) |
| 3151 | return err; | 3145 | return err; |
| 3152 | 3146 | ||
| 3153 | err = nft_ctx_init_from_elemattr(&ctx, cb->skb, cb->nlh, (void *)nla, | 3147 | err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh, |
| 3154 | false); | 3148 | (void *)nla, false); |
| 3155 | if (err < 0) | 3149 | if (err < 0) |
| 3156 | return err; | 3150 | return err; |
| 3157 | 3151 | ||
| @@ -3212,11 +3206,12 @@ static int nf_tables_getsetelem(struct sock *nlsk, struct sk_buff *skb, | |||
| 3212 | const struct nlmsghdr *nlh, | 3206 | const struct nlmsghdr *nlh, |
| 3213 | const struct nlattr * const nla[]) | 3207 | const struct nlattr * const nla[]) |
| 3214 | { | 3208 | { |
| 3209 | struct net *net = sock_net(skb->sk); | ||
| 3215 | const struct nft_set *set; | 3210 | const struct nft_set *set; |
| 3216 | struct nft_ctx ctx; | 3211 | struct nft_ctx ctx; |
| 3217 | int err; | 3212 | int err; |
| 3218 | 3213 | ||
| 3219 | err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false); | 3214 | err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, false); |
| 3220 | if (err < 0) | 3215 | if (err < 0) |
| 3221 | return err; | 3216 | return err; |
| 3222 | 3217 | ||
| @@ -3528,11 +3523,10 @@ err1: | |||
| 3528 | return err; | 3523 | return err; |
| 3529 | } | 3524 | } |
| 3530 | 3525 | ||
| 3531 | static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb, | 3526 | static int nf_tables_newsetelem(struct net *net, struct sock *nlsk, |
| 3532 | const struct nlmsghdr *nlh, | 3527 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
| 3533 | const struct nlattr * const nla[]) | 3528 | const struct nlattr * const nla[]) |
| 3534 | { | 3529 | { |
| 3535 | struct net *net = sock_net(skb->sk); | ||
| 3536 | const struct nlattr *attr; | 3530 | const struct nlattr *attr; |
| 3537 | struct nft_set *set; | 3531 | struct nft_set *set; |
| 3538 | struct nft_ctx ctx; | 3532 | struct nft_ctx ctx; |
| @@ -3541,7 +3535,7 @@ static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb, | |||
| 3541 | if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) | 3535 | if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) |
| 3542 | return -EINVAL; | 3536 | return -EINVAL; |
| 3543 | 3537 | ||
| 3544 | err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true); | 3538 | err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, true); |
| 3545 | if (err < 0) | 3539 | if (err < 0) |
| 3546 | return err; | 3540 | return err; |
| 3547 | 3541 | ||
| @@ -3623,8 +3617,8 @@ err1: | |||
| 3623 | return err; | 3617 | return err; |
| 3624 | } | 3618 | } |
| 3625 | 3619 | ||
| 3626 | static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb, | 3620 | static int nf_tables_delsetelem(struct net *net, struct sock *nlsk, |
| 3627 | const struct nlmsghdr *nlh, | 3621 | struct sk_buff *skb, const struct nlmsghdr *nlh, |
| 3628 | const struct nlattr * const nla[]) | 3622 | const struct nlattr * const nla[]) |
| 3629 | { | 3623 | { |
| 3630 | const struct nlattr *attr; | 3624 | const struct nlattr *attr; |
| @@ -3635,7 +3629,7 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb, | |||
| 3635 | if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) | 3629 | if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) |
| 3636 | return -EINVAL; | 3630 | return -EINVAL; |
| 3637 | 3631 | ||
| 3638 | err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false); | 3632 | err = nft_ctx_init_from_elemattr(&ctx, net, skb, nlh, nla, false); |
| 3639 | if (err < 0) | 3633 | if (err < 0) |
| 3640 | return err; | 3634 | return err; |
| 3641 | 3635 | ||
| @@ -4030,7 +4024,8 @@ static int nf_tables_abort(struct sk_buff *skb) | |||
| 4030 | struct nft_trans *trans, *next; | 4024 | struct nft_trans *trans, *next; |
| 4031 | struct nft_trans_elem *te; | 4025 | struct nft_trans_elem *te; |
| 4032 | 4026 | ||
| 4033 | list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { | 4027 | list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list, |
| 4028 | list) { | ||
| 4034 | switch (trans->msg_type) { | 4029 | switch (trans->msg_type) { |
| 4035 | case NFT_MSG_NEWTABLE: | 4030 | case NFT_MSG_NEWTABLE: |
| 4036 | if (nft_trans_table_update(trans)) { | 4031 | if (nft_trans_table_update(trans)) { |
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 46453ab318db..77afe913d03d 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c | |||
| @@ -295,8 +295,6 @@ replay: | |||
| 295 | if (!skb) | 295 | if (!skb) |
| 296 | return netlink_ack(oskb, nlh, -ENOMEM); | 296 | return netlink_ack(oskb, nlh, -ENOMEM); |
| 297 | 297 | ||
| 298 | skb->sk = oskb->sk; | ||
| 299 | |||
| 300 | nfnl_lock(subsys_id); | 298 | nfnl_lock(subsys_id); |
| 301 | ss = rcu_dereference_protected(table[subsys_id].subsys, | 299 | ss = rcu_dereference_protected(table[subsys_id].subsys, |
| 302 | lockdep_is_held(&table[subsys_id].mutex)); | 300 | lockdep_is_held(&table[subsys_id].mutex)); |
| @@ -381,7 +379,7 @@ replay: | |||
| 381 | goto ack; | 379 | goto ack; |
| 382 | 380 | ||
| 383 | if (nc->call_batch) { | 381 | if (nc->call_batch) { |
| 384 | err = nc->call_batch(net->nfnl, skb, nlh, | 382 | err = nc->call_batch(net, net->nfnl, skb, nlh, |
| 385 | (const struct nlattr **)cda); | 383 | (const struct nlattr **)cda); |
| 386 | } | 384 | } |
| 387 | 385 | ||
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 7d81d280cb4f..861c6615253b 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
| @@ -365,8 +365,9 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
| 365 | break; | 365 | break; |
| 366 | } | 366 | } |
| 367 | 367 | ||
| 368 | nfnl_ct = rcu_dereference(nfnl_ct_hook); | ||
| 369 | |||
| 368 | if (queue->flags & NFQA_CFG_F_CONNTRACK) { | 370 | if (queue->flags & NFQA_CFG_F_CONNTRACK) { |
| 369 | nfnl_ct = rcu_dereference(nfnl_ct_hook); | ||
| 370 | if (nfnl_ct != NULL) { | 371 | if (nfnl_ct != NULL) { |
| 371 | ct = nfnl_ct->get_ct(entskb, &ctinfo); | 372 | ct = nfnl_ct->get_ct(entskb, &ctinfo); |
| 372 | if (ct != NULL) | 373 | if (ct != NULL) |
| @@ -1064,9 +1065,10 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, | |||
| 1064 | if (entry == NULL) | 1065 | if (entry == NULL) |
| 1065 | return -ENOENT; | 1066 | return -ENOENT; |
| 1066 | 1067 | ||
| 1068 | /* rcu lock already held from nfnl->call_rcu. */ | ||
| 1069 | nfnl_ct = rcu_dereference(nfnl_ct_hook); | ||
| 1070 | |||
| 1067 | if (nfqa[NFQA_CT]) { | 1071 | if (nfqa[NFQA_CT]) { |
| 1068 | /* rcu lock already held from nfnl->call_rcu. */ | ||
| 1069 | nfnl_ct = rcu_dereference(nfnl_ct_hook); | ||
| 1070 | if (nfnl_ct != NULL) | 1072 | if (nfnl_ct != NULL) |
| 1071 | ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo); | 1073 | ct = nfqnl_ct_parse(nfnl_ct, nlh, nfqa, entry, &ctinfo); |
| 1072 | } | 1074 | } |
| @@ -1417,6 +1419,7 @@ static int __init nfnetlink_queue_init(void) | |||
| 1417 | 1419 | ||
| 1418 | cleanup_netlink_notifier: | 1420 | cleanup_netlink_notifier: |
| 1419 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); | 1421 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); |
| 1422 | unregister_pernet_subsys(&nfnl_queue_net_ops); | ||
| 1420 | out: | 1423 | out: |
| 1421 | return status; | 1424 | return status; |
| 1422 | } | 1425 | } |
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index c2cc11168fd5..3e8892216f94 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c | |||
| @@ -53,6 +53,8 @@ struct ovs_conntrack_info { | |||
| 53 | struct md_labels labels; | 53 | struct md_labels labels; |
| 54 | }; | 54 | }; |
| 55 | 55 | ||
| 56 | static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info); | ||
| 57 | |||
| 56 | static u16 key_to_nfproto(const struct sw_flow_key *key) | 58 | static u16 key_to_nfproto(const struct sw_flow_key *key) |
| 57 | { | 59 | { |
| 58 | switch (ntohs(key->eth.type)) { | 60 | switch (ntohs(key->eth.type)) { |
| @@ -141,6 +143,7 @@ static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state, | |||
| 141 | * previously sent the packet to conntrack via the ct action. | 143 | * previously sent the packet to conntrack via the ct action. |
| 142 | */ | 144 | */ |
| 143 | static void ovs_ct_update_key(const struct sk_buff *skb, | 145 | static void ovs_ct_update_key(const struct sk_buff *skb, |
| 146 | const struct ovs_conntrack_info *info, | ||
| 144 | struct sw_flow_key *key, bool post_ct) | 147 | struct sw_flow_key *key, bool post_ct) |
| 145 | { | 148 | { |
| 146 | const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt; | 149 | const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt; |
| @@ -158,13 +161,15 @@ static void ovs_ct_update_key(const struct sk_buff *skb, | |||
| 158 | zone = nf_ct_zone(ct); | 161 | zone = nf_ct_zone(ct); |
| 159 | } else if (post_ct) { | 162 | } else if (post_ct) { |
| 160 | state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID; | 163 | state = OVS_CS_F_TRACKED | OVS_CS_F_INVALID; |
| 164 | if (info) | ||
| 165 | zone = &info->zone; | ||
| 161 | } | 166 | } |
| 162 | __ovs_ct_update_key(key, state, zone, ct); | 167 | __ovs_ct_update_key(key, state, zone, ct); |
| 163 | } | 168 | } |
| 164 | 169 | ||
| 165 | void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) | 170 | void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key) |
| 166 | { | 171 | { |
| 167 | ovs_ct_update_key(skb, key, false); | 172 | ovs_ct_update_key(skb, NULL, key, false); |
| 168 | } | 173 | } |
| 169 | 174 | ||
| 170 | int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb) | 175 | int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb) |
| @@ -418,7 +423,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key, | |||
| 418 | } | 423 | } |
| 419 | } | 424 | } |
| 420 | 425 | ||
| 421 | ovs_ct_update_key(skb, key, true); | 426 | ovs_ct_update_key(skb, info, key, true); |
| 422 | 427 | ||
| 423 | return 0; | 428 | return 0; |
| 424 | } | 429 | } |
| @@ -708,7 +713,7 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr, | |||
| 708 | nf_conntrack_get(&ct_info.ct->ct_general); | 713 | nf_conntrack_get(&ct_info.ct->ct_general); |
| 709 | return 0; | 714 | return 0; |
| 710 | err_free_ct: | 715 | err_free_ct: |
| 711 | nf_conntrack_free(ct_info.ct); | 716 | __ovs_ct_free_action(&ct_info); |
| 712 | return err; | 717 | return err; |
| 713 | } | 718 | } |
| 714 | 719 | ||
| @@ -750,6 +755,11 @@ void ovs_ct_free_action(const struct nlattr *a) | |||
| 750 | { | 755 | { |
| 751 | struct ovs_conntrack_info *ct_info = nla_data(a); | 756 | struct ovs_conntrack_info *ct_info = nla_data(a); |
| 752 | 757 | ||
| 758 | __ovs_ct_free_action(ct_info); | ||
| 759 | } | ||
| 760 | |||
| 761 | static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info) | ||
| 762 | { | ||
| 753 | if (ct_info->helper) | 763 | if (ct_info->helper) |
| 754 | module_put(ct_info->helper->me); | 764 | module_put(ct_info->helper->me); |
| 755 | if (ct_info->ct) | 765 | if (ct_info->ct) |
diff --git a/net/rfkill/core.c b/net/rfkill/core.c index b41e9ea2ffff..f53bf3b6558b 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c | |||
| @@ -49,7 +49,6 @@ | |||
| 49 | struct rfkill { | 49 | struct rfkill { |
| 50 | spinlock_t lock; | 50 | spinlock_t lock; |
| 51 | 51 | ||
| 52 | const char *name; | ||
| 53 | enum rfkill_type type; | 52 | enum rfkill_type type; |
| 54 | 53 | ||
| 55 | unsigned long state; | 54 | unsigned long state; |
| @@ -73,6 +72,7 @@ struct rfkill { | |||
| 73 | struct delayed_work poll_work; | 72 | struct delayed_work poll_work; |
| 74 | struct work_struct uevent_work; | 73 | struct work_struct uevent_work; |
| 75 | struct work_struct sync_work; | 74 | struct work_struct sync_work; |
| 75 | char name[]; | ||
| 76 | }; | 76 | }; |
| 77 | #define to_rfkill(d) container_of(d, struct rfkill, dev) | 77 | #define to_rfkill(d) container_of(d, struct rfkill, dev) |
| 78 | 78 | ||
| @@ -876,14 +876,14 @@ struct rfkill * __must_check rfkill_alloc(const char *name, | |||
| 876 | if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) | 876 | if (WARN_ON(type == RFKILL_TYPE_ALL || type >= NUM_RFKILL_TYPES)) |
| 877 | return NULL; | 877 | return NULL; |
| 878 | 878 | ||
| 879 | rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL); | 879 | rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL); |
| 880 | if (!rfkill) | 880 | if (!rfkill) |
| 881 | return NULL; | 881 | return NULL; |
| 882 | 882 | ||
| 883 | spin_lock_init(&rfkill->lock); | 883 | spin_lock_init(&rfkill->lock); |
| 884 | INIT_LIST_HEAD(&rfkill->node); | 884 | INIT_LIST_HEAD(&rfkill->node); |
| 885 | rfkill->type = type; | 885 | rfkill->type = type; |
| 886 | rfkill->name = name; | 886 | strcpy(rfkill->name, name); |
| 887 | rfkill->ops = ops; | 887 | rfkill->ops = ops; |
| 888 | rfkill->data = ops_data; | 888 | rfkill->data = ops_data; |
| 889 | 889 | ||
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 7ec667dd4ce1..b5c2cf2aa6d4 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
| @@ -950,7 +950,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, | |||
| 950 | } | 950 | } |
| 951 | lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock); | 951 | lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock); |
| 952 | if (!netif_is_multiqueue(dev)) | 952 | if (!netif_is_multiqueue(dev)) |
| 953 | sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; | 953 | sch->flags |= TCQ_F_ONETXQUEUE; |
| 954 | } | 954 | } |
| 955 | 955 | ||
| 956 | sch->handle = handle; | 956 | sch->handle = handle; |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index acb45b8c2a9d..ec529121f38a 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
| @@ -323,14 +323,13 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, | |||
| 323 | } | 323 | } |
| 324 | } | 324 | } |
| 325 | } | 325 | } |
| 326 | rcu_read_unlock(); | ||
| 327 | |||
| 328 | if (baddr) { | 326 | if (baddr) { |
| 329 | fl6->saddr = baddr->v6.sin6_addr; | 327 | fl6->saddr = baddr->v6.sin6_addr; |
| 330 | fl6->fl6_sport = baddr->v6.sin6_port; | 328 | fl6->fl6_sport = baddr->v6.sin6_port; |
| 331 | final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); | 329 | final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); |
| 332 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); | 330 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); |
| 333 | } | 331 | } |
| 332 | rcu_read_unlock(); | ||
| 334 | 333 | ||
| 335 | out: | 334 | out: |
| 336 | if (!IS_ERR_OR_NULL(dst)) { | 335 | if (!IS_ERR_OR_NULL(dst)) { |
| @@ -642,6 +641,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk, | |||
| 642 | struct sock *newsk; | 641 | struct sock *newsk; |
| 643 | struct ipv6_pinfo *newnp, *np = inet6_sk(sk); | 642 | struct ipv6_pinfo *newnp, *np = inet6_sk(sk); |
| 644 | struct sctp6_sock *newsctp6sk; | 643 | struct sctp6_sock *newsctp6sk; |
| 644 | struct ipv6_txoptions *opt; | ||
| 645 | 645 | ||
| 646 | newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0); | 646 | newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, 0); |
| 647 | if (!newsk) | 647 | if (!newsk) |
| @@ -661,6 +661,13 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk, | |||
| 661 | 661 | ||
| 662 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); | 662 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); |
| 663 | 663 | ||
| 664 | rcu_read_lock(); | ||
| 665 | opt = rcu_dereference(np->opt); | ||
| 666 | if (opt) | ||
| 667 | opt = ipv6_dup_options(newsk, opt); | ||
| 668 | RCU_INIT_POINTER(newnp->opt, opt); | ||
| 669 | rcu_read_unlock(); | ||
| 670 | |||
| 664 | /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname() | 671 | /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname() |
| 665 | * and getpeername(). | 672 | * and getpeername(). |
| 666 | */ | 673 | */ |
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 7e8f0a117106..c0380cfb16ae 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
| @@ -324,6 +324,7 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) | |||
| 324 | sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : | 324 | sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : |
| 325 | "illegal chunk"); | 325 | "illegal chunk"); |
| 326 | 326 | ||
| 327 | sctp_chunk_hold(chunk); | ||
| 327 | sctp_outq_tail_data(q, chunk); | 328 | sctp_outq_tail_data(q, chunk); |
| 328 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) | 329 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) |
| 329 | SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); | 330 | SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); |
| @@ -1251,6 +1252,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) | |||
| 1251 | */ | 1252 | */ |
| 1252 | 1253 | ||
| 1253 | sack_a_rwnd = ntohl(sack->a_rwnd); | 1254 | sack_a_rwnd = ntohl(sack->a_rwnd); |
| 1255 | asoc->peer.zero_window_announced = !sack_a_rwnd; | ||
| 1254 | outstanding = q->outstanding_bytes; | 1256 | outstanding = q->outstanding_bytes; |
| 1255 | 1257 | ||
| 1256 | if (outstanding < sack_a_rwnd) | 1258 | if (outstanding < sack_a_rwnd) |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 763e06a55155..5d6a03fad378 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
| @@ -1652,7 +1652,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, | |||
| 1652 | 1652 | ||
| 1653 | /* Set an expiration time for the cookie. */ | 1653 | /* Set an expiration time for the cookie. */ |
| 1654 | cookie->c.expiration = ktime_add(asoc->cookie_life, | 1654 | cookie->c.expiration = ktime_add(asoc->cookie_life, |
| 1655 | ktime_get()); | 1655 | ktime_get_real()); |
| 1656 | 1656 | ||
| 1657 | /* Copy the peer's init packet. */ | 1657 | /* Copy the peer's init packet. */ |
| 1658 | memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr, | 1658 | memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr, |
| @@ -1780,7 +1780,7 @@ no_hmac: | |||
| 1780 | if (sock_flag(ep->base.sk, SOCK_TIMESTAMP)) | 1780 | if (sock_flag(ep->base.sk, SOCK_TIMESTAMP)) |
| 1781 | kt = skb_get_ktime(skb); | 1781 | kt = skb_get_ktime(skb); |
| 1782 | else | 1782 | else |
| 1783 | kt = ktime_get(); | 1783 | kt = ktime_get_real(); |
| 1784 | 1784 | ||
| 1785 | if (!asoc && ktime_before(bear_cookie->expiration, kt)) { | 1785 | if (!asoc && ktime_before(bear_cookie->expiration, kt)) { |
| 1786 | /* | 1786 | /* |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 6f46aa16cb76..cd34a4a34065 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
| @@ -5412,7 +5412,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net, | |||
| 5412 | SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS); | 5412 | SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS); |
| 5413 | 5413 | ||
| 5414 | if (asoc->overall_error_count >= asoc->max_retrans) { | 5414 | if (asoc->overall_error_count >= asoc->max_retrans) { |
| 5415 | if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) { | 5415 | if (asoc->peer.zero_window_announced && |
| 5416 | asoc->state == SCTP_STATE_SHUTDOWN_PENDING) { | ||
| 5416 | /* | 5417 | /* |
| 5417 | * We are here likely because the receiver had its rwnd | 5418 | * We are here likely because the receiver had its rwnd |
| 5418 | * closed for a while and we have not been able to | 5419 | * closed for a while and we have not been able to |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 03c8256063ec..9b6cc6de80d8 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -1952,8 +1952,6 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) | |||
| 1952 | 1952 | ||
| 1953 | /* Now send the (possibly) fragmented message. */ | 1953 | /* Now send the (possibly) fragmented message. */ |
| 1954 | list_for_each_entry(chunk, &datamsg->chunks, frag_list) { | 1954 | list_for_each_entry(chunk, &datamsg->chunks, frag_list) { |
| 1955 | sctp_chunk_hold(chunk); | ||
| 1956 | |||
| 1957 | /* Do accounting for the write space. */ | 1955 | /* Do accounting for the write space. */ |
| 1958 | sctp_set_owner_w(chunk); | 1956 | sctp_set_owner_w(chunk); |
| 1959 | 1957 | ||
| @@ -1966,15 +1964,13 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) | |||
| 1966 | * breaks. | 1964 | * breaks. |
| 1967 | */ | 1965 | */ |
| 1968 | err = sctp_primitive_SEND(net, asoc, datamsg); | 1966 | err = sctp_primitive_SEND(net, asoc, datamsg); |
| 1967 | sctp_datamsg_put(datamsg); | ||
| 1969 | /* Did the lower layer accept the chunk? */ | 1968 | /* Did the lower layer accept the chunk? */ |
| 1970 | if (err) { | 1969 | if (err) |
| 1971 | sctp_datamsg_free(datamsg); | ||
| 1972 | goto out_free; | 1970 | goto out_free; |
| 1973 | } | ||
| 1974 | 1971 | ||
| 1975 | pr_debug("%s: we sent primitively\n", __func__); | 1972 | pr_debug("%s: we sent primitively\n", __func__); |
| 1976 | 1973 | ||
| 1977 | sctp_datamsg_put(datamsg); | ||
| 1978 | err = msg_len; | 1974 | err = msg_len; |
| 1979 | 1975 | ||
| 1980 | if (unlikely(wait_connect)) { | 1976 | if (unlikely(wait_connect)) { |
| @@ -7167,6 +7163,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, | |||
| 7167 | newsk->sk_type = sk->sk_type; | 7163 | newsk->sk_type = sk->sk_type; |
| 7168 | newsk->sk_bound_dev_if = sk->sk_bound_dev_if; | 7164 | newsk->sk_bound_dev_if = sk->sk_bound_dev_if; |
| 7169 | newsk->sk_flags = sk->sk_flags; | 7165 | newsk->sk_flags = sk->sk_flags; |
| 7166 | newsk->sk_tsflags = sk->sk_tsflags; | ||
| 7170 | newsk->sk_no_check_tx = sk->sk_no_check_tx; | 7167 | newsk->sk_no_check_tx = sk->sk_no_check_tx; |
| 7171 | newsk->sk_no_check_rx = sk->sk_no_check_rx; | 7168 | newsk->sk_no_check_rx = sk->sk_no_check_rx; |
| 7172 | newsk->sk_reuse = sk->sk_reuse; | 7169 | newsk->sk_reuse = sk->sk_reuse; |
| @@ -7199,6 +7196,9 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, | |||
| 7199 | newinet->mc_ttl = 1; | 7196 | newinet->mc_ttl = 1; |
| 7200 | newinet->mc_index = 0; | 7197 | newinet->mc_index = 0; |
| 7201 | newinet->mc_list = NULL; | 7198 | newinet->mc_list = NULL; |
| 7199 | |||
| 7200 | if (newsk->sk_flags & SK_FLAGS_TIMESTAMP) | ||
| 7201 | net_enable_timestamp(); | ||
| 7202 | } | 7202 | } |
| 7203 | 7203 | ||
| 7204 | static inline void sctp_copy_descendant(struct sock *sk_to, | 7204 | static inline void sctp_copy_descendant(struct sock *sk_to, |
diff --git a/net/socket.c b/net/socket.c index 456fadb3d819..29822d6dd91e 100644 --- a/net/socket.c +++ b/net/socket.c | |||
| @@ -1695,6 +1695,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, | |||
| 1695 | msg.msg_name = addr ? (struct sockaddr *)&address : NULL; | 1695 | msg.msg_name = addr ? (struct sockaddr *)&address : NULL; |
| 1696 | /* We assume all kernel code knows the size of sockaddr_storage */ | 1696 | /* We assume all kernel code knows the size of sockaddr_storage */ |
| 1697 | msg.msg_namelen = 0; | 1697 | msg.msg_namelen = 0; |
| 1698 | msg.msg_iocb = NULL; | ||
| 1698 | if (sock->file->f_flags & O_NONBLOCK) | 1699 | if (sock->file->f_flags & O_NONBLOCK) |
| 1699 | flags |= MSG_DONTWAIT; | 1700 | flags |= MSG_DONTWAIT; |
| 1700 | err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags); | 1701 | err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags); |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 45aebd966978..a4631477cedf 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
| @@ -2256,14 +2256,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state) | |||
| 2256 | /* Lock the socket to prevent queue disordering | 2256 | /* Lock the socket to prevent queue disordering |
| 2257 | * while sleeps in memcpy_tomsg | 2257 | * while sleeps in memcpy_tomsg |
| 2258 | */ | 2258 | */ |
| 2259 | err = mutex_lock_interruptible(&u->readlock); | 2259 | mutex_lock(&u->readlock); |
| 2260 | if (unlikely(err)) { | ||
| 2261 | /* recvmsg() in non blocking mode is supposed to return -EAGAIN | ||
| 2262 | * sk_rcvtimeo is not honored by mutex_lock_interruptible() | ||
| 2263 | */ | ||
| 2264 | err = noblock ? -EAGAIN : -ERESTARTSYS; | ||
| 2265 | goto out; | ||
| 2266 | } | ||
| 2267 | 2260 | ||
| 2268 | if (flags & MSG_PEEK) | 2261 | if (flags & MSG_PEEK) |
| 2269 | skip = sk_peek_offset(sk, flags); | 2262 | skip = sk_peek_offset(sk, flags); |
| @@ -2307,12 +2300,12 @@ again: | |||
| 2307 | timeo = unix_stream_data_wait(sk, timeo, last, | 2300 | timeo = unix_stream_data_wait(sk, timeo, last, |
| 2308 | last_len); | 2301 | last_len); |
| 2309 | 2302 | ||
| 2310 | if (signal_pending(current) || | 2303 | if (signal_pending(current)) { |
| 2311 | mutex_lock_interruptible(&u->readlock)) { | ||
| 2312 | err = sock_intr_errno(timeo); | 2304 | err = sock_intr_errno(timeo); |
| 2313 | goto out; | 2305 | goto out; |
| 2314 | } | 2306 | } |
| 2315 | 2307 | ||
| 2308 | mutex_lock(&u->readlock); | ||
| 2316 | continue; | 2309 | continue; |
| 2317 | unlock: | 2310 | unlock: |
| 2318 | unix_state_unlock(sk); | 2311 | unix_state_unlock(sk); |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c71e274c810a..75b0d23ee882 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
| @@ -7941,8 +7941,10 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) | |||
| 7941 | if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) { | 7941 | if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) { |
| 7942 | if (!(rdev->wiphy.features & | 7942 | if (!(rdev->wiphy.features & |
| 7943 | NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) || | 7943 | NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) || |
| 7944 | !(rdev->wiphy.features & NL80211_FEATURE_QUIET)) | 7944 | !(rdev->wiphy.features & NL80211_FEATURE_QUIET)) { |
| 7945 | kzfree(connkeys); | ||
| 7945 | return -EINVAL; | 7946 | return -EINVAL; |
| 7947 | } | ||
| 7946 | connect.flags |= ASSOC_REQ_USE_RRM; | 7948 | connect.flags |= ASSOC_REQ_USE_RRM; |
| 7947 | } | 7949 | } |
| 7948 | 7950 | ||
| @@ -9503,6 +9505,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) | |||
| 9503 | if (new_triggers.tcp && new_triggers.tcp->sock) | 9505 | if (new_triggers.tcp && new_triggers.tcp->sock) |
| 9504 | sock_release(new_triggers.tcp->sock); | 9506 | sock_release(new_triggers.tcp->sock); |
| 9505 | kfree(new_triggers.tcp); | 9507 | kfree(new_triggers.tcp); |
| 9508 | kfree(new_triggers.nd_config); | ||
| 9506 | return err; | 9509 | return err; |
| 9507 | } | 9510 | } |
| 9508 | #endif | 9511 | #endif |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 2e8d6f39ed56..06d050da0d94 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
| @@ -3029,6 +3029,7 @@ int set_regdom(const struct ieee80211_regdomain *rd, | |||
| 3029 | break; | 3029 | break; |
| 3030 | default: | 3030 | default: |
| 3031 | WARN(1, "invalid initiator %d\n", lr->initiator); | 3031 | WARN(1, "invalid initiator %d\n", lr->initiator); |
| 3032 | kfree(rd); | ||
| 3032 | return -EINVAL; | 3033 | return -EINVAL; |
| 3033 | } | 3034 | } |
| 3034 | 3035 | ||
| @@ -3221,8 +3222,10 @@ int __init regulatory_init(void) | |||
| 3221 | /* We always try to get an update for the static regdomain */ | 3222 | /* We always try to get an update for the static regdomain */ |
| 3222 | err = regulatory_hint_core(cfg80211_world_regdom->alpha2); | 3223 | err = regulatory_hint_core(cfg80211_world_regdom->alpha2); |
| 3223 | if (err) { | 3224 | if (err) { |
| 3224 | if (err == -ENOMEM) | 3225 | if (err == -ENOMEM) { |
| 3226 | platform_device_unregister(reg_pdev); | ||
| 3225 | return err; | 3227 | return err; |
| 3228 | } | ||
| 3226 | /* | 3229 | /* |
| 3227 | * N.B. kobject_uevent_env() can fail mainly for when we're out | 3230 | * N.B. kobject_uevent_env() can fail mainly for when we're out |
| 3228 | * memory which is handled and propagated appropriately above | 3231 | * memory which is handled and propagated appropriately above |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 09bfcbac63bb..948fa5560de5 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
| @@ -303,6 +303,14 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp) | |||
| 303 | } | 303 | } |
| 304 | EXPORT_SYMBOL(xfrm_policy_alloc); | 304 | EXPORT_SYMBOL(xfrm_policy_alloc); |
| 305 | 305 | ||
| 306 | static void xfrm_policy_destroy_rcu(struct rcu_head *head) | ||
| 307 | { | ||
| 308 | struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu); | ||
| 309 | |||
| 310 | security_xfrm_policy_free(policy->security); | ||
| 311 | kfree(policy); | ||
| 312 | } | ||
| 313 | |||
| 306 | /* Destroy xfrm_policy: descendant resources must be released to this moment. */ | 314 | /* Destroy xfrm_policy: descendant resources must be released to this moment. */ |
| 307 | 315 | ||
| 308 | void xfrm_policy_destroy(struct xfrm_policy *policy) | 316 | void xfrm_policy_destroy(struct xfrm_policy *policy) |
| @@ -312,8 +320,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy) | |||
| 312 | if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer)) | 320 | if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer)) |
| 313 | BUG(); | 321 | BUG(); |
| 314 | 322 | ||
| 315 | security_xfrm_policy_free(policy->security); | 323 | call_rcu(&policy->rcu, xfrm_policy_destroy_rcu); |
| 316 | kfree(policy); | ||
| 317 | } | 324 | } |
| 318 | EXPORT_SYMBOL(xfrm_policy_destroy); | 325 | EXPORT_SYMBOL(xfrm_policy_destroy); |
| 319 | 326 | ||
| @@ -1214,8 +1221,10 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, | |||
| 1214 | struct xfrm_policy *pol; | 1221 | struct xfrm_policy *pol; |
| 1215 | struct net *net = sock_net(sk); | 1222 | struct net *net = sock_net(sk); |
| 1216 | 1223 | ||
| 1224 | rcu_read_lock(); | ||
| 1217 | read_lock_bh(&net->xfrm.xfrm_policy_lock); | 1225 | read_lock_bh(&net->xfrm.xfrm_policy_lock); |
| 1218 | if ((pol = sk->sk_policy[dir]) != NULL) { | 1226 | pol = rcu_dereference(sk->sk_policy[dir]); |
| 1227 | if (pol != NULL) { | ||
| 1219 | bool match = xfrm_selector_match(&pol->selector, fl, | 1228 | bool match = xfrm_selector_match(&pol->selector, fl, |
| 1220 | sk->sk_family); | 1229 | sk->sk_family); |
| 1221 | int err = 0; | 1230 | int err = 0; |
| @@ -1239,6 +1248,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, | |||
| 1239 | } | 1248 | } |
| 1240 | out: | 1249 | out: |
| 1241 | read_unlock_bh(&net->xfrm.xfrm_policy_lock); | 1250 | read_unlock_bh(&net->xfrm.xfrm_policy_lock); |
| 1251 | rcu_read_unlock(); | ||
| 1242 | return pol; | 1252 | return pol; |
| 1243 | } | 1253 | } |
| 1244 | 1254 | ||
| @@ -1307,13 +1317,14 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) | |||
| 1307 | #endif | 1317 | #endif |
| 1308 | 1318 | ||
| 1309 | write_lock_bh(&net->xfrm.xfrm_policy_lock); | 1319 | write_lock_bh(&net->xfrm.xfrm_policy_lock); |
| 1310 | old_pol = sk->sk_policy[dir]; | 1320 | old_pol = rcu_dereference_protected(sk->sk_policy[dir], |
| 1311 | sk->sk_policy[dir] = pol; | 1321 | lockdep_is_held(&net->xfrm.xfrm_policy_lock)); |
| 1312 | if (pol) { | 1322 | if (pol) { |
| 1313 | pol->curlft.add_time = get_seconds(); | 1323 | pol->curlft.add_time = get_seconds(); |
| 1314 | pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0); | 1324 | pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0); |
| 1315 | xfrm_sk_policy_link(pol, dir); | 1325 | xfrm_sk_policy_link(pol, dir); |
| 1316 | } | 1326 | } |
| 1327 | rcu_assign_pointer(sk->sk_policy[dir], pol); | ||
| 1317 | if (old_pol) { | 1328 | if (old_pol) { |
| 1318 | if (pol) | 1329 | if (pol) |
| 1319 | xfrm_policy_requeue(old_pol, pol); | 1330 | xfrm_policy_requeue(old_pol, pol); |
| @@ -1361,17 +1372,26 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) | |||
| 1361 | return newp; | 1372 | return newp; |
| 1362 | } | 1373 | } |
| 1363 | 1374 | ||
| 1364 | int __xfrm_sk_clone_policy(struct sock *sk) | 1375 | int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) |
| 1365 | { | 1376 | { |
| 1366 | struct xfrm_policy *p0 = sk->sk_policy[0], | 1377 | const struct xfrm_policy *p; |
| 1367 | *p1 = sk->sk_policy[1]; | 1378 | struct xfrm_policy *np; |
| 1379 | int i, ret = 0; | ||
| 1368 | 1380 | ||
| 1369 | sk->sk_policy[0] = sk->sk_policy[1] = NULL; | 1381 | rcu_read_lock(); |
| 1370 | if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL) | 1382 | for (i = 0; i < 2; i++) { |
| 1371 | return -ENOMEM; | 1383 | p = rcu_dereference(osk->sk_policy[i]); |
| 1372 | if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL) | 1384 | if (p) { |
| 1373 | return -ENOMEM; | 1385 | np = clone_policy(p, i); |
| 1374 | return 0; | 1386 | if (unlikely(!np)) { |
| 1387 | ret = -ENOMEM; | ||
| 1388 | break; | ||
| 1389 | } | ||
| 1390 | rcu_assign_pointer(sk->sk_policy[i], np); | ||
| 1391 | } | ||
| 1392 | } | ||
| 1393 | rcu_read_unlock(); | ||
| 1394 | return ret; | ||
| 1375 | } | 1395 | } |
| 1376 | 1396 | ||
| 1377 | static int | 1397 | static int |
| @@ -2198,6 +2218,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, | |||
| 2198 | xdst = NULL; | 2218 | xdst = NULL; |
| 2199 | route = NULL; | 2219 | route = NULL; |
| 2200 | 2220 | ||
| 2221 | sk = sk_const_to_full_sk(sk); | ||
| 2201 | if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { | 2222 | if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { |
| 2202 | num_pols = 1; | 2223 | num_pols = 1; |
| 2203 | pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); | 2224 | pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); |
| @@ -2477,6 +2498,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, | |||
| 2477 | } | 2498 | } |
| 2478 | 2499 | ||
| 2479 | pol = NULL; | 2500 | pol = NULL; |
| 2501 | sk = sk_to_full_sk(sk); | ||
| 2480 | if (sk && sk->sk_policy[dir]) { | 2502 | if (sk && sk->sk_policy[dir]) { |
| 2481 | pol = xfrm_sk_policy_lookup(sk, dir, &fl); | 2503 | pol = xfrm_sk_policy_lookup(sk, dir, &fl); |
| 2482 | if (IS_ERR(pol)) { | 2504 | if (IS_ERR(pol)) { |
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c index 84f5eb07a91b..afa6c5db9dcc 100644 --- a/sound/soc/codecs/es8328.c +++ b/sound/soc/codecs/es8328.c | |||
| @@ -85,7 +85,15 @@ static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 300, 0); | |||
| 85 | static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0); | 85 | static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0); |
| 86 | static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 300, 0); | 86 | static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 300, 0); |
| 87 | 87 | ||
| 88 | static const int deemph_settings[] = { 0, 32000, 44100, 48000 }; | 88 | static const struct { |
| 89 | int rate; | ||
| 90 | unsigned int val; | ||
| 91 | } deemph_settings[] = { | ||
| 92 | { 0, ES8328_DACCONTROL6_DEEMPH_OFF }, | ||
| 93 | { 32000, ES8328_DACCONTROL6_DEEMPH_32k }, | ||
| 94 | { 44100, ES8328_DACCONTROL6_DEEMPH_44_1k }, | ||
| 95 | { 48000, ES8328_DACCONTROL6_DEEMPH_48k }, | ||
| 96 | }; | ||
| 89 | 97 | ||
| 90 | static int es8328_set_deemph(struct snd_soc_codec *codec) | 98 | static int es8328_set_deemph(struct snd_soc_codec *codec) |
| 91 | { | 99 | { |
| @@ -97,21 +105,22 @@ static int es8328_set_deemph(struct snd_soc_codec *codec) | |||
| 97 | * rate. | 105 | * rate. |
| 98 | */ | 106 | */ |
| 99 | if (es8328->deemph) { | 107 | if (es8328->deemph) { |
| 100 | best = 1; | 108 | best = 0; |
| 101 | for (i = 2; i < ARRAY_SIZE(deemph_settings); i++) { | 109 | for (i = 1; i < ARRAY_SIZE(deemph_settings); i++) { |
| 102 | if (abs(deemph_settings[i] - es8328->playback_fs) < | 110 | if (abs(deemph_settings[i].rate - es8328->playback_fs) < |
| 103 | abs(deemph_settings[best] - es8328->playback_fs)) | 111 | abs(deemph_settings[best].rate - es8328->playback_fs)) |
| 104 | best = i; | 112 | best = i; |
| 105 | } | 113 | } |
| 106 | 114 | ||
| 107 | val = best << 1; | 115 | val = deemph_settings[best].val; |
| 108 | } else { | 116 | } else { |
| 109 | val = 0; | 117 | val = ES8328_DACCONTROL6_DEEMPH_OFF; |
| 110 | } | 118 | } |
| 111 | 119 | ||
| 112 | dev_dbg(codec->dev, "Set deemphasis %d\n", val); | 120 | dev_dbg(codec->dev, "Set deemphasis %d\n", val); |
| 113 | 121 | ||
| 114 | return snd_soc_update_bits(codec, ES8328_DACCONTROL6, 0x6, val); | 122 | return snd_soc_update_bits(codec, ES8328_DACCONTROL6, |
| 123 | ES8328_DACCONTROL6_DEEMPH_MASK, val); | ||
| 115 | } | 124 | } |
| 116 | 125 | ||
| 117 | static int es8328_get_deemph(struct snd_kcontrol *kcontrol, | 126 | static int es8328_get_deemph(struct snd_kcontrol *kcontrol, |
diff --git a/sound/soc/codecs/es8328.h b/sound/soc/codecs/es8328.h index cb36afe10c0e..156c748c89c7 100644 --- a/sound/soc/codecs/es8328.h +++ b/sound/soc/codecs/es8328.h | |||
| @@ -153,6 +153,7 @@ int es8328_probe(struct device *dev, struct regmap *regmap); | |||
| 153 | #define ES8328_DACCONTROL6_CLICKFREE (1 << 3) | 153 | #define ES8328_DACCONTROL6_CLICKFREE (1 << 3) |
| 154 | #define ES8328_DACCONTROL6_DAC_INVR (1 << 4) | 154 | #define ES8328_DACCONTROL6_DAC_INVR (1 << 4) |
| 155 | #define ES8328_DACCONTROL6_DAC_INVL (1 << 5) | 155 | #define ES8328_DACCONTROL6_DAC_INVL (1 << 5) |
| 156 | #define ES8328_DACCONTROL6_DEEMPH_MASK (3 << 6) | ||
| 156 | #define ES8328_DACCONTROL6_DEEMPH_OFF (0 << 6) | 157 | #define ES8328_DACCONTROL6_DEEMPH_OFF (0 << 6) |
| 157 | #define ES8328_DACCONTROL6_DEEMPH_32k (1 << 6) | 158 | #define ES8328_DACCONTROL6_DEEMPH_32k (1 << 6) |
| 158 | #define ES8328_DACCONTROL6_DEEMPH_44_1k (2 << 6) | 159 | #define ES8328_DACCONTROL6_DEEMPH_44_1k (2 << 6) |
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index f540f82b1f27..08b40460663c 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c | |||
| @@ -189,6 +189,7 @@ static int power_vag_event(struct snd_soc_dapm_widget *w, | |||
| 189 | case SND_SOC_DAPM_POST_PMU: | 189 | case SND_SOC_DAPM_POST_PMU: |
| 190 | snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, | 190 | snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER, |
| 191 | SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP); | 191 | SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP); |
| 192 | msleep(400); | ||
| 192 | break; | 193 | break; |
| 193 | 194 | ||
| 194 | case SND_SOC_DAPM_PRE_PMD: | 195 | case SND_SOC_DAPM_PRE_PMD: |
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c index 0a60677397b3..4c29bd2ae75c 100644 --- a/sound/soc/codecs/wm8974.c +++ b/sound/soc/codecs/wm8974.c | |||
| @@ -574,6 +574,7 @@ static const struct regmap_config wm8974_regmap = { | |||
| 574 | .max_register = WM8974_MONOMIX, | 574 | .max_register = WM8974_MONOMIX, |
| 575 | .reg_defaults = wm8974_reg_defaults, | 575 | .reg_defaults = wm8974_reg_defaults, |
| 576 | .num_reg_defaults = ARRAY_SIZE(wm8974_reg_defaults), | 576 | .num_reg_defaults = ARRAY_SIZE(wm8974_reg_defaults), |
| 577 | .cache_type = REGCACHE_FLAT, | ||
| 577 | }; | 578 | }; |
| 578 | 579 | ||
| 579 | static int wm8974_probe(struct snd_soc_codec *codec) | 580 | static int wm8974_probe(struct snd_soc_codec *codec) |
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index c1c9c2e3525b..2ccb8bccc9d4 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c | |||
| @@ -223,8 +223,8 @@ static void mcasp_start_tx(struct davinci_mcasp *mcasp) | |||
| 223 | 223 | ||
| 224 | /* wait for XDATA to be cleared */ | 224 | /* wait for XDATA to be cleared */ |
| 225 | cnt = 0; | 225 | cnt = 0; |
| 226 | while (!(mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) & | 226 | while ((mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) & XRDATA) && |
| 227 | ~XRDATA) && (cnt < 100000)) | 227 | (cnt < 100000)) |
| 228 | cnt++; | 228 | cnt++; |
| 229 | 229 | ||
| 230 | /* Release TX state machine */ | 230 | /* Release TX state machine */ |
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index ffd5f9acc849..08b460ba06ef 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c | |||
| @@ -505,6 +505,24 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd, | |||
| 505 | FSL_SAI_CSR_FR, FSL_SAI_CSR_FR); | 505 | FSL_SAI_CSR_FR, FSL_SAI_CSR_FR); |
| 506 | regmap_update_bits(sai->regmap, FSL_SAI_RCSR, | 506 | regmap_update_bits(sai->regmap, FSL_SAI_RCSR, |
| 507 | FSL_SAI_CSR_FR, FSL_SAI_CSR_FR); | 507 | FSL_SAI_CSR_FR, FSL_SAI_CSR_FR); |
| 508 | |||
| 509 | /* | ||
| 510 | * For sai master mode, after several open/close sai, | ||
| 511 | * there will be no frame clock, and can't recover | ||
| 512 | * anymore. Add software reset to fix this issue. | ||
| 513 | * This is a hardware bug, and will be fix in the | ||
| 514 | * next sai version. | ||
| 515 | */ | ||
| 516 | if (!sai->is_slave_mode) { | ||
| 517 | /* Software Reset for both Tx and Rx */ | ||
| 518 | regmap_write(sai->regmap, | ||
| 519 | FSL_SAI_TCSR, FSL_SAI_CSR_SR); | ||
| 520 | regmap_write(sai->regmap, | ||
| 521 | FSL_SAI_RCSR, FSL_SAI_CSR_SR); | ||
| 522 | /* Clear SR bit to finish the reset */ | ||
| 523 | regmap_write(sai->regmap, FSL_SAI_TCSR, 0); | ||
| 524 | regmap_write(sai->regmap, FSL_SAI_RCSR, 0); | ||
| 525 | } | ||
| 508 | } | 526 | } |
| 509 | break; | 527 | break; |
| 510 | default: | 528 | default: |
diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c index ac72ff5055bb..5a806da89f42 100644 --- a/sound/soc/rockchip/rockchip_spdif.c +++ b/sound/soc/rockchip/rockchip_spdif.c | |||
| @@ -152,8 +152,10 @@ static int rk_spdif_trigger(struct snd_pcm_substream *substream, | |||
| 152 | case SNDRV_PCM_TRIGGER_RESUME: | 152 | case SNDRV_PCM_TRIGGER_RESUME: |
| 153 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: | 153 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
| 154 | ret = regmap_update_bits(spdif->regmap, SPDIF_DMACR, | 154 | ret = regmap_update_bits(spdif->regmap, SPDIF_DMACR, |
| 155 | SPDIF_DMACR_TDE_ENABLE, | 155 | SPDIF_DMACR_TDE_ENABLE | |
| 156 | SPDIF_DMACR_TDE_ENABLE); | 156 | SPDIF_DMACR_TDL_MASK, |
| 157 | SPDIF_DMACR_TDE_ENABLE | | ||
| 158 | SPDIF_DMACR_TDL(16)); | ||
| 157 | 159 | ||
| 158 | if (ret != 0) | 160 | if (ret != 0) |
| 159 | return ret; | 161 | return ret; |
diff --git a/sound/soc/rockchip/rockchip_spdif.h b/sound/soc/rockchip/rockchip_spdif.h index 921b4095fb92..3ef12770ae12 100644 --- a/sound/soc/rockchip/rockchip_spdif.h +++ b/sound/soc/rockchip/rockchip_spdif.h | |||
| @@ -42,7 +42,7 @@ | |||
| 42 | 42 | ||
| 43 | #define SPDIF_DMACR_TDL_SHIFT 0 | 43 | #define SPDIF_DMACR_TDL_SHIFT 0 |
| 44 | #define SPDIF_DMACR_TDL(x) ((x) << SPDIF_DMACR_TDL_SHIFT) | 44 | #define SPDIF_DMACR_TDL(x) ((x) << SPDIF_DMACR_TDL_SHIFT) |
| 45 | #define SPDIF_DMACR_TDL_MASK (0x1f << SDPIF_DMACR_TDL_SHIFT) | 45 | #define SPDIF_DMACR_TDL_MASK (0x1f << SPDIF_DMACR_TDL_SHIFT) |
| 46 | 46 | ||
| 47 | /* | 47 | /* |
| 48 | * XFER | 48 | * XFER |
