diff options
author | Dave Airlie <airlied@redhat.com> | 2015-12-07 20:04:26 -0500 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2015-12-07 20:04:26 -0500 |
commit | e876b41ab074561d65f213bf5e0fc68cf5bc7380 (patch) | |
tree | 5bcbd7e4f7b6ac2a34e61011f8eb2cde90bf8603 | |
parent | 47c0fd72822159eb501411f975f5672a0bf7a7fb (diff) | |
parent | 527e9316f8ec44bd53d90fb9f611fa7ffff52bb9 (diff) |
Back merge tag 'v4.4-rc4' into drm-next
We've picked up a few conflicts and it would be nice
to resolve them before we move onwards.
357 files changed, 4884 insertions, 1597 deletions
diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt index f5a8ca29aff0..aeea50c84e92 100644 --- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt +++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt | |||
@@ -8,6 +8,11 @@ Required properties: | |||
8 | - phy-mode: See ethernet.txt file in the same directory | 8 | - phy-mode: See ethernet.txt file in the same directory |
9 | - clocks: a pointer to the reference clock for this device. | 9 | - clocks: a pointer to the reference clock for this device. |
10 | 10 | ||
11 | Optional properties: | ||
12 | - tx-csum-limit: maximum MTU supported by the port that allows TX checksum. | ||
13 | Value is presented in bytes. If not used, by default 1600B is set for | ||
14 | "marvell,armada-370-neta" and 9800B for others. | ||
15 | |||
11 | Example: | 16 | Example: |
12 | 17 | ||
13 | ethernet@d0070000 { | 18 | ethernet@d0070000 { |
@@ -15,6 +20,7 @@ ethernet@d0070000 { | |||
15 | reg = <0xd0070000 0x2500>; | 20 | reg = <0xd0070000 0x2500>; |
16 | interrupts = <8>; | 21 | interrupts = <8>; |
17 | clocks = <&gate_clk 4>; | 22 | clocks = <&gate_clk 4>; |
23 | tx-csum-limit = <9800>; | ||
18 | status = "okay"; | 24 | status = "okay"; |
19 | phy = <&phy0>; | 25 | phy = <&phy0>; |
20 | phy-mode = "rgmii-id"; | 26 | phy-mode = "rgmii-id"; |
diff --git a/MAINTAINERS b/MAINTAINERS index cba790b42f23..69c8a9c3289a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -318,7 +318,7 @@ M: Zhang Rui <rui.zhang@intel.com> | |||
318 | L: linux-acpi@vger.kernel.org | 318 | L: linux-acpi@vger.kernel.org |
319 | W: https://01.org/linux-acpi | 319 | W: https://01.org/linux-acpi |
320 | S: Supported | 320 | S: Supported |
321 | F: drivers/acpi/video.c | 321 | F: drivers/acpi/acpi_video.c |
322 | 322 | ||
323 | ACPI WMI DRIVER | 323 | ACPI WMI DRIVER |
324 | L: platform-driver-x86@vger.kernel.org | 324 | L: platform-driver-x86@vger.kernel.org |
@@ -1847,7 +1847,7 @@ S: Supported | |||
1847 | F: drivers/net/wireless/ath/ath6kl/ | 1847 | F: drivers/net/wireless/ath/ath6kl/ |
1848 | 1848 | ||
1849 | WILOCITY WIL6210 WIRELESS DRIVER | 1849 | WILOCITY WIL6210 WIRELESS DRIVER |
1850 | M: Vladimir Kondratiev <qca_vkondrat@qca.qualcomm.com> | 1850 | M: Maya Erez <qca_merez@qca.qualcomm.com> |
1851 | L: linux-wireless@vger.kernel.org | 1851 | L: linux-wireless@vger.kernel.org |
1852 | L: wil6210@qca.qualcomm.com | 1852 | L: wil6210@qca.qualcomm.com |
1853 | S: Supported | 1853 | S: Supported |
@@ -9427,8 +9427,10 @@ F: include/scsi/sg.h | |||
9427 | 9427 | ||
9428 | SCSI SUBSYSTEM | 9428 | SCSI SUBSYSTEM |
9429 | M: "James E.J. Bottomley" <JBottomley@odin.com> | 9429 | M: "James E.J. Bottomley" <JBottomley@odin.com> |
9430 | L: linux-scsi@vger.kernel.org | ||
9431 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git | 9430 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git |
9431 | M: "Martin K. Petersen" <martin.petersen@oracle.com> | ||
9432 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git | ||
9433 | L: linux-scsi@vger.kernel.org | ||
9432 | S: Maintained | 9434 | S: Maintained |
9433 | F: drivers/scsi/ | 9435 | F: drivers/scsi/ |
9434 | F: include/scsi/ | 9436 | F: include/scsi/ |
@@ -10903,9 +10905,9 @@ S: Maintained | |||
10903 | F: drivers/media/tuners/tua9001* | 10905 | F: drivers/media/tuners/tua9001* |
10904 | 10906 | ||
10905 | TULIP NETWORK DRIVERS | 10907 | TULIP NETWORK DRIVERS |
10906 | M: Grant Grundler <grundler@parisc-linux.org> | ||
10907 | L: netdev@vger.kernel.org | 10908 | L: netdev@vger.kernel.org |
10908 | S: Maintained | 10909 | L: linux-parisc@vger.kernel.org |
10910 | S: Orphan | ||
10909 | F: drivers/net/ethernet/dec/tulip/ | 10911 | F: drivers/net/ethernet/dec/tulip/ |
10910 | 10912 | ||
10911 | TUN/TAP driver | 10913 | TUN/TAP driver |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 4 | 2 | PATCHLEVEL = 4 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc3 | 4 | EXTRAVERSION = -rc4 |
5 | NAME = Blurry Fish Butt | 5 | NAME = Blurry Fish Butt |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index c6a0e9d7f1a9..e8b7f6726772 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi | |||
@@ -498,6 +498,7 @@ | |||
498 | reg = <0x70000 0x4000>; | 498 | reg = <0x70000 0x4000>; |
499 | interrupts-extended = <&mpic 8>; | 499 | interrupts-extended = <&mpic 8>; |
500 | clocks = <&gateclk 4>; | 500 | clocks = <&gateclk 4>; |
501 | tx-csum-limit = <9800>; | ||
501 | status = "disabled"; | 502 | status = "disabled"; |
502 | }; | 503 | }; |
503 | 504 | ||
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index a9c80a2ea1a7..3095df091ff8 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h | |||
@@ -28,6 +28,18 @@ | |||
28 | unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); | 28 | unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); |
29 | unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); | 29 | unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); |
30 | 30 | ||
31 | static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu, | ||
32 | u8 reg_num) | ||
33 | { | ||
34 | return *vcpu_reg(vcpu, reg_num); | ||
35 | } | ||
36 | |||
37 | static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, | ||
38 | unsigned long val) | ||
39 | { | ||
40 | *vcpu_reg(vcpu, reg_num) = val; | ||
41 | } | ||
42 | |||
31 | bool kvm_condition_valid(struct kvm_vcpu *vcpu); | 43 | bool kvm_condition_valid(struct kvm_vcpu *vcpu); |
32 | void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); | 44 | void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); |
33 | void kvm_inject_undefined(struct kvm_vcpu *vcpu); | 45 | void kvm_inject_undefined(struct kvm_vcpu *vcpu); |
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index 974b1c606d04..3a10c9f1d0a4 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c | |||
@@ -115,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
115 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, | 115 | trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, |
116 | data); | 116 | data); |
117 | data = vcpu_data_host_to_guest(vcpu, data, len); | 117 | data = vcpu_data_host_to_guest(vcpu, data, len); |
118 | *vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data; | 118 | vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data); |
119 | } | 119 | } |
120 | 120 | ||
121 | return 0; | 121 | return 0; |
@@ -186,7 +186,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, | |||
186 | rt = vcpu->arch.mmio_decode.rt; | 186 | rt = vcpu->arch.mmio_decode.rt; |
187 | 187 | ||
188 | if (is_write) { | 188 | if (is_write) { |
189 | data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len); | 189 | data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt), |
190 | len); | ||
190 | 191 | ||
191 | trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data); | 192 | trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data); |
192 | mmio_write_buf(data_buf, len, data); | 193 | mmio_write_buf(data_buf, len, data); |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 7dace909d5cf..61d96a645ff3 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -218,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, | |||
218 | kvm_tlb_flush_vmid_ipa(kvm, addr); | 218 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
219 | 219 | ||
220 | /* No need to invalidate the cache for device mappings */ | 220 | /* No need to invalidate the cache for device mappings */ |
221 | if (!kvm_is_device_pfn(__phys_to_pfn(addr))) | 221 | if (!kvm_is_device_pfn(pte_pfn(old_pte))) |
222 | kvm_flush_dcache_pte(old_pte); | 222 | kvm_flush_dcache_pte(old_pte); |
223 | 223 | ||
224 | put_page(virt_to_page(pte)); | 224 | put_page(virt_to_page(pte)); |
@@ -310,7 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, | |||
310 | 310 | ||
311 | pte = pte_offset_kernel(pmd, addr); | 311 | pte = pte_offset_kernel(pmd, addr); |
312 | do { | 312 | do { |
313 | if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr))) | 313 | if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte))) |
314 | kvm_flush_dcache_pte(*pte); | 314 | kvm_flush_dcache_pte(*pte); |
315 | } while (pte++, addr += PAGE_SIZE, addr != end); | 315 | } while (pte++, addr += PAGE_SIZE, addr != end); |
316 | } | 316 | } |
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c index 0b556968a6da..a9b3b905e661 100644 --- a/arch/arm/kvm/psci.c +++ b/arch/arm/kvm/psci.c | |||
@@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | |||
75 | unsigned long context_id; | 75 | unsigned long context_id; |
76 | phys_addr_t target_pc; | 76 | phys_addr_t target_pc; |
77 | 77 | ||
78 | cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK; | 78 | cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK; |
79 | if (vcpu_mode_is_32bit(source_vcpu)) | 79 | if (vcpu_mode_is_32bit(source_vcpu)) |
80 | cpu_id &= ~((u32) 0); | 80 | cpu_id &= ~((u32) 0); |
81 | 81 | ||
@@ -94,8 +94,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | |||
94 | return PSCI_RET_INVALID_PARAMS; | 94 | return PSCI_RET_INVALID_PARAMS; |
95 | } | 95 | } |
96 | 96 | ||
97 | target_pc = *vcpu_reg(source_vcpu, 2); | 97 | target_pc = vcpu_get_reg(source_vcpu, 2); |
98 | context_id = *vcpu_reg(source_vcpu, 3); | 98 | context_id = vcpu_get_reg(source_vcpu, 3); |
99 | 99 | ||
100 | kvm_reset_vcpu(vcpu); | 100 | kvm_reset_vcpu(vcpu); |
101 | 101 | ||
@@ -114,7 +114,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | |||
114 | * NOTE: We always update r0 (or x0) because for PSCI v0.1 | 114 | * NOTE: We always update r0 (or x0) because for PSCI v0.1 |
115 | * the general purpose registers are undefined upon CPU_ON. | 115 | * the general purpose registers are undefined upon CPU_ON. |
116 | */ | 116 | */ |
117 | *vcpu_reg(vcpu, 0) = context_id; | 117 | vcpu_set_reg(vcpu, 0, context_id); |
118 | vcpu->arch.power_off = false; | 118 | vcpu->arch.power_off = false; |
119 | smp_mb(); /* Make sure the above is visible */ | 119 | smp_mb(); /* Make sure the above is visible */ |
120 | 120 | ||
@@ -134,8 +134,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu) | |||
134 | struct kvm *kvm = vcpu->kvm; | 134 | struct kvm *kvm = vcpu->kvm; |
135 | struct kvm_vcpu *tmp; | 135 | struct kvm_vcpu *tmp; |
136 | 136 | ||
137 | target_affinity = *vcpu_reg(vcpu, 1); | 137 | target_affinity = vcpu_get_reg(vcpu, 1); |
138 | lowest_affinity_level = *vcpu_reg(vcpu, 2); | 138 | lowest_affinity_level = vcpu_get_reg(vcpu, 2); |
139 | 139 | ||
140 | /* Determine target affinity mask */ | 140 | /* Determine target affinity mask */ |
141 | target_affinity_mask = psci_affinity_mask(lowest_affinity_level); | 141 | target_affinity_mask = psci_affinity_mask(lowest_affinity_level); |
@@ -209,7 +209,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu) | |||
209 | static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) | 209 | static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) |
210 | { | 210 | { |
211 | int ret = 1; | 211 | int ret = 1; |
212 | unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0); | 212 | unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0); |
213 | unsigned long val; | 213 | unsigned long val; |
214 | 214 | ||
215 | switch (psci_fn) { | 215 | switch (psci_fn) { |
@@ -273,13 +273,13 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) | |||
273 | break; | 273 | break; |
274 | } | 274 | } |
275 | 275 | ||
276 | *vcpu_reg(vcpu, 0) = val; | 276 | vcpu_set_reg(vcpu, 0, val); |
277 | return ret; | 277 | return ret; |
278 | } | 278 | } |
279 | 279 | ||
280 | static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) | 280 | static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) |
281 | { | 281 | { |
282 | unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0); | 282 | unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0); |
283 | unsigned long val; | 283 | unsigned long val; |
284 | 284 | ||
285 | switch (psci_fn) { | 285 | switch (psci_fn) { |
@@ -295,7 +295,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) | |||
295 | break; | 295 | break; |
296 | } | 296 | } |
297 | 297 | ||
298 | *vcpu_reg(vcpu, 0) = val; | 298 | vcpu_set_reg(vcpu, 0, val); |
299 | return 1; | 299 | return 1; |
300 | } | 300 | } |
301 | 301 | ||
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 3ca894ecf699..25a40213bd9b 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h | |||
@@ -100,13 +100,21 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) | |||
100 | } | 100 | } |
101 | 101 | ||
102 | /* | 102 | /* |
103 | * vcpu_reg should always be passed a register number coming from a | 103 | * vcpu_get_reg and vcpu_set_reg should always be passed a register number |
104 | * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32 | 104 | * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on |
105 | * with banked registers. | 105 | * AArch32 with banked registers. |
106 | */ | 106 | */ |
107 | static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num) | 107 | static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu, |
108 | u8 reg_num) | ||
108 | { | 109 | { |
109 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num]; | 110 | return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num]; |
111 | } | ||
112 | |||
113 | static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, | ||
114 | unsigned long val) | ||
115 | { | ||
116 | if (reg_num != 31) | ||
117 | vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val; | ||
110 | } | 118 | } |
111 | 119 | ||
112 | /* Get vcpu SPSR for current mode */ | 120 | /* Get vcpu SPSR for current mode */ |
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 68a0759b1375..15f0477b0d2a 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c | |||
@@ -37,7 +37,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
37 | { | 37 | { |
38 | int ret; | 38 | int ret; |
39 | 39 | ||
40 | trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), | 40 | trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0), |
41 | kvm_vcpu_hvc_get_imm(vcpu)); | 41 | kvm_vcpu_hvc_get_imm(vcpu)); |
42 | 42 | ||
43 | ret = kvm_psci_call(vcpu); | 43 | ret = kvm_psci_call(vcpu); |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 87a64e8db04c..d2650e84faf2 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -78,7 +78,7 @@ static u32 get_ccsidr(u32 csselr) | |||
78 | * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). | 78 | * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). |
79 | */ | 79 | */ |
80 | static bool access_dcsw(struct kvm_vcpu *vcpu, | 80 | static bool access_dcsw(struct kvm_vcpu *vcpu, |
81 | const struct sys_reg_params *p, | 81 | struct sys_reg_params *p, |
82 | const struct sys_reg_desc *r) | 82 | const struct sys_reg_desc *r) |
83 | { | 83 | { |
84 | if (!p->is_write) | 84 | if (!p->is_write) |
@@ -94,21 +94,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu, | |||
94 | * sys_regs and leave it in complete control of the caches. | 94 | * sys_regs and leave it in complete control of the caches. |
95 | */ | 95 | */ |
96 | static bool access_vm_reg(struct kvm_vcpu *vcpu, | 96 | static bool access_vm_reg(struct kvm_vcpu *vcpu, |
97 | const struct sys_reg_params *p, | 97 | struct sys_reg_params *p, |
98 | const struct sys_reg_desc *r) | 98 | const struct sys_reg_desc *r) |
99 | { | 99 | { |
100 | unsigned long val; | ||
101 | bool was_enabled = vcpu_has_cache_enabled(vcpu); | 100 | bool was_enabled = vcpu_has_cache_enabled(vcpu); |
102 | 101 | ||
103 | BUG_ON(!p->is_write); | 102 | BUG_ON(!p->is_write); |
104 | 103 | ||
105 | val = *vcpu_reg(vcpu, p->Rt); | ||
106 | if (!p->is_aarch32) { | 104 | if (!p->is_aarch32) { |
107 | vcpu_sys_reg(vcpu, r->reg) = val; | 105 | vcpu_sys_reg(vcpu, r->reg) = p->regval; |
108 | } else { | 106 | } else { |
109 | if (!p->is_32bit) | 107 | if (!p->is_32bit) |
110 | vcpu_cp15_64_high(vcpu, r->reg) = val >> 32; | 108 | vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval); |
111 | vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; | 109 | vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval); |
112 | } | 110 | } |
113 | 111 | ||
114 | kvm_toggle_cache(vcpu, was_enabled); | 112 | kvm_toggle_cache(vcpu, was_enabled); |
@@ -122,22 +120,19 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu, | |||
122 | * for both AArch64 and AArch32 accesses. | 120 | * for both AArch64 and AArch32 accesses. |
123 | */ | 121 | */ |
124 | static bool access_gic_sgi(struct kvm_vcpu *vcpu, | 122 | static bool access_gic_sgi(struct kvm_vcpu *vcpu, |
125 | const struct sys_reg_params *p, | 123 | struct sys_reg_params *p, |
126 | const struct sys_reg_desc *r) | 124 | const struct sys_reg_desc *r) |
127 | { | 125 | { |
128 | u64 val; | ||
129 | |||
130 | if (!p->is_write) | 126 | if (!p->is_write) |
131 | return read_from_write_only(vcpu, p); | 127 | return read_from_write_only(vcpu, p); |
132 | 128 | ||
133 | val = *vcpu_reg(vcpu, p->Rt); | 129 | vgic_v3_dispatch_sgi(vcpu, p->regval); |
134 | vgic_v3_dispatch_sgi(vcpu, val); | ||
135 | 130 | ||
136 | return true; | 131 | return true; |
137 | } | 132 | } |
138 | 133 | ||
139 | static bool trap_raz_wi(struct kvm_vcpu *vcpu, | 134 | static bool trap_raz_wi(struct kvm_vcpu *vcpu, |
140 | const struct sys_reg_params *p, | 135 | struct sys_reg_params *p, |
141 | const struct sys_reg_desc *r) | 136 | const struct sys_reg_desc *r) |
142 | { | 137 | { |
143 | if (p->is_write) | 138 | if (p->is_write) |
@@ -147,19 +142,19 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu, | |||
147 | } | 142 | } |
148 | 143 | ||
149 | static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, | 144 | static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, |
150 | const struct sys_reg_params *p, | 145 | struct sys_reg_params *p, |
151 | const struct sys_reg_desc *r) | 146 | const struct sys_reg_desc *r) |
152 | { | 147 | { |
153 | if (p->is_write) { | 148 | if (p->is_write) { |
154 | return ignore_write(vcpu, p); | 149 | return ignore_write(vcpu, p); |
155 | } else { | 150 | } else { |
156 | *vcpu_reg(vcpu, p->Rt) = (1 << 3); | 151 | p->regval = (1 << 3); |
157 | return true; | 152 | return true; |
158 | } | 153 | } |
159 | } | 154 | } |
160 | 155 | ||
161 | static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, | 156 | static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, |
162 | const struct sys_reg_params *p, | 157 | struct sys_reg_params *p, |
163 | const struct sys_reg_desc *r) | 158 | const struct sys_reg_desc *r) |
164 | { | 159 | { |
165 | if (p->is_write) { | 160 | if (p->is_write) { |
@@ -167,7 +162,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, | |||
167 | } else { | 162 | } else { |
168 | u32 val; | 163 | u32 val; |
169 | asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val)); | 164 | asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val)); |
170 | *vcpu_reg(vcpu, p->Rt) = val; | 165 | p->regval = val; |
171 | return true; | 166 | return true; |
172 | } | 167 | } |
173 | } | 168 | } |
@@ -200,17 +195,17 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, | |||
200 | * now use the debug registers. | 195 | * now use the debug registers. |
201 | */ | 196 | */ |
202 | static bool trap_debug_regs(struct kvm_vcpu *vcpu, | 197 | static bool trap_debug_regs(struct kvm_vcpu *vcpu, |
203 | const struct sys_reg_params *p, | 198 | struct sys_reg_params *p, |
204 | const struct sys_reg_desc *r) | 199 | const struct sys_reg_desc *r) |
205 | { | 200 | { |
206 | if (p->is_write) { | 201 | if (p->is_write) { |
207 | vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); | 202 | vcpu_sys_reg(vcpu, r->reg) = p->regval; |
208 | vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; | 203 | vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; |
209 | } else { | 204 | } else { |
210 | *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg); | 205 | p->regval = vcpu_sys_reg(vcpu, r->reg); |
211 | } | 206 | } |
212 | 207 | ||
213 | trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt)); | 208 | trace_trap_reg(__func__, r->reg, p->is_write, p->regval); |
214 | 209 | ||
215 | return true; | 210 | return true; |
216 | } | 211 | } |
@@ -225,10 +220,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu, | |||
225 | * hyp.S code switches between host and guest values in future. | 220 | * hyp.S code switches between host and guest values in future. |
226 | */ | 221 | */ |
227 | static inline void reg_to_dbg(struct kvm_vcpu *vcpu, | 222 | static inline void reg_to_dbg(struct kvm_vcpu *vcpu, |
228 | const struct sys_reg_params *p, | 223 | struct sys_reg_params *p, |
229 | u64 *dbg_reg) | 224 | u64 *dbg_reg) |
230 | { | 225 | { |
231 | u64 val = *vcpu_reg(vcpu, p->Rt); | 226 | u64 val = p->regval; |
232 | 227 | ||
233 | if (p->is_32bit) { | 228 | if (p->is_32bit) { |
234 | val &= 0xffffffffUL; | 229 | val &= 0xffffffffUL; |
@@ -240,19 +235,16 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu, | |||
240 | } | 235 | } |
241 | 236 | ||
242 | static inline void dbg_to_reg(struct kvm_vcpu *vcpu, | 237 | static inline void dbg_to_reg(struct kvm_vcpu *vcpu, |
243 | const struct sys_reg_params *p, | 238 | struct sys_reg_params *p, |
244 | u64 *dbg_reg) | 239 | u64 *dbg_reg) |
245 | { | 240 | { |
246 | u64 val = *dbg_reg; | 241 | p->regval = *dbg_reg; |
247 | |||
248 | if (p->is_32bit) | 242 | if (p->is_32bit) |
249 | val &= 0xffffffffUL; | 243 | p->regval &= 0xffffffffUL; |
250 | |||
251 | *vcpu_reg(vcpu, p->Rt) = val; | ||
252 | } | 244 | } |
253 | 245 | ||
254 | static inline bool trap_bvr(struct kvm_vcpu *vcpu, | 246 | static inline bool trap_bvr(struct kvm_vcpu *vcpu, |
255 | const struct sys_reg_params *p, | 247 | struct sys_reg_params *p, |
256 | const struct sys_reg_desc *rd) | 248 | const struct sys_reg_desc *rd) |
257 | { | 249 | { |
258 | u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; | 250 | u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; |
@@ -294,7 +286,7 @@ static inline void reset_bvr(struct kvm_vcpu *vcpu, | |||
294 | } | 286 | } |
295 | 287 | ||
296 | static inline bool trap_bcr(struct kvm_vcpu *vcpu, | 288 | static inline bool trap_bcr(struct kvm_vcpu *vcpu, |
297 | const struct sys_reg_params *p, | 289 | struct sys_reg_params *p, |
298 | const struct sys_reg_desc *rd) | 290 | const struct sys_reg_desc *rd) |
299 | { | 291 | { |
300 | u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; | 292 | u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; |
@@ -337,7 +329,7 @@ static inline void reset_bcr(struct kvm_vcpu *vcpu, | |||
337 | } | 329 | } |
338 | 330 | ||
339 | static inline bool trap_wvr(struct kvm_vcpu *vcpu, | 331 | static inline bool trap_wvr(struct kvm_vcpu *vcpu, |
340 | const struct sys_reg_params *p, | 332 | struct sys_reg_params *p, |
341 | const struct sys_reg_desc *rd) | 333 | const struct sys_reg_desc *rd) |
342 | { | 334 | { |
343 | u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; | 335 | u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; |
@@ -380,7 +372,7 @@ static inline void reset_wvr(struct kvm_vcpu *vcpu, | |||
380 | } | 372 | } |
381 | 373 | ||
382 | static inline bool trap_wcr(struct kvm_vcpu *vcpu, | 374 | static inline bool trap_wcr(struct kvm_vcpu *vcpu, |
383 | const struct sys_reg_params *p, | 375 | struct sys_reg_params *p, |
384 | const struct sys_reg_desc *rd) | 376 | const struct sys_reg_desc *rd) |
385 | { | 377 | { |
386 | u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; | 378 | u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; |
@@ -687,7 +679,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { | |||
687 | }; | 679 | }; |
688 | 680 | ||
689 | static bool trap_dbgidr(struct kvm_vcpu *vcpu, | 681 | static bool trap_dbgidr(struct kvm_vcpu *vcpu, |
690 | const struct sys_reg_params *p, | 682 | struct sys_reg_params *p, |
691 | const struct sys_reg_desc *r) | 683 | const struct sys_reg_desc *r) |
692 | { | 684 | { |
693 | if (p->is_write) { | 685 | if (p->is_write) { |
@@ -697,23 +689,23 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu, | |||
697 | u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1); | 689 | u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1); |
698 | u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT); | 690 | u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT); |
699 | 691 | ||
700 | *vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | | 692 | p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | |
701 | (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | | 693 | (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | |
702 | (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) | | 694 | (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
703 | (6 << 16) | (el3 << 14) | (el3 << 12)); | 695 | | (6 << 16) | (el3 << 14) | (el3 << 12)); |
704 | return true; | 696 | return true; |
705 | } | 697 | } |
706 | } | 698 | } |
707 | 699 | ||
708 | static bool trap_debug32(struct kvm_vcpu *vcpu, | 700 | static bool trap_debug32(struct kvm_vcpu *vcpu, |
709 | const struct sys_reg_params *p, | 701 | struct sys_reg_params *p, |
710 | const struct sys_reg_desc *r) | 702 | const struct sys_reg_desc *r) |
711 | { | 703 | { |
712 | if (p->is_write) { | 704 | if (p->is_write) { |
713 | vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); | 705 | vcpu_cp14(vcpu, r->reg) = p->regval; |
714 | vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; | 706 | vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; |
715 | } else { | 707 | } else { |
716 | *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg); | 708 | p->regval = vcpu_cp14(vcpu, r->reg); |
717 | } | 709 | } |
718 | 710 | ||
719 | return true; | 711 | return true; |
@@ -731,7 +723,7 @@ static bool trap_debug32(struct kvm_vcpu *vcpu, | |||
731 | */ | 723 | */ |
732 | 724 | ||
733 | static inline bool trap_xvr(struct kvm_vcpu *vcpu, | 725 | static inline bool trap_xvr(struct kvm_vcpu *vcpu, |
734 | const struct sys_reg_params *p, | 726 | struct sys_reg_params *p, |
735 | const struct sys_reg_desc *rd) | 727 | const struct sys_reg_desc *rd) |
736 | { | 728 | { |
737 | u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; | 729 | u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; |
@@ -740,12 +732,12 @@ static inline bool trap_xvr(struct kvm_vcpu *vcpu, | |||
740 | u64 val = *dbg_reg; | 732 | u64 val = *dbg_reg; |
741 | 733 | ||
742 | val &= 0xffffffffUL; | 734 | val &= 0xffffffffUL; |
743 | val |= *vcpu_reg(vcpu, p->Rt) << 32; | 735 | val |= p->regval << 32; |
744 | *dbg_reg = val; | 736 | *dbg_reg = val; |
745 | 737 | ||
746 | vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; | 738 | vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; |
747 | } else { | 739 | } else { |
748 | *vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32; | 740 | p->regval = *dbg_reg >> 32; |
749 | } | 741 | } |
750 | 742 | ||
751 | trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); | 743 | trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); |
@@ -991,7 +983,7 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
991 | * Return 0 if the access has been handled, and -1 if not. | 983 | * Return 0 if the access has been handled, and -1 if not. |
992 | */ | 984 | */ |
993 | static int emulate_cp(struct kvm_vcpu *vcpu, | 985 | static int emulate_cp(struct kvm_vcpu *vcpu, |
994 | const struct sys_reg_params *params, | 986 | struct sys_reg_params *params, |
995 | const struct sys_reg_desc *table, | 987 | const struct sys_reg_desc *table, |
996 | size_t num) | 988 | size_t num) |
997 | { | 989 | { |
@@ -1062,12 +1054,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, | |||
1062 | { | 1054 | { |
1063 | struct sys_reg_params params; | 1055 | struct sys_reg_params params; |
1064 | u32 hsr = kvm_vcpu_get_hsr(vcpu); | 1056 | u32 hsr = kvm_vcpu_get_hsr(vcpu); |
1057 | int Rt = (hsr >> 5) & 0xf; | ||
1065 | int Rt2 = (hsr >> 10) & 0xf; | 1058 | int Rt2 = (hsr >> 10) & 0xf; |
1066 | 1059 | ||
1067 | params.is_aarch32 = true; | 1060 | params.is_aarch32 = true; |
1068 | params.is_32bit = false; | 1061 | params.is_32bit = false; |
1069 | params.CRm = (hsr >> 1) & 0xf; | 1062 | params.CRm = (hsr >> 1) & 0xf; |
1070 | params.Rt = (hsr >> 5) & 0xf; | ||
1071 | params.is_write = ((hsr & 1) == 0); | 1063 | params.is_write = ((hsr & 1) == 0); |
1072 | 1064 | ||
1073 | params.Op0 = 0; | 1065 | params.Op0 = 0; |
@@ -1076,15 +1068,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, | |||
1076 | params.CRn = 0; | 1068 | params.CRn = 0; |
1077 | 1069 | ||
1078 | /* | 1070 | /* |
1079 | * Massive hack here. Store Rt2 in the top 32bits so we only | 1071 | * Make a 64-bit value out of Rt and Rt2. As we use the same trap |
1080 | * have one register to deal with. As we use the same trap | ||
1081 | * backends between AArch32 and AArch64, we get away with it. | 1072 | * backends between AArch32 and AArch64, we get away with it. |
1082 | */ | 1073 | */ |
1083 | if (params.is_write) { | 1074 | if (params.is_write) { |
1084 | u64 val = *vcpu_reg(vcpu, params.Rt); | 1075 | params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff; |
1085 | val &= 0xffffffff; | 1076 | params.regval |= vcpu_get_reg(vcpu, Rt2) << 32; |
1086 | val |= *vcpu_reg(vcpu, Rt2) << 32; | ||
1087 | *vcpu_reg(vcpu, params.Rt) = val; | ||
1088 | } | 1077 | } |
1089 | 1078 | ||
1090 | if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific)) | 1079 | if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific)) |
@@ -1095,11 +1084,10 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, | |||
1095 | unhandled_cp_access(vcpu, ¶ms); | 1084 | unhandled_cp_access(vcpu, ¶ms); |
1096 | 1085 | ||
1097 | out: | 1086 | out: |
1098 | /* Do the opposite hack for the read side */ | 1087 | /* Split up the value between registers for the read side */ |
1099 | if (!params.is_write) { | 1088 | if (!params.is_write) { |
1100 | u64 val = *vcpu_reg(vcpu, params.Rt); | 1089 | vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval)); |
1101 | val >>= 32; | 1090 | vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval)); |
1102 | *vcpu_reg(vcpu, Rt2) = val; | ||
1103 | } | 1091 | } |
1104 | 1092 | ||
1105 | return 1; | 1093 | return 1; |
@@ -1118,21 +1106,24 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, | |||
1118 | { | 1106 | { |
1119 | struct sys_reg_params params; | 1107 | struct sys_reg_params params; |
1120 | u32 hsr = kvm_vcpu_get_hsr(vcpu); | 1108 | u32 hsr = kvm_vcpu_get_hsr(vcpu); |
1109 | int Rt = (hsr >> 5) & 0xf; | ||
1121 | 1110 | ||
1122 | params.is_aarch32 = true; | 1111 | params.is_aarch32 = true; |
1123 | params.is_32bit = true; | 1112 | params.is_32bit = true; |
1124 | params.CRm = (hsr >> 1) & 0xf; | 1113 | params.CRm = (hsr >> 1) & 0xf; |
1125 | params.Rt = (hsr >> 5) & 0xf; | 1114 | params.regval = vcpu_get_reg(vcpu, Rt); |
1126 | params.is_write = ((hsr & 1) == 0); | 1115 | params.is_write = ((hsr & 1) == 0); |
1127 | params.CRn = (hsr >> 10) & 0xf; | 1116 | params.CRn = (hsr >> 10) & 0xf; |
1128 | params.Op0 = 0; | 1117 | params.Op0 = 0; |
1129 | params.Op1 = (hsr >> 14) & 0x7; | 1118 | params.Op1 = (hsr >> 14) & 0x7; |
1130 | params.Op2 = (hsr >> 17) & 0x7; | 1119 | params.Op2 = (hsr >> 17) & 0x7; |
1131 | 1120 | ||
1132 | if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific)) | 1121 | if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific) || |
1133 | return 1; | 1122 | !emulate_cp(vcpu, ¶ms, global, nr_global)) { |
1134 | if (!emulate_cp(vcpu, ¶ms, global, nr_global)) | 1123 | if (!params.is_write) |
1124 | vcpu_set_reg(vcpu, Rt, params.regval); | ||
1135 | return 1; | 1125 | return 1; |
1126 | } | ||
1136 | 1127 | ||
1137 | unhandled_cp_access(vcpu, ¶ms); | 1128 | unhandled_cp_access(vcpu, ¶ms); |
1138 | return 1; | 1129 | return 1; |
@@ -1175,7 +1166,7 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
1175 | } | 1166 | } |
1176 | 1167 | ||
1177 | static int emulate_sys_reg(struct kvm_vcpu *vcpu, | 1168 | static int emulate_sys_reg(struct kvm_vcpu *vcpu, |
1178 | const struct sys_reg_params *params) | 1169 | struct sys_reg_params *params) |
1179 | { | 1170 | { |
1180 | size_t num; | 1171 | size_t num; |
1181 | const struct sys_reg_desc *table, *r; | 1172 | const struct sys_reg_desc *table, *r; |
@@ -1230,6 +1221,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
1230 | { | 1221 | { |
1231 | struct sys_reg_params params; | 1222 | struct sys_reg_params params; |
1232 | unsigned long esr = kvm_vcpu_get_hsr(vcpu); | 1223 | unsigned long esr = kvm_vcpu_get_hsr(vcpu); |
1224 | int Rt = (esr >> 5) & 0x1f; | ||
1225 | int ret; | ||
1233 | 1226 | ||
1234 | trace_kvm_handle_sys_reg(esr); | 1227 | trace_kvm_handle_sys_reg(esr); |
1235 | 1228 | ||
@@ -1240,10 +1233,14 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
1240 | params.CRn = (esr >> 10) & 0xf; | 1233 | params.CRn = (esr >> 10) & 0xf; |
1241 | params.CRm = (esr >> 1) & 0xf; | 1234 | params.CRm = (esr >> 1) & 0xf; |
1242 | params.Op2 = (esr >> 17) & 0x7; | 1235 | params.Op2 = (esr >> 17) & 0x7; |
1243 | params.Rt = (esr >> 5) & 0x1f; | 1236 | params.regval = vcpu_get_reg(vcpu, Rt); |
1244 | params.is_write = !(esr & 1); | 1237 | params.is_write = !(esr & 1); |
1245 | 1238 | ||
1246 | return emulate_sys_reg(vcpu, ¶ms); | 1239 | ret = emulate_sys_reg(vcpu, ¶ms); |
1240 | |||
1241 | if (!params.is_write) | ||
1242 | vcpu_set_reg(vcpu, Rt, params.regval); | ||
1243 | return ret; | ||
1247 | } | 1244 | } |
1248 | 1245 | ||
1249 | /****************************************************************************** | 1246 | /****************************************************************************** |
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h index eaa324e4db4d..dbbb01cfbee9 100644 --- a/arch/arm64/kvm/sys_regs.h +++ b/arch/arm64/kvm/sys_regs.h | |||
@@ -28,7 +28,7 @@ struct sys_reg_params { | |||
28 | u8 CRn; | 28 | u8 CRn; |
29 | u8 CRm; | 29 | u8 CRm; |
30 | u8 Op2; | 30 | u8 Op2; |
31 | u8 Rt; | 31 | u64 regval; |
32 | bool is_write; | 32 | bool is_write; |
33 | bool is_aarch32; | 33 | bool is_aarch32; |
34 | bool is_32bit; /* Only valid if is_aarch32 is true */ | 34 | bool is_32bit; /* Only valid if is_aarch32 is true */ |
@@ -44,7 +44,7 @@ struct sys_reg_desc { | |||
44 | 44 | ||
45 | /* Trapped access from guest, if non-NULL. */ | 45 | /* Trapped access from guest, if non-NULL. */ |
46 | bool (*access)(struct kvm_vcpu *, | 46 | bool (*access)(struct kvm_vcpu *, |
47 | const struct sys_reg_params *, | 47 | struct sys_reg_params *, |
48 | const struct sys_reg_desc *); | 48 | const struct sys_reg_desc *); |
49 | 49 | ||
50 | /* Initialization for vcpu. */ | 50 | /* Initialization for vcpu. */ |
@@ -77,9 +77,9 @@ static inline bool ignore_write(struct kvm_vcpu *vcpu, | |||
77 | } | 77 | } |
78 | 78 | ||
79 | static inline bool read_zero(struct kvm_vcpu *vcpu, | 79 | static inline bool read_zero(struct kvm_vcpu *vcpu, |
80 | const struct sys_reg_params *p) | 80 | struct sys_reg_params *p) |
81 | { | 81 | { |
82 | *vcpu_reg(vcpu, p->Rt) = 0; | 82 | p->regval = 0; |
83 | return true; | 83 | return true; |
84 | } | 84 | } |
85 | 85 | ||
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c index 1e4576824165..ed90578fa120 100644 --- a/arch/arm64/kvm/sys_regs_generic_v8.c +++ b/arch/arm64/kvm/sys_regs_generic_v8.c | |||
@@ -31,13 +31,13 @@ | |||
31 | #include "sys_regs.h" | 31 | #include "sys_regs.h" |
32 | 32 | ||
33 | static bool access_actlr(struct kvm_vcpu *vcpu, | 33 | static bool access_actlr(struct kvm_vcpu *vcpu, |
34 | const struct sys_reg_params *p, | 34 | struct sys_reg_params *p, |
35 | const struct sys_reg_desc *r) | 35 | const struct sys_reg_desc *r) |
36 | { | 36 | { |
37 | if (p->is_write) | 37 | if (p->is_write) |
38 | return ignore_write(vcpu, p); | 38 | return ignore_write(vcpu, p); |
39 | 39 | ||
40 | *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1); | 40 | p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1); |
41 | return true; | 41 | return true; |
42 | } | 42 | } |
43 | 43 | ||
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index d6a53ef2350b..b162ad70effc 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c | |||
@@ -139,6 +139,12 @@ static inline int epilogue_offset(const struct jit_ctx *ctx) | |||
139 | /* Stack must be multiples of 16B */ | 139 | /* Stack must be multiples of 16B */ |
140 | #define STACK_ALIGN(sz) (((sz) + 15) & ~15) | 140 | #define STACK_ALIGN(sz) (((sz) + 15) & ~15) |
141 | 141 | ||
142 | #define _STACK_SIZE \ | ||
143 | (MAX_BPF_STACK \ | ||
144 | + 4 /* extra for skb_copy_bits buffer */) | ||
145 | |||
146 | #define STACK_SIZE STACK_ALIGN(_STACK_SIZE) | ||
147 | |||
142 | static void build_prologue(struct jit_ctx *ctx) | 148 | static void build_prologue(struct jit_ctx *ctx) |
143 | { | 149 | { |
144 | const u8 r6 = bpf2a64[BPF_REG_6]; | 150 | const u8 r6 = bpf2a64[BPF_REG_6]; |
@@ -150,10 +156,6 @@ static void build_prologue(struct jit_ctx *ctx) | |||
150 | const u8 rx = bpf2a64[BPF_REG_X]; | 156 | const u8 rx = bpf2a64[BPF_REG_X]; |
151 | const u8 tmp1 = bpf2a64[TMP_REG_1]; | 157 | const u8 tmp1 = bpf2a64[TMP_REG_1]; |
152 | const u8 tmp2 = bpf2a64[TMP_REG_2]; | 158 | const u8 tmp2 = bpf2a64[TMP_REG_2]; |
153 | int stack_size = MAX_BPF_STACK; | ||
154 | |||
155 | stack_size += 4; /* extra for skb_copy_bits buffer */ | ||
156 | stack_size = STACK_ALIGN(stack_size); | ||
157 | 159 | ||
158 | /* | 160 | /* |
159 | * BPF prog stack layout | 161 | * BPF prog stack layout |
@@ -165,12 +167,13 @@ static void build_prologue(struct jit_ctx *ctx) | |||
165 | * | ... | callee saved registers | 167 | * | ... | callee saved registers |
166 | * +-----+ | 168 | * +-----+ |
167 | * | | x25/x26 | 169 | * | | x25/x26 |
168 | * BPF fp register => -80:+-----+ | 170 | * BPF fp register => -80:+-----+ <= (BPF_FP) |
169 | * | | | 171 | * | | |
170 | * | ... | BPF prog stack | 172 | * | ... | BPF prog stack |
171 | * | | | 173 | * | | |
172 | * | | | 174 | * +-----+ <= (BPF_FP - MAX_BPF_STACK) |
173 | * current A64_SP => +-----+ | 175 | * |RSVD | JIT scratchpad |
176 | * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE) | ||
174 | * | | | 177 | * | | |
175 | * | ... | Function call stack | 178 | * | ... | Function call stack |
176 | * | | | 179 | * | | |
@@ -196,7 +199,7 @@ static void build_prologue(struct jit_ctx *ctx) | |||
196 | emit(A64_MOV(1, fp, A64_SP), ctx); | 199 | emit(A64_MOV(1, fp, A64_SP), ctx); |
197 | 200 | ||
198 | /* Set up function call stack */ | 201 | /* Set up function call stack */ |
199 | emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx); | 202 | emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx); |
200 | 203 | ||
201 | /* Clear registers A and X */ | 204 | /* Clear registers A and X */ |
202 | emit_a64_mov_i64(ra, 0, ctx); | 205 | emit_a64_mov_i64(ra, 0, ctx); |
@@ -213,13 +216,9 @@ static void build_epilogue(struct jit_ctx *ctx) | |||
213 | const u8 fp = bpf2a64[BPF_REG_FP]; | 216 | const u8 fp = bpf2a64[BPF_REG_FP]; |
214 | const u8 tmp1 = bpf2a64[TMP_REG_1]; | 217 | const u8 tmp1 = bpf2a64[TMP_REG_1]; |
215 | const u8 tmp2 = bpf2a64[TMP_REG_2]; | 218 | const u8 tmp2 = bpf2a64[TMP_REG_2]; |
216 | int stack_size = MAX_BPF_STACK; | ||
217 | |||
218 | stack_size += 4; /* extra for skb_copy_bits buffer */ | ||
219 | stack_size = STACK_ALIGN(stack_size); | ||
220 | 219 | ||
221 | /* We're done with BPF stack */ | 220 | /* We're done with BPF stack */ |
222 | emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx); | 221 | emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx); |
223 | 222 | ||
224 | /* Restore fs (x25) and x26 */ | 223 | /* Restore fs (x25) and x26 */ |
225 | emit(A64_POP(fp, A64_R(26), A64_SP), ctx); | 224 | emit(A64_POP(fp, A64_R(26), A64_SP), ctx); |
@@ -591,7 +590,25 @@ emit_cond_jmp: | |||
591 | case BPF_ST | BPF_MEM | BPF_H: | 590 | case BPF_ST | BPF_MEM | BPF_H: |
592 | case BPF_ST | BPF_MEM | BPF_B: | 591 | case BPF_ST | BPF_MEM | BPF_B: |
593 | case BPF_ST | BPF_MEM | BPF_DW: | 592 | case BPF_ST | BPF_MEM | BPF_DW: |
594 | goto notyet; | 593 | /* Load imm to a register then store it */ |
594 | ctx->tmp_used = 1; | ||
595 | emit_a64_mov_i(1, tmp2, off, ctx); | ||
596 | emit_a64_mov_i(1, tmp, imm, ctx); | ||
597 | switch (BPF_SIZE(code)) { | ||
598 | case BPF_W: | ||
599 | emit(A64_STR32(tmp, dst, tmp2), ctx); | ||
600 | break; | ||
601 | case BPF_H: | ||
602 | emit(A64_STRH(tmp, dst, tmp2), ctx); | ||
603 | break; | ||
604 | case BPF_B: | ||
605 | emit(A64_STRB(tmp, dst, tmp2), ctx); | ||
606 | break; | ||
607 | case BPF_DW: | ||
608 | emit(A64_STR64(tmp, dst, tmp2), ctx); | ||
609 | break; | ||
610 | } | ||
611 | break; | ||
595 | 612 | ||
596 | /* STX: *(size *)(dst + off) = src */ | 613 | /* STX: *(size *)(dst + off) = src */ |
597 | case BPF_STX | BPF_MEM | BPF_W: | 614 | case BPF_STX | BPF_MEM | BPF_W: |
@@ -658,7 +675,7 @@ emit_cond_jmp: | |||
658 | return -EINVAL; | 675 | return -EINVAL; |
659 | } | 676 | } |
660 | emit_a64_mov_i64(r3, size, ctx); | 677 | emit_a64_mov_i64(r3, size, ctx); |
661 | emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx); | 678 | emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx); |
662 | emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx); | 679 | emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx); |
663 | emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); | 680 | emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); |
664 | emit(A64_MOV(1, A64_FP, A64_SP), ctx); | 681 | emit(A64_MOV(1, A64_FP, A64_SP), ctx); |
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index 4434b54e1d87..78ae5552fdb8 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config MN10300 | 1 | config MN10300 |
2 | def_bool y | 2 | def_bool y |
3 | select HAVE_OPROFILE | 3 | select HAVE_OPROFILE |
4 | select HAVE_UID16 | ||
4 | select GENERIC_IRQ_SHOW | 5 | select GENERIC_IRQ_SHOW |
5 | select ARCH_WANT_IPC_PARSE_VERSION | 6 | select ARCH_WANT_IPC_PARSE_VERSION |
6 | select HAVE_ARCH_TRACEHOOK | 7 | select HAVE_ARCH_TRACEHOOK |
@@ -37,9 +38,6 @@ config HIGHMEM | |||
37 | config NUMA | 38 | config NUMA |
38 | def_bool n | 39 | def_bool n |
39 | 40 | ||
40 | config UID16 | ||
41 | def_bool y | ||
42 | |||
43 | config RWSEM_GENERIC_SPINLOCK | 41 | config RWSEM_GENERIC_SPINLOCK |
44 | def_bool y | 42 | def_bool y |
45 | 43 | ||
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h index 0033e96c3f09..9011a88353de 100644 --- a/arch/x86/boot/boot.h +++ b/arch/x86/boot/boot.h | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <stdarg.h> | 23 | #include <stdarg.h> |
24 | #include <linux/types.h> | 24 | #include <linux/types.h> |
25 | #include <linux/edd.h> | 25 | #include <linux/edd.h> |
26 | #include <asm/boot.h> | ||
27 | #include <asm/setup.h> | 26 | #include <asm/setup.h> |
28 | #include "bitops.h" | 27 | #include "bitops.h" |
29 | #include "ctype.h" | 28 | #include "ctype.h" |
diff --git a/arch/x86/boot/video-mode.c b/arch/x86/boot/video-mode.c index aa8a96b052e3..95c7a818c0ed 100644 --- a/arch/x86/boot/video-mode.c +++ b/arch/x86/boot/video-mode.c | |||
@@ -19,6 +19,8 @@ | |||
19 | #include "video.h" | 19 | #include "video.h" |
20 | #include "vesa.h" | 20 | #include "vesa.h" |
21 | 21 | ||
22 | #include <uapi/asm/boot.h> | ||
23 | |||
22 | /* | 24 | /* |
23 | * Common variables | 25 | * Common variables |
24 | */ | 26 | */ |
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c index 05111bb8d018..77780e386e9b 100644 --- a/arch/x86/boot/video.c +++ b/arch/x86/boot/video.c | |||
@@ -13,6 +13,8 @@ | |||
13 | * Select video mode | 13 | * Select video mode |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <uapi/asm/boot.h> | ||
17 | |||
16 | #include "boot.h" | 18 | #include "boot.h" |
17 | #include "video.h" | 19 | #include "video.h" |
18 | #include "vesa.h" | 20 | #include "vesa.h" |
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 53616ca03244..a55697d19824 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S | |||
@@ -509,6 +509,17 @@ END(irq_entries_start) | |||
509 | * tracking that we're in kernel mode. | 509 | * tracking that we're in kernel mode. |
510 | */ | 510 | */ |
511 | SWAPGS | 511 | SWAPGS |
512 | |||
513 | /* | ||
514 | * We need to tell lockdep that IRQs are off. We can't do this until | ||
515 | * we fix gsbase, and we should do it before enter_from_user_mode | ||
516 | * (which can take locks). Since TRACE_IRQS_OFF idempotent, | ||
517 | * the simplest way to handle it is to just call it twice if | ||
518 | * we enter from user mode. There's no reason to optimize this since | ||
519 | * TRACE_IRQS_OFF is a no-op if lockdep is off. | ||
520 | */ | ||
521 | TRACE_IRQS_OFF | ||
522 | |||
512 | #ifdef CONFIG_CONTEXT_TRACKING | 523 | #ifdef CONFIG_CONTEXT_TRACKING |
513 | call enter_from_user_mode | 524 | call enter_from_user_mode |
514 | #endif | 525 | #endif |
@@ -1049,12 +1060,18 @@ ENTRY(error_entry) | |||
1049 | SWAPGS | 1060 | SWAPGS |
1050 | 1061 | ||
1051 | .Lerror_entry_from_usermode_after_swapgs: | 1062 | .Lerror_entry_from_usermode_after_swapgs: |
1063 | /* | ||
1064 | * We need to tell lockdep that IRQs are off. We can't do this until | ||
1065 | * we fix gsbase, and we should do it before enter_from_user_mode | ||
1066 | * (which can take locks). | ||
1067 | */ | ||
1068 | TRACE_IRQS_OFF | ||
1052 | #ifdef CONFIG_CONTEXT_TRACKING | 1069 | #ifdef CONFIG_CONTEXT_TRACKING |
1053 | call enter_from_user_mode | 1070 | call enter_from_user_mode |
1054 | #endif | 1071 | #endif |
1072 | ret | ||
1055 | 1073 | ||
1056 | .Lerror_entry_done: | 1074 | .Lerror_entry_done: |
1057 | |||
1058 | TRACE_IRQS_OFF | 1075 | TRACE_IRQS_OFF |
1059 | ret | 1076 | ret |
1060 | 1077 | ||
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index c5b7fb2774d0..cc071c6f7d4d 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h | |||
@@ -9,19 +9,21 @@ | |||
9 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) | 9 | #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) |
10 | #define PAGE_MASK (~(PAGE_SIZE-1)) | 10 | #define PAGE_MASK (~(PAGE_SIZE-1)) |
11 | 11 | ||
12 | #define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) | ||
13 | #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) | ||
14 | |||
15 | #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) | ||
16 | #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) | ||
17 | |||
12 | #define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1)) | 18 | #define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1)) |
13 | #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) | 19 | #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) |
14 | 20 | ||
15 | /* Cast PAGE_MASK to a signed type so that it is sign-extended if | 21 | /* Cast *PAGE_MASK to a signed type so that it is sign-extended if |
16 | virtual addresses are 32-bits but physical addresses are larger | 22 | virtual addresses are 32-bits but physical addresses are larger |
17 | (ie, 32-bit PAE). */ | 23 | (ie, 32-bit PAE). */ |
18 | #define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) | 24 | #define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) |
19 | 25 | #define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK) | |
20 | #define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) | 26 | #define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK) |
21 | #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) | ||
22 | |||
23 | #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) | ||
24 | #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) | ||
25 | 27 | ||
26 | #define HPAGE_SHIFT PMD_SHIFT | 28 | #define HPAGE_SHIFT PMD_SHIFT |
27 | #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) | 29 | #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) |
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index dd5b0aa9dd2f..a471cadb9630 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -279,17 +279,14 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) | |||
279 | static inline pudval_t pud_pfn_mask(pud_t pud) | 279 | static inline pudval_t pud_pfn_mask(pud_t pud) |
280 | { | 280 | { |
281 | if (native_pud_val(pud) & _PAGE_PSE) | 281 | if (native_pud_val(pud) & _PAGE_PSE) |
282 | return PUD_PAGE_MASK & PHYSICAL_PAGE_MASK; | 282 | return PHYSICAL_PUD_PAGE_MASK; |
283 | else | 283 | else |
284 | return PTE_PFN_MASK; | 284 | return PTE_PFN_MASK; |
285 | } | 285 | } |
286 | 286 | ||
287 | static inline pudval_t pud_flags_mask(pud_t pud) | 287 | static inline pudval_t pud_flags_mask(pud_t pud) |
288 | { | 288 | { |
289 | if (native_pud_val(pud) & _PAGE_PSE) | 289 | return ~pud_pfn_mask(pud); |
290 | return ~(PUD_PAGE_MASK & (pudval_t)PHYSICAL_PAGE_MASK); | ||
291 | else | ||
292 | return ~PTE_PFN_MASK; | ||
293 | } | 290 | } |
294 | 291 | ||
295 | static inline pudval_t pud_flags(pud_t pud) | 292 | static inline pudval_t pud_flags(pud_t pud) |
@@ -300,17 +297,14 @@ static inline pudval_t pud_flags(pud_t pud) | |||
300 | static inline pmdval_t pmd_pfn_mask(pmd_t pmd) | 297 | static inline pmdval_t pmd_pfn_mask(pmd_t pmd) |
301 | { | 298 | { |
302 | if (native_pmd_val(pmd) & _PAGE_PSE) | 299 | if (native_pmd_val(pmd) & _PAGE_PSE) |
303 | return PMD_PAGE_MASK & PHYSICAL_PAGE_MASK; | 300 | return PHYSICAL_PMD_PAGE_MASK; |
304 | else | 301 | else |
305 | return PTE_PFN_MASK; | 302 | return PTE_PFN_MASK; |
306 | } | 303 | } |
307 | 304 | ||
308 | static inline pmdval_t pmd_flags_mask(pmd_t pmd) | 305 | static inline pmdval_t pmd_flags_mask(pmd_t pmd) |
309 | { | 306 | { |
310 | if (native_pmd_val(pmd) & _PAGE_PSE) | 307 | return ~pmd_pfn_mask(pmd); |
311 | return ~(PMD_PAGE_MASK & (pmdval_t)PHYSICAL_PAGE_MASK); | ||
312 | else | ||
313 | return ~PTE_PFN_MASK; | ||
314 | } | 308 | } |
315 | 309 | ||
316 | static inline pmdval_t pmd_flags(pmd_t pmd) | 310 | static inline pmdval_t pmd_flags(pmd_t pmd) |
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 48d34d28f5a6..cd0fc0cc78bc 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h | |||
@@ -1,7 +1,6 @@ | |||
1 | #ifndef _ASM_X86_PLATFORM_H | 1 | #ifndef _ASM_X86_PLATFORM_H |
2 | #define _ASM_X86_PLATFORM_H | 2 | #define _ASM_X86_PLATFORM_H |
3 | 3 | ||
4 | #include <asm/pgtable_types.h> | ||
5 | #include <asm/bootparam.h> | 4 | #include <asm/bootparam.h> |
6 | 5 | ||
7 | struct mpc_bus; | 6 | struct mpc_bus; |
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 7fc27f1cca58..b3e94ef461fd 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c | |||
@@ -698,3 +698,4 @@ int __init microcode_init(void) | |||
698 | return error; | 698 | return error; |
699 | 699 | ||
700 | } | 700 | } |
701 | late_initcall(microcode_init); | ||
diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c index 4f00b63d7ff3..14415aff1813 100644 --- a/arch/x86/kernel/pmem.c +++ b/arch/x86/kernel/pmem.c | |||
@@ -4,10 +4,22 @@ | |||
4 | */ | 4 | */ |
5 | #include <linux/platform_device.h> | 5 | #include <linux/platform_device.h> |
6 | #include <linux/module.h> | 6 | #include <linux/module.h> |
7 | #include <linux/ioport.h> | ||
8 | |||
9 | static int found(u64 start, u64 end, void *data) | ||
10 | { | ||
11 | return 1; | ||
12 | } | ||
7 | 13 | ||
8 | static __init int register_e820_pmem(void) | 14 | static __init int register_e820_pmem(void) |
9 | { | 15 | { |
16 | char *pmem = "Persistent Memory (legacy)"; | ||
10 | struct platform_device *pdev; | 17 | struct platform_device *pdev; |
18 | int rc; | ||
19 | |||
20 | rc = walk_iomem_res(pmem, IORESOURCE_MEM, 0, -1, NULL, found); | ||
21 | if (rc <= 0) | ||
22 | return 0; | ||
11 | 23 | ||
12 | /* | 24 | /* |
13 | * See drivers/nvdimm/e820.c for the implementation, this is | 25 | * See drivers/nvdimm/e820.c for the implementation, this is |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 29db25f9a745..d2bbe343fda7 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -1250,8 +1250,6 @@ void __init setup_arch(char **cmdline_p) | |||
1250 | if (efi_enabled(EFI_BOOT)) | 1250 | if (efi_enabled(EFI_BOOT)) |
1251 | efi_apply_memmap_quirks(); | 1251 | efi_apply_memmap_quirks(); |
1252 | #endif | 1252 | #endif |
1253 | |||
1254 | microcode_init(); | ||
1255 | } | 1253 | } |
1256 | 1254 | ||
1257 | #ifdef CONFIG_X86_32 | 1255 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index b7ffb7c00075..cb6282c3638f 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -690,12 +690,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) | |||
690 | signal_setup_done(failed, ksig, stepping); | 690 | signal_setup_done(failed, ksig, stepping); |
691 | } | 691 | } |
692 | 692 | ||
693 | #ifdef CONFIG_X86_32 | 693 | static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs) |
694 | #define NR_restart_syscall __NR_restart_syscall | 694 | { |
695 | #else /* !CONFIG_X86_32 */ | 695 | #if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64) |
696 | #define NR_restart_syscall \ | 696 | return __NR_restart_syscall; |
697 | test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall | 697 | #else /* !CONFIG_X86_32 && CONFIG_X86_64 */ |
698 | #endif /* CONFIG_X86_32 */ | 698 | return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : |
699 | __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT); | ||
700 | #endif /* CONFIG_X86_32 || !CONFIG_X86_64 */ | ||
701 | } | ||
699 | 702 | ||
700 | /* | 703 | /* |
701 | * Note that 'init' is a special process: it doesn't get signals it doesn't | 704 | * Note that 'init' is a special process: it doesn't get signals it doesn't |
@@ -724,7 +727,7 @@ void do_signal(struct pt_regs *regs) | |||
724 | break; | 727 | break; |
725 | 728 | ||
726 | case -ERESTART_RESTARTBLOCK: | 729 | case -ERESTART_RESTARTBLOCK: |
727 | regs->ax = NR_restart_syscall; | 730 | regs->ax = get_nr_restart_syscall(regs); |
728 | regs->ip -= 2; | 731 | regs->ip -= 2; |
729 | break; | 732 | break; |
730 | } | 733 | } |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 892ee2e5ecbc..fbabe4fcc7fb 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid) | |||
509 | */ | 509 | */ |
510 | #define UDELAY_10MS_DEFAULT 10000 | 510 | #define UDELAY_10MS_DEFAULT 10000 |
511 | 511 | ||
512 | static unsigned int init_udelay = INT_MAX; | 512 | static unsigned int init_udelay = UINT_MAX; |
513 | 513 | ||
514 | static int __init cpu_init_udelay(char *str) | 514 | static int __init cpu_init_udelay(char *str) |
515 | { | 515 | { |
@@ -522,14 +522,15 @@ early_param("cpu_init_udelay", cpu_init_udelay); | |||
522 | static void __init smp_quirk_init_udelay(void) | 522 | static void __init smp_quirk_init_udelay(void) |
523 | { | 523 | { |
524 | /* if cmdline changed it from default, leave it alone */ | 524 | /* if cmdline changed it from default, leave it alone */ |
525 | if (init_udelay != INT_MAX) | 525 | if (init_udelay != UINT_MAX) |
526 | return; | 526 | return; |
527 | 527 | ||
528 | /* if modern processor, use no delay */ | 528 | /* if modern processor, use no delay */ |
529 | if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) || | 529 | if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) || |
530 | ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) | 530 | ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) { |
531 | init_udelay = 0; | 531 | init_udelay = 0; |
532 | 532 | return; | |
533 | } | ||
533 | /* else, use legacy delay */ | 534 | /* else, use legacy delay */ |
534 | init_udelay = UDELAY_10MS_DEFAULT; | 535 | init_udelay = UDELAY_10MS_DEFAULT; |
535 | } | 536 | } |
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index 1202d5ca2fb5..b2fd67da1701 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c | |||
@@ -101,19 +101,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs, | |||
101 | switch (type) { | 101 | switch (type) { |
102 | case REG_TYPE_RM: | 102 | case REG_TYPE_RM: |
103 | regno = X86_MODRM_RM(insn->modrm.value); | 103 | regno = X86_MODRM_RM(insn->modrm.value); |
104 | if (X86_REX_B(insn->rex_prefix.value) == 1) | 104 | if (X86_REX_B(insn->rex_prefix.value)) |
105 | regno += 8; | 105 | regno += 8; |
106 | break; | 106 | break; |
107 | 107 | ||
108 | case REG_TYPE_INDEX: | 108 | case REG_TYPE_INDEX: |
109 | regno = X86_SIB_INDEX(insn->sib.value); | 109 | regno = X86_SIB_INDEX(insn->sib.value); |
110 | if (X86_REX_X(insn->rex_prefix.value) == 1) | 110 | if (X86_REX_X(insn->rex_prefix.value)) |
111 | regno += 8; | 111 | regno += 8; |
112 | break; | 112 | break; |
113 | 113 | ||
114 | case REG_TYPE_BASE: | 114 | case REG_TYPE_BASE: |
115 | regno = X86_SIB_BASE(insn->sib.value); | 115 | regno = X86_SIB_BASE(insn->sib.value); |
116 | if (X86_REX_B(insn->rex_prefix.value) == 1) | 116 | if (X86_REX_B(insn->rex_prefix.value)) |
117 | regno += 8; | 117 | regno += 8; |
118 | break; | 118 | break; |
119 | 119 | ||
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c index 7bcf06a7cd12..6eb3c8af96e2 100644 --- a/arch/x86/pci/bus_numa.c +++ b/arch/x86/pci/bus_numa.c | |||
@@ -50,18 +50,9 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources) | |||
50 | if (!found) | 50 | if (!found) |
51 | pci_add_resource(resources, &info->busn); | 51 | pci_add_resource(resources, &info->busn); |
52 | 52 | ||
53 | list_for_each_entry(root_res, &info->resources, list) { | 53 | list_for_each_entry(root_res, &info->resources, list) |
54 | struct resource *res; | 54 | pci_add_resource(resources, &root_res->res); |
55 | struct resource *root; | ||
56 | 55 | ||
57 | res = &root_res->res; | ||
58 | pci_add_resource(resources, res); | ||
59 | if (res->flags & IORESOURCE_IO) | ||
60 | root = &ioport_resource; | ||
61 | else | ||
62 | root = &iomem_resource; | ||
63 | insert_resource(root, res); | ||
64 | } | ||
65 | return; | 56 | return; |
66 | 57 | ||
67 | default_resources: | 58 | default_resources: |
diff --git a/block/blk-core.c b/block/blk-core.c index 5131993b23a1..a0af4043dda2 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -2114,7 +2114,8 @@ blk_qc_t submit_bio(int rw, struct bio *bio) | |||
2114 | EXPORT_SYMBOL(submit_bio); | 2114 | EXPORT_SYMBOL(submit_bio); |
2115 | 2115 | ||
2116 | /** | 2116 | /** |
2117 | * blk_rq_check_limits - Helper function to check a request for the queue limit | 2117 | * blk_cloned_rq_check_limits - Helper function to check a cloned request |
2118 | * for new the queue limits | ||
2118 | * @q: the queue | 2119 | * @q: the queue |
2119 | * @rq: the request being checked | 2120 | * @rq: the request being checked |
2120 | * | 2121 | * |
@@ -2125,20 +2126,13 @@ EXPORT_SYMBOL(submit_bio); | |||
2125 | * after it is inserted to @q, it should be checked against @q before | 2126 | * after it is inserted to @q, it should be checked against @q before |
2126 | * the insertion using this generic function. | 2127 | * the insertion using this generic function. |
2127 | * | 2128 | * |
2128 | * This function should also be useful for request stacking drivers | ||
2129 | * in some cases below, so export this function. | ||
2130 | * Request stacking drivers like request-based dm may change the queue | 2129 | * Request stacking drivers like request-based dm may change the queue |
2131 | * limits while requests are in the queue (e.g. dm's table swapping). | 2130 | * limits when retrying requests on other queues. Those requests need |
2132 | * Such request stacking drivers should check those requests against | 2131 | * to be checked against the new queue limits again during dispatch. |
2133 | * the new queue limits again when they dispatch those requests, | ||
2134 | * although such checkings are also done against the old queue limits | ||
2135 | * when submitting requests. | ||
2136 | */ | 2132 | */ |
2137 | int blk_rq_check_limits(struct request_queue *q, struct request *rq) | 2133 | static int blk_cloned_rq_check_limits(struct request_queue *q, |
2134 | struct request *rq) | ||
2138 | { | 2135 | { |
2139 | if (!rq_mergeable(rq)) | ||
2140 | return 0; | ||
2141 | |||
2142 | if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { | 2136 | if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { |
2143 | printk(KERN_ERR "%s: over max size limit.\n", __func__); | 2137 | printk(KERN_ERR "%s: over max size limit.\n", __func__); |
2144 | return -EIO; | 2138 | return -EIO; |
@@ -2158,7 +2152,6 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq) | |||
2158 | 2152 | ||
2159 | return 0; | 2153 | return 0; |
2160 | } | 2154 | } |
2161 | EXPORT_SYMBOL_GPL(blk_rq_check_limits); | ||
2162 | 2155 | ||
2163 | /** | 2156 | /** |
2164 | * blk_insert_cloned_request - Helper for stacking drivers to submit a request | 2157 | * blk_insert_cloned_request - Helper for stacking drivers to submit a request |
@@ -2170,7 +2163,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq) | |||
2170 | unsigned long flags; | 2163 | unsigned long flags; |
2171 | int where = ELEVATOR_INSERT_BACK; | 2164 | int where = ELEVATOR_INSERT_BACK; |
2172 | 2165 | ||
2173 | if (blk_rq_check_limits(q, rq)) | 2166 | if (blk_cloned_rq_check_limits(q, rq)) |
2174 | return -EIO; | 2167 | return -EIO; |
2175 | 2168 | ||
2176 | if (rq->rq_disk && | 2169 | if (rq->rq_disk && |
diff --git a/block/blk-merge.c b/block/blk-merge.c index 41a55ba0d78e..e01405a3e8b3 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -103,6 +103,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, | |||
103 | bvprv = bv; | 103 | bvprv = bv; |
104 | bvprvp = &bvprv; | 104 | bvprvp = &bvprv; |
105 | sectors += bv.bv_len >> 9; | 105 | sectors += bv.bv_len >> 9; |
106 | |||
107 | if (nsegs == 1 && seg_size > front_seg_size) | ||
108 | front_seg_size = seg_size; | ||
106 | continue; | 109 | continue; |
107 | } | 110 | } |
108 | new_segment: | 111 | new_segment: |
diff --git a/block/blk-settings.c b/block/blk-settings.c index 7d8f129a1516..dd4973583978 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c | |||
@@ -91,7 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim) | |||
91 | lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; | 91 | lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; |
92 | lim->virt_boundary_mask = 0; | 92 | lim->virt_boundary_mask = 0; |
93 | lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; | 93 | lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; |
94 | lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS; | 94 | lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors = |
95 | BLK_SAFE_MAX_SECTORS; | ||
95 | lim->chunk_sectors = 0; | 96 | lim->chunk_sectors = 0; |
96 | lim->max_write_same_sectors = 0; | 97 | lim->max_write_same_sectors = 0; |
97 | lim->max_discard_sectors = 0; | 98 | lim->max_discard_sectors = 0; |
@@ -127,6 +128,7 @@ void blk_set_stacking_limits(struct queue_limits *lim) | |||
127 | lim->max_hw_sectors = UINT_MAX; | 128 | lim->max_hw_sectors = UINT_MAX; |
128 | lim->max_segment_size = UINT_MAX; | 129 | lim->max_segment_size = UINT_MAX; |
129 | lim->max_sectors = UINT_MAX; | 130 | lim->max_sectors = UINT_MAX; |
131 | lim->max_dev_sectors = UINT_MAX; | ||
130 | lim->max_write_same_sectors = UINT_MAX; | 132 | lim->max_write_same_sectors = UINT_MAX; |
131 | } | 133 | } |
132 | EXPORT_SYMBOL(blk_set_stacking_limits); | 134 | EXPORT_SYMBOL(blk_set_stacking_limits); |
@@ -214,8 +216,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr) | |||
214 | EXPORT_SYMBOL(blk_queue_bounce_limit); | 216 | EXPORT_SYMBOL(blk_queue_bounce_limit); |
215 | 217 | ||
216 | /** | 218 | /** |
217 | * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request | 219 | * blk_queue_max_hw_sectors - set max sectors for a request for this queue |
218 | * @limits: the queue limits | 220 | * @q: the request queue for the device |
219 | * @max_hw_sectors: max hardware sectors in the usual 512b unit | 221 | * @max_hw_sectors: max hardware sectors in the usual 512b unit |
220 | * | 222 | * |
221 | * Description: | 223 | * Description: |
@@ -224,13 +226,19 @@ EXPORT_SYMBOL(blk_queue_bounce_limit); | |||
224 | * the device driver based upon the capabilities of the I/O | 226 | * the device driver based upon the capabilities of the I/O |
225 | * controller. | 227 | * controller. |
226 | * | 228 | * |
229 | * max_dev_sectors is a hard limit imposed by the storage device for | ||
230 | * READ/WRITE requests. It is set by the disk driver. | ||
231 | * | ||
227 | * max_sectors is a soft limit imposed by the block layer for | 232 | * max_sectors is a soft limit imposed by the block layer for |
228 | * filesystem type requests. This value can be overridden on a | 233 | * filesystem type requests. This value can be overridden on a |
229 | * per-device basis in /sys/block/<device>/queue/max_sectors_kb. | 234 | * per-device basis in /sys/block/<device>/queue/max_sectors_kb. |
230 | * The soft limit can not exceed max_hw_sectors. | 235 | * The soft limit can not exceed max_hw_sectors. |
231 | **/ | 236 | **/ |
232 | void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors) | 237 | void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) |
233 | { | 238 | { |
239 | struct queue_limits *limits = &q->limits; | ||
240 | unsigned int max_sectors; | ||
241 | |||
234 | if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { | 242 | if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { |
235 | max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); | 243 | max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); |
236 | printk(KERN_INFO "%s: set to minimum %d\n", | 244 | printk(KERN_INFO "%s: set to minimum %d\n", |
@@ -238,22 +246,9 @@ void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_ | |||
238 | } | 246 | } |
239 | 247 | ||
240 | limits->max_hw_sectors = max_hw_sectors; | 248 | limits->max_hw_sectors = max_hw_sectors; |
241 | limits->max_sectors = min_t(unsigned int, max_hw_sectors, | 249 | max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors); |
242 | BLK_DEF_MAX_SECTORS); | 250 | max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS); |
243 | } | 251 | limits->max_sectors = max_sectors; |
244 | EXPORT_SYMBOL(blk_limits_max_hw_sectors); | ||
245 | |||
246 | /** | ||
247 | * blk_queue_max_hw_sectors - set max sectors for a request for this queue | ||
248 | * @q: the request queue for the device | ||
249 | * @max_hw_sectors: max hardware sectors in the usual 512b unit | ||
250 | * | ||
251 | * Description: | ||
252 | * See description for blk_limits_max_hw_sectors(). | ||
253 | **/ | ||
254 | void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) | ||
255 | { | ||
256 | blk_limits_max_hw_sectors(&q->limits, max_hw_sectors); | ||
257 | } | 252 | } |
258 | EXPORT_SYMBOL(blk_queue_max_hw_sectors); | 253 | EXPORT_SYMBOL(blk_queue_max_hw_sectors); |
259 | 254 | ||
@@ -527,6 +522,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | |||
527 | 522 | ||
528 | t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); | 523 | t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); |
529 | t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); | 524 | t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); |
525 | t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors); | ||
530 | t->max_write_same_sectors = min(t->max_write_same_sectors, | 526 | t->max_write_same_sectors = min(t->max_write_same_sectors, |
531 | b->max_write_same_sectors); | 527 | b->max_write_same_sectors); |
532 | t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); | 528 | t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); |
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 565b8dac5782..e140cc487ce1 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -205,6 +205,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) | |||
205 | if (ret < 0) | 205 | if (ret < 0) |
206 | return ret; | 206 | return ret; |
207 | 207 | ||
208 | max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long) | ||
209 | q->limits.max_dev_sectors >> 1); | ||
210 | |||
208 | if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) | 211 | if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) |
209 | return -EINVAL; | 212 | return -EINVAL; |
210 | 213 | ||
diff --git a/block/partition-generic.c b/block/partition-generic.c index 3b030157ec85..746935a5973c 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c | |||
@@ -397,7 +397,7 @@ static int drop_partitions(struct gendisk *disk, struct block_device *bdev) | |||
397 | struct hd_struct *part; | 397 | struct hd_struct *part; |
398 | int res; | 398 | int res; |
399 | 399 | ||
400 | if (bdev->bd_part_count) | 400 | if (bdev->bd_part_count || bdev->bd_super) |
401 | return -EBUSY; | 401 | return -EBUSY; |
402 | res = invalidate_partition(disk, 0); | 402 | res = invalidate_partition(disk, 0); |
403 | if (res) | 403 | if (res) |
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 0aa6fdfb448a..6d4d4569447e 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c | |||
@@ -125,7 +125,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags) | |||
125 | if (flags & MSG_DONTWAIT) | 125 | if (flags & MSG_DONTWAIT) |
126 | return -EAGAIN; | 126 | return -EAGAIN; |
127 | 127 | ||
128 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 128 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
129 | 129 | ||
130 | for (;;) { | 130 | for (;;) { |
131 | if (signal_pending(current)) | 131 | if (signal_pending(current)) |
@@ -139,7 +139,7 @@ static int aead_wait_for_data(struct sock *sk, unsigned flags) | |||
139 | } | 139 | } |
140 | finish_wait(sk_sleep(sk), &wait); | 140 | finish_wait(sk_sleep(sk), &wait); |
141 | 141 | ||
142 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 142 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
143 | 143 | ||
144 | return err; | 144 | return err; |
145 | } | 145 | } |
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index af31a0ee4057..ca9efe17db1a 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c | |||
@@ -212,7 +212,7 @@ static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags) | |||
212 | if (flags & MSG_DONTWAIT) | 212 | if (flags & MSG_DONTWAIT) |
213 | return -EAGAIN; | 213 | return -EAGAIN; |
214 | 214 | ||
215 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 215 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
216 | 216 | ||
217 | for (;;) { | 217 | for (;;) { |
218 | if (signal_pending(current)) | 218 | if (signal_pending(current)) |
@@ -258,7 +258,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags) | |||
258 | return -EAGAIN; | 258 | return -EAGAIN; |
259 | } | 259 | } |
260 | 260 | ||
261 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 261 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
262 | 262 | ||
263 | for (;;) { | 263 | for (;;) { |
264 | if (signal_pending(current)) | 264 | if (signal_pending(current)) |
@@ -272,7 +272,7 @@ static int skcipher_wait_for_data(struct sock *sk, unsigned flags) | |||
272 | } | 272 | } |
273 | finish_wait(sk_sleep(sk), &wait); | 273 | finish_wait(sk_sleep(sk), &wait); |
274 | 274 | ||
275 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 275 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
276 | 276 | ||
277 | return err; | 277 | return err; |
278 | } | 278 | } |
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 25dbb76c02cc..5eef4cb4f70e 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig | |||
@@ -58,10 +58,10 @@ config ACPI_CCA_REQUIRED | |||
58 | bool | 58 | bool |
59 | 59 | ||
60 | config ACPI_DEBUGGER | 60 | config ACPI_DEBUGGER |
61 | bool "In-kernel debugger (EXPERIMENTAL)" | 61 | bool "AML debugger interface (EXPERIMENTAL)" |
62 | select ACPI_DEBUG | 62 | select ACPI_DEBUG |
63 | help | 63 | help |
64 | Enable in-kernel debugging facilities: statistics, internal | 64 | Enable in-kernel debugging of AML facilities: statistics, internal |
65 | object dump, single step control method execution. | 65 | object dump, single step control method execution. |
66 | This is still under development, currently enabling this only | 66 | This is still under development, currently enabling this only |
67 | results in the compilation of the ACPICA debugger files. | 67 | results in the compilation of the ACPICA debugger files. |
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c index f7dab53b352a..e7ed39bab97d 100644 --- a/drivers/acpi/nfit.c +++ b/drivers/acpi/nfit.c | |||
@@ -233,11 +233,12 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc, | |||
233 | struct nfit_table_prev *prev, | 233 | struct nfit_table_prev *prev, |
234 | struct acpi_nfit_system_address *spa) | 234 | struct acpi_nfit_system_address *spa) |
235 | { | 235 | { |
236 | size_t length = min_t(size_t, sizeof(*spa), spa->header.length); | ||
236 | struct device *dev = acpi_desc->dev; | 237 | struct device *dev = acpi_desc->dev; |
237 | struct nfit_spa *nfit_spa; | 238 | struct nfit_spa *nfit_spa; |
238 | 239 | ||
239 | list_for_each_entry(nfit_spa, &prev->spas, list) { | 240 | list_for_each_entry(nfit_spa, &prev->spas, list) { |
240 | if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { | 241 | if (memcmp(nfit_spa->spa, spa, length) == 0) { |
241 | list_move_tail(&nfit_spa->list, &acpi_desc->spas); | 242 | list_move_tail(&nfit_spa->list, &acpi_desc->spas); |
242 | return true; | 243 | return true; |
243 | } | 244 | } |
@@ -259,11 +260,12 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc, | |||
259 | struct nfit_table_prev *prev, | 260 | struct nfit_table_prev *prev, |
260 | struct acpi_nfit_memory_map *memdev) | 261 | struct acpi_nfit_memory_map *memdev) |
261 | { | 262 | { |
263 | size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length); | ||
262 | struct device *dev = acpi_desc->dev; | 264 | struct device *dev = acpi_desc->dev; |
263 | struct nfit_memdev *nfit_memdev; | 265 | struct nfit_memdev *nfit_memdev; |
264 | 266 | ||
265 | list_for_each_entry(nfit_memdev, &prev->memdevs, list) | 267 | list_for_each_entry(nfit_memdev, &prev->memdevs, list) |
266 | if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { | 268 | if (memcmp(nfit_memdev->memdev, memdev, length) == 0) { |
267 | list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); | 269 | list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); |
268 | return true; | 270 | return true; |
269 | } | 271 | } |
@@ -284,11 +286,12 @@ static bool add_dcr(struct acpi_nfit_desc *acpi_desc, | |||
284 | struct nfit_table_prev *prev, | 286 | struct nfit_table_prev *prev, |
285 | struct acpi_nfit_control_region *dcr) | 287 | struct acpi_nfit_control_region *dcr) |
286 | { | 288 | { |
289 | size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length); | ||
287 | struct device *dev = acpi_desc->dev; | 290 | struct device *dev = acpi_desc->dev; |
288 | struct nfit_dcr *nfit_dcr; | 291 | struct nfit_dcr *nfit_dcr; |
289 | 292 | ||
290 | list_for_each_entry(nfit_dcr, &prev->dcrs, list) | 293 | list_for_each_entry(nfit_dcr, &prev->dcrs, list) |
291 | if (memcmp(nfit_dcr->dcr, dcr, sizeof(*dcr)) == 0) { | 294 | if (memcmp(nfit_dcr->dcr, dcr, length) == 0) { |
292 | list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); | 295 | list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); |
293 | return true; | 296 | return true; |
294 | } | 297 | } |
@@ -308,11 +311,12 @@ static bool add_bdw(struct acpi_nfit_desc *acpi_desc, | |||
308 | struct nfit_table_prev *prev, | 311 | struct nfit_table_prev *prev, |
309 | struct acpi_nfit_data_region *bdw) | 312 | struct acpi_nfit_data_region *bdw) |
310 | { | 313 | { |
314 | size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length); | ||
311 | struct device *dev = acpi_desc->dev; | 315 | struct device *dev = acpi_desc->dev; |
312 | struct nfit_bdw *nfit_bdw; | 316 | struct nfit_bdw *nfit_bdw; |
313 | 317 | ||
314 | list_for_each_entry(nfit_bdw, &prev->bdws, list) | 318 | list_for_each_entry(nfit_bdw, &prev->bdws, list) |
315 | if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { | 319 | if (memcmp(nfit_bdw->bdw, bdw, length) == 0) { |
316 | list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); | 320 | list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); |
317 | return true; | 321 | return true; |
318 | } | 322 | } |
@@ -332,11 +336,12 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc, | |||
332 | struct nfit_table_prev *prev, | 336 | struct nfit_table_prev *prev, |
333 | struct acpi_nfit_interleave *idt) | 337 | struct acpi_nfit_interleave *idt) |
334 | { | 338 | { |
339 | size_t length = min_t(size_t, sizeof(*idt), idt->header.length); | ||
335 | struct device *dev = acpi_desc->dev; | 340 | struct device *dev = acpi_desc->dev; |
336 | struct nfit_idt *nfit_idt; | 341 | struct nfit_idt *nfit_idt; |
337 | 342 | ||
338 | list_for_each_entry(nfit_idt, &prev->idts, list) | 343 | list_for_each_entry(nfit_idt, &prev->idts, list) |
339 | if (memcmp(nfit_idt->idt, idt, sizeof(*idt)) == 0) { | 344 | if (memcmp(nfit_idt->idt, idt, length) == 0) { |
340 | list_move_tail(&nfit_idt->list, &acpi_desc->idts); | 345 | list_move_tail(&nfit_idt->list, &acpi_desc->idts); |
341 | return true; | 346 | return true; |
342 | } | 347 | } |
@@ -356,11 +361,12 @@ static bool add_flush(struct acpi_nfit_desc *acpi_desc, | |||
356 | struct nfit_table_prev *prev, | 361 | struct nfit_table_prev *prev, |
357 | struct acpi_nfit_flush_address *flush) | 362 | struct acpi_nfit_flush_address *flush) |
358 | { | 363 | { |
364 | size_t length = min_t(size_t, sizeof(*flush), flush->header.length); | ||
359 | struct device *dev = acpi_desc->dev; | 365 | struct device *dev = acpi_desc->dev; |
360 | struct nfit_flush *nfit_flush; | 366 | struct nfit_flush *nfit_flush; |
361 | 367 | ||
362 | list_for_each_entry(nfit_flush, &prev->flushes, list) | 368 | list_for_each_entry(nfit_flush, &prev->flushes, list) |
363 | if (memcmp(nfit_flush->flush, flush, sizeof(*flush)) == 0) { | 369 | if (memcmp(nfit_flush->flush, flush, length) == 0) { |
364 | list_move_tail(&nfit_flush->list, &acpi_desc->flushes); | 370 | list_move_tail(&nfit_flush->list, &acpi_desc->flushes); |
365 | return true; | 371 | return true; |
366 | } | 372 | } |
@@ -655,7 +661,7 @@ static ssize_t revision_show(struct device *dev, | |||
655 | struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); | 661 | struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); |
656 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); | 662 | struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); |
657 | 663 | ||
658 | return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision); | 664 | return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); |
659 | } | 665 | } |
660 | static DEVICE_ATTR_RO(revision); | 666 | static DEVICE_ATTR_RO(revision); |
661 | 667 | ||
@@ -1652,7 +1658,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz) | |||
1652 | 1658 | ||
1653 | data = (u8 *) acpi_desc->nfit; | 1659 | data = (u8 *) acpi_desc->nfit; |
1654 | end = data + sz; | 1660 | end = data + sz; |
1655 | data += sizeof(struct acpi_table_nfit); | ||
1656 | while (!IS_ERR_OR_NULL(data)) | 1661 | while (!IS_ERR_OR_NULL(data)) |
1657 | data = add_table(acpi_desc, &prev, data, end); | 1662 | data = add_table(acpi_desc, &prev, data, end); |
1658 | 1663 | ||
@@ -1748,13 +1753,29 @@ static int acpi_nfit_add(struct acpi_device *adev) | |||
1748 | return PTR_ERR(acpi_desc); | 1753 | return PTR_ERR(acpi_desc); |
1749 | } | 1754 | } |
1750 | 1755 | ||
1751 | acpi_desc->nfit = (struct acpi_table_nfit *) tbl; | 1756 | /* |
1757 | * Save the acpi header for later and then skip it, | ||
1758 | * making nfit point to the first nfit table header. | ||
1759 | */ | ||
1760 | acpi_desc->acpi_header = *tbl; | ||
1761 | acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit); | ||
1762 | sz -= sizeof(struct acpi_table_nfit); | ||
1752 | 1763 | ||
1753 | /* Evaluate _FIT and override with that if present */ | 1764 | /* Evaluate _FIT and override with that if present */ |
1754 | status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); | 1765 | status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); |
1755 | if (ACPI_SUCCESS(status) && buf.length > 0) { | 1766 | if (ACPI_SUCCESS(status) && buf.length > 0) { |
1756 | acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer; | 1767 | union acpi_object *obj; |
1757 | sz = buf.length; | 1768 | /* |
1769 | * Adjust for the acpi_object header of the _FIT | ||
1770 | */ | ||
1771 | obj = buf.pointer; | ||
1772 | if (obj->type == ACPI_TYPE_BUFFER) { | ||
1773 | acpi_desc->nfit = | ||
1774 | (struct acpi_nfit_header *)obj->buffer.pointer; | ||
1775 | sz = obj->buffer.length; | ||
1776 | } else | ||
1777 | dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n", | ||
1778 | __func__, (int) obj->type); | ||
1758 | } | 1779 | } |
1759 | 1780 | ||
1760 | rc = acpi_nfit_init(acpi_desc, sz); | 1781 | rc = acpi_nfit_init(acpi_desc, sz); |
@@ -1777,7 +1798,8 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) | |||
1777 | { | 1798 | { |
1778 | struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); | 1799 | struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); |
1779 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; | 1800 | struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; |
1780 | struct acpi_table_nfit *nfit_saved; | 1801 | struct acpi_nfit_header *nfit_saved; |
1802 | union acpi_object *obj; | ||
1781 | struct device *dev = &adev->dev; | 1803 | struct device *dev = &adev->dev; |
1782 | acpi_status status; | 1804 | acpi_status status; |
1783 | int ret; | 1805 | int ret; |
@@ -1808,12 +1830,19 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event) | |||
1808 | } | 1830 | } |
1809 | 1831 | ||
1810 | nfit_saved = acpi_desc->nfit; | 1832 | nfit_saved = acpi_desc->nfit; |
1811 | acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer; | 1833 | obj = buf.pointer; |
1812 | ret = acpi_nfit_init(acpi_desc, buf.length); | 1834 | if (obj->type == ACPI_TYPE_BUFFER) { |
1813 | if (!ret) { | 1835 | acpi_desc->nfit = |
1814 | /* Merge failed, restore old nfit, and exit */ | 1836 | (struct acpi_nfit_header *)obj->buffer.pointer; |
1815 | acpi_desc->nfit = nfit_saved; | 1837 | ret = acpi_nfit_init(acpi_desc, obj->buffer.length); |
1816 | dev_err(dev, "failed to merge updated NFIT\n"); | 1838 | if (ret) { |
1839 | /* Merge failed, restore old nfit, and exit */ | ||
1840 | acpi_desc->nfit = nfit_saved; | ||
1841 | dev_err(dev, "failed to merge updated NFIT\n"); | ||
1842 | } | ||
1843 | } else { | ||
1844 | /* Bad _FIT, restore old nfit */ | ||
1845 | dev_err(dev, "Invalid _FIT\n"); | ||
1817 | } | 1846 | } |
1818 | kfree(buf.pointer); | 1847 | kfree(buf.pointer); |
1819 | 1848 | ||
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h index 2ea5c0797c8f..3d549a383659 100644 --- a/drivers/acpi/nfit.h +++ b/drivers/acpi/nfit.h | |||
@@ -96,7 +96,8 @@ struct nfit_mem { | |||
96 | 96 | ||
97 | struct acpi_nfit_desc { | 97 | struct acpi_nfit_desc { |
98 | struct nvdimm_bus_descriptor nd_desc; | 98 | struct nvdimm_bus_descriptor nd_desc; |
99 | struct acpi_table_nfit *nfit; | 99 | struct acpi_table_header acpi_header; |
100 | struct acpi_nfit_header *nfit; | ||
100 | struct mutex spa_map_mutex; | 101 | struct mutex spa_map_mutex; |
101 | struct mutex init_mutex; | 102 | struct mutex init_mutex; |
102 | struct list_head spa_maps; | 103 | struct list_head spa_maps; |
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 850d7bf0c873..ae3fe4e64203 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
@@ -768,6 +768,13 @@ static void pci_acpi_root_add_resources(struct acpi_pci_root_info *info) | |||
768 | else | 768 | else |
769 | continue; | 769 | continue; |
770 | 770 | ||
771 | /* | ||
772 | * Some legacy x86 host bridge drivers use iomem_resource and | ||
773 | * ioport_resource as default resource pool, skip it. | ||
774 | */ | ||
775 | if (res == root) | ||
776 | continue; | ||
777 | |||
771 | conflict = insert_resource_conflict(root, res); | 778 | conflict = insert_resource_conflict(root, res); |
772 | if (conflict) { | 779 | if (conflict) { |
773 | dev_info(&info->bridge->dev, | 780 | dev_info(&info->bridge->dev, |
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index e03b1ad25a90..167418e73445 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
@@ -1775,10 +1775,10 @@ int genpd_dev_pm_attach(struct device *dev) | |||
1775 | } | 1775 | } |
1776 | 1776 | ||
1777 | pd = of_genpd_get_from_provider(&pd_args); | 1777 | pd = of_genpd_get_from_provider(&pd_args); |
1778 | of_node_put(pd_args.np); | ||
1778 | if (IS_ERR(pd)) { | 1779 | if (IS_ERR(pd)) { |
1779 | dev_dbg(dev, "%s() failed to find PM domain: %ld\n", | 1780 | dev_dbg(dev, "%s() failed to find PM domain: %ld\n", |
1780 | __func__, PTR_ERR(pd)); | 1781 | __func__, PTR_ERR(pd)); |
1781 | of_node_put(dev->of_node); | ||
1782 | return -EPROBE_DEFER; | 1782 | return -EPROBE_DEFER; |
1783 | } | 1783 | } |
1784 | 1784 | ||
@@ -1796,7 +1796,6 @@ int genpd_dev_pm_attach(struct device *dev) | |||
1796 | if (ret < 0) { | 1796 | if (ret < 0) { |
1797 | dev_err(dev, "failed to add to PM domain %s: %d", | 1797 | dev_err(dev, "failed to add to PM domain %s: %d", |
1798 | pd->name, ret); | 1798 | pd->name, ret); |
1799 | of_node_put(dev->of_node); | ||
1800 | goto out; | 1799 | goto out; |
1801 | } | 1800 | } |
1802 | 1801 | ||
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index e60dd12e23aa..1e937ac5f456 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c | |||
@@ -160,9 +160,6 @@ static bool default_power_down_ok(struct dev_pm_domain *pd) | |||
160 | struct gpd_timing_data *td; | 160 | struct gpd_timing_data *td; |
161 | s64 constraint_ns; | 161 | s64 constraint_ns; |
162 | 162 | ||
163 | if (!pdd->dev->driver) | ||
164 | continue; | ||
165 | |||
166 | /* | 163 | /* |
167 | * Check if the device is allowed to be off long enough for the | 164 | * Check if the device is allowed to be off long enough for the |
168 | * domain to turn off and on (that's how much time it will | 165 | * domain to turn off and on (that's how much time it will |
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 5c8ba5484d86..0c3940ec5e62 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
@@ -18,6 +18,7 @@ struct nullb_cmd { | |||
18 | struct bio *bio; | 18 | struct bio *bio; |
19 | unsigned int tag; | 19 | unsigned int tag; |
20 | struct nullb_queue *nq; | 20 | struct nullb_queue *nq; |
21 | struct hrtimer timer; | ||
21 | }; | 22 | }; |
22 | 23 | ||
23 | struct nullb_queue { | 24 | struct nullb_queue { |
@@ -49,17 +50,6 @@ static int null_major; | |||
49 | static int nullb_indexes; | 50 | static int nullb_indexes; |
50 | static struct kmem_cache *ppa_cache; | 51 | static struct kmem_cache *ppa_cache; |
51 | 52 | ||
52 | struct completion_queue { | ||
53 | struct llist_head list; | ||
54 | struct hrtimer timer; | ||
55 | }; | ||
56 | |||
57 | /* | ||
58 | * These are per-cpu for now, they will need to be configured by the | ||
59 | * complete_queues parameter and appropriately mapped. | ||
60 | */ | ||
61 | static DEFINE_PER_CPU(struct completion_queue, completion_queues); | ||
62 | |||
63 | enum { | 53 | enum { |
64 | NULL_IRQ_NONE = 0, | 54 | NULL_IRQ_NONE = 0, |
65 | NULL_IRQ_SOFTIRQ = 1, | 55 | NULL_IRQ_SOFTIRQ = 1, |
@@ -142,8 +132,8 @@ static const struct kernel_param_ops null_irqmode_param_ops = { | |||
142 | device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO); | 132 | device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO); |
143 | MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer"); | 133 | MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer"); |
144 | 134 | ||
145 | static int completion_nsec = 10000; | 135 | static unsigned long completion_nsec = 10000; |
146 | module_param(completion_nsec, int, S_IRUGO); | 136 | module_param(completion_nsec, ulong, S_IRUGO); |
147 | MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns"); | 137 | MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns"); |
148 | 138 | ||
149 | static int hw_queue_depth = 64; | 139 | static int hw_queue_depth = 64; |
@@ -180,6 +170,8 @@ static void free_cmd(struct nullb_cmd *cmd) | |||
180 | put_tag(cmd->nq, cmd->tag); | 170 | put_tag(cmd->nq, cmd->tag); |
181 | } | 171 | } |
182 | 172 | ||
173 | static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer); | ||
174 | |||
183 | static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) | 175 | static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) |
184 | { | 176 | { |
185 | struct nullb_cmd *cmd; | 177 | struct nullb_cmd *cmd; |
@@ -190,6 +182,11 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) | |||
190 | cmd = &nq->cmds[tag]; | 182 | cmd = &nq->cmds[tag]; |
191 | cmd->tag = tag; | 183 | cmd->tag = tag; |
192 | cmd->nq = nq; | 184 | cmd->nq = nq; |
185 | if (irqmode == NULL_IRQ_TIMER) { | ||
186 | hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, | ||
187 | HRTIMER_MODE_REL); | ||
188 | cmd->timer.function = null_cmd_timer_expired; | ||
189 | } | ||
193 | return cmd; | 190 | return cmd; |
194 | } | 191 | } |
195 | 192 | ||
@@ -220,6 +217,8 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait) | |||
220 | 217 | ||
221 | static void end_cmd(struct nullb_cmd *cmd) | 218 | static void end_cmd(struct nullb_cmd *cmd) |
222 | { | 219 | { |
220 | struct request_queue *q = NULL; | ||
221 | |||
223 | switch (queue_mode) { | 222 | switch (queue_mode) { |
224 | case NULL_Q_MQ: | 223 | case NULL_Q_MQ: |
225 | blk_mq_end_request(cmd->rq, 0); | 224 | blk_mq_end_request(cmd->rq, 0); |
@@ -230,55 +229,37 @@ static void end_cmd(struct nullb_cmd *cmd) | |||
230 | break; | 229 | break; |
231 | case NULL_Q_BIO: | 230 | case NULL_Q_BIO: |
232 | bio_endio(cmd->bio); | 231 | bio_endio(cmd->bio); |
233 | break; | 232 | goto free_cmd; |
234 | } | 233 | } |
235 | 234 | ||
235 | if (cmd->rq) | ||
236 | q = cmd->rq->q; | ||
237 | |||
238 | /* Restart queue if needed, as we are freeing a tag */ | ||
239 | if (q && !q->mq_ops && blk_queue_stopped(q)) { | ||
240 | unsigned long flags; | ||
241 | |||
242 | spin_lock_irqsave(q->queue_lock, flags); | ||
243 | if (blk_queue_stopped(q)) | ||
244 | blk_start_queue(q); | ||
245 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
246 | } | ||
247 | free_cmd: | ||
236 | free_cmd(cmd); | 248 | free_cmd(cmd); |
237 | } | 249 | } |
238 | 250 | ||
239 | static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) | 251 | static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) |
240 | { | 252 | { |
241 | struct completion_queue *cq; | 253 | end_cmd(container_of(timer, struct nullb_cmd, timer)); |
242 | struct llist_node *entry; | ||
243 | struct nullb_cmd *cmd; | ||
244 | |||
245 | cq = &per_cpu(completion_queues, smp_processor_id()); | ||
246 | |||
247 | while ((entry = llist_del_all(&cq->list)) != NULL) { | ||
248 | entry = llist_reverse_order(entry); | ||
249 | do { | ||
250 | struct request_queue *q = NULL; | ||
251 | |||
252 | cmd = container_of(entry, struct nullb_cmd, ll_list); | ||
253 | entry = entry->next; | ||
254 | if (cmd->rq) | ||
255 | q = cmd->rq->q; | ||
256 | end_cmd(cmd); | ||
257 | |||
258 | if (q && !q->mq_ops && blk_queue_stopped(q)) { | ||
259 | spin_lock(q->queue_lock); | ||
260 | if (blk_queue_stopped(q)) | ||
261 | blk_start_queue(q); | ||
262 | spin_unlock(q->queue_lock); | ||
263 | } | ||
264 | } while (entry); | ||
265 | } | ||
266 | 254 | ||
267 | return HRTIMER_NORESTART; | 255 | return HRTIMER_NORESTART; |
268 | } | 256 | } |
269 | 257 | ||
270 | static void null_cmd_end_timer(struct nullb_cmd *cmd) | 258 | static void null_cmd_end_timer(struct nullb_cmd *cmd) |
271 | { | 259 | { |
272 | struct completion_queue *cq = &per_cpu(completion_queues, get_cpu()); | 260 | ktime_t kt = ktime_set(0, completion_nsec); |
273 | |||
274 | cmd->ll_list.next = NULL; | ||
275 | if (llist_add(&cmd->ll_list, &cq->list)) { | ||
276 | ktime_t kt = ktime_set(0, completion_nsec); | ||
277 | |||
278 | hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED); | ||
279 | } | ||
280 | 261 | ||
281 | put_cpu(); | 262 | hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL); |
282 | } | 263 | } |
283 | 264 | ||
284 | static void null_softirq_done_fn(struct request *rq) | 265 | static void null_softirq_done_fn(struct request *rq) |
@@ -376,6 +357,10 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
376 | { | 357 | { |
377 | struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); | 358 | struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); |
378 | 359 | ||
360 | if (irqmode == NULL_IRQ_TIMER) { | ||
361 | hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
362 | cmd->timer.function = null_cmd_timer_expired; | ||
363 | } | ||
379 | cmd->rq = bd->rq; | 364 | cmd->rq = bd->rq; |
380 | cmd->nq = hctx->driver_data; | 365 | cmd->nq = hctx->driver_data; |
381 | 366 | ||
@@ -813,19 +798,6 @@ static int __init null_init(void) | |||
813 | 798 | ||
814 | mutex_init(&lock); | 799 | mutex_init(&lock); |
815 | 800 | ||
816 | /* Initialize a separate list for each CPU for issuing softirqs */ | ||
817 | for_each_possible_cpu(i) { | ||
818 | struct completion_queue *cq = &per_cpu(completion_queues, i); | ||
819 | |||
820 | init_llist_head(&cq->list); | ||
821 | |||
822 | if (irqmode != NULL_IRQ_TIMER) | ||
823 | continue; | ||
824 | |||
825 | hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
826 | cq->timer.function = null_cmd_timer_expired; | ||
827 | } | ||
828 | |||
829 | null_major = register_blkdev(0, "nullb"); | 801 | null_major = register_blkdev(0, "nullb"); |
830 | if (null_major < 0) | 802 | if (null_major < 0) |
831 | return null_major; | 803 | return null_major; |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 235708c7c46e..81ea69fee7ca 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -3442,6 +3442,7 @@ static void rbd_queue_workfn(struct work_struct *work) | |||
3442 | goto err_rq; | 3442 | goto err_rq; |
3443 | } | 3443 | } |
3444 | img_request->rq = rq; | 3444 | img_request->rq = rq; |
3445 | snapc = NULL; /* img_request consumes a ref */ | ||
3445 | 3446 | ||
3446 | if (op_type == OBJ_OP_DISCARD) | 3447 | if (op_type == OBJ_OP_DISCARD) |
3447 | result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA, | 3448 | result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA, |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index a83c995a62df..8412ce5f93a7 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -976,10 +976,14 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy) | |||
976 | 976 | ||
977 | new_policy.governor = gov; | 977 | new_policy.governor = gov; |
978 | 978 | ||
979 | /* Use the default policy if its valid. */ | 979 | /* Use the default policy if there is no last_policy. */ |
980 | if (cpufreq_driver->setpolicy) | 980 | if (cpufreq_driver->setpolicy) { |
981 | cpufreq_parse_governor(gov->name, &new_policy.policy, NULL); | 981 | if (policy->last_policy) |
982 | 982 | new_policy.policy = policy->last_policy; | |
983 | else | ||
984 | cpufreq_parse_governor(gov->name, &new_policy.policy, | ||
985 | NULL); | ||
986 | } | ||
983 | /* set default policy */ | 987 | /* set default policy */ |
984 | return cpufreq_set_policy(policy, &new_policy); | 988 | return cpufreq_set_policy(policy, &new_policy); |
985 | } | 989 | } |
@@ -1330,6 +1334,8 @@ static void cpufreq_offline_prepare(unsigned int cpu) | |||
1330 | if (has_target()) | 1334 | if (has_target()) |
1331 | strncpy(policy->last_governor, policy->governor->name, | 1335 | strncpy(policy->last_governor, policy->governor->name, |
1332 | CPUFREQ_NAME_LEN); | 1336 | CPUFREQ_NAME_LEN); |
1337 | else | ||
1338 | policy->last_policy = policy->policy; | ||
1333 | } else if (cpu == policy->cpu) { | 1339 | } else if (cpu == policy->cpu) { |
1334 | /* Nominate new CPU */ | 1340 | /* Nominate new CPU */ |
1335 | policy->cpu = cpumask_any(policy->cpus); | 1341 | policy->cpu = cpumask_any(policy->cpus); |
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c index 73ef49922788..7038f364acb5 100644 --- a/drivers/crypto/nx/nx-aes-ccm.c +++ b/drivers/crypto/nx/nx-aes-ccm.c | |||
@@ -409,7 +409,7 @@ static int ccm_nx_decrypt(struct aead_request *req, | |||
409 | processed += to_process; | 409 | processed += to_process; |
410 | } while (processed < nbytes); | 410 | } while (processed < nbytes); |
411 | 411 | ||
412 | rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, | 412 | rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, |
413 | authsize) ? -EBADMSG : 0; | 413 | authsize) ? -EBADMSG : 0; |
414 | out: | 414 | out: |
415 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); | 415 | spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); |
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c index eee624f589b6..abd465f479c4 100644 --- a/drivers/crypto/nx/nx-aes-gcm.c +++ b/drivers/crypto/nx/nx-aes-gcm.c | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include <crypto/internal/aead.h> | 22 | #include <crypto/internal/aead.h> |
23 | #include <crypto/aes.h> | 23 | #include <crypto/aes.h> |
24 | #include <crypto/algapi.h> | ||
24 | #include <crypto/scatterwalk.h> | 25 | #include <crypto/scatterwalk.h> |
25 | #include <linux/module.h> | 26 | #include <linux/module.h> |
26 | #include <linux/types.h> | 27 | #include <linux/types.h> |
@@ -418,7 +419,7 @@ mac: | |||
418 | itag, req->src, req->assoclen + nbytes, | 419 | itag, req->src, req->assoclen + nbytes, |
419 | crypto_aead_authsize(crypto_aead_reqtfm(req)), | 420 | crypto_aead_authsize(crypto_aead_reqtfm(req)), |
420 | SCATTERWALK_FROM_SG); | 421 | SCATTERWALK_FROM_SG); |
421 | rc = memcmp(itag, otag, | 422 | rc = crypto_memneq(itag, otag, |
422 | crypto_aead_authsize(crypto_aead_reqtfm(req))) ? | 423 | crypto_aead_authsize(crypto_aead_reqtfm(req))) ? |
423 | -EBADMSG : 0; | 424 | -EBADMSG : 0; |
424 | } | 425 | } |
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 46f531e19ccf..b6f9f42e2985 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c | |||
@@ -977,7 +977,7 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev, | |||
977 | } else | 977 | } else |
978 | oicv = (char *)&edesc->link_tbl[0]; | 978 | oicv = (char *)&edesc->link_tbl[0]; |
979 | 979 | ||
980 | err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0; | 980 | err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0; |
981 | } | 981 | } |
982 | 982 | ||
983 | kfree(edesc); | 983 | kfree(edesc); |
diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c index 6ed7c0fb3378..6b186829087c 100644 --- a/drivers/gpio/gpio-74xx-mmio.c +++ b/drivers/gpio/gpio-74xx-mmio.c | |||
@@ -113,13 +113,16 @@ static int mmio_74xx_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) | |||
113 | 113 | ||
114 | static int mmio_74xx_gpio_probe(struct platform_device *pdev) | 114 | static int mmio_74xx_gpio_probe(struct platform_device *pdev) |
115 | { | 115 | { |
116 | const struct of_device_id *of_id = | 116 | const struct of_device_id *of_id; |
117 | of_match_device(mmio_74xx_gpio_ids, &pdev->dev); | ||
118 | struct mmio_74xx_gpio_priv *priv; | 117 | struct mmio_74xx_gpio_priv *priv; |
119 | struct resource *res; | 118 | struct resource *res; |
120 | void __iomem *dat; | 119 | void __iomem *dat; |
121 | int err; | 120 | int err; |
122 | 121 | ||
122 | of_id = of_match_device(mmio_74xx_gpio_ids, &pdev->dev); | ||
123 | if (!of_id) | ||
124 | return -ENODEV; | ||
125 | |||
123 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); | 126 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); |
124 | if (!priv) | 127 | if (!priv) |
125 | return -ENOMEM; | 128 | return -ENOMEM; |
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 56d2d026e62e..f7fbb46d5d79 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c | |||
@@ -1122,8 +1122,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc) | |||
1122 | /* MPUIO is a bit different, reading IRQ status clears it */ | 1122 | /* MPUIO is a bit different, reading IRQ status clears it */ |
1123 | if (bank->is_mpuio) { | 1123 | if (bank->is_mpuio) { |
1124 | irqc->irq_ack = dummy_irq_chip.irq_ack; | 1124 | irqc->irq_ack = dummy_irq_chip.irq_ack; |
1125 | irqc->irq_mask = irq_gc_mask_set_bit; | ||
1126 | irqc->irq_unmask = irq_gc_mask_clr_bit; | ||
1127 | if (!bank->regs->wkup_en) | 1125 | if (!bank->regs->wkup_en) |
1128 | irqc->irq_set_wake = NULL; | 1126 | irqc->irq_set_wake = NULL; |
1129 | } | 1127 | } |
diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c index 171a6389f9ce..52b447c071cb 100644 --- a/drivers/gpio/gpio-palmas.c +++ b/drivers/gpio/gpio-palmas.c | |||
@@ -167,6 +167,8 @@ static int palmas_gpio_probe(struct platform_device *pdev) | |||
167 | const struct palmas_device_data *dev_data; | 167 | const struct palmas_device_data *dev_data; |
168 | 168 | ||
169 | match = of_match_device(of_palmas_gpio_match, &pdev->dev); | 169 | match = of_match_device(of_palmas_gpio_match, &pdev->dev); |
170 | if (!match) | ||
171 | return -ENODEV; | ||
170 | dev_data = match->data; | 172 | dev_data = match->data; |
171 | if (!dev_data) | 173 | if (!dev_data) |
172 | dev_data = &palmas_dev_data; | 174 | dev_data = &palmas_dev_data; |
diff --git a/drivers/gpio/gpio-syscon.c b/drivers/gpio/gpio-syscon.c index 045a952576c7..7b25fdf64802 100644 --- a/drivers/gpio/gpio-syscon.c +++ b/drivers/gpio/gpio-syscon.c | |||
@@ -187,11 +187,15 @@ MODULE_DEVICE_TABLE(of, syscon_gpio_ids); | |||
187 | static int syscon_gpio_probe(struct platform_device *pdev) | 187 | static int syscon_gpio_probe(struct platform_device *pdev) |
188 | { | 188 | { |
189 | struct device *dev = &pdev->dev; | 189 | struct device *dev = &pdev->dev; |
190 | const struct of_device_id *of_id = of_match_device(syscon_gpio_ids, dev); | 190 | const struct of_device_id *of_id; |
191 | struct syscon_gpio_priv *priv; | 191 | struct syscon_gpio_priv *priv; |
192 | struct device_node *np = dev->of_node; | 192 | struct device_node *np = dev->of_node; |
193 | int ret; | 193 | int ret; |
194 | 194 | ||
195 | of_id = of_match_device(syscon_gpio_ids, dev); | ||
196 | if (!of_id) | ||
197 | return -ENODEV; | ||
198 | |||
195 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); | 199 | priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); |
196 | if (!priv) | 200 | if (!priv) |
197 | return -ENOMEM; | 201 | return -ENOMEM; |
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 027e5f47dd28..896bf29776b0 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c | |||
@@ -375,6 +375,60 @@ static int tegra_gpio_irq_set_wake(struct irq_data *d, unsigned int enable) | |||
375 | } | 375 | } |
376 | #endif | 376 | #endif |
377 | 377 | ||
378 | #ifdef CONFIG_DEBUG_FS | ||
379 | |||
380 | #include <linux/debugfs.h> | ||
381 | #include <linux/seq_file.h> | ||
382 | |||
383 | static int dbg_gpio_show(struct seq_file *s, void *unused) | ||
384 | { | ||
385 | int i; | ||
386 | int j; | ||
387 | |||
388 | for (i = 0; i < tegra_gpio_bank_count; i++) { | ||
389 | for (j = 0; j < 4; j++) { | ||
390 | int gpio = tegra_gpio_compose(i, j, 0); | ||
391 | seq_printf(s, | ||
392 | "%d:%d %02x %02x %02x %02x %02x %02x %06x\n", | ||
393 | i, j, | ||
394 | tegra_gpio_readl(GPIO_CNF(gpio)), | ||
395 | tegra_gpio_readl(GPIO_OE(gpio)), | ||
396 | tegra_gpio_readl(GPIO_OUT(gpio)), | ||
397 | tegra_gpio_readl(GPIO_IN(gpio)), | ||
398 | tegra_gpio_readl(GPIO_INT_STA(gpio)), | ||
399 | tegra_gpio_readl(GPIO_INT_ENB(gpio)), | ||
400 | tegra_gpio_readl(GPIO_INT_LVL(gpio))); | ||
401 | } | ||
402 | } | ||
403 | return 0; | ||
404 | } | ||
405 | |||
406 | static int dbg_gpio_open(struct inode *inode, struct file *file) | ||
407 | { | ||
408 | return single_open(file, dbg_gpio_show, &inode->i_private); | ||
409 | } | ||
410 | |||
411 | static const struct file_operations debug_fops = { | ||
412 | .open = dbg_gpio_open, | ||
413 | .read = seq_read, | ||
414 | .llseek = seq_lseek, | ||
415 | .release = single_release, | ||
416 | }; | ||
417 | |||
418 | static void tegra_gpio_debuginit(void) | ||
419 | { | ||
420 | (void) debugfs_create_file("tegra_gpio", S_IRUGO, | ||
421 | NULL, NULL, &debug_fops); | ||
422 | } | ||
423 | |||
424 | #else | ||
425 | |||
426 | static inline void tegra_gpio_debuginit(void) | ||
427 | { | ||
428 | } | ||
429 | |||
430 | #endif | ||
431 | |||
378 | static struct irq_chip tegra_gpio_irq_chip = { | 432 | static struct irq_chip tegra_gpio_irq_chip = { |
379 | .name = "GPIO", | 433 | .name = "GPIO", |
380 | .irq_ack = tegra_gpio_irq_ack, | 434 | .irq_ack = tegra_gpio_irq_ack, |
@@ -519,6 +573,8 @@ static int tegra_gpio_probe(struct platform_device *pdev) | |||
519 | spin_lock_init(&bank->lvl_lock[j]); | 573 | spin_lock_init(&bank->lvl_lock[j]); |
520 | } | 574 | } |
521 | 575 | ||
576 | tegra_gpio_debuginit(); | ||
577 | |||
522 | return 0; | 578 | return 0; |
523 | } | 579 | } |
524 | 580 | ||
@@ -536,52 +592,3 @@ static int __init tegra_gpio_init(void) | |||
536 | return platform_driver_register(&tegra_gpio_driver); | 592 | return platform_driver_register(&tegra_gpio_driver); |
537 | } | 593 | } |
538 | postcore_initcall(tegra_gpio_init); | 594 | postcore_initcall(tegra_gpio_init); |
539 | |||
540 | #ifdef CONFIG_DEBUG_FS | ||
541 | |||
542 | #include <linux/debugfs.h> | ||
543 | #include <linux/seq_file.h> | ||
544 | |||
545 | static int dbg_gpio_show(struct seq_file *s, void *unused) | ||
546 | { | ||
547 | int i; | ||
548 | int j; | ||
549 | |||
550 | for (i = 0; i < tegra_gpio_bank_count; i++) { | ||
551 | for (j = 0; j < 4; j++) { | ||
552 | int gpio = tegra_gpio_compose(i, j, 0); | ||
553 | seq_printf(s, | ||
554 | "%d:%d %02x %02x %02x %02x %02x %02x %06x\n", | ||
555 | i, j, | ||
556 | tegra_gpio_readl(GPIO_CNF(gpio)), | ||
557 | tegra_gpio_readl(GPIO_OE(gpio)), | ||
558 | tegra_gpio_readl(GPIO_OUT(gpio)), | ||
559 | tegra_gpio_readl(GPIO_IN(gpio)), | ||
560 | tegra_gpio_readl(GPIO_INT_STA(gpio)), | ||
561 | tegra_gpio_readl(GPIO_INT_ENB(gpio)), | ||
562 | tegra_gpio_readl(GPIO_INT_LVL(gpio))); | ||
563 | } | ||
564 | } | ||
565 | return 0; | ||
566 | } | ||
567 | |||
568 | static int dbg_gpio_open(struct inode *inode, struct file *file) | ||
569 | { | ||
570 | return single_open(file, dbg_gpio_show, &inode->i_private); | ||
571 | } | ||
572 | |||
573 | static const struct file_operations debug_fops = { | ||
574 | .open = dbg_gpio_open, | ||
575 | .read = seq_read, | ||
576 | .llseek = seq_lseek, | ||
577 | .release = single_release, | ||
578 | }; | ||
579 | |||
580 | static int __init tegra_gpio_debuginit(void) | ||
581 | { | ||
582 | (void) debugfs_create_file("tegra_gpio", S_IRUGO, | ||
583 | NULL, NULL, &debug_fops); | ||
584 | return 0; | ||
585 | } | ||
586 | late_initcall(tegra_gpio_debuginit); | ||
587 | #endif | ||
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index a18f00fc1bb8..2a91f3287e3b 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
@@ -233,7 +233,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name) | |||
233 | for (i = 0; i != chip->ngpio; ++i) { | 233 | for (i = 0; i != chip->ngpio; ++i) { |
234 | struct gpio_desc *gpio = &chip->desc[i]; | 234 | struct gpio_desc *gpio = &chip->desc[i]; |
235 | 235 | ||
236 | if (!gpio->name) | 236 | if (!gpio->name || !name) |
237 | continue; | 237 | continue; |
238 | 238 | ||
239 | if (!strcmp(gpio->name, name)) { | 239 | if (!strcmp(gpio->name, name)) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 251b14736de9..5a5f04d0902d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -539,6 +539,7 @@ struct amdgpu_bo { | |||
539 | /* Constant after initialization */ | 539 | /* Constant after initialization */ |
540 | struct amdgpu_device *adev; | 540 | struct amdgpu_device *adev; |
541 | struct drm_gem_object gem_base; | 541 | struct drm_gem_object gem_base; |
542 | struct amdgpu_bo *parent; | ||
542 | 543 | ||
543 | struct ttm_bo_kmap_obj dma_buf_vmap; | 544 | struct ttm_bo_kmap_obj dma_buf_vmap; |
544 | pid_t pid; | 545 | pid_t pid; |
@@ -955,6 +956,8 @@ struct amdgpu_vm { | |||
955 | struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; | 956 | struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS]; |
956 | /* for interval tree */ | 957 | /* for interval tree */ |
957 | spinlock_t it_lock; | 958 | spinlock_t it_lock; |
959 | /* protecting freed */ | ||
960 | spinlock_t freed_lock; | ||
958 | }; | 961 | }; |
959 | 962 | ||
960 | struct amdgpu_vm_manager { | 963 | struct amdgpu_vm_manager { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1d44d508d4d4..4f352ec9dec4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
@@ -222,6 +222,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | |||
222 | } | 222 | } |
223 | 223 | ||
224 | p->uf.bo = gem_to_amdgpu_bo(gobj); | 224 | p->uf.bo = gem_to_amdgpu_bo(gobj); |
225 | amdgpu_bo_ref(p->uf.bo); | ||
226 | drm_gem_object_unreference_unlocked(gobj); | ||
225 | p->uf.offset = fence_data->offset; | 227 | p->uf.offset = fence_data->offset; |
226 | } else { | 228 | } else { |
227 | ret = -EINVAL; | 229 | ret = -EINVAL; |
@@ -487,7 +489,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo | |||
487 | amdgpu_ib_free(parser->adev, &parser->ibs[i]); | 489 | amdgpu_ib_free(parser->adev, &parser->ibs[i]); |
488 | kfree(parser->ibs); | 490 | kfree(parser->ibs); |
489 | if (parser->uf.bo) | 491 | if (parser->uf.bo) |
490 | drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); | 492 | amdgpu_bo_unref(&parser->uf.bo); |
491 | } | 493 | } |
492 | 494 | ||
493 | static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, | 495 | static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p, |
@@ -776,7 +778,7 @@ static int amdgpu_cs_free_job(struct amdgpu_job *job) | |||
776 | amdgpu_ib_free(job->adev, &job->ibs[i]); | 778 | amdgpu_ib_free(job->adev, &job->ibs[i]); |
777 | kfree(job->ibs); | 779 | kfree(job->ibs); |
778 | if (job->uf.bo) | 780 | if (job->uf.bo) |
779 | drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base); | 781 | amdgpu_bo_unref(&job->uf.bo); |
780 | return 0; | 782 | return 0; |
781 | } | 783 | } |
782 | 784 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 7d5e0583c95c..acd066d0a805 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | |||
@@ -73,6 +73,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work) | |||
73 | struct drm_crtc *crtc = &amdgpuCrtc->base; | 73 | struct drm_crtc *crtc = &amdgpuCrtc->base; |
74 | unsigned long flags; | 74 | unsigned long flags; |
75 | unsigned i; | 75 | unsigned i; |
76 | int vpos, hpos, stat, min_udelay; | ||
77 | struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; | ||
76 | 78 | ||
77 | amdgpu_flip_wait_fence(adev, &work->excl); | 79 | amdgpu_flip_wait_fence(adev, &work->excl); |
78 | for (i = 0; i < work->shared_count; ++i) | 80 | for (i = 0; i < work->shared_count; ++i) |
@@ -81,6 +83,41 @@ static void amdgpu_flip_work_func(struct work_struct *__work) | |||
81 | /* We borrow the event spin lock for protecting flip_status */ | 83 | /* We borrow the event spin lock for protecting flip_status */ |
82 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | 84 | spin_lock_irqsave(&crtc->dev->event_lock, flags); |
83 | 85 | ||
86 | /* If this happens to execute within the "virtually extended" vblank | ||
87 | * interval before the start of the real vblank interval then it needs | ||
88 | * to delay programming the mmio flip until the real vblank is entered. | ||
89 | * This prevents completing a flip too early due to the way we fudge | ||
90 | * our vblank counter and vblank timestamps in order to work around the | ||
91 | * problem that the hw fires vblank interrupts before actual start of | ||
92 | * vblank (when line buffer refilling is done for a frame). It | ||
93 | * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for | ||
94 | * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts. | ||
95 | * | ||
96 | * In practice this won't execute very often unless on very fast | ||
97 | * machines because the time window for this to happen is very small. | ||
98 | */ | ||
99 | for (;;) { | ||
100 | /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank | ||
101 | * start in hpos, and to the "fudged earlier" vblank start in | ||
102 | * vpos. | ||
103 | */ | ||
104 | stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, | ||
105 | GET_DISTANCE_TO_VBLANKSTART, | ||
106 | &vpos, &hpos, NULL, NULL, | ||
107 | &crtc->hwmode); | ||
108 | |||
109 | if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != | ||
110 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) || | ||
111 | !(vpos >= 0 && hpos <= 0)) | ||
112 | break; | ||
113 | |||
114 | /* Sleep at least until estimated real start of hw vblank */ | ||
115 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | ||
116 | min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); | ||
117 | usleep_range(min_udelay, 2 * min_udelay); | ||
118 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | ||
119 | }; | ||
120 | |||
84 | /* do the flip (mmio) */ | 121 | /* do the flip (mmio) */ |
85 | adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); | 122 | adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); |
86 | /* set the flip status */ | 123 | /* set the flip status */ |
@@ -109,7 +146,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work) | |||
109 | } else | 146 | } else |
110 | DRM_ERROR("failed to reserve buffer after flip\n"); | 147 | DRM_ERROR("failed to reserve buffer after flip\n"); |
111 | 148 | ||
112 | drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); | 149 | amdgpu_bo_unref(&work->old_rbo); |
113 | kfree(work->shared); | 150 | kfree(work->shared); |
114 | kfree(work); | 151 | kfree(work); |
115 | } | 152 | } |
@@ -148,8 +185,8 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc, | |||
148 | obj = old_amdgpu_fb->obj; | 185 | obj = old_amdgpu_fb->obj; |
149 | 186 | ||
150 | /* take a reference to the old object */ | 187 | /* take a reference to the old object */ |
151 | drm_gem_object_reference(obj); | ||
152 | work->old_rbo = gem_to_amdgpu_bo(obj); | 188 | work->old_rbo = gem_to_amdgpu_bo(obj); |
189 | amdgpu_bo_ref(work->old_rbo); | ||
153 | 190 | ||
154 | new_amdgpu_fb = to_amdgpu_framebuffer(fb); | 191 | new_amdgpu_fb = to_amdgpu_framebuffer(fb); |
155 | obj = new_amdgpu_fb->obj; | 192 | obj = new_amdgpu_fb->obj; |
@@ -222,7 +259,7 @@ pflip_cleanup: | |||
222 | amdgpu_bo_unreserve(new_rbo); | 259 | amdgpu_bo_unreserve(new_rbo); |
223 | 260 | ||
224 | cleanup: | 261 | cleanup: |
225 | drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); | 262 | amdgpu_bo_unref(&work->old_rbo); |
226 | fence_put(work->excl); | 263 | fence_put(work->excl); |
227 | for (i = 0; i < work->shared_count; ++i) | 264 | for (i = 0; i < work->shared_count; ++i) |
228 | fence_put(work->shared[i]); | 265 | fence_put(work->shared[i]); |
@@ -712,6 +749,15 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
712 | * \param dev Device to query. | 749 | * \param dev Device to query. |
713 | * \param pipe Crtc to query. | 750 | * \param pipe Crtc to query. |
714 | * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). | 751 | * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). |
752 | * For driver internal use only also supports these flags: | ||
753 | * | ||
754 | * USE_REAL_VBLANKSTART to use the real start of vblank instead | ||
755 | * of a fudged earlier start of vblank. | ||
756 | * | ||
757 | * GET_DISTANCE_TO_VBLANKSTART to return distance to the | ||
758 | * fudged earlier start of vblank in *vpos and the distance | ||
759 | * to true start of vblank in *hpos. | ||
760 | * | ||
715 | * \param *vpos Location where vertical scanout position should be stored. | 761 | * \param *vpos Location where vertical scanout position should be stored. |
716 | * \param *hpos Location where horizontal scanout position should go. | 762 | * \param *hpos Location where horizontal scanout position should go. |
717 | * \param *stime Target location for timestamp taken immediately before | 763 | * \param *stime Target location for timestamp taken immediately before |
@@ -776,10 +822,40 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, | |||
776 | vbl_end = 0; | 822 | vbl_end = 0; |
777 | } | 823 | } |
778 | 824 | ||
825 | /* Called from driver internal vblank counter query code? */ | ||
826 | if (flags & GET_DISTANCE_TO_VBLANKSTART) { | ||
827 | /* Caller wants distance from real vbl_start in *hpos */ | ||
828 | *hpos = *vpos - vbl_start; | ||
829 | } | ||
830 | |||
831 | /* Fudge vblank to start a few scanlines earlier to handle the | ||
832 | * problem that vblank irqs fire a few scanlines before start | ||
833 | * of vblank. Some driver internal callers need the true vblank | ||
834 | * start to be used and signal this via the USE_REAL_VBLANKSTART flag. | ||
835 | * | ||
836 | * The cause of the "early" vblank irq is that the irq is triggered | ||
837 | * by the line buffer logic when the line buffer read position enters | ||
838 | * the vblank, whereas our crtc scanout position naturally lags the | ||
839 | * line buffer read position. | ||
840 | */ | ||
841 | if (!(flags & USE_REAL_VBLANKSTART)) | ||
842 | vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines; | ||
843 | |||
779 | /* Test scanout position against vblank region. */ | 844 | /* Test scanout position against vblank region. */ |
780 | if ((*vpos < vbl_start) && (*vpos >= vbl_end)) | 845 | if ((*vpos < vbl_start) && (*vpos >= vbl_end)) |
781 | in_vbl = false; | 846 | in_vbl = false; |
782 | 847 | ||
848 | /* In vblank? */ | ||
849 | if (in_vbl) | ||
850 | ret |= DRM_SCANOUTPOS_IN_VBLANK; | ||
851 | |||
852 | /* Called from driver internal vblank counter query code? */ | ||
853 | if (flags & GET_DISTANCE_TO_VBLANKSTART) { | ||
854 | /* Caller wants distance from fudged earlier vbl_start */ | ||
855 | *vpos -= vbl_start; | ||
856 | return ret; | ||
857 | } | ||
858 | |||
783 | /* Check if inside vblank area and apply corrective offsets: | 859 | /* Check if inside vblank area and apply corrective offsets: |
784 | * vpos will then be >=0 in video scanout area, but negative | 860 | * vpos will then be >=0 in video scanout area, but negative |
785 | * within vblank area, counting down the number of lines until | 861 | * within vblank area, counting down the number of lines until |
@@ -795,32 +871,6 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, | |||
795 | /* Correct for shifted end of vbl at vbl_end. */ | 871 | /* Correct for shifted end of vbl at vbl_end. */ |
796 | *vpos = *vpos - vbl_end; | 872 | *vpos = *vpos - vbl_end; |
797 | 873 | ||
798 | /* In vblank? */ | ||
799 | if (in_vbl) | ||
800 | ret |= DRM_SCANOUTPOS_IN_VBLANK; | ||
801 | |||
802 | /* Is vpos outside nominal vblank area, but less than | ||
803 | * 1/100 of a frame height away from start of vblank? | ||
804 | * If so, assume this isn't a massively delayed vblank | ||
805 | * interrupt, but a vblank interrupt that fired a few | ||
806 | * microseconds before true start of vblank. Compensate | ||
807 | * by adding a full frame duration to the final timestamp. | ||
808 | * Happens, e.g., on ATI R500, R600. | ||
809 | * | ||
810 | * We only do this if DRM_CALLED_FROM_VBLIRQ. | ||
811 | */ | ||
812 | if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) { | ||
813 | vbl_start = mode->crtc_vdisplay; | ||
814 | vtotal = mode->crtc_vtotal; | ||
815 | |||
816 | if (vbl_start - *vpos < vtotal / 100) { | ||
817 | *vpos -= vtotal; | ||
818 | |||
819 | /* Signal this correction as "applied". */ | ||
820 | ret |= 0x8; | ||
821 | } | ||
822 | } | ||
823 | |||
824 | return ret; | 874 | return ret; |
825 | } | 875 | } |
826 | 876 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index fc32fc01a64b..f6ea4b43a60c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | |||
@@ -235,8 +235,9 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, | |||
235 | AMDGPU_GEM_USERPTR_REGISTER)) | 235 | AMDGPU_GEM_USERPTR_REGISTER)) |
236 | return -EINVAL; | 236 | return -EINVAL; |
237 | 237 | ||
238 | if (!(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) || | 238 | if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) && ( |
239 | !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) { | 239 | !(args->flags & AMDGPU_GEM_USERPTR_ANONONLY) || |
240 | !(args->flags & AMDGPU_GEM_USERPTR_REGISTER))) { | ||
240 | 241 | ||
241 | /* if we want to write to it we must require anonymous | 242 | /* if we want to write to it we must require anonymous |
242 | memory and install a MMU notifier */ | 243 | memory and install a MMU notifier */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 1618e2294a16..e23843f4d877 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
@@ -611,13 +611,59 @@ void amdgpu_driver_preclose_kms(struct drm_device *dev, | |||
611 | u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe) | 611 | u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe) |
612 | { | 612 | { |
613 | struct amdgpu_device *adev = dev->dev_private; | 613 | struct amdgpu_device *adev = dev->dev_private; |
614 | int vpos, hpos, stat; | ||
615 | u32 count; | ||
614 | 616 | ||
615 | if (pipe >= adev->mode_info.num_crtc) { | 617 | if (pipe >= adev->mode_info.num_crtc) { |
616 | DRM_ERROR("Invalid crtc %u\n", pipe); | 618 | DRM_ERROR("Invalid crtc %u\n", pipe); |
617 | return -EINVAL; | 619 | return -EINVAL; |
618 | } | 620 | } |
619 | 621 | ||
620 | return amdgpu_display_vblank_get_counter(adev, pipe); | 622 | /* The hw increments its frame counter at start of vsync, not at start |
623 | * of vblank, as is required by DRM core vblank counter handling. | ||
624 | * Cook the hw count here to make it appear to the caller as if it | ||
625 | * incremented at start of vblank. We measure distance to start of | ||
626 | * vblank in vpos. vpos therefore will be >= 0 between start of vblank | ||
627 | * and start of vsync, so vpos >= 0 means to bump the hw frame counter | ||
628 | * result by 1 to give the proper appearance to caller. | ||
629 | */ | ||
630 | if (adev->mode_info.crtcs[pipe]) { | ||
631 | /* Repeat readout if needed to provide stable result if | ||
632 | * we cross start of vsync during the queries. | ||
633 | */ | ||
634 | do { | ||
635 | count = amdgpu_display_vblank_get_counter(adev, pipe); | ||
636 | /* Ask amdgpu_get_crtc_scanoutpos to return vpos as | ||
637 | * distance to start of vblank, instead of regular | ||
638 | * vertical scanout pos. | ||
639 | */ | ||
640 | stat = amdgpu_get_crtc_scanoutpos( | ||
641 | dev, pipe, GET_DISTANCE_TO_VBLANKSTART, | ||
642 | &vpos, &hpos, NULL, NULL, | ||
643 | &adev->mode_info.crtcs[pipe]->base.hwmode); | ||
644 | } while (count != amdgpu_display_vblank_get_counter(adev, pipe)); | ||
645 | |||
646 | if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != | ||
647 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) { | ||
648 | DRM_DEBUG_VBL("Query failed! stat %d\n", stat); | ||
649 | } else { | ||
650 | DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n", | ||
651 | pipe, vpos); | ||
652 | |||
653 | /* Bump counter if we are at >= leading edge of vblank, | ||
654 | * but before vsync where vpos would turn negative and | ||
655 | * the hw counter really increments. | ||
656 | */ | ||
657 | if (vpos >= 0) | ||
658 | count++; | ||
659 | } | ||
660 | } else { | ||
661 | /* Fallback to use value as is. */ | ||
662 | count = amdgpu_display_vblank_get_counter(adev, pipe); | ||
663 | DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n"); | ||
664 | } | ||
665 | |||
666 | return count; | ||
621 | } | 667 | } |
622 | 668 | ||
623 | /** | 669 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index de4529969778..a53d756672fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | |||
@@ -407,6 +407,7 @@ struct amdgpu_crtc { | |||
407 | u32 line_time; | 407 | u32 line_time; |
408 | u32 wm_low; | 408 | u32 wm_low; |
409 | u32 wm_high; | 409 | u32 wm_high; |
410 | u32 lb_vblank_lead_lines; | ||
410 | struct drm_display_mode hw_mode; | 411 | struct drm_display_mode hw_mode; |
411 | }; | 412 | }; |
412 | 413 | ||
@@ -528,6 +529,10 @@ struct amdgpu_framebuffer { | |||
528 | #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ | 529 | #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ |
529 | ((em) == ATOM_ENCODER_MODE_DP_MST)) | 530 | ((em) == ATOM_ENCODER_MODE_DP_MST)) |
530 | 531 | ||
532 | /* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */ | ||
533 | #define USE_REAL_VBLANKSTART (1 << 30) | ||
534 | #define GET_DISTANCE_TO_VBLANKSTART (1 << 31) | ||
535 | |||
531 | void amdgpu_link_encoder_connector(struct drm_device *dev); | 536 | void amdgpu_link_encoder_connector(struct drm_device *dev); |
532 | 537 | ||
533 | struct drm_connector * | 538 | struct drm_connector * |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 0d524384ff79..c3ce103b6a33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | |||
@@ -100,6 +100,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) | |||
100 | list_del_init(&bo->list); | 100 | list_del_init(&bo->list); |
101 | mutex_unlock(&bo->adev->gem.mutex); | 101 | mutex_unlock(&bo->adev->gem.mutex); |
102 | drm_gem_object_release(&bo->gem_base); | 102 | drm_gem_object_release(&bo->gem_base); |
103 | amdgpu_bo_unref(&bo->parent); | ||
103 | kfree(bo->metadata); | 104 | kfree(bo->metadata); |
104 | kfree(bo); | 105 | kfree(bo); |
105 | } | 106 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index d4bac5f49939..8a1752ff3d8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -587,9 +587,13 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, | |||
587 | uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); | 587 | uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem); |
588 | int r; | 588 | int r; |
589 | 589 | ||
590 | if (gtt->userptr) | 590 | if (gtt->userptr) { |
591 | amdgpu_ttm_tt_pin_userptr(ttm); | 591 | r = amdgpu_ttm_tt_pin_userptr(ttm); |
592 | 592 | if (r) { | |
593 | DRM_ERROR("failed to pin userptr\n"); | ||
594 | return r; | ||
595 | } | ||
596 | } | ||
593 | gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); | 597 | gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); |
594 | if (!ttm->num_pages) { | 598 | if (!ttm->num_pages) { |
595 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", | 599 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", |
@@ -797,11 +801,12 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, | |||
797 | if (mem && mem->mem_type != TTM_PL_SYSTEM) | 801 | if (mem && mem->mem_type != TTM_PL_SYSTEM) |
798 | flags |= AMDGPU_PTE_VALID; | 802 | flags |= AMDGPU_PTE_VALID; |
799 | 803 | ||
800 | if (mem && mem->mem_type == TTM_PL_TT) | 804 | if (mem && mem->mem_type == TTM_PL_TT) { |
801 | flags |= AMDGPU_PTE_SYSTEM; | 805 | flags |= AMDGPU_PTE_SYSTEM; |
802 | 806 | ||
803 | if (!ttm || ttm->caching_state == tt_cached) | 807 | if (ttm->caching_state == tt_cached) |
804 | flags |= AMDGPU_PTE_SNOOPED; | 808 | flags |= AMDGPU_PTE_SNOOPED; |
809 | } | ||
805 | 810 | ||
806 | if (adev->asic_type >= CHIP_TOPAZ) | 811 | if (adev->asic_type >= CHIP_TOPAZ) |
807 | flags |= AMDGPU_PTE_EXECUTABLE; | 812 | flags |= AMDGPU_PTE_EXECUTABLE; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ae037e5b6ad0..b53d273eb7a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -885,17 +885,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | |||
885 | struct amdgpu_bo_va_mapping *mapping; | 885 | struct amdgpu_bo_va_mapping *mapping; |
886 | int r; | 886 | int r; |
887 | 887 | ||
888 | spin_lock(&vm->freed_lock); | ||
888 | while (!list_empty(&vm->freed)) { | 889 | while (!list_empty(&vm->freed)) { |
889 | mapping = list_first_entry(&vm->freed, | 890 | mapping = list_first_entry(&vm->freed, |
890 | struct amdgpu_bo_va_mapping, list); | 891 | struct amdgpu_bo_va_mapping, list); |
891 | list_del(&mapping->list); | 892 | list_del(&mapping->list); |
892 | 893 | spin_unlock(&vm->freed_lock); | |
893 | r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL); | 894 | r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL); |
894 | kfree(mapping); | 895 | kfree(mapping); |
895 | if (r) | 896 | if (r) |
896 | return r; | 897 | return r; |
897 | 898 | ||
899 | spin_lock(&vm->freed_lock); | ||
898 | } | 900 | } |
901 | spin_unlock(&vm->freed_lock); | ||
902 | |||
899 | return 0; | 903 | return 0; |
900 | 904 | ||
901 | } | 905 | } |
@@ -1079,6 +1083,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, | |||
1079 | if (r) | 1083 | if (r) |
1080 | goto error_free; | 1084 | goto error_free; |
1081 | 1085 | ||
1086 | /* Keep a reference to the page table to avoid freeing | ||
1087 | * them up in the wrong order. | ||
1088 | */ | ||
1089 | pt->parent = amdgpu_bo_ref(vm->page_directory); | ||
1090 | |||
1082 | r = amdgpu_vm_clear_bo(adev, pt); | 1091 | r = amdgpu_vm_clear_bo(adev, pt); |
1083 | if (r) { | 1092 | if (r) { |
1084 | amdgpu_bo_unref(&pt); | 1093 | amdgpu_bo_unref(&pt); |
@@ -1150,10 +1159,13 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, | |||
1150 | spin_unlock(&vm->it_lock); | 1159 | spin_unlock(&vm->it_lock); |
1151 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); | 1160 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
1152 | 1161 | ||
1153 | if (valid) | 1162 | if (valid) { |
1163 | spin_lock(&vm->freed_lock); | ||
1154 | list_add(&mapping->list, &vm->freed); | 1164 | list_add(&mapping->list, &vm->freed); |
1155 | else | 1165 | spin_unlock(&vm->freed_lock); |
1166 | } else { | ||
1156 | kfree(mapping); | 1167 | kfree(mapping); |
1168 | } | ||
1157 | 1169 | ||
1158 | return 0; | 1170 | return 0; |
1159 | } | 1171 | } |
@@ -1186,7 +1198,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, | |||
1186 | interval_tree_remove(&mapping->it, &vm->va); | 1198 | interval_tree_remove(&mapping->it, &vm->va); |
1187 | spin_unlock(&vm->it_lock); | 1199 | spin_unlock(&vm->it_lock); |
1188 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); | 1200 | trace_amdgpu_vm_bo_unmap(bo_va, mapping); |
1201 | spin_lock(&vm->freed_lock); | ||
1189 | list_add(&mapping->list, &vm->freed); | 1202 | list_add(&mapping->list, &vm->freed); |
1203 | spin_unlock(&vm->freed_lock); | ||
1190 | } | 1204 | } |
1191 | list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { | 1205 | list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) { |
1192 | list_del(&mapping->list); | 1206 | list_del(&mapping->list); |
@@ -1247,6 +1261,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) | |||
1247 | INIT_LIST_HEAD(&vm->cleared); | 1261 | INIT_LIST_HEAD(&vm->cleared); |
1248 | INIT_LIST_HEAD(&vm->freed); | 1262 | INIT_LIST_HEAD(&vm->freed); |
1249 | spin_lock_init(&vm->it_lock); | 1263 | spin_lock_init(&vm->it_lock); |
1264 | spin_lock_init(&vm->freed_lock); | ||
1250 | pd_size = amdgpu_vm_directory_size(adev); | 1265 | pd_size = amdgpu_vm_directory_size(adev); |
1251 | pd_entries = amdgpu_vm_num_pdes(adev); | 1266 | pd_entries = amdgpu_vm_num_pdes(adev); |
1252 | 1267 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index cb0f7747e3dc..4dcc8fba5792 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | |||
@@ -1250,7 +1250,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, | |||
1250 | u32 pixel_period; | 1250 | u32 pixel_period; |
1251 | u32 line_time = 0; | 1251 | u32 line_time = 0; |
1252 | u32 latency_watermark_a = 0, latency_watermark_b = 0; | 1252 | u32 latency_watermark_a = 0, latency_watermark_b = 0; |
1253 | u32 tmp, wm_mask; | 1253 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; |
1254 | 1254 | ||
1255 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | 1255 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
1256 | pixel_period = 1000000 / (u32)mode->clock; | 1256 | pixel_period = 1000000 / (u32)mode->clock; |
@@ -1333,6 +1333,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, | |||
1333 | (adev->mode_info.disp_priority == 2)) { | 1333 | (adev->mode_info.disp_priority == 2)) { |
1334 | DRM_DEBUG_KMS("force priority to high\n"); | 1334 | DRM_DEBUG_KMS("force priority to high\n"); |
1335 | } | 1335 | } |
1336 | lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); | ||
1336 | } | 1337 | } |
1337 | 1338 | ||
1338 | /* select wm A */ | 1339 | /* select wm A */ |
@@ -1357,6 +1358,8 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, | |||
1357 | amdgpu_crtc->line_time = line_time; | 1358 | amdgpu_crtc->line_time = line_time; |
1358 | amdgpu_crtc->wm_high = latency_watermark_a; | 1359 | amdgpu_crtc->wm_high = latency_watermark_a; |
1359 | amdgpu_crtc->wm_low = latency_watermark_b; | 1360 | amdgpu_crtc->wm_low = latency_watermark_b; |
1361 | /* Save number of lines the linebuffer leads before the scanout */ | ||
1362 | amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; | ||
1360 | } | 1363 | } |
1361 | 1364 | ||
1362 | /** | 1365 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 5af3721851d6..8f1e51128b33 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | |||
@@ -1238,7 +1238,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, | |||
1238 | u32 pixel_period; | 1238 | u32 pixel_period; |
1239 | u32 line_time = 0; | 1239 | u32 line_time = 0; |
1240 | u32 latency_watermark_a = 0, latency_watermark_b = 0; | 1240 | u32 latency_watermark_a = 0, latency_watermark_b = 0; |
1241 | u32 tmp, wm_mask; | 1241 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; |
1242 | 1242 | ||
1243 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | 1243 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
1244 | pixel_period = 1000000 / (u32)mode->clock; | 1244 | pixel_period = 1000000 / (u32)mode->clock; |
@@ -1321,6 +1321,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, | |||
1321 | (adev->mode_info.disp_priority == 2)) { | 1321 | (adev->mode_info.disp_priority == 2)) { |
1322 | DRM_DEBUG_KMS("force priority to high\n"); | 1322 | DRM_DEBUG_KMS("force priority to high\n"); |
1323 | } | 1323 | } |
1324 | lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); | ||
1324 | } | 1325 | } |
1325 | 1326 | ||
1326 | /* select wm A */ | 1327 | /* select wm A */ |
@@ -1345,6 +1346,8 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, | |||
1345 | amdgpu_crtc->line_time = line_time; | 1346 | amdgpu_crtc->line_time = line_time; |
1346 | amdgpu_crtc->wm_high = latency_watermark_a; | 1347 | amdgpu_crtc->wm_high = latency_watermark_a; |
1347 | amdgpu_crtc->wm_low = latency_watermark_b; | 1348 | amdgpu_crtc->wm_low = latency_watermark_b; |
1349 | /* Save number of lines the linebuffer leads before the scanout */ | ||
1350 | amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; | ||
1348 | } | 1351 | } |
1349 | 1352 | ||
1350 | /** | 1353 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 4f7b49a6dc50..42d954dc436d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | |||
@@ -1193,7 +1193,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, | |||
1193 | u32 pixel_period; | 1193 | u32 pixel_period; |
1194 | u32 line_time = 0; | 1194 | u32 line_time = 0; |
1195 | u32 latency_watermark_a = 0, latency_watermark_b = 0; | 1195 | u32 latency_watermark_a = 0, latency_watermark_b = 0; |
1196 | u32 tmp, wm_mask; | 1196 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; |
1197 | 1197 | ||
1198 | if (amdgpu_crtc->base.enabled && num_heads && mode) { | 1198 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
1199 | pixel_period = 1000000 / (u32)mode->clock; | 1199 | pixel_period = 1000000 / (u32)mode->clock; |
@@ -1276,6 +1276,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, | |||
1276 | (adev->mode_info.disp_priority == 2)) { | 1276 | (adev->mode_info.disp_priority == 2)) { |
1277 | DRM_DEBUG_KMS("force priority to high\n"); | 1277 | DRM_DEBUG_KMS("force priority to high\n"); |
1278 | } | 1278 | } |
1279 | lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); | ||
1279 | } | 1280 | } |
1280 | 1281 | ||
1281 | /* select wm A */ | 1282 | /* select wm A */ |
@@ -1302,6 +1303,8 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, | |||
1302 | amdgpu_crtc->line_time = line_time; | 1303 | amdgpu_crtc->line_time = line_time; |
1303 | amdgpu_crtc->wm_high = latency_watermark_a; | 1304 | amdgpu_crtc->wm_high = latency_watermark_a; |
1304 | amdgpu_crtc->wm_low = latency_watermark_b; | 1305 | amdgpu_crtc->wm_low = latency_watermark_b; |
1306 | /* Save number of lines the linebuffer leads before the scanout */ | ||
1307 | amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; | ||
1305 | } | 1308 | } |
1306 | 1309 | ||
1307 | /** | 1310 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 7427d8cd4c43..ed8abb58a785 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | |||
@@ -513,7 +513,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) | |||
513 | WREG32(mmVM_L2_CNTL3, tmp); | 513 | WREG32(mmVM_L2_CNTL3, tmp); |
514 | /* setup context0 */ | 514 | /* setup context0 */ |
515 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); | 515 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); |
516 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1); | 516 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); |
517 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); | 517 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); |
518 | WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, | 518 | WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, |
519 | (u32)(adev->dummy_page.addr >> 12)); | 519 | (u32)(adev->dummy_page.addr >> 12)); |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index cb0e50ebb528..d39028440814 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | |||
@@ -657,7 +657,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) | |||
657 | WREG32(mmVM_L2_CNTL4, tmp); | 657 | WREG32(mmVM_L2_CNTL4, tmp); |
658 | /* setup context0 */ | 658 | /* setup context0 */ |
659 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); | 659 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12); |
660 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1); | 660 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12); |
661 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); | 661 | WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12); |
662 | WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, | 662 | WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, |
663 | (u32)(adev->dummy_page.addr >> 12)); | 663 | (u32)(adev->dummy_page.addr >> 12)); |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 651129f2ec1d..3a4820e863ec 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
@@ -288,6 +288,7 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity) | |||
288 | */ | 288 | */ |
289 | static bool amd_sched_entity_in(struct amd_sched_job *sched_job) | 289 | static bool amd_sched_entity_in(struct amd_sched_job *sched_job) |
290 | { | 290 | { |
291 | struct amd_gpu_scheduler *sched = sched_job->sched; | ||
291 | struct amd_sched_entity *entity = sched_job->s_entity; | 292 | struct amd_sched_entity *entity = sched_job->s_entity; |
292 | bool added, first = false; | 293 | bool added, first = false; |
293 | 294 | ||
@@ -302,7 +303,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job) | |||
302 | 303 | ||
303 | /* first job wakes up scheduler */ | 304 | /* first job wakes up scheduler */ |
304 | if (first) | 305 | if (first) |
305 | amd_sched_wakeup(sched_job->sched); | 306 | amd_sched_wakeup(sched); |
306 | 307 | ||
307 | return added; | 308 | return added; |
308 | } | 309 | } |
@@ -318,9 +319,9 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job) | |||
318 | { | 319 | { |
319 | struct amd_sched_entity *entity = sched_job->s_entity; | 320 | struct amd_sched_entity *entity = sched_job->s_entity; |
320 | 321 | ||
322 | trace_amd_sched_job(sched_job); | ||
321 | wait_event(entity->sched->job_scheduled, | 323 | wait_event(entity->sched->job_scheduled, |
322 | amd_sched_entity_in(sched_job)); | 324 | amd_sched_entity_in(sched_job)); |
323 | trace_amd_sched_job(sched_job); | ||
324 | } | 325 | } |
325 | 326 | ||
326 | /** | 327 | /** |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 9362609df38a..7dd6728dd092 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
@@ -160,6 +160,11 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, | |||
160 | goto out_unlock; | 160 | goto out_unlock; |
161 | } | 161 | } |
162 | 162 | ||
163 | if (!file_priv->allowed_master) { | ||
164 | ret = drm_new_set_master(dev, file_priv); | ||
165 | goto out_unlock; | ||
166 | } | ||
167 | |||
163 | file_priv->minor->master = drm_master_get(file_priv->master); | 168 | file_priv->minor->master = drm_master_get(file_priv->master); |
164 | file_priv->is_master = 1; | 169 | file_priv->is_master = 1; |
165 | if (dev->driver->master_set) { | 170 | if (dev->driver->master_set) { |
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 81df9ae95e2e..1ea8790e5090 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
@@ -126,6 +126,60 @@ static int drm_cpu_valid(void) | |||
126 | } | 126 | } |
127 | 127 | ||
128 | /** | 128 | /** |
129 | * drm_new_set_master - Allocate a new master object and become master for the | ||
130 | * associated master realm. | ||
131 | * | ||
132 | * @dev: The associated device. | ||
133 | * @fpriv: File private identifying the client. | ||
134 | * | ||
135 | * This function must be called with dev::struct_mutex held. | ||
136 | * Returns negative error code on failure. Zero on success. | ||
137 | */ | ||
138 | int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv) | ||
139 | { | ||
140 | struct drm_master *old_master; | ||
141 | int ret; | ||
142 | |||
143 | lockdep_assert_held_once(&dev->master_mutex); | ||
144 | |||
145 | /* create a new master */ | ||
146 | fpriv->minor->master = drm_master_create(fpriv->minor); | ||
147 | if (!fpriv->minor->master) | ||
148 | return -ENOMEM; | ||
149 | |||
150 | /* take another reference for the copy in the local file priv */ | ||
151 | old_master = fpriv->master; | ||
152 | fpriv->master = drm_master_get(fpriv->minor->master); | ||
153 | |||
154 | if (dev->driver->master_create) { | ||
155 | ret = dev->driver->master_create(dev, fpriv->master); | ||
156 | if (ret) | ||
157 | goto out_err; | ||
158 | } | ||
159 | if (dev->driver->master_set) { | ||
160 | ret = dev->driver->master_set(dev, fpriv, true); | ||
161 | if (ret) | ||
162 | goto out_err; | ||
163 | } | ||
164 | |||
165 | fpriv->is_master = 1; | ||
166 | fpriv->allowed_master = 1; | ||
167 | fpriv->authenticated = 1; | ||
168 | if (old_master) | ||
169 | drm_master_put(&old_master); | ||
170 | |||
171 | return 0; | ||
172 | |||
173 | out_err: | ||
174 | /* drop both references and restore old master on failure */ | ||
175 | drm_master_put(&fpriv->minor->master); | ||
176 | drm_master_put(&fpriv->master); | ||
177 | fpriv->master = old_master; | ||
178 | |||
179 | return ret; | ||
180 | } | ||
181 | |||
182 | /** | ||
129 | * Called whenever a process opens /dev/drm. | 183 | * Called whenever a process opens /dev/drm. |
130 | * | 184 | * |
131 | * \param filp file pointer. | 185 | * \param filp file pointer. |
@@ -191,35 +245,9 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) | |||
191 | mutex_lock(&dev->master_mutex); | 245 | mutex_lock(&dev->master_mutex); |
192 | if (drm_is_primary_client(priv) && !priv->minor->master) { | 246 | if (drm_is_primary_client(priv) && !priv->minor->master) { |
193 | /* create a new master */ | 247 | /* create a new master */ |
194 | priv->minor->master = drm_master_create(priv->minor); | 248 | ret = drm_new_set_master(dev, priv); |
195 | if (!priv->minor->master) { | 249 | if (ret) |
196 | ret = -ENOMEM; | ||
197 | goto out_close; | 250 | goto out_close; |
198 | } | ||
199 | |||
200 | priv->is_master = 1; | ||
201 | /* take another reference for the copy in the local file priv */ | ||
202 | priv->master = drm_master_get(priv->minor->master); | ||
203 | priv->authenticated = 1; | ||
204 | |||
205 | if (dev->driver->master_create) { | ||
206 | ret = dev->driver->master_create(dev, priv->master); | ||
207 | if (ret) { | ||
208 | /* drop both references if this fails */ | ||
209 | drm_master_put(&priv->minor->master); | ||
210 | drm_master_put(&priv->master); | ||
211 | goto out_close; | ||
212 | } | ||
213 | } | ||
214 | if (dev->driver->master_set) { | ||
215 | ret = dev->driver->master_set(dev, priv, true); | ||
216 | if (ret) { | ||
217 | /* drop both references if this fails */ | ||
218 | drm_master_put(&priv->minor->master); | ||
219 | drm_master_put(&priv->master); | ||
220 | goto out_close; | ||
221 | } | ||
222 | } | ||
223 | } else if (drm_is_primary_client(priv)) { | 251 | } else if (drm_is_primary_client(priv)) { |
224 | /* get a reference to the master */ | 252 | /* get a reference to the master */ |
225 | priv->master = drm_master_get(priv->minor->master); | 253 | priv->master = drm_master_get(priv->minor->master); |
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 2151ea551d3b..607f493ae801 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -980,7 +980,8 @@ static void send_vblank_event(struct drm_device *dev, | |||
980 | struct drm_pending_vblank_event *e, | 980 | struct drm_pending_vblank_event *e, |
981 | unsigned long seq, struct timeval *now) | 981 | unsigned long seq, struct timeval *now) |
982 | { | 982 | { |
983 | WARN_ON_SMP(!spin_is_locked(&dev->event_lock)); | 983 | assert_spin_locked(&dev->event_lock); |
984 | |||
984 | e->event.sequence = seq; | 985 | e->event.sequence = seq; |
985 | e->event.tv_sec = now->tv_sec; | 986 | e->event.tv_sec = now->tv_sec; |
986 | e->event.tv_usec = now->tv_usec; | 987 | e->event.tv_usec = now->tv_usec; |
@@ -993,6 +994,57 @@ static void send_vblank_event(struct drm_device *dev, | |||
993 | } | 994 | } |
994 | 995 | ||
995 | /** | 996 | /** |
997 | * drm_arm_vblank_event - arm vblank event after pageflip | ||
998 | * @dev: DRM device | ||
999 | * @pipe: CRTC index | ||
1000 | * @e: the event to prepare to send | ||
1001 | * | ||
1002 | * A lot of drivers need to generate vblank events for the very next vblank | ||
1003 | * interrupt. For example when the page flip interrupt happens when the page | ||
1004 | * flip gets armed, but not when it actually executes within the next vblank | ||
1005 | * period. This helper function implements exactly the required vblank arming | ||
1006 | * behaviour. | ||
1007 | * | ||
1008 | * Caller must hold event lock. Caller must also hold a vblank reference for | ||
1009 | * the event @e, which will be dropped when the next vblank arrives. | ||
1010 | * | ||
1011 | * This is the legacy version of drm_crtc_arm_vblank_event(). | ||
1012 | */ | ||
1013 | void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe, | ||
1014 | struct drm_pending_vblank_event *e) | ||
1015 | { | ||
1016 | assert_spin_locked(&dev->event_lock); | ||
1017 | |||
1018 | e->pipe = pipe; | ||
1019 | e->event.sequence = drm_vblank_count(dev, pipe); | ||
1020 | list_add_tail(&e->base.link, &dev->vblank_event_list); | ||
1021 | } | ||
1022 | EXPORT_SYMBOL(drm_arm_vblank_event); | ||
1023 | |||
1024 | /** | ||
1025 | * drm_crtc_arm_vblank_event - arm vblank event after pageflip | ||
1026 | * @crtc: the source CRTC of the vblank event | ||
1027 | * @e: the event to send | ||
1028 | * | ||
1029 | * A lot of drivers need to generate vblank events for the very next vblank | ||
1030 | * interrupt. For example when the page flip interrupt happens when the page | ||
1031 | * flip gets armed, but not when it actually executes within the next vblank | ||
1032 | * period. This helper function implements exactly the required vblank arming | ||
1033 | * behaviour. | ||
1034 | * | ||
1035 | * Caller must hold event lock. Caller must also hold a vblank reference for | ||
1036 | * the event @e, which will be dropped when the next vblank arrives. | ||
1037 | * | ||
1038 | * This is the native KMS version of drm_arm_vblank_event(). | ||
1039 | */ | ||
1040 | void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, | ||
1041 | struct drm_pending_vblank_event *e) | ||
1042 | { | ||
1043 | drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e); | ||
1044 | } | ||
1045 | EXPORT_SYMBOL(drm_crtc_arm_vblank_event); | ||
1046 | |||
1047 | /** | ||
996 | * drm_send_vblank_event - helper to send vblank event after pageflip | 1048 | * drm_send_vblank_event - helper to send vblank event after pageflip |
997 | * @dev: DRM device | 1049 | * @dev: DRM device |
998 | * @pipe: CRTC index | 1050 | * @pipe: CRTC index |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 33adc8f8ab20..a6997a8a3eaa 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1210,8 +1210,16 @@ int __i915_wait_request(struct drm_i915_gem_request *req, | |||
1210 | if (i915_gem_request_completed(req, true)) | 1210 | if (i915_gem_request_completed(req, true)) |
1211 | return 0; | 1211 | return 0; |
1212 | 1212 | ||
1213 | timeout_expire = timeout ? | 1213 | timeout_expire = 0; |
1214 | jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0; | 1214 | if (timeout) { |
1215 | if (WARN_ON(*timeout < 0)) | ||
1216 | return -EINVAL; | ||
1217 | |||
1218 | if (*timeout == 0) | ||
1219 | return -ETIME; | ||
1220 | |||
1221 | timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout); | ||
1222 | } | ||
1215 | 1223 | ||
1216 | if (INTEL_INFO(dev_priv)->gen >= 6) | 1224 | if (INTEL_INFO(dev_priv)->gen >= 6) |
1217 | gen6_rps_boost(dev_priv, rps, req->emitted_jiffies); | 1225 | gen6_rps_boost(dev_priv, rps, req->emitted_jiffies); |
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c index b80d0456fe03..598198543dcd 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence.c +++ b/drivers/gpu/drm/i915/i915_gem_fence.c | |||
@@ -642,11 +642,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
642 | } | 642 | } |
643 | 643 | ||
644 | /* check for L-shaped memory aka modified enhanced addressing */ | 644 | /* check for L-shaped memory aka modified enhanced addressing */ |
645 | if (IS_GEN4(dev)) { | 645 | if (IS_GEN4(dev) && |
646 | uint32_t ddc2 = I915_READ(DCC2); | 646 | !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) { |
647 | 647 | swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | |
648 | if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) | 648 | swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
649 | dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES; | ||
650 | } | 649 | } |
651 | 650 | ||
652 | if (dcc == 0xffffffff) { | 651 | if (dcc == 0xffffffff) { |
@@ -675,16 +674,35 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
675 | * matching, which was the case for the swizzling required in | 674 | * matching, which was the case for the swizzling required in |
676 | * the table above, or from the 1-ch value being less than | 675 | * the table above, or from the 1-ch value being less than |
677 | * the minimum size of a rank. | 676 | * the minimum size of a rank. |
677 | * | ||
678 | * Reports indicate that the swizzling actually | ||
679 | * varies depending upon page placement inside the | ||
680 | * channels, i.e. we see swizzled pages where the | ||
681 | * banks of memory are paired and unswizzled on the | ||
682 | * uneven portion, so leave that as unknown. | ||
678 | */ | 683 | */ |
679 | if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) { | 684 | if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) { |
680 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; | ||
681 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | ||
682 | } else { | ||
683 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; | 685 | swizzle_x = I915_BIT_6_SWIZZLE_9_10; |
684 | swizzle_y = I915_BIT_6_SWIZZLE_9; | 686 | swizzle_y = I915_BIT_6_SWIZZLE_9; |
685 | } | 687 | } |
686 | } | 688 | } |
687 | 689 | ||
690 | if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN || | ||
691 | swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) { | ||
692 | /* Userspace likes to explode if it sees unknown swizzling, | ||
693 | * so lie. We will finish the lie when reporting through | ||
694 | * the get-tiling-ioctl by reporting the physical swizzle | ||
695 | * mode as unknown instead. | ||
696 | * | ||
697 | * As we don't strictly know what the swizzling is, it may be | ||
698 | * bit17 dependent, and so we need to also prevent the pages | ||
699 | * from being moved. | ||
700 | */ | ||
701 | dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES; | ||
702 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; | ||
703 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | ||
704 | } | ||
705 | |||
688 | dev_priv->mm.bit_6_swizzle_x = swizzle_x; | 706 | dev_priv->mm.bit_6_swizzle_x = swizzle_x; |
689 | dev_priv->mm.bit_6_swizzle_y = swizzle_y; | 707 | dev_priv->mm.bit_6_swizzle_y = swizzle_y; |
690 | } | 708 | } |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9228ec018e98..696f7543d264 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -12582,7 +12582,6 @@ intel_pipe_config_compare(struct drm_device *dev, | |||
12582 | if (INTEL_INFO(dev)->gen < 8) { | 12582 | if (INTEL_INFO(dev)->gen < 8) { |
12583 | PIPE_CONF_CHECK_M_N(dp_m_n); | 12583 | PIPE_CONF_CHECK_M_N(dp_m_n); |
12584 | 12584 | ||
12585 | PIPE_CONF_CHECK_I(has_drrs); | ||
12586 | if (current_config->has_drrs) | 12585 | if (current_config->has_drrs) |
12587 | PIPE_CONF_CHECK_M_N(dp_m2_n2); | 12586 | PIPE_CONF_CHECK_M_N(dp_m2_n2); |
12588 | } else | 12587 | } else |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index bec443a629da..e1ceff7ab265 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -4962,7 +4962,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) | |||
4962 | enum intel_display_power_domain power_domain; | 4962 | enum intel_display_power_domain power_domain; |
4963 | enum irqreturn ret = IRQ_NONE; | 4963 | enum irqreturn ret = IRQ_NONE; |
4964 | 4964 | ||
4965 | if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) | 4965 | if (intel_dig_port->base.type != INTEL_OUTPUT_EDP && |
4966 | intel_dig_port->base.type != INTEL_OUTPUT_HDMI) | ||
4966 | intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; | 4967 | intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; |
4967 | 4968 | ||
4968 | if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { | 4969 | if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { |
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 6faa735376ec..882cf3d4b7a8 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c | |||
@@ -64,8 +64,7 @@ static void imx_drm_driver_lastclose(struct drm_device *drm) | |||
64 | { | 64 | { |
65 | struct imx_drm_device *imxdrm = drm->dev_private; | 65 | struct imx_drm_device *imxdrm = drm->dev_private; |
66 | 66 | ||
67 | if (imxdrm->fbhelper) | 67 | drm_fbdev_cma_restore_mode(imxdrm->fbhelper); |
68 | drm_fbdev_cma_restore_mode(imxdrm->fbhelper); | ||
69 | } | 68 | } |
70 | 69 | ||
71 | static int imx_drm_driver_unload(struct drm_device *drm) | 70 | static int imx_drm_driver_unload(struct drm_device *drm) |
@@ -334,7 +333,7 @@ err_kms: | |||
334 | * imx_drm_add_crtc - add a new crtc | 333 | * imx_drm_add_crtc - add a new crtc |
335 | */ | 334 | */ |
336 | int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, | 335 | int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, |
337 | struct imx_drm_crtc **new_crtc, | 336 | struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane, |
338 | const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs, | 337 | const struct imx_drm_crtc_helper_funcs *imx_drm_helper_funcs, |
339 | struct device_node *port) | 338 | struct device_node *port) |
340 | { | 339 | { |
@@ -373,7 +372,7 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, | |||
373 | drm_crtc_helper_add(crtc, | 372 | drm_crtc_helper_add(crtc, |
374 | imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); | 373 | imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); |
375 | 374 | ||
376 | drm_crtc_init(drm, crtc, | 375 | drm_crtc_init_with_planes(drm, crtc, primary_plane, NULL, |
377 | imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); | 376 | imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs); |
378 | 377 | ||
379 | return 0; | 378 | return 0; |
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h index 28e776d8d9d2..83284b4d4be1 100644 --- a/drivers/gpu/drm/imx/imx-drm.h +++ b/drivers/gpu/drm/imx/imx-drm.h | |||
@@ -9,6 +9,7 @@ struct drm_display_mode; | |||
9 | struct drm_encoder; | 9 | struct drm_encoder; |
10 | struct drm_fbdev_cma; | 10 | struct drm_fbdev_cma; |
11 | struct drm_framebuffer; | 11 | struct drm_framebuffer; |
12 | struct drm_plane; | ||
12 | struct imx_drm_crtc; | 13 | struct imx_drm_crtc; |
13 | struct platform_device; | 14 | struct platform_device; |
14 | 15 | ||
@@ -24,7 +25,7 @@ struct imx_drm_crtc_helper_funcs { | |||
24 | }; | 25 | }; |
25 | 26 | ||
26 | int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, | 27 | int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, |
27 | struct imx_drm_crtc **new_crtc, | 28 | struct imx_drm_crtc **new_crtc, struct drm_plane *primary_plane, |
28 | const struct imx_drm_crtc_helper_funcs *imx_helper_funcs, | 29 | const struct imx_drm_crtc_helper_funcs *imx_helper_funcs, |
29 | struct device_node *port); | 30 | struct device_node *port); |
30 | int imx_drm_remove_crtc(struct imx_drm_crtc *); | 31 | int imx_drm_remove_crtc(struct imx_drm_crtc *); |
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index e671ad369416..f9597146dc67 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c | |||
@@ -721,6 +721,7 @@ static const struct of_device_id imx_tve_dt_ids[] = { | |||
721 | { .compatible = "fsl,imx53-tve", }, | 721 | { .compatible = "fsl,imx53-tve", }, |
722 | { /* sentinel */ } | 722 | { /* sentinel */ } |
723 | }; | 723 | }; |
724 | MODULE_DEVICE_TABLE(of, imx_tve_dt_ids); | ||
724 | 725 | ||
725 | static struct platform_driver imx_tve_driver = { | 726 | static struct platform_driver imx_tve_driver = { |
726 | .probe = imx_tve_probe, | 727 | .probe = imx_tve_probe, |
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 7bc8301fafff..4ab841eebee1 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c | |||
@@ -212,7 +212,8 @@ static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc) | |||
212 | 212 | ||
213 | spin_lock_irqsave(&drm->event_lock, flags); | 213 | spin_lock_irqsave(&drm->event_lock, flags); |
214 | if (ipu_crtc->page_flip_event) | 214 | if (ipu_crtc->page_flip_event) |
215 | drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event); | 215 | drm_crtc_send_vblank_event(&ipu_crtc->base, |
216 | ipu_crtc->page_flip_event); | ||
216 | ipu_crtc->page_flip_event = NULL; | 217 | ipu_crtc->page_flip_event = NULL; |
217 | imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); | 218 | imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc); |
218 | spin_unlock_irqrestore(&drm->event_lock, flags); | 219 | spin_unlock_irqrestore(&drm->event_lock, flags); |
@@ -349,7 +350,6 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, | |||
349 | struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); | 350 | struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); |
350 | int dp = -EINVAL; | 351 | int dp = -EINVAL; |
351 | int ret; | 352 | int ret; |
352 | int id; | ||
353 | 353 | ||
354 | ret = ipu_get_resources(ipu_crtc, pdata); | 354 | ret = ipu_get_resources(ipu_crtc, pdata); |
355 | if (ret) { | 355 | if (ret) { |
@@ -358,18 +358,23 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, | |||
358 | return ret; | 358 | return ret; |
359 | } | 359 | } |
360 | 360 | ||
361 | if (pdata->dp >= 0) | ||
362 | dp = IPU_DP_FLOW_SYNC_BG; | ||
363 | ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0, | ||
364 | DRM_PLANE_TYPE_PRIMARY); | ||
365 | if (IS_ERR(ipu_crtc->plane[0])) { | ||
366 | ret = PTR_ERR(ipu_crtc->plane[0]); | ||
367 | goto err_put_resources; | ||
368 | } | ||
369 | |||
361 | ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc, | 370 | ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc, |
362 | &ipu_crtc_helper_funcs, ipu_crtc->dev->of_node); | 371 | &ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs, |
372 | ipu_crtc->dev->of_node); | ||
363 | if (ret) { | 373 | if (ret) { |
364 | dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); | 374 | dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret); |
365 | goto err_put_resources; | 375 | goto err_put_resources; |
366 | } | 376 | } |
367 | 377 | ||
368 | if (pdata->dp >= 0) | ||
369 | dp = IPU_DP_FLOW_SYNC_BG; | ||
370 | id = imx_drm_crtc_id(ipu_crtc->imx_crtc); | ||
371 | ipu_crtc->plane[0] = ipu_plane_init(ipu_crtc->base.dev, ipu, | ||
372 | pdata->dma[0], dp, BIT(id), true); | ||
373 | ret = ipu_plane_get_resources(ipu_crtc->plane[0]); | 378 | ret = ipu_plane_get_resources(ipu_crtc->plane[0]); |
374 | if (ret) { | 379 | if (ret) { |
375 | dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n", | 380 | dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n", |
@@ -379,10 +384,10 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, | |||
379 | 384 | ||
380 | /* If this crtc is using the DP, add an overlay plane */ | 385 | /* If this crtc is using the DP, add an overlay plane */ |
381 | if (pdata->dp >= 0 && pdata->dma[1] > 0) { | 386 | if (pdata->dp >= 0 && pdata->dma[1] > 0) { |
382 | ipu_crtc->plane[1] = ipu_plane_init(ipu_crtc->base.dev, ipu, | 387 | ipu_crtc->plane[1] = ipu_plane_init(drm, ipu, pdata->dma[1], |
383 | pdata->dma[1], | 388 | IPU_DP_FLOW_SYNC_FG, |
384 | IPU_DP_FLOW_SYNC_FG, | 389 | drm_crtc_mask(&ipu_crtc->base), |
385 | BIT(id), false); | 390 | DRM_PLANE_TYPE_OVERLAY); |
386 | if (IS_ERR(ipu_crtc->plane[1])) | 391 | if (IS_ERR(ipu_crtc->plane[1])) |
387 | ipu_crtc->plane[1] = NULL; | 392 | ipu_crtc->plane[1] = NULL; |
388 | } | 393 | } |
@@ -407,28 +412,6 @@ err_put_resources: | |||
407 | return ret; | 412 | return ret; |
408 | } | 413 | } |
409 | 414 | ||
410 | static struct device_node *ipu_drm_get_port_by_id(struct device_node *parent, | ||
411 | int port_id) | ||
412 | { | ||
413 | struct device_node *port; | ||
414 | int id, ret; | ||
415 | |||
416 | port = of_get_child_by_name(parent, "port"); | ||
417 | while (port) { | ||
418 | ret = of_property_read_u32(port, "reg", &id); | ||
419 | if (!ret && id == port_id) | ||
420 | return port; | ||
421 | |||
422 | do { | ||
423 | port = of_get_next_child(parent, port); | ||
424 | if (!port) | ||
425 | return NULL; | ||
426 | } while (of_node_cmp(port->name, "port")); | ||
427 | } | ||
428 | |||
429 | return NULL; | ||
430 | } | ||
431 | |||
432 | static int ipu_drm_bind(struct device *dev, struct device *master, void *data) | 415 | static int ipu_drm_bind(struct device *dev, struct device *master, void *data) |
433 | { | 416 | { |
434 | struct ipu_client_platformdata *pdata = dev->platform_data; | 417 | struct ipu_client_platformdata *pdata = dev->platform_data; |
@@ -470,23 +453,11 @@ static const struct component_ops ipu_crtc_ops = { | |||
470 | static int ipu_drm_probe(struct platform_device *pdev) | 453 | static int ipu_drm_probe(struct platform_device *pdev) |
471 | { | 454 | { |
472 | struct device *dev = &pdev->dev; | 455 | struct device *dev = &pdev->dev; |
473 | struct ipu_client_platformdata *pdata = dev->platform_data; | ||
474 | int ret; | 456 | int ret; |
475 | 457 | ||
476 | if (!dev->platform_data) | 458 | if (!dev->platform_data) |
477 | return -EINVAL; | 459 | return -EINVAL; |
478 | 460 | ||
479 | if (!dev->of_node) { | ||
480 | /* Associate crtc device with the corresponding DI port node */ | ||
481 | dev->of_node = ipu_drm_get_port_by_id(dev->parent->of_node, | ||
482 | pdata->di + 2); | ||
483 | if (!dev->of_node) { | ||
484 | dev_err(dev, "missing port@%d node in %s\n", | ||
485 | pdata->di + 2, dev->parent->of_node->full_name); | ||
486 | return -ENODEV; | ||
487 | } | ||
488 | } | ||
489 | |||
490 | ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); | 461 | ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); |
491 | if (ret) | 462 | if (ret) |
492 | return ret; | 463 | return ret; |
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 575f4c84388f..e2ff410bab74 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c | |||
@@ -381,7 +381,7 @@ static struct drm_plane_funcs ipu_plane_funcs = { | |||
381 | 381 | ||
382 | struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, | 382 | struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, |
383 | int dma, int dp, unsigned int possible_crtcs, | 383 | int dma, int dp, unsigned int possible_crtcs, |
384 | bool priv) | 384 | enum drm_plane_type type) |
385 | { | 385 | { |
386 | struct ipu_plane *ipu_plane; | 386 | struct ipu_plane *ipu_plane; |
387 | int ret; | 387 | int ret; |
@@ -399,10 +399,9 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, | |||
399 | ipu_plane->dma = dma; | 399 | ipu_plane->dma = dma; |
400 | ipu_plane->dp_flow = dp; | 400 | ipu_plane->dp_flow = dp; |
401 | 401 | ||
402 | ret = drm_plane_init(dev, &ipu_plane->base, possible_crtcs, | 402 | ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs, |
403 | &ipu_plane_funcs, ipu_plane_formats, | 403 | &ipu_plane_funcs, ipu_plane_formats, |
404 | ARRAY_SIZE(ipu_plane_formats), | 404 | ARRAY_SIZE(ipu_plane_formats), type); |
405 | priv); | ||
406 | if (ret) { | 405 | if (ret) { |
407 | DRM_ERROR("failed to initialize plane\n"); | 406 | DRM_ERROR("failed to initialize plane\n"); |
408 | kfree(ipu_plane); | 407 | kfree(ipu_plane); |
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h index 9b5eff18f5b8..3a443b413c60 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.h +++ b/drivers/gpu/drm/imx/ipuv3-plane.h | |||
@@ -34,7 +34,7 @@ struct ipu_plane { | |||
34 | 34 | ||
35 | struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, | 35 | struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, |
36 | int dma, int dp, unsigned int possible_crtcs, | 36 | int dma, int dp, unsigned int possible_crtcs, |
37 | bool priv); | 37 | enum drm_plane_type type); |
38 | 38 | ||
39 | /* Init IDMAC, DMFC, DP */ | 39 | /* Init IDMAC, DMFC, DP */ |
40 | int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc, | 40 | int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc, |
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index b4deb9cf9d71..2e9b9f1b5cd2 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c | |||
@@ -54,7 +54,11 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) | |||
54 | 54 | ||
55 | if (imxpd->panel && imxpd->panel->funcs && | 55 | if (imxpd->panel && imxpd->panel->funcs && |
56 | imxpd->panel->funcs->get_modes) { | 56 | imxpd->panel->funcs->get_modes) { |
57 | struct drm_display_info *di = &connector->display_info; | ||
58 | |||
57 | num_modes = imxpd->panel->funcs->get_modes(imxpd->panel); | 59 | num_modes = imxpd->panel->funcs->get_modes(imxpd->panel); |
60 | if (!imxpd->bus_format && di->num_bus_formats) | ||
61 | imxpd->bus_format = di->bus_formats[0]; | ||
58 | if (num_modes > 0) | 62 | if (num_modes > 0) |
59 | return num_modes; | 63 | return num_modes; |
60 | } | 64 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index ea9d3bc91266..18676b8c1721 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c | |||
@@ -829,7 +829,6 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, | |||
829 | struct drm_device *dev = drm->dev; | 829 | struct drm_device *dev = drm->dev; |
830 | struct nouveau_page_flip_state *s; | 830 | struct nouveau_page_flip_state *s; |
831 | unsigned long flags; | 831 | unsigned long flags; |
832 | int crtcid = -1; | ||
833 | 832 | ||
834 | spin_lock_irqsave(&dev->event_lock, flags); | 833 | spin_lock_irqsave(&dev->event_lock, flags); |
835 | 834 | ||
@@ -841,15 +840,19 @@ nouveau_finish_page_flip(struct nouveau_channel *chan, | |||
841 | 840 | ||
842 | s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); | 841 | s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); |
843 | if (s->event) { | 842 | if (s->event) { |
844 | /* Vblank timestamps/counts are only correct on >= NV-50 */ | 843 | if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { |
845 | if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) | 844 | drm_arm_vblank_event(dev, s->crtc, s->event); |
846 | crtcid = s->crtc; | 845 | } else { |
846 | drm_send_vblank_event(dev, s->crtc, s->event); | ||
847 | 847 | ||
848 | drm_send_vblank_event(dev, crtcid, s->event); | 848 | /* Give up ownership of vblank for page-flipped crtc */ |
849 | drm_vblank_put(dev, s->crtc); | ||
850 | } | ||
851 | } | ||
852 | else { | ||
853 | /* Give up ownership of vblank for page-flipped crtc */ | ||
854 | drm_vblank_put(dev, s->crtc); | ||
849 | } | 855 | } |
850 | |||
851 | /* Give up ownership of vblank for page-flipped crtc */ | ||
852 | drm_vblank_put(dev, s->crtc); | ||
853 | 856 | ||
854 | list_del(&s->head); | 857 | list_del(&s->head); |
855 | if (ps) | 858 | if (ps) |
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 248953d2fdb7..0154db43860c 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -8472,7 +8472,7 @@ restart_ih: | |||
8472 | if (queue_dp) | 8472 | if (queue_dp) |
8473 | schedule_work(&rdev->dp_work); | 8473 | schedule_work(&rdev->dp_work); |
8474 | if (queue_hotplug) | 8474 | if (queue_hotplug) |
8475 | schedule_work(&rdev->hotplug_work); | 8475 | schedule_delayed_work(&rdev->hotplug_work, 0); |
8476 | if (queue_reset) { | 8476 | if (queue_reset) { |
8477 | rdev->needs_reset = true; | 8477 | rdev->needs_reset = true; |
8478 | wake_up_all(&rdev->fence_queue); | 8478 | wake_up_all(&rdev->fence_queue); |
@@ -9630,6 +9630,9 @@ static void dce8_program_watermarks(struct radeon_device *rdev, | |||
9630 | (rdev->disp_priority == 2)) { | 9630 | (rdev->disp_priority == 2)) { |
9631 | DRM_DEBUG_KMS("force priority to high\n"); | 9631 | DRM_DEBUG_KMS("force priority to high\n"); |
9632 | } | 9632 | } |
9633 | |||
9634 | /* Save number of lines the linebuffer leads before the scanout */ | ||
9635 | radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); | ||
9633 | } | 9636 | } |
9634 | 9637 | ||
9635 | /* select wm A */ | 9638 | /* select wm A */ |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 7f33767d7ed6..2ad462896896 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -2372,6 +2372,9 @@ static void evergreen_program_watermarks(struct radeon_device *rdev, | |||
2372 | c.full = dfixed_div(c, a); | 2372 | c.full = dfixed_div(c, a); |
2373 | priority_b_mark = dfixed_trunc(c); | 2373 | priority_b_mark = dfixed_trunc(c); |
2374 | priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; | 2374 | priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; |
2375 | |||
2376 | /* Save number of lines the linebuffer leads before the scanout */ | ||
2377 | radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); | ||
2375 | } | 2378 | } |
2376 | 2379 | ||
2377 | /* select wm A */ | 2380 | /* select wm A */ |
@@ -5344,7 +5347,7 @@ restart_ih: | |||
5344 | if (queue_dp) | 5347 | if (queue_dp) |
5345 | schedule_work(&rdev->dp_work); | 5348 | schedule_work(&rdev->dp_work); |
5346 | if (queue_hotplug) | 5349 | if (queue_hotplug) |
5347 | schedule_work(&rdev->hotplug_work); | 5350 | schedule_delayed_work(&rdev->hotplug_work, 0); |
5348 | if (queue_hdmi) | 5351 | if (queue_hdmi) |
5349 | schedule_work(&rdev->audio_work); | 5352 | schedule_work(&rdev->audio_work); |
5350 | if (queue_thermal && rdev->pm.dpm_enabled) | 5353 | if (queue_thermal && rdev->pm.dpm_enabled) |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 238b13f045c1..9e7e2bf03b81 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -806,7 +806,7 @@ int r100_irq_process(struct radeon_device *rdev) | |||
806 | status = r100_irq_ack(rdev); | 806 | status = r100_irq_ack(rdev); |
807 | } | 807 | } |
808 | if (queue_hotplug) | 808 | if (queue_hotplug) |
809 | schedule_work(&rdev->hotplug_work); | 809 | schedule_delayed_work(&rdev->hotplug_work, 0); |
810 | if (rdev->msi_enabled) { | 810 | if (rdev->msi_enabled) { |
811 | switch (rdev->family) { | 811 | switch (rdev->family) { |
812 | case CHIP_RS400: | 812 | case CHIP_RS400: |
@@ -3217,6 +3217,9 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
3217 | uint32_t pixel_bytes1 = 0; | 3217 | uint32_t pixel_bytes1 = 0; |
3218 | uint32_t pixel_bytes2 = 0; | 3218 | uint32_t pixel_bytes2 = 0; |
3219 | 3219 | ||
3220 | /* Guess line buffer size to be 8192 pixels */ | ||
3221 | u32 lb_size = 8192; | ||
3222 | |||
3220 | if (!rdev->mode_info.mode_config_initialized) | 3223 | if (!rdev->mode_info.mode_config_initialized) |
3221 | return; | 3224 | return; |
3222 | 3225 | ||
@@ -3631,6 +3634,13 @@ void r100_bandwidth_update(struct radeon_device *rdev) | |||
3631 | DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", | 3634 | DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", |
3632 | (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); | 3635 | (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); |
3633 | } | 3636 | } |
3637 | |||
3638 | /* Save number of lines the linebuffer leads before the scanout */ | ||
3639 | if (mode1) | ||
3640 | rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay); | ||
3641 | |||
3642 | if (mode2) | ||
3643 | rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay); | ||
3634 | } | 3644 | } |
3635 | 3645 | ||
3636 | int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) | 3646 | int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 4ea5b10ff5f4..cc2fdf0be37a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -4276,7 +4276,7 @@ restart_ih: | |||
4276 | WREG32(IH_RB_RPTR, rptr); | 4276 | WREG32(IH_RB_RPTR, rptr); |
4277 | } | 4277 | } |
4278 | if (queue_hotplug) | 4278 | if (queue_hotplug) |
4279 | schedule_work(&rdev->hotplug_work); | 4279 | schedule_delayed_work(&rdev->hotplug_work, 0); |
4280 | if (queue_hdmi) | 4280 | if (queue_hdmi) |
4281 | schedule_work(&rdev->audio_work); | 4281 | schedule_work(&rdev->audio_work); |
4282 | if (queue_thermal && rdev->pm.dpm_enabled) | 4282 | if (queue_thermal && rdev->pm.dpm_enabled) |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b6cbd816537e..87db64983ea8 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -2414,7 +2414,7 @@ struct radeon_device { | |||
2414 | struct r600_ih ih; /* r6/700 interrupt ring */ | 2414 | struct r600_ih ih; /* r6/700 interrupt ring */ |
2415 | struct radeon_rlc rlc; | 2415 | struct radeon_rlc rlc; |
2416 | struct radeon_mec mec; | 2416 | struct radeon_mec mec; |
2417 | struct work_struct hotplug_work; | 2417 | struct delayed_work hotplug_work; |
2418 | struct work_struct dp_work; | 2418 | struct work_struct dp_work; |
2419 | struct work_struct audio_work; | 2419 | struct work_struct audio_work; |
2420 | int num_crtc; /* number of crtcs */ | 2420 | int num_crtc; /* number of crtcs */ |
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index fe994aac3b04..c77d349c561c 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c | |||
@@ -54,6 +54,9 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = { | |||
54 | /* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */ | 54 | /* Intel 82855PM host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (lp #195051) */ |
55 | { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50, | 55 | { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4e50, |
56 | PCI_VENDOR_ID_IBM, 0x0550, 1}, | 56 | PCI_VENDOR_ID_IBM, 0x0550, 1}, |
57 | /* Intel 82855PM host bridge / RV250/M9 GL [Mobility FireGL 9000/Radeon 9000] needs AGPMode 1 (Thinkpad T40p) */ | ||
58 | { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66, | ||
59 | PCI_VENDOR_ID_IBM, 0x054d, 1}, | ||
57 | /* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */ | 60 | /* Intel 82855PM host bridge / Mobility M7 needs AGPMode 1 */ |
58 | { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57, | 61 | { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c57, |
59 | PCI_VENDOR_ID_IBM, 0x0530, 1}, | 62 | PCI_VENDOR_ID_IBM, 0x0530, 1}, |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 5a2cafb4f1bc..340f3f549f29 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -1234,13 +1234,32 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) | |||
1234 | if (r < 0) | 1234 | if (r < 0) |
1235 | return connector_status_disconnected; | 1235 | return connector_status_disconnected; |
1236 | 1236 | ||
1237 | if (radeon_connector->detected_hpd_without_ddc) { | ||
1238 | force = true; | ||
1239 | radeon_connector->detected_hpd_without_ddc = false; | ||
1240 | } | ||
1241 | |||
1237 | if (!force && radeon_check_hpd_status_unchanged(connector)) { | 1242 | if (!force && radeon_check_hpd_status_unchanged(connector)) { |
1238 | ret = connector->status; | 1243 | ret = connector->status; |
1239 | goto exit; | 1244 | goto exit; |
1240 | } | 1245 | } |
1241 | 1246 | ||
1242 | if (radeon_connector->ddc_bus) | 1247 | if (radeon_connector->ddc_bus) { |
1243 | dret = radeon_ddc_probe(radeon_connector, false); | 1248 | dret = radeon_ddc_probe(radeon_connector, false); |
1249 | |||
1250 | /* Sometimes the pins required for the DDC probe on DVI | ||
1251 | * connectors don't make contact at the same time that the ones | ||
1252 | * for HPD do. If the DDC probe fails even though we had an HPD | ||
1253 | * signal, try again later */ | ||
1254 | if (!dret && !force && | ||
1255 | connector->status != connector_status_connected) { | ||
1256 | DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n"); | ||
1257 | radeon_connector->detected_hpd_without_ddc = true; | ||
1258 | schedule_delayed_work(&rdev->hotplug_work, | ||
1259 | msecs_to_jiffies(1000)); | ||
1260 | goto exit; | ||
1261 | } | ||
1262 | } | ||
1244 | if (dret) { | 1263 | if (dret) { |
1245 | radeon_connector->detected_by_load = false; | 1264 | radeon_connector->detected_by_load = false; |
1246 | radeon_connector_free_edid(connector); | 1265 | radeon_connector_free_edid(connector); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index ded51fbb8fa2..b3bb92368ae0 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -322,7 +322,9 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) | |||
322 | * to complete in this vblank? | 322 | * to complete in this vblank? |
323 | */ | 323 | */ |
324 | if (update_pending && | 324 | if (update_pending && |
325 | (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id, 0, | 325 | (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, |
326 | crtc_id, | ||
327 | USE_REAL_VBLANKSTART, | ||
326 | &vpos, &hpos, NULL, NULL, | 328 | &vpos, &hpos, NULL, NULL, |
327 | &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) && | 329 | &rdev->mode_info.crtcs[crtc_id]->base.hwmode)) && |
328 | ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) || | 330 | ((vpos >= (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100) || |
@@ -401,6 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work) | |||
401 | struct drm_crtc *crtc = &radeon_crtc->base; | 403 | struct drm_crtc *crtc = &radeon_crtc->base; |
402 | unsigned long flags; | 404 | unsigned long flags; |
403 | int r; | 405 | int r; |
406 | int vpos, hpos, stat, min_udelay; | ||
407 | struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; | ||
404 | 408 | ||
405 | down_read(&rdev->exclusive_lock); | 409 | down_read(&rdev->exclusive_lock); |
406 | if (work->fence) { | 410 | if (work->fence) { |
@@ -437,6 +441,41 @@ static void radeon_flip_work_func(struct work_struct *__work) | |||
437 | /* set the proper interrupt */ | 441 | /* set the proper interrupt */ |
438 | radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); | 442 | radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); |
439 | 443 | ||
444 | /* If this happens to execute within the "virtually extended" vblank | ||
445 | * interval before the start of the real vblank interval then it needs | ||
446 | * to delay programming the mmio flip until the real vblank is entered. | ||
447 | * This prevents completing a flip too early due to the way we fudge | ||
448 | * our vblank counter and vblank timestamps in order to work around the | ||
449 | * problem that the hw fires vblank interrupts before actual start of | ||
450 | * vblank (when line buffer refilling is done for a frame). It | ||
451 | * complements the fudging logic in radeon_get_crtc_scanoutpos() for | ||
452 | * timestamping and radeon_get_vblank_counter_kms() for vblank counts. | ||
453 | * | ||
454 | * In practice this won't execute very often unless on very fast | ||
455 | * machines because the time window for this to happen is very small. | ||
456 | */ | ||
457 | for (;;) { | ||
458 | /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank | ||
459 | * start in hpos, and to the "fudged earlier" vblank start in | ||
460 | * vpos. | ||
461 | */ | ||
462 | stat = radeon_get_crtc_scanoutpos(rdev->ddev, work->crtc_id, | ||
463 | GET_DISTANCE_TO_VBLANKSTART, | ||
464 | &vpos, &hpos, NULL, NULL, | ||
465 | &crtc->hwmode); | ||
466 | |||
467 | if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != | ||
468 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) || | ||
469 | !(vpos >= 0 && hpos <= 0)) | ||
470 | break; | ||
471 | |||
472 | /* Sleep at least until estimated real start of hw vblank */ | ||
473 | spin_unlock_irqrestore(&crtc->dev->event_lock, flags); | ||
474 | min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); | ||
475 | usleep_range(min_udelay, 2 * min_udelay); | ||
476 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | ||
477 | }; | ||
478 | |||
440 | /* do the flip (mmio) */ | 479 | /* do the flip (mmio) */ |
441 | radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base); | 480 | radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base); |
442 | 481 | ||
@@ -1768,6 +1807,15 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
1768 | * \param dev Device to query. | 1807 | * \param dev Device to query. |
1769 | * \param crtc Crtc to query. | 1808 | * \param crtc Crtc to query. |
1770 | * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). | 1809 | * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0). |
1810 | * For driver internal use only also supports these flags: | ||
1811 | * | ||
1812 | * USE_REAL_VBLANKSTART to use the real start of vblank instead | ||
1813 | * of a fudged earlier start of vblank. | ||
1814 | * | ||
1815 | * GET_DISTANCE_TO_VBLANKSTART to return distance to the | ||
1816 | * fudged earlier start of vblank in *vpos and the distance | ||
1817 | * to true start of vblank in *hpos. | ||
1818 | * | ||
1771 | * \param *vpos Location where vertical scanout position should be stored. | 1819 | * \param *vpos Location where vertical scanout position should be stored. |
1772 | * \param *hpos Location where horizontal scanout position should go. | 1820 | * \param *hpos Location where horizontal scanout position should go. |
1773 | * \param *stime Target location for timestamp taken immediately before | 1821 | * \param *stime Target location for timestamp taken immediately before |
@@ -1911,10 +1959,40 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, | |||
1911 | vbl_end = 0; | 1959 | vbl_end = 0; |
1912 | } | 1960 | } |
1913 | 1961 | ||
1962 | /* Called from driver internal vblank counter query code? */ | ||
1963 | if (flags & GET_DISTANCE_TO_VBLANKSTART) { | ||
1964 | /* Caller wants distance from real vbl_start in *hpos */ | ||
1965 | *hpos = *vpos - vbl_start; | ||
1966 | } | ||
1967 | |||
1968 | /* Fudge vblank to start a few scanlines earlier to handle the | ||
1969 | * problem that vblank irqs fire a few scanlines before start | ||
1970 | * of vblank. Some driver internal callers need the true vblank | ||
1971 | * start to be used and signal this via the USE_REAL_VBLANKSTART flag. | ||
1972 | * | ||
1973 | * The cause of the "early" vblank irq is that the irq is triggered | ||
1974 | * by the line buffer logic when the line buffer read position enters | ||
1975 | * the vblank, whereas our crtc scanout position naturally lags the | ||
1976 | * line buffer read position. | ||
1977 | */ | ||
1978 | if (!(flags & USE_REAL_VBLANKSTART)) | ||
1979 | vbl_start -= rdev->mode_info.crtcs[pipe]->lb_vblank_lead_lines; | ||
1980 | |||
1914 | /* Test scanout position against vblank region. */ | 1981 | /* Test scanout position against vblank region. */ |
1915 | if ((*vpos < vbl_start) && (*vpos >= vbl_end)) | 1982 | if ((*vpos < vbl_start) && (*vpos >= vbl_end)) |
1916 | in_vbl = false; | 1983 | in_vbl = false; |
1917 | 1984 | ||
1985 | /* In vblank? */ | ||
1986 | if (in_vbl) | ||
1987 | ret |= DRM_SCANOUTPOS_IN_VBLANK; | ||
1988 | |||
1989 | /* Called from driver internal vblank counter query code? */ | ||
1990 | if (flags & GET_DISTANCE_TO_VBLANKSTART) { | ||
1991 | /* Caller wants distance from fudged earlier vbl_start */ | ||
1992 | *vpos -= vbl_start; | ||
1993 | return ret; | ||
1994 | } | ||
1995 | |||
1918 | /* Check if inside vblank area and apply corrective offsets: | 1996 | /* Check if inside vblank area and apply corrective offsets: |
1919 | * vpos will then be >=0 in video scanout area, but negative | 1997 | * vpos will then be >=0 in video scanout area, but negative |
1920 | * within vblank area, counting down the number of lines until | 1998 | * within vblank area, counting down the number of lines until |
@@ -1930,31 +2008,5 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, | |||
1930 | /* Correct for shifted end of vbl at vbl_end. */ | 2008 | /* Correct for shifted end of vbl at vbl_end. */ |
1931 | *vpos = *vpos - vbl_end; | 2009 | *vpos = *vpos - vbl_end; |
1932 | 2010 | ||
1933 | /* In vblank? */ | ||
1934 | if (in_vbl) | ||
1935 | ret |= DRM_SCANOUTPOS_IN_VBLANK; | ||
1936 | |||
1937 | /* Is vpos outside nominal vblank area, but less than | ||
1938 | * 1/100 of a frame height away from start of vblank? | ||
1939 | * If so, assume this isn't a massively delayed vblank | ||
1940 | * interrupt, but a vblank interrupt that fired a few | ||
1941 | * microseconds before true start of vblank. Compensate | ||
1942 | * by adding a full frame duration to the final timestamp. | ||
1943 | * Happens, e.g., on ATI R500, R600. | ||
1944 | * | ||
1945 | * We only do this if DRM_CALLED_FROM_VBLIRQ. | ||
1946 | */ | ||
1947 | if ((flags & DRM_CALLED_FROM_VBLIRQ) && !in_vbl) { | ||
1948 | vbl_start = mode->crtc_vdisplay; | ||
1949 | vtotal = mode->crtc_vtotal; | ||
1950 | |||
1951 | if (vbl_start - *vpos < vtotal / 100) { | ||
1952 | *vpos -= vtotal; | ||
1953 | |||
1954 | /* Signal this correction as "applied". */ | ||
1955 | ret |= 0x8; | ||
1956 | } | ||
1957 | } | ||
1958 | |||
1959 | return ret; | 2011 | return ret; |
1960 | } | 2012 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 171d3e43c30c..979f3bf65f2c 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
@@ -74,7 +74,7 @@ irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg) | |||
74 | static void radeon_hotplug_work_func(struct work_struct *work) | 74 | static void radeon_hotplug_work_func(struct work_struct *work) |
75 | { | 75 | { |
76 | struct radeon_device *rdev = container_of(work, struct radeon_device, | 76 | struct radeon_device *rdev = container_of(work, struct radeon_device, |
77 | hotplug_work); | 77 | hotplug_work.work); |
78 | struct drm_device *dev = rdev->ddev; | 78 | struct drm_device *dev = rdev->ddev; |
79 | struct drm_mode_config *mode_config = &dev->mode_config; | 79 | struct drm_mode_config *mode_config = &dev->mode_config; |
80 | struct drm_connector *connector; | 80 | struct drm_connector *connector; |
@@ -302,7 +302,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) | |||
302 | } | 302 | } |
303 | } | 303 | } |
304 | 304 | ||
305 | INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); | 305 | INIT_DELAYED_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); |
306 | INIT_WORK(&rdev->dp_work, radeon_dp_work_func); | 306 | INIT_WORK(&rdev->dp_work, radeon_dp_work_func); |
307 | INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); | 307 | INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); |
308 | 308 | ||
@@ -310,7 +310,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) | |||
310 | r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq); | 310 | r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq); |
311 | if (r) { | 311 | if (r) { |
312 | rdev->irq.installed = false; | 312 | rdev->irq.installed = false; |
313 | flush_work(&rdev->hotplug_work); | 313 | flush_delayed_work(&rdev->hotplug_work); |
314 | return r; | 314 | return r; |
315 | } | 315 | } |
316 | 316 | ||
@@ -333,7 +333,7 @@ void radeon_irq_kms_fini(struct radeon_device *rdev) | |||
333 | rdev->irq.installed = false; | 333 | rdev->irq.installed = false; |
334 | if (rdev->msi_enabled) | 334 | if (rdev->msi_enabled) |
335 | pci_disable_msi(rdev->pdev); | 335 | pci_disable_msi(rdev->pdev); |
336 | flush_work(&rdev->hotplug_work); | 336 | flush_delayed_work(&rdev->hotplug_work); |
337 | } | 337 | } |
338 | } | 338 | } |
339 | 339 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 0ec6fcca16d3..d290a8a09036 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -755,6 +755,8 @@ void radeon_driver_preclose_kms(struct drm_device *dev, | |||
755 | */ | 755 | */ |
756 | u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) | 756 | u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) |
757 | { | 757 | { |
758 | int vpos, hpos, stat; | ||
759 | u32 count; | ||
758 | struct radeon_device *rdev = dev->dev_private; | 760 | struct radeon_device *rdev = dev->dev_private; |
759 | 761 | ||
760 | if (crtc < 0 || crtc >= rdev->num_crtc) { | 762 | if (crtc < 0 || crtc >= rdev->num_crtc) { |
@@ -762,7 +764,53 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc) | |||
762 | return -EINVAL; | 764 | return -EINVAL; |
763 | } | 765 | } |
764 | 766 | ||
765 | return radeon_get_vblank_counter(rdev, crtc); | 767 | /* The hw increments its frame counter at start of vsync, not at start |
768 | * of vblank, as is required by DRM core vblank counter handling. | ||
769 | * Cook the hw count here to make it appear to the caller as if it | ||
770 | * incremented at start of vblank. We measure distance to start of | ||
771 | * vblank in vpos. vpos therefore will be >= 0 between start of vblank | ||
772 | * and start of vsync, so vpos >= 0 means to bump the hw frame counter | ||
773 | * result by 1 to give the proper appearance to caller. | ||
774 | */ | ||
775 | if (rdev->mode_info.crtcs[crtc]) { | ||
776 | /* Repeat readout if needed to provide stable result if | ||
777 | * we cross start of vsync during the queries. | ||
778 | */ | ||
779 | do { | ||
780 | count = radeon_get_vblank_counter(rdev, crtc); | ||
781 | /* Ask radeon_get_crtc_scanoutpos to return vpos as | ||
782 | * distance to start of vblank, instead of regular | ||
783 | * vertical scanout pos. | ||
784 | */ | ||
785 | stat = radeon_get_crtc_scanoutpos( | ||
786 | dev, crtc, GET_DISTANCE_TO_VBLANKSTART, | ||
787 | &vpos, &hpos, NULL, NULL, | ||
788 | &rdev->mode_info.crtcs[crtc]->base.hwmode); | ||
789 | } while (count != radeon_get_vblank_counter(rdev, crtc)); | ||
790 | |||
791 | if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) != | ||
792 | (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) { | ||
793 | DRM_DEBUG_VBL("Query failed! stat %d\n", stat); | ||
794 | } | ||
795 | else { | ||
796 | DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n", | ||
797 | crtc, vpos); | ||
798 | |||
799 | /* Bump counter if we are at >= leading edge of vblank, | ||
800 | * but before vsync where vpos would turn negative and | ||
801 | * the hw counter really increments. | ||
802 | */ | ||
803 | if (vpos >= 0) | ||
804 | count++; | ||
805 | } | ||
806 | } | ||
807 | else { | ||
808 | /* Fallback to use value as is. */ | ||
809 | count = radeon_get_vblank_counter(rdev, crtc); | ||
810 | DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n"); | ||
811 | } | ||
812 | |||
813 | return count; | ||
766 | } | 814 | } |
767 | 815 | ||
768 | /** | 816 | /** |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index b8e3c277a95b..cddd41b32eda 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -367,6 +367,7 @@ struct radeon_crtc { | |||
367 | u32 line_time; | 367 | u32 line_time; |
368 | u32 wm_low; | 368 | u32 wm_low; |
369 | u32 wm_high; | 369 | u32 wm_high; |
370 | u32 lb_vblank_lead_lines; | ||
370 | struct drm_display_mode hw_mode; | 371 | struct drm_display_mode hw_mode; |
371 | enum radeon_output_csc output_csc; | 372 | enum radeon_output_csc output_csc; |
372 | }; | 373 | }; |
@@ -553,6 +554,7 @@ struct radeon_connector { | |||
553 | void *con_priv; | 554 | void *con_priv; |
554 | bool dac_load_detect; | 555 | bool dac_load_detect; |
555 | bool detected_by_load; /* if the connection status was determined by load */ | 556 | bool detected_by_load; /* if the connection status was determined by load */ |
557 | bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */ | ||
556 | uint16_t connector_object_id; | 558 | uint16_t connector_object_id; |
557 | struct radeon_hpd hpd; | 559 | struct radeon_hpd hpd; |
558 | struct radeon_router router; | 560 | struct radeon_router router; |
@@ -686,6 +688,9 @@ struct atom_voltage_table | |||
686 | struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES]; | 688 | struct atom_voltage_table_entry entries[MAX_VOLTAGE_ENTRIES]; |
687 | }; | 689 | }; |
688 | 690 | ||
691 | /* Driver internal use only flags of radeon_get_crtc_scanoutpos() */ | ||
692 | #define USE_REAL_VBLANKSTART (1 << 30) | ||
693 | #define GET_DISTANCE_TO_VBLANKSTART (1 << 31) | ||
689 | 694 | ||
690 | extern void | 695 | extern void |
691 | radeon_add_atom_connector(struct drm_device *dev, | 696 | radeon_add_atom_connector(struct drm_device *dev, |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f4f03dcc1530..59abebd6b5dc 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -1756,7 +1756,9 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev) | |||
1756 | */ | 1756 | */ |
1757 | for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { | 1757 | for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) { |
1758 | if (rdev->pm.active_crtcs & (1 << crtc)) { | 1758 | if (rdev->pm.active_crtcs & (1 << crtc)) { |
1759 | vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, | 1759 | vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, |
1760 | crtc, | ||
1761 | USE_REAL_VBLANKSTART, | ||
1760 | &vpos, &hpos, NULL, NULL, | 1762 | &vpos, &hpos, NULL, NULL, |
1761 | &rdev->mode_info.crtcs[crtc]->base.hwmode); | 1763 | &rdev->mode_info.crtcs[crtc]->base.hwmode); |
1762 | if ((vbl_status & DRM_SCANOUTPOS_VALID) && | 1764 | if ((vbl_status & DRM_SCANOUTPOS_VALID) && |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 97a904835759..6244f4e44e9a 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -813,7 +813,7 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
813 | status = rs600_irq_ack(rdev); | 813 | status = rs600_irq_ack(rdev); |
814 | } | 814 | } |
815 | if (queue_hotplug) | 815 | if (queue_hotplug) |
816 | schedule_work(&rdev->hotplug_work); | 816 | schedule_delayed_work(&rdev->hotplug_work, 0); |
817 | if (queue_hdmi) | 817 | if (queue_hdmi) |
818 | schedule_work(&rdev->audio_work); | 818 | schedule_work(&rdev->audio_work); |
819 | if (rdev->msi_enabled) { | 819 | if (rdev->msi_enabled) { |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 516ca27cfa12..6bc44c24e837 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -207,6 +207,9 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev, | |||
207 | { | 207 | { |
208 | u32 tmp; | 208 | u32 tmp; |
209 | 209 | ||
210 | /* Guess line buffer size to be 8192 pixels */ | ||
211 | u32 lb_size = 8192; | ||
212 | |||
210 | /* | 213 | /* |
211 | * Line Buffer Setup | 214 | * Line Buffer Setup |
212 | * There is a single line buffer shared by both display controllers. | 215 | * There is a single line buffer shared by both display controllers. |
@@ -243,6 +246,13 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev, | |||
243 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; | 246 | tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; |
244 | } | 247 | } |
245 | WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); | 248 | WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); |
249 | |||
250 | /* Save number of lines the linebuffer leads before the scanout */ | ||
251 | if (mode1) | ||
252 | rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay); | ||
253 | |||
254 | if (mode2) | ||
255 | rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay); | ||
246 | } | 256 | } |
247 | 257 | ||
248 | struct rs690_watermark { | 258 | struct rs690_watermark { |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 07037e32dea3..f878d6962da5 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -2376,6 +2376,9 @@ static void dce6_program_watermarks(struct radeon_device *rdev, | |||
2376 | c.full = dfixed_div(c, a); | 2376 | c.full = dfixed_div(c, a); |
2377 | priority_b_mark = dfixed_trunc(c); | 2377 | priority_b_mark = dfixed_trunc(c); |
2378 | priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; | 2378 | priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; |
2379 | |||
2380 | /* Save number of lines the linebuffer leads before the scanout */ | ||
2381 | radeon_crtc->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); | ||
2379 | } | 2382 | } |
2380 | 2383 | ||
2381 | /* select wm A */ | 2384 | /* select wm A */ |
@@ -6848,7 +6851,7 @@ restart_ih: | |||
6848 | if (queue_dp) | 6851 | if (queue_dp) |
6849 | schedule_work(&rdev->dp_work); | 6852 | schedule_work(&rdev->dp_work); |
6850 | if (queue_hotplug) | 6853 | if (queue_hotplug) |
6851 | schedule_work(&rdev->hotplug_work); | 6854 | schedule_delayed_work(&rdev->hotplug_work, 0); |
6852 | if (queue_thermal && rdev->pm.dpm_enabled) | 6855 | if (queue_thermal && rdev->pm.dpm_enabled) |
6853 | schedule_work(&rdev->pm.dpm.thermal.work); | 6856 | schedule_work(&rdev->pm.dpm.thermal.work); |
6854 | rdev->ih.rptr = rptr; | 6857 | rdev->ih.rptr = rptr; |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 8caea0a33dd8..d908321b94ce 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c | |||
@@ -67,6 +67,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj, | |||
67 | * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). | 67 | * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). |
68 | */ | 68 | */ |
69 | vma->vm_flags &= ~VM_PFNMAP; | 69 | vma->vm_flags &= ~VM_PFNMAP; |
70 | vma->vm_pgoff = 0; | ||
70 | 71 | ||
71 | ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, | 72 | ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr, |
72 | obj->size, &rk_obj->dma_attrs); | 73 | obj->size, &rk_obj->dma_attrs); |
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 5d8ae5e49c44..03c47eeadc81 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c | |||
@@ -374,6 +374,7 @@ static const struct of_device_id vop_driver_dt_match[] = { | |||
374 | .data = &rk3288_vop }, | 374 | .data = &rk3288_vop }, |
375 | {}, | 375 | {}, |
376 | }; | 376 | }; |
377 | MODULE_DEVICE_TABLE(of, vop_driver_dt_match); | ||
377 | 378 | ||
378 | static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v) | 379 | static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v) |
379 | { | 380 | { |
@@ -959,8 +960,8 @@ static int vop_update_plane_event(struct drm_plane *plane, | |||
959 | val = (dest.y2 - dest.y1 - 1) << 16; | 960 | val = (dest.y2 - dest.y1 - 1) << 16; |
960 | val |= (dest.x2 - dest.x1 - 1) & 0xffff; | 961 | val |= (dest.x2 - dest.x1 - 1) & 0xffff; |
961 | VOP_WIN_SET(vop, win, dsp_info, val); | 962 | VOP_WIN_SET(vop, win, dsp_info, val); |
962 | val = (dsp_sty - 1) << 16; | 963 | val = dsp_sty << 16; |
963 | val |= (dsp_stx - 1) & 0xffff; | 964 | val |= dsp_stx & 0xffff; |
964 | VOP_WIN_SET(vop, win, dsp_st, val); | 965 | VOP_WIN_SET(vop, win, dsp_st, val); |
965 | VOP_WIN_SET(vop, win, rb_swap, rb_swap); | 966 | VOP_WIN_SET(vop, win, rb_swap, rb_swap); |
966 | 967 | ||
@@ -1289,7 +1290,7 @@ static void vop_win_state_complete(struct vop_win *vop_win, | |||
1289 | 1290 | ||
1290 | if (state->event) { | 1291 | if (state->event) { |
1291 | spin_lock_irqsave(&drm->event_lock, flags); | 1292 | spin_lock_irqsave(&drm->event_lock, flags); |
1292 | drm_send_vblank_event(drm, -1, state->event); | 1293 | drm_crtc_send_vblank_event(crtc, state->event); |
1293 | spin_unlock_irqrestore(&drm->event_lock, flags); | 1294 | spin_unlock_irqrestore(&drm->event_lock, flags); |
1294 | } | 1295 | } |
1295 | 1296 | ||
@@ -1575,32 +1576,25 @@ static int vop_initial(struct vop *vop) | |||
1575 | return PTR_ERR(vop->dclk); | 1576 | return PTR_ERR(vop->dclk); |
1576 | } | 1577 | } |
1577 | 1578 | ||
1578 | ret = clk_prepare(vop->hclk); | ||
1579 | if (ret < 0) { | ||
1580 | dev_err(vop->dev, "failed to prepare hclk\n"); | ||
1581 | return ret; | ||
1582 | } | ||
1583 | |||
1584 | ret = clk_prepare(vop->dclk); | 1579 | ret = clk_prepare(vop->dclk); |
1585 | if (ret < 0) { | 1580 | if (ret < 0) { |
1586 | dev_err(vop->dev, "failed to prepare dclk\n"); | 1581 | dev_err(vop->dev, "failed to prepare dclk\n"); |
1587 | goto err_unprepare_hclk; | 1582 | return ret; |
1588 | } | 1583 | } |
1589 | 1584 | ||
1590 | ret = clk_prepare(vop->aclk); | 1585 | /* Enable both the hclk and aclk to setup the vop */ |
1586 | ret = clk_prepare_enable(vop->hclk); | ||
1591 | if (ret < 0) { | 1587 | if (ret < 0) { |
1592 | dev_err(vop->dev, "failed to prepare aclk\n"); | 1588 | dev_err(vop->dev, "failed to prepare/enable hclk\n"); |
1593 | goto err_unprepare_dclk; | 1589 | goto err_unprepare_dclk; |
1594 | } | 1590 | } |
1595 | 1591 | ||
1596 | /* | 1592 | ret = clk_prepare_enable(vop->aclk); |
1597 | * enable hclk, so that we can config vop register. | ||
1598 | */ | ||
1599 | ret = clk_enable(vop->hclk); | ||
1600 | if (ret < 0) { | 1593 | if (ret < 0) { |
1601 | dev_err(vop->dev, "failed to prepare aclk\n"); | 1594 | dev_err(vop->dev, "failed to prepare/enable aclk\n"); |
1602 | goto err_unprepare_aclk; | 1595 | goto err_disable_hclk; |
1603 | } | 1596 | } |
1597 | |||
1604 | /* | 1598 | /* |
1605 | * do hclk_reset, reset all vop registers. | 1599 | * do hclk_reset, reset all vop registers. |
1606 | */ | 1600 | */ |
@@ -1608,7 +1602,7 @@ static int vop_initial(struct vop *vop) | |||
1608 | if (IS_ERR(ahb_rst)) { | 1602 | if (IS_ERR(ahb_rst)) { |
1609 | dev_err(vop->dev, "failed to get ahb reset\n"); | 1603 | dev_err(vop->dev, "failed to get ahb reset\n"); |
1610 | ret = PTR_ERR(ahb_rst); | 1604 | ret = PTR_ERR(ahb_rst); |
1611 | goto err_disable_hclk; | 1605 | goto err_disable_aclk; |
1612 | } | 1606 | } |
1613 | reset_control_assert(ahb_rst); | 1607 | reset_control_assert(ahb_rst); |
1614 | usleep_range(10, 20); | 1608 | usleep_range(10, 20); |
@@ -1634,26 +1628,25 @@ static int vop_initial(struct vop *vop) | |||
1634 | if (IS_ERR(vop->dclk_rst)) { | 1628 | if (IS_ERR(vop->dclk_rst)) { |
1635 | dev_err(vop->dev, "failed to get dclk reset\n"); | 1629 | dev_err(vop->dev, "failed to get dclk reset\n"); |
1636 | ret = PTR_ERR(vop->dclk_rst); | 1630 | ret = PTR_ERR(vop->dclk_rst); |
1637 | goto err_unprepare_aclk; | 1631 | goto err_disable_aclk; |
1638 | } | 1632 | } |
1639 | reset_control_assert(vop->dclk_rst); | 1633 | reset_control_assert(vop->dclk_rst); |
1640 | usleep_range(10, 20); | 1634 | usleep_range(10, 20); |
1641 | reset_control_deassert(vop->dclk_rst); | 1635 | reset_control_deassert(vop->dclk_rst); |
1642 | 1636 | ||
1643 | clk_disable(vop->hclk); | 1637 | clk_disable(vop->hclk); |
1638 | clk_disable(vop->aclk); | ||
1644 | 1639 | ||
1645 | vop->is_enabled = false; | 1640 | vop->is_enabled = false; |
1646 | 1641 | ||
1647 | return 0; | 1642 | return 0; |
1648 | 1643 | ||
1644 | err_disable_aclk: | ||
1645 | clk_disable_unprepare(vop->aclk); | ||
1649 | err_disable_hclk: | 1646 | err_disable_hclk: |
1650 | clk_disable(vop->hclk); | 1647 | clk_disable_unprepare(vop->hclk); |
1651 | err_unprepare_aclk: | ||
1652 | clk_unprepare(vop->aclk); | ||
1653 | err_unprepare_dclk: | 1648 | err_unprepare_dclk: |
1654 | clk_unprepare(vop->dclk); | 1649 | clk_unprepare(vop->dclk); |
1655 | err_unprepare_hclk: | ||
1656 | clk_unprepare(vop->hclk); | ||
1657 | return ret; | 1650 | return ret; |
1658 | } | 1651 | } |
1659 | 1652 | ||
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 306a7df7d013..8e6044d7660a 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c | |||
@@ -412,7 +412,7 @@ static const struct drm_connector_funcs virtio_gpu_connector_funcs = { | |||
412 | .save = virtio_gpu_conn_save, | 412 | .save = virtio_gpu_conn_save, |
413 | .restore = virtio_gpu_conn_restore, | 413 | .restore = virtio_gpu_conn_restore, |
414 | .detect = virtio_gpu_conn_detect, | 414 | .detect = virtio_gpu_conn_detect, |
415 | .fill_modes = drm_helper_probe_single_connector_modes, | 415 | .fill_modes = drm_helper_probe_single_connector_modes_nomerge, |
416 | .destroy = virtio_gpu_conn_destroy, | 416 | .destroy = virtio_gpu_conn_destroy, |
417 | .reset = drm_atomic_helper_connector_reset, | 417 | .reset = drm_atomic_helper_connector_reset, |
418 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, | 418 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, |
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index ba47b30d28fa..f2e13eb8339f 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/irqchip/chained_irq.h> | 28 | #include <linux/irqchip/chained_irq.h> |
29 | #include <linux/irqdomain.h> | 29 | #include <linux/irqdomain.h> |
30 | #include <linux/of_device.h> | 30 | #include <linux/of_device.h> |
31 | #include <linux/of_graph.h> | ||
31 | 32 | ||
32 | #include <drm/drm_fourcc.h> | 33 | #include <drm/drm_fourcc.h> |
33 | 34 | ||
@@ -993,12 +994,26 @@ static void platform_device_unregister_children(struct platform_device *pdev) | |||
993 | struct ipu_platform_reg { | 994 | struct ipu_platform_reg { |
994 | struct ipu_client_platformdata pdata; | 995 | struct ipu_client_platformdata pdata; |
995 | const char *name; | 996 | const char *name; |
996 | int reg_offset; | ||
997 | }; | 997 | }; |
998 | 998 | ||
999 | /* These must be in the order of the corresponding device tree port nodes */ | ||
999 | static const struct ipu_platform_reg client_reg[] = { | 1000 | static const struct ipu_platform_reg client_reg[] = { |
1000 | { | 1001 | { |
1001 | .pdata = { | 1002 | .pdata = { |
1003 | .csi = 0, | ||
1004 | .dma[0] = IPUV3_CHANNEL_CSI0, | ||
1005 | .dma[1] = -EINVAL, | ||
1006 | }, | ||
1007 | .name = "imx-ipuv3-camera", | ||
1008 | }, { | ||
1009 | .pdata = { | ||
1010 | .csi = 1, | ||
1011 | .dma[0] = IPUV3_CHANNEL_CSI1, | ||
1012 | .dma[1] = -EINVAL, | ||
1013 | }, | ||
1014 | .name = "imx-ipuv3-camera", | ||
1015 | }, { | ||
1016 | .pdata = { | ||
1002 | .di = 0, | 1017 | .di = 0, |
1003 | .dc = 5, | 1018 | .dc = 5, |
1004 | .dp = IPU_DP_FLOW_SYNC_BG, | 1019 | .dp = IPU_DP_FLOW_SYNC_BG, |
@@ -1015,22 +1030,6 @@ static const struct ipu_platform_reg client_reg[] = { | |||
1015 | .dma[1] = -EINVAL, | 1030 | .dma[1] = -EINVAL, |
1016 | }, | 1031 | }, |
1017 | .name = "imx-ipuv3-crtc", | 1032 | .name = "imx-ipuv3-crtc", |
1018 | }, { | ||
1019 | .pdata = { | ||
1020 | .csi = 0, | ||
1021 | .dma[0] = IPUV3_CHANNEL_CSI0, | ||
1022 | .dma[1] = -EINVAL, | ||
1023 | }, | ||
1024 | .reg_offset = IPU_CM_CSI0_REG_OFS, | ||
1025 | .name = "imx-ipuv3-camera", | ||
1026 | }, { | ||
1027 | .pdata = { | ||
1028 | .csi = 1, | ||
1029 | .dma[0] = IPUV3_CHANNEL_CSI1, | ||
1030 | .dma[1] = -EINVAL, | ||
1031 | }, | ||
1032 | .reg_offset = IPU_CM_CSI1_REG_OFS, | ||
1033 | .name = "imx-ipuv3-camera", | ||
1034 | }, | 1033 | }, |
1035 | }; | 1034 | }; |
1036 | 1035 | ||
@@ -1051,22 +1050,30 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) | |||
1051 | for (i = 0; i < ARRAY_SIZE(client_reg); i++) { | 1050 | for (i = 0; i < ARRAY_SIZE(client_reg); i++) { |
1052 | const struct ipu_platform_reg *reg = &client_reg[i]; | 1051 | const struct ipu_platform_reg *reg = &client_reg[i]; |
1053 | struct platform_device *pdev; | 1052 | struct platform_device *pdev; |
1054 | struct resource res; | 1053 | |
1055 | 1054 | pdev = platform_device_alloc(reg->name, id++); | |
1056 | if (reg->reg_offset) { | 1055 | if (!pdev) { |
1057 | memset(&res, 0, sizeof(res)); | 1056 | ret = -ENOMEM; |
1058 | res.flags = IORESOURCE_MEM; | 1057 | goto err_register; |
1059 | res.start = ipu_base + ipu->devtype->cm_ofs + reg->reg_offset; | 1058 | } |
1060 | res.end = res.start + PAGE_SIZE - 1; | 1059 | |
1061 | pdev = platform_device_register_resndata(dev, reg->name, | 1060 | pdev->dev.parent = dev; |
1062 | id++, &res, 1, ®->pdata, sizeof(reg->pdata)); | 1061 | |
1063 | } else { | 1062 | /* Associate subdevice with the corresponding port node */ |
1064 | pdev = platform_device_register_data(dev, reg->name, | 1063 | pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i); |
1065 | id++, ®->pdata, sizeof(reg->pdata)); | 1064 | if (!pdev->dev.of_node) { |
1065 | dev_err(dev, "missing port@%d node in %s\n", i, | ||
1066 | dev->of_node->full_name); | ||
1067 | ret = -ENODEV; | ||
1068 | goto err_register; | ||
1066 | } | 1069 | } |
1067 | 1070 | ||
1068 | if (IS_ERR(pdev)) { | 1071 | ret = platform_device_add_data(pdev, ®->pdata, |
1069 | ret = PTR_ERR(pdev); | 1072 | sizeof(reg->pdata)); |
1073 | if (!ret) | ||
1074 | ret = platform_device_add(pdev); | ||
1075 | if (ret) { | ||
1076 | platform_device_put(pdev); | ||
1070 | goto err_register; | 1077 | goto err_register; |
1071 | } | 1078 | } |
1072 | } | 1079 | } |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index ac1feea51be3..9024a3de4032 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -609,6 +609,7 @@ | |||
609 | #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 | 609 | #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 |
610 | #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f | 610 | #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f |
611 | #define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306 | 611 | #define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306 |
612 | #define USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS 0xc24d | ||
612 | #define USB_DEVICE_ID_LOGITECH_MOUSE_C01A 0xc01a | 613 | #define USB_DEVICE_ID_LOGITECH_MOUSE_C01A 0xc01a |
613 | #define USB_DEVICE_ID_LOGITECH_MOUSE_C05A 0xc05a | 614 | #define USB_DEVICE_ID_LOGITECH_MOUSE_C05A 0xc05a |
614 | #define USB_DEVICE_ID_LOGITECH_MOUSE_C06A 0xc06a | 615 | #define USB_DEVICE_ID_LOGITECH_MOUSE_C06A 0xc06a |
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index c20ac76c0a8c..c690fae02cf8 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c | |||
@@ -665,8 +665,9 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
665 | struct lg_drv_data *drv_data; | 665 | struct lg_drv_data *drv_data; |
666 | int ret; | 666 | int ret; |
667 | 667 | ||
668 | /* Only work with the 1st interface (G29 presents multiple) */ | 668 | /* G29 only work with the 1st interface */ |
669 | if (iface_num != 0) { | 669 | if ((hdev->product == USB_DEVICE_ID_LOGITECH_G29_WHEEL) && |
670 | (iface_num != 0)) { | ||
670 | dbg_hid("%s: ignoring ifnum %d\n", __func__, iface_num); | 671 | dbg_hid("%s: ignoring ifnum %d\n", __func__, iface_num); |
671 | return -ENODEV; | 672 | return -ENODEV; |
672 | } | 673 | } |
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 94bb137abe32..2324520b006d 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -84,6 +84,7 @@ static const struct hid_blacklist { | |||
84 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL }, | 84 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL }, |
85 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, | 85 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, |
86 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, | 86 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, |
87 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET }, | ||
87 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL }, | 88 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C01A, HID_QUIRK_ALWAYS_POLL }, |
88 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, | 89 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, |
89 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, | 90 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, |
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index b33f53b3ca93..bf04d2a3cf4a 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c | |||
@@ -1896,7 +1896,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if) | |||
1896 | ptr--; | 1896 | ptr--; |
1897 | *ptr++ = '\n'; | 1897 | *ptr++ = '\n'; |
1898 | *ptr = 0; | 1898 | *ptr = 0; |
1899 | HiSax_putstatus(cs, NULL, "%s", cs->dlog); | 1899 | HiSax_putstatus(cs, NULL, cs->dlog); |
1900 | } else | 1900 | } else |
1901 | HiSax_putstatus(cs, "LogEcho: ", | 1901 | HiSax_putstatus(cs, "LogEcho: ", |
1902 | "warning Frame too big (%d)", | 1902 | "warning Frame too big (%d)", |
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c index 4a4825528188..90449e1e91e5 100644 --- a/drivers/isdn/hisax/hfc_pci.c +++ b/drivers/isdn/hisax/hfc_pci.c | |||
@@ -901,7 +901,7 @@ Begin: | |||
901 | ptr--; | 901 | ptr--; |
902 | *ptr++ = '\n'; | 902 | *ptr++ = '\n'; |
903 | *ptr = 0; | 903 | *ptr = 0; |
904 | HiSax_putstatus(cs, NULL, "%s", cs->dlog); | 904 | HiSax_putstatus(cs, NULL, cs->dlog); |
905 | } else | 905 | } else |
906 | HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3); | 906 | HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", total - 3); |
907 | } | 907 | } |
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c index b1fad81f0722..13b2151c10f5 100644 --- a/drivers/isdn/hisax/hfc_sx.c +++ b/drivers/isdn/hisax/hfc_sx.c | |||
@@ -674,7 +674,7 @@ receive_emsg(struct IsdnCardState *cs) | |||
674 | ptr--; | 674 | ptr--; |
675 | *ptr++ = '\n'; | 675 | *ptr++ = '\n'; |
676 | *ptr = 0; | 676 | *ptr = 0; |
677 | HiSax_putstatus(cs, NULL, "%s", cs->dlog); | 677 | HiSax_putstatus(cs, NULL, cs->dlog); |
678 | } else | 678 | } else |
679 | HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len); | 679 | HiSax_putstatus(cs, "LogEcho: ", "warning Frame too big (%d)", skb->len); |
680 | } | 680 | } |
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c index b420f8bd862e..ba4beb25d872 100644 --- a/drivers/isdn/hisax/q931.c +++ b/drivers/isdn/hisax/q931.c | |||
@@ -1179,7 +1179,7 @@ LogFrame(struct IsdnCardState *cs, u_char *buf, int size) | |||
1179 | dp--; | 1179 | dp--; |
1180 | *dp++ = '\n'; | 1180 | *dp++ = '\n'; |
1181 | *dp = 0; | 1181 | *dp = 0; |
1182 | HiSax_putstatus(cs, NULL, "%s", cs->dlog); | 1182 | HiSax_putstatus(cs, NULL, cs->dlog); |
1183 | } else | 1183 | } else |
1184 | HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size); | 1184 | HiSax_putstatus(cs, "LogFrame: ", "warning Frame too big (%d)", size); |
1185 | } | 1185 | } |
@@ -1246,7 +1246,7 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir) | |||
1246 | } | 1246 | } |
1247 | if (finish) { | 1247 | if (finish) { |
1248 | *dp = 0; | 1248 | *dp = 0; |
1249 | HiSax_putstatus(cs, NULL, "%s", cs->dlog); | 1249 | HiSax_putstatus(cs, NULL, cs->dlog); |
1250 | return; | 1250 | return; |
1251 | } | 1251 | } |
1252 | if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */ | 1252 | if ((0xfe & buf[0]) == PROTO_DIS_N0) { /* 1TR6 */ |
@@ -1509,5 +1509,5 @@ dlogframe(struct IsdnCardState *cs, struct sk_buff *skb, int dir) | |||
1509 | dp += sprintf(dp, "Unknown protocol %x!", buf[0]); | 1509 | dp += sprintf(dp, "Unknown protocol %x!", buf[0]); |
1510 | } | 1510 | } |
1511 | *dp = 0; | 1511 | *dp = 0; |
1512 | HiSax_putstatus(cs, NULL, "%s", cs->dlog); | 1512 | HiSax_putstatus(cs, NULL, cs->dlog); |
1513 | } | 1513 | } |
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index 5178645ac42b..86ce887b2ed6 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c | |||
@@ -123,6 +123,26 @@ void nvm_unregister_mgr(struct nvmm_type *mt) | |||
123 | } | 123 | } |
124 | EXPORT_SYMBOL(nvm_unregister_mgr); | 124 | EXPORT_SYMBOL(nvm_unregister_mgr); |
125 | 125 | ||
126 | /* register with device with a supported manager */ | ||
127 | static int register_mgr(struct nvm_dev *dev) | ||
128 | { | ||
129 | struct nvmm_type *mt; | ||
130 | int ret = 0; | ||
131 | |||
132 | list_for_each_entry(mt, &nvm_mgrs, list) { | ||
133 | ret = mt->register_mgr(dev); | ||
134 | if (ret > 0) { | ||
135 | dev->mt = mt; | ||
136 | break; /* successfully initialized */ | ||
137 | } | ||
138 | } | ||
139 | |||
140 | if (!ret) | ||
141 | pr_info("nvm: no compatible nvm manager found.\n"); | ||
142 | |||
143 | return ret; | ||
144 | } | ||
145 | |||
126 | static struct nvm_dev *nvm_find_nvm_dev(const char *name) | 146 | static struct nvm_dev *nvm_find_nvm_dev(const char *name) |
127 | { | 147 | { |
128 | struct nvm_dev *dev; | 148 | struct nvm_dev *dev; |
@@ -221,7 +241,6 @@ static void nvm_free(struct nvm_dev *dev) | |||
221 | 241 | ||
222 | static int nvm_init(struct nvm_dev *dev) | 242 | static int nvm_init(struct nvm_dev *dev) |
223 | { | 243 | { |
224 | struct nvmm_type *mt; | ||
225 | int ret = -EINVAL; | 244 | int ret = -EINVAL; |
226 | 245 | ||
227 | if (!dev->q || !dev->ops) | 246 | if (!dev->q || !dev->ops) |
@@ -252,21 +271,13 @@ static int nvm_init(struct nvm_dev *dev) | |||
252 | goto err; | 271 | goto err; |
253 | } | 272 | } |
254 | 273 | ||
255 | /* register with device with a supported manager */ | 274 | down_write(&nvm_lock); |
256 | list_for_each_entry(mt, &nvm_mgrs, list) { | 275 | ret = register_mgr(dev); |
257 | ret = mt->register_mgr(dev); | 276 | up_write(&nvm_lock); |
258 | if (ret < 0) | 277 | if (ret < 0) |
259 | goto err; /* initialization failed */ | 278 | goto err; |
260 | if (ret > 0) { | 279 | if (!ret) |
261 | dev->mt = mt; | ||
262 | break; /* successfully initialized */ | ||
263 | } | ||
264 | } | ||
265 | |||
266 | if (!ret) { | ||
267 | pr_info("nvm: no compatible manager found.\n"); | ||
268 | return 0; | 280 | return 0; |
269 | } | ||
270 | 281 | ||
271 | pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n", | 282 | pr_info("nvm: registered %s [%u/%u/%u/%u/%u/%u]\n", |
272 | dev->name, dev->sec_per_pg, dev->nr_planes, | 283 | dev->name, dev->sec_per_pg, dev->nr_planes, |
@@ -308,6 +319,12 @@ int nvm_register(struct request_queue *q, char *disk_name, | |||
308 | if (ret) | 319 | if (ret) |
309 | goto err_init; | 320 | goto err_init; |
310 | 321 | ||
322 | if (dev->ops->max_phys_sect > 256) { | ||
323 | pr_info("nvm: max sectors supported is 256.\n"); | ||
324 | ret = -EINVAL; | ||
325 | goto err_init; | ||
326 | } | ||
327 | |||
311 | if (dev->ops->max_phys_sect > 1) { | 328 | if (dev->ops->max_phys_sect > 1) { |
312 | dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, | 329 | dev->ppalist_pool = dev->ops->create_dma_pool(dev->q, |
313 | "ppalist"); | 330 | "ppalist"); |
@@ -316,10 +333,6 @@ int nvm_register(struct request_queue *q, char *disk_name, | |||
316 | ret = -ENOMEM; | 333 | ret = -ENOMEM; |
317 | goto err_init; | 334 | goto err_init; |
318 | } | 335 | } |
319 | } else if (dev->ops->max_phys_sect > 256) { | ||
320 | pr_info("nvm: max sectors supported is 256.\n"); | ||
321 | ret = -EINVAL; | ||
322 | goto err_init; | ||
323 | } | 336 | } |
324 | 337 | ||
325 | down_write(&nvm_lock); | 338 | down_write(&nvm_lock); |
@@ -335,15 +348,17 @@ EXPORT_SYMBOL(nvm_register); | |||
335 | 348 | ||
336 | void nvm_unregister(char *disk_name) | 349 | void nvm_unregister(char *disk_name) |
337 | { | 350 | { |
338 | struct nvm_dev *dev = nvm_find_nvm_dev(disk_name); | 351 | struct nvm_dev *dev; |
339 | 352 | ||
353 | down_write(&nvm_lock); | ||
354 | dev = nvm_find_nvm_dev(disk_name); | ||
340 | if (!dev) { | 355 | if (!dev) { |
341 | pr_err("nvm: could not find device %s to unregister\n", | 356 | pr_err("nvm: could not find device %s to unregister\n", |
342 | disk_name); | 357 | disk_name); |
358 | up_write(&nvm_lock); | ||
343 | return; | 359 | return; |
344 | } | 360 | } |
345 | 361 | ||
346 | down_write(&nvm_lock); | ||
347 | list_del(&dev->devices); | 362 | list_del(&dev->devices); |
348 | up_write(&nvm_lock); | 363 | up_write(&nvm_lock); |
349 | 364 | ||
@@ -361,38 +376,30 @@ static int nvm_create_target(struct nvm_dev *dev, | |||
361 | { | 376 | { |
362 | struct nvm_ioctl_create_simple *s = &create->conf.s; | 377 | struct nvm_ioctl_create_simple *s = &create->conf.s; |
363 | struct request_queue *tqueue; | 378 | struct request_queue *tqueue; |
364 | struct nvmm_type *mt; | ||
365 | struct gendisk *tdisk; | 379 | struct gendisk *tdisk; |
366 | struct nvm_tgt_type *tt; | 380 | struct nvm_tgt_type *tt; |
367 | struct nvm_target *t; | 381 | struct nvm_target *t; |
368 | void *targetdata; | 382 | void *targetdata; |
369 | int ret = 0; | 383 | int ret = 0; |
370 | 384 | ||
385 | down_write(&nvm_lock); | ||
371 | if (!dev->mt) { | 386 | if (!dev->mt) { |
372 | /* register with device with a supported NVM manager */ | 387 | ret = register_mgr(dev); |
373 | list_for_each_entry(mt, &nvm_mgrs, list) { | 388 | if (!ret) |
374 | ret = mt->register_mgr(dev); | 389 | ret = -ENODEV; |
375 | if (ret < 0) | 390 | if (ret < 0) { |
376 | return ret; /* initialization failed */ | 391 | up_write(&nvm_lock); |
377 | if (ret > 0) { | 392 | return ret; |
378 | dev->mt = mt; | ||
379 | break; /* successfully initialized */ | ||
380 | } | ||
381 | } | ||
382 | |||
383 | if (!ret) { | ||
384 | pr_info("nvm: no compatible nvm manager found.\n"); | ||
385 | return -ENODEV; | ||
386 | } | 393 | } |
387 | } | 394 | } |
388 | 395 | ||
389 | tt = nvm_find_target_type(create->tgttype); | 396 | tt = nvm_find_target_type(create->tgttype); |
390 | if (!tt) { | 397 | if (!tt) { |
391 | pr_err("nvm: target type %s not found\n", create->tgttype); | 398 | pr_err("nvm: target type %s not found\n", create->tgttype); |
399 | up_write(&nvm_lock); | ||
392 | return -EINVAL; | 400 | return -EINVAL; |
393 | } | 401 | } |
394 | 402 | ||
395 | down_write(&nvm_lock); | ||
396 | list_for_each_entry(t, &dev->online_targets, list) { | 403 | list_for_each_entry(t, &dev->online_targets, list) { |
397 | if (!strcmp(create->tgtname, t->disk->disk_name)) { | 404 | if (!strcmp(create->tgtname, t->disk->disk_name)) { |
398 | pr_err("nvm: target name already exists.\n"); | 405 | pr_err("nvm: target name already exists.\n"); |
@@ -476,7 +483,9 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create) | |||
476 | struct nvm_dev *dev; | 483 | struct nvm_dev *dev; |
477 | struct nvm_ioctl_create_simple *s; | 484 | struct nvm_ioctl_create_simple *s; |
478 | 485 | ||
486 | down_write(&nvm_lock); | ||
479 | dev = nvm_find_nvm_dev(create->dev); | 487 | dev = nvm_find_nvm_dev(create->dev); |
488 | up_write(&nvm_lock); | ||
480 | if (!dev) { | 489 | if (!dev) { |
481 | pr_err("nvm: device not found\n"); | 490 | pr_err("nvm: device not found\n"); |
482 | return -EINVAL; | 491 | return -EINVAL; |
@@ -535,7 +544,9 @@ static int nvm_configure_show(const char *val) | |||
535 | return -EINVAL; | 544 | return -EINVAL; |
536 | } | 545 | } |
537 | 546 | ||
547 | down_write(&nvm_lock); | ||
538 | dev = nvm_find_nvm_dev(devname); | 548 | dev = nvm_find_nvm_dev(devname); |
549 | up_write(&nvm_lock); | ||
539 | if (!dev) { | 550 | if (!dev) { |
540 | pr_err("nvm: device not found\n"); | 551 | pr_err("nvm: device not found\n"); |
541 | return -EINVAL; | 552 | return -EINVAL; |
@@ -680,8 +691,10 @@ static long nvm_ioctl_info(struct file *file, void __user *arg) | |||
680 | info->tgtsize = tgt_iter; | 691 | info->tgtsize = tgt_iter; |
681 | up_write(&nvm_lock); | 692 | up_write(&nvm_lock); |
682 | 693 | ||
683 | if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) | 694 | if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) { |
695 | kfree(info); | ||
684 | return -EFAULT; | 696 | return -EFAULT; |
697 | } | ||
685 | 698 | ||
686 | kfree(info); | 699 | kfree(info); |
687 | return 0; | 700 | return 0; |
@@ -724,8 +737,11 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg) | |||
724 | 737 | ||
725 | devices->nr_devices = i; | 738 | devices->nr_devices = i; |
726 | 739 | ||
727 | if (copy_to_user(arg, devices, sizeof(struct nvm_ioctl_get_devices))) | 740 | if (copy_to_user(arg, devices, |
741 | sizeof(struct nvm_ioctl_get_devices))) { | ||
742 | kfree(devices); | ||
728 | return -EFAULT; | 743 | return -EFAULT; |
744 | } | ||
729 | 745 | ||
730 | kfree(devices); | 746 | kfree(devices); |
731 | return 0; | 747 | return 0; |
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c index e20e74ec6b91..35dde84b71e9 100644 --- a/drivers/lightnvm/gennvm.c +++ b/drivers/lightnvm/gennvm.c | |||
@@ -75,7 +75,6 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks, | |||
75 | struct nvm_block *blk; | 75 | struct nvm_block *blk; |
76 | int i; | 76 | int i; |
77 | 77 | ||
78 | ppa = dev_to_generic_addr(gn->dev, ppa); | ||
79 | lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun]; | 78 | lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun]; |
80 | 79 | ||
81 | for (i = 0; i < nr_blocks; i++) { | 80 | for (i = 0; i < nr_blocks; i++) { |
@@ -187,7 +186,7 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) | |||
187 | ppa.g.lun = lun->vlun.id; | 186 | ppa.g.lun = lun->vlun.id; |
188 | ppa = generic_to_dev_addr(dev, ppa); | 187 | ppa = generic_to_dev_addr(dev, ppa); |
189 | 188 | ||
190 | ret = dev->ops->get_bb_tbl(dev->q, ppa, | 189 | ret = dev->ops->get_bb_tbl(dev, ppa, |
191 | dev->blks_per_lun, | 190 | dev->blks_per_lun, |
192 | gennvm_block_bb, gn); | 191 | gennvm_block_bb, gn); |
193 | if (ret) | 192 | if (ret) |
@@ -207,6 +206,14 @@ static int gennvm_blocks_init(struct nvm_dev *dev, struct gen_nvm *gn) | |||
207 | return 0; | 206 | return 0; |
208 | } | 207 | } |
209 | 208 | ||
209 | static void gennvm_free(struct nvm_dev *dev) | ||
210 | { | ||
211 | gennvm_blocks_free(dev); | ||
212 | gennvm_luns_free(dev); | ||
213 | kfree(dev->mp); | ||
214 | dev->mp = NULL; | ||
215 | } | ||
216 | |||
210 | static int gennvm_register(struct nvm_dev *dev) | 217 | static int gennvm_register(struct nvm_dev *dev) |
211 | { | 218 | { |
212 | struct gen_nvm *gn; | 219 | struct gen_nvm *gn; |
@@ -234,16 +241,13 @@ static int gennvm_register(struct nvm_dev *dev) | |||
234 | 241 | ||
235 | return 1; | 242 | return 1; |
236 | err: | 243 | err: |
237 | kfree(gn); | 244 | gennvm_free(dev); |
238 | return ret; | 245 | return ret; |
239 | } | 246 | } |
240 | 247 | ||
241 | static void gennvm_unregister(struct nvm_dev *dev) | 248 | static void gennvm_unregister(struct nvm_dev *dev) |
242 | { | 249 | { |
243 | gennvm_blocks_free(dev); | 250 | gennvm_free(dev); |
244 | gennvm_luns_free(dev); | ||
245 | kfree(dev->mp); | ||
246 | dev->mp = NULL; | ||
247 | } | 251 | } |
248 | 252 | ||
249 | static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, | 253 | static struct nvm_block *gennvm_get_blk(struct nvm_dev *dev, |
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c index 57dadd52b428..1deb8ff90a89 100644 --- a/drivers/net/can/bfin_can.c +++ b/drivers/net/can/bfin_can.c | |||
@@ -501,8 +501,6 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) | |||
501 | cf->data[2] |= CAN_ERR_PROT_FORM; | 501 | cf->data[2] |= CAN_ERR_PROT_FORM; |
502 | else if (status & SER) | 502 | else if (status & SER) |
503 | cf->data[2] |= CAN_ERR_PROT_STUFF; | 503 | cf->data[2] |= CAN_ERR_PROT_STUFF; |
504 | else | ||
505 | cf->data[2] |= CAN_ERR_PROT_UNSPEC; | ||
506 | } | 504 | } |
507 | 505 | ||
508 | priv->can.state = state; | 506 | priv->can.state = state; |
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 5d214d135332..f91b094288da 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c | |||
@@ -962,7 +962,6 @@ static int c_can_handle_bus_err(struct net_device *dev, | |||
962 | * type of the last error to occur on the CAN bus | 962 | * type of the last error to occur on the CAN bus |
963 | */ | 963 | */ |
964 | cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; | 964 | cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; |
965 | cf->data[2] |= CAN_ERR_PROT_UNSPEC; | ||
966 | 965 | ||
967 | switch (lec_type) { | 966 | switch (lec_type) { |
968 | case LEC_STUFF_ERROR: | 967 | case LEC_STUFF_ERROR: |
@@ -975,8 +974,7 @@ static int c_can_handle_bus_err(struct net_device *dev, | |||
975 | break; | 974 | break; |
976 | case LEC_ACK_ERROR: | 975 | case LEC_ACK_ERROR: |
977 | netdev_dbg(dev, "ack error\n"); | 976 | netdev_dbg(dev, "ack error\n"); |
978 | cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | | 977 | cf->data[3] = CAN_ERR_PROT_LOC_ACK; |
979 | CAN_ERR_PROT_LOC_ACK_DEL); | ||
980 | break; | 978 | break; |
981 | case LEC_BIT1_ERROR: | 979 | case LEC_BIT1_ERROR: |
982 | netdev_dbg(dev, "bit1 error\n"); | 980 | netdev_dbg(dev, "bit1 error\n"); |
@@ -988,8 +986,7 @@ static int c_can_handle_bus_err(struct net_device *dev, | |||
988 | break; | 986 | break; |
989 | case LEC_CRC_ERROR: | 987 | case LEC_CRC_ERROR: |
990 | netdev_dbg(dev, "CRC error\n"); | 988 | netdev_dbg(dev, "CRC error\n"); |
991 | cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | | 989 | cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; |
992 | CAN_ERR_PROT_LOC_CRC_DEL); | ||
993 | break; | 990 | break; |
994 | default: | 991 | default: |
995 | break; | 992 | break; |
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index 70a8cbb29e75..1e37313054f3 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c | |||
@@ -578,7 +578,7 @@ static int cc770_err(struct net_device *dev, u8 status) | |||
578 | cf->data[2] |= CAN_ERR_PROT_BIT0; | 578 | cf->data[2] |= CAN_ERR_PROT_BIT0; |
579 | break; | 579 | break; |
580 | case STAT_LEC_CRC: | 580 | case STAT_LEC_CRC: |
581 | cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; | 581 | cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; |
582 | break; | 582 | break; |
583 | } | 583 | } |
584 | } | 584 | } |
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 868fe945e35a..41c0fc9f3b14 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
@@ -535,13 +535,13 @@ static void do_bus_err(struct net_device *dev, | |||
535 | if (reg_esr & FLEXCAN_ESR_ACK_ERR) { | 535 | if (reg_esr & FLEXCAN_ESR_ACK_ERR) { |
536 | netdev_dbg(dev, "ACK_ERR irq\n"); | 536 | netdev_dbg(dev, "ACK_ERR irq\n"); |
537 | cf->can_id |= CAN_ERR_ACK; | 537 | cf->can_id |= CAN_ERR_ACK; |
538 | cf->data[3] |= CAN_ERR_PROT_LOC_ACK; | 538 | cf->data[3] = CAN_ERR_PROT_LOC_ACK; |
539 | tx_errors = 1; | 539 | tx_errors = 1; |
540 | } | 540 | } |
541 | if (reg_esr & FLEXCAN_ESR_CRC_ERR) { | 541 | if (reg_esr & FLEXCAN_ESR_CRC_ERR) { |
542 | netdev_dbg(dev, "CRC_ERR irq\n"); | 542 | netdev_dbg(dev, "CRC_ERR irq\n"); |
543 | cf->data[2] |= CAN_ERR_PROT_BIT; | 543 | cf->data[2] |= CAN_ERR_PROT_BIT; |
544 | cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; | 544 | cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; |
545 | rx_errors = 1; | 545 | rx_errors = 1; |
546 | } | 546 | } |
547 | if (reg_esr & FLEXCAN_ESR_FRM_ERR) { | 547 | if (reg_esr & FLEXCAN_ESR_FRM_ERR) { |
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index c1e85368a198..5d04f5464faf 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c | |||
@@ -1096,7 +1096,6 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg) | |||
1096 | cf->data[2] |= CAN_ERR_PROT_STUFF; | 1096 | cf->data[2] |= CAN_ERR_PROT_STUFF; |
1097 | break; | 1097 | break; |
1098 | default: | 1098 | default: |
1099 | cf->data[2] |= CAN_ERR_PROT_UNSPEC; | ||
1100 | cf->data[3] = ecc & ECC_SEG; | 1099 | cf->data[3] = ecc & ECC_SEG; |
1101 | break; | 1100 | break; |
1102 | } | 1101 | } |
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index ef655177bb5e..39cf911f7a1e 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c | |||
@@ -487,7 +487,6 @@ static int m_can_handle_lec_err(struct net_device *dev, | |||
487 | * type of the last error to occur on the CAN bus | 487 | * type of the last error to occur on the CAN bus |
488 | */ | 488 | */ |
489 | cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; | 489 | cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; |
490 | cf->data[2] |= CAN_ERR_PROT_UNSPEC; | ||
491 | 490 | ||
492 | switch (lec_type) { | 491 | switch (lec_type) { |
493 | case LEC_STUFF_ERROR: | 492 | case LEC_STUFF_ERROR: |
@@ -500,8 +499,7 @@ static int m_can_handle_lec_err(struct net_device *dev, | |||
500 | break; | 499 | break; |
501 | case LEC_ACK_ERROR: | 500 | case LEC_ACK_ERROR: |
502 | netdev_dbg(dev, "ack error\n"); | 501 | netdev_dbg(dev, "ack error\n"); |
503 | cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | | 502 | cf->data[3] = CAN_ERR_PROT_LOC_ACK; |
504 | CAN_ERR_PROT_LOC_ACK_DEL); | ||
505 | break; | 503 | break; |
506 | case LEC_BIT1_ERROR: | 504 | case LEC_BIT1_ERROR: |
507 | netdev_dbg(dev, "bit1 error\n"); | 505 | netdev_dbg(dev, "bit1 error\n"); |
@@ -513,8 +511,7 @@ static int m_can_handle_lec_err(struct net_device *dev, | |||
513 | break; | 511 | break; |
514 | case LEC_CRC_ERROR: | 512 | case LEC_CRC_ERROR: |
515 | netdev_dbg(dev, "CRC error\n"); | 513 | netdev_dbg(dev, "CRC error\n"); |
516 | cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | | 514 | cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; |
517 | CAN_ERR_PROT_LOC_CRC_DEL); | ||
518 | break; | 515 | break; |
519 | default: | 516 | default: |
520 | break; | 517 | break; |
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index e187ca783da0..c1317889d3d8 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c | |||
@@ -559,8 +559,7 @@ static void pch_can_error(struct net_device *ndev, u32 status) | |||
559 | stats->rx_errors++; | 559 | stats->rx_errors++; |
560 | break; | 560 | break; |
561 | case PCH_CRC_ERR: | 561 | case PCH_CRC_ERR: |
562 | cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | | 562 | cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; |
563 | CAN_ERR_PROT_LOC_CRC_DEL; | ||
564 | priv->can.can_stats.bus_error++; | 563 | priv->can.can_stats.bus_error++; |
565 | stats->rx_errors++; | 564 | stats->rx_errors++; |
566 | break; | 565 | break; |
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c index 7bd54191f962..bc46be39549d 100644 --- a/drivers/net/can/rcar_can.c +++ b/drivers/net/can/rcar_can.c | |||
@@ -241,17 +241,16 @@ static void rcar_can_error(struct net_device *ndev) | |||
241 | u8 ecsr; | 241 | u8 ecsr; |
242 | 242 | ||
243 | netdev_dbg(priv->ndev, "Bus error interrupt:\n"); | 243 | netdev_dbg(priv->ndev, "Bus error interrupt:\n"); |
244 | if (skb) { | 244 | if (skb) |
245 | cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; | 245 | cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; |
246 | cf->data[2] = CAN_ERR_PROT_UNSPEC; | 246 | |
247 | } | ||
248 | ecsr = readb(&priv->regs->ecsr); | 247 | ecsr = readb(&priv->regs->ecsr); |
249 | if (ecsr & RCAR_CAN_ECSR_ADEF) { | 248 | if (ecsr & RCAR_CAN_ECSR_ADEF) { |
250 | netdev_dbg(priv->ndev, "ACK Delimiter Error\n"); | 249 | netdev_dbg(priv->ndev, "ACK Delimiter Error\n"); |
251 | tx_errors++; | 250 | tx_errors++; |
252 | writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr); | 251 | writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr); |
253 | if (skb) | 252 | if (skb) |
254 | cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL; | 253 | cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL; |
255 | } | 254 | } |
256 | if (ecsr & RCAR_CAN_ECSR_BE0F) { | 255 | if (ecsr & RCAR_CAN_ECSR_BE0F) { |
257 | netdev_dbg(priv->ndev, "Bit Error (dominant)\n"); | 256 | netdev_dbg(priv->ndev, "Bit Error (dominant)\n"); |
@@ -272,7 +271,7 @@ static void rcar_can_error(struct net_device *ndev) | |||
272 | rx_errors++; | 271 | rx_errors++; |
273 | writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr); | 272 | writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr); |
274 | if (skb) | 273 | if (skb) |
275 | cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; | 274 | cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; |
276 | } | 275 | } |
277 | if (ecsr & RCAR_CAN_ECSR_AEF) { | 276 | if (ecsr & RCAR_CAN_ECSR_AEF) { |
278 | netdev_dbg(priv->ndev, "ACK Error\n"); | 277 | netdev_dbg(priv->ndev, "ACK Error\n"); |
@@ -280,7 +279,7 @@ static void rcar_can_error(struct net_device *ndev) | |||
280 | writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr); | 279 | writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr); |
281 | if (skb) { | 280 | if (skb) { |
282 | cf->can_id |= CAN_ERR_ACK; | 281 | cf->can_id |= CAN_ERR_ACK; |
283 | cf->data[3] |= CAN_ERR_PROT_LOC_ACK; | 282 | cf->data[3] = CAN_ERR_PROT_LOC_ACK; |
284 | } | 283 | } |
285 | } | 284 | } |
286 | if (ecsr & RCAR_CAN_ECSR_FEF) { | 285 | if (ecsr & RCAR_CAN_ECSR_FEF) { |
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 7b92e911a616..8dda3b703d39 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c | |||
@@ -218,6 +218,9 @@ static void sja1000_start(struct net_device *dev) | |||
218 | priv->write_reg(priv, SJA1000_RXERR, 0x0); | 218 | priv->write_reg(priv, SJA1000_RXERR, 0x0); |
219 | priv->read_reg(priv, SJA1000_ECC); | 219 | priv->read_reg(priv, SJA1000_ECC); |
220 | 220 | ||
221 | /* clear interrupt flags */ | ||
222 | priv->read_reg(priv, SJA1000_IR); | ||
223 | |||
221 | /* leave reset mode */ | 224 | /* leave reset mode */ |
222 | set_normal_mode(dev); | 225 | set_normal_mode(dev); |
223 | } | 226 | } |
@@ -446,7 +449,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) | |||
446 | cf->data[2] |= CAN_ERR_PROT_STUFF; | 449 | cf->data[2] |= CAN_ERR_PROT_STUFF; |
447 | break; | 450 | break; |
448 | default: | 451 | default: |
449 | cf->data[2] |= CAN_ERR_PROT_UNSPEC; | ||
450 | cf->data[3] = ecc & ECC_SEG; | 452 | cf->data[3] = ecc & ECC_SEG; |
451 | break; | 453 | break; |
452 | } | 454 | } |
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index d9a42c646783..68ef0a4cd821 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c | |||
@@ -575,7 +575,6 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) | |||
575 | cf->data[2] |= CAN_ERR_PROT_STUFF; | 575 | cf->data[2] |= CAN_ERR_PROT_STUFF; |
576 | break; | 576 | break; |
577 | default: | 577 | default: |
578 | cf->data[2] |= CAN_ERR_PROT_UNSPEC; | ||
579 | cf->data[3] = (ecc & SUN4I_STA_ERR_SEG_CODE) | 578 | cf->data[3] = (ecc & SUN4I_STA_ERR_SEG_CODE) |
580 | >> 16; | 579 | >> 16; |
581 | break; | 580 | break; |
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index cf345cbfe819..680d1ff07a55 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c | |||
@@ -722,7 +722,6 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, | |||
722 | if (err_status & HECC_BUS_ERROR) { | 722 | if (err_status & HECC_BUS_ERROR) { |
723 | ++priv->can.can_stats.bus_error; | 723 | ++priv->can.can_stats.bus_error; |
724 | cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; | 724 | cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; |
725 | cf->data[2] |= CAN_ERR_PROT_UNSPEC; | ||
726 | if (err_status & HECC_CANES_FE) { | 725 | if (err_status & HECC_CANES_FE) { |
727 | hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE); | 726 | hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE); |
728 | cf->data[2] |= CAN_ERR_PROT_FORM; | 727 | cf->data[2] |= CAN_ERR_PROT_FORM; |
@@ -737,13 +736,11 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, | |||
737 | } | 736 | } |
738 | if (err_status & HECC_CANES_CRCE) { | 737 | if (err_status & HECC_CANES_CRCE) { |
739 | hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); | 738 | hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE); |
740 | cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | | 739 | cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; |
741 | CAN_ERR_PROT_LOC_CRC_DEL; | ||
742 | } | 740 | } |
743 | if (err_status & HECC_CANES_ACKE) { | 741 | if (err_status & HECC_CANES_ACKE) { |
744 | hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); | 742 | hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE); |
745 | cf->data[3] |= CAN_ERR_PROT_LOC_ACK | | 743 | cf->data[3] = CAN_ERR_PROT_LOC_ACK; |
746 | CAN_ERR_PROT_LOC_ACK_DEL; | ||
747 | } | 744 | } |
748 | } | 745 | } |
749 | 746 | ||
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 2d390384ef3b..fc5b75675cd8 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c | |||
@@ -377,7 +377,6 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) | |||
377 | cf->data[2] |= CAN_ERR_PROT_STUFF; | 377 | cf->data[2] |= CAN_ERR_PROT_STUFF; |
378 | break; | 378 | break; |
379 | default: | 379 | default: |
380 | cf->data[2] |= CAN_ERR_PROT_UNSPEC; | ||
381 | cf->data[3] = ecc & SJA1000_ECC_SEG; | 380 | cf->data[3] = ecc & SJA1000_ECC_SEG; |
382 | break; | 381 | break; |
383 | } | 382 | } |
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 0e5a4493ba4f..113e64fcd73b 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c | |||
@@ -282,7 +282,6 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, | |||
282 | cf->data[2] |= CAN_ERR_PROT_STUFF; | 282 | cf->data[2] |= CAN_ERR_PROT_STUFF; |
283 | break; | 283 | break; |
284 | default: | 284 | default: |
285 | cf->data[2] |= CAN_ERR_PROT_UNSPEC; | ||
286 | cf->data[3] = ecc & SJA1000_ECC_SEG; | 285 | cf->data[3] = ecc & SJA1000_ECC_SEG; |
287 | break; | 286 | break; |
288 | } | 287 | } |
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 8b17a9065b0b..022bfa13ebfa 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c | |||
@@ -944,10 +944,9 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, | |||
944 | cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; | 944 | cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; |
945 | 945 | ||
946 | if (es->leaf.error_factor & M16C_EF_ACKE) | 946 | if (es->leaf.error_factor & M16C_EF_ACKE) |
947 | cf->data[3] |= (CAN_ERR_PROT_LOC_ACK); | 947 | cf->data[3] = CAN_ERR_PROT_LOC_ACK; |
948 | if (es->leaf.error_factor & M16C_EF_CRCE) | 948 | if (es->leaf.error_factor & M16C_EF_CRCE) |
949 | cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | | 949 | cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; |
950 | CAN_ERR_PROT_LOC_CRC_DEL); | ||
951 | if (es->leaf.error_factor & M16C_EF_FORME) | 950 | if (es->leaf.error_factor & M16C_EF_FORME) |
952 | cf->data[2] |= CAN_ERR_PROT_FORM; | 951 | cf->data[2] |= CAN_ERR_PROT_FORM; |
953 | if (es->leaf.error_factor & M16C_EF_STFE) | 952 | if (es->leaf.error_factor & M16C_EF_STFE) |
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index de95b1ccba3e..a731720f1d13 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c | |||
@@ -401,9 +401,7 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv, | |||
401 | tx_errors = 1; | 401 | tx_errors = 1; |
402 | break; | 402 | break; |
403 | case USB_8DEV_STATUSMSG_CRC: | 403 | case USB_8DEV_STATUSMSG_CRC: |
404 | cf->data[2] |= CAN_ERR_PROT_UNSPEC; | 404 | cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; |
405 | cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ | | ||
406 | CAN_ERR_PROT_LOC_CRC_DEL; | ||
407 | rx_errors = 1; | 405 | rx_errors = 1; |
408 | break; | 406 | break; |
409 | case USB_8DEV_STATUSMSG_BIT0: | 407 | case USB_8DEV_STATUSMSG_BIT0: |
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index fc55e8e0351d..51670b322409 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c | |||
@@ -608,17 +608,15 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) | |||
608 | 608 | ||
609 | /* Check for error interrupt */ | 609 | /* Check for error interrupt */ |
610 | if (isr & XCAN_IXR_ERROR_MASK) { | 610 | if (isr & XCAN_IXR_ERROR_MASK) { |
611 | if (skb) { | 611 | if (skb) |
612 | cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; | 612 | cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; |
613 | cf->data[2] |= CAN_ERR_PROT_UNSPEC; | ||
614 | } | ||
615 | 613 | ||
616 | /* Check for Ack error interrupt */ | 614 | /* Check for Ack error interrupt */ |
617 | if (err_status & XCAN_ESR_ACKER_MASK) { | 615 | if (err_status & XCAN_ESR_ACKER_MASK) { |
618 | stats->tx_errors++; | 616 | stats->tx_errors++; |
619 | if (skb) { | 617 | if (skb) { |
620 | cf->can_id |= CAN_ERR_ACK; | 618 | cf->can_id |= CAN_ERR_ACK; |
621 | cf->data[3] |= CAN_ERR_PROT_LOC_ACK; | 619 | cf->data[3] = CAN_ERR_PROT_LOC_ACK; |
622 | } | 620 | } |
623 | } | 621 | } |
624 | 622 | ||
@@ -654,8 +652,7 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) | |||
654 | stats->rx_errors++; | 652 | stats->rx_errors++; |
655 | if (skb) { | 653 | if (skb) { |
656 | cf->can_id |= CAN_ERR_PROT; | 654 | cf->can_id |= CAN_ERR_PROT; |
657 | cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ | | 655 | cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; |
658 | CAN_ERR_PROT_LOC_CRC_DEL; | ||
659 | } | 656 | } |
660 | } | 657 | } |
661 | priv->can.can_stats.bus_error++; | 658 | priv->can.can_stats.bus_error++; |
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 955d06b9cdba..31c5e476fd64 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig | |||
@@ -29,6 +29,7 @@ source "drivers/net/ethernet/apm/Kconfig" | |||
29 | source "drivers/net/ethernet/apple/Kconfig" | 29 | source "drivers/net/ethernet/apple/Kconfig" |
30 | source "drivers/net/ethernet/arc/Kconfig" | 30 | source "drivers/net/ethernet/arc/Kconfig" |
31 | source "drivers/net/ethernet/atheros/Kconfig" | 31 | source "drivers/net/ethernet/atheros/Kconfig" |
32 | source "drivers/net/ethernet/aurora/Kconfig" | ||
32 | source "drivers/net/ethernet/cadence/Kconfig" | 33 | source "drivers/net/ethernet/cadence/Kconfig" |
33 | source "drivers/net/ethernet/adi/Kconfig" | 34 | source "drivers/net/ethernet/adi/Kconfig" |
34 | source "drivers/net/ethernet/broadcom/Kconfig" | 35 | source "drivers/net/ethernet/broadcom/Kconfig" |
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 4a2ee98738f0..071f84eb6f3f 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile | |||
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_XGENE) += apm/ | |||
15 | obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ | 15 | obj-$(CONFIG_NET_VENDOR_APPLE) += apple/ |
16 | obj-$(CONFIG_NET_VENDOR_ARC) += arc/ | 16 | obj-$(CONFIG_NET_VENDOR_ARC) += arc/ |
17 | obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ | 17 | obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/ |
18 | obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/ | ||
18 | obj-$(CONFIG_NET_CADENCE) += cadence/ | 19 | obj-$(CONFIG_NET_CADENCE) += cadence/ |
19 | obj-$(CONFIG_NET_BFIN) += adi/ | 20 | obj-$(CONFIG_NET_BFIN) += adi/ |
20 | obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ | 21 | obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/ |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 991412ce6f48..9147a0107c44 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
@@ -450,12 +450,12 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, | |||
450 | return NETDEV_TX_OK; | 450 | return NETDEV_TX_OK; |
451 | } | 451 | } |
452 | 452 | ||
453 | pdata->ring_ops->wr_cmd(tx_ring, count); | ||
454 | skb_tx_timestamp(skb); | 453 | skb_tx_timestamp(skb); |
455 | 454 | ||
456 | pdata->stats.tx_packets++; | 455 | pdata->stats.tx_packets++; |
457 | pdata->stats.tx_bytes += skb->len; | 456 | pdata->stats.tx_bytes += skb->len; |
458 | 457 | ||
458 | pdata->ring_ops->wr_cmd(tx_ring, count); | ||
459 | return NETDEV_TX_OK; | 459 | return NETDEV_TX_OK; |
460 | } | 460 | } |
461 | 461 | ||
@@ -688,10 +688,10 @@ static int xgene_enet_open(struct net_device *ndev) | |||
688 | mac_ops->tx_enable(pdata); | 688 | mac_ops->tx_enable(pdata); |
689 | mac_ops->rx_enable(pdata); | 689 | mac_ops->rx_enable(pdata); |
690 | 690 | ||
691 | xgene_enet_napi_enable(pdata); | ||
691 | ret = xgene_enet_register_irq(ndev); | 692 | ret = xgene_enet_register_irq(ndev); |
692 | if (ret) | 693 | if (ret) |
693 | return ret; | 694 | return ret; |
694 | xgene_enet_napi_enable(pdata); | ||
695 | 695 | ||
696 | if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) | 696 | if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) |
697 | phy_start(pdata->phy_dev); | 697 | phy_start(pdata->phy_dev); |
@@ -715,13 +715,13 @@ static int xgene_enet_close(struct net_device *ndev) | |||
715 | else | 715 | else |
716 | cancel_delayed_work_sync(&pdata->link_work); | 716 | cancel_delayed_work_sync(&pdata->link_work); |
717 | 717 | ||
718 | xgene_enet_napi_disable(pdata); | ||
719 | xgene_enet_free_irq(ndev); | ||
720 | xgene_enet_process_ring(pdata->rx_ring, -1); | ||
721 | |||
722 | mac_ops->tx_disable(pdata); | 718 | mac_ops->tx_disable(pdata); |
723 | mac_ops->rx_disable(pdata); | 719 | mac_ops->rx_disable(pdata); |
724 | 720 | ||
721 | xgene_enet_free_irq(ndev); | ||
722 | xgene_enet_napi_disable(pdata); | ||
723 | xgene_enet_process_ring(pdata->rx_ring, -1); | ||
724 | |||
725 | return 0; | 725 | return 0; |
726 | } | 726 | } |
727 | 727 | ||
@@ -1474,15 +1474,15 @@ static int xgene_enet_probe(struct platform_device *pdev) | |||
1474 | } | 1474 | } |
1475 | ndev->hw_features = ndev->features; | 1475 | ndev->hw_features = ndev->features; |
1476 | 1476 | ||
1477 | ret = register_netdev(ndev); | 1477 | ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); |
1478 | if (ret) { | 1478 | if (ret) { |
1479 | netdev_err(ndev, "Failed to register netdev\n"); | 1479 | netdev_err(ndev, "No usable DMA configuration\n"); |
1480 | goto err; | 1480 | goto err; |
1481 | } | 1481 | } |
1482 | 1482 | ||
1483 | ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64)); | 1483 | ret = register_netdev(ndev); |
1484 | if (ret) { | 1484 | if (ret) { |
1485 | netdev_err(ndev, "No usable DMA configuration\n"); | 1485 | netdev_err(ndev, "Failed to register netdev\n"); |
1486 | goto err; | 1486 | goto err; |
1487 | } | 1487 | } |
1488 | 1488 | ||
@@ -1490,14 +1490,17 @@ static int xgene_enet_probe(struct platform_device *pdev) | |||
1490 | if (ret) | 1490 | if (ret) |
1491 | goto err; | 1491 | goto err; |
1492 | 1492 | ||
1493 | xgene_enet_napi_add(pdata); | ||
1494 | mac_ops = pdata->mac_ops; | 1493 | mac_ops = pdata->mac_ops; |
1495 | if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) | 1494 | if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { |
1496 | ret = xgene_enet_mdio_config(pdata); | 1495 | ret = xgene_enet_mdio_config(pdata); |
1497 | else | 1496 | if (ret) |
1497 | goto err; | ||
1498 | } else { | ||
1498 | INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); | 1499 | INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); |
1500 | } | ||
1499 | 1501 | ||
1500 | return ret; | 1502 | xgene_enet_napi_add(pdata); |
1503 | return 0; | ||
1501 | err: | 1504 | err: |
1502 | unregister_netdev(ndev); | 1505 | unregister_netdev(ndev); |
1503 | free_netdev(ndev); | 1506 | free_netdev(ndev); |
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index c8af3ce3ea38..bd377a6b067d 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c | |||
@@ -1534,6 +1534,8 @@ static const struct pci_device_id alx_pci_tbl[] = { | |||
1534 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, | 1534 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, |
1535 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200), | 1535 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200), |
1536 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, | 1536 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, |
1537 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400), | ||
1538 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, | ||
1537 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), | 1539 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), |
1538 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, | 1540 | .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, |
1539 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, | 1541 | { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, |
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h index af006b44b2a6..0959e6824cb6 100644 --- a/drivers/net/ethernet/atheros/alx/reg.h +++ b/drivers/net/ethernet/atheros/alx/reg.h | |||
@@ -37,6 +37,7 @@ | |||
37 | 37 | ||
38 | #define ALX_DEV_ID_AR8161 0x1091 | 38 | #define ALX_DEV_ID_AR8161 0x1091 |
39 | #define ALX_DEV_ID_E2200 0xe091 | 39 | #define ALX_DEV_ID_E2200 0xe091 |
40 | #define ALX_DEV_ID_E2400 0xe0a1 | ||
40 | #define ALX_DEV_ID_AR8162 0x1090 | 41 | #define ALX_DEV_ID_AR8162 0x1090 |
41 | #define ALX_DEV_ID_AR8171 0x10A1 | 42 | #define ALX_DEV_ID_AR8171 0x10A1 |
42 | #define ALX_DEV_ID_AR8172 0x10A0 | 43 | #define ALX_DEV_ID_AR8172 0x10A0 |
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig new file mode 100644 index 000000000000..a3c7106fdf85 --- /dev/null +++ b/drivers/net/ethernet/aurora/Kconfig | |||
@@ -0,0 +1,20 @@ | |||
1 | config NET_VENDOR_AURORA | ||
2 | bool "Aurora VLSI devices" | ||
3 | help | ||
4 | If you have a network (Ethernet) device belonging to this class, | ||
5 | say Y. | ||
6 | |||
7 | Note that the answer to this question doesn't directly affect the | ||
8 | kernel: saying N will just cause the configurator to skip all | ||
9 | questions about Aurora devices. If you say Y, you will be asked | ||
10 | for your specific device in the following questions. | ||
11 | |||
12 | if NET_VENDOR_AURORA | ||
13 | |||
14 | config AURORA_NB8800 | ||
15 | tristate "Aurora AU-NB8800 support" | ||
16 | select PHYLIB | ||
17 | help | ||
18 | Support for the AU-NB8800 gigabit Ethernet controller. | ||
19 | |||
20 | endif | ||
diff --git a/drivers/net/ethernet/aurora/Makefile b/drivers/net/ethernet/aurora/Makefile new file mode 100644 index 000000000000..6cb528a2fc26 --- /dev/null +++ b/drivers/net/ethernet/aurora/Makefile | |||
@@ -0,0 +1 @@ | |||
obj-$(CONFIG_AURORA_NB8800) += nb8800.o | |||
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c new file mode 100644 index 000000000000..ecc4a334c507 --- /dev/null +++ b/drivers/net/ethernet/aurora/nb8800.c | |||
@@ -0,0 +1,1552 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2015 Mans Rullgard <mans@mansr.com> | ||
3 | * | ||
4 | * Mostly rewritten, based on driver from Sigma Designs. Original | ||
5 | * copyright notice below. | ||
6 | * | ||
7 | * | ||
8 | * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac. | ||
9 | * | ||
10 | * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | */ | ||
22 | |||
23 | #include <linux/module.h> | ||
24 | #include <linux/etherdevice.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/ethtool.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/platform_device.h> | ||
29 | #include <linux/of_device.h> | ||
30 | #include <linux/of_mdio.h> | ||
31 | #include <linux/of_net.h> | ||
32 | #include <linux/dma-mapping.h> | ||
33 | #include <linux/phy.h> | ||
34 | #include <linux/cache.h> | ||
35 | #include <linux/jiffies.h> | ||
36 | #include <linux/io.h> | ||
37 | #include <linux/iopoll.h> | ||
38 | #include <asm/barrier.h> | ||
39 | |||
40 | #include "nb8800.h" | ||
41 | |||
42 | static void nb8800_tx_done(struct net_device *dev); | ||
43 | static int nb8800_dma_stop(struct net_device *dev); | ||
44 | |||
45 | static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg) | ||
46 | { | ||
47 | return readb_relaxed(priv->base + reg); | ||
48 | } | ||
49 | |||
50 | static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg) | ||
51 | { | ||
52 | return readl_relaxed(priv->base + reg); | ||
53 | } | ||
54 | |||
55 | static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val) | ||
56 | { | ||
57 | writeb_relaxed(val, priv->base + reg); | ||
58 | } | ||
59 | |||
60 | static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val) | ||
61 | { | ||
62 | writew_relaxed(val, priv->base + reg); | ||
63 | } | ||
64 | |||
65 | static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val) | ||
66 | { | ||
67 | writel_relaxed(val, priv->base + reg); | ||
68 | } | ||
69 | |||
70 | static inline void nb8800_maskb(struct nb8800_priv *priv, int reg, | ||
71 | u32 mask, u32 val) | ||
72 | { | ||
73 | u32 old = nb8800_readb(priv, reg); | ||
74 | u32 new = (old & ~mask) | (val & mask); | ||
75 | |||
76 | if (new != old) | ||
77 | nb8800_writeb(priv, reg, new); | ||
78 | } | ||
79 | |||
80 | static inline void nb8800_maskl(struct nb8800_priv *priv, int reg, | ||
81 | u32 mask, u32 val) | ||
82 | { | ||
83 | u32 old = nb8800_readl(priv, reg); | ||
84 | u32 new = (old & ~mask) | (val & mask); | ||
85 | |||
86 | if (new != old) | ||
87 | nb8800_writel(priv, reg, new); | ||
88 | } | ||
89 | |||
90 | static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits, | ||
91 | bool set) | ||
92 | { | ||
93 | nb8800_maskb(priv, reg, bits, set ? bits : 0); | ||
94 | } | ||
95 | |||
96 | static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits) | ||
97 | { | ||
98 | nb8800_maskb(priv, reg, bits, bits); | ||
99 | } | ||
100 | |||
101 | static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits) | ||
102 | { | ||
103 | nb8800_maskb(priv, reg, bits, 0); | ||
104 | } | ||
105 | |||
106 | static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits, | ||
107 | bool set) | ||
108 | { | ||
109 | nb8800_maskl(priv, reg, bits, set ? bits : 0); | ||
110 | } | ||
111 | |||
112 | static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits) | ||
113 | { | ||
114 | nb8800_maskl(priv, reg, bits, bits); | ||
115 | } | ||
116 | |||
117 | static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits) | ||
118 | { | ||
119 | nb8800_maskl(priv, reg, bits, 0); | ||
120 | } | ||
121 | |||
122 | static int nb8800_mdio_wait(struct mii_bus *bus) | ||
123 | { | ||
124 | struct nb8800_priv *priv = bus->priv; | ||
125 | u32 val; | ||
126 | |||
127 | return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD, | ||
128 | val, !(val & MDIO_CMD_GO), 1, 1000); | ||
129 | } | ||
130 | |||
131 | static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd) | ||
132 | { | ||
133 | struct nb8800_priv *priv = bus->priv; | ||
134 | int err; | ||
135 | |||
136 | err = nb8800_mdio_wait(bus); | ||
137 | if (err) | ||
138 | return err; | ||
139 | |||
140 | nb8800_writel(priv, NB8800_MDIO_CMD, cmd); | ||
141 | udelay(10); | ||
142 | nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO); | ||
143 | |||
144 | return nb8800_mdio_wait(bus); | ||
145 | } | ||
146 | |||
147 | static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg) | ||
148 | { | ||
149 | struct nb8800_priv *priv = bus->priv; | ||
150 | u32 val; | ||
151 | int err; | ||
152 | |||
153 | err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg)); | ||
154 | if (err) | ||
155 | return err; | ||
156 | |||
157 | val = nb8800_readl(priv, NB8800_MDIO_STS); | ||
158 | if (val & MDIO_STS_ERR) | ||
159 | return 0xffff; | ||
160 | |||
161 | return val & 0xffff; | ||
162 | } | ||
163 | |||
164 | static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) | ||
165 | { | ||
166 | u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) | | ||
167 | MDIO_CMD_DATA(val) | MDIO_CMD_WR; | ||
168 | |||
169 | return nb8800_mdio_cmd(bus, cmd); | ||
170 | } | ||
171 | |||
172 | static void nb8800_mac_tx(struct net_device *dev, bool enable) | ||
173 | { | ||
174 | struct nb8800_priv *priv = netdev_priv(dev); | ||
175 | |||
176 | while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN) | ||
177 | cpu_relax(); | ||
178 | |||
179 | nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable); | ||
180 | } | ||
181 | |||
182 | static void nb8800_mac_rx(struct net_device *dev, bool enable) | ||
183 | { | ||
184 | nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable); | ||
185 | } | ||
186 | |||
187 | static void nb8800_mac_af(struct net_device *dev, bool enable) | ||
188 | { | ||
189 | nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable); | ||
190 | } | ||
191 | |||
192 | static void nb8800_start_rx(struct net_device *dev) | ||
193 | { | ||
194 | nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN); | ||
195 | } | ||
196 | |||
197 | static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi) | ||
198 | { | ||
199 | struct nb8800_priv *priv = netdev_priv(dev); | ||
200 | struct nb8800_rx_desc *rxd = &priv->rx_descs[i]; | ||
201 | struct nb8800_rx_buf *rxb = &priv->rx_bufs[i]; | ||
202 | int size = L1_CACHE_ALIGN(RX_BUF_SIZE); | ||
203 | dma_addr_t dma_addr; | ||
204 | struct page *page; | ||
205 | unsigned long offset; | ||
206 | void *data; | ||
207 | |||
208 | data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size); | ||
209 | if (!data) | ||
210 | return -ENOMEM; | ||
211 | |||
212 | page = virt_to_head_page(data); | ||
213 | offset = data - page_address(page); | ||
214 | |||
215 | dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE, | ||
216 | DMA_FROM_DEVICE); | ||
217 | |||
218 | if (dma_mapping_error(&dev->dev, dma_addr)) { | ||
219 | skb_free_frag(data); | ||
220 | return -ENOMEM; | ||
221 | } | ||
222 | |||
223 | rxb->page = page; | ||
224 | rxb->offset = offset; | ||
225 | rxd->desc.s_addr = dma_addr; | ||
226 | |||
227 | return 0; | ||
228 | } | ||
229 | |||
230 | static void nb8800_receive(struct net_device *dev, unsigned int i, | ||
231 | unsigned int len) | ||
232 | { | ||
233 | struct nb8800_priv *priv = netdev_priv(dev); | ||
234 | struct nb8800_rx_desc *rxd = &priv->rx_descs[i]; | ||
235 | struct page *page = priv->rx_bufs[i].page; | ||
236 | int offset = priv->rx_bufs[i].offset; | ||
237 | void *data = page_address(page) + offset; | ||
238 | dma_addr_t dma = rxd->desc.s_addr; | ||
239 | struct sk_buff *skb; | ||
240 | unsigned int size; | ||
241 | int err; | ||
242 | |||
243 | size = len <= RX_COPYBREAK ? len : RX_COPYHDR; | ||
244 | |||
245 | skb = napi_alloc_skb(&priv->napi, size); | ||
246 | if (!skb) { | ||
247 | netdev_err(dev, "rx skb allocation failed\n"); | ||
248 | dev->stats.rx_dropped++; | ||
249 | return; | ||
250 | } | ||
251 | |||
252 | if (len <= RX_COPYBREAK) { | ||
253 | dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE); | ||
254 | memcpy(skb_put(skb, len), data, len); | ||
255 | dma_sync_single_for_device(&dev->dev, dma, len, | ||
256 | DMA_FROM_DEVICE); | ||
257 | } else { | ||
258 | err = nb8800_alloc_rx(dev, i, true); | ||
259 | if (err) { | ||
260 | netdev_err(dev, "rx buffer allocation failed\n"); | ||
261 | dev->stats.rx_dropped++; | ||
262 | return; | ||
263 | } | ||
264 | |||
265 | dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE); | ||
266 | memcpy(skb_put(skb, RX_COPYHDR), data, RX_COPYHDR); | ||
267 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, | ||
268 | offset + RX_COPYHDR, len - RX_COPYHDR, | ||
269 | RX_BUF_SIZE); | ||
270 | } | ||
271 | |||
272 | skb->protocol = eth_type_trans(skb, dev); | ||
273 | napi_gro_receive(&priv->napi, skb); | ||
274 | } | ||
275 | |||
276 | static void nb8800_rx_error(struct net_device *dev, u32 report) | ||
277 | { | ||
278 | if (report & RX_LENGTH_ERR) | ||
279 | dev->stats.rx_length_errors++; | ||
280 | |||
281 | if (report & RX_FCS_ERR) | ||
282 | dev->stats.rx_crc_errors++; | ||
283 | |||
284 | if (report & RX_FIFO_OVERRUN) | ||
285 | dev->stats.rx_fifo_errors++; | ||
286 | |||
287 | if (report & RX_ALIGNMENT_ERROR) | ||
288 | dev->stats.rx_frame_errors++; | ||
289 | |||
290 | dev->stats.rx_errors++; | ||
291 | } | ||
292 | |||
293 | static int nb8800_poll(struct napi_struct *napi, int budget) | ||
294 | { | ||
295 | struct net_device *dev = napi->dev; | ||
296 | struct nb8800_priv *priv = netdev_priv(dev); | ||
297 | struct nb8800_rx_desc *rxd; | ||
298 | unsigned int last = priv->rx_eoc; | ||
299 | unsigned int next; | ||
300 | int work = 0; | ||
301 | |||
302 | nb8800_tx_done(dev); | ||
303 | |||
304 | again: | ||
305 | while (work < budget) { | ||
306 | struct nb8800_rx_buf *rxb; | ||
307 | unsigned int len; | ||
308 | |||
309 | next = (last + 1) % RX_DESC_COUNT; | ||
310 | |||
311 | rxb = &priv->rx_bufs[next]; | ||
312 | rxd = &priv->rx_descs[next]; | ||
313 | |||
314 | if (!rxd->report) | ||
315 | break; | ||
316 | |||
317 | len = RX_BYTES_TRANSFERRED(rxd->report); | ||
318 | |||
319 | if (IS_RX_ERROR(rxd->report)) | ||
320 | nb8800_rx_error(dev, rxd->report); | ||
321 | else | ||
322 | nb8800_receive(dev, next, len); | ||
323 | |||
324 | dev->stats.rx_packets++; | ||
325 | dev->stats.rx_bytes += len; | ||
326 | |||
327 | if (rxd->report & RX_MULTICAST_PKT) | ||
328 | dev->stats.multicast++; | ||
329 | |||
330 | rxd->report = 0; | ||
331 | last = next; | ||
332 | work++; | ||
333 | } | ||
334 | |||
335 | if (work) { | ||
336 | priv->rx_descs[last].desc.config |= DESC_EOC; | ||
337 | wmb(); /* ensure new EOC is written before clearing old */ | ||
338 | priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC; | ||
339 | priv->rx_eoc = last; | ||
340 | nb8800_start_rx(dev); | ||
341 | } | ||
342 | |||
343 | if (work < budget) { | ||
344 | nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq); | ||
345 | |||
346 | /* If a packet arrived after we last checked but | ||
347 | * before writing RX_ITR, the interrupt will be | ||
348 | * delayed, so we retrieve it now. | ||
349 | */ | ||
350 | if (priv->rx_descs[next].report) | ||
351 | goto again; | ||
352 | |||
353 | napi_complete_done(napi, work); | ||
354 | } | ||
355 | |||
356 | return work; | ||
357 | } | ||
358 | |||
359 | static void __nb8800_tx_dma_start(struct net_device *dev) | ||
360 | { | ||
361 | struct nb8800_priv *priv = netdev_priv(dev); | ||
362 | struct nb8800_tx_buf *txb; | ||
363 | u32 txc_cr; | ||
364 | |||
365 | txb = &priv->tx_bufs[priv->tx_queue]; | ||
366 | if (!txb->ready) | ||
367 | return; | ||
368 | |||
369 | txc_cr = nb8800_readl(priv, NB8800_TXC_CR); | ||
370 | if (txc_cr & TCR_EN) | ||
371 | return; | ||
372 | |||
373 | nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc); | ||
374 | wmb(); /* ensure desc addr is written before starting DMA */ | ||
375 | nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN); | ||
376 | |||
377 | priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT; | ||
378 | } | ||
379 | |||
380 | static void nb8800_tx_dma_start(struct net_device *dev) | ||
381 | { | ||
382 | struct nb8800_priv *priv = netdev_priv(dev); | ||
383 | |||
384 | spin_lock_irq(&priv->tx_lock); | ||
385 | __nb8800_tx_dma_start(dev); | ||
386 | spin_unlock_irq(&priv->tx_lock); | ||
387 | } | ||
388 | |||
389 | static void nb8800_tx_dma_start_irq(struct net_device *dev) | ||
390 | { | ||
391 | struct nb8800_priv *priv = netdev_priv(dev); | ||
392 | |||
393 | spin_lock(&priv->tx_lock); | ||
394 | __nb8800_tx_dma_start(dev); | ||
395 | spin_unlock(&priv->tx_lock); | ||
396 | } | ||
397 | |||
398 | static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev) | ||
399 | { | ||
400 | struct nb8800_priv *priv = netdev_priv(dev); | ||
401 | struct nb8800_tx_desc *txd; | ||
402 | struct nb8800_tx_buf *txb; | ||
403 | struct nb8800_dma_desc *desc; | ||
404 | dma_addr_t dma_addr; | ||
405 | unsigned int dma_len; | ||
406 | unsigned int align; | ||
407 | unsigned int next; | ||
408 | |||
409 | if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) { | ||
410 | netif_stop_queue(dev); | ||
411 | return NETDEV_TX_BUSY; | ||
412 | } | ||
413 | |||
414 | align = (8 - (uintptr_t)skb->data) & 7; | ||
415 | |||
416 | dma_len = skb->len - align; | ||
417 | dma_addr = dma_map_single(&dev->dev, skb->data + align, | ||
418 | dma_len, DMA_TO_DEVICE); | ||
419 | |||
420 | if (dma_mapping_error(&dev->dev, dma_addr)) { | ||
421 | netdev_err(dev, "tx dma mapping error\n"); | ||
422 | kfree_skb(skb); | ||
423 | dev->stats.tx_dropped++; | ||
424 | return NETDEV_TX_OK; | ||
425 | } | ||
426 | |||
427 | if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) { | ||
428 | netif_stop_queue(dev); | ||
429 | skb->xmit_more = 0; | ||
430 | } | ||
431 | |||
432 | next = priv->tx_next; | ||
433 | txb = &priv->tx_bufs[next]; | ||
434 | txd = &priv->tx_descs[next]; | ||
435 | desc = &txd->desc[0]; | ||
436 | |||
437 | next = (next + 1) % TX_DESC_COUNT; | ||
438 | |||
439 | if (align) { | ||
440 | memcpy(txd->buf, skb->data, align); | ||
441 | |||
442 | desc->s_addr = | ||
443 | txb->dma_desc + offsetof(struct nb8800_tx_desc, buf); | ||
444 | desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]); | ||
445 | desc->config = DESC_BTS(2) | DESC_DS | align; | ||
446 | |||
447 | desc++; | ||
448 | } | ||
449 | |||
450 | desc->s_addr = dma_addr; | ||
451 | desc->n_addr = priv->tx_bufs[next].dma_desc; | ||
452 | desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len; | ||
453 | |||
454 | if (!skb->xmit_more) | ||
455 | desc->config |= DESC_EOC; | ||
456 | |||
457 | txb->skb = skb; | ||
458 | txb->dma_addr = dma_addr; | ||
459 | txb->dma_len = dma_len; | ||
460 | |||
461 | if (!priv->tx_chain) { | ||
462 | txb->chain_len = 1; | ||
463 | priv->tx_chain = txb; | ||
464 | } else { | ||
465 | priv->tx_chain->chain_len++; | ||
466 | } | ||
467 | |||
468 | netdev_sent_queue(dev, skb->len); | ||
469 | |||
470 | priv->tx_next = next; | ||
471 | |||
472 | if (!skb->xmit_more) { | ||
473 | smp_wmb(); | ||
474 | priv->tx_chain->ready = true; | ||
475 | priv->tx_chain = NULL; | ||
476 | nb8800_tx_dma_start(dev); | ||
477 | } | ||
478 | |||
479 | return NETDEV_TX_OK; | ||
480 | } | ||
481 | |||
482 | static void nb8800_tx_error(struct net_device *dev, u32 report) | ||
483 | { | ||
484 | if (report & TX_LATE_COLLISION) | ||
485 | dev->stats.collisions++; | ||
486 | |||
487 | if (report & TX_PACKET_DROPPED) | ||
488 | dev->stats.tx_dropped++; | ||
489 | |||
490 | if (report & TX_FIFO_UNDERRUN) | ||
491 | dev->stats.tx_fifo_errors++; | ||
492 | |||
493 | dev->stats.tx_errors++; | ||
494 | } | ||
495 | |||
496 | static void nb8800_tx_done(struct net_device *dev) | ||
497 | { | ||
498 | struct nb8800_priv *priv = netdev_priv(dev); | ||
499 | unsigned int limit = priv->tx_next; | ||
500 | unsigned int done = priv->tx_done; | ||
501 | unsigned int packets = 0; | ||
502 | unsigned int len = 0; | ||
503 | |||
504 | while (done != limit) { | ||
505 | struct nb8800_tx_desc *txd = &priv->tx_descs[done]; | ||
506 | struct nb8800_tx_buf *txb = &priv->tx_bufs[done]; | ||
507 | struct sk_buff *skb; | ||
508 | |||
509 | if (!txd->report) | ||
510 | break; | ||
511 | |||
512 | skb = txb->skb; | ||
513 | len += skb->len; | ||
514 | |||
515 | dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len, | ||
516 | DMA_TO_DEVICE); | ||
517 | |||
518 | if (IS_TX_ERROR(txd->report)) { | ||
519 | nb8800_tx_error(dev, txd->report); | ||
520 | kfree_skb(skb); | ||
521 | } else { | ||
522 | consume_skb(skb); | ||
523 | } | ||
524 | |||
525 | dev->stats.tx_packets++; | ||
526 | dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report); | ||
527 | dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report); | ||
528 | |||
529 | txb->skb = NULL; | ||
530 | txb->ready = false; | ||
531 | txd->report = 0; | ||
532 | |||
533 | done = (done + 1) % TX_DESC_COUNT; | ||
534 | packets++; | ||
535 | } | ||
536 | |||
537 | if (packets) { | ||
538 | smp_mb__before_atomic(); | ||
539 | atomic_add(packets, &priv->tx_free); | ||
540 | netdev_completed_queue(dev, packets, len); | ||
541 | netif_wake_queue(dev); | ||
542 | priv->tx_done = done; | ||
543 | } | ||
544 | } | ||
545 | |||
546 | static irqreturn_t nb8800_irq(int irq, void *dev_id) | ||
547 | { | ||
548 | struct net_device *dev = dev_id; | ||
549 | struct nb8800_priv *priv = netdev_priv(dev); | ||
550 | irqreturn_t ret = IRQ_NONE; | ||
551 | u32 val; | ||
552 | |||
553 | /* tx interrupt */ | ||
554 | val = nb8800_readl(priv, NB8800_TXC_SR); | ||
555 | if (val) { | ||
556 | nb8800_writel(priv, NB8800_TXC_SR, val); | ||
557 | |||
558 | if (val & TSR_DI) | ||
559 | nb8800_tx_dma_start_irq(dev); | ||
560 | |||
561 | if (val & TSR_TI) | ||
562 | napi_schedule_irqoff(&priv->napi); | ||
563 | |||
564 | if (unlikely(val & TSR_DE)) | ||
565 | netdev_err(dev, "TX DMA error\n"); | ||
566 | |||
567 | /* should never happen with automatic status retrieval */ | ||
568 | if (unlikely(val & TSR_TO)) | ||
569 | netdev_err(dev, "TX Status FIFO overflow\n"); | ||
570 | |||
571 | ret = IRQ_HANDLED; | ||
572 | } | ||
573 | |||
574 | /* rx interrupt */ | ||
575 | val = nb8800_readl(priv, NB8800_RXC_SR); | ||
576 | if (val) { | ||
577 | nb8800_writel(priv, NB8800_RXC_SR, val); | ||
578 | |||
579 | if (likely(val & (RSR_RI | RSR_DI))) { | ||
580 | nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll); | ||
581 | napi_schedule_irqoff(&priv->napi); | ||
582 | } | ||
583 | |||
584 | if (unlikely(val & RSR_DE)) | ||
585 | netdev_err(dev, "RX DMA error\n"); | ||
586 | |||
587 | /* should never happen with automatic status retrieval */ | ||
588 | if (unlikely(val & RSR_RO)) | ||
589 | netdev_err(dev, "RX Status FIFO overflow\n"); | ||
590 | |||
591 | ret = IRQ_HANDLED; | ||
592 | } | ||
593 | |||
594 | return ret; | ||
595 | } | ||
596 | |||
597 | static void nb8800_mac_config(struct net_device *dev) | ||
598 | { | ||
599 | struct nb8800_priv *priv = netdev_priv(dev); | ||
600 | bool gigabit = priv->speed == SPEED_1000; | ||
601 | u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE; | ||
602 | u32 mac_mode = 0; | ||
603 | u32 slot_time; | ||
604 | u32 phy_clk; | ||
605 | u32 ict; | ||
606 | |||
607 | if (!priv->duplex) | ||
608 | mac_mode |= HALF_DUPLEX; | ||
609 | |||
610 | if (gigabit) { | ||
611 | if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII) | ||
612 | mac_mode |= RGMII_MODE; | ||
613 | |||
614 | mac_mode |= GMAC_MODE; | ||
615 | phy_clk = 125000000; | ||
616 | |||
617 | /* Should be 512 but register is only 8 bits */ | ||
618 | slot_time = 255; | ||
619 | } else { | ||
620 | phy_clk = 25000000; | ||
621 | slot_time = 128; | ||
622 | } | ||
623 | |||
624 | ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk)); | ||
625 | |||
626 | nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict); | ||
627 | nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time); | ||
628 | nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode); | ||
629 | } | ||
630 | |||
631 | static void nb8800_pause_config(struct net_device *dev) | ||
632 | { | ||
633 | struct nb8800_priv *priv = netdev_priv(dev); | ||
634 | struct phy_device *phydev = priv->phydev; | ||
635 | u32 rxcr; | ||
636 | |||
637 | if (priv->pause_aneg) { | ||
638 | if (!phydev || !phydev->link) | ||
639 | return; | ||
640 | |||
641 | priv->pause_rx = phydev->pause; | ||
642 | priv->pause_tx = phydev->pause ^ phydev->asym_pause; | ||
643 | } | ||
644 | |||
645 | nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx); | ||
646 | |||
647 | rxcr = nb8800_readl(priv, NB8800_RXC_CR); | ||
648 | if (!!(rxcr & RCR_FL) == priv->pause_tx) | ||
649 | return; | ||
650 | |||
651 | if (netif_running(dev)) { | ||
652 | napi_disable(&priv->napi); | ||
653 | netif_tx_lock_bh(dev); | ||
654 | nb8800_dma_stop(dev); | ||
655 | nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx); | ||
656 | nb8800_start_rx(dev); | ||
657 | netif_tx_unlock_bh(dev); | ||
658 | napi_enable(&priv->napi); | ||
659 | } else { | ||
660 | nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx); | ||
661 | } | ||
662 | } | ||
663 | |||
664 | static void nb8800_link_reconfigure(struct net_device *dev) | ||
665 | { | ||
666 | struct nb8800_priv *priv = netdev_priv(dev); | ||
667 | struct phy_device *phydev = priv->phydev; | ||
668 | int change = 0; | ||
669 | |||
670 | if (phydev->link) { | ||
671 | if (phydev->speed != priv->speed) { | ||
672 | priv->speed = phydev->speed; | ||
673 | change = 1; | ||
674 | } | ||
675 | |||
676 | if (phydev->duplex != priv->duplex) { | ||
677 | priv->duplex = phydev->duplex; | ||
678 | change = 1; | ||
679 | } | ||
680 | |||
681 | if (change) | ||
682 | nb8800_mac_config(dev); | ||
683 | |||
684 | nb8800_pause_config(dev); | ||
685 | } | ||
686 | |||
687 | if (phydev->link != priv->link) { | ||
688 | priv->link = phydev->link; | ||
689 | change = 1; | ||
690 | } | ||
691 | |||
692 | if (change) | ||
693 | phy_print_status(priv->phydev); | ||
694 | } | ||
695 | |||
696 | static void nb8800_update_mac_addr(struct net_device *dev) | ||
697 | { | ||
698 | struct nb8800_priv *priv = netdev_priv(dev); | ||
699 | int i; | ||
700 | |||
701 | for (i = 0; i < ETH_ALEN; i++) | ||
702 | nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]); | ||
703 | |||
704 | for (i = 0; i < ETH_ALEN; i++) | ||
705 | nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]); | ||
706 | } | ||
707 | |||
708 | static int nb8800_set_mac_address(struct net_device *dev, void *addr) | ||
709 | { | ||
710 | struct sockaddr *sock = addr; | ||
711 | |||
712 | if (netif_running(dev)) | ||
713 | return -EBUSY; | ||
714 | |||
715 | ether_addr_copy(dev->dev_addr, sock->sa_data); | ||
716 | nb8800_update_mac_addr(dev); | ||
717 | |||
718 | return 0; | ||
719 | } | ||
720 | |||
721 | static void nb8800_mc_init(struct net_device *dev, int val) | ||
722 | { | ||
723 | struct nb8800_priv *priv = netdev_priv(dev); | ||
724 | |||
725 | nb8800_writeb(priv, NB8800_MC_INIT, val); | ||
726 | readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val, | ||
727 | 1, 1000); | ||
728 | } | ||
729 | |||
730 | static void nb8800_set_rx_mode(struct net_device *dev) | ||
731 | { | ||
732 | struct nb8800_priv *priv = netdev_priv(dev); | ||
733 | struct netdev_hw_addr *ha; | ||
734 | int i; | ||
735 | |||
736 | if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { | ||
737 | nb8800_mac_af(dev, false); | ||
738 | return; | ||
739 | } | ||
740 | |||
741 | nb8800_mac_af(dev, true); | ||
742 | nb8800_mc_init(dev, 0); | ||
743 | |||
744 | netdev_for_each_mc_addr(ha, dev) { | ||
745 | for (i = 0; i < ETH_ALEN; i++) | ||
746 | nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]); | ||
747 | |||
748 | nb8800_mc_init(dev, 0xff); | ||
749 | } | ||
750 | } | ||
751 | |||
752 | #define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc)) | ||
753 | #define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc)) | ||
754 | |||
755 | static void nb8800_dma_free(struct net_device *dev) | ||
756 | { | ||
757 | struct nb8800_priv *priv = netdev_priv(dev); | ||
758 | unsigned int i; | ||
759 | |||
760 | if (priv->rx_bufs) { | ||
761 | for (i = 0; i < RX_DESC_COUNT; i++) | ||
762 | if (priv->rx_bufs[i].page) | ||
763 | put_page(priv->rx_bufs[i].page); | ||
764 | |||
765 | kfree(priv->rx_bufs); | ||
766 | priv->rx_bufs = NULL; | ||
767 | } | ||
768 | |||
769 | if (priv->tx_bufs) { | ||
770 | for (i = 0; i < TX_DESC_COUNT; i++) | ||
771 | kfree_skb(priv->tx_bufs[i].skb); | ||
772 | |||
773 | kfree(priv->tx_bufs); | ||
774 | priv->tx_bufs = NULL; | ||
775 | } | ||
776 | |||
777 | if (priv->rx_descs) { | ||
778 | dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs, | ||
779 | priv->rx_desc_dma); | ||
780 | priv->rx_descs = NULL; | ||
781 | } | ||
782 | |||
783 | if (priv->tx_descs) { | ||
784 | dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs, | ||
785 | priv->tx_desc_dma); | ||
786 | priv->tx_descs = NULL; | ||
787 | } | ||
788 | } | ||
789 | |||
/* Rebuild both descriptor rings to their pristine state.
 *
 * The rx ring is linked into a circular chain: each descriptor's
 * n_addr points at the next, and the final descriptor is pointed back
 * at the head and marked EOC so the DMA engine halts there until the
 * driver advances the ring.  All tx software state is reset and the
 * hardware is told where the rx ring starts.
 */
static void nb8800_dma_reset(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd;
	struct nb8800_tx_desc *txd;
	unsigned int i;

	for (i = 0; i < RX_DESC_COUNT; i++) {
		/* bus address of descriptor i within the coherent ring */
		dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd);

		rxd = &priv->rx_descs[i];
		rxd->desc.n_addr = rx_dma + sizeof(*rxd);
		/* hardware writes its status into this descriptor's
		 * own 'report' field
		 */
		rxd->desc.r_addr =
			rx_dma + offsetof(struct nb8800_rx_desc, report);
		rxd->desc.config = priv->rx_dma_config;
		rxd->report = 0;
	}

	/* rxd still points at the last descriptor (RX_DESC_COUNT > 0):
	 * close the ring and mark the end of chain
	 */
	rxd->desc.n_addr = priv->rx_desc_dma;
	rxd->desc.config |= DESC_EOC;

	priv->rx_eoc = RX_DESC_COUNT - 1;

	for (i = 0; i < TX_DESC_COUNT; i++) {
		struct nb8800_tx_buf *txb = &priv->tx_bufs[i];
		dma_addr_t r_dma = txb->dma_desc +
			offsetof(struct nb8800_tx_desc, report);

		/* both sub-descriptors report into the same field */
		txd = &priv->tx_descs[i];
		txd->desc[0].r_addr = r_dma;
		txd->desc[1].r_addr = r_dma;
		txd->report = 0;
	}

	priv->tx_next = 0;
	priv->tx_queue = 0;
	priv->tx_done = 0;
	atomic_set(&priv->tx_free, TX_DESC_COUNT);

	nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma);

	wmb();	/* ensure all setup is written before starting */
}
833 | |||
834 | static int nb8800_dma_init(struct net_device *dev) | ||
835 | { | ||
836 | struct nb8800_priv *priv = netdev_priv(dev); | ||
837 | unsigned int n_rx = RX_DESC_COUNT; | ||
838 | unsigned int n_tx = TX_DESC_COUNT; | ||
839 | unsigned int i; | ||
840 | int err; | ||
841 | |||
842 | priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE, | ||
843 | &priv->rx_desc_dma, GFP_KERNEL); | ||
844 | if (!priv->rx_descs) | ||
845 | goto err_out; | ||
846 | |||
847 | priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL); | ||
848 | if (!priv->rx_bufs) | ||
849 | goto err_out; | ||
850 | |||
851 | for (i = 0; i < n_rx; i++) { | ||
852 | err = nb8800_alloc_rx(dev, i, false); | ||
853 | if (err) | ||
854 | goto err_out; | ||
855 | } | ||
856 | |||
857 | priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE, | ||
858 | &priv->tx_desc_dma, GFP_KERNEL); | ||
859 | if (!priv->tx_descs) | ||
860 | goto err_out; | ||
861 | |||
862 | priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL); | ||
863 | if (!priv->tx_bufs) | ||
864 | goto err_out; | ||
865 | |||
866 | for (i = 0; i < n_tx; i++) | ||
867 | priv->tx_bufs[i].dma_desc = | ||
868 | priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc); | ||
869 | |||
870 | nb8800_dma_reset(dev); | ||
871 | |||
872 | return 0; | ||
873 | |||
874 | err_out: | ||
875 | nb8800_dma_free(dev); | ||
876 | |||
877 | return -ENOMEM; | ||
878 | } | ||
879 | |||
/* Stop both DMA channels.
 *
 * The tx channel is simply polled until it goes idle.  The rx channel
 * has no stop control, so it must be driven to the end of its
 * descriptor chain; see the in-body comment for the loopback trick
 * used to achieve that.  Returns 0 on success or -ETIMEDOUT if the rx
 * DMA would not stop after several attempts.
 */
static int nb8800_dma_stop(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_buf *txb = &priv->tx_bufs[0];
	struct nb8800_tx_desc *txd = &priv->tx_descs[0];
	int retry = 5;
	u32 txcr;
	u32 rxcr;
	int err;
	unsigned int i;

	/* wait for tx to finish */
	err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr,
					!(txcr & TCR_EN) &&
					priv->tx_done == priv->tx_next,
					1000, 1000000);
	if (err)
		return err;

	/* The rx DMA only stops if it reaches the end of chain.
	 * To make this happen, we set the EOC flag on all rx
	 * descriptors, put the device in loopback mode, and send
	 * a few dummy frames. The interrupt handler will ignore
	 * these since NAPI is disabled and no real frames are in
	 * the tx queue.
	 */

	for (i = 0; i < RX_DESC_COUNT; i++)
		priv->rx_descs[i].desc.config |= DESC_EOC;

	/* build an 8-byte dummy frame in tx slot 0's bounce buffer */
	txd->desc[0].s_addr =
		txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
	txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8;
	memset(txd->buf, 0, sizeof(txd->buf));

	/* disable address filtering so the looped-back frame is accepted */
	nb8800_mac_af(dev, false);
	nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN);

	do {
		/* txcr still holds the last TXC_CR value read above */
		nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
		wmb();
		nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN);

		err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR,
						rxcr, !(rxcr & RCR_EN),
						1000, 100000);
	} while (err && --retry);

	/* restore normal operation and reinitialise the rings */
	nb8800_mac_af(dev, true);
	nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
	nb8800_dma_reset(dev);

	return retry ? 0 : -ETIMEDOUT;
}
934 | |||
/* Fold the driver's pause configuration into the PHY's supported and
 * advertised masks, using the standard flow-control encoding:
 *
 *   rx && tx -> Pause
 *   rx only  -> Pause | Asym_Pause
 *   tx only  -> Asym_Pause
 *
 * The |= then ^= sequence below implements exactly this truth table.
 * No-op while the PHY is not attached (interface down).
 */
static void nb8800_pause_adv(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	u32 adv = 0;

	if (!priv->phydev)
		return;

	if (priv->pause_rx)
		adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (priv->pause_tx)
		adv ^= ADVERTISED_Asym_Pause;

	priv->phydev->supported |= adv;
	priv->phydev->advertising |= adv;
}
951 | |||
952 | static int nb8800_open(struct net_device *dev) | ||
953 | { | ||
954 | struct nb8800_priv *priv = netdev_priv(dev); | ||
955 | int err; | ||
956 | |||
957 | /* clear any pending interrupts */ | ||
958 | nb8800_writel(priv, NB8800_RXC_SR, 0xf); | ||
959 | nb8800_writel(priv, NB8800_TXC_SR, 0xf); | ||
960 | |||
961 | err = nb8800_dma_init(dev); | ||
962 | if (err) | ||
963 | return err; | ||
964 | |||
965 | err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev); | ||
966 | if (err) | ||
967 | goto err_free_dma; | ||
968 | |||
969 | nb8800_mac_rx(dev, true); | ||
970 | nb8800_mac_tx(dev, true); | ||
971 | |||
972 | priv->phydev = of_phy_connect(dev, priv->phy_node, | ||
973 | nb8800_link_reconfigure, 0, | ||
974 | priv->phy_mode); | ||
975 | if (!priv->phydev) | ||
976 | goto err_free_irq; | ||
977 | |||
978 | nb8800_pause_adv(dev); | ||
979 | |||
980 | netdev_reset_queue(dev); | ||
981 | napi_enable(&priv->napi); | ||
982 | netif_start_queue(dev); | ||
983 | |||
984 | nb8800_start_rx(dev); | ||
985 | phy_start(priv->phydev); | ||
986 | |||
987 | return 0; | ||
988 | |||
989 | err_free_irq: | ||
990 | free_irq(dev->irq, dev); | ||
991 | err_free_dma: | ||
992 | nb8800_dma_free(dev); | ||
993 | |||
994 | return err; | ||
995 | } | ||
996 | |||
/* ndo_stop: tear down in reverse order of nb8800_open().
 *
 * The PHY state machine is stopped first so no further link
 * reconfiguration callbacks arrive, then the queue and NAPI are
 * quiesced before the DMA engines and MAC are shut off.
 */
static int nb8800_stop(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	phy_stop(priv->phydev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	/* drain both DMA channels before disabling the MAC */
	nb8800_dma_stop(dev);
	nb8800_mac_rx(dev, false);
	nb8800_mac_tx(dev, false);

	phy_disconnect(priv->phydev);
	priv->phydev = NULL;	/* signals "no PHY" to ethtool/ioctl paths */

	free_irq(dev->irq, dev);

	nb8800_dma_free(dev);

	return 0;
}
1019 | |||
1020 | static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
1021 | { | ||
1022 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1023 | |||
1024 | return phy_mii_ioctl(priv->phydev, rq, cmd); | ||
1025 | } | ||
1026 | |||
/* Standard net_device callbacks; MTU changes and address validation
 * use the generic ethernet helpers.
 */
static const struct net_device_ops nb8800_netdev_ops = {
	.ndo_open		= nb8800_open,
	.ndo_stop		= nb8800_stop,
	.ndo_start_xmit		= nb8800_xmit,
	.ndo_set_mac_address	= nb8800_set_mac_address,
	.ndo_set_rx_mode	= nb8800_set_rx_mode,
	.ndo_do_ioctl		= nb8800_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
1037 | |||
1038 | static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1039 | { | ||
1040 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1041 | |||
1042 | if (!priv->phydev) | ||
1043 | return -ENODEV; | ||
1044 | |||
1045 | return phy_ethtool_gset(priv->phydev, cmd); | ||
1046 | } | ||
1047 | |||
1048 | static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1049 | { | ||
1050 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1051 | |||
1052 | if (!priv->phydev) | ||
1053 | return -ENODEV; | ||
1054 | |||
1055 | return phy_ethtool_sset(priv->phydev, cmd); | ||
1056 | } | ||
1057 | |||
1058 | static int nb8800_nway_reset(struct net_device *dev) | ||
1059 | { | ||
1060 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1061 | |||
1062 | if (!priv->phydev) | ||
1063 | return -ENODEV; | ||
1064 | |||
1065 | return genphy_restart_aneg(priv->phydev); | ||
1066 | } | ||
1067 | |||
1068 | static void nb8800_get_pauseparam(struct net_device *dev, | ||
1069 | struct ethtool_pauseparam *pp) | ||
1070 | { | ||
1071 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1072 | |||
1073 | pp->autoneg = priv->pause_aneg; | ||
1074 | pp->rx_pause = priv->pause_rx; | ||
1075 | pp->tx_pause = priv->pause_tx; | ||
1076 | } | ||
1077 | |||
1078 | static int nb8800_set_pauseparam(struct net_device *dev, | ||
1079 | struct ethtool_pauseparam *pp) | ||
1080 | { | ||
1081 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1082 | |||
1083 | priv->pause_aneg = pp->autoneg; | ||
1084 | priv->pause_rx = pp->rx_pause; | ||
1085 | priv->pause_tx = pp->tx_pause; | ||
1086 | |||
1087 | nb8800_pause_adv(dev); | ||
1088 | |||
1089 | if (!priv->pause_aneg) | ||
1090 | nb8800_pause_config(dev); | ||
1091 | else if (priv->phydev) | ||
1092 | phy_start_aneg(priv->phydev); | ||
1093 | |||
1094 | return 0; | ||
1095 | } | ||
1096 | |||
/* ethtool statistics labels.  The table is split into an rx half and a
 * tx half of equal size: nb8800_get_ethtool_stats() reads hardware
 * counter i for the rx entries and counter (i | 0x80) for the tx
 * entries, so the position of each name must match the hardware's
 * stat-index numbering.
 */
static const char nb8800_stats_names[][ETH_GSTRING_LEN] = {
	"rx_bytes_ok",
	"rx_frames_ok",
	"rx_undersize_frames",
	"rx_fragment_frames",
	"rx_64_byte_frames",
	"rx_127_byte_frames",
	"rx_255_byte_frames",
	"rx_511_byte_frames",
	"rx_1023_byte_frames",
	"rx_max_size_frames",
	"rx_oversize_frames",
	"rx_bad_fcs_frames",
	"rx_broadcast_frames",
	"rx_multicast_frames",
	"rx_control_frames",
	"rx_pause_frames",
	"rx_unsup_control_frames",
	"rx_align_error_frames",
	"rx_overrun_frames",
	"rx_jabber_frames",
	"rx_bytes",
	"rx_frames",

	"tx_bytes_ok",
	"tx_frames_ok",
	"tx_64_byte_frames",
	"tx_127_byte_frames",
	"tx_255_byte_frames",
	"tx_511_byte_frames",
	"tx_1023_byte_frames",
	"tx_max_size_frames",
	"tx_oversize_frames",
	"tx_broadcast_frames",
	"tx_multicast_frames",
	"tx_control_frames",
	"tx_pause_frames",
	"tx_underrun_frames",
	"tx_single_collision_frames",
	"tx_multi_collision_frames",
	"tx_deferred_collision_frames",
	"tx_late_collision_frames",
	"tx_excessive_collision_frames",
	"tx_bytes",
	"tx_frames",
	"tx_collisions",
};

#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names)
1146 | |||
1147 | static int nb8800_get_sset_count(struct net_device *dev, int sset) | ||
1148 | { | ||
1149 | if (sset == ETH_SS_STATS) | ||
1150 | return NB8800_NUM_STATS; | ||
1151 | |||
1152 | return -EOPNOTSUPP; | ||
1153 | } | ||
1154 | |||
1155 | static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf) | ||
1156 | { | ||
1157 | if (sset == ETH_SS_STATS) | ||
1158 | memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names)); | ||
1159 | } | ||
1160 | |||
1161 | static u32 nb8800_read_stat(struct net_device *dev, int index) | ||
1162 | { | ||
1163 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1164 | |||
1165 | nb8800_writeb(priv, NB8800_STAT_INDEX, index); | ||
1166 | |||
1167 | return nb8800_readl(priv, NB8800_STAT_DATA); | ||
1168 | } | ||
1169 | |||
1170 | static void nb8800_get_ethtool_stats(struct net_device *dev, | ||
1171 | struct ethtool_stats *estats, u64 *st) | ||
1172 | { | ||
1173 | unsigned int i; | ||
1174 | u32 rx, tx; | ||
1175 | |||
1176 | for (i = 0; i < NB8800_NUM_STATS / 2; i++) { | ||
1177 | rx = nb8800_read_stat(dev, i); | ||
1178 | tx = nb8800_read_stat(dev, i | 0x80); | ||
1179 | st[i] = rx; | ||
1180 | st[i + NB8800_NUM_STATS / 2] = tx; | ||
1181 | } | ||
1182 | } | ||
1183 | |||
/* ethtool callbacks; link settings are delegated to the PHY layer. */
static const struct ethtool_ops nb8800_ethtool_ops = {
	.get_settings		= nb8800_get_settings,
	.set_settings		= nb8800_set_settings,
	.nway_reset		= nb8800_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= nb8800_get_pauseparam,
	.set_pauseparam		= nb8800_set_pauseparam,
	.get_sset_count		= nb8800_get_sset_count,
	.get_strings		= nb8800_get_strings,
	.get_ethtool_stats	= nb8800_get_ethtool_stats,
};
1195 | |||
/* One-time MAC and DMA engine initialisation common to all SoC
 * variants; called from probe (before any SoC-specific ops->init).
 * Programs frame handling, FIFO thresholds, DMA channel modes,
 * interrupt moderation and flow-control defaults.  Always returns 0.
 */
static int nb8800_hw_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	u32 val;

	/* retry on collision, pad short frames, append FCS */
	val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS;
	nb8800_writeb(priv, NB8800_TX_CTL1, val);

	/* Collision retry count */
	nb8800_writeb(priv, NB8800_TX_CTL2, 5);

	/* strip padding on rx, enable address filtering */
	val = RX_PAD_STRIP | RX_AF_EN;
	nb8800_writeb(priv, NB8800_RX_CTL, val);

	/* Chosen by fair dice roll */
	nb8800_writeb(priv, NB8800_RANDOM_SEED, 4);

	/* TX cycles per deferral period */
	nb8800_writeb(priv, NB8800_TX_SDP, 12);

	/* The following three threshold values have been
	 * experimentally determined for good results.
	 */

	/* RX/TX FIFO threshold for partial empty (64-bit entries) */
	nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0);

	/* RX/TX FIFO threshold for partial full (64-bit entries) */
	nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255);

	/* Buffer size for transmit (64-bit entries) */
	nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64);

	/* Configure tx DMA */

	val = nb8800_readl(priv, NB8800_TXC_CR);
	val &= TCR_LE;		/* keep endian setting */
	val |= TCR_DM;		/* DMA descriptor mode */
	val |= TCR_RS;		/* automatically store tx status */
	val |= TCR_DIE;		/* interrupt on DMA chain completion */
	val |= TCR_TFI(7);	/* interrupt after 7 frames transmitted */
	val |= TCR_BTS(2);	/* 32-byte bus transaction size */
	nb8800_writel(priv, NB8800_TXC_CR, val);

	/* TX complete interrupt after 10 ms or 7 frames (see above) */
	val = clk_get_rate(priv->clk) / 100;
	nb8800_writel(priv, NB8800_TX_ITR, val);

	/* Configure rx DMA */

	val = nb8800_readl(priv, NB8800_RXC_CR);
	val &= RCR_LE;		/* keep endian setting */
	val |= RCR_DM;		/* DMA descriptor mode */
	val |= RCR_RS;		/* automatically store rx status */
	val |= RCR_DIE;		/* interrupt at end of DMA chain */
	val |= RCR_RFI(7);	/* interrupt after 7 frames received */
	val |= RCR_BTS(2);	/* 32-byte bus transaction size */
	nb8800_writel(priv, NB8800_RXC_CR, val);

	/* The rx interrupt can fire before the DMA has completed
	 * unless a small delay is added. 50 us is hopefully enough.
	 */
	priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000;

	/* In NAPI poll mode we want to disable interrupts, but the
	 * hardware does not permit this. Delay 10 ms instead.
	 */
	priv->rx_itr_poll = clk_get_rate(priv->clk) / 100;

	nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);

	/* per-descriptor config template used when building the rx ring */
	priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF;

	/* Flow control settings */

	/* Pause time of 0.1 ms */
	val = 100000 / 512;
	nb8800_writeb(priv, NB8800_PQ1, val >> 8);
	nb8800_writeb(priv, NB8800_PQ2, val & 0xff);

	/* Auto-negotiate by default */
	priv->pause_aneg = true;
	priv->pause_rx = true;
	priv->pause_tx = true;

	/* clear the multicast filter table */
	nb8800_mc_init(dev, 0);

	return 0;
}
1285 | |||
1286 | static int nb8800_tangox_init(struct net_device *dev) | ||
1287 | { | ||
1288 | struct nb8800_priv *priv = netdev_priv(dev); | ||
1289 | u32 pad_mode = PAD_MODE_MII; | ||
1290 | |||
1291 | switch (priv->phy_mode) { | ||
1292 | case PHY_INTERFACE_MODE_MII: | ||
1293 | case PHY_INTERFACE_MODE_GMII: | ||
1294 | pad_mode = PAD_MODE_MII; | ||
1295 | break; | ||
1296 | |||
1297 | case PHY_INTERFACE_MODE_RGMII: | ||
1298 | pad_mode = PAD_MODE_RGMII; | ||
1299 | break; | ||
1300 | |||
1301 | case PHY_INTERFACE_MODE_RGMII_TXID: | ||
1302 | pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY; | ||
1303 | break; | ||
1304 | |||
1305 | default: | ||
1306 | dev_err(dev->dev.parent, "unsupported phy mode %s\n", | ||
1307 | phy_modes(priv->phy_mode)); | ||
1308 | return -EINVAL; | ||
1309 | } | ||
1310 | |||
1311 | nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode); | ||
1312 | |||
1313 | return 0; | ||
1314 | } | ||
1315 | |||
/* Sigma Designs (tangox) glue: pulse the block reset and program the
 * MDIO clock divider from the bus clock rate.  Always returns 0.
 */
static int nb8800_tangox_reset(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int clk_div;

	/* write 0 then 1 with a delay in between — presumably
	 * assert/deassert of an active-low reset; confirm against the
	 * SMP86xx documentation
	 */
	nb8800_writeb(priv, NB8800_TANGOX_RESET, 0);
	usleep_range(1000, 10000);
	nb8800_writeb(priv, NB8800_TANGOX_RESET, 1);

	wmb();	/* ensure reset is cleared before proceeding */

	/* keep MDC at or below MAX_MDC_CLOCK; round the divider up */
	clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK);
	nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div);

	return 0;
}
1332 | |||
/* SoC hooks for the SMP8642 (tangox) variant */
static const struct nb8800_ops nb8800_tangox_ops = {
	.init	= nb8800_tangox_init,
	.reset	= nb8800_tangox_reset,
};
1337 | |||
/* SMP8734 (tango4) variant: tangox setup plus per-descriptor rx
 * interrupts instead of the 7-frame moderation configured by
 * nb8800_hw_init().
 */
static int nb8800_tango4_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int err;

	err = nb8800_tangox_init(dev);
	if (err)
		return err;

	/* On tango4 interrupt on DMA completion per frame works and gives
	 * better performance despite generating more rx interrupts.
	 */

	/* Disable unnecessary interrupt on rx completion */
	nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7));

	/* Request interrupt on descriptor DMA completion */
	priv->rx_dma_config |= DESC_ID;

	return 0;
}
1359 | |||
/* SoC hooks for the SMP8734 (tango4) variant; reset is shared with
 * tangox.
 */
static const struct nb8800_ops nb8800_tango4_ops = {
	.init	= nb8800_tango4_init,
	.reset	= nb8800_tangox_reset,
};
1364 | |||
1365 | static const struct of_device_id nb8800_dt_ids[] = { | ||
1366 | { | ||
1367 | .compatible = "aurora,nb8800", | ||
1368 | }, | ||
1369 | { | ||
1370 | .compatible = "sigma,smp8642-ethernet", | ||
1371 | .data = &nb8800_tangox_ops, | ||
1372 | }, | ||
1373 | { | ||
1374 | .compatible = "sigma,smp8734-ethernet", | ||
1375 | .data = &nb8800_tango4_ops, | ||
1376 | }, | ||
1377 | { } | ||
1378 | }; | ||
1379 | |||
1380 | static int nb8800_probe(struct platform_device *pdev) | ||
1381 | { | ||
1382 | const struct of_device_id *match; | ||
1383 | const struct nb8800_ops *ops = NULL; | ||
1384 | struct nb8800_priv *priv; | ||
1385 | struct resource *res; | ||
1386 | struct net_device *dev; | ||
1387 | struct mii_bus *bus; | ||
1388 | const unsigned char *mac; | ||
1389 | void __iomem *base; | ||
1390 | int irq; | ||
1391 | int ret; | ||
1392 | |||
1393 | match = of_match_device(nb8800_dt_ids, &pdev->dev); | ||
1394 | if (match) | ||
1395 | ops = match->data; | ||
1396 | |||
1397 | irq = platform_get_irq(pdev, 0); | ||
1398 | if (irq <= 0) { | ||
1399 | dev_err(&pdev->dev, "No IRQ\n"); | ||
1400 | return -EINVAL; | ||
1401 | } | ||
1402 | |||
1403 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1404 | base = devm_ioremap_resource(&pdev->dev, res); | ||
1405 | if (IS_ERR(base)) | ||
1406 | return PTR_ERR(base); | ||
1407 | |||
1408 | dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start); | ||
1409 | |||
1410 | dev = alloc_etherdev(sizeof(*priv)); | ||
1411 | if (!dev) | ||
1412 | return -ENOMEM; | ||
1413 | |||
1414 | platform_set_drvdata(pdev, dev); | ||
1415 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
1416 | |||
1417 | priv = netdev_priv(dev); | ||
1418 | priv->base = base; | ||
1419 | |||
1420 | priv->phy_mode = of_get_phy_mode(pdev->dev.of_node); | ||
1421 | if (priv->phy_mode < 0) | ||
1422 | priv->phy_mode = PHY_INTERFACE_MODE_RGMII; | ||
1423 | |||
1424 | priv->clk = devm_clk_get(&pdev->dev, NULL); | ||
1425 | if (IS_ERR(priv->clk)) { | ||
1426 | dev_err(&pdev->dev, "failed to get clock\n"); | ||
1427 | ret = PTR_ERR(priv->clk); | ||
1428 | goto err_free_dev; | ||
1429 | } | ||
1430 | |||
1431 | ret = clk_prepare_enable(priv->clk); | ||
1432 | if (ret) | ||
1433 | goto err_free_dev; | ||
1434 | |||
1435 | spin_lock_init(&priv->tx_lock); | ||
1436 | |||
1437 | if (ops && ops->reset) { | ||
1438 | ret = ops->reset(dev); | ||
1439 | if (ret) | ||
1440 | goto err_free_dev; | ||
1441 | } | ||
1442 | |||
1443 | bus = devm_mdiobus_alloc(&pdev->dev); | ||
1444 | if (!bus) { | ||
1445 | ret = -ENOMEM; | ||
1446 | goto err_disable_clk; | ||
1447 | } | ||
1448 | |||
1449 | bus->name = "nb8800-mii"; | ||
1450 | bus->read = nb8800_mdio_read; | ||
1451 | bus->write = nb8800_mdio_write; | ||
1452 | bus->parent = &pdev->dev; | ||
1453 | snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii", | ||
1454 | (unsigned long)res->start); | ||
1455 | bus->priv = priv; | ||
1456 | |||
1457 | ret = of_mdiobus_register(bus, pdev->dev.of_node); | ||
1458 | if (ret) { | ||
1459 | dev_err(&pdev->dev, "failed to register MII bus\n"); | ||
1460 | goto err_disable_clk; | ||
1461 | } | ||
1462 | |||
1463 | priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); | ||
1464 | if (!priv->phy_node) { | ||
1465 | dev_err(&pdev->dev, "no PHY specified\n"); | ||
1466 | ret = -ENODEV; | ||
1467 | goto err_free_bus; | ||
1468 | } | ||
1469 | |||
1470 | priv->mii_bus = bus; | ||
1471 | |||
1472 | ret = nb8800_hw_init(dev); | ||
1473 | if (ret) | ||
1474 | goto err_free_bus; | ||
1475 | |||
1476 | if (ops && ops->init) { | ||
1477 | ret = ops->init(dev); | ||
1478 | if (ret) | ||
1479 | goto err_free_bus; | ||
1480 | } | ||
1481 | |||
1482 | dev->netdev_ops = &nb8800_netdev_ops; | ||
1483 | dev->ethtool_ops = &nb8800_ethtool_ops; | ||
1484 | dev->flags |= IFF_MULTICAST; | ||
1485 | dev->irq = irq; | ||
1486 | |||
1487 | mac = of_get_mac_address(pdev->dev.of_node); | ||
1488 | if (mac) | ||
1489 | ether_addr_copy(dev->dev_addr, mac); | ||
1490 | |||
1491 | if (!is_valid_ether_addr(dev->dev_addr)) | ||
1492 | eth_hw_addr_random(dev); | ||
1493 | |||
1494 | nb8800_update_mac_addr(dev); | ||
1495 | |||
1496 | netif_carrier_off(dev); | ||
1497 | |||
1498 | ret = register_netdev(dev); | ||
1499 | if (ret) { | ||
1500 | netdev_err(dev, "failed to register netdev\n"); | ||
1501 | goto err_free_dma; | ||
1502 | } | ||
1503 | |||
1504 | netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT); | ||
1505 | |||
1506 | netdev_info(dev, "MAC address %pM\n", dev->dev_addr); | ||
1507 | |||
1508 | return 0; | ||
1509 | |||
1510 | err_free_dma: | ||
1511 | nb8800_dma_free(dev); | ||
1512 | err_free_bus: | ||
1513 | mdiobus_unregister(bus); | ||
1514 | err_disable_clk: | ||
1515 | clk_disable_unprepare(priv->clk); | ||
1516 | err_free_dev: | ||
1517 | free_netdev(dev); | ||
1518 | |||
1519 | return ret; | ||
1520 | } | ||
1521 | |||
/* Platform remove: tear down in reverse order of probe.  The netdev
 * is unregistered first so no new opens can race with the teardown;
 * free_netdev() also releases the NAPI context added in probe.
 */
static int nb8800_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nb8800_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);

	mdiobus_unregister(priv->mii_bus);

	clk_disable_unprepare(priv->clk);

	nb8800_dma_free(ndev);
	free_netdev(ndev);

	return 0;
}
1538 | |||
/* Platform driver glue; matched against the DT table above. */
static struct platform_driver nb8800_driver = {
	.driver = {
		.name		= "nb8800",
		.of_match_table	= nb8800_dt_ids,
	},
	.probe	= nb8800_probe,
	.remove	= nb8800_remove,
};
1547 | |||
1548 | module_platform_driver(nb8800_driver); | ||
1549 | |||
1550 | MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver"); | ||
1551 | MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>"); | ||
1552 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h new file mode 100644 index 000000000000..e5adbc2aac9f --- /dev/null +++ b/drivers/net/ethernet/aurora/nb8800.h | |||
@@ -0,0 +1,316 @@ | |||
1 | #ifndef _NB8800_H_ | ||
2 | #define _NB8800_H_ | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/skbuff.h> | ||
6 | #include <linux/phy.h> | ||
7 | #include <linux/clk.h> | ||
8 | #include <linux/bitops.h> | ||
9 | |||
10 | #define RX_DESC_COUNT 256 | ||
11 | #define TX_DESC_COUNT 256 | ||
12 | |||
13 | #define NB8800_DESC_LOW 4 | ||
14 | |||
15 | #define RX_BUF_SIZE 1552 | ||
16 | |||
17 | #define RX_COPYBREAK 256 | ||
18 | #define RX_COPYHDR 128 | ||
19 | |||
20 | #define MAX_MDC_CLOCK 2500000 | ||
21 | |||
22 | /* Stargate Solutions SSN8800 core registers */ | ||
23 | #define NB8800_TX_CTL1 0x000 | ||
24 | #define TX_TPD BIT(5) | ||
25 | #define TX_APPEND_FCS BIT(4) | ||
26 | #define TX_PAD_EN BIT(3) | ||
27 | #define TX_RETRY_EN BIT(2) | ||
28 | #define TX_EN BIT(0) | ||
29 | |||
30 | #define NB8800_TX_CTL2 0x001 | ||
31 | |||
32 | #define NB8800_RX_CTL 0x004 | ||
33 | #define RX_BC_DISABLE BIT(7) | ||
34 | #define RX_RUNT BIT(6) | ||
35 | #define RX_AF_EN BIT(5) | ||
36 | #define RX_PAUSE_EN BIT(3) | ||
37 | #define RX_SEND_CRC BIT(2) | ||
38 | #define RX_PAD_STRIP BIT(1) | ||
39 | #define RX_EN BIT(0) | ||
40 | |||
41 | #define NB8800_RANDOM_SEED 0x008 | ||
42 | #define NB8800_TX_SDP 0x14 | ||
43 | #define NB8800_TX_TPDP1 0x18 | ||
44 | #define NB8800_TX_TPDP2 0x19 | ||
45 | #define NB8800_SLOT_TIME 0x1c | ||
46 | |||
47 | #define NB8800_MDIO_CMD 0x020 | ||
48 | #define MDIO_CMD_GO BIT(31) | ||
49 | #define MDIO_CMD_WR BIT(26) | ||
50 | #define MDIO_CMD_ADDR(x) ((x) << 21) | ||
51 | #define MDIO_CMD_REG(x) ((x) << 16) | ||
52 | #define MDIO_CMD_DATA(x) ((x) << 0) | ||
53 | |||
54 | #define NB8800_MDIO_STS 0x024 | ||
55 | #define MDIO_STS_ERR BIT(31) | ||
56 | |||
57 | #define NB8800_MC_ADDR(i) (0x028 + (i)) | ||
58 | #define NB8800_MC_INIT 0x02e | ||
59 | #define NB8800_UC_ADDR(i) (0x03c + (i)) | ||
60 | |||
61 | #define NB8800_MAC_MODE 0x044 | ||
62 | #define RGMII_MODE BIT(7) | ||
63 | #define HALF_DUPLEX BIT(4) | ||
64 | #define BURST_EN BIT(3) | ||
65 | #define LOOPBACK_EN BIT(2) | ||
66 | #define GMAC_MODE BIT(0) | ||
67 | |||
68 | #define NB8800_IC_THRESHOLD 0x050 | ||
69 | #define NB8800_PE_THRESHOLD 0x051 | ||
70 | #define NB8800_PF_THRESHOLD 0x052 | ||
71 | #define NB8800_TX_BUFSIZE 0x054 | ||
72 | #define NB8800_FIFO_CTL 0x056 | ||
73 | #define NB8800_PQ1 0x060 | ||
74 | #define NB8800_PQ2 0x061 | ||
75 | #define NB8800_SRC_ADDR(i) (0x06a + (i)) | ||
76 | #define NB8800_STAT_DATA 0x078 | ||
77 | #define NB8800_STAT_INDEX 0x07c | ||
78 | #define NB8800_STAT_CLEAR 0x07d | ||
79 | |||
80 | #define NB8800_SLEEP_MODE 0x07e | ||
81 | #define SLEEP_MODE BIT(0) | ||
82 | |||
83 | #define NB8800_WAKEUP 0x07f | ||
84 | #define WAKEUP BIT(0) | ||
85 | |||
86 | /* Aurora NB8800 host interface registers */ | ||
87 | #define NB8800_TXC_CR 0x100 | ||
88 | #define TCR_LK BIT(12) | ||
89 | #define TCR_DS BIT(11) | ||
90 | #define TCR_BTS(x) (((x) & 0x7) << 8) | ||
91 | #define TCR_DIE BIT(7) | ||
92 | #define TCR_TFI(x) (((x) & 0x7) << 4) | ||
93 | #define TCR_LE BIT(3) | ||
94 | #define TCR_RS BIT(2) | ||
95 | #define TCR_DM BIT(1) | ||
96 | #define TCR_EN BIT(0) | ||
97 | |||
98 | #define NB8800_TXC_SR 0x104 | ||
99 | #define TSR_DE BIT(3) | ||
100 | #define TSR_DI BIT(2) | ||
101 | #define TSR_TO BIT(1) | ||
102 | #define TSR_TI BIT(0) | ||
103 | |||
104 | #define NB8800_TX_SAR 0x108 | ||
105 | #define NB8800_TX_DESC_ADDR 0x10c | ||
106 | |||
107 | #define NB8800_TX_REPORT_ADDR 0x110 | ||
108 | #define TX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xffff) | ||
109 | #define TX_FIRST_DEFERRAL BIT(7) | ||
110 | #define TX_EARLY_COLLISIONS(x) (((x) >> 3) & 0xf) | ||
111 | #define TX_LATE_COLLISION BIT(2) | ||
112 | #define TX_PACKET_DROPPED BIT(1) | ||
113 | #define TX_FIFO_UNDERRUN BIT(0) | ||
114 | #define IS_TX_ERROR(r) ((r) & 0x07) | ||
115 | |||
116 | #define NB8800_TX_FIFO_SR 0x114 | ||
117 | #define NB8800_TX_ITR 0x118 | ||
118 | |||
119 | #define NB8800_RXC_CR 0x200 | ||
120 | #define RCR_FL BIT(13) | ||
121 | #define RCR_LK BIT(12) | ||
122 | #define RCR_DS BIT(11) | ||
123 | #define RCR_BTS(x) (((x) & 7) << 8) | ||
124 | #define RCR_DIE BIT(7) | ||
125 | #define RCR_RFI(x) (((x) & 7) << 4) | ||
126 | #define RCR_LE BIT(3) | ||
127 | #define RCR_RS BIT(2) | ||
128 | #define RCR_DM BIT(1) | ||
129 | #define RCR_EN BIT(0) | ||
130 | |||
131 | #define NB8800_RXC_SR 0x204 | ||
132 | #define RSR_DE BIT(3) | ||
133 | #define RSR_DI BIT(2) | ||
134 | #define RSR_RO BIT(1) | ||
135 | #define RSR_RI BIT(0) | ||
136 | |||
137 | #define NB8800_RX_SAR 0x208 | ||
138 | #define NB8800_RX_DESC_ADDR 0x20c | ||
139 | |||
140 | #define NB8800_RX_REPORT_ADDR 0x210 | ||
141 | #define RX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xFFFF) | ||
142 | #define RX_MULTICAST_PKT BIT(9) | ||
143 | #define RX_BROADCAST_PKT BIT(8) | ||
144 | #define RX_LENGTH_ERR BIT(7) | ||
145 | #define RX_FCS_ERR BIT(6) | ||
146 | #define RX_RUNT_PKT BIT(5) | ||
147 | #define RX_FIFO_OVERRUN BIT(4) | ||
148 | #define RX_LATE_COLLISION BIT(3) | ||
149 | #define RX_ALIGNMENT_ERROR BIT(2) | ||
150 | #define RX_ERROR_MASK 0xfc | ||
151 | #define IS_RX_ERROR(r) ((r) & RX_ERROR_MASK) | ||
152 | |||
153 | #define NB8800_RX_FIFO_SR 0x214 | ||
154 | #define NB8800_RX_ITR 0x218 | ||
155 | |||
156 | /* Sigma Designs SMP86xx additional registers */ | ||
157 | #define NB8800_TANGOX_PAD_MODE 0x400 | ||
158 | #define PAD_MODE_MASK 0x7 | ||
159 | #define PAD_MODE_MII 0x0 | ||
160 | #define PAD_MODE_RGMII 0x1 | ||
161 | #define PAD_MODE_GTX_CLK_INV BIT(3) | ||
162 | #define PAD_MODE_GTX_CLK_DELAY BIT(4) | ||
163 | |||
164 | #define NB8800_TANGOX_MDIO_CLKDIV 0x420 | ||
165 | #define NB8800_TANGOX_RESET 0x424 | ||
166 | |||
167 | /* Hardware DMA descriptor */ | ||
168 | struct nb8800_dma_desc { | ||
169 | u32 s_addr; /* start address */ | ||
170 | u32 n_addr; /* next descriptor address */ | ||
171 | u32 r_addr; /* report address */ | ||
172 | u32 config; | ||
173 | } __aligned(8); | ||
174 | |||
175 | #define DESC_ID BIT(23) | ||
176 | #define DESC_EOC BIT(22) | ||
177 | #define DESC_EOF BIT(21) | ||
178 | #define DESC_LK BIT(20) | ||
179 | #define DESC_DS BIT(19) | ||
180 | #define DESC_BTS(x) (((x) & 0x7) << 16) | ||
181 | |||
182 | /* DMA descriptor and associated data for rx. | ||
183 | * Allocated from coherent memory. | ||
184 | */ | ||
185 | struct nb8800_rx_desc { | ||
186 | /* DMA descriptor */ | ||
187 | struct nb8800_dma_desc desc; | ||
188 | |||
189 | /* Status report filled in by hardware */ | ||
190 | u32 report; | ||
191 | }; | ||
192 | |||
193 | /* Address of buffer on rx ring */ | ||
194 | struct nb8800_rx_buf { | ||
195 | struct page *page; | ||
196 | unsigned long offset; | ||
197 | }; | ||
198 | |||
199 | /* DMA descriptors and associated data for tx. | ||
200 | * Allocated from coherent memory. | ||
201 | */ | ||
202 | struct nb8800_tx_desc { | ||
203 | /* DMA descriptor. The second descriptor is used if packet | ||
204 | * data is unaligned. | ||
205 | */ | ||
206 | struct nb8800_dma_desc desc[2]; | ||
207 | |||
208 | /* Status report filled in by hardware */ | ||
209 | u32 report; | ||
210 | |||
211 | /* Bounce buffer for initial unaligned part of packet */ | ||
212 | u8 buf[8] __aligned(8); | ||
213 | }; | ||
214 | |||
215 | /* Packet in tx queue */ | ||
216 | struct nb8800_tx_buf { | ||
217 | /* Currently queued skb */ | ||
218 | struct sk_buff *skb; | ||
219 | |||
220 | /* DMA address of the first descriptor */ | ||
221 | dma_addr_t dma_desc; | ||
222 | |||
223 | /* DMA address of packet data */ | ||
224 | dma_addr_t dma_addr; | ||
225 | |||
226 | /* Length of DMA mapping, less than skb->len if alignment | ||
227 | * buffer is used. | ||
228 | */ | ||
229 | unsigned int dma_len; | ||
230 | |||
231 | /* Number of packets in chain starting here */ | ||
232 | unsigned int chain_len; | ||
233 | |||
234 | /* Packet chain ready to be submitted to hardware */ | ||
235 | bool ready; | ||
236 | }; | ||
237 | |||
238 | struct nb8800_priv { | ||
239 | struct napi_struct napi; | ||
240 | |||
241 | void __iomem *base; | ||
242 | |||
243 | /* RX DMA descriptors */ | ||
244 | struct nb8800_rx_desc *rx_descs; | ||
245 | |||
246 | /* RX buffers referenced by DMA descriptors */ | ||
247 | struct nb8800_rx_buf *rx_bufs; | ||
248 | |||
249 | /* Current end of chain */ | ||
250 | u32 rx_eoc; | ||
251 | |||
252 | /* Value for rx interrupt time register in NAPI interrupt mode */ | ||
253 | u32 rx_itr_irq; | ||
254 | |||
255 | /* Value for rx interrupt time register in NAPI poll mode */ | ||
256 | u32 rx_itr_poll; | ||
257 | |||
258 | /* Value for config field of rx DMA descriptors */ | ||
259 | u32 rx_dma_config; | ||
260 | |||
261 | /* TX DMA descriptors */ | ||
262 | struct nb8800_tx_desc *tx_descs; | ||
263 | |||
264 | /* TX packet queue */ | ||
265 | struct nb8800_tx_buf *tx_bufs; | ||
266 | |||
267 | /* Number of free tx queue entries */ | ||
268 | atomic_t tx_free; | ||
269 | |||
270 | /* First free tx queue entry */ | ||
271 | u32 tx_next; | ||
272 | |||
273 | /* Next buffer to transmit */ | ||
274 | u32 tx_queue; | ||
275 | |||
276 | /* Start of current packet chain */ | ||
277 | struct nb8800_tx_buf *tx_chain; | ||
278 | |||
279 | /* Next buffer to reclaim */ | ||
280 | u32 tx_done; | ||
281 | |||
282 | /* Lock for DMA activation */ | ||
283 | spinlock_t tx_lock; | ||
284 | |||
285 | struct mii_bus *mii_bus; | ||
286 | struct device_node *phy_node; | ||
287 | struct phy_device *phydev; | ||
288 | |||
289 | /* PHY connection type from DT */ | ||
290 | int phy_mode; | ||
291 | |||
292 | /* Current link status */ | ||
293 | int speed; | ||
294 | int duplex; | ||
295 | int link; | ||
296 | |||
297 | /* Pause settings */ | ||
298 | bool pause_aneg; | ||
299 | bool pause_rx; | ||
300 | bool pause_tx; | ||
301 | |||
302 | /* DMA base address of rx descriptors, see rx_descs above */ | ||
303 | dma_addr_t rx_desc_dma; | ||
304 | |||
305 | /* DMA base address of tx descriptors, see tx_descs above */ | ||
306 | dma_addr_t tx_desc_dma; | ||
307 | |||
308 | struct clk *clk; | ||
309 | }; | ||
310 | |||
311 | struct nb8800_ops { | ||
312 | int (*init)(struct net_device *dev); | ||
313 | int (*reset)(struct net_device *dev); | ||
314 | }; | ||
315 | |||
316 | #endif /* _NB8800_H_ */ | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c9b036789184..2e611dc5f162 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -10139,8 +10139,8 @@ static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port) | |||
10139 | DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); | 10139 | DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); |
10140 | return; | 10140 | return; |
10141 | } | 10141 | } |
10142 | bp->vxlan_dst_port--; | 10142 | bp->vxlan_dst_port_count--; |
10143 | if (bp->vxlan_dst_port) | 10143 | if (bp->vxlan_dst_port_count) |
10144 | return; | 10144 | return; |
10145 | 10145 | ||
10146 | if (netif_running(bp->dev)) { | 10146 | if (netif_running(bp->dev)) { |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index db15c5ee09c5..bdf094fb6ef9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -3625,6 +3625,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) | |||
3625 | pf->fw_fid = le16_to_cpu(resp->fid); | 3625 | pf->fw_fid = le16_to_cpu(resp->fid); |
3626 | pf->port_id = le16_to_cpu(resp->port_id); | 3626 | pf->port_id = le16_to_cpu(resp->port_id); |
3627 | memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN); | 3627 | memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN); |
3628 | memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); | ||
3628 | pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); | 3629 | pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); |
3629 | pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); | 3630 | pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); |
3630 | pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); | 3631 | pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings); |
@@ -3648,8 +3649,11 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) | |||
3648 | 3649 | ||
3649 | vf->fw_fid = le16_to_cpu(resp->fid); | 3650 | vf->fw_fid = le16_to_cpu(resp->fid); |
3650 | memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN); | 3651 | memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN); |
3651 | if (!is_valid_ether_addr(vf->mac_addr)) | 3652 | if (is_valid_ether_addr(vf->mac_addr)) |
3652 | random_ether_addr(vf->mac_addr); | 3653 | /* overwrite netdev dev_adr with admin VF MAC */ |
3654 | memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); | ||
3655 | else | ||
3656 | random_ether_addr(bp->dev->dev_addr); | ||
3653 | 3657 | ||
3654 | vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); | 3658 | vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); |
3655 | vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); | 3659 | vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); |
@@ -3880,6 +3884,8 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) | |||
3880 | #endif | 3884 | #endif |
3881 | } | 3885 | } |
3882 | 3886 | ||
3887 | static int bnxt_cfg_rx_mode(struct bnxt *); | ||
3888 | |||
3883 | static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) | 3889 | static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) |
3884 | { | 3890 | { |
3885 | int rc = 0; | 3891 | int rc = 0; |
@@ -3946,11 +3952,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) | |||
3946 | bp->vnic_info[0].rx_mask |= | 3952 | bp->vnic_info[0].rx_mask |= |
3947 | CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; | 3953 | CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; |
3948 | 3954 | ||
3949 | rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0); | 3955 | rc = bnxt_cfg_rx_mode(bp); |
3950 | if (rc) { | 3956 | if (rc) |
3951 | netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc); | ||
3952 | goto err_out; | 3957 | goto err_out; |
3953 | } | ||
3954 | 3958 | ||
3955 | rc = bnxt_hwrm_set_coal(bp); | 3959 | rc = bnxt_hwrm_set_coal(bp); |
3956 | if (rc) | 3960 | if (rc) |
@@ -4865,7 +4869,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) | |||
4865 | } | 4869 | } |
4866 | } | 4870 | } |
4867 | 4871 | ||
4868 | static void bnxt_cfg_rx_mode(struct bnxt *bp) | 4872 | static int bnxt_cfg_rx_mode(struct bnxt *bp) |
4869 | { | 4873 | { |
4870 | struct net_device *dev = bp->dev; | 4874 | struct net_device *dev = bp->dev; |
4871 | struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; | 4875 | struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; |
@@ -4914,6 +4918,7 @@ static void bnxt_cfg_rx_mode(struct bnxt *bp) | |||
4914 | netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", | 4918 | netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", |
4915 | rc); | 4919 | rc); |
4916 | vnic->uc_filter_count = i; | 4920 | vnic->uc_filter_count = i; |
4921 | return rc; | ||
4917 | } | 4922 | } |
4918 | } | 4923 | } |
4919 | 4924 | ||
@@ -4922,6 +4927,8 @@ skip_uc: | |||
4922 | if (rc) | 4927 | if (rc) |
4923 | netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", | 4928 | netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", |
4924 | rc); | 4929 | rc); |
4930 | |||
4931 | return rc; | ||
4925 | } | 4932 | } |
4926 | 4933 | ||
4927 | static netdev_features_t bnxt_fix_features(struct net_device *dev, | 4934 | static netdev_features_t bnxt_fix_features(struct net_device *dev, |
@@ -5212,13 +5219,27 @@ init_err: | |||
5212 | static int bnxt_change_mac_addr(struct net_device *dev, void *p) | 5219 | static int bnxt_change_mac_addr(struct net_device *dev, void *p) |
5213 | { | 5220 | { |
5214 | struct sockaddr *addr = p; | 5221 | struct sockaddr *addr = p; |
5222 | struct bnxt *bp = netdev_priv(dev); | ||
5223 | int rc = 0; | ||
5215 | 5224 | ||
5216 | if (!is_valid_ether_addr(addr->sa_data)) | 5225 | if (!is_valid_ether_addr(addr->sa_data)) |
5217 | return -EADDRNOTAVAIL; | 5226 | return -EADDRNOTAVAIL; |
5218 | 5227 | ||
5228 | #ifdef CONFIG_BNXT_SRIOV | ||
5229 | if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr)) | ||
5230 | return -EADDRNOTAVAIL; | ||
5231 | #endif | ||
5232 | |||
5233 | if (ether_addr_equal(addr->sa_data, dev->dev_addr)) | ||
5234 | return 0; | ||
5235 | |||
5219 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 5236 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
5237 | if (netif_running(dev)) { | ||
5238 | bnxt_close_nic(bp, false, false); | ||
5239 | rc = bnxt_open_nic(bp, false, false); | ||
5240 | } | ||
5220 | 5241 | ||
5221 | return 0; | 5242 | return rc; |
5222 | } | 5243 | } |
5223 | 5244 | ||
5224 | /* rtnl_lock held */ | 5245 | /* rtnl_lock held */ |
@@ -5686,15 +5707,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
5686 | bnxt_set_tpa_flags(bp); | 5707 | bnxt_set_tpa_flags(bp); |
5687 | bnxt_set_ring_params(bp); | 5708 | bnxt_set_ring_params(bp); |
5688 | dflt_rings = netif_get_num_default_rss_queues(); | 5709 | dflt_rings = netif_get_num_default_rss_queues(); |
5689 | if (BNXT_PF(bp)) { | 5710 | if (BNXT_PF(bp)) |
5690 | memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN); | ||
5691 | bp->pf.max_irqs = max_irqs; | 5711 | bp->pf.max_irqs = max_irqs; |
5692 | } else { | ||
5693 | #if defined(CONFIG_BNXT_SRIOV) | 5712 | #if defined(CONFIG_BNXT_SRIOV) |
5694 | memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); | 5713 | else |
5695 | bp->vf.max_irqs = max_irqs; | 5714 | bp->vf.max_irqs = max_irqs; |
5696 | #endif | 5715 | #endif |
5697 | } | ||
5698 | bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); | 5716 | bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); |
5699 | bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); | 5717 | bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings); |
5700 | bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); | 5718 | bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index f4cf68861069..7a9af2887d8e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | |||
@@ -804,10 +804,9 @@ void bnxt_update_vf_mac(struct bnxt *bp) | |||
804 | if (!is_valid_ether_addr(resp->perm_mac_address)) | 804 | if (!is_valid_ether_addr(resp->perm_mac_address)) |
805 | goto update_vf_mac_exit; | 805 | goto update_vf_mac_exit; |
806 | 806 | ||
807 | if (ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) | 807 | if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) |
808 | goto update_vf_mac_exit; | 808 | memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); |
809 | 809 | /* overwrite netdev dev_adr with admin VF MAC */ | |
810 | memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); | ||
811 | memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); | 810 | memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); |
812 | update_vf_mac_exit: | 811 | update_vf_mac_exit: |
813 | mutex_unlock(&bp->hwrm_cmd_lock); | 812 | mutex_unlock(&bp->hwrm_cmd_lock); |
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 88c1e1a834f8..169059c92f80 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -1682,6 +1682,8 @@ static void macb_init_hw(struct macb *bp) | |||
1682 | macb_set_hwaddr(bp); | 1682 | macb_set_hwaddr(bp); |
1683 | 1683 | ||
1684 | config = macb_mdc_clk_div(bp); | 1684 | config = macb_mdc_clk_div(bp); |
1685 | if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) | ||
1686 | config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); | ||
1685 | config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ | 1687 | config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */ |
1686 | config |= MACB_BIT(PAE); /* PAuse Enable */ | 1688 | config |= MACB_BIT(PAE); /* PAuse Enable */ |
1687 | config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ | 1689 | config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ |
@@ -2416,6 +2418,8 @@ static int macb_init(struct platform_device *pdev) | |||
2416 | /* Set MII management clock divider */ | 2418 | /* Set MII management clock divider */ |
2417 | val = macb_mdc_clk_div(bp); | 2419 | val = macb_mdc_clk_div(bp); |
2418 | val |= macb_dbw(bp); | 2420 | val |= macb_dbw(bp); |
2421 | if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) | ||
2422 | val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL); | ||
2419 | macb_writel(bp, NCFGR, val); | 2423 | macb_writel(bp, NCFGR, val); |
2420 | 2424 | ||
2421 | return 0; | 2425 | return 0; |
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 6e1faea00ca8..d83b0db77821 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
@@ -215,12 +215,17 @@ | |||
215 | /* GEM specific NCFGR bitfields. */ | 215 | /* GEM specific NCFGR bitfields. */ |
216 | #define GEM_GBE_OFFSET 10 /* Gigabit mode enable */ | 216 | #define GEM_GBE_OFFSET 10 /* Gigabit mode enable */ |
217 | #define GEM_GBE_SIZE 1 | 217 | #define GEM_GBE_SIZE 1 |
218 | #define GEM_PCSSEL_OFFSET 11 | ||
219 | #define GEM_PCSSEL_SIZE 1 | ||
218 | #define GEM_CLK_OFFSET 18 /* MDC clock division */ | 220 | #define GEM_CLK_OFFSET 18 /* MDC clock division */ |
219 | #define GEM_CLK_SIZE 3 | 221 | #define GEM_CLK_SIZE 3 |
220 | #define GEM_DBW_OFFSET 21 /* Data bus width */ | 222 | #define GEM_DBW_OFFSET 21 /* Data bus width */ |
221 | #define GEM_DBW_SIZE 2 | 223 | #define GEM_DBW_SIZE 2 |
222 | #define GEM_RXCOEN_OFFSET 24 | 224 | #define GEM_RXCOEN_OFFSET 24 |
223 | #define GEM_RXCOEN_SIZE 1 | 225 | #define GEM_RXCOEN_SIZE 1 |
226 | #define GEM_SGMIIEN_OFFSET 27 | ||
227 | #define GEM_SGMIIEN_SIZE 1 | ||
228 | |||
224 | 229 | ||
225 | /* Constants for data bus width. */ | 230 | /* Constants for data bus width. */ |
226 | #define GEM_DBW32 0 /* 32 bit AMBA AHB data bus width */ | 231 | #define GEM_DBW32 0 /* 32 bit AMBA AHB data bus width */ |
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index d3950b20feb9..39ca6744a4e6 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h | |||
@@ -120,10 +120,9 @@ | |||
120 | * Calculated for SCLK of 700Mhz | 120 | * Calculated for SCLK of 700Mhz |
121 | * value written should be a 1/16th of what is expected | 121 | * value written should be a 1/16th of what is expected |
122 | * | 122 | * |
123 | * 1 tick per 0.05usec = value of 2.2 | 123 | * 1 tick per 0.025usec |
124 | * This 10% would be covered in CQ timer thresh value | ||
125 | */ | 124 | */ |
126 | #define NICPF_CLK_PER_INT_TICK 2 | 125 | #define NICPF_CLK_PER_INT_TICK 1 |
127 | 126 | ||
128 | /* Time to wait before we decide that a SQ is stuck. | 127 | /* Time to wait before we decide that a SQ is stuck. |
129 | * | 128 | * |
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index c561fdcb79a7..4b7fd63ae57c 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c | |||
@@ -37,6 +37,7 @@ struct nicpf { | |||
37 | #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) | 37 | #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) |
38 | #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) | 38 | #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) |
39 | u8 vf_lmac_map[MAX_LMAC]; | 39 | u8 vf_lmac_map[MAX_LMAC]; |
40 | u8 lmac_cnt; | ||
40 | struct delayed_work dwork; | 41 | struct delayed_work dwork; |
41 | struct workqueue_struct *check_link; | 42 | struct workqueue_struct *check_link; |
42 | u8 link[MAX_LMAC]; | 43 | u8 link[MAX_LMAC]; |
@@ -279,6 +280,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) | |||
279 | u64 lmac_credit; | 280 | u64 lmac_credit; |
280 | 281 | ||
281 | nic->num_vf_en = 0; | 282 | nic->num_vf_en = 0; |
283 | nic->lmac_cnt = 0; | ||
282 | 284 | ||
283 | for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { | 285 | for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { |
284 | if (!(bgx_map & (1 << bgx))) | 286 | if (!(bgx_map & (1 << bgx))) |
@@ -288,6 +290,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) | |||
288 | nic->vf_lmac_map[next_bgx_lmac++] = | 290 | nic->vf_lmac_map[next_bgx_lmac++] = |
289 | NIC_SET_VF_LMAC_MAP(bgx, lmac); | 291 | NIC_SET_VF_LMAC_MAP(bgx, lmac); |
290 | nic->num_vf_en += lmac_cnt; | 292 | nic->num_vf_en += lmac_cnt; |
293 | nic->lmac_cnt += lmac_cnt; | ||
291 | 294 | ||
292 | /* Program LMAC credits */ | 295 | /* Program LMAC credits */ |
293 | lmac_credit = (1ull << 1); /* channel credit enable */ | 296 | lmac_credit = (1ull << 1); /* channel credit enable */ |
@@ -715,6 +718,13 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | |||
715 | case NIC_MBOX_MSG_CFG_DONE: | 718 | case NIC_MBOX_MSG_CFG_DONE: |
716 | /* Last message of VF config msg sequence */ | 719 | /* Last message of VF config msg sequence */ |
717 | nic->vf_enabled[vf] = true; | 720 | nic->vf_enabled[vf] = true; |
721 | if (vf >= nic->lmac_cnt) | ||
722 | goto unlock; | ||
723 | |||
724 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
725 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
726 | |||
727 | bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true); | ||
718 | goto unlock; | 728 | goto unlock; |
719 | case NIC_MBOX_MSG_SHUTDOWN: | 729 | case NIC_MBOX_MSG_SHUTDOWN: |
720 | /* First msg in VF teardown sequence */ | 730 | /* First msg in VF teardown sequence */ |
@@ -722,6 +732,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | |||
722 | if (vf >= nic->num_vf_en) | 732 | if (vf >= nic->num_vf_en) |
723 | nic->sqs_used[vf - nic->num_vf_en] = false; | 733 | nic->sqs_used[vf - nic->num_vf_en] = false; |
724 | nic->pqs_vf[vf] = 0; | 734 | nic->pqs_vf[vf] = 0; |
735 | |||
736 | if (vf >= nic->lmac_cnt) | ||
737 | break; | ||
738 | |||
739 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
740 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
741 | |||
742 | bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false); | ||
725 | break; | 743 | break; |
726 | case NIC_MBOX_MSG_ALLOC_SQS: | 744 | case NIC_MBOX_MSG_ALLOC_SQS: |
727 | nic_alloc_sqs(nic, &mbx.sqs_alloc); | 745 | nic_alloc_sqs(nic, &mbx.sqs_alloc); |
@@ -940,7 +958,7 @@ static void nic_poll_for_link(struct work_struct *work) | |||
940 | 958 | ||
941 | mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; | 959 | mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; |
942 | 960 | ||
943 | for (vf = 0; vf < nic->num_vf_en; vf++) { | 961 | for (vf = 0; vf < nic->lmac_cnt; vf++) { |
944 | /* Poll only if VF is UP */ | 962 | /* Poll only if VF is UP */ |
945 | if (!nic->vf_enabled[vf]) | 963 | if (!nic->vf_enabled[vf]) |
946 | continue; | 964 | continue; |
@@ -1074,8 +1092,7 @@ static void nic_remove(struct pci_dev *pdev) | |||
1074 | 1092 | ||
1075 | if (nic->check_link) { | 1093 | if (nic->check_link) { |
1076 | /* Destroy work Queue */ | 1094 | /* Destroy work Queue */ |
1077 | cancel_delayed_work(&nic->dwork); | 1095 | cancel_delayed_work_sync(&nic->dwork); |
1078 | flush_workqueue(nic->check_link); | ||
1079 | destroy_workqueue(nic->check_link); | 1096 | destroy_workqueue(nic->check_link); |
1080 | } | 1097 | } |
1081 | 1098 | ||
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index af54c10945c2..a12b2e38cf61 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | |||
@@ -112,6 +112,13 @@ static int nicvf_get_settings(struct net_device *netdev, | |||
112 | 112 | ||
113 | cmd->supported = 0; | 113 | cmd->supported = 0; |
114 | cmd->transceiver = XCVR_EXTERNAL; | 114 | cmd->transceiver = XCVR_EXTERNAL; |
115 | |||
116 | if (!nic->link_up) { | ||
117 | cmd->duplex = DUPLEX_UNKNOWN; | ||
118 | ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); | ||
119 | return 0; | ||
120 | } | ||
121 | |||
115 | if (nic->speed <= 1000) { | 122 | if (nic->speed <= 1000) { |
116 | cmd->port = PORT_MII; | 123 | cmd->port = PORT_MII; |
117 | cmd->autoneg = AUTONEG_ENABLE; | 124 | cmd->autoneg = AUTONEG_ENABLE; |
@@ -125,6 +132,13 @@ static int nicvf_get_settings(struct net_device *netdev, | |||
125 | return 0; | 132 | return 0; |
126 | } | 133 | } |
127 | 134 | ||
135 | static u32 nicvf_get_link(struct net_device *netdev) | ||
136 | { | ||
137 | struct nicvf *nic = netdev_priv(netdev); | ||
138 | |||
139 | return nic->link_up; | ||
140 | } | ||
141 | |||
128 | static void nicvf_get_drvinfo(struct net_device *netdev, | 142 | static void nicvf_get_drvinfo(struct net_device *netdev, |
129 | struct ethtool_drvinfo *info) | 143 | struct ethtool_drvinfo *info) |
130 | { | 144 | { |
@@ -660,7 +674,7 @@ static int nicvf_set_channels(struct net_device *dev, | |||
660 | 674 | ||
661 | static const struct ethtool_ops nicvf_ethtool_ops = { | 675 | static const struct ethtool_ops nicvf_ethtool_ops = { |
662 | .get_settings = nicvf_get_settings, | 676 | .get_settings = nicvf_get_settings, |
663 | .get_link = ethtool_op_get_link, | 677 | .get_link = nicvf_get_link, |
664 | .get_drvinfo = nicvf_get_drvinfo, | 678 | .get_drvinfo = nicvf_get_drvinfo, |
665 | .get_msglevel = nicvf_get_msglevel, | 679 | .get_msglevel = nicvf_get_msglevel, |
666 | .set_msglevel = nicvf_set_msglevel, | 680 | .set_msglevel = nicvf_set_msglevel, |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 7f709cbdcd87..dde8dc720cd3 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
@@ -1057,6 +1057,7 @@ int nicvf_stop(struct net_device *netdev) | |||
1057 | 1057 | ||
1058 | netif_carrier_off(netdev); | 1058 | netif_carrier_off(netdev); |
1059 | netif_tx_stop_all_queues(nic->netdev); | 1059 | netif_tx_stop_all_queues(nic->netdev); |
1060 | nic->link_up = false; | ||
1060 | 1061 | ||
1061 | /* Teardown secondary qsets first */ | 1062 | /* Teardown secondary qsets first */ |
1062 | if (!nic->sqs_mode) { | 1063 | if (!nic->sqs_mode) { |
@@ -1211,9 +1212,6 @@ int nicvf_open(struct net_device *netdev) | |||
1211 | nic->drv_stats.txq_stop = 0; | 1212 | nic->drv_stats.txq_stop = 0; |
1212 | nic->drv_stats.txq_wake = 0; | 1213 | nic->drv_stats.txq_wake = 0; |
1213 | 1214 | ||
1214 | netif_carrier_on(netdev); | ||
1215 | netif_tx_start_all_queues(netdev); | ||
1216 | |||
1217 | return 0; | 1215 | return 0; |
1218 | cleanup: | 1216 | cleanup: |
1219 | nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); | 1217 | nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0); |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index e404ea837727..206b6a71a545 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c | |||
@@ -592,7 +592,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, | |||
592 | /* Set threshold value for interrupt generation */ | 592 | /* Set threshold value for interrupt generation */ |
593 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); | 593 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); |
594 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, | 594 | nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, |
595 | qidx, nic->cq_coalesce_usecs); | 595 | qidx, CMP_QUEUE_TIMER_THRESH); |
596 | } | 596 | } |
597 | 597 | ||
598 | /* Configures transmit queue */ | 598 | /* Configures transmit queue */ |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index fb4957d09914..033e8306e91c 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h | |||
@@ -76,7 +76,7 @@ | |||
76 | #define CMP_QSIZE CMP_QUEUE_SIZE2 | 76 | #define CMP_QSIZE CMP_QUEUE_SIZE2 |
77 | #define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) | 77 | #define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) |
78 | #define CMP_QUEUE_CQE_THRESH 0 | 78 | #define CMP_QUEUE_CQE_THRESH 0 |
79 | #define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ | 79 | #define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */ |
80 | 80 | ||
81 | #define RBDR_SIZE RBDR_SIZE0 | 81 | #define RBDR_SIZE RBDR_SIZE0 |
82 | #define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) | 82 | #define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13)) |
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 180aa9fabf48..9df26c2263bc 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c | |||
@@ -186,6 +186,23 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac) | |||
186 | } | 186 | } |
187 | EXPORT_SYMBOL(bgx_set_lmac_mac); | 187 | EXPORT_SYMBOL(bgx_set_lmac_mac); |
188 | 188 | ||
189 | void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable) | ||
190 | { | ||
191 | struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx]; | ||
192 | u64 cfg; | ||
193 | |||
194 | if (!bgx) | ||
195 | return; | ||
196 | |||
197 | cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG); | ||
198 | if (enable) | ||
199 | cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN; | ||
200 | else | ||
201 | cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN); | ||
202 | bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg); | ||
203 | } | ||
204 | EXPORT_SYMBOL(bgx_lmac_rx_tx_enable); | ||
205 | |||
189 | static void bgx_sgmii_change_link_state(struct lmac *lmac) | 206 | static void bgx_sgmii_change_link_state(struct lmac *lmac) |
190 | { | 207 | { |
191 | struct bgx *bgx = lmac->bgx; | 208 | struct bgx *bgx = lmac->bgx; |
@@ -612,6 +629,8 @@ static void bgx_poll_for_link(struct work_struct *work) | |||
612 | lmac->last_duplex = 1; | 629 | lmac->last_duplex = 1; |
613 | } else { | 630 | } else { |
614 | lmac->link_up = 0; | 631 | lmac->link_up = 0; |
632 | lmac->last_speed = SPEED_UNKNOWN; | ||
633 | lmac->last_duplex = DUPLEX_UNKNOWN; | ||
615 | } | 634 | } |
616 | 635 | ||
617 | if (lmac->last_link != lmac->link_up) { | 636 | if (lmac->last_link != lmac->link_up) { |
@@ -654,8 +673,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) | |||
654 | } | 673 | } |
655 | 674 | ||
656 | /* Enable lmac */ | 675 | /* Enable lmac */ |
657 | bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, | 676 | bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN); |
658 | CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN); | ||
659 | 677 | ||
660 | /* Restore default cfg, incase low level firmware changed it */ | 678 | /* Restore default cfg, incase low level firmware changed it */ |
661 | bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03); | 679 | bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03); |
@@ -695,8 +713,7 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) | |||
695 | lmac = &bgx->lmac[lmacid]; | 713 | lmac = &bgx->lmac[lmacid]; |
696 | if (lmac->check_link) { | 714 | if (lmac->check_link) { |
697 | /* Destroy work queue */ | 715 | /* Destroy work queue */ |
698 | cancel_delayed_work(&lmac->dwork); | 716 | cancel_delayed_work_sync(&lmac->dwork); |
699 | flush_workqueue(lmac->check_link); | ||
700 | destroy_workqueue(lmac->check_link); | 717 | destroy_workqueue(lmac->check_link); |
701 | } | 718 | } |
702 | 719 | ||
@@ -1009,6 +1026,9 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1009 | struct bgx *bgx = NULL; | 1026 | struct bgx *bgx = NULL; |
1010 | u8 lmac; | 1027 | u8 lmac; |
1011 | 1028 | ||
1029 | /* Load octeon mdio driver */ | ||
1030 | octeon_mdiobus_force_mod_depencency(); | ||
1031 | |||
1012 | bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); | 1032 | bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); |
1013 | if (!bgx) | 1033 | if (!bgx) |
1014 | return -ENOMEM; | 1034 | return -ENOMEM; |
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index 07b7ec66c60d..149e179363a1 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h | |||
@@ -182,6 +182,8 @@ enum MCAST_MODE { | |||
182 | #define BCAST_ACCEPT 1 | 182 | #define BCAST_ACCEPT 1 |
183 | #define CAM_ACCEPT 1 | 183 | #define CAM_ACCEPT 1 |
184 | 184 | ||
185 | void octeon_mdiobus_force_mod_depencency(void); | ||
186 | void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable); | ||
185 | void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac); | 187 | void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac); |
186 | unsigned bgx_get_map(int node); | 188 | unsigned bgx_get_map(int node); |
187 | int bgx_get_lmac_count(int node, int bgx); | 189 | int bgx_get_lmac_count(int node, int bgx); |
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index ed41559bae77..b553409e04ad 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c | |||
@@ -98,8 +98,7 @@ static int csr0 = 0x01A00000 | 0x4800; | |||
98 | #elif defined(__mips__) | 98 | #elif defined(__mips__) |
99 | static int csr0 = 0x00200000 | 0x4000; | 99 | static int csr0 = 0x00200000 | 0x4000; |
100 | #else | 100 | #else |
101 | #warning Processor architecture undefined! | 101 | static int csr0; |
102 | static int csr0 = 0x00A00000 | 0x4800; | ||
103 | #endif | 102 | #endif |
104 | 103 | ||
105 | /* Operational parameters that usually are not changed. */ | 104 | /* Operational parameters that usually are not changed. */ |
@@ -1982,6 +1981,12 @@ static int __init tulip_init (void) | |||
1982 | pr_info("%s", version); | 1981 | pr_info("%s", version); |
1983 | #endif | 1982 | #endif |
1984 | 1983 | ||
1984 | if (!csr0) { | ||
1985 | pr_warn("tulip: unknown CPU architecture, using default csr0\n"); | ||
1986 | /* default to 8 longword cache line alignment */ | ||
1987 | csr0 = 0x00A00000 | 0x4800; | ||
1988 | } | ||
1989 | |||
1985 | /* copy module parms into globals */ | 1990 | /* copy module parms into globals */ |
1986 | tulip_rx_copybreak = rx_copybreak; | 1991 | tulip_rx_copybreak = rx_copybreak; |
1987 | tulip_max_interrupt_work = max_interrupt_work; | 1992 | tulip_max_interrupt_work = max_interrupt_work; |
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 9beb3d34d4ba..3c0e4d5c5fef 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c | |||
@@ -907,7 +907,7 @@ static void init_registers(struct net_device *dev) | |||
907 | #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM) | 907 | #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM) |
908 | i |= 0x4800; | 908 | i |= 0x4800; |
909 | #else | 909 | #else |
910 | #warning Processor architecture undefined | 910 | dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n"); |
911 | i |= 0x4800; | 911 | i |= 0x4800; |
912 | #endif | 912 | #endif |
913 | iowrite32(i, ioaddr + PCIBusCfg); | 913 | iowrite32(i, ioaddr + PCIBusCfg); |
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index ff76d4e9dc1b..bee32a9d9876 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig | |||
@@ -7,7 +7,8 @@ config NET_VENDOR_FREESCALE | |||
7 | default y | 7 | default y |
8 | depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ | 8 | depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ |
9 | M523x || M527x || M5272 || M528x || M520x || M532x || \ | 9 | M523x || M527x || M5272 || M528x || M520x || M532x || \ |
10 | ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) | 10 | ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \ |
11 | ARCH_LAYERSCAPE | ||
11 | ---help--- | 12 | ---help--- |
12 | If you have a network (Ethernet) card belonging to this class, say Y. | 13 | If you have a network (Ethernet) card belonging to this class, say Y. |
13 | 14 | ||
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 3e6b9b437497..7cf898455e60 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
@@ -647,9 +647,9 @@ static int gfar_parse_group(struct device_node *np, | |||
647 | if (model && strcasecmp(model, "FEC")) { | 647 | if (model && strcasecmp(model, "FEC")) { |
648 | gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); | 648 | gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1); |
649 | gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); | 649 | gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2); |
650 | if (gfar_irq(grp, TX)->irq == NO_IRQ || | 650 | if (!gfar_irq(grp, TX)->irq || |
651 | gfar_irq(grp, RX)->irq == NO_IRQ || | 651 | !gfar_irq(grp, RX)->irq || |
652 | gfar_irq(grp, ER)->irq == NO_IRQ) | 652 | !gfar_irq(grp, ER)->irq) |
653 | return -EINVAL; | 653 | return -EINVAL; |
654 | } | 654 | } |
655 | 655 | ||
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 664d0c261269..b40fba929d65 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c | |||
@@ -467,7 +467,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) | |||
467 | 467 | ||
468 | etsects->irq = platform_get_irq(dev, 0); | 468 | etsects->irq = platform_get_irq(dev, 0); |
469 | 469 | ||
470 | if (etsects->irq == NO_IRQ) { | 470 | if (etsects->irq < 0) { |
471 | pr_err("irq not in device tree\n"); | 471 | pr_err("irq not in device tree\n"); |
472 | goto no_node; | 472 | goto no_node; |
473 | } | 473 | } |
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 639263d5e833..7781e80896a6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | |||
@@ -627,8 +627,10 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
627 | 627 | ||
628 | /* verify the skb head is not shared */ | 628 | /* verify the skb head is not shared */ |
629 | err = skb_cow_head(skb, 0); | 629 | err = skb_cow_head(skb, 0); |
630 | if (err) | 630 | if (err) { |
631 | dev_kfree_skb(skb); | ||
631 | return NETDEV_TX_OK; | 632 | return NETDEV_TX_OK; |
633 | } | ||
632 | 634 | ||
633 | /* locate vlan header */ | 635 | /* locate vlan header */ |
634 | vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); | 636 | vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index e84c7f2634d3..ed622fa29dfa 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -36,7 +36,7 @@ | |||
36 | 36 | ||
37 | /* Registers */ | 37 | /* Registers */ |
38 | #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) | 38 | #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) |
39 | #define MVNETA_RXQ_HW_BUF_ALLOC BIT(1) | 39 | #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0) |
40 | #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) | 40 | #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) |
41 | #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) | 41 | #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) |
42 | #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) | 42 | #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) |
@@ -62,6 +62,7 @@ | |||
62 | #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3)) | 62 | #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3)) |
63 | #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) | 63 | #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2)) |
64 | #define MVNETA_BASE_ADDR_ENABLE 0x2290 | 64 | #define MVNETA_BASE_ADDR_ENABLE 0x2290 |
65 | #define MVNETA_ACCESS_PROTECT_ENABLE 0x2294 | ||
65 | #define MVNETA_PORT_CONFIG 0x2400 | 66 | #define MVNETA_PORT_CONFIG 0x2400 |
66 | #define MVNETA_UNI_PROMISC_MODE BIT(0) | 67 | #define MVNETA_UNI_PROMISC_MODE BIT(0) |
67 | #define MVNETA_DEF_RXQ(q) ((q) << 1) | 68 | #define MVNETA_DEF_RXQ(q) ((q) << 1) |
@@ -159,7 +160,7 @@ | |||
159 | 160 | ||
160 | #define MVNETA_INTR_ENABLE 0x25b8 | 161 | #define MVNETA_INTR_ENABLE 0x25b8 |
161 | #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 | 162 | #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00 |
162 | #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF | 163 | #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff |
163 | 164 | ||
164 | #define MVNETA_RXQ_CMD 0x2680 | 165 | #define MVNETA_RXQ_CMD 0x2680 |
165 | #define MVNETA_RXQ_DISABLE_SHIFT 8 | 166 | #define MVNETA_RXQ_DISABLE_SHIFT 8 |
@@ -242,6 +243,7 @@ | |||
242 | #define MVNETA_VLAN_TAG_LEN 4 | 243 | #define MVNETA_VLAN_TAG_LEN 4 |
243 | 244 | ||
244 | #define MVNETA_CPU_D_CACHE_LINE_SIZE 32 | 245 | #define MVNETA_CPU_D_CACHE_LINE_SIZE 32 |
246 | #define MVNETA_TX_CSUM_DEF_SIZE 1600 | ||
245 | #define MVNETA_TX_CSUM_MAX_SIZE 9800 | 247 | #define MVNETA_TX_CSUM_MAX_SIZE 9800 |
246 | #define MVNETA_ACC_MODE_EXT 1 | 248 | #define MVNETA_ACC_MODE_EXT 1 |
247 | 249 | ||
@@ -1579,12 +1581,16 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, | |||
1579 | } | 1581 | } |
1580 | 1582 | ||
1581 | skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); | 1583 | skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); |
1582 | if (!skb) | ||
1583 | goto err_drop_frame; | ||
1584 | 1584 | ||
1585 | /* After refill old buffer has to be unmapped regardless | ||
1586 | * the skb is successfully built or not. | ||
1587 | */ | ||
1585 | dma_unmap_single(dev->dev.parent, phys_addr, | 1588 | dma_unmap_single(dev->dev.parent, phys_addr, |
1586 | MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); | 1589 | MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); |
1587 | 1590 | ||
1591 | if (!skb) | ||
1592 | goto err_drop_frame; | ||
1593 | |||
1588 | rcvd_pkts++; | 1594 | rcvd_pkts++; |
1589 | rcvd_bytes += rx_bytes; | 1595 | rcvd_bytes += rx_bytes; |
1590 | 1596 | ||
@@ -3191,6 +3197,7 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp, | |||
3191 | } | 3197 | } |
3192 | 3198 | ||
3193 | mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); | 3199 | mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); |
3200 | mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); | ||
3194 | } | 3201 | } |
3195 | 3202 | ||
3196 | /* Power up the port */ | 3203 | /* Power up the port */ |
@@ -3250,6 +3257,7 @@ static int mvneta_probe(struct platform_device *pdev) | |||
3250 | char hw_mac_addr[ETH_ALEN]; | 3257 | char hw_mac_addr[ETH_ALEN]; |
3251 | const char *mac_from; | 3258 | const char *mac_from; |
3252 | const char *managed; | 3259 | const char *managed; |
3260 | int tx_csum_limit; | ||
3253 | int phy_mode; | 3261 | int phy_mode; |
3254 | int err; | 3262 | int err; |
3255 | int cpu; | 3263 | int cpu; |
@@ -3350,8 +3358,21 @@ static int mvneta_probe(struct platform_device *pdev) | |||
3350 | } | 3358 | } |
3351 | } | 3359 | } |
3352 | 3360 | ||
3353 | if (of_device_is_compatible(dn, "marvell,armada-370-neta")) | 3361 | if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) { |
3354 | pp->tx_csum_limit = 1600; | 3362 | if (tx_csum_limit < 0 || |
3363 | tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) { | ||
3364 | tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; | ||
3365 | dev_info(&pdev->dev, | ||
3366 | "Wrong TX csum limit in DT, set to %dB\n", | ||
3367 | MVNETA_TX_CSUM_DEF_SIZE); | ||
3368 | } | ||
3369 | } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) { | ||
3370 | tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE; | ||
3371 | } else { | ||
3372 | tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE; | ||
3373 | } | ||
3374 | |||
3375 | pp->tx_csum_limit = tx_csum_limit; | ||
3355 | 3376 | ||
3356 | pp->tx_ring_size = MVNETA_MAX_TXD; | 3377 | pp->tx_ring_size = MVNETA_MAX_TXD; |
3357 | pp->rx_ring_size = MVNETA_MAX_RXD; | 3378 | pp->rx_ring_size = MVNETA_MAX_RXD; |
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index b159ef8303cc..057665180f13 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c | |||
@@ -1326,7 +1326,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) | |||
1326 | /* Get platform resources */ | 1326 | /* Get platform resources */ |
1327 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1327 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1328 | irq = platform_get_irq(pdev, 0); | 1328 | irq = platform_get_irq(pdev, 0); |
1329 | if ((!res) || (irq < 0) || (irq >= NR_IRQS)) { | 1329 | if (!res || irq < 0) { |
1330 | dev_err(&pdev->dev, "error getting resources.\n"); | 1330 | dev_err(&pdev->dev, "error getting resources.\n"); |
1331 | ret = -ENXIO; | 1331 | ret = -ENXIO; |
1332 | goto err_exit; | 1332 | goto err_exit; |
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ee8d1ec61fab..ed5da4d47668 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
@@ -1225,7 +1225,7 @@ static int ravb_open(struct net_device *ndev) | |||
1225 | /* Device init */ | 1225 | /* Device init */ |
1226 | error = ravb_dmac_init(ndev); | 1226 | error = ravb_dmac_init(ndev); |
1227 | if (error) | 1227 | if (error) |
1228 | goto out_free_irq; | 1228 | goto out_free_irq2; |
1229 | ravb_emac_init(ndev); | 1229 | ravb_emac_init(ndev); |
1230 | 1230 | ||
1231 | /* Initialise PTP Clock driver */ | 1231 | /* Initialise PTP Clock driver */ |
@@ -1243,9 +1243,11 @@ static int ravb_open(struct net_device *ndev) | |||
1243 | out_ptp_stop: | 1243 | out_ptp_stop: |
1244 | /* Stop PTP Clock driver */ | 1244 | /* Stop PTP Clock driver */ |
1245 | ravb_ptp_stop(ndev); | 1245 | ravb_ptp_stop(ndev); |
1246 | out_free_irq2: | ||
1247 | if (priv->chip_id == RCAR_GEN3) | ||
1248 | free_irq(priv->emac_irq, ndev); | ||
1246 | out_free_irq: | 1249 | out_free_irq: |
1247 | free_irq(ndev->irq, ndev); | 1250 | free_irq(ndev->irq, ndev); |
1248 | free_irq(priv->emac_irq, ndev); | ||
1249 | out_napi_off: | 1251 | out_napi_off: |
1250 | napi_disable(&priv->napi[RAVB_NC]); | 1252 | napi_disable(&priv->napi[RAVB_NC]); |
1251 | napi_disable(&priv->napi[RAVB_BE]); | 1253 | napi_disable(&priv->napi[RAVB_BE]); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c index 7f6f4a4fcc70..58c05acc2aab 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | |||
@@ -299,16 +299,17 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac, | |||
299 | if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { | 299 | if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) { |
300 | const char *rs; | 300 | const char *rs; |
301 | 301 | ||
302 | dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN; | ||
303 | |||
302 | err = of_property_read_string(np, "st,tx-retime-src", &rs); | 304 | err = of_property_read_string(np, "st,tx-retime-src", &rs); |
303 | if (err < 0) { | 305 | if (err < 0) { |
304 | dev_warn(dev, "Use internal clock source\n"); | 306 | dev_warn(dev, "Use internal clock source\n"); |
305 | dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN; | 307 | } else { |
306 | } else if (!strcasecmp(rs, "clk_125")) { | 308 | if (!strcasecmp(rs, "clk_125")) |
307 | dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125; | 309 | dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125; |
308 | } else if (!strcasecmp(rs, "txclk")) { | 310 | else if (!strcasecmp(rs, "txclk")) |
309 | dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK; | 311 | dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK; |
310 | } | 312 | } |
311 | |||
312 | dwmac->speed = SPEED_1000; | 313 | dwmac->speed = SPEED_1000; |
313 | } | 314 | } |
314 | 315 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 64d8aa4e0cad..3c6549aee11d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -185,7 +185,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv) | |||
185 | priv->clk_csr = STMMAC_CSR_100_150M; | 185 | priv->clk_csr = STMMAC_CSR_100_150M; |
186 | else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) | 186 | else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) |
187 | priv->clk_csr = STMMAC_CSR_150_250M; | 187 | priv->clk_csr = STMMAC_CSR_150_250M; |
188 | else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) | 188 | else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M)) |
189 | priv->clk_csr = STMMAC_CSR_250_300M; | 189 | priv->clk_csr = STMMAC_CSR_250_300M; |
190 | } | 190 | } |
191 | } | 191 | } |
@@ -2232,6 +2232,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) | |||
2232 | 2232 | ||
2233 | frame_len = priv->hw->desc->get_rx_frame_len(p, coe); | 2233 | frame_len = priv->hw->desc->get_rx_frame_len(p, coe); |
2234 | 2234 | ||
2235 | /* check if frame_len fits the preallocated memory */ | ||
2236 | if (frame_len > priv->dma_buf_sz) { | ||
2237 | priv->dev->stats.rx_length_errors++; | ||
2238 | break; | ||
2239 | } | ||
2240 | |||
2235 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 | 2241 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 |
2236 | * Type frames (LLC/LLC-SNAP) | 2242 | * Type frames (LLC/LLC-SNAP) |
2237 | */ | 2243 | */ |
@@ -3102,6 +3108,7 @@ int stmmac_resume(struct net_device *ndev) | |||
3102 | init_dma_desc_rings(ndev, GFP_ATOMIC); | 3108 | init_dma_desc_rings(ndev, GFP_ATOMIC); |
3103 | stmmac_hw_setup(ndev, false); | 3109 | stmmac_hw_setup(ndev, false); |
3104 | stmmac_init_tx_coalesce(priv); | 3110 | stmmac_init_tx_coalesce(priv); |
3111 | stmmac_set_rx_mode(ndev); | ||
3105 | 3112 | ||
3106 | napi_enable(&priv->napi); | 3113 | napi_enable(&priv->napi); |
3107 | 3114 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index ebf6abc4853f..bba670c42e37 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | |||
@@ -138,7 +138,6 @@ int stmmac_mdio_reset(struct mii_bus *bus) | |||
138 | 138 | ||
139 | #ifdef CONFIG_OF | 139 | #ifdef CONFIG_OF |
140 | if (priv->device->of_node) { | 140 | if (priv->device->of_node) { |
141 | int reset_gpio, active_low; | ||
142 | 141 | ||
143 | if (data->reset_gpio < 0) { | 142 | if (data->reset_gpio < 0) { |
144 | struct device_node *np = priv->device->of_node; | 143 | struct device_node *np = priv->device->of_node; |
@@ -154,24 +153,23 @@ int stmmac_mdio_reset(struct mii_bus *bus) | |||
154 | "snps,reset-active-low"); | 153 | "snps,reset-active-low"); |
155 | of_property_read_u32_array(np, | 154 | of_property_read_u32_array(np, |
156 | "snps,reset-delays-us", data->delays, 3); | 155 | "snps,reset-delays-us", data->delays, 3); |
157 | } | ||
158 | 156 | ||
159 | reset_gpio = data->reset_gpio; | 157 | if (gpio_request(data->reset_gpio, "mdio-reset")) |
160 | active_low = data->active_low; | 158 | return 0; |
159 | } | ||
161 | 160 | ||
162 | if (!gpio_request(reset_gpio, "mdio-reset")) { | 161 | gpio_direction_output(data->reset_gpio, |
163 | gpio_direction_output(reset_gpio, active_low ? 1 : 0); | 162 | data->active_low ? 1 : 0); |
164 | if (data->delays[0]) | 163 | if (data->delays[0]) |
165 | msleep(DIV_ROUND_UP(data->delays[0], 1000)); | 164 | msleep(DIV_ROUND_UP(data->delays[0], 1000)); |
166 | 165 | ||
167 | gpio_set_value(reset_gpio, active_low ? 0 : 1); | 166 | gpio_set_value(data->reset_gpio, data->active_low ? 0 : 1); |
168 | if (data->delays[1]) | 167 | if (data->delays[1]) |
169 | msleep(DIV_ROUND_UP(data->delays[1], 1000)); | 168 | msleep(DIV_ROUND_UP(data->delays[1], 1000)); |
170 | 169 | ||
171 | gpio_set_value(reset_gpio, active_low ? 1 : 0); | 170 | gpio_set_value(data->reset_gpio, data->active_low ? 1 : 0); |
172 | if (data->delays[2]) | 171 | if (data->delays[2]) |
173 | msleep(DIV_ROUND_UP(data->delays[2], 1000)); | 172 | msleep(DIV_ROUND_UP(data->delays[2], 1000)); |
174 | } | ||
175 | } | 173 | } |
176 | #endif | 174 | #endif |
177 | 175 | ||
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c index c08be62bceba..1562ab4151e1 100644 --- a/drivers/net/ethernet/ti/cpsw-common.c +++ b/drivers/net/ethernet/ti/cpsw-common.c | |||
@@ -78,6 +78,9 @@ static int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave, | |||
78 | 78 | ||
79 | int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr) | 79 | int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr) |
80 | { | 80 | { |
81 | if (of_machine_is_compatible("ti,dm8148")) | ||
82 | return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); | ||
83 | |||
81 | if (of_machine_is_compatible("ti,am33xx")) | 84 | if (of_machine_is_compatible("ti,am33xx")) |
82 | return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); | 85 | return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr); |
83 | 86 | ||
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 54036ae0a388..0fc521941c71 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -498,7 +498,7 @@ static void macvtap_sock_write_space(struct sock *sk) | |||
498 | wait_queue_head_t *wqueue; | 498 | wait_queue_head_t *wqueue; |
499 | 499 | ||
500 | if (!sock_writeable(sk) || | 500 | if (!sock_writeable(sk) || |
501 | !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) | 501 | !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) |
502 | return; | 502 | return; |
503 | 503 | ||
504 | wqueue = sk_sleep(sk); | 504 | wqueue = sk_sleep(sk); |
@@ -585,7 +585,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait) | |||
585 | mask |= POLLIN | POLLRDNORM; | 585 | mask |= POLLIN | POLLRDNORM; |
586 | 586 | ||
587 | if (sock_writeable(&q->sk) || | 587 | if (sock_writeable(&q->sk) || |
588 | (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) && | 588 | (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) && |
589 | sock_writeable(&q->sk))) | 589 | sock_writeable(&q->sk))) |
590 | mask |= POLLOUT | POLLWRNORM; | 590 | mask |= POLLOUT | POLLWRNORM; |
591 | 591 | ||
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 07a6119121c3..3ce5d9514623 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c | |||
@@ -614,7 +614,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = { | |||
614 | { PHY_ID_BCM5461, 0xfffffff0 }, | 614 | { PHY_ID_BCM5461, 0xfffffff0 }, |
615 | { PHY_ID_BCM54616S, 0xfffffff0 }, | 615 | { PHY_ID_BCM54616S, 0xfffffff0 }, |
616 | { PHY_ID_BCM5464, 0xfffffff0 }, | 616 | { PHY_ID_BCM5464, 0xfffffff0 }, |
617 | { PHY_ID_BCM5482, 0xfffffff0 }, | 617 | { PHY_ID_BCM5481, 0xfffffff0 }, |
618 | { PHY_ID_BCM5482, 0xfffffff0 }, | 618 | { PHY_ID_BCM5482, 0xfffffff0 }, |
619 | { PHY_ID_BCM50610, 0xfffffff0 }, | 619 | { PHY_ID_BCM50610, 0xfffffff0 }, |
620 | { PHY_ID_BCM50610M, 0xfffffff0 }, | 620 | { PHY_ID_BCM50610M, 0xfffffff0 }, |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 48ce6ef400fe..47cd306dbb3c 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -448,7 +448,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) | |||
448 | mdiobus_write(phydev->bus, mii_data->phy_id, | 448 | mdiobus_write(phydev->bus, mii_data->phy_id, |
449 | mii_data->reg_num, val); | 449 | mii_data->reg_num, val); |
450 | 450 | ||
451 | if (mii_data->reg_num == MII_BMCR && | 451 | if (mii_data->phy_id == phydev->addr && |
452 | mii_data->reg_num == MII_BMCR && | ||
452 | val & BMCR_RESET) | 453 | val & BMCR_RESET) |
453 | return phy_init_hw(phydev); | 454 | return phy_init_hw(phydev); |
454 | 455 | ||
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index b1878faea397..f0db770e8b2f 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1040,7 +1040,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait) | |||
1040 | mask |= POLLIN | POLLRDNORM; | 1040 | mask |= POLLIN | POLLRDNORM; |
1041 | 1041 | ||
1042 | if (sock_writeable(sk) || | 1042 | if (sock_writeable(sk) || |
1043 | (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) && | 1043 | (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && |
1044 | sock_writeable(sk))) | 1044 | sock_writeable(sk))) |
1045 | mask |= POLLOUT | POLLWRNORM; | 1045 | mask |= POLLOUT | POLLWRNORM; |
1046 | 1046 | ||
@@ -1488,7 +1488,7 @@ static void tun_sock_write_space(struct sock *sk) | |||
1488 | if (!sock_writeable(sk)) | 1488 | if (!sock_writeable(sk)) |
1489 | return; | 1489 | return; |
1490 | 1490 | ||
1491 | if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) | 1491 | if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags)) |
1492 | return; | 1492 | return; |
1493 | 1493 | ||
1494 | wqueue = sk_sleep(sk); | 1494 | wqueue = sk_sleep(sk); |
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index a187f08113ec..3b1ba8237768 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c | |||
@@ -691,7 +691,6 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx) | |||
691 | 691 | ||
692 | int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags) | 692 | int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags) |
693 | { | 693 | { |
694 | const struct usb_cdc_union_desc *union_desc = NULL; | ||
695 | struct cdc_ncm_ctx *ctx; | 694 | struct cdc_ncm_ctx *ctx; |
696 | struct usb_driver *driver; | 695 | struct usb_driver *driver; |
697 | u8 *buf; | 696 | u8 *buf; |
@@ -725,15 +724,16 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ | |||
725 | /* parse through descriptors associated with control interface */ | 724 | /* parse through descriptors associated with control interface */ |
726 | cdc_parse_cdc_header(&hdr, intf, buf, len); | 725 | cdc_parse_cdc_header(&hdr, intf, buf, len); |
727 | 726 | ||
728 | ctx->data = usb_ifnum_to_if(dev->udev, | 727 | if (hdr.usb_cdc_union_desc) |
729 | hdr.usb_cdc_union_desc->bSlaveInterface0); | 728 | ctx->data = usb_ifnum_to_if(dev->udev, |
729 | hdr.usb_cdc_union_desc->bSlaveInterface0); | ||
730 | ctx->ether_desc = hdr.usb_cdc_ether_desc; | 730 | ctx->ether_desc = hdr.usb_cdc_ether_desc; |
731 | ctx->func_desc = hdr.usb_cdc_ncm_desc; | 731 | ctx->func_desc = hdr.usb_cdc_ncm_desc; |
732 | ctx->mbim_desc = hdr.usb_cdc_mbim_desc; | 732 | ctx->mbim_desc = hdr.usb_cdc_mbim_desc; |
733 | ctx->mbim_extended_desc = hdr.usb_cdc_mbim_extended_desc; | 733 | ctx->mbim_extended_desc = hdr.usb_cdc_mbim_extended_desc; |
734 | 734 | ||
735 | /* some buggy devices have an IAD but no CDC Union */ | 735 | /* some buggy devices have an IAD but no CDC Union */ |
736 | if (!union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) { | 736 | if (!hdr.usb_cdc_union_desc && intf->intf_assoc && intf->intf_assoc->bInterfaceCount == 2) { |
737 | ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1); | 737 | ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1); |
738 | dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n"); | 738 | dev_dbg(&intf->dev, "CDC Union missing - got slave from IAD\n"); |
739 | } | 739 | } |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 34799eaace41..9a5be8b85186 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -725,6 +725,7 @@ static const struct usb_device_id products[] = { | |||
725 | {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ | 725 | {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ |
726 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ | 726 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ |
727 | {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ | 727 | {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ |
728 | {QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */ | ||
728 | {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ | 729 | {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */ |
729 | {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ | 730 | {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */ |
730 | {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */ | 731 | {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */ |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 899ea4288197..417903715437 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -587,6 +587,12 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, | |||
587 | &adapter->pdev->dev, | 587 | &adapter->pdev->dev, |
588 | rbi->skb->data, rbi->len, | 588 | rbi->skb->data, rbi->len, |
589 | PCI_DMA_FROMDEVICE); | 589 | PCI_DMA_FROMDEVICE); |
590 | if (dma_mapping_error(&adapter->pdev->dev, | ||
591 | rbi->dma_addr)) { | ||
592 | dev_kfree_skb_any(rbi->skb); | ||
593 | rq->stats.rx_buf_alloc_failure++; | ||
594 | break; | ||
595 | } | ||
590 | } else { | 596 | } else { |
591 | /* rx buffer skipped by the device */ | 597 | /* rx buffer skipped by the device */ |
592 | } | 598 | } |
@@ -605,13 +611,18 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, | |||
605 | &adapter->pdev->dev, | 611 | &adapter->pdev->dev, |
606 | rbi->page, 0, PAGE_SIZE, | 612 | rbi->page, 0, PAGE_SIZE, |
607 | PCI_DMA_FROMDEVICE); | 613 | PCI_DMA_FROMDEVICE); |
614 | if (dma_mapping_error(&adapter->pdev->dev, | ||
615 | rbi->dma_addr)) { | ||
616 | put_page(rbi->page); | ||
617 | rq->stats.rx_buf_alloc_failure++; | ||
618 | break; | ||
619 | } | ||
608 | } else { | 620 | } else { |
609 | /* rx buffers skipped by the device */ | 621 | /* rx buffers skipped by the device */ |
610 | } | 622 | } |
611 | val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT; | 623 | val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT; |
612 | } | 624 | } |
613 | 625 | ||
614 | BUG_ON(rbi->dma_addr == 0); | ||
615 | gd->rxd.addr = cpu_to_le64(rbi->dma_addr); | 626 | gd->rxd.addr = cpu_to_le64(rbi->dma_addr); |
616 | gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT) | 627 | gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT) |
617 | | val | rbi->len); | 628 | | val | rbi->len); |
@@ -655,7 +666,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd, | |||
655 | } | 666 | } |
656 | 667 | ||
657 | 668 | ||
658 | static void | 669 | static int |
659 | vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, | 670 | vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, |
660 | struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, | 671 | struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, |
661 | struct vmxnet3_adapter *adapter) | 672 | struct vmxnet3_adapter *adapter) |
@@ -715,6 +726,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, | |||
715 | tbi->dma_addr = dma_map_single(&adapter->pdev->dev, | 726 | tbi->dma_addr = dma_map_single(&adapter->pdev->dev, |
716 | skb->data + buf_offset, buf_size, | 727 | skb->data + buf_offset, buf_size, |
717 | PCI_DMA_TODEVICE); | 728 | PCI_DMA_TODEVICE); |
729 | if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) | ||
730 | return -EFAULT; | ||
718 | 731 | ||
719 | tbi->len = buf_size; | 732 | tbi->len = buf_size; |
720 | 733 | ||
@@ -755,6 +768,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, | |||
755 | tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, | 768 | tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, |
756 | buf_offset, buf_size, | 769 | buf_offset, buf_size, |
757 | DMA_TO_DEVICE); | 770 | DMA_TO_DEVICE); |
771 | if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) | ||
772 | return -EFAULT; | ||
758 | 773 | ||
759 | tbi->len = buf_size; | 774 | tbi->len = buf_size; |
760 | 775 | ||
@@ -782,6 +797,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, | |||
782 | /* set the last buf_info for the pkt */ | 797 | /* set the last buf_info for the pkt */ |
783 | tbi->skb = skb; | 798 | tbi->skb = skb; |
784 | tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; | 799 | tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; |
800 | |||
801 | return 0; | ||
785 | } | 802 | } |
786 | 803 | ||
787 | 804 | ||
@@ -1020,7 +1037,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, | |||
1020 | } | 1037 | } |
1021 | 1038 | ||
1022 | /* fill tx descs related to addr & len */ | 1039 | /* fill tx descs related to addr & len */ |
1023 | vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter); | 1040 | if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) |
1041 | goto unlock_drop_pkt; | ||
1024 | 1042 | ||
1025 | /* setup the EOP desc */ | 1043 | /* setup the EOP desc */ |
1026 | ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP); | 1044 | ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP); |
@@ -1231,6 +1249,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, | |||
1231 | struct vmxnet3_rx_buf_info *rbi; | 1249 | struct vmxnet3_rx_buf_info *rbi; |
1232 | struct sk_buff *skb, *new_skb = NULL; | 1250 | struct sk_buff *skb, *new_skb = NULL; |
1233 | struct page *new_page = NULL; | 1251 | struct page *new_page = NULL; |
1252 | dma_addr_t new_dma_addr; | ||
1234 | int num_to_alloc; | 1253 | int num_to_alloc; |
1235 | struct Vmxnet3_RxDesc *rxd; | 1254 | struct Vmxnet3_RxDesc *rxd; |
1236 | u32 idx, ring_idx; | 1255 | u32 idx, ring_idx; |
@@ -1287,6 +1306,21 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, | |||
1287 | skip_page_frags = true; | 1306 | skip_page_frags = true; |
1288 | goto rcd_done; | 1307 | goto rcd_done; |
1289 | } | 1308 | } |
1309 | new_dma_addr = dma_map_single(&adapter->pdev->dev, | ||
1310 | new_skb->data, rbi->len, | ||
1311 | PCI_DMA_FROMDEVICE); | ||
1312 | if (dma_mapping_error(&adapter->pdev->dev, | ||
1313 | new_dma_addr)) { | ||
1314 | dev_kfree_skb(new_skb); | ||
1315 | /* Skb allocation failed, do not handover this | ||
1316 | * skb to stack. Reuse it. Drop the existing pkt | ||
1317 | */ | ||
1318 | rq->stats.rx_buf_alloc_failure++; | ||
1319 | ctx->skb = NULL; | ||
1320 | rq->stats.drop_total++; | ||
1321 | skip_page_frags = true; | ||
1322 | goto rcd_done; | ||
1323 | } | ||
1290 | 1324 | ||
1291 | dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, | 1325 | dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, |
1292 | rbi->len, | 1326 | rbi->len, |
@@ -1303,9 +1337,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, | |||
1303 | 1337 | ||
1304 | /* Immediate refill */ | 1338 | /* Immediate refill */ |
1305 | rbi->skb = new_skb; | 1339 | rbi->skb = new_skb; |
1306 | rbi->dma_addr = dma_map_single(&adapter->pdev->dev, | 1340 | rbi->dma_addr = new_dma_addr; |
1307 | rbi->skb->data, rbi->len, | ||
1308 | PCI_DMA_FROMDEVICE); | ||
1309 | rxd->addr = cpu_to_le64(rbi->dma_addr); | 1341 | rxd->addr = cpu_to_le64(rbi->dma_addr); |
1310 | rxd->len = rbi->len; | 1342 | rxd->len = rbi->len; |
1311 | if (adapter->version == 2 && | 1343 | if (adapter->version == 2 && |
@@ -1348,6 +1380,19 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, | |||
1348 | skip_page_frags = true; | 1380 | skip_page_frags = true; |
1349 | goto rcd_done; | 1381 | goto rcd_done; |
1350 | } | 1382 | } |
1383 | new_dma_addr = dma_map_page(&adapter->pdev->dev | ||
1384 | , rbi->page, | ||
1385 | 0, PAGE_SIZE, | ||
1386 | PCI_DMA_FROMDEVICE); | ||
1387 | if (dma_mapping_error(&adapter->pdev->dev, | ||
1388 | new_dma_addr)) { | ||
1389 | put_page(new_page); | ||
1390 | rq->stats.rx_buf_alloc_failure++; | ||
1391 | dev_kfree_skb(ctx->skb); | ||
1392 | ctx->skb = NULL; | ||
1393 | skip_page_frags = true; | ||
1394 | goto rcd_done; | ||
1395 | } | ||
1351 | 1396 | ||
1352 | dma_unmap_page(&adapter->pdev->dev, | 1397 | dma_unmap_page(&adapter->pdev->dev, |
1353 | rbi->dma_addr, rbi->len, | 1398 | rbi->dma_addr, rbi->len, |
@@ -1357,10 +1402,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, | |||
1357 | 1402 | ||
1358 | /* Immediate refill */ | 1403 | /* Immediate refill */ |
1359 | rbi->page = new_page; | 1404 | rbi->page = new_page; |
1360 | rbi->dma_addr = dma_map_page(&adapter->pdev->dev | 1405 | rbi->dma_addr = new_dma_addr; |
1361 | , rbi->page, | ||
1362 | 0, PAGE_SIZE, | ||
1363 | PCI_DMA_FROMDEVICE); | ||
1364 | rxd->addr = cpu_to_le64(rbi->dma_addr); | 1406 | rxd->addr = cpu_to_le64(rbi->dma_addr); |
1365 | rxd->len = rbi->len; | 1407 | rxd->len = rbi->len; |
1366 | } | 1408 | } |
@@ -2167,7 +2209,8 @@ vmxnet3_set_mc(struct net_device *netdev) | |||
2167 | PCI_DMA_TODEVICE); | 2209 | PCI_DMA_TODEVICE); |
2168 | } | 2210 | } |
2169 | 2211 | ||
2170 | if (new_table_pa) { | 2212 | if (!dma_mapping_error(&adapter->pdev->dev, |
2213 | new_table_pa)) { | ||
2171 | new_mode |= VMXNET3_RXM_MCAST; | 2214 | new_mode |= VMXNET3_RXM_MCAST; |
2172 | rxConf->mfTablePA = cpu_to_le64(new_table_pa); | 2215 | rxConf->mfTablePA = cpu_to_le64(new_table_pa); |
2173 | } else { | 2216 | } else { |
@@ -3075,6 +3118,11 @@ vmxnet3_probe_device(struct pci_dev *pdev, | |||
3075 | adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, | 3118 | adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, |
3076 | sizeof(struct vmxnet3_adapter), | 3119 | sizeof(struct vmxnet3_adapter), |
3077 | PCI_DMA_TODEVICE); | 3120 | PCI_DMA_TODEVICE); |
3121 | if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { | ||
3122 | dev_err(&pdev->dev, "Failed to map dma\n"); | ||
3123 | err = -EFAULT; | ||
3124 | goto err_dma_map; | ||
3125 | } | ||
3078 | adapter->shared = dma_alloc_coherent( | 3126 | adapter->shared = dma_alloc_coherent( |
3079 | &adapter->pdev->dev, | 3127 | &adapter->pdev->dev, |
3080 | sizeof(struct Vmxnet3_DriverShared), | 3128 | sizeof(struct Vmxnet3_DriverShared), |
@@ -3233,6 +3281,7 @@ err_alloc_queue_desc: | |||
3233 | err_alloc_shared: | 3281 | err_alloc_shared: |
3234 | dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, | 3282 | dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, |
3235 | sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); | 3283 | sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); |
3284 | err_dma_map: | ||
3236 | free_netdev(netdev); | 3285 | free_netdev(netdev); |
3237 | return err; | 3286 | return err; |
3238 | } | 3287 | } |
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 92fa3e1ea65c..4f9748457f5a 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c | |||
@@ -907,7 +907,6 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, | |||
907 | struct nlattr *tb[], struct nlattr *data[]) | 907 | struct nlattr *tb[], struct nlattr *data[]) |
908 | { | 908 | { |
909 | struct net_vrf *vrf = netdev_priv(dev); | 909 | struct net_vrf *vrf = netdev_priv(dev); |
910 | int err; | ||
911 | 910 | ||
912 | if (!data || !data[IFLA_VRF_TABLE]) | 911 | if (!data || !data[IFLA_VRF_TABLE]) |
913 | return -EINVAL; | 912 | return -EINVAL; |
@@ -916,15 +915,7 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, | |||
916 | 915 | ||
917 | dev->priv_flags |= IFF_L3MDEV_MASTER; | 916 | dev->priv_flags |= IFF_L3MDEV_MASTER; |
918 | 917 | ||
919 | err = register_netdevice(dev); | 918 | return register_netdevice(dev); |
920 | if (err < 0) | ||
921 | goto out_fail; | ||
922 | |||
923 | return 0; | ||
924 | |||
925 | out_fail: | ||
926 | free_netdev(dev); | ||
927 | return err; | ||
928 | } | 919 | } |
929 | 920 | ||
930 | static size_t vrf_nl_getsize(const struct net_device *dev) | 921 | static size_t vrf_nl_getsize(const struct net_device *dev) |
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index e92aaf615901..89541cc90e87 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c | |||
@@ -1075,11 +1075,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) | |||
1075 | 1075 | ||
1076 | used = pvc_is_used(pvc); | 1076 | used = pvc_is_used(pvc); |
1077 | 1077 | ||
1078 | if (type == ARPHRD_ETHER) { | 1078 | if (type == ARPHRD_ETHER) |
1079 | dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN, | 1079 | dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN, |
1080 | ether_setup); | 1080 | ether_setup); |
1081 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; | 1081 | else |
1082 | } else | ||
1083 | dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup); | 1082 | dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup); |
1084 | 1083 | ||
1085 | if (!dev) { | 1084 | if (!dev) { |
@@ -1088,9 +1087,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) | |||
1088 | return -ENOBUFS; | 1087 | return -ENOBUFS; |
1089 | } | 1088 | } |
1090 | 1089 | ||
1091 | if (type == ARPHRD_ETHER) | 1090 | if (type == ARPHRD_ETHER) { |
1091 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; | ||
1092 | eth_hw_addr_random(dev); | 1092 | eth_hw_addr_random(dev); |
1093 | else { | 1093 | } else { |
1094 | *(__be16*)dev->dev_addr = htons(dlci); | 1094 | *(__be16*)dev->dev_addr = htons(dlci); |
1095 | dlci_to_q922(dev->broadcast, dlci); | 1095 | dlci_to_q922(dev->broadcast, dlci); |
1096 | } | 1096 | } |
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 5c47b011a9d7..cd39025d2abf 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c | |||
@@ -549,16 +549,12 @@ static void x25_asy_receive_buf(struct tty_struct *tty, | |||
549 | 549 | ||
550 | static int x25_asy_open_tty(struct tty_struct *tty) | 550 | static int x25_asy_open_tty(struct tty_struct *tty) |
551 | { | 551 | { |
552 | struct x25_asy *sl = tty->disc_data; | 552 | struct x25_asy *sl; |
553 | int err; | 553 | int err; |
554 | 554 | ||
555 | if (tty->ops->write == NULL) | 555 | if (tty->ops->write == NULL) |
556 | return -EOPNOTSUPP; | 556 | return -EOPNOTSUPP; |
557 | 557 | ||
558 | /* First make sure we're not already connected. */ | ||
559 | if (sl && sl->magic == X25_ASY_MAGIC) | ||
560 | return -EEXIST; | ||
561 | |||
562 | /* OK. Find a free X.25 channel to use. */ | 558 | /* OK. Find a free X.25 channel to use. */ |
563 | sl = x25_asy_alloc(); | 559 | sl = x25_asy_alloc(); |
564 | if (sl == NULL) | 560 | if (sl == NULL) |
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index aa9bd92ac4ed..0947cc271e69 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c | |||
@@ -51,6 +51,7 @@ MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath"); | |||
51 | static const struct ath10k_hw_params ath10k_hw_params_list[] = { | 51 | static const struct ath10k_hw_params ath10k_hw_params_list[] = { |
52 | { | 52 | { |
53 | .id = QCA988X_HW_2_0_VERSION, | 53 | .id = QCA988X_HW_2_0_VERSION, |
54 | .dev_id = QCA988X_2_0_DEVICE_ID, | ||
54 | .name = "qca988x hw2.0", | 55 | .name = "qca988x hw2.0", |
55 | .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, | 56 | .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, |
56 | .uart_pin = 7, | 57 | .uart_pin = 7, |
@@ -69,6 +70,25 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { | |||
69 | }, | 70 | }, |
70 | { | 71 | { |
71 | .id = QCA6174_HW_2_1_VERSION, | 72 | .id = QCA6174_HW_2_1_VERSION, |
73 | .dev_id = QCA6164_2_1_DEVICE_ID, | ||
74 | .name = "qca6164 hw2.1", | ||
75 | .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, | ||
76 | .uart_pin = 6, | ||
77 | .otp_exe_param = 0, | ||
78 | .channel_counters_freq_hz = 88000, | ||
79 | .max_probe_resp_desc_thres = 0, | ||
80 | .fw = { | ||
81 | .dir = QCA6174_HW_2_1_FW_DIR, | ||
82 | .fw = QCA6174_HW_2_1_FW_FILE, | ||
83 | .otp = QCA6174_HW_2_1_OTP_FILE, | ||
84 | .board = QCA6174_HW_2_1_BOARD_DATA_FILE, | ||
85 | .board_size = QCA6174_BOARD_DATA_SZ, | ||
86 | .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ, | ||
87 | }, | ||
88 | }, | ||
89 | { | ||
90 | .id = QCA6174_HW_2_1_VERSION, | ||
91 | .dev_id = QCA6174_2_1_DEVICE_ID, | ||
72 | .name = "qca6174 hw2.1", | 92 | .name = "qca6174 hw2.1", |
73 | .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, | 93 | .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, |
74 | .uart_pin = 6, | 94 | .uart_pin = 6, |
@@ -86,6 +106,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { | |||
86 | }, | 106 | }, |
87 | { | 107 | { |
88 | .id = QCA6174_HW_3_0_VERSION, | 108 | .id = QCA6174_HW_3_0_VERSION, |
109 | .dev_id = QCA6174_2_1_DEVICE_ID, | ||
89 | .name = "qca6174 hw3.0", | 110 | .name = "qca6174 hw3.0", |
90 | .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, | 111 | .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, |
91 | .uart_pin = 6, | 112 | .uart_pin = 6, |
@@ -103,6 +124,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { | |||
103 | }, | 124 | }, |
104 | { | 125 | { |
105 | .id = QCA6174_HW_3_2_VERSION, | 126 | .id = QCA6174_HW_3_2_VERSION, |
127 | .dev_id = QCA6174_2_1_DEVICE_ID, | ||
106 | .name = "qca6174 hw3.2", | 128 | .name = "qca6174 hw3.2", |
107 | .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, | 129 | .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, |
108 | .uart_pin = 6, | 130 | .uart_pin = 6, |
@@ -121,6 +143,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { | |||
121 | }, | 143 | }, |
122 | { | 144 | { |
123 | .id = QCA99X0_HW_2_0_DEV_VERSION, | 145 | .id = QCA99X0_HW_2_0_DEV_VERSION, |
146 | .dev_id = QCA99X0_2_0_DEVICE_ID, | ||
124 | .name = "qca99x0 hw2.0", | 147 | .name = "qca99x0 hw2.0", |
125 | .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR, | 148 | .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR, |
126 | .uart_pin = 7, | 149 | .uart_pin = 7, |
@@ -139,10 +162,31 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { | |||
139 | }, | 162 | }, |
140 | { | 163 | { |
141 | .id = QCA9377_HW_1_0_DEV_VERSION, | 164 | .id = QCA9377_HW_1_0_DEV_VERSION, |
165 | .dev_id = QCA9377_1_0_DEVICE_ID, | ||
142 | .name = "qca9377 hw1.0", | 166 | .name = "qca9377 hw1.0", |
143 | .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, | 167 | .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, |
144 | .uart_pin = 7, | 168 | .uart_pin = 6, |
145 | .otp_exe_param = 0, | 169 | .otp_exe_param = 0, |
170 | .channel_counters_freq_hz = 88000, | ||
171 | .max_probe_resp_desc_thres = 0, | ||
172 | .fw = { | ||
173 | .dir = QCA9377_HW_1_0_FW_DIR, | ||
174 | .fw = QCA9377_HW_1_0_FW_FILE, | ||
175 | .otp = QCA9377_HW_1_0_OTP_FILE, | ||
176 | .board = QCA9377_HW_1_0_BOARD_DATA_FILE, | ||
177 | .board_size = QCA9377_BOARD_DATA_SZ, | ||
178 | .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, | ||
179 | }, | ||
180 | }, | ||
181 | { | ||
182 | .id = QCA9377_HW_1_1_DEV_VERSION, | ||
183 | .dev_id = QCA9377_1_0_DEVICE_ID, | ||
184 | .name = "qca9377 hw1.1", | ||
185 | .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, | ||
186 | .uart_pin = 6, | ||
187 | .otp_exe_param = 0, | ||
188 | .channel_counters_freq_hz = 88000, | ||
189 | .max_probe_resp_desc_thres = 0, | ||
146 | .fw = { | 190 | .fw = { |
147 | .dir = QCA9377_HW_1_0_FW_DIR, | 191 | .dir = QCA9377_HW_1_0_FW_DIR, |
148 | .fw = QCA9377_HW_1_0_FW_FILE, | 192 | .fw = QCA9377_HW_1_0_FW_FILE, |
@@ -1263,7 +1307,8 @@ static int ath10k_init_hw_params(struct ath10k *ar) | |||
1263 | for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) { | 1307 | for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) { |
1264 | hw_params = &ath10k_hw_params_list[i]; | 1308 | hw_params = &ath10k_hw_params_list[i]; |
1265 | 1309 | ||
1266 | if (hw_params->id == ar->target_version) | 1310 | if (hw_params->id == ar->target_version && |
1311 | hw_params->dev_id == ar->dev_id) | ||
1267 | break; | 1312 | break; |
1268 | } | 1313 | } |
1269 | 1314 | ||
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 018c64f4fd25..858d75f49a9f 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h | |||
@@ -636,6 +636,7 @@ struct ath10k { | |||
636 | 636 | ||
637 | struct ath10k_hw_params { | 637 | struct ath10k_hw_params { |
638 | u32 id; | 638 | u32 id; |
639 | u16 dev_id; | ||
639 | const char *name; | 640 | const char *name; |
640 | u32 patch_load_addr; | 641 | u32 patch_load_addr; |
641 | int uart_pin; | 642 | int uart_pin; |
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 39966a05c1cc..713c2bcea178 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h | |||
@@ -22,6 +22,12 @@ | |||
22 | 22 | ||
23 | #define ATH10K_FW_DIR "ath10k" | 23 | #define ATH10K_FW_DIR "ath10k" |
24 | 24 | ||
25 | #define QCA988X_2_0_DEVICE_ID (0x003c) | ||
26 | #define QCA6164_2_1_DEVICE_ID (0x0041) | ||
27 | #define QCA6174_2_1_DEVICE_ID (0x003e) | ||
28 | #define QCA99X0_2_0_DEVICE_ID (0x0040) | ||
29 | #define QCA9377_1_0_DEVICE_ID (0x0042) | ||
30 | |||
25 | /* QCA988X 1.0 definitions (unsupported) */ | 31 | /* QCA988X 1.0 definitions (unsupported) */ |
26 | #define QCA988X_HW_1_0_CHIP_ID_REV 0x0 | 32 | #define QCA988X_HW_1_0_CHIP_ID_REV 0x0 |
27 | 33 | ||
@@ -42,6 +48,10 @@ | |||
42 | #define QCA6174_HW_3_0_VERSION 0x05020000 | 48 | #define QCA6174_HW_3_0_VERSION 0x05020000 |
43 | #define QCA6174_HW_3_2_VERSION 0x05030000 | 49 | #define QCA6174_HW_3_2_VERSION 0x05030000 |
44 | 50 | ||
51 | /* QCA9377 target BMI version signatures */ | ||
52 | #define QCA9377_HW_1_0_DEV_VERSION 0x05020000 | ||
53 | #define QCA9377_HW_1_1_DEV_VERSION 0x05020001 | ||
54 | |||
45 | enum qca6174_pci_rev { | 55 | enum qca6174_pci_rev { |
46 | QCA6174_PCI_REV_1_1 = 0x11, | 56 | QCA6174_PCI_REV_1_1 = 0x11, |
47 | QCA6174_PCI_REV_1_3 = 0x13, | 57 | QCA6174_PCI_REV_1_3 = 0x13, |
@@ -60,6 +70,11 @@ enum qca6174_chip_id_rev { | |||
60 | QCA6174_HW_3_2_CHIP_ID_REV = 10, | 70 | QCA6174_HW_3_2_CHIP_ID_REV = 10, |
61 | }; | 71 | }; |
62 | 72 | ||
73 | enum qca9377_chip_id_rev { | ||
74 | QCA9377_HW_1_0_CHIP_ID_REV = 0x0, | ||
75 | QCA9377_HW_1_1_CHIP_ID_REV = 0x1, | ||
76 | }; | ||
77 | |||
63 | #define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1" | 78 | #define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1" |
64 | #define QCA6174_HW_2_1_FW_FILE "firmware.bin" | 79 | #define QCA6174_HW_2_1_FW_FILE "firmware.bin" |
65 | #define QCA6174_HW_2_1_OTP_FILE "otp.bin" | 80 | #define QCA6174_HW_2_1_OTP_FILE "otp.bin" |
@@ -85,8 +100,6 @@ enum qca6174_chip_id_rev { | |||
85 | #define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234 | 100 | #define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234 |
86 | 101 | ||
87 | /* QCA9377 1.0 definitions */ | 102 | /* QCA9377 1.0 definitions */ |
88 | #define QCA9377_HW_1_0_DEV_VERSION 0x05020001 | ||
89 | #define QCA9377_HW_1_0_CHIP_ID_REV 0x1 | ||
90 | #define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0" | 103 | #define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0" |
91 | #define QCA9377_HW_1_0_FW_FILE "firmware.bin" | 104 | #define QCA9377_HW_1_0_FW_FILE "firmware.bin" |
92 | #define QCA9377_HW_1_0_OTP_FILE "otp.bin" | 105 | #define QCA9377_HW_1_0_OTP_FILE "otp.bin" |
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index a7411fe90cc4..95a55405ebf0 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c | |||
@@ -4225,7 +4225,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed) | |||
4225 | 4225 | ||
4226 | static u32 get_nss_from_chainmask(u16 chain_mask) | 4226 | static u32 get_nss_from_chainmask(u16 chain_mask) |
4227 | { | 4227 | { |
4228 | if ((chain_mask & 0x15) == 0x15) | 4228 | if ((chain_mask & 0xf) == 0xf) |
4229 | return 4; | 4229 | return 4; |
4230 | else if ((chain_mask & 0x7) == 0x7) | 4230 | else if ((chain_mask & 0x7) == 0x7) |
4231 | return 3; | 4231 | return 3; |
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 3fca200b986c..930785a724e1 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c | |||
@@ -57,12 +57,6 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); | |||
57 | #define ATH10K_PCI_TARGET_WAIT 3000 | 57 | #define ATH10K_PCI_TARGET_WAIT 3000 |
58 | #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 | 58 | #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 |
59 | 59 | ||
60 | #define QCA988X_2_0_DEVICE_ID (0x003c) | ||
61 | #define QCA6164_2_1_DEVICE_ID (0x0041) | ||
62 | #define QCA6174_2_1_DEVICE_ID (0x003e) | ||
63 | #define QCA99X0_2_0_DEVICE_ID (0x0040) | ||
64 | #define QCA9377_1_0_DEVICE_ID (0x0042) | ||
65 | |||
66 | static const struct pci_device_id ath10k_pci_id_table[] = { | 60 | static const struct pci_device_id ath10k_pci_id_table[] = { |
67 | { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ | 61 | { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ |
68 | { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ | 62 | { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ |
@@ -92,7 +86,9 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { | |||
92 | { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, | 86 | { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV }, |
93 | 87 | ||
94 | { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, | 88 | { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, |
89 | |||
95 | { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, | 90 | { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, |
91 | { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV }, | ||
96 | }; | 92 | }; |
97 | 93 | ||
98 | static void ath10k_pci_buffer_cleanup(struct ath10k *ar); | 94 | static void ath10k_pci_buffer_cleanup(struct ath10k *ar); |
@@ -111,8 +107,9 @@ static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state); | |||
111 | static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); | 107 | static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state); |
112 | static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); | 108 | static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state); |
113 | static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); | 109 | static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state); |
110 | static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state); | ||
114 | 111 | ||
115 | static const struct ce_attr host_ce_config_wlan[] = { | 112 | static struct ce_attr host_ce_config_wlan[] = { |
116 | /* CE0: host->target HTC control and raw streams */ | 113 | /* CE0: host->target HTC control and raw streams */ |
117 | { | 114 | { |
118 | .flags = CE_ATTR_FLAGS, | 115 | .flags = CE_ATTR_FLAGS, |
@@ -128,7 +125,7 @@ static const struct ce_attr host_ce_config_wlan[] = { | |||
128 | .src_nentries = 0, | 125 | .src_nentries = 0, |
129 | .src_sz_max = 2048, | 126 | .src_sz_max = 2048, |
130 | .dest_nentries = 512, | 127 | .dest_nentries = 512, |
131 | .recv_cb = ath10k_pci_htc_rx_cb, | 128 | .recv_cb = ath10k_pci_htt_htc_rx_cb, |
132 | }, | 129 | }, |
133 | 130 | ||
134 | /* CE2: target->host WMI */ | 131 | /* CE2: target->host WMI */ |
@@ -217,7 +214,7 @@ static const struct ce_attr host_ce_config_wlan[] = { | |||
217 | }; | 214 | }; |
218 | 215 | ||
219 | /* Target firmware's Copy Engine configuration. */ | 216 | /* Target firmware's Copy Engine configuration. */ |
220 | static const struct ce_pipe_config target_ce_config_wlan[] = { | 217 | static struct ce_pipe_config target_ce_config_wlan[] = { |
221 | /* CE0: host->target HTC control and raw streams */ | 218 | /* CE0: host->target HTC control and raw streams */ |
222 | { | 219 | { |
223 | .pipenum = __cpu_to_le32(0), | 220 | .pipenum = __cpu_to_le32(0), |
@@ -330,7 +327,7 @@ static const struct ce_pipe_config target_ce_config_wlan[] = { | |||
330 | * This table is derived from the CE_PCI TABLE, above. | 327 | * This table is derived from the CE_PCI TABLE, above. |
331 | * It is passed to the Target at startup for use by firmware. | 328 | * It is passed to the Target at startup for use by firmware. |
332 | */ | 329 | */ |
333 | static const struct service_to_pipe target_service_to_ce_map_wlan[] = { | 330 | static struct service_to_pipe target_service_to_ce_map_wlan[] = { |
334 | { | 331 | { |
335 | __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), | 332 | __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO), |
336 | __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ | 333 | __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */ |
@@ -1208,6 +1205,16 @@ static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state) | |||
1208 | ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); | 1205 | ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); |
1209 | } | 1206 | } |
1210 | 1207 | ||
1208 | static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state) | ||
1209 | { | ||
1210 | /* CE4 polling needs to be done whenever CE pipe which transports | ||
1211 | * HTT Rx (target->host) is processed. | ||
1212 | */ | ||
1213 | ath10k_ce_per_engine_service(ce_state->ar, 4); | ||
1214 | |||
1215 | ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler); | ||
1216 | } | ||
1217 | |||
1211 | /* Called by lower (CE) layer when a send to HTT Target completes. */ | 1218 | /* Called by lower (CE) layer when a send to HTT Target completes. */ |
1212 | static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) | 1219 | static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state) |
1213 | { | 1220 | { |
@@ -2027,6 +2034,29 @@ static int ath10k_pci_init_config(struct ath10k *ar) | |||
2027 | return 0; | 2034 | return 0; |
2028 | } | 2035 | } |
2029 | 2036 | ||
2037 | static void ath10k_pci_override_ce_config(struct ath10k *ar) | ||
2038 | { | ||
2039 | struct ce_attr *attr; | ||
2040 | struct ce_pipe_config *config; | ||
2041 | |||
2042 | /* For QCA6174 we're overriding the Copy Engine 5 configuration, | ||
2043 | * since it is currently used for other feature. | ||
2044 | */ | ||
2045 | |||
2046 | /* Override Host's Copy Engine 5 configuration */ | ||
2047 | attr = &host_ce_config_wlan[5]; | ||
2048 | attr->src_sz_max = 0; | ||
2049 | attr->dest_nentries = 0; | ||
2050 | |||
2051 | /* Override Target firmware's Copy Engine configuration */ | ||
2052 | config = &target_ce_config_wlan[5]; | ||
2053 | config->pipedir = __cpu_to_le32(PIPEDIR_OUT); | ||
2054 | config->nbytes_max = __cpu_to_le32(2048); | ||
2055 | |||
2056 | /* Map from service/endpoint to Copy Engine */ | ||
2057 | target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1); | ||
2058 | } | ||
2059 | |||
2030 | static int ath10k_pci_alloc_pipes(struct ath10k *ar) | 2060 | static int ath10k_pci_alloc_pipes(struct ath10k *ar) |
2031 | { | 2061 | { |
2032 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); | 2062 | struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); |
@@ -3020,6 +3050,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev, | |||
3020 | goto err_core_destroy; | 3050 | goto err_core_destroy; |
3021 | } | 3051 | } |
3022 | 3052 | ||
3053 | if (QCA_REV_6174(ar)) | ||
3054 | ath10k_pci_override_ce_config(ar); | ||
3055 | |||
3023 | ret = ath10k_pci_alloc_pipes(ar); | 3056 | ret = ath10k_pci_alloc_pipes(ar); |
3024 | if (ret) { | 3057 | if (ret) { |
3025 | ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", | 3058 | ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", |
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 1a73c7a1da77..bf88ec3a65fa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c | |||
@@ -69,7 +69,7 @@ | |||
69 | #include "iwl-agn-hw.h" | 69 | #include "iwl-agn-hw.h" |
70 | 70 | ||
71 | /* Highest firmware API version supported */ | 71 | /* Highest firmware API version supported */ |
72 | #define IWL7260_UCODE_API_MAX 17 | 72 | #define IWL7260_UCODE_API_MAX 19 |
73 | 73 | ||
74 | /* Oldest version we won't warn about */ | 74 | /* Oldest version we won't warn about */ |
75 | #define IWL7260_UCODE_API_OK 13 | 75 | #define IWL7260_UCODE_API_OK 13 |
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index 0116e5a4c393..9bcc0bf937d8 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c | |||
@@ -69,7 +69,7 @@ | |||
69 | #include "iwl-agn-hw.h" | 69 | #include "iwl-agn-hw.h" |
70 | 70 | ||
71 | /* Highest firmware API version supported */ | 71 | /* Highest firmware API version supported */ |
72 | #define IWL8000_UCODE_API_MAX 17 | 72 | #define IWL8000_UCODE_API_MAX 19 |
73 | 73 | ||
74 | /* Oldest version we won't warn about */ | 74 | /* Oldest version we won't warn about */ |
75 | #define IWL8000_UCODE_API_OK 13 | 75 | #define IWL8000_UCODE_API_OK 13 |
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c index 85ae902df7c0..29ae58ebf223 100644 --- a/drivers/net/wireless/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c | |||
@@ -309,9 +309,9 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, | |||
309 | * to transmit packets to the AP, i.e. the PTK. | 309 | * to transmit packets to the AP, i.e. the PTK. |
310 | */ | 310 | */ |
311 | if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { | 311 | if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { |
312 | key->hw_key_idx = 0; | ||
313 | mvm->ptk_ivlen = key->iv_len; | 312 | mvm->ptk_ivlen = key->iv_len; |
314 | mvm->ptk_icvlen = key->icv_len; | 313 | mvm->ptk_icvlen = key->icv_len; |
314 | ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0); | ||
315 | } else { | 315 | } else { |
316 | /* | 316 | /* |
317 | * firmware only supports TSC/RSC for a single key, | 317 | * firmware only supports TSC/RSC for a single key, |
@@ -319,12 +319,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, | |||
319 | * with new ones -- this relies on mac80211 doing | 319 | * with new ones -- this relies on mac80211 doing |
320 | * list_add_tail(). | 320 | * list_add_tail(). |
321 | */ | 321 | */ |
322 | key->hw_key_idx = 1; | ||
323 | mvm->gtk_ivlen = key->iv_len; | 322 | mvm->gtk_ivlen = key->iv_len; |
324 | mvm->gtk_icvlen = key->icv_len; | 323 | mvm->gtk_icvlen = key->icv_len; |
324 | ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1); | ||
325 | } | 325 | } |
326 | 326 | ||
327 | ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, true); | ||
328 | data->error = ret != 0; | 327 | data->error = ret != 0; |
329 | out_unlock: | 328 | out_unlock: |
330 | mutex_unlock(&mvm->mutex); | 329 | mutex_unlock(&mvm->mutex); |
@@ -772,9 +771,6 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) | |||
772 | */ | 771 | */ |
773 | set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); | 772 | set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); |
774 | 773 | ||
775 | /* We reprogram keys and shouldn't allocate new key indices */ | ||
776 | memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); | ||
777 | |||
778 | mvm->ptk_ivlen = 0; | 774 | mvm->ptk_ivlen = 0; |
779 | mvm->ptk_icvlen = 0; | 775 | mvm->ptk_icvlen = 0; |
780 | mvm->ptk_ivlen = 0; | 776 | mvm->ptk_ivlen = 0; |
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index 1fb684693040..e88afac51c5d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c | |||
@@ -2941,6 +2941,7 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, | |||
2941 | { | 2941 | { |
2942 | struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); | 2942 | struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); |
2943 | int ret; | 2943 | int ret; |
2944 | u8 key_offset; | ||
2944 | 2945 | ||
2945 | if (iwlwifi_mod_params.sw_crypto) { | 2946 | if (iwlwifi_mod_params.sw_crypto) { |
2946 | IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); | 2947 | IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); |
@@ -3006,10 +3007,14 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, | |||
3006 | break; | 3007 | break; |
3007 | } | 3008 | } |
3008 | 3009 | ||
3010 | /* in HW restart reuse the index, otherwise request a new one */ | ||
3011 | if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) | ||
3012 | key_offset = key->hw_key_idx; | ||
3013 | else | ||
3014 | key_offset = STA_KEY_IDX_INVALID; | ||
3015 | |||
3009 | IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); | 3016 | IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); |
3010 | ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, | 3017 | ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); |
3011 | test_bit(IWL_MVM_STATUS_IN_HW_RESTART, | ||
3012 | &mvm->status)); | ||
3013 | if (ret) { | 3018 | if (ret) { |
3014 | IWL_WARN(mvm, "set key failed\n"); | 3019 | IWL_WARN(mvm, "set key failed\n"); |
3015 | /* | 3020 | /* |
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index 300a249486e4..354acbde088e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c | |||
@@ -1201,7 +1201,8 @@ static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) | |||
1201 | return max_offs; | 1201 | return max_offs; |
1202 | } | 1202 | } |
1203 | 1203 | ||
1204 | static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, | 1204 | static u8 iwl_mvm_get_key_sta_id(struct iwl_mvm *mvm, |
1205 | struct ieee80211_vif *vif, | ||
1205 | struct ieee80211_sta *sta) | 1206 | struct ieee80211_sta *sta) |
1206 | { | 1207 | { |
1207 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); | 1208 | struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); |
@@ -1218,8 +1219,21 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, | |||
1218 | * station ID, then use AP's station ID. | 1219 | * station ID, then use AP's station ID. |
1219 | */ | 1220 | */ |
1220 | if (vif->type == NL80211_IFTYPE_STATION && | 1221 | if (vif->type == NL80211_IFTYPE_STATION && |
1221 | mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) | 1222 | mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { |
1222 | return mvmvif->ap_sta_id; | 1223 | u8 sta_id = mvmvif->ap_sta_id; |
1224 | |||
1225 | sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], | ||
1226 | lockdep_is_held(&mvm->mutex)); | ||
1227 | /* | ||
1228 | * It is possible that the 'sta' parameter is NULL, | ||
1229 | * for example when a GTK is removed - the sta_id will then | ||
1230 | * be the AP ID, and no station was passed by mac80211. | ||
1231 | */ | ||
1232 | if (IS_ERR_OR_NULL(sta)) | ||
1233 | return IWL_MVM_STATION_COUNT; | ||
1234 | |||
1235 | return sta_id; | ||
1236 | } | ||
1223 | 1237 | ||
1224 | return IWL_MVM_STATION_COUNT; | 1238 | return IWL_MVM_STATION_COUNT; |
1225 | } | 1239 | } |
@@ -1227,7 +1241,8 @@ static u8 iwl_mvm_get_key_sta_id(struct ieee80211_vif *vif, | |||
1227 | static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, | 1241 | static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, |
1228 | struct iwl_mvm_sta *mvm_sta, | 1242 | struct iwl_mvm_sta *mvm_sta, |
1229 | struct ieee80211_key_conf *keyconf, bool mcast, | 1243 | struct ieee80211_key_conf *keyconf, bool mcast, |
1230 | u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags) | 1244 | u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, |
1245 | u8 key_offset) | ||
1231 | { | 1246 | { |
1232 | struct iwl_mvm_add_sta_key_cmd cmd = {}; | 1247 | struct iwl_mvm_add_sta_key_cmd cmd = {}; |
1233 | __le16 key_flags; | 1248 | __le16 key_flags; |
@@ -1269,7 +1284,7 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, | |||
1269 | if (mcast) | 1284 | if (mcast) |
1270 | key_flags |= cpu_to_le16(STA_KEY_MULTICAST); | 1285 | key_flags |= cpu_to_le16(STA_KEY_MULTICAST); |
1271 | 1286 | ||
1272 | cmd.key_offset = keyconf->hw_key_idx; | 1287 | cmd.key_offset = key_offset; |
1273 | cmd.key_flags = key_flags; | 1288 | cmd.key_flags = key_flags; |
1274 | cmd.sta_id = sta_id; | 1289 | cmd.sta_id = sta_id; |
1275 | 1290 | ||
@@ -1360,6 +1375,7 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, | |||
1360 | struct ieee80211_vif *vif, | 1375 | struct ieee80211_vif *vif, |
1361 | struct ieee80211_sta *sta, | 1376 | struct ieee80211_sta *sta, |
1362 | struct ieee80211_key_conf *keyconf, | 1377 | struct ieee80211_key_conf *keyconf, |
1378 | u8 key_offset, | ||
1363 | bool mcast) | 1379 | bool mcast) |
1364 | { | 1380 | { |
1365 | struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); | 1381 | struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); |
@@ -1375,17 +1391,17 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, | |||
1375 | ieee80211_get_key_rx_seq(keyconf, 0, &seq); | 1391 | ieee80211_get_key_rx_seq(keyconf, 0, &seq); |
1376 | ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); | 1392 | ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); |
1377 | ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, | 1393 | ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, |
1378 | seq.tkip.iv32, p1k, 0); | 1394 | seq.tkip.iv32, p1k, 0, key_offset); |
1379 | break; | 1395 | break; |
1380 | case WLAN_CIPHER_SUITE_CCMP: | 1396 | case WLAN_CIPHER_SUITE_CCMP: |
1381 | case WLAN_CIPHER_SUITE_WEP40: | 1397 | case WLAN_CIPHER_SUITE_WEP40: |
1382 | case WLAN_CIPHER_SUITE_WEP104: | 1398 | case WLAN_CIPHER_SUITE_WEP104: |
1383 | ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, | 1399 | ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, |
1384 | 0, NULL, 0); | 1400 | 0, NULL, 0, key_offset); |
1385 | break; | 1401 | break; |
1386 | default: | 1402 | default: |
1387 | ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, | 1403 | ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, |
1388 | 0, NULL, 0); | 1404 | 0, NULL, 0, key_offset); |
1389 | } | 1405 | } |
1390 | 1406 | ||
1391 | return ret; | 1407 | return ret; |
@@ -1433,7 +1449,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, | |||
1433 | struct ieee80211_vif *vif, | 1449 | struct ieee80211_vif *vif, |
1434 | struct ieee80211_sta *sta, | 1450 | struct ieee80211_sta *sta, |
1435 | struct ieee80211_key_conf *keyconf, | 1451 | struct ieee80211_key_conf *keyconf, |
1436 | bool have_key_offset) | 1452 | u8 key_offset) |
1437 | { | 1453 | { |
1438 | bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); | 1454 | bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); |
1439 | u8 sta_id; | 1455 | u8 sta_id; |
@@ -1443,7 +1459,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, | |||
1443 | lockdep_assert_held(&mvm->mutex); | 1459 | lockdep_assert_held(&mvm->mutex); |
1444 | 1460 | ||
1445 | /* Get the station id from the mvm local station table */ | 1461 | /* Get the station id from the mvm local station table */ |
1446 | sta_id = iwl_mvm_get_key_sta_id(vif, sta); | 1462 | sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta); |
1447 | if (sta_id == IWL_MVM_STATION_COUNT) { | 1463 | if (sta_id == IWL_MVM_STATION_COUNT) { |
1448 | IWL_ERR(mvm, "Failed to find station id\n"); | 1464 | IWL_ERR(mvm, "Failed to find station id\n"); |
1449 | return -EINVAL; | 1465 | return -EINVAL; |
@@ -1470,18 +1486,25 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, | |||
1470 | if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) | 1486 | if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) |
1471 | return -EINVAL; | 1487 | return -EINVAL; |
1472 | 1488 | ||
1473 | if (!have_key_offset) { | 1489 | /* If the key_offset is not pre-assigned, we need to find a |
1474 | /* | 1490 | * new offset to use. In normal cases, the offset is not |
1475 | * The D3 firmware hardcodes the PTK offset to 0, so we have to | 1491 | * pre-assigned, but during HW_RESTART we want to reuse the |
1476 | * configure it there. As a result, this workaround exists to | 1492 | * same indices, so we pass them when this function is called. |
1477 | * let the caller set the key offset (hw_key_idx), see d3.c. | 1493 | * |
1478 | */ | 1494 | * In D3 entry, we need to hardcoded the indices (because the |
1479 | keyconf->hw_key_idx = iwl_mvm_set_fw_key_idx(mvm); | 1495 | * firmware hardcodes the PTK offset to 0). In this case, we |
1480 | if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) | 1496 | * need to make sure we don't overwrite the hw_key_idx in the |
1497 | * keyconf structure, because otherwise we cannot configure | ||
1498 | * the original ones back when resuming. | ||
1499 | */ | ||
1500 | if (key_offset == STA_KEY_IDX_INVALID) { | ||
1501 | key_offset = iwl_mvm_set_fw_key_idx(mvm); | ||
1502 | if (key_offset == STA_KEY_IDX_INVALID) | ||
1481 | return -ENOSPC; | 1503 | return -ENOSPC; |
1504 | keyconf->hw_key_idx = key_offset; | ||
1482 | } | 1505 | } |
1483 | 1506 | ||
1484 | ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, mcast); | 1507 | ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast); |
1485 | if (ret) { | 1508 | if (ret) { |
1486 | __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); | 1509 | __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); |
1487 | goto end; | 1510 | goto end; |
@@ -1495,7 +1518,8 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, | |||
1495 | */ | 1518 | */ |
1496 | if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || | 1519 | if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || |
1497 | keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) { | 1520 | keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) { |
1498 | ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, !mcast); | 1521 | ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, |
1522 | key_offset, !mcast); | ||
1499 | if (ret) { | 1523 | if (ret) { |
1500 | __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); | 1524 | __clear_bit(keyconf->hw_key_idx, mvm->fw_key_table); |
1501 | __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); | 1525 | __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); |
@@ -1521,7 +1545,7 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, | |||
1521 | lockdep_assert_held(&mvm->mutex); | 1545 | lockdep_assert_held(&mvm->mutex); |
1522 | 1546 | ||
1523 | /* Get the station id from the mvm local station table */ | 1547 | /* Get the station id from the mvm local station table */ |
1524 | sta_id = iwl_mvm_get_key_sta_id(vif, sta); | 1548 | sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta); |
1525 | 1549 | ||
1526 | IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", | 1550 | IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", |
1527 | keyconf->keyidx, sta_id); | 1551 | keyconf->keyidx, sta_id); |
@@ -1547,24 +1571,6 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, | |||
1547 | return 0; | 1571 | return 0; |
1548 | } | 1572 | } |
1549 | 1573 | ||
1550 | /* | ||
1551 | * It is possible that the 'sta' parameter is NULL, and thus | ||
1552 | * there is a need to retrieve the sta from the local station table, | ||
1553 | * for example when a GTK is removed (where the sta_id will then be | ||
1554 | * the AP ID, and no station was passed by mac80211.) | ||
1555 | */ | ||
1556 | if (!sta) { | ||
1557 | sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], | ||
1558 | lockdep_is_held(&mvm->mutex)); | ||
1559 | if (!sta) { | ||
1560 | IWL_ERR(mvm, "Invalid station id\n"); | ||
1561 | return -EINVAL; | ||
1562 | } | ||
1563 | } | ||
1564 | |||
1565 | if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif)) | ||
1566 | return -EINVAL; | ||
1567 | |||
1568 | ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); | 1574 | ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); |
1569 | if (ret) | 1575 | if (ret) |
1570 | return ret; | 1576 | return ret; |
@@ -1584,7 +1590,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, | |||
1584 | u16 *phase1key) | 1590 | u16 *phase1key) |
1585 | { | 1591 | { |
1586 | struct iwl_mvm_sta *mvm_sta; | 1592 | struct iwl_mvm_sta *mvm_sta; |
1587 | u8 sta_id = iwl_mvm_get_key_sta_id(vif, sta); | 1593 | u8 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta); |
1588 | bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); | 1594 | bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); |
1589 | 1595 | ||
1590 | if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT)) | 1596 | if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT)) |
@@ -1602,7 +1608,7 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, | |||
1602 | 1608 | ||
1603 | mvm_sta = iwl_mvm_sta_from_mac80211(sta); | 1609 | mvm_sta = iwl_mvm_sta_from_mac80211(sta); |
1604 | iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, | 1610 | iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, |
1605 | iv32, phase1key, CMD_ASYNC); | 1611 | iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx); |
1606 | rcu_read_unlock(); | 1612 | rcu_read_unlock(); |
1607 | } | 1613 | } |
1608 | 1614 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h index eedb215eba3f..0631cc0a6d3c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/iwlwifi/mvm/sta.h | |||
@@ -365,8 +365,8 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, | |||
365 | int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, | 365 | int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, |
366 | struct ieee80211_vif *vif, | 366 | struct ieee80211_vif *vif, |
367 | struct ieee80211_sta *sta, | 367 | struct ieee80211_sta *sta, |
368 | struct ieee80211_key_conf *key, | 368 | struct ieee80211_key_conf *keyconf, |
369 | bool have_key_offset); | 369 | u8 key_offset); |
370 | int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, | 370 | int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, |
371 | struct ieee80211_vif *vif, | 371 | struct ieee80211_vif *vif, |
372 | struct ieee80211_sta *sta, | 372 | struct ieee80211_sta *sta, |
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 644b58bc5226..639761fb2bfb 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
@@ -423,14 +423,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
423 | /* 8000 Series */ | 423 | /* 8000 Series */ |
424 | {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, | 424 | {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, |
425 | {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, | 425 | {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, |
426 | {IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)}, | ||
427 | {IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)}, | ||
428 | {IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)}, | ||
429 | {IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)}, | ||
426 | {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, | 430 | {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, |
431 | {IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)}, | ||
432 | {IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)}, | ||
433 | {IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)}, | ||
427 | {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, | 434 | {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, |
428 | {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, | 435 | {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, |
429 | {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, | 436 | {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, |
430 | {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, | 437 | {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, |
431 | {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, | 438 | {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, |
439 | {IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)}, | ||
432 | {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, | 440 | {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, |
433 | {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, | ||
434 | {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, | 441 | {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, |
435 | {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, | 442 | {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, |
436 | {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, | 443 | {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, |
@@ -438,18 +445,28 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
438 | {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, | 445 | {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, |
439 | {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, | 446 | {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, |
440 | {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, | 447 | {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, |
448 | {IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)}, | ||
441 | {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, | 449 | {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, |
450 | {IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)}, | ||
442 | {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, | 451 | {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, |
443 | {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, | 452 | {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, |
453 | {IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)}, | ||
454 | {IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)}, | ||
455 | {IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)}, | ||
456 | {IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)}, | ||
444 | {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, | 457 | {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, |
458 | {IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)}, | ||
445 | {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, | 459 | {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, |
460 | {IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)}, | ||
446 | {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, | 461 | {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, |
462 | {IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)}, | ||
447 | {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, | 463 | {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, |
448 | {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, | 464 | {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, |
449 | {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, | 465 | {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, |
450 | {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)}, | 466 | {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)}, |
451 | {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, | 467 | {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, |
452 | {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, | 468 | {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, |
469 | {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)}, | ||
453 | #endif /* CONFIG_IWLMVM */ | 470 | #endif /* CONFIG_IWLMVM */ |
454 | 471 | ||
455 | {0} | 472 | {0} |
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 6e9418ed90c2..bbb789f8990b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | |||
@@ -2272,7 +2272,7 @@ void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw) | |||
2272 | struct rtl_priv *rtlpriv = rtl_priv(hw); | 2272 | struct rtl_priv *rtlpriv = rtl_priv(hw); |
2273 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); | 2273 | struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); |
2274 | 2274 | ||
2275 | if (!rtlpci->int_clear) | 2275 | if (rtlpci->int_clear) |
2276 | rtl8821ae_clear_interrupt(hw);/*clear it here first*/ | 2276 | rtl8821ae_clear_interrupt(hw);/*clear it here first*/ |
2277 | 2277 | ||
2278 | rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); | 2278 | rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); |
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 8ee141a55bc5..142bdff4ed60 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | |||
@@ -448,7 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n"); | |||
448 | MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); | 448 | MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n"); |
449 | MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); | 449 | MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)"); |
450 | MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); | 450 | MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n"); |
451 | MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 0)\n"); | 451 | MODULE_PARM_DESC(int_clear, "Set to 0 to disable interrupt clear before set (default 1)\n"); |
452 | 452 | ||
453 | static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); | 453 | static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume); |
454 | 454 | ||
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile index 219dc206fa5f..a5fe23952586 100644 --- a/drivers/nvme/host/Makefile +++ b/drivers/nvme/host/Makefile | |||
@@ -1,4 +1,5 @@ | |||
1 | 1 | ||
2 | obj-$(CONFIG_BLK_DEV_NVME) += nvme.o | 2 | obj-$(CONFIG_BLK_DEV_NVME) += nvme.o |
3 | 3 | ||
4 | nvme-y += pci.o scsi.o lightnvm.o | 4 | lightnvm-$(CONFIG_NVM) := lightnvm.o |
5 | nvme-y += pci.o scsi.o $(lightnvm-y) | ||
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 9202d1a468d0..06c336410235 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c | |||
@@ -22,8 +22,6 @@ | |||
22 | 22 | ||
23 | #include "nvme.h" | 23 | #include "nvme.h" |
24 | 24 | ||
25 | #ifdef CONFIG_NVM | ||
26 | |||
27 | #include <linux/nvme.h> | 25 | #include <linux/nvme.h> |
28 | #include <linux/bitops.h> | 26 | #include <linux/bitops.h> |
29 | #include <linux/lightnvm.h> | 27 | #include <linux/lightnvm.h> |
@@ -357,10 +355,11 @@ out: | |||
357 | return ret; | 355 | return ret; |
358 | } | 356 | } |
359 | 357 | ||
360 | static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa, | 358 | static int nvme_nvm_get_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr ppa, |
361 | int nr_blocks, nvm_bb_update_fn *update_bbtbl, | 359 | int nr_blocks, nvm_bb_update_fn *update_bbtbl, |
362 | void *priv) | 360 | void *priv) |
363 | { | 361 | { |
362 | struct request_queue *q = nvmdev->q; | ||
364 | struct nvme_ns *ns = q->queuedata; | 363 | struct nvme_ns *ns = q->queuedata; |
365 | struct nvme_dev *dev = ns->dev; | 364 | struct nvme_dev *dev = ns->dev; |
366 | struct nvme_nvm_command c = {}; | 365 | struct nvme_nvm_command c = {}; |
@@ -404,6 +403,7 @@ static int nvme_nvm_get_bb_tbl(struct request_queue *q, struct ppa_addr ppa, | |||
404 | goto out; | 403 | goto out; |
405 | } | 404 | } |
406 | 405 | ||
406 | ppa = dev_to_generic_addr(nvmdev, ppa); | ||
407 | ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv); | 407 | ret = update_bbtbl(ppa, nr_blocks, bb_tbl->blk, priv); |
408 | if (ret) { | 408 | if (ret) { |
409 | ret = -EINTR; | 409 | ret = -EINTR; |
@@ -571,31 +571,27 @@ void nvme_nvm_unregister(struct request_queue *q, char *disk_name) | |||
571 | nvm_unregister(disk_name); | 571 | nvm_unregister(disk_name); |
572 | } | 572 | } |
573 | 573 | ||
574 | /* move to shared place when used in multiple places. */ | ||
575 | #define PCI_VENDOR_ID_CNEX 0x1d1d | ||
576 | #define PCI_DEVICE_ID_CNEX_WL 0x2807 | ||
577 | #define PCI_DEVICE_ID_CNEX_QEMU 0x1f1f | ||
578 | |||
574 | int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id) | 579 | int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id) |
575 | { | 580 | { |
576 | struct nvme_dev *dev = ns->dev; | 581 | struct nvme_dev *dev = ns->dev; |
577 | struct pci_dev *pdev = to_pci_dev(dev->dev); | 582 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
578 | 583 | ||
579 | /* QEMU NVMe simulator - PCI ID + Vendor specific bit */ | 584 | /* QEMU NVMe simulator - PCI ID + Vendor specific bit */ |
580 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x5845 && | 585 | if (pdev->vendor == PCI_VENDOR_ID_CNEX && |
586 | pdev->device == PCI_DEVICE_ID_CNEX_QEMU && | ||
581 | id->vs[0] == 0x1) | 587 | id->vs[0] == 0x1) |
582 | return 1; | 588 | return 1; |
583 | 589 | ||
584 | /* CNEX Labs - PCI ID + Vendor specific bit */ | 590 | /* CNEX Labs - PCI ID + Vendor specific bit */ |
585 | if (pdev->vendor == 0x1d1d && pdev->device == 0x2807 && | 591 | if (pdev->vendor == PCI_VENDOR_ID_CNEX && |
592 | pdev->device == PCI_DEVICE_ID_CNEX_WL && | ||
586 | id->vs[0] == 0x1) | 593 | id->vs[0] == 0x1) |
587 | return 1; | 594 | return 1; |
588 | 595 | ||
589 | return 0; | 596 | return 0; |
590 | } | 597 | } |
591 | #else | ||
592 | int nvme_nvm_register(struct request_queue *q, char *disk_name) | ||
593 | { | ||
594 | return 0; | ||
595 | } | ||
596 | void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}; | ||
597 | int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id) | ||
598 | { | ||
599 | return 0; | ||
600 | } | ||
601 | #endif /* CONFIG_NVM */ | ||
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index fdb4e5bad9ac..044253dca30a 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h | |||
@@ -136,8 +136,22 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr); | |||
136 | int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg); | 136 | int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg); |
137 | int nvme_sg_get_version_num(int __user *ip); | 137 | int nvme_sg_get_version_num(int __user *ip); |
138 | 138 | ||
139 | #ifdef CONFIG_NVM | ||
139 | int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id); | 140 | int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id); |
140 | int nvme_nvm_register(struct request_queue *q, char *disk_name); | 141 | int nvme_nvm_register(struct request_queue *q, char *disk_name); |
141 | void nvme_nvm_unregister(struct request_queue *q, char *disk_name); | 142 | void nvme_nvm_unregister(struct request_queue *q, char *disk_name); |
143 | #else | ||
144 | static inline int nvme_nvm_register(struct request_queue *q, char *disk_name) | ||
145 | { | ||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}; | ||
150 | |||
151 | static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id) | ||
152 | { | ||
153 | return 0; | ||
154 | } | ||
155 | #endif /* CONFIG_NVM */ | ||
142 | 156 | ||
143 | #endif /* _NVME_H */ | 157 | #endif /* _NVME_H */ |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index f3b53af789ef..9e294ff4e652 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -2708,6 +2708,18 @@ static int nvme_dev_map(struct nvme_dev *dev) | |||
2708 | dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); | 2708 | dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); |
2709 | dev->db_stride = 1 << NVME_CAP_STRIDE(cap); | 2709 | dev->db_stride = 1 << NVME_CAP_STRIDE(cap); |
2710 | dev->dbs = ((void __iomem *)dev->bar) + 4096; | 2710 | dev->dbs = ((void __iomem *)dev->bar) + 4096; |
2711 | |||
2712 | /* | ||
2713 | * Temporary fix for the Apple controller found in the MacBook8,1 and | ||
2714 | * some MacBook7,1 to avoid controller resets and data loss. | ||
2715 | */ | ||
2716 | if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { | ||
2717 | dev->q_depth = 2; | ||
2718 | dev_warn(dev->dev, "detected Apple NVMe controller, set " | ||
2719 | "queue depth=%u to work around controller resets\n", | ||
2720 | dev->q_depth); | ||
2721 | } | ||
2722 | |||
2711 | if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) | 2723 | if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) |
2712 | dev->cmb = nvme_map_cmb(dev); | 2724 | dev->cmb = nvme_map_cmb(dev); |
2713 | 2725 | ||
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 4446fcb5effd..d7ffd66814bb 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
@@ -1146,9 +1146,21 @@ static int pci_pm_runtime_suspend(struct device *dev) | |||
1146 | pci_dev->state_saved = false; | 1146 | pci_dev->state_saved = false; |
1147 | pci_dev->no_d3cold = false; | 1147 | pci_dev->no_d3cold = false; |
1148 | error = pm->runtime_suspend(dev); | 1148 | error = pm->runtime_suspend(dev); |
1149 | suspend_report_result(pm->runtime_suspend, error); | 1149 | if (error) { |
1150 | if (error) | 1150 | /* |
1151 | * -EBUSY and -EAGAIN is used to request the runtime PM core | ||
1152 | * to schedule a new suspend, so log the event only with debug | ||
1153 | * log level. | ||
1154 | */ | ||
1155 | if (error == -EBUSY || error == -EAGAIN) | ||
1156 | dev_dbg(dev, "can't suspend now (%pf returned %d)\n", | ||
1157 | pm->runtime_suspend, error); | ||
1158 | else | ||
1159 | dev_err(dev, "can't suspend (%pf returned %d)\n", | ||
1160 | pm->runtime_suspend, error); | ||
1161 | |||
1151 | return error; | 1162 | return error; |
1163 | } | ||
1152 | if (!pci_dev->d3cold_allowed) | 1164 | if (!pci_dev->d3cold_allowed) |
1153 | pci_dev->no_d3cold = true; | 1165 | pci_dev->no_d3cold = true; |
1154 | 1166 | ||
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index b422e4ed73f4..312c78b27a32 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig | |||
@@ -5,8 +5,6 @@ | |||
5 | config PINCTRL | 5 | config PINCTRL |
6 | bool | 6 | bool |
7 | 7 | ||
8 | if PINCTRL | ||
9 | |||
10 | menu "Pin controllers" | 8 | menu "Pin controllers" |
11 | depends on PINCTRL | 9 | depends on PINCTRL |
12 | 10 | ||
@@ -274,5 +272,3 @@ config PINCTRL_TB10X | |||
274 | select GPIOLIB | 272 | select GPIOLIB |
275 | 273 | ||
276 | endmenu | 274 | endmenu |
277 | |||
278 | endif | ||
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c index 88a7fac11bd4..acaf84cadca3 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c | |||
@@ -538,8 +538,10 @@ static int imx1_pinctrl_parse_functions(struct device_node *np, | |||
538 | func->groups[i] = child->name; | 538 | func->groups[i] = child->name; |
539 | grp = &info->groups[grp_index++]; | 539 | grp = &info->groups[grp_index++]; |
540 | ret = imx1_pinctrl_parse_groups(child, grp, info, i++); | 540 | ret = imx1_pinctrl_parse_groups(child, grp, info, i++); |
541 | if (ret == -ENOMEM) | 541 | if (ret == -ENOMEM) { |
542 | of_node_put(child); | ||
542 | return ret; | 543 | return ret; |
544 | } | ||
543 | } | 545 | } |
544 | 546 | ||
545 | return 0; | 547 | return 0; |
@@ -582,8 +584,10 @@ static int imx1_pinctrl_parse_dt(struct platform_device *pdev, | |||
582 | 584 | ||
583 | for_each_child_of_node(np, child) { | 585 | for_each_child_of_node(np, child) { |
584 | ret = imx1_pinctrl_parse_functions(child, info, ifunc++); | 586 | ret = imx1_pinctrl_parse_functions(child, info, ifunc++); |
585 | if (ret == -ENOMEM) | 587 | if (ret == -ENOMEM) { |
588 | of_node_put(child); | ||
586 | return -ENOMEM; | 589 | return -ENOMEM; |
590 | } | ||
587 | } | 591 | } |
588 | 592 | ||
589 | return 0; | 593 | return 0; |
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index f307f1d27d64..5c717275a7fa 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c | |||
@@ -747,7 +747,7 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned offset) | |||
747 | reg_addr = mtk_get_port(pctl, offset) + pctl->devdata->dir_offset; | 747 | reg_addr = mtk_get_port(pctl, offset) + pctl->devdata->dir_offset; |
748 | bit = BIT(offset & 0xf); | 748 | bit = BIT(offset & 0xf); |
749 | regmap_read(pctl->regmap1, reg_addr, &read_val); | 749 | regmap_read(pctl->regmap1, reg_addr, &read_val); |
750 | return !!(read_val & bit); | 750 | return !(read_val & bit); |
751 | } | 751 | } |
752 | 752 | ||
753 | static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset) | 753 | static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset) |
@@ -757,12 +757,8 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned offset) | |||
757 | unsigned int read_val = 0; | 757 | unsigned int read_val = 0; |
758 | struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev); | 758 | struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev); |
759 | 759 | ||
760 | if (mtk_gpio_get_direction(chip, offset)) | 760 | reg_addr = mtk_get_port(pctl, offset) + |
761 | reg_addr = mtk_get_port(pctl, offset) + | 761 | pctl->devdata->din_offset; |
762 | pctl->devdata->dout_offset; | ||
763 | else | ||
764 | reg_addr = mtk_get_port(pctl, offset) + | ||
765 | pctl->devdata->din_offset; | ||
766 | 762 | ||
767 | bit = BIT(offset & 0xf); | 763 | bit = BIT(offset & 0xf); |
768 | regmap_read(pctl->regmap1, reg_addr, &read_val); | 764 | regmap_read(pctl->regmap1, reg_addr, &read_val); |
@@ -997,6 +993,7 @@ static struct gpio_chip mtk_gpio_chip = { | |||
997 | .owner = THIS_MODULE, | 993 | .owner = THIS_MODULE, |
998 | .request = gpiochip_generic_request, | 994 | .request = gpiochip_generic_request, |
999 | .free = gpiochip_generic_free, | 995 | .free = gpiochip_generic_free, |
996 | .get_direction = mtk_gpio_get_direction, | ||
1000 | .direction_input = mtk_gpio_direction_input, | 997 | .direction_input = mtk_gpio_direction_input, |
1001 | .direction_output = mtk_gpio_direction_output, | 998 | .direction_output = mtk_gpio_direction_output, |
1002 | .get = mtk_gpio_get, | 999 | .get = mtk_gpio_get, |
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c index d809c9eaa323..19a3c3bc2f1f 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c | |||
@@ -672,7 +672,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev) | |||
672 | return -ENOMEM; | 672 | return -ENOMEM; |
673 | 673 | ||
674 | pctrl->dev = &pdev->dev; | 674 | pctrl->dev = &pdev->dev; |
675 | pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev); | 675 | pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev); |
676 | 676 | ||
677 | pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); | 677 | pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); |
678 | if (!pctrl->regmap) { | 678 | if (!pctrl->regmap) { |
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c index 8982027de8e8..b868ef1766a0 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c | |||
@@ -763,7 +763,7 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev) | |||
763 | return -ENOMEM; | 763 | return -ENOMEM; |
764 | 764 | ||
765 | pctrl->dev = &pdev->dev; | 765 | pctrl->dev = &pdev->dev; |
766 | pctrl->npins = (unsigned)of_device_get_match_data(&pdev->dev); | 766 | pctrl->npins = (unsigned long)of_device_get_match_data(&pdev->dev); |
767 | 767 | ||
768 | pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); | 768 | pctrl->regmap = dev_get_regmap(pdev->dev.parent, NULL); |
769 | if (!pctrl->regmap) { | 769 | if (!pctrl->regmap) { |
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c index e7deb51de7dc..9842bb106796 100644 --- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c +++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c | |||
@@ -31,11 +31,11 @@ | |||
31 | PORT_GP_12(5, fn, sfx) | 31 | PORT_GP_12(5, fn, sfx) |
32 | 32 | ||
33 | #undef _GP_DATA | 33 | #undef _GP_DATA |
34 | #define _GP_DATA(bank, pin, name, sfx) \ | 34 | #define _GP_DATA(bank, pin, name, sfx, cfg) \ |
35 | PINMUX_DATA(name##_DATA, name##_FN, name##_IN, name##_OUT) | 35 | PINMUX_DATA(name##_DATA, name##_FN, name##_IN, name##_OUT) |
36 | 36 | ||
37 | #define _GP_INOUTSEL(bank, pin, name, sfx) name##_IN, name##_OUT | 37 | #define _GP_INOUTSEL(bank, pin, name, sfx, cfg) name##_IN, name##_OUT |
38 | #define _GP_INDT(bank, pin, name, sfx) name##_DATA | 38 | #define _GP_INDT(bank, pin, name, sfx, cfg) name##_DATA |
39 | #define GP_INOUTSEL(bank) PORT_GP_32_REV(bank, _GP_INOUTSEL, unused) | 39 | #define GP_INOUTSEL(bank) PORT_GP_32_REV(bank, _GP_INOUTSEL, unused) |
40 | #define GP_INDT(bank) PORT_GP_32_REV(bank, _GP_INDT, unused) | 40 | #define GP_INDT(bank) PORT_GP_32_REV(bank, _GP_INDT, unused) |
41 | 41 | ||
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 8b3130f22b42..9e03d158f411 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c | |||
@@ -1478,6 +1478,8 @@ module_init(remoteproc_init); | |||
1478 | 1478 | ||
1479 | static void __exit remoteproc_exit(void) | 1479 | static void __exit remoteproc_exit(void) |
1480 | { | 1480 | { |
1481 | ida_destroy(&rproc_dev_index); | ||
1482 | |||
1481 | rproc_exit_debugfs(); | 1483 | rproc_exit_debugfs(); |
1482 | } | 1484 | } |
1483 | module_exit(remoteproc_exit); | 1485 | module_exit(remoteproc_exit); |
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c index 9d30809bb407..916af5096f57 100644 --- a/drivers/remoteproc/remoteproc_debugfs.c +++ b/drivers/remoteproc/remoteproc_debugfs.c | |||
@@ -156,7 +156,7 @@ rproc_recovery_write(struct file *filp, const char __user *user_buf, | |||
156 | char buf[10]; | 156 | char buf[10]; |
157 | int ret; | 157 | int ret; |
158 | 158 | ||
159 | if (count > sizeof(buf)) | 159 | if (count < 1 || count > sizeof(buf)) |
160 | return count; | 160 | return count; |
161 | 161 | ||
162 | ret = copy_from_user(buf, user_buf, count); | 162 | ret = copy_from_user(buf, user_buf, count); |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 5f692ae40749..64eed87d34a8 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -364,6 +364,7 @@ config SCSI_HPSA | |||
364 | tristate "HP Smart Array SCSI driver" | 364 | tristate "HP Smart Array SCSI driver" |
365 | depends on PCI && SCSI | 365 | depends on PCI && SCSI |
366 | select CHECK_SIGNATURE | 366 | select CHECK_SIGNATURE |
367 | select SCSI_SAS_ATTRS | ||
367 | help | 368 | help |
368 | This driver supports HP Smart Array Controllers (circa 2009). | 369 | This driver supports HP Smart Array Controllers (circa 2009). |
369 | It is a SCSI alternative to the cciss driver, which is a block | 370 | It is a SCSI alternative to the cciss driver, which is a block |
@@ -499,6 +500,7 @@ config SCSI_ADVANSYS | |||
499 | tristate "AdvanSys SCSI support" | 500 | tristate "AdvanSys SCSI support" |
500 | depends on SCSI | 501 | depends on SCSI |
501 | depends on ISA || EISA || PCI | 502 | depends on ISA || EISA || PCI |
503 | depends on ISA_DMA_API || !ISA | ||
502 | help | 504 | help |
503 | This is a driver for all SCSI host adapters manufactured by | 505 | This is a driver for all SCSI host adapters manufactured by |
504 | AdvanSys. It is documented in the kernel source in | 506 | AdvanSys. It is documented in the kernel source in |
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index 519f9a4b3dad..febbd83e2ecd 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c | |||
@@ -7803,7 +7803,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp, | |||
7803 | return ASC_BUSY; | 7803 | return ASC_BUSY; |
7804 | } | 7804 | } |
7805 | scsiqp->sense_addr = cpu_to_le32(sense_addr); | 7805 | scsiqp->sense_addr = cpu_to_le32(sense_addr); |
7806 | scsiqp->sense_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE); | 7806 | scsiqp->sense_len = SCSI_SENSE_BUFFERSIZE; |
7807 | 7807 | ||
7808 | /* Build ADV_SCSI_REQ_Q */ | 7808 | /* Build ADV_SCSI_REQ_Q */ |
7809 | 7809 | ||
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 323982fd00c3..82ac1cd818ac 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c | |||
@@ -333,6 +333,17 @@ static void scsi_host_dev_release(struct device *dev) | |||
333 | kfree(queuedata); | 333 | kfree(queuedata); |
334 | } | 334 | } |
335 | 335 | ||
336 | if (shost->shost_state == SHOST_CREATED) { | ||
337 | /* | ||
338 | * Free the shost_dev device name here if scsi_host_alloc() | ||
339 | * and scsi_host_put() have been called but neither | ||
340 | * scsi_host_add() nor scsi_host_remove() has been called. | ||
341 | * This avoids that the memory allocated for the shost_dev | ||
342 | * name is leaked. | ||
343 | */ | ||
344 | kfree(dev_name(&shost->shost_dev)); | ||
345 | } | ||
346 | |||
336 | scsi_destroy_command_freelist(shost); | 347 | scsi_destroy_command_freelist(shost); |
337 | if (shost_use_blk_mq(shost)) { | 348 | if (shost_use_blk_mq(shost)) { |
338 | if (shost->tag_set.tags) | 349 | if (shost->tag_set.tags) |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 6a8f95808ee0..a3860367b568 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -8671,7 +8671,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h) | |||
8671 | if ((rc != 0) || (c->err_info->CommandStatus != 0)) | 8671 | if ((rc != 0) || (c->err_info->CommandStatus != 0)) |
8672 | goto errout; | 8672 | goto errout; |
8673 | 8673 | ||
8674 | if (*options && HPSA_DIAG_OPTS_DISABLE_RLD_CACHING) | 8674 | if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING) |
8675 | goto out; | 8675 | goto out; |
8676 | 8676 | ||
8677 | errout: | 8677 | errout: |
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig index 29061467cc17..b736dbc80485 100644 --- a/drivers/scsi/mpt3sas/Kconfig +++ b/drivers/scsi/mpt3sas/Kconfig | |||
@@ -71,3 +71,12 @@ config SCSI_MPT3SAS_MAX_SGE | |||
71 | MAX_PHYS_SEGMENTS in most kernels. However in SuSE kernels this | 71 | MAX_PHYS_SEGMENTS in most kernels. However in SuSE kernels this |
72 | can be 256. However, it may decreased down to 16. Decreasing this | 72 | can be 256. However, it may decreased down to 16. Decreasing this |
73 | parameter will reduce memory requirements on a per controller instance. | 73 | parameter will reduce memory requirements on a per controller instance. |
74 | |||
75 | config SCSI_MPT2SAS | ||
76 | tristate "Legacy MPT2SAS config option" | ||
77 | default n | ||
78 | select SCSI_MPT3SAS | ||
79 | depends on PCI && SCSI | ||
80 | ---help--- | ||
81 | Dummy config option for backwards compatiblity: configure the MPT3SAS | ||
82 | driver instead. | ||
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index d95206b7e116..9ab77b06434d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
@@ -3905,8 +3905,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) | |||
3905 | * We do not expose raid functionality to upper layer for warpdrive. | 3905 | * We do not expose raid functionality to upper layer for warpdrive. |
3906 | */ | 3906 | */ |
3907 | if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev) | 3907 | if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev) |
3908 | && (sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) && | 3908 | && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32) |
3909 | scmd->cmd_len != 32) | ||
3910 | mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; | 3909 | mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; |
3911 | 3910 | ||
3912 | smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); | 3911 | smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); |
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 90fdf0e859e3..675e7fab0796 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c | |||
@@ -758,7 +758,7 @@ mvs_store_interrupt_coalescing(struct device *cdev, | |||
758 | struct device_attribute *attr, | 758 | struct device_attribute *attr, |
759 | const char *buffer, size_t size) | 759 | const char *buffer, size_t size) |
760 | { | 760 | { |
761 | int val = 0; | 761 | unsigned int val = 0; |
762 | struct mvs_info *mvi = NULL; | 762 | struct mvs_info *mvi = NULL; |
763 | struct Scsi_Host *shost = class_to_shost(cdev); | 763 | struct Scsi_Host *shost = class_to_shost(cdev); |
764 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); | 764 | struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); |
@@ -766,7 +766,7 @@ mvs_store_interrupt_coalescing(struct device *cdev, | |||
766 | if (buffer == NULL) | 766 | if (buffer == NULL) |
767 | return size; | 767 | return size; |
768 | 768 | ||
769 | if (sscanf(buffer, "%d", &val) != 1) | 769 | if (sscanf(buffer, "%u", &val) != 1) |
770 | return -EINVAL; | 770 | return -EINVAL; |
771 | 771 | ||
772 | if (val >= 0x10000) { | 772 | if (val >= 0x10000) { |
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index eb0cc5475c45..b6b4cfdd7620 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c | |||
@@ -433,7 +433,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in, | |||
433 | if (off_in < QLA82XX_PCI_CRBSPACE) | 433 | if (off_in < QLA82XX_PCI_CRBSPACE) |
434 | return -1; | 434 | return -1; |
435 | 435 | ||
436 | *off_out = (void __iomem *)(off_in - QLA82XX_PCI_CRBSPACE); | 436 | off_in -= QLA82XX_PCI_CRBSPACE; |
437 | 437 | ||
438 | /* Try direct map */ | 438 | /* Try direct map */ |
439 | m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)]; | 439 | m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)]; |
@@ -443,6 +443,7 @@ qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in, | |||
443 | return 0; | 443 | return 0; |
444 | } | 444 | } |
445 | /* Not in direct map, use crb window */ | 445 | /* Not in direct map, use crb window */ |
446 | *off_out = (void __iomem *)off_in; | ||
446 | return 1; | 447 | return 1; |
447 | } | 448 | } |
448 | 449 | ||
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index dfcc45bb03b1..d09d60293c27 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -465,8 +465,9 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = { | |||
465 | 0} }, | 465 | 0} }, |
466 | {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */ | 466 | {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */ |
467 | {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, | 467 | {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, |
468 | {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* VERIFY */ | 468 | {0, 0x2f, 0, F_D_OUT_MAYBE | FF_DIRECT_IO, NULL, NULL, /* VERIFY(10) */ |
469 | {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, | 469 | {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, |
470 | 0, 0, 0, 0, 0, 0} }, | ||
470 | {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0, | 471 | {1, 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_DIRECT_IO, resp_read_dt0, |
471 | vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0, | 472 | vl_iarr, {32, 0xc7, 0, 0, 0, 0, 0x1f, 0x18, 0x0, 0x9, 0xfe, 0, |
472 | 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */ | 473 | 0xff, 0xff, 0xff, 0xff} },/* VARIABLE LENGTH, READ(32) */ |
@@ -477,8 +478,8 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEMENT + 1] = { | |||
477 | {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, | 478 | {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, |
478 | 0} }, | 479 | 0} }, |
479 | /* 20 */ | 480 | /* 20 */ |
480 | {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ALLOW REMOVAL */ | 481 | {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */ |
481 | {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, | 482 | {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, |
482 | {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */ | 483 | {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */ |
483 | {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, | 484 | {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }, |
484 | {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */ | 485 | {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */ |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 83245391e956..054923e3393c 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -701,9 +701,12 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
701 | * strings. | 701 | * strings. |
702 | */ | 702 | */ |
703 | if (sdev->inquiry_len < 36) { | 703 | if (sdev->inquiry_len < 36) { |
704 | sdev_printk(KERN_INFO, sdev, | 704 | if (!sdev->host->short_inquiry) { |
705 | "scsi scan: INQUIRY result too short (%d)," | 705 | shost_printk(KERN_INFO, sdev->host, |
706 | " using 36\n", sdev->inquiry_len); | 706 | "scsi scan: INQUIRY result too short (%d)," |
707 | " using 36\n", sdev->inquiry_len); | ||
708 | sdev->host->short_inquiry = 1; | ||
709 | } | ||
707 | sdev->inquiry_len = 36; | 710 | sdev->inquiry_len = 36; |
708 | } | 711 | } |
709 | 712 | ||
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 8d2312239ae0..21930c9ac9cd 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
@@ -1102,6 +1102,14 @@ void __scsi_remove_device(struct scsi_device *sdev) | |||
1102 | { | 1102 | { |
1103 | struct device *dev = &sdev->sdev_gendev; | 1103 | struct device *dev = &sdev->sdev_gendev; |
1104 | 1104 | ||
1105 | /* | ||
1106 | * This cleanup path is not reentrant and while it is impossible | ||
1107 | * to get a new reference with scsi_device_get() someone can still | ||
1108 | * hold a previously acquired one. | ||
1109 | */ | ||
1110 | if (sdev->sdev_state == SDEV_DEL) | ||
1111 | return; | ||
1112 | |||
1105 | if (sdev->is_visible) { | 1113 | if (sdev->is_visible) { |
1106 | if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) | 1114 | if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0) |
1107 | return; | 1115 | return; |
@@ -1110,7 +1118,9 @@ void __scsi_remove_device(struct scsi_device *sdev) | |||
1110 | device_unregister(&sdev->sdev_dev); | 1118 | device_unregister(&sdev->sdev_dev); |
1111 | transport_remove_device(dev); | 1119 | transport_remove_device(dev); |
1112 | scsi_dh_remove_device(sdev); | 1120 | scsi_dh_remove_device(sdev); |
1113 | } | 1121 | device_del(dev); |
1122 | } else | ||
1123 | put_device(&sdev->sdev_dev); | ||
1114 | 1124 | ||
1115 | /* | 1125 | /* |
1116 | * Stop accepting new requests and wait until all queuecommand() and | 1126 | * Stop accepting new requests and wait until all queuecommand() and |
@@ -1121,16 +1131,6 @@ void __scsi_remove_device(struct scsi_device *sdev) | |||
1121 | blk_cleanup_queue(sdev->request_queue); | 1131 | blk_cleanup_queue(sdev->request_queue); |
1122 | cancel_work_sync(&sdev->requeue_work); | 1132 | cancel_work_sync(&sdev->requeue_work); |
1123 | 1133 | ||
1124 | /* | ||
1125 | * Remove the device after blk_cleanup_queue() has been called such | ||
1126 | * a possible bdi_register() call with the same name occurs after | ||
1127 | * blk_cleanup_queue() has called bdi_destroy(). | ||
1128 | */ | ||
1129 | if (sdev->is_visible) | ||
1130 | device_del(dev); | ||
1131 | else | ||
1132 | put_device(&sdev->sdev_dev); | ||
1133 | |||
1134 | if (sdev->host->hostt->slave_destroy) | 1134 | if (sdev->host->hostt->slave_destroy) |
1135 | sdev->host->hostt->slave_destroy(sdev); | 1135 | sdev->host->hostt->slave_destroy(sdev); |
1136 | transport_destroy_device(dev); | 1136 | transport_destroy_device(dev); |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 54519804c46a..3d22fc3e3c1a 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -638,11 +638,24 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) | |||
638 | unsigned int max_blocks = 0; | 638 | unsigned int max_blocks = 0; |
639 | 639 | ||
640 | q->limits.discard_zeroes_data = 0; | 640 | q->limits.discard_zeroes_data = 0; |
641 | q->limits.discard_alignment = sdkp->unmap_alignment * | 641 | |
642 | logical_block_size; | 642 | /* |
643 | q->limits.discard_granularity = | 643 | * When LBPRZ is reported, discard alignment and granularity |
644 | max(sdkp->physical_block_size, | 644 | * must be fixed to the logical block size. Otherwise the block |
645 | sdkp->unmap_granularity * logical_block_size); | 645 | * layer will drop misaligned portions of the request which can |
646 | * lead to data corruption. If LBPRZ is not set, we honor the | ||
647 | * device preference. | ||
648 | */ | ||
649 | if (sdkp->lbprz) { | ||
650 | q->limits.discard_alignment = 0; | ||
651 | q->limits.discard_granularity = 1; | ||
652 | } else { | ||
653 | q->limits.discard_alignment = sdkp->unmap_alignment * | ||
654 | logical_block_size; | ||
655 | q->limits.discard_granularity = | ||
656 | max(sdkp->physical_block_size, | ||
657 | sdkp->unmap_granularity * logical_block_size); | ||
658 | } | ||
646 | 659 | ||
647 | sdkp->provisioning_mode = mode; | 660 | sdkp->provisioning_mode = mode; |
648 | 661 | ||
@@ -2321,11 +2334,8 @@ got_data: | |||
2321 | } | 2334 | } |
2322 | } | 2335 | } |
2323 | 2336 | ||
2324 | if (sdkp->capacity > 0xffffffff) { | 2337 | if (sdkp->capacity > 0xffffffff) |
2325 | sdp->use_16_for_rw = 1; | 2338 | sdp->use_16_for_rw = 1; |
2326 | sdkp->max_xfer_blocks = SD_MAX_XFER_BLOCKS; | ||
2327 | } else | ||
2328 | sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS; | ||
2329 | 2339 | ||
2330 | /* Rescale capacity to 512-byte units */ | 2340 | /* Rescale capacity to 512-byte units */ |
2331 | if (sector_size == 4096) | 2341 | if (sector_size == 4096) |
@@ -2642,7 +2652,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) | |||
2642 | { | 2652 | { |
2643 | unsigned int sector_sz = sdkp->device->sector_size; | 2653 | unsigned int sector_sz = sdkp->device->sector_size; |
2644 | const int vpd_len = 64; | 2654 | const int vpd_len = 64; |
2645 | u32 max_xfer_length; | ||
2646 | unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); | 2655 | unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL); |
2647 | 2656 | ||
2648 | if (!buffer || | 2657 | if (!buffer || |
@@ -2650,14 +2659,11 @@ static void sd_read_block_limits(struct scsi_disk *sdkp) | |||
2650 | scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len)) | 2659 | scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len)) |
2651 | goto out; | 2660 | goto out; |
2652 | 2661 | ||
2653 | max_xfer_length = get_unaligned_be32(&buffer[8]); | ||
2654 | if (max_xfer_length) | ||
2655 | sdkp->max_xfer_blocks = max_xfer_length; | ||
2656 | |||
2657 | blk_queue_io_min(sdkp->disk->queue, | 2662 | blk_queue_io_min(sdkp->disk->queue, |
2658 | get_unaligned_be16(&buffer[6]) * sector_sz); | 2663 | get_unaligned_be16(&buffer[6]) * sector_sz); |
2659 | blk_queue_io_opt(sdkp->disk->queue, | 2664 | |
2660 | get_unaligned_be32(&buffer[12]) * sector_sz); | 2665 | sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]); |
2666 | sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]); | ||
2661 | 2667 | ||
2662 | if (buffer[3] == 0x3c) { | 2668 | if (buffer[3] == 0x3c) { |
2663 | unsigned int lba_count, desc_count; | 2669 | unsigned int lba_count, desc_count; |
@@ -2806,6 +2812,11 @@ static int sd_try_extended_inquiry(struct scsi_device *sdp) | |||
2806 | return 0; | 2812 | return 0; |
2807 | } | 2813 | } |
2808 | 2814 | ||
2815 | static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks) | ||
2816 | { | ||
2817 | return blocks << (ilog2(sdev->sector_size) - 9); | ||
2818 | } | ||
2819 | |||
2809 | /** | 2820 | /** |
2810 | * sd_revalidate_disk - called the first time a new disk is seen, | 2821 | * sd_revalidate_disk - called the first time a new disk is seen, |
2811 | * performs disk spin up, read_capacity, etc. | 2822 | * performs disk spin up, read_capacity, etc. |
@@ -2815,8 +2826,9 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
2815 | { | 2826 | { |
2816 | struct scsi_disk *sdkp = scsi_disk(disk); | 2827 | struct scsi_disk *sdkp = scsi_disk(disk); |
2817 | struct scsi_device *sdp = sdkp->device; | 2828 | struct scsi_device *sdp = sdkp->device; |
2829 | struct request_queue *q = sdkp->disk->queue; | ||
2818 | unsigned char *buffer; | 2830 | unsigned char *buffer; |
2819 | unsigned int max_xfer; | 2831 | unsigned int dev_max, rw_max; |
2820 | 2832 | ||
2821 | SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, | 2833 | SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, |
2822 | "sd_revalidate_disk\n")); | 2834 | "sd_revalidate_disk\n")); |
@@ -2864,11 +2876,26 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
2864 | */ | 2876 | */ |
2865 | sd_set_flush_flag(sdkp); | 2877 | sd_set_flush_flag(sdkp); |
2866 | 2878 | ||
2867 | max_xfer = sdkp->max_xfer_blocks; | 2879 | /* Initial block count limit based on CDB TRANSFER LENGTH field size. */ |
2868 | max_xfer <<= ilog2(sdp->sector_size) - 9; | 2880 | dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS; |
2881 | |||
2882 | /* Some devices report a maximum block count for READ/WRITE requests. */ | ||
2883 | dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks); | ||
2884 | q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); | ||
2885 | |||
2886 | /* | ||
2887 | * Use the device's preferred I/O size for reads and writes | ||
2888 | * unless the reported value is unreasonably large (or garbage). | ||
2889 | */ | ||
2890 | if (sdkp->opt_xfer_blocks && sdkp->opt_xfer_blocks <= dev_max && | ||
2891 | sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS) | ||
2892 | rw_max = q->limits.io_opt = | ||
2893 | logical_to_sectors(sdp, sdkp->opt_xfer_blocks); | ||
2894 | else | ||
2895 | rw_max = BLK_DEF_MAX_SECTORS; | ||
2869 | 2896 | ||
2870 | sdkp->disk->queue->limits.max_sectors = | 2897 | /* Combine with controller limits */ |
2871 | min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer); | 2898 | q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); |
2872 | 2899 | ||
2873 | set_capacity(disk, sdkp->capacity); | 2900 | set_capacity(disk, sdkp->capacity); |
2874 | sd_config_write_same(sdkp); | 2901 | sd_config_write_same(sdkp); |
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 63ba5ca7f9a1..5f2a84aff29f 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h | |||
@@ -67,6 +67,7 @@ struct scsi_disk { | |||
67 | atomic_t openers; | 67 | atomic_t openers; |
68 | sector_t capacity; /* size in 512-byte sectors */ | 68 | sector_t capacity; /* size in 512-byte sectors */ |
69 | u32 max_xfer_blocks; | 69 | u32 max_xfer_blocks; |
70 | u32 opt_xfer_blocks; | ||
70 | u32 max_ws_blocks; | 71 | u32 max_ws_blocks; |
71 | u32 max_unmap_blocks; | 72 | u32 max_unmap_blocks; |
72 | u32 unmap_granularity; | 73 | u32 unmap_granularity; |
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index e0a1e52a04e7..2e522951b619 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -4083,6 +4083,7 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew) | |||
4083 | } | 4083 | } |
4084 | cdev->owner = THIS_MODULE; | 4084 | cdev->owner = THIS_MODULE; |
4085 | cdev->ops = &st_fops; | 4085 | cdev->ops = &st_fops; |
4086 | STm->cdevs[rew] = cdev; | ||
4086 | 4087 | ||
4087 | error = cdev_add(cdev, cdev_devno, 1); | 4088 | error = cdev_add(cdev, cdev_devno, 1); |
4088 | if (error) { | 4089 | if (error) { |
@@ -4091,7 +4092,6 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew) | |||
4091 | pr_err("st%d: Device not attached.\n", dev_num); | 4092 | pr_err("st%d: Device not attached.\n", dev_num); |
4092 | goto out_free; | 4093 | goto out_free; |
4093 | } | 4094 | } |
4094 | STm->cdevs[rew] = cdev; | ||
4095 | 4095 | ||
4096 | i = mode << (4 - ST_NBR_MODE_BITS); | 4096 | i = mode << (4 - ST_NBR_MODE_BITS); |
4097 | snprintf(name, 10, "%s%s%s", rew ? "n" : "", | 4097 | snprintf(name, 10, "%s%s%s", rew ? "n" : "", |
@@ -4110,8 +4110,9 @@ static int create_one_cdev(struct scsi_tape *tape, int mode, int rew) | |||
4110 | return 0; | 4110 | return 0; |
4111 | out_free: | 4111 | out_free: |
4112 | cdev_del(STm->cdevs[rew]); | 4112 | cdev_del(STm->cdevs[rew]); |
4113 | STm->cdevs[rew] = NULL; | ||
4114 | out: | 4113 | out: |
4114 | STm->cdevs[rew] = NULL; | ||
4115 | STm->devs[rew] = NULL; | ||
4115 | return error; | 4116 | return error; |
4116 | } | 4117 | } |
4117 | 4118 | ||
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index 06858e04ec59..bf9a610e5b89 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c | |||
@@ -562,8 +562,8 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) | |||
562 | goto out_clk_disable; | 562 | goto out_clk_disable; |
563 | } | 563 | } |
564 | 564 | ||
565 | dev_info(dev, "at 0x%08x (irq %d, FIFOs size %d)\n", | 565 | dev_info(dev, "at %pr (irq %d, FIFOs size %d)\n", |
566 | r->start, irq, bs->fifo_size); | 566 | r, irq, bs->fifo_size); |
567 | 567 | ||
568 | return 0; | 568 | return 0; |
569 | 569 | ||
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 563954a61424..7840067062a8 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c | |||
@@ -410,7 +410,7 @@ static int mtk_spi_setup(struct spi_device *spi) | |||
410 | if (!spi->controller_data) | 410 | if (!spi->controller_data) |
411 | spi->controller_data = (void *)&mtk_default_chip_info; | 411 | spi->controller_data = (void *)&mtk_default_chip_info; |
412 | 412 | ||
413 | if (mdata->dev_comp->need_pad_sel) | 413 | if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio)) |
414 | gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); | 414 | gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); |
415 | 415 | ||
416 | return 0; | 416 | return 0; |
@@ -632,13 +632,23 @@ static int mtk_spi_probe(struct platform_device *pdev) | |||
632 | goto err_put_master; | 632 | goto err_put_master; |
633 | } | 633 | } |
634 | 634 | ||
635 | for (i = 0; i < master->num_chipselect; i++) { | 635 | if (!master->cs_gpios && master->num_chipselect > 1) { |
636 | ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i], | 636 | dev_err(&pdev->dev, |
637 | dev_name(&pdev->dev)); | 637 | "cs_gpios not specified and num_chipselect > 1\n"); |
638 | if (ret) { | 638 | ret = -EINVAL; |
639 | dev_err(&pdev->dev, | 639 | goto err_put_master; |
640 | "can't get CS GPIO %i\n", i); | 640 | } |
641 | goto err_put_master; | 641 | |
642 | if (master->cs_gpios) { | ||
643 | for (i = 0; i < master->num_chipselect; i++) { | ||
644 | ret = devm_gpio_request(&pdev->dev, | ||
645 | master->cs_gpios[i], | ||
646 | dev_name(&pdev->dev)); | ||
647 | if (ret) { | ||
648 | dev_err(&pdev->dev, | ||
649 | "can't get CS GPIO %i\n", i); | ||
650 | goto err_put_master; | ||
651 | } | ||
642 | } | 652 | } |
643 | } | 653 | } |
644 | } | 654 | } |
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 94af80676684..5e5fd77e2711 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c | |||
@@ -1171,19 +1171,31 @@ err_no_rxchan: | |||
1171 | static int pl022_dma_autoprobe(struct pl022 *pl022) | 1171 | static int pl022_dma_autoprobe(struct pl022 *pl022) |
1172 | { | 1172 | { |
1173 | struct device *dev = &pl022->adev->dev; | 1173 | struct device *dev = &pl022->adev->dev; |
1174 | struct dma_chan *chan; | ||
1175 | int err; | ||
1174 | 1176 | ||
1175 | /* automatically configure DMA channels from platform, normally using DT */ | 1177 | /* automatically configure DMA channels from platform, normally using DT */ |
1176 | pl022->dma_rx_channel = dma_request_slave_channel(dev, "rx"); | 1178 | chan = dma_request_slave_channel_reason(dev, "rx"); |
1177 | if (!pl022->dma_rx_channel) | 1179 | if (IS_ERR(chan)) { |
1180 | err = PTR_ERR(chan); | ||
1178 | goto err_no_rxchan; | 1181 | goto err_no_rxchan; |
1182 | } | ||
1183 | |||
1184 | pl022->dma_rx_channel = chan; | ||
1179 | 1185 | ||
1180 | pl022->dma_tx_channel = dma_request_slave_channel(dev, "tx"); | 1186 | chan = dma_request_slave_channel_reason(dev, "tx"); |
1181 | if (!pl022->dma_tx_channel) | 1187 | if (IS_ERR(chan)) { |
1188 | err = PTR_ERR(chan); | ||
1182 | goto err_no_txchan; | 1189 | goto err_no_txchan; |
1190 | } | ||
1191 | |||
1192 | pl022->dma_tx_channel = chan; | ||
1183 | 1193 | ||
1184 | pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); | 1194 | pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); |
1185 | if (!pl022->dummypage) | 1195 | if (!pl022->dummypage) { |
1196 | err = -ENOMEM; | ||
1186 | goto err_no_dummypage; | 1197 | goto err_no_dummypage; |
1198 | } | ||
1187 | 1199 | ||
1188 | return 0; | 1200 | return 0; |
1189 | 1201 | ||
@@ -1194,7 +1206,7 @@ err_no_txchan: | |||
1194 | dma_release_channel(pl022->dma_rx_channel); | 1206 | dma_release_channel(pl022->dma_rx_channel); |
1195 | pl022->dma_rx_channel = NULL; | 1207 | pl022->dma_rx_channel = NULL; |
1196 | err_no_rxchan: | 1208 | err_no_rxchan: |
1197 | return -ENODEV; | 1209 | return err; |
1198 | } | 1210 | } |
1199 | 1211 | ||
1200 | static void terminate_dma(struct pl022 *pl022) | 1212 | static void terminate_dma(struct pl022 *pl022) |
@@ -2236,6 +2248,10 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id) | |||
2236 | 2248 | ||
2237 | /* Get DMA channels, try autoconfiguration first */ | 2249 | /* Get DMA channels, try autoconfiguration first */ |
2238 | status = pl022_dma_autoprobe(pl022); | 2250 | status = pl022_dma_autoprobe(pl022); |
2251 | if (status == -EPROBE_DEFER) { | ||
2252 | dev_dbg(dev, "deferring probe to get DMA channel\n"); | ||
2253 | goto err_no_irq; | ||
2254 | } | ||
2239 | 2255 | ||
2240 | /* If that failed, use channels from platform_info */ | 2256 | /* If that failed, use channels from platform_info */ |
2241 | if (status == 0) | 2257 | if (status == 0) |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index e2415be209d5..2b0a8ec3affb 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -376,6 +376,7 @@ static void spi_drv_shutdown(struct device *dev) | |||
376 | 376 | ||
377 | /** | 377 | /** |
378 | * __spi_register_driver - register a SPI driver | 378 | * __spi_register_driver - register a SPI driver |
379 | * @owner: owner module of the driver to register | ||
379 | * @sdrv: the driver to register | 380 | * @sdrv: the driver to register |
380 | * Context: can sleep | 381 | * Context: can sleep |
381 | * | 382 | * |
@@ -2130,6 +2131,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) | |||
2130 | * Set transfer tx_nbits and rx_nbits as single transfer default | 2131 | * Set transfer tx_nbits and rx_nbits as single transfer default |
2131 | * (SPI_NBITS_SINGLE) if it is not set for this transfer. | 2132 | * (SPI_NBITS_SINGLE) if it is not set for this transfer. |
2132 | */ | 2133 | */ |
2134 | message->frame_length = 0; | ||
2133 | list_for_each_entry(xfer, &message->transfers, transfer_list) { | 2135 | list_for_each_entry(xfer, &message->transfers, transfer_list) { |
2134 | message->frame_length += xfer->len; | 2136 | message->frame_length += xfer->len; |
2135 | if (!xfer->bits_per_word) | 2137 | if (!xfer->bits_per_word) |
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h index f5d741f25ffd..485ab2670918 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h | |||
@@ -110,7 +110,6 @@ struct libcfs_ioctl_handler { | |||
110 | #define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, long) | 110 | #define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, long) |
111 | #define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, long) | 111 | #define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, long) |
112 | #define IOC_LIBCFS_MEMHOG _IOWR('e', 36, long) | 112 | #define IOC_LIBCFS_MEMHOG _IOWR('e', 36, long) |
113 | #define IOC_LIBCFS_PING_TEST _IOWR('e', 37, long) | ||
114 | /* lnet ioctls */ | 113 | /* lnet ioctls */ |
115 | #define IOC_LIBCFS_GET_NI _IOWR('e', 50, long) | 114 | #define IOC_LIBCFS_GET_NI _IOWR('e', 50, long) |
116 | #define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long) | 115 | #define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long) |
diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c index 07a68594c279..e7c2b26156b9 100644 --- a/drivers/staging/lustre/lustre/libcfs/module.c +++ b/drivers/staging/lustre/lustre/libcfs/module.c | |||
@@ -274,23 +274,6 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile, unsigned long cmd, | |||
274 | } | 274 | } |
275 | break; | 275 | break; |
276 | 276 | ||
277 | case IOC_LIBCFS_PING_TEST: { | ||
278 | extern void (kping_client)(struct libcfs_ioctl_data *); | ||
279 | void (*ping)(struct libcfs_ioctl_data *); | ||
280 | |||
281 | CDEBUG(D_IOCTL, "doing %d pings to nid %s (%s)\n", | ||
282 | data->ioc_count, libcfs_nid2str(data->ioc_nid), | ||
283 | libcfs_nid2str(data->ioc_nid)); | ||
284 | ping = symbol_get(kping_client); | ||
285 | if (!ping) | ||
286 | CERROR("symbol_get failed\n"); | ||
287 | else { | ||
288 | ping(data); | ||
289 | symbol_put(kping_client); | ||
290 | } | ||
291 | return 0; | ||
292 | } | ||
293 | |||
294 | default: { | 277 | default: { |
295 | struct libcfs_ioctl_handler *hand; | 278 | struct libcfs_ioctl_handler *hand; |
296 | 279 | ||
diff --git a/fs/direct-io.c b/fs/direct-io.c index cb5337d8c273..1c75a3a07f8f 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -1169,6 +1169,15 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, | |||
1169 | } | 1169 | } |
1170 | } | 1170 | } |
1171 | 1171 | ||
1172 | /* Once we sampled i_size check for reads beyond EOF */ | ||
1173 | dio->i_size = i_size_read(inode); | ||
1174 | if (iov_iter_rw(iter) == READ && offset >= dio->i_size) { | ||
1175 | if (dio->flags & DIO_LOCKING) | ||
1176 | mutex_unlock(&inode->i_mutex); | ||
1177 | kmem_cache_free(dio_cache, dio); | ||
1178 | goto out; | ||
1179 | } | ||
1180 | |||
1172 | /* | 1181 | /* |
1173 | * For file extending writes updating i_size before data writeouts | 1182 | * For file extending writes updating i_size before data writeouts |
1174 | * complete can expose uninitialized blocks in dumb filesystems. | 1183 | * complete can expose uninitialized blocks in dumb filesystems. |
@@ -1222,7 +1231,6 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, | |||
1222 | sdio.next_block_for_io = -1; | 1231 | sdio.next_block_for_io = -1; |
1223 | 1232 | ||
1224 | dio->iocb = iocb; | 1233 | dio->iocb = iocb; |
1225 | dio->i_size = i_size_read(inode); | ||
1226 | 1234 | ||
1227 | spin_lock_init(&dio->bio_lock); | 1235 | spin_lock_init(&dio->bio_lock); |
1228 | dio->refcount = 1; | 1236 | dio->refcount = 1; |
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 87e9d796cf7d..3a37bd3f9637 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c | |||
@@ -421,7 +421,7 @@ static void lowcomms_write_space(struct sock *sk) | |||
421 | 421 | ||
422 | if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) { | 422 | if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) { |
423 | con->sock->sk->sk_write_pending--; | 423 | con->sock->sk->sk_write_pending--; |
424 | clear_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags); | 424 | clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags); |
425 | } | 425 | } |
426 | 426 | ||
427 | if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) | 427 | if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) |
@@ -1448,7 +1448,7 @@ static void send_to_sock(struct connection *con) | |||
1448 | msg_flags); | 1448 | msg_flags); |
1449 | if (ret == -EAGAIN || ret == 0) { | 1449 | if (ret == -EAGAIN || ret == 0) { |
1450 | if (ret == -EAGAIN && | 1450 | if (ret == -EAGAIN && |
1451 | test_bit(SOCK_ASYNC_NOSPACE, &con->sock->flags) && | 1451 | test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) && |
1452 | !test_and_set_bit(CF_APP_LIMITED, &con->flags)) { | 1452 | !test_and_set_bit(CF_APP_LIMITED, &con->flags)) { |
1453 | /* Notify TCP that we're limited by the | 1453 | /* Notify TCP that we're limited by the |
1454 | * application window size. | 1454 | * application window size. |
diff --git a/fs/namei.c b/fs/namei.c index d84d7c7515fc..0c3974cd3ecd 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1996,7 +1996,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags) | |||
1996 | nd->last_type = LAST_ROOT; /* if there are only slashes... */ | 1996 | nd->last_type = LAST_ROOT; /* if there are only slashes... */ |
1997 | nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; | 1997 | nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; |
1998 | nd->depth = 0; | 1998 | nd->depth = 0; |
1999 | nd->total_link_count = 0; | ||
2000 | if (flags & LOOKUP_ROOT) { | 1999 | if (flags & LOOKUP_ROOT) { |
2001 | struct dentry *root = nd->root.dentry; | 2000 | struct dentry *root = nd->root.dentry; |
2002 | struct inode *inode = root->d_inode; | 2001 | struct inode *inode = root->d_inode; |
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 871fcb67be97..0a8983492d91 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c | |||
@@ -195,8 +195,7 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat) | |||
195 | 195 | ||
196 | static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, | 196 | static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, |
197 | struct dentry *dentry, struct path *lowerpath, | 197 | struct dentry *dentry, struct path *lowerpath, |
198 | struct kstat *stat, struct iattr *attr, | 198 | struct kstat *stat, const char *link) |
199 | const char *link) | ||
200 | { | 199 | { |
201 | struct inode *wdir = workdir->d_inode; | 200 | struct inode *wdir = workdir->d_inode; |
202 | struct inode *udir = upperdir->d_inode; | 201 | struct inode *udir = upperdir->d_inode; |
@@ -240,8 +239,6 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, | |||
240 | 239 | ||
241 | mutex_lock(&newdentry->d_inode->i_mutex); | 240 | mutex_lock(&newdentry->d_inode->i_mutex); |
242 | err = ovl_set_attr(newdentry, stat); | 241 | err = ovl_set_attr(newdentry, stat); |
243 | if (!err && attr) | ||
244 | err = notify_change(newdentry, attr, NULL); | ||
245 | mutex_unlock(&newdentry->d_inode->i_mutex); | 242 | mutex_unlock(&newdentry->d_inode->i_mutex); |
246 | if (err) | 243 | if (err) |
247 | goto out_cleanup; | 244 | goto out_cleanup; |
@@ -286,8 +283,7 @@ out_cleanup: | |||
286 | * that point the file will have already been copied up anyway. | 283 | * that point the file will have already been copied up anyway. |
287 | */ | 284 | */ |
288 | int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, | 285 | int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, |
289 | struct path *lowerpath, struct kstat *stat, | 286 | struct path *lowerpath, struct kstat *stat) |
290 | struct iattr *attr) | ||
291 | { | 287 | { |
292 | struct dentry *workdir = ovl_workdir(dentry); | 288 | struct dentry *workdir = ovl_workdir(dentry); |
293 | int err; | 289 | int err; |
@@ -345,26 +341,19 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, | |||
345 | } | 341 | } |
346 | upperdentry = ovl_dentry_upper(dentry); | 342 | upperdentry = ovl_dentry_upper(dentry); |
347 | if (upperdentry) { | 343 | if (upperdentry) { |
348 | unlock_rename(workdir, upperdir); | 344 | /* Raced with another copy-up? Nothing to do, then... */ |
349 | err = 0; | 345 | err = 0; |
350 | /* Raced with another copy-up? Do the setattr here */ | 346 | goto out_unlock; |
351 | if (attr) { | ||
352 | mutex_lock(&upperdentry->d_inode->i_mutex); | ||
353 | err = notify_change(upperdentry, attr, NULL); | ||
354 | mutex_unlock(&upperdentry->d_inode->i_mutex); | ||
355 | } | ||
356 | goto out_put_cred; | ||
357 | } | 347 | } |
358 | 348 | ||
359 | err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath, | 349 | err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath, |
360 | stat, attr, link); | 350 | stat, link); |
361 | if (!err) { | 351 | if (!err) { |
362 | /* Restore timestamps on parent (best effort) */ | 352 | /* Restore timestamps on parent (best effort) */ |
363 | ovl_set_timestamps(upperdir, &pstat); | 353 | ovl_set_timestamps(upperdir, &pstat); |
364 | } | 354 | } |
365 | out_unlock: | 355 | out_unlock: |
366 | unlock_rename(workdir, upperdir); | 356 | unlock_rename(workdir, upperdir); |
367 | out_put_cred: | ||
368 | revert_creds(old_cred); | 357 | revert_creds(old_cred); |
369 | put_cred(override_cred); | 358 | put_cred(override_cred); |
370 | 359 | ||
@@ -406,7 +395,7 @@ int ovl_copy_up(struct dentry *dentry) | |||
406 | ovl_path_lower(next, &lowerpath); | 395 | ovl_path_lower(next, &lowerpath); |
407 | err = vfs_getattr(&lowerpath, &stat); | 396 | err = vfs_getattr(&lowerpath, &stat); |
408 | if (!err) | 397 | if (!err) |
409 | err = ovl_copy_up_one(parent, next, &lowerpath, &stat, NULL); | 398 | err = ovl_copy_up_one(parent, next, &lowerpath, &stat); |
410 | 399 | ||
411 | dput(parent); | 400 | dput(parent); |
412 | dput(next); | 401 | dput(next); |
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index ec0c2a050043..4060ffde8722 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c | |||
@@ -12,8 +12,7 @@ | |||
12 | #include <linux/xattr.h> | 12 | #include <linux/xattr.h> |
13 | #include "overlayfs.h" | 13 | #include "overlayfs.h" |
14 | 14 | ||
15 | static int ovl_copy_up_last(struct dentry *dentry, struct iattr *attr, | 15 | static int ovl_copy_up_truncate(struct dentry *dentry) |
16 | bool no_data) | ||
17 | { | 16 | { |
18 | int err; | 17 | int err; |
19 | struct dentry *parent; | 18 | struct dentry *parent; |
@@ -30,10 +29,8 @@ static int ovl_copy_up_last(struct dentry *dentry, struct iattr *attr, | |||
30 | if (err) | 29 | if (err) |
31 | goto out_dput_parent; | 30 | goto out_dput_parent; |
32 | 31 | ||
33 | if (no_data) | 32 | stat.size = 0; |
34 | stat.size = 0; | 33 | err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat); |
35 | |||
36 | err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat, attr); | ||
37 | 34 | ||
38 | out_dput_parent: | 35 | out_dput_parent: |
39 | dput(parent); | 36 | dput(parent); |
@@ -49,13 +46,13 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr) | |||
49 | if (err) | 46 | if (err) |
50 | goto out; | 47 | goto out; |
51 | 48 | ||
52 | upperdentry = ovl_dentry_upper(dentry); | 49 | err = ovl_copy_up(dentry); |
53 | if (upperdentry) { | 50 | if (!err) { |
51 | upperdentry = ovl_dentry_upper(dentry); | ||
52 | |||
54 | mutex_lock(&upperdentry->d_inode->i_mutex); | 53 | mutex_lock(&upperdentry->d_inode->i_mutex); |
55 | err = notify_change(upperdentry, attr, NULL); | 54 | err = notify_change(upperdentry, attr, NULL); |
56 | mutex_unlock(&upperdentry->d_inode->i_mutex); | 55 | mutex_unlock(&upperdentry->d_inode->i_mutex); |
57 | } else { | ||
58 | err = ovl_copy_up_last(dentry, attr, false); | ||
59 | } | 56 | } |
60 | ovl_drop_write(dentry); | 57 | ovl_drop_write(dentry); |
61 | out: | 58 | out: |
@@ -353,7 +350,7 @@ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags) | |||
353 | return ERR_PTR(err); | 350 | return ERR_PTR(err); |
354 | 351 | ||
355 | if (file_flags & O_TRUNC) | 352 | if (file_flags & O_TRUNC) |
356 | err = ovl_copy_up_last(dentry, NULL, true); | 353 | err = ovl_copy_up_truncate(dentry); |
357 | else | 354 | else |
358 | err = ovl_copy_up(dentry); | 355 | err = ovl_copy_up(dentry); |
359 | ovl_drop_write(dentry); | 356 | ovl_drop_write(dentry); |
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index ea5a40b06e3a..e17154aeaae4 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h | |||
@@ -194,7 +194,6 @@ void ovl_cleanup(struct inode *dir, struct dentry *dentry); | |||
194 | /* copy_up.c */ | 194 | /* copy_up.c */ |
195 | int ovl_copy_up(struct dentry *dentry); | 195 | int ovl_copy_up(struct dentry *dentry); |
196 | int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, | 196 | int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, |
197 | struct path *lowerpath, struct kstat *stat, | 197 | struct path *lowerpath, struct kstat *stat); |
198 | struct iattr *attr); | ||
199 | int ovl_copy_xattr(struct dentry *old, struct dentry *new); | 198 | int ovl_copy_xattr(struct dentry *old, struct dentry *new); |
200 | int ovl_set_attr(struct dentry *upper, struct kstat *stat); | 199 | int ovl_set_attr(struct dentry *upper, struct kstat *stat); |
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 8e1df1f7057c..a8e01aaca087 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -309,6 +309,11 @@ struct drm_file { | |||
309 | unsigned universal_planes:1; | 309 | unsigned universal_planes:1; |
310 | /* true if client understands atomic properties */ | 310 | /* true if client understands atomic properties */ |
311 | unsigned atomic:1; | 311 | unsigned atomic:1; |
312 | /* | ||
313 | * This client is allowed to gain master privileges for @master. | ||
314 | * Protected by struct drm_device::master_mutex. | ||
315 | */ | ||
316 | unsigned allowed_master:1; | ||
312 | 317 | ||
313 | struct pid *pid; | 318 | struct pid *pid; |
314 | kuid_t uid; | 319 | kuid_t uid; |
@@ -912,6 +917,7 @@ extern int drm_open(struct inode *inode, struct file *filp); | |||
912 | extern ssize_t drm_read(struct file *filp, char __user *buffer, | 917 | extern ssize_t drm_read(struct file *filp, char __user *buffer, |
913 | size_t count, loff_t *offset); | 918 | size_t count, loff_t *offset); |
914 | extern int drm_release(struct inode *inode, struct file *filp); | 919 | extern int drm_release(struct inode *inode, struct file *filp); |
920 | extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv); | ||
915 | 921 | ||
916 | /* Mapping support (drm_vm.h) */ | 922 | /* Mapping support (drm_vm.h) */ |
917 | extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); | 923 | extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); |
@@ -949,6 +955,10 @@ extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, | |||
949 | struct drm_pending_vblank_event *e); | 955 | struct drm_pending_vblank_event *e); |
950 | extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc, | 956 | extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc, |
951 | struct drm_pending_vblank_event *e); | 957 | struct drm_pending_vblank_event *e); |
958 | extern void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe, | ||
959 | struct drm_pending_vblank_event *e); | ||
960 | extern void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, | ||
961 | struct drm_pending_vblank_event *e); | ||
952 | extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); | 962 | extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe); |
953 | extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc); | 963 | extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc); |
954 | extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe); | 964 | extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe); |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 054833939995..1991aea2ec4c 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -870,8 +870,8 @@ static inline int acpi_dev_get_property(struct acpi_device *adev, | |||
870 | } | 870 | } |
871 | 871 | ||
872 | static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode, | 872 | static inline int acpi_node_get_property_reference(struct fwnode_handle *fwnode, |
873 | const char *name, const char *cells_name, | 873 | const char *name, size_t index, |
874 | size_t index, struct acpi_reference_args *args) | 874 | struct acpi_reference_args *args) |
875 | { | 875 | { |
876 | return -ENXIO; | 876 | return -ENXIO; |
877 | } | 877 | } |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c0d2b7927c1f..0169ba2e2e64 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -254,6 +254,7 @@ struct queue_limits { | |||
254 | unsigned long virt_boundary_mask; | 254 | unsigned long virt_boundary_mask; |
255 | 255 | ||
256 | unsigned int max_hw_sectors; | 256 | unsigned int max_hw_sectors; |
257 | unsigned int max_dev_sectors; | ||
257 | unsigned int chunk_sectors; | 258 | unsigned int chunk_sectors; |
258 | unsigned int max_sectors; | 259 | unsigned int max_sectors; |
259 | unsigned int max_segment_size; | 260 | unsigned int max_segment_size; |
@@ -773,7 +774,6 @@ extern void blk_rq_set_block_pc(struct request *); | |||
773 | extern void blk_requeue_request(struct request_queue *, struct request *); | 774 | extern void blk_requeue_request(struct request_queue *, struct request *); |
774 | extern void blk_add_request_payload(struct request *rq, struct page *page, | 775 | extern void blk_add_request_payload(struct request *rq, struct page *page, |
775 | unsigned int len); | 776 | unsigned int len); |
776 | extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); | ||
777 | extern int blk_lld_busy(struct request_queue *q); | 777 | extern int blk_lld_busy(struct request_queue *q); |
778 | extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, | 778 | extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, |
779 | struct bio_set *bs, gfp_t gfp_mask, | 779 | struct bio_set *bs, gfp_t gfp_mask, |
@@ -960,7 +960,6 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *, | |||
960 | extern void blk_cleanup_queue(struct request_queue *); | 960 | extern void blk_cleanup_queue(struct request_queue *); |
961 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); | 961 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); |
962 | extern void blk_queue_bounce_limit(struct request_queue *, u64); | 962 | extern void blk_queue_bounce_limit(struct request_queue *, u64); |
963 | extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); | ||
964 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); | 963 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); |
965 | extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); | 964 | extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); |
966 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); | 965 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); |
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index de464e6683b6..83d1926c61e4 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
@@ -40,6 +40,7 @@ struct bpf_map { | |||
40 | struct user_struct *user; | 40 | struct user_struct *user; |
41 | const struct bpf_map_ops *ops; | 41 | const struct bpf_map_ops *ops; |
42 | struct work_struct work; | 42 | struct work_struct work; |
43 | atomic_t usercnt; | ||
43 | }; | 44 | }; |
44 | 45 | ||
45 | struct bpf_map_type_list { | 46 | struct bpf_map_type_list { |
@@ -167,8 +168,10 @@ struct bpf_prog *bpf_prog_get(u32 ufd); | |||
167 | void bpf_prog_put(struct bpf_prog *prog); | 168 | void bpf_prog_put(struct bpf_prog *prog); |
168 | void bpf_prog_put_rcu(struct bpf_prog *prog); | 169 | void bpf_prog_put_rcu(struct bpf_prog *prog); |
169 | 170 | ||
170 | struct bpf_map *bpf_map_get(u32 ufd); | 171 | struct bpf_map *bpf_map_get_with_uref(u32 ufd); |
171 | struct bpf_map *__bpf_map_get(struct fd f); | 172 | struct bpf_map *__bpf_map_get(struct fd f); |
173 | void bpf_map_inc(struct bpf_map *map, bool uref); | ||
174 | void bpf_map_put_with_uref(struct bpf_map *map); | ||
172 | void bpf_map_put(struct bpf_map *map); | 175 | void bpf_map_put(struct bpf_map *map); |
173 | 176 | ||
174 | extern int sysctl_unprivileged_bpf_disabled; | 177 | extern int sysctl_unprivileged_bpf_disabled; |
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index ef4c5b1a860f..177c7680c1a8 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -77,6 +77,7 @@ struct cpufreq_policy { | |||
77 | unsigned int suspend_freq; /* freq to set during suspend */ | 77 | unsigned int suspend_freq; /* freq to set during suspend */ |
78 | 78 | ||
79 | unsigned int policy; /* see above */ | 79 | unsigned int policy; /* see above */ |
80 | unsigned int last_policy; /* policy before unplug */ | ||
80 | struct cpufreq_governor *governor; /* see below */ | 81 | struct cpufreq_governor *governor; /* see below */ |
81 | void *governor_data; | 82 | void *governor_data; |
82 | bool governor_enabled; /* governor start/stop flag */ | 83 | bool governor_enabled; /* governor start/stop flag */ |
diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h index cc92268af89a..6ac3cad9aef1 100644 --- a/include/linux/dns_resolver.h +++ b/include/linux/dns_resolver.h | |||
@@ -27,7 +27,7 @@ | |||
27 | #ifdef __KERNEL__ | 27 | #ifdef __KERNEL__ |
28 | 28 | ||
29 | extern int dns_query(const char *type, const char *name, size_t namelen, | 29 | extern int dns_query(const char *type, const char *name, size_t namelen, |
30 | const char *options, char **_result, time_t *_expiry); | 30 | const char *options, char **_result, time64_t *_expiry); |
31 | 31 | ||
32 | #endif /* KERNEL */ | 32 | #endif /* KERNEL */ |
33 | 33 | ||
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 0ef2a97ccdb5..402753bccafa 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -227,7 +227,7 @@ struct ipv6_pinfo { | |||
227 | struct ipv6_ac_socklist *ipv6_ac_list; | 227 | struct ipv6_ac_socklist *ipv6_ac_list; |
228 | struct ipv6_fl_socklist __rcu *ipv6_fl_list; | 228 | struct ipv6_fl_socklist __rcu *ipv6_fl_list; |
229 | 229 | ||
230 | struct ipv6_txoptions *opt; | 230 | struct ipv6_txoptions __rcu *opt; |
231 | struct sk_buff *pktoptions; | 231 | struct sk_buff *pktoptions; |
232 | struct sk_buff *rxpmtu; | 232 | struct sk_buff *rxpmtu; |
233 | struct inet6_cork cork; | 233 | struct inet6_cork cork; |
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 3db5552b17d5..c6916aec43b6 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h | |||
@@ -179,7 +179,7 @@ typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *); | |||
179 | typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); | 179 | typedef int (nvm_id_fn)(struct request_queue *, struct nvm_id *); |
180 | typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, | 180 | typedef int (nvm_get_l2p_tbl_fn)(struct request_queue *, u64, u32, |
181 | nvm_l2p_update_fn *, void *); | 181 | nvm_l2p_update_fn *, void *); |
182 | typedef int (nvm_op_bb_tbl_fn)(struct request_queue *, struct ppa_addr, int, | 182 | typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int, |
183 | nvm_bb_update_fn *, void *); | 183 | nvm_bb_update_fn *, void *); |
184 | typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); | 184 | typedef int (nvm_op_set_bb_fn)(struct request_queue *, struct nvm_rq *, int); |
185 | typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); | 185 | typedef int (nvm_submit_io_fn)(struct request_queue *, struct nvm_rq *); |
diff --git a/include/linux/net.h b/include/linux/net.h index 70ac5e28e6b7..0b4ac7da583a 100644 --- a/include/linux/net.h +++ b/include/linux/net.h | |||
@@ -34,8 +34,12 @@ struct inode; | |||
34 | struct file; | 34 | struct file; |
35 | struct net; | 35 | struct net; |
36 | 36 | ||
37 | #define SOCK_ASYNC_NOSPACE 0 | 37 | /* Historically, SOCKWQ_ASYNC_NOSPACE & SOCKWQ_ASYNC_WAITDATA were located |
38 | #define SOCK_ASYNC_WAITDATA 1 | 38 | * in sock->flags, but moved into sk->sk_wq->flags to be RCU protected. |
39 | * Eventually all flags will be in sk->sk_wq_flags. | ||
40 | */ | ||
41 | #define SOCKWQ_ASYNC_NOSPACE 0 | ||
42 | #define SOCKWQ_ASYNC_WAITDATA 1 | ||
39 | #define SOCK_NOSPACE 2 | 43 | #define SOCK_NOSPACE 2 |
40 | #define SOCK_PASSCRED 3 | 44 | #define SOCK_PASSCRED 3 |
41 | #define SOCK_PASSSEC 4 | 45 | #define SOCK_PASSSEC 4 |
@@ -89,6 +93,7 @@ struct socket_wq { | |||
89 | /* Note: wait MUST be first field of socket_wq */ | 93 | /* Note: wait MUST be first field of socket_wq */ |
90 | wait_queue_head_t wait; | 94 | wait_queue_head_t wait; |
91 | struct fasync_struct *fasync_list; | 95 | struct fasync_struct *fasync_list; |
96 | unsigned long flags; /* %SOCKWQ_ASYNC_NOSPACE, etc */ | ||
92 | struct rcu_head rcu; | 97 | struct rcu_head rcu; |
93 | } ____cacheline_aligned_in_smp; | 98 | } ____cacheline_aligned_in_smp; |
94 | 99 | ||
@@ -96,7 +101,7 @@ struct socket_wq { | |||
96 | * struct socket - general BSD socket | 101 | * struct socket - general BSD socket |
97 | * @state: socket state (%SS_CONNECTED, etc) | 102 | * @state: socket state (%SS_CONNECTED, etc) |
98 | * @type: socket type (%SOCK_STREAM, etc) | 103 | * @type: socket type (%SOCK_STREAM, etc) |
99 | * @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc) | 104 | * @flags: socket flags (%SOCK_NOSPACE, etc) |
100 | * @ops: protocol specific socket operations | 105 | * @ops: protocol specific socket operations |
101 | * @file: File back pointer for gc | 106 | * @file: File back pointer for gc |
102 | * @sk: internal networking protocol agnostic socket representation | 107 | * @sk: internal networking protocol agnostic socket representation |
@@ -202,7 +207,7 @@ enum { | |||
202 | SOCK_WAKE_URG, | 207 | SOCK_WAKE_URG, |
203 | }; | 208 | }; |
204 | 209 | ||
205 | int sock_wake_async(struct socket *sk, int how, int band); | 210 | int sock_wake_async(struct socket_wq *sk_wq, int how, int band); |
206 | int sock_register(const struct net_proto_family *fam); | 211 | int sock_register(const struct net_proto_family *fam); |
207 | void sock_unregister(int family); | 212 | void sock_unregister(int family); |
208 | int __sock_create(struct net *net, int family, int type, int proto, | 213 | int __sock_create(struct net *net, int family, int type, int proto, |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 67bfac1abfc1..3b5d134e945a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -1398,7 +1398,8 @@ enum netdev_priv_flags { | |||
1398 | * @dma: DMA channel | 1398 | * @dma: DMA channel |
1399 | * @mtu: Interface MTU value | 1399 | * @mtu: Interface MTU value |
1400 | * @type: Interface hardware type | 1400 | * @type: Interface hardware type |
1401 | * @hard_header_len: Hardware header length | 1401 | * @hard_header_len: Hardware header length, which means that this is the |
1402 | * minimum size of a packet. | ||
1402 | * | 1403 | * |
1403 | * @needed_headroom: Extra headroom the hardware may need, but not in all | 1404 | * @needed_headroom: Extra headroom the hardware may need, but not in all |
1404 | * cases can this be guaranteed | 1405 | * cases can this be guaranteed |
diff --git a/include/net/af_unix.h b/include/net/af_unix.h index b36d837c701e..2a91a0561a47 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h | |||
@@ -62,6 +62,7 @@ struct unix_sock { | |||
62 | #define UNIX_GC_CANDIDATE 0 | 62 | #define UNIX_GC_CANDIDATE 0 |
63 | #define UNIX_GC_MAYBE_CYCLE 1 | 63 | #define UNIX_GC_MAYBE_CYCLE 1 |
64 | struct socket_wq peer_wq; | 64 | struct socket_wq peer_wq; |
65 | wait_queue_t peer_wake; | ||
65 | }; | 66 | }; |
66 | 67 | ||
67 | static inline struct unix_sock *unix_sk(const struct sock *sk) | 68 | static inline struct unix_sock *unix_sk(const struct sock *sk) |
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 2bfb2ad2fab1..877f682989b8 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h | |||
@@ -133,27 +133,18 @@ void rt6_clean_tohost(struct net *net, struct in6_addr *gateway); | |||
133 | /* | 133 | /* |
134 | * Store a destination cache entry in a socket | 134 | * Store a destination cache entry in a socket |
135 | */ | 135 | */ |
136 | static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst, | 136 | static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, |
137 | const struct in6_addr *daddr, | 137 | const struct in6_addr *daddr, |
138 | const struct in6_addr *saddr) | 138 | const struct in6_addr *saddr) |
139 | { | 139 | { |
140 | struct ipv6_pinfo *np = inet6_sk(sk); | 140 | struct ipv6_pinfo *np = inet6_sk(sk); |
141 | struct rt6_info *rt = (struct rt6_info *) dst; | ||
142 | 141 | ||
142 | np->dst_cookie = rt6_get_cookie((struct rt6_info *)dst); | ||
143 | sk_setup_caps(sk, dst); | 143 | sk_setup_caps(sk, dst); |
144 | np->daddr_cache = daddr; | 144 | np->daddr_cache = daddr; |
145 | #ifdef CONFIG_IPV6_SUBTREES | 145 | #ifdef CONFIG_IPV6_SUBTREES |
146 | np->saddr_cache = saddr; | 146 | np->saddr_cache = saddr; |
147 | #endif | 147 | #endif |
148 | np->dst_cookie = rt6_get_cookie(rt); | ||
149 | } | ||
150 | |||
151 | static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, | ||
152 | struct in6_addr *daddr, struct in6_addr *saddr) | ||
153 | { | ||
154 | spin_lock(&sk->sk_dst_lock); | ||
155 | __ip6_dst_store(sk, dst, daddr, saddr); | ||
156 | spin_unlock(&sk->sk_dst_lock); | ||
157 | } | 148 | } |
158 | 149 | ||
159 | static inline bool ipv6_unicast_destination(const struct sk_buff *skb) | 150 | static inline bool ipv6_unicast_destination(const struct sk_buff *skb) |
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index e1a10b0ac0b0..9a5c9f013784 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -205,6 +205,7 @@ extern rwlock_t ip6_ra_lock; | |||
205 | */ | 205 | */ |
206 | 206 | ||
207 | struct ipv6_txoptions { | 207 | struct ipv6_txoptions { |
208 | atomic_t refcnt; | ||
208 | /* Length of this structure */ | 209 | /* Length of this structure */ |
209 | int tot_len; | 210 | int tot_len; |
210 | 211 | ||
@@ -217,7 +218,7 @@ struct ipv6_txoptions { | |||
217 | struct ipv6_opt_hdr *dst0opt; | 218 | struct ipv6_opt_hdr *dst0opt; |
218 | struct ipv6_rt_hdr *srcrt; /* Routing Header */ | 219 | struct ipv6_rt_hdr *srcrt; /* Routing Header */ |
219 | struct ipv6_opt_hdr *dst1opt; | 220 | struct ipv6_opt_hdr *dst1opt; |
220 | 221 | struct rcu_head rcu; | |
221 | /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */ | 222 | /* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */ |
222 | }; | 223 | }; |
223 | 224 | ||
@@ -252,6 +253,24 @@ struct ipv6_fl_socklist { | |||
252 | struct rcu_head rcu; | 253 | struct rcu_head rcu; |
253 | }; | 254 | }; |
254 | 255 | ||
256 | static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np) | ||
257 | { | ||
258 | struct ipv6_txoptions *opt; | ||
259 | |||
260 | rcu_read_lock(); | ||
261 | opt = rcu_dereference(np->opt); | ||
262 | if (opt && !atomic_inc_not_zero(&opt->refcnt)) | ||
263 | opt = NULL; | ||
264 | rcu_read_unlock(); | ||
265 | return opt; | ||
266 | } | ||
267 | |||
268 | static inline void txopt_put(struct ipv6_txoptions *opt) | ||
269 | { | ||
270 | if (opt && atomic_dec_and_test(&opt->refcnt)) | ||
271 | kfree_rcu(opt, rcu); | ||
272 | } | ||
273 | |||
255 | struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label); | 274 | struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label); |
256 | struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, | 275 | struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, |
257 | struct ip6_flowlabel *fl, | 276 | struct ip6_flowlabel *fl, |
@@ -490,6 +509,7 @@ struct ip6_create_arg { | |||
490 | u32 user; | 509 | u32 user; |
491 | const struct in6_addr *src; | 510 | const struct in6_addr *src; |
492 | const struct in6_addr *dst; | 511 | const struct in6_addr *dst; |
512 | int iif; | ||
493 | u8 ecn; | 513 | u8 ecn; |
494 | }; | 514 | }; |
495 | 515 | ||
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 82045fca388b..760bc4d5a2cf 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
@@ -2003,8 +2003,10 @@ enum ieee80211_hw_flags { | |||
2003 | * it shouldn't be set. | 2003 | * it shouldn't be set. |
2004 | * | 2004 | * |
2005 | * @max_tx_aggregation_subframes: maximum number of subframes in an | 2005 | * @max_tx_aggregation_subframes: maximum number of subframes in an |
2006 | * aggregate an HT driver will transmit, used by the peer as a | 2006 | * aggregate an HT driver will transmit. Though ADDBA will advertise |
2007 | * hint to size its reorder buffer. | 2007 | * a constant value of 64 as some older APs can crash if the window |
2008 | * size is smaller (an example is LinkSys WRT120N with FW v1.0.07 | ||
2009 | * build 002 Jun 18 2012). | ||
2008 | * | 2010 | * |
2009 | * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX | 2011 | * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX |
2010 | * (if %IEEE80211_HW_QUEUE_CONTROL is set) | 2012 | * (if %IEEE80211_HW_QUEUE_CONTROL is set) |
diff --git a/include/net/ndisc.h b/include/net/ndisc.h index bf3937431030..2d8edaad29cb 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h | |||
@@ -181,8 +181,7 @@ void ndisc_cleanup(void); | |||
181 | int ndisc_rcv(struct sk_buff *skb); | 181 | int ndisc_rcv(struct sk_buff *skb); |
182 | 182 | ||
183 | void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, | 183 | void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, |
184 | const struct in6_addr *daddr, const struct in6_addr *saddr, | 184 | const struct in6_addr *daddr, const struct in6_addr *saddr); |
185 | struct sk_buff *oskb); | ||
186 | 185 | ||
187 | void ndisc_send_rs(struct net_device *dev, | 186 | void ndisc_send_rs(struct net_device *dev, |
188 | const struct in6_addr *saddr, const struct in6_addr *daddr); | 187 | const struct in6_addr *saddr, const struct in6_addr *daddr); |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 4c79ce8c1f92..b2a8e6338576 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -61,6 +61,9 @@ struct Qdisc { | |||
61 | */ | 61 | */ |
62 | #define TCQ_F_WARN_NONWC (1 << 16) | 62 | #define TCQ_F_WARN_NONWC (1 << 16) |
63 | #define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */ | 63 | #define TCQ_F_CPUSTATS 0x20 /* run using percpu statistics */ |
64 | #define TCQ_F_NOPARENT 0x40 /* root of its hierarchy : | ||
65 | * qdisc_tree_decrease_qlen() should stop. | ||
66 | */ | ||
64 | u32 limit; | 67 | u32 limit; |
65 | const struct Qdisc_ops *ops; | 68 | const struct Qdisc_ops *ops; |
66 | struct qdisc_size_table __rcu *stab; | 69 | struct qdisc_size_table __rcu *stab; |
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 495c87e367b3..7bbb71081aeb 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -775,10 +775,10 @@ struct sctp_transport { | |||
775 | hb_sent:1, | 775 | hb_sent:1, |
776 | 776 | ||
777 | /* Is the Path MTU update pending on this tranport */ | 777 | /* Is the Path MTU update pending on this tranport */ |
778 | pmtu_pending:1; | 778 | pmtu_pending:1, |
779 | 779 | ||
780 | /* Has this transport moved the ctsn since we last sacked */ | 780 | /* Has this transport moved the ctsn since we last sacked */ |
781 | __u32 sack_generation; | 781 | sack_generation:1; |
782 | u32 dst_cookie; | 782 | u32 dst_cookie; |
783 | 783 | ||
784 | struct flowi fl; | 784 | struct flowi fl; |
@@ -1482,19 +1482,19 @@ struct sctp_association { | |||
1482 | prsctp_capable:1, /* Can peer do PR-SCTP? */ | 1482 | prsctp_capable:1, /* Can peer do PR-SCTP? */ |
1483 | auth_capable:1; /* Is peer doing SCTP-AUTH? */ | 1483 | auth_capable:1; /* Is peer doing SCTP-AUTH? */ |
1484 | 1484 | ||
1485 | /* Ack State : This flag indicates if the next received | 1485 | /* sack_needed : This flag indicates if the next received |
1486 | * : packet is to be responded to with a | 1486 | * : packet is to be responded to with a |
1487 | * : SACK. This is initializedto 0. When a packet | 1487 | * : SACK. This is initialized to 0. When a packet |
1488 | * : is received it is incremented. If this value | 1488 | * : is received sack_cnt is incremented. If this value |
1489 | * : reaches 2 or more, a SACK is sent and the | 1489 | * : reaches 2 or more, a SACK is sent and the |
1490 | * : value is reset to 0. Note: This is used only | 1490 | * : value is reset to 0. Note: This is used only |
1491 | * : when no DATA chunks are received out of | 1491 | * : when no DATA chunks are received out of |
1492 | * : order. When DATA chunks are out of order, | 1492 | * : order. When DATA chunks are out of order, |
1493 | * : SACK's are not delayed (see Section 6). | 1493 | * : SACK's are not delayed (see Section 6). |
1494 | */ | 1494 | */ |
1495 | __u8 sack_needed; /* Do we need to sack the peer? */ | 1495 | __u8 sack_needed:1, /* Do we need to sack the peer? */ |
1496 | sack_generation:1; | ||
1496 | __u32 sack_cnt; | 1497 | __u32 sack_cnt; |
1497 | __u32 sack_generation; | ||
1498 | 1498 | ||
1499 | __u32 adaptation_ind; /* Adaptation Code point. */ | 1499 | __u32 adaptation_ind; /* Adaptation Code point. */ |
1500 | 1500 | ||
diff --git a/include/net/sock.h b/include/net/sock.h index 7f89e4ba18d1..52d27ee924f4 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -254,7 +254,6 @@ struct cg_proto; | |||
254 | * @sk_wq: sock wait queue and async head | 254 | * @sk_wq: sock wait queue and async head |
255 | * @sk_rx_dst: receive input route used by early demux | 255 | * @sk_rx_dst: receive input route used by early demux |
256 | * @sk_dst_cache: destination cache | 256 | * @sk_dst_cache: destination cache |
257 | * @sk_dst_lock: destination cache lock | ||
258 | * @sk_policy: flow policy | 257 | * @sk_policy: flow policy |
259 | * @sk_receive_queue: incoming packets | 258 | * @sk_receive_queue: incoming packets |
260 | * @sk_wmem_alloc: transmit queue bytes committed | 259 | * @sk_wmem_alloc: transmit queue bytes committed |
@@ -384,14 +383,16 @@ struct sock { | |||
384 | int sk_rcvbuf; | 383 | int sk_rcvbuf; |
385 | 384 | ||
386 | struct sk_filter __rcu *sk_filter; | 385 | struct sk_filter __rcu *sk_filter; |
387 | struct socket_wq __rcu *sk_wq; | 386 | union { |
388 | 387 | struct socket_wq __rcu *sk_wq; | |
388 | struct socket_wq *sk_wq_raw; | ||
389 | }; | ||
389 | #ifdef CONFIG_XFRM | 390 | #ifdef CONFIG_XFRM |
390 | struct xfrm_policy *sk_policy[2]; | 391 | struct xfrm_policy *sk_policy[2]; |
391 | #endif | 392 | #endif |
392 | struct dst_entry *sk_rx_dst; | 393 | struct dst_entry *sk_rx_dst; |
393 | struct dst_entry __rcu *sk_dst_cache; | 394 | struct dst_entry __rcu *sk_dst_cache; |
394 | spinlock_t sk_dst_lock; | 395 | /* Note: 32bit hole on 64bit arches */ |
395 | atomic_t sk_wmem_alloc; | 396 | atomic_t sk_wmem_alloc; |
396 | atomic_t sk_omem_alloc; | 397 | atomic_t sk_omem_alloc; |
397 | int sk_sndbuf; | 398 | int sk_sndbuf; |
@@ -2005,10 +2006,27 @@ static inline unsigned long sock_wspace(struct sock *sk) | |||
2005 | return amt; | 2006 | return amt; |
2006 | } | 2007 | } |
2007 | 2008 | ||
2008 | static inline void sk_wake_async(struct sock *sk, int how, int band) | 2009 | /* Note: |
2010 | * We use sk->sk_wq_raw, from contexts knowing this | ||
2011 | * pointer is not NULL and cannot disappear/change. | ||
2012 | */ | ||
2013 | static inline void sk_set_bit(int nr, struct sock *sk) | ||
2009 | { | 2014 | { |
2010 | if (sock_flag(sk, SOCK_FASYNC)) | 2015 | set_bit(nr, &sk->sk_wq_raw->flags); |
2011 | sock_wake_async(sk->sk_socket, how, band); | 2016 | } |
2017 | |||
2018 | static inline void sk_clear_bit(int nr, struct sock *sk) | ||
2019 | { | ||
2020 | clear_bit(nr, &sk->sk_wq_raw->flags); | ||
2021 | } | ||
2022 | |||
2023 | static inline void sk_wake_async(const struct sock *sk, int how, int band) | ||
2024 | { | ||
2025 | if (sock_flag(sk, SOCK_FASYNC)) { | ||
2026 | rcu_read_lock(); | ||
2027 | sock_wake_async(rcu_dereference(sk->sk_wq), how, band); | ||
2028 | rcu_read_unlock(); | ||
2029 | } | ||
2012 | } | 2030 | } |
2013 | 2031 | ||
2014 | /* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might | 2032 | /* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might |
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index ed527121031d..fcfa3d7f5e7e 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h | |||
@@ -668,6 +668,9 @@ struct Scsi_Host { | |||
668 | unsigned use_blk_mq:1; | 668 | unsigned use_blk_mq:1; |
669 | unsigned use_cmd_list:1; | 669 | unsigned use_cmd_list:1; |
670 | 670 | ||
671 | /* Host responded with short (<36 bytes) INQUIRY result */ | ||
672 | unsigned short_inquiry:1; | ||
673 | |||
671 | /* | 674 | /* |
672 | * Optional work queue to be utilized by the transport | 675 | * Optional work queue to be utilized by the transport |
673 | */ | 676 | */ |
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index 7855cfe46b69..95a937eafb79 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h | |||
@@ -398,6 +398,7 @@ int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm, | |||
398 | int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm, | 398 | int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm, |
399 | const struct snd_soc_dapm_route *route, int num); | 399 | const struct snd_soc_dapm_route *route, int num); |
400 | void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w); | 400 | void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w); |
401 | void snd_soc_dapm_reset_cache(struct snd_soc_dapm_context *dapm); | ||
401 | 402 | ||
402 | /* dapm events */ | 403 | /* dapm events */ |
403 | void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream, | 404 | void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream, |
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index 85dedca3dcfb..eeba75395f7d 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h | |||
@@ -343,7 +343,6 @@ struct ipu_client_platformdata { | |||
343 | int di; | 343 | int di; |
344 | int dc; | 344 | int dc; |
345 | int dp; | 345 | int dp; |
346 | int dmfc; | ||
347 | int dma[2]; | 346 | int dma[2]; |
348 | }; | 347 | }; |
349 | 348 | ||
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 3f4c99e06c6b..b0799bced518 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c | |||
@@ -28,11 +28,17 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) | |||
28 | attr->value_size == 0) | 28 | attr->value_size == 0) |
29 | return ERR_PTR(-EINVAL); | 29 | return ERR_PTR(-EINVAL); |
30 | 30 | ||
31 | if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1)) | ||
32 | /* if value_size is bigger, the user space won't be able to | ||
33 | * access the elements. | ||
34 | */ | ||
35 | return ERR_PTR(-E2BIG); | ||
36 | |||
31 | elem_size = round_up(attr->value_size, 8); | 37 | elem_size = round_up(attr->value_size, 8); |
32 | 38 | ||
33 | /* check round_up into zero and u32 overflow */ | 39 | /* check round_up into zero and u32 overflow */ |
34 | if (elem_size == 0 || | 40 | if (elem_size == 0 || |
35 | attr->max_entries > (U32_MAX - sizeof(*array)) / elem_size) | 41 | attr->max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size) |
36 | return ERR_PTR(-ENOMEM); | 42 | return ERR_PTR(-ENOMEM); |
37 | 43 | ||
38 | array_size = sizeof(*array) + attr->max_entries * elem_size; | 44 | array_size = sizeof(*array) + attr->max_entries * elem_size; |
@@ -105,7 +111,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value, | |||
105 | /* all elements already exist */ | 111 | /* all elements already exist */ |
106 | return -EEXIST; | 112 | return -EEXIST; |
107 | 113 | ||
108 | memcpy(array->value + array->elem_size * index, value, array->elem_size); | 114 | memcpy(array->value + array->elem_size * index, value, map->value_size); |
109 | return 0; | 115 | return 0; |
110 | } | 116 | } |
111 | 117 | ||
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 19909b22b4f8..34777b3746fa 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
@@ -64,12 +64,35 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) | |||
64 | */ | 64 | */ |
65 | goto free_htab; | 65 | goto free_htab; |
66 | 66 | ||
67 | err = -ENOMEM; | 67 | if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) - |
68 | MAX_BPF_STACK - sizeof(struct htab_elem)) | ||
69 | /* if value_size is bigger, the user space won't be able to | ||
70 | * access the elements via bpf syscall. This check also makes | ||
71 | * sure that the elem_size doesn't overflow and it's | ||
72 | * kmalloc-able later in htab_map_update_elem() | ||
73 | */ | ||
74 | goto free_htab; | ||
75 | |||
76 | htab->elem_size = sizeof(struct htab_elem) + | ||
77 | round_up(htab->map.key_size, 8) + | ||
78 | htab->map.value_size; | ||
79 | |||
68 | /* prevent zero size kmalloc and check for u32 overflow */ | 80 | /* prevent zero size kmalloc and check for u32 overflow */ |
69 | if (htab->n_buckets == 0 || | 81 | if (htab->n_buckets == 0 || |
70 | htab->n_buckets > U32_MAX / sizeof(struct hlist_head)) | 82 | htab->n_buckets > U32_MAX / sizeof(struct hlist_head)) |
71 | goto free_htab; | 83 | goto free_htab; |
72 | 84 | ||
85 | if ((u64) htab->n_buckets * sizeof(struct hlist_head) + | ||
86 | (u64) htab->elem_size * htab->map.max_entries >= | ||
87 | U32_MAX - PAGE_SIZE) | ||
88 | /* make sure page count doesn't overflow */ | ||
89 | goto free_htab; | ||
90 | |||
91 | htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) + | ||
92 | htab->elem_size * htab->map.max_entries, | ||
93 | PAGE_SIZE) >> PAGE_SHIFT; | ||
94 | |||
95 | err = -ENOMEM; | ||
73 | htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct hlist_head), | 96 | htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct hlist_head), |
74 | GFP_USER | __GFP_NOWARN); | 97 | GFP_USER | __GFP_NOWARN); |
75 | 98 | ||
@@ -85,13 +108,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) | |||
85 | raw_spin_lock_init(&htab->lock); | 108 | raw_spin_lock_init(&htab->lock); |
86 | htab->count = 0; | 109 | htab->count = 0; |
87 | 110 | ||
88 | htab->elem_size = sizeof(struct htab_elem) + | ||
89 | round_up(htab->map.key_size, 8) + | ||
90 | htab->map.value_size; | ||
91 | |||
92 | htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) + | ||
93 | htab->elem_size * htab->map.max_entries, | ||
94 | PAGE_SIZE) >> PAGE_SHIFT; | ||
95 | return &htab->map; | 111 | return &htab->map; |
96 | 112 | ||
97 | free_htab: | 113 | free_htab: |
@@ -222,7 +238,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, | |||
222 | WARN_ON_ONCE(!rcu_read_lock_held()); | 238 | WARN_ON_ONCE(!rcu_read_lock_held()); |
223 | 239 | ||
224 | /* allocate new element outside of lock */ | 240 | /* allocate new element outside of lock */ |
225 | l_new = kmalloc(htab->elem_size, GFP_ATOMIC); | 241 | l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN); |
226 | if (!l_new) | 242 | if (!l_new) |
227 | return -ENOMEM; | 243 | return -ENOMEM; |
228 | 244 | ||
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index be6d726e31c9..5a8a797d50b7 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c | |||
@@ -34,7 +34,7 @@ static void *bpf_any_get(void *raw, enum bpf_type type) | |||
34 | atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt); | 34 | atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt); |
35 | break; | 35 | break; |
36 | case BPF_TYPE_MAP: | 36 | case BPF_TYPE_MAP: |
37 | atomic_inc(&((struct bpf_map *)raw)->refcnt); | 37 | bpf_map_inc(raw, true); |
38 | break; | 38 | break; |
39 | default: | 39 | default: |
40 | WARN_ON_ONCE(1); | 40 | WARN_ON_ONCE(1); |
@@ -51,7 +51,7 @@ static void bpf_any_put(void *raw, enum bpf_type type) | |||
51 | bpf_prog_put(raw); | 51 | bpf_prog_put(raw); |
52 | break; | 52 | break; |
53 | case BPF_TYPE_MAP: | 53 | case BPF_TYPE_MAP: |
54 | bpf_map_put(raw); | 54 | bpf_map_put_with_uref(raw); |
55 | break; | 55 | break; |
56 | default: | 56 | default: |
57 | WARN_ON_ONCE(1); | 57 | WARN_ON_ONCE(1); |
@@ -64,7 +64,7 @@ static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type) | |||
64 | void *raw; | 64 | void *raw; |
65 | 65 | ||
66 | *type = BPF_TYPE_MAP; | 66 | *type = BPF_TYPE_MAP; |
67 | raw = bpf_map_get(ufd); | 67 | raw = bpf_map_get_with_uref(ufd); |
68 | if (IS_ERR(raw)) { | 68 | if (IS_ERR(raw)) { |
69 | *type = BPF_TYPE_PROG; | 69 | *type = BPF_TYPE_PROG; |
70 | raw = bpf_prog_get(ufd); | 70 | raw = bpf_prog_get(ufd); |
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 0d3313d02a7e..3b39550d8485 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -82,6 +82,14 @@ static void bpf_map_free_deferred(struct work_struct *work) | |||
82 | map->ops->map_free(map); | 82 | map->ops->map_free(map); |
83 | } | 83 | } |
84 | 84 | ||
85 | static void bpf_map_put_uref(struct bpf_map *map) | ||
86 | { | ||
87 | if (atomic_dec_and_test(&map->usercnt)) { | ||
88 | if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) | ||
89 | bpf_fd_array_map_clear(map); | ||
90 | } | ||
91 | } | ||
92 | |||
85 | /* decrement map refcnt and schedule it for freeing via workqueue | 93 | /* decrement map refcnt and schedule it for freeing via workqueue |
86 | * (unrelying map implementation ops->map_free() might sleep) | 94 | * (unrelying map implementation ops->map_free() might sleep) |
87 | */ | 95 | */ |
@@ -93,17 +101,15 @@ void bpf_map_put(struct bpf_map *map) | |||
93 | } | 101 | } |
94 | } | 102 | } |
95 | 103 | ||
96 | static int bpf_map_release(struct inode *inode, struct file *filp) | 104 | void bpf_map_put_with_uref(struct bpf_map *map) |
97 | { | 105 | { |
98 | struct bpf_map *map = filp->private_data; | 106 | bpf_map_put_uref(map); |
99 | |||
100 | if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) | ||
101 | /* prog_array stores refcnt-ed bpf_prog pointers | ||
102 | * release them all when user space closes prog_array_fd | ||
103 | */ | ||
104 | bpf_fd_array_map_clear(map); | ||
105 | |||
106 | bpf_map_put(map); | 107 | bpf_map_put(map); |
108 | } | ||
109 | |||
110 | static int bpf_map_release(struct inode *inode, struct file *filp) | ||
111 | { | ||
112 | bpf_map_put_with_uref(filp->private_data); | ||
107 | return 0; | 113 | return 0; |
108 | } | 114 | } |
109 | 115 | ||
@@ -142,6 +148,7 @@ static int map_create(union bpf_attr *attr) | |||
142 | return PTR_ERR(map); | 148 | return PTR_ERR(map); |
143 | 149 | ||
144 | atomic_set(&map->refcnt, 1); | 150 | atomic_set(&map->refcnt, 1); |
151 | atomic_set(&map->usercnt, 1); | ||
145 | 152 | ||
146 | err = bpf_map_charge_memlock(map); | 153 | err = bpf_map_charge_memlock(map); |
147 | if (err) | 154 | if (err) |
@@ -174,7 +181,14 @@ struct bpf_map *__bpf_map_get(struct fd f) | |||
174 | return f.file->private_data; | 181 | return f.file->private_data; |
175 | } | 182 | } |
176 | 183 | ||
177 | struct bpf_map *bpf_map_get(u32 ufd) | 184 | void bpf_map_inc(struct bpf_map *map, bool uref) |
185 | { | ||
186 | atomic_inc(&map->refcnt); | ||
187 | if (uref) | ||
188 | atomic_inc(&map->usercnt); | ||
189 | } | ||
190 | |||
191 | struct bpf_map *bpf_map_get_with_uref(u32 ufd) | ||
178 | { | 192 | { |
179 | struct fd f = fdget(ufd); | 193 | struct fd f = fdget(ufd); |
180 | struct bpf_map *map; | 194 | struct bpf_map *map; |
@@ -183,7 +197,7 @@ struct bpf_map *bpf_map_get(u32 ufd) | |||
183 | if (IS_ERR(map)) | 197 | if (IS_ERR(map)) |
184 | return map; | 198 | return map; |
185 | 199 | ||
186 | atomic_inc(&map->refcnt); | 200 | bpf_map_inc(map, true); |
187 | fdput(f); | 201 | fdput(f); |
188 | 202 | ||
189 | return map; | 203 | return map; |
@@ -226,7 +240,7 @@ static int map_lookup_elem(union bpf_attr *attr) | |||
226 | goto free_key; | 240 | goto free_key; |
227 | 241 | ||
228 | err = -ENOMEM; | 242 | err = -ENOMEM; |
229 | value = kmalloc(map->value_size, GFP_USER); | 243 | value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN); |
230 | if (!value) | 244 | if (!value) |
231 | goto free_key; | 245 | goto free_key; |
232 | 246 | ||
@@ -285,7 +299,7 @@ static int map_update_elem(union bpf_attr *attr) | |||
285 | goto free_key; | 299 | goto free_key; |
286 | 300 | ||
287 | err = -ENOMEM; | 301 | err = -ENOMEM; |
288 | value = kmalloc(map->value_size, GFP_USER); | 302 | value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN); |
289 | if (!value) | 303 | if (!value) |
290 | goto free_key; | 304 | goto free_key; |
291 | 305 | ||
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c6073056badf..a7945d10b378 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -2021,8 +2021,7 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env) | |||
2021 | * will be used by the valid program until it's unloaded | 2021 | * will be used by the valid program until it's unloaded |
2022 | * and all maps are released in free_bpf_prog_info() | 2022 | * and all maps are released in free_bpf_prog_info() |
2023 | */ | 2023 | */ |
2024 | atomic_inc(&map->refcnt); | 2024 | bpf_map_inc(map, false); |
2025 | |||
2026 | fdput(f); | 2025 | fdput(f); |
2027 | next_insn: | 2026 | next_insn: |
2028 | insn++; | 2027 | insn++; |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4d568ac9319e..7063c6a07440 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -1947,13 +1947,38 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) | |||
1947 | 1947 | ||
1948 | #ifdef CONFIG_SMP | 1948 | #ifdef CONFIG_SMP |
1949 | /* | 1949 | /* |
1950 | * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be | ||
1951 | * possible to, falsely, observe p->on_cpu == 0. | ||
1952 | * | ||
1953 | * One must be running (->on_cpu == 1) in order to remove oneself | ||
1954 | * from the runqueue. | ||
1955 | * | ||
1956 | * [S] ->on_cpu = 1; [L] ->on_rq | ||
1957 | * UNLOCK rq->lock | ||
1958 | * RMB | ||
1959 | * LOCK rq->lock | ||
1960 | * [S] ->on_rq = 0; [L] ->on_cpu | ||
1961 | * | ||
1962 | * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock | ||
1963 | * from the consecutive calls to schedule(); the first switching to our | ||
1964 | * task, the second putting it to sleep. | ||
1965 | */ | ||
1966 | smp_rmb(); | ||
1967 | |||
1968 | /* | ||
1950 | * If the owning (remote) cpu is still in the middle of schedule() with | 1969 | * If the owning (remote) cpu is still in the middle of schedule() with |
1951 | * this task as prev, wait until its done referencing the task. | 1970 | * this task as prev, wait until its done referencing the task. |
1952 | */ | 1971 | */ |
1953 | while (p->on_cpu) | 1972 | while (p->on_cpu) |
1954 | cpu_relax(); | 1973 | cpu_relax(); |
1955 | /* | 1974 | /* |
1956 | * Pairs with the smp_wmb() in finish_lock_switch(). | 1975 | * Combined with the control dependency above, we have an effective |
1976 | * smp_load_acquire() without the need for full barriers. | ||
1977 | * | ||
1978 | * Pairs with the smp_store_release() in finish_lock_switch(). | ||
1979 | * | ||
1980 | * This ensures that tasks getting woken will be fully ordered against | ||
1981 | * their previous state and preserve Program Order. | ||
1957 | */ | 1982 | */ |
1958 | smp_rmb(); | 1983 | smp_rmb(); |
1959 | 1984 | ||
@@ -2039,7 +2064,6 @@ out: | |||
2039 | */ | 2064 | */ |
2040 | int wake_up_process(struct task_struct *p) | 2065 | int wake_up_process(struct task_struct *p) |
2041 | { | 2066 | { |
2042 | WARN_ON(task_is_stopped_or_traced(p)); | ||
2043 | return try_to_wake_up(p, TASK_NORMAL, 0); | 2067 | return try_to_wake_up(p, TASK_NORMAL, 0); |
2044 | } | 2068 | } |
2045 | EXPORT_SYMBOL(wake_up_process); | 2069 | EXPORT_SYMBOL(wake_up_process); |
@@ -5847,13 +5871,13 @@ static int init_rootdomain(struct root_domain *rd) | |||
5847 | { | 5871 | { |
5848 | memset(rd, 0, sizeof(*rd)); | 5872 | memset(rd, 0, sizeof(*rd)); |
5849 | 5873 | ||
5850 | if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) | 5874 | if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) |
5851 | goto out; | 5875 | goto out; |
5852 | if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) | 5876 | if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) |
5853 | goto free_span; | 5877 | goto free_span; |
5854 | if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) | 5878 | if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL)) |
5855 | goto free_online; | 5879 | goto free_online; |
5856 | if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) | 5880 | if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) |
5857 | goto free_dlo_mask; | 5881 | goto free_dlo_mask; |
5858 | 5882 | ||
5859 | init_dl_bw(&rd->dl_bw); | 5883 | init_dl_bw(&rd->dl_bw); |
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 26a54461bf59..05de80b48586 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c | |||
@@ -788,6 +788,9 @@ cputime_t task_gtime(struct task_struct *t) | |||
788 | unsigned int seq; | 788 | unsigned int seq; |
789 | cputime_t gtime; | 789 | cputime_t gtime; |
790 | 790 | ||
791 | if (!context_tracking_is_enabled()) | ||
792 | return t->gtime; | ||
793 | |||
791 | do { | 794 | do { |
792 | seq = read_seqbegin(&t->vtime_seqlock); | 795 | seq = read_seqbegin(&t->vtime_seqlock); |
793 | 796 | ||
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index e3cc16312046..8ec86abe0ea1 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
@@ -64,7 +64,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) | |||
64 | raw_spin_unlock(&rt_b->rt_runtime_lock); | 64 | raw_spin_unlock(&rt_b->rt_runtime_lock); |
65 | } | 65 | } |
66 | 66 | ||
67 | #ifdef CONFIG_SMP | 67 | #if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI) |
68 | static void push_irq_work_func(struct irq_work *work); | 68 | static void push_irq_work_func(struct irq_work *work); |
69 | #endif | 69 | #endif |
70 | 70 | ||
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index efd3bfc7e347..b242775bf670 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -1073,6 +1073,9 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) | |||
1073 | * We must ensure this doesn't happen until the switch is completely | 1073 | * We must ensure this doesn't happen until the switch is completely |
1074 | * finished. | 1074 | * finished. |
1075 | * | 1075 | * |
1076 | * In particular, the load of prev->state in finish_task_switch() must | ||
1077 | * happen before this. | ||
1078 | * | ||
1076 | * Pairs with the control dependency and rmb in try_to_wake_up(). | 1079 | * Pairs with the control dependency and rmb in try_to_wake_up(). |
1077 | */ | 1080 | */ |
1078 | smp_store_release(&prev->on_cpu, 0); | 1081 | smp_store_release(&prev->on_cpu, 0); |
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c index 052e02672d12..f10bd873e684 100644 --- a/kernel/sched/wait.c +++ b/kernel/sched/wait.c | |||
@@ -583,18 +583,18 @@ EXPORT_SYMBOL(wake_up_atomic_t); | |||
583 | 583 | ||
584 | __sched int bit_wait(struct wait_bit_key *word) | 584 | __sched int bit_wait(struct wait_bit_key *word) |
585 | { | 585 | { |
586 | if (signal_pending_state(current->state, current)) | ||
587 | return 1; | ||
588 | schedule(); | 586 | schedule(); |
587 | if (signal_pending(current)) | ||
588 | return -EINTR; | ||
589 | return 0; | 589 | return 0; |
590 | } | 590 | } |
591 | EXPORT_SYMBOL(bit_wait); | 591 | EXPORT_SYMBOL(bit_wait); |
592 | 592 | ||
593 | __sched int bit_wait_io(struct wait_bit_key *word) | 593 | __sched int bit_wait_io(struct wait_bit_key *word) |
594 | { | 594 | { |
595 | if (signal_pending_state(current->state, current)) | ||
596 | return 1; | ||
597 | io_schedule(); | 595 | io_schedule(); |
596 | if (signal_pending(current)) | ||
597 | return -EINTR; | ||
598 | return 0; | 598 | return 0; |
599 | } | 599 | } |
600 | EXPORT_SYMBOL(bit_wait_io); | 600 | EXPORT_SYMBOL(bit_wait_io); |
@@ -602,11 +602,11 @@ EXPORT_SYMBOL(bit_wait_io); | |||
602 | __sched int bit_wait_timeout(struct wait_bit_key *word) | 602 | __sched int bit_wait_timeout(struct wait_bit_key *word) |
603 | { | 603 | { |
604 | unsigned long now = READ_ONCE(jiffies); | 604 | unsigned long now = READ_ONCE(jiffies); |
605 | if (signal_pending_state(current->state, current)) | ||
606 | return 1; | ||
607 | if (time_after_eq(now, word->timeout)) | 605 | if (time_after_eq(now, word->timeout)) |
608 | return -EAGAIN; | 606 | return -EAGAIN; |
609 | schedule_timeout(word->timeout - now); | 607 | schedule_timeout(word->timeout - now); |
608 | if (signal_pending(current)) | ||
609 | return -EINTR; | ||
610 | return 0; | 610 | return 0; |
611 | } | 611 | } |
612 | EXPORT_SYMBOL_GPL(bit_wait_timeout); | 612 | EXPORT_SYMBOL_GPL(bit_wait_timeout); |
@@ -614,11 +614,11 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout); | |||
614 | __sched int bit_wait_io_timeout(struct wait_bit_key *word) | 614 | __sched int bit_wait_io_timeout(struct wait_bit_key *word) |
615 | { | 615 | { |
616 | unsigned long now = READ_ONCE(jiffies); | 616 | unsigned long now = READ_ONCE(jiffies); |
617 | if (signal_pending_state(current->state, current)) | ||
618 | return 1; | ||
619 | if (time_after_eq(now, word->timeout)) | 617 | if (time_after_eq(now, word->timeout)) |
620 | return -EAGAIN; | 618 | return -EAGAIN; |
621 | io_schedule_timeout(word->timeout - now); | 619 | io_schedule_timeout(word->timeout - now); |
620 | if (signal_pending(current)) | ||
621 | return -EINTR; | ||
622 | return 0; | 622 | return 0; |
623 | } | 623 | } |
624 | EXPORT_SYMBOL_GPL(bit_wait_io_timeout); | 624 | EXPORT_SYMBOL_GPL(bit_wait_io_timeout); |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 75f1d05ea82d..9c6045a27ba3 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -1887,12 +1887,6 @@ rb_event_index(struct ring_buffer_event *event) | |||
1887 | return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; | 1887 | return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE; |
1888 | } | 1888 | } |
1889 | 1889 | ||
1890 | static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | ||
1891 | { | ||
1892 | cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; | ||
1893 | cpu_buffer->reader_page->read = 0; | ||
1894 | } | ||
1895 | |||
1896 | static void rb_inc_iter(struct ring_buffer_iter *iter) | 1890 | static void rb_inc_iter(struct ring_buffer_iter *iter) |
1897 | { | 1891 | { |
1898 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 1892 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
@@ -2803,8 +2797,11 @@ rb_reserve_next_event(struct ring_buffer *buffer, | |||
2803 | 2797 | ||
2804 | event = __rb_reserve_next(cpu_buffer, &info); | 2798 | event = __rb_reserve_next(cpu_buffer, &info); |
2805 | 2799 | ||
2806 | if (unlikely(PTR_ERR(event) == -EAGAIN)) | 2800 | if (unlikely(PTR_ERR(event) == -EAGAIN)) { |
2801 | if (info.add_timestamp) | ||
2802 | info.length -= RB_LEN_TIME_EXTEND; | ||
2807 | goto again; | 2803 | goto again; |
2804 | } | ||
2808 | 2805 | ||
2809 | if (!event) | 2806 | if (!event) |
2810 | goto out_fail; | 2807 | goto out_fail; |
@@ -3626,7 +3623,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
3626 | 3623 | ||
3627 | /* Finally update the reader page to the new head */ | 3624 | /* Finally update the reader page to the new head */ |
3628 | cpu_buffer->reader_page = reader; | 3625 | cpu_buffer->reader_page = reader; |
3629 | rb_reset_reader_page(cpu_buffer); | 3626 | cpu_buffer->reader_page->read = 0; |
3630 | 3627 | ||
3631 | if (overwrite != cpu_buffer->last_overrun) { | 3628 | if (overwrite != cpu_buffer->last_overrun) { |
3632 | cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; | 3629 | cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; |
@@ -3636,6 +3633,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
3636 | goto again; | 3633 | goto again; |
3637 | 3634 | ||
3638 | out: | 3635 | out: |
3636 | /* Update the read_stamp on the first event */ | ||
3637 | if (reader && reader->read == 0) | ||
3638 | cpu_buffer->read_stamp = reader->page->time_stamp; | ||
3639 | |||
3639 | arch_spin_unlock(&cpu_buffer->lock); | 3640 | arch_spin_unlock(&cpu_buffer->lock); |
3640 | local_irq_restore(flags); | 3641 | local_irq_restore(flags); |
3641 | 3642 | ||
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 6bbc5f652355..4f6ef6912e00 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -582,6 +582,12 @@ static void __ftrace_clear_event_pids(struct trace_array *tr) | |||
582 | unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr); | 582 | unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr); |
583 | unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr); | 583 | unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr); |
584 | 584 | ||
585 | unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr); | ||
586 | unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr); | ||
587 | |||
588 | unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr); | ||
589 | unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr); | ||
590 | |||
585 | list_for_each_entry(file, &tr->events, list) { | 591 | list_for_each_entry(file, &tr->events, list) { |
586 | clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags); | 592 | clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags); |
587 | } | 593 | } |
@@ -1729,6 +1735,16 @@ ftrace_event_pid_write(struct file *filp, const char __user *ubuf, | |||
1729 | tr, INT_MAX); | 1735 | tr, INT_MAX); |
1730 | register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, | 1736 | register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, |
1731 | tr, 0); | 1737 | tr, 0); |
1738 | |||
1739 | register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, | ||
1740 | tr, INT_MAX); | ||
1741 | register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, | ||
1742 | tr, 0); | ||
1743 | |||
1744 | register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre, | ||
1745 | tr, INT_MAX); | ||
1746 | register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post, | ||
1747 | tr, 0); | ||
1732 | } | 1748 | } |
1733 | 1749 | ||
1734 | /* | 1750 | /* |
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index a3bffd1ec2b4..70306cc9d814 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c | |||
@@ -271,11 +271,11 @@ static long bt_sock_data_wait(struct sock *sk, long timeo) | |||
271 | if (signal_pending(current) || !timeo) | 271 | if (signal_pending(current) || !timeo) |
272 | break; | 272 | break; |
273 | 273 | ||
274 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 274 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
275 | release_sock(sk); | 275 | release_sock(sk); |
276 | timeo = schedule_timeout(timeo); | 276 | timeo = schedule_timeout(timeo); |
277 | lock_sock(sk); | 277 | lock_sock(sk); |
278 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 278 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
279 | } | 279 | } |
280 | 280 | ||
281 | __set_current_state(TASK_RUNNING); | 281 | __set_current_state(TASK_RUNNING); |
@@ -441,7 +441,7 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock, | |||
441 | if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) | 441 | if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) |
442 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | 442 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; |
443 | else | 443 | else |
444 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 444 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
445 | 445 | ||
446 | return mask; | 446 | return mask; |
447 | } | 447 | } |
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index c91353841e40..ffed8a1d4f27 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c | |||
@@ -3027,8 +3027,13 @@ static void smp_ready_cb(struct l2cap_chan *chan) | |||
3027 | 3027 | ||
3028 | BT_DBG("chan %p", chan); | 3028 | BT_DBG("chan %p", chan); |
3029 | 3029 | ||
3030 | /* No need to call l2cap_chan_hold() here since we already own | ||
3031 | * the reference taken in smp_new_conn_cb(). This is just the | ||
3032 | * first time that we tie it to a specific pointer. The code in | ||
3033 | * l2cap_core.c ensures that there's no risk this function wont | ||
3034 | * get called if smp_new_conn_cb was previously called. | ||
3035 | */ | ||
3030 | conn->smp = chan; | 3036 | conn->smp = chan; |
3031 | l2cap_chan_hold(chan); | ||
3032 | 3037 | ||
3033 | if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) | 3038 | if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) |
3034 | bredr_pairing(chan); | 3039 | bredr_pairing(chan); |
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index cc858919108e..aa209b1066c9 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c | |||
@@ -323,7 +323,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo) | |||
323 | !timeo) | 323 | !timeo) |
324 | break; | 324 | break; |
325 | 325 | ||
326 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 326 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
327 | release_sock(sk); | 327 | release_sock(sk); |
328 | timeo = schedule_timeout(timeo); | 328 | timeo = schedule_timeout(timeo); |
329 | lock_sock(sk); | 329 | lock_sock(sk); |
@@ -331,7 +331,7 @@ static long caif_stream_data_wait(struct sock *sk, long timeo) | |||
331 | if (sock_flag(sk, SOCK_DEAD)) | 331 | if (sock_flag(sk, SOCK_DEAD)) |
332 | break; | 332 | break; |
333 | 333 | ||
334 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 334 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
335 | } | 335 | } |
336 | 336 | ||
337 | finish_wait(sk_sleep(sk), &wait); | 337 | finish_wait(sk_sleep(sk), &wait); |
diff --git a/net/core/datagram.c b/net/core/datagram.c index 617088aee21d..d62af69ad844 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
@@ -785,7 +785,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock, | |||
785 | if (sock_writeable(sk)) | 785 | if (sock_writeable(sk)) |
786 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | 786 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; |
787 | else | 787 | else |
788 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 788 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
789 | 789 | ||
790 | return mask; | 790 | return mask; |
791 | } | 791 | } |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index e6af42da28d9..f18ae91b652e 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -2215,7 +2215,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn, | |||
2215 | ndm->ndm_pad2 = 0; | 2215 | ndm->ndm_pad2 = 0; |
2216 | ndm->ndm_flags = pn->flags | NTF_PROXY; | 2216 | ndm->ndm_flags = pn->flags | NTF_PROXY; |
2217 | ndm->ndm_type = RTN_UNICAST; | 2217 | ndm->ndm_type = RTN_UNICAST; |
2218 | ndm->ndm_ifindex = pn->dev->ifindex; | 2218 | ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0; |
2219 | ndm->ndm_state = NUD_NONE; | 2219 | ndm->ndm_state = NUD_NONE; |
2220 | 2220 | ||
2221 | if (nla_put(skb, NDA_DST, tbl->key_len, pn->key)) | 2221 | if (nla_put(skb, NDA_DST, tbl->key_len, pn->key)) |
@@ -2333,7 +2333,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, | |||
2333 | if (h > s_h) | 2333 | if (h > s_h) |
2334 | s_idx = 0; | 2334 | s_idx = 0; |
2335 | for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { | 2335 | for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { |
2336 | if (dev_net(n->dev) != net) | 2336 | if (pneigh_net(n) != net) |
2337 | continue; | 2337 | continue; |
2338 | if (idx < s_idx) | 2338 | if (idx < s_idx) |
2339 | goto next; | 2339 | goto next; |
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index 6441f47b1a8f..2e4df84c34a1 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c | |||
@@ -56,7 +56,7 @@ static void cgrp_css_free(struct cgroup_subsys_state *css) | |||
56 | kfree(css_cls_state(css)); | 56 | kfree(css_cls_state(css)); |
57 | } | 57 | } |
58 | 58 | ||
59 | static int update_classid(const void *v, struct file *file, unsigned n) | 59 | static int update_classid_sock(const void *v, struct file *file, unsigned n) |
60 | { | 60 | { |
61 | int err; | 61 | int err; |
62 | struct socket *sock = sock_from_file(file, &err); | 62 | struct socket *sock = sock_from_file(file, &err); |
@@ -67,18 +67,25 @@ static int update_classid(const void *v, struct file *file, unsigned n) | |||
67 | return 0; | 67 | return 0; |
68 | } | 68 | } |
69 | 69 | ||
70 | static void cgrp_attach(struct cgroup_subsys_state *css, | 70 | static void update_classid(struct cgroup_subsys_state *css, void *v) |
71 | struct cgroup_taskset *tset) | ||
72 | { | 71 | { |
73 | struct cgroup_cls_state *cs = css_cls_state(css); | 72 | struct css_task_iter it; |
74 | void *v = (void *)(unsigned long)cs->classid; | ||
75 | struct task_struct *p; | 73 | struct task_struct *p; |
76 | 74 | ||
77 | cgroup_taskset_for_each(p, tset) { | 75 | css_task_iter_start(css, &it); |
76 | while ((p = css_task_iter_next(&it))) { | ||
78 | task_lock(p); | 77 | task_lock(p); |
79 | iterate_fd(p->files, 0, update_classid, v); | 78 | iterate_fd(p->files, 0, update_classid_sock, v); |
80 | task_unlock(p); | 79 | task_unlock(p); |
81 | } | 80 | } |
81 | css_task_iter_end(&it); | ||
82 | } | ||
83 | |||
84 | static void cgrp_attach(struct cgroup_subsys_state *css, | ||
85 | struct cgroup_taskset *tset) | ||
86 | { | ||
87 | update_classid(css, | ||
88 | (void *)(unsigned long)css_cls_state(css)->classid); | ||
82 | } | 89 | } |
83 | 90 | ||
84 | static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) | 91 | static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) |
@@ -89,8 +96,11 @@ static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) | |||
89 | static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft, | 96 | static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft, |
90 | u64 value) | 97 | u64 value) |
91 | { | 98 | { |
92 | css_cls_state(css)->classid = (u32) value; | 99 | struct cgroup_cls_state *cs = css_cls_state(css); |
100 | |||
101 | cs->classid = (u32)value; | ||
93 | 102 | ||
103 | update_classid(css, (void *)(unsigned long)cs->classid); | ||
94 | return 0; | 104 | return 0; |
95 | } | 105 | } |
96 | 106 | ||
diff --git a/net/core/scm.c b/net/core/scm.c index 3b6899b7d810..8a1741b14302 100644 --- a/net/core/scm.c +++ b/net/core/scm.c | |||
@@ -305,6 +305,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) | |||
305 | err = put_user(cmlen, &cm->cmsg_len); | 305 | err = put_user(cmlen, &cm->cmsg_len); |
306 | if (!err) { | 306 | if (!err) { |
307 | cmlen = CMSG_SPACE(i*sizeof(int)); | 307 | cmlen = CMSG_SPACE(i*sizeof(int)); |
308 | if (msg->msg_controllen < cmlen) | ||
309 | cmlen = msg->msg_controllen; | ||
308 | msg->msg_control += cmlen; | 310 | msg->msg_control += cmlen; |
309 | msg->msg_controllen -= cmlen; | 311 | msg->msg_controllen -= cmlen; |
310 | } | 312 | } |
diff --git a/net/core/sock.c b/net/core/sock.c index 1e4dd54bfb5a..e31dfcee1729 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -1530,7 +1530,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) | |||
1530 | skb_queue_head_init(&newsk->sk_receive_queue); | 1530 | skb_queue_head_init(&newsk->sk_receive_queue); |
1531 | skb_queue_head_init(&newsk->sk_write_queue); | 1531 | skb_queue_head_init(&newsk->sk_write_queue); |
1532 | 1532 | ||
1533 | spin_lock_init(&newsk->sk_dst_lock); | ||
1534 | rwlock_init(&newsk->sk_callback_lock); | 1533 | rwlock_init(&newsk->sk_callback_lock); |
1535 | lockdep_set_class_and_name(&newsk->sk_callback_lock, | 1534 | lockdep_set_class_and_name(&newsk->sk_callback_lock, |
1536 | af_callback_keys + newsk->sk_family, | 1535 | af_callback_keys + newsk->sk_family, |
@@ -1607,7 +1606,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) | |||
1607 | { | 1606 | { |
1608 | u32 max_segs = 1; | 1607 | u32 max_segs = 1; |
1609 | 1608 | ||
1610 | __sk_dst_set(sk, dst); | 1609 | sk_dst_set(sk, dst); |
1611 | sk->sk_route_caps = dst->dev->features; | 1610 | sk->sk_route_caps = dst->dev->features; |
1612 | if (sk->sk_route_caps & NETIF_F_GSO) | 1611 | if (sk->sk_route_caps & NETIF_F_GSO) |
1613 | sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; | 1612 | sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; |
@@ -1815,7 +1814,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo) | |||
1815 | { | 1814 | { |
1816 | DEFINE_WAIT(wait); | 1815 | DEFINE_WAIT(wait); |
1817 | 1816 | ||
1818 | clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 1817 | sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
1819 | for (;;) { | 1818 | for (;;) { |
1820 | if (!timeo) | 1819 | if (!timeo) |
1821 | break; | 1820 | break; |
@@ -1861,7 +1860,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, | |||
1861 | if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf) | 1860 | if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf) |
1862 | break; | 1861 | break; |
1863 | 1862 | ||
1864 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 1863 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
1865 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 1864 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
1866 | err = -EAGAIN; | 1865 | err = -EAGAIN; |
1867 | if (!timeo) | 1866 | if (!timeo) |
@@ -2048,9 +2047,9 @@ int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb) | |||
2048 | DEFINE_WAIT(wait); | 2047 | DEFINE_WAIT(wait); |
2049 | 2048 | ||
2050 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 2049 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
2051 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2050 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
2052 | rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb); | 2051 | rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb); |
2053 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2052 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
2054 | finish_wait(sk_sleep(sk), &wait); | 2053 | finish_wait(sk_sleep(sk), &wait); |
2055 | return rc; | 2054 | return rc; |
2056 | } | 2055 | } |
@@ -2388,7 +2387,6 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
2388 | } else | 2387 | } else |
2389 | sk->sk_wq = NULL; | 2388 | sk->sk_wq = NULL; |
2390 | 2389 | ||
2391 | spin_lock_init(&sk->sk_dst_lock); | ||
2392 | rwlock_init(&sk->sk_callback_lock); | 2390 | rwlock_init(&sk->sk_callback_lock); |
2393 | lockdep_set_class_and_name(&sk->sk_callback_lock, | 2391 | lockdep_set_class_and_name(&sk->sk_callback_lock, |
2394 | af_callback_keys + sk->sk_family, | 2392 | af_callback_keys + sk->sk_family, |
diff --git a/net/core/stream.c b/net/core/stream.c index d70f77a0c889..b96f7a79e544 100644 --- a/net/core/stream.c +++ b/net/core/stream.c | |||
@@ -39,7 +39,7 @@ void sk_stream_write_space(struct sock *sk) | |||
39 | wake_up_interruptible_poll(&wq->wait, POLLOUT | | 39 | wake_up_interruptible_poll(&wq->wait, POLLOUT | |
40 | POLLWRNORM | POLLWRBAND); | 40 | POLLWRNORM | POLLWRBAND); |
41 | if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) | 41 | if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) |
42 | sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); | 42 | sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); |
43 | rcu_read_unlock(); | 43 | rcu_read_unlock(); |
44 | } | 44 | } |
45 | } | 45 | } |
@@ -126,7 +126,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) | |||
126 | current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2; | 126 | current_timeo = vm_wait = (prandom_u32() % (HZ / 5)) + 2; |
127 | 127 | ||
128 | while (1) { | 128 | while (1) { |
129 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 129 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
130 | 130 | ||
131 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 131 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
132 | 132 | ||
@@ -139,7 +139,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p) | |||
139 | } | 139 | } |
140 | if (signal_pending(current)) | 140 | if (signal_pending(current)) |
141 | goto do_interrupted; | 141 | goto do_interrupted; |
142 | clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 142 | sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
143 | if (sk_stream_memory_free(sk) && !vm_wait) | 143 | if (sk_stream_memory_free(sk) && !vm_wait) |
144 | break; | 144 | break; |
145 | 145 | ||
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index db5fc2440a23..9c6d0508e63a 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -202,7 +202,9 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req | |||
202 | security_req_classify_flow(req, flowi6_to_flowi(&fl6)); | 202 | security_req_classify_flow(req, flowi6_to_flowi(&fl6)); |
203 | 203 | ||
204 | 204 | ||
205 | final_p = fl6_update_dst(&fl6, np->opt, &final); | 205 | rcu_read_lock(); |
206 | final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final); | ||
207 | rcu_read_unlock(); | ||
206 | 208 | ||
207 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); | 209 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
208 | if (IS_ERR(dst)) { | 210 | if (IS_ERR(dst)) { |
@@ -219,7 +221,10 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req | |||
219 | &ireq->ir_v6_loc_addr, | 221 | &ireq->ir_v6_loc_addr, |
220 | &ireq->ir_v6_rmt_addr); | 222 | &ireq->ir_v6_rmt_addr); |
221 | fl6.daddr = ireq->ir_v6_rmt_addr; | 223 | fl6.daddr = ireq->ir_v6_rmt_addr; |
222 | err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); | 224 | rcu_read_lock(); |
225 | err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt), | ||
226 | np->tclass); | ||
227 | rcu_read_unlock(); | ||
223 | err = net_xmit_eval(err); | 228 | err = net_xmit_eval(err); |
224 | } | 229 | } |
225 | 230 | ||
@@ -387,6 +392,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, | |||
387 | struct inet_request_sock *ireq = inet_rsk(req); | 392 | struct inet_request_sock *ireq = inet_rsk(req); |
388 | struct ipv6_pinfo *newnp; | 393 | struct ipv6_pinfo *newnp; |
389 | const struct ipv6_pinfo *np = inet6_sk(sk); | 394 | const struct ipv6_pinfo *np = inet6_sk(sk); |
395 | struct ipv6_txoptions *opt; | ||
390 | struct inet_sock *newinet; | 396 | struct inet_sock *newinet; |
391 | struct dccp6_sock *newdp6; | 397 | struct dccp6_sock *newdp6; |
392 | struct sock *newsk; | 398 | struct sock *newsk; |
@@ -453,7 +459,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, | |||
453 | * comment in that function for the gory details. -acme | 459 | * comment in that function for the gory details. -acme |
454 | */ | 460 | */ |
455 | 461 | ||
456 | __ip6_dst_store(newsk, dst, NULL, NULL); | 462 | ip6_dst_store(newsk, dst, NULL, NULL); |
457 | newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM | | 463 | newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM | |
458 | NETIF_F_TSO); | 464 | NETIF_F_TSO); |
459 | newdp6 = (struct dccp6_sock *)newsk; | 465 | newdp6 = (struct dccp6_sock *)newsk; |
@@ -488,13 +494,15 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk, | |||
488 | * Yes, keeping reference count would be much more clever, but we make | 494 | * Yes, keeping reference count would be much more clever, but we make |
489 | * one more one thing there: reattach optmem to newsk. | 495 | * one more one thing there: reattach optmem to newsk. |
490 | */ | 496 | */ |
491 | if (np->opt != NULL) | 497 | opt = rcu_dereference(np->opt); |
492 | newnp->opt = ipv6_dup_options(newsk, np->opt); | 498 | if (opt) { |
493 | 499 | opt = ipv6_dup_options(newsk, opt); | |
500 | RCU_INIT_POINTER(newnp->opt, opt); | ||
501 | } | ||
494 | inet_csk(newsk)->icsk_ext_hdr_len = 0; | 502 | inet_csk(newsk)->icsk_ext_hdr_len = 0; |
495 | if (newnp->opt != NULL) | 503 | if (opt) |
496 | inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + | 504 | inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen + |
497 | newnp->opt->opt_flen); | 505 | opt->opt_flen; |
498 | 506 | ||
499 | dccp_sync_mss(newsk, dst_mtu(dst)); | 507 | dccp_sync_mss(newsk, dst_mtu(dst)); |
500 | 508 | ||
@@ -757,6 +765,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
757 | struct ipv6_pinfo *np = inet6_sk(sk); | 765 | struct ipv6_pinfo *np = inet6_sk(sk); |
758 | struct dccp_sock *dp = dccp_sk(sk); | 766 | struct dccp_sock *dp = dccp_sk(sk); |
759 | struct in6_addr *saddr = NULL, *final_p, final; | 767 | struct in6_addr *saddr = NULL, *final_p, final; |
768 | struct ipv6_txoptions *opt; | ||
760 | struct flowi6 fl6; | 769 | struct flowi6 fl6; |
761 | struct dst_entry *dst; | 770 | struct dst_entry *dst; |
762 | int addr_type; | 771 | int addr_type; |
@@ -856,7 +865,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
856 | fl6.fl6_sport = inet->inet_sport; | 865 | fl6.fl6_sport = inet->inet_sport; |
857 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 866 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
858 | 867 | ||
859 | final_p = fl6_update_dst(&fl6, np->opt, &final); | 868 | opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); |
869 | final_p = fl6_update_dst(&fl6, opt, &final); | ||
860 | 870 | ||
861 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); | 871 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
862 | if (IS_ERR(dst)) { | 872 | if (IS_ERR(dst)) { |
@@ -873,12 +883,11 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
873 | np->saddr = *saddr; | 883 | np->saddr = *saddr; |
874 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; | 884 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; |
875 | 885 | ||
876 | __ip6_dst_store(sk, dst, NULL, NULL); | 886 | ip6_dst_store(sk, dst, NULL, NULL); |
877 | 887 | ||
878 | icsk->icsk_ext_hdr_len = 0; | 888 | icsk->icsk_ext_hdr_len = 0; |
879 | if (np->opt != NULL) | 889 | if (opt) |
880 | icsk->icsk_ext_hdr_len = (np->opt->opt_flen + | 890 | icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen; |
881 | np->opt->opt_nflen); | ||
882 | 891 | ||
883 | inet->inet_dport = usin->sin6_port; | 892 | inet->inet_dport = usin->sin6_port; |
884 | 893 | ||
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index b5cf13a28009..41e65804ddf5 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -339,8 +339,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock, | |||
339 | if (sk_stream_is_writeable(sk)) { | 339 | if (sk_stream_is_writeable(sk)) { |
340 | mask |= POLLOUT | POLLWRNORM; | 340 | mask |= POLLOUT | POLLWRNORM; |
341 | } else { /* send SIGIO later */ | 341 | } else { /* send SIGIO later */ |
342 | set_bit(SOCK_ASYNC_NOSPACE, | 342 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
343 | &sk->sk_socket->flags); | ||
344 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 343 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
345 | 344 | ||
346 | /* Race breaker. If space is freed after | 345 | /* Race breaker. If space is freed after |
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 675cf94e04f8..eebf5ac8ce18 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c | |||
@@ -1747,9 +1747,9 @@ static int dn_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, | |||
1747 | } | 1747 | } |
1748 | 1748 | ||
1749 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 1749 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
1750 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1750 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
1751 | sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); | 1751 | sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); |
1752 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 1752 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
1753 | finish_wait(sk_sleep(sk), &wait); | 1753 | finish_wait(sk_sleep(sk), &wait); |
1754 | } | 1754 | } |
1755 | 1755 | ||
@@ -2004,10 +2004,10 @@ static int dn_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) | |||
2004 | } | 2004 | } |
2005 | 2005 | ||
2006 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 2006 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
2007 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2007 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
2008 | sk_wait_event(sk, &timeo, | 2008 | sk_wait_event(sk, &timeo, |
2009 | !dn_queue_too_long(scp, queue, flags)); | 2009 | !dn_queue_too_long(scp, queue, flags)); |
2010 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2010 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
2011 | finish_wait(sk_sleep(sk), &wait); | 2011 | finish_wait(sk_sleep(sk), &wait); |
2012 | continue; | 2012 | continue; |
2013 | } | 2013 | } |
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c index 4677b6fa6dda..ecc28cff08ab 100644 --- a/net/dns_resolver/dns_query.c +++ b/net/dns_resolver/dns_query.c | |||
@@ -67,7 +67,7 @@ | |||
67 | * Returns the size of the result on success, -ve error code otherwise. | 67 | * Returns the size of the result on success, -ve error code otherwise. |
68 | */ | 68 | */ |
69 | int dns_query(const char *type, const char *name, size_t namelen, | 69 | int dns_query(const char *type, const char *name, size_t namelen, |
70 | const char *options, char **_result, time_t *_expiry) | 70 | const char *options, char **_result, time64_t *_expiry) |
71 | { | 71 | { |
72 | struct key *rkey; | 72 | struct key *rkey; |
73 | const struct user_key_payload *upayload; | 73 | const struct user_key_payload *upayload; |
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index 35a9788bb3ae..c7d1adca30d8 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c | |||
@@ -312,7 +312,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master, u8 type) | |||
312 | return; | 312 | return; |
313 | 313 | ||
314 | out: | 314 | out: |
315 | WARN_ON_ONCE("HSR: Could not send supervision frame\n"); | 315 | WARN_ONCE(1, "HSR: Could not send supervision frame\n"); |
316 | kfree_skb(skb); | 316 | kfree_skb(skb); |
317 | } | 317 | } |
318 | 318 | ||
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 6baf36e11808..05e4cba14162 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -2126,7 +2126,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) | |||
2126 | ASSERT_RTNL(); | 2126 | ASSERT_RTNL(); |
2127 | 2127 | ||
2128 | in_dev = ip_mc_find_dev(net, imr); | 2128 | in_dev = ip_mc_find_dev(net, imr); |
2129 | if (!in_dev) { | 2129 | if (!imr->imr_ifindex && !imr->imr_address.s_addr && !in_dev) { |
2130 | ret = -ENODEV; | 2130 | ret = -ENODEV; |
2131 | goto out; | 2131 | goto out; |
2132 | } | 2132 | } |
@@ -2147,7 +2147,8 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr) | |||
2147 | 2147 | ||
2148 | *imlp = iml->next_rcu; | 2148 | *imlp = iml->next_rcu; |
2149 | 2149 | ||
2150 | ip_mc_dec_group(in_dev, group); | 2150 | if (in_dev) |
2151 | ip_mc_dec_group(in_dev, group); | ||
2151 | 2152 | ||
2152 | /* decrease mem now to avoid the memleak warning */ | 2153 | /* decrease mem now to avoid the memleak warning */ |
2153 | atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); | 2154 | atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 92dd4b74d513..c3a38353f5dc 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -134,7 +134,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, | |||
134 | struct mfc_cache *c, struct rtmsg *rtm); | 134 | struct mfc_cache *c, struct rtmsg *rtm); |
135 | static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc, | 135 | static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc, |
136 | int cmd); | 136 | int cmd); |
137 | static void mroute_clean_tables(struct mr_table *mrt); | 137 | static void mroute_clean_tables(struct mr_table *mrt, bool all); |
138 | static void ipmr_expire_process(unsigned long arg); | 138 | static void ipmr_expire_process(unsigned long arg); |
139 | 139 | ||
140 | #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES | 140 | #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES |
@@ -350,7 +350,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) | |||
350 | static void ipmr_free_table(struct mr_table *mrt) | 350 | static void ipmr_free_table(struct mr_table *mrt) |
351 | { | 351 | { |
352 | del_timer_sync(&mrt->ipmr_expire_timer); | 352 | del_timer_sync(&mrt->ipmr_expire_timer); |
353 | mroute_clean_tables(mrt); | 353 | mroute_clean_tables(mrt, true); |
354 | kfree(mrt); | 354 | kfree(mrt); |
355 | } | 355 | } |
356 | 356 | ||
@@ -441,10 +441,6 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) | |||
441 | return dev; | 441 | return dev; |
442 | 442 | ||
443 | failure: | 443 | failure: |
444 | /* allow the register to be completed before unregistering. */ | ||
445 | rtnl_unlock(); | ||
446 | rtnl_lock(); | ||
447 | |||
448 | unregister_netdevice(dev); | 444 | unregister_netdevice(dev); |
449 | return NULL; | 445 | return NULL; |
450 | } | 446 | } |
@@ -540,10 +536,6 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) | |||
540 | return dev; | 536 | return dev; |
541 | 537 | ||
542 | failure: | 538 | failure: |
543 | /* allow the register to be completed before unregistering. */ | ||
544 | rtnl_unlock(); | ||
545 | rtnl_lock(); | ||
546 | |||
547 | unregister_netdevice(dev); | 539 | unregister_netdevice(dev); |
548 | return NULL; | 540 | return NULL; |
549 | } | 541 | } |
@@ -1208,7 +1200,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt, | |||
1208 | * Close the multicast socket, and clear the vif tables etc | 1200 | * Close the multicast socket, and clear the vif tables etc |
1209 | */ | 1201 | */ |
1210 | 1202 | ||
1211 | static void mroute_clean_tables(struct mr_table *mrt) | 1203 | static void mroute_clean_tables(struct mr_table *mrt, bool all) |
1212 | { | 1204 | { |
1213 | int i; | 1205 | int i; |
1214 | LIST_HEAD(list); | 1206 | LIST_HEAD(list); |
@@ -1217,8 +1209,9 @@ static void mroute_clean_tables(struct mr_table *mrt) | |||
1217 | /* Shut down all active vif entries */ | 1209 | /* Shut down all active vif entries */ |
1218 | 1210 | ||
1219 | for (i = 0; i < mrt->maxvif; i++) { | 1211 | for (i = 0; i < mrt->maxvif; i++) { |
1220 | if (!(mrt->vif_table[i].flags & VIFF_STATIC)) | 1212 | if (!all && (mrt->vif_table[i].flags & VIFF_STATIC)) |
1221 | vif_delete(mrt, i, 0, &list); | 1213 | continue; |
1214 | vif_delete(mrt, i, 0, &list); | ||
1222 | } | 1215 | } |
1223 | unregister_netdevice_many(&list); | 1216 | unregister_netdevice_many(&list); |
1224 | 1217 | ||
@@ -1226,7 +1219,7 @@ static void mroute_clean_tables(struct mr_table *mrt) | |||
1226 | 1219 | ||
1227 | for (i = 0; i < MFC_LINES; i++) { | 1220 | for (i = 0; i < MFC_LINES; i++) { |
1228 | list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { | 1221 | list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) { |
1229 | if (c->mfc_flags & MFC_STATIC) | 1222 | if (!all && (c->mfc_flags & MFC_STATIC)) |
1230 | continue; | 1223 | continue; |
1231 | list_del_rcu(&c->list); | 1224 | list_del_rcu(&c->list); |
1232 | mroute_netlink_event(mrt, c, RTM_DELROUTE); | 1225 | mroute_netlink_event(mrt, c, RTM_DELROUTE); |
@@ -1261,7 +1254,7 @@ static void mrtsock_destruct(struct sock *sk) | |||
1261 | NETCONFA_IFINDEX_ALL, | 1254 | NETCONFA_IFINDEX_ALL, |
1262 | net->ipv4.devconf_all); | 1255 | net->ipv4.devconf_all); |
1263 | RCU_INIT_POINTER(mrt->mroute_sk, NULL); | 1256 | RCU_INIT_POINTER(mrt->mroute_sk, NULL); |
1264 | mroute_clean_tables(mrt); | 1257 | mroute_clean_tables(mrt, false); |
1265 | } | 1258 | } |
1266 | } | 1259 | } |
1267 | rtnl_unlock(); | 1260 | rtnl_unlock(); |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index c1728771cf89..c82cca18c90f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -517,8 +517,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
517 | if (sk_stream_is_writeable(sk)) { | 517 | if (sk_stream_is_writeable(sk)) { |
518 | mask |= POLLOUT | POLLWRNORM; | 518 | mask |= POLLOUT | POLLWRNORM; |
519 | } else { /* send SIGIO later */ | 519 | } else { /* send SIGIO later */ |
520 | set_bit(SOCK_ASYNC_NOSPACE, | 520 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
521 | &sk->sk_socket->flags); | ||
522 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); | 521 | set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); |
523 | 522 | ||
524 | /* Race breaker. If space is freed after | 523 | /* Race breaker. If space is freed after |
@@ -906,7 +905,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, | |||
906 | goto out_err; | 905 | goto out_err; |
907 | } | 906 | } |
908 | 907 | ||
909 | clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 908 | sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
910 | 909 | ||
911 | mss_now = tcp_send_mss(sk, &size_goal, flags); | 910 | mss_now = tcp_send_mss(sk, &size_goal, flags); |
912 | copied = 0; | 911 | copied = 0; |
@@ -1134,7 +1133,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) | |||
1134 | } | 1133 | } |
1135 | 1134 | ||
1136 | /* This should be in poll */ | 1135 | /* This should be in poll */ |
1137 | clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 1136 | sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
1138 | 1137 | ||
1139 | mss_now = tcp_send_mss(sk, &size_goal, flags); | 1138 | mss_now = tcp_send_mss(sk, &size_goal, flags); |
1140 | 1139 | ||
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index fdd88c3803a6..2d656eef7f8e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -4481,19 +4481,34 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int | |||
4481 | int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) | 4481 | int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) |
4482 | { | 4482 | { |
4483 | struct sk_buff *skb; | 4483 | struct sk_buff *skb; |
4484 | int err = -ENOMEM; | ||
4485 | int data_len = 0; | ||
4484 | bool fragstolen; | 4486 | bool fragstolen; |
4485 | 4487 | ||
4486 | if (size == 0) | 4488 | if (size == 0) |
4487 | return 0; | 4489 | return 0; |
4488 | 4490 | ||
4489 | skb = alloc_skb(size, sk->sk_allocation); | 4491 | if (size > PAGE_SIZE) { |
4492 | int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS); | ||
4493 | |||
4494 | data_len = npages << PAGE_SHIFT; | ||
4495 | size = data_len + (size & ~PAGE_MASK); | ||
4496 | } | ||
4497 | skb = alloc_skb_with_frags(size - data_len, data_len, | ||
4498 | PAGE_ALLOC_COSTLY_ORDER, | ||
4499 | &err, sk->sk_allocation); | ||
4490 | if (!skb) | 4500 | if (!skb) |
4491 | goto err; | 4501 | goto err; |
4492 | 4502 | ||
4503 | skb_put(skb, size - data_len); | ||
4504 | skb->data_len = data_len; | ||
4505 | skb->len = size; | ||
4506 | |||
4493 | if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) | 4507 | if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) |
4494 | goto err_free; | 4508 | goto err_free; |
4495 | 4509 | ||
4496 | if (memcpy_from_msg(skb_put(skb, size), msg, size)) | 4510 | err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size); |
4511 | if (err) | ||
4497 | goto err_free; | 4512 | goto err_free; |
4498 | 4513 | ||
4499 | TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; | 4514 | TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt; |
@@ -4509,7 +4524,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size) | |||
4509 | err_free: | 4524 | err_free: |
4510 | kfree_skb(skb); | 4525 | kfree_skb(skb); |
4511 | err: | 4526 | err: |
4512 | return -ENOMEM; | 4527 | return err; |
4528 | |||
4513 | } | 4529 | } |
4514 | 4530 | ||
4515 | static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) | 4531 | static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) |
@@ -5667,6 +5683,7 @@ discard: | |||
5667 | } | 5683 | } |
5668 | 5684 | ||
5669 | tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; | 5685 | tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1; |
5686 | tp->copied_seq = tp->rcv_nxt; | ||
5670 | tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; | 5687 | tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; |
5671 | 5688 | ||
5672 | /* RFC1323: The window in SYN & SYN/ACK segments is | 5689 | /* RFC1323: The window in SYN & SYN/ACK segments is |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index ba09016d1bfd..db003438aaf5 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -921,7 +921,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, | |||
921 | } | 921 | } |
922 | 922 | ||
923 | md5sig = rcu_dereference_protected(tp->md5sig_info, | 923 | md5sig = rcu_dereference_protected(tp->md5sig_info, |
924 | sock_owned_by_user(sk)); | 924 | sock_owned_by_user(sk) || |
925 | lockdep_is_held(&sk->sk_lock.slock)); | ||
925 | if (!md5sig) { | 926 | if (!md5sig) { |
926 | md5sig = kmalloc(sizeof(*md5sig), gfp); | 927 | md5sig = kmalloc(sizeof(*md5sig), gfp); |
927 | if (!md5sig) | 928 | if (!md5sig) |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index c9c716a483e4..193ba1fa8a9a 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -168,7 +168,7 @@ static int tcp_write_timeout(struct sock *sk) | |||
168 | dst_negative_advice(sk); | 168 | dst_negative_advice(sk); |
169 | if (tp->syn_fastopen || tp->syn_data) | 169 | if (tp->syn_fastopen || tp->syn_data) |
170 | tcp_fastopen_cache_set(sk, 0, NULL, true, 0); | 170 | tcp_fastopen_cache_set(sk, 0, NULL, true, 0); |
171 | if (tp->syn_data) | 171 | if (tp->syn_data && icsk->icsk_retransmits == 1) |
172 | NET_INC_STATS_BH(sock_net(sk), | 172 | NET_INC_STATS_BH(sock_net(sk), |
173 | LINUX_MIB_TCPFASTOPENACTIVEFAIL); | 173 | LINUX_MIB_TCPFASTOPENACTIVEFAIL); |
174 | } | 174 | } |
@@ -176,6 +176,18 @@ static int tcp_write_timeout(struct sock *sk) | |||
176 | syn_set = true; | 176 | syn_set = true; |
177 | } else { | 177 | } else { |
178 | if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) { | 178 | if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) { |
179 | /* Some middle-boxes may black-hole Fast Open _after_ | ||
180 | * the handshake. Therefore we conservatively disable | ||
181 | * Fast Open on this path on recurring timeouts with | ||
182 | * few or zero bytes acked after Fast Open. | ||
183 | */ | ||
184 | if (tp->syn_data_acked && | ||
185 | tp->bytes_acked <= tp->rx_opt.mss_clamp) { | ||
186 | tcp_fastopen_cache_set(sk, 0, NULL, true, 0); | ||
187 | if (icsk->icsk_retransmits == sysctl_tcp_retries1) | ||
188 | NET_INC_STATS_BH(sock_net(sk), | ||
189 | LINUX_MIB_TCPFASTOPENACTIVEFAIL); | ||
190 | } | ||
179 | /* Black hole detection */ | 191 | /* Black hole detection */ |
180 | tcp_mtu_probing(icsk, sk); | 192 | tcp_mtu_probing(icsk, sk); |
181 | 193 | ||
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 24ec14f9825c..0c7b0e61b917 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -100,7 +100,6 @@ | |||
100 | #include <linux/slab.h> | 100 | #include <linux/slab.h> |
101 | #include <net/tcp_states.h> | 101 | #include <net/tcp_states.h> |
102 | #include <linux/skbuff.h> | 102 | #include <linux/skbuff.h> |
103 | #include <linux/netdevice.h> | ||
104 | #include <linux/proc_fs.h> | 103 | #include <linux/proc_fs.h> |
105 | #include <linux/seq_file.h> | 104 | #include <linux/seq_file.h> |
106 | #include <net/net_namespace.h> | 105 | #include <net/net_namespace.h> |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index d84742f003a9..61f26851655c 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -3642,7 +3642,7 @@ static void addrconf_dad_work(struct work_struct *w) | |||
3642 | 3642 | ||
3643 | /* send a neighbour solicitation for our addr */ | 3643 | /* send a neighbour solicitation for our addr */ |
3644 | addrconf_addr_solict_mult(&ifp->addr, &mcaddr); | 3644 | addrconf_addr_solict_mult(&ifp->addr, &mcaddr); |
3645 | ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any, NULL); | 3645 | ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any); |
3646 | out: | 3646 | out: |
3647 | in6_ifa_put(ifp); | 3647 | in6_ifa_put(ifp); |
3648 | rtnl_unlock(); | 3648 | rtnl_unlock(); |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 44bb66bde0e2..8ec0df75f1c4 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -428,9 +428,11 @@ void inet6_destroy_sock(struct sock *sk) | |||
428 | 428 | ||
429 | /* Free tx options */ | 429 | /* Free tx options */ |
430 | 430 | ||
431 | opt = xchg(&np->opt, NULL); | 431 | opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL); |
432 | if (opt) | 432 | if (opt) { |
433 | sock_kfree_s(sk, opt, opt->tot_len); | 433 | atomic_sub(opt->tot_len, &sk->sk_omem_alloc); |
434 | txopt_put(opt); | ||
435 | } | ||
434 | } | 436 | } |
435 | EXPORT_SYMBOL_GPL(inet6_destroy_sock); | 437 | EXPORT_SYMBOL_GPL(inet6_destroy_sock); |
436 | 438 | ||
@@ -659,7 +661,10 @@ int inet6_sk_rebuild_header(struct sock *sk) | |||
659 | fl6.fl6_sport = inet->inet_sport; | 661 | fl6.fl6_sport = inet->inet_sport; |
660 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 662 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
661 | 663 | ||
662 | final_p = fl6_update_dst(&fl6, np->opt, &final); | 664 | rcu_read_lock(); |
665 | final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), | ||
666 | &final); | ||
667 | rcu_read_unlock(); | ||
663 | 668 | ||
664 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); | 669 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
665 | if (IS_ERR(dst)) { | 670 | if (IS_ERR(dst)) { |
@@ -668,7 +673,7 @@ int inet6_sk_rebuild_header(struct sock *sk) | |||
668 | return PTR_ERR(dst); | 673 | return PTR_ERR(dst); |
669 | } | 674 | } |
670 | 675 | ||
671 | __ip6_dst_store(sk, dst, NULL, NULL); | 676 | ip6_dst_store(sk, dst, NULL, NULL); |
672 | } | 677 | } |
673 | 678 | ||
674 | return 0; | 679 | return 0; |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index d70b0238f468..517c55b01ba8 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -167,8 +167,10 @@ ipv4_connected: | |||
167 | 167 | ||
168 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 168 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
169 | 169 | ||
170 | opt = flowlabel ? flowlabel->opt : np->opt; | 170 | rcu_read_lock(); |
171 | opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt); | ||
171 | final_p = fl6_update_dst(&fl6, opt, &final); | 172 | final_p = fl6_update_dst(&fl6, opt, &final); |
173 | rcu_read_unlock(); | ||
172 | 174 | ||
173 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); | 175 | dst = ip6_dst_lookup_flow(sk, &fl6, final_p); |
174 | err = 0; | 176 | err = 0; |
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index ce203b0402be..ea7c4d64a00a 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c | |||
@@ -727,6 +727,7 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt) | |||
727 | *((char **)&opt2->dst1opt) += dif; | 727 | *((char **)&opt2->dst1opt) += dif; |
728 | if (opt2->srcrt) | 728 | if (opt2->srcrt) |
729 | *((char **)&opt2->srcrt) += dif; | 729 | *((char **)&opt2->srcrt) += dif; |
730 | atomic_set(&opt2->refcnt, 1); | ||
730 | } | 731 | } |
731 | return opt2; | 732 | return opt2; |
732 | } | 733 | } |
@@ -790,7 +791,7 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt, | |||
790 | return ERR_PTR(-ENOBUFS); | 791 | return ERR_PTR(-ENOBUFS); |
791 | 792 | ||
792 | memset(opt2, 0, tot_len); | 793 | memset(opt2, 0, tot_len); |
793 | 794 | atomic_set(&opt2->refcnt, 1); | |
794 | opt2->tot_len = tot_len; | 795 | opt2->tot_len = tot_len; |
795 | p = (char *)(opt2 + 1); | 796 | p = (char *)(opt2 + 1); |
796 | 797 | ||
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 36c5a98b0472..0a37ddc7af51 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -834,11 +834,6 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6, | |||
834 | security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); | 834 | security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); |
835 | } | 835 | } |
836 | 836 | ||
837 | /* | ||
838 | * Special lock-class for __icmpv6_sk: | ||
839 | */ | ||
840 | static struct lock_class_key icmpv6_socket_sk_dst_lock_key; | ||
841 | |||
842 | static int __net_init icmpv6_sk_init(struct net *net) | 837 | static int __net_init icmpv6_sk_init(struct net *net) |
843 | { | 838 | { |
844 | struct sock *sk; | 839 | struct sock *sk; |
@@ -860,15 +855,6 @@ static int __net_init icmpv6_sk_init(struct net *net) | |||
860 | 855 | ||
861 | net->ipv6.icmp_sk[i] = sk; | 856 | net->ipv6.icmp_sk[i] = sk; |
862 | 857 | ||
863 | /* | ||
864 | * Split off their lock-class, because sk->sk_dst_lock | ||
865 | * gets used from softirqs, which is safe for | ||
866 | * __icmpv6_sk (because those never get directly used | ||
867 | * via userspace syscalls), but unsafe for normal sockets. | ||
868 | */ | ||
869 | lockdep_set_class(&sk->sk_dst_lock, | ||
870 | &icmpv6_socket_sk_dst_lock_key); | ||
871 | |||
872 | /* Enough space for 2 64K ICMP packets, including | 858 | /* Enough space for 2 64K ICMP packets, including |
873 | * sk_buff struct overhead. | 859 | * sk_buff struct overhead. |
874 | */ | 860 | */ |
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index 5d1c7cee2cb2..a7ca2cde2ecb 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c | |||
@@ -78,7 +78,9 @@ struct dst_entry *inet6_csk_route_req(const struct sock *sk, | |||
78 | memset(fl6, 0, sizeof(*fl6)); | 78 | memset(fl6, 0, sizeof(*fl6)); |
79 | fl6->flowi6_proto = proto; | 79 | fl6->flowi6_proto = proto; |
80 | fl6->daddr = ireq->ir_v6_rmt_addr; | 80 | fl6->daddr = ireq->ir_v6_rmt_addr; |
81 | final_p = fl6_update_dst(fl6, np->opt, &final); | 81 | rcu_read_lock(); |
82 | final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); | ||
83 | rcu_read_unlock(); | ||
82 | fl6->saddr = ireq->ir_v6_loc_addr; | 84 | fl6->saddr = ireq->ir_v6_loc_addr; |
83 | fl6->flowi6_oif = ireq->ir_iif; | 85 | fl6->flowi6_oif = ireq->ir_iif; |
84 | fl6->flowi6_mark = ireq->ir_mark; | 86 | fl6->flowi6_mark = ireq->ir_mark; |
@@ -109,14 +111,6 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) | |||
109 | EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr); | 111 | EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr); |
110 | 112 | ||
111 | static inline | 113 | static inline |
112 | void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst, | ||
113 | const struct in6_addr *daddr, | ||
114 | const struct in6_addr *saddr) | ||
115 | { | ||
116 | __ip6_dst_store(sk, dst, daddr, saddr); | ||
117 | } | ||
118 | |||
119 | static inline | ||
120 | struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) | 114 | struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) |
121 | { | 115 | { |
122 | return __sk_dst_check(sk, cookie); | 116 | return __sk_dst_check(sk, cookie); |
@@ -142,14 +136,16 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk, | |||
142 | fl6->fl6_dport = inet->inet_dport; | 136 | fl6->fl6_dport = inet->inet_dport; |
143 | security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); | 137 | security_sk_classify_flow(sk, flowi6_to_flowi(fl6)); |
144 | 138 | ||
145 | final_p = fl6_update_dst(fl6, np->opt, &final); | 139 | rcu_read_lock(); |
140 | final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); | ||
141 | rcu_read_unlock(); | ||
146 | 142 | ||
147 | dst = __inet6_csk_dst_check(sk, np->dst_cookie); | 143 | dst = __inet6_csk_dst_check(sk, np->dst_cookie); |
148 | if (!dst) { | 144 | if (!dst) { |
149 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); | 145 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); |
150 | 146 | ||
151 | if (!IS_ERR(dst)) | 147 | if (!IS_ERR(dst)) |
152 | __inet6_csk_dst_store(sk, dst, NULL, NULL); | 148 | ip6_dst_store(sk, dst, NULL, NULL); |
153 | } | 149 | } |
154 | return dst; | 150 | return dst; |
155 | } | 151 | } |
@@ -175,7 +171,8 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused | |||
175 | /* Restore final destination back after routing done */ | 171 | /* Restore final destination back after routing done */ |
176 | fl6.daddr = sk->sk_v6_daddr; | 172 | fl6.daddr = sk->sk_v6_daddr; |
177 | 173 | ||
178 | res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); | 174 | res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt), |
175 | np->tclass); | ||
179 | rcu_read_unlock(); | 176 | rcu_read_unlock(); |
180 | return res; | 177 | return res; |
181 | } | 178 | } |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index eabffbb89795..137fca42aaa6 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -177,7 +177,7 @@ void ip6_tnl_dst_reset(struct ip6_tnl *t) | |||
177 | int i; | 177 | int i; |
178 | 178 | ||
179 | for_each_possible_cpu(i) | 179 | for_each_possible_cpu(i) |
180 | ip6_tnl_per_cpu_dst_set(raw_cpu_ptr(t->dst_cache), NULL); | 180 | ip6_tnl_per_cpu_dst_set(per_cpu_ptr(t->dst_cache, i), NULL); |
181 | } | 181 | } |
182 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset); | 182 | EXPORT_SYMBOL_GPL(ip6_tnl_dst_reset); |
183 | 183 | ||
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index ad19136086dd..a10e77103c88 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -118,7 +118,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc, | |||
118 | int cmd); | 118 | int cmd); |
119 | static int ip6mr_rtm_dumproute(struct sk_buff *skb, | 119 | static int ip6mr_rtm_dumproute(struct sk_buff *skb, |
120 | struct netlink_callback *cb); | 120 | struct netlink_callback *cb); |
121 | static void mroute_clean_tables(struct mr6_table *mrt); | 121 | static void mroute_clean_tables(struct mr6_table *mrt, bool all); |
122 | static void ipmr_expire_process(unsigned long arg); | 122 | static void ipmr_expire_process(unsigned long arg); |
123 | 123 | ||
124 | #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES | 124 | #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES |
@@ -334,7 +334,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id) | |||
334 | static void ip6mr_free_table(struct mr6_table *mrt) | 334 | static void ip6mr_free_table(struct mr6_table *mrt) |
335 | { | 335 | { |
336 | del_timer_sync(&mrt->ipmr_expire_timer); | 336 | del_timer_sync(&mrt->ipmr_expire_timer); |
337 | mroute_clean_tables(mrt); | 337 | mroute_clean_tables(mrt, true); |
338 | kfree(mrt); | 338 | kfree(mrt); |
339 | } | 339 | } |
340 | 340 | ||
@@ -765,10 +765,6 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt) | |||
765 | return dev; | 765 | return dev; |
766 | 766 | ||
767 | failure: | 767 | failure: |
768 | /* allow the register to be completed before unregistering. */ | ||
769 | rtnl_unlock(); | ||
770 | rtnl_lock(); | ||
771 | |||
772 | unregister_netdevice(dev); | 768 | unregister_netdevice(dev); |
773 | return NULL; | 769 | return NULL; |
774 | } | 770 | } |
@@ -1542,7 +1538,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt, | |||
1542 | * Close the multicast socket, and clear the vif tables etc | 1538 | * Close the multicast socket, and clear the vif tables etc |
1543 | */ | 1539 | */ |
1544 | 1540 | ||
1545 | static void mroute_clean_tables(struct mr6_table *mrt) | 1541 | static void mroute_clean_tables(struct mr6_table *mrt, bool all) |
1546 | { | 1542 | { |
1547 | int i; | 1543 | int i; |
1548 | LIST_HEAD(list); | 1544 | LIST_HEAD(list); |
@@ -1552,8 +1548,9 @@ static void mroute_clean_tables(struct mr6_table *mrt) | |||
1552 | * Shut down all active vif entries | 1548 | * Shut down all active vif entries |
1553 | */ | 1549 | */ |
1554 | for (i = 0; i < mrt->maxvif; i++) { | 1550 | for (i = 0; i < mrt->maxvif; i++) { |
1555 | if (!(mrt->vif6_table[i].flags & VIFF_STATIC)) | 1551 | if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC)) |
1556 | mif6_delete(mrt, i, &list); | 1552 | continue; |
1553 | mif6_delete(mrt, i, &list); | ||
1557 | } | 1554 | } |
1558 | unregister_netdevice_many(&list); | 1555 | unregister_netdevice_many(&list); |
1559 | 1556 | ||
@@ -1562,7 +1559,7 @@ static void mroute_clean_tables(struct mr6_table *mrt) | |||
1562 | */ | 1559 | */ |
1563 | for (i = 0; i < MFC6_LINES; i++) { | 1560 | for (i = 0; i < MFC6_LINES; i++) { |
1564 | list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) { | 1561 | list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) { |
1565 | if (c->mfc_flags & MFC_STATIC) | 1562 | if (!all && (c->mfc_flags & MFC_STATIC)) |
1566 | continue; | 1563 | continue; |
1567 | write_lock_bh(&mrt_lock); | 1564 | write_lock_bh(&mrt_lock); |
1568 | list_del(&c->list); | 1565 | list_del(&c->list); |
@@ -1625,7 +1622,7 @@ int ip6mr_sk_done(struct sock *sk) | |||
1625 | net->ipv6.devconf_all); | 1622 | net->ipv6.devconf_all); |
1626 | write_unlock_bh(&mrt_lock); | 1623 | write_unlock_bh(&mrt_lock); |
1627 | 1624 | ||
1628 | mroute_clean_tables(mrt); | 1625 | mroute_clean_tables(mrt, false); |
1629 | err = 0; | 1626 | err = 0; |
1630 | break; | 1627 | break; |
1631 | } | 1628 | } |
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 63e6956917c9..4449ad1f8114 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c | |||
@@ -111,7 +111,8 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk, | |||
111 | icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); | 111 | icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); |
112 | } | 112 | } |
113 | } | 113 | } |
114 | opt = xchg(&inet6_sk(sk)->opt, opt); | 114 | opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt, |
115 | opt); | ||
115 | sk_dst_reset(sk); | 116 | sk_dst_reset(sk); |
116 | 117 | ||
117 | return opt; | 118 | return opt; |
@@ -231,9 +232,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
231 | sk->sk_socket->ops = &inet_dgram_ops; | 232 | sk->sk_socket->ops = &inet_dgram_ops; |
232 | sk->sk_family = PF_INET; | 233 | sk->sk_family = PF_INET; |
233 | } | 234 | } |
234 | opt = xchg(&np->opt, NULL); | 235 | opt = xchg((__force struct ipv6_txoptions **)&np->opt, |
235 | if (opt) | 236 | NULL); |
236 | sock_kfree_s(sk, opt, opt->tot_len); | 237 | if (opt) { |
238 | atomic_sub(opt->tot_len, &sk->sk_omem_alloc); | ||
239 | txopt_put(opt); | ||
240 | } | ||
237 | pktopt = xchg(&np->pktoptions, NULL); | 241 | pktopt = xchg(&np->pktoptions, NULL); |
238 | kfree_skb(pktopt); | 242 | kfree_skb(pktopt); |
239 | 243 | ||
@@ -403,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
403 | if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) | 407 | if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW)) |
404 | break; | 408 | break; |
405 | 409 | ||
406 | opt = ipv6_renew_options(sk, np->opt, optname, | 410 | opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); |
411 | opt = ipv6_renew_options(sk, opt, optname, | ||
407 | (struct ipv6_opt_hdr __user *)optval, | 412 | (struct ipv6_opt_hdr __user *)optval, |
408 | optlen); | 413 | optlen); |
409 | if (IS_ERR(opt)) { | 414 | if (IS_ERR(opt)) { |
@@ -432,8 +437,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, | |||
432 | retv = 0; | 437 | retv = 0; |
433 | opt = ipv6_update_options(sk, opt); | 438 | opt = ipv6_update_options(sk, opt); |
434 | sticky_done: | 439 | sticky_done: |
435 | if (opt) | 440 | if (opt) { |
436 | sock_kfree_s(sk, opt, opt->tot_len); | 441 | atomic_sub(opt->tot_len, &sk->sk_omem_alloc); |
442 | txopt_put(opt); | ||
443 | } | ||
437 | break; | 444 | break; |
438 | } | 445 | } |
439 | 446 | ||
@@ -486,6 +493,7 @@ sticky_done: | |||
486 | break; | 493 | break; |
487 | 494 | ||
488 | memset(opt, 0, sizeof(*opt)); | 495 | memset(opt, 0, sizeof(*opt)); |
496 | atomic_set(&opt->refcnt, 1); | ||
489 | opt->tot_len = sizeof(*opt) + optlen; | 497 | opt->tot_len = sizeof(*opt) + optlen; |
490 | retv = -EFAULT; | 498 | retv = -EFAULT; |
491 | if (copy_from_user(opt+1, optval, optlen)) | 499 | if (copy_from_user(opt+1, optval, optlen)) |
@@ -502,8 +510,10 @@ update: | |||
502 | retv = 0; | 510 | retv = 0; |
503 | opt = ipv6_update_options(sk, opt); | 511 | opt = ipv6_update_options(sk, opt); |
504 | done: | 512 | done: |
505 | if (opt) | 513 | if (opt) { |
506 | sock_kfree_s(sk, opt, opt->tot_len); | 514 | atomic_sub(opt->tot_len, &sk->sk_omem_alloc); |
515 | txopt_put(opt); | ||
516 | } | ||
507 | break; | 517 | break; |
508 | } | 518 | } |
509 | case IPV6_UNICAST_HOPS: | 519 | case IPV6_UNICAST_HOPS: |
@@ -1110,10 +1120,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, | |||
1110 | case IPV6_RTHDR: | 1120 | case IPV6_RTHDR: |
1111 | case IPV6_DSTOPTS: | 1121 | case IPV6_DSTOPTS: |
1112 | { | 1122 | { |
1123 | struct ipv6_txoptions *opt; | ||
1113 | 1124 | ||
1114 | lock_sock(sk); | 1125 | lock_sock(sk); |
1115 | len = ipv6_getsockopt_sticky(sk, np->opt, | 1126 | opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); |
1116 | optname, optval, len); | 1127 | len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len); |
1117 | release_sock(sk); | 1128 | release_sock(sk); |
1118 | /* check if ipv6_getsockopt_sticky() returns err code */ | 1129 | /* check if ipv6_getsockopt_sticky() returns err code */ |
1119 | if (len < 0) | 1130 | if (len < 0) |
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 3e0f855e1bea..d6161e1c48c8 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c | |||
@@ -556,8 +556,7 @@ static void ndisc_send_unsol_na(struct net_device *dev) | |||
556 | } | 556 | } |
557 | 557 | ||
558 | void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, | 558 | void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, |
559 | const struct in6_addr *daddr, const struct in6_addr *saddr, | 559 | const struct in6_addr *daddr, const struct in6_addr *saddr) |
560 | struct sk_buff *oskb) | ||
561 | { | 560 | { |
562 | struct sk_buff *skb; | 561 | struct sk_buff *skb; |
563 | struct in6_addr addr_buf; | 562 | struct in6_addr addr_buf; |
@@ -593,9 +592,6 @@ void ndisc_send_ns(struct net_device *dev, const struct in6_addr *solicit, | |||
593 | ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR, | 592 | ndisc_fill_addr_option(skb, ND_OPT_SOURCE_LL_ADDR, |
594 | dev->dev_addr); | 593 | dev->dev_addr); |
595 | 594 | ||
596 | if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE) && oskb) | ||
597 | skb_dst_copy(skb, oskb); | ||
598 | |||
599 | ndisc_send_skb(skb, daddr, saddr); | 595 | ndisc_send_skb(skb, daddr, saddr); |
600 | } | 596 | } |
601 | 597 | ||
@@ -682,12 +678,12 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb) | |||
682 | "%s: trying to ucast probe in NUD_INVALID: %pI6\n", | 678 | "%s: trying to ucast probe in NUD_INVALID: %pI6\n", |
683 | __func__, target); | 679 | __func__, target); |
684 | } | 680 | } |
685 | ndisc_send_ns(dev, target, target, saddr, skb); | 681 | ndisc_send_ns(dev, target, target, saddr); |
686 | } else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) { | 682 | } else if ((probes -= NEIGH_VAR(neigh->parms, APP_PROBES)) < 0) { |
687 | neigh_app_ns(neigh); | 683 | neigh_app_ns(neigh); |
688 | } else { | 684 | } else { |
689 | addrconf_addr_solict_mult(target, &mcaddr); | 685 | addrconf_addr_solict_mult(target, &mcaddr); |
690 | ndisc_send_ns(dev, target, &mcaddr, saddr, skb); | 686 | ndisc_send_ns(dev, target, &mcaddr, saddr); |
691 | } | 687 | } |
692 | } | 688 | } |
693 | 689 | ||
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index d5efeb87350e..bab4441ed4e4 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c | |||
@@ -190,7 +190,7 @@ static void nf_ct_frag6_expire(unsigned long data) | |||
190 | /* Creation primitives. */ | 190 | /* Creation primitives. */ |
191 | static inline struct frag_queue *fq_find(struct net *net, __be32 id, | 191 | static inline struct frag_queue *fq_find(struct net *net, __be32 id, |
192 | u32 user, struct in6_addr *src, | 192 | u32 user, struct in6_addr *src, |
193 | struct in6_addr *dst, u8 ecn) | 193 | struct in6_addr *dst, int iif, u8 ecn) |
194 | { | 194 | { |
195 | struct inet_frag_queue *q; | 195 | struct inet_frag_queue *q; |
196 | struct ip6_create_arg arg; | 196 | struct ip6_create_arg arg; |
@@ -200,6 +200,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id, | |||
200 | arg.user = user; | 200 | arg.user = user; |
201 | arg.src = src; | 201 | arg.src = src; |
202 | arg.dst = dst; | 202 | arg.dst = dst; |
203 | arg.iif = iif; | ||
203 | arg.ecn = ecn; | 204 | arg.ecn = ecn; |
204 | 205 | ||
205 | local_bh_disable(); | 206 | local_bh_disable(); |
@@ -601,7 +602,7 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 use | |||
601 | fhdr = (struct frag_hdr *)skb_transport_header(clone); | 602 | fhdr = (struct frag_hdr *)skb_transport_header(clone); |
602 | 603 | ||
603 | fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, | 604 | fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr, |
604 | ip6_frag_ecn(hdr)); | 605 | skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); |
605 | if (fq == NULL) { | 606 | if (fq == NULL) { |
606 | pr_debug("Can't find and can't create new queue\n"); | 607 | pr_debug("Can't find and can't create new queue\n"); |
607 | goto ret_orig; | 608 | goto ret_orig; |
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index dc65ec198f7c..99140986e887 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -733,6 +733,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd, | |||
733 | 733 | ||
734 | static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | 734 | static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) |
735 | { | 735 | { |
736 | struct ipv6_txoptions *opt_to_free = NULL; | ||
736 | struct ipv6_txoptions opt_space; | 737 | struct ipv6_txoptions opt_space; |
737 | DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); | 738 | DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); |
738 | struct in6_addr *daddr, *final_p, final; | 739 | struct in6_addr *daddr, *final_p, final; |
@@ -839,8 +840,10 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
839 | if (!(opt->opt_nflen|opt->opt_flen)) | 840 | if (!(opt->opt_nflen|opt->opt_flen)) |
840 | opt = NULL; | 841 | opt = NULL; |
841 | } | 842 | } |
842 | if (!opt) | 843 | if (!opt) { |
843 | opt = np->opt; | 844 | opt = txopt_get(np); |
845 | opt_to_free = opt; | ||
846 | } | ||
844 | if (flowlabel) | 847 | if (flowlabel) |
845 | opt = fl6_merge_options(&opt_space, flowlabel, opt); | 848 | opt = fl6_merge_options(&opt_space, flowlabel, opt); |
846 | opt = ipv6_fixup_options(&opt_space, opt); | 849 | opt = ipv6_fixup_options(&opt_space, opt); |
@@ -906,6 +909,7 @@ done: | |||
906 | dst_release(dst); | 909 | dst_release(dst); |
907 | out: | 910 | out: |
908 | fl6_sock_release(flowlabel); | 911 | fl6_sock_release(flowlabel); |
912 | txopt_put(opt_to_free); | ||
909 | return err < 0 ? err : len; | 913 | return err < 0 ? err : len; |
910 | do_confirm: | 914 | do_confirm: |
911 | dst_confirm(dst); | 915 | dst_confirm(dst); |
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 44e21a03cfc3..45f5ae51de65 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c | |||
@@ -108,7 +108,10 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a) | |||
108 | return fq->id == arg->id && | 108 | return fq->id == arg->id && |
109 | fq->user == arg->user && | 109 | fq->user == arg->user && |
110 | ipv6_addr_equal(&fq->saddr, arg->src) && | 110 | ipv6_addr_equal(&fq->saddr, arg->src) && |
111 | ipv6_addr_equal(&fq->daddr, arg->dst); | 111 | ipv6_addr_equal(&fq->daddr, arg->dst) && |
112 | (arg->iif == fq->iif || | ||
113 | !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST | | ||
114 | IPV6_ADDR_LINKLOCAL))); | ||
112 | } | 115 | } |
113 | EXPORT_SYMBOL(ip6_frag_match); | 116 | EXPORT_SYMBOL(ip6_frag_match); |
114 | 117 | ||
@@ -180,7 +183,7 @@ static void ip6_frag_expire(unsigned long data) | |||
180 | 183 | ||
181 | static struct frag_queue * | 184 | static struct frag_queue * |
182 | fq_find(struct net *net, __be32 id, const struct in6_addr *src, | 185 | fq_find(struct net *net, __be32 id, const struct in6_addr *src, |
183 | const struct in6_addr *dst, u8 ecn) | 186 | const struct in6_addr *dst, int iif, u8 ecn) |
184 | { | 187 | { |
185 | struct inet_frag_queue *q; | 188 | struct inet_frag_queue *q; |
186 | struct ip6_create_arg arg; | 189 | struct ip6_create_arg arg; |
@@ -190,6 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, | |||
190 | arg.user = IP6_DEFRAG_LOCAL_DELIVER; | 193 | arg.user = IP6_DEFRAG_LOCAL_DELIVER; |
191 | arg.src = src; | 194 | arg.src = src; |
192 | arg.dst = dst; | 195 | arg.dst = dst; |
196 | arg.iif = iif; | ||
193 | arg.ecn = ecn; | 197 | arg.ecn = ecn; |
194 | 198 | ||
195 | hash = inet6_hash_frag(id, src, dst); | 199 | hash = inet6_hash_frag(id, src, dst); |
@@ -551,7 +555,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb) | |||
551 | } | 555 | } |
552 | 556 | ||
553 | fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr, | 557 | fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr, |
554 | ip6_frag_ecn(hdr)); | 558 | skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr)); |
555 | if (fq) { | 559 | if (fq) { |
556 | int ret; | 560 | int ret; |
557 | 561 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 6f01fe122abd..826e6aa44f8d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -523,7 +523,7 @@ static void rt6_probe_deferred(struct work_struct *w) | |||
523 | container_of(w, struct __rt6_probe_work, work); | 523 | container_of(w, struct __rt6_probe_work, work); |
524 | 524 | ||
525 | addrconf_addr_solict_mult(&work->target, &mcaddr); | 525 | addrconf_addr_solict_mult(&work->target, &mcaddr); |
526 | ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, NULL); | 526 | ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL); |
527 | dev_put(work->dev); | 527 | dev_put(work->dev); |
528 | kfree(work); | 528 | kfree(work); |
529 | } | 529 | } |
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index bb8f2fa1c7fb..eaf7ac496d50 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c | |||
@@ -222,7 +222,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) | |||
222 | memset(&fl6, 0, sizeof(fl6)); | 222 | memset(&fl6, 0, sizeof(fl6)); |
223 | fl6.flowi6_proto = IPPROTO_TCP; | 223 | fl6.flowi6_proto = IPPROTO_TCP; |
224 | fl6.daddr = ireq->ir_v6_rmt_addr; | 224 | fl6.daddr = ireq->ir_v6_rmt_addr; |
225 | final_p = fl6_update_dst(&fl6, np->opt, &final); | 225 | final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final); |
226 | fl6.saddr = ireq->ir_v6_loc_addr; | 226 | fl6.saddr = ireq->ir_v6_loc_addr; |
227 | fl6.flowi6_oif = sk->sk_bound_dev_if; | 227 | fl6.flowi6_oif = sk->sk_bound_dev_if; |
228 | fl6.flowi6_mark = ireq->ir_mark; | 228 | fl6.flowi6_mark = ireq->ir_mark; |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index c5429a636f1a..e7aab561b7b4 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -120,6 +120,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
120 | struct ipv6_pinfo *np = inet6_sk(sk); | 120 | struct ipv6_pinfo *np = inet6_sk(sk); |
121 | struct tcp_sock *tp = tcp_sk(sk); | 121 | struct tcp_sock *tp = tcp_sk(sk); |
122 | struct in6_addr *saddr = NULL, *final_p, final; | 122 | struct in6_addr *saddr = NULL, *final_p, final; |
123 | struct ipv6_txoptions *opt; | ||
123 | struct flowi6 fl6; | 124 | struct flowi6 fl6; |
124 | struct dst_entry *dst; | 125 | struct dst_entry *dst; |
125 | int addr_type; | 126 | int addr_type; |
@@ -235,7 +236,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
235 | fl6.fl6_dport = usin->sin6_port; | 236 | fl6.fl6_dport = usin->sin6_port; |
236 | fl6.fl6_sport = inet->inet_sport; | 237 | fl6.fl6_sport = inet->inet_sport; |
237 | 238 | ||
238 | final_p = fl6_update_dst(&fl6, np->opt, &final); | 239 | opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk)); |
240 | final_p = fl6_update_dst(&fl6, opt, &final); | ||
239 | 241 | ||
240 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); | 242 | security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); |
241 | 243 | ||
@@ -255,7 +257,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
255 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; | 257 | inet->inet_rcv_saddr = LOOPBACK4_IPV6; |
256 | 258 | ||
257 | sk->sk_gso_type = SKB_GSO_TCPV6; | 259 | sk->sk_gso_type = SKB_GSO_TCPV6; |
258 | __ip6_dst_store(sk, dst, NULL, NULL); | 260 | ip6_dst_store(sk, dst, NULL, NULL); |
259 | 261 | ||
260 | if (tcp_death_row.sysctl_tw_recycle && | 262 | if (tcp_death_row.sysctl_tw_recycle && |
261 | !tp->rx_opt.ts_recent_stamp && | 263 | !tp->rx_opt.ts_recent_stamp && |
@@ -263,9 +265,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, | |||
263 | tcp_fetch_timewait_stamp(sk, dst); | 265 | tcp_fetch_timewait_stamp(sk, dst); |
264 | 266 | ||
265 | icsk->icsk_ext_hdr_len = 0; | 267 | icsk->icsk_ext_hdr_len = 0; |
266 | if (np->opt) | 268 | if (opt) |
267 | icsk->icsk_ext_hdr_len = (np->opt->opt_flen + | 269 | icsk->icsk_ext_hdr_len = opt->opt_flen + |
268 | np->opt->opt_nflen); | 270 | opt->opt_nflen; |
269 | 271 | ||
270 | tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); | 272 | tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); |
271 | 273 | ||
@@ -461,7 +463,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst, | |||
461 | if (np->repflow && ireq->pktopts) | 463 | if (np->repflow && ireq->pktopts) |
462 | fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); | 464 | fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts)); |
463 | 465 | ||
464 | err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass); | 466 | err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), |
467 | np->tclass); | ||
465 | err = net_xmit_eval(err); | 468 | err = net_xmit_eval(err); |
466 | } | 469 | } |
467 | 470 | ||
@@ -972,6 +975,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * | |||
972 | struct inet_request_sock *ireq; | 975 | struct inet_request_sock *ireq; |
973 | struct ipv6_pinfo *newnp; | 976 | struct ipv6_pinfo *newnp; |
974 | const struct ipv6_pinfo *np = inet6_sk(sk); | 977 | const struct ipv6_pinfo *np = inet6_sk(sk); |
978 | struct ipv6_txoptions *opt; | ||
975 | struct tcp6_sock *newtcp6sk; | 979 | struct tcp6_sock *newtcp6sk; |
976 | struct inet_sock *newinet; | 980 | struct inet_sock *newinet; |
977 | struct tcp_sock *newtp; | 981 | struct tcp_sock *newtp; |
@@ -1056,7 +1060,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * | |||
1056 | */ | 1060 | */ |
1057 | 1061 | ||
1058 | newsk->sk_gso_type = SKB_GSO_TCPV6; | 1062 | newsk->sk_gso_type = SKB_GSO_TCPV6; |
1059 | __ip6_dst_store(newsk, dst, NULL, NULL); | 1063 | ip6_dst_store(newsk, dst, NULL, NULL); |
1060 | inet6_sk_rx_dst_set(newsk, skb); | 1064 | inet6_sk_rx_dst_set(newsk, skb); |
1061 | 1065 | ||
1062 | newtcp6sk = (struct tcp6_sock *)newsk; | 1066 | newtcp6sk = (struct tcp6_sock *)newsk; |
@@ -1098,13 +1102,15 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff * | |||
1098 | but we make one more one thing there: reattach optmem | 1102 | but we make one more one thing there: reattach optmem |
1099 | to newsk. | 1103 | to newsk. |
1100 | */ | 1104 | */ |
1101 | if (np->opt) | 1105 | opt = rcu_dereference(np->opt); |
1102 | newnp->opt = ipv6_dup_options(newsk, np->opt); | 1106 | if (opt) { |
1103 | 1107 | opt = ipv6_dup_options(newsk, opt); | |
1108 | RCU_INIT_POINTER(newnp->opt, opt); | ||
1109 | } | ||
1104 | inet_csk(newsk)->icsk_ext_hdr_len = 0; | 1110 | inet_csk(newsk)->icsk_ext_hdr_len = 0; |
1105 | if (newnp->opt) | 1111 | if (opt) |
1106 | inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + | 1112 | inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen + |
1107 | newnp->opt->opt_flen); | 1113 | opt->opt_flen; |
1108 | 1114 | ||
1109 | tcp_ca_openreq_child(newsk, dst); | 1115 | tcp_ca_openreq_child(newsk, dst); |
1110 | 1116 | ||
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 01bcb49619ee..9da3287a3923 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -1110,6 +1110,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
1110 | DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); | 1110 | DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); |
1111 | struct in6_addr *daddr, *final_p, final; | 1111 | struct in6_addr *daddr, *final_p, final; |
1112 | struct ipv6_txoptions *opt = NULL; | 1112 | struct ipv6_txoptions *opt = NULL; |
1113 | struct ipv6_txoptions *opt_to_free = NULL; | ||
1113 | struct ip6_flowlabel *flowlabel = NULL; | 1114 | struct ip6_flowlabel *flowlabel = NULL; |
1114 | struct flowi6 fl6; | 1115 | struct flowi6 fl6; |
1115 | struct dst_entry *dst; | 1116 | struct dst_entry *dst; |
@@ -1263,8 +1264,10 @@ do_udp_sendmsg: | |||
1263 | opt = NULL; | 1264 | opt = NULL; |
1264 | connected = 0; | 1265 | connected = 0; |
1265 | } | 1266 | } |
1266 | if (!opt) | 1267 | if (!opt) { |
1267 | opt = np->opt; | 1268 | opt = txopt_get(np); |
1269 | opt_to_free = opt; | ||
1270 | } | ||
1268 | if (flowlabel) | 1271 | if (flowlabel) |
1269 | opt = fl6_merge_options(&opt_space, flowlabel, opt); | 1272 | opt = fl6_merge_options(&opt_space, flowlabel, opt); |
1270 | opt = ipv6_fixup_options(&opt_space, opt); | 1273 | opt = ipv6_fixup_options(&opt_space, opt); |
@@ -1373,6 +1376,7 @@ release_dst: | |||
1373 | out: | 1376 | out: |
1374 | dst_release(dst); | 1377 | dst_release(dst); |
1375 | fl6_sock_release(flowlabel); | 1378 | fl6_sock_release(flowlabel); |
1379 | txopt_put(opt_to_free); | ||
1376 | if (!err) | 1380 | if (!err) |
1377 | return len; | 1381 | return len; |
1378 | /* | 1382 | /* |
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index fcb2752419c6..435608c4306d 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -1483,7 +1483,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock, | |||
1483 | if (sock_writeable(sk) && iucv_below_msglim(sk)) | 1483 | if (sock_writeable(sk) && iucv_below_msglim(sk)) |
1484 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | 1484 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; |
1485 | else | 1485 | else |
1486 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 1486 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
1487 | 1487 | ||
1488 | return mask; | 1488 | return mask; |
1489 | } | 1489 | } |
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index aca38d8aed8e..a2c8747d2936 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -486,6 +486,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
486 | DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name); | 486 | DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name); |
487 | struct in6_addr *daddr, *final_p, final; | 487 | struct in6_addr *daddr, *final_p, final; |
488 | struct ipv6_pinfo *np = inet6_sk(sk); | 488 | struct ipv6_pinfo *np = inet6_sk(sk); |
489 | struct ipv6_txoptions *opt_to_free = NULL; | ||
489 | struct ipv6_txoptions *opt = NULL; | 490 | struct ipv6_txoptions *opt = NULL; |
490 | struct ip6_flowlabel *flowlabel = NULL; | 491 | struct ip6_flowlabel *flowlabel = NULL; |
491 | struct dst_entry *dst = NULL; | 492 | struct dst_entry *dst = NULL; |
@@ -575,8 +576,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
575 | opt = NULL; | 576 | opt = NULL; |
576 | } | 577 | } |
577 | 578 | ||
578 | if (opt == NULL) | 579 | if (!opt) { |
579 | opt = np->opt; | 580 | opt = txopt_get(np); |
581 | opt_to_free = opt; | ||
582 | } | ||
580 | if (flowlabel) | 583 | if (flowlabel) |
581 | opt = fl6_merge_options(&opt_space, flowlabel, opt); | 584 | opt = fl6_merge_options(&opt_space, flowlabel, opt); |
582 | opt = ipv6_fixup_options(&opt_space, opt); | 585 | opt = ipv6_fixup_options(&opt_space, opt); |
@@ -631,6 +634,7 @@ done: | |||
631 | dst_release(dst); | 634 | dst_release(dst); |
632 | out: | 635 | out: |
633 | fl6_sock_release(flowlabel); | 636 | fl6_sock_release(flowlabel); |
637 | txopt_put(opt_to_free); | ||
634 | 638 | ||
635 | return err < 0 ? err : len; | 639 | return err < 0 ? err : len; |
636 | 640 | ||
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index a758eb84e8f0..ff757181b0a8 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -500,7 +500,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) | |||
500 | /* send AddBA request */ | 500 | /* send AddBA request */ |
501 | ieee80211_send_addba_request(sdata, sta->sta.addr, tid, | 501 | ieee80211_send_addba_request(sdata, sta->sta.addr, tid, |
502 | tid_tx->dialog_token, start_seq_num, | 502 | tid_tx->dialog_token, start_seq_num, |
503 | local->hw.max_tx_aggregation_subframes, | 503 | IEEE80211_MAX_AMPDU_BUF, |
504 | tid_tx->timeout); | 504 | tid_tx->timeout); |
505 | } | 505 | } |
506 | 506 | ||
@@ -926,6 +926,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, | |||
926 | amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK; | 926 | amsdu = capab & IEEE80211_ADDBA_PARAM_AMSDU_MASK; |
927 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; | 927 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; |
928 | buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; | 928 | buf_size = (capab & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6; |
929 | buf_size = min(buf_size, local->hw.max_tx_aggregation_subframes); | ||
929 | 930 | ||
930 | mutex_lock(&sta->ampdu_mlme.mtx); | 931 | mutex_lock(&sta->ampdu_mlme.mtx); |
931 | 932 | ||
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index c2bd1b6a6922..da471eef07bb 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -3454,8 +3454,12 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, | |||
3454 | goto out_unlock; | 3454 | goto out_unlock; |
3455 | } | 3455 | } |
3456 | } else { | 3456 | } else { |
3457 | /* for cookie below */ | 3457 | /* Assign a dummy non-zero cookie, it's not sent to |
3458 | ack_skb = skb; | 3458 | * userspace in this case but we rely on its value |
3459 | * internally in the need_offchan case to distinguish | ||
3460 | * mgmt-tx from remain-on-channel. | ||
3461 | */ | ||
3462 | *cookie = 0xffffffff; | ||
3459 | } | 3463 | } |
3460 | 3464 | ||
3461 | if (!need_offchan) { | 3465 | if (!need_offchan) { |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index d0dc1bfaeec2..c9e325d2e120 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -76,7 +76,8 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata) | |||
76 | void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata, | 76 | void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata, |
77 | bool update_bss) | 77 | bool update_bss) |
78 | { | 78 | { |
79 | if (__ieee80211_recalc_txpower(sdata) || update_bss) | 79 | if (__ieee80211_recalc_txpower(sdata) || |
80 | (update_bss && ieee80211_sdata_running(sdata))) | ||
80 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); | 81 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER); |
81 | } | 82 | } |
82 | 83 | ||
@@ -1861,6 +1862,7 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) | |||
1861 | unregister_netdevice(sdata->dev); | 1862 | unregister_netdevice(sdata->dev); |
1862 | } else { | 1863 | } else { |
1863 | cfg80211_unregister_wdev(&sdata->wdev); | 1864 | cfg80211_unregister_wdev(&sdata->wdev); |
1865 | ieee80211_teardown_sdata(sdata); | ||
1864 | kfree(sdata); | 1866 | kfree(sdata); |
1865 | } | 1867 | } |
1866 | } | 1868 | } |
@@ -1870,7 +1872,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata) | |||
1870 | if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state))) | 1872 | if (WARN_ON_ONCE(!test_bit(SDATA_STATE_RUNNING, &sdata->state))) |
1871 | return; | 1873 | return; |
1872 | ieee80211_do_stop(sdata, true); | 1874 | ieee80211_do_stop(sdata, true); |
1873 | ieee80211_teardown_sdata(sdata); | ||
1874 | } | 1875 | } |
1875 | 1876 | ||
1876 | void ieee80211_remove_interfaces(struct ieee80211_local *local) | 1877 | void ieee80211_remove_interfaces(struct ieee80211_local *local) |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 858f6b1cb149..175ffcf7fb06 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -541,8 +541,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, | |||
541 | NL80211_FEATURE_HT_IBSS | | 541 | NL80211_FEATURE_HT_IBSS | |
542 | NL80211_FEATURE_VIF_TXPOWER | | 542 | NL80211_FEATURE_VIF_TXPOWER | |
543 | NL80211_FEATURE_MAC_ON_CREATE | | 543 | NL80211_FEATURE_MAC_ON_CREATE | |
544 | NL80211_FEATURE_USERSPACE_MPM | | 544 | NL80211_FEATURE_USERSPACE_MPM; |
545 | NL80211_FEATURE_FULL_AP_CLIENT_STATE; | ||
546 | 545 | ||
547 | if (!ops->hw_scan) | 546 | if (!ops->hw_scan) |
548 | wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | | 547 | wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index b890e225a8f1..b3b44a5dd375 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -779,10 +779,8 @@ void mesh_plink_broken(struct sta_info *sta) | |||
779 | static void mesh_path_node_reclaim(struct rcu_head *rp) | 779 | static void mesh_path_node_reclaim(struct rcu_head *rp) |
780 | { | 780 | { |
781 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); | 781 | struct mpath_node *node = container_of(rp, struct mpath_node, rcu); |
782 | struct ieee80211_sub_if_data *sdata = node->mpath->sdata; | ||
783 | 782 | ||
784 | del_timer_sync(&node->mpath->timer); | 783 | del_timer_sync(&node->mpath->timer); |
785 | atomic_dec(&sdata->u.mesh.mpaths); | ||
786 | kfree(node->mpath); | 784 | kfree(node->mpath); |
787 | kfree(node); | 785 | kfree(node); |
788 | } | 786 | } |
@@ -790,8 +788,9 @@ static void mesh_path_node_reclaim(struct rcu_head *rp) | |||
790 | /* needs to be called with the corresponding hashwlock taken */ | 788 | /* needs to be called with the corresponding hashwlock taken */ |
791 | static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) | 789 | static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) |
792 | { | 790 | { |
793 | struct mesh_path *mpath; | 791 | struct mesh_path *mpath = node->mpath; |
794 | mpath = node->mpath; | 792 | struct ieee80211_sub_if_data *sdata = node->mpath->sdata; |
793 | |||
795 | spin_lock(&mpath->state_lock); | 794 | spin_lock(&mpath->state_lock); |
796 | mpath->flags |= MESH_PATH_RESOLVING; | 795 | mpath->flags |= MESH_PATH_RESOLVING; |
797 | if (mpath->is_gate) | 796 | if (mpath->is_gate) |
@@ -799,6 +798,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node) | |||
799 | hlist_del_rcu(&node->list); | 798 | hlist_del_rcu(&node->list); |
800 | call_rcu(&node->rcu, mesh_path_node_reclaim); | 799 | call_rcu(&node->rcu, mesh_path_node_reclaim); |
801 | spin_unlock(&mpath->state_lock); | 800 | spin_unlock(&mpath->state_lock); |
801 | atomic_dec(&sdata->u.mesh.mpaths); | ||
802 | atomic_dec(&tbl->entries); | 802 | atomic_dec(&tbl->entries); |
803 | } | 803 | } |
804 | 804 | ||
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index 4aeca4b0c3cb..a413e52f7691 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c | |||
@@ -597,8 +597,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, | |||
597 | /* We need to ensure power level is at max for scanning. */ | 597 | /* We need to ensure power level is at max for scanning. */ |
598 | ieee80211_hw_config(local, 0); | 598 | ieee80211_hw_config(local, 0); |
599 | 599 | ||
600 | if ((req->channels[0]->flags & | 600 | if ((req->channels[0]->flags & (IEEE80211_CHAN_NO_IR | |
601 | IEEE80211_CHAN_NO_IR) || | 601 | IEEE80211_CHAN_RADAR)) || |
602 | !req->n_ssids) { | 602 | !req->n_ssids) { |
603 | next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; | 603 | next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; |
604 | } else { | 604 | } else { |
@@ -645,7 +645,7 @@ ieee80211_scan_get_channel_time(struct ieee80211_channel *chan) | |||
645 | * TODO: channel switching also consumes quite some time, | 645 | * TODO: channel switching also consumes quite some time, |
646 | * add that delay as well to get a better estimation | 646 | * add that delay as well to get a better estimation |
647 | */ | 647 | */ |
648 | if (chan->flags & IEEE80211_CHAN_NO_IR) | 648 | if (chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) |
649 | return IEEE80211_PASSIVE_CHANNEL_TIME; | 649 | return IEEE80211_PASSIVE_CHANNEL_TIME; |
650 | return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME; | 650 | return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME; |
651 | } | 651 | } |
@@ -777,7 +777,8 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local, | |||
777 | * | 777 | * |
778 | * In any case, it is not necessary for a passive scan. | 778 | * In any case, it is not necessary for a passive scan. |
779 | */ | 779 | */ |
780 | if (chan->flags & IEEE80211_CHAN_NO_IR || !scan_req->n_ssids) { | 780 | if ((chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR)) || |
781 | !scan_req->n_ssids) { | ||
781 | *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; | 782 | *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME; |
782 | local->next_scan_state = SCAN_DECISION; | 783 | local->next_scan_state = SCAN_DECISION; |
783 | return; | 784 | return; |
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index b7de0da46acd..ecf0a0196f18 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c | |||
@@ -572,7 +572,7 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock, | |||
572 | if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED) | 572 | if (sock_writeable(sk) && sk->sk_state == LLCP_CONNECTED) |
573 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | 573 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; |
574 | else | 574 | else |
575 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 575 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
576 | 576 | ||
577 | pr_debug("mask 0x%x\n", mask); | 577 | pr_debug("mask 0x%x\n", mask); |
578 | 578 | ||
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c index a7a80a6b77b0..653d073bae45 100644 --- a/net/openvswitch/dp_notify.c +++ b/net/openvswitch/dp_notify.c | |||
@@ -58,7 +58,7 @@ void ovs_dp_notify_wq(struct work_struct *work) | |||
58 | struct hlist_node *n; | 58 | struct hlist_node *n; |
59 | 59 | ||
60 | hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) { | 60 | hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) { |
61 | if (vport->ops->type != OVS_VPORT_TYPE_NETDEV) | 61 | if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL) |
62 | continue; | 62 | continue; |
63 | 63 | ||
64 | if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH)) | 64 | if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH)) |
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c index efb736bb6855..e41cd12d9b2d 100644 --- a/net/openvswitch/vport-geneve.c +++ b/net/openvswitch/vport-geneve.c | |||
@@ -117,7 +117,6 @@ static struct vport_ops ovs_geneve_vport_ops = { | |||
117 | .destroy = ovs_netdev_tunnel_destroy, | 117 | .destroy = ovs_netdev_tunnel_destroy, |
118 | .get_options = geneve_get_options, | 118 | .get_options = geneve_get_options, |
119 | .send = dev_queue_xmit, | 119 | .send = dev_queue_xmit, |
120 | .owner = THIS_MODULE, | ||
121 | }; | 120 | }; |
122 | 121 | ||
123 | static int __init ovs_geneve_tnl_init(void) | 122 | static int __init ovs_geneve_tnl_init(void) |
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index c3257d78d3d2..7f8897f33a67 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c | |||
@@ -89,7 +89,6 @@ static struct vport_ops ovs_gre_vport_ops = { | |||
89 | .create = gre_create, | 89 | .create = gre_create, |
90 | .send = dev_queue_xmit, | 90 | .send = dev_queue_xmit, |
91 | .destroy = ovs_netdev_tunnel_destroy, | 91 | .destroy = ovs_netdev_tunnel_destroy, |
92 | .owner = THIS_MODULE, | ||
93 | }; | 92 | }; |
94 | 93 | ||
95 | static int __init ovs_gre_tnl_init(void) | 94 | static int __init ovs_gre_tnl_init(void) |
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index b327368a3848..6b0190b987ec 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c | |||
@@ -180,9 +180,13 @@ void ovs_netdev_tunnel_destroy(struct vport *vport) | |||
180 | if (vport->dev->priv_flags & IFF_OVS_DATAPATH) | 180 | if (vport->dev->priv_flags & IFF_OVS_DATAPATH) |
181 | ovs_netdev_detach_dev(vport); | 181 | ovs_netdev_detach_dev(vport); |
182 | 182 | ||
183 | /* Early release so we can unregister the device */ | 183 | /* We can be invoked by both explicit vport deletion and |
184 | * underlying netdev deregistration; delete the link only | ||
185 | * if it's not already shutting down. | ||
186 | */ | ||
187 | if (vport->dev->reg_state == NETREG_REGISTERED) | ||
188 | rtnl_delete_link(vport->dev); | ||
184 | dev_put(vport->dev); | 189 | dev_put(vport->dev); |
185 | rtnl_delete_link(vport->dev); | ||
186 | vport->dev = NULL; | 190 | vport->dev = NULL; |
187 | rtnl_unlock(); | 191 | rtnl_unlock(); |
188 | 192 | ||
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 0ac0fd004d7e..31cbc8c5c7db 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c | |||
@@ -71,7 +71,7 @@ static struct hlist_head *hash_bucket(const struct net *net, const char *name) | |||
71 | return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)]; | 71 | return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)]; |
72 | } | 72 | } |
73 | 73 | ||
74 | int ovs_vport_ops_register(struct vport_ops *ops) | 74 | int __ovs_vport_ops_register(struct vport_ops *ops) |
75 | { | 75 | { |
76 | int err = -EEXIST; | 76 | int err = -EEXIST; |
77 | struct vport_ops *o; | 77 | struct vport_ops *o; |
@@ -87,7 +87,7 @@ errout: | |||
87 | ovs_unlock(); | 87 | ovs_unlock(); |
88 | return err; | 88 | return err; |
89 | } | 89 | } |
90 | EXPORT_SYMBOL_GPL(ovs_vport_ops_register); | 90 | EXPORT_SYMBOL_GPL(__ovs_vport_ops_register); |
91 | 91 | ||
92 | void ovs_vport_ops_unregister(struct vport_ops *ops) | 92 | void ovs_vport_ops_unregister(struct vport_ops *ops) |
93 | { | 93 | { |
@@ -256,8 +256,8 @@ int ovs_vport_set_options(struct vport *vport, struct nlattr *options) | |||
256 | * | 256 | * |
257 | * @vport: vport to delete. | 257 | * @vport: vport to delete. |
258 | * | 258 | * |
259 | * Detaches @vport from its datapath and destroys it. It is possible to fail | 259 | * Detaches @vport from its datapath and destroys it. ovs_mutex must |
260 | * for reasons such as lack of memory. ovs_mutex must be held. | 260 | * be held. |
261 | */ | 261 | */ |
262 | void ovs_vport_del(struct vport *vport) | 262 | void ovs_vport_del(struct vport *vport) |
263 | { | 263 | { |
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index bdfd82a7c064..8ea3a96980ac 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h | |||
@@ -196,7 +196,13 @@ static inline const char *ovs_vport_name(struct vport *vport) | |||
196 | return vport->dev->name; | 196 | return vport->dev->name; |
197 | } | 197 | } |
198 | 198 | ||
199 | int ovs_vport_ops_register(struct vport_ops *ops); | 199 | int __ovs_vport_ops_register(struct vport_ops *ops); |
200 | #define ovs_vport_ops_register(ops) \ | ||
201 | ({ \ | ||
202 | (ops)->owner = THIS_MODULE; \ | ||
203 | __ovs_vport_ops_register(ops); \ | ||
204 | }) | ||
205 | |||
200 | void ovs_vport_ops_unregister(struct vport_ops *ops); | 206 | void ovs_vport_ops_unregister(struct vport_ops *ops); |
201 | 207 | ||
202 | static inline struct rtable *ovs_tunnel_route_lookup(struct net *net, | 208 | static inline struct rtable *ovs_tunnel_route_lookup(struct net *net, |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 1cf928fb573e..992396aa635c 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -2329,8 +2329,8 @@ static void tpacket_destruct_skb(struct sk_buff *skb) | |||
2329 | static bool ll_header_truncated(const struct net_device *dev, int len) | 2329 | static bool ll_header_truncated(const struct net_device *dev, int len) |
2330 | { | 2330 | { |
2331 | /* net device doesn't like empty head */ | 2331 | /* net device doesn't like empty head */ |
2332 | if (unlikely(len <= dev->hard_header_len)) { | 2332 | if (unlikely(len < dev->hard_header_len)) { |
2333 | net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n", | 2333 | net_warn_ratelimited("%s: packet size is too short (%d < %d)\n", |
2334 | current->comm, len, dev->hard_header_len); | 2334 | current->comm, len, dev->hard_header_len); |
2335 | return true; | 2335 | return true; |
2336 | } | 2336 | } |
diff --git a/net/rds/connection.c b/net/rds/connection.c index d4564036a339..e3b118cae81d 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c | |||
@@ -186,12 +186,6 @@ static struct rds_connection *__rds_conn_create(struct net *net, | |||
186 | } | 186 | } |
187 | } | 187 | } |
188 | 188 | ||
189 | if (trans == NULL) { | ||
190 | kmem_cache_free(rds_conn_slab, conn); | ||
191 | conn = ERR_PTR(-ENODEV); | ||
192 | goto out; | ||
193 | } | ||
194 | |||
195 | conn->c_trans = trans; | 189 | conn->c_trans = trans; |
196 | 190 | ||
197 | ret = trans->conn_alloc(conn, gfp); | 191 | ret = trans->conn_alloc(conn, gfp); |
diff --git a/net/rds/send.c b/net/rds/send.c index 827155c2ead1..c9cdb358ea88 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
@@ -1013,11 +1013,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) | |||
1013 | release_sock(sk); | 1013 | release_sock(sk); |
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | /* racing with another thread binding seems ok here */ | 1016 | lock_sock(sk); |
1017 | if (daddr == 0 || rs->rs_bound_addr == 0) { | 1017 | if (daddr == 0 || rs->rs_bound_addr == 0) { |
1018 | release_sock(sk); | ||
1018 | ret = -ENOTCONN; /* XXX not a great errno */ | 1019 | ret = -ENOTCONN; /* XXX not a great errno */ |
1019 | goto out; | 1020 | goto out; |
1020 | } | 1021 | } |
1022 | release_sock(sk); | ||
1021 | 1023 | ||
1022 | if (payload_len > rds_sk_sndbuf(rs)) { | 1024 | if (payload_len > rds_sk_sndbuf(rs)) { |
1023 | ret = -EMSGSIZE; | 1025 | ret = -EMSGSIZE; |
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c index e0547f521f20..adc555e0323d 100644 --- a/net/rxrpc/ar-ack.c +++ b/net/rxrpc/ar-ack.c | |||
@@ -723,8 +723,10 @@ process_further: | |||
723 | 723 | ||
724 | if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY || | 724 | if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY || |
725 | call->state == RXRPC_CALL_SERVER_AWAIT_ACK) && | 725 | call->state == RXRPC_CALL_SERVER_AWAIT_ACK) && |
726 | hard > tx) | 726 | hard > tx) { |
727 | call->acks_hard = tx; | ||
727 | goto all_acked; | 728 | goto all_acked; |
729 | } | ||
728 | 730 | ||
729 | smp_rmb(); | 731 | smp_rmb(); |
730 | rxrpc_rotate_tx_window(call, hard - 1); | 732 | rxrpc_rotate_tx_window(call, hard - 1); |
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c index a40d3afe93b7..14c4e12c47b0 100644 --- a/net/rxrpc/ar-output.c +++ b/net/rxrpc/ar-output.c | |||
@@ -531,7 +531,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, | |||
531 | timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); | 531 | timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); |
532 | 532 | ||
533 | /* this should be in poll */ | 533 | /* this should be in poll */ |
534 | clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 534 | sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
535 | 535 | ||
536 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) | 536 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) |
537 | return -EPIPE; | 537 | return -EPIPE; |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index f43c8f33f09e..7ec667dd4ce1 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -253,7 +253,8 @@ int qdisc_set_default(const char *name) | |||
253 | } | 253 | } |
254 | 254 | ||
255 | /* We know handle. Find qdisc among all qdisc's attached to device | 255 | /* We know handle. Find qdisc among all qdisc's attached to device |
256 | (root qdisc, all its children, children of children etc.) | 256 | * (root qdisc, all its children, children of children etc.) |
257 | * Note: caller either uses rtnl or rcu_read_lock() | ||
257 | */ | 258 | */ |
258 | 259 | ||
259 | static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) | 260 | static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) |
@@ -264,7 +265,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) | |||
264 | root->handle == handle) | 265 | root->handle == handle) |
265 | return root; | 266 | return root; |
266 | 267 | ||
267 | list_for_each_entry(q, &root->list, list) { | 268 | list_for_each_entry_rcu(q, &root->list, list) { |
268 | if (q->handle == handle) | 269 | if (q->handle == handle) |
269 | return q; | 270 | return q; |
270 | } | 271 | } |
@@ -277,15 +278,18 @@ void qdisc_list_add(struct Qdisc *q) | |||
277 | struct Qdisc *root = qdisc_dev(q)->qdisc; | 278 | struct Qdisc *root = qdisc_dev(q)->qdisc; |
278 | 279 | ||
279 | WARN_ON_ONCE(root == &noop_qdisc); | 280 | WARN_ON_ONCE(root == &noop_qdisc); |
280 | list_add_tail(&q->list, &root->list); | 281 | ASSERT_RTNL(); |
282 | list_add_tail_rcu(&q->list, &root->list); | ||
281 | } | 283 | } |
282 | } | 284 | } |
283 | EXPORT_SYMBOL(qdisc_list_add); | 285 | EXPORT_SYMBOL(qdisc_list_add); |
284 | 286 | ||
285 | void qdisc_list_del(struct Qdisc *q) | 287 | void qdisc_list_del(struct Qdisc *q) |
286 | { | 288 | { |
287 | if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) | 289 | if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { |
288 | list_del(&q->list); | 290 | ASSERT_RTNL(); |
291 | list_del_rcu(&q->list); | ||
292 | } | ||
289 | } | 293 | } |
290 | EXPORT_SYMBOL(qdisc_list_del); | 294 | EXPORT_SYMBOL(qdisc_list_del); |
291 | 295 | ||
@@ -750,14 +754,18 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) | |||
750 | if (n == 0) | 754 | if (n == 0) |
751 | return; | 755 | return; |
752 | drops = max_t(int, n, 0); | 756 | drops = max_t(int, n, 0); |
757 | rcu_read_lock(); | ||
753 | while ((parentid = sch->parent)) { | 758 | while ((parentid = sch->parent)) { |
754 | if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS)) | 759 | if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS)) |
755 | return; | 760 | break; |
756 | 761 | ||
762 | if (sch->flags & TCQ_F_NOPARENT) | ||
763 | break; | ||
764 | /* TODO: perform the search on a per txq basis */ | ||
757 | sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid)); | 765 | sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid)); |
758 | if (sch == NULL) { | 766 | if (sch == NULL) { |
759 | WARN_ON(parentid != TC_H_ROOT); | 767 | WARN_ON_ONCE(parentid != TC_H_ROOT); |
760 | return; | 768 | break; |
761 | } | 769 | } |
762 | cops = sch->ops->cl_ops; | 770 | cops = sch->ops->cl_ops; |
763 | if (cops->qlen_notify) { | 771 | if (cops->qlen_notify) { |
@@ -768,6 +776,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) | |||
768 | sch->q.qlen -= n; | 776 | sch->q.qlen -= n; |
769 | __qdisc_qstats_drop(sch, drops); | 777 | __qdisc_qstats_drop(sch, drops); |
770 | } | 778 | } |
779 | rcu_read_unlock(); | ||
771 | } | 780 | } |
772 | EXPORT_SYMBOL(qdisc_tree_decrease_qlen); | 781 | EXPORT_SYMBOL(qdisc_tree_decrease_qlen); |
773 | 782 | ||
@@ -941,7 +950,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, | |||
941 | } | 950 | } |
942 | lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock); | 951 | lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock); |
943 | if (!netif_is_multiqueue(dev)) | 952 | if (!netif_is_multiqueue(dev)) |
944 | sch->flags |= TCQ_F_ONETXQUEUE; | 953 | sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
945 | } | 954 | } |
946 | 955 | ||
947 | sch->handle = handle; | 956 | sch->handle = handle; |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index cb5d4ad32946..e82a1ad80aa5 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -737,7 +737,7 @@ static void attach_one_default_qdisc(struct net_device *dev, | |||
737 | return; | 737 | return; |
738 | } | 738 | } |
739 | if (!netif_is_multiqueue(dev)) | 739 | if (!netif_is_multiqueue(dev)) |
740 | qdisc->flags |= TCQ_F_ONETXQUEUE; | 740 | qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
741 | dev_queue->qdisc_sleeping = qdisc; | 741 | dev_queue->qdisc_sleeping = qdisc; |
742 | } | 742 | } |
743 | 743 | ||
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c index f3cbaecd283a..3e82f047caaf 100644 --- a/net/sched/sch_mq.c +++ b/net/sched/sch_mq.c | |||
@@ -63,7 +63,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt) | |||
63 | if (qdisc == NULL) | 63 | if (qdisc == NULL) |
64 | goto err; | 64 | goto err; |
65 | priv->qdiscs[ntx] = qdisc; | 65 | priv->qdiscs[ntx] = qdisc; |
66 | qdisc->flags |= TCQ_F_ONETXQUEUE; | 66 | qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
67 | } | 67 | } |
68 | 68 | ||
69 | sch->flags |= TCQ_F_MQROOT; | 69 | sch->flags |= TCQ_F_MQROOT; |
@@ -156,7 +156,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new, | |||
156 | 156 | ||
157 | *old = dev_graft_qdisc(dev_queue, new); | 157 | *old = dev_graft_qdisc(dev_queue, new); |
158 | if (new) | 158 | if (new) |
159 | new->flags |= TCQ_F_ONETXQUEUE; | 159 | new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
160 | if (dev->flags & IFF_UP) | 160 | if (dev->flags & IFF_UP) |
161 | dev_activate(dev); | 161 | dev_activate(dev); |
162 | return 0; | 162 | return 0; |
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 3811a745452c..ad70ecf57ce7 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c | |||
@@ -132,7 +132,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) | |||
132 | goto err; | 132 | goto err; |
133 | } | 133 | } |
134 | priv->qdiscs[i] = qdisc; | 134 | priv->qdiscs[i] = qdisc; |
135 | qdisc->flags |= TCQ_F_ONETXQUEUE; | 135 | qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
136 | } | 136 | } |
137 | 137 | ||
138 | /* If the mqprio options indicate that hardware should own | 138 | /* If the mqprio options indicate that hardware should own |
@@ -209,7 +209,7 @@ static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new, | |||
209 | *old = dev_graft_qdisc(dev_queue, new); | 209 | *old = dev_graft_qdisc(dev_queue, new); |
210 | 210 | ||
211 | if (new) | 211 | if (new) |
212 | new->flags |= TCQ_F_ONETXQUEUE; | 212 | new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
213 | 213 | ||
214 | if (dev->flags & IFF_UP) | 214 | if (dev->flags & IFF_UP) |
215 | dev_activate(dev); | 215 | dev_activate(dev); |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index e917d27328ea..acb45b8c2a9d 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -209,6 +209,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) | |||
209 | struct sock *sk = skb->sk; | 209 | struct sock *sk = skb->sk; |
210 | struct ipv6_pinfo *np = inet6_sk(sk); | 210 | struct ipv6_pinfo *np = inet6_sk(sk); |
211 | struct flowi6 *fl6 = &transport->fl.u.ip6; | 211 | struct flowi6 *fl6 = &transport->fl.u.ip6; |
212 | int res; | ||
212 | 213 | ||
213 | pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, | 214 | pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, |
214 | skb->len, &fl6->saddr, &fl6->daddr); | 215 | skb->len, &fl6->saddr, &fl6->daddr); |
@@ -220,7 +221,10 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) | |||
220 | 221 | ||
221 | SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); | 222 | SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); |
222 | 223 | ||
223 | return ip6_xmit(sk, skb, fl6, np->opt, np->tclass); | 224 | rcu_read_lock(); |
225 | res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass); | ||
226 | rcu_read_unlock(); | ||
227 | return res; | ||
224 | } | 228 | } |
225 | 229 | ||
226 | /* Returns the dst cache entry for the given source and destination ip | 230 | /* Returns the dst cache entry for the given source and destination ip |
@@ -262,7 +266,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, | |||
262 | pr_debug("src=%pI6 - ", &fl6->saddr); | 266 | pr_debug("src=%pI6 - ", &fl6->saddr); |
263 | } | 267 | } |
264 | 268 | ||
265 | final_p = fl6_update_dst(fl6, np->opt, &final); | 269 | rcu_read_lock(); |
270 | final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); | ||
271 | rcu_read_unlock(); | ||
272 | |||
266 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); | 273 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); |
267 | if (!asoc || saddr) | 274 | if (!asoc || saddr) |
268 | goto out; | 275 | goto out; |
@@ -321,7 +328,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, | |||
321 | if (baddr) { | 328 | if (baddr) { |
322 | fl6->saddr = baddr->v6.sin6_addr; | 329 | fl6->saddr = baddr->v6.sin6_addr; |
323 | fl6->fl6_sport = baddr->v6.sin6_port; | 330 | fl6->fl6_sport = baddr->v6.sin6_port; |
324 | final_p = fl6_update_dst(fl6, np->opt, &final); | 331 | final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); |
325 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); | 332 | dst = ip6_dst_lookup_flow(sk, fl6, final_p); |
326 | } | 333 | } |
327 | 334 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 897c01c029ca..03c8256063ec 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -972,7 +972,7 @@ static int sctp_setsockopt_bindx(struct sock *sk, | |||
972 | return -EFAULT; | 972 | return -EFAULT; |
973 | 973 | ||
974 | /* Alloc space for the address array in kernel memory. */ | 974 | /* Alloc space for the address array in kernel memory. */ |
975 | kaddrs = kmalloc(addrs_size, GFP_KERNEL); | 975 | kaddrs = kmalloc(addrs_size, GFP_USER | __GFP_NOWARN); |
976 | if (unlikely(!kaddrs)) | 976 | if (unlikely(!kaddrs)) |
977 | return -ENOMEM; | 977 | return -ENOMEM; |
978 | 978 | ||
@@ -4928,7 +4928,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | |||
4928 | to = optval + offsetof(struct sctp_getaddrs, addrs); | 4928 | to = optval + offsetof(struct sctp_getaddrs, addrs); |
4929 | space_left = len - offsetof(struct sctp_getaddrs, addrs); | 4929 | space_left = len - offsetof(struct sctp_getaddrs, addrs); |
4930 | 4930 | ||
4931 | addrs = kmalloc(space_left, GFP_KERNEL); | 4931 | addrs = kmalloc(space_left, GFP_USER | __GFP_NOWARN); |
4932 | if (!addrs) | 4932 | if (!addrs) |
4933 | return -ENOMEM; | 4933 | return -ENOMEM; |
4934 | 4934 | ||
@@ -6458,7 +6458,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) | |||
6458 | if (sctp_writeable(sk)) { | 6458 | if (sctp_writeable(sk)) { |
6459 | mask |= POLLOUT | POLLWRNORM; | 6459 | mask |= POLLOUT | POLLWRNORM; |
6460 | } else { | 6460 | } else { |
6461 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 6461 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
6462 | /* | 6462 | /* |
6463 | * Since the socket is not locked, the buffer | 6463 | * Since the socket is not locked, the buffer |
6464 | * might be made available after the writeable check and | 6464 | * might be made available after the writeable check and |
@@ -6801,26 +6801,30 @@ no_packet: | |||
6801 | static void __sctp_write_space(struct sctp_association *asoc) | 6801 | static void __sctp_write_space(struct sctp_association *asoc) |
6802 | { | 6802 | { |
6803 | struct sock *sk = asoc->base.sk; | 6803 | struct sock *sk = asoc->base.sk; |
6804 | struct socket *sock = sk->sk_socket; | ||
6805 | 6804 | ||
6806 | if ((sctp_wspace(asoc) > 0) && sock) { | 6805 | if (sctp_wspace(asoc) <= 0) |
6807 | if (waitqueue_active(&asoc->wait)) | 6806 | return; |
6808 | wake_up_interruptible(&asoc->wait); | 6807 | |
6808 | if (waitqueue_active(&asoc->wait)) | ||
6809 | wake_up_interruptible(&asoc->wait); | ||
6809 | 6810 | ||
6810 | if (sctp_writeable(sk)) { | 6811 | if (sctp_writeable(sk)) { |
6811 | wait_queue_head_t *wq = sk_sleep(sk); | 6812 | struct socket_wq *wq; |
6812 | 6813 | ||
6813 | if (wq && waitqueue_active(wq)) | 6814 | rcu_read_lock(); |
6814 | wake_up_interruptible(wq); | 6815 | wq = rcu_dereference(sk->sk_wq); |
6816 | if (wq) { | ||
6817 | if (waitqueue_active(&wq->wait)) | ||
6818 | wake_up_interruptible(&wq->wait); | ||
6815 | 6819 | ||
6816 | /* Note that we try to include the Async I/O support | 6820 | /* Note that we try to include the Async I/O support |
6817 | * here by modeling from the current TCP/UDP code. | 6821 | * here by modeling from the current TCP/UDP code. |
6818 | * We have not tested with it yet. | 6822 | * We have not tested with it yet. |
6819 | */ | 6823 | */ |
6820 | if (!(sk->sk_shutdown & SEND_SHUTDOWN)) | 6824 | if (!(sk->sk_shutdown & SEND_SHUTDOWN)) |
6821 | sock_wake_async(sock, | 6825 | sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); |
6822 | SOCK_WAKE_SPACE, POLL_OUT); | ||
6823 | } | 6826 | } |
6827 | rcu_read_unlock(); | ||
6824 | } | 6828 | } |
6825 | } | 6829 | } |
6826 | 6830 | ||
@@ -7375,6 +7379,13 @@ struct proto sctp_prot = { | |||
7375 | 7379 | ||
7376 | #if IS_ENABLED(CONFIG_IPV6) | 7380 | #if IS_ENABLED(CONFIG_IPV6) |
7377 | 7381 | ||
7382 | #include <net/transp_v6.h> | ||
7383 | static void sctp_v6_destroy_sock(struct sock *sk) | ||
7384 | { | ||
7385 | sctp_destroy_sock(sk); | ||
7386 | inet6_destroy_sock(sk); | ||
7387 | } | ||
7388 | |||
7378 | struct proto sctpv6_prot = { | 7389 | struct proto sctpv6_prot = { |
7379 | .name = "SCTPv6", | 7390 | .name = "SCTPv6", |
7380 | .owner = THIS_MODULE, | 7391 | .owner = THIS_MODULE, |
@@ -7384,7 +7395,7 @@ struct proto sctpv6_prot = { | |||
7384 | .accept = sctp_accept, | 7395 | .accept = sctp_accept, |
7385 | .ioctl = sctp_ioctl, | 7396 | .ioctl = sctp_ioctl, |
7386 | .init = sctp_init_sock, | 7397 | .init = sctp_init_sock, |
7387 | .destroy = sctp_destroy_sock, | 7398 | .destroy = sctp_v6_destroy_sock, |
7388 | .shutdown = sctp_shutdown, | 7399 | .shutdown = sctp_shutdown, |
7389 | .setsockopt = sctp_setsockopt, | 7400 | .setsockopt = sctp_setsockopt, |
7390 | .getsockopt = sctp_getsockopt, | 7401 | .getsockopt = sctp_getsockopt, |
diff --git a/net/socket.c b/net/socket.c index dd2c247c99e3..456fadb3d819 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -1056,27 +1056,20 @@ static int sock_fasync(int fd, struct file *filp, int on) | |||
1056 | return 0; | 1056 | return 0; |
1057 | } | 1057 | } |
1058 | 1058 | ||
1059 | /* This function may be called only under socket lock or callback_lock or rcu_lock */ | 1059 | /* This function may be called only under rcu_lock */ |
1060 | 1060 | ||
1061 | int sock_wake_async(struct socket *sock, int how, int band) | 1061 | int sock_wake_async(struct socket_wq *wq, int how, int band) |
1062 | { | 1062 | { |
1063 | struct socket_wq *wq; | 1063 | if (!wq || !wq->fasync_list) |
1064 | |||
1065 | if (!sock) | ||
1066 | return -1; | ||
1067 | rcu_read_lock(); | ||
1068 | wq = rcu_dereference(sock->wq); | ||
1069 | if (!wq || !wq->fasync_list) { | ||
1070 | rcu_read_unlock(); | ||
1071 | return -1; | 1064 | return -1; |
1072 | } | 1065 | |
1073 | switch (how) { | 1066 | switch (how) { |
1074 | case SOCK_WAKE_WAITD: | 1067 | case SOCK_WAKE_WAITD: |
1075 | if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags)) | 1068 | if (test_bit(SOCKWQ_ASYNC_WAITDATA, &wq->flags)) |
1076 | break; | 1069 | break; |
1077 | goto call_kill; | 1070 | goto call_kill; |
1078 | case SOCK_WAKE_SPACE: | 1071 | case SOCK_WAKE_SPACE: |
1079 | if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags)) | 1072 | if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags)) |
1080 | break; | 1073 | break; |
1081 | /* fall through */ | 1074 | /* fall through */ |
1082 | case SOCK_WAKE_IO: | 1075 | case SOCK_WAKE_IO: |
@@ -1086,7 +1079,7 @@ call_kill: | |||
1086 | case SOCK_WAKE_URG: | 1079 | case SOCK_WAKE_URG: |
1087 | kill_fasync(&wq->fasync_list, SIGURG, band); | 1080 | kill_fasync(&wq->fasync_list, SIGURG, band); |
1088 | } | 1081 | } |
1089 | rcu_read_unlock(); | 1082 | |
1090 | return 0; | 1083 | return 0; |
1091 | } | 1084 | } |
1092 | EXPORT_SYMBOL(sock_wake_async); | 1085 | EXPORT_SYMBOL(sock_wake_async); |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 1d1a70498910..2ffaf6a79499 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -398,7 +398,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, | |||
398 | if (unlikely(!sock)) | 398 | if (unlikely(!sock)) |
399 | return -ENOTSOCK; | 399 | return -ENOTSOCK; |
400 | 400 | ||
401 | clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); | 401 | clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags); |
402 | if (base != 0) { | 402 | if (base != 0) { |
403 | addr = NULL; | 403 | addr = NULL; |
404 | addrlen = 0; | 404 | addrlen = 0; |
@@ -442,7 +442,7 @@ static void xs_nospace_callback(struct rpc_task *task) | |||
442 | struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt); | 442 | struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt); |
443 | 443 | ||
444 | transport->inet->sk_write_pending--; | 444 | transport->inet->sk_write_pending--; |
445 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); | 445 | clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); |
446 | } | 446 | } |
447 | 447 | ||
448 | /** | 448 | /** |
@@ -467,7 +467,7 @@ static int xs_nospace(struct rpc_task *task) | |||
467 | 467 | ||
468 | /* Don't race with disconnect */ | 468 | /* Don't race with disconnect */ |
469 | if (xprt_connected(xprt)) { | 469 | if (xprt_connected(xprt)) { |
470 | if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { | 470 | if (test_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags)) { |
471 | /* | 471 | /* |
472 | * Notify TCP that we're limited by the application | 472 | * Notify TCP that we're limited by the application |
473 | * window size | 473 | * window size |
@@ -478,7 +478,7 @@ static int xs_nospace(struct rpc_task *task) | |||
478 | xprt_wait_for_buffer_space(task, xs_nospace_callback); | 478 | xprt_wait_for_buffer_space(task, xs_nospace_callback); |
479 | } | 479 | } |
480 | } else { | 480 | } else { |
481 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); | 481 | clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); |
482 | ret = -ENOTCONN; | 482 | ret = -ENOTCONN; |
483 | } | 483 | } |
484 | 484 | ||
@@ -626,7 +626,7 @@ process_status: | |||
626 | case -EPERM: | 626 | case -EPERM: |
627 | /* When the server has died, an ICMP port unreachable message | 627 | /* When the server has died, an ICMP port unreachable message |
628 | * prompts ECONNREFUSED. */ | 628 | * prompts ECONNREFUSED. */ |
629 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); | 629 | clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); |
630 | } | 630 | } |
631 | 631 | ||
632 | return status; | 632 | return status; |
@@ -715,7 +715,7 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
715 | case -EADDRINUSE: | 715 | case -EADDRINUSE: |
716 | case -ENOBUFS: | 716 | case -ENOBUFS: |
717 | case -EPIPE: | 717 | case -EPIPE: |
718 | clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); | 718 | clear_bit(SOCKWQ_ASYNC_NOSPACE, &transport->sock->flags); |
719 | } | 719 | } |
720 | 720 | ||
721 | return status; | 721 | return status; |
@@ -1618,7 +1618,7 @@ static void xs_write_space(struct sock *sk) | |||
1618 | 1618 | ||
1619 | if (unlikely(!(xprt = xprt_from_sock(sk)))) | 1619 | if (unlikely(!(xprt = xprt_from_sock(sk)))) |
1620 | return; | 1620 | return; |
1621 | if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0) | 1621 | if (test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sock->flags) == 0) |
1622 | return; | 1622 | return; |
1623 | 1623 | ||
1624 | xprt_write_space(xprt); | 1624 | xprt_write_space(xprt); |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 9efbdbde2b08..91aea071ab27 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -191,6 +191,7 @@ void tipc_link_add_bc_peer(struct tipc_link *snd_l, | |||
191 | 191 | ||
192 | snd_l->ackers++; | 192 | snd_l->ackers++; |
193 | rcv_l->acked = snd_l->snd_nxt - 1; | 193 | rcv_l->acked = snd_l->snd_nxt - 1; |
194 | snd_l->state = LINK_ESTABLISHED; | ||
194 | tipc_link_build_bc_init_msg(uc_l, xmitq); | 195 | tipc_link_build_bc_init_msg(uc_l, xmitq); |
195 | } | 196 | } |
196 | 197 | ||
@@ -206,6 +207,7 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l, | |||
206 | rcv_l->state = LINK_RESET; | 207 | rcv_l->state = LINK_RESET; |
207 | if (!snd_l->ackers) { | 208 | if (!snd_l->ackers) { |
208 | tipc_link_reset(snd_l); | 209 | tipc_link_reset(snd_l); |
210 | snd_l->state = LINK_RESET; | ||
209 | __skb_queue_purge(xmitq); | 211 | __skb_queue_purge(xmitq); |
210 | } | 212 | } |
211 | } | 213 | } |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 552dbaba9cf3..b53246fb0412 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -105,6 +105,7 @@ struct tipc_sock { | |||
105 | static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb); | 105 | static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb); |
106 | static void tipc_data_ready(struct sock *sk); | 106 | static void tipc_data_ready(struct sock *sk); |
107 | static void tipc_write_space(struct sock *sk); | 107 | static void tipc_write_space(struct sock *sk); |
108 | static void tipc_sock_destruct(struct sock *sk); | ||
108 | static int tipc_release(struct socket *sock); | 109 | static int tipc_release(struct socket *sock); |
109 | static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); | 110 | static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); |
110 | static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p); | 111 | static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p); |
@@ -381,6 +382,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock, | |||
381 | sk->sk_rcvbuf = sysctl_tipc_rmem[1]; | 382 | sk->sk_rcvbuf = sysctl_tipc_rmem[1]; |
382 | sk->sk_data_ready = tipc_data_ready; | 383 | sk->sk_data_ready = tipc_data_ready; |
383 | sk->sk_write_space = tipc_write_space; | 384 | sk->sk_write_space = tipc_write_space; |
385 | sk->sk_destruct = tipc_sock_destruct; | ||
384 | tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; | 386 | tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; |
385 | tsk->sent_unacked = 0; | 387 | tsk->sent_unacked = 0; |
386 | atomic_set(&tsk->dupl_rcvcnt, 0); | 388 | atomic_set(&tsk->dupl_rcvcnt, 0); |
@@ -470,9 +472,6 @@ static int tipc_release(struct socket *sock) | |||
470 | tipc_node_remove_conn(net, dnode, tsk->portid); | 472 | tipc_node_remove_conn(net, dnode, tsk->portid); |
471 | } | 473 | } |
472 | 474 | ||
473 | /* Discard any remaining (connection-based) messages in receive queue */ | ||
474 | __skb_queue_purge(&sk->sk_receive_queue); | ||
475 | |||
476 | /* Reject any messages that accumulated in backlog queue */ | 475 | /* Reject any messages that accumulated in backlog queue */ |
477 | sock->state = SS_DISCONNECTING; | 476 | sock->state = SS_DISCONNECTING; |
478 | release_sock(sk); | 477 | release_sock(sk); |
@@ -1515,6 +1514,11 @@ static void tipc_data_ready(struct sock *sk) | |||
1515 | rcu_read_unlock(); | 1514 | rcu_read_unlock(); |
1516 | } | 1515 | } |
1517 | 1516 | ||
1517 | static void tipc_sock_destruct(struct sock *sk) | ||
1518 | { | ||
1519 | __skb_queue_purge(&sk->sk_receive_queue); | ||
1520 | } | ||
1521 | |||
1518 | /** | 1522 | /** |
1519 | * filter_connect - Handle all incoming messages for a connection-based socket | 1523 | * filter_connect - Handle all incoming messages for a connection-based socket |
1520 | * @tsk: TIPC socket | 1524 | * @tsk: TIPC socket |
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index ad2719ad4c1b..70c03271b798 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c | |||
@@ -158,8 +158,11 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb, | |||
158 | struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value; | 158 | struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value; |
159 | struct rtable *rt; | 159 | struct rtable *rt; |
160 | 160 | ||
161 | if (skb_headroom(skb) < UDP_MIN_HEADROOM) | 161 | if (skb_headroom(skb) < UDP_MIN_HEADROOM) { |
162 | pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC); | 162 | err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC); |
163 | if (err) | ||
164 | goto tx_error; | ||
165 | } | ||
163 | 166 | ||
164 | skb_set_inner_protocol(skb, htons(ETH_P_TIPC)); | 167 | skb_set_inner_protocol(skb, htons(ETH_P_TIPC)); |
165 | ub = rcu_dereference_rtnl(b->media_ptr); | 168 | ub = rcu_dereference_rtnl(b->media_ptr); |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 955ec152cb71..45aebd966978 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -326,6 +326,118 @@ found: | |||
326 | return s; | 326 | return s; |
327 | } | 327 | } |
328 | 328 | ||
329 | /* Support code for asymmetrically connected dgram sockets | ||
330 | * | ||
331 | * If a datagram socket is connected to a socket not itself connected | ||
332 | * to the first socket (eg, /dev/log), clients may only enqueue more | ||
333 | * messages if the present receive queue of the server socket is not | ||
334 | * "too large". This means there's a second writeability condition | ||
335 | * poll and sendmsg need to test. The dgram recv code will do a wake | ||
336 | * up on the peer_wait wait queue of a socket upon reception of a | ||
337 | * datagram which needs to be propagated to sleeping would-be writers | ||
338 | * since these might not have sent anything so far. This can't be | ||
339 | * accomplished via poll_wait because the lifetime of the server | ||
340 | * socket might be less than that of its clients if these break their | ||
341 | * association with it or if the server socket is closed while clients | ||
342 | * are still connected to it and there's no way to inform "a polling | ||
343 | * implementation" that it should let go of a certain wait queue | ||
344 | * | ||
345 | * In order to propagate a wake up, a wait_queue_t of the client | ||
346 | * socket is enqueued on the peer_wait queue of the server socket | ||
347 | * whose wake function does a wake_up on the ordinary client socket | ||
348 | * wait queue. This connection is established whenever a write (or | ||
349 | * poll for write) hit the flow control condition and broken when the | ||
350 | * association to the server socket is dissolved or after a wake up | ||
351 | * was relayed. | ||
352 | */ | ||
353 | |||
354 | static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags, | ||
355 | void *key) | ||
356 | { | ||
357 | struct unix_sock *u; | ||
358 | wait_queue_head_t *u_sleep; | ||
359 | |||
360 | u = container_of(q, struct unix_sock, peer_wake); | ||
361 | |||
362 | __remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait, | ||
363 | q); | ||
364 | u->peer_wake.private = NULL; | ||
365 | |||
366 | /* relaying can only happen while the wq still exists */ | ||
367 | u_sleep = sk_sleep(&u->sk); | ||
368 | if (u_sleep) | ||
369 | wake_up_interruptible_poll(u_sleep, key); | ||
370 | |||
371 | return 0; | ||
372 | } | ||
373 | |||
374 | static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other) | ||
375 | { | ||
376 | struct unix_sock *u, *u_other; | ||
377 | int rc; | ||
378 | |||
379 | u = unix_sk(sk); | ||
380 | u_other = unix_sk(other); | ||
381 | rc = 0; | ||
382 | spin_lock(&u_other->peer_wait.lock); | ||
383 | |||
384 | if (!u->peer_wake.private) { | ||
385 | u->peer_wake.private = other; | ||
386 | __add_wait_queue(&u_other->peer_wait, &u->peer_wake); | ||
387 | |||
388 | rc = 1; | ||
389 | } | ||
390 | |||
391 | spin_unlock(&u_other->peer_wait.lock); | ||
392 | return rc; | ||
393 | } | ||
394 | |||
395 | static void unix_dgram_peer_wake_disconnect(struct sock *sk, | ||
396 | struct sock *other) | ||
397 | { | ||
398 | struct unix_sock *u, *u_other; | ||
399 | |||
400 | u = unix_sk(sk); | ||
401 | u_other = unix_sk(other); | ||
402 | spin_lock(&u_other->peer_wait.lock); | ||
403 | |||
404 | if (u->peer_wake.private == other) { | ||
405 | __remove_wait_queue(&u_other->peer_wait, &u->peer_wake); | ||
406 | u->peer_wake.private = NULL; | ||
407 | } | ||
408 | |||
409 | spin_unlock(&u_other->peer_wait.lock); | ||
410 | } | ||
411 | |||
412 | static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk, | ||
413 | struct sock *other) | ||
414 | { | ||
415 | unix_dgram_peer_wake_disconnect(sk, other); | ||
416 | wake_up_interruptible_poll(sk_sleep(sk), | ||
417 | POLLOUT | | ||
418 | POLLWRNORM | | ||
419 | POLLWRBAND); | ||
420 | } | ||
421 | |||
422 | /* preconditions: | ||
423 | * - unix_peer(sk) == other | ||
424 | * - association is stable | ||
425 | */ | ||
426 | static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other) | ||
427 | { | ||
428 | int connected; | ||
429 | |||
430 | connected = unix_dgram_peer_wake_connect(sk, other); | ||
431 | |||
432 | if (unix_recvq_full(other)) | ||
433 | return 1; | ||
434 | |||
435 | if (connected) | ||
436 | unix_dgram_peer_wake_disconnect(sk, other); | ||
437 | |||
438 | return 0; | ||
439 | } | ||
440 | |||
329 | static int unix_writable(const struct sock *sk) | 441 | static int unix_writable(const struct sock *sk) |
330 | { | 442 | { |
331 | return sk->sk_state != TCP_LISTEN && | 443 | return sk->sk_state != TCP_LISTEN && |
@@ -431,6 +543,8 @@ static void unix_release_sock(struct sock *sk, int embrion) | |||
431 | skpair->sk_state_change(skpair); | 543 | skpair->sk_state_change(skpair); |
432 | sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); | 544 | sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP); |
433 | } | 545 | } |
546 | |||
547 | unix_dgram_peer_wake_disconnect(sk, skpair); | ||
434 | sock_put(skpair); /* It may now die */ | 548 | sock_put(skpair); /* It may now die */ |
435 | unix_peer(sk) = NULL; | 549 | unix_peer(sk) = NULL; |
436 | } | 550 | } |
@@ -666,6 +780,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern) | |||
666 | INIT_LIST_HEAD(&u->link); | 780 | INIT_LIST_HEAD(&u->link); |
667 | mutex_init(&u->readlock); /* single task reading lock */ | 781 | mutex_init(&u->readlock); /* single task reading lock */ |
668 | init_waitqueue_head(&u->peer_wait); | 782 | init_waitqueue_head(&u->peer_wait); |
783 | init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay); | ||
669 | unix_insert_socket(unix_sockets_unbound(sk), sk); | 784 | unix_insert_socket(unix_sockets_unbound(sk), sk); |
670 | out: | 785 | out: |
671 | if (sk == NULL) | 786 | if (sk == NULL) |
@@ -1033,6 +1148,8 @@ restart: | |||
1033 | if (unix_peer(sk)) { | 1148 | if (unix_peer(sk)) { |
1034 | struct sock *old_peer = unix_peer(sk); | 1149 | struct sock *old_peer = unix_peer(sk); |
1035 | unix_peer(sk) = other; | 1150 | unix_peer(sk) = other; |
1151 | unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer); | ||
1152 | |||
1036 | unix_state_double_unlock(sk, other); | 1153 | unix_state_double_unlock(sk, other); |
1037 | 1154 | ||
1038 | if (other != old_peer) | 1155 | if (other != old_peer) |
@@ -1434,6 +1551,14 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen | |||
1434 | return err; | 1551 | return err; |
1435 | } | 1552 | } |
1436 | 1553 | ||
1554 | static bool unix_passcred_enabled(const struct socket *sock, | ||
1555 | const struct sock *other) | ||
1556 | { | ||
1557 | return test_bit(SOCK_PASSCRED, &sock->flags) || | ||
1558 | !other->sk_socket || | ||
1559 | test_bit(SOCK_PASSCRED, &other->sk_socket->flags); | ||
1560 | } | ||
1561 | |||
1437 | /* | 1562 | /* |
1438 | * Some apps rely on write() giving SCM_CREDENTIALS | 1563 | * Some apps rely on write() giving SCM_CREDENTIALS |
1439 | * We include credentials if source or destination socket | 1564 | * We include credentials if source or destination socket |
@@ -1444,14 +1569,41 @@ static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock, | |||
1444 | { | 1569 | { |
1445 | if (UNIXCB(skb).pid) | 1570 | if (UNIXCB(skb).pid) |
1446 | return; | 1571 | return; |
1447 | if (test_bit(SOCK_PASSCRED, &sock->flags) || | 1572 | if (unix_passcred_enabled(sock, other)) { |
1448 | !other->sk_socket || | ||
1449 | test_bit(SOCK_PASSCRED, &other->sk_socket->flags)) { | ||
1450 | UNIXCB(skb).pid = get_pid(task_tgid(current)); | 1573 | UNIXCB(skb).pid = get_pid(task_tgid(current)); |
1451 | current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid); | 1574 | current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid); |
1452 | } | 1575 | } |
1453 | } | 1576 | } |
1454 | 1577 | ||
1578 | static int maybe_init_creds(struct scm_cookie *scm, | ||
1579 | struct socket *socket, | ||
1580 | const struct sock *other) | ||
1581 | { | ||
1582 | int err; | ||
1583 | struct msghdr msg = { .msg_controllen = 0 }; | ||
1584 | |||
1585 | err = scm_send(socket, &msg, scm, false); | ||
1586 | if (err) | ||
1587 | return err; | ||
1588 | |||
1589 | if (unix_passcred_enabled(socket, other)) { | ||
1590 | scm->pid = get_pid(task_tgid(current)); | ||
1591 | current_uid_gid(&scm->creds.uid, &scm->creds.gid); | ||
1592 | } | ||
1593 | return err; | ||
1594 | } | ||
1595 | |||
1596 | static bool unix_skb_scm_eq(struct sk_buff *skb, | ||
1597 | struct scm_cookie *scm) | ||
1598 | { | ||
1599 | const struct unix_skb_parms *u = &UNIXCB(skb); | ||
1600 | |||
1601 | return u->pid == scm->pid && | ||
1602 | uid_eq(u->uid, scm->creds.uid) && | ||
1603 | gid_eq(u->gid, scm->creds.gid) && | ||
1604 | unix_secdata_eq(scm, skb); | ||
1605 | } | ||
1606 | |||
1455 | /* | 1607 | /* |
1456 | * Send AF_UNIX data. | 1608 | * Send AF_UNIX data. |
1457 | */ | 1609 | */ |
@@ -1472,6 +1624,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, | |||
1472 | struct scm_cookie scm; | 1624 | struct scm_cookie scm; |
1473 | int max_level; | 1625 | int max_level; |
1474 | int data_len = 0; | 1626 | int data_len = 0; |
1627 | int sk_locked; | ||
1475 | 1628 | ||
1476 | wait_for_unix_gc(); | 1629 | wait_for_unix_gc(); |
1477 | err = scm_send(sock, msg, &scm, false); | 1630 | err = scm_send(sock, msg, &scm, false); |
@@ -1550,12 +1703,14 @@ restart: | |||
1550 | goto out_free; | 1703 | goto out_free; |
1551 | } | 1704 | } |
1552 | 1705 | ||
1706 | sk_locked = 0; | ||
1553 | unix_state_lock(other); | 1707 | unix_state_lock(other); |
1708 | restart_locked: | ||
1554 | err = -EPERM; | 1709 | err = -EPERM; |
1555 | if (!unix_may_send(sk, other)) | 1710 | if (!unix_may_send(sk, other)) |
1556 | goto out_unlock; | 1711 | goto out_unlock; |
1557 | 1712 | ||
1558 | if (sock_flag(other, SOCK_DEAD)) { | 1713 | if (unlikely(sock_flag(other, SOCK_DEAD))) { |
1559 | /* | 1714 | /* |
1560 | * Check with 1003.1g - what should | 1715 | * Check with 1003.1g - what should |
1561 | * datagram error | 1716 | * datagram error |
@@ -1563,10 +1718,14 @@ restart: | |||
1563 | unix_state_unlock(other); | 1718 | unix_state_unlock(other); |
1564 | sock_put(other); | 1719 | sock_put(other); |
1565 | 1720 | ||
1721 | if (!sk_locked) | ||
1722 | unix_state_lock(sk); | ||
1723 | |||
1566 | err = 0; | 1724 | err = 0; |
1567 | unix_state_lock(sk); | ||
1568 | if (unix_peer(sk) == other) { | 1725 | if (unix_peer(sk) == other) { |
1569 | unix_peer(sk) = NULL; | 1726 | unix_peer(sk) = NULL; |
1727 | unix_dgram_peer_wake_disconnect_wakeup(sk, other); | ||
1728 | |||
1570 | unix_state_unlock(sk); | 1729 | unix_state_unlock(sk); |
1571 | 1730 | ||
1572 | unix_dgram_disconnected(sk, other); | 1731 | unix_dgram_disconnected(sk, other); |
@@ -1592,21 +1751,38 @@ restart: | |||
1592 | goto out_unlock; | 1751 | goto out_unlock; |
1593 | } | 1752 | } |
1594 | 1753 | ||
1595 | if (unix_peer(other) != sk && unix_recvq_full(other)) { | 1754 | if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) { |
1596 | if (!timeo) { | 1755 | if (timeo) { |
1597 | err = -EAGAIN; | 1756 | timeo = unix_wait_for_peer(other, timeo); |
1598 | goto out_unlock; | 1757 | |
1758 | err = sock_intr_errno(timeo); | ||
1759 | if (signal_pending(current)) | ||
1760 | goto out_free; | ||
1761 | |||
1762 | goto restart; | ||
1599 | } | 1763 | } |
1600 | 1764 | ||
1601 | timeo = unix_wait_for_peer(other, timeo); | 1765 | if (!sk_locked) { |
1766 | unix_state_unlock(other); | ||
1767 | unix_state_double_lock(sk, other); | ||
1768 | } | ||
1602 | 1769 | ||
1603 | err = sock_intr_errno(timeo); | 1770 | if (unix_peer(sk) != other || |
1604 | if (signal_pending(current)) | 1771 | unix_dgram_peer_wake_me(sk, other)) { |
1605 | goto out_free; | 1772 | err = -EAGAIN; |
1773 | sk_locked = 1; | ||
1774 | goto out_unlock; | ||
1775 | } | ||
1606 | 1776 | ||
1607 | goto restart; | 1777 | if (!sk_locked) { |
1778 | sk_locked = 1; | ||
1779 | goto restart_locked; | ||
1780 | } | ||
1608 | } | 1781 | } |
1609 | 1782 | ||
1783 | if (unlikely(sk_locked)) | ||
1784 | unix_state_unlock(sk); | ||
1785 | |||
1610 | if (sock_flag(other, SOCK_RCVTSTAMP)) | 1786 | if (sock_flag(other, SOCK_RCVTSTAMP)) |
1611 | __net_timestamp(skb); | 1787 | __net_timestamp(skb); |
1612 | maybe_add_creds(skb, sock, other); | 1788 | maybe_add_creds(skb, sock, other); |
@@ -1620,6 +1796,8 @@ restart: | |||
1620 | return len; | 1796 | return len; |
1621 | 1797 | ||
1622 | out_unlock: | 1798 | out_unlock: |
1799 | if (sk_locked) | ||
1800 | unix_state_unlock(sk); | ||
1623 | unix_state_unlock(other); | 1801 | unix_state_unlock(other); |
1624 | out_free: | 1802 | out_free: |
1625 | kfree_skb(skb); | 1803 | kfree_skb(skb); |
@@ -1741,8 +1919,10 @@ out_err: | |||
1741 | static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, | 1919 | static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, |
1742 | int offset, size_t size, int flags) | 1920 | int offset, size_t size, int flags) |
1743 | { | 1921 | { |
1744 | int err = 0; | 1922 | int err; |
1745 | bool send_sigpipe = true; | 1923 | bool send_sigpipe = false; |
1924 | bool init_scm = true; | ||
1925 | struct scm_cookie scm; | ||
1746 | struct sock *other, *sk = socket->sk; | 1926 | struct sock *other, *sk = socket->sk; |
1747 | struct sk_buff *skb, *newskb = NULL, *tail = NULL; | 1927 | struct sk_buff *skb, *newskb = NULL, *tail = NULL; |
1748 | 1928 | ||
@@ -1760,7 +1940,7 @@ alloc_skb: | |||
1760 | newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, | 1940 | newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, |
1761 | &err, 0); | 1941 | &err, 0); |
1762 | if (!newskb) | 1942 | if (!newskb) |
1763 | return err; | 1943 | goto err; |
1764 | } | 1944 | } |
1765 | 1945 | ||
1766 | /* we must acquire readlock as we modify already present | 1946 | /* we must acquire readlock as we modify already present |
@@ -1769,12 +1949,12 @@ alloc_skb: | |||
1769 | err = mutex_lock_interruptible(&unix_sk(other)->readlock); | 1949 | err = mutex_lock_interruptible(&unix_sk(other)->readlock); |
1770 | if (err) { | 1950 | if (err) { |
1771 | err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS; | 1951 | err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS; |
1772 | send_sigpipe = false; | ||
1773 | goto err; | 1952 | goto err; |
1774 | } | 1953 | } |
1775 | 1954 | ||
1776 | if (sk->sk_shutdown & SEND_SHUTDOWN) { | 1955 | if (sk->sk_shutdown & SEND_SHUTDOWN) { |
1777 | err = -EPIPE; | 1956 | err = -EPIPE; |
1957 | send_sigpipe = true; | ||
1778 | goto err_unlock; | 1958 | goto err_unlock; |
1779 | } | 1959 | } |
1780 | 1960 | ||
@@ -1783,17 +1963,27 @@ alloc_skb: | |||
1783 | if (sock_flag(other, SOCK_DEAD) || | 1963 | if (sock_flag(other, SOCK_DEAD) || |
1784 | other->sk_shutdown & RCV_SHUTDOWN) { | 1964 | other->sk_shutdown & RCV_SHUTDOWN) { |
1785 | err = -EPIPE; | 1965 | err = -EPIPE; |
1966 | send_sigpipe = true; | ||
1786 | goto err_state_unlock; | 1967 | goto err_state_unlock; |
1787 | } | 1968 | } |
1788 | 1969 | ||
1970 | if (init_scm) { | ||
1971 | err = maybe_init_creds(&scm, socket, other); | ||
1972 | if (err) | ||
1973 | goto err_state_unlock; | ||
1974 | init_scm = false; | ||
1975 | } | ||
1976 | |||
1789 | skb = skb_peek_tail(&other->sk_receive_queue); | 1977 | skb = skb_peek_tail(&other->sk_receive_queue); |
1790 | if (tail && tail == skb) { | 1978 | if (tail && tail == skb) { |
1791 | skb = newskb; | 1979 | skb = newskb; |
1792 | } else if (!skb) { | 1980 | } else if (!skb || !unix_skb_scm_eq(skb, &scm)) { |
1793 | if (newskb) | 1981 | if (newskb) { |
1794 | skb = newskb; | 1982 | skb = newskb; |
1795 | else | 1983 | } else { |
1984 | tail = skb; | ||
1796 | goto alloc_skb; | 1985 | goto alloc_skb; |
1986 | } | ||
1797 | } else if (newskb) { | 1987 | } else if (newskb) { |
1798 | /* this is fast path, we don't necessarily need to | 1988 | /* this is fast path, we don't necessarily need to |
1799 | * call to kfree_skb even though with newskb == NULL | 1989 | * call to kfree_skb even though with newskb == NULL |
@@ -1814,6 +2004,9 @@ alloc_skb: | |||
1814 | atomic_add(size, &sk->sk_wmem_alloc); | 2004 | atomic_add(size, &sk->sk_wmem_alloc); |
1815 | 2005 | ||
1816 | if (newskb) { | 2006 | if (newskb) { |
2007 | err = unix_scm_to_skb(&scm, skb, false); | ||
2008 | if (err) | ||
2009 | goto err_state_unlock; | ||
1817 | spin_lock(&other->sk_receive_queue.lock); | 2010 | spin_lock(&other->sk_receive_queue.lock); |
1818 | __skb_queue_tail(&other->sk_receive_queue, newskb); | 2011 | __skb_queue_tail(&other->sk_receive_queue, newskb); |
1819 | spin_unlock(&other->sk_receive_queue.lock); | 2012 | spin_unlock(&other->sk_receive_queue.lock); |
@@ -1823,7 +2016,7 @@ alloc_skb: | |||
1823 | mutex_unlock(&unix_sk(other)->readlock); | 2016 | mutex_unlock(&unix_sk(other)->readlock); |
1824 | 2017 | ||
1825 | other->sk_data_ready(other); | 2018 | other->sk_data_ready(other); |
1826 | 2019 | scm_destroy(&scm); | |
1827 | return size; | 2020 | return size; |
1828 | 2021 | ||
1829 | err_state_unlock: | 2022 | err_state_unlock: |
@@ -1834,6 +2027,8 @@ err: | |||
1834 | kfree_skb(newskb); | 2027 | kfree_skb(newskb); |
1835 | if (send_sigpipe && !(flags & MSG_NOSIGNAL)) | 2028 | if (send_sigpipe && !(flags & MSG_NOSIGNAL)) |
1836 | send_sig(SIGPIPE, current, 0); | 2029 | send_sig(SIGPIPE, current, 0); |
2030 | if (!init_scm) | ||
2031 | scm_destroy(&scm); | ||
1837 | return err; | 2032 | return err; |
1838 | } | 2033 | } |
1839 | 2034 | ||
@@ -1996,7 +2191,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, | |||
1996 | !timeo) | 2191 | !timeo) |
1997 | break; | 2192 | break; |
1998 | 2193 | ||
1999 | set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2194 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
2000 | unix_state_unlock(sk); | 2195 | unix_state_unlock(sk); |
2001 | timeo = freezable_schedule_timeout(timeo); | 2196 | timeo = freezable_schedule_timeout(timeo); |
2002 | unix_state_lock(sk); | 2197 | unix_state_lock(sk); |
@@ -2004,7 +2199,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, | |||
2004 | if (sock_flag(sk, SOCK_DEAD)) | 2199 | if (sock_flag(sk, SOCK_DEAD)) |
2005 | break; | 2200 | break; |
2006 | 2201 | ||
2007 | clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); | 2202 | sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
2008 | } | 2203 | } |
2009 | 2204 | ||
2010 | finish_wait(sk_sleep(sk), &wait); | 2205 | finish_wait(sk_sleep(sk), &wait); |
@@ -2137,10 +2332,7 @@ unlock: | |||
2137 | 2332 | ||
2138 | if (check_creds) { | 2333 | if (check_creds) { |
2139 | /* Never glue messages from different writers */ | 2334 | /* Never glue messages from different writers */ |
2140 | if ((UNIXCB(skb).pid != scm.pid) || | 2335 | if (!unix_skb_scm_eq(skb, &scm)) |
2141 | !uid_eq(UNIXCB(skb).uid, scm.creds.uid) || | ||
2142 | !gid_eq(UNIXCB(skb).gid, scm.creds.gid) || | ||
2143 | !unix_secdata_eq(&scm, skb)) | ||
2144 | break; | 2336 | break; |
2145 | } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { | 2337 | } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { |
2146 | /* Copy credentials */ | 2338 | /* Copy credentials */ |
@@ -2476,20 +2668,22 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock, | |||
2476 | return mask; | 2668 | return mask; |
2477 | 2669 | ||
2478 | writable = unix_writable(sk); | 2670 | writable = unix_writable(sk); |
2479 | other = unix_peer_get(sk); | 2671 | if (writable) { |
2480 | if (other) { | 2672 | unix_state_lock(sk); |
2481 | if (unix_peer(other) != sk) { | 2673 | |
2482 | sock_poll_wait(file, &unix_sk(other)->peer_wait, wait); | 2674 | other = unix_peer(sk); |
2483 | if (unix_recvq_full(other)) | 2675 | if (other && unix_peer(other) != sk && |
2484 | writable = 0; | 2676 | unix_recvq_full(other) && |
2485 | } | 2677 | unix_dgram_peer_wake_me(sk, other)) |
2486 | sock_put(other); | 2678 | writable = 0; |
2679 | |||
2680 | unix_state_unlock(sk); | ||
2487 | } | 2681 | } |
2488 | 2682 | ||
2489 | if (writable) | 2683 | if (writable) |
2490 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | 2684 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; |
2491 | else | 2685 | else |
2492 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | 2686 | sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); |
2493 | 2687 | ||
2494 | return mask; | 2688 | return mask; |
2495 | } | 2689 | } |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index c8b8ef5246a6..ef198903c0c3 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -955,6 +955,7 @@ static int patch_conexant_auto(struct hda_codec *codec) | |||
955 | */ | 955 | */ |
956 | 956 | ||
957 | static const struct hda_device_id snd_hda_id_conexant[] = { | 957 | static const struct hda_device_id snd_hda_id_conexant[] = { |
958 | HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto), | ||
958 | HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto), | 959 | HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto), |
959 | HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto), | 960 | HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto), |
960 | HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto), | 961 | HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto), |
@@ -972,9 +973,9 @@ static const struct hda_device_id snd_hda_id_conexant[] = { | |||
972 | HDA_CODEC_ENTRY(0x14f150ac, "CX20652", patch_conexant_auto), | 973 | HDA_CODEC_ENTRY(0x14f150ac, "CX20652", patch_conexant_auto), |
973 | HDA_CODEC_ENTRY(0x14f150b8, "CX20664", patch_conexant_auto), | 974 | HDA_CODEC_ENTRY(0x14f150b8, "CX20664", patch_conexant_auto), |
974 | HDA_CODEC_ENTRY(0x14f150b9, "CX20665", patch_conexant_auto), | 975 | HDA_CODEC_ENTRY(0x14f150b9, "CX20665", patch_conexant_auto), |
975 | HDA_CODEC_ENTRY(0x14f150f1, "CX20721", patch_conexant_auto), | 976 | HDA_CODEC_ENTRY(0x14f150f1, "CX21722", patch_conexant_auto), |
976 | HDA_CODEC_ENTRY(0x14f150f2, "CX20722", patch_conexant_auto), | 977 | HDA_CODEC_ENTRY(0x14f150f2, "CX20722", patch_conexant_auto), |
977 | HDA_CODEC_ENTRY(0x14f150f3, "CX20723", patch_conexant_auto), | 978 | HDA_CODEC_ENTRY(0x14f150f3, "CX21724", patch_conexant_auto), |
978 | HDA_CODEC_ENTRY(0x14f150f4, "CX20724", patch_conexant_auto), | 979 | HDA_CODEC_ENTRY(0x14f150f4, "CX20724", patch_conexant_auto), |
979 | HDA_CODEC_ENTRY(0x14f1510f, "CX20751/2", patch_conexant_auto), | 980 | HDA_CODEC_ENTRY(0x14f1510f, "CX20751/2", patch_conexant_auto), |
980 | HDA_CODEC_ENTRY(0x14f15110, "CX20751/2", patch_conexant_auto), | 981 | HDA_CODEC_ENTRY(0x14f15110, "CX20751/2", patch_conexant_auto), |
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index bdb6f226d006..4b6fb668c91c 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
@@ -2352,6 +2352,12 @@ static void intel_pin_eld_notify(void *audio_ptr, int port) | |||
2352 | struct hda_codec *codec = audio_ptr; | 2352 | struct hda_codec *codec = audio_ptr; |
2353 | int pin_nid = port + 0x04; | 2353 | int pin_nid = port + 0x04; |
2354 | 2354 | ||
2355 | /* skip notification during system suspend (but not in runtime PM); | ||
2356 | * the state will be updated at resume | ||
2357 | */ | ||
2358 | if (snd_power_get_state(codec->card) != SNDRV_CTL_POWER_D0) | ||
2359 | return; | ||
2360 | |||
2355 | check_presence_and_report(codec, pin_nid); | 2361 | check_presence_and_report(codec, pin_nid); |
2356 | } | 2362 | } |
2357 | 2363 | ||
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c index 9929efc6b9aa..b3ea24d64c50 100644 --- a/sound/soc/codecs/arizona.c +++ b/sound/soc/codecs/arizona.c | |||
@@ -1023,24 +1023,18 @@ void arizona_init_dvfs(struct arizona_priv *priv) | |||
1023 | } | 1023 | } |
1024 | EXPORT_SYMBOL_GPL(arizona_init_dvfs); | 1024 | EXPORT_SYMBOL_GPL(arizona_init_dvfs); |
1025 | 1025 | ||
1026 | static unsigned int arizona_sysclk_48k_rates[] = { | 1026 | static unsigned int arizona_opclk_ref_48k_rates[] = { |
1027 | 6144000, | 1027 | 6144000, |
1028 | 12288000, | 1028 | 12288000, |
1029 | 24576000, | 1029 | 24576000, |
1030 | 49152000, | 1030 | 49152000, |
1031 | 73728000, | ||
1032 | 98304000, | ||
1033 | 147456000, | ||
1034 | }; | 1031 | }; |
1035 | 1032 | ||
1036 | static unsigned int arizona_sysclk_44k1_rates[] = { | 1033 | static unsigned int arizona_opclk_ref_44k1_rates[] = { |
1037 | 5644800, | 1034 | 5644800, |
1038 | 11289600, | 1035 | 11289600, |
1039 | 22579200, | 1036 | 22579200, |
1040 | 45158400, | 1037 | 45158400, |
1041 | 67737600, | ||
1042 | 90316800, | ||
1043 | 135475200, | ||
1044 | }; | 1038 | }; |
1045 | 1039 | ||
1046 | static int arizona_set_opclk(struct snd_soc_codec *codec, unsigned int clk, | 1040 | static int arizona_set_opclk(struct snd_soc_codec *codec, unsigned int clk, |
@@ -1065,11 +1059,11 @@ static int arizona_set_opclk(struct snd_soc_codec *codec, unsigned int clk, | |||
1065 | } | 1059 | } |
1066 | 1060 | ||
1067 | if (refclk % 8000) | 1061 | if (refclk % 8000) |
1068 | rates = arizona_sysclk_44k1_rates; | 1062 | rates = arizona_opclk_ref_44k1_rates; |
1069 | else | 1063 | else |
1070 | rates = arizona_sysclk_48k_rates; | 1064 | rates = arizona_opclk_ref_48k_rates; |
1071 | 1065 | ||
1072 | for (ref = 0; ref < ARRAY_SIZE(arizona_sysclk_48k_rates) && | 1066 | for (ref = 0; ref < ARRAY_SIZE(arizona_opclk_ref_48k_rates) && |
1073 | rates[ref] <= refclk; ref++) { | 1067 | rates[ref] <= refclk; ref++) { |
1074 | div = 1; | 1068 | div = 1; |
1075 | while (rates[ref] / div >= freq && div < 32) { | 1069 | while (rates[ref] / div >= freq && div < 32) { |
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c index 969e337dc17c..84f5eb07a91b 100644 --- a/sound/soc/codecs/es8328.c +++ b/sound/soc/codecs/es8328.c | |||
@@ -205,18 +205,18 @@ static const struct snd_kcontrol_new es8328_right_line_controls = | |||
205 | 205 | ||
206 | /* Left Mixer */ | 206 | /* Left Mixer */ |
207 | static const struct snd_kcontrol_new es8328_left_mixer_controls[] = { | 207 | static const struct snd_kcontrol_new es8328_left_mixer_controls[] = { |
208 | SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 8, 1, 0), | 208 | SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 7, 1, 0), |
209 | SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 7, 1, 0), | 209 | SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 6, 1, 0), |
210 | SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 8, 1, 0), | 210 | SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 7, 1, 0), |
211 | SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 7, 1, 0), | 211 | SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 6, 1, 0), |
212 | }; | 212 | }; |
213 | 213 | ||
214 | /* Right Mixer */ | 214 | /* Right Mixer */ |
215 | static const struct snd_kcontrol_new es8328_right_mixer_controls[] = { | 215 | static const struct snd_kcontrol_new es8328_right_mixer_controls[] = { |
216 | SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 8, 1, 0), | 216 | SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 7, 1, 0), |
217 | SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 7, 1, 0), | 217 | SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 6, 1, 0), |
218 | SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 8, 1, 0), | 218 | SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 7, 1, 0), |
219 | SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 7, 1, 0), | 219 | SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 6, 1, 0), |
220 | }; | 220 | }; |
221 | 221 | ||
222 | static const char * const es8328_pga_sel[] = { | 222 | static const char * const es8328_pga_sel[] = { |
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c index 7fc7b4e3f444..c1b87c5800b1 100644 --- a/sound/soc/codecs/nau8825.c +++ b/sound/soc/codecs/nau8825.c | |||
@@ -1271,6 +1271,36 @@ static int nau8825_i2c_remove(struct i2c_client *client) | |||
1271 | return 0; | 1271 | return 0; |
1272 | } | 1272 | } |
1273 | 1273 | ||
1274 | #ifdef CONFIG_PM_SLEEP | ||
1275 | static int nau8825_suspend(struct device *dev) | ||
1276 | { | ||
1277 | struct i2c_client *client = to_i2c_client(dev); | ||
1278 | struct nau8825 *nau8825 = dev_get_drvdata(dev); | ||
1279 | |||
1280 | disable_irq(client->irq); | ||
1281 | regcache_cache_only(nau8825->regmap, true); | ||
1282 | regcache_mark_dirty(nau8825->regmap); | ||
1283 | |||
1284 | return 0; | ||
1285 | } | ||
1286 | |||
1287 | static int nau8825_resume(struct device *dev) | ||
1288 | { | ||
1289 | struct i2c_client *client = to_i2c_client(dev); | ||
1290 | struct nau8825 *nau8825 = dev_get_drvdata(dev); | ||
1291 | |||
1292 | regcache_cache_only(nau8825->regmap, false); | ||
1293 | regcache_sync(nau8825->regmap); | ||
1294 | enable_irq(client->irq); | ||
1295 | |||
1296 | return 0; | ||
1297 | } | ||
1298 | #endif | ||
1299 | |||
1300 | static const struct dev_pm_ops nau8825_pm = { | ||
1301 | SET_SYSTEM_SLEEP_PM_OPS(nau8825_suspend, nau8825_resume) | ||
1302 | }; | ||
1303 | |||
1274 | static const struct i2c_device_id nau8825_i2c_ids[] = { | 1304 | static const struct i2c_device_id nau8825_i2c_ids[] = { |
1275 | { "nau8825", 0 }, | 1305 | { "nau8825", 0 }, |
1276 | { } | 1306 | { } |
@@ -1297,6 +1327,7 @@ static struct i2c_driver nau8825_driver = { | |||
1297 | .name = "nau8825", | 1327 | .name = "nau8825", |
1298 | .of_match_table = of_match_ptr(nau8825_of_ids), | 1328 | .of_match_table = of_match_ptr(nau8825_of_ids), |
1299 | .acpi_match_table = ACPI_PTR(nau8825_acpi_match), | 1329 | .acpi_match_table = ACPI_PTR(nau8825_acpi_match), |
1330 | .pm = &nau8825_pm, | ||
1300 | }, | 1331 | }, |
1301 | .probe = nau8825_i2c_probe, | 1332 | .probe = nau8825_i2c_probe, |
1302 | .remove = nau8825_i2c_remove, | 1333 | .remove = nau8825_i2c_remove, |
diff --git a/sound/soc/codecs/rl6231.c b/sound/soc/codecs/rl6231.c index aca479fa7670..1dc68ab08a17 100644 --- a/sound/soc/codecs/rl6231.c +++ b/sound/soc/codecs/rl6231.c | |||
@@ -80,8 +80,10 @@ int rl6231_calc_dmic_clk(int rate) | |||
80 | } | 80 | } |
81 | 81 | ||
82 | for (i = 0; i < ARRAY_SIZE(div); i++) { | 82 | for (i = 0; i < ARRAY_SIZE(div); i++) { |
83 | /* find divider that gives DMIC frequency below 3MHz */ | 83 | if ((div[i] % 3) == 0) |
84 | if (3000000 * div[i] >= rate) | 84 | continue; |
85 | /* find divider that gives DMIC frequency below 3.072MHz */ | ||
86 | if (3072000 * div[i] >= rate) | ||
85 | return i; | 87 | return i; |
86 | } | 88 | } |
87 | 89 | ||
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index 28132375e427..ef76940f9dcb 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c | |||
@@ -245,7 +245,7 @@ struct rt5645_priv { | |||
245 | struct snd_soc_jack *hp_jack; | 245 | struct snd_soc_jack *hp_jack; |
246 | struct snd_soc_jack *mic_jack; | 246 | struct snd_soc_jack *mic_jack; |
247 | struct snd_soc_jack *btn_jack; | 247 | struct snd_soc_jack *btn_jack; |
248 | struct delayed_work jack_detect_work; | 248 | struct delayed_work jack_detect_work, rcclock_work; |
249 | struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)]; | 249 | struct regulator_bulk_data supplies[ARRAY_SIZE(rt5645_supply_names)]; |
250 | struct rt5645_eq_param_s *eq_param; | 250 | struct rt5645_eq_param_s *eq_param; |
251 | 251 | ||
@@ -565,12 +565,33 @@ static int rt5645_hweq_put(struct snd_kcontrol *kcontrol, | |||
565 | .put = rt5645_hweq_put \ | 565 | .put = rt5645_hweq_put \ |
566 | } | 566 | } |
567 | 567 | ||
568 | static int rt5645_spk_put_volsw(struct snd_kcontrol *kcontrol, | ||
569 | struct snd_ctl_elem_value *ucontrol) | ||
570 | { | ||
571 | struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); | ||
572 | struct rt5645_priv *rt5645 = snd_soc_component_get_drvdata(component); | ||
573 | int ret; | ||
574 | |||
575 | cancel_delayed_work_sync(&rt5645->rcclock_work); | ||
576 | |||
577 | regmap_update_bits(rt5645->regmap, RT5645_MICBIAS, | ||
578 | RT5645_PWR_CLK25M_MASK, RT5645_PWR_CLK25M_PU); | ||
579 | |||
580 | ret = snd_soc_put_volsw(kcontrol, ucontrol); | ||
581 | |||
582 | queue_delayed_work(system_power_efficient_wq, &rt5645->rcclock_work, | ||
583 | msecs_to_jiffies(200)); | ||
584 | |||
585 | return ret; | ||
586 | } | ||
587 | |||
568 | static const struct snd_kcontrol_new rt5645_snd_controls[] = { | 588 | static const struct snd_kcontrol_new rt5645_snd_controls[] = { |
569 | /* Speaker Output Volume */ | 589 | /* Speaker Output Volume */ |
570 | SOC_DOUBLE("Speaker Channel Switch", RT5645_SPK_VOL, | 590 | SOC_DOUBLE("Speaker Channel Switch", RT5645_SPK_VOL, |
571 | RT5645_VOL_L_SFT, RT5645_VOL_R_SFT, 1, 1), | 591 | RT5645_VOL_L_SFT, RT5645_VOL_R_SFT, 1, 1), |
572 | SOC_DOUBLE_TLV("Speaker Playback Volume", RT5645_SPK_VOL, | 592 | SOC_DOUBLE_EXT_TLV("Speaker Playback Volume", RT5645_SPK_VOL, |
573 | RT5645_L_VOL_SFT, RT5645_R_VOL_SFT, 39, 1, out_vol_tlv), | 593 | RT5645_L_VOL_SFT, RT5645_R_VOL_SFT, 39, 1, snd_soc_get_volsw, |
594 | rt5645_spk_put_volsw, out_vol_tlv), | ||
574 | 595 | ||
575 | /* ClassD modulator Speaker Gain Ratio */ | 596 | /* ClassD modulator Speaker Gain Ratio */ |
576 | SOC_SINGLE_TLV("Speaker ClassD Playback Volume", RT5645_SPO_CLSD_RATIO, | 597 | SOC_SINGLE_TLV("Speaker ClassD Playback Volume", RT5645_SPO_CLSD_RATIO, |
@@ -1498,7 +1519,7 @@ static void hp_amp_power(struct snd_soc_codec *codec, int on) | |||
1498 | regmap_write(rt5645->regmap, RT5645_PR_BASE + | 1519 | regmap_write(rt5645->regmap, RT5645_PR_BASE + |
1499 | RT5645_MAMP_INT_REG2, 0xfc00); | 1520 | RT5645_MAMP_INT_REG2, 0xfc00); |
1500 | snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140); | 1521 | snd_soc_write(codec, RT5645_DEPOP_M2, 0x1140); |
1501 | msleep(40); | 1522 | msleep(70); |
1502 | rt5645->hp_on = true; | 1523 | rt5645->hp_on = true; |
1503 | } else { | 1524 | } else { |
1504 | /* depop parameters */ | 1525 | /* depop parameters */ |
@@ -3122,6 +3143,15 @@ static void rt5645_jack_detect_work(struct work_struct *work) | |||
3122 | SND_JACK_BTN_2 | SND_JACK_BTN_3); | 3143 | SND_JACK_BTN_2 | SND_JACK_BTN_3); |
3123 | } | 3144 | } |
3124 | 3145 | ||
3146 | static void rt5645_rcclock_work(struct work_struct *work) | ||
3147 | { | ||
3148 | struct rt5645_priv *rt5645 = | ||
3149 | container_of(work, struct rt5645_priv, rcclock_work.work); | ||
3150 | |||
3151 | regmap_update_bits(rt5645->regmap, RT5645_MICBIAS, | ||
3152 | RT5645_PWR_CLK25M_MASK, RT5645_PWR_CLK25M_PD); | ||
3153 | } | ||
3154 | |||
3125 | static irqreturn_t rt5645_irq(int irq, void *data) | 3155 | static irqreturn_t rt5645_irq(int irq, void *data) |
3126 | { | 3156 | { |
3127 | struct rt5645_priv *rt5645 = data; | 3157 | struct rt5645_priv *rt5645 = data; |
@@ -3348,6 +3378,27 @@ static const struct dmi_system_id dmi_platform_intel_braswell[] = { | |||
3348 | DMI_MATCH(DMI_PRODUCT_NAME, "Reks"), | 3378 | DMI_MATCH(DMI_PRODUCT_NAME, "Reks"), |
3349 | }, | 3379 | }, |
3350 | }, | 3380 | }, |
3381 | { | ||
3382 | .ident = "Google Edgar", | ||
3383 | .callback = strago_quirk_cb, | ||
3384 | .matches = { | ||
3385 | DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"), | ||
3386 | }, | ||
3387 | }, | ||
3388 | { | ||
3389 | .ident = "Google Wizpig", | ||
3390 | .callback = strago_quirk_cb, | ||
3391 | .matches = { | ||
3392 | DMI_MATCH(DMI_PRODUCT_NAME, "Wizpig"), | ||
3393 | }, | ||
3394 | }, | ||
3395 | { | ||
3396 | .ident = "Google Terra", | ||
3397 | .callback = strago_quirk_cb, | ||
3398 | .matches = { | ||
3399 | DMI_MATCH(DMI_PRODUCT_NAME, "Terra"), | ||
3400 | }, | ||
3401 | }, | ||
3351 | { } | 3402 | { } |
3352 | }; | 3403 | }; |
3353 | 3404 | ||
@@ -3587,6 +3638,7 @@ static int rt5645_i2c_probe(struct i2c_client *i2c, | |||
3587 | } | 3638 | } |
3588 | 3639 | ||
3589 | INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work); | 3640 | INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work); |
3641 | INIT_DELAYED_WORK(&rt5645->rcclock_work, rt5645_rcclock_work); | ||
3590 | 3642 | ||
3591 | if (rt5645->i2c->irq) { | 3643 | if (rt5645->i2c->irq) { |
3592 | ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq, | 3644 | ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq, |
@@ -3621,6 +3673,7 @@ static int rt5645_i2c_remove(struct i2c_client *i2c) | |||
3621 | free_irq(i2c->irq, rt5645); | 3673 | free_irq(i2c->irq, rt5645); |
3622 | 3674 | ||
3623 | cancel_delayed_work_sync(&rt5645->jack_detect_work); | 3675 | cancel_delayed_work_sync(&rt5645->jack_detect_work); |
3676 | cancel_delayed_work_sync(&rt5645->rcclock_work); | ||
3624 | 3677 | ||
3625 | snd_soc_unregister_codec(&i2c->dev); | 3678 | snd_soc_unregister_codec(&i2c->dev); |
3626 | regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies); | 3679 | regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies); |
diff --git a/sound/soc/codecs/rt5670.h b/sound/soc/codecs/rt5670.h index dc2b46236c5c..3f1b0f1df809 100644 --- a/sound/soc/codecs/rt5670.h +++ b/sound/soc/codecs/rt5670.h | |||
@@ -973,12 +973,12 @@ | |||
973 | #define RT5670_SCLK_SRC_MCLK (0x0 << 14) | 973 | #define RT5670_SCLK_SRC_MCLK (0x0 << 14) |
974 | #define RT5670_SCLK_SRC_PLL1 (0x1 << 14) | 974 | #define RT5670_SCLK_SRC_PLL1 (0x1 << 14) |
975 | #define RT5670_SCLK_SRC_RCCLK (0x2 << 14) /* 15MHz */ | 975 | #define RT5670_SCLK_SRC_RCCLK (0x2 << 14) /* 15MHz */ |
976 | #define RT5670_PLL1_SRC_MASK (0x3 << 12) | 976 | #define RT5670_PLL1_SRC_MASK (0x7 << 11) |
977 | #define RT5670_PLL1_SRC_SFT 12 | 977 | #define RT5670_PLL1_SRC_SFT 11 |
978 | #define RT5670_PLL1_SRC_MCLK (0x0 << 12) | 978 | #define RT5670_PLL1_SRC_MCLK (0x0 << 11) |
979 | #define RT5670_PLL1_SRC_BCLK1 (0x1 << 12) | 979 | #define RT5670_PLL1_SRC_BCLK1 (0x1 << 11) |
980 | #define RT5670_PLL1_SRC_BCLK2 (0x2 << 12) | 980 | #define RT5670_PLL1_SRC_BCLK2 (0x2 << 11) |
981 | #define RT5670_PLL1_SRC_BCLK3 (0x3 << 12) | 981 | #define RT5670_PLL1_SRC_BCLK3 (0x3 << 11) |
982 | #define RT5670_PLL1_PD_MASK (0x1 << 3) | 982 | #define RT5670_PLL1_PD_MASK (0x1 << 3) |
983 | #define RT5670_PLL1_PD_SFT 3 | 983 | #define RT5670_PLL1_PD_SFT 3 |
984 | #define RT5670_PLL1_PD_1 (0x0 << 3) | 984 | #define RT5670_PLL1_PD_1 (0x0 << 3) |
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c index b4cd7e3bf5f8..69d987a9935c 100644 --- a/sound/soc/codecs/rt5677.c +++ b/sound/soc/codecs/rt5677.c | |||
@@ -1386,90 +1386,90 @@ static const struct snd_kcontrol_new rt5677_dac_r_mix[] = { | |||
1386 | }; | 1386 | }; |
1387 | 1387 | ||
1388 | static const struct snd_kcontrol_new rt5677_sto1_dac_l_mix[] = { | 1388 | static const struct snd_kcontrol_new rt5677_sto1_dac_l_mix[] = { |
1389 | SOC_DAPM_SINGLE("ST L Switch", RT5677_STO1_DAC_MIXER, | 1389 | SOC_DAPM_SINGLE_AUTODISABLE("ST L Switch", RT5677_STO1_DAC_MIXER, |
1390 | RT5677_M_ST_DAC1_L_SFT, 1, 1), | 1390 | RT5677_M_ST_DAC1_L_SFT, 1, 1), |
1391 | SOC_DAPM_SINGLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER, | 1391 | SOC_DAPM_SINGLE_AUTODISABLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER, |
1392 | RT5677_M_DAC1_L_STO_L_SFT, 1, 1), | 1392 | RT5677_M_DAC1_L_STO_L_SFT, 1, 1), |
1393 | SOC_DAPM_SINGLE("DAC2 L Switch", RT5677_STO1_DAC_MIXER, | 1393 | SOC_DAPM_SINGLE_AUTODISABLE("DAC2 L Switch", RT5677_STO1_DAC_MIXER, |
1394 | RT5677_M_DAC2_L_STO_L_SFT, 1, 1), | 1394 | RT5677_M_DAC2_L_STO_L_SFT, 1, 1), |
1395 | SOC_DAPM_SINGLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER, | 1395 | SOC_DAPM_SINGLE_AUTODISABLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER, |
1396 | RT5677_M_DAC1_R_STO_L_SFT, 1, 1), | 1396 | RT5677_M_DAC1_R_STO_L_SFT, 1, 1), |
1397 | }; | 1397 | }; |
1398 | 1398 | ||
1399 | static const struct snd_kcontrol_new rt5677_sto1_dac_r_mix[] = { | 1399 | static const struct snd_kcontrol_new rt5677_sto1_dac_r_mix[] = { |
1400 | SOC_DAPM_SINGLE("ST R Switch", RT5677_STO1_DAC_MIXER, | 1400 | SOC_DAPM_SINGLE_AUTODISABLE("ST R Switch", RT5677_STO1_DAC_MIXER, |
1401 | RT5677_M_ST_DAC1_R_SFT, 1, 1), | 1401 | RT5677_M_ST_DAC1_R_SFT, 1, 1), |
1402 | SOC_DAPM_SINGLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER, | 1402 | SOC_DAPM_SINGLE_AUTODISABLE("DAC1 R Switch", RT5677_STO1_DAC_MIXER, |
1403 | RT5677_M_DAC1_R_STO_R_SFT, 1, 1), | 1403 | RT5677_M_DAC1_R_STO_R_SFT, 1, 1), |
1404 | SOC_DAPM_SINGLE("DAC2 R Switch", RT5677_STO1_DAC_MIXER, | 1404 | SOC_DAPM_SINGLE_AUTODISABLE("DAC2 R Switch", RT5677_STO1_DAC_MIXER, |
1405 | RT5677_M_DAC2_R_STO_R_SFT, 1, 1), | 1405 | RT5677_M_DAC2_R_STO_R_SFT, 1, 1), |
1406 | SOC_DAPM_SINGLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER, | 1406 | SOC_DAPM_SINGLE_AUTODISABLE("DAC1 L Switch", RT5677_STO1_DAC_MIXER, |
1407 | RT5677_M_DAC1_L_STO_R_SFT, 1, 1), | 1407 | RT5677_M_DAC1_L_STO_R_SFT, 1, 1), |
1408 | }; | 1408 | }; |
1409 | 1409 | ||
1410 | static const struct snd_kcontrol_new rt5677_mono_dac_l_mix[] = { | 1410 | static const struct snd_kcontrol_new rt5677_mono_dac_l_mix[] = { |
1411 | SOC_DAPM_SINGLE("ST L Switch", RT5677_MONO_DAC_MIXER, | 1411 | SOC_DAPM_SINGLE_AUTODISABLE("ST L Switch", RT5677_MONO_DAC_MIXER, |
1412 | RT5677_M_ST_DAC2_L_SFT, 1, 1), | 1412 | RT5677_M_ST_DAC2_L_SFT, 1, 1), |
1413 | SOC_DAPM_SINGLE("DAC1 L Switch", RT5677_MONO_DAC_MIXER, | 1413 | SOC_DAPM_SINGLE_AUTODISABLE("DAC1 L Switch", RT5677_MONO_DAC_MIXER, |
1414 | RT5677_M_DAC1_L_MONO_L_SFT, 1, 1), | 1414 | RT5677_M_DAC1_L_MONO_L_SFT, 1, 1), |
1415 | SOC_DAPM_SINGLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER, | 1415 | SOC_DAPM_SINGLE_AUTODISABLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER, |
1416 | RT5677_M_DAC2_L_MONO_L_SFT, 1, 1), | 1416 | RT5677_M_DAC2_L_MONO_L_SFT, 1, 1), |
1417 | SOC_DAPM_SINGLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER, | 1417 | SOC_DAPM_SINGLE_AUTODISABLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER, |
1418 | RT5677_M_DAC2_R_MONO_L_SFT, 1, 1), | 1418 | RT5677_M_DAC2_R_MONO_L_SFT, 1, 1), |
1419 | }; | 1419 | }; |
1420 | 1420 | ||
1421 | static const struct snd_kcontrol_new rt5677_mono_dac_r_mix[] = { | 1421 | static const struct snd_kcontrol_new rt5677_mono_dac_r_mix[] = { |
1422 | SOC_DAPM_SINGLE("ST R Switch", RT5677_MONO_DAC_MIXER, | 1422 | SOC_DAPM_SINGLE_AUTODISABLE("ST R Switch", RT5677_MONO_DAC_MIXER, |
1423 | RT5677_M_ST_DAC2_R_SFT, 1, 1), | 1423 | RT5677_M_ST_DAC2_R_SFT, 1, 1), |
1424 | SOC_DAPM_SINGLE("DAC1 R Switch", RT5677_MONO_DAC_MIXER, | 1424 | SOC_DAPM_SINGLE_AUTODISABLE("DAC1 R Switch", RT5677_MONO_DAC_MIXER, |
1425 | RT5677_M_DAC1_R_MONO_R_SFT, 1, 1), | 1425 | RT5677_M_DAC1_R_MONO_R_SFT, 1, 1), |
1426 | SOC_DAPM_SINGLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER, | 1426 | SOC_DAPM_SINGLE_AUTODISABLE("DAC2 R Switch", RT5677_MONO_DAC_MIXER, |
1427 | RT5677_M_DAC2_R_MONO_R_SFT, 1, 1), | 1427 | RT5677_M_DAC2_R_MONO_R_SFT, 1, 1), |
1428 | SOC_DAPM_SINGLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER, | 1428 | SOC_DAPM_SINGLE_AUTODISABLE("DAC2 L Switch", RT5677_MONO_DAC_MIXER, |
1429 | RT5677_M_DAC2_L_MONO_R_SFT, 1, 1), | 1429 | RT5677_M_DAC2_L_MONO_R_SFT, 1, 1), |
1430 | }; | 1430 | }; |
1431 | 1431 | ||
1432 | static const struct snd_kcontrol_new rt5677_dd1_l_mix[] = { | 1432 | static const struct snd_kcontrol_new rt5677_dd1_l_mix[] = { |
1433 | SOC_DAPM_SINGLE("Sto DAC Mix L Switch", RT5677_DD1_MIXER, | 1433 | SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix L Switch", RT5677_DD1_MIXER, |
1434 | RT5677_M_STO_L_DD1_L_SFT, 1, 1), | 1434 | RT5677_M_STO_L_DD1_L_SFT, 1, 1), |
1435 | SOC_DAPM_SINGLE("Mono DAC Mix L Switch", RT5677_DD1_MIXER, | 1435 | SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix L Switch", RT5677_DD1_MIXER, |
1436 | RT5677_M_MONO_L_DD1_L_SFT, 1, 1), | 1436 | RT5677_M_MONO_L_DD1_L_SFT, 1, 1), |
1437 | SOC_DAPM_SINGLE("DAC3 L Switch", RT5677_DD1_MIXER, | 1437 | SOC_DAPM_SINGLE_AUTODISABLE("DAC3 L Switch", RT5677_DD1_MIXER, |
1438 | RT5677_M_DAC3_L_DD1_L_SFT, 1, 1), | 1438 | RT5677_M_DAC3_L_DD1_L_SFT, 1, 1), |
1439 | SOC_DAPM_SINGLE("DAC3 R Switch", RT5677_DD1_MIXER, | 1439 | SOC_DAPM_SINGLE_AUTODISABLE("DAC3 R Switch", RT5677_DD1_MIXER, |
1440 | RT5677_M_DAC3_R_DD1_L_SFT, 1, 1), | 1440 | RT5677_M_DAC3_R_DD1_L_SFT, 1, 1), |
1441 | }; | 1441 | }; |
1442 | 1442 | ||
1443 | static const struct snd_kcontrol_new rt5677_dd1_r_mix[] = { | 1443 | static const struct snd_kcontrol_new rt5677_dd1_r_mix[] = { |
1444 | SOC_DAPM_SINGLE("Sto DAC Mix R Switch", RT5677_DD1_MIXER, | 1444 | SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix R Switch", RT5677_DD1_MIXER, |
1445 | RT5677_M_STO_R_DD1_R_SFT, 1, 1), | 1445 | RT5677_M_STO_R_DD1_R_SFT, 1, 1), |
1446 | SOC_DAPM_SINGLE("Mono DAC Mix R Switch", RT5677_DD1_MIXER, | 1446 | SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix R Switch", RT5677_DD1_MIXER, |
1447 | RT5677_M_MONO_R_DD1_R_SFT, 1, 1), | 1447 | RT5677_M_MONO_R_DD1_R_SFT, 1, 1), |
1448 | SOC_DAPM_SINGLE("DAC3 R Switch", RT5677_DD1_MIXER, | 1448 | SOC_DAPM_SINGLE_AUTODISABLE("DAC3 R Switch", RT5677_DD1_MIXER, |
1449 | RT5677_M_DAC3_R_DD1_R_SFT, 1, 1), | 1449 | RT5677_M_DAC3_R_DD1_R_SFT, 1, 1), |
1450 | SOC_DAPM_SINGLE("DAC3 L Switch", RT5677_DD1_MIXER, | 1450 | SOC_DAPM_SINGLE_AUTODISABLE("DAC3 L Switch", RT5677_DD1_MIXER, |
1451 | RT5677_M_DAC3_L_DD1_R_SFT, 1, 1), | 1451 | RT5677_M_DAC3_L_DD1_R_SFT, 1, 1), |
1452 | }; | 1452 | }; |
1453 | 1453 | ||
1454 | static const struct snd_kcontrol_new rt5677_dd2_l_mix[] = { | 1454 | static const struct snd_kcontrol_new rt5677_dd2_l_mix[] = { |
1455 | SOC_DAPM_SINGLE("Sto DAC Mix L Switch", RT5677_DD2_MIXER, | 1455 | SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix L Switch", RT5677_DD2_MIXER, |
1456 | RT5677_M_STO_L_DD2_L_SFT, 1, 1), | 1456 | RT5677_M_STO_L_DD2_L_SFT, 1, 1), |
1457 | SOC_DAPM_SINGLE("Mono DAC Mix L Switch", RT5677_DD2_MIXER, | 1457 | SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix L Switch", RT5677_DD2_MIXER, |
1458 | RT5677_M_MONO_L_DD2_L_SFT, 1, 1), | 1458 | RT5677_M_MONO_L_DD2_L_SFT, 1, 1), |
1459 | SOC_DAPM_SINGLE("DAC4 L Switch", RT5677_DD2_MIXER, | 1459 | SOC_DAPM_SINGLE_AUTODISABLE("DAC4 L Switch", RT5677_DD2_MIXER, |
1460 | RT5677_M_DAC4_L_DD2_L_SFT, 1, 1), | 1460 | RT5677_M_DAC4_L_DD2_L_SFT, 1, 1), |
1461 | SOC_DAPM_SINGLE("DAC4 R Switch", RT5677_DD2_MIXER, | 1461 | SOC_DAPM_SINGLE_AUTODISABLE("DAC4 R Switch", RT5677_DD2_MIXER, |
1462 | RT5677_M_DAC4_R_DD2_L_SFT, 1, 1), | 1462 | RT5677_M_DAC4_R_DD2_L_SFT, 1, 1), |
1463 | }; | 1463 | }; |
1464 | 1464 | ||
1465 | static const struct snd_kcontrol_new rt5677_dd2_r_mix[] = { | 1465 | static const struct snd_kcontrol_new rt5677_dd2_r_mix[] = { |
1466 | SOC_DAPM_SINGLE("Sto DAC Mix R Switch", RT5677_DD2_MIXER, | 1466 | SOC_DAPM_SINGLE_AUTODISABLE("Sto DAC Mix R Switch", RT5677_DD2_MIXER, |
1467 | RT5677_M_STO_R_DD2_R_SFT, 1, 1), | 1467 | RT5677_M_STO_R_DD2_R_SFT, 1, 1), |
1468 | SOC_DAPM_SINGLE("Mono DAC Mix R Switch", RT5677_DD2_MIXER, | 1468 | SOC_DAPM_SINGLE_AUTODISABLE("Mono DAC Mix R Switch", RT5677_DD2_MIXER, |
1469 | RT5677_M_MONO_R_DD2_R_SFT, 1, 1), | 1469 | RT5677_M_MONO_R_DD2_R_SFT, 1, 1), |
1470 | SOC_DAPM_SINGLE("DAC4 R Switch", RT5677_DD2_MIXER, | 1470 | SOC_DAPM_SINGLE_AUTODISABLE("DAC4 R Switch", RT5677_DD2_MIXER, |
1471 | RT5677_M_DAC4_R_DD2_R_SFT, 1, 1), | 1471 | RT5677_M_DAC4_R_DD2_R_SFT, 1, 1), |
1472 | SOC_DAPM_SINGLE("DAC4 L Switch", RT5677_DD2_MIXER, | 1472 | SOC_DAPM_SINGLE_AUTODISABLE("DAC4 L Switch", RT5677_DD2_MIXER, |
1473 | RT5677_M_DAC4_L_DD2_R_SFT, 1, 1), | 1473 | RT5677_M_DAC4_L_DD2_R_SFT, 1, 1), |
1474 | }; | 1474 | }; |
1475 | 1475 | ||
@@ -2596,6 +2596,21 @@ static int rt5677_vref_event(struct snd_soc_dapm_widget *w, | |||
2596 | return 0; | 2596 | return 0; |
2597 | } | 2597 | } |
2598 | 2598 | ||
2599 | static int rt5677_filter_power_event(struct snd_soc_dapm_widget *w, | ||
2600 | struct snd_kcontrol *kcontrol, int event) | ||
2601 | { | ||
2602 | switch (event) { | ||
2603 | case SND_SOC_DAPM_POST_PMU: | ||
2604 | msleep(50); | ||
2605 | break; | ||
2606 | |||
2607 | default: | ||
2608 | return 0; | ||
2609 | } | ||
2610 | |||
2611 | return 0; | ||
2612 | } | ||
2613 | |||
2599 | static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = { | 2614 | static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = { |
2600 | SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT, | 2615 | SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT, |
2601 | 0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU | | 2616 | 0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU | |
@@ -3072,19 +3087,26 @@ static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = { | |||
3072 | 3087 | ||
3073 | /* DAC Mixer */ | 3088 | /* DAC Mixer */ |
3074 | SND_SOC_DAPM_SUPPLY("dac stereo1 filter", RT5677_PWR_DIG2, | 3089 | SND_SOC_DAPM_SUPPLY("dac stereo1 filter", RT5677_PWR_DIG2, |
3075 | RT5677_PWR_DAC_S1F_BIT, 0, NULL, 0), | 3090 | RT5677_PWR_DAC_S1F_BIT, 0, rt5677_filter_power_event, |
3091 | SND_SOC_DAPM_POST_PMU), | ||
3076 | SND_SOC_DAPM_SUPPLY("dac mono2 left filter", RT5677_PWR_DIG2, | 3092 | SND_SOC_DAPM_SUPPLY("dac mono2 left filter", RT5677_PWR_DIG2, |
3077 | RT5677_PWR_DAC_M2F_L_BIT, 0, NULL, 0), | 3093 | RT5677_PWR_DAC_M2F_L_BIT, 0, rt5677_filter_power_event, |
3094 | SND_SOC_DAPM_POST_PMU), | ||
3078 | SND_SOC_DAPM_SUPPLY("dac mono2 right filter", RT5677_PWR_DIG2, | 3095 | SND_SOC_DAPM_SUPPLY("dac mono2 right filter", RT5677_PWR_DIG2, |
3079 | RT5677_PWR_DAC_M2F_R_BIT, 0, NULL, 0), | 3096 | RT5677_PWR_DAC_M2F_R_BIT, 0, rt5677_filter_power_event, |
3097 | SND_SOC_DAPM_POST_PMU), | ||
3080 | SND_SOC_DAPM_SUPPLY("dac mono3 left filter", RT5677_PWR_DIG2, | 3098 | SND_SOC_DAPM_SUPPLY("dac mono3 left filter", RT5677_PWR_DIG2, |
3081 | RT5677_PWR_DAC_M3F_L_BIT, 0, NULL, 0), | 3099 | RT5677_PWR_DAC_M3F_L_BIT, 0, rt5677_filter_power_event, |
3100 | SND_SOC_DAPM_POST_PMU), | ||
3082 | SND_SOC_DAPM_SUPPLY("dac mono3 right filter", RT5677_PWR_DIG2, | 3101 | SND_SOC_DAPM_SUPPLY("dac mono3 right filter", RT5677_PWR_DIG2, |
3083 | RT5677_PWR_DAC_M3F_R_BIT, 0, NULL, 0), | 3102 | RT5677_PWR_DAC_M3F_R_BIT, 0, rt5677_filter_power_event, |
3103 | SND_SOC_DAPM_POST_PMU), | ||
3084 | SND_SOC_DAPM_SUPPLY("dac mono4 left filter", RT5677_PWR_DIG2, | 3104 | SND_SOC_DAPM_SUPPLY("dac mono4 left filter", RT5677_PWR_DIG2, |
3085 | RT5677_PWR_DAC_M4F_L_BIT, 0, NULL, 0), | 3105 | RT5677_PWR_DAC_M4F_L_BIT, 0, rt5677_filter_power_event, |
3106 | SND_SOC_DAPM_POST_PMU), | ||
3086 | SND_SOC_DAPM_SUPPLY("dac mono4 right filter", RT5677_PWR_DIG2, | 3107 | SND_SOC_DAPM_SUPPLY("dac mono4 right filter", RT5677_PWR_DIG2, |
3087 | RT5677_PWR_DAC_M4F_R_BIT, 0, NULL, 0), | 3108 | RT5677_PWR_DAC_M4F_R_BIT, 0, rt5677_filter_power_event, |
3109 | SND_SOC_DAPM_POST_PMU), | ||
3088 | 3110 | ||
3089 | SND_SOC_DAPM_MIXER("Stereo DAC MIXL", SND_SOC_NOPM, 0, 0, | 3111 | SND_SOC_DAPM_MIXER("Stereo DAC MIXL", SND_SOC_NOPM, 0, 0, |
3090 | rt5677_sto1_dac_l_mix, ARRAY_SIZE(rt5677_sto1_dac_l_mix)), | 3112 | rt5677_sto1_dac_l_mix, ARRAY_SIZE(rt5677_sto1_dac_l_mix)), |
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index 056375339ea3..5380798883b5 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c | |||
@@ -229,7 +229,7 @@ SOC_DOUBLE_R_TLV("Capture Volume", WM8960_LINVOL, WM8960_RINVOL, | |||
229 | SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL, | 229 | SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL, |
230 | 6, 1, 0), | 230 | 6, 1, 0), |
231 | SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL, | 231 | SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL, |
232 | 7, 1, 0), | 232 | 7, 1, 1), |
233 | 233 | ||
234 | SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume", | 234 | SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume", |
235 | WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv), | 235 | WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv), |
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 39ebd7bf4f53..a7e79784fc16 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c | |||
@@ -365,8 +365,8 @@ static const struct reg_default wm8962_reg[] = { | |||
365 | { 16924, 0x0059 }, /* R16924 - HDBASS_PG_1 */ | 365 | { 16924, 0x0059 }, /* R16924 - HDBASS_PG_1 */ |
366 | { 16925, 0x999A }, /* R16925 - HDBASS_PG_0 */ | 366 | { 16925, 0x999A }, /* R16925 - HDBASS_PG_0 */ |
367 | 367 | ||
368 | { 17048, 0x0083 }, /* R17408 - HPF_C_1 */ | 368 | { 17408, 0x0083 }, /* R17408 - HPF_C_1 */ |
369 | { 17049, 0x98AD }, /* R17409 - HPF_C_0 */ | 369 | { 17409, 0x98AD }, /* R17409 - HPF_C_0 */ |
370 | 370 | ||
371 | { 17920, 0x007F }, /* R17920 - ADCL_RETUNE_C1_1 */ | 371 | { 17920, 0x007F }, /* R17920 - ADCL_RETUNE_C1_1 */ |
372 | { 17921, 0xFFFF }, /* R17921 - ADCL_RETUNE_C1_0 */ | 372 | { 17921, 0xFFFF }, /* R17921 - ADCL_RETUNE_C1_0 */ |
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c index 4495a40a9468..c1c9c2e3525b 100644 --- a/sound/soc/davinci/davinci-mcasp.c +++ b/sound/soc/davinci/davinci-mcasp.c | |||
@@ -681,8 +681,8 @@ static int davinci_mcasp_set_tdm_slot(struct snd_soc_dai *dai, | |||
681 | } | 681 | } |
682 | 682 | ||
683 | mcasp->tdm_slots = slots; | 683 | mcasp->tdm_slots = slots; |
684 | mcasp->tdm_mask[SNDRV_PCM_STREAM_PLAYBACK] = rx_mask; | 684 | mcasp->tdm_mask[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask; |
685 | mcasp->tdm_mask[SNDRV_PCM_STREAM_CAPTURE] = tx_mask; | 685 | mcasp->tdm_mask[SNDRV_PCM_STREAM_CAPTURE] = rx_mask; |
686 | mcasp->slot_width = slot_width; | 686 | mcasp->slot_width = slot_width; |
687 | 687 | ||
688 | return davinci_mcasp_set_ch_constraints(mcasp); | 688 | return davinci_mcasp_set_ch_constraints(mcasp); |
@@ -908,6 +908,14 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream, | |||
908 | mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD); | 908 | mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD); |
909 | mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, | 909 | mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, |
910 | FSRMOD(total_slots), FSRMOD(0x1FF)); | 910 | FSRMOD(total_slots), FSRMOD(0x1FF)); |
911 | /* | ||
912 | * If McASP is set to be TX/RX synchronous and the playback is | ||
913 | * not running already we need to configure the TX slots in | ||
914 | * order to have correct FSX on the bus | ||
915 | */ | ||
916 | if (mcasp_is_synchronous(mcasp) && !mcasp->channels) | ||
917 | mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, | ||
918 | FSXMOD(total_slots), FSXMOD(0x1FF)); | ||
911 | } | 919 | } |
912 | 920 | ||
913 | return 0; | 921 | return 0; |
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig index 19c302b0d763..14dfdee05fd5 100644 --- a/sound/soc/fsl/Kconfig +++ b/sound/soc/fsl/Kconfig | |||
@@ -283,6 +283,8 @@ config SND_SOC_IMX_MC13783 | |||
283 | config SND_SOC_FSL_ASOC_CARD | 283 | config SND_SOC_FSL_ASOC_CARD |
284 | tristate "Generic ASoC Sound Card with ASRC support" | 284 | tristate "Generic ASoC Sound Card with ASRC support" |
285 | depends on OF && I2C | 285 | depends on OF && I2C |
286 | # enforce SND_SOC_FSL_ASOC_CARD=m if SND_AC97_CODEC=m: | ||
287 | depends on SND_AC97_CODEC || SND_AC97_CODEC=n | ||
286 | select SND_SOC_IMX_AUDMUX | 288 | select SND_SOC_IMX_AUDMUX |
287 | select SND_SOC_IMX_PCM_DMA | 289 | select SND_SOC_IMX_PCM_DMA |
288 | select SND_SOC_FSL_ESAI | 290 | select SND_SOC_FSL_ESAI |
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index a4435f5e3be9..ffd5f9acc849 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c | |||
@@ -454,7 +454,8 @@ static int fsl_sai_trigger(struct snd_pcm_substream *substream, int cmd, | |||
454 | * Rx sync with Tx clocks: Clear SYNC for Tx, set it for Rx. | 454 | * Rx sync with Tx clocks: Clear SYNC for Tx, set it for Rx. |
455 | * Tx sync with Rx clocks: Clear SYNC for Rx, set it for Tx. | 455 | * Tx sync with Rx clocks: Clear SYNC for Rx, set it for Tx. |
456 | */ | 456 | */ |
457 | regmap_update_bits(sai->regmap, FSL_SAI_TCR2, FSL_SAI_CR2_SYNC, 0); | 457 | regmap_update_bits(sai->regmap, FSL_SAI_TCR2, FSL_SAI_CR2_SYNC, |
458 | sai->synchronous[TX] ? FSL_SAI_CR2_SYNC : 0); | ||
458 | regmap_update_bits(sai->regmap, FSL_SAI_RCR2, FSL_SAI_CR2_SYNC, | 459 | regmap_update_bits(sai->regmap, FSL_SAI_RCR2, FSL_SAI_CR2_SYNC, |
459 | sai->synchronous[RX] ? FSL_SAI_CR2_SYNC : 0); | 460 | sai->synchronous[RX] ? FSL_SAI_CR2_SYNC : 0); |
460 | 461 | ||
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig index 7b778ab85f8b..d430ef5a4f38 100644 --- a/sound/soc/intel/Kconfig +++ b/sound/soc/intel/Kconfig | |||
@@ -144,7 +144,7 @@ config SND_SOC_INTEL_SKYLAKE | |||
144 | 144 | ||
145 | config SND_SOC_INTEL_SKL_RT286_MACH | 145 | config SND_SOC_INTEL_SKL_RT286_MACH |
146 | tristate "ASoC Audio driver for SKL with RT286 I2S mode" | 146 | tristate "ASoC Audio driver for SKL with RT286 I2S mode" |
147 | depends on X86 && ACPI | 147 | depends on X86 && ACPI && I2C |
148 | select SND_SOC_INTEL_SST | 148 | select SND_SOC_INTEL_SST |
149 | select SND_SOC_INTEL_SKYLAKE | 149 | select SND_SOC_INTEL_SKYLAKE |
150 | select SND_SOC_RT286 | 150 | select SND_SOC_RT286 |
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c index a7854c8fc523..ffea427aeca8 100644 --- a/sound/soc/intel/skylake/skl-topology.c +++ b/sound/soc/intel/skylake/skl-topology.c | |||
@@ -1240,6 +1240,7 @@ int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus) | |||
1240 | */ | 1240 | */ |
1241 | ret = snd_soc_tplg_component_load(&platform->component, | 1241 | ret = snd_soc_tplg_component_load(&platform->component, |
1242 | &skl_tplg_ops, fw, 0); | 1242 | &skl_tplg_ops, fw, 0); |
1243 | release_firmware(fw); | ||
1243 | if (ret < 0) { | 1244 | if (ret < 0) { |
1244 | dev_err(bus->dev, "tplg component load failed%d\n", ret); | 1245 | dev_err(bus->dev, "tplg component load failed%d\n", ret); |
1245 | return -EINVAL; | 1246 | return -EINVAL; |
diff --git a/sound/soc/rockchip/rockchip_spdif.c b/sound/soc/rockchip/rockchip_spdif.c index a38a3029062c..ac72ff5055bb 100644 --- a/sound/soc/rockchip/rockchip_spdif.c +++ b/sound/soc/rockchip/rockchip_spdif.c | |||
@@ -280,7 +280,7 @@ static int rk_spdif_probe(struct platform_device *pdev) | |||
280 | int ret; | 280 | int ret; |
281 | 281 | ||
282 | match = of_match_node(rk_spdif_match, np); | 282 | match = of_match_node(rk_spdif_match, np); |
283 | if ((int) match->data == RK_SPDIF_RK3288) { | 283 | if (match->data == (void *)RK_SPDIF_RK3288) { |
284 | struct regmap *grf; | 284 | struct regmap *grf; |
285 | 285 | ||
286 | grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); | 286 | grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); |
diff --git a/sound/soc/rockchip/rockchip_spdif.h b/sound/soc/rockchip/rockchip_spdif.h index 07f86a21046a..921b4095fb92 100644 --- a/sound/soc/rockchip/rockchip_spdif.h +++ b/sound/soc/rockchip/rockchip_spdif.h | |||
@@ -28,9 +28,9 @@ | |||
28 | #define SPDIF_CFGR_VDW(x) (x << SPDIF_CFGR_VDW_SHIFT) | 28 | #define SPDIF_CFGR_VDW(x) (x << SPDIF_CFGR_VDW_SHIFT) |
29 | #define SDPIF_CFGR_VDW_MASK (0xf << SPDIF_CFGR_VDW_SHIFT) | 29 | #define SDPIF_CFGR_VDW_MASK (0xf << SPDIF_CFGR_VDW_SHIFT) |
30 | 30 | ||
31 | #define SPDIF_CFGR_VDW_16 SPDIF_CFGR_VDW(0x00) | 31 | #define SPDIF_CFGR_VDW_16 SPDIF_CFGR_VDW(0x0) |
32 | #define SPDIF_CFGR_VDW_20 SPDIF_CFGR_VDW(0x01) | 32 | #define SPDIF_CFGR_VDW_20 SPDIF_CFGR_VDW(0x1) |
33 | #define SPDIF_CFGR_VDW_24 SPDIF_CFGR_VDW(0x10) | 33 | #define SPDIF_CFGR_VDW_24 SPDIF_CFGR_VDW(0x2) |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * DMACR | 36 | * DMACR |
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c index 76da7620904c..edcf4cc2e84f 100644 --- a/sound/soc/sh/rcar/gen.c +++ b/sound/soc/sh/rcar/gen.c | |||
@@ -235,7 +235,7 @@ static int rsnd_gen2_probe(struct platform_device *pdev, | |||
235 | RSND_GEN_S_REG(SCU_SYS_STATUS0, 0x1c8), | 235 | RSND_GEN_S_REG(SCU_SYS_STATUS0, 0x1c8), |
236 | RSND_GEN_S_REG(SCU_SYS_INT_EN0, 0x1cc), | 236 | RSND_GEN_S_REG(SCU_SYS_INT_EN0, 0x1cc), |
237 | RSND_GEN_S_REG(SCU_SYS_STATUS1, 0x1d0), | 237 | RSND_GEN_S_REG(SCU_SYS_STATUS1, 0x1d0), |
238 | RSND_GEN_S_REG(SCU_SYS_INT_EN1, 0x1c4), | 238 | RSND_GEN_S_REG(SCU_SYS_INT_EN1, 0x1d4), |
239 | RSND_GEN_M_REG(SRC_SWRSR, 0x200, 0x40), | 239 | RSND_GEN_M_REG(SRC_SWRSR, 0x200, 0x40), |
240 | RSND_GEN_M_REG(SRC_SRCIR, 0x204, 0x40), | 240 | RSND_GEN_M_REG(SRC_SRCIR, 0x204, 0x40), |
241 | RSND_GEN_M_REG(SRC_ADINR, 0x214, 0x40), | 241 | RSND_GEN_M_REG(SRC_ADINR, 0x214, 0x40), |
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c index 261b50217c48..68b439ed22d7 100644 --- a/sound/soc/sh/rcar/src.c +++ b/sound/soc/sh/rcar/src.c | |||
@@ -923,6 +923,7 @@ static int rsnd_src_pcm_new_gen2(struct rsnd_mod *mod, | |||
923 | struct snd_soc_pcm_runtime *rtd) | 923 | struct snd_soc_pcm_runtime *rtd) |
924 | { | 924 | { |
925 | struct rsnd_dai *rdai = rsnd_io_to_rdai(io); | 925 | struct rsnd_dai *rdai = rsnd_io_to_rdai(io); |
926 | struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io); | ||
926 | struct rsnd_src *src = rsnd_mod_to_src(mod); | 927 | struct rsnd_src *src = rsnd_mod_to_src(mod); |
927 | int ret; | 928 | int ret; |
928 | 929 | ||
@@ -937,6 +938,12 @@ static int rsnd_src_pcm_new_gen2(struct rsnd_mod *mod, | |||
937 | return 0; | 938 | return 0; |
938 | 939 | ||
939 | /* | 940 | /* |
941 | * SRC In doesn't work if DVC was enabled | ||
942 | */ | ||
943 | if (dvc && !rsnd_io_is_play(io)) | ||
944 | return 0; | ||
945 | |||
946 | /* | ||
940 | * enable sync convert | 947 | * enable sync convert |
941 | */ | 948 | */ |
942 | ret = rsnd_kctrl_new_s(mod, io, rtd, | 949 | ret = rsnd_kctrl_new_s(mod, io, rtd, |
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 24b096066a07..a1305f827a98 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -795,12 +795,12 @@ static void soc_resume_deferred(struct work_struct *work) | |||
795 | 795 | ||
796 | dev_dbg(card->dev, "ASoC: resume work completed\n"); | 796 | dev_dbg(card->dev, "ASoC: resume work completed\n"); |
797 | 797 | ||
798 | /* userspace can access us now we are back as we were before */ | ||
799 | snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0); | ||
800 | |||
801 | /* Recheck all endpoints too, their state is affected by suspend */ | 798 | /* Recheck all endpoints too, their state is affected by suspend */ |
802 | dapm_mark_endpoints_dirty(card); | 799 | dapm_mark_endpoints_dirty(card); |
803 | snd_soc_dapm_sync(&card->dapm); | 800 | snd_soc_dapm_sync(&card->dapm); |
801 | |||
802 | /* userspace can access us now we are back as we were before */ | ||
803 | snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0); | ||
804 | } | 804 | } |
805 | 805 | ||
806 | /* powers up audio subsystem after a suspend */ | 806 | /* powers up audio subsystem after a suspend */ |
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 016eba10b1ec..7d009428934a 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -2293,6 +2293,12 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w) | |||
2293 | kfree(w); | 2293 | kfree(w); |
2294 | } | 2294 | } |
2295 | 2295 | ||
2296 | void snd_soc_dapm_reset_cache(struct snd_soc_dapm_context *dapm) | ||
2297 | { | ||
2298 | dapm->path_sink_cache.widget = NULL; | ||
2299 | dapm->path_source_cache.widget = NULL; | ||
2300 | } | ||
2301 | |||
2296 | /* free all dapm widgets and resources */ | 2302 | /* free all dapm widgets and resources */ |
2297 | static void dapm_free_widgets(struct snd_soc_dapm_context *dapm) | 2303 | static void dapm_free_widgets(struct snd_soc_dapm_context *dapm) |
2298 | { | 2304 | { |
@@ -2303,6 +2309,7 @@ static void dapm_free_widgets(struct snd_soc_dapm_context *dapm) | |||
2303 | continue; | 2309 | continue; |
2304 | snd_soc_dapm_free_widget(w); | 2310 | snd_soc_dapm_free_widget(w); |
2305 | } | 2311 | } |
2312 | snd_soc_dapm_reset_cache(dapm); | ||
2306 | } | 2313 | } |
2307 | 2314 | ||
2308 | static struct snd_soc_dapm_widget *dapm_find_widget( | 2315 | static struct snd_soc_dapm_widget *dapm_find_widget( |
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index ecd38e52285a..2f67ba6d7a8f 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c | |||
@@ -404,7 +404,7 @@ EXPORT_SYMBOL_GPL(snd_soc_get_volsw_sx); | |||
404 | /** | 404 | /** |
405 | * snd_soc_put_volsw_sx - double mixer set callback | 405 | * snd_soc_put_volsw_sx - double mixer set callback |
406 | * @kcontrol: mixer control | 406 | * @kcontrol: mixer control |
407 | * @uinfo: control element information | 407 | * @ucontrol: control element information |
408 | * | 408 | * |
409 | * Callback to set the value of a double mixer control that spans 2 registers. | 409 | * Callback to set the value of a double mixer control that spans 2 registers. |
410 | * | 410 | * |
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 8d7ec80af51b..6963ba20991c 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c | |||
@@ -531,7 +531,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr, | |||
531 | /* TLV bytes controls need standard kcontrol info handler, | 531 | /* TLV bytes controls need standard kcontrol info handler, |
532 | * TLV callback and extended put/get handlers. | 532 | * TLV callback and extended put/get handlers. |
533 | */ | 533 | */ |
534 | k->info = snd_soc_bytes_info; | 534 | k->info = snd_soc_bytes_info_ext; |
535 | k->tlv.c = snd_soc_bytes_tlv_callback; | 535 | k->tlv.c = snd_soc_bytes_tlv_callback; |
536 | 536 | ||
537 | ext_ops = tplg->bytes_ext_ops; | 537 | ext_ops = tplg->bytes_ext_ops; |
@@ -1805,6 +1805,7 @@ void snd_soc_tplg_widget_remove_all(struct snd_soc_dapm_context *dapm, | |||
1805 | snd_soc_tplg_widget_remove(w); | 1805 | snd_soc_tplg_widget_remove(w); |
1806 | snd_soc_dapm_free_widget(w); | 1806 | snd_soc_dapm_free_widget(w); |
1807 | } | 1807 | } |
1808 | snd_soc_dapm_reset_cache(dapm); | ||
1808 | } | 1809 | } |
1809 | EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all); | 1810 | EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all); |
1810 | 1811 | ||
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c index 843f037a317d..5c2bc53f0a9b 100644 --- a/sound/soc/sti/uniperif_player.c +++ b/sound/soc/sti/uniperif_player.c | |||
@@ -669,6 +669,7 @@ static int uni_player_startup(struct snd_pcm_substream *substream, | |||
669 | { | 669 | { |
670 | struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai); | 670 | struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai); |
671 | struct uniperif *player = priv->dai_data.uni; | 671 | struct uniperif *player = priv->dai_data.uni; |
672 | player->substream = substream; | ||
672 | 673 | ||
673 | player->clk_adj = 0; | 674 | player->clk_adj = 0; |
674 | 675 | ||
@@ -950,6 +951,8 @@ static void uni_player_shutdown(struct snd_pcm_substream *substream, | |||
950 | if (player->state != UNIPERIF_STATE_STOPPED) | 951 | if (player->state != UNIPERIF_STATE_STOPPED) |
951 | /* Stop the player */ | 952 | /* Stop the player */ |
952 | uni_player_stop(player); | 953 | uni_player_stop(player); |
954 | |||
955 | player->substream = NULL; | ||
953 | } | 956 | } |
954 | 957 | ||
955 | static int uni_player_parse_dt_clk_glue(struct platform_device *pdev, | 958 | static int uni_player_parse_dt_clk_glue(struct platform_device *pdev, |
@@ -989,7 +992,7 @@ static int uni_player_parse_dt(struct platform_device *pdev, | |||
989 | if (!info) | 992 | if (!info) |
990 | return -ENOMEM; | 993 | return -ENOMEM; |
991 | 994 | ||
992 | if (of_property_read_u32(pnode, "version", &player->ver) || | 995 | if (of_property_read_u32(pnode, "st,version", &player->ver) || |
993 | player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) { | 996 | player->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) { |
994 | dev_err(dev, "Unknown uniperipheral version "); | 997 | dev_err(dev, "Unknown uniperipheral version "); |
995 | return -EINVAL; | 998 | return -EINVAL; |
@@ -998,13 +1001,13 @@ static int uni_player_parse_dt(struct platform_device *pdev, | |||
998 | if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) | 1001 | if (player->ver >= SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) |
999 | info->underflow_enabled = 1; | 1002 | info->underflow_enabled = 1; |
1000 | 1003 | ||
1001 | if (of_property_read_u32(pnode, "uniperiph-id", &info->id)) { | 1004 | if (of_property_read_u32(pnode, "st,uniperiph-id", &info->id)) { |
1002 | dev_err(dev, "uniperipheral id not defined"); | 1005 | dev_err(dev, "uniperipheral id not defined"); |
1003 | return -EINVAL; | 1006 | return -EINVAL; |
1004 | } | 1007 | } |
1005 | 1008 | ||
1006 | /* Read the device mode property */ | 1009 | /* Read the device mode property */ |
1007 | if (of_property_read_string(pnode, "mode", &mode)) { | 1010 | if (of_property_read_string(pnode, "st,mode", &mode)) { |
1008 | dev_err(dev, "uniperipheral mode not defined"); | 1011 | dev_err(dev, "uniperipheral mode not defined"); |
1009 | return -EINVAL; | 1012 | return -EINVAL; |
1010 | } | 1013 | } |
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c index f791239a3087..8a0eb2050169 100644 --- a/sound/soc/sti/uniperif_reader.c +++ b/sound/soc/sti/uniperif_reader.c | |||
@@ -316,7 +316,7 @@ static int uni_reader_parse_dt(struct platform_device *pdev, | |||
316 | if (!info) | 316 | if (!info) |
317 | return -ENOMEM; | 317 | return -ENOMEM; |
318 | 318 | ||
319 | if (of_property_read_u32(node, "version", &reader->ver) || | 319 | if (of_property_read_u32(node, "st,version", &reader->ver) || |
320 | reader->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) { | 320 | reader->ver == SND_ST_UNIPERIF_VERSION_UNKNOWN) { |
321 | dev_err(&pdev->dev, "Unknown uniperipheral version "); | 321 | dev_err(&pdev->dev, "Unknown uniperipheral version "); |
322 | return -EINVAL; | 322 | return -EINVAL; |
@@ -346,7 +346,6 @@ int uni_reader_init(struct platform_device *pdev, | |||
346 | reader->hw = &uni_reader_pcm_hw; | 346 | reader->hw = &uni_reader_pcm_hw; |
347 | reader->dai_ops = &uni_reader_dai_ops; | 347 | reader->dai_ops = &uni_reader_dai_ops; |
348 | 348 | ||
349 | dev_err(reader->dev, "%s: enter\n", __func__); | ||
350 | ret = uni_reader_parse_dt(pdev, reader); | 349 | ret = uni_reader_parse_dt(pdev, reader); |
351 | if (ret < 0) { | 350 | if (ret < 0) { |
352 | dev_err(reader->dev, "Failed to parse DeviceTree"); | 351 | dev_err(reader->dev, "Failed to parse DeviceTree"); |
diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c index bcbf4da168b6..1bb896d78d09 100644 --- a/sound/soc/sunxi/sun4i-codec.c +++ b/sound/soc/sunxi/sun4i-codec.c | |||
@@ -2,6 +2,7 @@ | |||
2 | * Copyright 2014 Emilio López <emilio@elopez.com.ar> | 2 | * Copyright 2014 Emilio López <emilio@elopez.com.ar> |
3 | * Copyright 2014 Jon Smirl <jonsmirl@gmail.com> | 3 | * Copyright 2014 Jon Smirl <jonsmirl@gmail.com> |
4 | * Copyright 2015 Maxime Ripard <maxime.ripard@free-electrons.com> | 4 | * Copyright 2015 Maxime Ripard <maxime.ripard@free-electrons.com> |
5 | * Copyright 2015 Adam Sampson <ats@offog.org> | ||
5 | * | 6 | * |
6 | * Based on the Allwinner SDK driver, released under the GPL. | 7 | * Based on the Allwinner SDK driver, released under the GPL. |
7 | * | 8 | * |
@@ -404,7 +405,7 @@ static const struct snd_kcontrol_new sun4i_codec_pa_mute = | |||
404 | static DECLARE_TLV_DB_SCALE(sun4i_codec_pa_volume_scale, -6300, 100, 1); | 405 | static DECLARE_TLV_DB_SCALE(sun4i_codec_pa_volume_scale, -6300, 100, 1); |
405 | 406 | ||
406 | static const struct snd_kcontrol_new sun4i_codec_widgets[] = { | 407 | static const struct snd_kcontrol_new sun4i_codec_widgets[] = { |
407 | SOC_SINGLE_TLV("PA Volume", SUN4I_CODEC_DAC_ACTL, | 408 | SOC_SINGLE_TLV("Power Amplifier Volume", SUN4I_CODEC_DAC_ACTL, |
408 | SUN4I_CODEC_DAC_ACTL_PA_VOL, 0x3F, 0, | 409 | SUN4I_CODEC_DAC_ACTL_PA_VOL, 0x3F, 0, |
409 | sun4i_codec_pa_volume_scale), | 410 | sun4i_codec_pa_volume_scale), |
410 | }; | 411 | }; |
@@ -452,12 +453,12 @@ static const struct snd_soc_dapm_widget sun4i_codec_dapm_widgets[] = { | |||
452 | SND_SOC_DAPM_SUPPLY("Mixer Enable", SUN4I_CODEC_DAC_ACTL, | 453 | SND_SOC_DAPM_SUPPLY("Mixer Enable", SUN4I_CODEC_DAC_ACTL, |
453 | SUN4I_CODEC_DAC_ACTL_MIXEN, 0, NULL, 0), | 454 | SUN4I_CODEC_DAC_ACTL_MIXEN, 0, NULL, 0), |
454 | 455 | ||
455 | /* Pre-Amplifier */ | 456 | /* Power Amplifier */ |
456 | SND_SOC_DAPM_MIXER("Pre-Amplifier", SUN4I_CODEC_ADC_ACTL, | 457 | SND_SOC_DAPM_MIXER("Power Amplifier", SUN4I_CODEC_ADC_ACTL, |
457 | SUN4I_CODEC_ADC_ACTL_PA_EN, 0, | 458 | SUN4I_CODEC_ADC_ACTL_PA_EN, 0, |
458 | sun4i_codec_pa_mixer_controls, | 459 | sun4i_codec_pa_mixer_controls, |
459 | ARRAY_SIZE(sun4i_codec_pa_mixer_controls)), | 460 | ARRAY_SIZE(sun4i_codec_pa_mixer_controls)), |
460 | SND_SOC_DAPM_SWITCH("Pre-Amplifier Mute", SND_SOC_NOPM, 0, 0, | 461 | SND_SOC_DAPM_SWITCH("Power Amplifier Mute", SND_SOC_NOPM, 0, 0, |
461 | &sun4i_codec_pa_mute), | 462 | &sun4i_codec_pa_mute), |
462 | 463 | ||
463 | SND_SOC_DAPM_OUTPUT("HP Right"), | 464 | SND_SOC_DAPM_OUTPUT("HP Right"), |
@@ -480,16 +481,16 @@ static const struct snd_soc_dapm_route sun4i_codec_dapm_routes[] = { | |||
480 | { "Left Mixer", NULL, "Mixer Enable" }, | 481 | { "Left Mixer", NULL, "Mixer Enable" }, |
481 | { "Left Mixer", "Left DAC Playback Switch", "Left DAC" }, | 482 | { "Left Mixer", "Left DAC Playback Switch", "Left DAC" }, |
482 | 483 | ||
483 | /* Pre-Amplifier Mixer Routes */ | 484 | /* Power Amplifier Routes */ |
484 | { "Pre-Amplifier", "Mixer Playback Switch", "Left Mixer" }, | 485 | { "Power Amplifier", "Mixer Playback Switch", "Left Mixer" }, |
485 | { "Pre-Amplifier", "Mixer Playback Switch", "Right Mixer" }, | 486 | { "Power Amplifier", "Mixer Playback Switch", "Right Mixer" }, |
486 | { "Pre-Amplifier", "DAC Playback Switch", "Left DAC" }, | 487 | { "Power Amplifier", "DAC Playback Switch", "Left DAC" }, |
487 | { "Pre-Amplifier", "DAC Playback Switch", "Right DAC" }, | 488 | { "Power Amplifier", "DAC Playback Switch", "Right DAC" }, |
488 | 489 | ||
489 | /* PA -> HP path */ | 490 | /* Headphone Output Routes */ |
490 | { "Pre-Amplifier Mute", "Switch", "Pre-Amplifier" }, | 491 | { "Power Amplifier Mute", "Switch", "Power Amplifier" }, |
491 | { "HP Right", NULL, "Pre-Amplifier Mute" }, | 492 | { "HP Right", NULL, "Power Amplifier Mute" }, |
492 | { "HP Left", NULL, "Pre-Amplifier Mute" }, | 493 | { "HP Left", NULL, "Power Amplifier Mute" }, |
493 | }; | 494 | }; |
494 | 495 | ||
495 | static struct snd_soc_codec_driver sun4i_codec_codec = { | 496 | static struct snd_soc_codec_driver sun4i_codec_codec = { |
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index 40ab4476c80a..51cf8256c6cd 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c | |||
@@ -420,8 +420,7 @@ static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr) | |||
420 | 420 | ||
421 | static int nfit_test0_alloc(struct nfit_test *t) | 421 | static int nfit_test0_alloc(struct nfit_test *t) |
422 | { | 422 | { |
423 | size_t nfit_size = sizeof(struct acpi_table_nfit) | 423 | size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA |
424 | + sizeof(struct acpi_nfit_system_address) * NUM_SPA | ||
425 | + sizeof(struct acpi_nfit_memory_map) * NUM_MEM | 424 | + sizeof(struct acpi_nfit_memory_map) * NUM_MEM |
426 | + sizeof(struct acpi_nfit_control_region) * NUM_DCR | 425 | + sizeof(struct acpi_nfit_control_region) * NUM_DCR |
427 | + sizeof(struct acpi_nfit_data_region) * NUM_BDW | 426 | + sizeof(struct acpi_nfit_data_region) * NUM_BDW |
@@ -471,8 +470,7 @@ static int nfit_test0_alloc(struct nfit_test *t) | |||
471 | 470 | ||
472 | static int nfit_test1_alloc(struct nfit_test *t) | 471 | static int nfit_test1_alloc(struct nfit_test *t) |
473 | { | 472 | { |
474 | size_t nfit_size = sizeof(struct acpi_table_nfit) | 473 | size_t nfit_size = sizeof(struct acpi_nfit_system_address) |
475 | + sizeof(struct acpi_nfit_system_address) | ||
476 | + sizeof(struct acpi_nfit_memory_map) | 474 | + sizeof(struct acpi_nfit_memory_map) |
477 | + sizeof(struct acpi_nfit_control_region); | 475 | + sizeof(struct acpi_nfit_control_region); |
478 | 476 | ||
@@ -488,39 +486,24 @@ static int nfit_test1_alloc(struct nfit_test *t) | |||
488 | return 0; | 486 | return 0; |
489 | } | 487 | } |
490 | 488 | ||
491 | static void nfit_test_init_header(struct acpi_table_nfit *nfit, size_t size) | ||
492 | { | ||
493 | memcpy(nfit->header.signature, ACPI_SIG_NFIT, 4); | ||
494 | nfit->header.length = size; | ||
495 | nfit->header.revision = 1; | ||
496 | memcpy(nfit->header.oem_id, "LIBND", 6); | ||
497 | memcpy(nfit->header.oem_table_id, "TEST", 5); | ||
498 | nfit->header.oem_revision = 1; | ||
499 | memcpy(nfit->header.asl_compiler_id, "TST", 4); | ||
500 | nfit->header.asl_compiler_revision = 1; | ||
501 | } | ||
502 | |||
503 | static void nfit_test0_setup(struct nfit_test *t) | 489 | static void nfit_test0_setup(struct nfit_test *t) |
504 | { | 490 | { |
505 | struct nvdimm_bus_descriptor *nd_desc; | 491 | struct nvdimm_bus_descriptor *nd_desc; |
506 | struct acpi_nfit_desc *acpi_desc; | 492 | struct acpi_nfit_desc *acpi_desc; |
507 | struct acpi_nfit_memory_map *memdev; | 493 | struct acpi_nfit_memory_map *memdev; |
508 | void *nfit_buf = t->nfit_buf; | 494 | void *nfit_buf = t->nfit_buf; |
509 | size_t size = t->nfit_size; | ||
510 | struct acpi_nfit_system_address *spa; | 495 | struct acpi_nfit_system_address *spa; |
511 | struct acpi_nfit_control_region *dcr; | 496 | struct acpi_nfit_control_region *dcr; |
512 | struct acpi_nfit_data_region *bdw; | 497 | struct acpi_nfit_data_region *bdw; |
513 | struct acpi_nfit_flush_address *flush; | 498 | struct acpi_nfit_flush_address *flush; |
514 | unsigned int offset; | 499 | unsigned int offset; |
515 | 500 | ||
516 | nfit_test_init_header(nfit_buf, size); | ||
517 | |||
518 | /* | 501 | /* |
519 | * spa0 (interleave first half of dimm0 and dimm1, note storage | 502 | * spa0 (interleave first half of dimm0 and dimm1, note storage |
520 | * does not actually alias the related block-data-window | 503 | * does not actually alias the related block-data-window |
521 | * regions) | 504 | * regions) |
522 | */ | 505 | */ |
523 | spa = nfit_buf + sizeof(struct acpi_table_nfit); | 506 | spa = nfit_buf; |
524 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; | 507 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; |
525 | spa->header.length = sizeof(*spa); | 508 | spa->header.length = sizeof(*spa); |
526 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); | 509 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); |
@@ -533,7 +516,7 @@ static void nfit_test0_setup(struct nfit_test *t) | |||
533 | * does not actually alias the related block-data-window | 516 | * does not actually alias the related block-data-window |
534 | * regions) | 517 | * regions) |
535 | */ | 518 | */ |
536 | spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa); | 519 | spa = nfit_buf + sizeof(*spa); |
537 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; | 520 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; |
538 | spa->header.length = sizeof(*spa); | 521 | spa->header.length = sizeof(*spa); |
539 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); | 522 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16); |
@@ -542,7 +525,7 @@ static void nfit_test0_setup(struct nfit_test *t) | |||
542 | spa->length = SPA1_SIZE; | 525 | spa->length = SPA1_SIZE; |
543 | 526 | ||
544 | /* spa2 (dcr0) dimm0 */ | 527 | /* spa2 (dcr0) dimm0 */ |
545 | spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 2; | 528 | spa = nfit_buf + sizeof(*spa) * 2; |
546 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; | 529 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; |
547 | spa->header.length = sizeof(*spa); | 530 | spa->header.length = sizeof(*spa); |
548 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); | 531 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); |
@@ -551,7 +534,7 @@ static void nfit_test0_setup(struct nfit_test *t) | |||
551 | spa->length = DCR_SIZE; | 534 | spa->length = DCR_SIZE; |
552 | 535 | ||
553 | /* spa3 (dcr1) dimm1 */ | 536 | /* spa3 (dcr1) dimm1 */ |
554 | spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 3; | 537 | spa = nfit_buf + sizeof(*spa) * 3; |
555 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; | 538 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; |
556 | spa->header.length = sizeof(*spa); | 539 | spa->header.length = sizeof(*spa); |
557 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); | 540 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); |
@@ -560,7 +543,7 @@ static void nfit_test0_setup(struct nfit_test *t) | |||
560 | spa->length = DCR_SIZE; | 543 | spa->length = DCR_SIZE; |
561 | 544 | ||
562 | /* spa4 (dcr2) dimm2 */ | 545 | /* spa4 (dcr2) dimm2 */ |
563 | spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 4; | 546 | spa = nfit_buf + sizeof(*spa) * 4; |
564 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; | 547 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; |
565 | spa->header.length = sizeof(*spa); | 548 | spa->header.length = sizeof(*spa); |
566 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); | 549 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); |
@@ -569,7 +552,7 @@ static void nfit_test0_setup(struct nfit_test *t) | |||
569 | spa->length = DCR_SIZE; | 552 | spa->length = DCR_SIZE; |
570 | 553 | ||
571 | /* spa5 (dcr3) dimm3 */ | 554 | /* spa5 (dcr3) dimm3 */ |
572 | spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 5; | 555 | spa = nfit_buf + sizeof(*spa) * 5; |
573 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; | 556 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; |
574 | spa->header.length = sizeof(*spa); | 557 | spa->header.length = sizeof(*spa); |
575 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); | 558 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16); |
@@ -578,7 +561,7 @@ static void nfit_test0_setup(struct nfit_test *t) | |||
578 | spa->length = DCR_SIZE; | 561 | spa->length = DCR_SIZE; |
579 | 562 | ||
580 | /* spa6 (bdw for dcr0) dimm0 */ | 563 | /* spa6 (bdw for dcr0) dimm0 */ |
581 | spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 6; | 564 | spa = nfit_buf + sizeof(*spa) * 6; |
582 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; | 565 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; |
583 | spa->header.length = sizeof(*spa); | 566 | spa->header.length = sizeof(*spa); |
584 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); | 567 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); |
@@ -587,7 +570,7 @@ static void nfit_test0_setup(struct nfit_test *t) | |||
587 | spa->length = DIMM_SIZE; | 570 | spa->length = DIMM_SIZE; |
588 | 571 | ||
589 | /* spa7 (bdw for dcr1) dimm1 */ | 572 | /* spa7 (bdw for dcr1) dimm1 */ |
590 | spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 7; | 573 | spa = nfit_buf + sizeof(*spa) * 7; |
591 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; | 574 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; |
592 | spa->header.length = sizeof(*spa); | 575 | spa->header.length = sizeof(*spa); |
593 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); | 576 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); |
@@ -596,7 +579,7 @@ static void nfit_test0_setup(struct nfit_test *t) | |||
596 | spa->length = DIMM_SIZE; | 579 | spa->length = DIMM_SIZE; |
597 | 580 | ||
598 | /* spa8 (bdw for dcr2) dimm2 */ | 581 | /* spa8 (bdw for dcr2) dimm2 */ |
599 | spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 8; | 582 | spa = nfit_buf + sizeof(*spa) * 8; |
600 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; | 583 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; |
601 | spa->header.length = sizeof(*spa); | 584 | spa->header.length = sizeof(*spa); |
602 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); | 585 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); |
@@ -605,7 +588,7 @@ static void nfit_test0_setup(struct nfit_test *t) | |||
605 | spa->length = DIMM_SIZE; | 588 | spa->length = DIMM_SIZE; |
606 | 589 | ||
607 | /* spa9 (bdw for dcr3) dimm3 */ | 590 | /* spa9 (bdw for dcr3) dimm3 */ |
608 | spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 9; | 591 | spa = nfit_buf + sizeof(*spa) * 9; |
609 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; | 592 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; |
610 | spa->header.length = sizeof(*spa); | 593 | spa->header.length = sizeof(*spa); |
611 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); | 594 | memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16); |
@@ -613,7 +596,7 @@ static void nfit_test0_setup(struct nfit_test *t) | |||
613 | spa->address = t->dimm_dma[3]; | 596 | spa->address = t->dimm_dma[3]; |
614 | spa->length = DIMM_SIZE; | 597 | spa->length = DIMM_SIZE; |
615 | 598 | ||
616 | offset = sizeof(struct acpi_table_nfit) + sizeof(*spa) * 10; | 599 | offset = sizeof(*spa) * 10; |
617 | /* mem-region0 (spa0, dimm0) */ | 600 | /* mem-region0 (spa0, dimm0) */ |
618 | memdev = nfit_buf + offset; | 601 | memdev = nfit_buf + offset; |
619 | memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; | 602 | memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP; |
@@ -1100,15 +1083,13 @@ static void nfit_test0_setup(struct nfit_test *t) | |||
1100 | 1083 | ||
1101 | static void nfit_test1_setup(struct nfit_test *t) | 1084 | static void nfit_test1_setup(struct nfit_test *t) |
1102 | { | 1085 | { |
1103 | size_t size = t->nfit_size, offset; | 1086 | size_t offset; |
1104 | void *nfit_buf = t->nfit_buf; | 1087 | void *nfit_buf = t->nfit_buf; |
1105 | struct acpi_nfit_memory_map *memdev; | 1088 | struct acpi_nfit_memory_map *memdev; |
1106 | struct acpi_nfit_control_region *dcr; | 1089 | struct acpi_nfit_control_region *dcr; |
1107 | struct acpi_nfit_system_address *spa; | 1090 | struct acpi_nfit_system_address *spa; |
1108 | 1091 | ||
1109 | nfit_test_init_header(nfit_buf, size); | 1092 | offset = 0; |
1110 | |||
1111 | offset = sizeof(struct acpi_table_nfit); | ||
1112 | /* spa0 (flat range with no bdw aliasing) */ | 1093 | /* spa0 (flat range with no bdw aliasing) */ |
1113 | spa = nfit_buf + offset; | 1094 | spa = nfit_buf + offset; |
1114 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; | 1095 | spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS; |